get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch in full.

GET /api/patches/118819/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118819,
    "url": "http://patches.dpdk.org/api/patches/118819/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221020155749.16643-12-valex@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221020155749.16643-12-valex@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221020155749.16643-12-valex@nvidia.com",
    "date": "2022-10-20T15:57:41",
    "name": "[v6,11/18] net/mlx5/hws: Add HWS definer layer",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "07cec0dd5fa33770dbf2fb92687e7486b3fb7509",
    "submitter": {
        "id": 2858,
        "url": "http://patches.dpdk.org/api/people/2858/?format=api",
        "name": "Alex Vesker",
        "email": "valex@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221020155749.16643-12-valex@nvidia.com/mbox/",
    "series": [
        {
            "id": 25345,
            "url": "http://patches.dpdk.org/api/series/25345/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=25345",
            "date": "2022-10-20T15:57:30",
            "name": "net/mlx5: Add HW steering low level support",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/25345/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/118819/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/118819/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 7094FA0553;\n\tThu, 20 Oct 2022 18:00:33 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 7AC5042BC1;\n\tThu, 20 Oct 2022 17:59:33 +0200 (CEST)",
            "from NAM02-SN1-obe.outbound.protection.outlook.com\n (mail-sn1anam02on2046.outbound.protection.outlook.com [40.107.96.46])\n by mails.dpdk.org (Postfix) with ESMTP id 0E13542B70\n for <dev@dpdk.org>; Thu, 20 Oct 2022 17:59:32 +0200 (CEST)",
            "from BN0PR04CA0158.namprd04.prod.outlook.com (2603:10b6:408:eb::13)\n by DM4PR12MB6184.namprd12.prod.outlook.com (2603:10b6:8:a6::8) with\n Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5723.35; Thu, 20 Oct 2022 15:59:28 +0000",
            "from BN8NAM11FT037.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:eb:cafe::25) by BN0PR04CA0158.outlook.office365.com\n (2603:10b6:408:eb::13) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5723.35 via Frontend\n Transport; Thu, 20 Oct 2022 15:59:28 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n BN8NAM11FT037.mail.protection.outlook.com (10.13.177.182) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5746.16 via Frontend Transport; Thu, 20 Oct 2022 15:59:28 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Thu, 20 Oct\n 2022 08:59:15 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Thu, 20 Oct\n 2022 08:59:12 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=e1z9hU4m83rX84gX4usxEXybWM+d6U/Fo1X9lW+kefZC0qUpNX5Q5Z7SzU7D79CDC87si/baFauVYPUqucZQ1gsrt7rCm76wwlU5Finv5/eWel+rNYLyoDGO890EJjpJUbYPzrhIdjZN9Bx6wp0b4d7daOr0gkVhC+r0KTcXEp+unbOEFvfyqGOa/7Xf5QQok7ofB89f05Yl4Bza7bGTlavqS7SWDeIcHVTChUoXCcUULXM8WzUhcy+wsbaE575QLVaHYS+FdZbZfbygSuFqgzUo86xbNm/7T+0aNyMBtDGkjm2HzweT0Z/akIzIUj46adWx8IMDQkQY/lXW3r2/FQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=Ygze3b5gZi26rOeJaC1qM+yL1qBEjCDaSNsSp+4d0DA=;\n b=LXqtCSLliUwtSA5NzxwlOfvAMMOAfpbMbVsNOVuMomv0KsJV64/zo3rBRl1YLTYGYc9VypsqIJOTH2KsbibdNuM8NFZWISlWCeVCcSyxUjaCRXpy6Sy2I44nDf7Yv7U75JVVNGwTdh1Ja/sV378OpNslKkQ1F1JX0YAUzM8/1PaXdopVjkI9TbFPvIxfd/Wvc+qm1Hn4Ex3H2EI+8soHQyKIxm91txqZyT1GFNCHYDtIUUnc5L2wOEnTiLC7Mq/TeYDaST8JKT0BS1xIkXrGZuAsEZomOIxf0swZ2y9K7yR9zP2UqQqtTh4nspTLCdcf06SuqG6UkNxtOqW+c0G+Lw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=none (sender ip is\n 216.228.117.161) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=fail (p=reject sp=reject pct=100) action=oreject\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=Ygze3b5gZi26rOeJaC1qM+yL1qBEjCDaSNsSp+4d0DA=;\n b=Zbrt12yD1C2+CIfoHaEwLBg3y12Pulyc3JRlRUIpccUcfPSDTFVeGJpLOI+e0pCibc/MKhjWjLnMH+XaIAHk2bav7Yx0f3p0wLm89YXin/ggWlxoyeBhfuAKo3Q5i1MSt3RuCTDqDvciQ6I0oB2mcVKVE1N1lraoJcPQGTfppRRddcxRAlXPdZ+a/nc7XixSvru6ELmiATRrvOWydeV9ODU5o/nAldL6JnP/oYVFxxfWrEb6lbvcw7oFYYox6kbe4TCNBiIL/KDpqA045iItEe5lA+wYT46ZnAQDWhxRuA8uIEsP1nIleQ0ijbcon9TXII0DvXM7W8fG0MOE0cAbhA==",
        "X-MS-Exchange-Authentication-Results": "spf=none (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=fail action=oreject header.from=nvidia.com;",
        "Received-SPF": "None (protection.outlook.com: nvidia.com does not designate\n permitted sender hosts)",
        "From": "Alex Vesker <valex@nvidia.com>",
        "To": "<valex@nvidia.com>, <viacheslavo@nvidia.com>, <thomas@monjalon.net>,\n <suanmingm@nvidia.com>, Ferruh Yigit <ferruh.yigit@amd.com>, Matan Azrad\n <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, <orika@nvidia.com>, Mark Bloch <mbloch@nvidia.com>",
        "Subject": "[v6 11/18] net/mlx5/hws: Add HWS definer layer",
        "Date": "Thu, 20 Oct 2022 18:57:41 +0300",
        "Message-ID": "<20221020155749.16643-12-valex@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20221020155749.16643-1-valex@nvidia.com>",
        "References": "<20220922190345.394-1-valex@nvidia.com>\n <20221020155749.16643-1-valex@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "BN8NAM11FT037:EE_|DM4PR12MB6184:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "f546c4ad-7c3b-4bff-a497-08dab2b41176",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n e7wBp18v5ltpnxvF+HIDMUOfJL1QNitiMC2Wmq8Uacz890XObxeXvg5dR3RHN/FE2ufJ+QhGY0tp4fbpFGoo0IZE3Aw9Nz016wBY4ylWZJCyRJWqC33FTZa+iXtIz/cVhfLxLCzA5i6UCKNOORs9ytz0cZQIRrofDlRkoc2VLwy5y/u4BUW5DvzjbivsWbvIkvR/7//8qjiEyN9K+SK7ZPZ5QMcI5+uv2zslLcLaZaUFo/0NZ8/Lj/WxLNmfeW6TfpH0j/+6yvg8Nq2JZJFpUb3FSzheEu1VfjxcQPxkyMrORpZP9WAcE8R4H9NeLBGeaZkDpiOxik0IDrPiLbC4PoZEG5iJE7HL5jlIs9NlTLhRYjtS97QF6KC1DeVjKYco5DaTvTRgTNXNueXIO6hxvYwV4esC2fM3+e6MmJUb5Wj2smiDnFOjFcVVDU19SjpYKce1PrnBtb5AXzFSTEgkEguZNktwuQCwcUxVNCJMEeRiYnxnhUMXgV2ZFjc3he89REhbkPNkVoQxLPnMbfr/GdUZKpyRpCPSoYQDM5d3JXLwR+lEedaCXSiPaLOoHDQpDpkIKWYz7lBpjBnsXHd/BGn4T9WmI+JUi/bozel8VVWknxKHnhnQc4gA3nS6k1w28pP0h/1TZjlEzD0H5O07ncSKHouWpV4ZEH1lmSK5o1ThVcg2V5pIFDI7b+KXkQ5Z+H961RZ7wKK2lbsDv7KiqjQjqQ8xbyfyPjW7hnJJfUSYQVX/u94iuz86D+k0E1NbxsZS52MHjd1W5bP02K6uP9uZtFk8aulym+ap1eH6S14=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(136003)(376002)(396003)(346002)(39860400002)(451199015)(40470700004)(46966006)(36840700001)(356005)(7636003)(82740400003)(36756003)(55016003)(86362001)(40460700003)(107886003)(40480700001)(30864003)(2906002)(6666004)(41300700001)(478600001)(110136005)(316002)(70206006)(4326008)(8676002)(8936002)(6636002)(54906003)(5660300002)(83380400001)(82310400005)(6286002)(26005)(70586007)(7696005)(36860700001)(1076003)(336012)(2616005)(16526019)(186003)(47076005)(426003)(579004)(559001)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "20 Oct 2022 15:59:28.0848 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n f546c4ad-7c3b-4bff-a497-08dab2b41176",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT037.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM4PR12MB6184",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Definers are HW objects that are used for matching, rte items\nare translated to definers, each definer holds the fields and\nbit-masks used for HW flow matching. The definer layer is used\nfor finding the most efficient definer for each set of items.\nIn addition to definer creation we also calculate the field\ncopy (fc) array used for efficient items to WQE conversion.\n\nSigned-off-by: Mark Bloch <mbloch@nvidia.com>\nSigned-off-by: Alex Vesker <valex@nvidia.com>\n---\n doc/guides/nics/features/default.ini  |    1 +\n doc/guides/nics/features/mlx5.ini     |    1 +\n drivers/net/mlx5/hws/mlx5dr_definer.c | 1968 +++++++++++++++++++++++++\n drivers/net/mlx5/hws/mlx5dr_definer.h |  585 ++++++++\n 4 files changed, 2555 insertions(+)\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_definer.c\n create mode 100644 drivers/net/mlx5/hws/mlx5dr_definer.h",
    "diff": "diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini\nindex 27f1a70a87..67ba3567c2 100644\n--- a/doc/guides/nics/features/default.ini\n+++ b/doc/guides/nics/features/default.ini\n@@ -140,6 +140,7 @@ udp                  =\n vlan                 =\n vxlan                =\n vxlan_gpe            =\n+meter_color          =\n \n [rte_flow actions]\n age                  =\ndiff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini\nindex 8697515385..b129f5787d 100644\n--- a/doc/guides/nics/features/mlx5.ini\n+++ b/doc/guides/nics/features/mlx5.ini\n@@ -84,6 +84,7 @@ vlan                 = Y\n vxlan                = Y\n vxlan_gpe            = Y\n represented_port     = Y\n+meter_color          = Y\n \n [rte_flow actions]\n age                  = I\ndiff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c\nnew file mode 100644\nindex 0000000000..6b98eb8c96\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c\n@@ -0,0 +1,1968 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#include \"mlx5dr_internal.h\"\n+\n+#define GTP_PDU_SC\t0x85\n+#define BAD_PORT\t0xBAD\n+#define ETH_TYPE_IPV4_VXLAN\t0x0800\n+#define ETH_TYPE_IPV6_VXLAN\t0x86DD\n+#define ETH_VXLAN_DEFAULT_PORT\t4789\n+\n+#define STE_NO_VLAN\t0x0\n+#define STE_SVLAN\t0x1\n+#define STE_CVLAN\t0x2\n+#define STE_IPV4\t0x1\n+#define STE_IPV6\t0x2\n+#define STE_TCP\t\t0x1\n+#define STE_UDP\t\t0x2\n+#define STE_ICMP\t0x3\n+\n+/* Setter function based on bit offset and mask, for 32bit DW*/\n+#define _DR_SET_32(p, v, byte_off, bit_off, mask) \\\n+\tdo { \\\n+\t\tu32 _v = v; \\\n+\t\t*((rte_be32_t *)(p) + ((byte_off) / 4)) = \\\n+\t\trte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \\\n+\t\t\t\t  ((byte_off) / 4))) & \\\n+\t\t\t\t  (~((mask) << (bit_off)))) | \\\n+\t\t\t\t (((_v) & (mask)) << \\\n+\t\t\t\t  (bit_off))); \\\n+\t} while (0)\n+\n+/* Setter function based on bit offset and mask */\n+#define DR_SET(p, v, byte_off, bit_off, mask) \\\n+\tdo { \\\n+\t\tif (unlikely((bit_off) < 0)) { \\\n+\t\t\tu32 _bit_off = -1 * (bit_off); \\\n+\t\t\tu32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \\\n+\t\t\t_DR_SET_32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \\\n+\t\t\t_DR_SET_32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \\\n+\t\t\t\t   (bit_off) % BITS_IN_DW, second_dw_mask); \\\n+\t\t} else { \\\n+\t\t\t_DR_SET_32(p, v, byte_off, (bit_off), (mask)); \\\n+\t\t} \\\n+\t} while (0)\n+\n+/* Setter function based on byte offset to directly set FULL BE32 value  */\n+#define DR_SET_BE32(p, v, byte_off, bit_off, mask) \\\n+\t(*((rte_be32_t *)((uint8_t *)(p) + (byte_off))) = (v))\n+\n+/* Setter function based on byte offset to directly set FULL BE32 value from ptr  */\n+#define DR_SET_BE32P(p, v_ptr, byte_off, bit_off, mask) \\\n+\tmemcpy((uint8_t *)(p) + (byte_off), v_ptr, 4)\n+\n+/* Setter function based on byte offset to directly set FULL BE16 value  */\n+#define DR_SET_BE16(p, v, byte_off, bit_off, mask) \\\n+\t(*((rte_be16_t *)((uint8_t *)(p) + (byte_off))) = (v))\n+\n+/* Setter function based on byte offset to directly set FULL BE16 value from ptr  */\n+#define DR_SET_BE16P(p, v_ptr, byte_off, bit_off, mask) \\\n+\tmemcpy((uint8_t *)(p) + (byte_off), v_ptr, 2)\n+\n+#define DR_CALC_FNAME(field, inner) \\\n+\t((inner) ? 
MLX5DR_DEFINER_FNAME_##field##_I : \\\n+\t\t   MLX5DR_DEFINER_FNAME_##field##_O)\n+\n+#define DR_CALC_SET_HDR(fc, hdr, field) \\\n+\tdo { \\\n+\t\t(fc)->bit_mask = __mlx5_mask(definer_hl, hdr.field); \\\n+\t\t(fc)->bit_off = __mlx5_dw_bit_off(definer_hl, hdr.field); \\\n+\t\t(fc)->byte_off = MLX5_BYTE_OFF(definer_hl, hdr.field); \\\n+\t} while (0)\n+\n+/* Helper to calculate data used by DR_SET */\n+#define DR_CALC_SET(fc, hdr, field, is_inner) \\\n+\tdo { \\\n+\t\tif (is_inner) { \\\n+\t\t\tDR_CALC_SET_HDR(fc, hdr##_inner, field); \\\n+\t\t} else { \\\n+\t\t\tDR_CALC_SET_HDR(fc, hdr##_outer, field); \\\n+\t\t} \\\n+\t} while (0)\n+\n+ #define DR_GET(typ, p, fld) \\\n+\t((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + \\\n+\t__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \\\n+\t__mlx5_mask(typ, fld))\n+\n+struct mlx5dr_definer_sel_ctrl {\n+\tuint8_t allowed_full_dw; /* Full DW selectors cover all offsets */\n+\tuint8_t allowed_lim_dw;  /* Limited DW selectors cover offset < 64 */\n+\tuint8_t allowed_bytes;   /* Bytes selectors, up to offset 255 */\n+\tuint8_t used_full_dw;\n+\tuint8_t used_lim_dw;\n+\tuint8_t used_bytes;\n+\tuint8_t full_dw_selector[DW_SELECTORS];\n+\tuint8_t lim_dw_selector[DW_SELECTORS_LIMITED];\n+\tuint8_t byte_selector[BYTE_SELECTORS];\n+};\n+\n+struct mlx5dr_definer_conv_data {\n+\tstruct mlx5dr_cmd_query_caps *caps;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tuint8_t relaxed;\n+\tuint8_t tunnel;\n+\tuint8_t *hl;\n+};\n+\n+/* Xmacro used to create generic item setter from items */\n+#define LIST_OF_FIELDS_INFO \\\n+\tX(SET_BE16,\teth_type,\t\tv->type,\t\trte_flow_item_eth) \\\n+\tX(SET_BE32P,\teth_smac_47_16,\t\t&v->src.addr_bytes[0],\trte_flow_item_eth) \\\n+\tX(SET_BE16P,\teth_smac_15_0,\t\t&v->src.addr_bytes[4],\trte_flow_item_eth) \\\n+\tX(SET_BE32P,\teth_dmac_47_16,\t\t&v->dst.addr_bytes[0],\trte_flow_item_eth) \\\n+\tX(SET_BE16P,\teth_dmac_15_0,\t\t&v->dst.addr_bytes[4],\trte_flow_item_eth) \\\n+\tX(SET_BE16,\ttci,\t\t\tv->tci,\t\t\trte_flow_item_vlan) \\\n+\tX(SET,\t\tipv4_ihl,\t\tv->ihl,\t\t\trte_ipv4_hdr) \\\n+\tX(SET,\t\tipv4_tos,\t\tv->type_of_service,\trte_ipv4_hdr) \\\n+\tX(SET,\t\tipv4_time_to_live,\tv->time_to_live,\trte_ipv4_hdr) \\\n+\tX(SET_BE32,\tipv4_dst_addr,\t\tv->dst_addr,\t\trte_ipv4_hdr) \\\n+\tX(SET_BE32,\tipv4_src_addr,\t\tv->src_addr,\t\trte_ipv4_hdr) \\\n+\tX(SET,\t\tipv4_next_proto,\tv->next_proto_id,\trte_ipv4_hdr) \\\n+\tX(SET,\t\tipv4_version,\t\tSTE_IPV4,\t\trte_ipv4_hdr) \\\n+\tX(SET_BE16,\tipv4_frag,\t\tv->fragment_offset,\trte_ipv4_hdr) \\\n+\tX(SET_BE16,\tipv6_payload_len,\tv->hdr.payload_len,\trte_flow_item_ipv6) \\\n+\tX(SET,\t\tipv6_proto,\t\tv->hdr.proto,\t\trte_flow_item_ipv6) \\\n+\tX(SET,\t\tipv6_hop_limits,\tv->hdr.hop_limits,\trte_flow_item_ipv6) \\\n+\tX(SET_BE32P,\tipv6_src_addr_127_96,\t&v->hdr.src_addr[0],\trte_flow_item_ipv6) \\\n+\tX(SET_BE32P,\tipv6_src_addr_95_64,\t&v->hdr.src_addr[4],\trte_flow_item_ipv6) \\\n+\tX(SET_BE32P,\tipv6_src_addr_63_32,\t&v->hdr.src_addr[8],\trte_flow_item_ipv6) \\\n+\tX(SET_BE32P,\tipv6_src_addr_31_0,\t&v->hdr.src_addr[12],\trte_flow_item_ipv6) \\\n+\tX(SET_BE32P,\tipv6_dst_addr_127_96,\t&v->hdr.dst_addr[0],\trte_flow_item_ipv6) \\\n+\tX(SET_BE32P,\tipv6_dst_addr_95_64,\t&v->hdr.dst_addr[4],\trte_flow_item_ipv6) \\\n+\tX(SET_BE32P,\tipv6_dst_addr_63_32,\t&v->hdr.dst_addr[8],\trte_flow_item_ipv6) \\\n+\tX(SET_BE32P,\tipv6_dst_addr_31_0,\t&v->hdr.dst_addr[12],\trte_flow_item_ipv6) \\\n+\tX(SET,\t\tipv6_version,\t\tSTE_IPV6,\t\trte_flow_item_ipv6) 
\\\n+\tX(SET,\t\tipv6_frag,\t\tv->has_frag_ext,\trte_flow_item_ipv6) \\\n+\tX(SET,\t\ticmp_protocol,\t\tSTE_ICMP,\t\trte_flow_item_icmp) \\\n+\tX(SET,\t\tudp_protocol,\t\tSTE_UDP,\t\trte_flow_item_udp) \\\n+\tX(SET_BE16,\tudp_src_port,\t\tv->hdr.src_port,\trte_flow_item_udp) \\\n+\tX(SET_BE16,\tudp_dst_port,\t\tv->hdr.dst_port,\trte_flow_item_udp) \\\n+\tX(SET,\t\ttcp_flags,\t\tv->hdr.tcp_flags,\trte_flow_item_tcp) \\\n+\tX(SET,\t\ttcp_protocol,\t\tSTE_TCP,\t\trte_flow_item_tcp) \\\n+\tX(SET_BE16,\ttcp_src_port,\t\tv->hdr.src_port,\trte_flow_item_tcp) \\\n+\tX(SET_BE16,\ttcp_dst_port,\t\tv->hdr.dst_port,\trte_flow_item_tcp) \\\n+\tX(SET,\t\tgtp_udp_port,\t\tRTE_GTPU_UDP_PORT,\trte_flow_item_gtp) \\\n+\tX(SET_BE32,\tgtp_teid,\t\tv->teid,\t\trte_flow_item_gtp) \\\n+\tX(SET,\t\tgtp_msg_type,\t\tv->msg_type,\t\trte_flow_item_gtp) \\\n+\tX(SET,\t\tgtp_ext_flag,\t\t!!v->v_pt_rsv_flags,\trte_flow_item_gtp) \\\n+\tX(SET,\t\tgtp_next_ext_hdr,\tGTP_PDU_SC,\t\trte_flow_item_gtp_psc) \\\n+\tX(SET,\t\tgtp_ext_hdr_pdu,\tv->hdr.type,\t\trte_flow_item_gtp_psc) \\\n+\tX(SET,\t\tgtp_ext_hdr_qfi,\tv->hdr.qfi,\t\trte_flow_item_gtp_psc) \\\n+\tX(SET,\t\tvxlan_flags,\t\tv->flags,\t\trte_flow_item_vxlan) \\\n+\tX(SET,\t\tvxlan_udp_port,\t\tETH_VXLAN_DEFAULT_PORT,\trte_flow_item_vxlan) \\\n+\tX(SET,\t\tsource_qp,\t\tv->queue,\t\tmlx5_rte_flow_item_sq) \\\n+\tX(SET,\t\ttag,\t\t\tv->data,\t\trte_flow_item_tag) \\\n+\tX(SET,\t\tmetadata,\t\tv->data,\t\trte_flow_item_meta) \\\n+\tX(SET_BE16,\tgre_c_ver,\t\tv->c_rsvd0_ver,\t\trte_flow_item_gre) \\\n+\tX(SET_BE16,\tgre_protocol_type,\tv->protocol,\t\trte_flow_item_gre) \\\n+\tX(SET,\t\tipv4_protocol_gre,\tIPPROTO_GRE,\t\trte_flow_item_gre) \\\n+\tX(SET_BE32,\tgre_opt_key,\t\tv->key.key,\t\trte_flow_item_gre_opt) \\\n+\tX(SET_BE32,\tgre_opt_seq,\t\tv->sequence.sequence,\trte_flow_item_gre_opt) \\\n+\tX(SET_BE16,\tgre_opt_checksum,\tv->checksum_rsvd.checksum,\trte_flow_item_gre_opt) \\\n+\tX(SET,\t\tmeter_color,\t\trte_col_2_mlx5_col(v->color),\trte_flow_item_meter_color)\n+\n+/* Item set function format */\n+#define X(set_type, func_name, value, item_type) \\\n+static void mlx5dr_definer_##func_name##_set( \\\n+\tstruct mlx5dr_definer_fc *fc, \\\n+\tconst void *item_spec, \\\n+\tuint8_t *tag) \\\n+{ \\\n+\t__rte_unused const struct item_type *v = item_spec; \\\n+\tDR_##set_type(tag, value, fc->byte_off, fc->bit_off, fc->bit_mask); \\\n+}\n+LIST_OF_FIELDS_INFO\n+#undef X\n+\n+static void\n+mlx5dr_definer_ones_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t__rte_unused const void *item_spec,\n+\t\t\t__rte_unused uint8_t *tag)\n+{\n+\tDR_SET(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_eth_first_vlan_q_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t\t    const void *item_spec,\n+\t\t\t\t    uint8_t *tag)\n+{\n+\tconst struct rte_flow_item_eth *v = item_spec;\n+\tuint8_t vlan_type;\n+\n+\tvlan_type = v->has_vlan ? STE_CVLAN : STE_NO_VLAN;\n+\n+\tDR_SET(tag, vlan_type, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_first_vlan_q_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t\tconst void *item_spec,\n+\t\t\t\tuint8_t *tag)\n+{\n+\tconst struct rte_flow_item_vlan *v = item_spec;\n+\tuint8_t vlan_type;\n+\n+\tvlan_type = v->has_more_vlan ? 
STE_SVLAN : STE_CVLAN;\n+\n+\tDR_SET(tag, vlan_type, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_conntrack_mask(struct mlx5dr_definer_fc *fc,\n+\t\t\t      const void *item_spec,\n+\t\t\t      uint8_t *tag)\n+{\n+\tconst struct rte_flow_item_conntrack *m = item_spec;\n+\tuint32_t reg_mask = 0;\n+\n+\tif (m->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |\n+\t\t\tRTE_FLOW_CONNTRACK_PKT_STATE_INVALID |\n+\t\t\tRTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))\n+\t\treg_mask |= (MLX5_CT_SYNDROME_VALID | MLX5_CT_SYNDROME_INVALID |\n+\t\t\t     MLX5_CT_SYNDROME_TRAP);\n+\n+\tif (m->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)\n+\t\treg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;\n+\n+\tif (m->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)\n+\t\treg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;\n+\n+\tDR_SET(tag, reg_mask, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_conntrack_tag(struct mlx5dr_definer_fc *fc,\n+\t\t\t     const void *item_spec,\n+\t\t\t     uint8_t *tag)\n+{\n+\tconst struct rte_flow_item_conntrack *v = item_spec;\n+\tuint32_t reg_value = 0;\n+\n+\t/* The conflict should be checked in the validation. */\n+\tif (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)\n+\t\treg_value |= MLX5_CT_SYNDROME_VALID;\n+\n+\tif (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)\n+\t\treg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;\n+\n+\tif (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)\n+\t\treg_value |= MLX5_CT_SYNDROME_INVALID;\n+\n+\tif (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)\n+\t\treg_value |= MLX5_CT_SYNDROME_TRAP;\n+\n+\tif (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)\n+\t\treg_value |= MLX5_CT_SYNDROME_BAD_PACKET;\n+\n+\tDR_SET(tag, reg_value, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t     const void *item_spec,\n+\t\t\t     uint8_t *tag)\n+{\n+\tbool inner = (fc->fname == MLX5DR_DEFINER_FNAME_INTEGRITY_I);\n+\tconst struct rte_flow_item_integrity *v = item_spec;\n+\tuint32_t ok1_bits = 0;\n+\n+\tif (v->l3_ok)\n+\t\tok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) |\n+\t\t\t\t    BIT(MLX5DR_DEFINER_OKS1_SECOND_IPV4_CSUM_OK) :\n+\t\t\t\t    BIT(MLX5DR_DEFINER_OKS1_FIRST_L3_OK) |\n+\t\t\t\t    BIT(MLX5DR_DEFINER_OKS1_FIRST_IPV4_CSUM_OK);\n+\n+\tif (v->ipv4_csum_ok)\n+\t\tok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_IPV4_CSUM_OK) :\n+\t\t\t\t    BIT(MLX5DR_DEFINER_OKS1_FIRST_IPV4_CSUM_OK);\n+\n+\tif (v->l4_ok)\n+\t\tok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_OK) |\n+\t\t\t\t    BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_CSUM_OK) :\n+\t\t\t\t    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_OK) |\n+\t\t\t\t    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_CSUM_OK);\n+\n+\tif (v->l4_csum_ok)\n+\t\tok1_bits |= inner ? 
BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_CSUM_OK) :\n+\t\t\t\t    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_CSUM_OK);\n+\n+\tDR_SET(tag, ok1_bits, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t   const void *item_spec,\n+\t\t\t   uint8_t *tag)\n+{\n+\tconst rte_be32_t *v = item_spec;\n+\n+\tDR_SET_BE32(tag, *v, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_vxlan_vni_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t     const void *item_spec,\n+\t\t\t     uint8_t *tag)\n+{\n+\tconst struct rte_flow_item_vxlan *v = item_spec;\n+\n+\tmemcpy(tag + fc->byte_off, v->vni, sizeof(v->vni));\n+}\n+\n+static void\n+mlx5dr_definer_ipv6_tos_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t    const void *item_spec,\n+\t\t\t    uint8_t *tag)\n+{\n+\tconst struct rte_flow_item_ipv6 *v = item_spec;\n+\tuint8_t tos = DR_GET(header_ipv6_vtc, &v->hdr.vtc_flow, tos);\n+\n+\tDR_SET(tag, tos, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_icmp_dw1_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t    const void *item_spec,\n+\t\t\t    uint8_t *tag)\n+{\n+\tconst struct rte_flow_item_icmp *v = item_spec;\n+\trte_be32_t icmp_dw1;\n+\n+\ticmp_dw1 = (v->hdr.icmp_type << __mlx5_dw_bit_off(header_icmp, type)) |\n+\t\t   (v->hdr.icmp_code << __mlx5_dw_bit_off(header_icmp, code)) |\n+\t\t   (rte_be_to_cpu_16(v->hdr.icmp_cksum) << __mlx5_dw_bit_off(header_icmp, cksum));\n+\n+\tDR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_icmp_dw2_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t    const void *item_spec,\n+\t\t\t    uint8_t *tag)\n+{\n+\tconst struct rte_flow_item_icmp *v = item_spec;\n+\trte_be32_t icmp_dw2;\n+\n+\ticmp_dw2 = (rte_be_to_cpu_16(v->hdr.icmp_ident) << __mlx5_dw_bit_off(header_icmp, ident)) |\n+\t\t   (rte_be_to_cpu_16(v->hdr.icmp_seq_nb) << __mlx5_dw_bit_off(header_icmp, seq_nb));\n+\n+\tDR_SET(tag, icmp_dw2, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_icmp6_dw1_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t    const void *item_spec,\n+\t\t\t    uint8_t *tag)\n+{\n+\tconst struct rte_flow_item_icmp6 *v = item_spec;\n+\trte_be32_t icmp_dw1;\n+\n+\ticmp_dw1 = (v->type << __mlx5_dw_bit_off(header_icmp, type)) |\n+\t\t   (v->code << __mlx5_dw_bit_off(header_icmp, code)) |\n+\t\t   (rte_be_to_cpu_16(v->checksum) << __mlx5_dw_bit_off(header_icmp, cksum));\n+\n+\tDR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_ipv6_flow_label_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t\t   const void *item_spec,\n+\t\t\t\t   uint8_t *tag)\n+{\n+\tconst struct rte_flow_item_ipv6 *v = item_spec;\n+\tuint32_t flow_label = DR_GET(header_ipv6_vtc, &v->hdr.vtc_flow, flow_label);\n+\n+\tDR_SET(tag, flow_label, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static void\n+mlx5dr_definer_vport_set(struct mlx5dr_definer_fc *fc,\n+\t\t\t const void *item_spec,\n+\t\t\t uint8_t *tag)\n+{\n+\tconst struct rte_flow_item_ethdev *v = item_spec;\n+\tconst struct flow_hw_port_info *port_info;\n+\tuint32_t regc_value;\n+\n+\tport_info = flow_hw_conv_port_id(v->port_id);\n+\tif (unlikely(!port_info))\n+\t\tregc_value = BAD_PORT;\n+\telse\n+\t\tregc_value = port_info->regc_value >> fc->bit_off;\n+\n+\t/* Bit offset is set to 0 since regc value is 32bit */\n+\tDR_SET(tag, regc_value, fc->byte_off, fc->bit_off, fc->bit_mask);\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_eth(struct 
mlx5dr_definer_conv_data *cd,\n+\t\t\t     struct rte_flow_item *item,\n+\t\t\t     int item_idx)\n+{\n+\tconst struct rte_flow_item_eth *m = item->mask;\n+\tuint8_t empty_mac[RTE_ETHER_ADDR_LEN] = {0};\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = cd->tunnel;\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->reserved) {\n+\t\trte_errno = ENOTSUP;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (m->type) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_eth_type_set;\n+\t\tDR_CALC_SET(fc, eth_l2, l3_ethertype, inner);\n+\t}\n+\n+\t/* Check SMAC 47_16 */\n+\tif (memcmp(m->src.addr_bytes, empty_mac, 4)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(ETH_SMAC_48_16, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_eth_smac_47_16_set;\n+\t\tDR_CALC_SET(fc, eth_l2_src, smac_47_16, inner);\n+\t}\n+\n+\t/* Check SMAC 15_0 */\n+\tif (memcmp(m->src.addr_bytes + 4, empty_mac + 4, 2)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(ETH_SMAC_15_0, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_eth_smac_15_0_set;\n+\t\tDR_CALC_SET(fc, eth_l2_src, smac_15_0, inner);\n+\t}\n+\n+\t/* Check DMAC 47_16 */\n+\tif (memcmp(m->dst.addr_bytes, empty_mac, 4)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(ETH_DMAC_48_16, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_eth_dmac_47_16_set;\n+\t\tDR_CALC_SET(fc, eth_l2, dmac_47_16, inner);\n+\t}\n+\n+\t/* Check DMAC 15_0 */\n+\tif (memcmp(m->dst.addr_bytes + 4, empty_mac + 4, 2)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(ETH_DMAC_15_0, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_eth_dmac_15_0_set;\n+\t\tDR_CALC_SET(fc, eth_l2, dmac_15_0, inner);\n+\t}\n+\n+\tif (m->has_vlan) {\n+\t\t/* Mark packet as tagged (CVLAN) */\n+\t\tfc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tfc->tag_set = &mlx5dr_definer_eth_first_vlan_q_set;\n+\t\tDR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t      struct rte_flow_item *item,\n+\t\t\t      int item_idx)\n+{\n+\tconst struct rte_flow_item_vlan *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = cd->tunnel;\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->reserved) {\n+\t\trte_errno = ENOTSUP;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (!cd->relaxed || m->has_more_vlan) {\n+\t\t/* Mark packet as tagged (CVLAN or SVLAN) even if TCI is not specified.*/\n+\t\tfc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tfc->tag_set = &mlx5dr_definer_first_vlan_q_set;\n+\t\tDR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);\n+\t}\n+\n+\tif (m->tci) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(VLAN_TCI, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_tci_set;\n+\t\tDR_CALC_SET(fc, eth_l2, tci, inner);\n+\t}\n+\n+\tif (m->inner_type) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_eth_type_set;\n+\t\tDR_CALC_SET(fc, eth_l2, l3_ethertype, inner);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t      struct rte_flow_item *item,\n+\t\t\t      int item_idx)\n+{\n+\tconst struct rte_ipv4_hdr *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = 
cd->tunnel;\n+\n+\tif (!cd->relaxed) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv4_version_set;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tDR_CALC_SET(fc, eth_l2, l3_type, inner);\n+\n+\t\t/* Overwrite - Unset ethertype if present */\n+\t\tmemset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->total_length || m->packet_id ||\n+\t    m->hdr_checksum) {\n+\t\trte_errno = ENOTSUP;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (m->fragment_offset) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv4_frag_set;\n+\t\tDR_CALC_SET(fc, eth_l3, fragment_offset, inner);\n+\t}\n+\n+\tif (m->next_proto_id) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv4_next_proto_set;\n+\t\tDR_CALC_SET(fc, eth_l3, protocol_next_header, inner);\n+\t}\n+\n+\tif (m->dst_addr) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV4_DST, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv4_dst_addr_set;\n+\t\tDR_CALC_SET(fc, ipv4_src_dest, destination_address, inner);\n+\t}\n+\n+\tif (m->src_addr) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV4_SRC, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv4_src_addr_set;\n+\t\tDR_CALC_SET(fc, ipv4_src_dest, source_address, inner);\n+\t}\n+\n+\tif (m->ihl) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV4_IHL, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv4_ihl_set;\n+\t\tDR_CALC_SET(fc, eth_l3, ihl, inner);\n+\t}\n+\n+\tif (m->time_to_live) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_TTL, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv4_time_to_live_set;\n+\t\tDR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);\n+\t}\n+\n+\tif (m->type_of_service) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_TOS, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv4_tos_set;\n+\t\tDR_CALC_SET(fc, eth_l3, tos, inner);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t      struct rte_flow_item *item,\n+\t\t\t      int item_idx)\n+{\n+\tconst struct rte_flow_item_ipv6 *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = cd->tunnel;\n+\n+\tif (!cd->relaxed) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_version_set;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tDR_CALC_SET(fc, eth_l2, l3_type, inner);\n+\n+\t\t/* Overwrite - Unset ethertype if present */\n+\t\tmemset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->has_hop_ext || m->has_route_ext || m->has_auth_ext ||\n+\t    m->has_esp_ext || m->has_dest_ext || m->has_mobil_ext ||\n+\t    m->has_hip_ext || m->has_shim6_ext) {\n+\t\trte_errno = ENOTSUP;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (m->has_frag_ext) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_frag_set;\n+\t\tDR_CALC_SET(fc, eth_l4, ip_fragmented, inner);\n+\t}\n+\n+\tif (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, tos)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_TOS, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_tos_set;\n+\t\tDR_CALC_SET(fc, 
eth_l3, tos, inner);\n+\t}\n+\n+\tif (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, flow_label)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV6_FLOW_LABEL, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_flow_label_set;\n+\t\tDR_CALC_SET(fc, eth_l3, flow_label, inner);\n+\t}\n+\n+\tif (m->hdr.payload_len) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV6_PAYLOAD_LEN, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_payload_len_set;\n+\t\tDR_CALC_SET(fc, eth_l3, ipv6_payload_length, inner);\n+\t}\n+\n+\tif (m->hdr.proto) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_proto_set;\n+\t\tDR_CALC_SET(fc, eth_l3, protocol_next_header, inner);\n+\t}\n+\n+\tif (m->hdr.hop_limits) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_TTL, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_hop_limits_set;\n+\t\tDR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);\n+\t}\n+\n+\tif (!is_mem_zero(m->hdr.src_addr, 4)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_127_96, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_src_addr_127_96_set;\n+\t\tDR_CALC_SET(fc, ipv6_src, ipv6_address_127_96, inner);\n+\t}\n+\n+\tif (!is_mem_zero(m->hdr.src_addr + 4, 4)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_95_64, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_src_addr_95_64_set;\n+\t\tDR_CALC_SET(fc, ipv6_src, ipv6_address_95_64, inner);\n+\t}\n+\n+\tif (!is_mem_zero(m->hdr.src_addr + 8, 4)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_63_32, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_src_addr_63_32_set;\n+\t\tDR_CALC_SET(fc, ipv6_src, ipv6_address_63_32, inner);\n+\t}\n+\n+\tif (!is_mem_zero(m->hdr.src_addr + 12, 4)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_31_0, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_src_addr_31_0_set;\n+\t\tDR_CALC_SET(fc, ipv6_src, ipv6_address_31_0, inner);\n+\t}\n+\n+\tif (!is_mem_zero(m->hdr.dst_addr, 4)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV6_DST_127_96, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_dst_addr_127_96_set;\n+\t\tDR_CALC_SET(fc, ipv6_dst, ipv6_address_127_96, inner);\n+\t}\n+\n+\tif (!is_mem_zero(m->hdr.dst_addr + 4, 4)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV6_DST_95_64, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_dst_addr_95_64_set;\n+\t\tDR_CALC_SET(fc, ipv6_dst, ipv6_address_95_64, inner);\n+\t}\n+\n+\tif (!is_mem_zero(m->hdr.dst_addr + 8, 4)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV6_DST_63_32, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_dst_addr_63_32_set;\n+\t\tDR_CALC_SET(fc, ipv6_dst, ipv6_address_63_32, inner);\n+\t}\n+\n+\tif (!is_mem_zero(m->hdr.dst_addr + 12, 4)) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IPV6_DST_31_0, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv6_dst_addr_31_0_set;\n+\t\tDR_CALC_SET(fc, ipv6_dst, ipv6_address_31_0, inner);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_udp(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t     struct rte_flow_item *item,\n+\t\t\t     int item_idx)\n+{\n+\tconst struct rte_flow_item_udp *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = cd->tunnel;\n+\n+\t/* Set match on L4 type UDP */\n+\tif (!cd->relaxed) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, 
inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_udp_protocol_set;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tDR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->hdr.dgram_cksum || m->hdr.dgram_len) {\n+\t\trte_errno = ENOTSUP;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (m->hdr.src_port) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(L4_SPORT, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_udp_src_port_set;\n+\t\tDR_CALC_SET(fc, eth_l4, source_port, inner);\n+\t}\n+\n+\tif (m->hdr.dst_port) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_udp_dst_port_set;\n+\t\tDR_CALC_SET(fc, eth_l4, destination_port, inner);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_tcp(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t     struct rte_flow_item *item,\n+\t\t\t     int item_idx)\n+{\n+\tconst struct rte_flow_item_tcp *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = cd->tunnel;\n+\n+\t/* Overwrite match on L4 type TCP */\n+\tif (!cd->relaxed) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_tcp_protocol_set;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tDR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->hdr.tcp_flags) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(TCP_FLAGS, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_tcp_flags_set;\n+\t\tDR_CALC_SET(fc, eth_l4, tcp_flags, inner);\n+\t}\n+\n+\tif (m->hdr.src_port) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(L4_SPORT, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_tcp_src_port_set;\n+\t\tDR_CALC_SET(fc, eth_l4, source_port, inner);\n+\t}\n+\n+\tif (m->hdr.dst_port) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_tcp_dst_port_set;\n+\t\tDR_CALC_SET(fc, eth_l4, destination_port, inner);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_gtp(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t     struct rte_flow_item *item,\n+\t\t\t     int item_idx)\n+{\n+\tconst struct rte_flow_item_gtp *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\n+\t/* Overwrite GTPU dest port if not present */\n+\tfc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];\n+\tif (!fc->tag_set && !cd->relaxed) {\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gtp_udp_port_set;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tDR_CALC_SET(fc, eth_l4, destination_port, false);\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->msg_len || m->v_pt_rsv_flags & ~MLX5DR_DEFINER_GTP_EXT_HDR_BIT) {\n+\t\trte_errno = ENOTSUP;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (m->teid) {\n+\t\tif (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_TEID_ENABLED)) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_TEID];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gtp_teid_set;\n+\t\tfc->bit_mask = __mlx5_mask(header_gtp, teid);\n+\t\tfc->byte_off = cd->caps->format_select_gtpu_dw_1 * DW_SIZE;\n+\t}\n+\n+\tif (m->v_pt_rsv_flags) {\n+\t\tif (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_FLAG];\n+\t\tfc->item_idx = 
item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gtp_ext_flag_set;\n+\t\tfc->bit_mask = __mlx5_mask(header_gtp, ext_hdr_flag);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_gtp, ext_hdr_flag);\n+\t\tfc->byte_off = cd->caps->format_select_gtpu_dw_0 * DW_SIZE;\n+\t}\n+\n+\n+\tif (m->msg_type) {\n+\t\tif (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_MSG_TYPE];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gtp_msg_type_set;\n+\t\tfc->bit_mask = __mlx5_mask(header_gtp, msg_type);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);\n+\t\tfc->byte_off = cd->caps->format_select_gtpu_dw_0 * DW_SIZE;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_gtp_psc(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t\t struct rte_flow_item *item,\n+\t\t\t\t int item_idx)\n+{\n+\tconst struct rte_flow_item_gtp_psc *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\n+\t/* Overwrite GTP extension flag to be 1 */\n+\tif (!cd->relaxed) {\n+\t\tif (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_FLAG];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ones_set;\n+\t\tfc->bit_mask = __mlx5_mask(header_gtp, ext_hdr_flag);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_gtp, ext_hdr_flag);\n+\t\tfc->byte_off = cd->caps->format_select_gtpu_dw_0 * DW_SIZE;\n+\t}\n+\n+\t/* Overwrite next extension header type */\n+\tif (!cd->relaxed) {\n+\t\tif (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_2_ENABLED)) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_NEXT_EXT_HDR];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gtp_next_ext_hdr_set;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tfc->bit_mask = __mlx5_mask(header_opt_gtp, next_ext_hdr_type);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_opt_gtp, next_ext_hdr_type);\n+\t\tfc->byte_off = cd->caps->format_select_gtpu_dw_2 * DW_SIZE;\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->hdr.type) {\n+\t\tif (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_HDR_PDU];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gtp_ext_hdr_pdu_set;\n+\t\tfc->bit_mask = __mlx5_mask(header_gtp_psc, pdu_type);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_gtp_psc, pdu_type);\n+\t\tfc->byte_off = cd->caps->format_select_gtpu_ext_dw_0 * DW_SIZE;\n+\t}\n+\n+\tif (m->hdr.qfi) {\n+\t\tif (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_HDR_QFI];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gtp_ext_hdr_qfi_set;\n+\t\tfc->bit_mask = __mlx5_mask(header_gtp_psc, qfi);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_gtp_psc, qfi);\n+\t\tfc->byte_off = cd->caps->format_select_gtpu_ext_dw_0 * DW_SIZE;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_port(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t      struct rte_flow_item *item,\n+\t\t\t      int item_idx)\n+{\n+\tconst struct rte_flow_item_ethdev *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tuint8_t bit_offset = 
0;\n+\n+\tif (m->port_id) {\n+\t\tif (!cd->caps->wire_regc_mask) {\n+\t\t\tDR_LOG(ERR, \"Port ID item not supported, missing wire REGC mask\");\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\n+\t\twhile (!(cd->caps->wire_regc_mask & (1 << bit_offset)))\n+\t\t\tbit_offset++;\n+\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_VPORT_REG_C_0];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_vport_set;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tDR_CALC_SET_HDR(fc, registers, register_c_0);\n+\t\tfc->bit_off = bit_offset;\n+\t\tfc->bit_mask = cd->caps->wire_regc_mask >> bit_offset;\n+\t} else {\n+\t\tDR_LOG(ERR, \"Port ID item mask must specify ID mask\");\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t       struct rte_flow_item *item,\n+\t\t\t       int item_idx)\n+{\n+\tconst struct rte_flow_item_vxlan *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = cd->tunnel;\n+\n+\t/* In order to match on VXLAN we must match on ether_type, ip_protocol\n+\t * and l4_dport.\n+\t */\n+\tif (!cd->relaxed) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];\n+\t\tif (!fc->tag_set) {\n+\t\t\tfc->item_idx = item_idx;\n+\t\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\t\tfc->tag_set = &mlx5dr_definer_udp_protocol_set;\n+\t\t\tDR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);\n+\t\t}\n+\n+\t\tfc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];\n+\t\tif (!fc->tag_set) {\n+\t\t\tfc->item_idx = item_idx;\n+\t\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\t\tfc->tag_set = &mlx5dr_definer_vxlan_udp_port_set;\n+\t\t\tDR_CALC_SET(fc, eth_l4, destination_port, inner);\n+\t\t}\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->flags) {\n+\t\tif (inner) {\n+\t\t\tDR_LOG(ERR, \"Inner VXLAN flags item not supported\");\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_FLAGS];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_vxlan_flags_set;\n+\t\tDR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);\n+\t\tfc->bit_mask = __mlx5_mask(header_vxlan, flags);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_vxlan, flags);\n+\t}\n+\n+\tif (!is_mem_zero(m->vni, 3)) {\n+\t\tif (inner) {\n+\t\t\tDR_LOG(ERR, \"Inner VXLAN vni item not supported\");\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_VNI];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_vxlan_vni_set;\n+\t\tDR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);\n+\t\tfc->bit_mask = __mlx5_mask(header_vxlan, vni);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static struct mlx5dr_definer_fc *\n+mlx5dr_definer_get_register_fc(struct mlx5dr_definer_conv_data *cd, int reg)\n+{\n+\tstruct mlx5dr_definer_fc *fc;\n+\n+\tswitch (reg) {\n+\tcase REG_C_0:\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_0];\n+\t\tDR_CALC_SET_HDR(fc, registers, register_c_0);\n+\t\tbreak;\n+\tcase REG_C_1:\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_1];\n+\t\tDR_CALC_SET_HDR(fc, registers, register_c_1);\n+\t\tbreak;\n+\tcase REG_C_2:\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_2];\n+\t\tDR_CALC_SET_HDR(fc, registers, register_c_2);\n+\t\tbreak;\n+\tcase REG_C_3:\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_3];\n+\t\tDR_CALC_SET_HDR(fc, registers, register_c_3);\n+\t\tbreak;\n+\tcase REG_C_4:\n+\t\tfc = 
&cd->fc[MLX5DR_DEFINER_FNAME_REG_4];\n+\t\tDR_CALC_SET_HDR(fc, registers, register_c_4);\n+\t\tbreak;\n+\tcase REG_C_5:\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_5];\n+\t\tDR_CALC_SET_HDR(fc, registers, register_c_5);\n+\t\tbreak;\n+\tcase REG_C_6:\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_6];\n+\t\tDR_CALC_SET_HDR(fc, registers, register_c_6);\n+\t\tbreak;\n+\tcase REG_C_7:\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_7];\n+\t\tDR_CALC_SET_HDR(fc, registers, register_c_7);\n+\t\tbreak;\n+\tcase REG_A:\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_A];\n+\t\tDR_CALC_SET_HDR(fc, metadata, general_purpose);\n+\t\tbreak;\n+\tcase REG_B:\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_B];\n+\t\tDR_CALC_SET_HDR(fc, metadata, metadata_to_cqe);\n+\t\tbreak;\n+\tdefault:\n+\t\trte_errno = ENOTSUP;\n+\t\treturn NULL;\n+\t}\n+\n+\treturn fc;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_tag(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t     struct rte_flow_item *item,\n+\t\t\t     int item_idx)\n+{\n+\tconst struct rte_flow_item_tag *m = item->mask;\n+\tconst struct rte_flow_item_tag *v = item->spec;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tint reg;\n+\n+\tif (!m || !v)\n+\t\treturn 0;\n+\n+\tif (item->type == RTE_FLOW_ITEM_TYPE_TAG)\n+\t\treg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_TAG, v->index);\n+\telse\n+\t\treg = (int)v->index;\n+\n+\tif (reg <= 0) {\n+\t\tDR_LOG(ERR, \"Invalid register for item tag\");\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tfc = mlx5dr_definer_get_register_fc(cd, reg);\n+\tif (!fc)\n+\t\treturn rte_errno;\n+\n+\tfc->item_idx = item_idx;\n+\tfc->tag_set = &mlx5dr_definer_tag_set;\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_metadata(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t\t  struct rte_flow_item *item,\n+\t\t\t\t  int item_idx)\n+{\n+\tconst struct rte_flow_item_meta *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tint reg;\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\treg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_META, -1);\n+\tif (reg <= 0) {\n+\t\tDR_LOG(ERR, \"Invalid register for item metadata\");\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tfc = mlx5dr_definer_get_register_fc(cd, reg);\n+\tif (!fc)\n+\t\treturn rte_errno;\n+\n+\tfc->item_idx = item_idx;\n+\tfc->tag_set = &mlx5dr_definer_metadata_set;\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_sq(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t    struct rte_flow_item *item,\n+\t\t\t    int item_idx)\n+{\n+\tconst struct mlx5_rte_flow_item_sq *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->queue) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_SOURCE_QP];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tfc->tag_set = &mlx5dr_definer_source_qp_set;\n+\t\tDR_CALC_SET_HDR(fc, source_qp_gvmi, source_qp);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_gre(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t     struct rte_flow_item *item,\n+\t\t\t     int item_idx)\n+{\n+\tconst struct rte_flow_item_gre *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = cd->tunnel;\n+\n+\tif (inner) {\n+\t\tDR_LOG(ERR, \"Inner GRE item not supported\");\n+\t\trte_errno = ENOTSUP;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (!cd->relaxed) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tfc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;\n+\t\tDR_CALC_SET(fc, eth_l3, 
protocol_next_header, inner);\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->c_rsvd0_ver) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_C_VER];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gre_c_ver_set;\n+\t\tDR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);\n+\t\tfc->bit_mask = __mlx5_mask(header_gre, c_rsvd0_ver);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_gre, c_rsvd0_ver);\n+\t}\n+\n+\tif (m->protocol) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_PROTOCOL];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gre_protocol_type_set;\n+\t\tDR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);\n+\t\tfc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);\n+\t\tfc->bit_mask = __mlx5_mask(header_gre, gre_protocol);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_gre_opt(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t\t struct rte_flow_item *item,\n+\t\t\t\t int item_idx)\n+{\n+\tconst struct rte_flow_item_gre_opt *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\n+\tif (!cd->relaxed) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];\n+\t\tif (!fc->tag_set) {\n+\t\t\tfc->item_idx = item_idx;\n+\t\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\t\tfc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;\n+\t\t\tDR_CALC_SET(fc, eth_l3, protocol_next_header, false);\n+\t\t}\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->checksum_rsvd.checksum) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_CHECKSUM];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gre_opt_checksum_set;\n+\t\tDR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);\n+\t}\n+\n+\tif (m->key.key) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_KEY];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gre_opt_key_set;\n+\t\tDR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);\n+\t}\n+\n+\tif (m->sequence.sequence) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_SEQ];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gre_opt_seq_set;\n+\t\tDR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_3);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t\t struct rte_flow_item *item,\n+\t\t\t\t int item_idx)\n+{\n+\tconst rte_be32_t *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\n+\tif (!cd->relaxed) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_KEY_PRESENT];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_ones_set;\n+\t\tDR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);\n+\t\tfc->bit_mask = __mlx5_mask(header_gre, gre_k_present);\n+\t\tfc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);\n+\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];\n+\t\tif (!fc->tag_set) {\n+\t\t\tfc->item_idx = item_idx;\n+\t\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\t\tfc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;\n+\t\t\tDR_CALC_SET(fc, eth_l3, protocol_next_header, false);\n+\t\t}\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (*m) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_KEY];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_gre_key_set;\n+\t\tDR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t\t   struct rte_flow_item *item,\n+\t\t\t\t   int item_idx)\n+{\n+\tconst struct 
rte_flow_item_integrity *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = cd->tunnel;\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->packet_ok || m->l2_ok || m->l2_crc_ok || m->l3_len_ok) {\n+\t\trte_errno = ENOTSUP;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tif (m->l3_ok || m->ipv4_csum_ok || m->l4_ok || m->l4_csum_ok) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(INTEGRITY, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_integrity_set;\n+\t\tDR_CALC_SET_HDR(fc, oks1, oks1_bits);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_conntrack(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t\t   struct rte_flow_item *item,\n+\t\t\t\t   int item_idx)\n+{\n+\tconst struct rte_flow_item_conntrack *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tint reg;\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\treg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_CONNTRACK, -1);\n+\tif (reg <= 0) {\n+\t\tDR_LOG(ERR, \"Invalid register for item conntrack\");\n+\t\trte_errno = EINVAL;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tfc = mlx5dr_definer_get_register_fc(cd, reg);\n+\tif (!fc)\n+\t\treturn rte_errno;\n+\n+\tfc->item_idx = item_idx;\n+\tfc->tag_mask_set = &mlx5dr_definer_conntrack_mask;\n+\tfc->tag_set = &mlx5dr_definer_conntrack_tag;\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_icmp(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t      struct rte_flow_item *item,\n+\t\t\t      int item_idx)\n+{\n+\tconst struct rte_flow_item_icmp *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = cd->tunnel;\n+\n+\t/* Overwrite match on L4 type ICMP */\n+\tif (!cd->relaxed) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_icmp_protocol_set;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tDR_CALC_SET(fc, eth_l2, l4_type, inner);\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->hdr.icmp_type || m->hdr.icmp_code || m->hdr.icmp_cksum) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_icmp_dw1_set;\n+\t\tDR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);\n+\t}\n+\n+\tif (m->hdr.icmp_ident || m->hdr.icmp_seq_nb) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW2];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_icmp_dw2_set;\n+\t\tDR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw2);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_icmp6(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t       struct rte_flow_item *item,\n+\t\t\t       int item_idx)\n+{\n+\tconst struct rte_flow_item_icmp6 *m = item->mask;\n+\tstruct mlx5dr_definer_fc *fc;\n+\tbool inner = cd->tunnel;\n+\n+\t/* Overwrite match on L4 type ICMP6 */\n+\tif (!cd->relaxed) {\n+\t\tfc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_icmp_protocol_set;\n+\t\tfc->tag_mask_set = &mlx5dr_definer_ones_set;\n+\t\tDR_CALC_SET(fc, eth_l2, l4_type, inner);\n+\t}\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\tif (m->type || m->code || m->checksum) {\n+\t\tfc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];\n+\t\tfc->item_idx = item_idx;\n+\t\tfc->tag_set = &mlx5dr_definer_icmp6_dw1_set;\n+\t\tDR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,\n+\t\t\t     struct rte_flow_item *item,\n+\t\t\t     int item_idx)\n+{\n+\tconst struct rte_flow_item_meter_color *m = item->mask;\n+\tstruct 
mlx5dr_definer_fc *fc;\n+\tint reg;\n+\n+\tif (!m)\n+\t\treturn 0;\n+\n+\treg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_METER_COLOR, 0);\n+\tMLX5_ASSERT(reg > 0);\n+\n+\tfc = mlx5dr_definer_get_register_fc(cd, reg);\n+\tif (!fc)\n+\t\treturn rte_errno;\n+\n+\tfc->item_idx = item_idx;\n+\tfc->tag_set = &mlx5dr_definer_meter_color_set;\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,\n+\t\t\t\tstruct mlx5dr_match_template *mt,\n+\t\t\t\tuint8_t *hl)\n+{\n+\tstruct mlx5dr_definer_fc fc[MLX5DR_DEFINER_FNAME_MAX] = {{0}};\n+\tstruct mlx5dr_definer_conv_data cd = {0};\n+\tstruct rte_flow_item *items = mt->items;\n+\tuint64_t item_flags = 0;\n+\tuint32_t total = 0;\n+\tint i, j;\n+\tint ret;\n+\n+\tcd.fc = fc;\n+\tcd.hl = hl;\n+\tcd.caps = ctx->caps;\n+\tcd.relaxed = mt->flags & MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH;\n+\n+\t/* Collect all RTE fields to the field array and set header layout */\n+\tfor (i = 0; items->type != RTE_FLOW_ITEM_TYPE_END; i++, items++) {\n+\t\tcd.tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\n+\t\tswitch ((int)items->type) {\n+\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n+\t\t\tret = mlx5dr_definer_conv_item_eth(&cd, items, i);\n+\t\t\titem_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n+\t\t\t\t\t\t  MLX5_FLOW_LAYER_OUTER_L2;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n+\t\t\tret = mlx5dr_definer_conv_item_vlan(&cd, items, i);\n+\t\t\titem_flags |= cd.tunnel ?\n+\t\t\t\t(MLX5_FLOW_LAYER_INNER_VLAN | MLX5_FLOW_LAYER_INNER_L2) :\n+\t\t\t\t(MLX5_FLOW_LAYER_OUTER_VLAN | MLX5_FLOW_LAYER_OUTER_L2);\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n+\t\t\tret = mlx5dr_definer_conv_item_ipv4(&cd, items, i);\n+\t\t\titem_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n+\t\t\t\t\t\t  MLX5_FLOW_LAYER_OUTER_L3_IPV4;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n+\t\t\tret = mlx5dr_definer_conv_item_ipv6(&cd, items, i);\n+\t\t\titem_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :\n+\t\t\t\t\t\t  MLX5_FLOW_LAYER_OUTER_L3_IPV6;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n+\t\t\tret = mlx5dr_definer_conv_item_udp(&cd, items, i);\n+\t\t\titem_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :\n+\t\t\t\t\t\t  MLX5_FLOW_LAYER_OUTER_L4_UDP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n+\t\t\tret = mlx5dr_definer_conv_item_tcp(&cd, items, i);\n+\t\t\titem_flags |= cd.tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_TCP :\n+\t\t\t\t\t\t  MLX5_FLOW_LAYER_OUTER_L4_TCP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GTP:\n+\t\t\tret = mlx5dr_definer_conv_item_gtp(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_GTP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GTP_PSC:\n+\t\t\tret = mlx5dr_definer_conv_item_gtp_psc(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_GTP_PSC;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:\n+\t\t\tret = mlx5dr_definer_conv_item_port(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_ITEM_REPRESENTED_PORT;\n+\t\t\tmt->vport_item_id = i;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\t\tret = mlx5dr_definer_conv_item_vxlan(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_VXLAN;\n+\t\t\tbreak;\n+\t\tcase MLX5_RTE_FLOW_ITEM_TYPE_SQ:\n+\t\t\tret = mlx5dr_definer_conv_item_sq(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_ITEM_SQ;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_TAG:\n+\t\tcase MLX5_RTE_FLOW_ITEM_TYPE_TAG:\n+\t\t\tret = mlx5dr_definer_conv_item_tag(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_ITEM_TAG;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_META:\n+\t\t\tret = mlx5dr_definer_conv_item_metadata(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_ITEM_METADATA;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE:\n+\t\t\tret = mlx5dr_definer_conv_item_gre(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_GRE;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE_OPTION:\n+\t\t\tret = mlx5dr_definer_conv_item_gre_opt(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_GRE;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE_KEY:\n+\t\t\tret = mlx5dr_definer_conv_item_gre_key(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_GRE_KEY;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_INTEGRITY:\n+\t\t\tret = mlx5dr_definer_conv_item_integrity(&cd, items, i);\n+\t\t\titem_flags |= cd.tunnel ? 
MLX5_FLOW_ITEM_INNER_INTEGRITY :\n+\t\t\t\t\t\t  MLX5_FLOW_ITEM_OUTER_INTEGRITY;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_CONNTRACK:\n+\t\t\tret = mlx5dr_definer_conv_item_conntrack(&cd, items, i);\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_ICMP:\n+\t\t\tret = mlx5dr_definer_conv_item_icmp(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_ICMP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_ICMP6:\n+\t\t\tret = mlx5dr_definer_conv_item_icmp6(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_LAYER_ICMP6;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_METER_COLOR:\n+\t\t\tret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);\n+\t\t\titem_flags |= MLX5_FLOW_ITEM_METER_COLOR;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tDR_LOG(ERR, \"Unsupported item type %d\", items->type);\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn rte_errno;\n+\t\t}\n+\n+\t\tif (ret) {\n+\t\t\tDR_LOG(ERR, \"Failed processing item type: %d\", items->type);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\tmt->item_flags = item_flags;\n+\n+\t/* Fill in headers layout and calculate total number of fields */\n+\tfor (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++) {\n+\t\tif (fc[i].tag_set) {\n+\t\t\ttotal++;\n+\t\t\tDR_SET(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);\n+\t\t}\n+\t}\n+\n+\tmt->fc_sz = total;\n+\tmt->fc = simple_calloc(total, sizeof(*mt->fc));\n+\tif (!mt->fc) {\n+\t\tDR_LOG(ERR, \"Failed to allocate field copy array\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn rte_errno;\n+\t}\n+\n+\tj = 0;\n+\tfor (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++) {\n+\t\tif (fc[i].tag_set) {\n+\t\t\tmemcpy(&mt->fc[j], &fc[i], sizeof(*mt->fc));\n+\t\t\tmt->fc[j].fname = i;\n+\t\t\tj++;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+mlx5dr_definer_find_byte_in_tag(struct mlx5dr_definer *definer,\n+\t\t\t\tuint32_t hl_byte_off,\n+\t\t\t\tuint32_t *tag_byte_off)\n+{\n+\tuint8_t byte_offset;\n+\tint i;\n+\n+\t/* Add offset since each DW covers multiple BYTEs */\n+\tbyte_offset = hl_byte_off % DW_SIZE;\n+\tfor (i = 0; i < DW_SELECTORS; i++) {\n+\t\tif (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {\n+\t\t\t*tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\n+\t/* Add offset to skip DWs in definer */\n+\tbyte_offset = DW_SIZE * DW_SELECTORS;\n+\t/* Iterate in reverse since the code uses bytes from 7 -> 0 */\n+\tfor (i = BYTE_SELECTORS; i-- > 0;) {\n+\t\tif (definer->byte_selector[i] == hl_byte_off) {\n+\t\t\t*tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\n+\t/* The hl byte offset must be part of the definer */\n+\tDR_LOG(INFO, \"Failed to map to definer, HL byte [%d] not found\", hl_byte_off);\n+\trte_errno = EINVAL;\n+\treturn rte_errno;\n+}\n+\n+static int\n+mlx5dr_definer_fc_bind(struct mlx5dr_definer *definer,\n+\t\t       struct mlx5dr_definer_fc *fc,\n+\t\t       uint32_t fc_sz)\n+{\n+\tuint32_t tag_offset = 0;\n+\tint ret, byte_diff;\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < fc_sz; i++) {\n+\t\t/* Map header layout byte offset to byte offset in tag */\n+\t\tret = mlx5dr_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\t/* Move setter based on the location in the definer */\n+\t\tbyte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;\n+\t\tfc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;\n+\n+\t\t/* Update offset in headers layout to offset in tag */\n+\t\tfc->byte_off = tag_offset;\n+\t\tfc++;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static 
bool\n+mlx5dr_definer_best_hl_fit_recu(struct mlx5dr_definer_sel_ctrl *ctrl,\n+\t\t\t\tuint32_t cur_dw,\n+\t\t\t\tuint32_t *data)\n+{\n+\tuint8_t bytes_set;\n+\tint byte_idx;\n+\tbool ret;\n+\tint i;\n+\n+\t/* Reached end, nothing left to do */\n+\tif (cur_dw == MLX5_ST_SZ_DW(definer_hl))\n+\t\treturn true;\n+\n+\t/* No data set, can skip to next DW */\n+\twhile (!*data) {\n+\t\tcur_dw++;\n+\t\tdata++;\n+\n+\t\t/* Reached end, nothing left to do */\n+\t\tif (cur_dw == MLX5_ST_SZ_DW(definer_hl))\n+\t\t\treturn true;\n+\t}\n+\n+\t/* Used all DW selectors and Byte selectors, no possible solution */\n+\tif (ctrl->allowed_full_dw == ctrl->used_full_dw &&\n+\t    ctrl->allowed_lim_dw == ctrl->used_lim_dw &&\n+\t    ctrl->allowed_bytes == ctrl->used_bytes)\n+\t\treturn false;\n+\n+\t/* Try to use limited DW selectors */\n+\tif (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {\n+\t\tctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;\n+\n+\t\tret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\tctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;\n+\t}\n+\n+\t/* Try to use DW selectors */\n+\tif (ctrl->allowed_full_dw > ctrl->used_full_dw) {\n+\t\tctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;\n+\n+\t\tret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\tctrl->full_dw_selector[--ctrl->used_full_dw] = 0;\n+\t}\n+\n+\t/* No byte selector for offset bigger than 255 */\n+\tif (cur_dw * DW_SIZE > 255)\n+\t\treturn false;\n+\n+\tbytes_set = !!(0x000000ff & *data) +\n+\t\t    !!(0x0000ff00 & *data) +\n+\t\t    !!(0x00ff0000 & *data) +\n+\t\t    !!(0xff000000 & *data);\n+\n+\t/* Check if there are enough byte selectors left */\n+\tif (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)\n+\t\treturn false;\n+\n+\t/* Try to use Byte selectors */\n+\tfor (i = 0; i < DW_SIZE; i++)\n+\t\tif ((0xff000000 >> (i * BITS_IN_BYTE)) & rte_be_to_cpu_32(*data)) {\n+\t\t\t/* Use byte selectors high to low */\n+\t\t\tbyte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;\n+\t\t\tctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;\n+\t\t\tctrl->used_bytes++;\n+\t\t}\n+\n+\tret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tfor (i = 0; i < DW_SIZE; i++)\n+\t\tif ((0xff << (i * BITS_IN_BYTE)) & rte_be_to_cpu_32(*data)) {\n+\t\t\tctrl->used_bytes--;\n+\t\t\tbyte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;\n+\t\t\tctrl->byte_selector[byte_idx] = 0;\n+\t\t}\n+\n+\treturn false;\n+}\n+\n+static void\n+mlx5dr_definer_apply_sel_ctrl(struct mlx5dr_definer_sel_ctrl *ctrl,\n+\t\t\t      struct mlx5dr_definer *definer)\n+{\n+\tmemcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);\n+\tmemcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);\n+\tmemcpy(definer->dw_selector + ctrl->allowed_full_dw,\n+\t       ctrl->lim_dw_selector, ctrl->allowed_lim_dw);\n+}\n+\n+static int\n+mlx5dr_definer_find_best_hl_fit(struct mlx5dr_context *ctx,\n+\t\t\t\tstruct mlx5dr_match_template *mt,\n+\t\t\t\tuint8_t *hl)\n+{\n+\tstruct mlx5dr_definer_sel_ctrl ctrl = {0};\n+\tbool found;\n+\n+\t/* Try to create a match definer */\n+\tctrl.allowed_full_dw = DW_SELECTORS_MATCH;\n+\tctrl.allowed_lim_dw = 0;\n+\tctrl.allowed_bytes = BYTE_SELECTORS;\n+\n+\tfound = mlx5dr_definer_best_hl_fit_recu(&ctrl, 0, (uint32_t *)hl);\n+\tif (found) {\n+\t\tmlx5dr_definer_apply_sel_ctrl(&ctrl, mt->definer);\n+\t\tmt->definer->type = 
MLX5DR_DEFINER_TYPE_MATCH;\n+\t\treturn 0;\n+\t}\n+\n+\t/* Try to create a full/limited jumbo definer */\n+\tctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :\n+\t\t\t\t\t\t\t\t  DW_SELECTORS_MATCH;\n+\tctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :\n+\t\t\t\t\t\t\t\t DW_SELECTORS_LIMITED;\n+\tctrl.allowed_bytes = BYTE_SELECTORS;\n+\n+\tfound = mlx5dr_definer_best_hl_fit_recu(&ctrl, 0, (uint32_t *)hl);\n+\tif (found) {\n+\t\tmlx5dr_definer_apply_sel_ctrl(&ctrl, mt->definer);\n+\t\tmt->definer->type = MLX5DR_DEFINER_TYPE_JUMBO;\n+\t\treturn 0;\n+\t}\n+\n+\tDR_LOG(ERR, \"Unable to find supporting match/jumbo definer combination\");\n+\trte_errno = ENOTSUP;\n+\treturn rte_errno;\n+}\n+\n+static void\n+mlx5dr_definer_create_tag_mask(struct rte_flow_item *items,\n+\t\t\t       struct mlx5dr_definer_fc *fc,\n+\t\t\t       uint32_t fc_sz,\n+\t\t\t       uint8_t *tag)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < fc_sz; i++) {\n+\t\tif (fc->tag_mask_set)\n+\t\t\tfc->tag_mask_set(fc, items[fc->item_idx].mask, tag);\n+\t\telse\n+\t\t\tfc->tag_set(fc, items[fc->item_idx].mask, tag);\n+\t\tfc++;\n+\t}\n+}\n+\n+void mlx5dr_definer_create_tag(const struct rte_flow_item *items,\n+\t\t\t       struct mlx5dr_definer_fc *fc,\n+\t\t\t       uint32_t fc_sz,\n+\t\t\t       uint8_t *tag)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < fc_sz; i++) {\n+\t\tfc->tag_set(fc, items[fc->item_idx].spec, tag);\n+\t\tfc++;\n+\t}\n+}\n+\n+int mlx5dr_definer_get_id(struct mlx5dr_definer *definer)\n+{\n+\treturn definer->obj->id;\n+}\n+\n+int mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,\n+\t\t\t   struct mlx5dr_definer *definer_b)\n+{\n+\tint i;\n+\n+\tif (definer_a->type != definer_b->type)\n+\t\treturn 1;\n+\n+\tfor (i = 0; i < BYTE_SELECTORS; i++)\n+\t\tif (definer_a->byte_selector[i] != definer_b->byte_selector[i])\n+\t\t\treturn 1;\n+\n+\tfor (i = 0; i < DW_SELECTORS; i++)\n+\t\tif (definer_a->dw_selector[i] != definer_b->dw_selector[i])\n+\t\t\treturn 1;\n+\n+\tfor (i = 0; i < MLX5DR_JUMBO_TAG_SZ; i++)\n+\t\tif (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])\n+\t\t\treturn 1;\n+\n+\treturn 0;\n+}\n+\n+int mlx5dr_definer_get(struct mlx5dr_context *ctx,\n+\t\t       struct mlx5dr_match_template *mt)\n+{\n+\tstruct mlx5dr_cmd_definer_create_attr def_attr = {0};\n+\tstruct ibv_context *ibv_ctx = ctx->ibv_ctx;\n+\tuint8_t *hl;\n+\tint ret;\n+\n+\tif (mt->refcount++)\n+\t\treturn 0;\n+\n+\tmt->definer = simple_calloc(1, sizeof(*mt->definer));\n+\tif (!mt->definer) {\n+\t\tDR_LOG(ERR, \"Failed to allocate memory for definer\");\n+\t\trte_errno = ENOMEM;\n+\t\tgoto dec_refcount;\n+\t}\n+\n+\t/* Header layout (hl) holds full bit mask per field */\n+\thl = simple_calloc(1, MLX5_ST_SZ_BYTES(definer_hl));\n+\tif (!hl) {\n+\t\tDR_LOG(ERR, \"Failed to allocate memory for header layout\");\n+\t\trte_errno = ENOMEM;\n+\t\tgoto free_definer;\n+\t}\n+\n+\t/* Convert items to hl and allocate the field copy array (fc) */\n+\tret = mlx5dr_definer_conv_items_to_hl(ctx, mt, hl);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to convert items to hl\");\n+\t\tgoto free_hl;\n+\t}\n+\n+\t/* Find the definer for given header layout */\n+\tret = mlx5dr_definer_find_best_hl_fit(ctx, mt, hl);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to create definer from header layout\");\n+\t\tgoto free_field_copy;\n+\t}\n+\n+\t/* Align field copy array based on the new definer */\n+\tret = mlx5dr_definer_fc_bind(mt->definer,\n+\t\t\t\t     mt->fc,\n+\t\t\t\t     mt->fc_sz);\n+\tif (ret) {\n+\t\tDR_LOG(ERR, \"Failed to 
bind field copy to definer\");\n+\t\tgoto free_field_copy;\n+\t}\n+\n+\t/* Create the tag mask used for definer creation */\n+\tmlx5dr_definer_create_tag_mask(mt->items,\n+\t\t\t\t       mt->fc,\n+\t\t\t\t       mt->fc_sz,\n+\t\t\t\t       mt->definer->mask.jumbo);\n+\n+\t/* Create definer based on the bitmask tag */\n+\tdef_attr.match_mask = mt->definer->mask.jumbo;\n+\tdef_attr.dw_selector = mt->definer->dw_selector;\n+\tdef_attr.byte_selector = mt->definer->byte_selector;\n+\tmt->definer->obj = mlx5dr_cmd_definer_create(ibv_ctx, &def_attr);\n+\tif (!mt->definer->obj)\n+\t\tgoto free_field_copy;\n+\n+\tsimple_free(hl);\n+\n+\treturn 0;\n+\n+free_field_copy:\n+\tsimple_free(mt->fc);\n+free_hl:\n+\tsimple_free(hl);\n+free_definer:\n+\tsimple_free(mt->definer);\n+dec_refcount:\n+\tmt->refcount--;\n+\n+\treturn rte_errno;\n+}\n+\n+void mlx5dr_definer_put(struct mlx5dr_match_template *mt)\n+{\n+\tif (--mt->refcount)\n+\t\treturn;\n+\n+\tsimple_free(mt->fc);\n+\tmlx5dr_cmd_destroy_obj(mt->definer->obj);\n+\tsimple_free(mt->definer);\n+}\ndiff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h\nnew file mode 100644\nindex 0000000000..d52c6b0627\n--- /dev/null\n+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h\n@@ -0,0 +1,585 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates\n+ */\n+\n+#ifndef MLX5DR_DEFINER_H_\n+#define MLX5DR_DEFINER_H_\n+\n+/* Selectors based on match TAG */\n+#define DW_SELECTORS_MATCH 6\n+#define DW_SELECTORS_LIMITED 3\n+#define DW_SELECTORS 9\n+#define BYTE_SELECTORS 8\n+\n+enum mlx5dr_definer_fname {\n+\tMLX5DR_DEFINER_FNAME_ETH_SMAC_48_16_O,\n+\tMLX5DR_DEFINER_FNAME_ETH_SMAC_48_16_I,\n+\tMLX5DR_DEFINER_FNAME_ETH_SMAC_15_0_O,\n+\tMLX5DR_DEFINER_FNAME_ETH_SMAC_15_0_I,\n+\tMLX5DR_DEFINER_FNAME_ETH_DMAC_48_16_O,\n+\tMLX5DR_DEFINER_FNAME_ETH_DMAC_48_16_I,\n+\tMLX5DR_DEFINER_FNAME_ETH_DMAC_15_0_O,\n+\tMLX5DR_DEFINER_FNAME_ETH_DMAC_15_0_I,\n+\tMLX5DR_DEFINER_FNAME_ETH_TYPE_O,\n+\tMLX5DR_DEFINER_FNAME_ETH_TYPE_I,\n+\tMLX5DR_DEFINER_FNAME_VLAN_TYPE_O,\n+\tMLX5DR_DEFINER_FNAME_VLAN_TYPE_I,\n+\tMLX5DR_DEFINER_FNAME_VLAN_TCI_O,\n+\tMLX5DR_DEFINER_FNAME_VLAN_TCI_I,\n+\tMLX5DR_DEFINER_FNAME_IPV4_IHL_O,\n+\tMLX5DR_DEFINER_FNAME_IPV4_IHL_I,\n+\tMLX5DR_DEFINER_FNAME_IP_TTL_O,\n+\tMLX5DR_DEFINER_FNAME_IP_TTL_I,\n+\tMLX5DR_DEFINER_FNAME_IPV4_DST_O,\n+\tMLX5DR_DEFINER_FNAME_IPV4_DST_I,\n+\tMLX5DR_DEFINER_FNAME_IPV4_SRC_O,\n+\tMLX5DR_DEFINER_FNAME_IPV4_SRC_I,\n+\tMLX5DR_DEFINER_FNAME_IP_VERSION_O,\n+\tMLX5DR_DEFINER_FNAME_IP_VERSION_I,\n+\tMLX5DR_DEFINER_FNAME_IP_FRAG_O,\n+\tMLX5DR_DEFINER_FNAME_IP_FRAG_I,\n+\tMLX5DR_DEFINER_FNAME_IPV6_PAYLOAD_LEN_O,\n+\tMLX5DR_DEFINER_FNAME_IPV6_PAYLOAD_LEN_I,\n+\tMLX5DR_DEFINER_FNAME_IP_TOS_O,\n+\tMLX5DR_DEFINER_FNAME_IP_TOS_I,\n+\tMLX5DR_DEFINER_FNAME_IPV6_FLOW_LABEL_O,\n+\tMLX5DR_DEFINER_FNAME_IPV6_FLOW_LABEL_I,\n+\tMLX5DR_DEFINER_FNAME_IPV6_DST_127_96_O,\n+\tMLX5DR_DEFINER_FNAME_IPV6_DST_95_64_O,\n+\tMLX5DR_DEFINER_FNAME_IPV6_DST_63_32_O,\n+\tMLX5DR_DEFINER_FNAME_IPV6_DST_31_0_O,\n+\tMLX5DR_DEFINER_FNAME_IPV6_DST_127_96_I,\n+\tMLX5DR_DEFINER_FNAME_IPV6_DST_95_64_I,\n+\tMLX5DR_DEFINER_FNAME_IPV6_DST_63_32_I,\n+\tMLX5DR_DEFINER_FNAME_IPV6_DST_31_0_I,\n+\tMLX5DR_DEFINER_FNAME_IPV6_SRC_127_96_O,\n+\tMLX5DR_DEFINER_FNAME_IPV6_SRC_95_64_O,\n+\tMLX5DR_DEFINER_FNAME_IPV6_SRC_63_32_O,\n+\tMLX5DR_DEFINER_FNAME_IPV6_SRC_31_0_O,\n+\tMLX5DR_DEFINER_FNAME_IPV6_SRC_127_96_I,\n+\tMLX5DR_DEFINER_FNAME_IPV6_SRC_95_64_I,\n+\tMLX5DR_DEFINER_FNAME_IPV6_SRC_63_32_I,\n+\tMLX5DR_DEFINER_FNAME_IPV6_
SRC_31_0_I,\n+\tMLX5DR_DEFINER_FNAME_IP_PROTOCOL_O,\n+\tMLX5DR_DEFINER_FNAME_IP_PROTOCOL_I,\n+\tMLX5DR_DEFINER_FNAME_L4_SPORT_O,\n+\tMLX5DR_DEFINER_FNAME_L4_SPORT_I,\n+\tMLX5DR_DEFINER_FNAME_L4_DPORT_O,\n+\tMLX5DR_DEFINER_FNAME_L4_DPORT_I,\n+\tMLX5DR_DEFINER_FNAME_TCP_FLAGS_I,\n+\tMLX5DR_DEFINER_FNAME_TCP_FLAGS_O,\n+\tMLX5DR_DEFINER_FNAME_GTP_TEID,\n+\tMLX5DR_DEFINER_FNAME_GTP_MSG_TYPE,\n+\tMLX5DR_DEFINER_FNAME_GTP_EXT_FLAG,\n+\tMLX5DR_DEFINER_FNAME_GTP_NEXT_EXT_HDR,\n+\tMLX5DR_DEFINER_FNAME_GTP_EXT_HDR_PDU,\n+\tMLX5DR_DEFINER_FNAME_GTP_EXT_HDR_QFI,\n+\tMLX5DR_DEFINER_FNAME_FLEX_PARSER_0,\n+\tMLX5DR_DEFINER_FNAME_FLEX_PARSER_1,\n+\tMLX5DR_DEFINER_FNAME_FLEX_PARSER_2,\n+\tMLX5DR_DEFINER_FNAME_FLEX_PARSER_3,\n+\tMLX5DR_DEFINER_FNAME_FLEX_PARSER_4,\n+\tMLX5DR_DEFINER_FNAME_FLEX_PARSER_5,\n+\tMLX5DR_DEFINER_FNAME_FLEX_PARSER_6,\n+\tMLX5DR_DEFINER_FNAME_FLEX_PARSER_7,\n+\tMLX5DR_DEFINER_FNAME_VPORT_REG_C_0,\n+\tMLX5DR_DEFINER_FNAME_VXLAN_FLAGS,\n+\tMLX5DR_DEFINER_FNAME_VXLAN_VNI,\n+\tMLX5DR_DEFINER_FNAME_SOURCE_QP,\n+\tMLX5DR_DEFINER_FNAME_REG_0,\n+\tMLX5DR_DEFINER_FNAME_REG_1,\n+\tMLX5DR_DEFINER_FNAME_REG_2,\n+\tMLX5DR_DEFINER_FNAME_REG_3,\n+\tMLX5DR_DEFINER_FNAME_REG_4,\n+\tMLX5DR_DEFINER_FNAME_REG_5,\n+\tMLX5DR_DEFINER_FNAME_REG_6,\n+\tMLX5DR_DEFINER_FNAME_REG_7,\n+\tMLX5DR_DEFINER_FNAME_REG_A,\n+\tMLX5DR_DEFINER_FNAME_REG_B,\n+\tMLX5DR_DEFINER_FNAME_GRE_KEY_PRESENT,\n+\tMLX5DR_DEFINER_FNAME_GRE_C_VER,\n+\tMLX5DR_DEFINER_FNAME_GRE_PROTOCOL,\n+\tMLX5DR_DEFINER_FNAME_GRE_OPT_KEY,\n+\tMLX5DR_DEFINER_FNAME_GRE_OPT_SEQ,\n+\tMLX5DR_DEFINER_FNAME_GRE_OPT_CHECKSUM,\n+\tMLX5DR_DEFINER_FNAME_INTEGRITY_O,\n+\tMLX5DR_DEFINER_FNAME_INTEGRITY_I,\n+\tMLX5DR_DEFINER_FNAME_ICMP_DW1,\n+\tMLX5DR_DEFINER_FNAME_ICMP_DW2,\n+\tMLX5DR_DEFINER_FNAME_MAX,\n+};\n+\n+enum mlx5dr_definer_type {\n+\tMLX5DR_DEFINER_TYPE_MATCH,\n+\tMLX5DR_DEFINER_TYPE_JUMBO,\n+};\n+\n+struct mlx5dr_definer_fc {\n+\tuint8_t item_idx;\n+\tuint32_t byte_off;\n+\tint bit_off;\n+\tuint32_t bit_mask;\n+\tenum mlx5dr_definer_fname fname;\n+\tvoid (*tag_set)(struct mlx5dr_definer_fc *fc,\n+\t\t\tconst void *item_spec,\n+\t\t\tuint8_t *tag);\n+\tvoid (*tag_mask_set)(struct mlx5dr_definer_fc *fc,\n+\t\t\t     const void *item_spec,\n+\t\t\t     uint8_t *tag);\n+};\n+\n+struct mlx5_ifc_definer_hl_eth_l2_bits {\n+\tu8 dmac_47_16[0x20];\n+\tu8 dmac_15_0[0x10];\n+\tu8 l3_ethertype[0x10];\n+\tu8 reserved_at_40[0x1];\n+\tu8 sx_sniffer[0x1];\n+\tu8 functional_lb[0x1];\n+\tu8 ip_fragmented[0x1];\n+\tu8 qp_type[0x2];\n+\tu8 encap_type[0x2];\n+\tu8 port_number[0x2];\n+\tu8 l3_type[0x2];\n+\tu8 l4_type_bwc[0x2];\n+\tu8 first_vlan_qualifier[0x2];\n+\tu8 tci[0x10]; /* contains first_priority[0x3] + first_cfi[0x1] + first_vlan_id[0xc] */\n+\tu8 l4_type[0x4];\n+\tu8 reserved_at_64[0x2];\n+\tu8 ipsec_layer[0x2];\n+\tu8 l2_type[0x2];\n+\tu8 force_lb[0x1];\n+\tu8 l2_ok[0x1];\n+\tu8 l3_ok[0x1];\n+\tu8 l4_ok[0x1];\n+\tu8 second_vlan_qualifier[0x2];\n+\tu8 second_priority[0x3];\n+\tu8 second_cfi[0x1];\n+\tu8 second_vlan_id[0xc];\n+};\n+\n+struct mlx5_ifc_definer_hl_eth_l2_src_bits {\n+\tu8 smac_47_16[0x20];\n+\tu8 smac_15_0[0x10];\n+\tu8 loopback_syndrome[0x8];\n+\tu8 l3_type[0x2];\n+\tu8 l4_type_bwc[0x2];\n+\tu8 first_vlan_qualifier[0x2];\n+\tu8 ip_fragmented[0x1];\n+\tu8 functional_lb[0x1];\n+};\n+\n+struct mlx5_ifc_definer_hl_ib_l2_bits {\n+\tu8 sx_sniffer[0x1];\n+\tu8 force_lb[0x1];\n+\tu8 functional_lb[0x1];\n+\tu8 reserved_at_3[0x3];\n+\tu8 port_number[0x2];\n+\tu8 sl[0x4];\n+\tu8 qp_type[0x2];\n+\tu8 lnh[0x2];\n+\tu8 dlid[0x10];\n+\tu8 vl[0x4];\n+\tu8 
lrh_packet_length[0xc];\n+\tu8 slid[0x10];\n+};\n+\n+struct mlx5_ifc_definer_hl_eth_l3_bits {\n+\tu8 ip_version[0x4];\n+\tu8 ihl[0x4];\n+\tunion {\n+\t\tu8 tos[0x8];\n+\t\tstruct {\n+\t\t\tu8 dscp[0x6];\n+\t\t\tu8 ecn[0x2];\n+\t\t};\n+\t};\n+\tu8 time_to_live_hop_limit[0x8];\n+\tu8 protocol_next_header[0x8];\n+\tu8 identification[0x10];\n+\tu8 flags[0x3];\n+\tu8 fragment_offset[0xd];\n+\tu8 ipv4_total_length[0x10];\n+\tu8 checksum[0x10];\n+\tu8 reserved_at_60[0xc];\n+\tu8 flow_label[0x14];\n+\tu8 packet_length[0x10];\n+\tu8 ipv6_payload_length[0x10];\n+};\n+\n+struct mlx5_ifc_definer_hl_eth_l4_bits {\n+\tu8 source_port[0x10];\n+\tu8 destination_port[0x10];\n+\tu8 data_offset[0x4];\n+\tu8 l4_ok[0x1];\n+\tu8 l3_ok[0x1];\n+\tu8 ip_fragmented[0x1];\n+\tu8 tcp_ns[0x1];\n+\tunion {\n+\t\tu8 tcp_flags[0x8];\n+\t\tstruct {\n+\t\t\tu8 tcp_cwr[0x1];\n+\t\t\tu8 tcp_ece[0x1];\n+\t\t\tu8 tcp_urg[0x1];\n+\t\t\tu8 tcp_ack[0x1];\n+\t\t\tu8 tcp_psh[0x1];\n+\t\t\tu8 tcp_rst[0x1];\n+\t\t\tu8 tcp_syn[0x1];\n+\t\t\tu8 tcp_fin[0x1];\n+\t\t};\n+\t};\n+\tu8 first_fragment[0x1];\n+\tu8 reserved_at_31[0xf];\n+};\n+\n+struct mlx5_ifc_definer_hl_src_qp_gvmi_bits {\n+\tu8 loopback_syndrome[0x8];\n+\tu8 l3_type[0x2];\n+\tu8 l4_type_bwc[0x2];\n+\tu8 first_vlan_qualifier[0x2];\n+\tu8 reserved_at_e[0x1];\n+\tu8 functional_lb[0x1];\n+\tu8 source_gvmi[0x10];\n+\tu8 force_lb[0x1];\n+\tu8 ip_fragmented[0x1];\n+\tu8 source_is_requestor[0x1];\n+\tu8 reserved_at_23[0x5];\n+\tu8 source_qp[0x18];\n+};\n+\n+struct mlx5_ifc_definer_hl_ib_l4_bits {\n+\tu8 opcode[0x8];\n+\tu8 qp[0x18];\n+\tu8 se[0x1];\n+\tu8 migreq[0x1];\n+\tu8 ackreq[0x1];\n+\tu8 fecn[0x1];\n+\tu8 becn[0x1];\n+\tu8 bth[0x1];\n+\tu8 deth[0x1];\n+\tu8 dcceth[0x1];\n+\tu8 reserved_at_28[0x2];\n+\tu8 pad_count[0x2];\n+\tu8 tver[0x4];\n+\tu8 p_key[0x10];\n+\tu8 reserved_at_40[0x8];\n+\tu8 deth_source_qp[0x18];\n+};\n+\n+enum mlx5dr_integrity_ok1_bits {\n+\tMLX5DR_DEFINER_OKS1_FIRST_L4_OK = 24,\n+\tMLX5DR_DEFINER_OKS1_FIRST_L3_OK = 25,\n+\tMLX5DR_DEFINER_OKS1_SECOND_L4_OK = 26,\n+\tMLX5DR_DEFINER_OKS1_SECOND_L3_OK = 27,\n+\tMLX5DR_DEFINER_OKS1_FIRST_L4_CSUM_OK = 28,\n+\tMLX5DR_DEFINER_OKS1_FIRST_IPV4_CSUM_OK = 29,\n+\tMLX5DR_DEFINER_OKS1_SECOND_L4_CSUM_OK = 30,\n+\tMLX5DR_DEFINER_OKS1_SECOND_IPV4_CSUM_OK = 31,\n+};\n+\n+struct mlx5_ifc_definer_hl_oks1_bits {\n+\tunion {\n+\t\tu8 oks1_bits[0x20];\n+\t\tstruct {\n+\t\t\tu8 second_ipv4_checksum_ok[0x1];\n+\t\t\tu8 second_l4_checksum_ok[0x1];\n+\t\t\tu8 first_ipv4_checksum_ok[0x1];\n+\t\t\tu8 first_l4_checksum_ok[0x1];\n+\t\t\tu8 second_l3_ok[0x1];\n+\t\t\tu8 second_l4_ok[0x1];\n+\t\t\tu8 first_l3_ok[0x1];\n+\t\t\tu8 first_l4_ok[0x1];\n+\t\t\tu8 flex_parser7_steering_ok[0x1];\n+\t\t\tu8 flex_parser6_steering_ok[0x1];\n+\t\t\tu8 flex_parser5_steering_ok[0x1];\n+\t\t\tu8 flex_parser4_steering_ok[0x1];\n+\t\t\tu8 flex_parser3_steering_ok[0x1];\n+\t\t\tu8 flex_parser2_steering_ok[0x1];\n+\t\t\tu8 flex_parser1_steering_ok[0x1];\n+\t\t\tu8 flex_parser0_steering_ok[0x1];\n+\t\t\tu8 second_ipv6_extension_header_vld[0x1];\n+\t\t\tu8 first_ipv6_extension_header_vld[0x1];\n+\t\t\tu8 l3_tunneling_ok[0x1];\n+\t\t\tu8 l2_tunneling_ok[0x1];\n+\t\t\tu8 second_tcp_ok[0x1];\n+\t\t\tu8 second_udp_ok[0x1];\n+\t\t\tu8 second_ipv4_ok[0x1];\n+\t\t\tu8 second_ipv6_ok[0x1];\n+\t\t\tu8 second_l2_ok[0x1];\n+\t\t\tu8 vxlan_ok[0x1];\n+\t\t\tu8 gre_ok[0x1];\n+\t\t\tu8 first_tcp_ok[0x1];\n+\t\t\tu8 first_udp_ok[0x1];\n+\t\t\tu8 first_ipv4_ok[0x1];\n+\t\t\tu8 first_ipv6_ok[0x1];\n+\t\t\tu8 first_l2_ok[0x1];\n+\t\t};\n+\t};\n+};\n+\n+struct 
mlx5_ifc_definer_hl_oks2_bits {\n+\tu8 reserved_at_0[0xa];\n+\tu8 second_mpls_ok[0x1];\n+\tu8 second_mpls4_s_bit[0x1];\n+\tu8 second_mpls4_qualifier[0x1];\n+\tu8 second_mpls3_s_bit[0x1];\n+\tu8 second_mpls3_qualifier[0x1];\n+\tu8 second_mpls2_s_bit[0x1];\n+\tu8 second_mpls2_qualifier[0x1];\n+\tu8 second_mpls1_s_bit[0x1];\n+\tu8 second_mpls1_qualifier[0x1];\n+\tu8 second_mpls0_s_bit[0x1];\n+\tu8 second_mpls0_qualifier[0x1];\n+\tu8 first_mpls_ok[0x1];\n+\tu8 first_mpls4_s_bit[0x1];\n+\tu8 first_mpls4_qualifier[0x1];\n+\tu8 first_mpls3_s_bit[0x1];\n+\tu8 first_mpls3_qualifier[0x1];\n+\tu8 first_mpls2_s_bit[0x1];\n+\tu8 first_mpls2_qualifier[0x1];\n+\tu8 first_mpls1_s_bit[0x1];\n+\tu8 first_mpls1_qualifier[0x1];\n+\tu8 first_mpls0_s_bit[0x1];\n+\tu8 first_mpls0_qualifier[0x1];\n+};\n+\n+struct mlx5_ifc_definer_hl_voq_bits {\n+\tu8 reserved_at_0[0x18];\n+\tu8 ecn_ok[0x1];\n+\tu8 congestion[0x1];\n+\tu8 profile[0x2];\n+\tu8 internal_prio[0x4];\n+};\n+\n+struct mlx5_ifc_definer_hl_ipv4_src_dst_bits {\n+\tu8 source_address[0x20];\n+\tu8 destination_address[0x20];\n+};\n+\n+struct mlx5_ifc_definer_hl_ipv6_addr_bits {\n+\tu8 ipv6_address_127_96[0x20];\n+\tu8 ipv6_address_95_64[0x20];\n+\tu8 ipv6_address_63_32[0x20];\n+\tu8 ipv6_address_31_0[0x20];\n+};\n+\n+struct mlx5_ifc_definer_tcp_icmp_header_bits {\n+\tunion {\n+\t\tstruct {\n+\t\t\tu8 icmp_dw1[0x20];\n+\t\t\tu8 icmp_dw2[0x20];\n+\t\t\tu8 icmp_dw3[0x20];\n+\t\t};\n+\t\tstruct {\n+\t\t\tu8 tcp_seq[0x20];\n+\t\t\tu8 tcp_ack[0x20];\n+\t\t\tu8 tcp_win_urg[0x20];\n+\t\t};\n+\t};\n+};\n+\n+struct mlx5_ifc_definer_hl_tunnel_header_bits {\n+\tu8 tunnel_header_0[0x20];\n+\tu8 tunnel_header_1[0x20];\n+\tu8 tunnel_header_2[0x20];\n+\tu8 tunnel_header_3[0x20];\n+};\n+\n+struct mlx5_ifc_definer_hl_ipsec_bits {\n+\tu8 spi[0x20];\n+\tu8 sequence_number[0x20];\n+\tu8 reserved[0x10];\n+\tu8 ipsec_syndrome[0x8];\n+\tu8 next_header[0x8];\n+};\n+\n+struct mlx5_ifc_definer_hl_metadata_bits {\n+\tu8 metadata_to_cqe[0x20];\n+\tu8 general_purpose[0x20];\n+\tu8 acomulated_hash[0x20];\n+};\n+\n+struct mlx5_ifc_definer_hl_flex_parser_bits {\n+\tu8 flex_parser_7[0x20];\n+\tu8 flex_parser_6[0x20];\n+\tu8 flex_parser_5[0x20];\n+\tu8 flex_parser_4[0x20];\n+\tu8 flex_parser_3[0x20];\n+\tu8 flex_parser_2[0x20];\n+\tu8 flex_parser_1[0x20];\n+\tu8 flex_parser_0[0x20];\n+};\n+\n+struct mlx5_ifc_definer_hl_registers_bits {\n+\tu8 register_c_10[0x20];\n+\tu8 register_c_11[0x20];\n+\tu8 register_c_8[0x20];\n+\tu8 register_c_9[0x20];\n+\tu8 register_c_6[0x20];\n+\tu8 register_c_7[0x20];\n+\tu8 register_c_4[0x20];\n+\tu8 register_c_5[0x20];\n+\tu8 register_c_2[0x20];\n+\tu8 register_c_3[0x20];\n+\tu8 register_c_0[0x20];\n+\tu8 register_c_1[0x20];\n+};\n+\n+struct mlx5_ifc_definer_hl_bits {\n+\tstruct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer;\n+\tstruct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner;\n+\tstruct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_outer;\n+\tstruct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_inner;\n+\tstruct mlx5_ifc_definer_hl_ib_l2_bits ib_l2;\n+\tstruct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_outer;\n+\tstruct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_inner;\n+\tstruct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_outer;\n+\tstruct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_inner;\n+\tstruct mlx5_ifc_definer_hl_src_qp_gvmi_bits source_qp_gvmi;\n+\tstruct mlx5_ifc_definer_hl_ib_l4_bits ib_l4;\n+\tstruct mlx5_ifc_definer_hl_oks1_bits oks1;\n+\tstruct mlx5_ifc_definer_hl_oks2_bits oks2;\n+\tstruct mlx5_ifc_definer_hl_voq_bits voq;\n+\tu8 
reserved_at_480[0x380];\n+\tstruct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_outer;\n+\tstruct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_inner;\n+\tstruct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_outer;\n+\tstruct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_inner;\n+\tstruct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_outer;\n+\tstruct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_inner;\n+\tu8 unsupported_dest_ib_l3[0x80];\n+\tu8 unsupported_source_ib_l3[0x80];\n+\tu8 unsupported_udp_misc_outer[0x20];\n+\tu8 unsupported_udp_misc_inner[0x20];\n+\tstruct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp;\n+\tstruct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header;\n+\tu8 unsupported_mpls_outer[0xa0];\n+\tu8 unsupported_mpls_inner[0xa0];\n+\tu8 unsupported_config_headers_outer[0x80];\n+\tu8 unsupported_config_headers_inner[0x80];\n+\tu8 unsupported_random_number[0x20];\n+\tstruct mlx5_ifc_definer_hl_ipsec_bits ipsec;\n+\tstruct mlx5_ifc_definer_hl_metadata_bits metadata;\n+\tu8 unsupported_utc_timestamp[0x40];\n+\tu8 unsupported_free_running_timestamp[0x40];\n+\tstruct mlx5_ifc_definer_hl_flex_parser_bits flex_parser;\n+\tstruct mlx5_ifc_definer_hl_registers_bits registers;\n+\t/* struct x ib_l3_extended; */\n+\t/* struct x rwh */\n+\t/* struct x dcceth */\n+\t/* struct x dceth */\n+};\n+\n+enum mlx5dr_definer_gtp {\n+\tMLX5DR_DEFINER_GTP_EXT_HDR_BIT = 0x04,\n+};\n+\n+struct mlx5_ifc_header_gtp_bits {\n+\tu8 version[0x3];\n+\tu8 proto_type[0x1];\n+\tu8 reserved1[0x1];\n+\tu8 ext_hdr_flag[0x1];\n+\tu8 seq_num_flag[0x1];\n+\tu8 pdu_flag[0x1];\n+\tu8 msg_type[0x8];\n+\tu8 msg_len[0x8];\n+\tu8 teid[0x20];\n+};\n+\n+struct mlx5_ifc_header_opt_gtp_bits {\n+\tu8 seq_num[0x10];\n+\tu8 pdu_num[0x8];\n+\tu8 next_ext_hdr_type[0x8];\n+};\n+\n+struct mlx5_ifc_header_gtp_psc_bits {\n+\tu8 len[0x8];\n+\tu8 pdu_type[0x4];\n+\tu8 flags[0x4];\n+\tu8 qfi[0x8];\n+\tu8 reserved2[0x8];\n+};\n+\n+struct mlx5_ifc_header_ipv6_vtc_bits {\n+\tu8 version[0x4];\n+\tunion {\n+\t\tu8 tos[0x8];\n+\t\tstruct {\n+\t\t\tu8 dscp[0x6];\n+\t\t\tu8 ecn[0x2];\n+\t\t};\n+\t};\n+\tu8 flow_label[0x14];\n+};\n+\n+struct mlx5_ifc_header_vxlan_bits {\n+\tu8 flags[0x8];\n+\tu8 reserved1[0x18];\n+\tu8 vni[0x18];\n+\tu8 reserved2[0x8];\n+};\n+\n+struct mlx5_ifc_header_gre_bits {\n+\tunion {\n+\t\tu8 c_rsvd0_ver[0x10];\n+\t\tstruct {\n+\t\t\tu8 gre_c_present[0x1];\n+\t\t\tu8 reserved_at_1[0x1];\n+\t\t\tu8 gre_k_present[0x1];\n+\t\t\tu8 gre_s_present[0x1];\n+\t\t\tu8 reserved_at_4[0x9];\n+\t\t\tu8 version[0x3];\n+\t\t};\n+\t};\n+\tu8 gre_protocol[0x10];\n+\tu8 checksum[0x10];\n+\tu8 reserved_at_30[0x10];\n+};\n+\n+struct mlx5_ifc_header_icmp_bits {\n+\tunion {\n+\t\tu8 icmp_dw1[0x20];\n+\t\tstruct {\n+\t\t\tu8 type[0x8];\n+\t\t\tu8 code[0x8];\n+\t\t\tu8 cksum[0x10];\n+\t\t};\n+\t};\n+\tunion {\n+\t\tu8 icmp_dw2[0x20];\n+\t\tstruct {\n+\t\t\tu8 ident[0x10];\n+\t\t\tu8 seq_nb[0x10];\n+\t\t};\n+\t};\n+};\n+\n+struct mlx5dr_definer {\n+\tenum mlx5dr_definer_type type;\n+\tuint8_t dw_selector[DW_SELECTORS];\n+\tuint8_t byte_selector[BYTE_SELECTORS];\n+\tstruct mlx5dr_rule_match_tag mask;\n+\tstruct mlx5dr_devx_obj *obj;\n+};\n+\n+static inline bool\n+mlx5dr_definer_is_jumbo(struct mlx5dr_definer *definer)\n+{\n+\treturn (definer->type == MLX5DR_DEFINER_TYPE_JUMBO);\n+}\n+\n+void mlx5dr_definer_create_tag(const struct rte_flow_item *items,\n+\t\t\t       struct mlx5dr_definer_fc *fc,\n+\t\t\t       uint32_t fc_sz,\n+\t\t\t       uint8_t *tag);\n+\n+int mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,\n+\t\t\t   struct 
mlx5dr_definer *definer_b);\n+\n+int mlx5dr_definer_get_id(struct mlx5dr_definer *definer);\n+\n+int mlx5dr_definer_get(struct mlx5dr_context *ctx,\n+\t\t       struct mlx5dr_match_template *mt);\n+\n+void mlx5dr_definer_put(struct mlx5dr_match_template *mt);\n+\n+#endif /* MLX5DR_DEFINER_H_ */\n",
    "prefixes": [
        "v6",
        "11/18"
    ]
}
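
For illustration, a minimal standalone sketch of the header-layout-to-tag byte mapping implemented by mlx5dr_definer_find_byte_in_tag() in the patch above: DW selectors fill the front of the match tag (highest slot first) and byte selectors follow, consumed from index 7 down to 0. The constants mirror mlx5dr_definer.h, but the helper name and the selector values are hypothetical, chosen only for the demonstration; this is a sketch, not the driver's API.

/*
 * Standalone sketch (not part of the patch) of the HL-byte -> tag-byte
 * mapping rule. Selector values below are made up for the example.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DW_SIZE 4
#define DW_SELECTORS 9
#define BYTE_SELECTORS 8

static int find_byte_in_tag(const uint8_t *dw_sel, const uint8_t *byte_sel,
			    uint32_t hl_byte_off, uint32_t *tag_byte_off)
{
	int i;

	/* A DW selector covers all four bytes of its header-layout DW */
	for (i = 0; i < DW_SELECTORS; i++) {
		if (dw_sel[i] == hl_byte_off / DW_SIZE) {
			*tag_byte_off = hl_byte_off % DW_SIZE +
					DW_SIZE * (DW_SELECTORS - i - 1);
			return 0;
		}
	}

	/* Byte selectors sit after the DW area, iterated from 7 -> 0 */
	for (i = BYTE_SELECTORS; i-- > 0;) {
		if (byte_sel[i] == hl_byte_off) {
			*tag_byte_off = DW_SIZE * DW_SELECTORS +
					(BYTE_SELECTORS - i - 1);
			return 0;
		}
	}

	return -1; /* offset not covered by this definer */
}

int main(void)
{
	/* Hypothetical definer: HL DW 5 in slot 0, HL byte 41 in byte slot 7 */
	uint8_t dw_sel[DW_SELECTORS] = { 5 };
	uint8_t byte_sel[BYTE_SELECTORS] = { [7] = 41 };
	uint32_t off;

	if (!find_byte_in_tag(dw_sel, byte_sel, 22, &off))
		printf("HL byte 22 -> tag byte %" PRIu32 "\n", off); /* 2 + 32 = 34 */
	if (!find_byte_in_tag(dw_sel, byte_sel, 41, &off))
		printf("HL byte 41 -> tag byte %" PRIu32 "\n", off); /* 36 + 0 = 36 */
	return 0;
}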
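
Similarly, the backtracking search in mlx5dr_definer_best_hl_fit_recu() budgets one byte selector per non-zero byte of a header-layout DW mask when no DW selector is available. A short sketch of that accounting follows; the mask value and the consumed-selector count are hypothetical inputs for the example.

/*
 * Sketch (not part of the patch) of the per-DW byte accounting: a mask
 * touching N distinct bytes of a DW needs N byte selectors, and the
 * recursion backtracks once the BYTE_SELECTORS budget would be exceeded.
 */
#include <stdio.h>

#define BYTE_SELECTORS 8

static unsigned int bytes_set_in_dw(unsigned int data)
{
	return !!(0x000000ff & data) +
	       !!(0x0000ff00 & data) +
	       !!(0x00ff0000 & data) +
	       !!(0xff000000 & data);
}

int main(void)
{
	unsigned int mask = 0x00ff00f0; /* touches bytes 0 and 2 */
	unsigned int used = 7;          /* byte selectors already consumed */
	unsigned int need = bytes_set_in_dw(mask);

	printf("mask 0x%08x needs %u byte selectors\n", mask, need);
	if (need + used > BYTE_SELECTORS)
		printf("budget exceeded, backtrack\n"); /* 2 + 7 > 8 */
	return 0;
}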