get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
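
As a side note, the JSON shown below can also be retrieved programmatically.
A minimal sketch in Python using the third-party "requests" package (an
assumption, not part of this page) could look like:

    # Fetch this patch from the Patchwork REST API and print a few of the
    # fields that appear in the response below. Asking explicitly for JSON
    # avoids the browsable "?format=api" HTML rendering.
    import requests

    url = "http://patches.dpdk.org/api/patches/118810/"
    resp = requests.get(url, headers={"Accept": "application/json"}, timeout=10)
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])               # patch subject
    print(patch["state"])              # e.g. "accepted"
    print(patch["submitter"]["name"])  # "Alex Vesker"
    print(patch["mbox"])               # raw mbox URL, suitable for "git am"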

GET /api/patches/118810/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118810,
    "url": "http://patches.dpdk.org/api/patches/118810/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221020155749.16643-3-valex@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221020155749.16643-3-valex@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221020155749.16643-3-valex@nvidia.com",
    "date": "2022-10-20T15:57:32",
    "name": "[v6,02/18] net/mlx5: split flow item matcher and value translation",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "5ee714fba75b878dc21b1b312d7a8477f4070049",
    "submitter": {
        "id": 2858,
        "url": "http://patches.dpdk.org/api/people/2858/?format=api",
        "name": "Alex Vesker",
        "email": "valex@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221020155749.16643-3-valex@nvidia.com/mbox/",
    "series": [
        {
            "id": 25345,
            "url": "http://patches.dpdk.org/api/series/25345/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=25345",
            "date": "2022-10-20T15:57:30",
            "name": "net/mlx5: Add HW steering low level support",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/25345/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/118810/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/118810/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5D1FCA0553;\n\tThu, 20 Oct 2022 17:59:18 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id D5C6C42B6F;\n\tThu, 20 Oct 2022 17:59:11 +0200 (CEST)",
            "from NAM10-BN7-obe.outbound.protection.outlook.com\n (mail-bn7nam10on2047.outbound.protection.outlook.com [40.107.92.47])\n by mails.dpdk.org (Postfix) with ESMTP id 0235A40694\n for <dev@dpdk.org>; Thu, 20 Oct 2022 17:59:10 +0200 (CEST)",
            "from BN0PR04CA0058.namprd04.prod.outlook.com (2603:10b6:408:e8::33)\n by BY5PR12MB4292.namprd12.prod.outlook.com (2603:10b6:a03:212::12)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5723.34; Thu, 20 Oct\n 2022 15:59:05 +0000",
            "from BN8NAM11FT084.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:e8:cafe::58) by BN0PR04CA0058.outlook.office365.com\n (2603:10b6:408:e8::33) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5723.35 via Frontend\n Transport; Thu, 20 Oct 2022 15:59:05 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n BN8NAM11FT084.mail.protection.outlook.com (10.13.176.169) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5746.16 via Frontend Transport; Thu, 20 Oct 2022 15:59:04 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Thu, 20 Oct\n 2022 08:58:50 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Thu, 20 Oct\n 2022 08:58:47 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=Kf5JE6J224kteQnSREzN9OKWwlGAKAtCfO46Kr8qBLsKyow8h/dtVfcF1NQA1d9Cxe2HPUaZnDunGVdAvBCw4VlDufjzeZibtedotwUN55YI/zLbCJwqsP1j8yQwZo+dWtoL7lC8kq7z8mEWvn9Te8to35XUNo9ACCRW8ae5cNePNt1HoSAAO8TDjahRaCnf8f6fVSGVJIjmjrwhZo3noS29+V7YZpbkBxtAZ7BLEBFSReAMdicHoYO3zKnYD1eaemhxJvv0tXgObiqTIeOU0W2X4PQv9732StXMQbdVcKx4Uo49Za+UhHzPlhsunmxOAQi3QJZ9DEZaH2sRhZYJ9g==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=nYBmhlfk1TmMglF9zHu6cOUFZOSXt2SHhNft9ZIgado=;\n b=lIzfsXmmXwx5Ghkisim3WVTwA4f/F3cgaPil4cHyhBNj9Fae+nKvhUn7Pph4h/ENar3QOKw8nRguFG0h5PVS8JqCU2gJeehCdVqLp2HsLRL5dIuyfyNG8zZwpfQYpoO+89NWl2txRq9pvIRK9b2rIrXW+4NQSEzZbZE7Tl7Yyh5astyXXM413LxOzmLq3aZcn8wWwYDpQ6k5kq5rsHY3j5jLR2WYnEGsBCceIdqiKYGry+DuUAdkY4NHDmV6TV64yoql6K4HZaFrwtOWhf50NtEdN1vgB4d84LzTM6hn3Ea8Q0Mzam0l0MI6ipxZNhqG0U1NuyCOVUo0OAYMkpqYpA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=none (sender ip is\n 216.228.117.161) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=fail (p=reject sp=reject pct=100) action=oreject\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=nYBmhlfk1TmMglF9zHu6cOUFZOSXt2SHhNft9ZIgado=;\n b=cfB/sF/7djOZMgdlNaj7s9SEFUiqLVdYPSXC0UyCpHCuTMqWMgJwpXSEceRu58x6KFVGnIoE1X2lkrieyUhi84CYJjI1o/Fkr2y4H2PT1iePYJkD3vYWZ+/HgHP12D0fgVWrSMjIbQKznWBlT4RU3yAERXoaKUnGWLsH5mcDJ7ZTs69dE3gBwUbxCEwZccn8UpUx+SFtb7HnCeoMlKNMYiEI74CQr63Xf8jfTuoI1T0u7bEFaaWLkjKmPIYZqDZj4+a53xG7qEZBNSjw0fonrDT6J16LFL59pFLnB148NoGsGi1o0/dL+hJrzwkb7UhS6k/CJW5+vZEvg1Gasrjxzg==",
        "X-MS-Exchange-Authentication-Results": "spf=none (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=fail action=oreject header.from=nvidia.com;",
        "Received-SPF": "None (protection.outlook.com: nvidia.com does not designate\n permitted sender hosts)",
        "From": "Alex Vesker <valex@nvidia.com>",
        "To": "<valex@nvidia.com>, <viacheslavo@nvidia.com>, <thomas@monjalon.net>,\n <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, <orika@nvidia.com>",
        "Subject": "[v6 02/18] net/mlx5: split flow item matcher and value translation",
        "Date": "Thu, 20 Oct 2022 18:57:32 +0300",
        "Message-ID": "<20221020155749.16643-3-valex@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20221020155749.16643-1-valex@nvidia.com>",
        "References": "<20220922190345.394-1-valex@nvidia.com>\n <20221020155749.16643-1-valex@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "BN8NAM11FT084:EE_|BY5PR12MB4292:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "105aab19-0386-47ed-e97b-08dab2b403a0",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n KxB2uwCyoPamvbZrxFw+cFOhkKr9Lo7Cf8lVSvmhm/6PtOZvAkf5HvmlL1rEa00SRWqrZCXypUiYCjiHCp8a/hAMZWpa5S53puxIv39rMoy5YxtFQV5wersoyy2eq4a2IqBY5rmcTuRKUa8scNclcF1Wf3xuWD2bL5B6ZS2a09NuGHjQOtNPCKB5P/I7QinCQ5I22GPq7xicV7+EKxGl4ZBkxpM+UQVnFBLluPrKj3kyYTsA7N+8GEMks1M3v/EDEhQxHV4EUSNpVIrO9uh+mPiNEkNKcdGlEfyvyAjLXaGai+UBLhsWaDm+VSXNh++1BHKRbZxPKQYwN1+ct1Br9bHm/1DiuCwdNTWLxjjQkAnJSxoCRowQ6pyOeRoNur2kpfvduRysVALTsn4nGMhRWnVkM3UKGlpP5URj2u4NdUMC7TQrd/vJO81Y5iG+Iv8R3Vwa8vp6TPN5oaLs9b8s8JvzYvn0KSNs6L/QJYPfdfCU9bvqwzKNvwr26t6KhP43DOpRxASIV+6o66Puk3iEyoInX7WBeqVBon3RSPl+KCqVKzRv1zBVeZjhc6Xsj6yzRL1JrEhe9YFO89qAA99Mo3HKkOyhEbMz9quUnksTYIGLDaL5RDARcGBpTaCwgiEjkHLF5Ai2rUJlSCEMo9c2jOyKIRJTcRdU89p+o0T2H1nGgLGQauNNM+FRHqT+vMTzMrF4gZNYhY0VUKHzRXu+as10k1L7RPw66wBEVa8xIrZPWPIkE7aU2ZTcm9NhEDehfh8kSOSnGXt9MtP5zzwirQ==",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(346002)(396003)(376002)(39860400002)(136003)(451199015)(46966006)(36840700001)(40470700004)(6636002)(86362001)(110136005)(54906003)(30864003)(4326008)(8676002)(426003)(47076005)(336012)(70206006)(7696005)(83380400001)(41300700001)(26005)(36756003)(5660300002)(70586007)(316002)(36860700001)(8936002)(55016003)(6286002)(40480700001)(107886003)(6666004)(16526019)(1076003)(82740400003)(186003)(356005)(7636003)(2906002)(82310400005)(478600001)(40460700003)(2616005)(579004)(559001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "20 Oct 2022 15:59:04.8737 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 105aab19-0386-47ed-e97b-08dab2b403a0",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT084.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BY5PR12MB4292",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Suanming Mou <suanmingm@nvidia.com>\n\nAs hardware steering mode translates flow matcher and value in two\ndifferent stages, split the flow item matcher and value translation\nto help reuse the code.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.h    |   32 +\n drivers/net/mlx5/mlx5_flow_dv.c | 2314 +++++++++++++++----------------\n 2 files changed, 1185 insertions(+), 1161 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 8e97fa188a..7e5ade52cb 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1267,6 +1267,38 @@ struct mlx5_flow_workspace {\n \tuint32_t skip_matcher_reg:1;\n \t/* Indicates if need to skip matcher register in translate. */\n \tuint32_t mark:1; /* Indicates if flow contains mark action. */\n+\tuint32_t vport_meta_tag; /* Used for vport index match. */\n+};\n+\n+/* Matcher translate type. */\n+enum MLX5_SET_MATCHER {\n+\tMLX5_SET_MATCHER_SW_V = 1 << 0,\n+\tMLX5_SET_MATCHER_SW_M = 1 << 1,\n+\tMLX5_SET_MATCHER_HS_V = 1 << 2,\n+\tMLX5_SET_MATCHER_HS_M = 1 << 3,\n+};\n+\n+#define MLX5_SET_MATCHER_SW (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_SW_M)\n+#define MLX5_SET_MATCHER_HS (MLX5_SET_MATCHER_HS_V | MLX5_SET_MATCHER_HS_M)\n+#define MLX5_SET_MATCHER_V (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_HS_V)\n+#define MLX5_SET_MATCHER_M (MLX5_SET_MATCHER_SW_M | MLX5_SET_MATCHER_HS_M)\n+\n+/* Flow matcher workspace intermediate data. */\n+struct mlx5_dv_matcher_workspace {\n+\tuint8_t priority; /* Flow priority. */\n+\tuint64_t last_item; /* Last item in pattern. */\n+\tuint64_t item_flags; /* Flow item pattern flags. */\n+\tuint64_t action_flags; /* Flow action flags. */\n+\tbool external; /* External flow or not. */\n+\tuint32_t vlan_tag:12; /* Flow item VLAN tag. */\n+\tuint8_t next_protocol; /* Tunnel next protocol */\n+\tuint32_t geneve_tlv_option; /* Flow item Geneve TLV option. */\n+\tuint32_t group; /* Flow group. */\n+\tuint16_t udp_dport; /* Flow item UDP port. */\n+\tconst struct rte_flow_attr *attr; /* Flow attribute. */\n+\tstruct mlx5_flow_rss_desc *rss_desc; /* RSS descriptor. */\n+\tconst struct rte_flow_item *tunnel_item; /* Flow tunnel item. */\n+\tconst struct rte_flow_item *gre_item; /* Flow GRE item. */\n };\n \n struct mlx5_flow_split_info {\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 0f3ff4db51..944db9c3e4 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -63,6 +63,25 @@\n #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)\n #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)\n \n+#define MLX5_ITEM_VALID(item, key_type) \\\n+\t(((MLX5_SET_MATCHER_SW & (key_type)) && !((item)->spec)) || \\\n+\t ((MLX5_SET_MATCHER_HS_V == (key_type)) && !((item)->spec)) || \\\n+\t ((MLX5_SET_MATCHER_HS_M == (key_type)) && !((item)->mask)))\n+\n+#define MLX5_ITEM_UPDATE(item, key_type, v, m, gm) \\\n+\tdo { \\\n+\t\tif ((key_type) == MLX5_SET_MATCHER_SW_V) { \\\n+\t\t\tv = (item)->spec; \\\n+\t\t\tm = (item)->mask ? (item)->mask : (gm); \\\n+\t\t} else if ((key_type) == MLX5_SET_MATCHER_HS_V) { \\\n+\t\t\tv = (item)->spec; \\\n+\t\t\tm = (v); \\\n+\t\t} else { \\\n+\t\t\tv = (item)->mask ? 
(item)->mask : (gm); \\\n+\t\t\tm = (v); \\\n+\t\t} \\\n+\t} while (0)\n+\n union flow_dv_attr {\n \tstruct {\n \t\tuint32_t valid:1;\n@@ -8325,70 +8344,61 @@ flow_dv_check_valid_spec(void *match_mask, void *match_value)\n static inline void\n flow_dv_set_match_ip_version(uint32_t group,\n \t\t\t     void *headers_v,\n-\t\t\t     void *headers_m,\n+\t\t\t     uint32_t key_type,\n \t\t\t     uint8_t ip_version)\n {\n-\tif (group == 0)\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);\n+\tif (group == 0 && (key_type & MLX5_SET_MATCHER_M))\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 0xf);\n \telse\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,\n \t\t\t ip_version);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);\n }\n \n /**\n- * Add Ethernet item to matcher and to the value.\n+ * Add Ethernet item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] grpup\n+ *   Flow matcher group.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_eth(void *matcher, void *key,\n-\t\t\t   const struct rte_flow_item *item, int inner,\n-\t\t\t   uint32_t group)\n+flow_dv_translate_item_eth(void *key, const struct rte_flow_item *item,\n+\t\t\t   int inner, uint32_t group, uint32_t key_type)\n {\n-\tconst struct rte_flow_item_eth *eth_m = item->mask;\n-\tconst struct rte_flow_item_eth *eth_v = item->spec;\n+\tconst struct rte_flow_item_eth *eth_vv = item->spec;\n+\tconst struct rte_flow_item_eth *eth_m;\n+\tconst struct rte_flow_item_eth *eth_v;\n \tconst struct rte_flow_item_eth nic_mask = {\n \t\t.dst.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n \t\t.src.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n \t\t.type = RTE_BE16(0xffff),\n \t\t.has_vlan = 0,\n \t};\n-\tvoid *hdrs_m;\n \tvoid *hdrs_v;\n \tchar *l24_v;\n \tunsigned int i;\n \n-\tif (!eth_v)\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!eth_m)\n-\t\teth_m = &nic_mask;\n-\tif (inner) {\n-\t\thdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n+\tMLX5_ITEM_UPDATE(item, key_type, eth_v, eth_m, &nic_mask);\n+\tif (!eth_vv)\n+\t\teth_vv = eth_v;\n+\tif (inner)\n \t\thdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\thdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n+\telse\n \t\thdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n-\tmemcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),\n-\t       &eth_m->dst, sizeof(eth_m->dst));\n \t/* The value must be in the range of the mask. */\n \tl24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);\n \tfor (i = 0; i < sizeof(eth_m->dst); ++i)\n \t\tl24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];\n-\tmemcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),\n-\t       &eth_m->src, sizeof(eth_m->src));\n \tl24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);\n \t/* The value must be in the range of the mask. 
*/\n \tfor (i = 0; i < sizeof(eth_m->dst); ++i)\n@@ -8402,145 +8412,149 @@ flow_dv_translate_item_eth(void *matcher, void *key,\n \t * eCPRI over Ether layer will use type value 0xAEFE.\n \t */\n \tif (eth_m->type == 0xFFFF) {\n+\t\trte_be16_t type = eth_v->type;\n+\n+\t\t/*\n+\t\t * When set the matcher mask, refer to the original spec\n+\t\t * value.\n+\t\t */\n+\t\tif (key_type == MLX5_SET_MATCHER_SW_M) {\n+\t\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);\n+\t\t\ttype = eth_vv->type;\n+\t\t}\n \t\t/* Set cvlan_tag mask for any single\\multi\\un-tagged case. */\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);\n-\t\tswitch (eth_v->type) {\n+\t\tswitch (type) {\n \t\tcase RTE_BE16(RTE_ETHER_TYPE_VLAN):\n \t\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);\n \t\t\treturn;\n \t\tcase RTE_BE16(RTE_ETHER_TYPE_QINQ):\n-\t\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);\n \t\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);\n \t\t\treturn;\n \t\tcase RTE_BE16(RTE_ETHER_TYPE_IPV4):\n-\t\t\tflow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);\n+\t\t\tflow_dv_set_match_ip_version(group, hdrs_v, key_type,\n+\t\t\t\t\t\t     4);\n \t\t\treturn;\n \t\tcase RTE_BE16(RTE_ETHER_TYPE_IPV6):\n-\t\t\tflow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);\n+\t\t\tflow_dv_set_match_ip_version(group, hdrs_v, key_type,\n+\t\t\t\t\t\t     6);\n \t\t\treturn;\n \t\tdefault:\n \t\t\tbreak;\n \t\t}\n \t}\n-\tif (eth_m->has_vlan) {\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);\n-\t\tif (eth_v->has_vlan) {\n-\t\t\t/*\n-\t\t\t * Here, when also has_more_vlan field in VLAN item is\n-\t\t\t * not set, only single-tagged packets will be matched.\n-\t\t\t */\n-\t\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);\n+\t/*\n+\t * Only SW steering value should refer to the mask value.\n+\t * Other cases are using the fake masks, just ignore the mask.\n+\t */\n+\tif (eth_v->has_vlan && eth_m->has_vlan) {\n+\t\t/*\n+\t\t * Here, when also has_more_vlan field in VLAN item is\n+\t\t * not set, only single-tagged packets will be matched.\n+\t\t */\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);\n+\t\tif (key_type != MLX5_SET_MATCHER_HS_M && eth_vv->has_vlan)\n \t\t\treturn;\n-\t\t}\n \t}\n-\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,\n-\t\t rte_be_to_cpu_16(eth_m->type));\n \tl24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);\n \t*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;\n }\n \n /**\n- * Add VLAN item to matcher and to the value.\n+ * Add VLAN item to the value.\n  *\n- * @param[in, out] dev_flow\n- *   Flow descriptor.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] wks\n+ *   Item workspace.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,\n-\t\t\t    void *matcher, void *key,\n-\t\t\t    const struct rte_flow_item *item,\n-\t\t\t    int inner, uint32_t group)\n+flow_dv_translate_item_vlan(void *key, const struct rte_flow_item *item,\n+\t\t\t    int inner, struct mlx5_dv_matcher_workspace *wks,\n+\t\t\t    uint32_t key_type)\n {\n-\tconst struct rte_flow_item_vlan *vlan_m = item->mask;\n-\tconst struct rte_flow_item_vlan *vlan_v = item->spec;\n-\tvoid *hdrs_m;\n+\tconst struct rte_flow_item_vlan *vlan_m;\n+\tconst struct rte_flow_item_vlan 
*vlan_v;\n+\tconst struct rte_flow_item_vlan *vlan_vv = item->spec;\n \tvoid *hdrs_v;\n-\tuint16_t tci_m;\n \tuint16_t tci_v;\n \n \tif (inner) {\n-\t\thdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n \t\thdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n \t} else {\n-\t\thdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n \t\thdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n \t\t/*\n \t\t * This is workaround, masks are not supported,\n \t\t * and pre-validated.\n \t\t */\n-\t\tif (vlan_v)\n-\t\t\tdev_flow->handle->vf_vlan.tag =\n-\t\t\t\t\trte_be_to_cpu_16(vlan_v->tci) & 0x0fff;\n+\t\tif (vlan_vv)\n+\t\t\twks->vlan_tag = rte_be_to_cpu_16(vlan_vv->tci) & 0x0fff;\n \t}\n \t/*\n \t * When VLAN item exists in flow, mark packet as tagged,\n \t * even if TCI is not specified.\n \t */\n-\tif (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);\n+\tif (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag))\n \t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);\n-\t}\n-\tif (!vlan_v)\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!vlan_m)\n-\t\tvlan_m = &rte_flow_item_vlan_mask;\n-\ttci_m = rte_be_to_cpu_16(vlan_m->tci);\n+\tMLX5_ITEM_UPDATE(item, key_type, vlan_v, vlan_m,\n+\t\t\t &rte_flow_item_vlan_mask);\n \ttci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);\n-\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);\n \tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);\n-\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);\n \tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);\n-\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);\n \tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);\n \t/*\n \t * HW is optimized for IPv4/IPv6. In such cases, avoid setting\n \t * ethertype, and use ip_version field instead.\n \t */\n \tif (vlan_m->inner_type == 0xFFFF) {\n-\t\tswitch (vlan_v->inner_type) {\n+\t\trte_be16_t inner_type = vlan_v->inner_type;\n+\n+\t\t/*\n+\t\t * When set the matcher mask, refer to the original spec\n+\t\t * value.\n+\t\t */\n+\t\tif (key_type == MLX5_SET_MATCHER_SW_M)\n+\t\t\tinner_type = vlan_vv->inner_type;\n+\t\tswitch (inner_type) {\n \t\tcase RTE_BE16(RTE_ETHER_TYPE_VLAN):\n-\t\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);\n \t\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);\n-\t\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);\n+\t\t\tif (key_type & MLX5_SET_MATCHER_V)\n+\t\t\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v,\n+\t\t\t\t\t cvlan_tag, 0);\n \t\t\treturn;\n \t\tcase RTE_BE16(RTE_ETHER_TYPE_IPV4):\n-\t\t\tflow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);\n+\t\t\tflow_dv_set_match_ip_version\n+\t\t\t\t(wks->group, hdrs_v, key_type, 4);\n \t\t\treturn;\n \t\tcase RTE_BE16(RTE_ETHER_TYPE_IPV6):\n-\t\t\tflow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);\n+\t\t\tflow_dv_set_match_ip_version\n+\t\t\t\t(wks->group, hdrs_v, key_type, 6);\n \t\t\treturn;\n \t\tdefault:\n \t\t\tbreak;\n \t\t}\n \t}\n \tif (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);\n \t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);\n \t\t/* Only one vlan_tag bit can be set. 
*/\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);\n+\t\tif (key_type & MLX5_SET_MATCHER_V)\n+\t\t\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);\n \t\treturn;\n \t}\n-\tMLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,\n-\t\t rte_be_to_cpu_16(vlan_m->inner_type));\n \tMLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,\n \t\t rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));\n }\n \n /**\n- * Add IPV4 item to matcher and to the value.\n+ * Add IPV4 item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n@@ -8549,14 +8563,15 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,\n  *   Item is inner pattern.\n  * @param[in] group\n  *   The group to insert the rule.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_ipv4(void *matcher, void *key,\n-\t\t\t    const struct rte_flow_item *item,\n-\t\t\t    int inner, uint32_t group)\n+flow_dv_translate_item_ipv4(void *key, const struct rte_flow_item *item,\n+\t\t\t    int inner, uint32_t group, uint32_t key_type)\n {\n-\tconst struct rte_flow_item_ipv4 *ipv4_m = item->mask;\n-\tconst struct rte_flow_item_ipv4 *ipv4_v = item->spec;\n+\tconst struct rte_flow_item_ipv4 *ipv4_m;\n+\tconst struct rte_flow_item_ipv4 *ipv4_v;\n \tconst struct rte_flow_item_ipv4 nic_mask = {\n \t\t.hdr = {\n \t\t\t.src_addr = RTE_BE32(0xffffffff),\n@@ -8566,68 +8581,41 @@ flow_dv_translate_item_ipv4(void *matcher, void *key,\n \t\t\t.time_to_live = 0xff,\n \t\t},\n \t};\n-\tvoid *headers_m;\n \tvoid *headers_v;\n-\tchar *l24_m;\n \tchar *l24_v;\n-\tuint8_t tos, ihl_m, ihl_v;\n+\tuint8_t tos;\n \n-\tif (inner) {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n-\tflow_dv_set_match_ip_version(group, headers_v, headers_m, 4);\n-\tif (!ipv4_v)\n+\theaders_v = inner ? 
MLX5_ADDR_OF(fte_match_param, key, inner_headers) :\n+\t\t\t    MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n+\tflow_dv_set_match_ip_version(group, headers_v, key_type, 4);\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!ipv4_m)\n-\t\tipv4_m = &nic_mask;\n-\tl24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,\n-\t\t\t     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);\n+\tMLX5_ITEM_UPDATE(item, key_type, ipv4_v, ipv4_m, &nic_mask);\n \tl24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,\n \t\t\t     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);\n-\t*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;\n \t*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;\n-\tl24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,\n-\t\t\t  src_ipv4_src_ipv6.ipv4_layout.ipv4);\n \tl24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,\n \t\t\t  src_ipv4_src_ipv6.ipv4_layout.ipv4);\n-\t*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;\n \t*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;\n \ttos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;\n-\tihl_m = ipv4_m->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;\n-\tihl_v = ipv4_v->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK;\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_ihl, ihl_m);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, ihl_m & ihl_v);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,\n-\t\t ipv4_m->hdr.type_of_service);\n+\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl,\n+\t\t ipv4_v->hdr.ihl & ipv4_m->hdr.ihl);\n+\tif (key_type == MLX5_SET_MATCHER_SW_M)\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,\n+\t\t\t ipv4_v->hdr.type_of_service);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,\n-\t\t ipv4_m->hdr.type_of_service >> 2);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,\n-\t\t ipv4_m->hdr.next_proto_id);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,\n \t\t ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,\n-\t\t ipv4_m->hdr.time_to_live);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,\n \t\t ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,\n-\t\t !!(ipv4_m->hdr.fragment_offset));\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,\n \t\t !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));\n }\n \n /**\n- * Add IPV6 item to matcher and to the value.\n+ * Add IPV6 item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n@@ -8636,14 +8624,15 @@ flow_dv_translate_item_ipv4(void *matcher, void *key,\n  *   Item is inner pattern.\n  * @param[in] group\n  *   The group to insert the rule.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_ipv6(void *matcher, void *key,\n-\t\t\t    const struct rte_flow_item *item,\n-\t\t\t    int inner, uint32_t group)\n+flow_dv_translate_item_ipv6(void *key, const struct rte_flow_item *item,\n+\t\t\t    int inner, uint32_t group, uint32_t key_type)\n {\n-\tconst struct rte_flow_item_ipv6 *ipv6_m = item->mask;\n-\tconst struct rte_flow_item_ipv6 *ipv6_v = item->spec;\n+\tconst struct rte_flow_item_ipv6 *ipv6_m;\n+\tconst struct rte_flow_item_ipv6 *ipv6_v;\n \tconst struct rte_flow_item_ipv6 nic_mask 
= {\n \t\t.hdr = {\n \t\t\t.src_addr =\n@@ -8657,287 +8646,217 @@ flow_dv_translate_item_ipv6(void *matcher, void *key,\n \t\t\t.hop_limits = 0xff,\n \t\t},\n \t};\n-\tvoid *headers_m;\n \tvoid *headers_v;\n-\tvoid *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);\n \tvoid *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n-\tchar *l24_m;\n \tchar *l24_v;\n-\tuint32_t vtc_m;\n \tuint32_t vtc_v;\n \tint i;\n \tint size;\n \n-\tif (inner) {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n-\tflow_dv_set_match_ip_version(group, headers_v, headers_m, 6);\n-\tif (!ipv6_v)\n+\theaders_v = inner ? MLX5_ADDR_OF(fte_match_param, key, inner_headers) :\n+\t\t\t    MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n+\tflow_dv_set_match_ip_version(group, headers_v, key_type, 6);\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!ipv6_m)\n-\t\tipv6_m = &nic_mask;\n+\tMLX5_ITEM_UPDATE(item, key_type, ipv6_v, ipv6_m, &nic_mask);\n \tsize = sizeof(ipv6_m->hdr.dst_addr);\n-\tl24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,\n-\t\t\t     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);\n \tl24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,\n \t\t\t     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);\n-\tmemcpy(l24_m, ipv6_m->hdr.dst_addr, size);\n \tfor (i = 0; i < size; ++i)\n-\t\tl24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];\n-\tl24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,\n-\t\t\t     src_ipv4_src_ipv6.ipv6_layout.ipv6);\n+\t\tl24_v[i] = ipv6_m->hdr.dst_addr[i] & ipv6_v->hdr.dst_addr[i];\n \tl24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,\n \t\t\t     src_ipv4_src_ipv6.ipv6_layout.ipv6);\n-\tmemcpy(l24_m, ipv6_m->hdr.src_addr, size);\n \tfor (i = 0; i < size; ++i)\n-\t\tl24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];\n+\t\tl24_v[i] = ipv6_m->hdr.src_addr[i] & ipv6_v->hdr.src_addr[i];\n \t/* TOS. */\n-\tvtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);\n \tvtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);\n \t/* Label. */\n-\tif (inner) {\n-\t\tMLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,\n-\t\t\t vtc_m);\n+\tif (inner)\n \t\tMLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,\n \t\t\t vtc_v);\n-\t} else {\n-\t\tMLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,\n-\t\t\t vtc_m);\n+\telse\n \t\tMLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,\n \t\t\t vtc_v);\n-\t}\n \t/* Protocol. */\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,\n-\t\t ipv6_m->hdr.proto);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,\n \t\t ipv6_v->hdr.proto & ipv6_m->hdr.proto);\n \t/* Hop limit. 
*/\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,\n-\t\t ipv6_m->hdr.hop_limits);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,\n \t\t ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,\n-\t\t !!(ipv6_m->has_frag_ext));\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,\n \t\t !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));\n }\n \n /**\n- * Add IPV6 fragment extension item to matcher and to the value.\n+ * Add IPV6 fragment extension item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,\n+flow_dv_translate_item_ipv6_frag_ext(void *key,\n \t\t\t\t     const struct rte_flow_item *item,\n-\t\t\t\t     int inner)\n+\t\t\t\t     int inner, uint32_t key_type)\n {\n-\tconst struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;\n-\tconst struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;\n+\tconst struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m;\n+\tconst struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v;\n \tconst struct rte_flow_item_ipv6_frag_ext nic_mask = {\n \t\t.hdr = {\n \t\t\t.next_header = 0xff,\n \t\t\t.frag_data = RTE_BE16(0xffff),\n \t\t},\n \t};\n-\tvoid *headers_m;\n \tvoid *headers_v;\n \n-\tif (inner) {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n+\theaders_v = inner ? MLX5_ADDR_OF(fte_match_param, key, inner_headers) :\n+\t\t\t    MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n \t/* IPv6 fragment extension item exists, so packet is IP fragment. 
*/\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);\n-\tif (!ipv6_frag_ext_v)\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!ipv6_frag_ext_m)\n-\t\tipv6_frag_ext_m = &nic_mask;\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,\n-\t\t ipv6_frag_ext_m->hdr.next_header);\n+\tMLX5_ITEM_UPDATE(item, key_type, ipv6_frag_ext_v,\n+\t\t\t ipv6_frag_ext_m, &nic_mask);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,\n \t\t ipv6_frag_ext_v->hdr.next_header &\n \t\t ipv6_frag_ext_m->hdr.next_header);\n }\n \n /**\n- * Add TCP item to matcher and to the value.\n+ * Add TCP item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_tcp(void *matcher, void *key,\n-\t\t\t   const struct rte_flow_item *item,\n-\t\t\t   int inner)\n+flow_dv_translate_item_tcp(void *key, const struct rte_flow_item *item,\n+\t\t\t   int inner, uint32_t key_type)\n {\n-\tconst struct rte_flow_item_tcp *tcp_m = item->mask;\n-\tconst struct rte_flow_item_tcp *tcp_v = item->spec;\n-\tvoid *headers_m;\n+\tconst struct rte_flow_item_tcp *tcp_m;\n+\tconst struct rte_flow_item_tcp *tcp_v;\n \tvoid *headers_v;\n \n-\tif (inner) {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);\n-\tif (!tcp_v)\n+\theaders_v = inner ? 
MLX5_ADDR_OF(fte_match_param, key, inner_headers) :\n+\t\tMLX5_ADDR_OF(fte_match_param, key, outer_headers);\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t ip_protocol, 0xff);\n+\telse\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t ip_protocol, IPPROTO_TCP);\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!tcp_m)\n-\t\ttcp_m = &rte_flow_item_tcp_mask;\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,\n-\t\t rte_be_to_cpu_16(tcp_m->hdr.src_port));\n+\tMLX5_ITEM_UPDATE(item, key_type, tcp_v, tcp_m,\n+\t\t\t &rte_flow_item_tcp_mask);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,\n \t\t rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,\n-\t\t rte_be_to_cpu_16(tcp_m->hdr.dst_port));\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,\n \t\t rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,\n-\t\t tcp_m->hdr.tcp_flags);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,\n-\t\t (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));\n+\t\t tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags);\n }\n \n /**\n- * Add ESP item to matcher and to the value.\n+ * Add ESP item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_esp(void *matcher, void *key,\n-\t\t\t   const struct rte_flow_item *item,\n-\t\t\t   int inner)\n+flow_dv_translate_item_esp(void *key, const struct rte_flow_item *item,\n+\t\t\t   int inner, uint32_t key_type)\n {\n-\tconst struct rte_flow_item_esp *esp_m = item->mask;\n-\tconst struct rte_flow_item_esp *esp_v = item->spec;\n-\tvoid *headers_m;\n+\tconst struct rte_flow_item_esp *esp_m;\n+\tconst struct rte_flow_item_esp *esp_v;\n \tvoid *headers_v;\n-\tchar *spi_m;\n \tchar *spi_v;\n \n-\tif (inner) {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ESP);\n-\tif (!esp_v)\n+\theaders_v = inner ? 
MLX5_ADDR_OF(fte_match_param, key, inner_headers) :\n+\t\tMLX5_ADDR_OF(fte_match_param, key, outer_headers);\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t ip_protocol, 0xff);\n+\telse\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t ip_protocol, IPPROTO_ESP);\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!esp_m)\n-\t\tesp_m = &rte_flow_item_esp_mask;\n-\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);\n+\tMLX5_ITEM_UPDATE(item, key_type, esp_v, esp_m,\n+\t\t\t &rte_flow_item_esp_mask);\n \theaders_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n-\tif (inner) {\n-\t\tspi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, inner_esp_spi);\n-\t\tspi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, inner_esp_spi);\n-\t} else {\n-\t\tspi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, outer_esp_spi);\n-\t\tspi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, outer_esp_spi);\n-\t}\n-\t*(uint32_t *)spi_m = esp_m->hdr.spi;\n+\tspi_v = inner ? MLX5_ADDR_OF(fte_match_set_misc, headers_v,\n+\t\t\t\tinner_esp_spi) : MLX5_ADDR_OF(fte_match_set_misc\n+\t\t\t\t, headers_v, outer_esp_spi);\n \t*(uint32_t *)spi_v = esp_m->hdr.spi & esp_v->hdr.spi;\n }\n \n /**\n- * Add UDP item to matcher and to the value.\n+ * Add UDP item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_udp(void *matcher, void *key,\n-\t\t\t   const struct rte_flow_item *item,\n-\t\t\t   int inner)\n+flow_dv_translate_item_udp(void *key, const struct rte_flow_item *item,\n+\t\t\t   int inner, struct mlx5_dv_matcher_workspace *wks,\n+\t\t\t   uint32_t key_type)\n {\n-\tconst struct rte_flow_item_udp *udp_m = item->mask;\n-\tconst struct rte_flow_item_udp *udp_v = item->spec;\n-\tvoid *headers_m;\n+\tconst struct rte_flow_item_udp *udp_m;\n+\tconst struct rte_flow_item_udp *udp_v;\n \tvoid *headers_v;\n \n-\tif (inner) {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);\n-\tif (!udp_v)\n+\theaders_v = inner ? 
MLX5_ADDR_OF(fte_match_param, key, inner_headers) :\n+\t\tMLX5_ADDR_OF(fte_match_param, key, outer_headers);\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t ip_protocol, 0xff);\n+\telse\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t ip_protocol, IPPROTO_UDP);\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!udp_m)\n-\t\tudp_m = &rte_flow_item_udp_mask;\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,\n-\t\t rte_be_to_cpu_16(udp_m->hdr.src_port));\n+\tMLX5_ITEM_UPDATE(item, key_type, udp_v, udp_m,\n+\t\t\t &rte_flow_item_udp_mask);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,\n \t\t rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,\n-\t\t rte_be_to_cpu_16(udp_m->hdr.dst_port));\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,\n \t\t rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));\n+\t/* Force get UDP dport in case to be used in VXLAN translate. */\n+\tif (key_type & MLX5_SET_MATCHER_SW) {\n+\t\tudp_v = item->spec;\n+\t\twks->udp_dport = rte_be_to_cpu_16(udp_v->hdr.dst_port &\n+\t\t\t\t\t\t  udp_m->hdr.dst_port);\n+\t}\n }\n \n /**\n- * Add GRE optional Key item to matcher and to the value.\n+ * Add GRE optional Key item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n@@ -8946,55 +8865,46 @@ flow_dv_translate_item_udp(void *matcher, void *key,\n  *   Item is inner pattern.\n  */\n static void\n-flow_dv_translate_item_gre_key(void *matcher, void *key,\n-\t\t\t\t   const struct rte_flow_item *item)\n+flow_dv_translate_item_gre_key(void *key, const struct rte_flow_item *item,\n+\t\t\t       uint32_t key_type)\n {\n-\tconst rte_be32_t *key_m = item->mask;\n-\tconst rte_be32_t *key_v = item->spec;\n-\tvoid *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);\n+\tconst rte_be32_t *key_m;\n+\tconst rte_be32_t *key_v;\n \tvoid *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n \trte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);\n \n \t/* GRE K bit must be on and should already be validated */\n-\tMLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);\n \tMLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);\n-\tif (!key_v)\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!key_m)\n-\t\tkey_m = &gre_key_default_mask;\n-\tMLX5_SET(fte_match_set_misc, misc_m, gre_key_h,\n-\t\t rte_be_to_cpu_32(*key_m) >> 8);\n+\tMLX5_ITEM_UPDATE(item, key_type, key_v, key_m,\n+\t\t\t &gre_key_default_mask);\n \tMLX5_SET(fte_match_set_misc, misc_v, gre_key_h,\n \t\t rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);\n-\tMLX5_SET(fte_match_set_misc, misc_m, gre_key_l,\n-\t\t rte_be_to_cpu_32(*key_m) & 0xFF);\n \tMLX5_SET(fte_match_set_misc, misc_v, gre_key_l,\n \t\t rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);\n }\n \n /**\n- * Add GRE item to matcher and to the value.\n+ * Add GRE item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] pattern_flags\n  *   Accumulated pattern flags.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_gre(void *matcher, void *key,\n-\t\t\t   const struct rte_flow_item *item,\n-\t\t\t   uint64_t pattern_flags)\n+flow_dv_translate_item_gre(void *key, const struct 
rte_flow_item *item,\n+\t\t\t   uint64_t pattern_flags, uint32_t key_type)\n {\n \tstatic const struct rte_flow_item_gre empty_gre = {0,};\n \tconst struct rte_flow_item_gre *gre_m = item->mask;\n \tconst struct rte_flow_item_gre *gre_v = item->spec;\n-\tvoid *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);\n \tvoid *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\tvoid *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);\n \tvoid *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n \tstruct {\n \t\tunion {\n@@ -9012,8 +8922,11 @@ flow_dv_translate_item_gre(void *matcher, void *key,\n \t} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;\n \tuint16_t protocol_m, protocol_v;\n \n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 0xff);\n+\telse\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,\n+\t\t\t IPPROTO_GRE);\n \tif (!gre_v) {\n \t\tgre_v = &empty_gre;\n \t\tgre_m = &empty_gre;\n@@ -9021,20 +8934,18 @@ flow_dv_translate_item_gre(void *matcher, void *key,\n \t\tif (!gre_m)\n \t\t\tgre_m = &rte_flow_item_gre_mask;\n \t}\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tgre_v = gre_m;\n+\telse if (key_type == MLX5_SET_MATCHER_HS_V)\n+\t\tgre_m = gre_v;\n \tgre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);\n \tgre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);\n-\tMLX5_SET(fte_match_set_misc, misc_m, gre_c_present,\n-\t\t gre_crks_rsvd0_ver_m.c_present);\n \tMLX5_SET(fte_match_set_misc, misc_v, gre_c_present,\n \t\t gre_crks_rsvd0_ver_v.c_present &\n \t\t gre_crks_rsvd0_ver_m.c_present);\n-\tMLX5_SET(fte_match_set_misc, misc_m, gre_k_present,\n-\t\t gre_crks_rsvd0_ver_m.k_present);\n \tMLX5_SET(fte_match_set_misc, misc_v, gre_k_present,\n \t\t gre_crks_rsvd0_ver_v.k_present &\n \t\t gre_crks_rsvd0_ver_m.k_present);\n-\tMLX5_SET(fte_match_set_misc, misc_m, gre_s_present,\n-\t\t gre_crks_rsvd0_ver_m.s_present);\n \tMLX5_SET(fte_match_set_misc, misc_v, gre_s_present,\n \t\t gre_crks_rsvd0_ver_v.s_present &\n \t\t gre_crks_rsvd0_ver_m.s_present);\n@@ -9045,17 +8956,17 @@ flow_dv_translate_item_gre(void *matcher, void *key,\n \t\tprotocol_v = mlx5_translate_tunnel_etypes(pattern_flags);\n \t\tif (protocol_v)\n \t\t\tprotocol_m = 0xFFFF;\n+\t\t/* Restore the value to mask in mask case. 
*/\n+\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\tprotocol_v = protocol_m;\n \t}\n-\tMLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);\n \tMLX5_SET(fte_match_set_misc, misc_v, gre_protocol,\n \t\t protocol_m & protocol_v);\n }\n \n /**\n- * Add GRE optional items to matcher and to the value.\n+ * Add GRE optional items to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n@@ -9064,13 +8975,16 @@ flow_dv_translate_item_gre(void *matcher, void *key,\n  *   Pointer to gre_item.\n  * @param[in] pattern_flags\n  *   Accumulated pattern flags.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_gre_option(void *matcher, void *key,\n+flow_dv_translate_item_gre_option(void *key,\n \t\t\t\t  const struct rte_flow_item *item,\n \t\t\t\t  const struct rte_flow_item *gre_item,\n-\t\t\t\t  uint64_t pattern_flags)\n+\t\t\t\t  uint64_t pattern_flags, uint32_t key_type)\n {\n+\tvoid *misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);\n \tconst struct rte_flow_item_gre_opt *option_m = item->mask;\n \tconst struct rte_flow_item_gre_opt *option_v = item->spec;\n \tconst struct rte_flow_item_gre *gre_m = gre_item->mask;\n@@ -9079,8 +8993,6 @@ flow_dv_translate_item_gre_option(void *matcher, void *key,\n \tstruct rte_flow_item gre_key_item;\n \tuint16_t c_rsvd0_ver_m, c_rsvd0_ver_v;\n \tuint16_t protocol_m, protocol_v;\n-\tvoid *misc5_m;\n-\tvoid *misc5_v;\n \n \t/*\n \t * If only match key field, keep using misc for matching.\n@@ -9089,11 +9001,10 @@ flow_dv_translate_item_gre_option(void *matcher, void *key,\n \t */\n \tif (!(option_m->sequence.sequence ||\n \t      option_m->checksum_rsvd.checksum)) {\n-\t\tflow_dv_translate_item_gre(matcher, key, gre_item,\n-\t\t\t\t\t   pattern_flags);\n+\t\tflow_dv_translate_item_gre(key, gre_item, pattern_flags, key_type);\n \t\tgre_key_item.spec = &option_v->key.key;\n \t\tgre_key_item.mask = &option_m->key.key;\n-\t\tflow_dv_translate_item_gre_key(matcher, key, &gre_key_item);\n+\t\tflow_dv_translate_item_gre_key(key, &gre_key_item, key_type);\n \t\treturn;\n \t}\n \tif (!gre_v) {\n@@ -9128,57 +9039,49 @@ flow_dv_translate_item_gre_option(void *matcher, void *key,\n \t\tc_rsvd0_ver_v |= RTE_BE16(0x8000);\n \t\tc_rsvd0_ver_m |= RTE_BE16(0x8000);\n \t}\n+\tif (key_type & MLX5_SET_MATCHER_M) {\n+\t\tc_rsvd0_ver_v = c_rsvd0_ver_m;\n+\t\tprotocol_v = protocol_m;\n+\t\toption_v = option_m;\n+\t}\n \t/*\n \t * Hardware parses GRE optional field into the fixed location,\n \t * do not need to adjust the tunnel dword indices.\n \t */\n-\tmisc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);\n-\tmisc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);\n \tMLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_0,\n \t\t rte_be_to_cpu_32((c_rsvd0_ver_v | protocol_v << 16) &\n \t\t\t\t  (c_rsvd0_ver_m | protocol_m << 16)));\n-\tMLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_0,\n-\t\t rte_be_to_cpu_32(c_rsvd0_ver_m | protocol_m << 16));\n \tMLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_1,\n \t\t rte_be_to_cpu_32(option_v->checksum_rsvd.checksum &\n \t\t\t\t  option_m->checksum_rsvd.checksum));\n-\tMLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_1,\n-\t\t rte_be_to_cpu_32(option_m->checksum_rsvd.checksum));\n \tMLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_2,\n \t\t rte_be_to_cpu_32(option_v->key.key & option_m->key.key));\n-\tMLX5_SET(fte_match_set_misc5, misc5_m, 
tunnel_header_2,\n-\t\t rte_be_to_cpu_32(option_m->key.key));\n \tMLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_3,\n \t\t rte_be_to_cpu_32(option_v->sequence.sequence &\n \t\t\t\t  option_m->sequence.sequence));\n-\tMLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_3,\n-\t\t rte_be_to_cpu_32(option_m->sequence.sequence));\n }\n \n /**\n  * Add NVGRE item to matcher and to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] pattern_flags\n  *   Accumulated pattern flags.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_nvgre(void *matcher, void *key,\n-\t\t\t     const struct rte_flow_item *item,\n-\t\t\t     unsigned long pattern_flags)\n+flow_dv_translate_item_nvgre(void *key, const struct rte_flow_item *item,\n+\t\t\t     unsigned long pattern_flags, uint32_t key_type)\n {\n-\tconst struct rte_flow_item_nvgre *nvgre_m = item->mask;\n-\tconst struct rte_flow_item_nvgre *nvgre_v = item->spec;\n-\tvoid *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);\n+\tconst struct rte_flow_item_nvgre *nvgre_m;\n+\tconst struct rte_flow_item_nvgre *nvgre_v;\n \tvoid *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n \tconst char *tni_flow_id_m;\n \tconst char *tni_flow_id_v;\n-\tchar *gre_key_m;\n \tchar *gre_key_v;\n \tint size;\n \tint i;\n@@ -9197,158 +9100,145 @@ flow_dv_translate_item_nvgre(void *matcher, void *key,\n \t\t.mask = &gre_mask,\n \t\t.last = NULL,\n \t};\n-\tflow_dv_translate_item_gre(matcher, key, &gre_item, pattern_flags);\n-\tif (!nvgre_v)\n+\tflow_dv_translate_item_gre(key, &gre_item, pattern_flags, key_type);\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!nvgre_m)\n-\t\tnvgre_m = &rte_flow_item_nvgre_mask;\n+\tMLX5_ITEM_UPDATE(item, key_type, nvgre_v, nvgre_m,\n+\t\t    &rte_flow_item_nvgre_mask);\n \ttni_flow_id_m = (const char *)nvgre_m->tni;\n \ttni_flow_id_v = (const char *)nvgre_v->tni;\n \tsize = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);\n-\tgre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);\n \tgre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);\n-\tmemcpy(gre_key_m, tni_flow_id_m, size);\n \tfor (i = 0; i < size; ++i)\n-\t\tgre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];\n+\t\tgre_key_v[i] = tni_flow_id_m[i] & tni_flow_id_v[i];\n }\n \n /**\n- * Add VXLAN item to matcher and to the value.\n+ * Add VXLAN item to the value.\n  *\n  * @param[in] dev\n  *   Pointer to the Ethernet device structure.\n  * @param[in] attr\n  *   Flow rule attributes.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] wks\n+ *   Matcher workspace.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,\n \t\t\t     const struct rte_flow_attr *attr,\n-\t\t\t     void *matcher, void *key,\n-\t\t\t     const struct rte_flow_item *item,\n-\t\t\t     int inner)\n+\t\t\t     void *key, const struct rte_flow_item *item,\n+\t\t\t     int inner, struct mlx5_dv_matcher_workspace *wks,\n+\t\t\t     uint32_t key_type)\n {\n-\tconst struct rte_flow_item_vxlan *vxlan_m = item->mask;\n-\tconst struct rte_flow_item_vxlan *vxlan_v = item->spec;\n-\tvoid *headers_m;\n+\tconst 
struct rte_flow_item_vxlan *vxlan_m;\n+\tconst struct rte_flow_item_vxlan *vxlan_v;\n+\tconst struct rte_flow_item_vxlan *vxlan_vv = item->spec;\n \tvoid *headers_v;\n-\tvoid *misc5_m;\n+\tvoid *misc_v;\n \tvoid *misc5_v;\n+\tuint32_t tunnel_v;\n \tuint32_t *tunnel_header_v;\n-\tuint32_t *tunnel_header_m;\n+\tchar *vni_v;\n \tuint16_t dport;\n+\tint size;\n+\tint i;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_item_vxlan nic_mask = {\n \t\t.vni = \"\\xff\\xff\\xff\",\n \t\t.rsvd1 = 0xff,\n \t};\n \n-\tif (inner) {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n+\tmisc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);\n+\theaders_v = inner ? MLX5_ADDR_OF(fte_match_param, key, inner_headers) :\n+\t\tMLX5_ADDR_OF(fte_match_param, key, outer_headers);\n \tdport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?\n \t\tMLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;\n \tif (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);\n-\t}\n-\tdport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);\n-\tif (!vxlan_v)\n-\t\treturn;\n-\tif (!vxlan_m) {\n-\t\tif ((!attr->group && !priv->sh->tunnel_header_0_1) ||\n-\t\t    (attr->group && !priv->sh->misc5_cap))\n-\t\t\tvxlan_m = &rte_flow_item_vxlan_mask;\n+\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t\t udp_dport, 0xFFFF);\n \t\telse\n-\t\t\tvxlan_m = &nic_mask;\n+\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t\t udp_dport, dport);\n \t}\n+\t/*\n+\t * Read the UDP dport to check if the value satisfies the VXLAN\n+\t * matching with MISC5 for CX5.\n+\t */\n+\tif (wks->udp_dport)\n+\t\tdport = wks->udp_dport;\n+\tif (MLX5_ITEM_VALID(item, key_type))\n+\t\treturn;\n+\tMLX5_ITEM_UPDATE(item, key_type, vxlan_v, vxlan_m, &nic_mask);\n+\tif (item->mask == &nic_mask &&\n+\t    ((!attr->group && !priv->sh->tunnel_header_0_1) ||\n+\t    (attr->group && !priv->sh->misc5_cap)))\n+\t\tvxlan_m = &rte_flow_item_vxlan_mask;\n \tif ((priv->sh->steering_format_version ==\n-\t    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&\n-\t    dport != MLX5_UDP_PORT_VXLAN) ||\n-\t    (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||\n+\t     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&\n+\t     dport != MLX5_UDP_PORT_VXLAN) ||\n+\t    (!attr->group && !attr->transfer) ||\n \t    ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {\n-\t\tvoid *misc_m;\n-\t\tvoid *misc_v;\n-\t\tchar *vni_m;\n-\t\tchar *vni_v;\n-\t\tint size;\n-\t\tint i;\n-\t\tmisc_m = MLX5_ADDR_OF(fte_match_param,\n-\t\t\t\t      matcher, misc_parameters);\n \t\tmisc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n \t\tsize = sizeof(vxlan_m->vni);\n-\t\tvni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);\n \t\tvni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);\n-\t\tmemcpy(vni_m, vxlan_m->vni, size);\n \t\tfor (i = 0; i < size; ++i)\n-\t\t\tvni_v[i] = vni_m[i] & vxlan_v->vni[i];\n+\t\t\tvni_v[i] = vxlan_m->vni[i] & vxlan_v->vni[i];\n \t\treturn;\n \t}\n-\tmisc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);\n-\tmisc5_v = 
MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);\n \ttunnel_header_v = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,\n \t\t\t\t\t\t   misc5_v,\n \t\t\t\t\t\t   tunnel_header_1);\n-\ttunnel_header_m = (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc5,\n-\t\t\t\t\t\t   misc5_m,\n-\t\t\t\t\t\t   tunnel_header_1);\n-\t*tunnel_header_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |\n-\t\t\t   (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |\n-\t\t\t   (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;\n-\tif (*tunnel_header_v)\n-\t\t*tunnel_header_m = vxlan_m->vni[0] |\n-\t\t\tvxlan_m->vni[1] << 8 |\n-\t\t\tvxlan_m->vni[2] << 16;\n-\telse\n-\t\t*tunnel_header_m = 0x0;\n-\t*tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;\n-\tif (vxlan_v->rsvd1 & vxlan_m->rsvd1)\n-\t\t*tunnel_header_m |= vxlan_m->rsvd1 << 24;\n+\ttunnel_v = (vxlan_v->vni[0] & vxlan_m->vni[0]) |\n+\t\t   (vxlan_v->vni[1] & vxlan_m->vni[1]) << 8 |\n+\t\t   (vxlan_v->vni[2] & vxlan_m->vni[2]) << 16;\n+\t*tunnel_header_v = tunnel_v;\n+\tif (key_type == MLX5_SET_MATCHER_SW_M) {\n+\t\ttunnel_v = (vxlan_vv->vni[0] & vxlan_m->vni[0]) |\n+\t\t\t   (vxlan_vv->vni[1] & vxlan_m->vni[1]) << 8 |\n+\t\t\t   (vxlan_vv->vni[2] & vxlan_m->vni[2]) << 16;\n+\t\tif (!tunnel_v)\n+\t\t\t*tunnel_header_v = 0x0;\n+\t\tif (vxlan_vv->rsvd1 & vxlan_m->rsvd1)\n+\t\t\t*tunnel_header_v |= vxlan_v->rsvd1 << 24;\n+\t} else {\n+\t\t*tunnel_header_v |= (vxlan_v->rsvd1 & vxlan_m->rsvd1) << 24;\n+\t}\n }\n \n /**\n- * Add VXLAN-GPE item to matcher and to the value.\n+ * Add VXLAN-GPE item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n- * @param[in] inner\n- *   Item is inner pattern.\n+ * @param[in] pattern_flags\n+ *   Item pattern flags.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n \n static void\n-flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,\n-\t\t\t\t const struct rte_flow_item *item,\n-\t\t\t\t const uint64_t pattern_flags)\n+flow_dv_translate_item_vxlan_gpe(void *key, const struct rte_flow_item *item,\n+\t\t\t\t const uint64_t pattern_flags,\n+\t\t\t\t uint32_t key_type)\n {\n \tstatic const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };\n \tconst struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;\n \tconst struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;\n \t/* The item was validated to be on the outer side */\n-\tvoid *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);\n \tvoid *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\tvoid *misc_m =\n-\t\tMLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);\n \tvoid *misc_v =\n \t\tMLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);\n-\tchar *vni_m =\n-\t\tMLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);\n \tchar *vni_v =\n \t\tMLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);\n \tint i, size = sizeof(vxlan_m->vni);\n@@ -9357,9 +9247,12 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,\n \tuint8_t m_protocol, v_protocol;\n \n \tif (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,\n-\t\t\t MLX5_UDP_PORT_VXLAN_GPE);\n+\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,\n+\t\t\t\t 0xFFFF);\n+\t\telse\n+\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, 
udp_dport,\n+\t\t\t\t MLX5_UDP_PORT_VXLAN_GPE);\n \t}\n \tif (!vxlan_v) {\n \t\tvxlan_v = &dummy_vxlan_gpe_hdr;\n@@ -9368,15 +9261,18 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,\n \t\tif (!vxlan_m)\n \t\t\tvxlan_m = &rte_flow_item_vxlan_gpe_mask;\n \t}\n-\tmemcpy(vni_m, vxlan_m->vni, size);\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tvxlan_v = vxlan_m;\n+\telse if (key_type == MLX5_SET_MATCHER_HS_V)\n+\t\tvxlan_m = vxlan_v;\n \tfor (i = 0; i < size; ++i)\n-\t\tvni_v[i] = vni_m[i] & vxlan_v->vni[i];\n+\t\tvni_v[i] = vxlan_m->vni[i] & vxlan_v->vni[i];\n \tif (vxlan_m->flags) {\n \t\tflags_m = vxlan_m->flags;\n \t\tflags_v = vxlan_v->flags;\n \t}\n-\tMLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);\n-\tMLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);\n+\tMLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags,\n+\t\t flags_m & flags_v);\n \tm_protocol = vxlan_m->protocol;\n \tv_protocol = vxlan_v->protocol;\n \tif (!m_protocol) {\n@@ -9389,50 +9285,50 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,\n \t\t\tv_protocol = RTE_VXLAN_GPE_TYPE_IPV6;\n \t\tif (v_protocol)\n \t\t\tm_protocol = 0xFF;\n+\t\t/* Restore the value to mask in mask case. */\n+\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\tv_protocol = m_protocol;\n \t}\n-\tMLX5_SET(fte_match_set_misc3, misc_m,\n-\t\t outer_vxlan_gpe_next_protocol, m_protocol);\n \tMLX5_SET(fte_match_set_misc3, misc_v,\n \t\t outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);\n }\n \n /**\n- * Add Geneve item to matcher and to the value.\n+ * Add Geneve item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n- * @param[in] inner\n- *   Item is inner pattern.\n+ * @param[in] pattern_flags\n+ *   Item pattern flags.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n \n static void\n-flow_dv_translate_item_geneve(void *matcher, void *key,\n-\t\t\t      const struct rte_flow_item *item,\n-\t\t\t      uint64_t pattern_flags)\n+flow_dv_translate_item_geneve(void *key, const struct rte_flow_item *item,\n+\t\t\t      uint64_t pattern_flags, uint32_t key_type)\n {\n \tstatic const struct rte_flow_item_geneve empty_geneve = {0,};\n \tconst struct rte_flow_item_geneve *geneve_m = item->mask;\n \tconst struct rte_flow_item_geneve *geneve_v = item->spec;\n \t/* GENEVE flow item validation allows single tunnel item */\n-\tvoid *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);\n \tvoid *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\tvoid *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);\n \tvoid *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n \tuint16_t gbhdr_m;\n \tuint16_t gbhdr_v;\n-\tchar *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);\n \tchar *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);\n \tsize_t size = sizeof(geneve_m->vni), i;\n \tuint16_t protocol_m, protocol_v;\n \n \tif (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,\n-\t\t\t MLX5_UDP_PORT_GENEVE);\n+\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,\n+\t\t\t\t 0xFFFF);\n+\t\telse\n+\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,\n+\t\t\t\t MLX5_UDP_PORT_GENEVE);\n \t}\n \tif 
(!geneve_v) {\n \t\tgeneve_v = &empty_geneve;\n@@ -9441,17 +9337,16 @@ flow_dv_translate_item_geneve(void *matcher, void *key,\n \t\tif (!geneve_m)\n \t\t\tgeneve_m = &rte_flow_item_geneve_mask;\n \t}\n-\tmemcpy(vni_m, geneve_m->vni, size);\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tgeneve_v = geneve_m;\n+\telse if (key_type == MLX5_SET_MATCHER_HS_V)\n+\t\tgeneve_m = geneve_v;\n \tfor (i = 0; i < size; ++i)\n-\t\tvni_v[i] = vni_m[i] & geneve_v->vni[i];\n+\t\tvni_v[i] = geneve_m->vni[i] & geneve_v->vni[i];\n \tgbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);\n \tgbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);\n-\tMLX5_SET(fte_match_set_misc, misc_m, geneve_oam,\n-\t\t MLX5_GENEVE_OAMF_VAL(gbhdr_m));\n \tMLX5_SET(fte_match_set_misc, misc_v, geneve_oam,\n \t\t MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));\n-\tMLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,\n-\t\t MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));\n \tMLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,\n \t\t MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &\n \t\t MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));\n@@ -9462,8 +9357,10 @@ flow_dv_translate_item_geneve(void *matcher, void *key,\n \t\tprotocol_v = mlx5_translate_tunnel_etypes(pattern_flags);\n \t\tif (protocol_v)\n \t\t\tprotocol_m = 0xFFFF;\n+\t\t/* Restore the value to mask in mask case. */\n+\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\tprotocol_v = protocol_m;\n \t}\n-\tMLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);\n \tMLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,\n \t\t protocol_m & protocol_v);\n }\n@@ -9473,10 +9370,8 @@ flow_dv_translate_item_geneve(void *matcher, void *key,\n  *\n  * @param dev[in, out]\n  *   Pointer to rte_eth_dev structure.\n- * @param[in, out] tag_be24\n- *   Tag value in big endian then R-shift 8.\n- * @parm[in, out] dev_flow\n- *   Pointer to the dev_flow.\n+ * @param[in] item\n+ *   Flow pattern to translate.\n  * @param[out] error\n  *   pointer to error structure.\n  *\n@@ -9553,38 +9448,38 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,\n }\n \n /**\n- * Add Geneve TLV option item to matcher.\n+ * Add Geneve TLV option item to value.\n  *\n  * @param[in, out] dev\n  *   Pointer to rte_eth_dev structure.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  * @param[out] error\n  *   Pointer to error structure.\n  */\n static int\n-flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,\n-\t\t\t\t  void *key, const struct rte_flow_item *item,\n+flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key,\n+\t\t\t\t  const struct rte_flow_item *item,\n+\t\t\t\t  uint32_t key_type,\n \t\t\t\t  struct rte_flow_error *error)\n {\n-\tconst struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;\n-\tconst struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;\n-\tvoid *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);\n+\tconst struct rte_flow_item_geneve_opt *geneve_opt_m;\n+\tconst struct rte_flow_item_geneve_opt *geneve_opt_v;\n+\tconst struct rte_flow_item_geneve_opt *geneve_opt_vv = item->spec;\n \tvoid *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n-\tvoid *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\tmisc_parameters_3);\n \tvoid *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);\n \trte_be32_t 
opt_data_key = 0, opt_data_mask = 0;\n+\tuint32_t *data;\n \tint ret = 0;\n \n-\tif (!geneve_opt_v)\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn -1;\n-\tif (!geneve_opt_m)\n-\t\tgeneve_opt_m = &rte_flow_item_geneve_opt_mask;\n+\tMLX5_ITEM_UPDATE(item, key_type, geneve_opt_v, geneve_opt_m,\n+\t\t\t &rte_flow_item_geneve_opt_mask);\n \tret = flow_dev_geneve_tlv_option_resource_register(dev, item,\n \t\t\t\t\t\t\t   error);\n \tif (ret) {\n@@ -9598,17 +9493,21 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,\n \t * If the option length was not requested but the GENEVE TLV option item\n \t * is present we set the option length field implicitly.\n \t */\n-\tif (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {\n-\t\tMLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,\n-\t\t\t MLX5_GENEVE_OPTLEN_MASK);\n-\t\tMLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,\n-\t\t\t geneve_opt_v->option_len + 1);\n-\t}\n-\tMLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);\n-\tMLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);\n+\tif (!MLX5_GET16(fte_match_set_misc, misc_v, geneve_opt_len)) {\n+\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\tMLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,\n+\t\t\t\t MLX5_GENEVE_OPTLEN_MASK);\n+\t\telse\n+\t\t\tMLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,\n+\t\t\t\t geneve_opt_v->option_len + 1);\n+\t}\n \t/* Set the data. */\n-\tif (geneve_opt_v->data) {\n-\t\tmemcpy(&opt_data_key, geneve_opt_v->data,\n+\tif (key_type == MLX5_SET_MATCHER_SW_V)\n+\t\tdata = geneve_opt_vv->data;\n+\telse\n+\t\tdata = geneve_opt_v->data;\n+\tif (data) {\n+\t\tmemcpy(&opt_data_key, data,\n \t\t\tRTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),\n \t\t\t\tsizeof(opt_data_key)));\n \t\tMLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=\n@@ -9618,9 +9517,6 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,\n \t\t\t\tsizeof(opt_data_mask)));\n \t\tMLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=\n \t\t\t\tsizeof(opt_data_mask));\n-\t\tMLX5_SET(fte_match_set_misc3, misc3_m,\n-\t\t\t\tgeneve_tlv_option_0_data,\n-\t\t\t\trte_be_to_cpu_32(opt_data_mask));\n \t\tMLX5_SET(fte_match_set_misc3, misc3_v,\n \t\t\t\tgeneve_tlv_option_0_data,\n \t\t\trte_be_to_cpu_32(opt_data_key & opt_data_mask));\n@@ -9629,10 +9525,8 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,\n }\n \n /**\n- * Add MPLS item to matcher and to the value.\n+ * Add MPLS item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n@@ -9641,93 +9535,78 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,\n  *   The protocol layer indicated in previous item.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_mpls(void *matcher, void *key,\n-\t\t\t    const struct rte_flow_item *item,\n-\t\t\t    uint64_t prev_layer,\n-\t\t\t    int inner)\n+flow_dv_translate_item_mpls(void *key, const struct rte_flow_item *item,\n+\t\t\t    uint64_t prev_layer, int inner,\n+\t\t\t    uint32_t key_type)\n {\n-\tconst uint32_t *in_mpls_m = item->mask;\n-\tconst uint32_t *in_mpls_v = item->spec;\n-\tuint32_t *out_mpls_m = 0;\n+\tconst uint32_t *in_mpls_m;\n+\tconst uint32_t *in_mpls_v;\n \tuint32_t *out_mpls_v = 0;\n-\tvoid *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);\n 
\tvoid *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n-\tvoid *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t     misc_parameters_2);\n \tvoid *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);\n-\tvoid *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);\n \tvoid *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n \n \tswitch (prev_layer) {\n \tcase MLX5_FLOW_LAYER_OUTER_L4_UDP:\n \t\tif (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {\n-\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,\n-\t\t\t\t 0xffff);\n-\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,\n-\t\t\t\t MLX5_UDP_PORT_MPLS);\n+\t\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t\t\t udp_dport, 0xffff);\n+\t\t\telse\n+\t\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t\t\t udp_dport, MLX5_UDP_PORT_MPLS);\n \t\t}\n \t\tbreak;\n \tcase MLX5_FLOW_LAYER_GRE:\n \t\t/* Fall-through. */\n \tcase MLX5_FLOW_LAYER_GRE_KEY:\n \t\tif (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {\n-\t\t\tMLX5_SET(fte_match_set_misc, misc_m, gre_protocol,\n-\t\t\t\t 0xffff);\n-\t\t\tMLX5_SET(fte_match_set_misc, misc_v, gre_protocol,\n-\t\t\t\t RTE_ETHER_TYPE_MPLS);\n+\t\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\t\tMLX5_SET(fte_match_set_misc, misc_v,\n+\t\t\t\t\t gre_protocol, 0xffff);\n+\t\t\telse\n+\t\t\t\tMLX5_SET(fte_match_set_misc, misc_v,\n+\t\t\t\t\t gre_protocol, RTE_ETHER_TYPE_MPLS);\n \t\t}\n \t\tbreak;\n \tdefault:\n \t\tbreak;\n \t}\n-\tif (!in_mpls_v)\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!in_mpls_m)\n-\t\tin_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;\n+\tMLX5_ITEM_UPDATE(item, key_type, in_mpls_v, in_mpls_m,\n+\t\t\t &rte_flow_item_mpls_mask);\n \tswitch (prev_layer) {\n \tcase MLX5_FLOW_LAYER_OUTER_L4_UDP:\n-\t\tout_mpls_m =\n-\t\t\t(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,\n-\t\t\t\t\t\t outer_first_mpls_over_udp);\n \t\tout_mpls_v =\n \t\t\t(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,\n \t\t\t\t\t\t outer_first_mpls_over_udp);\n \t\tbreak;\n \tcase MLX5_FLOW_LAYER_GRE:\n-\t\tout_mpls_m =\n-\t\t\t(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,\n-\t\t\t\t\t\t outer_first_mpls_over_gre);\n \t\tout_mpls_v =\n \t\t\t(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,\n \t\t\t\t\t\t outer_first_mpls_over_gre);\n \t\tbreak;\n \tdefault:\n \t\t/* Inner MPLS not over GRE is not supported. 
*/\n-\t\tif (!inner) {\n-\t\t\tout_mpls_m =\n-\t\t\t\t(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,\n-\t\t\t\t\t\t\t misc2_m,\n-\t\t\t\t\t\t\t outer_first_mpls);\n+\t\tif (!inner)\n \t\t\tout_mpls_v =\n \t\t\t\t(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,\n \t\t\t\t\t\t\t misc2_v,\n \t\t\t\t\t\t\t outer_first_mpls);\n-\t\t}\n \t\tbreak;\n \t}\n-\tif (out_mpls_m && out_mpls_v) {\n-\t\t*out_mpls_m = *in_mpls_m;\n+\tif (out_mpls_v)\n \t\t*out_mpls_v = *in_mpls_v & *in_mpls_m;\n-\t}\n }\n \n /**\n  * Add metadata register item to matcher\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] reg_type\n@@ -9738,12 +9617,9 @@ flow_dv_translate_item_mpls(void *matcher, void *key,\n  *   Register mask\n  */\n static void\n-flow_dv_match_meta_reg(void *matcher, void *key,\n-\t\t       enum modify_reg reg_type,\n+flow_dv_match_meta_reg(void *key, enum modify_reg reg_type,\n \t\t       uint32_t data, uint32_t mask)\n {\n-\tvoid *misc2_m =\n-\t\tMLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);\n \tvoid *misc2_v =\n \t\tMLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);\n \tuint32_t temp;\n@@ -9751,11 +9627,9 @@ flow_dv_match_meta_reg(void *matcher, void *key,\n \tdata &= mask;\n \tswitch (reg_type) {\n \tcase REG_A:\n-\t\tMLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);\n \t\tMLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);\n \t\tbreak;\n \tcase REG_B:\n-\t\tMLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);\n \t\tMLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);\n \t\tbreak;\n \tcase REG_C_0:\n@@ -9764,40 +9638,31 @@ flow_dv_match_meta_reg(void *matcher, void *key,\n \t\t * source vport index and META item value, we should set\n \t\t * this field according to specified mask, not as whole one.\n \t\t */\n-\t\ttemp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);\n-\t\ttemp |= mask;\n-\t\tMLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);\n \t\ttemp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);\n-\t\ttemp &= ~mask;\n+\t\tif (mask)\n+\t\t\ttemp &= ~mask;\n \t\ttemp |= data;\n \t\tMLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);\n \t\tbreak;\n \tcase REG_C_1:\n-\t\tMLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);\n \t\tMLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);\n \t\tbreak;\n \tcase REG_C_2:\n-\t\tMLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);\n \t\tMLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);\n \t\tbreak;\n \tcase REG_C_3:\n-\t\tMLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);\n \t\tMLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);\n \t\tbreak;\n \tcase REG_C_4:\n-\t\tMLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);\n \t\tMLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);\n \t\tbreak;\n \tcase REG_C_5:\n-\t\tMLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);\n \t\tMLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);\n \t\tbreak;\n \tcase REG_C_6:\n-\t\tMLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);\n \t\tMLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);\n \t\tbreak;\n \tcase REG_C_7:\n-\t\tMLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);\n \t\tMLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);\n \t\tbreak;\n \tdefault:\n@@ -9806,34 +9671,71 @@ flow_dv_match_meta_reg(void *matcher, void *key,\n \t}\n }\n \n+/**\n+ 
* Add metadata register item to matcher\n+ *\n+ * @param[in, out] matcher\n+ *   Flow matcher.\n+ * @param[in, out] key\n+ *   Flow matcher value.\n+ * @param[in] reg_type\n+ *   Type of device metadata register\n+ * @param[in] value\n+ *   Register value\n+ * @param[in] mask\n+ *   Register mask\n+ */\n+static void\n+flow_dv_match_meta_reg_all(void *matcher, void *key, enum modify_reg reg_type,\n+\t\t\t   uint32_t data, uint32_t mask)\n+{\n+\tflow_dv_match_meta_reg(key, reg_type, data, mask);\n+\tflow_dv_match_meta_reg(matcher, reg_type, mask, mask);\n+}\n+\n /**\n  * Add MARK item to matcher\n  *\n  * @param[in] dev\n  *   The device to configure through.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_mark(struct rte_eth_dev *dev,\n-\t\t\t    void *matcher, void *key,\n-\t\t\t    const struct rte_flow_item *item)\n+flow_dv_translate_item_mark(struct rte_eth_dev *dev, void *key,\n+\t\t\t    const struct rte_flow_item *item,\n+\t\t\t    uint32_t key_type)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_item_mark *mark;\n \tuint32_t value;\n-\tuint32_t mask;\n-\n-\tmark = item->mask ? (const void *)item->mask :\n-\t\t\t    &rte_flow_item_mark_mask;\n-\tmask = mark->id & priv->sh->dv_mark_mask;\n-\tmark = (const void *)item->spec;\n-\tMLX5_ASSERT(mark);\n-\tvalue = mark->id & priv->sh->dv_mark_mask & mask;\n+\tuint32_t mask = 0;\n+\n+\tif (key_type & MLX5_SET_MATCHER_SW) {\n+\t\tmark = item->mask ? (const void *)item->mask :\n+\t\t\t\t    &rte_flow_item_mark_mask;\n+\t\tmask = mark->id;\n+\t\tif (key_type == MLX5_SET_MATCHER_SW_M) {\n+\t\t\tvalue = mask;\n+\t\t} else {\n+\t\t\tmark = (const void *)item->spec;\n+\t\t\tMLX5_ASSERT(mark);\n+\t\t\tvalue = mark->id;\n+\t\t}\n+\t} else {\n+\t\tmark = (key_type == MLX5_SET_MATCHER_HS_V) ?\n+\t\t\t(const void *)item->spec : (const void *)item->mask;\n+\t\tMLX5_ASSERT(mark);\n+\t\tvalue = mark->id;\n+\t\tif (key_type == MLX5_SET_MATCHER_HS_M)\n+\t\t\tmask = value;\n+\t}\n+\tmask &= priv->sh->dv_mark_mask;\n+\tvalue &= mask;\n \tif (mask) {\n \t\tenum modify_reg reg;\n \n@@ -9849,7 +9751,7 @@ flow_dv_translate_item_mark(struct rte_eth_dev *dev,\n \t\t\tmask <<= shl_c0;\n \t\t\tvalue <<= shl_c0;\n \t\t}\n-\t\tflow_dv_match_meta_reg(matcher, key, reg, value, mask);\n+\t\tflow_dv_match_meta_reg(key, reg, value, mask);\n \t}\n }\n \n@@ -9858,65 +9760,66 @@ flow_dv_translate_item_mark(struct rte_eth_dev *dev,\n  *\n  * @param[in] dev\n  *   The devich to configure through.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] attr\n  *   Attributes of flow that includes this item.\n  * @param[in] item\n  *   Flow pattern to translate.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n flow_dv_translate_item_meta(struct rte_eth_dev *dev,\n-\t\t\t    void *matcher, void *key,\n+\t\t\t    void *key,\n \t\t\t    const struct rte_flow_attr *attr,\n-\t\t\t    const struct rte_flow_item *item)\n+\t\t\t    const struct rte_flow_item *item,\n+\t\t\t    uint32_t key_type)\n {\n \tconst struct rte_flow_item_meta *meta_m;\n \tconst struct rte_flow_item_meta *meta_v;\n+\tuint32_t value;\n+\tuint32_t mask = 0;\n+\tint reg;\n \n-\tmeta_m = (const void *)item->mask;\n-\tif (!meta_m)\n-\t\tmeta_m = 
&rte_flow_item_meta_mask;\n-\tmeta_v = (const void *)item->spec;\n-\tif (meta_v) {\n-\t\tint reg;\n-\t\tuint32_t value = meta_v->data;\n-\t\tuint32_t mask = meta_m->data;\n+\tif (MLX5_ITEM_VALID(item, key_type))\n+\t\treturn;\n+\tMLX5_ITEM_UPDATE(item, key_type, meta_v, meta_m,\n+\t\t\t &rte_flow_item_meta_mask);\n+\tvalue = meta_v->data;\n+\tmask = meta_m->data;\n+\tif (key_type == MLX5_SET_MATCHER_HS_M)\n+\t\tmask = value;\n+\treg = flow_dv_get_metadata_reg(dev, attr, NULL);\n+\tif (reg < 0)\n+\t\treturn;\n+\tMLX5_ASSERT(reg != REG_NON);\n+\tif (reg == REG_C_0) {\n+\t\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\t\tuint32_t msk_c0 = priv->sh->dv_regc0_mask;\n+\t\tuint32_t shl_c0 = rte_bsf32(msk_c0);\n \n-\t\treg = flow_dv_get_metadata_reg(dev, attr, NULL);\n-\t\tif (reg < 0)\n-\t\t\treturn;\n-\t\tMLX5_ASSERT(reg != REG_NON);\n-\t\tif (reg == REG_C_0) {\n-\t\t\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\t\t\tuint32_t msk_c0 = priv->sh->dv_regc0_mask;\n-\t\t\tuint32_t shl_c0 = rte_bsf32(msk_c0);\n-\n-\t\t\tmask &= msk_c0;\n-\t\t\tmask <<= shl_c0;\n-\t\t\tvalue <<= shl_c0;\n-\t\t}\n-\t\tflow_dv_match_meta_reg(matcher, key, reg, value, mask);\n+\t\tmask &= msk_c0;\n+\t\tmask <<= shl_c0;\n+\t\tvalue <<= shl_c0;\n \t}\n+\tflow_dv_match_meta_reg(key, reg, value, mask);\n }\n \n /**\n  * Add vport metadata Reg C0 item to matcher\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n- * @param[in] reg\n- *   Flow pattern to translate.\n+ * @param[in] value\n+ *   Register value\n+ * @param[in] mask\n+ *   Register mask\n  */\n static void\n-flow_dv_translate_item_meta_vport(void *matcher, void *key,\n-\t\t\t\t  uint32_t value, uint32_t mask)\n+flow_dv_translate_item_meta_vport(void *key, uint32_t value, uint32_t mask)\n {\n-\tflow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);\n+\tflow_dv_match_meta_reg(key, REG_C_0, value, mask);\n }\n \n /**\n@@ -9924,17 +9827,17 @@ flow_dv_translate_item_meta_vport(void *matcher, void *key,\n  *\n  * @param[in] dev\n  *   The devich to configure through.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,\n-\t\t\t\tvoid *matcher, void *key,\n-\t\t\t\tconst struct rte_flow_item *item)\n+flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev, void *key,\n+\t\t\t\tconst struct rte_flow_item *item,\n+\t\t\t\tuint32_t key_type)\n {\n \tconst struct mlx5_rte_flow_item_tag *tag_v = item->spec;\n \tconst struct mlx5_rte_flow_item_tag *tag_m = item->mask;\n@@ -9943,6 +9846,8 @@ flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,\n \tMLX5_ASSERT(tag_v);\n \tvalue = tag_v->data;\n \tmask = tag_m ? 
tag_m->data : UINT32_MAX;\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tvalue = mask;\n \tif (tag_v->id == REG_C_0) {\n \t\tstruct mlx5_priv *priv = dev->data->dev_private;\n \t\tuint32_t msk_c0 = priv->sh->dv_regc0_mask;\n@@ -9952,7 +9857,7 @@ flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,\n \t\tmask <<= shl_c0;\n \t\tvalue <<= shl_c0;\n \t}\n-\tflow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);\n+\tflow_dv_match_meta_reg(key, tag_v->id, value, mask);\n }\n \n /**\n@@ -9960,50 +9865,50 @@ flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,\n  *\n  * @param[in] dev\n  *   The devich to configure through.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_tag(struct rte_eth_dev *dev,\n-\t\t\t   void *matcher, void *key,\n-\t\t\t   const struct rte_flow_item *item)\n+flow_dv_translate_item_tag(struct rte_eth_dev *dev, void *key,\n+\t\t\t   const struct rte_flow_item *item,\n+\t\t\t   uint32_t key_type)\n {\n-\tconst struct rte_flow_item_tag *tag_v = item->spec;\n-\tconst struct rte_flow_item_tag *tag_m = item->mask;\n+\tconst struct rte_flow_item_tag *tag_vv = item->spec;\n+\tconst struct rte_flow_item_tag *tag_v;\n+\tconst struct rte_flow_item_tag *tag_m;\n \tenum modify_reg reg;\n+\tuint32_t index;\n \n-\tMLX5_ASSERT(tag_v);\n-\ttag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;\n+\tif (MLX5_ITEM_VALID(item, key_type))\n+\t\treturn;\n+\tMLX5_ITEM_UPDATE(item, key_type, tag_v, tag_m,\n+\t\t&rte_flow_item_tag_mask);\n+\t/* When set mask, the index should be from spec. */\n+\tindex = tag_vv ? tag_vv->index : tag_v->index;\n \t/* Get the metadata register index for the tag. 
*/\n-\treg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);\n+\treg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, index, NULL);\n \tMLX5_ASSERT(reg > 0);\n-\tflow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);\n+\tflow_dv_match_meta_reg(key, reg, tag_v->data, tag_m->data);\n }\n \n /**\n  * Add source vport match to the specified matcher.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] port\n  *   Source vport value to match\n- * @param[in] mask\n- *   Mask\n  */\n static void\n-flow_dv_translate_item_source_vport(void *matcher, void *key,\n-\t\t\t\t    int16_t port, uint16_t mask)\n+flow_dv_translate_item_source_vport(void *key,\n+\t\t\t\t    int16_t port)\n {\n-\tvoid *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);\n \tvoid *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n \n-\tMLX5_SET(fte_match_set_misc, misc_m, source_port, mask);\n \tMLX5_SET(fte_match_set_misc, misc_v, source_port, port);\n }\n \n@@ -10012,31 +9917,34 @@ flow_dv_translate_item_source_vport(void *matcher, void *key,\n  *\n  * @param[in] dev\n  *   The devich to configure through.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n- * @param[in]\n+ * @param[in] attr\n  *   Flow attributes.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  *\n  * @return\n  *   0 on success, a negative errno value otherwise.\n  */\n static int\n-flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,\n-\t\t\t       void *key, const struct rte_flow_item *item,\n-\t\t\t       const struct rte_flow_attr *attr)\n+flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *key,\n+\t\t\t       const struct rte_flow_item *item,\n+\t\t\t       const struct rte_flow_attr *attr,\n+\t\t\t       uint32_t key_type)\n {\n \tconst struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;\n \tconst struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;\n \tstruct mlx5_priv *priv;\n \tuint16_t mask, id;\n+\tuint32_t vport_meta;\n \n \tif (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) {\n-\t\tflow_dv_translate_item_source_vport(matcher, key,\n-\t\t\tmlx5_flow_get_esw_manager_vport_id(dev), 0xffff);\n+\t\tflow_dv_translate_item_source_vport(key,\n+\t\t\t\tkey_type & MLX5_SET_MATCHER_V ?\n+\t\t\t\tmlx5_flow_get_esw_manager_vport_id(dev) : 0xffff);\n \t\treturn 0;\n \t}\n \tmask = pid_m ? 
pid_m->id : 0xffff;\n@@ -10044,6 +9952,13 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,\n \tpriv = mlx5_port_to_eswitch_info(id, item == NULL);\n \tif (!priv)\n \t\treturn -rte_errno;\n+\tif (key_type & MLX5_SET_MATCHER_M) {\n+\t\tid = mask;\n+\t\tvport_meta = priv->vport_meta_mask;\n+\t} else {\n+\t\tid = priv->vport_id;\n+\t\tvport_meta = priv->vport_meta_tag;\n+\t}\n \t/*\n \t * Translate to vport field or to metadata, depending on mode.\n \t * Kernel can use either misc.source_port or half of C0 metadata\n@@ -10057,20 +9972,17 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,\n \t\t */\n \t\tif (mask == 0xffff && priv->vport_id == 0xffff &&\n \t\t    priv->pf_bond < 0 && attr->transfer)\n-\t\t\tflow_dv_translate_item_source_vport\n-\t\t\t\t(matcher, key, priv->vport_id, mask);\n+\t\t\tflow_dv_translate_item_source_vport(key, id);\n \t\t/*\n \t\t * We should always set the vport metadata register,\n \t\t * otherwise the SW steering library can drop\n \t\t * the rule if wire vport metadata value is not zero,\n \t\t * it depends on kernel configuration.\n \t\t */\n-\t\tflow_dv_translate_item_meta_vport(matcher, key,\n-\t\t\t\t\t\t  priv->vport_meta_tag,\n-\t\t\t\t\t\t  priv->vport_meta_mask);\n+\t\tflow_dv_translate_item_meta_vport\n+\t\t\t\t(key, vport_meta, priv->vport_meta_mask);\n \t} else {\n-\t\tflow_dv_translate_item_source_vport(matcher, key,\n-\t\t\t\t\t\t    priv->vport_id, mask);\n+\t\tflow_dv_translate_item_source_vport(key, id);\n \t}\n \treturn 0;\n }\n@@ -10080,8 +9992,6 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,\n  *\n  * @param[in] dev\n  *   The devich to configure through.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n@@ -10093,21 +10003,25 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,\n  *   0 on success, a negative errno value otherwise.\n  */\n static int\n-flow_dv_translate_item_represented_port(struct rte_eth_dev *dev, void *matcher,\n-\t\t\t\t\tvoid *key,\n+flow_dv_translate_item_represented_port(struct rte_eth_dev *dev, void *key,\n \t\t\t\t\tconst struct rte_flow_item *item,\n-\t\t\t\t\tconst struct rte_flow_attr *attr)\n+\t\t\t\t\tconst struct rte_flow_attr *attr,\n+\t\t\t\t\tuint32_t key_type)\n {\n \tconst struct rte_flow_item_ethdev *pid_m = item ? item->mask : NULL;\n \tconst struct rte_flow_item_ethdev *pid_v = item ? item->spec : NULL;\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n \tstruct mlx5_priv *priv;\n \tuint16_t mask, id;\n+\tuint32_t vport_meta;\n \n+\tMLX5_ASSERT(wks);\n \tif (!pid_m && !pid_v)\n \t\treturn 0;\n \tif (pid_v && pid_v->port_id == UINT16_MAX) {\n-\t\tflow_dv_translate_item_source_vport(matcher, key,\n-\t\t\tmlx5_flow_get_esw_manager_vport_id(dev), UINT16_MAX);\n+\t\tflow_dv_translate_item_source_vport(key,\n+\t\t\tkey_type & MLX5_SET_MATCHER_V ?\n+\t\t\tmlx5_flow_get_esw_manager_vport_id(dev) : 0xffff);\n \t\treturn 0;\n \t}\n \tmask = pid_m ? 
pid_m->port_id : UINT16_MAX;\n@@ -10115,6 +10029,14 @@ flow_dv_translate_item_represented_port(struct rte_eth_dev *dev, void *matcher,\n \tpriv = mlx5_port_to_eswitch_info(id, item == NULL);\n \tif (!priv)\n \t\treturn -rte_errno;\n+\tif (key_type & MLX5_SET_MATCHER_M) {\n+\t\tid = mask;\n+\t\tvport_meta = priv->vport_meta_mask;\n+\t} else {\n+\t\tid = priv->vport_id;\n+\t\tvport_meta = priv->vport_meta_tag;\n+\t\twks->vport_meta_tag = vport_meta;\n+\t}\n \t/*\n \t * Translate to vport field or to metadata, depending on mode.\n \t * Kernel can use either misc.source_port or half of C0 metadata\n@@ -10127,119 +10049,133 @@ flow_dv_translate_item_represented_port(struct rte_eth_dev *dev, void *matcher,\n \t\t * save the extra vport match.\n \t\t */\n \t\tif (mask == UINT16_MAX && priv->vport_id == UINT16_MAX &&\n-\t\t    priv->pf_bond < 0 && attr->transfer)\n-\t\t\tflow_dv_translate_item_source_vport\n-\t\t\t\t(matcher, key, priv->vport_id, mask);\n+\t\t    priv->pf_bond < 0 && attr->transfer &&\n+\t\t    priv->sh->config.dv_flow_en != 2)\n+\t\t\tflow_dv_translate_item_source_vport(key, id);\n \t\t/*\n \t\t * We should always set the vport metadata register,\n \t\t * otherwise the SW steering library can drop\n \t\t * the rule if wire vport metadata value is not zero,\n \t\t * it depends on kernel configuration.\n \t\t */\n-\t\tflow_dv_translate_item_meta_vport(matcher, key,\n-\t\t\t\t\t\t  priv->vport_meta_tag,\n+\t\tflow_dv_translate_item_meta_vport(key, vport_meta,\n \t\t\t\t\t\t  priv->vport_meta_mask);\n \t} else {\n-\t\tflow_dv_translate_item_source_vport(matcher, key,\n-\t\t\t\t\t\t    priv->vport_id, mask);\n+\t\tflow_dv_translate_item_source_vport(key, id);\n \t}\n \treturn 0;\n }\n \n /**\n- * Add ICMP6 item to matcher and to the value.\n+ * Translate port-id item to eswitch match on  port-id.\n  *\n+ * @param[in] dev\n+ *   The devich to configure through.\n  * @param[in, out] matcher\n  *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n+ * @param[in] attr\n+ *   Flow attributes.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise.\n+ */\n+static int\n+flow_dv_translate_item_port_id_all(struct rte_eth_dev *dev,\n+\t\t\t       void *matcher, void *key,\n+\t\t\t       const struct rte_flow_item *item,\n+\t\t\t       const struct rte_flow_attr *attr)\n+{\n+\tint ret;\n+\n+\tret = flow_dv_translate_item_port_id\n+\t\t\t(dev, matcher, item, attr, MLX5_SET_MATCHER_SW_M);\n+\tif (ret)\n+\t\treturn ret;\n+\tret = flow_dv_translate_item_port_id\n+\t\t\t(dev, key, item, attr, MLX5_SET_MATCHER_SW_V);\n+\treturn ret;\n+}\n+\n+\n+/**\n+ * Add ICMP6 item to the value.\n+ *\n+ * @param[in, out] key\n+ *   Flow matcher value.\n+ * @param[in] item\n+ *   Flow pattern to translate.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_icmp6(void *matcher, void *key,\n-\t\t\t      const struct rte_flow_item *item,\n-\t\t\t      int inner)\n+flow_dv_translate_item_icmp6(void *key, const struct rte_flow_item *item,\n+\t\t\t     int inner, uint32_t key_type)\n {\n-\tconst struct rte_flow_item_icmp6 *icmp6_m = item->mask;\n-\tconst struct rte_flow_item_icmp6 *icmp6_v = item->spec;\n-\tvoid *headers_m;\n+\tconst struct rte_flow_item_icmp6 *icmp6_m;\n+\tconst struct rte_flow_item_icmp6 *icmp6_v;\n \tvoid *headers_v;\n-\tvoid *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t     
misc_parameters_3);\n \tvoid *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);\n-\tif (inner) {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);\n-\tif (!icmp6_v)\n+\n+\theaders_v = inner ? MLX5_ADDR_OF(fte_match_param, key, inner_headers) :\n+\t\tMLX5_ADDR_OF(fte_match_param, key, outer_headers);\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 0xFF);\n+\telse\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,\n+\t\t\t IPPROTO_ICMPV6);\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!icmp6_m)\n-\t\ticmp6_m = &rte_flow_item_icmp6_mask;\n-\tMLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);\n+\tMLX5_ITEM_UPDATE(item, key_type, icmp6_v, icmp6_m,\n+\t\t&rte_flow_item_icmp6_mask);\n \tMLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,\n \t\t icmp6_v->type & icmp6_m->type);\n-\tMLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);\n \tMLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,\n \t\t icmp6_v->code & icmp6_m->code);\n }\n \n /**\n- * Add ICMP item to matcher and to the value.\n+ * Add ICMP item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_icmp(void *matcher, void *key,\n-\t\t\t    const struct rte_flow_item *item,\n-\t\t\t    int inner)\n+flow_dv_translate_item_icmp(void *key, const struct rte_flow_item *item,\n+\t\t\t    int inner, uint32_t key_type)\n {\n-\tconst struct rte_flow_item_icmp *icmp_m = item->mask;\n-\tconst struct rte_flow_item_icmp *icmp_v = item->spec;\n+\tconst struct rte_flow_item_icmp *icmp_m;\n+\tconst struct rte_flow_item_icmp *icmp_v;\n \tuint32_t icmp_header_data_m = 0;\n \tuint32_t icmp_header_data_v = 0;\n-\tvoid *headers_m;\n \tvoid *headers_v;\n-\tvoid *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t     misc_parameters_3);\n \tvoid *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);\n-\tif (inner) {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);\n-\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);\n-\tif (!icmp_v)\n+\n+\theaders_v = inner ? 
MLX5_ADDR_OF(fte_match_param, key, inner_headers) :\n+\t\t\tMLX5_ADDR_OF(fte_match_param, key, outer_headers);\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t ip_protocol, 0xFF);\n+\telse\n+\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t ip_protocol, IPPROTO_ICMP);\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!icmp_m)\n-\t\ticmp_m = &rte_flow_item_icmp_mask;\n-\tMLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,\n-\t\t icmp_m->hdr.icmp_type);\n+\tMLX5_ITEM_UPDATE(item, key_type, icmp_v, icmp_m,\n+\t\t&rte_flow_item_icmp_mask);\n \tMLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,\n \t\t icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);\n-\tMLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,\n-\t\t icmp_m->hdr.icmp_code);\n \tMLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,\n \t\t icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);\n \ticmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);\n@@ -10248,64 +10184,51 @@ flow_dv_translate_item_icmp(void *matcher, void *key,\n \t\ticmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);\n \t\ticmp_header_data_v |=\n \t\t\t rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;\n-\t\tMLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,\n-\t\t\t icmp_header_data_m);\n \t\tMLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,\n \t\t\t icmp_header_data_v & icmp_header_data_m);\n \t}\n }\n \n /**\n- * Add GTP item to matcher and to the value.\n+ * Add GTP item to the value.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] inner\n  *   Item is inner pattern.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_gtp(void *matcher, void *key,\n-\t\t\t   const struct rte_flow_item *item, int inner)\n+flow_dv_translate_item_gtp(void *key, const struct rte_flow_item *item,\n+\t\t\t   int inner, uint32_t key_type)\n {\n-\tconst struct rte_flow_item_gtp *gtp_m = item->mask;\n-\tconst struct rte_flow_item_gtp *gtp_v = item->spec;\n-\tvoid *headers_m;\n+\tconst struct rte_flow_item_gtp *gtp_m;\n+\tconst struct rte_flow_item_gtp *gtp_v;\n \tvoid *headers_v;\n-\tvoid *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t     misc_parameters_3);\n \tvoid *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);\n \tuint16_t dport = RTE_GTPU_UDP_PORT;\n \n-\tif (inner) {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t inner_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);\n-\t} else {\n-\t\theaders_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t\t outer_headers);\n-\t\theaders_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);\n-\t}\n+\theaders_v = inner ? 
MLX5_ADDR_OF(fte_match_param, key, inner_headers) :\n+\t\t\tMLX5_ADDR_OF(fte_match_param, key, outer_headers);\n \tif (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);\n-\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);\n+\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t\t udp_dport, 0xFFFF);\n+\t\telse\n+\t\t\tMLX5_SET(fte_match_set_lyr_2_4, headers_v,\n+\t\t\t\t udp_dport, dport);\n \t}\n-\tif (!gtp_v)\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!gtp_m)\n-\t\tgtp_m = &rte_flow_item_gtp_mask;\n-\tMLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,\n-\t\t gtp_m->v_pt_rsv_flags);\n+\tMLX5_ITEM_UPDATE(item, key_type, gtp_v, gtp_m,\n+\t\t&rte_flow_item_gtp_mask);\n \tMLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,\n \t\t gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);\n-\tMLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);\n \tMLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,\n \t\t gtp_v->msg_type & gtp_m->msg_type);\n-\tMLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,\n-\t\t rte_be_to_cpu_32(gtp_m->teid));\n \tMLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,\n \t\t rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));\n }\n@@ -10313,21 +10236,19 @@ flow_dv_translate_item_gtp(void *matcher, void *key,\n /**\n  * Add GTP PSC item to matcher.\n  *\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static int\n-flow_dv_translate_item_gtp_psc(void *matcher, void *key,\n-\t\t\t       const struct rte_flow_item *item)\n+flow_dv_translate_item_gtp_psc(void *key, const struct rte_flow_item *item,\n+\t\t\t       uint32_t key_type)\n {\n-\tconst struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;\n-\tconst struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;\n-\tvoid *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\tmisc_parameters_3);\n+\tconst struct rte_flow_item_gtp_psc *gtp_psc_m;\n+\tconst struct rte_flow_item_gtp_psc *gtp_psc_v;\n \tvoid *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);\n \tunion {\n \t\tuint32_t w32;\n@@ -10337,52 +10258,40 @@ flow_dv_translate_item_gtp_psc(void *matcher, void *key,\n \t\t\tuint8_t next_ext_header_type;\n \t\t};\n \t} dw_2;\n+\tunion {\n+\t\tuint32_t w32;\n+\t\tstruct {\n+\t\t\tuint8_t len;\n+\t\t\tuint8_t type_flags;\n+\t\t\tuint8_t qfi;\n+\t\t\tuint8_t reserved;\n+\t\t};\n+\t} dw_0;\n \tuint8_t gtp_flags;\n \n \t/* Always set E-flag match on one, regardless of GTP item settings. */\n-\tgtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);\n-\tgtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;\n-\tMLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);\n \tgtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);\n \tgtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;\n \tMLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);\n \t/*Set next extension header type. 
*/\n \tdw_2.seq_num = 0;\n \tdw_2.npdu_num = 0;\n-\tdw_2.next_ext_header_type = 0xff;\n-\tMLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,\n-\t\t rte_cpu_to_be_32(dw_2.w32));\n-\tdw_2.seq_num = 0;\n-\tdw_2.npdu_num = 0;\n-\tdw_2.next_ext_header_type = 0x85;\n+\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\tdw_2.next_ext_header_type = 0xff;\n+\telse\n+\t\tdw_2.next_ext_header_type = 0x85;\n \tMLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,\n \t\t rte_cpu_to_be_32(dw_2.w32));\n-\tif (gtp_psc_v) {\n-\t\tunion {\n-\t\t\tuint32_t w32;\n-\t\t\tstruct {\n-\t\t\t\tuint8_t len;\n-\t\t\t\tuint8_t type_flags;\n-\t\t\t\tuint8_t qfi;\n-\t\t\t\tuint8_t reserved;\n-\t\t\t};\n-\t\t} dw_0;\n-\n-\t\t/*Set extension header PDU type and Qos. */\n-\t\tif (!gtp_psc_m)\n-\t\t\tgtp_psc_m = &rte_flow_item_gtp_psc_mask;\n-\t\tdw_0.w32 = 0;\n-\t\tdw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->hdr.type);\n-\t\tdw_0.qfi = gtp_psc_m->hdr.qfi;\n-\t\tMLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,\n-\t\t\t rte_cpu_to_be_32(dw_0.w32));\n-\t\tdw_0.w32 = 0;\n-\t\tdw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &\n-\t\t\t\t\t\t\tgtp_psc_m->hdr.type);\n-\t\tdw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;\n-\t\tMLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,\n-\t\t\t rte_cpu_to_be_32(dw_0.w32));\n-\t}\n+\tif (MLX5_ITEM_VALID(item, key_type))\n+\t\treturn 0;\n+\tMLX5_ITEM_UPDATE(item, key_type, gtp_psc_v,\n+\t\tgtp_psc_m, &rte_flow_item_gtp_psc_mask);\n+\tdw_0.w32 = 0;\n+\tdw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->hdr.type &\n+\t\t\t\t\t\t  gtp_psc_m->hdr.type);\n+\tdw_0.qfi = gtp_psc_v->hdr.qfi & gtp_psc_m->hdr.qfi;\n+\tMLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,\n+\t\t rte_cpu_to_be_32(dw_0.w32));\n \treturn 0;\n }\n \n@@ -10391,29 +10300,27 @@ flow_dv_translate_item_gtp_psc(void *matcher, void *key,\n  *\n  * @param[in] dev\n  *   The devich to configure through.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n  * @param[in] last_item\n  *   Last item flags.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n-flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,\n-\t\t\t     void *key, const struct rte_flow_item *item,\n-\t\t\t     uint64_t last_item)\n+flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *key,\n+\t\t\t     const struct rte_flow_item *item,\n+\t\t\t     uint64_t last_item, uint32_t key_type)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tconst struct rte_flow_item_ecpri *ecpri_m = item->mask;\n-\tconst struct rte_flow_item_ecpri *ecpri_v = item->spec;\n+\tconst struct rte_flow_item_ecpri *ecpri_m;\n+\tconst struct rte_flow_item_ecpri *ecpri_v;\n+\tconst struct rte_flow_item_ecpri *ecpri_vv = item->spec;\n \tstruct rte_ecpri_common_hdr common;\n-\tvoid *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,\n-\t\t\t\t     misc_parameters_4);\n \tvoid *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);\n \tuint32_t *samples;\n-\tvoid *dw_m;\n \tvoid *dw_v;\n \n \t/*\n@@ -10421,21 +10328,22 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,\n \t * match on eCPRI EtherType implicitly.\n \t */\n \tif (last_item & MLX5_FLOW_LAYER_OUTER_L2) {\n-\t\tvoid *hdrs_m, *hdrs_v, *l2m, *l2v;\n+\t\tvoid *hdrs_v, *l2v;\n \n-\t\thdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);\n \t\thdrs_v = MLX5_ADDR_OF(fte_match_param, key, 
outer_headers);\n-\t\tl2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);\n \t\tl2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);\n-\t\tif (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {\n-\t\t\t*(uint16_t *)l2m = UINT16_MAX;\n-\t\t\t*(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);\n+\t\tif (*(uint16_t *)l2v == 0) {\n+\t\t\tif (key_type & MLX5_SET_MATCHER_M)\n+\t\t\t\t*(uint16_t *)l2v = UINT16_MAX;\n+\t\t\telse\n+\t\t\t\t*(uint16_t *)l2v =\n+\t\t\t\t\tRTE_BE16(RTE_ETHER_TYPE_ECPRI);\n \t\t}\n \t}\n-\tif (!ecpri_v)\n+\tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n-\tif (!ecpri_m)\n-\t\tecpri_m = &rte_flow_item_ecpri_mask;\n+\tMLX5_ITEM_UPDATE(item, key_type, ecpri_v, ecpri_m,\n+\t\t&rte_flow_item_ecpri_mask);\n \t/*\n \t * Maximal four DW samples are supported in a single matching now.\n \t * Two are used now for a eCPRI matching:\n@@ -10447,16 +10355,11 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,\n \t\treturn;\n \tsamples = priv->sh->ecpri_parser.ids;\n \t/* Need to take the whole DW as the mask to fill the entry. */\n-\tdw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,\n-\t\t\t    prog_sample_field_value_0);\n \tdw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,\n \t\t\t    prog_sample_field_value_0);\n \t/* Already big endian (network order) in the header. */\n-\t*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;\n \t*(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;\n \t/* Sample#0, used for matching type, offset 0. */\n-\tMLX5_SET(fte_match_set_misc4, misc4_m,\n-\t\t prog_sample_field_id_0, samples[0]);\n \t/* It makes no sense to set the sample ID in the mask field. */\n \tMLX5_SET(fte_match_set_misc4, misc4_v,\n \t\t prog_sample_field_id_0, samples[0]);\n@@ -10465,21 +10368,19 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,\n \t * Some wildcard rules only matching type field should be supported.\n \t */\n \tif (ecpri_m->hdr.dummy[0]) {\n-\t\tcommon.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);\n+\t\tif (key_type == MLX5_SET_MATCHER_SW_M)\n+\t\t\tcommon.u32 = rte_be_to_cpu_32(ecpri_vv->hdr.common.u32);\n+\t\telse\n+\t\t\tcommon.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);\n \t\tswitch (common.type) {\n \t\tcase RTE_ECPRI_MSG_TYPE_IQ_DATA:\n \t\tcase RTE_ECPRI_MSG_TYPE_RTC_CTRL:\n \t\tcase RTE_ECPRI_MSG_TYPE_DLY_MSR:\n-\t\t\tdw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,\n-\t\t\t\t\t    prog_sample_field_value_1);\n \t\t\tdw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,\n \t\t\t\t\t    prog_sample_field_value_1);\n-\t\t\t*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];\n \t\t\t*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &\n \t\t\t\t\t    ecpri_m->hdr.dummy[0];\n \t\t\t/* Sample#1, to match message body, offset 4. 
*/\n-\t\t\tMLX5_SET(fte_match_set_misc4, misc4_m,\n-\t\t\t\t prog_sample_field_id_1, samples[1]);\n \t\t\tMLX5_SET(fte_match_set_misc4, misc4_v,\n \t\t\t\t prog_sample_field_id_1, samples[1]);\n \t\t\tbreak;\n@@ -10544,7 +10445,7 @@ flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,\n \treg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);\n \tif (reg_id == REG_NON)\n \t\treturn;\n-\tflow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,\n+\tflow_dv_match_meta_reg_all(matcher, key, (enum modify_reg)reg_id,\n \t\t\t       reg_value, reg_mask);\n }\n \n@@ -11330,42 +11231,48 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev,\n  *\n  * @param[in] dev\n  *   Pointer to the dev struct.\n- * @param[in, out] matcher\n- *   Flow matcher.\n  * @param[in, out] key\n  *   Flow matcher value.\n  * @param[in] item\n  *   Flow pattern to translate.\n- * @param[in] inner\n- *   Item is inner pattern.\n+ * @param[in] key_type\n+ *   Set flow matcher mask or value.\n  */\n static void\n flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,\n-\t\t\t\tvoid *matcher, void *key,\n-\t\t\t\tconst struct rte_flow_item *item)\n+\t\t\t\tvoid *key,\n+\t\t\t\tconst struct rte_flow_item *item,\n+\t\t\t\tuint32_t key_type)\n {\n \tconst struct mlx5_rte_flow_item_tx_queue *queue_m;\n \tconst struct mlx5_rte_flow_item_tx_queue *queue_v;\n-\tvoid *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);\n-\tvoid *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n-\tstruct mlx5_txq_ctrl *txq;\n-\tuint32_t queue, mask;\n+\tconst struct mlx5_rte_flow_item_tx_queue queue_mask = {\n+\t\t.queue = UINT32_MAX,\n+\t};\n+\tvoid *misc_v =\n+\t\tMLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n+\tstruct mlx5_txq_ctrl *txq = NULL;\n+\tuint32_t queue;\n \n-\tqueue_m = (const void *)item->mask;\n-\tqueue_v = (const void *)item->spec;\n-\tif (!queue_v)\n+\tMLX5_ITEM_UPDATE(item, key_type, queue_v, queue_m, &queue_mask);\n+\tif (!queue_m || !queue_v)\n \t\treturn;\n-\ttxq = mlx5_txq_get(dev, queue_v->queue);\n-\tif (!txq)\n-\t\treturn;\n-\tif (txq->is_hairpin)\n-\t\tqueue = txq->obj->sq->id;\n-\telse\n-\t\tqueue = txq->obj->sq_obj.sq->id;\n-\tmask = queue_m == NULL ? 
UINT32_MAX : queue_m->queue;\n-\tMLX5_SET(fte_match_set_misc, misc_m, source_sqn, mask);\n-\tMLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue & mask);\n-\tmlx5_txq_release(dev, queue_v->queue);\n+\tif (key_type & MLX5_SET_MATCHER_V) {\n+\t\ttxq = mlx5_txq_get(dev, queue_v->queue);\n+\t\tif (!txq)\n+\t\t\treturn;\n+\t\tif (txq->is_hairpin)\n+\t\t\tqueue = txq->obj->sq->id;\n+\t\telse\n+\t\t\tqueue = txq->obj->sq_obj.sq->id;\n+\t\tif (key_type == MLX5_SET_MATCHER_SW_V)\n+\t\t\tqueue &= queue_m->queue;\n+\t} else {\n+\t\tqueue = queue_m->queue;\n+\t}\n+\tMLX5_SET(fte_match_set_misc, misc_v, source_sqn, queue);\n+\tif (txq)\n+\t\tmlx5_txq_release(dev, queue_v->queue);\n }\n \n /**\n@@ -13076,7 +12983,298 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,\n }\n \n /**\n- * Translate the flow item to matcher.\n+ * Fill the flow matcher with DV spec.\n+ *\n+ * @param[in] dev\n+ *   Pointer to rte_eth_dev structure.\n+ * @param[in] items\n+ *   Pointer to the list of items.\n+ * @param[in] wks\n+ *   Pointer to the matcher workspace.\n+ * @param[in] key\n+ *   Pointer to the flow matcher key.\n+ * @param[in] key_type\n+ *   Key type.\n+ * @param[out] error\n+ *   Pointer to the error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_dv_translate_items(struct rte_eth_dev *dev,\n+\t\t\tconst struct rte_flow_item *items,\n+\t\t\tstruct mlx5_dv_matcher_workspace *wks,\n+\t\t\tvoid *key, uint32_t key_type,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct mlx5_flow_rss_desc *rss_desc = wks->rss_desc;\n+\tuint8_t next_protocol = wks->next_protocol;\n+\tint tunnel = !!(wks->item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\tint item_type = items->type;\n+\tuint64_t last_item = wks->last_item;\n+\tint ret;\n+\n+\tswitch (item_type) {\n+\tcase RTE_FLOW_ITEM_TYPE_ESP:\n+\t\tflow_dv_translate_item_esp(key, items, tunnel, key_type);\n+\t\twks->priority = MLX5_PRIORITY_MAP_L4;\n+\t\tlast_item = MLX5_FLOW_ITEM_ESP;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_PORT_ID:\n+\t\tflow_dv_translate_item_port_id\n+\t\t\t(dev, key, items, wks->attr, key_type);\n+\t\tlast_item = MLX5_FLOW_ITEM_PORT_ID;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:\n+\t\tflow_dv_translate_item_represented_port\n+\t\t\t(dev, key, items, wks->attr, key_type);\n+\t\tlast_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_ETH:\n+\t\tflow_dv_translate_item_eth(key, items, tunnel,\n+\t\t\t\t\t   wks->group, key_type);\n+\t\twks->priority = wks->action_flags &\n+\t\t\t\tMLX5_FLOW_ACTION_DEFAULT_MISS &&\n+\t\t\t\t!wks->external ?\n+\t\t\t\tMLX5_PRIORITY_MAP_L3 :\n+\t\t\t\tMLX5_PRIORITY_MAP_L2;\n+\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n+\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L2;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n+\t\tflow_dv_translate_item_vlan(key, items, tunnel, wks, key_type);\n+\t\twks->priority = MLX5_PRIORITY_MAP_L2;\n+\t\tlast_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |\n+\t\t\t\t\t  MLX5_FLOW_LAYER_INNER_VLAN) :\n+\t\t\t\t\t (MLX5_FLOW_LAYER_OUTER_L2 |\n+\t\t\t\t\t  MLX5_FLOW_LAYER_OUTER_VLAN);\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n+\t\tmlx5_flow_tunnel_ip_check(items, next_protocol,\n+\t\t\t\t\t  &wks->item_flags, &tunnel);\n+\t\tflow_dv_translate_item_ipv4(key, items, tunnel,\n+\t\t\t\t\t    wks->group, key_type);\n+\t\twks->priority = MLX5_PRIORITY_MAP_L3;\n+\t\tlast_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n+\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV4;\n+\t\tif (items->mask != NULL &&\n+\t\t    items->spec != NULL &&\n+\t\t\t((const struct rte_flow_item_ipv4 *)\n+\t\t\t items->mask)->hdr.next_proto_id) {\n+\t\t\tnext_protocol =\n+\t\t\t\t((const struct rte_flow_item_ipv4 *)\n+\t\t\t\t (items->spec))->hdr.next_proto_id;\n+\t\t\tnext_protocol &=\n+\t\t\t\t((const struct rte_flow_item_ipv4 *)\n+\t\t\t\t (items->mask))->hdr.next_proto_id;\n+\t\t} else if (key_type == MLX5_SET_MATCHER_HS_M &&\n+\t\t\t   items->mask != NULL) {\n+\t\t\tnext_protocol =  ((const struct rte_flow_item_ipv4 *)\n+\t\t\t\t\t(items->mask))->hdr.next_proto_id;\n+\t\t} else if (key_type == MLX5_SET_MATCHER_HS_V &&\n+\t\t\t   items->spec != NULL) {\n+\t\t\tnext_protocol =  ((const struct rte_flow_item_ipv4 *)\n+\t\t\t\t\t(items->spec))->hdr.next_proto_id;\n+\t\t} else {\n+\t\t\t/* Reset for inner layer. */\n+\t\t\tnext_protocol = 0xff;\n+\t\t}\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n+\t\tmlx5_flow_tunnel_ip_check(items, next_protocol,\n+\t\t\t\t\t  &wks->item_flags, &tunnel);\n+\t\tflow_dv_translate_item_ipv6(key, items, tunnel,\n+\t\t\t\t\t    wks->group, key_type);\n+\t\twks->priority = MLX5_PRIORITY_MAP_L3;\n+\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :\n+\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV6;\n+\t\tif (items->mask != NULL &&\n+\t\t    items->spec != NULL &&\n+\t\t\t((const struct rte_flow_item_ipv6 *)\n+\t\t\t items->mask)->hdr.proto) {\n+\t\t\tnext_protocol =\n+\t\t\t\t((const struct rte_flow_item_ipv6 *)\n+\t\t\t\t items->spec)->hdr.proto;\n+\t\t\tnext_protocol &=\n+\t\t\t\t((const struct rte_flow_item_ipv6 *)\n+\t\t\t\t items->mask)->hdr.proto;\n+\t\t} else if (key_type == MLX5_SET_MATCHER_HS_M &&\n+\t\t\t   items->mask != NULL) {\n+\t\t\tnext_protocol =  ((const struct rte_flow_item_ipv6 *)\n+\t\t\t\t\t(items->mask))->hdr.proto;\n+\t\t} else if (key_type == MLX5_SET_MATCHER_HS_V &&\n+\t\t\t   items->spec != NULL) {\n+\t\t\tnext_protocol =  ((const struct rte_flow_item_ipv6 *)\n+\t\t\t\t\t(items->spec))->hdr.proto;\n+\t\t} else {\n+\t\t\t/* Reset for inner layer. */\n+\t\t\tnext_protocol = 0xff;\n+\t\t}\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:\n+\t\tflow_dv_translate_item_ipv6_frag_ext\n+\t\t\t\t\t(key, items, tunnel, key_type);\n+\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :\n+\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;\n+\t\tif (items->mask != NULL &&\n+\t\t    items->spec != NULL &&\n+\t\t\t((const struct rte_flow_item_ipv6_frag_ext *)\n+\t\t\t items->mask)->hdr.next_header) {\n+\t\t\tnext_protocol =\n+\t\t\t((const struct rte_flow_item_ipv6_frag_ext *)\n+\t\t\t items->spec)->hdr.next_header;\n+\t\t\tnext_protocol &=\n+\t\t\t((const struct rte_flow_item_ipv6_frag_ext *)\n+\t\t\t items->mask)->hdr.next_header;\n+\t\t} else if (key_type == MLX5_SET_MATCHER_HS_M &&\n+\t\t\t   items->mask != NULL) {\n+\t\t\tnext_protocol =  ((const struct rte_flow_item_ipv6_frag_ext *)\n+\t\t\t\t\t(items->mask))->hdr.next_header;\n+\t\t} else if (key_type == MLX5_SET_MATCHER_HS_V &&\n+\t\t\t   items->spec != NULL) {\n+\t\t\tnext_protocol =  ((const struct rte_flow_item_ipv6_frag_ext *)\n+\t\t\t\t\t(items->spec))->hdr.next_header;\n+\t\t} else {\n+\t\t\t/* Reset for inner layer. */\n+\t\t\tnext_protocol = 0xff;\n+\t\t}\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_TCP:\n+\t\tflow_dv_translate_item_tcp(key, items, tunnel, key_type);\n+\t\twks->priority = MLX5_PRIORITY_MAP_L4;\n+\t\tlast_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_TCP :\n+\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L4_TCP;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_UDP:\n+\t\tflow_dv_translate_item_udp(key, items, tunnel, wks, key_type);\n+\t\twks->priority = MLX5_PRIORITY_MAP_L4;\n+\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :\n+\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L4_UDP;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_GRE:\n+\t\twks->priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\twks->tunnel_item = items;\n+\t\twks->gre_item = items;\n+\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_GRE_KEY:\n+\t\tflow_dv_translate_item_gre_key(key, items, key_type);\n+\t\tlast_item = MLX5_FLOW_LAYER_GRE_KEY;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_GRE_OPTION:\n+\t\twks->priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\twks->tunnel_item = items;\n+\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_NVGRE:\n+\t\twks->priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\twks->tunnel_item = items;\n+\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\tflow_dv_translate_item_vxlan(dev, wks->attr, key,\n+\t\t\t\t\t     items, tunnel, wks, key_type);\n+\t\twks->priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\tlast_item = MLX5_FLOW_LAYER_VXLAN;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_VXLAN_GPE:\n+\t\twks->priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\twks->tunnel_item = items;\n+\t\tlast_item = MLX5_FLOW_LAYER_VXLAN_GPE;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_GENEVE:\n+\t\twks->priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\twks->tunnel_item = items;\n+\t\tlast_item = MLX5_FLOW_LAYER_GENEVE;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_GENEVE_OPT:\n+\t\tret = flow_dv_translate_item_geneve_opt\n+\t\t\t\t(dev, key, items, key_type, error);\n+\t\tif (ret)\n+\t\t\treturn rte_flow_error_set(error, -ret,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n+\t\t\t\t\"cannot create GENEVE TLV option\");\n+\t\twks->geneve_tlv_option = 1;\n+\t\tlast_item = MLX5_FLOW_LAYER_GENEVE_OPT;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_MPLS:\n+\t\tflow_dv_translate_item_mpls(key, items, last_item,\n+\t\t\t\t\t    tunnel, key_type);\n+\t\twks->priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\tlast_item = MLX5_FLOW_LAYER_MPLS;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_MARK:\n+\t\tflow_dv_translate_item_mark(dev, key, items, key_type);\n+\t\tlast_item = MLX5_FLOW_ITEM_MARK;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_META:\n+\t\tflow_dv_translate_item_meta\n+\t\t\t\t(dev, key, wks->attr, items, key_type);\n+\t\tlast_item = MLX5_FLOW_ITEM_METADATA;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_ICMP:\n+\t\tflow_dv_translate_item_icmp(key, items, tunnel, key_type);\n+\t\twks->priority = MLX5_PRIORITY_MAP_L4;\n+\t\tlast_item = MLX5_FLOW_LAYER_ICMP;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_ICMP6:\n+\t\tflow_dv_translate_item_icmp6(key, items, tunnel, key_type);\n+\t\twks->priority = MLX5_PRIORITY_MAP_L4;\n+\t\tlast_item = MLX5_FLOW_LAYER_ICMP6;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_TAG:\n+\t\tflow_dv_translate_item_tag(dev, key, items, key_type);\n+\t\tlast_item = MLX5_FLOW_ITEM_TAG;\n+\t\tbreak;\n+\tcase MLX5_RTE_FLOW_ITEM_TYPE_TAG:\n+\t\tflow_dv_translate_mlx5_item_tag(dev, key, items, key_type);\n+\t\tlast_item = MLX5_FLOW_ITEM_TAG;\n+\t\tbreak;\n+\tcase MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:\n+\t\tflow_dv_translate_item_tx_queue(dev, key, items, key_type);\n+\t\tlast_item = MLX5_FLOW_ITEM_TX_QUEUE;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_GTP:\n+\t\tflow_dv_translate_item_gtp(key, items, tunnel, 
key_type);\n+\t\twks->priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\tlast_item = MLX5_FLOW_LAYER_GTP;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_GTP_PSC:\n+\t\tret = flow_dv_translate_item_gtp_psc(key, items, key_type);\n+\t\tif (ret)\n+\t\t\treturn rte_flow_error_set(error, -ret,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n+\t\t\t\t\"cannot create GTP PSC item\");\n+\t\tlast_item = MLX5_FLOW_LAYER_GTP_PSC;\n+\t\tbreak;\n+\tcase RTE_FLOW_ITEM_TYPE_ECPRI:\n+\t\tif (!mlx5_flex_parser_ecpri_exist(dev)) {\n+\t\t\t/* Create it only the first time to be used. */\n+\t\t\tret = mlx5_flex_parser_ecpri_alloc(dev);\n+\t\t\tif (ret)\n+\t\t\t\treturn rte_flow_error_set\n+\t\t\t\t\t(error, -ret,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\tNULL,\n+\t\t\t\t\t\"cannot create eCPRI parser\");\n+\t\t}\n+\t\tflow_dv_translate_item_ecpri\n+\t\t\t\t(dev, key, items, last_item, key_type);\n+\t\t/* No other protocol should follow eCPRI layer. */\n+\t\tlast_item = MLX5_FLOW_LAYER_ECPRI;\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\twks->item_flags |= last_item;\n+\twks->last_item = last_item;\n+\twks->next_protocol = next_protocol;\n+\treturn 0;\n+}\n+\n+/**\n+ * Fill the SW steering flow with DV spec.\n  *\n  * @param[in] dev\n  *   Pointer to rte_eth_dev structure.\n@@ -13086,7 +13284,7 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,\n  *   Pointer to the flow attributes.\n  * @param[in] items\n  *   Pointer to the list of items.\n- * @param[in] matcher\n+ * @param[in, out] matcher\n  *   Pointer to the flow matcher.\n  * @param[out] error\n  *   Pointer to the error structure.\n@@ -13095,287 +13293,41 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-flow_dv_translate_items(struct rte_eth_dev *dev,\n-\t\t\tstruct mlx5_flow *dev_flow,\n-\t\t\tconst struct rte_flow_attr *attr,\n-\t\t\tconst struct rte_flow_item items[],\n-\t\t\tstruct mlx5_flow_dv_matcher *matcher,\n-\t\t\tstruct rte_flow_error *error)\n+flow_dv_translate_items_sws(struct rte_eth_dev *dev,\n+\t\t\t    struct mlx5_flow *dev_flow,\n+\t\t\t    const struct rte_flow_attr *attr,\n+\t\t\t    const struct rte_flow_item *items,\n+\t\t\t    struct mlx5_flow_dv_matcher *matcher,\n+\t\t\t    struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct rte_flow *flow = dev_flow->flow;\n-\tstruct mlx5_flow_handle *handle = dev_flow->handle;\n-\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n-\tstruct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;\n-\tuint64_t item_flags = 0;\n-\tuint64_t last_item = 0;\n \tvoid *match_mask = matcher->mask.buf;\n \tvoid *match_value = dev_flow->dv.value.buf;\n-\tuint8_t next_protocol = 0xff;\n-\tuint16_t priority = 0;\n+\tstruct mlx5_dv_matcher_workspace wks = {\n+\t\t.action_flags = dev_flow->act_flags,\n+\t\t.item_flags = 0,\n+\t\t.external = dev_flow->external,\n+\t\t.next_protocol = 0xff,\n+\t\t.group = dev_flow->dv.group,\n+\t\t.attr = attr,\n+\t\t.rss_desc = &((struct mlx5_flow_workspace *)\n+\t\t\t     mlx5_flow_get_thread_workspace())->rss_desc,\n+\t};\n+\tstruct mlx5_dv_matcher_workspace wks_m = wks;\n \tconst struct rte_flow_item *integrity_items[2] = {NULL, NULL};\n-\tconst struct rte_flow_item *tunnel_item = NULL;\n-\tconst struct rte_flow_item *gre_item = NULL;\n \tint ret = 0;\n+\tint tunnel;\n \n \tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\n-\t\tint tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n-\t\tint 
item_type = items->type;\n-\n-\t\tif (!mlx5_flow_os_item_supported(item_type))\n+\t\tif (!mlx5_flow_os_item_supported(items->type))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  NULL, \"item not supported\");\n-\t\tswitch (item_type) {\n-\t\tcase RTE_FLOW_ITEM_TYPE_ESP:\n-\t\t\tflow_dv_translate_item_esp(match_mask, match_value,\n-\t\t\t\t\t\t   items, tunnel);\n-\t\t\tpriority = MLX5_PRIORITY_MAP_L4;\n-\t\t\tlast_item = MLX5_FLOW_ITEM_ESP;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_PORT_ID:\n-\t\t\tflow_dv_translate_item_port_id\n-\t\t\t\t(dev, match_mask, match_value, items, attr);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_PORT_ID;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:\n-\t\t\tflow_dv_translate_item_represented_port\n-\t\t\t\t(dev, match_mask, match_value, items, attr);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n-\t\t\tflow_dv_translate_item_eth(match_mask, match_value,\n-\t\t\t\t\t\t   items, tunnel,\n-\t\t\t\t\t\t   dev_flow->dv.group);\n-\t\t\tpriority = dev_flow->act_flags &\n-\t\t\t\t\tMLX5_FLOW_ACTION_DEFAULT_MISS &&\n-\t\t\t\t\t!dev_flow->external ?\n-\t\t\t\t\tMLX5_PRIORITY_MAP_L3 :\n-\t\t\t\t\tMLX5_PRIORITY_MAP_L2;\n-\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n-\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L2;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n-\t\t\tflow_dv_translate_item_vlan(dev_flow,\n-\t\t\t\t\t\t    match_mask, match_value,\n-\t\t\t\t\t\t    items, tunnel,\n-\t\t\t\t\t\t    dev_flow->dv.group);\n-\t\t\tpriority = MLX5_PRIORITY_MAP_L2;\n-\t\t\tlast_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |\n-\t\t\t\t\t      MLX5_FLOW_LAYER_INNER_VLAN) :\n-\t\t\t\t\t     (MLX5_FLOW_LAYER_OUTER_L2 |\n-\t\t\t\t\t      MLX5_FLOW_LAYER_OUTER_VLAN);\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n-\t\t\tmlx5_flow_tunnel_ip_check(items, next_protocol,\n-\t\t\t\t\t\t  &item_flags, &tunnel);\n-\t\t\tflow_dv_translate_item_ipv4(match_mask, match_value,\n-\t\t\t\t\t\t    items, tunnel,\n-\t\t\t\t\t\t    dev_flow->dv.group);\n-\t\t\tpriority = MLX5_PRIORITY_MAP_L3;\n-\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n-\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV4;\n-\t\t\tif (items->mask != NULL &&\n-\t\t\t    ((const struct rte_flow_item_ipv4 *)\n-\t\t\t     items->mask)->hdr.next_proto_id) {\n-\t\t\t\tnext_protocol =\n-\t\t\t\t\t((const struct rte_flow_item_ipv4 *)\n-\t\t\t\t\t (items->spec))->hdr.next_proto_id;\n-\t\t\t\tnext_protocol &=\n-\t\t\t\t\t((const struct rte_flow_item_ipv4 *)\n-\t\t\t\t\t (items->mask))->hdr.next_proto_id;\n-\t\t\t} else {\n-\t\t\t\t/* Reset for inner layer. */\n-\t\t\t\tnext_protocol = 0xff;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n-\t\t\tmlx5_flow_tunnel_ip_check(items, next_protocol,\n-\t\t\t\t\t\t  &item_flags, &tunnel);\n-\t\t\tflow_dv_translate_item_ipv6(match_mask, match_value,\n-\t\t\t\t\t\t    items, tunnel,\n-\t\t\t\t\t\t    dev_flow->dv.group);\n-\t\t\tpriority = MLX5_PRIORITY_MAP_L3;\n-\t\t\tlast_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L3_IPV6 :\n-\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV6;\n-\t\t\tif (items->mask != NULL &&\n-\t\t\t    ((const struct rte_flow_item_ipv6 *)\n-\t\t\t     items->mask)->hdr.proto) {\n-\t\t\t\tnext_protocol =\n-\t\t\t\t\t((const struct rte_flow_item_ipv6 *)\n-\t\t\t\t\t items->spec)->hdr.proto;\n-\t\t\t\tnext_protocol &=\n-\t\t\t\t\t((const struct rte_flow_item_ipv6 *)\n-\t\t\t\t\t items->mask)->hdr.proto;\n-\t\t\t} else {\n-\t\t\t\t/* Reset for inner layer. */\n-\t\t\t\tnext_protocol = 0xff;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:\n-\t\t\tflow_dv_translate_item_ipv6_frag_ext(match_mask,\n-\t\t\t\t\t\t\t     match_value,\n-\t\t\t\t\t\t\t     items, tunnel);\n-\t\t\tlast_item = tunnel ?\n-\t\t\t\t\tMLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :\n-\t\t\t\t\tMLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;\n-\t\t\tif (items->mask != NULL &&\n-\t\t\t    ((const struct rte_flow_item_ipv6_frag_ext *)\n-\t\t\t     items->mask)->hdr.next_header) {\n-\t\t\t\tnext_protocol =\n-\t\t\t\t((const struct rte_flow_item_ipv6_frag_ext *)\n-\t\t\t\t items->spec)->hdr.next_header;\n-\t\t\t\tnext_protocol &=\n-\t\t\t\t((const struct rte_flow_item_ipv6_frag_ext *)\n-\t\t\t\t items->mask)->hdr.next_header;\n-\t\t\t} else {\n-\t\t\t\t/* Reset for inner layer. */\n-\t\t\t\tnext_protocol = 0xff;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n-\t\t\tflow_dv_translate_item_tcp(match_mask, match_value,\n-\t\t\t\t\t\t   items, tunnel);\n-\t\t\tpriority = MLX5_PRIORITY_MAP_L4;\n-\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :\n-\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L4_TCP;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n-\t\t\tflow_dv_translate_item_udp(match_mask, match_value,\n-\t\t\t\t\t\t   items, tunnel);\n-\t\t\tpriority = MLX5_PRIORITY_MAP_L4;\n-\t\t\tlast_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_UDP :\n-\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L4_UDP;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GRE:\n-\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n-\t\t\ttunnel_item = items;\n-\t\t\tgre_item = items;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GRE_KEY:\n-\t\t\tflow_dv_translate_item_gre_key(match_mask,\n-\t\t\t\t\t\t       match_value, items);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GRE_KEY;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GRE_OPTION:\n-\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n-\t\t\ttunnel_item = items;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_NVGRE:\n-\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n-\t\t\ttunnel_item = items;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n-\t\t\tflow_dv_translate_item_vxlan(dev, attr,\n-\t\t\t\t\t\t     match_mask, match_value,\n-\t\t\t\t\t\t     items, tunnel);\n-\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_VXLAN;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN_GPE:\n-\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_VXLAN_GPE;\n-\t\t\ttunnel_item = items;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GENEVE:\n-\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GENEVE;\n-\t\t\ttunnel_item = items;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GENEVE_OPT:\n-\t\t\tret = flow_dv_translate_item_geneve_opt(dev, match_mask,\n-\t\t\t\t\t\t\t  match_value,\n-\t\t\t\t\t\t\t  items, error);\n-\t\t\tif (ret)\n-\t\t\t\treturn rte_flow_error_set(error, -ret,\n-\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n-\t\t\t\t\t\"cannot create GENEVE TLV option\");\n-\t\t\tflow->geneve_tlv_option = 1;\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GENEVE_OPT;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_MPLS:\n-\t\t\tflow_dv_translate_item_mpls(match_mask, match_value,\n-\t\t\t\t\t\t    items, last_item, tunnel);\n-\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_MPLS;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_MARK:\n-\t\t\tflow_dv_translate_item_mark(dev, match_mask,\n-\t\t\t\t\t\t    match_value, items);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_MARK;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_META:\n-\t\t\tflow_dv_translate_item_meta(dev, match_mask,\n-\t\t\t\t\t\t    match_value, attr, items);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_METADATA;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_ICMP:\n-\t\t\tflow_dv_translate_item_icmp(match_mask, match_value,\n-\t\t\t\t\t\t    items, tunnel);\n-\t\t\tpriority = MLX5_PRIORITY_MAP_L4;\n-\t\t\tlast_item = MLX5_FLOW_LAYER_ICMP;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_ICMP6:\n-\t\t\tflow_dv_translate_item_icmp6(match_mask, match_value,\n-\t\t\t\t\t\t      items, tunnel);\n-\t\t\tpriority = MLX5_PRIORITY_MAP_L4;\n-\t\t\tlast_item = MLX5_FLOW_LAYER_ICMP6;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_TAG:\n-\t\t\tflow_dv_translate_item_tag(dev, match_mask,\n-\t\t\t\t\t\t   match_value, items);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_TAG;\n-\t\t\tbreak;\n-\t\tcase MLX5_RTE_FLOW_ITEM_TYPE_TAG:\n-\t\t\tflow_dv_translate_mlx5_item_tag(dev, match_mask,\n-\t\t\t\t\t\t\tmatch_value, items);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_TAG;\n-\t\t\tbreak;\n-\t\tcase MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:\n-\t\t\tflow_dv_translate_item_tx_queue(dev, match_mask,\n-\t\t\t\t\t\t\tmatch_value,\n-\t\t\t\t\t\t\titems);\n-\t\t\tlast_item = 
MLX5_FLOW_ITEM_TX_QUEUE;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GTP:\n-\t\t\tflow_dv_translate_item_gtp(match_mask, match_value,\n-\t\t\t\t\t\t   items, tunnel);\n-\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GTP;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GTP_PSC:\n-\t\t\tret = flow_dv_translate_item_gtp_psc(match_mask,\n-\t\t\t\t\t\t\t  match_value,\n-\t\t\t\t\t\t\t  items);\n-\t\t\tif (ret)\n-\t\t\t\treturn rte_flow_error_set(error, -ret,\n-\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n-\t\t\t\t\t\"cannot create GTP PSC item\");\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GTP_PSC;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_ECPRI:\n-\t\t\tif (!mlx5_flex_parser_ecpri_exist(dev)) {\n-\t\t\t\t/* Create it only the first time to be used. */\n-\t\t\t\tret = mlx5_flex_parser_ecpri_alloc(dev);\n-\t\t\t\tif (ret)\n-\t\t\t\t\treturn rte_flow_error_set\n-\t\t\t\t\t\t(error, -ret,\n-\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\tNULL,\n-\t\t\t\t\t\t\"cannot create eCPRI parser\");\n-\t\t\t}\n-\t\t\tflow_dv_translate_item_ecpri(dev, match_mask,\n-\t\t\t\t\t\t     match_value, items,\n-\t\t\t\t\t\t     last_item);\n-\t\t\t/* No other protocol should follow eCPRI layer. */\n-\t\t\tlast_item = MLX5_FLOW_LAYER_ECPRI;\n-\t\t\tbreak;\n+\t\ttunnel = !!(wks.item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\t\tswitch (items->type) {\n \t\tcase RTE_FLOW_ITEM_TYPE_INTEGRITY:\n \t\t\tflow_dv_translate_item_integrity(items, integrity_items,\n-\t\t\t\t\t\t\t &last_item);\n+\t\t\t\t\t\t\t &wks.last_item);\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_CONNTRACK:\n \t\t\tflow_dv_translate_item_aso_ct(dev, match_mask,\n@@ -13385,13 +13337,22 @@ flow_dv_translate_items(struct rte_eth_dev *dev,\n \t\t\tflow_dv_translate_item_flex(dev, match_mask,\n \t\t\t\t\t\t    match_value, items,\n \t\t\t\t\t\t    dev_flow, tunnel != 0);\n-\t\t\tlast_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :\n-\t\t\t\t    MLX5_FLOW_ITEM_OUTER_FLEX;\n+\t\t\twks.last_item = tunnel ? 
MLX5_FLOW_ITEM_INNER_FLEX :\n+\t\t\t\t\t\t MLX5_FLOW_ITEM_OUTER_FLEX;\n \t\t\tbreak;\n+\n \t\tdefault:\n+\t\t\tret = flow_dv_translate_items(dev, items, &wks_m,\n+\t\t\t\tmatch_mask, MLX5_SET_MATCHER_SW_M, error);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t\tret = flow_dv_translate_items(dev, items, &wks,\n+\t\t\t\tmatch_value, MLX5_SET_MATCHER_SW_V, error);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n \t\t\tbreak;\n \t\t}\n-\t\titem_flags |= last_item;\n+\t\twks.item_flags |= wks.last_item;\n \t}\n \t/*\n \t * When E-Switch mode is enabled, we have two cases where we need to\n@@ -13401,48 +13362,82 @@ flow_dv_translate_items(struct rte_eth_dev *dev,\n \t * In both cases the source port is set according the current port\n \t * in use.\n \t */\n-\tif (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&\n-\t    !(item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) && priv->sh->esw_mode &&\n+\tif (!(wks.item_flags & MLX5_FLOW_ITEM_PORT_ID) &&\n+\t    !(wks.item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) && priv->sh->esw_mode &&\n \t    !(attr->egress && !attr->transfer)) {\n-\t\tif (flow_dv_translate_item_port_id(dev, match_mask,\n+\t\tif (flow_dv_translate_item_port_id_all(dev, match_mask,\n \t\t\t\t\t\t   match_value, NULL, attr))\n \t\t\treturn -rte_errno;\n \t}\n-\tif (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {\n+\tif (wks.item_flags & MLX5_FLOW_ITEM_INTEGRITY) {\n \t\tflow_dv_translate_item_integrity_post(match_mask, match_value,\n \t\t\t\t\t\t      integrity_items,\n-\t\t\t\t\t\t      item_flags);\n-\t}\n-\tif (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)\n-\t\tflow_dv_translate_item_vxlan_gpe(match_mask, match_value,\n-\t\t\t\t\t\t tunnel_item, item_flags);\n-\telse if (item_flags & MLX5_FLOW_LAYER_GENEVE)\n-\t\tflow_dv_translate_item_geneve(match_mask, match_value,\n-\t\t\t\t\t      tunnel_item, item_flags);\n-\telse if (item_flags & MLX5_FLOW_LAYER_GRE) {\n-\t\tif (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)\n-\t\t\tflow_dv_translate_item_gre(match_mask, match_value,\n-\t\t\t\t\t\t   tunnel_item, item_flags);\n-\t\telse if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)\n-\t\t\tflow_dv_translate_item_nvgre(match_mask, match_value,\n-\t\t\t\t\t\t     tunnel_item, item_flags);\n-\t\telse if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)\n-\t\t\tflow_dv_translate_item_gre_option(match_mask, match_value,\n-\t\t\t\t\ttunnel_item, gre_item, item_flags);\n-\t\telse\n+\t\t\t\t\t\t      wks.item_flags);\n+\t}\n+\tif (wks.item_flags & MLX5_FLOW_LAYER_VXLAN_GPE) {\n+\t\tflow_dv_translate_item_vxlan_gpe(match_mask,\n+\t\t\t\t\t\t wks.tunnel_item,\n+\t\t\t\t\t\t wks.item_flags,\n+\t\t\t\t\t\t MLX5_SET_MATCHER_SW_M);\n+\t\tflow_dv_translate_item_vxlan_gpe(match_value,\n+\t\t\t\t\t\t wks.tunnel_item,\n+\t\t\t\t\t\t wks.item_flags,\n+\t\t\t\t\t\t MLX5_SET_MATCHER_SW_V);\n+\t} else if (wks.item_flags & MLX5_FLOW_LAYER_GENEVE) {\n+\t\tflow_dv_translate_item_geneve(match_mask,\n+\t\t\t\t\t      wks.tunnel_item,\n+\t\t\t\t\t      wks.item_flags,\n+\t\t\t\t\t      MLX5_SET_MATCHER_SW_M);\n+\t\tflow_dv_translate_item_geneve(match_value,\n+\t\t\t\t\t      wks.tunnel_item,\n+\t\t\t\t\t      wks.item_flags,\n+\t\t\t\t\t      MLX5_SET_MATCHER_SW_V);\n+\t} else if (wks.item_flags & MLX5_FLOW_LAYER_GRE) {\n+\t\tif (wks.tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE) {\n+\t\t\tflow_dv_translate_item_gre(match_mask,\n+\t\t\t\t\t\t   wks.tunnel_item,\n+\t\t\t\t\t\t   wks.item_flags,\n+\t\t\t\t\t\t   MLX5_SET_MATCHER_SW_M);\n+\t\t\tflow_dv_translate_item_gre(match_value,\n+\t\t\t\t\t\t   wks.tunnel_item,\n+\t\t\t\t\t\t   
wks.item_flags,\n+\t\t\t\t\t\t   MLX5_SET_MATCHER_SW_V);\n+\t\t} else if (wks.tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {\n+\t\t\tflow_dv_translate_item_nvgre(match_mask,\n+\t\t\t\t\t\t     wks.tunnel_item,\n+\t\t\t\t\t\t     wks.item_flags,\n+\t\t\t\t\t\t     MLX5_SET_MATCHER_SW_M);\n+\t\t\tflow_dv_translate_item_nvgre(match_value,\n+\t\t\t\t\t\t     wks.tunnel_item,\n+\t\t\t\t\t\t     wks.item_flags,\n+\t\t\t\t\t\t     MLX5_SET_MATCHER_SW_V);\n+\t\t} else if (wks.tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION) {\n+\t\t\tflow_dv_translate_item_gre_option(match_mask,\n+\t\t\t\t\t\t\t  wks.tunnel_item,\n+\t\t\t\t\t\t\t  wks.gre_item,\n+\t\t\t\t\t\t\t  wks.item_flags,\n+\t\t\t\t\t\t\t  MLX5_SET_MATCHER_SW_M);\n+\t\t\tflow_dv_translate_item_gre_option(match_value,\n+\t\t\t\t\t\t\t  wks.tunnel_item,\n+\t\t\t\t\t\t\t  wks.gre_item,\n+\t\t\t\t\t\t\t  wks.item_flags,\n+\t\t\t\t\t\t\t  MLX5_SET_MATCHER_SW_V);\n+\t\t} else {\n \t\t\tMLX5_ASSERT(false);\n+\t\t}\n \t}\n-\tmatcher->priority = priority;\n+\tdev_flow->handle->vf_vlan.tag = wks.vlan_tag;\n+\tmatcher->priority = wks.priority;\n #ifdef RTE_LIBRTE_MLX5_DEBUG\n-\tMLX5_ASSERT(!flow_dv_check_valid_spec(matcher->mask.buf,\n-\t\t\t\t\t      dev_flow->dv.value.buf));\n+\tMLX5_ASSERT(!flow_dv_check_valid_spec(match_mask, match_value));\n #endif\n \t/*\n \t * Layers may be already initialized from prefix flow if this dev_flow\n \t * is the suffix flow.\n \t */\n-\thandle->layers |= item_flags;\n-\treturn ret;\n+\tdev_flow->handle->layers |= wks.item_flags;\n+\tdev_flow->flow->geneve_tlv_option = wks.geneve_tlv_option;\n+\treturn 0;\n }\n \n /**\n@@ -14182,7 +14177,7 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\t\tmodify_action_position = actions_n++;\n \t}\n \tdev_flow->act_flags = action_flags;\n-\tret = flow_dv_translate_items(dev, dev_flow, attr, items, &matcher,\n+\tret = flow_dv_translate_items_sws(dev, dev_flow, attr, items, &matcher,\n \t\t\t\t      error);\n \tif (ret)\n \t\treturn -rte_errno;\n@@ -16754,27 +16749,23 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev,\n \tstruct mlx5_flow_dv_match_params value = {\n \t\t.size = sizeof(value.buf),\n \t};\n-\tstruct mlx5_flow_dv_match_params matcher = {\n-\t\t.size = sizeof(matcher.buf),\n-\t};\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tuint8_t misc_mask;\n \n \tif (match_src_port && priv->sh->esw_mode) {\n \t\tif (item && item->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT)\n-\t\t\tret = flow_dv_translate_item_represented_port(dev, matcher.buf, value.buf,\n-\t\t\t\t\t\t\t\t      item, attr);\n+\t\t\tret = flow_dv_translate_item_represented_port(dev, value.buf,\n+\t\t\t\t\t\titem, attr, MLX5_SET_MATCHER_SW_V);\n \t\telse\n-\t\t\tret = flow_dv_translate_item_port_id(dev, matcher.buf, value.buf,\n-\t\t\t\t\t\t\t     item, attr);\n+\t\t\tret = flow_dv_translate_item_port_id(dev, value.buf,\n+\t\t\t\t\t\titem, attr, MLX5_SET_MATCHER_SW_V);\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR, \"Failed to create meter policy%d flow's\"\n \t\t\t\t\" value with port.\", color);\n \t\t\treturn -1;\n \t\t}\n \t}\n-\tflow_dv_match_meta_reg(matcher.buf, value.buf,\n-\t\t\t       (enum modify_reg)color_reg_c_idx,\n+\tflow_dv_match_meta_reg(value.buf, (enum modify_reg)color_reg_c_idx,\n \t\t\t       rte_col_2_mlx5_col(color), UINT32_MAX);\n \tmisc_mask = flow_dv_matcher_enable(value.buf);\n \t__flow_dv_adjust_buf_size(&value.size, misc_mask);\n@@ -16806,9 +16797,6 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,\n \t\t},\n \t\t.tbl = tbl_rsc,\n \t};\n-\tstruct 
mlx5_flow_dv_match_params value = {\n-\t\t.size = sizeof(value.buf),\n-\t};\n \tstruct mlx5_flow_cb_ctx ctx = {\n \t\t.error = error,\n \t\t.data = &matcher,\n@@ -16821,10 +16809,10 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,\n \tif (match_src_port && priv->sh->esw_mode) {\n \t\tif (item && item->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT)\n \t\t\tret = flow_dv_translate_item_represented_port(dev, matcher.mask.buf,\n-\t\t\t\t\t\t\t\t      value.buf, item, attr);\n+\t\t\t\t\t\titem, attr, MLX5_SET_MATCHER_SW_M);\n \t\telse\n-\t\t\tret = flow_dv_translate_item_port_id(dev, matcher.mask.buf, value.buf,\n-\t\t\t\t\t\t\t     item, attr);\n+\t\t\tret = flow_dv_translate_item_port_id(dev, matcher.mask.buf,\n+\t\t\t\t\t\titem, attr, MLX5_SET_MATCHER_SW_M);\n \t\tif (ret) {\n \t\t\tDRV_LOG(ERR, \"Failed to register meter policy%d matcher\"\n \t\t\t\t\" with port.\", priority);\n@@ -16833,7 +16821,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,\n \t}\n \ttbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);\n \tif (priority < RTE_COLOR_RED)\n-\t\tflow_dv_match_meta_reg(matcher.mask.buf, value.buf,\n+\t\tflow_dv_match_meta_reg(matcher.mask.buf,\n \t\t\t(enum modify_reg)color_reg_c_idx, 0, color_mask);\n \tmatcher.priority = priority;\n \tmatcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,\n@@ -17369,7 +17357,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,\n \t\ttbl_data = container_of(mtrmng->drop_tbl[domain],\n \t\t\t\tstruct mlx5_flow_tbl_data_entry, tbl);\n \t\tif (!mtrmng->def_matcher[domain]) {\n-\t\t\tflow_dv_match_meta_reg(matcher.mask.buf, value.buf,\n+\t\t\tflow_dv_match_meta_reg_all(matcher.mask.buf, value.buf,\n \t\t\t\t       (enum modify_reg)mtr_id_reg_c,\n \t\t\t\t       0, 0);\n \t\t\tmatcher.priority = MLX5_MTRS_DEFAULT_RULE_PRIORITY;\n@@ -17389,7 +17377,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,\n \t\tif (!mtrmng->def_rule[domain]) {\n \t\t\ti = 0;\n \t\t\tactions[i++] = priv->sh->dr_drop_action;\n-\t\t\tflow_dv_match_meta_reg(matcher_para.buf, value.buf,\n+\t\t\tflow_dv_match_meta_reg_all(matcher_para.buf, value.buf,\n \t\t\t\t(enum modify_reg)mtr_id_reg_c, 0, 0);\n \t\t\tmisc_mask = flow_dv_matcher_enable(value.buf);\n \t\t\t__flow_dv_adjust_buf_size(&value.size, misc_mask);\n@@ -17408,7 +17396,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,\n \t\tMLX5_ASSERT(mtrmng->max_mtr_bits);\n \t\tif (!mtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1]) {\n \t\t\t/* Create matchers for Drop. */\n-\t\t\tflow_dv_match_meta_reg(matcher.mask.buf, value.buf,\n+\t\t\tflow_dv_match_meta_reg_all(matcher.mask.buf, value.buf,\n \t\t\t\t\t(enum modify_reg)mtr_id_reg_c, 0,\n \t\t\t\t\t(mtr_id_mask << mtr_id_offset));\n \t\t\tmatcher.priority = MLX5_REG_BITS - mtrmng->max_mtr_bits;\n@@ -17428,7 +17416,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,\n \t\tdrop_matcher =\n \t\t\tmtrmng->drop_matcher[domain][mtrmng->max_mtr_bits - 1];\n \t\t/* Create drop rule, matching meter_id only. 
*/\n-\t\tflow_dv_match_meta_reg(matcher_para.buf, value.buf,\n+\t\tflow_dv_match_meta_reg_all(matcher_para.buf, value.buf,\n \t\t\t\t(enum modify_reg)mtr_id_reg_c,\n \t\t\t\t(mtr_idx << mtr_id_offset), UINT32_MAX);\n \t\ti = 0;\n@@ -18910,8 +18898,12 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev,\n \tflow.dv.actions[0] = action;\n \tflow.dv.actions_n = 1;\n \tmemset(&eth, 0, sizeof(eth));\n-\tflow_dv_translate_item_eth(matcher.mask.buf, flow.dv.value.buf,\n-\t\t\t\t   &item, /* inner */ false, /* group */ 0);\n+\tflow_dv_translate_item_eth(matcher.mask.buf, &item,\n+\t\t\t\t   /* inner */ false, /* group */ 0,\n+\t\t\t\t   MLX5_SET_MATCHER_SW_M);\n+\tflow_dv_translate_item_eth(flow.dv.value.buf, &item,\n+\t\t\t\t   /* inner */ false, /* group */ 0,\n+\t\t\t\t   MLX5_SET_MATCHER_SW_V);\n \tmatcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);\n \tfor (i = 0; i < vprio_n; i++) {\n \t\t/* Configure the next proposed maximum priority. */\n",
    "prefixes": [
        "v6",
        "02/18"
    ]
}
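
Editor's note (not part of the API response or the patch above): the series refactors the SW-steering item translation so that one helper fills a single buffer per call, and a key_type flag (MLX5_SET_MATCHER_SW_M for the matcher mask, MLX5_SET_MATCHER_SW_V for the value) selects whether the item's mask or its masked spec is written; the caller then invokes the helper twice, once per buffer, as flow_dv_translate_items_sws does in the diff. The sketch below mimics that calling pattern only. All names prefixed demo_ and the DEMO_MATCHER_* flags are hypothetical stand-ins invented for illustration; they are not mlx5 APIs.

/* Minimal, self-contained sketch of the mask/value split-by-flag pattern. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MATCHER_M 0x1u /* fill the mask buffer (stands in for MLX5_SET_MATCHER_SW_M) */
#define DEMO_MATCHER_V 0x2u /* fill the value buffer (stands in for MLX5_SET_MATCHER_SW_V) */

struct demo_item {
	const uint32_t *spec; /* value requested by the rule */
	const uint32_t *mask; /* bits that must match; NULL means "match all bits" */
};

/* Translate one item into 'key': either the mask or the masked value, chosen by key_type. */
static void
demo_translate_queue(uint32_t *key, const struct demo_item *item, uint32_t key_type)
{
	static const uint32_t full_mask = UINT32_MAX;
	const uint32_t *m = item->mask ? item->mask : &full_mask;

	if (!item->spec)
		return;
	if (key_type & DEMO_MATCHER_V)
		*key = *item->spec & *m; /* value buffer gets spec ANDed with mask */
	else
		*key = *m;               /* mask buffer gets the mask itself */
}

int
main(void)
{
	uint32_t spec = 7, mask = 0xf, mask_buf = 0, value_buf = 0;
	struct demo_item item = { .spec = &spec, .mask = &mask };

	/* Same helper, called twice: once per buffer, as in the refactored translation loop. */
	demo_translate_queue(&mask_buf, &item, DEMO_MATCHER_M);
	demo_translate_queue(&value_buf, &item, DEMO_MATCHER_V);
	printf("mask=0x%x value=0x%x\n", mask_buf, value_buf);
	return 0;
}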