get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
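
The record shown below can also be retrieved programmatically. This is a minimal sketch, assuming the Python "requests" library is available and that the endpoint returns JSON by default (as the Content-Type in the response below indicates); it fetches the same patch record and reads a few of its fields.

# Minimal sketch (assumption: Python "requests" is installed); fetches the
# patch record shown below from the Patchwork REST API and prints some fields.
import requests

resp = requests.get("http://patches.dpdk.org/api/patches/118808/", timeout=30)
resp.raise_for_status()          # the server answers HTTP 200 OK, as shown below
patch = resp.json()              # body is application/json

print(patch["name"])             # [v6,01/18] net/mlx5: split flow item translation
print(patch["state"])            # accepted
print(patch["mbox"])             # URL of the raw mbox for this patch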

GET /api/patches/118808/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118808,
    "url": "http://patches.dpdk.org/api/patches/118808/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20221020155749.16643-2-valex@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221020155749.16643-2-valex@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221020155749.16643-2-valex@nvidia.com",
    "date": "2022-10-20T15:57:31",
    "name": "[v6,01/18] net/mlx5: split flow item translation",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "8f4dcf3126ea720587c97529c6672fd377fdddbc",
    "submitter": {
        "id": 2858,
        "url": "http://patches.dpdk.org/api/people/2858/?format=api",
        "name": "Alex Vesker",
        "email": "valex@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20221020155749.16643-2-valex@nvidia.com/mbox/",
    "series": [
        {
            "id": 25345,
            "url": "http://patches.dpdk.org/api/series/25345/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=25345",
            "date": "2022-10-20T15:57:30",
            "name": "net/mlx5: Add HW steering low level support",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/25345/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/118808/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/118808/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B9121A0553;\n\tThu, 20 Oct 2022 17:59:03 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8C25C42670;\n\tThu, 20 Oct 2022 17:59:01 +0200 (CEST)",
            "from NAM11-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam11on2040.outbound.protection.outlook.com [40.107.236.40])\n by mails.dpdk.org (Postfix) with ESMTP id AE32841614\n for <dev@dpdk.org>; Thu, 20 Oct 2022 17:59:00 +0200 (CEST)",
            "from BN9P223CA0010.NAMP223.PROD.OUTLOOK.COM (2603:10b6:408:10b::15)\n by SA0PR12MB4527.namprd12.prod.outlook.com (2603:10b6:806:73::18)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5723.34; Thu, 20 Oct\n 2022 15:58:57 +0000",
            "from BN8NAM11FT105.eop-nam11.prod.protection.outlook.com\n (2603:10b6:408:10b:cafe::45) by BN9P223CA0010.outlook.office365.com\n (2603:10b6:408:10b::15) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5723.34 via Frontend\n Transport; Thu, 20 Oct 2022 15:58:57 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n BN8NAM11FT105.mail.protection.outlook.com (10.13.176.183) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5746.16 via Frontend Transport; Thu, 20 Oct 2022 15:58:57 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Thu, 20 Oct\n 2022 08:58:47 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Thu, 20 Oct\n 2022 08:58:45 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=GY1lsAC19JT2fb1xGXnp4YnhVxGJ6EUQWzZU8tcV9e3nePAm/hmUBDrUTvJhgDp7NL9tFFM8lM1tkSVxdhqc229H1N11dgpgeCKDPxSFGCjNTrzR9P3bodKedaT2B03+1cSg6hMPrhYmZO8p382NlrjhCteGK3aOK3J9Lg+KalWkA/QBZH+jRX7LR8wxXurpL+Aj4zu++ofE8/SbgTaSW9JwmflJKf1toECS4mYd2uFRU4TYT1dt5ACg0lOJINssfn/RBRGEFaJdIHmxIQpwMXuw23y69n6GrwwIod1JOo/47JmUWy0Iq1menB8VduocuDeIW8nnwKiEd+zdmkUmwQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=YGtINx6nsGxT9bDshMqb+xEK0aRpTLUdSdvjAu8rRms=;\n b=GWM6vmf23Zjxs7oTgjG80D7CZ0ymKnJZod3KfIe+raixOXpcvOhjk00pTizadZLHEfPXP8LdYnm78/iysxgmUsF8VnQw9V32wgI+XFlKSxDYcGOImi04K47SEyG3dPLJaD1VSasGs0Ffl3rYsXN8h2iUwX7IO2HOeBIFHk8XQtGWMDDIc69NO/2SjkmsKJTAyw7v+LkeIjNf7eq10ewqSrVabr4n3Q69F1phkf5K2ufF4GNYZy5WrNFhUHxCTpOMc15A8VhLAn8S7uxXFcE0z63wNridXtZr4sZ8gv8pjet5OUkc3MBCtX0jusjBUzKRGQ1q0xfKhl7AowyXokw13w==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=none (sender ip is\n 216.228.117.161) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=fail (p=reject sp=reject pct=100) action=oreject\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=YGtINx6nsGxT9bDshMqb+xEK0aRpTLUdSdvjAu8rRms=;\n b=jnklq+5c32o44GZskoORHrO3BOkQJnb5ifij5z8infaQBkYzl5qlv6dzfzLdOL1pJhtcfQsNjrXlCJ8KTMKnYUY8z5PhihfKQ05JuIoXWWNe3nu0nG0+oqU2tVF5rSA03pQiW6tzDD4wGG6+2oAGXpNgN7hktDLzJEd9R6hhXkMuGs4Hox0JQ6FJ869I5V+8sxrEZwucS+yLZnAM0TrcAgEbin51pFsIrj8orUIfctFp9YrvfMQSLZ5Kujkdm3gafERYrkrBgTeUQLLDSmy01sKGmSbUCmT/CrYGWTPyS0kc7K6vnxB3RJm0lcHDIaG18ln1thw51LDBFydeHMfr7Q==",
        "X-MS-Exchange-Authentication-Results": "spf=none (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=fail action=oreject header.from=nvidia.com;",
        "Received-SPF": "None (protection.outlook.com: nvidia.com does not designate\n permitted sender hosts)",
        "From": "Alex Vesker <valex@nvidia.com>",
        "To": "<valex@nvidia.com>, <viacheslavo@nvidia.com>, <thomas@monjalon.net>,\n <suanmingm@nvidia.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<dev@dpdk.org>, <orika@nvidia.com>",
        "Subject": "[v6 01/18] net/mlx5: split flow item translation",
        "Date": "Thu, 20 Oct 2022 18:57:31 +0300",
        "Message-ID": "<20221020155749.16643-2-valex@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20221020155749.16643-1-valex@nvidia.com>",
        "References": "<20220922190345.394-1-valex@nvidia.com>\n <20221020155749.16643-1-valex@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail202.nvidia.com (10.129.68.7) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "BN8NAM11FT105:EE_|SA0PR12MB4527:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "a23c9803-9a2f-46a7-e7cc-08dab2b3ff36",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n jPJWFrPBPOBlahthMusHtJpZJEw+uvL6pGarGPE2akDwZdRZkv0xgEl6b5s68SnPAMLndhb4Tlt5yx7Om8QPUbT4WmOygf0IId3y4ac8C7hZqZyvsLCh02M7fMcYzwagG5iwGLfPlH1yF8ss882A9bQayz3kYDr4gR8WfuUUszxvNvBGGZ97sj3B+BPoUZ0fgxucXD4uWq/GexmJjdQs2302L3b4QDnfThUhSdqpNICYPZT3L40lKDeNrau2+qDV7y5ZCzjD1sG9WwVjQRji/nRUUy3tAeaEBYeGrs65iT3qOGvTA3/dHqQvmf/rgj8b5m/ifE15HwOEWEe4j+YXWcSVFB7NY4MTg1dULRT6+NayDdZPQckEpYbRyMCUq53G9xAYoY6ZmRmB+ESmKBTuNyDHRWDVl+BU4FJIyvcGzGGg/3OPkRTJMZ8g5GTKw3/DnBpQVG/twrlYxgYNWo6TewQw03OFhGo/DtGrN657bfB0JiUvP/1jncO98aa/4mDYuUwGFpmP3LVmpemBeiw27XCX2A12+QivMMZhbitkbx4DnCotZ6LD3SwBoSRYjKV+rBv0Y39DMujgxf/wv/rYp90vuvK3ZCoJ4mtEHA4eEmIsZ4Kh7AFerioMCZeuGyRY8NOVk1Grfi4x3VbmqhU0X9NV4biY3BHAvbPd548gGa+335UlzQHKWgVG9JZzuG7A6oD5pqLe3oZ/J5T+GZLdqZDpTqXwwOaVvu5q9WF6xZe2HTSx326keDjSueUmEDPHNl8BzcRc8R+sK4STf0NXS/xmG4LBgY2w14+mTcAhXWc=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(39860400002)(136003)(376002)(396003)(346002)(451199015)(36840700001)(46966006)(40470700004)(36756003)(82310400005)(86362001)(82740400003)(41300700001)(7636003)(356005)(426003)(47076005)(36860700001)(2616005)(83380400001)(40460700003)(16526019)(1076003)(186003)(336012)(40480700001)(107886003)(6666004)(7696005)(478600001)(5660300002)(2906002)(316002)(8936002)(6636002)(70206006)(8676002)(4326008)(30864003)(6286002)(26005)(110136005)(54906003)(55016003)(70586007)(579004)(559001)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "20 Oct 2022 15:58:57.4681 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n a23c9803-9a2f-46a7-e7cc-08dab2b3ff36",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT105.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SA0PR12MB4527",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Suanming Mou <suanmingm@nvidia.com>\n\nIn order to share the item translation code with hardware steering\nmode, this commit splits flow item translation code to a dedicate\nfunction.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow_dv.c | 1915 ++++++++++++++++---------------\n 1 file changed, 979 insertions(+), 936 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 4bdcb1815b..0f3ff4db51 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -13076,8 +13076,7 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,\n }\n \n /**\n- * Fill the flow with DV spec, lock free\n- * (mutex should be acquired by caller).\n+ * Translate the flow item to matcher.\n  *\n  * @param[in] dev\n  *   Pointer to rte_eth_dev structure.\n@@ -13087,8 +13086,8 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,\n  *   Pointer to the flow attributes.\n  * @param[in] items\n  *   Pointer to the list of items.\n- * @param[in] actions\n- *   Pointer to the list of actions.\n+ * @param[in] matcher\n+ *   Pointer to the flow matcher.\n  * @param[out] error\n  *   Pointer to the error structure.\n  *\n@@ -13096,650 +13095,656 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n static int\n-flow_dv_translate(struct rte_eth_dev *dev,\n-\t\t  struct mlx5_flow *dev_flow,\n-\t\t  const struct rte_flow_attr *attr,\n-\t\t  const struct rte_flow_item items[],\n-\t\t  const struct rte_flow_action actions[],\n-\t\t  struct rte_flow_error *error)\n+flow_dv_translate_items(struct rte_eth_dev *dev,\n+\t\t\tstruct mlx5_flow *dev_flow,\n+\t\t\tconst struct rte_flow_attr *attr,\n+\t\t\tconst struct rte_flow_item items[],\n+\t\t\tstruct mlx5_flow_dv_matcher *matcher,\n+\t\t\tstruct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_sh_config *dev_conf = &priv->sh->config;\n \tstruct rte_flow *flow = dev_flow->flow;\n \tstruct mlx5_flow_handle *handle = dev_flow->handle;\n \tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n-\tstruct mlx5_flow_rss_desc *rss_desc;\n+\tstruct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;\n \tuint64_t item_flags = 0;\n \tuint64_t last_item = 0;\n-\tuint64_t action_flags = 0;\n-\tstruct mlx5_flow_dv_matcher matcher = {\n-\t\t.mask = {\n-\t\t\t.size = sizeof(matcher.mask.buf),\n-\t\t},\n-\t};\n-\tint actions_n = 0;\n-\tbool actions_end = false;\n-\tunion {\n-\t\tstruct mlx5_flow_dv_modify_hdr_resource res;\n-\t\tuint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +\n-\t\t\t    sizeof(struct mlx5_modification_cmd) *\n-\t\t\t    (MLX5_MAX_MODIFY_NUM + 1)];\n-\t} mhdr_dummy;\n-\tstruct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;\n-\tconst struct rte_flow_action_count *count = NULL;\n-\tconst struct rte_flow_action_age *non_shared_age = NULL;\n-\tunion flow_dv_attr flow_attr = { .attr = 0 };\n-\tuint32_t tag_be;\n-\tunion mlx5_flow_tbl_key tbl_key;\n-\tuint32_t modify_action_position = UINT32_MAX;\n-\tvoid *match_mask = matcher.mask.buf;\n+\tvoid *match_mask = matcher->mask.buf;\n \tvoid *match_value = dev_flow->dv.value.buf;\n \tuint8_t next_protocol = 0xff;\n-\tstruct rte_vlan_hdr vlan = { 0 };\n-\tstruct mlx5_flow_dv_dest_array_resource mdest_res;\n-\tstruct mlx5_flow_dv_sample_resource sample_res;\n-\tvoid *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};\n-\tconst struct rte_flow_action_sample *sample = NULL;\n-\tstruct mlx5_flow_sub_actions_list *sample_act;\n-\tuint32_t sample_act_pos = UINT32_MAX;\n-\tuint32_t age_act_pos = UINT32_MAX;\n-\tuint32_t num_of_dest = 0;\n-\tint tmp_actions_n = 0;\n-\tuint32_t table;\n-\tint ret = 0;\n-\tconst struct mlx5_flow_tunnel *tunnel = NULL;\n-\tstruct flow_grp_info grp_info = {\n-\t\t.external 
= !!dev_flow->external,\n-\t\t.transfer = !!attr->transfer,\n-\t\t.fdb_def_rule = !!priv->fdb_def_rule,\n-\t\t.skip_scale = dev_flow->skip_scale &\n-\t\t\t(1 << MLX5_SCALE_FLOW_GROUP_BIT),\n-\t\t.std_tbl_fix = true,\n-\t};\n+\tuint16_t priority = 0;\n \tconst struct rte_flow_item *integrity_items[2] = {NULL, NULL};\n \tconst struct rte_flow_item *tunnel_item = NULL;\n \tconst struct rte_flow_item *gre_item = NULL;\n+\tint ret = 0;\n \n-\tif (!wks)\n-\t\treturn rte_flow_error_set(error, ENOMEM,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"failed to push flow workspace\");\n-\trss_desc = &wks->rss_desc;\n-\tmemset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));\n-\tmemset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));\n-\tmhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :\n-\t\t\t\t\t   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;\n-\t/* update normal path action resource into last index of array */\n-\tsample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];\n-\tif (is_tunnel_offload_active(dev)) {\n-\t\tif (dev_flow->tunnel) {\n-\t\t\tRTE_VERIFY(dev_flow->tof_type ==\n-\t\t\t\t   MLX5_TUNNEL_OFFLOAD_MISS_RULE);\n-\t\t\ttunnel = dev_flow->tunnel;\n-\t\t} else {\n-\t\t\ttunnel = mlx5_get_tof(items, actions,\n-\t\t\t\t\t      &dev_flow->tof_type);\n-\t\t\tdev_flow->tunnel = tunnel;\n-\t\t}\n-\t\tgrp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate\n-\t\t\t\t\t(dev, attr, tunnel, dev_flow->tof_type);\n-\t}\n-\tmhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :\n-\t\t\t\t\t   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;\n-\tret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,\n-\t\t\t\t       &grp_info, error);\n-\tif (ret)\n-\t\treturn ret;\n-\tdev_flow->dv.group = table;\n-\tif (attr->transfer)\n-\t\tmhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;\n-\t/* number of actions must be set to 0 in case of dirty stack. 
*/\n-\tmhdr_res->actions_num = 0;\n-\tif (is_flow_tunnel_match_rule(dev_flow->tof_type)) {\n-\t\t/*\n-\t\t * do not add decap action if match rule drops packet\n-\t\t * HW rejects rules with decap & drop\n-\t\t *\n-\t\t * if tunnel match rule was inserted before matching tunnel set\n-\t\t * rule flow table used in the match rule must be registered.\n-\t\t * current implementation handles that in the\n-\t\t * flow_dv_match_register() at the function end.\n-\t\t */\n-\t\tbool add_decap = true;\n-\t\tconst struct rte_flow_action *ptr = actions;\n-\n-\t\tfor (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {\n-\t\t\tif (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {\n-\t\t\t\tadd_decap = false;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\t\tif (add_decap) {\n-\t\t\tif (flow_dv_create_action_l2_decap(dev, dev_flow,\n-\t\t\t\t\t\t\t   attr->transfer,\n-\t\t\t\t\t\t\t   error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.encap_decap->action;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n-\t\t}\n-\t}\n-\tfor (; !actions_end ; actions++) {\n-\t\tconst struct rte_flow_action_queue *queue;\n-\t\tconst struct rte_flow_action_rss *rss;\n-\t\tconst struct rte_flow_action *action = actions;\n-\t\tconst uint8_t *rss_key;\n-\t\tstruct mlx5_flow_tbl_resource *tbl;\n-\t\tstruct mlx5_aso_age_action *age_act;\n-\t\tstruct mlx5_flow_counter *cnt_act;\n-\t\tuint32_t port_id = 0;\n-\t\tstruct mlx5_flow_dv_port_id_action_resource port_id_resource;\n-\t\tint action_type = actions->type;\n-\t\tconst struct rte_flow_action *found_action = NULL;\n-\t\tuint32_t jump_group = 0;\n-\t\tuint32_t owner_idx;\n-\t\tstruct mlx5_aso_ct_action *ct;\n+\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\n+\t\tint tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n+\t\tint item_type = items->type;\n \n-\t\tif (!mlx5_flow_os_action_supported(action_type))\n+\t\tif (!mlx5_flow_os_item_supported(item_type))\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t  actions,\n-\t\t\t\t\t\t  \"action not supported\");\n-\t\tswitch (action_type) {\n-\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  NULL, \"item not supported\");\n+\t\tswitch (item_type) {\n+\t\tcase RTE_FLOW_ITEM_TYPE_ESP:\n+\t\t\tflow_dv_translate_item_esp(match_mask, match_value,\n+\t\t\t\t\t\t   items, tunnel);\n+\t\t\tpriority = MLX5_PRIORITY_MAP_L4;\n+\t\t\tlast_item = MLX5_FLOW_ITEM_ESP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n+\t\tcase RTE_FLOW_ITEM_TYPE_PORT_ID:\n+\t\t\tflow_dv_translate_item_port_id\n+\t\t\t\t(dev, match_mask, match_value, items, attr);\n+\t\t\tlast_item = MLX5_FLOW_ITEM_PORT_ID;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_PORT_ID:\n-\t\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n-\t\t\tif (flow_dv_translate_action_port_id(dev, action,\n-\t\t\t\t\t\t\t     &port_id, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\tport_id_resource.port_id = port_id;\n-\t\t\tMLX5_ASSERT(!handle->rix_port_id_action);\n-\t\t\tif (flow_dv_port_id_action_resource_register\n-\t\t\t    (dev, &port_id_resource, dev_flow, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.port_id_action->action;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_PORT_ID;\n-\t\t\tdev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;\n-\t\t\tsample_act->action_flags |= 
MLX5_FLOW_ACTION_PORT_ID;\n-\t\t\tnum_of_dest++;\n+\t\tcase RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:\n+\t\t\tflow_dv_translate_item_represented_port\n+\t\t\t\t(dev, match_mask, match_value, items, attr);\n+\t\t\tlast_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_FLAG:\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_FLAG;\n-\t\t\twks->mark = 1;\n-\t\t\tif (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {\n-\t\t\t\tstruct rte_flow_action_mark mark = {\n-\t\t\t\t\t.id = MLX5_FLOW_MARK_DEFAULT,\n-\t\t\t\t};\n-\n-\t\t\t\tif (flow_dv_convert_action_mark(dev, &mark,\n-\t\t\t\t\t\t\t\tmhdr_res,\n-\t\t\t\t\t\t\t\terror))\n-\t\t\t\t\treturn -rte_errno;\n-\t\t\t\taction_flags |= MLX5_FLOW_ACTION_MARK_EXT;\n-\t\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n+\t\t\tflow_dv_translate_item_eth(match_mask, match_value,\n+\t\t\t\t\t\t   items, tunnel,\n+\t\t\t\t\t\t   dev_flow->dv.group);\n+\t\t\tpriority = dev_flow->act_flags &\n+\t\t\t\t\tMLX5_FLOW_ACTION_DEFAULT_MISS &&\n+\t\t\t\t\t!dev_flow->external ?\n+\t\t\t\t\tMLX5_PRIORITY_MAP_L3 :\n+\t\t\t\t\tMLX5_PRIORITY_MAP_L2;\n+\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n+\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L2;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n+\t\t\tflow_dv_translate_item_vlan(dev_flow,\n+\t\t\t\t\t\t    match_mask, match_value,\n+\t\t\t\t\t\t    items, tunnel,\n+\t\t\t\t\t\t    dev_flow->dv.group);\n+\t\t\tpriority = MLX5_PRIORITY_MAP_L2;\n+\t\t\tlast_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |\n+\t\t\t\t\t      MLX5_FLOW_LAYER_INNER_VLAN) :\n+\t\t\t\t\t     (MLX5_FLOW_LAYER_OUTER_L2 |\n+\t\t\t\t\t      MLX5_FLOW_LAYER_OUTER_VLAN);\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n+\t\t\tmlx5_flow_tunnel_ip_check(items, next_protocol,\n+\t\t\t\t\t\t  &item_flags, &tunnel);\n+\t\t\tflow_dv_translate_item_ipv4(match_mask, match_value,\n+\t\t\t\t\t\t    items, tunnel,\n+\t\t\t\t\t\t    dev_flow->dv.group);\n+\t\t\tpriority = MLX5_PRIORITY_MAP_L3;\n+\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n+\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV4;\n+\t\t\tif (items->mask != NULL &&\n+\t\t\t    ((const struct rte_flow_item_ipv4 *)\n+\t\t\t     items->mask)->hdr.next_proto_id) {\n+\t\t\t\tnext_protocol =\n+\t\t\t\t\t((const struct rte_flow_item_ipv4 *)\n+\t\t\t\t\t (items->spec))->hdr.next_proto_id;\n+\t\t\t\tnext_protocol &=\n+\t\t\t\t\t((const struct rte_flow_item_ipv4 *)\n+\t\t\t\t\t (items->mask))->hdr.next_proto_id;\n+\t\t\t} else {\n+\t\t\t\t/* Reset for inner layer. */\n+\t\t\t\tnext_protocol = 0xff;\n \t\t\t}\n-\t\t\ttag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);\n-\t\t\t/*\n-\t\t\t * Only one FLAG or MARK is supported per device flow\n-\t\t\t * right now. 
So the pointer to the tag resource must be\n-\t\t\t * zero before the register process.\n-\t\t\t */\n-\t\t\tMLX5_ASSERT(!handle->dvh.rix_tag);\n-\t\t\tif (flow_dv_tag_resource_register(dev, tag_be,\n-\t\t\t\t\t\t\t  dev_flow, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\tMLX5_ASSERT(dev_flow->dv.tag_resource);\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.tag_resource->action;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_MARK:\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_MARK;\n-\t\t\twks->mark = 1;\n-\t\t\tif (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {\n-\t\t\t\tconst struct rte_flow_action_mark *mark =\n-\t\t\t\t\t(const struct rte_flow_action_mark *)\n-\t\t\t\t\t\tactions->conf;\n-\n-\t\t\t\tif (flow_dv_convert_action_mark(dev, mark,\n-\t\t\t\t\t\t\t\tmhdr_res,\n-\t\t\t\t\t\t\t\terror))\n-\t\t\t\t\treturn -rte_errno;\n-\t\t\t\taction_flags |= MLX5_FLOW_ACTION_MARK_EXT;\n-\t\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n+\t\t\tmlx5_flow_tunnel_ip_check(items, next_protocol,\n+\t\t\t\t\t\t  &item_flags, &tunnel);\n+\t\t\tflow_dv_translate_item_ipv6(match_mask, match_value,\n+\t\t\t\t\t\t    items, tunnel,\n+\t\t\t\t\t\t    dev_flow->dv.group);\n+\t\t\tpriority = MLX5_PRIORITY_MAP_L3;\n+\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :\n+\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV6;\n+\t\t\tif (items->mask != NULL &&\n+\t\t\t    ((const struct rte_flow_item_ipv6 *)\n+\t\t\t     items->mask)->hdr.proto) {\n+\t\t\t\tnext_protocol =\n+\t\t\t\t\t((const struct rte_flow_item_ipv6 *)\n+\t\t\t\t\t items->spec)->hdr.proto;\n+\t\t\t\tnext_protocol &=\n+\t\t\t\t\t((const struct rte_flow_item_ipv6 *)\n+\t\t\t\t\t items->mask)->hdr.proto;\n+\t\t\t} else {\n+\t\t\t\t/* Reset for inner layer. */\n+\t\t\t\tnext_protocol = 0xff;\n \t\t\t}\n-\t\t\t/* Fall-through */\n-\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_MARK:\n-\t\t\t/* Legacy (non-extensive) MARK action. */\n-\t\t\ttag_be = mlx5_flow_mark_set\n-\t\t\t      (((const struct rte_flow_action_mark *)\n-\t\t\t       (actions->conf))->id);\n-\t\t\tMLX5_ASSERT(!handle->dvh.rix_tag);\n-\t\t\tif (flow_dv_tag_resource_register(dev, tag_be,\n-\t\t\t\t\t\t\t  dev_flow, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\tMLX5_ASSERT(dev_flow->dv.tag_resource);\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.tag_resource->action;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_META:\n-\t\t\tif (flow_dv_convert_action_set_meta\n-\t\t\t\t(dev, mhdr_res, attr,\n-\t\t\t\t (const struct rte_flow_action_set_meta *)\n-\t\t\t\t  actions->conf, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_META;\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:\n+\t\t\tflow_dv_translate_item_ipv6_frag_ext(match_mask,\n+\t\t\t\t\t\t\t     match_value,\n+\t\t\t\t\t\t\t     items, tunnel);\n+\t\t\tlast_item = tunnel ?\n+\t\t\t\t\tMLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :\n+\t\t\t\t\tMLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;\n+\t\t\tif (items->mask != NULL &&\n+\t\t\t    ((const struct rte_flow_item_ipv6_frag_ext *)\n+\t\t\t     items->mask)->hdr.next_header) {\n+\t\t\t\tnext_protocol =\n+\t\t\t\t((const struct rte_flow_item_ipv6_frag_ext *)\n+\t\t\t\t items->spec)->hdr.next_header;\n+\t\t\t\tnext_protocol &=\n+\t\t\t\t((const struct rte_flow_item_ipv6_frag_ext *)\n+\t\t\t\t items->mask)->hdr.next_header;\n+\t\t\t} else {\n+\t\t\t\t/* Reset for inner layer. 
*/\n+\t\t\t\tnext_protocol = 0xff;\n+\t\t\t}\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TAG:\n-\t\t\tif (flow_dv_convert_action_set_tag\n-\t\t\t\t(dev, mhdr_res,\n-\t\t\t\t (const struct rte_flow_action_set_tag *)\n-\t\t\t\t  actions->conf, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_TAG;\n+\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n+\t\t\tflow_dv_translate_item_tcp(match_mask, match_value,\n+\t\t\t\t\t\t   items, tunnel);\n+\t\t\tpriority = MLX5_PRIORITY_MAP_L4;\n+\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :\n+\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L4_TCP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_DROP:\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_DROP;\n-\t\t\tdev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;\n+\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n+\t\t\tflow_dv_translate_item_udp(match_mask, match_value,\n+\t\t\t\t\t\t   items, tunnel);\n+\t\t\tpriority = MLX5_PRIORITY_MAP_L4;\n+\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :\n+\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L4_UDP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n-\t\t\tqueue = actions->conf;\n-\t\t\trss_desc->queue_num = 1;\n-\t\t\trss_desc->queue[0] = queue->index;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_QUEUE;\n-\t\t\tdev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;\n-\t\t\tsample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;\n-\t\t\tnum_of_dest++;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE:\n+\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n+\t\t\ttunnel_item = items;\n+\t\t\tgre_item = items;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n-\t\t\trss = actions->conf;\n-\t\t\tmemcpy(rss_desc->queue, rss->queue,\n-\t\t\t       rss->queue_num * sizeof(uint16_t));\n-\t\t\trss_desc->queue_num = rss->queue_num;\n-\t\t\t/* NULL RSS key indicates default RSS key. */\n-\t\t\trss_key = !rss->key ? 
rss_hash_default_key : rss->key;\n-\t\t\tmemcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);\n-\t\t\t/*\n-\t\t\t * rss->level and rss.types should be set in advance\n-\t\t\t * when expanding items for RSS.\n-\t\t\t */\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_RSS;\n-\t\t\tdev_flow->handle->fate_action = rss_desc->shared_rss ?\n-\t\t\t\tMLX5_FLOW_FATE_SHARED_RSS :\n-\t\t\t\tMLX5_FLOW_FATE_QUEUE;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE_KEY:\n+\t\t\tflow_dv_translate_item_gre_key(match_mask,\n+\t\t\t\t\t\t       match_value, items);\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GRE_KEY;\n \t\t\tbreak;\n-\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_AGE:\n-\t\t\towner_idx = (uint32_t)(uintptr_t)action->conf;\n-\t\t\tage_act = flow_aso_age_get_by_idx(dev, owner_idx);\n-\t\t\tif (flow->age == 0) {\n-\t\t\t\tflow->age = owner_idx;\n-\t\t\t\t__atomic_fetch_add(&age_act->refcnt, 1,\n-\t\t\t\t\t\t   __ATOMIC_RELAXED);\n-\t\t\t}\n-\t\t\tage_act_pos = actions_n++;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_AGE;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE_OPTION:\n+\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n+\t\t\ttunnel_item = items;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_AGE:\n-\t\t\tnon_shared_age = action->conf;\n-\t\t\tage_act_pos = actions_n++;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_AGE;\n+\t\tcase RTE_FLOW_ITEM_TYPE_NVGRE:\n+\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n+\t\t\ttunnel_item = items;\n \t\t\tbreak;\n-\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_COUNT:\n-\t\t\towner_idx = (uint32_t)(uintptr_t)action->conf;\n-\t\t\tcnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,\n-\t\t\t\t\t\t\t     NULL);\n-\t\t\tMLX5_ASSERT(cnt_act != NULL);\n-\t\t\t/**\n-\t\t\t * When creating meter drop flow in drop table, the\n-\t\t\t * counter should not overwrite the rte flow counter.\n-\t\t\t */\n-\t\t\tif (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&\n-\t\t\t    dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {\n-\t\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\t\t\tcnt_act->action;\n-\t\t\t} else {\n-\t\t\t\tif (flow->counter == 0) {\n-\t\t\t\t\tflow->counter = owner_idx;\n-\t\t\t\t\t__atomic_fetch_add\n-\t\t\t\t\t\t(&cnt_act->shared_info.refcnt,\n-\t\t\t\t\t\t 1, __ATOMIC_RELAXED);\n-\t\t\t\t}\n-\t\t\t\t/* Save information first, will apply later. */\n-\t\t\t\taction_flags |= MLX5_FLOW_ACTION_COUNT;\n-\t\t\t}\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\t\tflow_dv_translate_item_vxlan(dev, attr,\n+\t\t\t\t\t\t     match_mask, match_value,\n+\t\t\t\t\t\t     items, tunnel);\n+\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\t\tlast_item = MLX5_FLOW_LAYER_VXLAN;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n-\t\t\tif (!priv->sh->cdev->config.devx) {\n-\t\t\t\treturn rte_flow_error_set\n-\t\t\t\t\t      (error, ENOTSUP,\n-\t\t\t\t\t       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t\t       NULL,\n-\t\t\t\t\t       \"count action not supported\");\n-\t\t\t}\n-\t\t\t/* Save information first, will apply later. 
*/\n-\t\t\tcount = action->conf;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_COUNT;\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN_GPE:\n+\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\t\tlast_item = MLX5_FLOW_LAYER_VXLAN_GPE;\n+\t\t\ttunnel_item = items;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\t\tpriv->sh->pop_vlan_action;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GENEVE:\n+\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GENEVE;\n+\t\t\ttunnel_item = items;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:\n-\t\t\tif (!(action_flags &\n-\t\t\t      MLX5_FLOW_ACTION_OF_SET_VLAN_VID))\n-\t\t\t\tflow_dev_get_vlan_info_from_items(items, &vlan);\n-\t\t\tvlan.eth_proto = rte_be_to_cpu_16\n-\t\t\t     ((((const struct rte_flow_action_of_push_vlan *)\n-\t\t\t\t\t\t   actions->conf)->ethertype));\n-\t\t\tfound_action = mlx5_flow_find_action\n-\t\t\t\t\t(actions + 1,\n-\t\t\t\t\t RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);\n-\t\t\tif (found_action)\n-\t\t\t\tmlx5_update_vlan_vid_pcp(found_action, &vlan);\n-\t\t\tfound_action = mlx5_flow_find_action\n-\t\t\t\t\t(actions + 1,\n-\t\t\t\t\t RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);\n-\t\t\tif (found_action)\n-\t\t\t\tmlx5_update_vlan_vid_pcp(found_action, &vlan);\n-\t\t\tif (flow_dv_create_action_push_vlan\n-\t\t\t\t\t    (dev, attr, &vlan, dev_flow, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.push_vlan_res->action;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GENEVE_OPT:\n+\t\t\tret = flow_dv_translate_item_geneve_opt(dev, match_mask,\n+\t\t\t\t\t\t\t  match_value,\n+\t\t\t\t\t\t\t  items, error);\n+\t\t\tif (ret)\n+\t\t\t\treturn rte_flow_error_set(error, -ret,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n+\t\t\t\t\t\"cannot create GENEVE TLV option\");\n+\t\t\tflow->geneve_tlv_option = 1;\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GENEVE_OPT;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:\n-\t\t\t/* of_vlan_push action handled this action */\n-\t\t\tMLX5_ASSERT(action_flags &\n-\t\t\t\t    MLX5_FLOW_ACTION_OF_PUSH_VLAN);\n+\t\tcase RTE_FLOW_ITEM_TYPE_MPLS:\n+\t\t\tflow_dv_translate_item_mpls(match_mask, match_value,\n+\t\t\t\t\t\t    items, last_item, tunnel);\n+\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\t\tlast_item = MLX5_FLOW_LAYER_MPLS;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:\n-\t\t\tif (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)\n-\t\t\t\tbreak;\n-\t\t\tflow_dev_get_vlan_info_from_items(items, &vlan);\n-\t\t\tmlx5_update_vlan_vid_pcp(actions, &vlan);\n-\t\t\t/* If no VLAN push - this is a modify header action */\n-\t\t\tif (flow_dv_convert_action_modify_vlan_vid\n-\t\t\t\t\t\t(mhdr_res, actions, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;\n+\t\tcase RTE_FLOW_ITEM_TYPE_MARK:\n+\t\t\tflow_dv_translate_item_mark(dev, match_mask,\n+\t\t\t\t\t\t    match_value, items);\n+\t\t\tlast_item = MLX5_FLOW_ITEM_MARK;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\n-\t\tcase RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:\n-\t\t\tif (flow_dv_create_action_l2_encap(dev, actions,\n-\t\t\t\t\t\t\t   dev_flow,\n-\t\t\t\t\t\t\t   attr->transfer,\n-\t\t\t\t\t\t\t   error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.encap_decap->action;\n-\t\t\taction_flags |= 
MLX5_FLOW_ACTION_ENCAP;\n-\t\t\tif (action_flags & MLX5_FLOW_ACTION_SAMPLE)\n-\t\t\t\tsample_act->action_flags |=\n-\t\t\t\t\t\t\tMLX5_FLOW_ACTION_ENCAP;\n+\t\tcase RTE_FLOW_ITEM_TYPE_META:\n+\t\t\tflow_dv_translate_item_meta(dev, match_mask,\n+\t\t\t\t\t\t    match_value, attr, items);\n+\t\t\tlast_item = MLX5_FLOW_ITEM_METADATA;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:\n-\t\tcase RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:\n-\t\t\tif (flow_dv_create_action_l2_decap(dev, dev_flow,\n-\t\t\t\t\t\t\t   attr->transfer,\n-\t\t\t\t\t\t\t   error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.encap_decap->action;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n+\t\tcase RTE_FLOW_ITEM_TYPE_ICMP:\n+\t\t\tflow_dv_translate_item_icmp(match_mask, match_value,\n+\t\t\t\t\t\t    items, tunnel);\n+\t\t\tpriority = MLX5_PRIORITY_MAP_L4;\n+\t\t\tlast_item = MLX5_FLOW_LAYER_ICMP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_RAW_ENCAP:\n-\t\t\t/* Handle encap with preceding decap. */\n-\t\t\tif (action_flags & MLX5_FLOW_ACTION_DECAP) {\n-\t\t\t\tif (flow_dv_create_action_raw_encap\n-\t\t\t\t\t(dev, actions, dev_flow, attr, error))\n-\t\t\t\t\treturn -rte_errno;\n-\t\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.encap_decap->action;\n-\t\t\t} else {\n-\t\t\t\t/* Handle encap without preceding decap. */\n-\t\t\t\tif (flow_dv_create_action_l2_encap\n-\t\t\t\t    (dev, actions, dev_flow, attr->transfer,\n-\t\t\t\t     error))\n-\t\t\t\t\treturn -rte_errno;\n-\t\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.encap_decap->action;\n-\t\t\t}\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_ENCAP;\n-\t\t\tif (action_flags & MLX5_FLOW_ACTION_SAMPLE)\n-\t\t\t\tsample_act->action_flags |=\n-\t\t\t\t\t\t\tMLX5_FLOW_ACTION_ENCAP;\n+\t\tcase RTE_FLOW_ITEM_TYPE_ICMP6:\n+\t\t\tflow_dv_translate_item_icmp6(match_mask, match_value,\n+\t\t\t\t\t\t      items, tunnel);\n+\t\t\tpriority = MLX5_PRIORITY_MAP_L4;\n+\t\t\tlast_item = MLX5_FLOW_LAYER_ICMP6;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_RAW_DECAP:\n-\t\t\twhile ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)\n-\t\t\t\t;\n-\t\t\tif (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {\n-\t\t\t\tif (flow_dv_create_action_l2_decap\n-\t\t\t\t    (dev, dev_flow, attr->transfer, error))\n-\t\t\t\t\treturn -rte_errno;\n-\t\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.encap_decap->action;\n-\t\t\t}\n-\t\t\t/* If decap is followed by encap, handle it at encap. 
*/\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n+\t\tcase RTE_FLOW_ITEM_TYPE_TAG:\n+\t\t\tflow_dv_translate_item_tag(dev, match_mask,\n+\t\t\t\t\t\t   match_value, items);\n+\t\t\tlast_item = MLX5_FLOW_ITEM_TAG;\n \t\t\tbreak;\n-\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_JUMP:\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t(void *)(uintptr_t)action->conf;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_JUMP;\n+\t\tcase MLX5_RTE_FLOW_ITEM_TYPE_TAG:\n+\t\t\tflow_dv_translate_mlx5_item_tag(dev, match_mask,\n+\t\t\t\t\t\t\tmatch_value, items);\n+\t\t\tlast_item = MLX5_FLOW_ITEM_TAG;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_JUMP:\n-\t\t\tjump_group = ((const struct rte_flow_action_jump *)\n-\t\t\t\t\t\t\taction->conf)->group;\n-\t\t\tgrp_info.std_tbl_fix = 0;\n-\t\t\tif (dev_flow->skip_scale &\n-\t\t\t\t(1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))\n-\t\t\t\tgrp_info.skip_scale = 1;\n-\t\t\telse\n-\t\t\t\tgrp_info.skip_scale = 0;\n-\t\t\tret = mlx5_flow_group_to_table(dev, tunnel,\n-\t\t\t\t\t\t       jump_group,\n-\t\t\t\t\t\t       &table,\n-\t\t\t\t\t\t       &grp_info, error);\n+\t\tcase MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:\n+\t\t\tflow_dv_translate_item_tx_queue(dev, match_mask,\n+\t\t\t\t\t\t\tmatch_value,\n+\t\t\t\t\t\t\titems);\n+\t\t\tlast_item = MLX5_FLOW_ITEM_TX_QUEUE;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GTP:\n+\t\t\tflow_dv_translate_item_gtp(match_mask, match_value,\n+\t\t\t\t\t\t   items, tunnel);\n+\t\t\tpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GTP;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_GTP_PSC:\n+\t\t\tret = flow_dv_translate_item_gtp_psc(match_mask,\n+\t\t\t\t\t\t\t  match_value,\n+\t\t\t\t\t\t\t  items);\n \t\t\tif (ret)\n-\t\t\t\treturn ret;\n-\t\t\ttbl = flow_dv_tbl_resource_get(dev, table, attr->egress,\n-\t\t\t\t\t\t       attr->transfer,\n-\t\t\t\t\t\t       !!dev_flow->external,\n-\t\t\t\t\t\t       tunnel, jump_group, 0,\n-\t\t\t\t\t\t       0, error);\n-\t\t\tif (!tbl)\n-\t\t\t\treturn rte_flow_error_set\n-\t\t\t\t\t\t(error, errno,\n-\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t NULL,\n-\t\t\t\t\t\t \"cannot create jump action.\");\n-\t\t\tif (flow_dv_jump_tbl_resource_register\n-\t\t\t    (dev, tbl, dev_flow, error)) {\n-\t\t\t\tflow_dv_tbl_resource_release(MLX5_SH(dev), tbl);\n-\t\t\t\treturn rte_flow_error_set\n-\t\t\t\t\t\t(error, errno,\n-\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\t NULL,\n-\t\t\t\t\t\t \"cannot create jump action.\");\n+\t\t\t\treturn rte_flow_error_set(error, -ret,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n+\t\t\t\t\t\"cannot create GTP PSC item\");\n+\t\t\tlast_item = MLX5_FLOW_LAYER_GTP_PSC;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_ECPRI:\n+\t\t\tif (!mlx5_flex_parser_ecpri_exist(dev)) {\n+\t\t\t\t/* Create it only the first time to be used. */\n+\t\t\t\tret = mlx5_flex_parser_ecpri_alloc(dev);\n+\t\t\t\tif (ret)\n+\t\t\t\t\treturn rte_flow_error_set\n+\t\t\t\t\t\t(error, -ret,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\tNULL,\n+\t\t\t\t\t\t\"cannot create eCPRI parser\");\n+\t\t\t}\n+\t\t\tflow_dv_translate_item_ecpri(dev, match_mask,\n+\t\t\t\t\t\t     match_value, items,\n+\t\t\t\t\t\t     last_item);\n+\t\t\t/* No other protocol should follow eCPRI layer. 
*/\n+\t\t\tlast_item = MLX5_FLOW_LAYER_ECPRI;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_INTEGRITY:\n+\t\t\tflow_dv_translate_item_integrity(items, integrity_items,\n+\t\t\t\t\t\t\t &last_item);\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_CONNTRACK:\n+\t\t\tflow_dv_translate_item_aso_ct(dev, match_mask,\n+\t\t\t\t\t\t      match_value, items);\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_FLEX:\n+\t\t\tflow_dv_translate_item_flex(dev, match_mask,\n+\t\t\t\t\t\t    match_value, items,\n+\t\t\t\t\t\t    dev_flow, tunnel != 0);\n+\t\t\tlast_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :\n+\t\t\t\t    MLX5_FLOW_ITEM_OUTER_FLEX;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\t\titem_flags |= last_item;\n+\t}\n+\t/*\n+\t * When E-Switch mode is enabled, we have two cases where we need to\n+\t * set the source port manually.\n+\t * The first one, is in case of NIC ingress steering rule, and the\n+\t * second is E-Switch rule where no port_id item was found.\n+\t * In both cases the source port is set according the current port\n+\t * in use.\n+\t */\n+\tif (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&\n+\t    !(item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) && priv->sh->esw_mode &&\n+\t    !(attr->egress && !attr->transfer)) {\n+\t\tif (flow_dv_translate_item_port_id(dev, match_mask,\n+\t\t\t\t\t\t   match_value, NULL, attr))\n+\t\t\treturn -rte_errno;\n+\t}\n+\tif (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {\n+\t\tflow_dv_translate_item_integrity_post(match_mask, match_value,\n+\t\t\t\t\t\t      integrity_items,\n+\t\t\t\t\t\t      item_flags);\n+\t}\n+\tif (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)\n+\t\tflow_dv_translate_item_vxlan_gpe(match_mask, match_value,\n+\t\t\t\t\t\t tunnel_item, item_flags);\n+\telse if (item_flags & MLX5_FLOW_LAYER_GENEVE)\n+\t\tflow_dv_translate_item_geneve(match_mask, match_value,\n+\t\t\t\t\t      tunnel_item, item_flags);\n+\telse if (item_flags & MLX5_FLOW_LAYER_GRE) {\n+\t\tif (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)\n+\t\t\tflow_dv_translate_item_gre(match_mask, match_value,\n+\t\t\t\t\t\t   tunnel_item, item_flags);\n+\t\telse if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)\n+\t\t\tflow_dv_translate_item_nvgre(match_mask, match_value,\n+\t\t\t\t\t\t     tunnel_item, item_flags);\n+\t\telse if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)\n+\t\t\tflow_dv_translate_item_gre_option(match_mask, match_value,\n+\t\t\t\t\ttunnel_item, gre_item, item_flags);\n+\t\telse\n+\t\t\tMLX5_ASSERT(false);\n+\t}\n+\tmatcher->priority = priority;\n+#ifdef RTE_LIBRTE_MLX5_DEBUG\n+\tMLX5_ASSERT(!flow_dv_check_valid_spec(matcher->mask.buf,\n+\t\t\t\t\t      dev_flow->dv.value.buf));\n+#endif\n+\t/*\n+\t * Layers may be already initialized from prefix flow if this dev_flow\n+\t * is the suffix flow.\n+\t */\n+\thandle->layers |= item_flags;\n+\treturn ret;\n+}\n+\n+/**\n+ * Fill the flow with DV spec, lock free\n+ * (mutex should be acquired by caller).\n+ *\n+ * @param[in] dev\n+ *   Pointer to rte_eth_dev structure.\n+ * @param[in, out] dev_flow\n+ *   Pointer to the sub flow.\n+ * @param[in] attr\n+ *   Pointer to the flow attributes.\n+ * @param[in] items\n+ *   Pointer to the list of items.\n+ * @param[in] actions\n+ *   Pointer to the list of actions.\n+ * @param[out] error\n+ *   Pointer to the error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_dv_translate(struct rte_eth_dev *dev,\n+\t\t  struct mlx5_flow *dev_flow,\n+\t\t  const struct rte_flow_attr *attr,\n+\t\t  const 
struct rte_flow_item items[],\n+\t\t  const struct rte_flow_action actions[],\n+\t\t  struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_sh_config *dev_conf = &priv->sh->config;\n+\tstruct rte_flow *flow = dev_flow->flow;\n+\tstruct mlx5_flow_handle *handle = dev_flow->handle;\n+\tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n+\tstruct mlx5_flow_rss_desc *rss_desc;\n+\tuint64_t action_flags = 0;\n+\tstruct mlx5_flow_dv_matcher matcher = {\n+\t\t.mask = {\n+\t\t\t.size = sizeof(matcher.mask.buf),\n+\t\t},\n+\t};\n+\tint actions_n = 0;\n+\tbool actions_end = false;\n+\tunion {\n+\t\tstruct mlx5_flow_dv_modify_hdr_resource res;\n+\t\tuint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +\n+\t\t\t    sizeof(struct mlx5_modification_cmd) *\n+\t\t\t    (MLX5_MAX_MODIFY_NUM + 1)];\n+\t} mhdr_dummy;\n+\tstruct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;\n+\tconst struct rte_flow_action_count *count = NULL;\n+\tconst struct rte_flow_action_age *non_shared_age = NULL;\n+\tunion flow_dv_attr flow_attr = { .attr = 0 };\n+\tuint32_t tag_be;\n+\tunion mlx5_flow_tbl_key tbl_key;\n+\tuint32_t modify_action_position = UINT32_MAX;\n+\tstruct rte_vlan_hdr vlan = { 0 };\n+\tstruct mlx5_flow_dv_dest_array_resource mdest_res;\n+\tstruct mlx5_flow_dv_sample_resource sample_res;\n+\tvoid *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};\n+\tconst struct rte_flow_action_sample *sample = NULL;\n+\tstruct mlx5_flow_sub_actions_list *sample_act;\n+\tuint32_t sample_act_pos = UINT32_MAX;\n+\tuint32_t age_act_pos = UINT32_MAX;\n+\tuint32_t num_of_dest = 0;\n+\tint tmp_actions_n = 0;\n+\tuint32_t table;\n+\tint ret = 0;\n+\tconst struct mlx5_flow_tunnel *tunnel = NULL;\n+\tstruct flow_grp_info grp_info = {\n+\t\t.external = !!dev_flow->external,\n+\t\t.transfer = !!attr->transfer,\n+\t\t.fdb_def_rule = !!priv->fdb_def_rule,\n+\t\t.skip_scale = dev_flow->skip_scale &\n+\t\t\t(1 << MLX5_SCALE_FLOW_GROUP_BIT),\n+\t\t.std_tbl_fix = true,\n+\t};\n+\n+\tif (!wks)\n+\t\treturn rte_flow_error_set(error, ENOMEM,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"failed to push flow workspace\");\n+\trss_desc = &wks->rss_desc;\n+\tmemset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));\n+\tmemset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));\n+\tmhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :\n+\t\t\t\t\t   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;\n+\t/* update normal path action resource into last index of array */\n+\tsample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];\n+\tif (is_tunnel_offload_active(dev)) {\n+\t\tif (dev_flow->tunnel) {\n+\t\t\tRTE_VERIFY(dev_flow->tof_type ==\n+\t\t\t\t   MLX5_TUNNEL_OFFLOAD_MISS_RULE);\n+\t\t\ttunnel = dev_flow->tunnel;\n+\t\t} else {\n+\t\t\ttunnel = mlx5_get_tof(items, actions,\n+\t\t\t\t\t      &dev_flow->tof_type);\n+\t\t\tdev_flow->tunnel = tunnel;\n+\t\t}\n+\t\tgrp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate\n+\t\t\t\t\t(dev, attr, tunnel, dev_flow->tof_type);\n+\t}\n+\tmhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :\n+\t\t\t\t\t   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;\n+\tret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,\n+\t\t\t\t       &grp_info, error);\n+\tif (ret)\n+\t\treturn ret;\n+\tdev_flow->dv.group = table;\n+\tif (attr->transfer)\n+\t\tmhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;\n+\t/* number of actions must be set to 0 in case of dirty stack. 
*/\n+\tmhdr_res->actions_num = 0;\n+\tif (is_flow_tunnel_match_rule(dev_flow->tof_type)) {\n+\t\t/*\n+\t\t * do not add decap action if match rule drops packet\n+\t\t * HW rejects rules with decap & drop\n+\t\t *\n+\t\t * if tunnel match rule was inserted before matching tunnel set\n+\t\t * rule flow table used in the match rule must be registered.\n+\t\t * current implementation handles that in the\n+\t\t * flow_dv_match_register() at the function end.\n+\t\t */\n+\t\tbool add_decap = true;\n+\t\tconst struct rte_flow_action *ptr = actions;\n+\n+\t\tfor (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {\n+\t\t\tif (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {\n+\t\t\t\tadd_decap = false;\n+\t\t\t\tbreak;\n \t\t\t}\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\tdev_flow->dv.jump->action;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_JUMP;\n-\t\t\tdev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;\n-\t\t\tsample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;\n-\t\t\tnum_of_dest++;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_DST:\n-\t\t\tif (flow_dv_convert_action_modify_mac\n-\t\t\t\t\t(mhdr_res, actions, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= actions->type ==\n-\t\t\t\t\tRTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?\n-\t\t\t\t\tMLX5_FLOW_ACTION_SET_MAC_SRC :\n-\t\t\t\t\tMLX5_FLOW_ACTION_SET_MAC_DST;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:\n-\t\t\tif (flow_dv_convert_action_modify_ipv4\n-\t\t\t\t\t(mhdr_res, actions, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= actions->type ==\n-\t\t\t\t\tRTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?\n-\t\t\t\t\tMLX5_FLOW_ACTION_SET_IPV4_SRC :\n-\t\t\t\t\tMLX5_FLOW_ACTION_SET_IPV4_DST;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:\n-\t\t\tif (flow_dv_convert_action_modify_ipv6\n-\t\t\t\t\t(mhdr_res, actions, error))\n+\t\t}\n+\t\tif (add_decap) {\n+\t\t\tif (flow_dv_create_action_l2_decap(dev, dev_flow,\n+\t\t\t\t\t\t\t   attr->transfer,\n+\t\t\t\t\t\t\t   error))\n \t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= actions->type ==\n-\t\t\t\t\tRTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?\n-\t\t\t\t\tMLX5_FLOW_ACTION_SET_IPV6_SRC :\n-\t\t\t\t\tMLX5_FLOW_ACTION_SET_IPV6_DST;\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.encap_decap->action;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n+\t\t}\n+\t}\n+\tfor (; !actions_end ; actions++) {\n+\t\tconst struct rte_flow_action_queue *queue;\n+\t\tconst struct rte_flow_action_rss *rss;\n+\t\tconst struct rte_flow_action *action = actions;\n+\t\tconst uint8_t *rss_key;\n+\t\tstruct mlx5_flow_tbl_resource *tbl;\n+\t\tstruct mlx5_aso_age_action *age_act;\n+\t\tstruct mlx5_flow_counter *cnt_act;\n+\t\tuint32_t port_id = 0;\n+\t\tstruct mlx5_flow_dv_port_id_action_resource port_id_resource;\n+\t\tint action_type = actions->type;\n+\t\tconst struct rte_flow_action *found_action = NULL;\n+\t\tuint32_t jump_group = 0;\n+\t\tuint32_t owner_idx;\n+\t\tstruct mlx5_aso_ct_action *ct;\n+\n+\t\tif (!mlx5_flow_os_action_supported(action_type))\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  actions,\n+\t\t\t\t\t\t  \"action not supported\");\n+\t\tswitch (action_type) {\n+\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_SRC:\n-\t\tcase 
RTE_FLOW_ACTION_TYPE_SET_TP_DST:\n-\t\t\tif (flow_dv_convert_action_modify_tp\n-\t\t\t\t\t(mhdr_res, actions, items,\n-\t\t\t\t\t &flow_attr, dev_flow, !!(action_flags &\n-\t\t\t\t\t MLX5_FLOW_ACTION_DECAP), error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= actions->type ==\n-\t\t\t\t\tRTE_FLOW_ACTION_TYPE_SET_TP_SRC ?\n-\t\t\t\t\tMLX5_FLOW_ACTION_SET_TP_SRC :\n-\t\t\t\t\tMLX5_FLOW_ACTION_SET_TP_DST;\n+\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TTL:\n-\t\t\tif (flow_dv_convert_action_modify_dec_ttl\n-\t\t\t\t\t(mhdr_res, items, &flow_attr, dev_flow,\n-\t\t\t\t\t !!(action_flags &\n-\t\t\t\t\t MLX5_FLOW_ACTION_DECAP), error))\n+\t\tcase RTE_FLOW_ACTION_TYPE_PORT_ID:\n+\t\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n+\t\t\tif (flow_dv_translate_action_port_id(dev, action,\n+\t\t\t\t\t\t\t     &port_id, error))\n \t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_DEC_TTL;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TTL:\n-\t\t\tif (flow_dv_convert_action_modify_ttl\n-\t\t\t\t\t(mhdr_res, actions, items, &flow_attr,\n-\t\t\t\t\t dev_flow, !!(action_flags &\n-\t\t\t\t\t MLX5_FLOW_ACTION_DECAP), error))\n+\t\t\tport_id_resource.port_id = port_id;\n+\t\t\tMLX5_ASSERT(!handle->rix_port_id_action);\n+\t\t\tif (flow_dv_port_id_action_resource_register\n+\t\t\t    (dev, &port_id_resource, dev_flow, error))\n \t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_TTL;\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.port_id_action->action;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_PORT_ID;\n+\t\t\tdev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;\n+\t\t\tsample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;\n+\t\t\tnum_of_dest++;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:\n-\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:\n-\t\t\tif (flow_dv_convert_action_modify_tcp_seq\n-\t\t\t\t\t(mhdr_res, actions, error))\n+\t\tcase RTE_FLOW_ACTION_TYPE_FLAG:\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_FLAG;\n+\t\t\twks->mark = 1;\n+\t\t\tif (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {\n+\t\t\t\tstruct rte_flow_action_mark mark = {\n+\t\t\t\t\t.id = MLX5_FLOW_MARK_DEFAULT,\n+\t\t\t\t};\n+\n+\t\t\t\tif (flow_dv_convert_action_mark(dev, &mark,\n+\t\t\t\t\t\t\t\tmhdr_res,\n+\t\t\t\t\t\t\t\terror))\n+\t\t\t\t\treturn -rte_errno;\n+\t\t\t\taction_flags |= MLX5_FLOW_ACTION_MARK_EXT;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\ttag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);\n+\t\t\t/*\n+\t\t\t * Only one FLAG or MARK is supported per device flow\n+\t\t\t * right now. 
So the pointer to the tag resource must be\n+\t\t\t * zero before the register process.\n+\t\t\t */\n+\t\t\tMLX5_ASSERT(!handle->dvh.rix_tag);\n+\t\t\tif (flow_dv_tag_resource_register(dev, tag_be,\n+\t\t\t\t\t\t\t  dev_flow, error))\n \t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= actions->type ==\n-\t\t\t\t\tRTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?\n-\t\t\t\t\tMLX5_FLOW_ACTION_INC_TCP_SEQ :\n-\t\t\t\t\tMLX5_FLOW_ACTION_DEC_TCP_SEQ;\n+\t\t\tMLX5_ASSERT(dev_flow->dv.tag_resource);\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.tag_resource->action;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_MARK:\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_MARK;\n+\t\t\twks->mark = 1;\n+\t\t\tif (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {\n+\t\t\t\tconst struct rte_flow_action_mark *mark =\n+\t\t\t\t\t(const struct rte_flow_action_mark *)\n+\t\t\t\t\t\tactions->conf;\n \n-\t\tcase RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:\n-\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:\n-\t\t\tif (flow_dv_convert_action_modify_tcp_ack\n-\t\t\t\t\t(mhdr_res, actions, error))\n+\t\t\t\tif (flow_dv_convert_action_mark(dev, mark,\n+\t\t\t\t\t\t\t\tmhdr_res,\n+\t\t\t\t\t\t\t\terror))\n+\t\t\t\t\treturn -rte_errno;\n+\t\t\t\taction_flags |= MLX5_FLOW_ACTION_MARK_EXT;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\t/* Fall-through */\n+\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_MARK:\n+\t\t\t/* Legacy (non-extensive) MARK action. */\n+\t\t\ttag_be = mlx5_flow_mark_set\n+\t\t\t      (((const struct rte_flow_action_mark *)\n+\t\t\t       (actions->conf))->id);\n+\t\t\tMLX5_ASSERT(!handle->dvh.rix_tag);\n+\t\t\tif (flow_dv_tag_resource_register(dev, tag_be,\n+\t\t\t\t\t\t\t  dev_flow, error))\n \t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= actions->type ==\n-\t\t\t\t\tRTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?\n-\t\t\t\t\tMLX5_FLOW_ACTION_INC_TCP_ACK :\n-\t\t\t\t\tMLX5_FLOW_ACTION_DEC_TCP_ACK;\n+\t\t\tMLX5_ASSERT(dev_flow->dv.tag_resource);\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.tag_resource->action;\n \t\t\tbreak;\n-\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_TAG:\n-\t\t\tif (flow_dv_convert_action_set_reg\n-\t\t\t\t\t(mhdr_res, actions, error))\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_META:\n+\t\t\tif (flow_dv_convert_action_set_meta\n+\t\t\t\t(dev, mhdr_res, attr,\n+\t\t\t\t (const struct rte_flow_action_set_meta *)\n+\t\t\t\t  actions->conf, error))\n \t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_TAG;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_META;\n \t\t\tbreak;\n-\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:\n-\t\t\tif (flow_dv_convert_action_copy_mreg\n-\t\t\t\t\t(dev, mhdr_res, actions, error))\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_TAG:\n+\t\t\tif (flow_dv_convert_action_set_tag\n+\t\t\t\t(dev, mhdr_res,\n+\t\t\t\t (const struct rte_flow_action_set_tag *)\n+\t\t\t\t  actions->conf, error))\n \t\t\t\treturn -rte_errno;\n \t\t\taction_flags |= MLX5_FLOW_ACTION_SET_TAG;\n \t\t\tbreak;\n-\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;\n-\t\t\tdev_flow->handle->fate_action =\n-\t\t\t\t\tMLX5_FLOW_FATE_DEFAULT_MISS;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_METER:\n-\t\t\tif (!wks->fm)\n-\t\t\t\treturn rte_flow_error_set(error, rte_errno,\n-\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\tNULL, \"Failed to get meter in flow.\");\n-\t\t\t/* Set the meter action. 
*/\n-\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\twks->fm->meter_action_g;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_METER;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:\n-\t\t\tif (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,\n-\t\t\t\t\t\t\t      actions, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:\n-\t\t\tif (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,\n-\t\t\t\t\t\t\t      actions, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;\n+\t\tcase RTE_FLOW_ACTION_TYPE_DROP:\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DROP;\n+\t\t\tdev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_SAMPLE:\n-\t\t\tsample_act_pos = actions_n;\n-\t\t\tsample = (const struct rte_flow_action_sample *)\n-\t\t\t\t action->conf;\n-\t\t\tactions_n++;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_SAMPLE;\n-\t\t\t/* put encap action into group if work with port id */\n-\t\t\tif ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&\n-\t\t\t    (action_flags & MLX5_FLOW_ACTION_PORT_ID))\n-\t\t\t\tsample_act->action_flags |=\n-\t\t\t\t\t\t\tMLX5_FLOW_ACTION_ENCAP;\n+\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\t\tqueue = actions->conf;\n+\t\t\trss_desc->queue_num = 1;\n+\t\t\trss_desc->queue[0] = queue->index;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_QUEUE;\n+\t\t\tdev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;\n+\t\t\tsample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;\n+\t\t\tnum_of_dest++;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:\n-\t\t\tif (flow_dv_convert_action_modify_field\n-\t\t\t\t\t(dev, mhdr_res, actions, attr, error))\n-\t\t\t\treturn -rte_errno;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RSS:\n+\t\t\trss = actions->conf;\n+\t\t\tmemcpy(rss_desc->queue, rss->queue,\n+\t\t\t       rss->queue_num * sizeof(uint16_t));\n+\t\t\trss_desc->queue_num = rss->queue_num;\n+\t\t\t/* NULL RSS key indicates default RSS key. */\n+\t\t\trss_key = !rss->key ? 
rss_hash_default_key : rss->key;\n+\t\t\tmemcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);\n+\t\t\t/*\n+\t\t\t * rss->level and rss.types should be set in advance\n+\t\t\t * when expanding items for RSS.\n+\t\t\t */\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_RSS;\n+\t\t\tdev_flow->handle->fate_action = rss_desc->shared_rss ?\n+\t\t\t\tMLX5_FLOW_FATE_SHARED_RSS :\n+\t\t\t\tMLX5_FLOW_FATE_QUEUE;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_AGE:\n \t\t\towner_idx = (uint32_t)(uintptr_t)action->conf;\n-\t\t\tct = flow_aso_ct_get_by_idx(dev, owner_idx);\n-\t\t\tif (!ct)\n-\t\t\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\tNULL,\n-\t\t\t\t\t\t\"Failed to get CT object.\");\n-\t\t\tif (mlx5_aso_ct_available(priv->sh, ct))\n-\t\t\t\treturn rte_flow_error_set(error, rte_errno,\n-\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n-\t\t\t\t\t\tNULL,\n-\t\t\t\t\t\t\"CT is unavailable.\");\n-\t\t\tif (ct->is_original)\n-\t\t\t\tdev_flow->dv.actions[actions_n] =\n-\t\t\t\t\t\t\tct->dr_action_orig;\n-\t\t\telse\n-\t\t\t\tdev_flow->dv.actions[actions_n] =\n-\t\t\t\t\t\t\tct->dr_action_rply;\n-\t\t\tif (flow->ct == 0) {\n-\t\t\t\tflow->indirect_type =\n-\t\t\t\t\t\tMLX5_INDIRECT_ACTION_TYPE_CT;\n-\t\t\t\tflow->ct = owner_idx;\n-\t\t\t\t__atomic_fetch_add(&ct->refcnt, 1,\n+\t\t\tage_act = flow_aso_age_get_by_idx(dev, owner_idx);\n+\t\t\tif (flow->age == 0) {\n+\t\t\t\tflow->age = owner_idx;\n+\t\t\t\t__atomic_fetch_add(&age_act->refcnt, 1,\n \t\t\t\t\t\t   __ATOMIC_RELAXED);\n \t\t\t}\n-\t\t\tactions_n++;\n-\t\t\taction_flags |= MLX5_FLOW_ACTION_CT;\n+\t\t\tage_act_pos = actions_n++;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_AGE;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:\n \t\t\tdev_flow->dv.actions[actions_n] =\n@@ -13752,396 +13757,435 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\t\tdev_flow->handle->fate_action =\n \t\t\t\t\tMLX5_FLOW_FATE_SEND_TO_KERNEL;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ACTION_TYPE_END:\n-\t\t\tactions_end = true;\n-\t\t\tif (mhdr_res->actions_num) {\n-\t\t\t\t/* create modify action if needed. */\n-\t\t\t\tif (flow_dv_modify_hdr_resource_register\n-\t\t\t\t\t(dev, mhdr_res, dev_flow, error))\n-\t\t\t\t\treturn -rte_errno;\n-\t\t\t\tdev_flow->dv.actions[modify_action_position] =\n-\t\t\t\t\thandle->dvh.modify_hdr->action;\n-\t\t\t}\n-\t\t\t/*\n-\t\t\t * Handle AGE and COUNT action by single HW counter\n-\t\t\t * when they are not shared.\n+\t\tcase RTE_FLOW_ACTION_TYPE_AGE:\n+\t\t\tnon_shared_age = action->conf;\n+\t\t\tage_act_pos = actions_n++;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_AGE;\n+\t\t\tbreak;\n+\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_COUNT:\n+\t\t\towner_idx = (uint32_t)(uintptr_t)action->conf;\n+\t\t\tcnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,\n+\t\t\t\t\t\t\t     NULL);\n+\t\t\tMLX5_ASSERT(cnt_act != NULL);\n+\t\t\t/**\n+\t\t\t * When creating meter drop flow in drop table, the\n+\t\t\t * counter should not overwrite the rte flow counter.\n \t\t\t */\n-\t\t\tif (action_flags & MLX5_FLOW_ACTION_AGE) {\n-\t\t\t\tif ((non_shared_age && count) ||\n-\t\t\t\t    !flow_hit_aso_supported(priv->sh, attr)) {\n-\t\t\t\t\t/* Creates age by counters. 
*/\n-\t\t\t\t\tcnt_act = flow_dv_prepare_counter\n-\t\t\t\t\t\t\t\t(dev, dev_flow,\n-\t\t\t\t\t\t\t\t flow, count,\n-\t\t\t\t\t\t\t\t non_shared_age,\n-\t\t\t\t\t\t\t\t error);\n-\t\t\t\t\tif (!cnt_act)\n-\t\t\t\t\t\treturn -rte_errno;\n-\t\t\t\t\tdev_flow->dv.actions[age_act_pos] =\n-\t\t\t\t\t\t\t\tcnt_act->action;\n-\t\t\t\t\tbreak;\n-\t\t\t\t}\n-\t\t\t\tif (!flow->age && non_shared_age) {\n-\t\t\t\t\tflow->age = flow_dv_aso_age_alloc\n-\t\t\t\t\t\t\t\t(dev, error);\n-\t\t\t\t\tif (!flow->age)\n-\t\t\t\t\t\treturn -rte_errno;\n-\t\t\t\t\tflow_dv_aso_age_params_init\n-\t\t\t\t\t\t    (dev, flow->age,\n-\t\t\t\t\t\t     non_shared_age->context ?\n-\t\t\t\t\t\t     non_shared_age->context :\n-\t\t\t\t\t\t     (void *)(uintptr_t)\n-\t\t\t\t\t\t     (dev_flow->flow_idx),\n-\t\t\t\t\t\t     non_shared_age->timeout);\n-\t\t\t\t}\n-\t\t\t\tage_act = flow_aso_age_get_by_idx(dev,\n-\t\t\t\t\t\t\t\t  flow->age);\n-\t\t\t\tdev_flow->dv.actions[age_act_pos] =\n-\t\t\t\t\t\t\t     age_act->dr_action;\n-\t\t\t}\n-\t\t\tif (action_flags & MLX5_FLOW_ACTION_COUNT) {\n-\t\t\t\t/*\n-\t\t\t\t * Create one count action, to be used\n-\t\t\t\t * by all sub-flows.\n-\t\t\t\t */\n-\t\t\t\tcnt_act = flow_dv_prepare_counter(dev, dev_flow,\n-\t\t\t\t\t\t\t\t  flow, count,\n-\t\t\t\t\t\t\t\t  NULL, error);\n-\t\t\t\tif (!cnt_act)\n-\t\t\t\t\treturn -rte_errno;\n+\t\t\tif (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&\n+\t\t\t    dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {\n \t\t\t\tdev_flow->dv.actions[actions_n++] =\n-\t\t\t\t\t\t\t\tcnt_act->action;\n+\t\t\t\t\t\t\tcnt_act->action;\n+\t\t\t} else {\n+\t\t\t\tif (flow->counter == 0) {\n+\t\t\t\t\tflow->counter = owner_idx;\n+\t\t\t\t\t__atomic_fetch_add\n+\t\t\t\t\t\t(&cnt_act->shared_info.refcnt,\n+\t\t\t\t\t\t 1, __ATOMIC_RELAXED);\n+\t\t\t\t}\n+\t\t\t\t/* Save information first, will apply later. */\n+\t\t\t\taction_flags |= MLX5_FLOW_ACTION_COUNT;\n \t\t\t}\n-\t\tdefault:\n \t\t\tbreak;\n-\t\t}\n-\t\tif (mhdr_res->actions_num &&\n-\t\t    modify_action_position == UINT32_MAX)\n-\t\t\tmodify_action_position = actions_n++;\n-\t}\n-\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\n-\t\tint tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n-\t\tint item_type = items->type;\n-\n-\t\tif (!mlx5_flow_os_item_supported(item_type))\n-\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n-\t\t\t\t\t\t  NULL, \"item not supported\");\n-\t\tswitch (item_type) {\n-\t\tcase RTE_FLOW_ITEM_TYPE_ESP:\n-\t\t\tflow_dv_translate_item_esp(match_mask, match_value,\n-\t\t\t\t\t\t   items, tunnel);\n-\t\t\tmatcher.priority = MLX5_PRIORITY_MAP_L4;\n-\t\t\tlast_item = MLX5_FLOW_ITEM_ESP;\n+\t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n+\t\t\tif (!priv->sh->cdev->config.devx) {\n+\t\t\t\treturn rte_flow_error_set\n+\t\t\t\t\t      (error, ENOTSUP,\n+\t\t\t\t\t       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t       NULL,\n+\t\t\t\t\t       \"count action not supported\");\n+\t\t\t}\n+\t\t\t/* Save information first, will apply later. 
*/\n+\t\t\tcount = action->conf;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_COUNT;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_PORT_ID:\n-\t\t\tflow_dv_translate_item_port_id\n-\t\t\t\t(dev, match_mask, match_value, items, attr);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_PORT_ID;\n+\t\tcase RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\t\tpriv->sh->pop_vlan_action;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:\n-\t\t\tflow_dv_translate_item_represented_port\n-\t\t\t\t(dev, match_mask, match_value, items, attr);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;\n+\t\tcase RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:\n+\t\t\tif (!(action_flags &\n+\t\t\t      MLX5_FLOW_ACTION_OF_SET_VLAN_VID))\n+\t\t\t\tflow_dev_get_vlan_info_from_items(items, &vlan);\n+\t\t\tvlan.eth_proto = rte_be_to_cpu_16\n+\t\t\t     ((((const struct rte_flow_action_of_push_vlan *)\n+\t\t\t\t\t\t   actions->conf)->ethertype));\n+\t\t\tfound_action = mlx5_flow_find_action\n+\t\t\t\t\t(actions + 1,\n+\t\t\t\t\t RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);\n+\t\t\tif (found_action)\n+\t\t\t\tmlx5_update_vlan_vid_pcp(found_action, &vlan);\n+\t\t\tfound_action = mlx5_flow_find_action\n+\t\t\t\t\t(actions + 1,\n+\t\t\t\t\t RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);\n+\t\t\tif (found_action)\n+\t\t\t\tmlx5_update_vlan_vid_pcp(found_action, &vlan);\n+\t\t\tif (flow_dv_create_action_push_vlan\n+\t\t\t\t\t    (dev, attr, &vlan, dev_flow, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.push_vlan_res->action;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n-\t\t\tflow_dv_translate_item_eth(match_mask, match_value,\n-\t\t\t\t\t\t   items, tunnel,\n-\t\t\t\t\t\t   dev_flow->dv.group);\n-\t\t\tmatcher.priority = action_flags &\n-\t\t\t\t\tMLX5_FLOW_ACTION_DEFAULT_MISS &&\n-\t\t\t\t\t!dev_flow->external ?\n-\t\t\t\t\tMLX5_PRIORITY_MAP_L3 :\n-\t\t\t\t\tMLX5_PRIORITY_MAP_L2;\n-\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :\n-\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L2;\n+\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:\n+\t\t\t/* of_vlan_push action handled this action */\n+\t\t\tMLX5_ASSERT(action_flags &\n+\t\t\t\t    MLX5_FLOW_ACTION_OF_PUSH_VLAN);\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n-\t\t\tflow_dv_translate_item_vlan(dev_flow,\n-\t\t\t\t\t\t    match_mask, match_value,\n-\t\t\t\t\t\t    items, tunnel,\n-\t\t\t\t\t\t    dev_flow->dv.group);\n-\t\t\tmatcher.priority = MLX5_PRIORITY_MAP_L2;\n-\t\t\tlast_item = tunnel ? 
(MLX5_FLOW_LAYER_INNER_L2 |\n-\t\t\t\t\t      MLX5_FLOW_LAYER_INNER_VLAN) :\n-\t\t\t\t\t     (MLX5_FLOW_LAYER_OUTER_L2 |\n-\t\t\t\t\t      MLX5_FLOW_LAYER_OUTER_VLAN);\n+\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:\n+\t\t\tif (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)\n+\t\t\t\tbreak;\n+\t\t\tflow_dev_get_vlan_info_from_items(items, &vlan);\n+\t\t\tmlx5_update_vlan_vid_pcp(actions, &vlan);\n+\t\t\t/* If no VLAN push - this is a modify header action */\n+\t\t\tif (flow_dv_convert_action_modify_vlan_vid\n+\t\t\t\t\t\t(mhdr_res, actions, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n-\t\t\tmlx5_flow_tunnel_ip_check(items, next_protocol,\n-\t\t\t\t\t\t  &item_flags, &tunnel);\n-\t\t\tflow_dv_translate_item_ipv4(match_mask, match_value,\n-\t\t\t\t\t\t    items, tunnel,\n-\t\t\t\t\t\t    dev_flow->dv.group);\n-\t\t\tmatcher.priority = MLX5_PRIORITY_MAP_L3;\n-\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n-\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV4;\n-\t\t\tif (items->mask != NULL &&\n-\t\t\t    ((const struct rte_flow_item_ipv4 *)\n-\t\t\t     items->mask)->hdr.next_proto_id) {\n-\t\t\t\tnext_protocol =\n-\t\t\t\t\t((const struct rte_flow_item_ipv4 *)\n-\t\t\t\t\t (items->spec))->hdr.next_proto_id;\n-\t\t\t\tnext_protocol &=\n-\t\t\t\t\t((const struct rte_flow_item_ipv4 *)\n-\t\t\t\t\t (items->mask))->hdr.next_proto_id;\n-\t\t\t} else {\n-\t\t\t\t/* Reset for inner layer. */\n-\t\t\t\tnext_protocol = 0xff;\n-\t\t\t}\n+\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\n+\t\tcase RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:\n+\t\t\tif (flow_dv_create_action_l2_encap(dev, actions,\n+\t\t\t\t\t\t\t   dev_flow,\n+\t\t\t\t\t\t\t   attr->transfer,\n+\t\t\t\t\t\t\t   error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.encap_decap->action;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_ENCAP;\n+\t\t\tif (action_flags & MLX5_FLOW_ACTION_SAMPLE)\n+\t\t\t\tsample_act->action_flags |=\n+\t\t\t\t\t\t\tMLX5_FLOW_ACTION_ENCAP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n-\t\t\tmlx5_flow_tunnel_ip_check(items, next_protocol,\n-\t\t\t\t\t\t  &item_flags, &tunnel);\n-\t\t\tflow_dv_translate_item_ipv6(match_mask, match_value,\n-\t\t\t\t\t\t    items, tunnel,\n-\t\t\t\t\t\t    dev_flow->dv.group);\n-\t\t\tmatcher.priority = MLX5_PRIORITY_MAP_L3;\n-\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :\n-\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L3_IPV6;\n-\t\t\tif (items->mask != NULL &&\n-\t\t\t    ((const struct rte_flow_item_ipv6 *)\n-\t\t\t     items->mask)->hdr.proto) {\n-\t\t\t\tnext_protocol =\n-\t\t\t\t\t((const struct rte_flow_item_ipv6 *)\n-\t\t\t\t\t items->spec)->hdr.proto;\n-\t\t\t\tnext_protocol &=\n-\t\t\t\t\t((const struct rte_flow_item_ipv6 *)\n-\t\t\t\t\t items->mask)->hdr.proto;\n-\t\t\t} else {\n-\t\t\t\t/* Reset for inner layer. 
*/\n-\t\t\t\tnext_protocol = 0xff;\n-\t\t\t}\n+\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:\n+\t\tcase RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:\n+\t\t\tif (flow_dv_create_action_l2_decap(dev, dev_flow,\n+\t\t\t\t\t\t\t   attr->transfer,\n+\t\t\t\t\t\t\t   error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.encap_decap->action;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:\n-\t\t\tflow_dv_translate_item_ipv6_frag_ext(match_mask,\n-\t\t\t\t\t\t\t     match_value,\n-\t\t\t\t\t\t\t     items, tunnel);\n-\t\t\tlast_item = tunnel ?\n-\t\t\t\t\tMLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :\n-\t\t\t\t\tMLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;\n-\t\t\tif (items->mask != NULL &&\n-\t\t\t    ((const struct rte_flow_item_ipv6_frag_ext *)\n-\t\t\t     items->mask)->hdr.next_header) {\n-\t\t\t\tnext_protocol =\n-\t\t\t\t((const struct rte_flow_item_ipv6_frag_ext *)\n-\t\t\t\t items->spec)->hdr.next_header;\n-\t\t\t\tnext_protocol &=\n-\t\t\t\t((const struct rte_flow_item_ipv6_frag_ext *)\n-\t\t\t\t items->mask)->hdr.next_header;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RAW_ENCAP:\n+\t\t\t/* Handle encap with preceding decap. */\n+\t\t\tif (action_flags & MLX5_FLOW_ACTION_DECAP) {\n+\t\t\t\tif (flow_dv_create_action_raw_encap\n+\t\t\t\t\t(dev, actions, dev_flow, attr, error))\n+\t\t\t\t\treturn -rte_errno;\n+\t\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.encap_decap->action;\n \t\t\t} else {\n-\t\t\t\t/* Reset for inner layer. */\n-\t\t\t\tnext_protocol = 0xff;\n+\t\t\t\t/* Handle encap without preceding decap. */\n+\t\t\t\tif (flow_dv_create_action_l2_encap\n+\t\t\t\t    (dev, actions, dev_flow, attr->transfer,\n+\t\t\t\t     error))\n+\t\t\t\t\treturn -rte_errno;\n+\t\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.encap_decap->action;\n \t\t\t}\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_ENCAP;\n+\t\t\tif (action_flags & MLX5_FLOW_ACTION_SAMPLE)\n+\t\t\t\tsample_act->action_flags |=\n+\t\t\t\t\t\t\tMLX5_FLOW_ACTION_ENCAP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n-\t\t\tflow_dv_translate_item_tcp(match_mask, match_value,\n-\t\t\t\t\t\t   items, tunnel);\n-\t\t\tmatcher.priority = MLX5_PRIORITY_MAP_L4;\n-\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :\n-\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L4_TCP;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n-\t\t\tflow_dv_translate_item_udp(match_mask, match_value,\n-\t\t\t\t\t\t   items, tunnel);\n-\t\t\tmatcher.priority = MLX5_PRIORITY_MAP_L4;\n-\t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :\n-\t\t\t\t\t     MLX5_FLOW_LAYER_OUTER_L4_UDP;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GRE:\n-\t\t\tmatcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n-\t\t\ttunnel_item = items;\n-\t\t\tgre_item = items;\n+\t\tcase RTE_FLOW_ACTION_TYPE_RAW_DECAP:\n+\t\t\twhile ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)\n+\t\t\t\t;\n+\t\t\tif (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {\n+\t\t\t\tif (flow_dv_create_action_l2_decap\n+\t\t\t\t    (dev, dev_flow, attr->transfer, error))\n+\t\t\t\t\treturn -rte_errno;\n+\t\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.encap_decap->action;\n+\t\t\t}\n+\t\t\t/* If decap is followed by encap, handle it at encap. 
*/\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DECAP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GRE_KEY:\n-\t\t\tflow_dv_translate_item_gre_key(match_mask,\n-\t\t\t\t\t\t       match_value, items);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GRE_KEY;\n+\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_JUMP:\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t(void *)(uintptr_t)action->conf;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_JUMP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GRE_OPTION:\n-\t\t\tmatcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n-\t\t\ttunnel_item = items;\n+\t\tcase RTE_FLOW_ACTION_TYPE_JUMP:\n+\t\t\tjump_group = ((const struct rte_flow_action_jump *)\n+\t\t\t\t\t\t\taction->conf)->group;\n+\t\t\tgrp_info.std_tbl_fix = 0;\n+\t\t\tif (dev_flow->skip_scale &\n+\t\t\t\t(1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))\n+\t\t\t\tgrp_info.skip_scale = 1;\n+\t\t\telse\n+\t\t\t\tgrp_info.skip_scale = 0;\n+\t\t\tret = mlx5_flow_group_to_table(dev, tunnel,\n+\t\t\t\t\t\t       jump_group,\n+\t\t\t\t\t\t       &table,\n+\t\t\t\t\t\t       &grp_info, error);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t\ttbl = flow_dv_tbl_resource_get(dev, table, attr->egress,\n+\t\t\t\t\t\t       attr->transfer,\n+\t\t\t\t\t\t       !!dev_flow->external,\n+\t\t\t\t\t\t       tunnel, jump_group, 0,\n+\t\t\t\t\t\t       0, error);\n+\t\t\tif (!tbl)\n+\t\t\t\treturn rte_flow_error_set\n+\t\t\t\t\t\t(error, errno,\n+\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t NULL,\n+\t\t\t\t\t\t \"cannot create jump action.\");\n+\t\t\tif (flow_dv_jump_tbl_resource_register\n+\t\t\t    (dev, tbl, dev_flow, error)) {\n+\t\t\t\tflow_dv_tbl_resource_release(MLX5_SH(dev), tbl);\n+\t\t\t\treturn rte_flow_error_set\n+\t\t\t\t\t\t(error, errno,\n+\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t NULL,\n+\t\t\t\t\t\t \"cannot create jump action.\");\n+\t\t\t}\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\tdev_flow->dv.jump->action;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_JUMP;\n+\t\t\tdev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;\n+\t\t\tsample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;\n+\t\t\tnum_of_dest++;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_NVGRE:\n-\t\t\tmatcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GRE;\n-\t\t\ttunnel_item = items;\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_DST:\n+\t\t\tif (flow_dv_convert_action_modify_mac\n+\t\t\t\t\t(mhdr_res, actions, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= actions->type ==\n+\t\t\t\t\tRTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?\n+\t\t\t\t\tMLX5_FLOW_ACTION_SET_MAC_SRC :\n+\t\t\t\t\tMLX5_FLOW_ACTION_SET_MAC_DST;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n-\t\t\tflow_dv_translate_item_vxlan(dev, attr,\n-\t\t\t\t\t\t     match_mask, match_value,\n-\t\t\t\t\t\t     items, tunnel);\n-\t\t\tmatcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_VXLAN;\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:\n+\t\t\tif (flow_dv_convert_action_modify_ipv4\n+\t\t\t\t\t(mhdr_res, actions, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= actions->type ==\n+\t\t\t\t\tRTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?\n+\t\t\t\t\tMLX5_FLOW_ACTION_SET_IPV4_SRC :\n+\t\t\t\t\tMLX5_FLOW_ACTION_SET_IPV4_DST;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN_GPE:\n-\t\t\tmatcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = 
MLX5_FLOW_LAYER_VXLAN_GPE;\n-\t\t\ttunnel_item = items;\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:\n+\t\t\tif (flow_dv_convert_action_modify_ipv6\n+\t\t\t\t\t(mhdr_res, actions, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= actions->type ==\n+\t\t\t\t\tRTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?\n+\t\t\t\t\tMLX5_FLOW_ACTION_SET_IPV6_SRC :\n+\t\t\t\t\tMLX5_FLOW_ACTION_SET_IPV6_DST;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GENEVE:\n-\t\t\tmatcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GENEVE;\n-\t\t\ttunnel_item = items;\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_SRC:\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_DST:\n+\t\t\tif (flow_dv_convert_action_modify_tp\n+\t\t\t\t\t(mhdr_res, actions, items,\n+\t\t\t\t\t &flow_attr, dev_flow, !!(action_flags &\n+\t\t\t\t\t MLX5_FLOW_ACTION_DECAP), error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= actions->type ==\n+\t\t\t\t\tRTE_FLOW_ACTION_TYPE_SET_TP_SRC ?\n+\t\t\t\t\tMLX5_FLOW_ACTION_SET_TP_SRC :\n+\t\t\t\t\tMLX5_FLOW_ACTION_SET_TP_DST;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GENEVE_OPT:\n-\t\t\tret = flow_dv_translate_item_geneve_opt(dev, match_mask,\n-\t\t\t\t\t\t\t  match_value,\n-\t\t\t\t\t\t\t  items, error);\n-\t\t\tif (ret)\n-\t\t\t\treturn rte_flow_error_set(error, -ret,\n-\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n-\t\t\t\t\t\"cannot create GENEVE TLV option\");\n-\t\t\tflow->geneve_tlv_option = 1;\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GENEVE_OPT;\n+\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TTL:\n+\t\t\tif (flow_dv_convert_action_modify_dec_ttl\n+\t\t\t\t\t(mhdr_res, items, &flow_attr, dev_flow,\n+\t\t\t\t\t !!(action_flags &\n+\t\t\t\t\t MLX5_FLOW_ACTION_DECAP), error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DEC_TTL;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_MPLS:\n-\t\t\tflow_dv_translate_item_mpls(match_mask, match_value,\n-\t\t\t\t\t\t    items, last_item, tunnel);\n-\t\t\tmatcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_MPLS;\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_TTL:\n+\t\t\tif (flow_dv_convert_action_modify_ttl\n+\t\t\t\t\t(mhdr_res, actions, items, &flow_attr,\n+\t\t\t\t\t dev_flow, !!(action_flags &\n+\t\t\t\t\t MLX5_FLOW_ACTION_DECAP), error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_TTL;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_MARK:\n-\t\t\tflow_dv_translate_item_mark(dev, match_mask,\n-\t\t\t\t\t\t    match_value, items);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_MARK;\n+\t\tcase RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:\n+\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:\n+\t\t\tif (flow_dv_convert_action_modify_tcp_seq\n+\t\t\t\t\t(mhdr_res, actions, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= actions->type ==\n+\t\t\t\t\tRTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?\n+\t\t\t\t\tMLX5_FLOW_ACTION_INC_TCP_SEQ :\n+\t\t\t\t\tMLX5_FLOW_ACTION_DEC_TCP_SEQ;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_META:\n-\t\t\tflow_dv_translate_item_meta(dev, match_mask,\n-\t\t\t\t\t\t    match_value, attr, items);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_METADATA;\n+\n+\t\tcase RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:\n+\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:\n+\t\t\tif (flow_dv_convert_action_modify_tcp_ack\n+\t\t\t\t\t(mhdr_res, actions, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= actions->type ==\n+\t\t\t\t\tRTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?\n+\t\t\t\t\tMLX5_FLOW_ACTION_INC_TCP_ACK :\n+\t\t\t\t\tMLX5_FLOW_ACTION_DEC_TCP_ACK;\n 
\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_ICMP:\n-\t\t\tflow_dv_translate_item_icmp(match_mask, match_value,\n-\t\t\t\t\t\t    items, tunnel);\n-\t\t\tmatcher.priority = MLX5_PRIORITY_MAP_L4;\n-\t\t\tlast_item = MLX5_FLOW_LAYER_ICMP;\n+\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_TAG:\n+\t\t\tif (flow_dv_convert_action_set_reg\n+\t\t\t\t\t(mhdr_res, actions, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_TAG;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_ICMP6:\n-\t\t\tflow_dv_translate_item_icmp6(match_mask, match_value,\n-\t\t\t\t\t\t      items, tunnel);\n-\t\t\tmatcher.priority = MLX5_PRIORITY_MAP_L4;\n-\t\t\tlast_item = MLX5_FLOW_LAYER_ICMP6;\n+\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:\n+\t\t\tif (flow_dv_convert_action_copy_mreg\n+\t\t\t\t\t(dev, mhdr_res, actions, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_TAG;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_TAG:\n-\t\t\tflow_dv_translate_item_tag(dev, match_mask,\n-\t\t\t\t\t\t   match_value, items);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_TAG;\n+\t\tcase MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;\n+\t\t\tdev_flow->handle->fate_action =\n+\t\t\t\t\tMLX5_FLOW_FATE_DEFAULT_MISS;\n \t\t\tbreak;\n-\t\tcase MLX5_RTE_FLOW_ITEM_TYPE_TAG:\n-\t\t\tflow_dv_translate_mlx5_item_tag(dev, match_mask,\n-\t\t\t\t\t\t\tmatch_value, items);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_TAG;\n+\t\tcase RTE_FLOW_ACTION_TYPE_METER:\n+\t\t\tif (!wks->fm)\n+\t\t\t\treturn rte_flow_error_set(error, rte_errno,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\tNULL, \"Failed to get meter in flow.\");\n+\t\t\t/* Set the meter action. */\n+\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\twks->fm->meter_action_g;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_METER;\n \t\t\tbreak;\n-\t\tcase MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:\n-\t\t\tflow_dv_translate_item_tx_queue(dev, match_mask,\n-\t\t\t\t\t\t\tmatch_value,\n-\t\t\t\t\t\t\titems);\n-\t\t\tlast_item = MLX5_FLOW_ITEM_TX_QUEUE;\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:\n+\t\t\tif (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,\n+\t\t\t\t\t\t\t      actions, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GTP:\n-\t\t\tflow_dv_translate_item_gtp(match_mask, match_value,\n-\t\t\t\t\t\t   items, tunnel);\n-\t\t\tmatcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GTP;\n+\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:\n+\t\t\tif (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,\n+\t\t\t\t\t\t\t      actions, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_GTP_PSC:\n-\t\t\tret = flow_dv_translate_item_gtp_psc(match_mask,\n-\t\t\t\t\t\t\t  match_value,\n-\t\t\t\t\t\t\t  items);\n-\t\t\tif (ret)\n-\t\t\t\treturn rte_flow_error_set(error, -ret,\n-\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n-\t\t\t\t\t\"cannot create GTP PSC item\");\n-\t\t\tlast_item = MLX5_FLOW_LAYER_GTP_PSC;\n+\t\tcase RTE_FLOW_ACTION_TYPE_SAMPLE:\n+\t\t\tsample_act_pos = actions_n;\n+\t\t\tsample = (const struct rte_flow_action_sample *)\n+\t\t\t\t action->conf;\n+\t\t\tactions_n++;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_SAMPLE;\n+\t\t\t/* put encap action into group if work with port id */\n+\t\t\tif ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&\n+\t\t\t    (action_flags & MLX5_FLOW_ACTION_PORT_ID))\n+\t\t\t\tsample_act->action_flags 
|=\n+\t\t\t\t\t\t\tMLX5_FLOW_ACTION_ENCAP;\n \t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_ECPRI:\n-\t\t\tif (!mlx5_flex_parser_ecpri_exist(dev)) {\n-\t\t\t\t/* Create it only the first time to be used. */\n-\t\t\t\tret = mlx5_flex_parser_ecpri_alloc(dev);\n-\t\t\t\tif (ret)\n-\t\t\t\t\treturn rte_flow_error_set\n-\t\t\t\t\t\t(error, -ret,\n-\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\tcase RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:\n+\t\t\tif (flow_dv_convert_action_modify_field\n+\t\t\t\t\t(dev, mhdr_res, actions, attr, error))\n+\t\t\t\treturn -rte_errno;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n+\t\t\towner_idx = (uint32_t)(uintptr_t)action->conf;\n+\t\t\tct = flow_aso_ct_get_by_idx(dev, owner_idx);\n+\t\t\tif (!ct)\n+\t\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\t\tNULL,\n-\t\t\t\t\t\t\"cannot create eCPRI parser\");\n+\t\t\t\t\t\t\"Failed to get CT object.\");\n+\t\t\tif (mlx5_aso_ct_available(priv->sh, ct))\n+\t\t\t\treturn rte_flow_error_set(error, rte_errno,\n+\t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\tNULL,\n+\t\t\t\t\t\t\"CT is unavailable.\");\n+\t\t\tif (ct->is_original)\n+\t\t\t\tdev_flow->dv.actions[actions_n] =\n+\t\t\t\t\t\t\tct->dr_action_orig;\n+\t\t\telse\n+\t\t\t\tdev_flow->dv.actions[actions_n] =\n+\t\t\t\t\t\t\tct->dr_action_rply;\n+\t\t\tif (flow->ct == 0) {\n+\t\t\t\tflow->indirect_type =\n+\t\t\t\t\t\tMLX5_INDIRECT_ACTION_TYPE_CT;\n+\t\t\t\tflow->ct = owner_idx;\n+\t\t\t\t__atomic_fetch_add(&ct->refcnt, 1,\n+\t\t\t\t\t\t   __ATOMIC_RELAXED);\n \t\t\t}\n-\t\t\tflow_dv_translate_item_ecpri(dev, match_mask,\n-\t\t\t\t\t\t     match_value, items,\n-\t\t\t\t\t\t     last_item);\n-\t\t\t/* No other protocol should follow eCPRI layer. */\n-\t\t\tlast_item = MLX5_FLOW_LAYER_ECPRI;\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_INTEGRITY:\n-\t\t\tflow_dv_translate_item_integrity(items, integrity_items,\n-\t\t\t\t\t\t\t &last_item);\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_CONNTRACK:\n-\t\t\tflow_dv_translate_item_aso_ct(dev, match_mask,\n-\t\t\t\t\t\t      match_value, items);\n-\t\t\tbreak;\n-\t\tcase RTE_FLOW_ITEM_TYPE_FLEX:\n-\t\t\tflow_dv_translate_item_flex(dev, match_mask,\n-\t\t\t\t\t\t    match_value, items,\n-\t\t\t\t\t\t    dev_flow, tunnel != 0);\n-\t\t\tlast_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :\n-\t\t\t\t    MLX5_FLOW_ITEM_OUTER_FLEX;\n+\t\t\tactions_n++;\n+\t\t\taction_flags |= MLX5_FLOW_ACTION_CT;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_END:\n+\t\t\tactions_end = true;\n+\t\t\tif (mhdr_res->actions_num) {\n+\t\t\t\t/* create modify action if needed. */\n+\t\t\t\tif (flow_dv_modify_hdr_resource_register\n+\t\t\t\t\t(dev, mhdr_res, dev_flow, error))\n+\t\t\t\t\treturn -rte_errno;\n+\t\t\t\tdev_flow->dv.actions[modify_action_position] =\n+\t\t\t\t\thandle->dvh.modify_hdr->action;\n+\t\t\t}\n+\t\t\t/*\n+\t\t\t * Handle AGE and COUNT action by single HW counter\n+\t\t\t * when they are not shared.\n+\t\t\t */\n+\t\t\tif (action_flags & MLX5_FLOW_ACTION_AGE) {\n+\t\t\t\tif ((non_shared_age && count) ||\n+\t\t\t\t    !flow_hit_aso_supported(priv->sh, attr)) {\n+\t\t\t\t\t/* Creates age by counters. 
*/\n+\t\t\t\t\tcnt_act = flow_dv_prepare_counter\n+\t\t\t\t\t\t\t\t(dev, dev_flow,\n+\t\t\t\t\t\t\t\t flow, count,\n+\t\t\t\t\t\t\t\t non_shared_age,\n+\t\t\t\t\t\t\t\t error);\n+\t\t\t\t\tif (!cnt_act)\n+\t\t\t\t\t\treturn -rte_errno;\n+\t\t\t\t\tdev_flow->dv.actions[age_act_pos] =\n+\t\t\t\t\t\t\t\tcnt_act->action;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t\tif (!flow->age && non_shared_age) {\n+\t\t\t\t\tflow->age = flow_dv_aso_age_alloc\n+\t\t\t\t\t\t\t\t(dev, error);\n+\t\t\t\t\tif (!flow->age)\n+\t\t\t\t\t\treturn -rte_errno;\n+\t\t\t\t\tflow_dv_aso_age_params_init\n+\t\t\t\t\t\t    (dev, flow->age,\n+\t\t\t\t\t\t     non_shared_age->context ?\n+\t\t\t\t\t\t     non_shared_age->context :\n+\t\t\t\t\t\t     (void *)(uintptr_t)\n+\t\t\t\t\t\t     (dev_flow->flow_idx),\n+\t\t\t\t\t\t     non_shared_age->timeout);\n+\t\t\t\t}\n+\t\t\t\tage_act = flow_aso_age_get_by_idx(dev,\n+\t\t\t\t\t\t\t\t  flow->age);\n+\t\t\t\tdev_flow->dv.actions[age_act_pos] =\n+\t\t\t\t\t\t\t     age_act->dr_action;\n+\t\t\t}\n+\t\t\tif (action_flags & MLX5_FLOW_ACTION_COUNT) {\n+\t\t\t\t/*\n+\t\t\t\t * Create one count action, to be used\n+\t\t\t\t * by all sub-flows.\n+\t\t\t\t */\n+\t\t\t\tcnt_act = flow_dv_prepare_counter(dev, dev_flow,\n+\t\t\t\t\t\t\t\t  flow, count,\n+\t\t\t\t\t\t\t\t  NULL, error);\n+\t\t\t\tif (!cnt_act)\n+\t\t\t\t\treturn -rte_errno;\n+\t\t\t\tdev_flow->dv.actions[actions_n++] =\n+\t\t\t\t\t\t\t\tcnt_act->action;\n+\t\t\t}\n \t\tdefault:\n \t\t\tbreak;\n \t\t}\n-\t\titem_flags |= last_item;\n-\t}\n-\t/*\n-\t * When E-Switch mode is enabled, we have two cases where we need to\n-\t * set the source port manually.\n-\t * The first one, is in case of NIC ingress steering rule, and the\n-\t * second is E-Switch rule where no port_id item was found.\n-\t * In both cases the source port is set according the current port\n-\t * in use.\n-\t */\n-\tif (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&\n-\t    !(item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) && priv->sh->esw_mode &&\n-\t    !(attr->egress && !attr->transfer)) {\n-\t\tif (flow_dv_translate_item_port_id(dev, match_mask,\n-\t\t\t\t\t\t   match_value, NULL, attr))\n-\t\t\treturn -rte_errno;\n-\t}\n-\tif (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {\n-\t\tflow_dv_translate_item_integrity_post(match_mask, match_value,\n-\t\t\t\t\t\t      integrity_items,\n-\t\t\t\t\t\t      item_flags);\n-\t}\n-\tif (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)\n-\t\tflow_dv_translate_item_vxlan_gpe(match_mask, match_value,\n-\t\t\t\t\t\t tunnel_item, item_flags);\n-\telse if (item_flags & MLX5_FLOW_LAYER_GENEVE)\n-\t\tflow_dv_translate_item_geneve(match_mask, match_value,\n-\t\t\t\t\t      tunnel_item, item_flags);\n-\telse if (item_flags & MLX5_FLOW_LAYER_GRE) {\n-\t\tif (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)\n-\t\t\tflow_dv_translate_item_gre(match_mask, match_value,\n-\t\t\t\t\t\t   tunnel_item, item_flags);\n-\t\telse if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)\n-\t\t\tflow_dv_translate_item_nvgre(match_mask, match_value,\n-\t\t\t\t\t\t     tunnel_item, item_flags);\n-\t\telse if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)\n-\t\t\tflow_dv_translate_item_gre_option(match_mask, match_value,\n-\t\t\t\t\ttunnel_item, gre_item, item_flags);\n-\t\telse\n-\t\t\tMLX5_ASSERT(false);\n+\t\tif (mhdr_res->actions_num &&\n+\t\t    modify_action_position == UINT32_MAX)\n+\t\t\tmodify_action_position = actions_n++;\n \t}\n-#ifdef RTE_LIBRTE_MLX5_DEBUG\n-\tMLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,\n-\t\t\t\t\t      
dev_flow->dv.value.buf));\n-#endif\n-\t/*\n-\t * Layers may be already initialized from prefix flow if this dev_flow\n-\t * is the suffix flow.\n-\t */\n-\thandle->layers |= item_flags;\n+\tdev_flow->act_flags = action_flags;\n+\tret = flow_dv_translate_items(dev, dev_flow, attr, items, &matcher,\n+\t\t\t\t      error);\n+\tif (ret)\n+\t\treturn -rte_errno;\n \tif (action_flags & MLX5_FLOW_ACTION_RSS)\n \t\tflow_dv_hashfields_set(dev_flow->handle->layers,\n \t\t\t\t       rss_desc,\n@@ -14211,7 +14255,6 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\tactions_n = tmp_actions_n;\n \t}\n \tdev_flow->dv.actions_n = actions_n;\n-\tdev_flow->act_flags = action_flags;\n \tif (wks->skip_matcher_reg)\n \t\treturn 0;\n \t/* Register matcher. */\n",
    "prefixes": [
        "v6",
        "01/18"
    ]
}