get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/117216/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 117216,
    "url": "http://patches.dpdk.org/api/patches/117216/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220930125315.5079-7-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220930125315.5079-7-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220930125315.5079-7-suanmingm@nvidia.com",
    "date": "2022-09-30T12:53:04",
    "name": "[v3,06/17] net/mlx5: add extended metadata mode for hardware steering",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "69a0b3413b7a94d8f3607dd42daf51bb834abf9e",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220930125315.5079-7-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 24935,
            "url": "http://patches.dpdk.org/api/series/24935/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=24935",
            "date": "2022-09-30T12:52:58",
            "name": "net/mlx5: HW steering PMD update",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/24935/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/117216/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/117216/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D6755A00C4;\n\tFri, 30 Sep 2022 14:54:23 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 90BF442B6F;\n\tFri, 30 Sep 2022 14:54:00 +0200 (CEST)",
            "from NAM02-SN1-obe.outbound.protection.outlook.com\n (mail-sn1anam02on2081.outbound.protection.outlook.com [40.107.96.81])\n by mails.dpdk.org (Postfix) with ESMTP id 60F7A42905\n for <dev@dpdk.org>; Fri, 30 Sep 2022 14:53:59 +0200 (CEST)",
            "from DM5PR07CA0112.namprd07.prod.outlook.com (2603:10b6:4:ae::41) by\n IA1PR12MB6234.namprd12.prod.outlook.com (2603:10b6:208:3e6::9) with\n Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5676.20; Fri, 30 Sep 2022 12:53:56 +0000",
            "from DM6NAM11FT089.eop-nam11.prod.protection.outlook.com\n (2603:10b6:4:ae:cafe::52) by DM5PR07CA0112.outlook.office365.com\n (2603:10b6:4:ae::41) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5676.15 via Frontend\n Transport; Fri, 30 Sep 2022 12:53:56 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n DM6NAM11FT089.mail.protection.outlook.com (10.13.173.82) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5676.17 via Frontend Transport; Fri, 30 Sep 2022 12:53:56 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Fri, 30 Sep\n 2022 05:53:46 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Fri, 30 Sep\n 2022 05:53:44 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=khtOAMkuLYcgwYpaZfuB5hsYFGkWRAeIhdSlEWALvzzTpujzMyTpaeorSHQMDKEsbF3lQQjlVdc2KAFiU501KKrHDw9v3CUE03vqSNZQW3UAhWMyV6FAX00jCDJ4PtkkoWKVNLILi/+2HxrczpdSjCYIQTcKfUQ4wQo+zKA50HUWmvdJdT5yeOcgb/7gGQVknAg9yfRdzjWeRli6K8bC+54zOm33BNW85mEycLsJuDqAG+C3CjrI/lwy+szCfCqU6vk3LXKOTO3osn5YjlfNGv+0bn7siKKhazmHDBsvFj1SVbtaLjMM6QwPoF4Jm6ptWVvb6ONuAwamArjNhap8Sw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=pRer6uCC6vY/pTJLDrLPvZETyzt0LsYzlJ7CmgLILSw=;\n b=W9fenv7fVdkIn2m8nCknZn7h2V0drtu9AWjjGS5wEsQTQYJ2wFhOTHhdm75gdDCdUiplBDiwqfoIEutXUqa8xyqGJKhpuCnRInmV5h5y5xI2E4ytaCohNKsEOCZUPlRWrKmJ6nbS/e2Mb4vUeOHxZZHSgx+cESeZQWvJRpM7aBQgufi4o+HRNHmZhANO0zzW3gTKIZc2iykuKa7KSCsK988veYRqWAbs6rI4JgHQCrkHKiTLFkcSeMUpWWFzIinuSVO6x0bzQnVtxtAk72XMBhjIsS6DEXjLKnqBN16cVMf6YuoUWB8Sj/Bo3cYNq5QCX3VEHK2s6L0rKhdKLLMsjA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=pRer6uCC6vY/pTJLDrLPvZETyzt0LsYzlJ7CmgLILSw=;\n b=q3NdlUMHV+m+j5Pp0rwEl2TthZ79WhnQAQVH60qDsXuE+sNXtI05iCyLJ+HFUNV7i9Nyi4S1OEyDF2nSBUI2LNVks7vJYEQEx525bVETPPfzAg9JoPXOL8MRb0Dnwx+GjqlUaLSfNBKUuprbdsBh41M61qYj4/+tG8rOjt1i3aI4xJaATINyTIrdOgahppGUFtN99VFSqOgAnKlz9USO0aZVhKpx2iIOcNrplrf8GRMCF/EU8vsYTsiqapvZOa3pGYGRgiG2xdH+EiyTm1y/h8lku3T/lJ5PF4NGE7gVGnfmcC44BMTQXTHS+hftprQqN+BC24qTDwBbzqdzVHS+kw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>",
        "CC": "<dev@dpdk.org>, <rasland@nvidia.com>, <orika@nvidia.com>, Bing Zhao\n <bingz@nvidia.com>",
        "Subject": "[PATCH v3 06/17] net/mlx5: add extended metadata mode for hardware\n steering",
        "Date": "Fri, 30 Sep 2022 15:53:04 +0300",
        "Message-ID": "<20220930125315.5079-7-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220930125315.5079-1-suanmingm@nvidia.com>",
        "References": "<20220923144334.27736-1-suanmingm@nvidia.com>\n <20220930125315.5079-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DM6NAM11FT089:EE_|IA1PR12MB6234:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "ba1844ac-2a85-4f5d-5cf0-08daa2e2d630",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n j8zDnMeCTEYkKi0HSYJz6EHD0+px3t5NVLndhBkevHyo8jBT4kERNs3GT9lMSlDugHbolYgZAZXqEpSBgqXMcJn6h1lCP3dG59kRKTnZgjD+zsHb2/OjzKxxMgMZ1OZp0TB1DWvXqTIeAiwH9ns2dqweI4fSSIPkafRe5ButKBiZerbvO1fNth8zVfOgCAqlb/p2PqfQVfc4dRTj3l31c/nyh+4VaMCjvRysJIOh3bKXtouAQ0OyVOe9f1XvbrmonewN2bR6W9d/6N1CmEdirakXxT/nvJ1P2rLs7tKkNZ9WK9cZYjlziu611KELTGbo4XpAfSibGWSytguW6ZifWoaaIt/MozFSo3dtBHsTYDa0ckNhHBiEj4pjsOhFf27Lmkx55Q017Ji/aX14M/VbEvpmWb9c44fgzKlg6gEGWO2Gxx76xL5kqshEZLOO2Qaahe8Y2fNI/hF8ymnb8QpuwH6SmBLqRrxeutkmWQfTlyhWMadEMD/Q0TRKRcoGh5GZ1Ckx8G3juj4vf3eHf0zS6DWaxl5r+C5WQCXvQir++ScaSWqhtbfmnrhqp/UHKwzzWrU0ATjpRcbzFZ0YMePXVBtno5gr5WfX7PqPkVBkWRQ1pWZL2YKpOINLRbiUo/BNTnDTosBEXPz6PazK2xgi163wjDB8Tj+VsIyVBN650/pIUTdFodKupbm9xCpY3uQSe0+92RqASdNlmQFwOcLvUxGVFjLaoUjmh/qBbsKWY48v5kiHe/GsNP8bVPvqNnceuI/HKqsqUKEFZUyAYetFlb/XliwBGm9yKXaa+AUgHDA=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(376002)(396003)(136003)(346002)(39860400002)(451199015)(40470700004)(36840700001)(46966006)(86362001)(40480700001)(36860700001)(55016003)(7636003)(36756003)(54906003)(82740400003)(40460700003)(356005)(336012)(4326008)(316002)(110136005)(8676002)(47076005)(5660300002)(6636002)(2906002)(70586007)(70206006)(41300700001)(8936002)(186003)(426003)(16526019)(478600001)(6666004)(82310400005)(107886003)(1076003)(83380400001)(7696005)(2616005)(26005)(30864003)(6286002)(579004)(559001)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "30 Sep 2022 12:53:56.4586 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n ba1844ac-2a85-4f5d-5cf0-08daa2e2d630",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT089.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "IA1PR12MB6234",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Bing Zhao <bingz@nvidia.com>\n\nThe new mode 4 of devarg \"dv_xmeta_en\" is added for HWS only. In this\nmode, the Rx / Tx metadata with 32b width copy between FDB and NIC is\nsupported. The mark is only supported in NIC and there is no copy\nsupported.\n\nSigned-off-by: Bing Zhao <bingz@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_os.c |  10 +-\n drivers/net/mlx5/mlx5.c          |   7 +-\n drivers/net/mlx5/mlx5.h          |   8 +-\n drivers/net/mlx5/mlx5_flow.c     |   8 +-\n drivers/net/mlx5/mlx5_flow.h     |  14 +\n drivers/net/mlx5/mlx5_flow_dv.c  |  43 +-\n drivers/net/mlx5/mlx5_flow_hw.c  | 864 ++++++++++++++++++++++++++++---\n drivers/net/mlx5/mlx5_trigger.c  |   3 +\n 8 files changed, 872 insertions(+), 85 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex e0586a4d6f..061b825e7b 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -1554,6 +1554,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n \t\tif (priv->vport_meta_mask)\n \t\t\tflow_hw_set_port_info(eth_dev);\n+\t\tif (priv->sh->config.dv_esw_en &&\n+\t\t    priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&\n+\t\t    priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_META32_HWS) {\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\"metadata mode %u is not supported in HWS eswitch mode\",\n+\t\t\t\tpriv->sh->config.dv_xmeta_en);\n+\t\t\t\terr = ENOTSUP;\n+\t\t\t\tgoto error;\n+\t\t}\n \t\t/* Only HWS requires this information. */\n \t\tflow_hw_init_tags_set(eth_dev);\n \t\tif (priv->sh->config.dv_esw_en &&\n@@ -1569,7 +1578,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\tgoto error;\n #endif\n \t}\n-\t/* Port representor shares the same max priority with pf port. */\n \tif (!priv->sh->flow_priority_check_flag) {\n \t\t/* Supported Verbs flow priority number detection. 
*/\n \t\terr = mlx5_flow_discover_priorities(eth_dev);\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 74adb677f4..cf5146d677 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -1218,7 +1218,8 @@ mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque)\n \t\tif (tmp != MLX5_XMETA_MODE_LEGACY &&\n \t\t    tmp != MLX5_XMETA_MODE_META16 &&\n \t\t    tmp != MLX5_XMETA_MODE_META32 &&\n-\t\t    tmp != MLX5_XMETA_MODE_MISS_INFO) {\n+\t\t    tmp != MLX5_XMETA_MODE_MISS_INFO &&\n+\t\t    tmp != MLX5_XMETA_MODE_META32_HWS) {\n \t\t\tDRV_LOG(ERR, \"Invalid extensive metadata parameter.\");\n \t\t\trte_errno = EINVAL;\n \t\t\treturn -rte_errno;\n@@ -2849,6 +2850,10 @@ mlx5_set_metadata_mask(struct rte_eth_dev *dev)\n \t\tmeta = UINT32_MAX;\n \t\tmark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;\n \t\tbreak;\n+\tcase MLX5_XMETA_MODE_META32_HWS:\n+\t\tmeta = UINT32_MAX;\n+\t\tmark = MLX5_FLOW_MARK_MASK;\n+\t\tbreak;\n \tdefault:\n \t\tmeta = 0;\n \t\tmark = 0;\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 0bf21c1efe..fc4bc4e6a3 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -298,8 +298,8 @@ struct mlx5_sh_config {\n \tuint32_t reclaim_mode:2; /* Memory reclaim mode. */\n \tuint32_t dv_esw_en:1; /* Enable E-Switch DV flow. */\n \t/* Enable DV flow. 1 means SW steering, 2 means HW steering. */\n-\tunsigned int dv_flow_en:2;\n-\tuint32_t dv_xmeta_en:2; /* Enable extensive flow metadata. */\n+\tuint32_t dv_flow_en:2; /* Enable DV flow. */\n+\tuint32_t dv_xmeta_en:3; /* Enable extensive flow metadata. */\n \tuint32_t dv_miss_info:1; /* Restore packet after partial hw miss. */\n \tuint32_t l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */\n \tuint32_t vf_nl_en:1; /* Enable Netlink requests in VF mode. */\n@@ -312,7 +312,6 @@ struct mlx5_sh_config {\n \tuint32_t fdb_def_rule:1; /* Create FDB default jump rule */\n };\n \n-\n /* Structure for VF VLAN workaround. 
*/\n struct mlx5_vf_vlan {\n \tuint32_t tag:12;\n@@ -1279,12 +1278,12 @@ struct mlx5_dev_ctx_shared {\n \tstruct mlx5_lb_ctx self_lb; /* QP to enable self loopback for Devx. */\n \tunsigned int flow_max_priority;\n \tenum modify_reg flow_mreg_c[MLX5_MREG_C_NUM];\n+\t/* Availability of mreg_c's. */\n \tvoid *devx_channel_lwm;\n \tstruct rte_intr_handle *intr_handle_lwm;\n \tpthread_mutex_t lwm_config_lock;\n \tuint32_t host_shaper_rate:8;\n \tuint32_t lwm_triggered:1;\n-\t/* Availability of mreg_c's. */\n \tstruct mlx5_dev_shared_port port[]; /* per device port data array. */\n };\n \n@@ -1508,6 +1507,7 @@ struct mlx5_priv {\n \tstruct rte_flow_template_table *hw_esw_sq_miss_root_tbl;\n \tstruct rte_flow_template_table *hw_esw_sq_miss_tbl;\n \tstruct rte_flow_template_table *hw_esw_zero_tbl;\n+\tstruct rte_flow_template_table *hw_tx_meta_cpy_tbl;\n \tstruct mlx5_indexed_pool *flows[MLX5_FLOW_TYPE_MAXI];\n \t/* RTE Flow rules. */\n \tuint32_t ctrl_flows; /* Control flow rules. */\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 9c44b2e99b..b570ed7f69 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -1107,6 +1107,8 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,\n \t\t\treturn REG_C_0;\n \t\tcase MLX5_XMETA_MODE_META32:\n \t\t\treturn REG_C_1;\n+\t\tcase MLX5_XMETA_MODE_META32_HWS:\n+\t\t\treturn REG_C_1;\n \t\t}\n \t\tbreak;\n \tcase MLX5_METADATA_TX:\n@@ -1119,11 +1121,14 @@ mlx5_flow_get_reg_id(struct rte_eth_dev *dev,\n \t\t\treturn REG_C_0;\n \t\tcase MLX5_XMETA_MODE_META32:\n \t\t\treturn REG_C_1;\n+\t\tcase MLX5_XMETA_MODE_META32_HWS:\n+\t\t\treturn REG_C_1;\n \t\t}\n \t\tbreak;\n \tcase MLX5_FLOW_MARK:\n \t\tswitch (config->dv_xmeta_en) {\n \t\tcase MLX5_XMETA_MODE_LEGACY:\n+\t\tcase MLX5_XMETA_MODE_META32_HWS:\n \t\t\treturn REG_NON;\n \t\tcase MLX5_XMETA_MODE_META16:\n \t\t\treturn REG_C_1;\n@@ -4442,7 +4447,8 @@ static bool flow_check_modify_action_type(struct rte_eth_dev *dev,\n \t\treturn 
true;\n \tcase RTE_FLOW_ACTION_TYPE_FLAG:\n \tcase RTE_FLOW_ACTION_TYPE_MARK:\n-\t\tif (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)\n+\t\tif (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&\n+\t\t    priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_META32_HWS)\n \t\t\treturn true;\n \t\telse\n \t\t\treturn false;\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 93f0e189d4..a8b27ea494 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -48,6 +48,12 @@ enum mlx5_rte_flow_action_type {\n \tMLX5_RTE_FLOW_ACTION_TYPE_RSS,\n };\n \n+/* Private (internal) Field IDs for MODIFY_FIELD action. */\n+enum mlx5_rte_flow_field_id {\n+\t\tMLX5_RTE_FLOW_FIELD_END = INT_MIN,\n+\t\t\tMLX5_RTE_FLOW_FIELD_META_REG,\n+};\n+\n #define MLX5_INDIRECT_ACTION_TYPE_OFFSET 30\n \n enum {\n@@ -1167,6 +1173,7 @@ struct rte_flow_actions_template {\n \tstruct rte_flow_action *masks; /* Cached action masks.*/\n \tuint16_t mhdr_off; /* Offset of DR modify header action. */\n \tuint32_t refcnt; /* Reference counter. */\n+\tuint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */\n };\n \n /* Jump action struct. */\n@@ -1243,6 +1250,11 @@ struct mlx5_flow_group {\n #define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 2\n #define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32\n \n+struct mlx5_flow_template_table_cfg {\n+\tstruct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */\n+\tbool external; /* True if created by flow API, false if table is internal to PMD. */\n+};\n+\n struct rte_flow_template_table {\n \tLIST_ENTRY(rte_flow_template_table) next;\n \tstruct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */\n@@ -1252,6 +1264,7 @@ struct rte_flow_template_table {\n \t/* Action templates bind to the table. */\n \tstruct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];\n \tstruct mlx5_indexed_pool *flow; /* The table's flow ipool. 
*/\n+\tstruct mlx5_flow_template_table_cfg cfg;\n \tuint32_t type; /* Flow table type RX/TX/FDB. */\n \tuint8_t nb_item_templates; /* Item template number. */\n \tuint8_t nb_action_templates; /* Action template number. */\n@@ -2333,4 +2346,5 @@ int mlx5_flow_hw_esw_create_mgr_sq_miss_flow(struct rte_eth_dev *dev);\n int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,\n \t\t\t\t\t uint32_t txq);\n int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);\n+int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);\n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 3fc2453045..5b72cfaa61 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -1783,7 +1783,8 @@ mlx5_flow_field_id_to_modify_info\n \t\t\tint reg;\n \n \t\t\tif (priv->sh->config.dv_flow_en == 2)\n-\t\t\t\treg = REG_C_1;\n+\t\t\t\treg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_TAG,\n+\t\t\t\t\t\t\t data->level);\n \t\t\telse\n \t\t\t\treg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG,\n \t\t\t\t\t\t\t   data->level, error);\n@@ -1862,6 +1863,24 @@ mlx5_flow_field_id_to_modify_info\n \t\telse\n \t\t\tinfo[idx].offset = off_be;\n \t\tbreak;\n+\tcase MLX5_RTE_FLOW_FIELD_META_REG:\n+\t\t{\n+\t\t\tuint32_t meta_mask = priv->sh->dv_meta_mask;\n+\t\t\tuint32_t meta_count = __builtin_popcount(meta_mask);\n+\t\t\tuint32_t reg = data->level;\n+\n+\t\t\tRTE_SET_USED(meta_count);\n+\t\t\tMLX5_ASSERT(data->offset + width <= meta_count);\n+\t\t\tMLX5_ASSERT(reg != REG_NON);\n+\t\t\tMLX5_ASSERT(reg < RTE_DIM(reg_to_field));\n+\t\t\tinfo[idx] = (struct field_modify_info){4, 0, reg_to_field[reg]};\n+\t\t\tif (mask)\n+\t\t\t\tmask[idx] = flow_modify_info_mask_32_masked\n+\t\t\t\t\t(width, data->offset, meta_mask);\n+\t\t\telse\n+\t\t\t\tinfo[idx].offset = data->offset;\n+\t\t}\n+\t\tbreak;\n \tcase RTE_FLOW_FIELD_POINTER:\n \tcase RTE_FLOW_FIELD_VALUE:\n \tdefault:\n@@ -9819,7 
+9838,19 @@ flow_dv_translate_item_meta(struct rte_eth_dev *dev,\n \tmask = meta_m->data;\n \tif (key_type == MLX5_SET_MATCHER_HS_M)\n \t\tmask = value;\n-\treg = flow_dv_get_metadata_reg(dev, attr, NULL);\n+\t/*\n+\t * In the current implementation, REG_B cannot be used to match.\n+\t * Force to use REG_C_1 in HWS root table as other tables.\n+\t * This map may change.\n+\t * NIC: modify - REG_B to be present in SW\n+\t *      match - REG_C_1 when copied from FDB, different from SWS\n+\t * FDB: modify - REG_C_1 in Xmeta mode, REG_NON in legacy mode\n+\t *      match - REG_C_1 in FDB\n+\t */\n+\tif (!!(key_type & MLX5_SET_MATCHER_SW))\n+\t\treg = flow_dv_get_metadata_reg(dev, attr, NULL);\n+\telse\n+\t\treg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_META, 0);\n \tif (reg < 0)\n \t\treturn;\n \tMLX5_ASSERT(reg != REG_NON);\n@@ -9919,7 +9950,10 @@ flow_dv_translate_item_tag(struct rte_eth_dev *dev, void *key,\n \t/* When set mask, the index should be from spec. */\n \tindex = tag_vv ? tag_vv->index : tag_v->index;\n \t/* Get the metadata register index for the tag. 
*/\n-\treg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, index, NULL);\n+\tif (!!(key_type & MLX5_SET_MATCHER_SW))\n+\t\treg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, index, NULL);\n+\telse\n+\t\treg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_TAG, index);\n \tMLX5_ASSERT(reg > 0);\n \tflow_dv_match_meta_reg(key, reg, tag_v->data, tag_m->data);\n }\n@@ -13437,7 +13471,8 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,\n \t */\n \tif (!(wks.item_flags & MLX5_FLOW_ITEM_PORT_ID) &&\n \t    !(wks.item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) && priv->sh->esw_mode &&\n-\t    !(attr->egress && !attr->transfer)) {\n+\t    !(attr->egress && !attr->transfer) &&\n+\t    attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) {\n \t\tif (flow_dv_translate_item_port_id_all(dev, match_mask,\n \t\t\t\t\t\t   match_value, NULL, attr))\n \t\t\treturn -rte_errno;\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex b3b37f36a2..64d06d4fb4 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -20,13 +20,27 @@\n /* Default queue to flush the flows. */\n #define MLX5_DEFAULT_FLUSH_QUEUE 0\n \n-/* Maximum number of rules in control flow tables */\n+/* Maximum number of rules in control flow tables. */\n #define MLX5_HW_CTRL_FLOW_NB_RULES (4096)\n \n-/* Flow group for SQ miss default flows/ */\n-#define MLX5_HW_SQ_MISS_GROUP (UINT32_MAX)\n+/* Lowest flow group usable by an application. */\n+#define MLX5_HW_LOWEST_USABLE_GROUP (1)\n+\n+/* Maximum group index usable by user applications for transfer flows. */\n+#define MLX5_HW_MAX_TRANSFER_GROUP (UINT32_MAX - 1)\n+\n+/* Lowest priority for HW root table. */\n+#define MLX5_HW_LOWEST_PRIO_ROOT 15\n+\n+/* Lowest priority for HW non-root table. 
*/\n+#define MLX5_HW_LOWEST_PRIO_NON_ROOT (UINT32_MAX)\n \n static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);\n+static int flow_hw_translate_group(struct rte_eth_dev *dev,\n+\t\t\t\t   const struct mlx5_flow_template_table_cfg *cfg,\n+\t\t\t\t   uint32_t group,\n+\t\t\t\t   uint32_t *table_group,\n+\t\t\t\t   struct rte_flow_error *error);\n \n const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;\n \n@@ -213,12 +227,12 @@ flow_hw_rss_item_flags_get(const struct rte_flow_item items[])\n  */\n static struct mlx5_hw_jump_action *\n flow_hw_jump_action_register(struct rte_eth_dev *dev,\n-\t\t\t     const struct rte_flow_attr *attr,\n+\t\t\t     const struct mlx5_flow_template_table_cfg *cfg,\n \t\t\t     uint32_t dest_group,\n \t\t\t     struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct rte_flow_attr jattr = *attr;\n+\tstruct rte_flow_attr jattr = cfg->attr.flow_attr;\n \tstruct mlx5_flow_group *grp;\n \tstruct mlx5_flow_cb_ctx ctx = {\n \t\t.dev = dev,\n@@ -226,9 +240,13 @@ flow_hw_jump_action_register(struct rte_eth_dev *dev,\n \t\t.data = &jattr,\n \t};\n \tstruct mlx5_list_entry *ge;\n+\tuint32_t target_group;\n \n-\tjattr.group = dest_group;\n-\tge = mlx5_hlist_register(priv->sh->flow_tbls, dest_group, &ctx);\n+\ttarget_group = dest_group;\n+\tif (flow_hw_translate_group(dev, cfg, dest_group, &target_group, error))\n+\t\treturn NULL;\n+\tjattr.group = target_group;\n+\tge = mlx5_hlist_register(priv->sh->flow_tbls, target_group, &ctx);\n \tif (!ge)\n \t\treturn NULL;\n \tgrp = container_of(ge, struct mlx5_flow_group, entry);\n@@ -760,7 +778,8 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,\n \t\t\t\t(void *)(uintptr_t)conf->src.pvalue :\n \t\t\t\t(void *)(uintptr_t)&conf->src.value;\n \t\tif (conf->dst.field == RTE_FLOW_FIELD_META ||\n-\t\t    conf->dst.field == RTE_FLOW_FIELD_TAG) {\n+\t\t    conf->dst.field == RTE_FLOW_FIELD_TAG ||\n+\t\t    conf->dst.field == (enum 
rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {\n \t\t\tvalue = *(const unaligned_uint32_t *)item.spec;\n \t\t\tvalue = rte_cpu_to_be_32(value);\n \t\t\titem.spec = &value;\n@@ -860,6 +879,9 @@ flow_hw_represented_port_compile(struct rte_eth_dev *dev,\n \tif (m && !!m->port_id) {\n \t\tstruct mlx5_priv *port_priv;\n \n+\t\tif (!v)\n+\t\t\treturn rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  action, \"port index was not provided\");\n \t\tport_priv = mlx5_port_to_eswitch_info(v->port_id, false);\n \t\tif (port_priv == NULL)\n \t\t\treturn rte_flow_error_set\n@@ -903,8 +925,8 @@ flow_hw_represented_port_compile(struct rte_eth_dev *dev,\n  *\n  * @param[in] dev\n  *   Pointer to the rte_eth_dev structure.\n- * @param[in] table_attr\n- *   Pointer to the table attributes.\n+ * @param[in] cfg\n+ *   Pointer to the table configuration.\n  * @param[in] item_templates\n  *   Item template array to be binded to the table.\n  * @param[in/out] acts\n@@ -919,12 +941,13 @@ flow_hw_represented_port_compile(struct rte_eth_dev *dev,\n  */\n static int\n flow_hw_actions_translate(struct rte_eth_dev *dev,\n-\t\t\t  const struct rte_flow_template_table_attr *table_attr,\n+\t\t\t  const struct mlx5_flow_template_table_cfg *cfg,\n \t\t\t  struct mlx5_hw_actions *acts,\n \t\t\t  struct rte_flow_actions_template *at,\n \t\t\t  struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_template_table_attr *table_attr = &cfg->attr;\n \tconst struct rte_flow_attr *attr = &table_attr->flow_attr;\n \tstruct rte_flow_action *actions = at->actions;\n \tstruct rte_flow_action *action_start = actions;\n@@ -991,7 +1014,7 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\t\t\t((const struct rte_flow_action_jump *)\n \t\t\t\t\tactions->conf)->group;\n \t\t\t\tacts->jump = flow_hw_jump_action_register\n-\t\t\t\t\t\t(dev, attr, jump_group, error);\n+\t\t\t\t\t\t(dev, cfg, jump_group, error);\n \t\t\t\tif 
(!acts->jump)\n \t\t\t\t\tgoto err;\n \t\t\t\tacts->rule_acts[i].action = (!!attr->group) ?\n@@ -1101,6 +1124,16 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\t   error);\n \t\t\tif (err)\n \t\t\t\tgoto err;\n+\t\t\t/*\n+\t\t\t * Adjust the action source position for the following.\n+\t\t\t * ... / MODIFY_FIELD: rx_cpy_pos / (QUEUE|RSS) / ...\n+\t\t\t * The next action will be Q/RSS, there will not be\n+\t\t\t * another adjustment and the real source position of\n+\t\t\t * the following actions will be decreased by 1.\n+\t\t\t * No change of the total actions in the new template.\n+\t\t\t */\n+\t\t\tif ((actions - action_start) == at->rx_cpy_pos)\n+\t\t\t\taction_start += 1;\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n \t\t\tif (flow_hw_represented_port_compile\n@@ -1365,7 +1398,8 @@ flow_hw_modify_field_construct(struct mlx5_hw_q_job *job,\n \telse\n \t\trte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));\n \tif (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||\n-\t    mhdr_action->dst.field == RTE_FLOW_FIELD_TAG) {\n+\t    mhdr_action->dst.field == RTE_FLOW_FIELD_TAG ||\n+\t    mhdr_action->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {\n \t\tvalue_p = (unaligned_uint32_t *)values;\n \t\t*value_p = rte_cpu_to_be_32(*value_p);\n \t} else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI) {\n@@ -1513,7 +1547,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\tjump_group = ((const struct rte_flow_action_jump *)\n \t\t\t\t\t\taction->conf)->group;\n \t\t\tjump = flow_hw_jump_action_register\n-\t\t\t\t(dev, &attr, jump_group, NULL);\n+\t\t\t\t(dev, &table->cfg, jump_group, NULL);\n \t\t\tif (!jump)\n \t\t\t\treturn -1;\n \t\t\trule_acts[act_data->action_dst].action =\n@@ -1710,7 +1744,13 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \tjob->user_data = user_data;\n \trule_attr.user_data = job;\n \thw_acts = &table->ats[action_template_index].acts;\n-\t/* Construct 
the flow actions based on the input actions.*/\n+\t/*\n+\t * Construct the flow actions based on the input actions.\n+\t * The implicitly appended action is always fixed, like metadata\n+\t * copy action from FDB to NIC Rx.\n+\t * No need to copy and contrust a new \"actions\" list based on the\n+\t * user's input, in order to save the cost.\n+\t */\n \tif (flow_hw_actions_construct(dev, job, hw_acts, pattern_template_index,\n \t\t\t\t  actions, rule_acts, &acts_num)) {\n \t\trte_errno = EINVAL;\n@@ -1981,6 +2021,8 @@ flow_hw_q_flow_flush(struct rte_eth_dev *dev,\n \t/* Flush flow per-table from MLX5_DEFAULT_FLUSH_QUEUE. */\n \thw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];\n \tLIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {\n+\t\tif (!tbl->cfg.external)\n+\t\t\tcontinue;\n \t\tMLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {\n \t\t\tif (flow_hw_async_flow_destroy(dev,\n \t\t\t\t\t\tMLX5_DEFAULT_FLUSH_QUEUE,\n@@ -2018,8 +2060,8 @@ flow_hw_q_flow_flush(struct rte_eth_dev *dev,\n  *\n  * @param[in] dev\n  *   Pointer to the rte_eth_dev structure.\n- * @param[in] attr\n- *   Pointer to the table attributes.\n+ * @param[in] table_cfg\n+ *   Pointer to the table configuration.\n  * @param[in] item_templates\n  *   Item template array to be binded to the table.\n  * @param[in] nb_item_templates\n@@ -2036,7 +2078,7 @@ flow_hw_q_flow_flush(struct rte_eth_dev *dev,\n  */\n static struct rte_flow_template_table *\n flow_hw_table_create(struct rte_eth_dev *dev,\n-\t\t     const struct rte_flow_template_table_attr *attr,\n+\t\t     const struct mlx5_flow_template_table_cfg *table_cfg,\n \t\t     struct rte_flow_pattern_template *item_templates[],\n \t\t     uint8_t nb_item_templates,\n \t\t     struct rte_flow_actions_template *action_templates[],\n@@ -2048,6 +2090,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \tstruct rte_flow_template_table *tbl = NULL;\n \tstruct mlx5_flow_group *grp;\n \tstruct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];\n+\tconst struct 
rte_flow_template_table_attr *attr = &table_cfg->attr;\n \tstruct rte_flow_attr flow_attr = attr->flow_attr;\n \tstruct mlx5_flow_cb_ctx ctx = {\n \t\t.dev = dev,\n@@ -2088,6 +2131,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \ttbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl), 0, rte_socket_id());\n \tif (!tbl)\n \t\tgoto error;\n+\ttbl->cfg = *table_cfg;\n \t/* Allocate flow indexed pool. */\n \ttbl->flow = mlx5_ipool_create(&cfg);\n \tif (!tbl->flow)\n@@ -2131,7 +2175,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \t\t\tgoto at_error;\n \t\t}\n \t\tLIST_INIT(&tbl->ats[i].acts.act_list);\n-\t\terr = flow_hw_actions_translate(dev, attr,\n+\t\terr = flow_hw_actions_translate(dev, &tbl->cfg,\n \t\t\t\t\t\t&tbl->ats[i].acts,\n \t\t\t\t\t\taction_templates[i], error);\n \t\tif (err) {\n@@ -2174,6 +2218,96 @@ flow_hw_table_create(struct rte_eth_dev *dev,\n \treturn NULL;\n }\n \n+/**\n+ * Translates group index specified by the user in @p attr to internal\n+ * group index.\n+ *\n+ * Translation is done by incrementing group index, so group n becomes n + 1.\n+ *\n+ * @param[in] dev\n+ *   Pointer to Ethernet device.\n+ * @param[in] cfg\n+ *   Pointer to the template table configuration.\n+ * @param[in] group\n+ *   Currently used group index (table group or jump destination).\n+ * @param[out] table_group\n+ *   Pointer to output group index.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success. 
Otherwise, returns negative error code, rte_errno is set\n+ *   and error structure is filled.\n+ */\n+static int\n+flow_hw_translate_group(struct rte_eth_dev *dev,\n+\t\t\tconst struct mlx5_flow_template_table_cfg *cfg,\n+\t\t\tuint32_t group,\n+\t\t\tuint32_t *table_group,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;\n+\n+\tif (priv->sh->config.dv_esw_en && cfg->external && flow_attr->transfer) {\n+\t\tif (group > MLX5_HW_MAX_TRANSFER_GROUP)\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n+\t\t\t\t\t\t  NULL,\n+\t\t\t\t\t\t  \"group index not supported\");\n+\t\t*table_group = group + 1;\n+\t} else {\n+\t\t*table_group = group;\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * Create flow table.\n+ *\n+ * This function is a wrapper over @ref flow_hw_table_create(), which translates parameters\n+ * provided by user to proper internal values.\n+ *\n+ * @param[in] dev\n+ *   Pointer to Ethernet device.\n+ * @param[in] attr\n+ *   Pointer to the table attributes.\n+ * @param[in] item_templates\n+ *   Item template array to be binded to the table.\n+ * @param[in] nb_item_templates\n+ *   Number of item templates.\n+ * @param[in] action_templates\n+ *   Action template array to be binded to the table.\n+ * @param[in] nb_action_templates\n+ *   Number of action templates.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   Table on success, Otherwise, returns negative error code, rte_errno is set\n+ *   and error structure is filled.\n+ */\n+static struct rte_flow_template_table *\n+flow_hw_template_table_create(struct rte_eth_dev *dev,\n+\t\t\t      const struct rte_flow_template_table_attr *attr,\n+\t\t\t      struct rte_flow_pattern_template *item_templates[],\n+\t\t\t      uint8_t nb_item_templates,\n+\t\t\t      struct rte_flow_actions_template *action_templates[],\n+\t\t\t      
uint8_t nb_action_templates,\n+\t\t\t      struct rte_flow_error *error)\n+{\n+\tstruct mlx5_flow_template_table_cfg cfg = {\n+\t\t.attr = *attr,\n+\t\t.external = true,\n+\t};\n+\tuint32_t group = attr->flow_attr.group;\n+\n+\tif (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))\n+\t\treturn NULL;\n+\treturn flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,\n+\t\t\t\t    action_templates, nb_action_templates, error);\n+}\n+\n /**\n  * Destroy flow table.\n  *\n@@ -2309,10 +2443,13 @@ flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t\t  \"cannot use represented_port actions\"\n \t\t\t\t\t  \" without an E-Switch\");\n-\tif (mask_conf->port_id) {\n+\tif (mask_conf && mask_conf->port_id) {\n \t\tstruct mlx5_priv *port_priv;\n \t\tstruct mlx5_priv *dev_priv;\n \n+\t\tif (!action_conf)\n+\t\t\treturn rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  action, \"port index was not provided\");\n \t\tport_priv = mlx5_port_to_eswitch_info(action_conf->port_id, false);\n \t\tif (!port_priv)\n \t\t\treturn rte_flow_error_set(error, rte_errno,\n@@ -2337,20 +2474,77 @@ flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n+static inline int\n+flow_hw_action_meta_copy_insert(const struct rte_flow_action actions[],\n+\t\t\t\tconst struct rte_flow_action masks[],\n+\t\t\t\tconst struct rte_flow_action *ins_actions,\n+\t\t\t\tconst struct rte_flow_action *ins_masks,\n+\t\t\t\tstruct rte_flow_action *new_actions,\n+\t\t\t\tstruct rte_flow_action *new_masks,\n+\t\t\t\tuint16_t *ins_pos)\n+{\n+\tuint16_t idx, total = 0;\n+\tbool ins = false;\n+\tbool act_end = false;\n+\n+\tMLX5_ASSERT(actions && masks);\n+\tMLX5_ASSERT(new_actions && new_masks);\n+\tMLX5_ASSERT(ins_actions && ins_masks);\n+\tfor (idx = 0; !act_end; idx++) {\n+\t\tif (idx >= MLX5_HW_MAX_ACTS)\n+\t\t\treturn -1;\n+\t\tif 
(actions[idx].type == RTE_FLOW_ACTION_TYPE_RSS ||\n+\t\t    actions[idx].type == RTE_FLOW_ACTION_TYPE_QUEUE) {\n+\t\t\tins = true;\n+\t\t\t*ins_pos = idx;\n+\t\t}\n+\t\tif (actions[idx].type == RTE_FLOW_ACTION_TYPE_END)\n+\t\t\tact_end = true;\n+\t}\n+\tif (!ins)\n+\t\treturn 0;\n+\telse if (idx == MLX5_HW_MAX_ACTS)\n+\t\treturn -1; /* No more space. */\n+\ttotal = idx;\n+\t/* Before the position, no change for the actions. */\n+\tfor (idx = 0; idx < *ins_pos; idx++) {\n+\t\tnew_actions[idx] = actions[idx];\n+\t\tnew_masks[idx] = masks[idx];\n+\t}\n+\t/* Insert the new action and mask to the position. */\n+\tnew_actions[idx] = *ins_actions;\n+\tnew_masks[idx] = *ins_masks;\n+\t/* Remaining content is right shifted by one position. */\n+\tfor (; idx < total; idx++) {\n+\t\tnew_actions[idx + 1] = actions[idx];\n+\t\tnew_masks[idx + 1] = masks[idx];\n+\t}\n+\treturn 0;\n+}\n+\n static int\n flow_hw_action_validate(struct rte_eth_dev *dev,\n+\t\t\tconst struct rte_flow_actions_template_attr *attr,\n \t\t\tconst struct rte_flow_action actions[],\n \t\t\tconst struct rte_flow_action masks[],\n \t\t\tstruct rte_flow_error *error)\n {\n-\tint i;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tuint16_t i;\n \tbool actions_end = false;\n \tint ret;\n \n+\t/* FDB actions are only valid to proxy port. 
*/\n+\tif (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master))\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"transfer actions are only valid to proxy port\");\n \tfor (i = 0; !actions_end; ++i) {\n \t\tconst struct rte_flow_action *action = &actions[i];\n \t\tconst struct rte_flow_action *mask = &masks[i];\n \n+\t\tMLX5_ASSERT(i < MLX5_HW_MAX_ACTS);\n \t\tif (action->type != mask->type)\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n@@ -2447,21 +2641,77 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tint len, act_len, mask_len, i;\n-\tstruct rte_flow_actions_template *at;\n+\tstruct rte_flow_actions_template *at = NULL;\n+\tuint16_t pos = MLX5_HW_MAX_ACTS;\n+\tstruct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];\n+\tstruct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];\n+\tconst struct rte_flow_action *ra;\n+\tconst struct rte_flow_action *rm;\n+\tconst struct rte_flow_action_modify_field rx_mreg = {\n+\t\t.operation = RTE_FLOW_MODIFY_SET,\n+\t\t.dst = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = REG_B,\n+\t\t},\n+\t\t.src = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = REG_C_1,\n+\t\t},\n+\t\t.width = 32,\n+\t};\n+\tconst struct rte_flow_action_modify_field rx_mreg_mask = {\n+\t\t.operation = RTE_FLOW_MODIFY_SET,\n+\t\t.dst = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = UINT32_MAX,\n+\t\t\t.offset = UINT32_MAX,\n+\t\t},\n+\t\t.src = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = UINT32_MAX,\n+\t\t\t.offset = UINT32_MAX,\n+\t\t},\n+\t\t.width = UINT32_MAX,\n+\t};\n+\tconst struct rte_flow_action rx_cpy = {\n+\t\t.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,\n+\t\t.conf = 
&rx_mreg,\n+\t};\n+\tconst struct rte_flow_action rx_cpy_mask = {\n+\t\t.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,\n+\t\t.conf = &rx_mreg_mask,\n+\t};\n \n-\tif (flow_hw_action_validate(dev, actions, masks, error))\n+\tif (flow_hw_action_validate(dev, attr, actions, masks, error))\n \t\treturn NULL;\n-\tact_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,\n-\t\t\t\tNULL, 0, actions, error);\n+\tif (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&\n+\t    priv->sh->config.dv_esw_en) {\n+\t\tif (flow_hw_action_meta_copy_insert(actions, masks, &rx_cpy, &rx_cpy_mask,\n+\t\t\t\t\t\t    tmp_action, tmp_mask, &pos)) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t   \"Failed to concatenate new action/mask\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+\t/* Application should make sure only one Q/RSS exist in one rule. */\n+\tif (pos == MLX5_HW_MAX_ACTS) {\n+\t\tra = actions;\n+\t\trm = masks;\n+\t} else {\n+\t\tra = tmp_action;\n+\t\trm = tmp_mask;\n+\t}\n+\tact_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);\n \tif (act_len <= 0)\n \t\treturn NULL;\n \tlen = RTE_ALIGN(act_len, 16);\n-\tmask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,\n-\t\t\t\t NULL, 0, masks, error);\n+\tmask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, rm, error);\n \tif (mask_len <= 0)\n \t\treturn NULL;\n \tlen += RTE_ALIGN(mask_len, 16);\n-\tat = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at), 64, rte_socket_id());\n+\tat = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),\n+\t\t\t RTE_CACHE_LINE_SIZE, rte_socket_id());\n \tif (!at) {\n \t\trte_flow_error_set(error, ENOMEM,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n@@ -2469,18 +2719,20 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,\n \t\t\t\t   \"cannot allocate action template\");\n \t\treturn NULL;\n \t}\n+\t/* Actions part is in the first half. 
*/\n \tat->attr = *attr;\n \tat->actions = (struct rte_flow_action *)(at + 1);\n-\tact_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions, len,\n-\t\t\t\tactions, error);\n+\tact_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions,\n+\t\t\t\tlen, ra, error);\n \tif (act_len <= 0)\n \t\tgoto error;\n-\tat->masks = (struct rte_flow_action *)\n-\t\t    (((uint8_t *)at->actions) + act_len);\n+\t/* Masks part is in the second half. */\n+\tat->masks = (struct rte_flow_action *)(((uint8_t *)at->actions) + act_len);\n \tmask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,\n-\t\t\t\t len - act_len, masks, error);\n+\t\t\t\t len - act_len, rm, error);\n \tif (mask_len <= 0)\n \t\tgoto error;\n+\tat->rx_cpy_pos = pos;\n \t/*\n \t * mlx5 PMD hacks indirect action index directly to the action conf.\n \t * The rte_flow_conv() function copies the content from conf pointer.\n@@ -2497,7 +2749,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,\n \tLIST_INSERT_HEAD(&priv->flow_hw_at, at, next);\n \treturn at;\n error:\n-\tmlx5_free(at);\n+\tif (at)\n+\t\tmlx5_free(at);\n \treturn NULL;\n }\n \n@@ -2572,6 +2825,80 @@ flow_hw_copy_prepend_port_item(const struct rte_flow_item *items,\n \treturn copied_items;\n }\n \n+static int\n+flow_hw_pattern_validate(struct rte_eth_dev *dev,\n+\t\t\t const struct rte_flow_pattern_template_attr *attr,\n+\t\t\t const struct rte_flow_item items[],\n+\t\t\t struct rte_flow_error *error)\n+{\n+\tint i;\n+\tbool items_end = false;\n+\tRTE_SET_USED(dev);\n+\tRTE_SET_USED(attr);\n+\n+\tfor (i = 0; !items_end; i++) {\n+\t\tint type = items[i].type;\n+\n+\t\tswitch (type) {\n+\t\tcase RTE_FLOW_ITEM_TYPE_TAG:\n+\t\t{\n+\t\t\tint reg;\n+\t\t\tconst struct rte_flow_item_tag *tag =\n+\t\t\t\t(const struct rte_flow_item_tag *)items[i].spec;\n+\n+\t\t\treg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_TAG, tag->index);\n+\t\t\tif (reg == REG_NON)\n+\t\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t\t  
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t\t\t  NULL,\n+\t\t\t\t\t\t\t  \"Unsupported tag index\");\n+\t\t\tbreak;\n+\t\t}\n+\t\tcase MLX5_RTE_FLOW_ITEM_TYPE_TAG:\n+\t\t{\n+\t\t\tconst struct rte_flow_item_tag *tag =\n+\t\t\t\t(const struct rte_flow_item_tag *)items[i].spec;\n+\t\t\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\t\t\tuint8_t regcs = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;\n+\n+\t\t\tif (!((1 << (tag->index - REG_C_0)) & regcs))\n+\t\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t\t\t  NULL,\n+\t\t\t\t\t\t\t  \"Unsupported internal tag index\");\n+\t\t}\n+\t\tcase RTE_FLOW_ITEM_TYPE_VOID:\n+\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n+\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n+\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n+\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n+\t\tcase RTE_FLOW_ITEM_TYPE_GTP:\n+\t\tcase RTE_FLOW_ITEM_TYPE_GTP_PSC:\n+\t\tcase RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\tcase MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:\n+\t\tcase RTE_FLOW_ITEM_TYPE_META:\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE:\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE_KEY:\n+\t\tcase RTE_FLOW_ITEM_TYPE_GRE_OPTION:\n+\t\tcase RTE_FLOW_ITEM_TYPE_ICMP:\n+\t\tcase RTE_FLOW_ITEM_TYPE_ICMP6:\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_END:\n+\t\t\titems_end = true;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t\t  NULL,\n+\t\t\t\t\t\t  \"Unsupported item type\");\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n /**\n  * Create flow item template.\n  *\n@@ -2598,6 +2925,8 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,\n \tstruct rte_flow_item *copied_items = NULL;\n \tconst struct rte_flow_item *tmpl_items;\n \n+\tif (flow_hw_pattern_validate(dev, attr, items, error))\n+\t\treturn NULL;\n \tif (priv->sh->config.dv_esw_en && attr->ingress) {\n 
\t\t/*\n \t\t * Disallow pattern template with ingress and egress/transfer\n@@ -3032,6 +3361,17 @@ flow_hw_free_vport_actions(struct mlx5_priv *priv)\n \tpriv->hw_vport = NULL;\n }\n \n+static uint32_t\n+flow_hw_usable_lsb_vport_mask(struct mlx5_priv *priv)\n+{\n+\tuint32_t usable_mask = ~priv->vport_meta_mask;\n+\n+\tif (usable_mask)\n+\t\treturn (1 << rte_bsf32(usable_mask));\n+\telse\n+\t\treturn 0;\n+}\n+\n /**\n  * Creates a flow pattern template used to match on E-Switch Manager.\n  * This template is used to set up a table for SQ miss default flow.\n@@ -3070,7 +3410,10 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev)\n }\n \n /**\n- * Creates a flow pattern template used to match on a TX queue.\n+ * Creates a flow pattern template used to match REG_C_0 and a TX queue.\n+ * Matching on REG_C_0 is set up to match on least significant bit usable\n+ * by user-space, which is set when packet was originated from E-Switch Manager.\n+ *\n  * This template is used to set up a table for SQ miss default flow.\n  *\n  * @param dev\n@@ -3080,16 +3423,30 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev)\n  *   Pointer to flow pattern template on success, NULL otherwise.\n  */\n static struct rte_flow_pattern_template *\n-flow_hw_create_ctrl_sq_pattern_template(struct rte_eth_dev *dev)\n+flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev)\n {\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tuint32_t marker_bit = flow_hw_usable_lsb_vport_mask(priv);\n \tstruct rte_flow_pattern_template_attr attr = {\n \t\t.relaxed_matching = 0,\n \t\t.transfer = 1,\n \t};\n+\tstruct rte_flow_item_tag reg_c0_spec = {\n+\t\t.index = (uint8_t)REG_C_0,\n+\t};\n+\tstruct rte_flow_item_tag reg_c0_mask = {\n+\t\t.index = 0xff,\n+\t};\n \tstruct mlx5_rte_flow_item_tx_queue queue_mask = {\n \t\t.queue = UINT32_MAX,\n \t};\n \tstruct rte_flow_item items[] = {\n+\t\t{\n+\t\t\t.type = (enum 
rte_flow_item_type)\n+\t\t\t\tMLX5_RTE_FLOW_ITEM_TYPE_TAG,\n+\t\t\t.spec = &reg_c0_spec,\n+\t\t\t.mask = &reg_c0_mask,\n+\t\t},\n \t\t{\n \t\t\t.type = (enum rte_flow_item_type)\n \t\t\t\tMLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,\n@@ -3100,6 +3457,12 @@ flow_hw_create_ctrl_sq_pattern_template(struct rte_eth_dev *dev)\n \t\t},\n \t};\n \n+\tif (!marker_bit) {\n+\t\tDRV_LOG(ERR, \"Unable to set up pattern template for SQ miss table\");\n+\t\treturn NULL;\n+\t}\n+\treg_c0_spec.data = marker_bit;\n+\treg_c0_mask.data = marker_bit;\n \treturn flow_hw_pattern_template_create(dev, &attr, items, NULL);\n }\n \n@@ -3137,6 +3500,132 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev)\n \treturn flow_hw_pattern_template_create(dev, &attr, items, NULL);\n }\n \n+/*\n+ * Creating a flow pattern template with all ETH packets matching.\n+ * This template is used to set up a table for default Tx copy (Tx metadata\n+ * to REG_C_1) flow rule usage.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   Pointer to flow pattern template on success, NULL otherwise.\n+ */\n+static struct rte_flow_pattern_template *\n+flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_flow_pattern_template_attr tx_pa_attr = {\n+\t\t.relaxed_matching = 0,\n+\t\t.egress = 1,\n+\t};\n+\tstruct rte_flow_item_eth promisc = {\n+\t\t.dst.addr_bytes = \"\\x00\\x00\\x00\\x00\\x00\\x00\",\n+\t\t.src.addr_bytes = \"\\x00\\x00\\x00\\x00\\x00\\x00\",\n+\t\t.type = 0,\n+\t};\n+\tstruct rte_flow_item eth_all[] = {\n+\t\t[0] = {\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n+\t\t\t.spec = &promisc,\n+\t\t\t.mask = &promisc,\n+\t\t},\n+\t\t[1] = {\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t};\n+\tstruct rte_flow_error drop_err;\n+\n+\tRTE_SET_USED(drop_err);\n+\treturn flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, &drop_err);\n+}\n+\n+/**\n+ * Creates a flow actions template with modify field action and masked jump 
action.\n+ * Modify field action sets the least significant bit of REG_C_0 (usable by user-space)\n+ * to 1, meaning that packet was originated from E-Switch Manager. Jump action\n+ * transfers steering to group 1.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   Pointer to flow actions template on success, NULL otherwise.\n+ */\n+static struct rte_flow_actions_template *\n+flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tuint32_t marker_bit = flow_hw_usable_lsb_vport_mask(priv);\n+\tuint32_t marker_bit_mask = UINT32_MAX;\n+\tstruct rte_flow_actions_template_attr attr = {\n+\t\t.transfer = 1,\n+\t};\n+\tstruct rte_flow_action_modify_field set_reg_v = {\n+\t\t.operation = RTE_FLOW_MODIFY_SET,\n+\t\t.dst = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = REG_C_0,\n+\t\t},\n+\t\t.src = {\n+\t\t\t.field = RTE_FLOW_FIELD_VALUE,\n+\t\t},\n+\t\t.width = 1,\n+\t};\n+\tstruct rte_flow_action_modify_field set_reg_m = {\n+\t\t.operation = RTE_FLOW_MODIFY_SET,\n+\t\t.dst = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = UINT32_MAX,\n+\t\t\t.offset = UINT32_MAX,\n+\t\t},\n+\t\t.src = {\n+\t\t\t.field = RTE_FLOW_FIELD_VALUE,\n+\t\t},\n+\t\t.width = UINT32_MAX,\n+\t};\n+\tstruct rte_flow_action_jump jump_v = {\n+\t\t.group = MLX5_HW_LOWEST_USABLE_GROUP,\n+\t};\n+\tstruct rte_flow_action_jump jump_m = {\n+\t\t.group = UINT32_MAX,\n+\t};\n+\tstruct rte_flow_action actions_v[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,\n+\t\t\t.conf = &set_reg_v,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_JUMP,\n+\t\t\t.conf = &jump_v,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t}\n+\t};\n+\tstruct rte_flow_action actions_m[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,\n+\t\t\t.conf = &set_reg_m,\n+\t\t},\n+\t\t{\n+\t\t\t.type = 
RTE_FLOW_ACTION_TYPE_JUMP,\n+\t\t\t.conf = &jump_m,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t}\n+\t};\n+\n+\tif (!marker_bit) {\n+\t\tDRV_LOG(ERR, \"Unable to set up actions template for SQ miss table\");\n+\t\treturn NULL;\n+\t}\n+\tset_reg_v.dst.offset = rte_bsf32(marker_bit);\n+\trte_memcpy(set_reg_v.src.value, &marker_bit, sizeof(marker_bit));\n+\trte_memcpy(set_reg_m.src.value, &marker_bit_mask, sizeof(marker_bit_mask));\n+\treturn flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, NULL);\n+}\n+\n /**\n  * Creates a flow actions template with an unmasked JUMP action. Flows\n  * based on this template will perform a jump to some group. This template\n@@ -3231,6 +3720,73 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev)\n \t\t\t\t\t       NULL);\n }\n \n+/*\n+ * Creating an actions template to use header modify action for register\n+ * copying. This template is used to set up a table for copy flow.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   Pointer to flow actions template on success, NULL otherwise.\n+ */\n+static struct rte_flow_actions_template *\n+flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_flow_actions_template_attr tx_act_attr = {\n+\t\t.egress = 1,\n+\t};\n+\tconst struct rte_flow_action_modify_field mreg_action = {\n+\t\t.operation = RTE_FLOW_MODIFY_SET,\n+\t\t.dst = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = REG_C_1,\n+\t\t},\n+\t\t.src = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = REG_A,\n+\t\t},\n+\t\t.width = 32,\n+\t};\n+\tconst struct rte_flow_action_modify_field mreg_mask = {\n+\t\t.operation = RTE_FLOW_MODIFY_SET,\n+\t\t.dst = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = UINT32_MAX,\n+\t\t\t.offset = UINT32_MAX,\n+\t\t},\n+\t\t.src = {\n+\t\t\t.field = (enum 
rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = UINT32_MAX,\n+\t\t\t.offset = UINT32_MAX,\n+\t\t},\n+\t\t.width = UINT32_MAX,\n+\t};\n+\tconst struct rte_flow_action copy_reg_action[] = {\n+\t\t[0] = {\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,\n+\t\t\t.conf = &mreg_action,\n+\t\t},\n+\t\t[1] = {\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t},\n+\t};\n+\tconst struct rte_flow_action copy_reg_mask[] = {\n+\t\t[0] = {\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,\n+\t\t\t.conf = &mreg_mask,\n+\t\t},\n+\t\t[1] = {\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t},\n+\t};\n+\tstruct rte_flow_error drop_err;\n+\n+\tRTE_SET_USED(drop_err);\n+\treturn flow_hw_actions_template_create(dev, &tx_act_attr, copy_reg_action,\n+\t\t\t\t\t       copy_reg_mask, &drop_err);\n+}\n+\n /**\n  * Creates a control flow table used to transfer traffic from E-Switch Manager\n  * and TX queues from group 0 to group 1.\n@@ -3260,8 +3816,12 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,\n \t\t},\n \t\t.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,\n \t};\n+\tstruct mlx5_flow_template_table_cfg cfg = {\n+\t\t.attr = attr,\n+\t\t.external = false,\n+\t};\n \n-\treturn flow_hw_table_create(dev, &attr, &it, 1, &at, 1, NULL);\n+\treturn flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, NULL);\n }\n \n \n@@ -3286,16 +3846,56 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,\n {\n \tstruct rte_flow_template_table_attr attr = {\n \t\t.flow_attr = {\n-\t\t\t.group = MLX5_HW_SQ_MISS_GROUP,\n-\t\t\t.priority = 0,\n+\t\t\t.group = 1,\n+\t\t\t.priority = MLX5_HW_LOWEST_PRIO_NON_ROOT,\n \t\t\t.ingress = 0,\n \t\t\t.egress = 0,\n \t\t\t.transfer = 1,\n \t\t},\n \t\t.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,\n \t};\n+\tstruct mlx5_flow_template_table_cfg cfg = {\n+\t\t.attr = attr,\n+\t\t.external = false,\n+\t};\n+\n+\treturn flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, NULL);\n+}\n+\n+/*\n+ * Creating the default Tx metadata copy table on NIC Tx group 
0.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param pt\n+ *   Pointer to flow pattern template.\n+ * @param at\n+ *   Pointer to flow actions template.\n+ *\n+ * @return\n+ *   Pointer to flow table on success, NULL otherwise.\n+ */\n+static struct rte_flow_template_table*\n+flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,\n+\t\t\t\t\t  struct rte_flow_pattern_template *pt,\n+\t\t\t\t\t  struct rte_flow_actions_template *at)\n+{\n+\tstruct rte_flow_template_table_attr tx_tbl_attr = {\n+\t\t.flow_attr = {\n+\t\t\t.group = 0, /* Root */\n+\t\t\t.priority = MLX5_HW_LOWEST_PRIO_ROOT,\n+\t\t\t.egress = 1,\n+\t\t},\n+\t\t.nb_flows = 1, /* One default flow rule for all. */\n+\t};\n+\tstruct mlx5_flow_template_table_cfg tx_tbl_cfg = {\n+\t\t.attr = tx_tbl_attr,\n+\t\t.external = false,\n+\t};\n+\tstruct rte_flow_error drop_err;\n \n-\treturn flow_hw_table_create(dev, &attr, &it, 1, &at, 1, NULL);\n+\tRTE_SET_USED(drop_err);\n+\treturn flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, &drop_err);\n }\n \n /**\n@@ -3320,15 +3920,19 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,\n \tstruct rte_flow_template_table_attr attr = {\n \t\t.flow_attr = {\n \t\t\t.group = 0,\n-\t\t\t.priority = 15, /* TODO: Flow priority discovery. 
*/\n+\t\t\t.priority = MLX5_HW_LOWEST_PRIO_ROOT,\n \t\t\t.ingress = 0,\n \t\t\t.egress = 0,\n \t\t\t.transfer = 1,\n \t\t},\n \t\t.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,\n \t};\n+\tstruct mlx5_flow_template_table_cfg cfg = {\n+\t\t.attr = attr,\n+\t\t.external = false,\n+\t};\n \n-\treturn flow_hw_table_create(dev, &attr, &it, 1, &at, 1, NULL);\n+\treturn flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, NULL);\n }\n \n /**\n@@ -3346,11 +3950,14 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL;\n-\tstruct rte_flow_pattern_template *sq_items_tmpl = NULL;\n+\tstruct rte_flow_pattern_template *regc_sq_items_tmpl = NULL;\n \tstruct rte_flow_pattern_template *port_items_tmpl = NULL;\n-\tstruct rte_flow_actions_template *jump_sq_actions_tmpl = NULL;\n+\tstruct rte_flow_pattern_template *tx_meta_items_tmpl = NULL;\n+\tstruct rte_flow_actions_template *regc_jump_actions_tmpl = NULL;\n \tstruct rte_flow_actions_template *port_actions_tmpl = NULL;\n \tstruct rte_flow_actions_template *jump_one_actions_tmpl = NULL;\n+\tstruct rte_flow_actions_template *tx_meta_actions_tmpl = NULL;\n+\tuint32_t xmeta = priv->sh->config.dv_xmeta_en;\n \n \t/* Item templates */\n \tesw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev);\n@@ -3359,8 +3966,8 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)\n \t\t\t\" template for control flows\", dev->data->port_id);\n \t\tgoto error;\n \t}\n-\tsq_items_tmpl = flow_hw_create_ctrl_sq_pattern_template(dev);\n-\tif (!sq_items_tmpl) {\n+\tregc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev);\n+\tif (!regc_sq_items_tmpl) {\n \t\tDRV_LOG(ERR, \"port %u failed to create SQ item template for\"\n \t\t\t\" control flows\", dev->data->port_id);\n \t\tgoto error;\n@@ -3371,11 +3978,18 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)\n \t\t\t\" control flows\", dev->data->port_id);\n \t\tgoto 
error;\n \t}\n+\tif (xmeta == MLX5_XMETA_MODE_META32_HWS) {\n+\t\ttx_meta_items_tmpl = flow_hw_create_tx_default_mreg_copy_pattern_template(dev);\n+\t\tif (!tx_meta_items_tmpl) {\n+\t\t\tDRV_LOG(ERR, \"port %u failed to Tx metadata copy pattern\"\n+\t\t\t\t\" template for control flows\", dev->data->port_id);\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n \t/* Action templates */\n-\tjump_sq_actions_tmpl = flow_hw_create_ctrl_jump_actions_template(dev,\n-\t\t\t\t\t\t\t\t\t MLX5_HW_SQ_MISS_GROUP);\n-\tif (!jump_sq_actions_tmpl) {\n-\t\tDRV_LOG(ERR, \"port %u failed to create jump action template\"\n+\tregc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev);\n+\tif (!regc_jump_actions_tmpl) {\n+\t\tDRV_LOG(ERR, \"port %u failed to create REG_C set and jump action template\"\n \t\t\t\" for control flows\", dev->data->port_id);\n \t\tgoto error;\n \t}\n@@ -3385,23 +3999,32 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)\n \t\t\t\" for control flows\", dev->data->port_id);\n \t\tgoto error;\n \t}\n-\tjump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template(dev, 1);\n+\tjump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template\n+\t\t\t(dev, MLX5_HW_LOWEST_USABLE_GROUP);\n \tif (!jump_one_actions_tmpl) {\n \t\tDRV_LOG(ERR, \"port %u failed to create jump action template\"\n \t\t\t\" for control flows\", dev->data->port_id);\n \t\tgoto error;\n \t}\n+\tif (xmeta == MLX5_XMETA_MODE_META32_HWS) {\n+\t\ttx_meta_actions_tmpl = flow_hw_create_tx_default_mreg_copy_actions_template(dev);\n+\t\tif (!tx_meta_actions_tmpl) {\n+\t\t\tDRV_LOG(ERR, \"port %u failed to Tx metadata copy actions\"\n+\t\t\t\t\" template for control flows\", dev->data->port_id);\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n \t/* Tables */\n \tMLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL);\n \tpriv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table\n-\t\t\t(dev, esw_mgr_items_tmpl, jump_sq_actions_tmpl);\n+\t\t\t(dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl);\n 
\tif (!priv->hw_esw_sq_miss_root_tbl) {\n \t\tDRV_LOG(ERR, \"port %u failed to create table for default sq miss (root table)\"\n \t\t\t\" for control flows\", dev->data->port_id);\n \t\tgoto error;\n \t}\n \tMLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL);\n-\tpriv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, sq_items_tmpl,\n+\tpriv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl,\n \t\t\t\t\t\t\t\t     port_actions_tmpl);\n \tif (!priv->hw_esw_sq_miss_tbl) {\n \t\tDRV_LOG(ERR, \"port %u failed to create table for default sq miss (non-root table)\"\n@@ -3416,6 +4039,16 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)\n \t\t\t\" for control flows\", dev->data->port_id);\n \t\tgoto error;\n \t}\n+\tif (xmeta == MLX5_XMETA_MODE_META32_HWS) {\n+\t\tMLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL);\n+\t\tpriv->hw_tx_meta_cpy_tbl = flow_hw_create_tx_default_mreg_copy_table(dev,\n+\t\t\t\t\ttx_meta_items_tmpl, tx_meta_actions_tmpl);\n+\t\tif (!priv->hw_tx_meta_cpy_tbl) {\n+\t\t\tDRV_LOG(ERR, \"port %u failed to create table for default\"\n+\t\t\t\t\" Tx metadata copy flow rule\", dev->data->port_id);\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n \treturn 0;\n error:\n \tif (priv->hw_esw_zero_tbl) {\n@@ -3430,16 +4063,20 @@ flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)\n \t\tflow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL);\n \t\tpriv->hw_esw_sq_miss_root_tbl = NULL;\n \t}\n+\tif (xmeta == MLX5_XMETA_MODE_META32_HWS && tx_meta_actions_tmpl)\n+\t\tflow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL);\n \tif (jump_one_actions_tmpl)\n \t\tflow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL);\n \tif (port_actions_tmpl)\n \t\tflow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL);\n-\tif (jump_sq_actions_tmpl)\n-\t\tflow_hw_actions_template_destroy(dev, jump_sq_actions_tmpl, NULL);\n+\tif (regc_jump_actions_tmpl)\n+\t\tflow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, 
NULL);\n+\tif (xmeta == MLX5_XMETA_MODE_META32_HWS && tx_meta_items_tmpl)\n+\t\tflow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL);\n \tif (port_items_tmpl)\n \t\tflow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL);\n-\tif (sq_items_tmpl)\n-\t\tflow_hw_pattern_template_destroy(dev, sq_items_tmpl, NULL);\n+\tif (regc_sq_items_tmpl)\n+\t\tflow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL);\n \tif (esw_mgr_items_tmpl)\n \t\tflow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL);\n \treturn -EINVAL;\n@@ -3491,7 +4128,7 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \tstruct rte_flow_queue_attr **_queue_attr = NULL;\n \tstruct rte_flow_queue_attr ctrl_queue_attr = {0};\n \tbool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);\n-\tint ret;\n+\tint ret = 0;\n \n \tif (!port_attr || !nb_queue || !queue_attr) {\n \t\trte_errno = EINVAL;\n@@ -3642,6 +4279,9 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t}\n \tif (_queue_attr)\n \t\tmlx5_free(_queue_attr);\n+\t/* Do not overwrite the internal errno information. 
*/\n+\tif (ret)\n+\t\treturn ret;\n \treturn rte_flow_error_set(error, rte_errno,\n \t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t  \"fail to configure port\");\n@@ -3751,17 +4391,17 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev)\n \t\treturn;\n \tunset |= 1 << (priv->mtr_color_reg - REG_C_0);\n \tunset |= 1 << (REG_C_6 - REG_C_0);\n-\tif (meta_mode == MLX5_XMETA_MODE_META32_HWS) {\n-\t\tunset |= 1 << (REG_C_1 - REG_C_0);\n+\tif (priv->sh->config.dv_esw_en)\n \t\tunset |= 1 << (REG_C_0 - REG_C_0);\n-\t}\n+\tif (meta_mode == MLX5_XMETA_MODE_META32_HWS)\n+\t\tunset |= 1 << (REG_C_1 - REG_C_0);\n \tmasks &= ~unset;\n \tif (mlx5_flow_hw_avl_tags_init_cnt) {\n \t\tfor (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) {\n \t\t\tif (mlx5_flow_hw_avl_tags[i] != REG_NON && !!((1 << i) & masks)) {\n \t\t\t\tcopy[mlx5_flow_hw_avl_tags[i] - REG_C_0] =\n \t\t\t\t\t\tmlx5_flow_hw_avl_tags[i];\n-\t\t\t\tcopy_masks |= (1 << i);\n+\t\t\t\tcopy_masks |= (1 << (mlx5_flow_hw_avl_tags[i] - REG_C_0));\n \t\t\t}\n \t\t}\n \t\tif (copy_masks != masks) {\n@@ -3903,7 +4543,6 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,\n \treturn flow_dv_action_destroy(dev, handle, error);\n }\n \n-\n const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.info_get = flow_hw_info_get,\n \t.configure = flow_hw_configure,\n@@ -3911,7 +4550,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.pattern_template_destroy = flow_hw_pattern_template_destroy,\n \t.actions_template_create = flow_hw_actions_template_create,\n \t.actions_template_destroy = flow_hw_actions_template_destroy,\n-\t.template_table_create = flow_hw_table_create,\n+\t.template_table_create = flow_hw_template_table_create,\n \t.template_table_destroy = flow_hw_table_destroy,\n \t.async_flow_create = flow_hw_async_flow_create,\n \t.async_flow_destroy = flow_hw_async_flow_destroy,\n@@ -3927,13 +4566,6 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.action_query = 
flow_dv_action_query,\n };\n \n-static uint32_t\n-flow_hw_get_ctrl_queue(struct mlx5_priv *priv)\n-{\n-\tMLX5_ASSERT(priv->nb_queue > 0);\n-\treturn priv->nb_queue - 1;\n-}\n-\n /**\n  * Creates a control flow using flow template API on @p proxy_dev device,\n  * on behalf of @p owner_dev device.\n@@ -3971,7 +4603,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,\n \t\t\t uint8_t action_template_idx)\n {\n \tstruct mlx5_priv *priv = proxy_dev->data->dev_private;\n-\tuint32_t queue = flow_hw_get_ctrl_queue(priv);\n+\tuint32_t queue = priv->nb_queue - 1;\n \tstruct rte_flow_op_attr op_attr = {\n \t\t.postpone = 0,\n \t};\n@@ -4046,7 +4678,7 @@ static int\n flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tuint32_t queue = flow_hw_get_ctrl_queue(priv);\n+\tuint32_t queue = priv->nb_queue - 1;\n \tstruct rte_flow_op_attr op_attr = {\n \t\t.postpone = 0,\n \t};\n@@ -4183,10 +4815,24 @@ mlx5_flow_hw_esw_create_mgr_sq_miss_flow(struct rte_eth_dev *dev)\n \t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n \t\t},\n \t};\n+\tstruct rte_flow_action_modify_field modify_field = {\n+\t\t.operation = RTE_FLOW_MODIFY_SET,\n+\t\t.dst = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t},\n+\t\t.src = {\n+\t\t\t.field = RTE_FLOW_FIELD_VALUE,\n+\t\t},\n+\t\t.width = 1,\n+\t};\n \tstruct rte_flow_action_jump jump = {\n-\t\t.group = MLX5_HW_SQ_MISS_GROUP,\n+\t\t.group = 1,\n \t};\n \tstruct rte_flow_action actions[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,\n+\t\t\t.conf = &modify_field,\n+\t\t},\n \t\t{\n \t\t\t.type = RTE_FLOW_ACTION_TYPE_JUMP,\n \t\t\t.conf = &jump,\n@@ -4209,6 +4855,12 @@ int\n mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)\n {\n \tuint16_t port_id = dev->data->port_id;\n+\tstruct rte_flow_item_tag reg_c0_spec = {\n+\t\t.index = (uint8_t)REG_C_0,\n+\t};\n+\tstruct rte_flow_item_tag reg_c0_mask = 
{\n+\t\t.index = 0xff,\n+\t};\n \tstruct mlx5_rte_flow_item_tx_queue queue_spec = {\n \t\t.queue = txq,\n \t};\n@@ -4216,6 +4868,12 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)\n \t\t.queue = UINT32_MAX,\n \t};\n \tstruct rte_flow_item items[] = {\n+\t\t{\n+\t\t\t.type = (enum rte_flow_item_type)\n+\t\t\t\tMLX5_RTE_FLOW_ITEM_TYPE_TAG,\n+\t\t\t.spec = &reg_c0_spec,\n+\t\t\t.mask = &reg_c0_mask,\n+\t\t},\n \t\t{\n \t\t\t.type = (enum rte_flow_item_type)\n \t\t\t\tMLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,\n@@ -4241,6 +4899,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)\n \tstruct rte_eth_dev *proxy_dev;\n \tstruct mlx5_priv *proxy_priv;\n \tuint16_t proxy_port_id = dev->data->port_id;\n+\tuint32_t marker_bit;\n \tint ret;\n \n \tRTE_SET_USED(txq);\n@@ -4261,6 +4920,14 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)\n \t\trte_errno = ENOMEM;\n \t\treturn -rte_errno;\n \t}\n+\tmarker_bit = flow_hw_usable_lsb_vport_mask(proxy_priv);\n+\tif (!marker_bit) {\n+\t\tDRV_LOG(ERR, \"Unable to set up control flow in SQ miss table\");\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\treg_c0_spec.data = marker_bit;\n+\treg_c0_mask.data = marker_bit;\n \treturn flow_hw_create_ctrl_flow(dev, proxy_dev,\n \t\t\t\t\tproxy_priv->hw_esw_sq_miss_tbl,\n \t\t\t\t\titems, 0, actions, 0);\n@@ -4320,4 +4987,53 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)\n \t\t\t\t\titems, 0, actions, 0);\n }\n \n+int\n+mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct rte_flow_item_eth promisc = {\n+\t\t.dst.addr_bytes = \"\\x00\\x00\\x00\\x00\\x00\\x00\",\n+\t\t.src.addr_bytes = \"\\x00\\x00\\x00\\x00\\x00\\x00\",\n+\t\t.type = 0,\n+\t};\n+\tstruct rte_flow_item eth_all[] = {\n+\t\t[0] = {\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n+\t\t\t.spec = &promisc,\n+\t\t\t.mask = 
&promisc,\n+\t\t},\n+\t\t[1] = {\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t};\n+\tstruct rte_flow_action_modify_field mreg_action = {\n+\t\t.operation = RTE_FLOW_MODIFY_SET,\n+\t\t.dst = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = REG_C_1,\n+\t\t},\n+\t\t.src = {\n+\t\t\t.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,\n+\t\t\t.level = REG_A,\n+\t\t},\n+\t\t.width = 32,\n+\t};\n+\tstruct rte_flow_action copy_reg_action[] = {\n+\t\t[0] = {\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,\n+\t\t\t.conf = &mreg_action,\n+\t\t},\n+\t\t[1] = {\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t},\n+\t};\n+\n+\tMLX5_ASSERT(priv->master);\n+\tif (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl)\n+\t\treturn 0;\n+\treturn flow_hw_create_ctrl_flow(dev, dev,\n+\t\t\t\t\tpriv->hw_tx_meta_cpy_tbl,\n+\t\t\t\t\teth_all, 0, copy_reg_action, 0);\n+}\n+\n #endif\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex 6313602a66..ccefebefc9 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -1292,6 +1292,9 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)\n \tif (priv->sh->config.dv_esw_en && priv->master) {\n \t\tif (mlx5_flow_hw_esw_create_mgr_sq_miss_flow(dev))\n \t\t\tgoto error;\n+\t\tif (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS)\n+\t\t\tif (mlx5_flow_hw_create_tx_default_mreg_copy_flow(dev))\n+\t\t\t\tgoto error;\n \t}\n \tfor (i = 0; i < priv->txqs_n; ++i) {\n \t\tstruct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);\n",
    "prefixes": [
        "v3",
        "06/17"
    ]
}