get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (replaces the writable fields).

GET /api/patches/117005/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 117005,
    "url": "http://patches.dpdk.org/api/patches/117005/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220928033130.9106-6-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220928033130.9106-6-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220928033130.9106-6-suanmingm@nvidia.com",
    "date": "2022-09-28T03:31:18",
    "name": "[v2,05/17] net/mlx5: add HW steering port action",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b1ef91280d9e9f0efde86225e6715b0fc41517ef",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220928033130.9106-6-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 24870,
            "url": "http://patches.dpdk.org/api/series/24870/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=24870",
            "date": "2022-09-28T03:31:15",
            "name": "net/mlx5: HW steering PMD update",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/24870/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/117005/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/117005/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B6247A00C2;\n\tWed, 28 Sep 2022 05:32:57 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id DEF9142B85;\n\tWed, 28 Sep 2022 05:32:22 +0200 (CEST)",
            "from NAM11-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam11on2057.outbound.protection.outlook.com [40.107.223.57])\n by mails.dpdk.org (Postfix) with ESMTP id C74AC42B83\n for <dev@dpdk.org>; Wed, 28 Sep 2022 05:32:20 +0200 (CEST)",
            "from BN0PR04CA0012.namprd04.prod.outlook.com (2603:10b6:408:ee::17)\n by PH8PR12MB6723.namprd12.prod.outlook.com (2603:10b6:510:1ce::12)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5654.26; Wed, 28 Sep\n 2022 03:32:16 +0000",
            "from BL02EPF0000C407.namprd05.prod.outlook.com\n (2603:10b6:408:ee:cafe::a) by BN0PR04CA0012.outlook.office365.com\n (2603:10b6:408:ee::17) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5676.15 via Frontend\n Transport; Wed, 28 Sep 2022 03:32:16 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n BL02EPF0000C407.mail.protection.outlook.com (10.167.241.9) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.5676.13 via Frontend Transport; Wed, 28 Sep 2022 03:32:15 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.26; Tue, 27 Sep\n 2022 20:31:59 -0700",
            "from nvidia.com (10.126.230.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.29; Tue, 27 Sep\n 2022 20:31:56 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=J5+ZZ2ZFwwThkyHejfn+S2FkCM5CaBo+rZtZHiiDKqsxb2hWVlOrPZwgQ8Y1XMU2MHBDJznnKPguIm4Tz44A42tcW+/L9zi0Dx1+S7Z7XogvT54KqIjhiYcoDcIYibIq830zl/e3ZxkM5D7/eHtDOZznkqKBJqjtvp0plOgesPrDuIRSWKdNcXfl9bx869bzLlub53pZrufmIjBVjjUslvXDBBmy1Gn82le9Zv1cKMXrXS4vlhPn/8lxwU8T/cCM3U3IZnJ3XNk6dVvUwlsMF9beUvV6o0UqiroLatr0775D8Fc7fuFzQNws2dpRk/wYZ1rJ0DGxbBQmY75ldVCh8Q==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=1pbiFSXgXwGNkxeI/WMyV1jHpmJXifSn8Bo0Qqn5wjA=;\n b=ZTXUmcN0b3PILRWeKF9lntHK2C550xWW6mNp4Psi6/LmrWiYHkACytFaillEfL0Lg09G0EPiU5GwDhHKlgD1HibiVVhxeyhkYts43BdWM1VtI3ZzaGXXJDUVarZW8l8cnUyW7/8frv9JlAitjkmHs9hzeJ5yWFBdkmAiZlIww8UtACQdyNwWI+27hcCGaxJ09IYcRBa1GPaqXqEDVIw3LB52RweG8NvRV/03hZo7amv3OF7m/buds2AQ0uclXjA3pZ2OryH5i+Z51GJGDc9avWqca9m9Lr7lBuuRNt+9xLJPa+YQ7caw/bpNT4WLIHCj308t2KnLaEQkrCqqnms3Sg==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=1pbiFSXgXwGNkxeI/WMyV1jHpmJXifSn8Bo0Qqn5wjA=;\n b=R+HA682Di1xos4usyhqG2+PpoZHaQrgtO0IS0UJAyJJFqtg3Xy8dWPZjyVGsu+zqBVmt5iEl3PgYf4pCZxVzoHDJbxKljggy3iEcvkha0Isoeudo4ULQ7AnkOwPslAR4KaGBZ5yjz6aclC3Jl0B1/ezclwx7N4xA9AGt4qKjNYJh8aPBWp8TN1zIN/62YoxLU11GYDNTJHLyOwu0vJjIi2e2OstzzlJdLvWNUoAfg84mgV7TbbYcEp4zA4UMZQqgBXEadCByQjZWlCWdjnEnLgMa5NvUPxZ981hTCeW3HaaZ/JVTe7Zrq+u8x7L0fkWUOqRNBMayyb3gACUdWpA69w==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "Matan Azrad <matan@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>",
        "CC": "<dev@dpdk.org>, <rasland@nvidia.com>, <orika@nvidia.com>, \"Dariusz\n Sosnowski\" <dsosnowski@nvidia.com>",
        "Subject": "[PATCH v2 05/17] net/mlx5: add HW steering port action",
        "Date": "Wed, 28 Sep 2022 06:31:18 +0300",
        "Message-ID": "<20220928033130.9106-6-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220928033130.9106-1-suanmingm@nvidia.com>",
        "References": "<20220923144334.27736-1-suanmingm@nvidia.com>\n <20220928033130.9106-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.230.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "BL02EPF0000C407:EE_|PH8PR12MB6723:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "612b171d-9e98-4b93-15fe-08daa1020a49",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n 14KI/iaG51PkUaF6EeJowQrf+/VqgGVXS/95CzMqV/TVxF/QuLUyPuGx4EVppL13izqSfi/T1NJj8ZauXPmLGsZRu/Ocl3YZsPkXTsIPlRCb2NEwSUrty7aMmrT/dwbCBvlHpqdHfU9DEW10Dh8iMEk8KjquNd4GYjECm2oq6oaSiRK7BBCmI4irDO0ja8ZWvcuPIjYPmrP10yIPj6SvnfxU5YoAAmZTqcIL7p9HLf68h4QmWsfH1PPTroZLRqCWRVSDn0r5NTa/XEpwIoyYzXotrjxiW3yUYbfPhiBiRJmar0Geb4QFXg2c8wJTWP58eS7Et5RVb5jHSct13/U8nh0kVrOooHI5p2+0qlzGWX7Yp2DkJvMGEWtEsPTIESGy0yD7NgZZPdHgkoarWlxo4zHnWgIbLEDCgFBIoSKCjSYHcgDjbxHPDroGNkYC14Dow/MB6guVIypT434Og5BKEUs8xj+5XwEW+/G9kWJtBcfEcf/0Wen3vAMJtrlRWZYeApnqV3s0EQA1GD+Vp0lwuV0OCO55t53JxsgzM9a1qmJu4tUcbv36KRNG1ufDdgVU+OhHkrplIq8CiLWg3Q2uMjeZx8ZNSd44c/3dJDktoa7YZjaXha4XjWtglz6FA7QfFmr3JWj+cX79ylXcJEgeTqtxmeSW/r5rptPyDwkezM9A9PoHDmSFgKus8a7mEPKHJ3mqFr8Njfh2FLDkyR3qz1XaxfBs2qY7KoEIg/9dFCHpjtFd4g8awN/+u32QB/6oO4q12oohM3zL+RddQ51sULBPX0eKyw1/v7muB/tRQI4=",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230022)(4636009)(136003)(376002)(39860400002)(346002)(396003)(451199015)(40470700004)(46966006)(36840700001)(30864003)(478600001)(36756003)(110136005)(186003)(8936002)(54906003)(107886003)(6636002)(41300700001)(7696005)(316002)(83380400001)(6666004)(1076003)(70206006)(5660300002)(8676002)(26005)(4326008)(6286002)(70586007)(2616005)(36860700001)(40460700003)(2906002)(55016003)(16526019)(40480700001)(426003)(336012)(47076005)(86362001)(82740400003)(82310400005)(356005)(7636003)(579004)(559001)(309714004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "28 Sep 2022 03:32:15.8727 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 612b171d-9e98-4b93-15fe-08daa1020a49",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BL02EPF0000C407.namprd05.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PH8PR12MB6723",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Dariusz Sosnowski <dsosnowski@nvidia.com>\n\nThis patch implements creating and caching of port actions for use with\nHW Steering FDB flows.\n\nActions are created on flow template API configuration and created\nonly on the port designated as master. Attaching and detaching of ports\nin the same switching domain causes an update to the port actions cache\nby, respectively, creating and destroying actions.\n\nA new devarg fdb_def_rule_en is being added and it's used to control\nthe default dedicated E-Switch rule is created by PMD implicitly or not,\nand PMD sets this value to 1 by default.\nIf set to 0, the default E-Switch rule will not be created and user can\ncreate the specific E-Switch rule on root table if needed.\n\nSigned-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>\n---\n doc/guides/nics/mlx5.rst           |    9 +\n drivers/net/mlx5/linux/mlx5_os.c   |   14 +\n drivers/net/mlx5/mlx5.c            |   14 +\n drivers/net/mlx5/mlx5.h            |   26 +-\n drivers/net/mlx5/mlx5_flow.c       |   96 +-\n drivers/net/mlx5/mlx5_flow.h       |   22 +-\n drivers/net/mlx5/mlx5_flow_dv.c    |   93 +-\n drivers/net/mlx5/mlx5_flow_hw.c    | 1356 +++++++++++++++++++++++++++-\n drivers/net/mlx5/mlx5_flow_verbs.c |    4 +-\n drivers/net/mlx5/mlx5_trigger.c    |   77 +-\n 10 files changed, 1594 insertions(+), 117 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst\nindex 631f0840eb..c42ac482d8 100644\n--- a/doc/guides/nics/mlx5.rst\n+++ b/doc/guides/nics/mlx5.rst\n@@ -1118,6 +1118,15 @@ for an additional list of options shared with other mlx5 drivers.\n \n   By default, the PMD will set this value to 1.\n \n+- ``fdb_def_rule_en`` parameter [int]\n+\n+  A non-zero value enables the PMD to create a dedicated rule on E-Switch root\n+  table, this dedicated rule forwards all incoming packets into table 1, other\n+  rules will be created in E-Switch table original table level plus one, to\n+  improve the flow insertion rate due to skip root table managed by firmware.\n+  If set to 0, all rules will be created on the original E-Switch table level.\n+\n+  By default, the PMD will set this value to 1.\n \n Supported NICs\n --------------\ndiff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex b7cc11a2ef..e0586a4d6f 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -1556,6 +1556,13 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \t\t\tflow_hw_set_port_info(eth_dev);\n \t\t/* Only HWS requires this information. 
*/\n \t\tflow_hw_init_tags_set(eth_dev);\n+\t\tif (priv->sh->config.dv_esw_en &&\n+\t\t    flow_hw_create_vport_action(eth_dev)) {\n+\t\t\tDRV_LOG(ERR, \"port %u failed to create vport action\",\n+\t\t\t\teth_dev->data->port_id);\n+\t\t\terr = EINVAL;\n+\t\t\tgoto error;\n+\t\t}\n \t\treturn eth_dev;\n #else\n \t\tDRV_LOG(ERR, \"DV support is missing for HWS.\");\n@@ -1620,6 +1627,13 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \treturn eth_dev;\n error:\n \tif (priv) {\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+\t\tif (eth_dev &&\n+\t\t    priv->sh &&\n+\t\t    priv->sh->config.dv_flow_en == 2 &&\n+\t\t    priv->sh->config.dv_esw_en)\n+\t\t\tflow_hw_destroy_vport_action(eth_dev);\n+#endif\n \t\tif (priv->mreg_cp_tbl)\n \t\t\tmlx5_hlist_destroy(priv->mreg_cp_tbl);\n \t\tif (priv->sh)\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex b39ef1ecbe..74adb677f4 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -172,6 +172,9 @@\n /* Device parameter to configure the delay drop when creating Rxqs. */\n #define MLX5_DELAY_DROP \"delay_drop\"\n \n+/* Device parameter to create the fdb default rule in PMD */\n+#define MLX5_FDB_DEFAULT_RULE_EN \"fdb_def_rule_en\"\n+\n /* Shared memory between primary and secondary processes. 
*/\n struct mlx5_shared_data *mlx5_shared_data;\n \n@@ -1239,6 +1242,8 @@ mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque)\n \t\tconfig->decap_en = !!tmp;\n \t} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {\n \t\tconfig->allow_duplicate_pattern = !!tmp;\n+\t} else if (strcmp(MLX5_FDB_DEFAULT_RULE_EN, key) == 0) {\n+\t\tconfig->fdb_def_rule = !!tmp;\n \t}\n \treturn 0;\n }\n@@ -1274,6 +1279,7 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,\n \t\tMLX5_RECLAIM_MEM,\n \t\tMLX5_DECAP_EN,\n \t\tMLX5_ALLOW_DUPLICATE_PATTERN,\n+\t\tMLX5_FDB_DEFAULT_RULE_EN,\n \t\tNULL,\n \t};\n \tint ret = 0;\n@@ -1285,6 +1291,7 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,\n \tconfig->dv_flow_en = 1;\n \tconfig->decap_en = 1;\n \tconfig->allow_duplicate_pattern = 1;\n+\tconfig->fdb_def_rule = 1;\n \tif (mkvlist != NULL) {\n \t\t/* Process parameters. */\n \t\tret = mlx5_kvargs_process(mkvlist, params,\n@@ -1360,6 +1367,7 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,\n \tDRV_LOG(DEBUG, \"\\\"decap_en\\\" is %u.\", config->decap_en);\n \tDRV_LOG(DEBUG, \"\\\"allow_duplicate_pattern\\\" is %u.\",\n \t\tconfig->allow_duplicate_pattern);\n+\tDRV_LOG(DEBUG, \"\\\"fdb_def_rule_en\\\" is %u.\", config->fdb_def_rule);\n \treturn 0;\n }\n \n@@ -1943,6 +1951,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \tmlx5_flex_parser_ecpri_release(dev);\n \tmlx5_flex_item_port_cleanup(dev);\n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n+\tflow_hw_destroy_vport_action(dev);\n \tflow_hw_resource_release(dev);\n \tflow_hw_clear_port_info(dev);\n \tif (priv->sh->config.dv_flow_en == 2)\n@@ -2644,6 +2653,11 @@ mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,\n \t\t\tsh->ibdev_name);\n \t\tgoto error;\n \t}\n+\tif (sh->config.fdb_def_rule ^ config->fdb_def_rule) {\n+\t\tDRV_LOG(ERR, \"\\\"fdb_def_rule_en\\\" configuration mismatch for shared %s 
context.\",\n+\t\t\tsh->ibdev_name);\n+\t\tgoto error;\n+\t}\n \tif (sh->config.l3_vxlan_en ^ config->l3_vxlan_en) {\n \t\tDRV_LOG(ERR, \"\\\"l3_vxlan_en\\\" \"\n \t\t\t\"configuration mismatch for shared %s context.\",\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex a93af75baa..84f6937c95 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -309,6 +309,7 @@ struct mlx5_sh_config {\n \tuint32_t allow_duplicate_pattern:1;\n \tuint32_t lro_allowed:1; /* Whether LRO is allowed. */\n \t/* Allow/Prevent the duplicate rules pattern. */\n+\tuint32_t fdb_def_rule:1; /* Create FDB default jump rule */\n };\n \n \n@@ -337,6 +338,8 @@ enum {\n \tMLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */\n };\n \n+#define MLX5_HW_MAX_ITEMS (16)\n+\n /* HW steering flow management job descriptor. */\n struct mlx5_hw_q_job {\n \tuint32_t type; /* Job type. */\n@@ -344,6 +347,8 @@ struct mlx5_hw_q_job {\n \tvoid *user_data; /* Job user data. */\n \tuint8_t *encap_data; /* Encap data. */\n \tstruct mlx5_modification_cmd *mhdr_cmd;\n+\tstruct rte_flow_item *items;\n+\tstruct rte_flow_item_ethdev port_spec;\n };\n \n /* HW steering job descriptor LIFO pool. */\n@@ -1202,6 +1207,8 @@ struct mlx5_dev_ctx_shared {\n \tuint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */\n \tuint32_t metadata_regc_check_flag:1; /* Check Flag for metadata REGC. */\n \tuint32_t hws_tags:1; /* Check if tags info for HWS initialized. */\n+\tuint32_t shared_mark_enabled:1;\n+\t/* If mark action is enabled on Rxqs (shared E-Switch domain). */\n \tuint32_t max_port; /* Maximal IB device port index. */\n \tstruct mlx5_bond_info bond; /* Bonding information. */\n \tstruct mlx5_common_device *cdev; /* Backend mlx5 device. 
*/\n@@ -1450,6 +1457,12 @@ struct mlx5_obj_ops {\n \n #define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)\n \n+struct mlx5_hw_ctrl_flow {\n+\tLIST_ENTRY(mlx5_hw_ctrl_flow) next;\n+\tstruct rte_eth_dev *owner_dev;\n+\tstruct rte_flow *flow;\n+};\n+\n struct mlx5_priv {\n \tstruct rte_eth_dev_data *dev_data;  /* Pointer to device data. */\n \tstruct mlx5_dev_ctx_shared *sh; /* Shared device context. */\n@@ -1490,6 +1503,11 @@ struct mlx5_priv {\n \tunsigned int reta_idx_n; /* RETA index size. */\n \tstruct mlx5_drop drop_queue; /* Flow drop queues. */\n \tvoid *root_drop_action; /* Pointer to root drop action. */\n+\trte_spinlock_t hw_ctrl_lock;\n+\tLIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows;\n+\tstruct rte_flow_template_table *hw_esw_sq_miss_root_tbl;\n+\tstruct rte_flow_template_table *hw_esw_sq_miss_tbl;\n+\tstruct rte_flow_template_table *hw_esw_zero_tbl;\n \tstruct mlx5_indexed_pool *flows[MLX5_FLOW_TYPE_MAXI];\n \t/* RTE Flow rules. */\n \tuint32_t ctrl_flows; /* Control flow rules. */\n@@ -1550,11 +1568,11 @@ struct mlx5_priv {\n \tstruct mlx5_hw_q *hw_q;\n \t/* HW steering rte flow table list header. */\n \tLIST_HEAD(flow_hw_tbl, rte_flow_template_table) flow_hw_tbl;\n+\tstruct mlx5dr_action **hw_vport;\n \t/* HW steering global drop action. */\n-\tstruct mlx5dr_action *hw_drop[MLX5_HW_ACTION_FLAG_MAX]\n-\t\t\t\t     [MLX5DR_TABLE_TYPE_MAX];\n-\t/* HW steering global drop action. */\n-\tstruct mlx5dr_action *hw_tag[MLX5_HW_ACTION_FLAG_MAX];\n+\tstruct mlx5dr_action *hw_drop[2];\n+\t/* HW steering global tag action. */\n+\tstruct mlx5dr_action *hw_tag[2];\n \tstruct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. 
*/\n #endif\n };\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 3abb39aa92..9c44b2e99b 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -999,6 +999,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t.flex_item_create = mlx5_flow_flex_item_create,\n \t.flex_item_release = mlx5_flow_flex_item_release,\n \t.info_get = mlx5_flow_info_get,\n+\t.pick_transfer_proxy = mlx5_flow_pick_transfer_proxy,\n \t.configure = mlx5_flow_port_configure,\n \t.pattern_template_create = mlx5_flow_pattern_template_create,\n \t.pattern_template_destroy = mlx5_flow_pattern_template_destroy,\n@@ -1242,7 +1243,7 @@ mlx5_get_lowest_priority(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \n-\tif (!attr->group && !attr->transfer)\n+\tif (!attr->group && !(attr->transfer && priv->fdb_def_rule))\n \t\treturn priv->sh->flow_max_priority - 2;\n \treturn MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;\n }\n@@ -1269,11 +1270,14 @@ mlx5_get_matcher_priority(struct rte_eth_dev *dev,\n \tuint16_t priority = (uint16_t)attr->priority;\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \n+\t/* NIC root rules */\n \tif (!attr->group && !attr->transfer) {\n \t\tif (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)\n \t\t\tpriority = priv->sh->flow_max_priority - 1;\n \t\treturn mlx5_os_flow_adjust_priority(dev, priority, subpriority);\n-\t} else if (!external && attr->transfer && attr->group == 0 &&\n+\t/* FDB root rules */\n+\t} else if (attr->transfer && (!external || !priv->fdb_def_rule) &&\n+\t\t   attr->group == 0 &&\n \t\t   attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR) {\n \t\treturn (priv->sh->flow_max_priority - 1) * 3;\n \t}\n@@ -1481,13 +1485,32 @@ flow_rxq_mark_flag_set(struct rte_eth_dev *dev)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tuint16_t port_id;\n \n-\tif (priv->mark_enabled)\n+\tif (priv->sh->shared_mark_enabled)\n 
\t\treturn;\n-\tLIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {\n-\t\trxq_ctrl->rxq.mark = 1;\n+\tif (priv->master || priv->representor) {\n+\t\tMLX5_ETH_FOREACH_DEV(port_id, dev->device) {\n+\t\t\tstruct mlx5_priv *opriv =\n+\t\t\t\trte_eth_devices[port_id].data->dev_private;\n+\n+\t\t\tif (!opriv ||\n+\t\t\t    opriv->sh != priv->sh ||\n+\t\t\t    opriv->domain_id != priv->domain_id ||\n+\t\t\t    opriv->mark_enabled)\n+\t\t\t\tcontinue;\n+\t\t\tLIST_FOREACH(rxq_ctrl, &opriv->rxqsctrl, next) {\n+\t\t\t\trxq_ctrl->rxq.mark = 1;\n+\t\t\t}\n+\t\t\topriv->mark_enabled = 1;\n+\t\t}\n+\t} else {\n+\t\tLIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {\n+\t\t\trxq_ctrl->rxq.mark = 1;\n+\t\t}\n+\t\tpriv->mark_enabled = 1;\n \t}\n-\tpriv->mark_enabled = 1;\n+\tpriv->sh->shared_mark_enabled = 1;\n }\n \n /**\n@@ -1623,6 +1646,7 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)\n \t\trxq->ctrl->rxq.tunnel = 0;\n \t}\n \tpriv->mark_enabled = 0;\n+\tpriv->sh->shared_mark_enabled = 0;\n }\n \n /**\n@@ -2808,8 +2832,8 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,\n  *   Item specification.\n  * @param[in] item_flags\n  *   Bit-fields that holds the items detected until now.\n- * @param[in] attr\n- *   Flow rule attributes.\n+ * @param root\n+ *   Whether action is on root table.\n  * @param[out] error\n  *   Pointer to error structure.\n  *\n@@ -2821,7 +2845,7 @@ mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,\n \t\t\t      uint16_t udp_dport,\n \t\t\t      const struct rte_flow_item *item,\n \t\t\t      uint64_t item_flags,\n-\t\t\t      const struct rte_flow_attr *attr,\n+\t\t\t      bool root,\n \t\t\t      struct rte_flow_error *error)\n {\n \tconst struct rte_flow_item_vxlan *spec = item->spec;\n@@ -2858,12 +2882,11 @@ mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,\n \tif (priv->sh->steering_format_version !=\n \t    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||\n \t    !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {\n-\t\t/* FDB domain & NIC 
domain non-zero group */\n-\t\tif ((attr->transfer || attr->group) && priv->sh->misc5_cap)\n+\t\t/* non-root table */\n+\t\tif (!root && priv->sh->misc5_cap)\n \t\t\tvalid_mask = &nic_mask;\n \t\t/* Group zero in NIC domain */\n-\t\tif (!attr->group && !attr->transfer &&\n-\t\t    priv->sh->tunnel_header_0_1)\n+\t\tif (!root && priv->sh->tunnel_header_0_1)\n \t\t\tvalid_mask = &nic_mask;\n \t}\n \tret = mlx5_flow_item_acceptable\n@@ -3102,11 +3125,11 @@ mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,\n \tif (mask->checksum_rsvd.checksum || mask->sequence.sequence) {\n \t\tif (priv->sh->steering_format_version ==\n \t\t    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||\n-\t\t    ((attr->group || attr->transfer) &&\n+\t\t    ((attr->group || (attr->transfer && priv->fdb_def_rule)) &&\n \t\t     !priv->sh->misc5_cap) ||\n \t\t    (!(priv->sh->tunnel_header_0_1 &&\n \t\t       priv->sh->tunnel_header_2_3) &&\n-\t\t    !attr->group && !attr->transfer))\n+\t\t    !attr->group && (!attr->transfer || !priv->fdb_def_rule)))\n \t\t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t\t  item,\n@@ -6163,7 +6186,8 @@ flow_create_split_metadata(struct rte_eth_dev *dev,\n \t}\n \tif (qrss) {\n \t\t/* Check if it is in meter suffix table. 
*/\n-\t\tmtr_sfx = attr->group == (attr->transfer ?\n+\t\tmtr_sfx = attr->group ==\n+\t\t\t  ((attr->transfer && priv->fdb_def_rule) ?\n \t\t\t  (MLX5_FLOW_TABLE_LEVEL_METER - 1) :\n \t\t\t  MLX5_FLOW_TABLE_LEVEL_METER);\n \t\t/*\n@@ -11086,3 +11110,43 @@ int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,\n \n \treturn 0;\n }\n+\n+int\n+mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,\n+\t\t\t      uint16_t *proxy_port_id,\n+\t\t\t      struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_priv *priv = dev->data->dev_private;\n+\tuint16_t port_id;\n+\n+\tif (!priv->sh->config.dv_esw_en)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"unable to provide a proxy port\"\n+\t\t\t\t\t  \" without E-Switch configured\");\n+\tif (!priv->master && !priv->representor)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"unable to provide a proxy port\"\n+\t\t\t\t\t  \" for port which is not a master\"\n+\t\t\t\t\t  \" or a representor port\");\n+\tif (priv->master) {\n+\t\t*proxy_port_id = dev->data->port_id;\n+\t\treturn 0;\n+\t}\n+\tMLX5_ETH_FOREACH_DEV(port_id, dev->device) {\n+\t\tconst struct rte_eth_dev *port_dev = &rte_eth_devices[port_id];\n+\t\tconst struct mlx5_priv *port_priv = port_dev->data->dev_private;\n+\n+\t\tif (port_priv->master &&\n+\t\t    port_priv->domain_id == priv->domain_id) {\n+\t\t\t*proxy_port_id = port_id;\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t  NULL, \"unable to find a proxy port\");\n+}\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 0eab3a3797..93f0e189d4 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1151,6 +1151,11 @@ struct rte_flow_pattern_template {\n \tstruct mlx5dr_match_template *mt; /* mlx5 match 
template. */\n \tuint64_t item_flags; /* Item layer flags. */\n \tuint32_t refcnt;  /* Reference counter. */\n+\t/*\n+\t * If true, then rule pattern should be prepended with\n+\t * represented_port pattern item.\n+\t */\n+\tbool implicit_port;\n };\n \n /* Flow action template struct. */\n@@ -1226,6 +1231,7 @@ struct mlx5_hw_action_template {\n /* mlx5 flow group struct. */\n struct mlx5_flow_group {\n \tstruct mlx5_list_entry entry;\n+\tstruct rte_eth_dev *dev; /* Reference to corresponding device. */\n \tstruct mlx5dr_table *tbl; /* HWS table object. */\n \tstruct mlx5_hw_jump_action jump; /* Jump action. */\n \tenum mlx5dr_table_type type; /* Table type. */\n@@ -1484,6 +1490,9 @@ void flow_hw_clear_port_info(struct rte_eth_dev *dev);\n void flow_hw_init_tags_set(struct rte_eth_dev *dev);\n void flow_hw_clear_tags_set(struct rte_eth_dev *dev);\n \n+int flow_hw_create_vport_action(struct rte_eth_dev *dev);\n+void flow_hw_destroy_vport_action(struct rte_eth_dev *dev);\n+\n typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,\n \t\t\t\t    const struct rte_flow_attr *attr,\n \t\t\t\t    const struct rte_flow_item items[],\n@@ -2056,7 +2065,7 @@ int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,\n \t\t\t\t  uint16_t udp_dport,\n \t\t\t\t  const struct rte_flow_item *item,\n \t\t\t\t  uint64_t item_flags,\n-\t\t\t\t  const struct rte_flow_attr *attr,\n+\t\t\t\t  bool root,\n \t\t\t\t  struct rte_flow_error *error);\n int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,\n \t\t\t\t      uint64_t item_flags,\n@@ -2313,4 +2322,15 @@ int flow_dv_translate_items_hws(const struct rte_flow_item *items,\n \t\t\t\tuint32_t key_type, uint64_t *item_flags,\n \t\t\t\tuint8_t *match_criteria,\n \t\t\t\tstruct rte_flow_error *error);\n+\n+int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,\n+\t\t\t\t  uint16_t *proxy_port_id,\n+\t\t\t\t  struct rte_flow_error *error);\n+\n+int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);\n+\n+int 
mlx5_flow_hw_esw_create_mgr_sq_miss_flow(struct rte_eth_dev *dev);\n+int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,\n+\t\t\t\t\t uint32_t txq);\n+int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);\n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex e4af9d910b..ace69c2b40 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -2471,8 +2471,8 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev,\n  *   Previous validated item in the pattern items.\n  * @param[in] gtp_item\n  *   Previous GTP item specification.\n- * @param[in] attr\n- *   Pointer to flow attributes.\n+ * @param root\n+ *   Whether action is on root table.\n  * @param[out] error\n  *   Pointer to error structure.\n  *\n@@ -2483,7 +2483,7 @@ static int\n flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,\n \t\t\t      uint64_t last_item,\n \t\t\t      const struct rte_flow_item *gtp_item,\n-\t\t\t      const struct rte_flow_attr *attr,\n+\t\t\t      bool root,\n \t\t\t      struct rte_flow_error *error)\n {\n \tconst struct rte_flow_item_gtp *gtp_spec;\n@@ -2508,7 +2508,7 @@ flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,\n \t\t\t(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,\n \t\t\t \"GTP E flag must be 1 to match GTP PSC\");\n \t/* Check the flow is not created in group zero. 
*/\n-\tif (!attr->transfer && !attr->group)\n+\tif (root)\n \t\treturn rte_flow_error_set\n \t\t\t(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t \"GTP PSC is not supported for group 0\");\n@@ -3373,20 +3373,19 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,\n /**\n  * Indicates whether ASO aging is supported.\n  *\n- * @param[in] sh\n- *   Pointer to shared device context structure.\n- * @param[in] attr\n- *   Attributes of flow that includes AGE action.\n+ * @param[in] priv\n+ *   Pointer to device private context structure.\n+ * @param[in] root\n+ *   Whether action is on root table.\n  *\n  * @return\n  *   True when ASO aging is supported, false otherwise.\n  */\n static inline bool\n-flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,\n-\t\tconst struct rte_flow_attr *attr)\n+flow_hit_aso_supported(const struct mlx5_priv *priv, bool root)\n {\n-\tMLX5_ASSERT(sh && attr);\n-\treturn (sh->flow_hit_aso_en && (attr->transfer || attr->group));\n+\tMLX5_ASSERT(priv);\n+\treturn (priv->sh->flow_hit_aso_en && !root);\n }\n \n /**\n@@ -3398,8 +3397,8 @@ flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,\n  *   Indicator if action is shared.\n  * @param[in] action_flags\n  *   Holds the actions detected until now.\n- * @param[in] attr\n- *   Attributes of flow that includes this action.\n+ * @param[in] root\n+ *   Whether action is on root table.\n  * @param[out] error\n  *   Pointer to error structure.\n  *\n@@ -3409,7 +3408,7 @@ flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,\n static int\n flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,\n \t\t\t      uint64_t action_flags,\n-\t\t\t      const struct rte_flow_attr *attr,\n+\t\t\t      bool root,\n \t\t\t      struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -3421,7 +3420,7 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n 
\t\t\t\t\t  \"duplicate count actions set\");\n \tif (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&\n-\t    !flow_hit_aso_supported(priv->sh, attr))\n+\t    !flow_hit_aso_supported(priv, root))\n \t\treturn rte_flow_error_set(error, EINVAL,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n \t\t\t\t\t  \"old age and indirect count combination is not supported\");\n@@ -3652,8 +3651,8 @@ flow_dv_validate_action_raw_encap_decap\n  *   Holds the actions detected until now.\n  * @param[in] item_flags\n  *   The items found in this flow rule.\n- * @param[in] attr\n- *   Pointer to flow attributes.\n+ * @param root\n+ *   Whether action is on root table.\n  * @param[out] error\n  *   Pointer to error structure.\n  *\n@@ -3664,12 +3663,12 @@ static int\n flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,\n \t\t\t       uint64_t action_flags,\n \t\t\t       uint64_t item_flags,\n-\t\t\t       const struct rte_flow_attr *attr,\n+\t\t\t       bool root,\n \t\t\t       struct rte_flow_error *error)\n {\n \tRTE_SET_USED(dev);\n \n-\tif (attr->group == 0 && !attr->transfer)\n+\tif (root)\n \t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\t  NULL,\n@@ -4919,6 +4918,8 @@ flow_dv_validate_action_modify_ttl(const uint64_t action_flags,\n  *   Pointer to the modify action.\n  * @param[in] attr\n  *   Pointer to the flow attributes.\n+ * @param root\n+ *   Whether action is on root table.\n  * @param[out] error\n  *   Pointer to error structure.\n  *\n@@ -4931,6 +4932,7 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,\n \t\t\t\t   const uint64_t action_flags,\n \t\t\t\t   const struct rte_flow_action *action,\n \t\t\t\t   const struct rte_flow_attr *attr,\n+\t\t\t\t   bool root,\n \t\t\t\t   struct rte_flow_error *error)\n {\n \tint ret = 0;\n@@ -4978,7 +4980,7 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,\n \t}\n \tif (action_modify_field->src.field != RTE_FLOW_FIELD_VALUE &&\n \t    
action_modify_field->src.field != RTE_FLOW_FIELD_POINTER) {\n-\t\tif (!attr->transfer && !attr->group)\n+\t\tif (root)\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION, action,\n \t\t\t\t\t\"modify field action is not\"\n@@ -5068,8 +5070,7 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,\n \t    action_modify_field->src.field == RTE_FLOW_FIELD_IPV4_ECN ||\n \t    action_modify_field->dst.field == RTE_FLOW_FIELD_IPV6_ECN ||\n \t    action_modify_field->src.field == RTE_FLOW_FIELD_IPV6_ECN)\n-\t\tif (!hca_attr->modify_outer_ip_ecn &&\n-\t\t    !attr->transfer && !attr->group)\n+\t\tif (!hca_attr->modify_outer_ip_ecn && root)\n \t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION, action,\n \t\t\t\t\"modifications of the ECN for current firmware is not supported\");\n@@ -5103,11 +5104,12 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,\n \t\t\t     bool external, struct rte_flow_error *error)\n {\n \tuint32_t target_group, table = 0;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tint ret = 0;\n \tstruct flow_grp_info grp_info = {\n \t\t.external = !!external,\n \t\t.transfer = !!attributes->transfer,\n-\t\t.fdb_def_rule = 1,\n+\t\t.fdb_def_rule = !!priv->fdb_def_rule,\n \t\t.std_tbl_fix = 0\n \t};\n \tif (action_flags & (MLX5_FLOW_FATE_ACTIONS |\n@@ -5687,6 +5689,8 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)\n  *   Pointer to the COUNT action in sample action list.\n  * @param[out] fdb_mirror_limit\n  *   Pointer to the FDB mirror limitation flag.\n+ * @param root\n+ *   Whether action is on root table.\n  * @param[out] error\n  *   Pointer to error structure.\n  *\n@@ -5703,6 +5707,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,\n \t\t\t       const struct rte_flow_action_rss **sample_rss,\n \t\t\t       const struct rte_flow_action_count **count,\n \t\t\t       int *fdb_mirror_limit,\n+\t\t\t       bool root,\n 
\t\t\t       struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n@@ -5804,7 +5809,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,\n \t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n \t\t\tret = flow_dv_validate_action_count\n \t\t\t\t(dev, false, *action_flags | sub_action_flags,\n-\t\t\t\t attr, error);\n+\t\t\t\t root, error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n \t\t\t*count = act->conf;\n@@ -7284,7 +7289,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n \t\t\tret = mlx5_flow_validate_item_vxlan(dev, udp_dport,\n \t\t\t\t\t\t\t    items, item_flags,\n-\t\t\t\t\t\t\t    attr, error);\n+\t\t\t\t\t\t\t    is_root, error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n \t\t\tlast_item = MLX5_FLOW_LAYER_VXLAN;\n@@ -7378,7 +7383,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_GTP_PSC:\n \t\t\tret = flow_dv_validate_item_gtp_psc(items, last_item,\n-\t\t\t\t\t\t\t    gtp_item, attr,\n+\t\t\t\t\t\t\t    gtp_item, is_root,\n \t\t\t\t\t\t\t    error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n@@ -7595,7 +7600,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\n \t\t\tret = flow_dv_validate_action_count(dev, shared_count,\n \t\t\t\t\t\t\t    action_flags,\n-\t\t\t\t\t\t\t    attr, error);\n+\t\t\t\t\t\t\t    is_root, error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n \t\t\tcount = actions->conf;\n@@ -7889,7 +7894,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t\t\trw_act_num += MLX5_ACT_NUM_SET_TAG;\n \t\t\tbreak;\n \t\tcase MLX5_RTE_FLOW_ACTION_TYPE_AGE:\n-\t\t\tif (!attr->transfer && !attr->group)\n+\t\t\tif (is_root)\n \t\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n \t\t\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t\t\t\t\t\t   NULL,\n@@ -7914,7 +7919,7 @@ 
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t\t\t * Validate the regular AGE action (using counter)\n \t\t\t * mutual exclusion with indirect counter actions.\n \t\t\t */\n-\t\t\tif (!flow_hit_aso_supported(priv->sh, attr)) {\n+\t\t\tif (!flow_hit_aso_supported(priv, is_root)) {\n \t\t\t\tif (shared_count)\n \t\t\t\t\treturn rte_flow_error_set\n \t\t\t\t\t\t(error, EINVAL,\n@@ -7970,6 +7975,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t\t\t\t\t\t\t     rss, &sample_rss,\n \t\t\t\t\t\t\t     &sample_count,\n \t\t\t\t\t\t\t     &fdb_mirror_limit,\n+\t\t\t\t\t\t\t     is_root,\n \t\t\t\t\t\t\t     error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n@@ -7986,6 +7992,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t\t\t\t\t\t\t\t   action_flags,\n \t\t\t\t\t\t\t\t   actions,\n \t\t\t\t\t\t\t\t   attr,\n+\t\t\t\t\t\t\t\t   is_root,\n \t\t\t\t\t\t\t\t   error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n@@ -7999,8 +8006,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n \t\t\tret = flow_dv_validate_action_aso_ct(dev, action_flags,\n-\t\t\t\t\t\t\t     item_flags, attr,\n-\t\t\t\t\t\t\t     error);\n+\t\t\t\t\t\t\t     item_flags,\n+\t\t\t\t\t\t\t     is_root, error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n \t\t\taction_flags |= MLX5_FLOW_ACTION_CT;\n@@ -9200,15 +9207,18 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,\n \tif (MLX5_ITEM_VALID(item, key_type))\n \t\treturn;\n \tMLX5_ITEM_UPDATE(item, key_type, vxlan_v, vxlan_m, &nic_mask);\n-\tif (item->mask == &nic_mask &&\n-\t    ((!attr->group && !priv->sh->tunnel_header_0_1) ||\n-\t    (attr->group && !priv->sh->misc5_cap)))\n+\tif ((item->mask == &nic_mask) &&\n+\t    ((!attr->group && !(attr->transfer && priv->fdb_def_rule) &&\n+\t    !priv->sh->tunnel_header_0_1) ||\n+\t    ((attr->group || (attr->transfer 
&& priv->fdb_def_rule)) &&\n+\t    !priv->sh->misc5_cap)))\n \t\tvxlan_m = &rte_flow_item_vxlan_mask;\n \tif ((priv->sh->steering_format_version ==\n \t     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&\n \t     dport != MLX5_UDP_PORT_VXLAN) ||\n-\t    (!attr->group && !attr->transfer) ||\n-\t    ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {\n+\t    (!attr->group && !(attr->transfer && priv->fdb_def_rule)) ||\n+\t    ((attr->group || (attr->transfer && priv->fdb_def_rule)) &&\n+\t    !priv->sh->misc5_cap)) {\n \t\tmisc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);\n \t\tsize = sizeof(vxlan_m->vni);\n \t\tvni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);\n@@ -14180,7 +14190,7 @@ flow_dv_translate(struct rte_eth_dev *dev,\n \t\t\t */\n \t\t\tif (action_flags & MLX5_FLOW_ACTION_AGE) {\n \t\t\t\tif ((non_shared_age && count) ||\n-\t\t\t\t    !flow_hit_aso_supported(priv->sh, attr)) {\n+\t\t\t\t    !flow_hit_aso_supported(priv, !dev_flow->dv.group)) {\n \t\t\t\t\t/* Creates age by counters. 
*/\n \t\t\t\t\tcnt_act = flow_dv_prepare_counter\n \t\t\t\t\t\t\t\t(dev, dev_flow,\n@@ -18329,6 +18339,7 @@ flow_dv_action_validate(struct rte_eth_dev *dev,\n \t\t\tstruct rte_flow_error *err)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n+\t/* called from RTE API */\n \n \tRTE_SET_USED(conf);\n \tswitch (action->type) {\n@@ -18356,7 +18367,7 @@ flow_dv_action_validate(struct rte_eth_dev *dev,\n \t\t\t\t\t\t\"Indirect age action not supported\");\n \t\treturn flow_dv_validate_action_age(0, action, dev, err);\n \tcase RTE_FLOW_ACTION_TYPE_COUNT:\n-\t\treturn flow_dv_validate_action_count(dev, true, 0, NULL, err);\n+\t\treturn flow_dv_validate_action_count(dev, true, 0, false, err);\n \tcase RTE_FLOW_ACTION_TYPE_CONNTRACK:\n \t\tif (!priv->sh->ct_aso_en)\n \t\t\treturn rte_flow_error_set(err, ENOTSUP,\n@@ -18533,6 +18544,8 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,\n \tbool def_green = false;\n \tbool def_yellow = false;\n \tconst struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};\n+\t/* Called from RTE API */\n+\tbool is_root = !(attr->group || (attr->transfer && priv->fdb_def_rule));\n \n \tif (!dev_conf->dv_esw_en)\n \t\tdef_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;\n@@ -18734,7 +18747,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,\n \t\t\t\tbreak;\n \t\t\tcase RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:\n \t\t\t\tret = flow_dv_validate_action_modify_field(dev,\n-\t\t\t\t\taction_flags[i], act, attr, &flow_err);\n+\t\t\t\t\taction_flags[i], act, attr, is_root, &flow_err);\n \t\t\t\tif (ret < 0)\n \t\t\t\t\treturn -rte_mtr_error_set(error,\n \t\t\t\t\t  ENOTSUP,\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex 3321e17fef..728370328c 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -20,6 +20,14 @@\n /* Default queue to flush the flows. 
*/\n #define MLX5_DEFAULT_FLUSH_QUEUE 0\n \n+/* Maximum number of rules in control flow tables */\n+#define MLX5_HW_CTRL_FLOW_NB_RULES (4096)\n+\n+/* Flow group for SQ miss default flows/ */\n+#define MLX5_HW_SQ_MISS_GROUP (UINT32_MAX)\n+\n+static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);\n+\n const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;\n \n /* DR action flags with different table. */\n@@ -57,6 +65,9 @@ flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable)\n \tfor (i = 0; i < priv->rxqs_n; ++i) {\n \t\tstruct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);\n \n+\t\t/* With RXQ start/stop feature, RXQ might be stopped. */\n+\t\tif (!rxq_ctrl)\n+\t\t\tcontinue;\n \t\trxq_ctrl->rxq.mark = enable;\n \t}\n \tpriv->mark_enabled = enable;\n@@ -810,6 +821,77 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n+static int\n+flow_hw_represented_port_compile(struct rte_eth_dev *dev,\n+\t\t\t\t const struct rte_flow_attr *attr,\n+\t\t\t\t const struct rte_flow_action *action_start,\n+\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t const struct rte_flow_action *action_mask,\n+\t\t\t\t struct mlx5_hw_actions *acts,\n+\t\t\t\t uint16_t action_dst,\n+\t\t\t\t struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tconst struct rte_flow_action_ethdev *v = action->conf;\n+\tconst struct rte_flow_action_ethdev *m = action_mask->conf;\n+\tint ret;\n+\n+\tif (!attr->group)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR, NULL,\n+\t\t\t\t\t  \"represented_port action cannot\"\n+\t\t\t\t\t  \" be used on group 0\");\n+\tif (!attr->transfer)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"represented_port action requires\"\n+\t\t\t\t\t  \" transfer attribute\");\n+\tif (attr->ingress || attr->egress)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  
RTE_FLOW_ERROR_TYPE_ATTR, NULL,\n+\t\t\t\t\t  \"represented_port action cannot\"\n+\t\t\t\t\t  \" be used with direction attributes\");\n+\tif (!priv->master)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"represented_port acton must\"\n+\t\t\t\t\t  \" be used on proxy port\");\n+\tif (m && !!m->port_id) {\n+\t\tstruct mlx5_priv *port_priv;\n+\n+\t\tport_priv = mlx5_port_to_eswitch_info(v->port_id, false);\n+\t\tif (port_priv == NULL)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t\t(error, EINVAL,\n+\t\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t \"port does not exist or unable to\"\n+\t\t\t\t\t \" obtain E-Switch info for port\");\n+\t\tMLX5_ASSERT(priv->hw_vport != NULL);\n+\t\tif (priv->hw_vport[v->port_id]) {\n+\t\t\tacts->rule_acts[action_dst].action =\n+\t\t\t\t\tpriv->hw_vport[v->port_id];\n+\t\t} else {\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t\t(error, EINVAL,\n+\t\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t \"cannot use represented_port action\"\n+\t\t\t\t\t \" with this port\");\n+\t\t}\n+\t} else {\n+\t\tret = __flow_hw_act_data_general_append\n+\t\t\t\t(priv, acts, action->type,\n+\t\t\t\t action - action_start, action_dst);\n+\t\tif (ret)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t\t(error, ENOMEM,\n+\t\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t \"not enough memory to store\"\n+\t\t\t\t\t \" vport action\");\n+\t}\n+\treturn 0;\n+}\n+\n /**\n  * Translate rte_flow actions to DR action.\n  *\n@@ -887,7 +969,7 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_DROP:\n \t\t\tacts->rule_acts[i++].action =\n-\t\t\t\tpriv->hw_drop[!!attr->group][type];\n+\t\t\t\tpriv->hw_drop[!!attr->group];\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_MARK:\n \t\t\tacts->mark = true;\n@@ -1020,6 +1102,13 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,\n \t\t\tif (err)\n \t\t\t\tgoto err;\n 
\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n+\t\t\tif (flow_hw_represented_port_compile\n+\t\t\t\t\t(dev, attr, action_start, actions,\n+\t\t\t\t\t masks, acts, i, error))\n+\t\t\t\tgoto err;\n+\t\t\ti++;\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_END:\n \t\t\tactions_end = true;\n \t\t\tbreak;\n@@ -1352,11 +1441,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\t  struct mlx5dr_rule_action *rule_acts,\n \t\t\t  uint32_t *acts_num)\n {\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct rte_flow_template_table *table = job->flow->table;\n \tstruct mlx5_action_construct_data *act_data;\n \tconst struct rte_flow_action *action;\n \tconst struct rte_flow_action_raw_encap *raw_encap_data;\n \tconst struct rte_flow_item *enc_item = NULL;\n+\tconst struct rte_flow_action_ethdev *port_action = NULL;\n \tuint8_t *buf = job->encap_data;\n \tstruct rte_flow_attr attr = {\n \t\t\t.ingress = 1,\n@@ -1476,6 +1567,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \t\t\tif (ret)\n \t\t\t\treturn -1;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n+\t\t\tport_action = action->conf;\n+\t\t\tif (!priv->hw_vport[port_action->port_id])\n+\t\t\t\treturn -1;\n+\t\t\trule_acts[act_data->action_dst].action =\n+\t\t\t\t\tpriv->hw_vport[port_action->port_id];\n+\t\t\tbreak;\n \t\tdefault:\n \t\t\tbreak;\n \t\t}\n@@ -1488,6 +1586,52 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n+static const struct rte_flow_item *\n+flow_hw_get_rule_items(struct rte_eth_dev *dev,\n+\t\t       struct rte_flow_template_table *table,\n+\t\t       const struct rte_flow_item items[],\n+\t\t       uint8_t pattern_template_index,\n+\t\t       struct mlx5_hw_q_job *job)\n+{\n+\tif (table->its[pattern_template_index]->implicit_port) {\n+\t\tconst struct rte_flow_item *curr_item;\n+\t\tunsigned int nb_items;\n+\t\tbool found_end;\n+\t\tunsigned int i;\n+\n+\t\t/* Count number of pattern items. 
*/\n+\t\tnb_items = 0;\n+\t\tfound_end = false;\n+\t\tfor (curr_item = items; !found_end; ++curr_item) {\n+\t\t\t++nb_items;\n+\t\t\tif (curr_item->type == RTE_FLOW_ITEM_TYPE_END)\n+\t\t\t\tfound_end = true;\n+\t\t}\n+\t\t/* Prepend represented port item. */\n+\t\tjob->port_spec = (struct rte_flow_item_ethdev){\n+\t\t\t.port_id = dev->data->port_id,\n+\t\t};\n+\t\tjob->items[0] = (struct rte_flow_item){\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,\n+\t\t\t.spec = &job->port_spec,\n+\t\t};\n+\t\tfound_end = false;\n+\t\tfor (i = 1; i < MLX5_HW_MAX_ITEMS && i - 1 < nb_items; ++i) {\n+\t\t\tjob->items[i] = items[i - 1];\n+\t\t\tif (items[i - 1].type == RTE_FLOW_ITEM_TYPE_END) {\n+\t\t\t\tfound_end = true;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t\tif (i >= MLX5_HW_MAX_ITEMS && !found_end) {\n+\t\t\trte_errno = ENOMEM;\n+\t\t\treturn NULL;\n+\t\t}\n+\t\treturn job->items;\n+\t}\n+\treturn items;\n+}\n+\n /**\n  * Enqueue HW steering flow creation.\n  *\n@@ -1539,6 +1683,7 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \tstruct mlx5_hw_actions *hw_acts;\n \tstruct rte_flow_hw *flow;\n \tstruct mlx5_hw_q_job *job;\n+\tconst struct rte_flow_item *rule_items;\n \tuint32_t acts_num, flow_idx;\n \tint ret;\n \n@@ -1565,15 +1710,23 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,\n \tjob->user_data = user_data;\n \trule_attr.user_data = job;\n \thw_acts = &table->ats[action_template_index].acts;\n-\t/* Construct the flow action array based on the input actions.*/\n-\tflow_hw_actions_construct(dev, job, hw_acts, pattern_template_index,\n-\t\t\t\t  actions, rule_acts, &acts_num);\n+\t/* Construct the flow actions based on the input actions.*/\n+\tif (flow_hw_actions_construct(dev, job, hw_acts, pattern_template_index,\n+\t\t\t\t  actions, rule_acts, &acts_num)) {\n+\t\trte_errno = EINVAL;\n+\t\tgoto free;\n+\t}\n+\trule_items = flow_hw_get_rule_items(dev, table, items,\n+\t\t\t\t\t    pattern_template_index, job);\n+\tif (!rule_items)\n+\t\tgoto free;\n 
\tret = mlx5dr_rule_create(table->matcher,\n \t\t\t\t pattern_template_index, items,\n \t\t\t\t action_template_index, rule_acts,\n \t\t\t\t &rule_attr, &flow->rule);\n \tif (likely(!ret))\n \t\treturn (struct rte_flow *)flow;\n+free:\n \t/* Flow created fail, return the descriptor and flow memory. */\n \tmlx5_ipool_free(table->flow, flow_idx);\n \tpriv->hw_q[queue].job_idx++;\n@@ -1754,7 +1907,9 @@ __flow_hw_pull_comp(struct rte_eth_dev *dev,\n \tstruct rte_flow_op_result comp[BURST_THR];\n \tint ret, i, empty_loop = 0;\n \n-\tflow_hw_push(dev, queue, error);\n+\tret = flow_hw_push(dev, queue, error);\n+\tif (ret < 0)\n+\t\treturn ret;\n \twhile (pending_rules) {\n \t\tret = flow_hw_pull(dev, queue, comp, BURST_THR, error);\n \t\tif (ret < 0)\n@@ -2039,8 +2194,12 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tint i;\n+\tuint32_t fidx = 1;\n \n-\tif (table->refcnt) {\n+\t/* Build ipool allocated object bitmap. */\n+\tmlx5_ipool_flush_cache(table->flow);\n+\t/* Check if ipool has allocated objects. 
*/\n+\tif (table->refcnt || mlx5_ipool_get_next(table->flow, &fidx)) {\n \t\tDRV_LOG(WARNING, \"Table %p is still in using.\", (void *)table);\n \t\treturn rte_flow_error_set(error, EBUSY,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n@@ -2052,8 +2211,6 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,\n \t\t__atomic_sub_fetch(&table->its[i]->refcnt,\n \t\t\t\t   1, __ATOMIC_RELAXED);\n \tfor (i = 0; i < table->nb_action_templates; i++) {\n-\t\tif (table->ats[i].acts.mark)\n-\t\t\tflow_hw_rxq_flag_set(dev, false);\n \t\t__flow_hw_action_template_destroy(dev, &table->ats[i].acts);\n \t\t__atomic_sub_fetch(&table->ats[i].action_template->refcnt,\n \t\t\t\t   1, __ATOMIC_RELAXED);\n@@ -2119,7 +2276,51 @@ flow_hw_validate_action_modify_field(const struct rte_flow_action *action,\n }\n \n static int\n-flow_hw_action_validate(const struct rte_flow_action actions[],\n+flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,\n+\t\t\t\t\t const struct rte_flow_action *action,\n+\t\t\t\t\t const struct rte_flow_action *mask,\n+\t\t\t\t\t struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_action_ethdev *action_conf = action->conf;\n+\tconst struct rte_flow_action_ethdev *mask_conf = mask->conf;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tif (!priv->sh->config.dv_esw_en)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"cannot use represented_port actions\"\n+\t\t\t\t\t  \" without an E-Switch\");\n+\tif (mask_conf->port_id) {\n+\t\tstruct mlx5_priv *port_priv;\n+\t\tstruct mlx5_priv *dev_priv;\n+\n+\t\tport_priv = mlx5_port_to_eswitch_info(action_conf->port_id, false);\n+\t\tif (!port_priv)\n+\t\t\treturn rte_flow_error_set(error, rte_errno,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  action,\n+\t\t\t\t\t\t  \"failed to obtain E-Switch\"\n+\t\t\t\t\t\t  \" info for port\");\n+\t\tdev_priv = mlx5_dev_to_eswitch_info(dev);\n+\t\tif (!dev_priv)\n+\t\t\treturn 
rte_flow_error_set(error, rte_errno,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  action,\n+\t\t\t\t\t\t  \"failed to obtain E-Switch\"\n+\t\t\t\t\t\t  \" info for transfer proxy\");\n+\t\tif (port_priv->domain_id != dev_priv->domain_id)\n+\t\t\treturn rte_flow_error_set(error, rte_errno,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  action,\n+\t\t\t\t\t\t  \"cannot forward to port from\"\n+\t\t\t\t\t\t  \" a different E-Switch\");\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+flow_hw_action_validate(struct rte_eth_dev *dev,\n+\t\t\tconst struct rte_flow_action actions[],\n \t\t\tconst struct rte_flow_action masks[],\n \t\t\tstruct rte_flow_error *error)\n {\n@@ -2182,6 +2383,12 @@ flow_hw_action_validate(const struct rte_flow_action actions[],\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n \t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:\n+\t\t\tret = flow_hw_validate_action_represented_port\n+\t\t\t\t\t(dev, action, mask, error);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t\tbreak;\n \t\tcase RTE_FLOW_ACTION_TYPE_END:\n \t\t\tactions_end = true;\n \t\t\tbreak;\n@@ -2223,7 +2430,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,\n \tint len, act_len, mask_len, i;\n \tstruct rte_flow_actions_template *at;\n \n-\tif (flow_hw_action_validate(actions, masks, error))\n+\tif (flow_hw_action_validate(dev, actions, masks, error))\n \t\treturn NULL;\n \tact_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS,\n \t\t\t\tNULL, 0, actions, error);\n@@ -2306,6 +2513,46 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,\n \treturn 0;\n }\n \n+static struct rte_flow_item *\n+flow_hw_copy_prepend_port_item(const struct rte_flow_item *items,\n+\t\t\t       struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item *curr_item;\n+\tstruct rte_flow_item *copied_items;\n+\tbool found_end;\n+\tunsigned int nb_items;\n+\tunsigned int i;\n+\tsize_t size;\n+\n+\t/* Count number of pattern items. 
*/\n+\tnb_items = 0;\n+\tfound_end = false;\n+\tfor (curr_item = items; !found_end; ++curr_item) {\n+\t\t++nb_items;\n+\t\tif (curr_item->type == RTE_FLOW_ITEM_TYPE_END)\n+\t\t\tfound_end = true;\n+\t}\n+\t/* Allocate new array of items and prepend REPRESENTED_PORT item. */\n+\tsize = sizeof(*copied_items) * (nb_items + 1);\n+\tcopied_items = mlx5_malloc(MLX5_MEM_ZERO, size, 0, rte_socket_id());\n+\tif (!copied_items) {\n+\t\trte_flow_error_set(error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t   NULL,\n+\t\t\t\t   \"cannot allocate item template\");\n+\t\treturn NULL;\n+\t}\n+\tcopied_items[0] = (struct rte_flow_item){\n+\t\t.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,\n+\t\t.spec = NULL,\n+\t\t.last = NULL,\n+\t\t.mask = &rte_flow_item_ethdev_mask,\n+\t};\n+\tfor (i = 1; i < nb_items + 1; ++i)\n+\t\tcopied_items[i] = items[i - 1];\n+\treturn copied_items;\n+}\n+\n /**\n  * Create flow item template.\n  *\n@@ -2329,9 +2576,35 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct rte_flow_pattern_template *it;\n+\tstruct rte_flow_item *copied_items = NULL;\n+\tconst struct rte_flow_item *tmpl_items;\n \n+\tif (priv->sh->config.dv_esw_en && attr->ingress) {\n+\t\t/*\n+\t\t * Disallow pattern template with ingress and egress/transfer\n+\t\t * attributes in order to forbid implicit port matching\n+\t\t * on egress and transfer traffic.\n+\t\t */\n+\t\tif (attr->egress || attr->transfer) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t   NULL,\n+\t\t\t\t\t   \"item template for ingress traffic\"\n+\t\t\t\t\t   \" cannot be used for egress/transfer\"\n+\t\t\t\t\t   \" traffic when E-Switch is enabled\");\n+\t\t\treturn NULL;\n+\t\t}\n+\t\tcopied_items = flow_hw_copy_prepend_port_item(items, error);\n+\t\tif (!copied_items)\n+\t\t\treturn NULL;\n+\t\ttmpl_items = copied_items;\n+\t} else {\n+\t\ttmpl_items = items;\n+\t}\n 
\tit = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());\n \tif (!it) {\n+\t\tif (copied_items)\n+\t\t\tmlx5_free(copied_items);\n \t\trte_flow_error_set(error, ENOMEM,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n \t\t\t\t   NULL,\n@@ -2339,8 +2612,10 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,\n \t\treturn NULL;\n \t}\n \tit->attr = *attr;\n-\tit->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);\n+\tit->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);\n \tif (!it->mt) {\n+\t\tif (copied_items)\n+\t\t\tmlx5_free(copied_items);\n \t\tmlx5_free(it);\n \t\trte_flow_error_set(error, rte_errno,\n \t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n@@ -2348,9 +2623,12 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,\n \t\t\t\t   \"cannot create match template\");\n \t\treturn NULL;\n \t}\n-\tit->item_flags = flow_hw_rss_item_flags_get(items);\n+\tit->item_flags = flow_hw_rss_item_flags_get(tmpl_items);\n+\tit->implicit_port = !!copied_items;\n \t__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);\n \tLIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);\n+\tif (copied_items)\n+\t\tmlx5_free(copied_items);\n \treturn it;\n }\n \n@@ -2476,6 +2754,7 @@ flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)\n \t\t\tgoto error;\n \t\tgrp_data->jump.root_action = jump;\n \t}\n+\tgrp_data->dev = dev;\n \tgrp_data->idx = idx;\n \tgrp_data->group_id = attr->group;\n \tgrp_data->type = dr_tbl_attr.type;\n@@ -2544,7 +2823,8 @@ flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,\n \tstruct rte_flow_attr *attr =\n \t\t\t(struct rte_flow_attr *)ctx->data;\n \n-\treturn (grp_data->group_id != attr->group) ||\n+\treturn (grp_data->dev != ctx->dev) ||\n+\t\t(grp_data->group_id != attr->group) ||\n \t\t((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&\n \t\tattr->transfer) ||\n \t\t((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&\n@@ -2607,6 +2887,545 @@ flow_hw_grp_clone_free_cb(void *tool_ctx, 
struct mlx5_list_entry *entry)\n \tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);\n }\n \n+/**\n+ * Create and cache a vport action for given @p dev port. vport actions\n+ * cache is used in HWS with FDB flows.\n+ *\n+ * This function does not create any function if proxy port for @p dev port\n+ * was not configured for HW Steering.\n+ *\n+ * This function assumes that E-Switch is enabled and PMD is running with\n+ * HW Steering configured.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device which will be the action destination.\n+ *\n+ * @return\n+ *   0 on success, positive value otherwise.\n+ */\n+int\n+flow_hw_create_vport_action(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct rte_eth_dev *proxy_dev;\n+\tstruct mlx5_priv *proxy_priv;\n+\tuint16_t port_id = dev->data->port_id;\n+\tuint16_t proxy_port_id = port_id;\n+\tint ret;\n+\n+\tret = mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL);\n+\tif (ret)\n+\t\treturn ret;\n+\tproxy_dev = &rte_eth_devices[proxy_port_id];\n+\tproxy_priv = proxy_dev->data->dev_private;\n+\tif (!proxy_priv->hw_vport)\n+\t\treturn 0;\n+\tif (proxy_priv->hw_vport[port_id]) {\n+\t\tDRV_LOG(ERR, \"port %u HWS vport action already created\",\n+\t\t\tport_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tproxy_priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport\n+\t\t\t(proxy_priv->dr_ctx, priv->dev_port,\n+\t\t\t MLX5DR_ACTION_FLAG_HWS_FDB);\n+\tif (!proxy_priv->hw_vport[port_id]) {\n+\t\tDRV_LOG(ERR, \"port %u unable to create HWS vport action\",\n+\t\t\tport_id);\n+\t\treturn -EINVAL;\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * Destroys the vport action associated with @p dev device\n+ * from actions' cache.\n+ *\n+ * This function does not destroy any action if there is no action cached\n+ * for @p dev or proxy port was not configured for HW Steering.\n+ *\n+ * This function assumes that E-Switch is enabled and PMD is running with\n+ * HW Steering configured.\n+ *\n+ * @param 
dev\n+ *   Pointer to Ethernet device which will be the action destination.\n+ */\n+void\n+flow_hw_destroy_vport_action(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_eth_dev *proxy_dev;\n+\tstruct mlx5_priv *proxy_priv;\n+\tuint16_t port_id = dev->data->port_id;\n+\tuint16_t proxy_port_id = port_id;\n+\n+\tif (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL))\n+\t\treturn;\n+\tproxy_dev = &rte_eth_devices[proxy_port_id];\n+\tproxy_priv = proxy_dev->data->dev_private;\n+\tif (!proxy_priv->hw_vport || !proxy_priv->hw_vport[port_id])\n+\t\treturn;\n+\tmlx5dr_action_destroy(proxy_priv->hw_vport[port_id]);\n+\tproxy_priv->hw_vport[port_id] = NULL;\n+}\n+\n+static int\n+flow_hw_create_vport_actions(struct mlx5_priv *priv)\n+{\n+\tuint16_t port_id;\n+\n+\tMLX5_ASSERT(!priv->hw_vport);\n+\tpriv->hw_vport = mlx5_malloc(MLX5_MEM_ZERO,\n+\t\t\t\t     sizeof(*priv->hw_vport) * RTE_MAX_ETHPORTS,\n+\t\t\t\t     0, SOCKET_ID_ANY);\n+\tif (!priv->hw_vport)\n+\t\treturn -ENOMEM;\n+\tDRV_LOG(DEBUG, \"port %u :: creating vport actions\", priv->dev_data->port_id);\n+\tDRV_LOG(DEBUG, \"port %u ::    domain_id=%u\", priv->dev_data->port_id, priv->domain_id);\n+\tMLX5_ETH_FOREACH_DEV(port_id, NULL) {\n+\t\tstruct mlx5_priv *port_priv = rte_eth_devices[port_id].data->dev_private;\n+\n+\t\tif (!port_priv ||\n+\t\t    port_priv->domain_id != priv->domain_id)\n+\t\t\tcontinue;\n+\t\tDRV_LOG(DEBUG, \"port %u :: for port_id=%u, calling mlx5dr_action_create_dest_vport() with ibport=%u\",\n+\t\t\tpriv->dev_data->port_id, port_id, port_priv->dev_port);\n+\t\tpriv->hw_vport[port_id] = mlx5dr_action_create_dest_vport\n+\t\t\t\t(priv->dr_ctx, port_priv->dev_port,\n+\t\t\t\t MLX5DR_ACTION_FLAG_HWS_FDB);\n+\t\tDRV_LOG(DEBUG, \"port %u :: priv->hw_vport[%u]=%p\",\n+\t\t\tpriv->dev_data->port_id, port_id, (void *)priv->hw_vport[port_id]);\n+\t\tif (!priv->hw_vport[port_id])\n+\t\t\treturn -EINVAL;\n+\t}\n+\treturn 0;\n+}\n+\n+static void\n+flow_hw_free_vport_actions(struct mlx5_priv 
*priv)\n+{\n+\tuint16_t port_id;\n+\n+\tif (!priv->hw_vport)\n+\t\treturn;\n+\tfor (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)\n+\t\tif (priv->hw_vport[port_id])\n+\t\t\tmlx5dr_action_destroy(priv->hw_vport[port_id]);\n+\tmlx5_free(priv->hw_vport);\n+\tpriv->hw_vport = NULL;\n+}\n+\n+/**\n+ * Creates a flow pattern template used to match on E-Switch Manager.\n+ * This template is used to set up a table for SQ miss default flow.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   Pointer to flow pattern template on success, NULL otherwise.\n+ */\n+static struct rte_flow_pattern_template *\n+flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_flow_pattern_template_attr attr = {\n+\t\t.relaxed_matching = 0,\n+\t\t.transfer = 1,\n+\t};\n+\tstruct rte_flow_item_ethdev port_spec = {\n+\t\t.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,\n+\t};\n+\tstruct rte_flow_item_ethdev port_mask = {\n+\t\t.port_id = UINT16_MAX,\n+\t};\n+\tstruct rte_flow_item items[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,\n+\t\t\t.spec = &port_spec,\n+\t\t\t.mask = &port_mask,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t};\n+\n+\treturn flow_hw_pattern_template_create(dev, &attr, items, NULL);\n+}\n+\n+/**\n+ * Creates a flow pattern template used to match on a TX queue.\n+ * This template is used to set up a table for SQ miss default flow.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   Pointer to flow pattern template on success, NULL otherwise.\n+ */\n+static struct rte_flow_pattern_template *\n+flow_hw_create_ctrl_sq_pattern_template(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_flow_pattern_template_attr attr = {\n+\t\t.relaxed_matching = 0,\n+\t\t.transfer = 1,\n+\t};\n+\tstruct mlx5_rte_flow_item_tx_queue queue_mask = {\n+\t\t.queue = UINT32_MAX,\n+\t};\n+\tstruct rte_flow_item items[] = {\n+\t\t{\n+\t\t\t.type = (enum 
rte_flow_item_type)\n+\t\t\t\tMLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,\n+\t\t\t.mask = &queue_mask,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t};\n+\n+\treturn flow_hw_pattern_template_create(dev, &attr, items, NULL);\n+}\n+\n+/**\n+ * Creates a flow pattern template with unmasked represented port matching.\n+ * This template is used to set up a table for default transfer flows\n+ * directing packets to group 1.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   Pointer to flow pattern template on success, NULL otherwise.\n+ */\n+static struct rte_flow_pattern_template *\n+flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_flow_pattern_template_attr attr = {\n+\t\t.relaxed_matching = 0,\n+\t\t.transfer = 1,\n+\t};\n+\tstruct rte_flow_item_ethdev port_mask = {\n+\t\t.port_id = UINT16_MAX,\n+\t};\n+\tstruct rte_flow_item items[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,\n+\t\t\t.mask = &port_mask,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t};\n+\n+\treturn flow_hw_pattern_template_create(dev, &attr, items, NULL);\n+}\n+\n+/**\n+ * Creates a flow actions template with an unmasked JUMP action. Flows\n+ * based on this template will perform a jump to some group. 
This template\n+ * is used to set up tables for control flows.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param group\n+ *   Destination group for this action template.\n+ *\n+ * @return\n+ *   Pointer to flow actions template on success, NULL otherwise.\n+ */\n+static struct rte_flow_actions_template *\n+flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev,\n+\t\t\t\t\t  uint32_t group)\n+{\n+\tstruct rte_flow_actions_template_attr attr = {\n+\t\t.transfer = 1,\n+\t};\n+\tstruct rte_flow_action_jump jump_v = {\n+\t\t.group = group,\n+\t};\n+\tstruct rte_flow_action_jump jump_m = {\n+\t\t.group = UINT32_MAX,\n+\t};\n+\tstruct rte_flow_action actions_v[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_JUMP,\n+\t\t\t.conf = &jump_v,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t}\n+\t};\n+\tstruct rte_flow_action actions_m[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_JUMP,\n+\t\t\t.conf = &jump_m,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t}\n+\t};\n+\n+\treturn flow_hw_actions_template_create(dev, &attr, actions_v, actions_m,\n+\t\t\t\t\t       NULL);\n+}\n+\n+/**\n+ * Creates a flow action template with a unmasked REPRESENTED_PORT action.\n+ * It is used to create control flow tables.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   Pointer to flow action template on success, NULL otherwise.\n+ */\n+static struct rte_flow_actions_template *\n+flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_flow_actions_template_attr attr = {\n+\t\t.transfer = 1,\n+\t};\n+\tstruct rte_flow_action_ethdev port_v = {\n+\t\t.port_id = 0,\n+\t};\n+\tstruct rte_flow_action actions_v[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,\n+\t\t\t.conf = &port_v,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t}\n+\t};\n+\tstruct rte_flow_action_ethdev port_m = {\n+\t\t.port_id = 0,\n+\t};\n+\tstruct 
rte_flow_action actions_m[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,\n+\t\t\t.conf = &port_m,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t}\n+\t};\n+\n+\treturn flow_hw_actions_template_create(dev, &attr, actions_v, actions_m,\n+\t\t\t\t\t       NULL);\n+}\n+\n+/**\n+ * Creates a control flow table used to transfer traffic from E-Switch Manager\n+ * and TX queues from group 0 to group 1.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param it\n+ *   Pointer to flow pattern template.\n+ * @param at\n+ *   Pointer to flow actions template.\n+ *\n+ * @return\n+ *   Pointer to flow table on success, NULL otherwise.\n+ */\n+static struct rte_flow_template_table*\n+flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,\n+\t\t\t\t       struct rte_flow_pattern_template *it,\n+\t\t\t\t       struct rte_flow_actions_template *at)\n+{\n+\tstruct rte_flow_template_table_attr attr = {\n+\t\t.flow_attr = {\n+\t\t\t.group = 0,\n+\t\t\t.priority = 0,\n+\t\t\t.ingress = 0,\n+\t\t\t.egress = 0,\n+\t\t\t.transfer = 1,\n+\t\t},\n+\t\t.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,\n+\t};\n+\n+\treturn flow_hw_table_create(dev, &attr, &it, 1, &at, 1, NULL);\n+}\n+\n+\n+/**\n+ * Creates a control flow table used to transfer traffic from E-Switch Manager\n+ * and TX queues from group 0 to group 1.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param it\n+ *   Pointer to flow pattern template.\n+ * @param at\n+ *   Pointer to flow actions template.\n+ *\n+ * @return\n+ *   Pointer to flow table on success, NULL otherwise.\n+ */\n+static struct rte_flow_template_table*\n+flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,\n+\t\t\t\t  struct rte_flow_pattern_template *it,\n+\t\t\t\t  struct rte_flow_actions_template *at)\n+{\n+\tstruct rte_flow_template_table_attr attr = {\n+\t\t.flow_attr = {\n+\t\t\t.group = MLX5_HW_SQ_MISS_GROUP,\n+\t\t\t.priority = 0,\n+\t\t\t.ingress = 0,\n+\t\t\t.egress = 
0,\n+\t\t\t.transfer = 1,\n+\t\t},\n+\t\t.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,\n+\t};\n+\n+\treturn flow_hw_table_create(dev, &attr, &it, 1, &at, 1, NULL);\n+}\n+\n+/**\n+ * Creates a control flow table used to transfer traffic\n+ * from group 0 to group 1.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param it\n+ *   Pointer to flow pattern template.\n+ * @param at\n+ *   Pointer to flow actions template.\n+ *\n+ * @return\n+ *   Pointer to flow table on success, NULL otherwise.\n+ */\n+static struct rte_flow_template_table *\n+flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,\n+\t\t\t       struct rte_flow_pattern_template *it,\n+\t\t\t       struct rte_flow_actions_template *at)\n+{\n+\tstruct rte_flow_template_table_attr attr = {\n+\t\t.flow_attr = {\n+\t\t\t.group = 0,\n+\t\t\t.priority = 15, /* TODO: Flow priority discovery. */\n+\t\t\t.ingress = 0,\n+\t\t\t.egress = 0,\n+\t\t\t.transfer = 1,\n+\t\t},\n+\t\t.nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,\n+\t};\n+\n+\treturn flow_hw_table_create(dev, &attr, &it, 1, &at, 1, NULL);\n+}\n+\n+/**\n+ * Creates a set of flow tables used to create control flows used\n+ * when E-Switch is engaged.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   0 on success, EINVAL otherwise\n+ */\n+static __rte_unused int\n+flow_hw_create_ctrl_tables(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL;\n+\tstruct rte_flow_pattern_template *sq_items_tmpl = NULL;\n+\tstruct rte_flow_pattern_template *port_items_tmpl = NULL;\n+\tstruct rte_flow_actions_template *jump_sq_actions_tmpl = NULL;\n+\tstruct rte_flow_actions_template *port_actions_tmpl = NULL;\n+\tstruct rte_flow_actions_template *jump_one_actions_tmpl = NULL;\n+\n+\t/* Item templates */\n+\tesw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev);\n+\tif (!esw_mgr_items_tmpl) {\n+\t\tDRV_LOG(ERR, \"port %u failed to 
create E-Switch Manager item\"\n+\t\t\t\" template for control flows\", dev->data->port_id);\n+\t\tgoto error;\n+\t}\n+\tsq_items_tmpl = flow_hw_create_ctrl_sq_pattern_template(dev);\n+\tif (!sq_items_tmpl) {\n+\t\tDRV_LOG(ERR, \"port %u failed to create SQ item template for\"\n+\t\t\t\" control flows\", dev->data->port_id);\n+\t\tgoto error;\n+\t}\n+\tport_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev);\n+\tif (!port_items_tmpl) {\n+\t\tDRV_LOG(ERR, \"port %u failed to create SQ item template for\"\n+\t\t\t\" control flows\", dev->data->port_id);\n+\t\tgoto error;\n+\t}\n+\t/* Action templates */\n+\tjump_sq_actions_tmpl = flow_hw_create_ctrl_jump_actions_template(dev,\n+\t\t\t\t\t\t\t\t\t MLX5_HW_SQ_MISS_GROUP);\n+\tif (!jump_sq_actions_tmpl) {\n+\t\tDRV_LOG(ERR, \"port %u failed to create jump action template\"\n+\t\t\t\" for control flows\", dev->data->port_id);\n+\t\tgoto error;\n+\t}\n+\tport_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev);\n+\tif (!port_actions_tmpl) {\n+\t\tDRV_LOG(ERR, \"port %u failed to create port action template\"\n+\t\t\t\" for control flows\", dev->data->port_id);\n+\t\tgoto error;\n+\t}\n+\tjump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template(dev, 1);\n+\tif (!jump_one_actions_tmpl) {\n+\t\tDRV_LOG(ERR, \"port %u failed to create jump action template\"\n+\t\t\t\" for control flows\", dev->data->port_id);\n+\t\tgoto error;\n+\t}\n+\t/* Tables */\n+\tMLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL);\n+\tpriv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table\n+\t\t\t(dev, esw_mgr_items_tmpl, jump_sq_actions_tmpl);\n+\tif (!priv->hw_esw_sq_miss_root_tbl) {\n+\t\tDRV_LOG(ERR, \"port %u failed to create table for default sq miss (root table)\"\n+\t\t\t\" for control flows\", dev->data->port_id);\n+\t\tgoto error;\n+\t}\n+\tMLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL);\n+\tpriv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, sq_items_tmpl,\n+\t\t\t\t\t\t\t\t     
port_actions_tmpl);\n+\tif (!priv->hw_esw_sq_miss_tbl) {\n+\t\tDRV_LOG(ERR, \"port %u failed to create table for default sq miss (non-root table)\"\n+\t\t\t\" for control flows\", dev->data->port_id);\n+\t\tgoto error;\n+\t}\n+\tMLX5_ASSERT(priv->hw_esw_zero_tbl == NULL);\n+\tpriv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl,\n+\t\t\t\t\t\t\t       jump_one_actions_tmpl);\n+\tif (!priv->hw_esw_zero_tbl) {\n+\t\tDRV_LOG(ERR, \"port %u failed to create table for default jump to group 1\"\n+\t\t\t\" for control flows\", dev->data->port_id);\n+\t\tgoto error;\n+\t}\n+\treturn 0;\n+error:\n+\tif (priv->hw_esw_zero_tbl) {\n+\t\tflow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL);\n+\t\tpriv->hw_esw_zero_tbl = NULL;\n+\t}\n+\tif (priv->hw_esw_sq_miss_tbl) {\n+\t\tflow_hw_table_destroy(dev, priv->hw_esw_sq_miss_tbl, NULL);\n+\t\tpriv->hw_esw_sq_miss_tbl = NULL;\n+\t}\n+\tif (priv->hw_esw_sq_miss_root_tbl) {\n+\t\tflow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL);\n+\t\tpriv->hw_esw_sq_miss_root_tbl = NULL;\n+\t}\n+\tif (jump_one_actions_tmpl)\n+\t\tflow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL);\n+\tif (port_actions_tmpl)\n+\t\tflow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL);\n+\tif (jump_sq_actions_tmpl)\n+\t\tflow_hw_actions_template_destroy(dev, jump_sq_actions_tmpl, NULL);\n+\tif (port_items_tmpl)\n+\t\tflow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL);\n+\tif (sq_items_tmpl)\n+\t\tflow_hw_pattern_template_destroy(dev, sq_items_tmpl, NULL);\n+\tif (esw_mgr_items_tmpl)\n+\t\tflow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL);\n+\treturn -EINVAL;\n+}\n+\n /**\n  * Configure port HWS resources.\n  *\n@@ -2624,7 +3443,6 @@ flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)\n  * @return\n  *   0 on success, a negative errno value otherwise and rte_errno is set.\n  */\n-\n static int\n flow_hw_configure(struct rte_eth_dev *dev,\n \t\t  const struct 
rte_flow_port_attr *port_attr,\n@@ -2647,6 +3465,14 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\t.free = mlx5_free,\n \t\t.type = \"mlx5_hw_action_construct_data\",\n \t};\n+\t/* Adds one queue to be used by PMD.\n+\t * The last queue will be used by the PMD.\n+\t */\n+\tuint16_t nb_q_updated;\n+\tstruct rte_flow_queue_attr **_queue_attr = NULL;\n+\tstruct rte_flow_queue_attr ctrl_queue_attr = {0};\n+\tbool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);\n+\tint ret;\n \n \tif (!port_attr || !nb_queue || !queue_attr) {\n \t\trte_errno = EINVAL;\n@@ -2655,7 +3481,7 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t/* In case re-configuring, release existing context at first. */\n \tif (priv->dr_ctx) {\n \t\t/* */\n-\t\tfor (i = 0; i < nb_queue; i++) {\n+\t\tfor (i = 0; i < priv->nb_queue; i++) {\n \t\t\thw_q = &priv->hw_q[i];\n \t\t\t/* Make sure all queues are empty. */\n \t\t\tif (hw_q->size != hw_q->job_idx) {\n@@ -2665,26 +3491,42 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\t}\n \t\tflow_hw_resource_release(dev);\n \t}\n+\tctrl_queue_attr.size = queue_attr[0]->size;\n+\tnb_q_updated = nb_queue + 1;\n+\t_queue_attr = mlx5_malloc(MLX5_MEM_ZERO,\n+\t\t\t\t  nb_q_updated *\n+\t\t\t\t  sizeof(struct rte_flow_queue_attr *),\n+\t\t\t\t  64, SOCKET_ID_ANY);\n+\tif (!_queue_attr) {\n+\t\trte_errno = ENOMEM;\n+\t\tgoto err;\n+\t}\n+\n+\tmemcpy(_queue_attr, queue_attr,\n+\t       sizeof(void *) * nb_queue);\n+\t_queue_attr[nb_queue] = &ctrl_queue_attr;\n \tpriv->acts_ipool = mlx5_ipool_create(&cfg);\n \tif (!priv->acts_ipool)\n \t\tgoto err;\n \t/* Allocate the queue job descriptor LIFO. 
*/\n-\tmem_size = sizeof(priv->hw_q[0]) * nb_queue;\n-\tfor (i = 0; i < nb_queue; i++) {\n+\tmem_size = sizeof(priv->hw_q[0]) * nb_q_updated;\n+\tfor (i = 0; i < nb_q_updated; i++) {\n \t\t/*\n \t\t * Check if the queues' size are all the same as the\n \t\t * limitation from HWS layer.\n \t\t */\n-\t\tif (queue_attr[i]->size != queue_attr[0]->size) {\n+\t\tif (_queue_attr[i]->size != _queue_attr[0]->size) {\n \t\t\trte_errno = EINVAL;\n \t\t\tgoto err;\n \t\t}\n \t\tmem_size += (sizeof(struct mlx5_hw_q_job *) +\n+\t\t\t    sizeof(struct mlx5_hw_q_job) +\n \t\t\t    sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +\n \t\t\t    sizeof(struct mlx5_modification_cmd) *\n \t\t\t    MLX5_MHDR_MAX_CMD +\n-\t\t\t    sizeof(struct mlx5_hw_q_job)) *\n-\t\t\t    queue_attr[0]->size;\n+\t\t\t    sizeof(struct rte_flow_item) *\n+\t\t\t    MLX5_HW_MAX_ITEMS) *\n+\t\t\t    _queue_attr[i]->size;\n \t}\n \tpriv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,\n \t\t\t\t 64, SOCKET_ID_ANY);\n@@ -2692,58 +3534,82 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\trte_errno = ENOMEM;\n \t\tgoto err;\n \t}\n-\tfor (i = 0; i < nb_queue; i++) {\n+\tfor (i = 0; i < nb_q_updated; i++) {\n \t\tuint8_t *encap = NULL;\n \t\tstruct mlx5_modification_cmd *mhdr_cmd = NULL;\n+\t\tstruct rte_flow_item *items = NULL;\n \n-\t\tpriv->hw_q[i].job_idx = queue_attr[i]->size;\n-\t\tpriv->hw_q[i].size = queue_attr[i]->size;\n+\t\tpriv->hw_q[i].job_idx = _queue_attr[i]->size;\n+\t\tpriv->hw_q[i].size = _queue_attr[i]->size;\n \t\tif (i == 0)\n \t\t\tpriv->hw_q[i].job = (struct mlx5_hw_q_job **)\n-\t\t\t\t\t    &priv->hw_q[nb_queue];\n+\t\t\t\t\t    &priv->hw_q[nb_q_updated];\n \t\telse\n \t\t\tpriv->hw_q[i].job = (struct mlx5_hw_q_job **)\n-\t\t\t\t\t    &job[queue_attr[i - 1]->size];\n+\t\t\t\t&job[_queue_attr[i - 1]->size - 1].items\n+\t\t\t\t [MLX5_HW_MAX_ITEMS];\n \t\tjob = (struct mlx5_hw_q_job *)\n-\t\t      &priv->hw_q[i].job[queue_attr[i]->size];\n-\t\tmhdr_cmd = (struct mlx5_modification_cmd 
*)&job[queue_attr[i]->size];\n-\t\tencap = (uint8_t *)&mhdr_cmd[queue_attr[i]->size * MLX5_MHDR_MAX_CMD];\n-\t\tfor (j = 0; j < queue_attr[i]->size; j++) {\n+\t\t      &priv->hw_q[i].job[_queue_attr[i]->size];\n+\t\tmhdr_cmd = (struct mlx5_modification_cmd *)\n+\t\t\t   &job[_queue_attr[i]->size];\n+\t\tencap = (uint8_t *)\n+\t\t\t &mhdr_cmd[_queue_attr[i]->size * MLX5_MHDR_MAX_CMD];\n+\t\titems = (struct rte_flow_item *)\n+\t\t\t &encap[_queue_attr[i]->size * MLX5_ENCAP_MAX_LEN];\n+\t\tfor (j = 0; j < _queue_attr[i]->size; j++) {\n \t\t\tjob[j].mhdr_cmd = &mhdr_cmd[j * MLX5_MHDR_MAX_CMD];\n \t\t\tjob[j].encap_data = &encap[j * MLX5_ENCAP_MAX_LEN];\n+\t\t\tjob[j].items = &items[j * MLX5_HW_MAX_ITEMS];\n \t\t\tpriv->hw_q[i].job[j] = &job[j];\n \t\t}\n \t}\n \tdr_ctx_attr.pd = priv->sh->cdev->pd;\n-\tdr_ctx_attr.queues = nb_queue;\n+\tdr_ctx_attr.queues = nb_q_updated;\n \t/* Queue size should all be the same. Take the first one. */\n-\tdr_ctx_attr.queue_size = queue_attr[0]->size;\n+\tdr_ctx_attr.queue_size = _queue_attr[0]->size;\n \tdr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);\n \t/* rte_errno has been updated by HWS layer. */\n \tif (!dr_ctx)\n \t\tgoto err;\n \tpriv->dr_ctx = dr_ctx;\n-\tpriv->nb_queue = nb_queue;\n+\tpriv->nb_queue = nb_q_updated;\n+\trte_spinlock_init(&priv->hw_ctrl_lock);\n+\tLIST_INIT(&priv->hw_ctrl_flows);\n \t/* Add global actions. 
*/\n \tfor (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {\n-\t\tfor (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {\n-\t\t\tpriv->hw_drop[i][j] = mlx5dr_action_create_dest_drop\n-\t\t\t\t(priv->dr_ctx, mlx5_hw_act_flag[i][j]);\n-\t\t\tif (!priv->hw_drop[i][j])\n-\t\t\t\tgoto err;\n-\t\t}\n+\t\tuint32_t act_flags = 0;\n+\n+\t\tact_flags = mlx5_hw_act_flag[i][0] | mlx5_hw_act_flag[i][1];\n+\t\tif (is_proxy)\n+\t\t\tact_flags |= mlx5_hw_act_flag[i][2];\n+\t\tpriv->hw_drop[i] = mlx5dr_action_create_dest_drop(priv->dr_ctx, act_flags);\n+\t\tif (!priv->hw_drop[i])\n+\t\t\tgoto err;\n \t\tpriv->hw_tag[i] = mlx5dr_action_create_tag\n \t\t\t(priv->dr_ctx, mlx5_hw_act_flag[i][0]);\n \t\tif (!priv->hw_tag[i])\n \t\t\tgoto err;\n \t}\n+\tif (is_proxy) {\n+\t\tret = flow_hw_create_vport_actions(priv);\n+\t\tif (ret) {\n+\t\t\trte_errno = -ret;\n+\t\t\tgoto err;\n+\t\t}\n+\t\tret = flow_hw_create_ctrl_tables(dev);\n+\t\tif (ret) {\n+\t\t\trte_errno = -ret;\n+\t\t\tgoto err;\n+\t\t}\n+\t}\n+\tif (_queue_attr)\n+\t\tmlx5_free(_queue_attr);\n \treturn 0;\n err:\n+\tflow_hw_free_vport_actions(priv);\n \tfor (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {\n-\t\tfor (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {\n-\t\t\tif (priv->hw_drop[i][j])\n-\t\t\t\tmlx5dr_action_destroy(priv->hw_drop[i][j]);\n-\t\t}\n+\t\tif (priv->hw_drop[i])\n+\t\t\tmlx5dr_action_destroy(priv->hw_drop[i]);\n \t\tif (priv->hw_tag[i])\n \t\t\tmlx5dr_action_destroy(priv->hw_tag[i]);\n \t}\n@@ -2755,6 +3621,8 @@ flow_hw_configure(struct rte_eth_dev *dev,\n \t\tmlx5_ipool_destroy(priv->acts_ipool);\n \t\tpriv->acts_ipool = NULL;\n \t}\n+\tif (_queue_attr)\n+\t\tmlx5_free(_queue_attr);\n \treturn rte_flow_error_set(error, rte_errno,\n \t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t\t\t  \"fail to configure port\");\n@@ -2773,10 +3641,12 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \tstruct rte_flow_template_table *tbl;\n \tstruct rte_flow_pattern_template *it;\n \tstruct rte_flow_actions_template *at;\n-\tint i, 
j;\n+\tint i;\n \n \tif (!priv->dr_ctx)\n \t\treturn;\n+\tflow_hw_rxq_flag_set(dev, false);\n+\tflow_hw_flush_all_ctrl_flows(dev);\n \twhile (!LIST_EMPTY(&priv->flow_hw_tbl)) {\n \t\ttbl = LIST_FIRST(&priv->flow_hw_tbl);\n \t\tflow_hw_table_destroy(dev, tbl, NULL);\n@@ -2790,13 +3660,12 @@ flow_hw_resource_release(struct rte_eth_dev *dev)\n \t\tflow_hw_actions_template_destroy(dev, at, NULL);\n \t}\n \tfor (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {\n-\t\tfor (j = 0; j < MLX5DR_TABLE_TYPE_MAX; j++) {\n-\t\t\tif (priv->hw_drop[i][j])\n-\t\t\t\tmlx5dr_action_destroy(priv->hw_drop[i][j]);\n-\t\t}\n+\t\tif (priv->hw_drop[i])\n+\t\t\tmlx5dr_action_destroy(priv->hw_drop[i]);\n \t\tif (priv->hw_tag[i])\n \t\t\tmlx5dr_action_destroy(priv->hw_tag[i]);\n \t}\n+\tflow_hw_free_vport_actions(priv);\n \tif (priv->acts_ipool) {\n \t\tmlx5_ipool_destroy(priv->acts_ipool);\n \t\tpriv->acts_ipool = NULL;\n@@ -3039,4 +3908,397 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n \t.action_query = flow_dv_action_query,\n };\n \n+static uint32_t\n+flow_hw_get_ctrl_queue(struct mlx5_priv *priv)\n+{\n+\tMLX5_ASSERT(priv->nb_queue > 0);\n+\treturn priv->nb_queue - 1;\n+}\n+\n+/**\n+ * Creates a control flow using flow template API on @p proxy_dev device,\n+ * on behalf of @p owner_dev device.\n+ *\n+ * This function uses locks internally to synchronize access to the\n+ * flow queue.\n+ *\n+ * Created flow is stored in private list associated with @p proxy_dev device.\n+ *\n+ * @param owner_dev\n+ *   Pointer to Ethernet device on behalf of which flow is created.\n+ * @param proxy_dev\n+ *   Pointer to Ethernet device on which flow is created.\n+ * @param table\n+ *   Pointer to flow table.\n+ * @param items\n+ *   Pointer to flow rule items.\n+ * @param item_template_idx\n+ *   Index of an item template associated with @p table.\n+ * @param actions\n+ *   Pointer to flow rule actions.\n+ * @param action_template_idx\n+ *   Index of an action template associated with @p 
table.\n+ *\n+ * @return\n+ *   0 on success, negative errno value otherwise and rte_errno set.\n+ */\n+static __rte_unused int\n+flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,\n+\t\t\t struct rte_eth_dev *proxy_dev,\n+\t\t\t struct rte_flow_template_table *table,\n+\t\t\t struct rte_flow_item items[],\n+\t\t\t uint8_t item_template_idx,\n+\t\t\t struct rte_flow_action actions[],\n+\t\t\t uint8_t action_template_idx)\n+{\n+\tstruct mlx5_priv *priv = proxy_dev->data->dev_private;\n+\tuint32_t queue = flow_hw_get_ctrl_queue(priv);\n+\tstruct rte_flow_op_attr op_attr = {\n+\t\t.postpone = 0,\n+\t};\n+\tstruct rte_flow *flow = NULL;\n+\tstruct mlx5_hw_ctrl_flow *entry = NULL;\n+\tint ret;\n+\n+\trte_spinlock_lock(&priv->hw_ctrl_lock);\n+\tentry = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_SYS, sizeof(*entry),\n+\t\t\t    0, SOCKET_ID_ANY);\n+\tif (!entry) {\n+\t\tDRV_LOG(ERR, \"port %u not enough memory to create control flows\",\n+\t\t\tproxy_dev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tret = -rte_errno;\n+\t\tgoto error;\n+\t}\n+\tflow = flow_hw_async_flow_create(proxy_dev, queue, &op_attr, table,\n+\t\t\t\t\t items, item_template_idx,\n+\t\t\t\t\t actions, action_template_idx,\n+\t\t\t\t\t NULL, NULL);\n+\tif (!flow) {\n+\t\tDRV_LOG(ERR, \"port %u failed to enqueue create control\"\n+\t\t\t\" flow operation\", proxy_dev->data->port_id);\n+\t\tret = -rte_errno;\n+\t\tgoto error;\n+\t}\n+\tret = flow_hw_push(proxy_dev, queue, NULL);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"port %u failed to drain control flow queue\",\n+\t\t\tproxy_dev->data->port_id);\n+\t\tgoto error;\n+\t}\n+\tret = __flow_hw_pull_comp(proxy_dev, queue, 1, NULL);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"port %u failed to insert control flow\",\n+\t\t\tproxy_dev->data->port_id);\n+\t\trte_errno = EINVAL;\n+\t\tret = -rte_errno;\n+\t\tgoto error;\n+\t}\n+\tentry->owner_dev = owner_dev;\n+\tentry->flow = flow;\n+\tLIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, 
next);\n+\trte_spinlock_unlock(&priv->hw_ctrl_lock);\n+\treturn 0;\n+error:\n+\tif (entry)\n+\t\tmlx5_free(entry);\n+\trte_spinlock_unlock(&priv->hw_ctrl_lock);\n+\treturn ret;\n+}\n+\n+/**\n+ * Destroys a control flow @p flow using flow template API on @p dev device.\n+ *\n+ * This function uses locks internally to synchronize access to the\n+ * flow queue.\n+ *\n+ * If the @p flow is stored on any private list/pool, then caller must free up\n+ * the relevant resources.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param flow\n+ *   Pointer to flow rule.\n+ *\n+ * @return\n+ *   0 on success, non-zero value otherwise.\n+ */\n+static int\n+flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tuint32_t queue = flow_hw_get_ctrl_queue(priv);\n+\tstruct rte_flow_op_attr op_attr = {\n+\t\t.postpone = 0,\n+\t};\n+\tint ret;\n+\n+\trte_spinlock_lock(&priv->hw_ctrl_lock);\n+\tret = flow_hw_async_flow_destroy(dev, queue, &op_attr, flow, NULL, NULL);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"port %u failed to enqueue destroy control\"\n+\t\t\t\" flow operation\", dev->data->port_id);\n+\t\tgoto exit;\n+\t}\n+\tret = flow_hw_push(dev, queue, NULL);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"port %u failed to drain control flow queue\",\n+\t\t\tdev->data->port_id);\n+\t\tgoto exit;\n+\t}\n+\tret = __flow_hw_pull_comp(dev, queue, 1, NULL);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"port %u failed to destroy control flow\",\n+\t\t\tdev->data->port_id);\n+\t\trte_errno = EINVAL;\n+\t\tret = -rte_errno;\n+\t\tgoto exit;\n+\t}\n+exit:\n+\trte_spinlock_unlock(&priv->hw_ctrl_lock);\n+\treturn ret;\n+}\n+\n+/**\n+ * Destroys control flows created on behalf of @p owner_dev device.\n+ *\n+ * @param owner_dev\n+ *   Pointer to Ethernet device owning control flows.\n+ *\n+ * @return\n+ *   0 on success, otherwise negative error code is returned and\n+ *   rte_errno is set.\n+ 
*/\n+int\n+mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *owner_dev)\n+{\n+\tstruct mlx5_priv *owner_priv = owner_dev->data->dev_private;\n+\tstruct rte_eth_dev *proxy_dev;\n+\tstruct mlx5_priv *proxy_priv;\n+\tstruct mlx5_hw_ctrl_flow *cf;\n+\tstruct mlx5_hw_ctrl_flow *cf_next;\n+\tuint16_t owner_port_id = owner_dev->data->port_id;\n+\tuint16_t proxy_port_id = owner_dev->data->port_id;\n+\tint ret;\n+\n+\tif (owner_priv->sh->config.dv_esw_en) {\n+\t\tif (rte_flow_pick_transfer_proxy(owner_port_id, &proxy_port_id, NULL)) {\n+\t\t\tDRV_LOG(ERR, \"Unable to find proxy port for port %u\",\n+\t\t\t\towner_port_id);\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tproxy_dev = &rte_eth_devices[proxy_port_id];\n+\t\tproxy_priv = proxy_dev->data->dev_private;\n+\t} else {\n+\t\tproxy_dev = owner_dev;\n+\t\tproxy_priv = owner_priv;\n+\t}\n+\tcf = LIST_FIRST(&proxy_priv->hw_ctrl_flows);\n+\twhile (cf != NULL) {\n+\t\tcf_next = LIST_NEXT(cf, next);\n+\t\tif (cf->owner_dev == owner_dev) {\n+\t\t\tret = flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow);\n+\t\t\tif (ret) {\n+\t\t\t\trte_errno = ret;\n+\t\t\t\treturn -ret;\n+\t\t\t}\n+\t\t\tLIST_REMOVE(cf, next);\n+\t\t\tmlx5_free(cf);\n+\t\t}\n+\t\tcf = cf_next;\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * Destroys all control flows created on @p dev device.\n+ *\n+ * @param owner_dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   0 on success, otherwise negative error code is returned and\n+ *   rte_errno is set.\n+ */\n+static int\n+flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hw_ctrl_flow *cf;\n+\tstruct mlx5_hw_ctrl_flow *cf_next;\n+\tint ret;\n+\n+\tcf = LIST_FIRST(&priv->hw_ctrl_flows);\n+\twhile (cf != NULL) {\n+\t\tcf_next = LIST_NEXT(cf, next);\n+\t\tret = flow_hw_destroy_ctrl_flow(dev, cf->flow);\n+\t\tif (ret) {\n+\t\t\trte_errno = ret;\n+\t\t\treturn -ret;\n+\t\t}\n+\t\tLIST_REMOVE(cf, 
next);\n+\t\tmlx5_free(cf);\n+\t\tcf = cf_next;\n+\t}\n+\treturn 0;\n+}\n+\n+int\n+mlx5_flow_hw_esw_create_mgr_sq_miss_flow(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct rte_flow_item_ethdev port_spec = {\n+\t\t.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,\n+\t};\n+\tstruct rte_flow_item_ethdev port_mask = {\n+\t\t.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,\n+\t};\n+\tstruct rte_flow_item items[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,\n+\t\t\t.spec = &port_spec,\n+\t\t\t.mask = &port_mask,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t};\n+\tstruct rte_flow_action_jump jump = {\n+\t\t.group = MLX5_HW_SQ_MISS_GROUP,\n+\t};\n+\tstruct rte_flow_action actions[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_JUMP,\n+\t\t\t.conf = &jump,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t},\n+\t};\n+\n+\tMLX5_ASSERT(priv->master);\n+\tif (!priv->dr_ctx ||\n+\t    !priv->hw_esw_sq_miss_root_tbl)\n+\t\treturn 0;\n+\treturn flow_hw_create_ctrl_flow(dev, dev,\n+\t\t\t\t\tpriv->hw_esw_sq_miss_root_tbl,\n+\t\t\t\t\titems, 0, actions, 0);\n+}\n+\n+int\n+mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)\n+{\n+\tuint16_t port_id = dev->data->port_id;\n+\tstruct mlx5_rte_flow_item_tx_queue queue_spec = {\n+\t\t.queue = txq,\n+\t};\n+\tstruct mlx5_rte_flow_item_tx_queue queue_mask = {\n+\t\t.queue = UINT32_MAX,\n+\t};\n+\tstruct rte_flow_item items[] = {\n+\t\t{\n+\t\t\t.type = (enum rte_flow_item_type)\n+\t\t\t\tMLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,\n+\t\t\t.spec = &queue_spec,\n+\t\t\t.mask = &queue_mask,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t};\n+\tstruct rte_flow_action_ethdev port = {\n+\t\t.port_id = port_id,\n+\t};\n+\tstruct rte_flow_action actions[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,\n+\t\t\t.conf = &port,\n+\t\t},\n+\t\t{\n+\t\t\t.type = 
RTE_FLOW_ACTION_TYPE_END,\n+\t\t},\n+\t};\n+\tstruct rte_eth_dev *proxy_dev;\n+\tstruct mlx5_priv *proxy_priv;\n+\tuint16_t proxy_port_id = dev->data->port_id;\n+\tint ret;\n+\n+\tRTE_SET_USED(txq);\n+\tret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Unable to pick proxy port for port %u\", port_id);\n+\t\treturn ret;\n+\t}\n+\tproxy_dev = &rte_eth_devices[proxy_port_id];\n+\tproxy_priv = proxy_dev->data->dev_private;\n+\tif (!proxy_priv->dr_ctx)\n+\t\treturn 0;\n+\tif (!proxy_priv->hw_esw_sq_miss_root_tbl ||\n+\t    !proxy_priv->hw_esw_sq_miss_tbl) {\n+\t\tDRV_LOG(ERR, \"port %u proxy port %u was configured but default\"\n+\t\t\t\" flow tables are not created\",\n+\t\t\tport_id, proxy_port_id);\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n+\treturn flow_hw_create_ctrl_flow(dev, proxy_dev,\n+\t\t\t\t\tproxy_priv->hw_esw_sq_miss_tbl,\n+\t\t\t\t\titems, 0, actions, 0);\n+}\n+\n+int\n+mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)\n+{\n+\tuint16_t port_id = dev->data->port_id;\n+\tstruct rte_flow_item_ethdev port_spec = {\n+\t\t.port_id = port_id,\n+\t};\n+\tstruct rte_flow_item items[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,\n+\t\t\t.spec = &port_spec,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t\t},\n+\t};\n+\tstruct rte_flow_action_jump jump = {\n+\t\t.group = 1,\n+\t};\n+\tstruct rte_flow_action actions[] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_JUMP,\n+\t\t\t.conf = &jump,\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ACTION_TYPE_END,\n+\t\t}\n+\t};\n+\tstruct rte_eth_dev *proxy_dev;\n+\tstruct mlx5_priv *proxy_priv;\n+\tuint16_t proxy_port_id = dev->data->port_id;\n+\tint ret;\n+\n+\tret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Unable to pick proxy port for port %u\", port_id);\n+\t\treturn ret;\n+\t}\n+\tproxy_dev = &rte_eth_devices[proxy_port_id];\n+\tproxy_priv = 
proxy_dev->data->dev_private;\n+\tif (!proxy_priv->dr_ctx)\n+\t\treturn 0;\n+\tif (!proxy_priv->hw_esw_zero_tbl) {\n+\t\tDRV_LOG(ERR, \"port %u proxy port %u was configured but default\"\n+\t\t\t\" flow tables are not created\",\n+\t\t\tport_id, proxy_port_id);\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\treturn flow_hw_create_ctrl_flow(dev, proxy_dev,\n+\t\t\t\t\tproxy_priv->hw_esw_zero_tbl,\n+\t\t\t\t\titems, 0, actions, 0);\n+}\n+\n #endif\ndiff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c\nindex fd902078f8..7ffaf4c227 100644\n--- a/drivers/net/mlx5/mlx5_flow_verbs.c\n+++ b/drivers/net/mlx5/mlx5_flow_verbs.c\n@@ -1245,12 +1245,14 @@ flow_verbs_validate(struct rte_eth_dev *dev,\n \tuint16_t ether_type = 0;\n \tbool is_empty_vlan = false;\n \tuint16_t udp_dport = 0;\n+\tbool is_root;\n \n \tif (items == NULL)\n \t\treturn -1;\n \tret = mlx5_flow_validate_attributes(dev, attr, error);\n \tif (ret < 0)\n \t\treturn ret;\n+\tis_root = ret;\n \tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\n \t\tint tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);\n \t\tint ret = 0;\n@@ -1380,7 +1382,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n \t\t\tret = mlx5_flow_validate_item_vxlan(dev, udp_dport,\n \t\t\t\t\t\t\t    items, item_flags,\n-\t\t\t\t\t\t\t    attr, error);\n+\t\t\t\t\t\t\t    is_root, error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n \t\t\tlast_item = MLX5_FLOW_LAYER_VXLAN;\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex c68b32cf14..6313602a66 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -1280,6 +1280,52 @@ mlx5_dev_stop(struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+\n+static int\n+mlx5_traffic_enable_hws(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tunsigned int i;\n+\tint ret;\n+\n+\tif 
(priv->sh->config.dv_esw_en && priv->master) {\n+\t\tif (mlx5_flow_hw_esw_create_mgr_sq_miss_flow(dev))\n+\t\t\tgoto error;\n+\t}\n+\tfor (i = 0; i < priv->txqs_n; ++i) {\n+\t\tstruct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);\n+\t\tuint32_t queue;\n+\n+\t\tif (!txq)\n+\t\t\tcontinue;\n+\t\tif (txq->is_hairpin)\n+\t\t\tqueue = txq->obj->sq->id;\n+\t\telse\n+\t\t\tqueue = txq->obj->sq_obj.sq->id;\n+\t\tif ((priv->representor || priv->master) &&\n+\t\t    priv->sh->config.dv_esw_en) {\n+\t\t\tif (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue)) {\n+\t\t\t\tmlx5_txq_release(dev, i);\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t}\n+\t\tmlx5_txq_release(dev, i);\n+\t}\n+\tif ((priv->master || priv->representor) && priv->sh->config.dv_esw_en) {\n+\t\tif (mlx5_flow_hw_esw_create_default_jump_flow(dev))\n+\t\t\tgoto error;\n+\t}\n+\treturn 0;\n+error:\n+\tret = rte_errno;\n+\tmlx5_flow_hw_flush_ctrl_flows(dev);\n+\trte_errno = ret;\n+\treturn -rte_errno;\n+}\n+\n+#endif\n+\n /**\n  * Enable traffic flows configured by control plane\n  *\n@@ -1316,6 +1362,10 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)\n \tunsigned int j;\n \tint ret;\n \n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+\tif (priv->sh->config.dv_flow_en == 2)\n+\t\treturn mlx5_traffic_enable_hws(dev);\n+#endif\n \t/*\n \t * Hairpin txq default flow should be created no matter if it is\n \t * isolation mode. 
Or else all the packets to be sent will be sent\n@@ -1346,13 +1396,17 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)\n \t\t}\n \t\tmlx5_txq_release(dev, i);\n \t}\n-\tif (priv->sh->config.dv_esw_en) {\n-\t\tif (mlx5_flow_create_esw_table_zero_flow(dev))\n-\t\t\tpriv->fdb_def_rule = 1;\n-\t\telse\n-\t\t\tDRV_LOG(INFO, \"port %u FDB default rule cannot be\"\n-\t\t\t\t\" configured - only Eswitch group 0 flows are\"\n-\t\t\t\t\" supported.\", dev->data->port_id);\n+\tif (priv->sh->config.fdb_def_rule) {\n+\t\tif (priv->sh->config.dv_esw_en) {\n+\t\t\tif (mlx5_flow_create_esw_table_zero_flow(dev))\n+\t\t\t\tpriv->fdb_def_rule = 1;\n+\t\t\telse\n+\t\t\t\tDRV_LOG(INFO, \"port %u FDB default rule cannot be configured - only Eswitch group 0 flows are supported.\",\n+\t\t\t\t\tdev->data->port_id);\n+\t\t}\n+\t} else {\n+\t\tDRV_LOG(INFO, \"port %u FDB default rule is disabled\",\n+\t\t\tdev->data->port_id);\n \t}\n \tif (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) {\n \t\tret = mlx5_flow_lacp_miss(dev);\n@@ -1470,7 +1524,14 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)\n void\n mlx5_traffic_disable(struct rte_eth_dev *dev)\n {\n-\tmlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tif (priv->sh->config.dv_flow_en == 2)\n+\t\tmlx5_flow_hw_flush_ctrl_flows(dev);\n+\telse\n+#endif\n+\t\tmlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);\n }\n \n /**\n",
    "prefixes": [
        "v2",
        "05/17"
    ]
}