get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are modified).

put:
Update a patch (all writable fields).
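
Updates require an authenticated request. A minimal sketch of a partial update using Python's requests package (an assumption, not part of this page; the API token and the target state are placeholders, and the account must have maintainer rights on the project):

# Hedged sketch: set this patch's state via HTTP PATCH.
# "<your-api-token>" and "accepted" are placeholders.
import requests

resp = requests.patch(
    "https://patches.dpdk.org/api/patches/84167/",
    headers={"Authorization": "Token <your-api-token>"},
    json={"state": "accepted"},  # only the fields sent are modified
)
resp.raise_for_status()
print(resp.json()["state"])

A sample GET exchange for this resource: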

GET /api/patches/84167/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 84167,
    "url": "https://patches.dpdk.org/api/patches/84167/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20201113145231.13154-6-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201113145231.13154-6-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201113145231.13154-6-getelson@nvidia.com",
    "date": "2020-11-13T14:52:30",
    "name": "[v2,5/5] net/mlx5: fix non-dv compilation errors",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "7993a13482ba86e74aeddb537a6cf5f95e63788d",
    "submitter": {
        "id": 1882,
        "url": "https://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20201113145231.13154-6-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 13876,
            "url": "https://patches.dpdk.org/api/series/13876/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=13876",
            "date": "2020-11-13T14:52:25",
            "name": "restore tunnel offload functionality in mlx5",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/13876/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/84167/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/84167/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 330E1A09E0;\n\tFri, 13 Nov 2020 15:55:05 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 4F1DDC908;\n\tFri, 13 Nov 2020 15:53:02 +0100 (CET)",
            "from hqnvemgate25.nvidia.com (hqnvemgate25.nvidia.com\n [216.228.121.64]) by dpdk.org (Postfix) with ESMTP id 0DD2FC904\n for <dev@dpdk.org>; Fri, 13 Nov 2020 15:52:59 +0100 (CET)",
            "from hqmail.nvidia.com (Not Verified[216.228.121.13]) by\n hqnvemgate25.nvidia.com (using TLS: TLSv1.2, AES256-SHA)\n id <B5fae9dc30003>; Fri, 13 Nov 2020 06:52:51 -0800",
            "from nvidia.com (10.124.1.5) by HQMAIL107.nvidia.com (172.20.187.13)\n with Microsoft SMTP Server (TLS) id 15.0.1473.3;\n Fri, 13 Nov 2020 14:52:56 +0000"
        ],
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Shahaf Shuler\n <shahafs@nvidia.com>",
        "Date": "Fri, 13 Nov 2020 16:52:30 +0200",
        "Message-ID": "<20201113145231.13154-6-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20201113145231.13154-1-getelson@nvidia.com>",
        "References": "<20201113145231.13154-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "quoted-printable",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.124.1.5]",
        "X-ClientProxiedBy": "HQMAIL105.nvidia.com (172.20.187.12) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=nvidia.com; s=n1;\n t=1605279171; bh=ztnVe0mM59TOxRYT9lvhnFu4urKb4QRc9luOv/cF4E4=;\n h=From:To:CC:Subject:Date:Message-ID:X-Mailer:In-Reply-To:\n References:MIME-Version:Content-Transfer-Encoding:Content-Type:\n X-Originating-IP:X-ClientProxiedBy;\n b=in/8iizxJ3JTFoHhWrpqjdbsUkhm9ihV+wOcnyKj20g41tWyBP9GfkXZS4Bmv479d\n 7TrhHNpxwLyuHs404+o+fz86uL6zvwH3i7Qmjq/GnqPm8aYtVG1wBqcnHkTR3qoAFP\n ZyUXugvAo3LMBbBcXxLBq2jgRCbvFv02UDVTS1tX7f8fjy8EixqMrP+1GDuSJfaXUu\n BHkBcWq1ubO4b+DKAcjpyCzSw5J24OGleDLufcWb83YN6ry37Cu9YtXoL9rIOA78O/\n 5trBUdCezs3qprb1uc43lQ9vduLu8ZIRxYbjDccB+mybTSa8PqTBSvLRge058SvdRR\n oGWfYp3FAza9w==",
        "Subject": "[dpdk-dev] [PATCH v2 5/5] net/mlx5: fix non-dv compilation errors",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "ipools used for tunnel offload are restricted to dv enabled code only\nsince 1d1248d452ff, while the rest of the tunnel offload API did not\nhave compilation limitations. As the results, PMD compilation failed\non non-dv setups.\n\nCurrent patch groups tunnel offload code in dv enabled code area and\nprovides stubs for calls that have to be visible in non-dv\nenvironment. These stabs will notify caller that tunnel offload\nfunctionality is not supported on that setup.\n\nFixes: 1d1248d452ff (\"net/mlx5: fix offloaded tunnel allocation\")\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_os.c |   16 +-\n drivers/net/mlx5/mlx5_flow.c     | 1104 +++++++++++++++++-------------\n drivers/net/mlx5/mlx5_flow.h     |   17 +-\n 3 files changed, 638 insertions(+), 499 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex c78d56fae3..1aee481bd7 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -301,6 +301,12 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)\n \t\tgoto error;\n \t}\n \tsh->encaps_decaps->ctx = sh;\n+\tif (!sh->tunnel_hub)\n+\t\terr = mlx5_alloc_tunnel_hub(sh);\n+\tif (err) {\n+\t\tDRV_LOG(ERR, \"mlx5_alloc_tunnel_hub failed err=%d\", err);\n+\t\tgoto error;\n+\t}\n #endif\n #ifdef HAVE_MLX5DV_DR\n \tvoid *domain;\n@@ -335,12 +341,6 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)\n \t\tsh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();\n \t}\n #endif\n-\tif (!sh->tunnel_hub)\n-\t\terr = mlx5_alloc_tunnel_hub(sh);\n-\tif (err) {\n-\t\tDRV_LOG(ERR, \"mlx5_alloc_tunnel_hub failed err=%d\", err);\n-\t\tgoto error;\n-\t}\n \tif (priv->config.reclaim_mode == MLX5_RCM_AGGR) {\n \t\tmlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);\n \t\tmlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);\n@@ -389,10 +389,12 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)\n \t\tmlx5_hlist_destroy(sh->tag_table);\n \t\tsh->tag_table = NULL;\n \t}\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n \tif (sh->tunnel_hub) {\n \t\tmlx5_release_tunnel_hub(sh, priv->dev_port);\n \t\tsh->tunnel_hub = NULL;\n \t}\n+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */\n \tmlx5_free_table_hash_list(priv);\n \treturn err;\n }\n@@ -451,10 +453,12 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)\n \t\tmlx5_hlist_destroy(sh->tag_table);\n \t\tsh->tag_table = NULL;\n \t}\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n \tif (sh->tunnel_hub) {\n \t\tmlx5_release_tunnel_hub(sh, priv->dev_port);\n \t\tsh->tunnel_hub = NULL;\n \t}\n+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */\n \tmlx5_cache_list_destroy(&sh->port_id_action_list);\n \tmlx5_cache_list_destroy(&sh->push_vlan_action_list);\n \tmlx5_free_table_hash_list(priv);\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 358a5f4e72..11bc8e9dde 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -33,25 +33,34 @@\n #include \"mlx5_common_os.h\"\n #include \"rte_pmd_mlx5.h\"\n \n-static bool\n-mlx5_access_tunnel_offload_db\n-\t(struct rte_eth_dev *dev,\n-\t bool (*match)(struct rte_eth_dev *,\n-\t\t       struct mlx5_flow_tunnel *, const void *),\n-\t void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),\n-\t void (*miss)(struct rte_eth_dev *, void *),\n-\t void *ctx, bool lock_op);\n+struct tunnel_default_miss_ctx {\n+\tuint16_t *queue;\n+\t__extension__\n+\tunion {\n+\t\tstruct rte_flow_action_rss action_rss;\n+\t\tstruct rte_flow_action_queue miss_queue;\n+\t\tstruct rte_flow_action_jump miss_jump;\n+\t\tuint8_t raw[0];\n+\t};\n+};\n+\n+static int\n+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n+\t\t\t     struct rte_flow *flow,\n+\t\t\t     const struct rte_flow_attr *attr,\n+\t\t\t     const struct rte_flow_action *app_actions,\n+\t\t\t     uint32_t flow_idx,\n+\t\t\t     struct tunnel_default_miss_ctx *ctx,\n+\t\t\t     struct rte_flow_error *error);\n static struct mlx5_flow_tunnel *\n mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);\n static void\n mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);\n-static const struct mlx5_flow_tbl_data_entry  *\n-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);\n-static int\n-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,\n-\t\t     const struct rte_flow_tunnel *app_tunnel,\n-\t\t     struct 
mlx5_flow_tunnel **tunnel);\n-\n+static uint32_t\n+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,\n+\t\t\t\tconst struct mlx5_flow_tunnel *tunnel,\n+\t\t\t\tuint32_t group, uint32_t *table,\n+\t\t\t\tstruct rte_flow_error *error);\n \n /** Device flow drivers. */\n extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;\n@@ -588,203 +597,32 @@ static int mlx5_shared_action_query\n \t\t\t\t const struct rte_flow_shared_action *action,\n \t\t\t\t void *data,\n \t\t\t\t struct rte_flow_error *error);\n-static inline bool\n-mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,\n-\t\t\t  struct rte_flow_tunnel *tunnel,\n-\t\t\t  const char *err_msg)\n-{\n-\terr_msg = NULL;\n-\tif (!is_tunnel_offload_active(dev)) {\n-\t\terr_msg = \"tunnel offload was not activated\";\n-\t\tgoto out;\n-\t} else if (!tunnel) {\n-\t\terr_msg = \"no application tunnel\";\n-\t\tgoto out;\n-\t}\n-\n-\tswitch (tunnel->type) {\n-\tdefault:\n-\t\terr_msg = \"unsupported tunnel type\";\n-\t\tgoto out;\n-\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n-\t\tbreak;\n-\t}\n-\n-out:\n-\treturn !err_msg;\n-}\n-\n-\n static int\n mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,\n-\t\t    struct rte_flow_tunnel *app_tunnel,\n-\t\t    struct rte_flow_action **actions,\n-\t\t    uint32_t *num_of_actions,\n-\t\t    struct rte_flow_error *error)\n-{\n-\tint ret;\n-\tstruct mlx5_flow_tunnel *tunnel;\n-\tconst char *err_msg = NULL;\n-\tbool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);\n-\n-\tif (!verdict)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,\n-\t\t\t\t\t  err_msg);\n-\tret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);\n-\tif (ret < 0) {\n-\t\treturn rte_flow_error_set(error, ret,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,\n-\t\t\t\t\t  \"failed to initialize pmd tunnel\");\n-\t}\n-\t*actions = &tunnel->action;\n-\t*num_of_actions = 1;\n-\treturn 0;\n-}\n-\n+\t\t\t   struct rte_flow_tunnel *app_tunnel,\n+\t\t\t   struct rte_flow_action **actions,\n+\t\t\t   uint32_t *num_of_actions,\n+\t\t\t   struct rte_flow_error *error);\n static int\n mlx5_flow_tunnel_match(struct rte_eth_dev *dev,\n \t\t       struct rte_flow_tunnel *app_tunnel,\n \t\t       struct rte_flow_item **items,\n \t\t       uint32_t *num_of_items,\n-\t\t       struct rte_flow_error *error)\n-{\n-\tint ret;\n-\tstruct mlx5_flow_tunnel *tunnel;\n-\tconst char *err_msg = NULL;\n-\tbool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);\n-\n-\tif (!verdict)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t\t  err_msg);\n-\tret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);\n-\tif (ret < 0) {\n-\t\treturn rte_flow_error_set(error, ret,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t\t  \"failed to initialize pmd tunnel\");\n-\t}\n-\t*items = &tunnel->item;\n-\t*num_of_items = 1;\n-\treturn 0;\n-}\n-\n-struct tunnel_db_element_release_ctx {\n-\tstruct rte_flow_item *items;\n-\tstruct rte_flow_action *actions;\n-\tuint32_t num_elements;\n-\tstruct rte_flow_error *error;\n-\tint ret;\n-};\n-\n-static bool\n-tunnel_element_release_match(struct rte_eth_dev *dev,\n-\t\t\t     struct mlx5_flow_tunnel *tunnel, const void *x)\n-{\n-\tconst struct tunnel_db_element_release_ctx *ctx = x;\n-\n-\tRTE_SET_USED(dev);\n-\tif (ctx->num_elements != 1)\n-\t\treturn false;\n-\telse if (ctx->items)\n-\t\treturn ctx->items == &tunnel->item;\n-\telse if (ctx->actions)\n-\t\treturn ctx->actions == 
&tunnel->action;\n-\n-\treturn false;\n-}\n-\n-static void\n-tunnel_element_release_hit(struct rte_eth_dev *dev,\n-\t\t\t   struct mlx5_flow_tunnel *tunnel, void *x)\n-{\n-\tstruct tunnel_db_element_release_ctx *ctx = x;\n-\tctx->ret = 0;\n-\tif (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))\n-\t\tmlx5_flow_tunnel_free(dev, tunnel);\n-}\n-\n-static void\n-tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)\n-{\n-\tstruct tunnel_db_element_release_ctx *ctx = x;\n-\tRTE_SET_USED(dev);\n-\tctx->ret = rte_flow_error_set(ctx->error, EINVAL,\n-\t\t\t\t      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t      \"invalid argument\");\n-}\n-\n+\t\t       struct rte_flow_error *error);\n static int\n mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,\n \t\t\t      struct rte_flow_item *pmd_items,\n-\t\t\t      uint32_t num_items, struct rte_flow_error *err)\n-{\n-\tstruct tunnel_db_element_release_ctx ctx = {\n-\t\t.items = pmd_items,\n-\t\t.actions = NULL,\n-\t\t.num_elements = num_items,\n-\t\t.error = err,\n-\t};\n-\n-\tmlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,\n-\t\t\t\t      tunnel_element_release_hit,\n-\t\t\t\t      tunnel_element_release_miss, &ctx, false);\n-\n-\treturn ctx.ret;\n-}\n-\n+\t\t\t      uint32_t num_items, struct rte_flow_error *err);\n static int\n mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,\n \t\t\t\tstruct rte_flow_action *pmd_actions,\n \t\t\t\tuint32_t num_actions,\n-\t\t\t\tstruct rte_flow_error *err)\n-{\n-\tstruct tunnel_db_element_release_ctx ctx = {\n-\t\t.items = NULL,\n-\t\t.actions = pmd_actions,\n-\t\t.num_elements = num_actions,\n-\t\t.error = err,\n-\t};\n-\n-\tmlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,\n-\t\t\t\t      tunnel_element_release_hit,\n-\t\t\t\t      tunnel_element_release_miss, &ctx, false);\n-\n-\treturn ctx.ret;\n-}\n-\n+\t\t\t\tstruct rte_flow_error *err);\n static int\n mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,\n \t\t\t\t  struct rte_mbuf *m,\n \t\t\t\t  struct rte_flow_restore_info *info,\n-\t\t\t\t  struct rte_flow_error *err)\n-{\n-\tuint64_t ol_flags = m->ol_flags;\n-\tconst struct mlx5_flow_tbl_data_entry *tble;\n-\tconst uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;\n-\n-\tif ((ol_flags & mask) != mask)\n-\t\tgoto err;\n-\ttble = tunnel_mark_decode(dev, m->hash.fdir.hi);\n-\tif (!tble) {\n-\t\tDRV_LOG(DEBUG, \"port %u invalid miss tunnel mark %#x\",\n-\t\t\tdev->data->port_id, m->hash.fdir.hi);\n-\t\tgoto err;\n-\t}\n-\tMLX5_ASSERT(tble->tunnel);\n-\tmemcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));\n-\tinfo->group_id = tble->group_id;\n-\tinfo->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |\n-\t\t      RTE_FLOW_RESTORE_INFO_GROUP_ID |\n-\t\t      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;\n-\n-\treturn 0;\n-\n-err:\n-\treturn rte_flow_error_set(err, EINVAL,\n-\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t  \"failed to get restore info\");\n-}\n+\t\t\t\t  struct rte_flow_error *err);\n \n static const struct rte_flow_ops mlx5_flow_ops = {\n \t.validate = mlx5_flow_validate,\n@@ -4206,142 +4044,6 @@ flow_hairpin_split(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-__extension__\n-union tunnel_offload_mark {\n-\tuint32_t val;\n-\tstruct {\n-\t\tuint32_t app_reserve:8;\n-\t\tuint32_t table_id:15;\n-\t\tuint32_t transfer:1;\n-\t\tuint32_t _unused_:8;\n-\t};\n-};\n-\n-struct tunnel_default_miss_ctx {\n-\tuint16_t *queue;\n-\t__extension__\n-\tunion {\n-\t\tstruct rte_flow_action_rss action_rss;\n-\t\tstruct 
rte_flow_action_queue miss_queue;\n-\t\tstruct rte_flow_action_jump miss_jump;\n-\t\tuint8_t raw[0];\n-\t};\n-};\n-\n-static int\n-flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n-\t\t\t     struct rte_flow *flow,\n-\t\t\t     const struct rte_flow_attr *attr,\n-\t\t\t     const struct rte_flow_action *app_actions,\n-\t\t\t     uint32_t flow_idx,\n-\t\t\t     struct tunnel_default_miss_ctx *ctx,\n-\t\t\t     struct rte_flow_error *error)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow *dev_flow;\n-\tstruct rte_flow_attr miss_attr = *attr;\n-\tconst struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;\n-\tconst struct rte_flow_item miss_items[2] = {\n-\t\t{\n-\t\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n-\t\t\t.spec = NULL,\n-\t\t\t.last = NULL,\n-\t\t\t.mask = NULL\n-\t\t},\n-\t\t{\n-\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n-\t\t\t.spec = NULL,\n-\t\t\t.last = NULL,\n-\t\t\t.mask = NULL\n-\t\t}\n-\t};\n-\tunion tunnel_offload_mark mark_id;\n-\tstruct rte_flow_action_mark miss_mark;\n-\tstruct rte_flow_action miss_actions[3] = {\n-\t\t[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },\n-\t\t[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }\n-\t};\n-\tconst struct rte_flow_action_jump *jump_data;\n-\tuint32_t i, flow_table = 0; /* prevent compilation warning */\n-\tstruct flow_grp_info grp_info = {\n-\t\t.external = 1,\n-\t\t.transfer = attr->transfer,\n-\t\t.fdb_def_rule = !!priv->fdb_def_rule,\n-\t\t.std_tbl_fix = 0,\n-\t};\n-\tint ret;\n-\n-\tif (!attr->transfer) {\n-\t\tuint32_t q_size;\n-\n-\t\tmiss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;\n-\t\tq_size = priv->reta_idx_n * sizeof(ctx->queue[0]);\n-\t\tctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,\n-\t\t\t\t\t 0, SOCKET_ID_ANY);\n-\t\tif (!ctx->queue)\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, ENOMEM,\n-\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\tNULL, \"invalid default miss RSS\");\n-\t\tctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,\n-\t\tctx->action_rss.level = 0,\n-\t\tctx->action_rss.types = priv->rss_conf.rss_hf,\n-\t\tctx->action_rss.key_len = priv->rss_conf.rss_key_len,\n-\t\tctx->action_rss.queue_num = priv->reta_idx_n,\n-\t\tctx->action_rss.key = priv->rss_conf.rss_key,\n-\t\tctx->action_rss.queue = ctx->queue;\n-\t\tif (!priv->reta_idx_n || !priv->rxqs_n)\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, EINVAL,\n-\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\tNULL, \"invalid port configuration\");\n-\t\tif (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))\n-\t\t\tctx->action_rss.types = 0;\n-\t\tfor (i = 0; i != priv->reta_idx_n; ++i)\n-\t\t\tctx->queue[i] = (*priv->reta_idx)[i];\n-\t} else {\n-\t\tmiss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;\n-\t\tctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;\n-\t}\n-\tmiss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;\n-\tfor (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);\n-\tjump_data = app_actions->conf;\n-\tmiss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;\n-\tmiss_attr.group = jump_data->group;\n-\tret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,\n-\t\t\t\t       &flow_table, grp_info, error);\n-\tif (ret)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  NULL, \"invalid tunnel id\");\n-\tmark_id.app_reserve = 0;\n-\tmark_id.table_id = tunnel_flow_tbl_to_id(flow_table);\n-\tmark_id.transfer = !!attr->transfer;\n-\tmark_id._unused_ = 0;\n-\tmiss_mark.id = 
mark_id.val;\n-\tdev_flow = flow_drv_prepare(dev, flow, &miss_attr,\n-\t\t\t\t    miss_items, miss_actions, flow_idx, error);\n-\tif (!dev_flow)\n-\t\treturn -rte_errno;\n-\tdev_flow->flow = flow;\n-\tdev_flow->external = true;\n-\tdev_flow->tunnel = tunnel;\n-\t/* Subflow object was created, we must include one in the list. */\n-\tSILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,\n-\t\t      dev_flow->handle, next);\n-\tDRV_LOG(DEBUG,\n-\t\t\"port %u tunnel type=%d id=%u miss rule priority=%u group=%u\",\n-\t\tdev->data->port_id, tunnel->app_tunnel.type,\n-\t\ttunnel->tunnel_id, miss_attr.priority, miss_attr.group);\n-\tret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,\n-\t\t\t\t  miss_actions, error);\n-\tif (!ret)\n-\t\tret = flow_mreg_update_copy_table(dev, flow, miss_actions,\n-\t\t\t\t\t\t  error);\n-\n-\treturn ret;\n-}\n-\n /**\n  * The last stage of splitting chain, just creates the subflow\n  * without any modification.\n@@ -5672,7 +5374,8 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n \t\t\t\t\t      error);\n \t\tif (ret < 0)\n \t\t\tgoto error;\n-\t\tif (is_flow_tunnel_steer_rule(dev, attr,\n+\t\tif (is_tunnel_offload_active(dev) &&\n+\t\t    is_flow_tunnel_steer_rule(dev, attr,\n \t\t\t\t\t      buf->entry[i].pattern,\n \t\t\t\t\t      p_actions_rx)) {\n \t\t\tret = flow_tunnel_add_default_miss(dev, flow, attr,\n@@ -5743,12 +5446,15 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,\n \twks->flow_idx = wks->flow_nested_idx;\n \tif (wks->flow_nested_idx)\n \t\twks->flow_nested_idx = 0;\n-\ttunnel = flow_tunnel_from_rule(dev, attr, items, actions);\n-\tif (tunnel) {\n-\t\tflow->tunnel = 1;\n-\t\tflow->tunnel_id = tunnel->tunnel_id;\n-\t\t__atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);\n-\t\tmlx5_free(default_miss_ctx.queue);\n+\tif (is_tunnel_offload_active(dev)) {\n+\t\ttunnel = flow_tunnel_from_rule(dev, attr, items, actions);\n+\t\tif (tunnel) {\n+\t\t\tflow->tunnel = 1;\n+\t\t\tflow->tunnel_id = tunnel->tunnel_id;\n+\t\t\t__atomic_add_fetch(&tunnel->refctn, 1,\n+\t\t\t\t\t   __ATOMIC_RELAXED);\n+\t\t\tmlx5_free(default_miss_ctx.queue);\n+\t\t}\n \t}\n \treturn idx;\n error:\n@@ -6945,107 +6651,6 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,\n \tsh->cmng.pending_queries--;\n }\n \n-static const struct mlx5_flow_tbl_data_entry  *\n-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n-\tstruct mlx5_hlist_entry *he;\n-\tunion tunnel_offload_mark mbits = { .val = mark };\n-\tunion mlx5_flow_tbl_key table_key = {\n-\t\t{\n-\t\t\t.table_id = tunnel_id_to_flow_tbl(mbits.table_id),\n-\t\t\t.dummy = 0,\n-\t\t\t.domain = !!mbits.transfer,\n-\t\t\t.direction = 0,\n-\t\t}\n-\t};\n-\the = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);\n-\treturn he ?\n-\t       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;\n-}\n-\n-static void\n-mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,\n-\t\t\t\t   struct mlx5_hlist_entry *entry)\n-{\n-\tstruct mlx5_dev_ctx_shared *sh = list->ctx;\n-\tstruct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);\n-\n-\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],\n-\t\t\ttunnel_flow_tbl_to_id(tte->flow_table));\n-\tmlx5_free(tte);\n-}\n-\n-static struct mlx5_hlist_entry *\n-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,\n-\t\t\t\t   uint64_t key __rte_unused,\n-\t\t\t\t   void *ctx 
__rte_unused)\n-{\n-\tstruct mlx5_dev_ctx_shared *sh = list->ctx;\n-\tstruct tunnel_tbl_entry *tte;\n-\n-\ttte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,\n-\t\t\t  sizeof(*tte), 0,\n-\t\t\t  SOCKET_ID_ANY);\n-\tif (!tte)\n-\t\tgoto err;\n-\tmlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],\n-\t\t\t  &tte->flow_table);\n-\tif (tte->flow_table >= MLX5_MAX_TABLES) {\n-\t\tDRV_LOG(ERR, \"Tunnel TBL ID %d exceed max limit.\",\n-\t\t\ttte->flow_table);\n-\t\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],\n-\t\t\t\ttte->flow_table);\n-\t\tgoto err;\n-\t} else if (!tte->flow_table) {\n-\t\tgoto err;\n-\t}\n-\ttte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);\n-\treturn &tte->hash;\n-err:\n-\tif (tte)\n-\t\tmlx5_free(tte);\n-\treturn NULL;\n-}\n-\n-static uint32_t\n-tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,\n-\t\t\t\tconst struct mlx5_flow_tunnel *tunnel,\n-\t\t\t\tuint32_t group, uint32_t *table,\n-\t\t\t\tstruct rte_flow_error *error)\n-{\n-\tstruct mlx5_hlist_entry *he;\n-\tstruct tunnel_tbl_entry *tte;\n-\tunion tunnel_tbl_key key = {\n-\t\t.tunnel_id = tunnel ? tunnel->tunnel_id : 0,\n-\t\t.group = group\n-\t};\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_hlist *group_hash;\n-\n-\tgroup_hash = tunnel ? tunnel->groups : thub->groups;\n-\the = mlx5_hlist_lookup(group_hash, key.val, NULL);\n-\tif (!he) {\n-\t\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%u - generate table id\",\n-\t\tdev->data->port_id, key.tunnel_id, group);\n-\t\the = mlx5_hlist_register(group_hash, key.val, NULL);\n-\t} else {\n-\t\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%u - skip table id\",\n-\t\tdev->data->port_id, key.tunnel_id, group);\n-\t}\n-\tif (!he)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"tunnel group index not supported\");\n-\ttte = container_of(he, typeof(*tte), hash);\n-\t*table = tte->flow_table;\n-\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%u table=%u\",\n-\tdev->data->port_id, key.tunnel_id, group, *table);\n-\treturn 0;\n-}\n-\n static int\n flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,\n \t\t    struct flow_grp_info grp_info, struct rte_flow_error *error)\n@@ -7506,23 +7111,38 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev)\n \treturn ret;\n }\n \n-static void\n-mlx5_flow_tunnel_free(struct rte_eth_dev *dev,\n-\t\t      struct mlx5_flow_tunnel *tunnel)\n+#ifndef HAVE_MLX5DV_DR\n+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))\n+#else\n+#define MLX5_DOMAIN_SYNC_FLOW \\\n+\t(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)\n+#endif\n+\n+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)\n {\n-\t/* no tunnel hub spinlock protection */\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_indexed_pool *ipool;\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\tint ret;\n+\tstruct rte_flow_attr attr = { .transfer = 0 };\n \n-\tDRV_LOG(DEBUG, \"port %u release pmd tunnel id=0x%x\",\n-\t\tdev->data->port_id, tunnel->tunnel_id);\n-\trte_spinlock_lock(&thub->sl);\n-\tLIST_REMOVE(tunnel, chain);\n-\trte_spinlock_unlock(&thub->sl);\n-\tmlx5_hlist_destroy(tunnel->groups);\n-\tipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];\n-\tmlx5_ipool_free(ipool, tunnel->tunnel_id);\n+\tfops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));\n+\tret = fops->sync_domain(dev, 
domains, MLX5_DOMAIN_SYNC_FLOW);\n+\tif (ret > 0)\n+\t\tret = -ret;\n+\treturn ret;\n+}\n+\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+static inline uint32_t\n+tunnel_id_to_flow_tbl(uint32_t id)\n+{\n+\treturn id | (1u << 16);\n+}\n+\n+static inline uint32_t\n+tunnel_flow_tbl_to_id(uint32_t flow_tbl)\n+{\n+\treturn flow_tbl & ~(1u << 16);\n }\n \n static bool\n@@ -7532,19 +7152,235 @@ mlx5_access_tunnel_offload_db\n \t\t       struct mlx5_flow_tunnel *, const void *),\n \t void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),\n \t void (*miss)(struct rte_eth_dev *, void *),\n-\t void *ctx, bool lock_op)\n-{\n-\tbool verdict = false;\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tunnel;\n+\t void *ctx, bool lock_op);\n+static const struct mlx5_flow_tbl_data_entry  *\n+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);\n+static int\n+mlx5_get_flow_tunnel(struct rte_eth_dev *dev,\n+\t\t     const struct rte_flow_tunnel *app_tunnel,\n+\t\t     struct mlx5_flow_tunnel **tunnel);\n \n-\trte_spinlock_lock(&thub->sl);\n-\tLIST_FOREACH(tunnel, &thub->tunnels, chain) {\n-\t\tverdict = match(dev, tunnel, (const void *)ctx);\n-\t\tif (verdict)\n-\t\t\tbreak;\n-\t}\n-\tif (!lock_op)\n+__extension__\n+union tunnel_offload_mark {\n+\tuint32_t val;\n+\tstruct {\n+\t\tuint32_t app_reserve:8;\n+\t\tuint32_t table_id:15;\n+\t\tuint32_t transfer:1;\n+\t\tuint32_t _unused_:8;\n+\t};\n+};\n+\n+static int\n+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n+\t\t\t     struct rte_flow *flow,\n+\t\t\t     const struct rte_flow_attr *attr,\n+\t\t\t     const struct rte_flow_action *app_actions,\n+\t\t\t     uint32_t flow_idx,\n+\t\t\t     struct tunnel_default_miss_ctx *ctx,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow *dev_flow;\n+\tstruct rte_flow_attr miss_attr = *attr;\n+\tconst struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;\n+\tconst struct rte_flow_item miss_items[2] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n+\t\t\t.spec = NULL,\n+\t\t\t.last = NULL,\n+\t\t\t.mask = NULL\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t\t\t.spec = NULL,\n+\t\t\t.last = NULL,\n+\t\t\t.mask = NULL\n+\t\t}\n+\t};\n+\tunion tunnel_offload_mark mark_id;\n+\tstruct rte_flow_action_mark miss_mark;\n+\tstruct rte_flow_action miss_actions[3] = {\n+\t\t[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },\n+\t\t[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }\n+\t};\n+\tconst struct rte_flow_action_jump *jump_data;\n+\tuint32_t i, flow_table = 0; /* prevent compilation warning */\n+\tstruct flow_grp_info grp_info = {\n+\t\t.external = 1,\n+\t\t.transfer = attr->transfer,\n+\t\t.fdb_def_rule = !!priv->fdb_def_rule,\n+\t\t.std_tbl_fix = 0,\n+\t};\n+\tint ret;\n+\n+\tif (!attr->transfer) {\n+\t\tuint32_t q_size;\n+\n+\t\tmiss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;\n+\t\tq_size = priv->reta_idx_n * sizeof(ctx->queue[0]);\n+\t\tctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,\n+\t\t\t\t\t 0, SOCKET_ID_ANY);\n+\t\tif (!ctx->queue)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, ENOMEM,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\tNULL, \"invalid default miss RSS\");\n+\t\tctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,\n+\t\tctx->action_rss.level = 0,\n+\t\tctx->action_rss.types = priv->rss_conf.rss_hf,\n+\t\tctx->action_rss.key_len = priv->rss_conf.rss_key_len,\n+\t\tctx->action_rss.queue_num = 
priv->reta_idx_n,\n+\t\tctx->action_rss.key = priv->rss_conf.rss_key,\n+\t\tctx->action_rss.queue = ctx->queue;\n+\t\tif (!priv->reta_idx_n || !priv->rxqs_n)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\tNULL, \"invalid port configuration\");\n+\t\tif (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))\n+\t\t\tctx->action_rss.types = 0;\n+\t\tfor (i = 0; i != priv->reta_idx_n; ++i)\n+\t\t\tctx->queue[i] = (*priv->reta_idx)[i];\n+\t} else {\n+\t\tmiss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;\n+\t\tctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;\n+\t}\n+\tmiss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;\n+\tfor (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);\n+\tjump_data = app_actions->conf;\n+\tmiss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;\n+\tmiss_attr.group = jump_data->group;\n+\tret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,\n+\t\t\t\t       &flow_table, grp_info, error);\n+\tif (ret)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  NULL, \"invalid tunnel id\");\n+\tmark_id.app_reserve = 0;\n+\tmark_id.table_id = tunnel_flow_tbl_to_id(flow_table);\n+\tmark_id.transfer = !!attr->transfer;\n+\tmark_id._unused_ = 0;\n+\tmiss_mark.id = mark_id.val;\n+\tdev_flow = flow_drv_prepare(dev, flow, &miss_attr,\n+\t\t\t\t    miss_items, miss_actions, flow_idx, error);\n+\tif (!dev_flow)\n+\t\treturn -rte_errno;\n+\tdev_flow->flow = flow;\n+\tdev_flow->external = true;\n+\tdev_flow->tunnel = tunnel;\n+\t/* Subflow object was created, we must include one in the list. */\n+\tSILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,\n+\t\t      dev_flow->handle, next);\n+\tDRV_LOG(DEBUG,\n+\t\t\"port %u tunnel type=%d id=%u miss rule priority=%u group=%u\",\n+\t\tdev->data->port_id, tunnel->app_tunnel.type,\n+\t\ttunnel->tunnel_id, miss_attr.priority, miss_attr.group);\n+\tret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,\n+\t\t\t\t  miss_actions, error);\n+\tif (!ret)\n+\t\tret = flow_mreg_update_copy_table(dev, flow, miss_actions,\n+\t\t\t\t\t\t  error);\n+\n+\treturn ret;\n+}\n+\n+static const struct mlx5_flow_tbl_data_entry  *\n+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n+\tstruct mlx5_hlist_entry *he;\n+\tunion tunnel_offload_mark mbits = { .val = mark };\n+\tunion mlx5_flow_tbl_key table_key = {\n+\t\t{\n+\t\t\t.table_id = tunnel_id_to_flow_tbl(mbits.table_id),\n+\t\t\t.dummy = 0,\n+\t\t\t.domain = !!mbits.transfer,\n+\t\t\t.direction = 0,\n+\t\t}\n+\t};\n+\the = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);\n+\treturn he ?\n+\t       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;\n+}\n+\n+static uint32_t\n+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,\n+\t\t\t\tconst struct mlx5_flow_tunnel *tunnel,\n+\t\t\t\tuint32_t group, uint32_t *table,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct mlx5_hlist_entry *he;\n+\tstruct tunnel_tbl_entry *tte;\n+\tunion tunnel_tbl_key key = {\n+\t\t.tunnel_id = tunnel ? tunnel->tunnel_id : 0,\n+\t\t.group = group\n+\t};\n+\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n+\tstruct mlx5_hlist *group_hash;\n+\n+\tgroup_hash = tunnel ? 
tunnel->groups : thub->groups;\n+\the = mlx5_hlist_lookup(group_hash, key.val, NULL);\n+\tif (!he) {\n+\t\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%u - generate table id\",\n+\t\tdev->data->port_id, key.tunnel_id, group);\n+\t\the = mlx5_hlist_register(group_hash, key.val, NULL);\n+\t} else {\n+\t\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%u - skip table id\",\n+\t\tdev->data->port_id, key.tunnel_id, group);\n+\t}\n+\tif (!he)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"tunnel group index not supported\");\n+\ttte = container_of(he, typeof(*tte), hash);\n+\t*table = tte->flow_table;\n+\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%u table=%u\",\n+\tdev->data->port_id, key.tunnel_id, group, *table);\n+\treturn 0;\n+}\n+\n+static void\n+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,\n+\t\t      struct mlx5_flow_tunnel *tunnel)\n+{\n+\t/* no tunnel hub spinlock protection */\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n+\tstruct mlx5_indexed_pool *ipool;\n+\n+\tDRV_LOG(DEBUG, \"port %u release pmd tunnel id=0x%x\",\n+\t\tdev->data->port_id, tunnel->tunnel_id);\n+\trte_spinlock_lock(&thub->sl);\n+\tLIST_REMOVE(tunnel, chain);\n+\trte_spinlock_unlock(&thub->sl);\n+\tmlx5_hlist_destroy(tunnel->groups);\n+\tipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];\n+\tmlx5_ipool_free(ipool, tunnel->tunnel_id);\n+}\n+\n+static bool\n+mlx5_access_tunnel_offload_db\n+\t(struct rte_eth_dev *dev,\n+\t bool (*match)(struct rte_eth_dev *,\n+\t\t       struct mlx5_flow_tunnel *, const void *),\n+\t void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),\n+\t void (*miss)(struct rte_eth_dev *, void *),\n+\t void *ctx, bool lock_op)\n+{\n+\tbool verdict = false;\n+\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n+\tstruct mlx5_flow_tunnel *tunnel;\n+\n+\trte_spinlock_lock(&thub->sl);\n+\tLIST_FOREACH(tunnel, &thub->tunnels, chain) {\n+\t\tverdict = match(dev, tunnel, (const void *)ctx);\n+\t\tif (verdict)\n+\t\t\tbreak;\n+\t}\n+\tif (!lock_op)\n \t\trte_spinlock_unlock(&thub->sl);\n \tif (verdict && hit)\n \t\thit(dev, tunnel, ctx);\n@@ -7593,6 +7429,50 @@ mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)\n \treturn ctx.tunnel;\n }\n \n+static void\n+mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,\n+\t\t\t\t   struct mlx5_hlist_entry *entry)\n+{\n+\tstruct mlx5_dev_ctx_shared *sh = list->ctx;\n+\tstruct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);\n+\n+\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],\n+\t\t\ttunnel_flow_tbl_to_id(tte->flow_table));\n+\tmlx5_free(tte);\n+}\n+\n+static struct mlx5_hlist_entry *\n+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,\n+\t\t\t\t   uint64_t key __rte_unused,\n+\t\t\t\t   void *ctx __rte_unused)\n+{\n+\tstruct mlx5_dev_ctx_shared *sh = list->ctx;\n+\tstruct tunnel_tbl_entry *tte;\n+\n+\ttte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,\n+\t\t\t  sizeof(*tte), 0,\n+\t\t\t  SOCKET_ID_ANY);\n+\tif (!tte)\n+\t\tgoto err;\n+\tmlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],\n+\t\t\t  &tte->flow_table);\n+\tif (tte->flow_table >= MLX5_MAX_TABLES) {\n+\t\tDRV_LOG(ERR, \"Tunnel TBL ID %d exceed max limit.\",\n+\t\t\ttte->flow_table);\n+\t\tmlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],\n+\t\t\t\ttte->flow_table);\n+\t\tgoto err;\n+\t} else if (!tte->flow_table) {\n+\t\tgoto err;\n+\t}\n+\ttte->flow_table = 
tunnel_id_to_flow_tbl(tte->flow_table);\n+\treturn &tte->hash;\n+err:\n+\tif (tte)\n+\t\tmlx5_free(tte);\n+\treturn NULL;\n+}\n+\n static struct mlx5_flow_tunnel *\n mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,\n \t\t\t  const struct rte_flow_tunnel *app_tunnel)\n@@ -7678,7 +7558,6 @@ static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)\n \t\tLIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);\n }\n \n-\n static int\n mlx5_get_flow_tunnel(struct rte_eth_dev *dev,\n \t\t     const struct rte_flow_tunnel *app_tunnel,\n@@ -7738,23 +7617,286 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)\n \treturn err;\n }\n \n-#ifndef HAVE_MLX5DV_DR\n-#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))\n-#else\n-#define MLX5_DOMAIN_SYNC_FLOW \\\n-\t(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)\n-#endif\n+static inline bool\n+mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,\n+\t\t\t  struct rte_flow_tunnel *tunnel,\n+\t\t\t  const char *err_msg)\n+{\n+\terr_msg = NULL;\n+\tif (!is_tunnel_offload_active(dev)) {\n+\t\terr_msg = \"tunnel offload was not activated\";\n+\t\tgoto out;\n+\t} else if (!tunnel) {\n+\t\terr_msg = \"no application tunnel\";\n+\t\tgoto out;\n+\t}\n \n-int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)\n+\tswitch (tunnel->type) {\n+\tdefault:\n+\t\terr_msg = \"unsupported tunnel type\";\n+\t\tgoto out;\n+\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\tbreak;\n+\t}\n+\n+out:\n+\treturn !err_msg;\n+}\n+\n+static int\n+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,\n+\t\t\t   struct rte_flow_tunnel *app_tunnel,\n+\t\t\t   struct rte_flow_action **actions,\n+\t\t\t   uint32_t *num_of_actions,\n+\t\t\t   struct rte_flow_error *error)\n {\n-\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n-\tconst struct mlx5_flow_driver_ops *fops;\n \tint ret;\n-\tstruct rte_flow_attr attr = { .transfer = 0 };\n+\tstruct mlx5_flow_tunnel *tunnel;\n+\tconst char *err_msg = NULL;\n+\tbool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);\n \n-\tfops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));\n-\tret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);\n-\tif (ret > 0)\n-\t\tret = -ret;\n-\treturn ret;\n+\tif (!verdict)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,\n+\t\t\t\t\t  err_msg);\n+\tret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);\n+\tif (ret < 0) {\n+\t\treturn rte_flow_error_set(error, ret,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,\n+\t\t\t\t\t  \"failed to initialize pmd tunnel\");\n+\t}\n+\t*actions = &tunnel->action;\n+\t*num_of_actions = 1;\n+\treturn 0;\n+}\n+\n+static int\n+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,\n+\t\t       struct rte_flow_tunnel *app_tunnel,\n+\t\t       struct rte_flow_item **items,\n+\t\t       uint32_t *num_of_items,\n+\t\t       struct rte_flow_error *error)\n+{\n+\tint ret;\n+\tstruct mlx5_flow_tunnel *tunnel;\n+\tconst char *err_msg = NULL;\n+\tbool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);\n+\n+\tif (!verdict)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t\t  err_msg);\n+\tret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);\n+\tif (ret < 0) {\n+\t\treturn rte_flow_error_set(error, ret,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t\t  \"failed to initialize pmd tunnel\");\n+\t}\n+\t*items = &tunnel->item;\n+\t*num_of_items = 1;\n+\treturn 0;\n+}\n+\n+struct tunnel_db_element_release_ctx {\n+\tstruct 
rte_flow_item *items;\n+\tstruct rte_flow_action *actions;\n+\tuint32_t num_elements;\n+\tstruct rte_flow_error *error;\n+\tint ret;\n+};\n+\n+static bool\n+tunnel_element_release_match(struct rte_eth_dev *dev,\n+\t\t\t     struct mlx5_flow_tunnel *tunnel, const void *x)\n+{\n+\tconst struct tunnel_db_element_release_ctx *ctx = x;\n+\n+\tRTE_SET_USED(dev);\n+\tif (ctx->num_elements != 1)\n+\t\treturn false;\n+\telse if (ctx->items)\n+\t\treturn ctx->items == &tunnel->item;\n+\telse if (ctx->actions)\n+\t\treturn ctx->actions == &tunnel->action;\n+\n+\treturn false;\n+}\n+\n+static void\n+tunnel_element_release_hit(struct rte_eth_dev *dev,\n+\t\t\t   struct mlx5_flow_tunnel *tunnel, void *x)\n+{\n+\tstruct tunnel_db_element_release_ctx *ctx = x;\n+\n+\tctx->ret = 0;\n+\tif (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))\n+\t\tmlx5_flow_tunnel_free(dev, tunnel);\n+}\n+\n+static void\n+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)\n+{\n+\tstruct tunnel_db_element_release_ctx *ctx = x;\n+\n+\tRTE_SET_USED(dev);\n+\tctx->ret = rte_flow_error_set(ctx->error, EINVAL,\n+\t\t\t\t      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t      \"invalid argument\");\n+}\n+\n+static int\n+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,\n+\t\t\t      struct rte_flow_item *pmd_items,\n+\t\t\t      uint32_t num_items, struct rte_flow_error *err)\n+{\n+\tstruct tunnel_db_element_release_ctx ctx = {\n+\t\t.items = pmd_items,\n+\t\t.actions = NULL,\n+\t\t.num_elements = num_items,\n+\t\t.error = err,\n+\t};\n+\n+\tmlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,\n+\t\t\t\t      tunnel_element_release_hit,\n+\t\t\t\t      tunnel_element_release_miss, &ctx, false);\n+\n+\treturn ctx.ret;\n+}\n+\n+static int\n+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,\n+\t\t\t\tstruct rte_flow_action *pmd_actions,\n+\t\t\t\tuint32_t num_actions,\n+\t\t\t\tstruct rte_flow_error *err)\n+{\n+\tstruct tunnel_db_element_release_ctx ctx = {\n+\t\t.items = NULL,\n+\t\t.actions = pmd_actions,\n+\t\t.num_elements = num_actions,\n+\t\t.error = err,\n+\t};\n+\n+\tmlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,\n+\t\t\t\t      tunnel_element_release_hit,\n+\t\t\t\t      tunnel_element_release_miss, &ctx, false);\n+\n+\treturn ctx.ret;\n+}\n+\n+static int\n+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,\n+\t\t\t\t  struct rte_mbuf *m,\n+\t\t\t\t  struct rte_flow_restore_info *info,\n+\t\t\t\t  struct rte_flow_error *err)\n+{\n+\tuint64_t ol_flags = m->ol_flags;\n+\tconst struct mlx5_flow_tbl_data_entry *tble;\n+\tconst uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;\n+\n+\tif ((ol_flags & mask) != mask)\n+\t\tgoto err;\n+\ttble = tunnel_mark_decode(dev, m->hash.fdir.hi);\n+\tif (!tble) {\n+\t\tDRV_LOG(DEBUG, \"port %u invalid miss tunnel mark %#x\",\n+\t\t\tdev->data->port_id, m->hash.fdir.hi);\n+\t\tgoto err;\n+\t}\n+\tMLX5_ASSERT(tble->tunnel);\n+\tmemcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));\n+\tinfo->group_id = tble->group_id;\n+\tinfo->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |\n+\t\t      RTE_FLOW_RESTORE_INFO_GROUP_ID |\n+\t\t      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;\n+\n+\treturn 0;\n+\n+err:\n+\treturn rte_flow_error_set(err, EINVAL,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t  \"failed to get restore info\");\n+}\n+#else /* HAVE_IBV_FLOW_DV_SUPPORT */\n+static int\n+mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t   __rte_unused struct rte_flow_tunnel *app_tunnel,\n+\t\t\t   
__rte_unused struct rte_flow_action **actions,\n+\t\t\t   __rte_unused uint32_t *num_of_actions,\n+\t\t\t   __rte_unused struct rte_flow_error *error)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,\n+\t\t       __rte_unused struct rte_flow_tunnel *app_tunnel,\n+\t\t       __rte_unused struct rte_flow_item **items,\n+\t\t       __rte_unused uint32_t *num_of_items,\n+\t\t       __rte_unused struct rte_flow_error *error)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t      __rte_unused struct rte_flow_item *pmd_items,\n+\t\t\t      __rte_unused uint32_t num_items,\n+\t\t\t      __rte_unused struct rte_flow_error *err)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t\t__rte_unused struct rte_flow_action *pmd_action,\n+\t\t\t\t__rte_unused uint32_t num_actions,\n+\t\t\t\t__rte_unused struct rte_flow_error *err)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t\t  __rte_unused struct rte_mbuf *m,\n+\t\t\t\t  __rte_unused struct rte_flow_restore_info *i,\n+\t\t\t\t  __rte_unused struct rte_flow_error *err)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t     __rte_unused struct rte_flow *flow,\n+\t\t\t     __rte_unused const struct rte_flow_attr *attr,\n+\t\t\t     __rte_unused const struct rte_flow_action *actions,\n+\t\t\t     __rte_unused uint32_t flow_idx,\n+\t\t\t     __rte_unused struct tunnel_default_miss_ctx *ctx,\n+\t\t\t     __rte_unused struct rte_flow_error *error)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static struct mlx5_flow_tunnel *\n+mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,\n+\t\t    __rte_unused uint32_t id)\n+{\n+\treturn NULL;\n+}\n+\n+static void\n+mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,\n+\t\t      __rte_unused struct mlx5_flow_tunnel *tunnel)\n+{\n+}\n+\n+static uint32_t\n+tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t\t__rte_unused const struct mlx5_flow_tunnel *t,\n+\t\t\t\t__rte_unused uint32_t group,\n+\t\t\t\t__rte_unused uint32_t *table,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t  \"tunnel offload requires DV support\");\n }\n+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex bdf2c50090..672c27ecb9 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -965,18 +965,6 @@ struct tunnel_tbl_entry {\n \tuint32_t flow_table;\n };\n \n-static inline uint32_t\n-tunnel_id_to_flow_tbl(uint32_t id)\n-{\n-\treturn id | (1u << 16);\n-}\n-\n-static inline uint32_t\n-tunnel_flow_tbl_to_id(uint32_t flow_tbl)\n-{\n-\treturn flow_tbl & ~(1u << 16);\n-}\n-\n union tunnel_tbl_key {\n \tuint64_t val;\n \tstruct {\n@@ -995,8 +983,13 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)\n static inline bool\n is_tunnel_offload_active(struct rte_eth_dev *dev)\n {\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \treturn !!priv->config.dv_miss_info;\n+#else\n+\tRTE_SET_USED(dev);\n+\treturn false;\n+#endif\n }\n \n static inline bool\n",
    "prefixes": [
        "v2",
        "5/5"
    ]
}
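
The response fields can be consumed programmatically. A minimal sketch (again assuming the requests package) that fetches this resource, reads a few of the fields shown above, and downloads the mbox for use with git am:

import requests

resp = requests.get("https://patches.dpdk.org/api/patches/84167/")
resp.raise_for_status()
patch = resp.json()

# All keys below appear in the response body above.
print(patch["name"])                   # [v2,5/5] net/mlx5: fix non-dv compilation errors
print(patch["state"], patch["check"])  # superseded warning
print(patch["submitter"]["name"])      # Gregory Etelson
print(patch["series"][0]["name"])      # restore tunnel offload functionality in mlx5

# Save the patch mbox; the file can then be applied with `git am patch.mbox`.
with open("patch.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"]).content)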