get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (a full update that replaces all writable fields).
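These endpoints can also be driven programmatically. Below is a minimal sketch using Python's requests library; the token value and the target state are placeholder assumptions, and the write call (PATCH) assumes an account with maintainer rights on the project.

import requests

BASE = "https://patches.dpdk.org/api"
TOKEN = "your-api-token"  # hypothetical placeholder; generate a real one in your Patchwork profile

# GET: show a patch. Read access needs no authentication.
resp = requests.get(f"{BASE}/patches/84214/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "->", patch["state"])

# PATCH: partial update. Send only the fields to change; this
# requires a token for an account with sufficient rights.
resp = requests.patch(
    f"{BASE}/patches/84214/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()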

GET /api/patches/84214/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 84214,
    "url": "http://patches.dpdk.org/api/patches/84214/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201116091326.10511-3-getelson@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201116091326.10511-3-getelson@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201116091326.10511-3-getelson@nvidia.com",
    "date": "2020-11-16T09:13:22",
    "name": "[v3,2/6] net/mlx5: fix build with Direct Verbs disabled",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f646a8738e39897b3a732d0ff602b73215a1723d",
    "submitter": {
        "id": 1882,
        "url": "http://patches.dpdk.org/api/people/1882/?format=api",
        "name": "Gregory Etelson",
        "email": "getelson@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201116091326.10511-3-getelson@nvidia.com/mbox/",
    "series": [
        {
            "id": 13899,
            "url": "http://patches.dpdk.org/api/series/13899/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=13899",
            "date": "2020-11-16T09:13:20",
            "name": "restore tunnel offload functionality in mlx5",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/13899/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/84214/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/84214/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 47ECEA04DB;\n\tMon, 16 Nov 2020 10:14:25 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id C1689C8FA;\n\tMon, 16 Nov 2020 10:13:49 +0100 (CET)",
            "from hqnvemgate24.nvidia.com (hqnvemgate24.nvidia.com\n [216.228.121.143]) by dpdk.org (Postfix) with ESMTP id 9F989C8F8\n for <dev@dpdk.org>; Mon, 16 Nov 2020 10:13:47 +0100 (CET)",
            "from hqmail.nvidia.com (Not Verified[216.228.121.13]) by\n hqnvemgate24.nvidia.com (using TLS: TLSv1.2, AES256-SHA)\n id <B5fb242d30001>; Mon, 16 Nov 2020 01:13:55 -0800",
            "from nvidia.com (10.124.1.5) by HQMAIL107.nvidia.com (172.20.187.13)\n with Microsoft SMTP Server (TLS) id 15.0.1473.3;\n Mon, 16 Nov 2020 09:13:43 +0000"
        ],
        "From": "Gregory Etelson <getelson@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Shahaf Shuler\n <shahafs@nvidia.com>",
        "Date": "Mon, 16 Nov 2020 11:13:22 +0200",
        "Message-ID": "<20201116091326.10511-3-getelson@nvidia.com>",
        "X-Mailer": "git-send-email 2.29.2",
        "In-Reply-To": "<20201116091326.10511-1-getelson@nvidia.com>",
        "References": "<20201111071417.21177-1-getelson@nvidia.com>\n <20201116091326.10511-1-getelson@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "quoted-printable",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.124.1.5]",
        "X-ClientProxiedBy": "HQMAIL101.nvidia.com (172.20.187.10) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=nvidia.com; s=n1;\n t=1605518035; bh=vsWuH7HCQa3Q5GeVidSX1pRudGTB+k2zr0EY1DX5EQo=;\n h=From:To:CC:Subject:Date:Message-ID:X-Mailer:In-Reply-To:\n References:MIME-Version:Content-Transfer-Encoding:Content-Type:\n X-Originating-IP:X-ClientProxiedBy;\n b=OCCxgyr0wFvh4RKnAxhfnH5VydFc+tg+YMLBi6nbYKmjtvQtI4gCvJJ8PVwNvPo5g\n i43O9yWBlVT36fYZeThG9RpTBzzhgsMY82vtffgln+4fPCV99hbnPv+HVXvDr8whln\n haK57W+LhX1Z9FrIiyYXWEJvYxuyKgMWsPENjLxfTQrkHM4kDqvGggRwqoPXLX8QZ8\n rQ+KIQ/bMpi7B8wrq5IckEnfL8K30wfagurmg40X9EHKRoXt4smFAcpRwf3ThDiE/j\n ggL5iuM8b6RfCaxAsUDYHIRhw30mNZbeOHncOSfNqMXobjzqry7+BXmVSTUmRz/n6/\n 1mEUX7oL3BqTw==",
        "Subject": "[dpdk-dev] [PATCH v3 2/6] net/mlx5: fix build with Direct Verbs\n\tdisabled",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Tunnel offload API is implemented for Direct Verbs environment only.\nCurrent patch re-arranges tunnel related functions for compilation in\nnon Direct Verbs setups to prevent compilation failures.  The patch\ndoes not introduce new functions.\n\nFixes: 4ec6360de37d (\"net/mlx5: implement tunnel offload\")\n\nSigned-off-by: Gregory Etelson <getelson@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 935 ++++++++++++++++++++---------------\n drivers/net/mlx5/mlx5_flow.h |   5 +\n 2 files changed, 535 insertions(+), 405 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 98559ece2b..e4fe78df4c 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -33,16 +33,35 @@\n #include \"mlx5_common_os.h\"\n #include \"rte_pmd_mlx5.h\"\n \n+struct tunnel_default_miss_ctx {\n+\tuint16_t *queue;\n+\t__extension__\n+\tunion {\n+\t\tstruct rte_flow_action_rss action_rss;\n+\t\tstruct rte_flow_action_queue miss_queue;\n+\t\tstruct rte_flow_action_jump miss_jump;\n+\t\tuint8_t raw[0];\n+\t};\n+};\n+\n+static int\n+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n+\t\t\t     struct rte_flow *flow,\n+\t\t\t     const struct rte_flow_attr *attr,\n+\t\t\t     const struct rte_flow_action *app_actions,\n+\t\t\t     uint32_t flow_idx,\n+\t\t\t     struct tunnel_default_miss_ctx *ctx,\n+\t\t\t     struct rte_flow_error *error);\n static struct mlx5_flow_tunnel *\n mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);\n static void\n mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);\n-static const struct mlx5_flow_tbl_data_entry  *\n-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);\n-static int\n-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,\n-\t\t     const struct rte_flow_tunnel *app_tunnel,\n-\t\t     struct mlx5_flow_tunnel **tunnel);\n+static uint32_t\n+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,\n+\t\t\t\tconst struct mlx5_flow_tunnel *tunnel,\n+\t\t\t\tuint32_t group, uint32_t *table,\n+\t\t\t\tstruct rte_flow_error *error);\n+\n static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);\n static void mlx5_flow_pop_thread_workspace(void);\n \n@@ -606,145 +625,32 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,\n \treturn !err_msg;\n }\n \n-\n static int\n mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,\n \t\t    struct rte_flow_tunnel *app_tunnel,\n \t\t    struct rte_flow_action **actions,\n \t\t    uint32_t *num_of_actions,\n-\t\t    struct rte_flow_error *error)\n-{\n-\tint ret;\n-\tstruct mlx5_flow_tunnel *tunnel;\n-\tconst char *err_msg = NULL;\n-\tbool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);\n-\n-\tif (!verdict)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,\n-\t\t\t\t\t  err_msg);\n-\tret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);\n-\tif (ret < 0) {\n-\t\treturn rte_flow_error_set(error, ret,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,\n-\t\t\t\t\t  \"failed to initialize pmd tunnel\");\n-\t}\n-\t*actions = &tunnel->action;\n-\t*num_of_actions = 1;\n-\treturn 0;\n-}\n-\n+\t\t    struct rte_flow_error *error);\n static int\n mlx5_flow_tunnel_match(struct rte_eth_dev *dev,\n \t\t       struct rte_flow_tunnel *app_tunnel,\n \t\t       struct rte_flow_item **items,\n \t\t       uint32_t *num_of_items,\n-\t\t       struct rte_flow_error *error)\n-{\n-\tint ret;\n-\tstruct mlx5_flow_tunnel *tunnel;\n-\tconst char *err_msg = NULL;\n-\tbool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);\n-\n-\tif (!verdict)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t\t  err_msg);\n-\tret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);\n-\tif (ret < 0) {\n-\t\treturn rte_flow_error_set(error, ret,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t\t  \"failed to initialize pmd tunnel\");\n-\t}\n-\t*items = &tunnel->item;\n-\t*num_of_items = 1;\n-\treturn 0;\n-}\n-\n+\t\t       struct rte_flow_error *error);\n 
static int\n mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,\n \t\t\t      struct rte_flow_item *pmd_items,\n-\t\t\t      uint32_t num_items, struct rte_flow_error *err)\n-{\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n-\n-\trte_spinlock_lock(&thub->sl);\n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (&tun->item == pmd_items) {\n-\t\t\tLIST_REMOVE(tun, chain);\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\trte_spinlock_unlock(&thub->sl);\n-\tif (!tun || num_items != 1)\n-\t\treturn rte_flow_error_set(err, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t\t  \"invalid argument\");\n-\tif (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))\n-\t\tmlx5_flow_tunnel_free(dev, tun);\n-\treturn 0;\n-}\n-\n+\t\t\t      uint32_t num_items, struct rte_flow_error *err);\n static int\n mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,\n \t\t\t\tstruct rte_flow_action *pmd_actions,\n \t\t\t\tuint32_t num_actions,\n-\t\t\t\tstruct rte_flow_error *err)\n-{\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n-\n-\trte_spinlock_lock(&thub->sl);\n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (&tun->action == pmd_actions) {\n-\t\t\tLIST_REMOVE(tun, chain);\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\trte_spinlock_unlock(&thub->sl);\n-\tif (!tun || num_actions != 1)\n-\t\treturn rte_flow_error_set(err, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n-\t\t\t\t\t  \"invalid argument\");\n-\tif (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))\n-\t\tmlx5_flow_tunnel_free(dev, tun);\n-\n-\treturn 0;\n-}\n-\n+\t\t\t\tstruct rte_flow_error *err);\n static int\n mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,\n \t\t\t\t  struct rte_mbuf *m,\n \t\t\t\t  struct rte_flow_restore_info *info,\n-\t\t\t\t  struct rte_flow_error *err)\n-{\n-\tuint64_t ol_flags = m->ol_flags;\n-\tconst struct mlx5_flow_tbl_data_entry *tble;\n-\tconst uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;\n-\n-\tif ((ol_flags & mask) != mask)\n-\t\tgoto err;\n-\ttble = tunnel_mark_decode(dev, m->hash.fdir.hi);\n-\tif (!tble) {\n-\t\tDRV_LOG(DEBUG, \"port %u invalid miss tunnel mark %#x\",\n-\t\t\tdev->data->port_id, m->hash.fdir.hi);\n-\t\tgoto err;\n-\t}\n-\tMLX5_ASSERT(tble->tunnel);\n-\tmemcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));\n-\tinfo->group_id = tble->group_id;\n-\tinfo->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |\n-\t\t      RTE_FLOW_RESTORE_INFO_GROUP_ID |\n-\t\t      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;\n-\n-\treturn 0;\n-\n-err:\n-\treturn rte_flow_error_set(err, EINVAL,\n-\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n-\t\t\t\t  \"failed to get restore info\");\n-}\n+\t\t\t\t  struct rte_flow_error *err);\n \n static const struct rte_flow_ops mlx5_flow_ops = {\n \t.validate = mlx5_flow_validate,\n@@ -4160,174 +4066,38 @@ flow_hairpin_split(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-__extension__\n-union tunnel_offload_mark {\n-\tuint32_t val;\n-\tstruct {\n-\t\tuint32_t app_reserve:8;\n-\t\tuint32_t table_id:15;\n-\t\tuint32_t transfer:1;\n-\t\tuint32_t _unused_:8;\n-\t};\n-};\n-\n-struct tunnel_default_miss_ctx {\n-\tuint16_t *queue;\n-\t__extension__\n-\tunion {\n-\t\tstruct rte_flow_action_rss action_rss;\n-\t\tstruct rte_flow_action_queue miss_queue;\n-\t\tstruct rte_flow_action_jump miss_jump;\n-\t\tuint8_t raw[0];\n-\t};\n-};\n-\n+/**\n+ * The last stage of splitting chain, just creates the subflow\n+ * without any modification.\n+ *\n+ * @param[in] 
dev\n+ *   Pointer to Ethernet device.\n+ * @param[in] flow\n+ *   Parent flow structure pointer.\n+ * @param[in, out] sub_flow\n+ *   Pointer to return the created subflow, may be NULL.\n+ * @param[in] attr\n+ *   Flow rule attributes.\n+ * @param[in] items\n+ *   Pattern specification (list terminated by the END pattern item).\n+ * @param[in] actions\n+ *   Associated actions (list terminated by the END action).\n+ * @param[in] flow_split_info\n+ *   Pointer to flow split info structure.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL.\n+ * @return\n+ *   0 on success, negative value otherwise\n+ */\n static int\n-flow_tunnel_add_default_miss(struct rte_eth_dev *dev,\n-\t\t\t     struct rte_flow *flow,\n-\t\t\t     const struct rte_flow_attr *attr,\n-\t\t\t     const struct rte_flow_action *app_actions,\n-\t\t\t     uint32_t flow_idx,\n-\t\t\t     struct tunnel_default_miss_ctx *ctx,\n-\t\t\t     struct rte_flow_error *error)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow *dev_flow;\n-\tstruct rte_flow_attr miss_attr = *attr;\n-\tconst struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;\n-\tconst struct rte_flow_item miss_items[2] = {\n-\t\t{\n-\t\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n-\t\t\t.spec = NULL,\n-\t\t\t.last = NULL,\n-\t\t\t.mask = NULL\n-\t\t},\n-\t\t{\n-\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n-\t\t\t.spec = NULL,\n-\t\t\t.last = NULL,\n-\t\t\t.mask = NULL\n-\t\t}\n-\t};\n-\tunion tunnel_offload_mark mark_id;\n-\tstruct rte_flow_action_mark miss_mark;\n-\tstruct rte_flow_action miss_actions[3] = {\n-\t\t[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },\n-\t\t[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }\n-\t};\n-\tconst struct rte_flow_action_jump *jump_data;\n-\tuint32_t i, flow_table = 0; /* prevent compilation warning */\n-\tstruct flow_grp_info grp_info = {\n-\t\t.external = 1,\n-\t\t.transfer = attr->transfer,\n-\t\t.fdb_def_rule = !!priv->fdb_def_rule,\n-\t\t.std_tbl_fix = 0,\n-\t};\n-\tint ret;\n-\n-\tif (!attr->transfer) {\n-\t\tuint32_t q_size;\n-\n-\t\tmiss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;\n-\t\tq_size = priv->reta_idx_n * sizeof(ctx->queue[0]);\n-\t\tctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,\n-\t\t\t\t\t 0, SOCKET_ID_ANY);\n-\t\tif (!ctx->queue)\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, ENOMEM,\n-\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\tNULL, \"invalid default miss RSS\");\n-\t\tctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,\n-\t\tctx->action_rss.level = 0,\n-\t\tctx->action_rss.types = priv->rss_conf.rss_hf,\n-\t\tctx->action_rss.key_len = priv->rss_conf.rss_key_len,\n-\t\tctx->action_rss.queue_num = priv->reta_idx_n,\n-\t\tctx->action_rss.key = priv->rss_conf.rss_key,\n-\t\tctx->action_rss.queue = ctx->queue;\n-\t\tif (!priv->reta_idx_n || !priv->rxqs_n)\n-\t\t\treturn rte_flow_error_set\n-\t\t\t\t(error, EINVAL,\n-\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\tNULL, \"invalid port configuration\");\n-\t\tif (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))\n-\t\t\tctx->action_rss.types = 0;\n-\t\tfor (i = 0; i != priv->reta_idx_n; ++i)\n-\t\t\tctx->queue[i] = (*priv->reta_idx)[i];\n-\t} else {\n-\t\tmiss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;\n-\t\tctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;\n-\t}\n-\tmiss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;\n-\tfor (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);\n-\tjump_data = app_actions->conf;\n-\tmiss_attr.priority 
= MLX5_TNL_MISS_RULE_PRIORITY;\n-\tmiss_attr.group = jump_data->group;\n-\tret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,\n-\t\t\t\t       &flow_table, grp_info, error);\n-\tif (ret)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n-\t\t\t\t\t  NULL, \"invalid tunnel id\");\n-\tmark_id.app_reserve = 0;\n-\tmark_id.table_id = tunnel_flow_tbl_to_id(flow_table);\n-\tmark_id.transfer = !!attr->transfer;\n-\tmark_id._unused_ = 0;\n-\tmiss_mark.id = mark_id.val;\n-\tdev_flow = flow_drv_prepare(dev, flow, &miss_attr,\n-\t\t\t\t    miss_items, miss_actions, flow_idx, error);\n-\tif (!dev_flow)\n-\t\treturn -rte_errno;\n-\tdev_flow->flow = flow;\n-\tdev_flow->external = true;\n-\tdev_flow->tunnel = tunnel;\n-\t/* Subflow object was created, we must include one in the list. */\n-\tSILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,\n-\t\t      dev_flow->handle, next);\n-\tDRV_LOG(DEBUG,\n-\t\t\"port %u tunnel type=%d id=%u miss rule priority=%u group=%u\",\n-\t\tdev->data->port_id, tunnel->app_tunnel.type,\n-\t\ttunnel->tunnel_id, miss_attr.priority, miss_attr.group);\n-\tret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,\n-\t\t\t\t  miss_actions, error);\n-\tif (!ret)\n-\t\tret = flow_mreg_update_copy_table(dev, flow, miss_actions,\n-\t\t\t\t\t\t  error);\n-\n-\treturn ret;\n-}\n-\n-/**\n- * The last stage of splitting chain, just creates the subflow\n- * without any modification.\n- *\n- * @param[in] dev\n- *   Pointer to Ethernet device.\n- * @param[in] flow\n- *   Parent flow structure pointer.\n- * @param[in, out] sub_flow\n- *   Pointer to return the created subflow, may be NULL.\n- * @param[in] attr\n- *   Flow rule attributes.\n- * @param[in] items\n- *   Pattern specification (list terminated by the END pattern item).\n- * @param[in] actions\n- *   Associated actions (list terminated by the END action).\n- * @param[in] flow_split_info\n- *   Pointer to flow split info structure.\n- * @param[out] error\n- *   Perform verbose error reporting if not NULL.\n- * @return\n- *   0 on success, negative value otherwise\n- */\n-static int\n-flow_create_split_inner(struct rte_eth_dev *dev,\n-\t\t\tstruct rte_flow *flow,\n-\t\t\tstruct mlx5_flow **sub_flow,\n-\t\t\tconst struct rte_flow_attr *attr,\n-\t\t\tconst struct rte_flow_item items[],\n-\t\t\tconst struct rte_flow_action actions[],\n-\t\t\tstruct mlx5_flow_split_info *flow_split_info,\n-\t\t\tstruct rte_flow_error *error)\n+flow_create_split_inner(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_flow *flow,\n+\t\t\tstruct mlx5_flow **sub_flow,\n+\t\t\tconst struct rte_flow_attr *attr,\n+\t\t\tconst struct rte_flow_item items[],\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct mlx5_flow_split_info *flow_split_info,\n+\t\t\tstruct rte_flow_error *error)\n {\n \tstruct mlx5_flow *dev_flow;\n \n@@ -6953,26 +6723,6 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,\n \tsh->cmng.pending_queries--;\n }\n \n-static const struct mlx5_flow_tbl_data_entry  *\n-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n-\tstruct mlx5_hlist_entry *he;\n-\tunion tunnel_offload_mark mbits = { .val = mark };\n-\tunion mlx5_flow_tbl_key table_key = {\n-\t\t{\n-\t\t\t.table_id = tunnel_id_to_flow_tbl(mbits.table_id),\n-\t\t\t.dummy = 0,\n-\t\t\t.domain = !!mbits.transfer,\n-\t\t\t.direction = 0,\n-\t\t}\n-\t};\n-\the = mlx5_hlist_lookup(sh->flow_tbls, 
table_key.v64, NULL);\n-\treturn he ?\n-\t       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;\n-}\n-\n static void\n mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,\n \t\t\t\t   struct mlx5_hlist_entry *entry)\n@@ -7017,35 +6767,6 @@ mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,\n \treturn NULL;\n }\n \n-static uint32_t\n-tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,\n-\t\t\t\tconst struct mlx5_flow_tunnel *tunnel,\n-\t\t\t\tuint32_t group, uint32_t *table,\n-\t\t\t\tstruct rte_flow_error *error)\n-{\n-\tstruct mlx5_hlist_entry *he;\n-\tstruct tunnel_tbl_entry *tte;\n-\tunion tunnel_tbl_key key = {\n-\t\t.tunnel_id = tunnel ? tunnel->tunnel_id : 0,\n-\t\t.group = group\n-\t};\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_hlist *group_hash;\n-\n-\tgroup_hash = tunnel ? tunnel->groups : thub->groups;\n-\the = mlx5_hlist_register(group_hash, key.val, NULL);\n-\tif (!he)\n-\t\treturn rte_flow_error_set(error, EINVAL,\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n-\t\t\t\t\t  NULL,\n-\t\t\t\t\t  \"tunnel group index not supported\");\n-\ttte = container_of(he, typeof(*tte), hash);\n-\t*table = tte->flow_table;\n-\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%#x table=%#x\",\n-\t\tdev->data->port_id, key.tunnel_id, group, *table);\n-\treturn 0;\n-}\n-\n static int\n flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,\n \t\t    struct flow_grp_info grp_info, struct rte_flow_error *error)\n@@ -7505,64 +7226,263 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev)\n \treturn ret;\n }\n \n-static void\n-mlx5_flow_tunnel_free(struct rte_eth_dev *dev,\n-\t\t      struct mlx5_flow_tunnel *tunnel)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\n-\tDRV_LOG(DEBUG, \"port %u release pmd tunnel id=0x%x\",\n-\t\tdev->data->port_id, tunnel->tunnel_id);\n-\tRTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));\n-\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],\n-\t\t\ttunnel->tunnel_id);\n-\tmlx5_hlist_destroy(tunnel->groups);\n-\tmlx5_free(tunnel);\n-}\n+#ifndef HAVE_MLX5DV_DR\n+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))\n+#else\n+#define MLX5_DOMAIN_SYNC_FLOW \\\n+\t(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)\n+#endif\n \n-static struct mlx5_flow_tunnel *\n-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)\n+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)\n {\n-\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n-\tstruct mlx5_flow_tunnel *tun;\n-\n-\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n-\t\tif (tun->tunnel_id == id)\n-\t\t\tbreak;\n-\t}\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\tint ret;\n+\tstruct rte_flow_attr attr = { .transfer = 0 };\n \n-\treturn tun;\n+\tfops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));\n+\tret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);\n+\tif (ret > 0)\n+\t\tret = -ret;\n+\treturn ret;\n }\n \n-static struct mlx5_flow_tunnel *\n-mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,\n-\t\t\t  const struct rte_flow_tunnel *app_tunnel)\n+/**\n+ * tunnel offload functionalilty is defined for DV environment only\n+ */\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+__extension__\n+union tunnel_offload_mark {\n+\tuint32_t val;\n+\tstruct {\n+\t\tuint32_t app_reserve:8;\n+\t\tuint32_t table_id:15;\n+\t\tuint32_t transfer:1;\n+\t\tuint32_t _unused_:8;\n+\t};\n+};\n+\n+static int\n+flow_tunnel_add_default_miss(struct 
rte_eth_dev *dev,\n+\t\t\t     struct rte_flow *flow,\n+\t\t\t     const struct rte_flow_attr *attr,\n+\t\t\t     const struct rte_flow_action *app_actions,\n+\t\t\t     uint32_t flow_idx,\n+\t\t\t     struct tunnel_default_miss_ctx *ctx,\n+\t\t\t     struct rte_flow_error *error)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_flow_tunnel *tunnel;\n-\tuint32_t id;\n+\tstruct mlx5_flow *dev_flow;\n+\tstruct rte_flow_attr miss_attr = *attr;\n+\tconst struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;\n+\tconst struct rte_flow_item miss_items[2] = {\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n+\t\t\t.spec = NULL,\n+\t\t\t.last = NULL,\n+\t\t\t.mask = NULL\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_END,\n+\t\t\t.spec = NULL,\n+\t\t\t.last = NULL,\n+\t\t\t.mask = NULL\n+\t\t}\n+\t};\n+\tunion tunnel_offload_mark mark_id;\n+\tstruct rte_flow_action_mark miss_mark;\n+\tstruct rte_flow_action miss_actions[3] = {\n+\t\t[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },\n+\t\t[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }\n+\t};\n+\tconst struct rte_flow_action_jump *jump_data;\n+\tuint32_t i, flow_table = 0; /* prevent compilation warning */\n+\tstruct flow_grp_info grp_info = {\n+\t\t.external = 1,\n+\t\t.transfer = attr->transfer,\n+\t\t.fdb_def_rule = !!priv->fdb_def_rule,\n+\t\t.std_tbl_fix = 0,\n+\t};\n+\tint ret;\n \n-\tmlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],\n-\t\t\t  &id);\n-\tif (id >= MLX5_MAX_TUNNELS) {\n-\t\tmlx5_ipool_free(priv->sh->ipool\n-\t\t\t\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);\n-\t\tDRV_LOG(ERR, \"Tunnel ID %d exceed max limit.\", id);\n-\t\treturn NULL;\n-\t} else if (!id) {\n-\t\treturn NULL;\n-\t}\n-\t/**\n-\t * mlx5 flow tunnel is an auxlilary data structure\n-\t * It's not part of IO. 
No need to allocate it from\n-\t * huge pages pools dedicated for IO\n-\t */\n-\ttunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),\n-\t\t\t     0, SOCKET_ID_ANY);\n-\tif (!tunnel) {\n-\t\tmlx5_ipool_free(priv->sh->ipool\n-\t\t\t\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);\n-\t\treturn NULL;\n+\tif (!attr->transfer) {\n+\t\tuint32_t q_size;\n+\n+\t\tmiss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;\n+\t\tq_size = priv->reta_idx_n * sizeof(ctx->queue[0]);\n+\t\tctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,\n+\t\t\t\t\t 0, SOCKET_ID_ANY);\n+\t\tif (!ctx->queue)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, ENOMEM,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\tNULL, \"invalid default miss RSS\");\n+\t\tctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,\n+\t\tctx->action_rss.level = 0,\n+\t\tctx->action_rss.types = priv->rss_conf.rss_hf,\n+\t\tctx->action_rss.key_len = priv->rss_conf.rss_key_len,\n+\t\tctx->action_rss.queue_num = priv->reta_idx_n,\n+\t\tctx->action_rss.key = priv->rss_conf.rss_key,\n+\t\tctx->action_rss.queue = ctx->queue;\n+\t\tif (!priv->reta_idx_n || !priv->rxqs_n)\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\tNULL, \"invalid port configuration\");\n+\t\tif (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))\n+\t\t\tctx->action_rss.types = 0;\n+\t\tfor (i = 0; i != priv->reta_idx_n; ++i)\n+\t\t\tctx->queue[i] = (*priv->reta_idx)[i];\n+\t} else {\n+\t\tmiss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;\n+\t\tctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;\n+\t}\n+\tmiss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;\n+\tfor (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);\n+\tjump_data = app_actions->conf;\n+\tmiss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;\n+\tmiss_attr.group = jump_data->group;\n+\tret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,\n+\t\t\t\t       &flow_table, grp_info, error);\n+\tif (ret)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t\t  NULL, \"invalid tunnel id\");\n+\tmark_id.app_reserve = 0;\n+\tmark_id.table_id = tunnel_flow_tbl_to_id(flow_table);\n+\tmark_id.transfer = !!attr->transfer;\n+\tmark_id._unused_ = 0;\n+\tmiss_mark.id = mark_id.val;\n+\tdev_flow = flow_drv_prepare(dev, flow, &miss_attr,\n+\t\t\t\t    miss_items, miss_actions, flow_idx, error);\n+\tif (!dev_flow)\n+\t\treturn -rte_errno;\n+\tdev_flow->flow = flow;\n+\tdev_flow->external = true;\n+\tdev_flow->tunnel = tunnel;\n+\t/* Subflow object was created, we must include one in the list. 
*/\n+\tSILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,\n+\t\t      dev_flow->handle, next);\n+\tDRV_LOG(DEBUG,\n+\t\t\"port %u tunnel type=%d id=%u miss rule priority=%u group=%u\",\n+\t\tdev->data->port_id, tunnel->app_tunnel.type,\n+\t\ttunnel->tunnel_id, miss_attr.priority, miss_attr.group);\n+\tret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,\n+\t\t\t\t  miss_actions, error);\n+\tif (!ret)\n+\t\tret = flow_mreg_update_copy_table(dev, flow, miss_actions,\n+\t\t\t\t\t\t  error);\n+\n+\treturn ret;\n+}\n+\n+static const struct mlx5_flow_tbl_data_entry  *\n+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_dev_ctx_shared *sh = priv->sh;\n+\tstruct mlx5_hlist_entry *he;\n+\tunion tunnel_offload_mark mbits = { .val = mark };\n+\tunion mlx5_flow_tbl_key table_key = {\n+\t\t{\n+\t\t\t.table_id = tunnel_id_to_flow_tbl(mbits.table_id),\n+\t\t\t.dummy = 0,\n+\t\t\t.domain = !!mbits.transfer,\n+\t\t\t.direction = 0,\n+\t\t}\n+\t};\n+\the = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);\n+\treturn he ?\n+\t       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;\n+}\n+\n+static uint32_t\n+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,\n+\t\t\t\tconst struct mlx5_flow_tunnel *tunnel,\n+\t\t\t\tuint32_t group, uint32_t *table,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct mlx5_hlist_entry *he;\n+\tstruct tunnel_tbl_entry *tte;\n+\tunion tunnel_tbl_key key = {\n+\t\t.tunnel_id = tunnel ? tunnel->tunnel_id : 0,\n+\t\t.group = group\n+\t};\n+\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n+\tstruct mlx5_hlist *group_hash;\n+\n+\tgroup_hash = tunnel ? tunnel->groups : thub->groups;\n+\the = mlx5_hlist_register(group_hash, key.val, NULL);\n+\tif (!he)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"tunnel group index not supported\");\n+\ttte = container_of(he, typeof(*tte), hash);\n+\t*table = tte->flow_table;\n+\tDRV_LOG(DEBUG, \"port %u tunnel %u group=%#x table=%#x\",\n+\t\tdev->data->port_id, key.tunnel_id, group, *table);\n+\treturn 0;\n+}\n+\n+static void\n+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,\n+\t\t      struct mlx5_flow_tunnel *tunnel)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tDRV_LOG(DEBUG, \"port %u release pmd tunnel id=0x%x\",\n+\t\tdev->data->port_id, tunnel->tunnel_id);\n+\tRTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));\n+\tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],\n+\t\t\ttunnel->tunnel_id);\n+\tmlx5_hlist_destroy(tunnel->groups);\n+\tmlx5_free(tunnel);\n+}\n+\n+static struct mlx5_flow_tunnel *\n+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)\n+{\n+\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n+\tstruct mlx5_flow_tunnel *tun;\n+\n+\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n+\t\tif (tun->tunnel_id == id)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn tun;\n+}\n+\n+static struct mlx5_flow_tunnel *\n+mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,\n+\t\t\t  const struct rte_flow_tunnel *app_tunnel)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_flow_tunnel *tunnel;\n+\tuint32_t id;\n+\n+\tmlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],\n+\t\t\t  &id);\n+\tif (id >= MLX5_MAX_TUNNELS) {\n+\t\tmlx5_ipool_free(priv->sh->ipool\n+\t\t\t\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);\n+\t\tDRV_LOG(ERR, \"Tunnel ID %d exceed max limit.\", 
id);\n+\t\treturn NULL;\n+\t} else if (!id) {\n+\t\treturn NULL;\n+\t}\n+\t/**\n+\t * mlx5 flow tunnel is an auxlilary data structure\n+\t * It's not part of IO. No need to allocate it from\n+\t * huge pages pools dedicated for IO\n+\t */\n+\ttunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),\n+\t\t\t     0, SOCKET_ID_ANY);\n+\tif (!tunnel) {\n+\t\tmlx5_ipool_free(priv->sh->ipool\n+\t\t\t\t[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);\n+\t\treturn NULL;\n \t}\n \ttunnel->groups = mlx5_hlist_create(\"tunnel groups\", 1024, 0, 0,\n \t\t\t\t\t   mlx5_flow_tunnel_grp2tbl_create_cb,\n@@ -7671,23 +7591,228 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)\n \treturn err;\n }\n \n-#ifndef HAVE_MLX5DV_DR\n-#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))\n-#else\n-#define MLX5_DOMAIN_SYNC_FLOW \\\n-\t(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)\n-#endif\n+static int\n+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,\n+\t\t    struct rte_flow_tunnel *app_tunnel,\n+\t\t    struct rte_flow_action **actions,\n+\t\t    uint32_t *num_of_actions,\n+\t\t    struct rte_flow_error *error)\n+{\n+\tint ret;\n+\tstruct mlx5_flow_tunnel *tunnel;\n+\tconst char *err_msg = NULL;\n+\tbool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);\n \n-int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)\n+\tif (!verdict)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,\n+\t\t\t\t\t  err_msg);\n+\tret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);\n+\tif (ret < 0) {\n+\t\treturn rte_flow_error_set(error, ret,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,\n+\t\t\t\t\t  \"failed to initialize pmd tunnel\");\n+\t}\n+\t*actions = &tunnel->action;\n+\t*num_of_actions = 1;\n+\treturn 0;\n+}\n+\n+static int\n+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,\n+\t\t       struct rte_flow_tunnel *app_tunnel,\n+\t\t       struct rte_flow_item **items,\n+\t\t       uint32_t *num_of_items,\n+\t\t       struct rte_flow_error *error)\n {\n-\tstruct rte_eth_dev *dev = &rte_eth_devices[port_id];\n-\tconst struct mlx5_flow_driver_ops *fops;\n \tint ret;\n-\tstruct rte_flow_attr attr = { .transfer = 0 };\n+\tstruct mlx5_flow_tunnel *tunnel;\n+\tconst char *err_msg = NULL;\n+\tbool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);\n \n-\tfops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));\n-\tret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);\n-\tif (ret > 0)\n-\t\tret = -ret;\n-\treturn ret;\n+\tif (!verdict)\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t\t  err_msg);\n+\tret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);\n+\tif (ret < 0) {\n+\t\treturn rte_flow_error_set(error, ret,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t\t  \"failed to initialize pmd tunnel\");\n+\t}\n+\t*items = &tunnel->item;\n+\t*num_of_items = 1;\n+\treturn 0;\n+}\n+static int\n+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,\n+\t\t\t      struct rte_flow_item *pmd_items,\n+\t\t\t      uint32_t num_items, struct rte_flow_error *err)\n+{\n+\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n+\tstruct mlx5_flow_tunnel *tun;\n+\n+\trte_spinlock_lock(&thub->sl);\n+\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n+\t\tif (&tun->item == pmd_items) {\n+\t\t\tLIST_REMOVE(tun, chain);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\trte_spinlock_unlock(&thub->sl);\n+\tif (!tun || num_items != 1)\n+\t\treturn rte_flow_error_set(err, 
EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t\t  \"invalid argument\");\n+\tif (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))\n+\t\tmlx5_flow_tunnel_free(dev, tun);\n+\treturn 0;\n+}\n+\n+static int\n+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,\n+\t\t\t\tstruct rte_flow_action *pmd_actions,\n+\t\t\t\tuint32_t num_actions,\n+\t\t\t\tstruct rte_flow_error *err)\n+{\n+\tstruct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);\n+\tstruct mlx5_flow_tunnel *tun;\n+\n+\trte_spinlock_lock(&thub->sl);\n+\tLIST_FOREACH(tun, &thub->tunnels, chain) {\n+\t\tif (&tun->action == pmd_actions) {\n+\t\t\tLIST_REMOVE(tun, chain);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\trte_spinlock_unlock(&thub->sl);\n+\tif (!tun || num_actions != 1)\n+\t\treturn rte_flow_error_set(err, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\t\t  \"invalid argument\");\n+\tif (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))\n+\t\tmlx5_flow_tunnel_free(dev, tun);\n+\n+\treturn 0;\n }\n+\n+static int\n+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,\n+\t\t\t\t  struct rte_mbuf *m,\n+\t\t\t\t  struct rte_flow_restore_info *info,\n+\t\t\t\t  struct rte_flow_error *err)\n+{\n+\tuint64_t ol_flags = m->ol_flags;\n+\tconst struct mlx5_flow_tbl_data_entry *tble;\n+\tconst uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;\n+\n+\tif ((ol_flags & mask) != mask)\n+\t\tgoto err;\n+\ttble = tunnel_mark_decode(dev, m->hash.fdir.hi);\n+\tif (!tble) {\n+\t\tDRV_LOG(DEBUG, \"port %u invalid miss tunnel mark %#x\",\n+\t\t\tdev->data->port_id, m->hash.fdir.hi);\n+\t\tgoto err;\n+\t}\n+\tMLX5_ASSERT(tble->tunnel);\n+\tmemcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));\n+\tinfo->group_id = tble->group_id;\n+\tinfo->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |\n+\t\t      RTE_FLOW_RESTORE_INFO_GROUP_ID |\n+\t\t      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;\n+\n+\treturn 0;\n+\n+err:\n+\treturn rte_flow_error_set(err, EINVAL,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t  \"failed to get restore info\");\n+}\n+\n+#else /* HAVE_IBV_FLOW_DV_SUPPORT */\n+static int\n+mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t   __rte_unused struct rte_flow_tunnel *app_tunnel,\n+\t\t\t   __rte_unused struct rte_flow_action **actions,\n+\t\t\t   __rte_unused uint32_t *num_of_actions,\n+\t\t\t   __rte_unused struct rte_flow_error *error)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,\n+\t\t       __rte_unused struct rte_flow_tunnel *app_tunnel,\n+\t\t       __rte_unused struct rte_flow_item **items,\n+\t\t       __rte_unused uint32_t *num_of_items,\n+\t\t       __rte_unused struct rte_flow_error *error)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t      __rte_unused struct rte_flow_item *pmd_items,\n+\t\t\t      __rte_unused uint32_t num_items,\n+\t\t\t      __rte_unused struct rte_flow_error *err)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t\t__rte_unused struct rte_flow_action *pmd_action,\n+\t\t\t\t__rte_unused uint32_t num_actions,\n+\t\t\t\t__rte_unused struct rte_flow_error *err)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t\t  __rte_unused struct rte_mbuf *m,\n+\t\t\t\t  __rte_unused struct rte_flow_restore_info *i,\n+\t\t\t\t  __rte_unused struct 
rte_flow_error *err)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static int\n+flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t     __rte_unused struct rte_flow *flow,\n+\t\t\t     __rte_unused const struct rte_flow_attr *attr,\n+\t\t\t     __rte_unused const struct rte_flow_action *actions,\n+\t\t\t     __rte_unused uint32_t flow_idx,\n+\t\t\t     __rte_unused struct tunnel_default_miss_ctx *ctx,\n+\t\t\t     __rte_unused struct rte_flow_error *error)\n+{\n+\treturn -ENOTSUP;\n+}\n+\n+static struct mlx5_flow_tunnel *\n+mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,\n+\t\t    __rte_unused uint32_t id)\n+{\n+\treturn NULL;\n+}\n+\n+static void\n+mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,\n+\t\t      __rte_unused struct mlx5_flow_tunnel *tunnel)\n+{\n+}\n+\n+static uint32_t\n+tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,\n+\t\t\t\t__rte_unused const struct mlx5_flow_tunnel *t,\n+\t\t\t\t__rte_unused uint32_t group,\n+\t\t\t\t__rte_unused uint32_t *table,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t  \"tunnel offload requires DV support\");\n+}\n+\n+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */\n+\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 5fac8672fc..fbc6173fcb 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -991,8 +991,13 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)\n static inline bool\n is_tunnel_offload_active(struct rte_eth_dev *dev)\n {\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \treturn !!priv->config.dv_miss_info;\n+#else\n+\tRTE_SET_USED(dev);\n+\treturn false;\n+#endif\n }\n \n static inline bool\n",
    "prefixes": [
        "v3",
        "2/6"
    ]
}
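The response above is hypermedia-style: fields such as "mbox", "comments", and "checks" carry URLs to related resources. A short sketch of following those links with requests (the output file name is arbitrary; pagination handling is omitted):

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/84214/").json()

# "mbox" points at the raw patch in mbox format, ready for `git am`.
with open("patch-84214.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# "comments" and "checks" are themselves API endpoints returning JSON
# lists (results may be paginated; this reads only the first page).
comments = requests.get(patch["comments"]).json()
checks = requests.get(patch["checks"]).json()
print(len(comments), "comments;", len(checks), "checks;",
      "aggregate check state:", patch["check"])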