get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are modified).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/44087/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 44087,
    "url": "http://patches.dpdk.org/api/patches/44087/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20180831092038.23051-8-adrien.mazarguil@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20180831092038.23051-8-adrien.mazarguil@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20180831092038.23051-8-adrien.mazarguil@6wind.com",
    "date": "2018-08-31T09:57:40",
    "name": "[7/8] net/mlx5: add VXLAN encap support to switch flow rules",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "6c92a837a0440cb4b43fa5b2a4e0f2318838a54b",
    "submitter": {
        "id": 165,
        "url": "http://patches.dpdk.org/api/people/165/?format=api",
        "name": "Adrien Mazarguil",
        "email": "adrien.mazarguil@6wind.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20180831092038.23051-8-adrien.mazarguil@6wind.com/mbox/",
    "series": [
        {
            "id": 1126,
            "url": "http://patches.dpdk.org/api/series/1126/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=1126",
            "date": "2018-08-31T09:57:25",
            "name": "net/mlx5: add switch offload for VXLAN encap/decap",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/1126/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/44087/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/44087/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id D155B58EC;\n\tFri, 31 Aug 2018 11:57:59 +0200 (CEST)",
            "from mail-wr1-f67.google.com (mail-wr1-f67.google.com\n\t[209.85.221.67]) by dpdk.org (Postfix) with ESMTP id 68A43548B\n\tfor <dev@dpdk.org>; Fri, 31 Aug 2018 11:57:57 +0200 (CEST)",
            "by mail-wr1-f67.google.com with SMTP id m27-v6so10666745wrf.3\n\tfor <dev@dpdk.org>; Fri, 31 Aug 2018 02:57:57 -0700 (PDT)",
            "from 6wind.com (host.78.145.23.62.rev.coltfrance.com.\n\t[62.23.145.78]) by smtp.gmail.com with ESMTPSA id\n\tn15-v6sm6923317wrm.27.2018.08.31.02.57.55\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tFri, 31 Aug 2018 02:57:55 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=date:from:to:cc:subject:message-id:references:mime-version\n\t:content-disposition:in-reply-to;\n\tbh=KOcIVc2Iwu/+NIB+YrgKf2tAqa5JBtsIOIlC6nTsC88=;\n\tb=tA0VDbbEOqL25afBzV6PDOyVfWzeex+hIwitLBG4wgJvtCGJd7qkASOcJPWM8OB9Ms\n\t30dg6kRyMfEq6rNuzce83ZXe/RXdlrwFUohQVxMepstntsJ/nUuN3r3DzZVAseFnAWmw\n\toRpWN9oVDRr3q3+e+ImuxAlqnKMjhdEjZxjrPH6M0wZy0L/H0+VoN8wY0RzMH4kwcJpP\n\tJpw7DtdAvAzM/HFybuXwVLOOB/vverB+cLUyi1djPQ44DKCtnF7IkBK08uYDZeVVSIBm\n\ts6BFHiibqcL3qvQaf5afhArAEULBS/exlK7F2zyRBGodzjj4bdcBvvIVxfjcK+2EgVSp\n\tTBjA==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:date:from:to:cc:subject:message-id:references\n\t:mime-version:content-disposition:in-reply-to;\n\tbh=KOcIVc2Iwu/+NIB+YrgKf2tAqa5JBtsIOIlC6nTsC88=;\n\tb=iCAO8NtkLdCwmcQSAbcxkFhCaO1llL2bOlCL9pGK6onlBjdDBmlRjfGd3rn3Cy8iVt\n\tBtlCEaW1YJi9m2kwFenq16vNDABSdSmu+McXoQaQWy2y5sCII2E1KHZu/S2nss1P0LW8\n\tTZ0oC0uBMO1ZsVv1FBuPHo5/G71Nv82JQr0MBb/e4ljqOlixP3RQKDvpzN8ow37LPL0U\n\tCdX7vjZTvnIcOtfv9ZmhrYX0ubpasiRKGF248OG4VLFTXuRR3VTzxc6+pOK+59TIuiHN\n\tA68JEdXd17V1qw+ARPM6+5gPoa83HUFczH4tQiC4i5SYjIjKU3XSvpG8JIaOgsf5U5p7\n\tzCXA==",
        "X-Gm-Message-State": "APzg51BVWav/OnmWoDNfHG/eKnUju1qzKq6jVThB1pz7C9rpMTI8S+YH\n\tvkJo5TJTxUB/LR8uNUtm+nj64A==",
        "X-Google-Smtp-Source": "ANB0VdZCJdQLQ1nnGHAFbt5Ji5AiD8CvC63iK5VxLRPSON48kewthY6AUqWXyT/bzcwQdL6Iu8vyRg==",
        "X-Received": "by 2002:adf:9227:: with SMTP id\n\t36-v6mr10649267wrj.275.1535709476616; \n\tFri, 31 Aug 2018 02:57:56 -0700 (PDT)",
        "Date": "Fri, 31 Aug 2018 11:57:40 +0200",
        "From": "Adrien Mazarguil <adrien.mazarguil@6wind.com>",
        "To": "Shahaf Shuler <shahafs@mellanox.com>, Yongseok Koh <yskoh@mellanox.com>, \n\tSlava Ovsiienko <viacheslavo@mellanox.com>",
        "Cc": "dev@dpdk.org",
        "Message-ID": "<20180831092038.23051-8-adrien.mazarguil@6wind.com>",
        "References": "<20180831092038.23051-1-adrien.mazarguil@6wind.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=us-ascii",
        "Content-Disposition": "inline",
        "In-Reply-To": "<20180831092038.23051-1-adrien.mazarguil@6wind.com>",
        "X-Mailer": "git-send-email 2.11.0",
        "Subject": "[dpdk-dev] [PATCH 7/8] net/mlx5: add VXLAN encap support to switch\n\tflow rules",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch is huge because support for VXLAN encapsulation in switch flow\nrules involves configuration of virtual network interfaces on the host\nsystem including source addresses, routes and neighbor entries for flow\nrules to be offloadable by TC. All of this is done through Netlink.\n\nVXLAN interfaces are dynamically created for each combination of local UDP\nport and outer network interface associated with flow rules, then used as\ntargets for TC \"flower\" filters in order to perform encapsulation.\n\nTo automatically create and remove these interfaces on a needed basis\naccording to the applied flow rules, the PMD maintains global resources\nshared between all PMD instances of the primary process.\n\nTestpmd example:\n\n- Setting up outer properties of VXLAN tunnel:\n\n  set vxlan ip-version ipv4 vni 0x112233 udp-src 4242 udp-dst 4789\n    ip-src 1.1.1.1 ip-dst 2.2.2.2\n    eth-src 00:11:22:33:44:55 eth-dst 66:77:88:99:aa:bb\n\n- Creating a flow rule on port ID 2 performing VXLAN encapsulation with the\n  above properties and directing the resulting traffic to port ID 1:\n\n  flow create 2 ingress transfer pattern eth src is 00:11:22:33:44:55 /\n     ipv4 / udp dst is 5566 / end actions vxlan_encap / port_id id 1 / end\n\nSigned-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>\n---\n drivers/net/mlx5/Makefile       |   10 +\n drivers/net/mlx5/mlx5_nl_flow.c | 1198 +++++++++++++++++++++++++++++++++-\n 2 files changed, 1204 insertions(+), 4 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile\nindex 2e70dec5b..1ba4ce612 100644\n--- a/drivers/net/mlx5/Makefile\n+++ b/drivers/net/mlx5/Makefile\n@@ -384,6 +384,16 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh\n \t\t/usr/include/assert.h \\\n \t\tdefine static_assert \\\n \t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_TC_ACT_TUNNEL_KEY \\\n+\t\tlinux/tc_act/tc_tunnel_key.h \\\n+\t\tdefine TCA_ACT_TUNNEL_KEY \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_TCA_TUNNEL_KEY_ENC_DST_PORT \\\n+\t\tlinux/tc_act/tc_tunnel_key.h \\\n+\t\tenum TCA_TUNNEL_KEY_ENC_DST_PORT \\\n+\t\t$(AUTOCONF_OUTPUT)\n \n # Create mlx5_autoconf.h or update it in case it differs from the new one.\n \ndiff --git a/drivers/net/mlx5/mlx5_nl_flow.c b/drivers/net/mlx5/mlx5_nl_flow.c\nindex 91ff90a13..672f92863 100644\n--- a/drivers/net/mlx5/mlx5_nl_flow.c\n+++ b/drivers/net/mlx5/mlx5_nl_flow.c\n@@ -6,7 +6,31 @@\n #include <assert.h>\n #include <errno.h>\n #include <libmnl/libmnl.h>\n+/*\n+ * Older versions of linux/if.h do not have the required safeties to coexist\n+ * with net/if.h. 
This causes a compilation failure due to symbol\n+ * redefinitions even when including the latter first.\n+ *\n+ * One workaround is to prevent net/if.h from defining conflicting symbols\n+ * by removing __USE_MISC, and maintaining it undefined while including\n+ * linux/if.h.\n+ *\n+ * Alphabetical order cannot be preserved since net/if.h must always be\n+ * included before linux/if.h regardless.\n+ */\n+#ifdef __USE_MISC\n+#undef __USE_MISC\n+#define RESTORE_USE_MISC\n+#endif\n+#include <net/if.h>\n+#include <linux/if.h>\n+#ifdef RESTORE_USE_MISC\n+#undef RESTORE_USE_MISC\n+#define __USE_MISC 1\n+#endif\n+#include <linux/if_arp.h>\n #include <linux/if_ether.h>\n+#include <linux/if_link.h>\n #include <linux/netlink.h>\n #include <linux/pkt_cls.h>\n #include <linux/pkt_sched.h>\n@@ -14,11 +38,13 @@\n #include <linux/tc_act/tc_gact.h>\n #include <linux/tc_act/tc_mirred.h>\n #include <netinet/in.h>\n+#include <pthread.h>\n #include <stdalign.h>\n #include <stdbool.h>\n #include <stddef.h>\n #include <stdint.h>\n #include <stdlib.h>\n+#include <sys/queue.h>\n #include <sys/socket.h>\n \n #include <rte_byteorder.h>\n@@ -52,6 +78,34 @@ struct tc_vlan {\n \n #endif /* HAVE_TC_ACT_VLAN */\n \n+#ifdef HAVE_TC_ACT_TUNNEL_KEY\n+\n+#include <linux/tc_act/tc_tunnel_key.h>\n+\n+#ifndef HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT\n+#define TCA_TUNNEL_KEY_ENC_DST_PORT 9\n+#endif\n+\n+#else /* HAVE_TC_ACT_TUNNEL_KEY */\n+\n+#define TCA_ACT_TUNNEL_KEY 17\n+#define TCA_TUNNEL_KEY_ACT_SET 1\n+#define TCA_TUNNEL_KEY_ACT_RELEASE 2\n+#define TCA_TUNNEL_KEY_PARMS 2\n+#define TCA_TUNNEL_KEY_ENC_IPV4_SRC 3\n+#define TCA_TUNNEL_KEY_ENC_IPV4_DST 4\n+#define TCA_TUNNEL_KEY_ENC_IPV6_SRC 5\n+#define TCA_TUNNEL_KEY_ENC_IPV6_DST 6\n+#define TCA_TUNNEL_KEY_ENC_KEY_ID 7\n+#define TCA_TUNNEL_KEY_ENC_DST_PORT 9\n+\n+struct tc_tunnel_key {\n+\ttc_gen;\n+\tint t_action;\n+};\n+\n+#endif /* HAVE_TC_ACT_TUNNEL_KEY */\n+\n /* Normally found in linux/netlink.h. 
*/\n #ifndef NETLINK_CAP_ACK\n #define NETLINK_CAP_ACK 10\n@@ -148,6 +202,71 @@ struct tc_vlan {\n #define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25\n #endif\n \n+#define BIT(b) (1 << (b))\n+#define BIT_ENCAP(e) BIT(MLX5_NL_FLOW_ENCAP_ ## e)\n+\n+/** Flags used for @p mask in struct mlx5_nl_flow_encap. */\n+enum mlx5_nl_flow_encap_flag {\n+\tMLX5_NL_FLOW_ENCAP_ETH_SRC,\n+\tMLX5_NL_FLOW_ENCAP_ETH_DST,\n+\tMLX5_NL_FLOW_ENCAP_IPV4_SRC,\n+\tMLX5_NL_FLOW_ENCAP_IPV4_DST,\n+\tMLX5_NL_FLOW_ENCAP_IPV6_SRC,\n+\tMLX5_NL_FLOW_ENCAP_IPV6_DST,\n+\tMLX5_NL_FLOW_ENCAP_UDP_SRC,\n+\tMLX5_NL_FLOW_ENCAP_UDP_DST,\n+\tMLX5_NL_FLOW_ENCAP_VXLAN_VNI,\n+};\n+\n+/** Encapsulation structure with fixed format for convenience. */\n+struct mlx5_nl_flow_encap {\n+\tuint32_t mask;\n+\tstruct {\n+\t\tstruct ether_addr src;\n+\t\tstruct ether_addr dst;\n+\t} eth;\n+\tstruct mlx5_nl_flow_encap_ip {\n+\t\tunion mlx5_nl_flow_encap_ip_addr {\n+\t\t\tstruct in_addr v4;\n+\t\t\tstruct in6_addr v6;\n+\t\t} src;\n+\t\tunion mlx5_nl_flow_encap_ip_addr dst;\n+\t} ip;\n+\tstruct {\n+\t\trte_be16_t src;\n+\t\trte_be16_t dst;\n+\t} udp;\n+\tstruct {\n+\t\trte_be32_t vni;\n+\t} vxlan;\n+};\n+\n+/** Generic address descriptor for encapsulation resources. */\n+struct mlx5_nl_flow_encap_addr {\n+\tLIST_ENTRY(mlx5_nl_flow_encap_addr) next;\n+\tuint32_t refcnt;\n+\tuint32_t mask;\n+\tstruct mlx5_nl_flow_encap_ip ip;\n+};\n+\n+/** VXLAN-specific encapsulation resources. */\n+struct mlx5_nl_flow_encap_vxlan {\n+\tLIST_ENTRY(mlx5_nl_flow_encap_vxlan) next;\n+\tuint32_t refcnt;\n+\trte_be16_t port;\n+\tunsigned int inner;\n+};\n+\n+/** Encapsulation interface descriptor. */\n+struct mlx5_nl_flow_encap_ifindex {\n+\tLIST_ENTRY(mlx5_nl_flow_encap_ifindex) next;\n+\tuint32_t refcnt;\n+\tunsigned int outer;\n+\tLIST_HEAD(, mlx5_nl_flow_encap_vxlan) vxlan;\n+\tLIST_HEAD(, mlx5_nl_flow_encap_addr) local;\n+\tLIST_HEAD(, mlx5_nl_flow_encap_addr) neigh;\n+};\n+\n /** Context object required by most functions. 
*/\n struct mlx5_nl_flow_ctx {\n \tint socket; /**< NUMA socket for memory allocations. */\n@@ -159,8 +278,10 @@ struct mlx5_nl_flow_ctx {\n struct mlx5_nl_flow {\n \tuint32_t size; /**< Size of this object. */\n \tuint32_t applied:1; /**< Whether rule is currently applied. */\n+\tunsigned int encap_ifindex; /**< Interface to use with @p encap. */\n \tunsigned int *ifindex_src; /**< Source interface. */\n \tunsigned int *ifindex_dst; /**< Destination interface. */\n+\tstruct mlx5_nl_flow_encap *encap; /**< Encapsulation properties. */\n \talignas(struct nlmsghdr)\n \tuint8_t msg[]; /**< Netlink message data. */\n };\n@@ -179,6 +300,7 @@ enum mlx5_nl_flow_trans {\n \tITEM_IPV6,\n \tITEM_TCP,\n \tITEM_UDP,\n+\tITEM_VXLAN,\n \tACTIONS,\n \tACTION_VOID,\n \tACTION_PORT_ID,\n@@ -187,6 +309,8 @@ enum mlx5_nl_flow_trans {\n \tACTION_OF_PUSH_VLAN,\n \tACTION_OF_SET_VLAN_VID,\n \tACTION_OF_SET_VLAN_PCP,\n+\tACTION_VXLAN_ENCAP,\n+\tACTION_VXLAN_DECAP,\n \tEND,\n };\n \n@@ -196,7 +320,8 @@ enum mlx5_nl_flow_trans {\n \tITEM_VOID, ITEM_PORT_ID, ACTIONS\n #define ACTIONS_COMMON \\\n \tACTION_VOID, ACTION_OF_POP_VLAN, ACTION_OF_PUSH_VLAN, \\\n-\tACTION_OF_SET_VLAN_VID, ACTION_OF_SET_VLAN_PCP\n+\tACTION_OF_SET_VLAN_VID, ACTION_OF_SET_VLAN_PCP, \\\n+\tACTION_VXLAN_ENCAP, ACTION_VXLAN_DECAP\n #define ACTIONS_FATE \\\n \tACTION_PORT_ID, ACTION_DROP\n \n@@ -213,7 +338,8 @@ static const enum mlx5_nl_flow_trans *const mlx5_nl_flow_trans[] = {\n \t[ITEM_IPV4] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON),\n \t[ITEM_IPV6] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON),\n \t[ITEM_TCP] = TRANS(PATTERN_COMMON),\n-\t[ITEM_UDP] = TRANS(PATTERN_COMMON),\n+\t[ITEM_UDP] = TRANS(ITEM_VXLAN, PATTERN_COMMON),\n+\t[ITEM_VXLAN] = TRANS(PATTERN_COMMON),\n \t[ACTIONS] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),\n \t[ACTION_VOID] = TRANS(BACK),\n \t[ACTION_PORT_ID] = TRANS(ACTION_VOID, END),\n@@ -222,6 +348,21 @@ static const enum mlx5_nl_flow_trans *const mlx5_nl_flow_trans[] = {\n \t[ACTION_OF_PUSH_VLAN] = 
TRANS(ACTIONS_FATE, ACTIONS_COMMON),\n \t[ACTION_OF_SET_VLAN_VID] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),\n \t[ACTION_OF_SET_VLAN_PCP] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),\n+\t[ACTION_VXLAN_ENCAP] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),\n+\t[ACTION_VXLAN_DECAP] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),\n+\t[END] = NULL,\n+};\n+\n+/** Parser state transitions used by mlx5_nl_flow_encap_reap(). */\n+static const enum mlx5_nl_flow_trans *const mlx5_nl_flow_encap_reap_trans[] = {\n+\t[INVALID] = NULL,\n+\t[BACK] = NULL,\n+\t[ITEM_VOID] = TRANS(BACK),\n+\t[ITEM_ETH] = TRANS(ITEM_IPV4, ITEM_IPV6, ITEM_VOID),\n+\t[ITEM_IPV4] = TRANS(ITEM_UDP, ITEM_VOID),\n+\t[ITEM_IPV6] = TRANS(ITEM_UDP, ITEM_VOID),\n+\t[ITEM_UDP] = TRANS(ITEM_VXLAN, ITEM_VOID),\n+\t[ITEM_VXLAN] = TRANS(END),\n \t[END] = NULL,\n };\n \n@@ -234,6 +375,7 @@ static const union {\n \tstruct rte_flow_item_ipv6 ipv6;\n \tstruct rte_flow_item_tcp tcp;\n \tstruct rte_flow_item_udp udp;\n+\tstruct rte_flow_item_vxlan vxlan;\n } mlx5_nl_flow_mask_empty;\n \n #define ETHER_ADDR_MASK \"\\xff\\xff\\xff\\xff\\xff\\xff\"\n@@ -242,6 +384,7 @@ static const union {\n \t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\" \\\n \t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"\n #define BE16_MASK RTE_BE16(0xffff)\n+#define VXLAN_VNI_MASK \"\\xff\\xff\\xff\"\n \n /** Supported masks for known item types. */\n static const struct {\n@@ -286,6 +429,35 @@ static const struct {\n \t},\n };\n \n+/** Supported masks for known encapsulation item types. 
*/\n+static const struct {\n+\tstruct rte_flow_item_eth eth;\n+\tstruct rte_flow_item_ipv4 ipv4;\n+\tstruct rte_flow_item_ipv6 ipv6;\n+\tstruct rte_flow_item_udp udp;\n+\tstruct rte_flow_item_vxlan vxlan;\n+} mlx5_nl_flow_encap_mask_supported = {\n+\t.eth = {\n+\t\t.dst.addr_bytes = ETHER_ADDR_MASK,\n+\t\t.src.addr_bytes = ETHER_ADDR_MASK,\n+\t},\n+\t.ipv4.hdr = {\n+\t\t.src_addr = IN_ADDR_MASK,\n+\t\t.dst_addr = IN_ADDR_MASK,\n+\t},\n+\t.ipv6.hdr = {\n+\t\t.src_addr = IN6_ADDR_MASK,\n+\t\t.dst_addr = IN6_ADDR_MASK,\n+\t},\n+\t.udp.hdr = {\n+\t\t.src_port = BE16_MASK,\n+\t\t.dst_port = BE16_MASK,\n+\t},\n+\t.vxlan = {\n+\t\t.vni = VXLAN_VNI_MASK,\n+\t},\n+};\n+\n /**\n  * Retrieve mask for pattern item.\n  *\n@@ -361,6 +533,227 @@ mlx5_nl_flow_item_mask(const struct rte_flow_item *item,\n }\n \n /**\n+ * Convert VXLAN VNI to 32-bit integer.\n+ *\n+ * @param[in] vni\n+ *   VXLAN VNI in 24-bit wire format.\n+ *\n+ * @return\n+ *   VXLAN VNI as a 32-bit integer value in network endian.\n+ */\n+static rte_be32_t\n+vxlan_vni_as_be32(const uint8_t vni[3])\n+{\n+\treturn (volatile union { uint8_t u8[4]; rte_be32_t u32; })\n+\t\t{ { 0, vni[0], vni[1], vni[2] } }.u32;\n+}\n+\n+/**\n+ * Populate consolidated encapsulation object from list of pattern items.\n+ *\n+ * Helper function to process configuration of generic actions such as\n+ * RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP.\n+ *\n+ * @param[out] dst\n+ *   Destination object.\n+ * @param[in] src\n+ *   List of pattern items to gather data from.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_nl_flow_encap_reap(struct mlx5_nl_flow_encap *dst,\n+\t\t\tconst struct rte_flow_item *src,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct mlx5_nl_flow_encap tmp = {\n+\t\t.mask = 0,\n+\t};\n+\tunsigned int n = 0;\n+\tconst enum mlx5_nl_flow_trans *trans = TRANS(ITEM_ETH);\n+\tconst enum 
mlx5_nl_flow_trans *back = trans;\n+\n+trans:\n+\tswitch (trans[n++]) {\n+\t\tunion {\n+\t\t\tconst struct rte_flow_item_eth *eth;\n+\t\t\tconst struct rte_flow_item_ipv4 *ipv4;\n+\t\t\tconst struct rte_flow_item_ipv6 *ipv6;\n+\t\t\tconst struct rte_flow_item_udp *udp;\n+\t\t\tconst struct rte_flow_item_vxlan *vxlan;\n+\t\t} spec, mask;\n+\n+\tdefault:\n+\tcase INVALID:\n+\t\tgoto error_encap;\n+\tcase BACK:\n+\t\ttrans = back;\n+\t\tn = 0;\n+\t\tgoto trans;\n+\tcase ITEM_VOID:\n+\t\tif (src->type != RTE_FLOW_ITEM_TYPE_VOID)\n+\t\t\tgoto trans;\n+\t\t++src;\n+\t\tbreak;\n+\tcase ITEM_ETH:\n+\t\tif (src->type != RTE_FLOW_ITEM_TYPE_ETH)\n+\t\t\tgoto trans;\n+\t\tmask.eth = mlx5_nl_flow_item_mask\n+\t\t\t(src, &rte_flow_item_eth_mask,\n+\t\t\t &mlx5_nl_flow_encap_mask_supported.eth,\n+\t\t\t &mlx5_nl_flow_mask_empty.eth,\n+\t\t\t sizeof(rte_flow_item_eth_mask), error);\n+\t\tif (!mask.eth)\n+\t\t\treturn -rte_errno;\n+\t\tif (mask.eth == &mlx5_nl_flow_mask_empty.eth)\n+\t\t\tgoto error_spec;\n+\t\tspec.eth = src->spec;\n+\t\tif (!is_zero_ether_addr(&mask.eth->src)) {\n+\t\t\tif (!is_broadcast_ether_addr(&mask.eth->src))\n+\t\t\t\tgoto error_mask;\n+\t\t\ttmp.eth.src = spec.eth->src;\n+\t\t\ttmp.mask |= BIT_ENCAP(ETH_SRC);\n+\t\t}\n+\t\tif (!is_zero_ether_addr(&mask.eth->dst)) {\n+\t\t\tif (!is_broadcast_ether_addr(&mask.eth->dst))\n+\t\t\t\tgoto error_mask;\n+\t\t\ttmp.eth.dst = spec.eth->dst;\n+\t\t\ttmp.mask |= BIT_ENCAP(ETH_DST);\n+\t\t}\n+\t\t++src;\n+\t\tbreak;\n+\tcase ITEM_IPV4:\n+\t\tif (src->type != RTE_FLOW_ITEM_TYPE_IPV4)\n+\t\t\tgoto trans;\n+\t\tmask.ipv4 = mlx5_nl_flow_item_mask\n+\t\t\t(src, &rte_flow_item_ipv4_mask,\n+\t\t\t &mlx5_nl_flow_encap_mask_supported.ipv4,\n+\t\t\t &mlx5_nl_flow_mask_empty.ipv4,\n+\t\t\t sizeof(rte_flow_item_ipv4_mask), error);\n+\t\tif (!mask.ipv4)\n+\t\t\treturn -rte_errno;\n+\t\tif (mask.ipv4 == &mlx5_nl_flow_mask_empty.ipv4)\n+\t\t\tgoto error_spec;\n+\t\tspec.ipv4 = src->spec;\n+\t\tif (mask.ipv4->hdr.src_addr) 
{\n+\t\t\tif (mask.ipv4->hdr.src_addr != IN_ADDR_MASK)\n+\t\t\t\tgoto error_mask;\n+\t\t\ttmp.ip.src.v4.s_addr = spec.ipv4->hdr.src_addr;\n+\t\t\ttmp.mask |= BIT_ENCAP(IPV4_SRC);\n+\t\t}\n+\t\tif (mask.ipv4->hdr.dst_addr) {\n+\t\t\tif (mask.ipv4->hdr.dst_addr != IN_ADDR_MASK)\n+\t\t\t\tgoto error_mask;\n+\t\t\ttmp.ip.dst.v4.s_addr = spec.ipv4->hdr.dst_addr;\n+\t\t\ttmp.mask |= BIT_ENCAP(IPV4_DST);\n+\t\t}\n+\t\t++src;\n+\t\tbreak;\n+\tcase ITEM_IPV6:\n+\t\tif (src->type != RTE_FLOW_ITEM_TYPE_IPV6)\n+\t\t\tgoto trans;\n+\t\tmask.ipv6 = mlx5_nl_flow_item_mask\n+\t\t\t(src, &rte_flow_item_ipv6_mask,\n+\t\t\t &mlx5_nl_flow_encap_mask_supported.ipv6,\n+\t\t\t &mlx5_nl_flow_mask_empty.ipv6,\n+\t\t\t sizeof(rte_flow_item_ipv6_mask), error);\n+\t\tif (!mask.ipv6)\n+\t\t\treturn -rte_errno;\n+\t\tif (mask.ipv6 == &mlx5_nl_flow_mask_empty.ipv6)\n+\t\t\tgoto error_spec;\n+\t\tspec.ipv6 = src->spec;\n+\t\tif (!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.src_addr)) {\n+\t\t\tif (memcmp(mask.ipv6->hdr.src_addr, IN6_ADDR_MASK, 16))\n+\t\t\t\tgoto error_mask;\n+\t\t\ttmp.ip.src.v6 =\t*(const struct in6_addr *)\n+\t\t\t\tspec.ipv6->hdr.src_addr;\n+\t\t\ttmp.mask |= BIT_ENCAP(IPV6_SRC);\n+\t\t}\n+\t\tif (!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.dst_addr)) {\n+\t\t\tif (memcmp(mask.ipv6->hdr.dst_addr, IN6_ADDR_MASK, 16))\n+\t\t\t\tgoto error_mask;\n+\t\t\ttmp.ip.dst.v6 =\t*(const struct in6_addr *)\n+\t\t\t\tspec.ipv6->hdr.dst_addr;\n+\t\t\ttmp.mask |= BIT_ENCAP(IPV6_DST);\n+\t\t}\n+\t\t++src;\n+\t\tbreak;\n+\tcase ITEM_UDP:\n+\t\tif (src->type != RTE_FLOW_ITEM_TYPE_UDP)\n+\t\t\tgoto trans;\n+\t\tmask.udp = mlx5_nl_flow_item_mask\n+\t\t\t(src, &rte_flow_item_udp_mask,\n+\t\t\t &mlx5_nl_flow_encap_mask_supported.udp,\n+\t\t\t &mlx5_nl_flow_mask_empty.udp,\n+\t\t\t sizeof(rte_flow_item_udp_mask), error);\n+\t\tif (!mask.udp)\n+\t\t\treturn -rte_errno;\n+\t\tif (mask.udp == &mlx5_nl_flow_mask_empty.udp)\n+\t\t\tgoto error_spec;\n+\t\tspec.udp = src->spec;\n+\t\tif (mask.udp->hdr.src_port) 
{\n+\t\t\tif (mask.udp->hdr.src_port != BE16_MASK)\n+\t\t\t\tgoto error_mask;\n+\t\t\ttmp.udp.src = spec.udp->hdr.src_port;\n+\t\t\ttmp.mask |= BIT_ENCAP(UDP_SRC);\n+\t\t}\n+\t\tif (mask.udp->hdr.dst_port) {\n+\t\t\tif (mask.udp->hdr.dst_port != BE16_MASK)\n+\t\t\t\tgoto error_mask;\n+\t\t\ttmp.udp.dst = spec.udp->hdr.dst_port;\n+\t\t\ttmp.mask |= BIT_ENCAP(UDP_DST);\n+\t\t}\n+\t\t++src;\n+\t\tbreak;\n+\tcase ITEM_VXLAN:\n+\t\tif (src->type != RTE_FLOW_ITEM_TYPE_VXLAN)\n+\t\t\tgoto trans;\n+\t\tmask.vxlan = mlx5_nl_flow_item_mask\n+\t\t\t(src, &rte_flow_item_vxlan_mask,\n+\t\t\t &mlx5_nl_flow_encap_mask_supported.vxlan,\n+\t\t\t &mlx5_nl_flow_mask_empty.vxlan,\n+\t\t\t sizeof(rte_flow_item_vxlan_mask), error);\n+\t\tif (!mask.vxlan)\n+\t\t\treturn -rte_errno;\n+\t\tif (mask.vxlan == &mlx5_nl_flow_mask_empty.vxlan)\n+\t\t\tgoto error_spec;\n+\t\tspec.vxlan = src->spec;\n+\t\tif (vxlan_vni_as_be32(mask.vxlan->vni)) {\n+\t\t\tif (memcmp(mask.vxlan->vni, VXLAN_VNI_MASK, 3))\n+\t\t\t\tgoto error_mask;\n+\t\t\ttmp.vxlan.vni = vxlan_vni_as_be32(spec.vxlan->vni);\n+\t\t\ttmp.mask |= BIT_ENCAP(VXLAN_VNI);\n+\t\t}\n+\t\t++src;\n+\t\tbreak;\n+\tcase END:\n+\t\tif (src->type != RTE_FLOW_ITEM_TYPE_END)\n+\t\t\tgoto trans;\n+\t\t*dst = tmp;\n+\t\treturn 0;\n+\t}\n+\tback = trans;\n+\ttrans = mlx5_nl_flow_encap_reap_trans[trans[n - 1]];\n+\tn = 0;\n+\tgoto trans;\n+error_encap:\n+\treturn rte_flow_error_set\n+\t\t(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,\n+\t\t \"unsupported encapsulation format\");\n+error_spec:\n+\treturn rte_flow_error_set\n+\t\t(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,\n+\t\t \"a specification structure is required for encapsulation\");\n+error_mask:\n+\treturn rte_flow_error_set\n+\t\t(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,\n+\t\t \"partial masks are not supported for encapsulation\");\n+}\n+\n+/**\n  * Transpose flow rule description to rtnetlink message.\n  *\n  * This function transposes a flow rule description to a traffic control\n@@ 
-412,6 +805,7 @@ mlx5_nl_flow_transpose(struct mlx5_nl_flow *nl_flow,\n \tbool vlan_present;\n \tbool vlan_eth_type_set;\n \tbool ip_proto_set;\n+\tstruct mlx5_nl_flow_encap encap;\n \tstruct nlattr *na_flower;\n \tstruct nlattr *na_flower_act;\n \tstruct nlattr *na_vlan_id;\n@@ -425,8 +819,10 @@ mlx5_nl_flow_transpose(struct mlx5_nl_flow *nl_flow,\n \t\tgoto error_nobufs;\n \tnl_flow->size = offsetof(struct mlx5_nl_flow, msg);\n \tnl_flow->applied = 0;\n+\tnl_flow->encap_ifindex = 0;\n \tnl_flow->ifindex_src = NULL;\n \tnl_flow->ifindex_dst = NULL;\n+\tnl_flow->encap = NULL;\n \tsize -= nl_flow->size;\n \titem = pattern;\n \taction = actions;\n@@ -437,6 +833,7 @@ mlx5_nl_flow_transpose(struct mlx5_nl_flow *nl_flow,\n \tvlan_present = false;\n \tvlan_eth_type_set = false;\n \tip_proto_set = false;\n+\tmemset(&encap, 0, sizeof(encap));\n \tna_flower = NULL;\n \tna_flower_act = NULL;\n \tna_vlan_id = NULL;\n@@ -461,6 +858,7 @@ mlx5_nl_flow_transpose(struct mlx5_nl_flow *nl_flow,\n \t\t\t\tof_set_vlan_vid;\n \t\t\tconst struct rte_flow_action_of_set_vlan_pcp *\n \t\t\t\tof_set_vlan_pcp;\n+\t\t\tconst struct rte_flow_action_vxlan_encap *vxlan_encap;\n \t\t} conf;\n \t\tstruct nlmsghdr *nlh;\n \t\tstruct tcmsg *tcm;\n@@ -887,6 +1285,12 @@ mlx5_nl_flow_transpose(struct mlx5_nl_flow *nl_flow,\n \t\t\tgoto error_nobufs;\n \t\t++item;\n \t\tbreak;\n+\tcase ITEM_VXLAN:\n+\t\tif (item->type != RTE_FLOW_ITEM_TYPE_VXLAN)\n+\t\t\tgoto trans;\n+\t\treturn rte_flow_error_set\n+\t\t\t(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t \"VXLAN header matching is not supported yet\");\n \tcase ACTIONS:\n \t\tif (item->type != RTE_FLOW_ITEM_TYPE_END)\n \t\t\tgoto trans;\n@@ -1042,6 +1446,77 @@ mlx5_nl_flow_transpose(struct mlx5_nl_flow *nl_flow,\n \t\t}\n \t\t++action;\n \t\tbreak;\n+\tcase ACTION_VXLAN_ENCAP:\n+\t\tif (action->type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)\n+\t\t\tgoto trans;\n+\t\tconf.vxlan_encap = action->conf;\n+\t\tif 
(mlx5_nl_flow_encap_reap(&encap,\n+\t\t\t\t\t    conf.vxlan_encap->definition,\n+\t\t\t\t\t    error))\n+\t\t\treturn -rte_errno;\n+\t\tact_index =\n+\t\t\tmnl_attr_nest_start_check(buf, size, act_index_cur++);\n+\t\tif (!act_index ||\n+\t\t    !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND,\n+\t\t\t\t\t     \"tunnel_key\"))\n+\t\t\tgoto error_nobufs;\n+\t\tact = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);\n+\t\tif (!act)\n+\t\t\tgoto error_nobufs;\n+\t\tif (!mnl_attr_put_check(buf, size, TCA_TUNNEL_KEY_PARMS,\n+\t\t\t\t\tsizeof(struct tc_tunnel_key),\n+\t\t\t\t\t&(struct tc_tunnel_key){\n+\t\t\t\t\t\t.action = TC_ACT_PIPE,\n+\t\t\t\t\t\t.t_action =\n+\t\t\t\t\t\t\tTCA_TUNNEL_KEY_ACT_SET,\n+\t\t\t\t\t}))\n+\t\t\tgoto error_nobufs;\n+\t\tif (encap.mask & BIT_ENCAP(IPV4_SRC) &&\n+\t\t    !mnl_attr_put_u32_check\n+\t\t    (buf, size, TCA_TUNNEL_KEY_ENC_IPV4_SRC,\n+\t\t     encap.ip.src.v4.s_addr))\n+\t\t\tgoto error_nobufs;\n+\t\tif (encap.mask & BIT_ENCAP(IPV4_DST) &&\n+\t\t    !mnl_attr_put_u32_check\n+\t\t    (buf, size, TCA_TUNNEL_KEY_ENC_IPV4_DST,\n+\t\t     encap.ip.dst.v4.s_addr))\n+\t\t\tgoto error_nobufs;\n+\t\tif (encap.mask & BIT_ENCAP(IPV6_SRC) &&\n+\t\t    !mnl_attr_put_check\n+\t\t    (buf, size, TCA_TUNNEL_KEY_ENC_IPV6_SRC,\n+\t\t     sizeof(encap.ip.src.v6), &encap.ip.src.v6))\n+\t\t\tgoto error_nobufs;\n+\t\tif (encap.mask & BIT_ENCAP(IPV6_DST) &&\n+\t\t    !mnl_attr_put_check\n+\t\t    (buf, size, TCA_TUNNEL_KEY_ENC_IPV6_DST,\n+\t\t     sizeof(encap.ip.dst.v6), &encap.ip.dst.v6))\n+\t\t\tgoto error_nobufs;\n+\t\tif (encap.mask & BIT_ENCAP(UDP_SRC) &&\n+\t\t    nl_flow != (void *)buf_tmp)\n+\t\t\tDRV_LOG(WARNING,\n+\t\t\t\t\"UDP source port cannot be forced\"\n+\t\t\t\t\" for VXLAN encap; parameter ignored\");\n+\t\tif (encap.mask & BIT_ENCAP(UDP_DST) &&\n+\t\t    !mnl_attr_put_u16_check\n+\t\t    (buf, size, TCA_TUNNEL_KEY_ENC_DST_PORT, encap.udp.dst))\n+\t\t\tgoto error_nobufs;\n+\t\tif (!(encap.mask & 
BIT_ENCAP(VXLAN_VNI)))\n+\t\t\treturn rte_flow_error_set\n+\t\t\t\t(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,\n+\t\t\t\t conf.vxlan_encap, \"VXLAN VNI is missing\");\n+\t\tif (!mnl_attr_put_u32_check\n+\t\t    (buf, size, TCA_TUNNEL_KEY_ENC_KEY_ID, encap.vxlan.vni))\n+\t\t\tgoto error_nobufs;\n+\t\tmnl_attr_nest_end(buf, act);\n+\t\tmnl_attr_nest_end(buf, act_index);\n+\t\t++action;\n+\t\tbreak;\n+\tcase ACTION_VXLAN_DECAP:\n+\t\tif (action->type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP)\n+\t\t\tgoto trans;\n+\t\treturn rte_flow_error_set\n+\t\t\t(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action,\n+\t\t\t \"VXLAN decap is not supported yet\");\n \tcase END:\n \t\tif (item->type != RTE_FLOW_ITEM_TYPE_END ||\n \t\t    action->type != RTE_FLOW_ACTION_TYPE_END)\n@@ -1054,6 +1529,21 @@ mlx5_nl_flow_transpose(struct mlx5_nl_flow *nl_flow,\n \t\tbuf = NULL;\n \t\tsize -= nlh->nlmsg_len;\n \t\tnl_flow->size += nlh->nlmsg_len;\n+\t\tif (!encap.mask)\n+\t\t\treturn nl_flow->size;\n+\t\ti = RTE_ALIGN_CEIL(nl_flow->size,\n+\t\t\t\t   alignof(struct mlx5_nl_flow_encap));\n+\t\ti -= nl_flow->size;\n+\t\tif (size < i + sizeof(encap))\n+\t\t\tgoto error_nobufs;\n+\t\tnl_flow->size += i;\n+\t\tbuf = (void *)((uintptr_t)nl_flow + nl_flow->size);\n+\t\tsize -= i;\n+\t\tnl_flow->encap = buf;\n+\t\t*nl_flow->encap = encap;\n+\t\tbuf = NULL;\n+\t\tsize -= sizeof(*nl_flow->encap);\n+\t\tnl_flow->size += sizeof(*nl_flow->encap);\n \t\treturn nl_flow->size;\n \t}\n \tback = trans;\n@@ -1151,6 +1641,671 @@ mlx5_nl_flow_chat(struct mlx5_nl_flow_ctx *ctx, struct nlmsghdr *nlh,\n \treturn -err;\n }\n \n+/** Data structure used by mlx5_nl_flow_init_vxlan_cb(). */\n+struct mlx5_nl_flow_init_vxlan_data {\n+\tunsigned int ifindex; /**< Base interface index. */\n+\trte_be16_t vxlan_port; /**< Remote UDP port. */\n+\tunsigned int *collect; /**< Collected interfaces. */\n+\tunsigned int collect_n; /**< Number of collected interfaces. 
*/\n+};\n+\n+/**\n+ * Collect indices of VXLAN encap/decap interfaces associated with device.\n+ *\n+ * @param nlh\n+ *   Pointer to reply header.\n+ * @param arg\n+ *   Opaque data pointer for this callback.\n+ *\n+ * @return\n+ *   A positive, nonzero value on success, negative errno value otherwise\n+ *   and rte_errno is set.\n+ */\n+static int\n+mlx5_nl_flow_init_vxlan_cb(const struct nlmsghdr *nlh, void *arg)\n+{\n+\tstruct mlx5_nl_flow_init_vxlan_data *data = arg;\n+\tstruct ifinfomsg *ifm;\n+\tstruct nlattr *na;\n+\tstruct nlattr *na_info = NULL;\n+\tstruct nlattr *na_vxlan = NULL;\n+\tstruct nlattr *na_vxlan_port = NULL;\n+\tbool found = false;\n+\tunsigned int *collect;\n+\n+\tif (nlh->nlmsg_type != RTM_NEWLINK)\n+\t\tgoto error_inval;\n+\tifm = mnl_nlmsg_get_payload(nlh);\n+\tmnl_attr_for_each(na, nlh, sizeof(*ifm))\n+\t\tif (mnl_attr_get_type(na) == IFLA_LINKINFO) {\n+\t\t\tna_info = na;\n+\t\t\tbreak;\n+\t\t}\n+\tif (!na_info)\n+\t\treturn 1;\n+\tmnl_attr_for_each_nested(na, na_info) {\n+\t\tswitch (mnl_attr_get_type(na)) {\n+\t\tcase IFLA_INFO_KIND:\n+\t\t\tif (!strncmp(\"vxlan\", mnl_attr_get_str(na),\n+\t\t\t\t     mnl_attr_get_len(na)))\n+\t\t\t\tfound = true;\n+\t\t\tbreak;\n+\t\tcase IFLA_INFO_DATA:\n+\t\t\tna_vxlan = na;\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (found && na_vxlan)\n+\t\t\tbreak;\n+\t}\n+\tif (!found || !na_vxlan)\n+\t\treturn 1;\n+\tfound = false;\n+\tmnl_attr_for_each_nested(na, na_vxlan) {\n+\t\tswitch (mnl_attr_get_type(na)) {\n+\t\tcase IFLA_VXLAN_LINK:\n+\t\t\tif (mnl_attr_get_u32(na) == data->ifindex)\n+\t\t\t\tfound = true;\n+\t\t\tbreak;\n+\t\tcase IFLA_VXLAN_PORT:\n+\t\t\tna_vxlan_port = na;\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (found && na_vxlan_port)\n+\t\t\tbreak;\n+\t}\n+\tif (!found ||\n+\t    (na_vxlan_port &&\n+\t     mnl_attr_get_u16(na_vxlan_port) != data->vxlan_port))\n+\t\treturn 1;\n+\tif (!ifm->ifi_index)\n+\t\tgoto error_inval;\n+\tcollect = realloc(data->collect,\n+\t\t\t  (data->collect_n + 1) * 
sizeof(*data->collect));\n+\tif (!collect) {\n+\t\trte_errno = errno;\n+\t\treturn -rte_errno;\n+\t}\n+\tcollect[data->collect_n] = ifm->ifi_index;\n+\tdata->collect = collect;\n+\tdata->collect_n += 1;\n+\treturn 1;\n+error_inval:\n+\trte_errno = EINVAL;\n+\treturn -rte_errno;\n+}\n+\n+/**\n+ * Clean up and generate VXLAN encap/decap interface.\n+ *\n+ * @param ctx\n+ *   Context object initialized by mlx5_nl_flow_ctx_create().\n+ * @param ifindex\n+ *   Network interface index to associate VXLAN encap/decap with.\n+ * @param vxlan_port\n+ *   Remote UDP port.\n+ * @param enable\n+ *   If disabled, stop after initial clean up.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL.\n+ *\n+ * @return\n+ *   Interface index on success, zero otherwise and rte_errno is set.\n+ *\n+ *   If @p enable is set, the returned ifindex is that of the new VXLAN\n+ *   interface, otherwise @p ifindex is simply returned as is.\n+ */\n+static unsigned int\n+mlx5_nl_flow_ifindex_vxlan(struct mlx5_nl_flow_ctx *ctx, unsigned int ifindex,\n+\t\t\t   rte_be16_t vxlan_port, int enable,\n+\t\t\t   struct rte_flow_error *error)\n+{\n+\tstruct nlmsghdr *nlh;\n+\tstruct ifinfomsg *ifm;\n+\talignas(struct nlmsghdr)\n+\tuint8_t buf[mnl_nlmsg_size(sizeof(*ifm) + 256)];\n+\tunsigned int ifindex_vxlan = 0;\n+\tstruct mlx5_nl_flow_init_vxlan_data data = {\n+\t\t.ifindex = ifindex,\n+\t\t.vxlan_port = vxlan_port,\n+\t\t.collect = NULL,\n+\t\t.collect_n = 0,\n+\t};\n+\tchar name[IF_NAMESIZE];\n+\tstruct nlattr *na_info;\n+\tstruct nlattr *na_vxlan;\n+\tunsigned int i;\n+\tint ret;\n+\n+\tif (!ifindex) {\n+\t\tret = -EINVAL;\n+\t\tgoto exit;\n+\t}\n+\t/*\n+\t * Seek and destroy leftover VXLAN encap/decap interfaces with\n+\t * matching properties.\n+\t */\n+\tnlh = mnl_nlmsg_put_header(buf);\n+\tnlh->nlmsg_type = RTM_GETLINK;\n+\tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;\n+\tifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));\n+\tifm->ifi_family = AF_UNSPEC;\n+\tret = 
mlx5_nl_flow_chat(ctx, nlh, mlx5_nl_flow_init_vxlan_cb, &data);\n+\tif (ret)\n+\t\tgoto exit;\n+\tnlh->nlmsg_type = RTM_DELLINK;\n+\tnlh->nlmsg_flags = NLM_F_REQUEST;\n+\tfor (i = 0; i != data.collect_n; ++i) {\n+\t\tifm->ifi_index = data.collect[i];\n+\t\tDRV_LOG(DEBUG, \"cleaning up VXLAN encap/decap ifindex %u\",\n+\t\t\tifm->ifi_index);\n+\t\tret = mlx5_nl_flow_chat(ctx, nlh, NULL, NULL);\n+\t\tif (ret)\n+\t\t\tgoto exit;\n+\t}\n+\tif (!enable)\n+\t\treturn ifindex;\n+\t/* Add fresh VXLAN encap/decap interface. */\n+\tnlh->nlmsg_type = RTM_NEWLINK;\n+\tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE;\n+\tifm->ifi_type = ARPHRD_ETHER;\n+\tifm->ifi_index = 0;\n+\tifm->ifi_flags = IFF_UP;\n+\tifm->ifi_change = 0xffffffff;\n+\tif (snprintf(name, sizeof(name), \"vxlan_%u_%u\",\n+\t\t     rte_be_to_cpu_16(vxlan_port), ifindex) == -1) {\n+\t\tret = -errno;\n+\t\tgoto exit;\n+\t}\n+\tret = -ENOBUFS;\n+\tif (!mnl_attr_put_strz_check(nlh, sizeof(buf), IFLA_IFNAME, name))\n+\t\tgoto exit;\n+\tna_info = mnl_attr_nest_start_check(nlh, sizeof(buf), IFLA_LINKINFO);\n+\tif (!na_info)\n+\t\tgoto exit;\n+\tif (!mnl_attr_put_strz_check(nlh, sizeof(buf), IFLA_INFO_KIND, \"vxlan\"))\n+\t\tgoto exit;\n+\tna_vxlan = mnl_attr_nest_start_check(nlh, sizeof(buf), IFLA_INFO_DATA);\n+\tif (!na_vxlan)\n+\t\tgoto exit;\n+\tif (!mnl_attr_put_u32_check(nlh, sizeof(buf), IFLA_VXLAN_LINK, ifindex))\n+\t\tgoto exit;\n+\tif (!mnl_attr_put_u8_check(nlh, sizeof(buf),\n+\t\t\t\t   IFLA_VXLAN_COLLECT_METADATA, 1))\n+\t\tgoto exit;\n+\t/*\n+\t * When destination port or VNI are either undefined or set to fixed\n+\t * values, kernel complains with EEXIST (\"A VXLAN device with the\n+\t * specified VNI already exist\") when creating subsequent VXLAN\n+\t * interfaces with the same properties, even if linked with\n+\t * different physical devices.\n+\t *\n+\t * Also since only destination ports assigned to existing VXLAN\n+\t * interfaces can be offloaded to the switch, the above 
limitation\n+\t * cannot be worked around by picking a random value here and using\n+\t * a different one when creating flow rules later.\n+\t *\n+\t * Therefore request a hopefully unique VNI based on the interface\n+\t * index in order to work around EEXIST. VNI will be overridden\n+\t * later on a flow rule basis thanks to IFLA_VXLAN_COLLECT_METADATA.\n+\t */\n+\tif (!mnl_attr_put_u16_check(nlh, sizeof(buf), IFLA_VXLAN_PORT,\n+\t\t\t\t    vxlan_port))\n+\t\tgoto exit;\n+\tif (!mnl_attr_put_u32_check(nlh, sizeof(buf), IFLA_VXLAN_ID, ifindex))\n+\t\tgoto exit;\n+\tmnl_attr_nest_end(nlh, na_vxlan);\n+\tmnl_attr_nest_end(nlh, na_info);\n+\tret = mlx5_nl_flow_chat(ctx, nlh, NULL, NULL);\n+\tif (ret)\n+\t\tgoto exit;\n+\t/* Lastly, retrieve its ifindex value. */\n+\tnlh->nlmsg_type = RTM_GETLINK;\n+\tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;\n+\tdata.collect_n = 0;\n+\tret = mlx5_nl_flow_chat(ctx, nlh, mlx5_nl_flow_init_vxlan_cb, &data);\n+\tif (ret)\n+\t\tgoto exit;\n+\tret = -ENXIO;\n+\tif (data.collect_n != 1 || !*data.collect)\n+\t\tgoto exit;\n+\tifindex_vxlan = *data.collect;\n+\tDRV_LOG(DEBUG, \"created VXLAN encap/decap ifindex %u (%s)\",\n+\t\tifindex_vxlan, name);\n+\tret = mlx5_nl_flow_ifindex_init(ctx, ifindex_vxlan, error);\n+\tif (ret) {\n+\t\tmlx5_nl_flow_ifindex_vxlan(ctx, ifindex_vxlan, vxlan_port,\n+\t\t\t\t\t   false, NULL);\n+\t\tifindex_vxlan = 0;\n+\t\tgoto exit;\n+\t}\n+\tret = 0;\n+exit:\n+\tfree(data.collect);\n+\tif (ret)\n+\t\trte_flow_error_set\n+\t\t\t(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t \"netlink: failed to request VXLAN encap/decap\"\n+\t\t\t \" interface creation/deletion\");\n+\treturn ifindex_vxlan;\n+}\n+\n+/**\n+ * Emit Netlink message to add/remove local address.\n+ *\n+ * Note that an implicit route is maintained by the kernel due to the\n+ * presence of a peer address (IFA_ADDRESS).\n+ *\n+ * @param ctx\n+ *   Context object initialized by mlx5_nl_flow_ctx_create().\n+ * @param[in] encap\n+ *   
Encapsulation properties (source address).\n+ * @param ifindex\n+ *   Network interface.\n+ * @param enable\n+ *   Toggle between add and remove.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_nl_flow_encap_local(struct mlx5_nl_flow_ctx *ctx,\n+\t\t\t const struct mlx5_nl_flow_encap *encap,\n+\t\t\t unsigned int ifindex,\n+\t\t\t bool enable,\n+\t\t\t struct rte_flow_error *error)\n+{\n+\tstruct nlmsghdr *nlh;\n+\tstruct ifaddrmsg *ifa;\n+\talignas(struct nlmsghdr)\n+\tuint8_t buf[mnl_nlmsg_size(sizeof(*ifa) + 128)];\n+\n+\tnlh = mnl_nlmsg_put_header(buf);\n+\tnlh->nlmsg_type = enable ? RTM_NEWADDR : RTM_DELADDR;\n+\tnlh->nlmsg_flags =\n+\t\tNLM_F_REQUEST | (enable ? NLM_F_CREATE | NLM_F_REPLACE : 0);\n+\tnlh->nlmsg_seq = 0;\n+\tifa = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifa));\n+\tif (encap->mask & BIT_ENCAP(IPV4_SRC)) {\n+\t\tifa->ifa_family = AF_INET;\n+\t\tifa->ifa_prefixlen = 32;\n+\t} else if (encap->mask & BIT_ENCAP(IPV6_SRC)) {\n+\t\tifa->ifa_family = AF_INET6;\n+\t\tifa->ifa_prefixlen = 128;\n+\t} else {\n+\t\tifa->ifa_family = AF_UNSPEC;\n+\t\tifa->ifa_prefixlen = 0;\n+\t}\n+\tifa->ifa_flags = IFA_F_PERMANENT;\n+\tifa->ifa_scope = RT_SCOPE_LINK;\n+\tifa->ifa_index = ifindex;\n+\tif (encap->mask & BIT_ENCAP(IPV4_SRC) &&\n+\t    !mnl_attr_put_u32_check(nlh, sizeof(buf), IFA_LOCAL,\n+\t\t\t\t    encap->ip.src.v4.s_addr))\n+\t\tgoto error_nobufs;\n+\tif (encap->mask & BIT_ENCAP(IPV6_SRC) &&\n+\t    !mnl_attr_put_check(nlh, sizeof(buf), IFA_LOCAL,\n+\t\t\t\tsizeof(encap->ip.src.v6), &encap->ip.src.v6))\n+\t\tgoto error_nobufs;\n+\tif (encap->mask & BIT_ENCAP(IPV4_DST) &&\n+\t    !mnl_attr_put_u32_check(nlh, sizeof(buf), IFA_ADDRESS,\n+\t\t\t\t    encap->ip.dst.v4.s_addr))\n+\t\tgoto error_nobufs;\n+\tif (encap->mask & BIT_ENCAP(IPV6_DST) &&\n+\t    !mnl_attr_put_check(nlh, sizeof(buf), 
IFA_ADDRESS,\n+\t\t\t\tsizeof(encap->ip.dst.v6), &encap->ip.dst.v6))\n+\t\tgoto error_nobufs;\n+\tif (!mlx5_nl_flow_chat(ctx, nlh, NULL, NULL))\n+\t\treturn 0;\n+\treturn rte_flow_error_set\n+\t\t(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t \"cannot complete IFA request\");\n+error_nobufs:\n+\treturn rte_flow_error_set\n+\t\t(error, ENOBUFS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t \"generated IFA message is too large\");\n+}\n+\n+/**\n+ * Emit Netlink message to add/remove neighbor.\n+ *\n+ * @param ctx\n+ *   Context object initialized by mlx5_nl_flow_ctx_create().\n+ * @param[in] encap\n+ *   Encapsulation properties (destination address).\n+ * @param ifindex\n+ *   Network interface.\n+ * @param enable\n+ *   Toggle between add and remove.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_nl_flow_encap_neigh(struct mlx5_nl_flow_ctx *ctx,\n+\t\t\t const struct mlx5_nl_flow_encap *encap,\n+\t\t\t unsigned int ifindex,\n+\t\t\t bool enable,\n+\t\t\t struct rte_flow_error *error)\n+{\n+\tstruct nlmsghdr *nlh;\n+\tstruct ndmsg *ndm;\n+\talignas(struct nlmsghdr)\n+\tuint8_t buf[mnl_nlmsg_size(sizeof(*ndm) + 128)];\n+\n+\tnlh = mnl_nlmsg_put_header(buf);\n+\tnlh->nlmsg_type = enable ? RTM_NEWNEIGH : RTM_DELNEIGH;\n+\tnlh->nlmsg_flags =\n+\t\tNLM_F_REQUEST | (enable ? 
NLM_F_CREATE | NLM_F_REPLACE : 0);\n+\tnlh->nlmsg_seq = 0;\n+\tndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));\n+\tif (encap->mask & BIT_ENCAP(IPV4_DST))\n+\t\tndm->ndm_family = AF_INET;\n+\telse if (encap->mask & BIT_ENCAP(IPV6_DST))\n+\t\tndm->ndm_family = AF_INET6;\n+\telse\n+\t\tndm->ndm_family = AF_UNSPEC;\n+\tndm->ndm_ifindex = ifindex;\n+\tndm->ndm_state = NUD_PERMANENT;\n+\tndm->ndm_flags = 0;\n+\tndm->ndm_type = 0;\n+\tif (encap->mask & BIT_ENCAP(IPV4_DST) &&\n+\t    !mnl_attr_put_u32_check(nlh, sizeof(buf), NDA_DST,\n+\t\t\t\t    encap->ip.dst.v4.s_addr))\n+\t\tgoto error_nobufs;\n+\tif (encap->mask & BIT_ENCAP(IPV6_DST) &&\n+\t    !mnl_attr_put_check(nlh, sizeof(buf), NDA_DST,\n+\t\t\t\tsizeof(encap->ip.dst.v6), &encap->ip.dst.v6))\n+\t\tgoto error_nobufs;\n+\tif (encap->mask & BIT_ENCAP(ETH_SRC) && enable)\n+\t\tDRV_LOG(WARNING,\n+\t\t\t\"Ethernet source address cannot be forced\"\n+\t\t\t\" for VXLAN encap; parameter ignored\");\n+\tif (encap->mask & BIT_ENCAP(ETH_DST) &&\n+\t    !mnl_attr_put_check(nlh, sizeof(buf), NDA_LLADDR,\n+\t\t\t\tsizeof(encap->eth.dst), &encap->eth.dst))\n+\t\tgoto error_nobufs;\n+\tif (!mlx5_nl_flow_chat(ctx, nlh, NULL, NULL))\n+\t\treturn 0;\n+\treturn rte_flow_error_set\n+\t\t(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t \"cannot complete ND request\");\n+error_nobufs:\n+\treturn rte_flow_error_set\n+\t\t(error, ENOBUFS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t \"generated ND message is too large\");\n+}\n+\n+/**\n+ * Look for matching IP source/destination properties.\n+ *\n+ * @param[in] bag\n+ *   Search target.\n+ * @param bag_mask\n+ *   Bit-mask for valid fields in @p bag.\n+ * @param[in] what\n+ *   Properties to look for in @p bag.\n+ * @param what_mask\n+ *   Bit-mask for valid fields in @p what.\n+ *\n+ * @return\n+ *   True if @p what is found in @p bag, false otherwise.\n+ */\n+static bool\n+mlx5_nl_flow_encap_ip_search(const struct mlx5_nl_flow_encap_ip *bag,\n+\t\t\t     uint32_t 
bag_mask,\n+\t\t\t     const struct mlx5_nl_flow_encap_ip *what,\n+\t\t\t     uint32_t what_mask)\n+{\n+\tif ((what_mask & BIT_ENCAP(IPV4_SRC) &&\n+\t     (!(bag_mask & BIT_ENCAP(IPV4_SRC)) ||\n+\t      bag->src.v4.s_addr != what->src.v4.s_addr)) ||\n+\t    (what_mask & BIT_ENCAP(IPV4_DST) &&\n+\t     (!(bag_mask & BIT_ENCAP(IPV4_DST)) ||\n+\t      bag->dst.v4.s_addr != what->dst.v4.s_addr)) ||\n+\t    (what_mask & BIT_ENCAP(IPV6_SRC) &&\n+\t     (!(bag_mask & BIT_ENCAP(IPV6_SRC)) ||\n+\t      memcmp(&bag->src.v6, &what->src.v6, sizeof(bag->src.v6)))) ||\n+\t    (what_mask & BIT_ENCAP(IPV6_DST) &&\n+\t     (!(bag_mask & BIT_ENCAP(IPV6_DST)) ||\n+\t      memcmp(&bag->dst.v6, &what->dst.v6, sizeof(bag->dst.v6)))))\n+\t\treturn false;\n+\treturn true;\n+}\n+\n+/**\n+ * Interface resources list common to all driver instances of a given\n+ * process. It is protected by a standard mutex because resource allocation\n+ * is slow and involves system calls.\n+ */\n+static LIST_HEAD(, mlx5_nl_flow_encap_ifindex) mlx5_nl_flow_encap_ifindex_list =\n+\tLIST_HEAD_INITIALIZER();\n+static pthread_mutex_t mlx5_nl_flow_encap_ifindex_list_lock =\n+\tPTHREAD_MUTEX_INITIALIZER;\n+\n+/**\n+ * Retrieve target interface index for encapsulation.\n+ *\n+ * Resources are automatically allocated and released as necessary.\n+ *\n+ * @param ctx\n+ *   Context object initialized by mlx5_nl_flow_ctx_create().\n+ * @param[in] encap\n+ *   Encapsulation properties.\n+ * @param ifindex\n+ *   Outer network interface.\n+ * @param enable\n+ *   Toggle whether resources are allocated or released.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL.\n+ *\n+ * @return\n+ *   Interface index on success, zero otherwise and rte_errno is set.\n+ *\n+ *   If @p enable is set, the returned ifindex is that of the inner\n+ *   interface, otherwise @p ifindex is simply returned as is.\n+ */\n+static unsigned int\n+mlx5_nl_flow_encap_ifindex(struct mlx5_nl_flow_ctx *ctx,\n+\t\t\t   const 
struct mlx5_nl_flow_encap *encap,\n+\t\t\t   unsigned int ifindex,\n+\t\t\t   bool enable,\n+\t\t\t   struct rte_flow_error *error)\n+{\n+\tstruct mlx5_nl_flow_encap_ifindex *encap_ifindex = NULL;\n+\tstruct mlx5_nl_flow_encap_vxlan *encap_vxlan = NULL;\n+\tstruct mlx5_nl_flow_encap_addr *encap_local = NULL;\n+\tstruct mlx5_nl_flow_encap_addr *encap_neigh = NULL;\n+\tunsigned int ifindex_inner = ifindex;\n+\tint ret;\n+\n+\tpthread_mutex_lock(&mlx5_nl_flow_encap_ifindex_list_lock);\n+\t/* Interface descriptor. */\n+\tLIST_FOREACH(encap_ifindex, &mlx5_nl_flow_encap_ifindex_list, next) {\n+\t\tif (encap_ifindex->outer != ifindex)\n+\t\t\tcontinue;\n+\t\tif (enable)\n+\t\t\t++encap_ifindex->refcnt;\n+\t\tbreak;\n+\t}\n+\tif (enable && !encap_ifindex) {\n+\t\tencap_ifindex =\n+\t\t\trte_zmalloc_socket(__func__, sizeof(*encap_ifindex),\n+\t\t\t\t\t   0, ctx->socket);\n+\t\tif (!encap_ifindex) {\n+\t\t\trte_flow_error_set\n+\t\t\t\t(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t NULL, \"missing ifindex encap data\");\n+\t\t\tgoto release;\n+\t\t}\n+\t\t*encap_ifindex = (struct mlx5_nl_flow_encap_ifindex){\n+\t\t\t.refcnt = 1,\n+\t\t\t.outer = ifindex,\n+\t\t\t.vxlan = LIST_HEAD_INITIALIZER(),\n+\t\t\t.local = LIST_HEAD_INITIALIZER(),\n+\t\t\t.neigh = LIST_HEAD_INITIALIZER(),\n+\t\t};\n+\t\tLIST_INSERT_HEAD(&mlx5_nl_flow_encap_ifindex_list,\n+\t\t\t\t encap_ifindex, next);\n+\t}\n+\tif (!encap_ifindex) {\n+\t\tif (!enable)\n+\t\t\tgoto release;\n+\t\trte_flow_error_set\n+\t\t\t(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t \"nonexistent interface\");\n+\t\tgoto release;\n+\t}\n+\t/* VXLAN descriptor. 
*/\n+\tif (!(encap->mask & BIT_ENCAP(VXLAN_VNI)) ||\n+\t    !(encap->mask & BIT_ENCAP(UDP_SRC)))\n+\t\tgoto skip_vxlan;\n+\tLIST_FOREACH(encap_vxlan, &encap_ifindex->vxlan, next) {\n+\t\tif (encap->udp.src != encap_vxlan->port)\n+\t\t\tcontinue;\n+\t\tif (enable)\n+\t\t\t++encap_vxlan->refcnt;\n+\t\tbreak;\n+\t}\n+\tif (enable && !encap_vxlan) {\n+\t\tencap_vxlan =\n+\t\t\trte_zmalloc_socket(__func__, sizeof(*encap_vxlan),\n+\t\t\t\t\t   0, ctx->socket);\n+\t\tif (!encap_vxlan) {\n+\t\t\trte_flow_error_set\n+\t\t\t\t(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t NULL, \"missing VXLAN encap data\");\n+\t\t\tgoto release;\n+\t\t}\n+\t\t*encap_vxlan = (struct mlx5_nl_flow_encap_vxlan){\n+\t\t\t.refcnt = 1,\n+\t\t\t.port = encap->udp.src,\n+\t\t\t.inner = mlx5_nl_flow_ifindex_vxlan\n+\t\t\t\t(ctx, ifindex, encap->udp.src, true, error),\n+\t\t};\n+\t\tif (!encap_vxlan->inner) {\n+\t\t\trte_free(encap_vxlan);\n+\t\t\tencap_vxlan = NULL;\n+\t\t\tgoto release;\n+\t\t}\n+\t\tLIST_INSERT_HEAD(&encap_ifindex->vxlan, encap_vxlan, next);\n+\t}\n+\tifindex_inner = encap_vxlan->inner;\n+skip_vxlan:\n+\t/* Local address descriptor (source). 
*/\n+\tLIST_FOREACH(encap_local, &encap_ifindex->local, next) {\n+\t\tif (!mlx5_nl_flow_encap_ip_search\n+\t\t    (&encap->ip, encap->mask,\n+\t\t     &encap_local->ip, encap_local->mask &\n+\t\t     (BIT_ENCAP(IPV4_SRC) | BIT_ENCAP(IPV6_SRC))))\n+\t\t\tcontinue;\n+\t\tif (enable)\n+\t\t\t++encap_local->refcnt;\n+\t\tbreak;\n+\t}\n+\tif (enable && !encap_local &&\n+\t    encap->mask & (BIT_ENCAP(IPV4_SRC) | BIT_ENCAP(IPV6_SRC))) {\n+\t\tencap_local =\n+\t\t\trte_zmalloc_socket(__func__, sizeof(*encap_local),\n+\t\t\t\t\t   0, ctx->socket);\n+\t\tif (!encap_local) {\n+\t\t\trte_flow_error_set\n+\t\t\t\t(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t NULL, \"missing local encap data\");\n+\t\t\tgoto release;\n+\t\t}\n+\t\tencap_local->refcnt = 1;\n+\t\tencap_local->mask =\n+\t\t\tencap->mask &\n+\t\t\t(BIT_ENCAP(IPV4_SRC) | BIT_ENCAP(IPV6_SRC));\n+\t\tif (encap->mask & BIT_ENCAP(IPV4_SRC))\n+\t\t\tencap_local->ip.src.v4 = encap->ip.src.v4;\n+\t\tif (encap->mask & BIT_ENCAP(IPV6_SRC))\n+\t\t\tencap_local->ip.src.v6 = encap->ip.src.v6;\n+\t\tret = mlx5_nl_flow_encap_local(ctx, encap, ifindex, true,\n+\t\t\t\t\t       error);\n+\t\tif (ret) {\n+\t\t\trte_free(encap_local);\n+\t\t\tencap_local = NULL;\n+\t\t\tgoto release;\n+\t\t}\n+\t\tLIST_INSERT_HEAD(&encap_ifindex->local, encap_local, next);\n+\t}\n+\t/* Neighbor descriptor (destination). 
*/\n+\tLIST_FOREACH(encap_neigh, &encap_ifindex->neigh, next) {\n+\t\tif (!mlx5_nl_flow_encap_ip_search\n+\t\t    (&encap->ip, encap->mask,\n+\t\t     &encap_local->ip, encap_local->mask &\n+\t\t     (BIT_ENCAP(IPV4_DST) | BIT_ENCAP(IPV6_DST))))\n+\t\t\tcontinue;\n+\t\tif (enable)\n+\t\t\t++encap_neigh->refcnt;\n+\t\tbreak;\n+\t}\n+\tif (enable && !encap_neigh &&\n+\t    encap->mask & (BIT_ENCAP(IPV4_DST) | BIT_ENCAP(IPV6_DST))) {\n+\t\tencap_neigh =\n+\t\t\trte_zmalloc_socket(__func__, sizeof(*encap_neigh),\n+\t\t\t\t\t   0, ctx->socket);\n+\t\tif (!encap_neigh) {\n+\t\t\trte_flow_error_set\n+\t\t\t\t(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t NULL, \"missing neigh encap data\");\n+\t\t\tgoto release;\n+\t\t}\n+\t\tencap_neigh->refcnt = 1;\n+\t\tencap_neigh->mask =\n+\t\t\tencap->mask &\n+\t\t\t(BIT_ENCAP(IPV4_DST) | BIT_ENCAP(IPV6_DST));\n+\t\tif (encap->mask & BIT_ENCAP(IPV4_DST))\n+\t\t\tencap_neigh->ip.dst.v4 = encap->ip.dst.v4;\n+\t\tif (encap->mask & BIT_ENCAP(IPV6_DST))\n+\t\t\tencap_neigh->ip.dst.v6 = encap->ip.dst.v6;\n+\t\tret = mlx5_nl_flow_encap_neigh(ctx, encap, ifindex, true,\n+\t\t\t\t\t       error);\n+\t\tif (ret) {\n+\t\t\trte_free(encap_neigh);\n+\t\t\tencap_neigh = NULL;\n+\t\t\tgoto release;\n+\t\t}\n+\t\tLIST_INSERT_HEAD(&encap_ifindex->neigh, encap_neigh, next);\n+\t}\n+\tif (!enable)\n+\t\tgoto release;\n+\tpthread_mutex_unlock(&mlx5_nl_flow_encap_ifindex_list_lock);\n+\treturn ifindex_inner;\n+release:\n+\tret = rte_errno;\n+\tif (encap_neigh && !--encap_neigh->refcnt) {\n+\t\tLIST_REMOVE(encap_neigh, next);\n+\t\tmlx5_nl_flow_encap_neigh(ctx, encap, ifindex, false, NULL);\n+\t\trte_free(encap_neigh);\n+\t}\n+\tif (encap_local && !--encap_local->refcnt) {\n+\t\tLIST_REMOVE(encap_local, next);\n+\t\tmlx5_nl_flow_encap_local(ctx, encap, ifindex, false, NULL);\n+\t\trte_free(encap_local);\n+\t}\n+\tif (encap_vxlan && !--encap_vxlan->refcnt) {\n+\t\tLIST_REMOVE(encap_vxlan, next);\n+\t\tmlx5_nl_flow_ifindex_vxlan\n+\t\t\t(ctx, 
ifindex, encap_vxlan->port, false, NULL);\n+\t\trte_free(encap_vxlan);\n+\t}\n+\tif (encap_ifindex && !--encap_ifindex->refcnt) {\n+\t\tLIST_REMOVE(encap_ifindex, next);\n+\t\trte_free(encap_ifindex);\n+\t}\n+\tpthread_mutex_unlock(&mlx5_nl_flow_encap_ifindex_list_lock);\n+\tif (!enable)\n+\t\treturn ifindex;\n+\trte_errno = ret;\n+\treturn 0;\n+}\n+\n /**\n  * Create a Netlink flow rule.\n  *\n@@ -1169,17 +2324,35 @@ mlx5_nl_flow_create(struct mlx5_nl_flow_ctx *ctx, struct mlx5_nl_flow *nl_flow,\n \t\t    struct rte_flow_error *error)\n {\n \tstruct nlmsghdr *nlh = (void *)nl_flow->msg;\n+\tstruct mlx5_nl_flow_encap *encap =\n+\t\tnl_flow->encap && nl_flow->ifindex_dst ?\n+\t\tnl_flow->encap : NULL;\n+\tunsigned int ifindex = encap ? *nl_flow->ifindex_dst : 0;\n+\tint ret;\n \n \tif (nl_flow->applied)\n \t\treturn 0;\n \tnlh->nlmsg_type = RTM_NEWTFILTER;\n \tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;\n-\tif (!mlx5_nl_flow_chat(ctx, nlh, NULL, NULL)) {\n+\tif (encap) {\n+\t\tnl_flow->encap_ifindex = mlx5_nl_flow_encap_ifindex\n+\t\t\t(ctx, encap, ifindex, true, error);\n+\t\tif (!nl_flow->encap_ifindex)\n+\t\t\treturn -rte_errno;\n+\t\t*nl_flow->ifindex_dst = nl_flow->encap_ifindex;\n+\t}\n+\tret = mlx5_nl_flow_chat(ctx, nlh, NULL, NULL);\n+\tif (encap)\n+\t\t*nl_flow->ifindex_dst = ifindex;\n+\tif (!ret) {\n \t\tnl_flow->applied = 1;\n \t\treturn 0;\n \t}\n+\tret = rte_errno;\n+\tif (nl_flow->encap_ifindex)\n+\t\tmlx5_nl_flow_encap_ifindex(ctx, encap, ifindex, false, NULL);\n \treturn rte_flow_error_set\n-\t\t(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n \t\t \"netlink: failed to create TC flow rule\");\n }\n \n@@ -1204,14 +2377,31 @@ mlx5_nl_flow_destroy(struct mlx5_nl_flow_ctx *ctx, struct mlx5_nl_flow *nl_flow,\n \t\t     struct rte_flow_error *error)\n {\n \tstruct nlmsghdr *nlh = (void *)nl_flow->msg;\n+\tstruct mlx5_nl_flow_encap *encap =\n+\t\tnl_flow->encap && 
nl_flow->ifindex_dst ?\n+\t\tnl_flow->encap : NULL;\n+\tunsigned int ifindex = encap ? *nl_flow->ifindex_dst : 0;\n+\tint err = 0;\n \tint ret;\n \n \tif (!nl_flow->applied)\n \t\treturn 0;\n \tnlh->nlmsg_type = RTM_DELTFILTER;\n \tnlh->nlmsg_flags = NLM_F_REQUEST;\n+\tif (encap) {\n+\t\tif (!mlx5_nl_flow_encap_ifindex\n+\t\t    (ctx, encap, ifindex, false, error))\n+\t\t\terr = rte_errno;\n+\t\t*nl_flow->ifindex_dst = nl_flow->encap_ifindex;\n+\t}\n \tret = mlx5_nl_flow_chat(ctx, nlh, NULL, NULL);\n+\tif (encap)\n+\t\t*nl_flow->ifindex_dst = ifindex;\n \tnl_flow->applied = 0;\n+\tif (err) {\n+\t\trte_errno = err;\n+\t\treturn -rte_errno;\n+\t}\n \tif (!ret)\n \t\treturn 0;\n \treturn rte_flow_error_set\n",
    "prefixes": [
        "7/8"
    ]
}