get:
Show a patch.

patch:
Update a patch (partial update: only the supplied fields are changed).

put:
Update a patch (full update).
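
For reference, a minimal sketch of fetching this resource programmatically, assuming Python with the third-party "requests" library; the endpoint, patch ID, and field names are taken from the response reproduced below, and the get_patch helper is purely illustrative:

import requests

BASE = "http://patches.dpdk.org/api"

def get_patch(patch_id):
    # GET /api/patches/{id}/ returns the JSON document shown below.
    resp = requests.get(f"{BASE}/patches/{patch_id}/")
    resp.raise_for_status()
    return resp.json()

patch = get_patch(53767)
print(patch["name"])   # "[2/2] net/mlx5: remove TCF support from PMD"
print(patch["state"])  # "superseded"
print(patch["mbox"])   # mbox download URL, suitable for applying with git am

Updating a patch via PUT or PATCH normally requires an authenticated API token; the read-only GET above does not.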

GET /api/patches/53767/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 53767,
    "url": "http://patches.dpdk.org/api/patches/53767/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1559042301-73445-3-git-send-email-motih@mellanox.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1559042301-73445-3-git-send-email-motih@mellanox.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1559042301-73445-3-git-send-email-motih@mellanox.com",
    "date": "2019-05-28T11:18:46",
    "name": "[2/2] net/mlx5: remove TCF support from PMD",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a962df12ee864950cb00187bfb97b4f5fee8d933",
    "submitter": {
        "id": 748,
        "url": "http://patches.dpdk.org/api/people/748/?format=api",
        "name": "Moti Haimovsky",
        "email": "motih@mellanox.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1559042301-73445-3-git-send-email-motih@mellanox.com/mbox/",
    "series": [
        {
            "id": 4793,
            "url": "http://patches.dpdk.org/api/series/4793/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4793",
            "date": "2019-05-28T11:18:44",
            "name": "net/mlx5: remove TCF support from PMD",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/4793/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/53767/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/53767/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 5B1781B950;\n\tTue, 28 May 2019 15:12:51 +0200 (CEST)",
            "from EUR02-HE1-obe.outbound.protection.outlook.com\n\t(mail-eopbgr10086.outbound.protection.outlook.com [40.107.1.86])\n\tby dpdk.org (Postfix) with ESMTP id B80081B947\n\tfor <dev@dpdk.org>; Tue, 28 May 2019 13:18:53 +0200 (CEST)",
            "from AM0PR05MB4435.eurprd05.prod.outlook.com (52.134.95.151) by\n\tAM0PR05MB5633.eurprd05.prod.outlook.com (20.178.115.210) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n\t15.20.1922.20; Tue, 28 May 2019 11:18:47 +0000",
            "from AM0PR05MB4435.eurprd05.prod.outlook.com\n\t([fe80::c40a:d6fe:1246:7278]) by\n\tAM0PR05MB4435.eurprd05.prod.outlook.com\n\t([fe80::c40a:d6fe:1246:7278%4]) with mapi id 15.20.1922.021;\n\tTue, 28 May 2019 11:18:47 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Mellanox.com;\n\ts=selector2;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n\tbh=dA7OxLpv9iBUcK+MELQ6zx9e5W7NjIxzDQHC7mGQpXA=;\n\tb=C/F7frwTN2e5+mv0O+hZd8N0eBbeBb5MG2wyfrYbqaJKV2dfe6b1notjG4JSvZTnJifOWcuvFf3ZXa+SI7JmAAMfiOlS1Q9fe0N1IQ04Am4Zk+CUw+rtgqgqb3b+uOdIS6BNkK4ugNJC1lqwsrPNEOEcbu8xF8l08MnVtFsJMFw=",
        "From": "Moti Haimovsky <motih@mellanox.com>",
        "To": "Shahaf Shuler <shahafs@mellanox.com>",
        "CC": "\"dev@dpdk.org\" <dev@dpdk.org>, Moti Haimovsky <motih@mellanox.com>",
        "Thread-Topic": "[PATCH 2/2] net/mlx5: remove TCF support from PMD",
        "Thread-Index": "AQHVFUcegYTR5RpaUUi+g1viy5zPxA==",
        "Date": "Tue, 28 May 2019 11:18:46 +0000",
        "Message-ID": "<1559042301-73445-3-git-send-email-motih@mellanox.com>",
        "References": "<1559042301-73445-1-git-send-email-motih@mellanox.com>",
        "In-Reply-To": "<1559042301-73445-1-git-send-email-motih@mellanox.com>",
        "Accept-Language": "he-IL, en-US",
        "Content-Language": "en-US",
        "X-MS-Has-Attach": "",
        "X-MS-TNEF-Correlator": "",
        "x-clientproxiedby": "LO2P265CA0318.GBRP265.PROD.OUTLOOK.COM\n\t(2603:10a6:600:a4::18) To AM0PR05MB4435.eurprd05.prod.outlook.com\n\t(2603:10a6:208:61::23)",
        "authentication-results": "spf=none (sender IP is )\n\tsmtp.mailfrom=motih@mellanox.com; ",
        "x-ms-exchange-messagesentrepresentingtype": "1",
        "x-mailer": "git-send-email 1.8.3.1",
        "x-originating-ip": "[37.142.13.130]",
        "x-ms-publictraffictype": "Email",
        "x-ms-office365-filtering-correlation-id": "94ca1267-9be5-45f2-8417-08d6e35e403c",
        "x-ms-office365-filtering-ht": "Tenant",
        "x-microsoft-antispam": "BCL:0; PCL:0;\n\tRULEID:(2390118)(7020095)(4652040)(8989299)(4534185)(4627221)(201703031133081)(201702281549075)(8990200)(5600148)(711020)(4605104)(1401327)(4618075)(2017052603328)(7193020);\n\tSRVR:AM0PR05MB5633; ",
        "x-ms-traffictypediagnostic": "AM0PR05MB5633:",
        "x-microsoft-antispam-prvs": "<AM0PR05MB563395F9B26ACFCD527FD3D0D21E0@AM0PR05MB5633.eurprd05.prod.outlook.com>",
        "x-ms-oob-tlc-oobclassifiers": "OLM:67;",
        "x-forefront-prvs": "00514A2FE6",
        "x-forefront-antispam-report": "SFV:NSPM;\n\tSFS:(10009020)(366004)(376002)(346002)(396003)(39860400002)(136003)(52314003)(199004)(189003)(8676002)(186003)(8936002)(37006003)(26005)(81166006)(81156014)(107886003)(5660300002)(3846002)(6116002)(53946003)(54906003)(50226002)(4326008)(36756003)(305945005)(6436002)(478600001)(53936002)(86362001)(14454004)(30864003)(6862004)(25786009)(6512007)(6486002)(486006)(316002)(2616005)(476003)(2906002)(71200400001)(256004)(5024004)(14444005)(4720700003)(6636002)(386003)(52116002)(76176011)(68736007)(102836004)(99286004)(6506007)(66476007)(73956011)(66946007)(7736002)(66556008)(64756008)(66446008)(66066001)(11346002)(446003)(71190400001)(21314003)(569006);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:AM0PR05MB5633;\n\tH:AM0PR05MB4435.eurprd05.prod.outlook.com; FPR:; SPF:None; LANG:en;\n\tPTR:InfoNoRecords; A:1; MX:1; ",
        "received-spf": "None (protection.outlook.com: mellanox.com does not designate\n\tpermitted sender hosts)",
        "x-ms-exchange-senderadcheck": "1",
        "x-microsoft-antispam-message-info": "rZKqQnVdb8pV1c8zSYJ6ooYYRTqNN/Ed4cGsg0Hm/h9i0UXCIOjF8uudx3z7s/WfYWnR0DDjyvWOL0ykm1cr5toWKUnn6IM7lJJkE/zM4m3WPt/OSl8kQQRxnt81eQSBr+b7px5b+Zf4rquNuz74XBl1IefgwPrSVXQ93HzwgYpwyBn6OZHHS2l198gocu8AM1SolqKjyTwNdhK6BHVMPDnom7p1aJBZBqX79GYpoD+w8mmpX6wwbwa+ibhMHseXkUcTayzItZYAkzIeuJfyNdvltM3BGzqJpNdszGLi6dJ36iDqmbWjErZuAgK7KOVmIjyqj2mXxPfY375ZEDgT/6ww3u/2UXzRpFrhj5wZrWWQJLWPXLOa2M+tfBE1sp1th32+Kgw0t0DzRS/nurMepT2voL6iS+IZderc6jjWfE0=",
        "Content-Type": "text/plain; charset=\"utf-8\"",
        "Content-Transfer-Encoding": "base64",
        "MIME-Version": "1.0",
        "X-OriginatorOrg": "Mellanox.com",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "94ca1267-9be5-45f2-8417-08d6e35e403c",
        "X-MS-Exchange-CrossTenant-originalarrivaltime": "28 May 2019 11:18:46.8749\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-fromentityheader": "Hosted",
        "X-MS-Exchange-CrossTenant-id": "a652971c-7d2e-4d9b-a6a4-d149256f461b",
        "X-MS-Exchange-CrossTenant-mailboxtype": "HOSTED",
        "X-MS-Exchange-CrossTenant-userprincipalname": "motih@mellanox.com",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "AM0PR05MB5633",
        "X-Mailman-Approved-At": "Tue, 28 May 2019 15:12:47 +0200",
        "Subject": "[dpdk-dev] [PATCH 2/2] net/mlx5: remove TCF support from PMD",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This commit removes the support of configuring the device E-switch\nusing TCF since it is now possible to configure it via DR (direct\nverbs rules), and by that to also remove the PMD dependency in libmnl.\n\nSigned-off-by: Moti Haimovsky <motih@mellanox.com>\n---\n doc/build-sdk-meson.txt           |    2 +-\n doc/guides/nics/mlx5.rst          |   19 -\n doc/guides/platform/bluefield.rst |    4 -\n drivers/net/mlx5/Makefile         |  303 --\n drivers/net/mlx5/meson.build      |  123 +-\n drivers/net/mlx5/mlx5.c           |   32 -\n drivers/net/mlx5/mlx5.h           |    3 -\n drivers/net/mlx5/mlx5_flow.c      |   14 +-\n drivers/net/mlx5/mlx5_flow.h      |   25 -\n drivers/net/mlx5/mlx5_flow_tcf.c  | 6382 -------------------------------------\n mk/rte.app.mk                     |    2 +-\n 11 files changed, 9 insertions(+), 6900 deletions(-)\n delete mode 100644 drivers/net/mlx5/mlx5_flow_tcf.c",
    "diff": "diff --git a/doc/build-sdk-meson.txt b/doc/build-sdk-meson.txt\r\nindex 7b80244..981f88e 100644\r\n--- a/doc/build-sdk-meson.txt\r\n+++ b/doc/build-sdk-meson.txt\r\n@@ -207,5 +207,5 @@ From examples/helloworld/Makefile::\r\n NOTE: for --static builds, DPDK needs to be built with Meson >= 0.46 in order to\r\n fully generate the list of private dependencies. If DPDK is built with an older\r\n version of Meson, it might be necessary to manually specify dependencies of DPDK\r\n-PMDs/libraries, for example -lmlx5 -lmnl for librte-pmd-mlx5, or the static link\r\n+PMDs/libraries, for example -lmlx5 for librte-pmd-mlx5, or the static link\r\n step might fail.\r\ndiff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst\r\nindex 5176aa8..1311863 100644\r\n--- a/doc/guides/nics/mlx5.rst\r\n+++ b/doc/guides/nics/mlx5.rst\r\n@@ -544,12 +544,6 @@ DPDK and must be installed separately:\r\n   This library basically implements send/receive calls to the hardware\r\n   queues.\r\n \r\n-- **libmnl**\r\n-\r\n-  Minimalistic Netlink library mainly relied on to manage E-Switch flow\r\n-  rules (i.e. those with the \"transfer\" attribute and typically involving\r\n-  port representors).\r\n-\r\n - **Kernel modules**\r\n \r\n   They provide the kernel-side Verbs API and low level device drivers that\r\n@@ -645,19 +639,6 @@ required from that distribution.\r\n    this DPDK release was developed and tested against is strongly\r\n    recommended. Please check the `prerequisites`_.\r\n \r\n-Libmnl\r\n-^^^^^^\r\n-\r\n-Minimal version for libmnl is **1.0.3**.\r\n-\r\n-As a dependency of the **iproute2** suite, this library is often installed\r\n-by default. It is otherwise readily available through standard system\r\n-packages.\r\n-\r\n-Its development headers must be installed in order to compile this PMD.\r\n-These packages are usually named **libmnl-dev** or **libmnl-devel**\r\n-depending on the Linux distribution.\r\n-\r\n Supported NICs\r\n --------------\r\n \r\ndiff --git a/doc/guides/platform/bluefield.rst b/doc/guides/platform/bluefield.rst\r\nindex 0bb58e5..1ff9eb6 100644\r\n--- a/doc/guides/platform/bluefield.rst\r\n+++ b/doc/guides/platform/bluefield.rst\r\n@@ -84,7 +84,6 @@ toolchain for ARM64. 
Base on that, additional header files and libraries are\r\n required:\r\n \r\n    - libibverbs\r\n-   - libmnl\r\n    - libmlx5\r\n    - libnl-3\r\n    - libnl-route-3\r\n@@ -105,19 +104,16 @@ tarball for the cross toolchain.\r\n         # Copy libraries\r\n         mkdir -p lib64\r\n         cp -a /lib64/libibverbs* lib64/\r\n-        cp -a /lib64/libmnl* lib64/\r\n         cp -a /lib64/libmlx5* lib64/\r\n         cp -a /lib64/libnl-3* lib64/\r\n         cp -a /lib64/libnl-route-3* lib64/\r\n \r\n         # Copy header files\r\n         mkdir -p usr/include/infiniband\r\n-        mkdir -p usr/include/libmnl\r\n         cp -a /usr/include/infiniband/ib_user_ioctl_verbs.h usr/include/infiniband/\r\n         cp -a /usr/include/infiniband/mlx5*.h usr/include/infiniband/\r\n         cp -a /usr/include/infiniband/tm_types.h usr/include/infiniband/\r\n         cp -a /usr/include/infiniband/verbs*.h usr/include/infiniband/\r\n-        cp -a /usr/include/libmnl/libmnl.h usr/include/libmnl/\r\n \r\n         # Create supplementary tarball\r\n         popd\r\ndiff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile\r\nindex 2694916..619e6b6 100644\r\n--- a/drivers/net/mlx5/Makefile\r\n+++ b/drivers/net/mlx5/Makefile\r\n@@ -32,7 +32,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c\r\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c\r\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c\r\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_dv.c\r\n-SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_tcf.c\r\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_verbs.c\r\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mp.c\r\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c\r\n@@ -52,7 +51,6 @@ CFLAGS += -D_DEFAULT_SOURCE\r\n CFLAGS += -D_XOPEN_SOURCE=600\r\n CFLAGS += $(WERROR_FLAGS)\r\n CFLAGS += -Wno-strict-prototypes\r\n-CFLAGS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --cflags libmnl)\r\n ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\r\n CFLAGS += -DMLX5_GLUE='\"$(LIB_GLUE)\"'\r\n CFLAGS += -DMLX5_GLUE_VERSION='\"$(LIB_GLUE_VERSION)\"'\r\n@@ -63,7 +61,6 @@ LDLIBS += $(shell $(RTE_SDK)/buildtools/options-ibverbs-static.sh)\r\n else\r\n LDLIBS += -libverbs -lmlx5\r\n endif\r\n-LDLIBS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --libs libmnl || echo \"-lmnl\")\r\n LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring\r\n LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs\r\n LDLIBS += -lrte_bus_pci\r\n@@ -256,306 +253,6 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh\r\n \t\tenum IFLA_PHYS_PORT_NAME \\\r\n \t\t$(AUTOCONF_OUTPUT)\r\n \t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_IFLA_VXLAN_COLLECT_METADATA \\\r\n-\t\tlinux/if_link.h \\\r\n-\t\tenum IFLA_VXLAN_COLLECT_METADATA \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_CHAIN \\\r\n-\t\tlinux/rtnetlink.h \\\r\n-\t\tenum TCA_CHAIN \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_ACT \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_ACT \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_FLAGS \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_FLAGS \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ETH_TYPE \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ETH_TYPE \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ETH_DST \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ETH_DST \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' 
'$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ETH_DST_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ETH_DST_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ETH_SRC \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ETH_SRC \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ETH_SRC_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ETH_SRC_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IP_PROTO \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IP_PROTO \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IPV4_SRC \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IPV4_SRC \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IPV4_SRC_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IPV4_DST \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IPV4_DST \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IPV4_DST_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IPV4_DST_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IPV6_SRC \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IPV6_SRC \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IPV6_SRC_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IPV6_DST \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IPV6_DST \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IPV6_DST_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IPV6_DST_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_TCP_SRC \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_TCP_SRC \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_TCP_SRC_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_TCP_SRC_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_TCP_DST \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_TCP_DST \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_TCP_DST_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_TCP_DST_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_UDP_SRC \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_UDP_SRC \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_UDP_SRC_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_UDP_SRC_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_UDP_DST \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_UDP_DST \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_UDP_DST_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_UDP_DST_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_VLAN_ID \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_VLAN_ID \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_VLAN_PRIO \\\r\n-\t\tlinux/pkt_cls.h 
\\\r\n-\t\tenum TCA_FLOWER_KEY_VLAN_PRIO \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_VLAN_ETH_TYPE \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_TCP_FLAGS \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_TCP_FLAGS \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_TCP_FLAGS_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IP_TOS \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IP_TOS \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IP_TOS_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IP_TOS_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IP_TTL \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IP_TTL \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_IP_TTL_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_IP_TTL_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TC_ACT_GOTO_CHAIN \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tdefine TC_ACT_GOTO_CHAIN \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TC_ACT_VLAN \\\r\n-\t\tlinux/tc_act/tc_vlan.h \\\r\n-\t\tenum TCA_VLAN_PUSH_VLAN_PRIORITY \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_KEY_ID \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_KEY_ID \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IPV4_SRC \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IPV4_DST \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IPV4_DST \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IPV4_DST_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IPV6_SRC \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IPV6_DST \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IPV6_DST \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IPV6_DST_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_UDP_SRC_PORT \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT 
\\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_UDP_DST_PORT \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IP_TOS \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IP_TOS \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IP_TOS_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IP_TOS_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IP_TTL \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IP_TTL \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_FLOWER_KEY_ENC_IP_TTL_MASK \\\r\n-\t\tlinux/pkt_cls.h \\\r\n-\t\tenum TCA_FLOWER_KEY_ENC_IP_TTL_MASK \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TC_ACT_TUNNEL_KEY \\\r\n-\t\tlinux/tc_act/tc_tunnel_key.h \\\r\n-\t\tdefine TCA_ACT_TUNNEL_KEY \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_TUNNEL_KEY_ENC_DST_PORT \\\r\n-\t\tlinux/tc_act/tc_tunnel_key.h \\\r\n-\t\tenum TCA_TUNNEL_KEY_ENC_DST_PORT \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_TUNNEL_KEY_ENC_TOS \\\r\n-\t\tlinux/tc_act/tc_tunnel_key.h \\\r\n-\t\tenum TCA_TUNNEL_KEY_ENC_TOS \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_TUNNEL_KEY_ENC_TTL \\\r\n-\t\tlinux/tc_act/tc_tunnel_key.h \\\r\n-\t\tenum TCA_TUNNEL_KEY_ENC_TTL \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TCA_TUNNEL_KEY_NO_CSUM \\\r\n-\t\tlinux/tc_act/tc_tunnel_key.h \\\r\n-\t\tenum TCA_TUNNEL_KEY_NO_CSUM \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n-\t\tHAVE_TC_ACT_PEDIT \\\r\n-\t\tlinux/tc_act/tc_pedit.h \\\r\n-\t\tenum TCA_PEDIT_KEY_EX_HDR_TYPE_UDP \\\r\n-\t\t$(AUTOCONF_OUTPUT)\r\n-\t$Q sh -- '$<' '$@' \\\r\n \t\tHAVE_SUPPORTED_40000baseKR4_Full \\\r\n \t\t/usr/include/linux/ethtool.h \\\r\n \t\tdefine SUPPORTED_40000baseKR4_Full \\\r\ndiff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build\r\nindex ac3b529..4dc5ddd 100644\r\n--- a/drivers/net/mlx5/meson.build\r\n+++ b/drivers/net/mlx5/meson.build\r\n@@ -13,7 +13,7 @@ if pmd_dlopen\r\n \t\t'-DMLX5_GLUE_VERSION=\"@0@\"'.format(LIB_GLUE_VERSION),\r\n \t]\r\n endif\r\n-libnames = [ 'mnl', 'mlx5', 'ibverbs' ]\r\n+libnames = [ 'mlx5', 'ibverbs' ]\r\n libs = []\r\n build = true\r\n foreach libname:libnames\r\n@@ -35,7 +35,6 @@ if build\r\n \t\t'mlx5_ethdev.c',\r\n \t\t'mlx5_flow.c',\r\n \t\t'mlx5_flow_dv.c',\r\n-\t\t'mlx5_flow_tcf.c',\r\n \t\t'mlx5_flow_verbs.c',\r\n \t\t'mlx5_mac.c',\r\n \t\t'mlx5_mr.c',\r\n@@ -148,126 +147,6 @@ if build\r\n \t\t'IFLA_PHYS_SWITCH_ID' ],\r\n \t\t[ 'HAVE_IFLA_PHYS_PORT_NAME', 'linux/if_link.h',\r\n \t\t'IFLA_PHYS_PORT_NAME' ],\r\n-\t\t[ 'HAVE_IFLA_VXLAN_COLLECT_METADATA', 'linux/if_link.h',\r\n-\t\t'IFLA_VXLAN_COLLECT_METADATA' ],\r\n-\t\t[ 'HAVE_TCA_CHAIN', 'linux/rtnetlink.h',\r\n-\t\t'TCA_CHAIN' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_ACT', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_ACT' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_FLAGS', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_FLAGS' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ETH_TYPE', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ETH_TYPE' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ETH_DST', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ETH_DST' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ETH_DST_MASK', 
'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ETH_DST_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ETH_SRC', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ETH_SRC' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ETH_SRC_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IP_PROTO', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IP_PROTO' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IPV4_SRC', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IPV4_SRC' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IPV4_SRC_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IPV4_DST', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IPV4_DST' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IPV4_DST_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IPV6_SRC', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IPV6_SRC' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IPV6_SRC_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IPV6_DST', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IPV6_DST' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IPV6_DST_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_TCP_SRC', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_TCP_SRC' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_TCP_SRC_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_TCP_DST', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_TCP_DST' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_TCP_DST_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_TCP_DST_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_UDP_SRC', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_UDP_SRC' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_UDP_SRC_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_UDP_DST', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_UDP_DST' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_UDP_DST_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_UDP_DST_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_VLAN_ID', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_VLAN_ID' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_VLAN_PRIO', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_VLAN_PRIO' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_VLAN_ETH_TYPE' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_TCP_FLAGS', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_TCP_FLAGS' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_TCP_FLAGS_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IP_TOS', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IP_TOS' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IP_TOS_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IP_TOS_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IP_TTL', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IP_TTL' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_IP_TTL_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_IP_TTL_MASK' ],\r\n-\t\t[ 'HAVE_TC_ACT_GOTO_CHAIN', 'linux/pkt_cls.h',\r\n-\t\t'TC_ACT_GOTO_CHAIN' ],\r\n-\t\t[ 'HAVE_TC_ACT_VLAN', 'linux/tc_act/tc_vlan.h',\r\n-\t\t'TCA_VLAN_PUSH_VLAN_PRIORITY' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_KEY_ID', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_KEY_ID' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IPV4_SRC' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IPV4_DST' ],\r\n-\t\t[ 
'HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IPV4_DST_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IPV6_SRC' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IPV6_DST' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IPV6_DST_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_UDP_SRC_PORT' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_UDP_DST_PORT' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IP_TOS', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IP_TOS' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IP_TOS_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IP_TOS_MASK' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IP_TTL', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IP_TTL' ],\r\n-\t\t[ 'HAVE_TCA_FLOWER_KEY_ENC_IP_TTL_MASK', 'linux/pkt_cls.h',\r\n-\t\t'TCA_FLOWER_KEY_ENC_IP_TTL_MASK' ],\r\n-\t\t[ 'HAVE_TC_ACT_TUNNEL_KEY', 'linux/tc_act/tc_tunnel_key.h',\r\n-\t\t'TCA_ACT_TUNNEL_KEY' ],\r\n-\t\t[ 'HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT', 'linux/tc_act/tc_tunnel_key.h',\r\n-\t\t'TCA_TUNNEL_KEY_ENC_DST_PORT' ],\r\n-\t\t[ 'HAVE_TCA_TUNNEL_KEY_ENC_TOS', 'linux/tc_act/tc_tunnel_key.h',\r\n-\t\t'TCA_TUNNEL_KEY_ENC_TOS' ],\r\n-\t\t[ 'HAVE_TCA_TUNNEL_KEY_ENC_TTL', 'linux/tc_act/tc_tunnel_key.h',\r\n-\t\t'TCA_TUNNEL_KEY_ENC_TTL' ],\r\n-\t\t[ 'HAVE_TCA_TUNNEL_KEY_NO_CSUM', 'linux/tc_act/tc_tunnel_key.h',\r\n-\t\t'TCA_TUNNEL_KEY_NO_CSUM' ],\r\n-\t\t[ 'HAVE_TC_ACT_PEDIT', 'linux/tc_act/tc_pedit.h',\r\n-\t\t'TCA_PEDIT_KEY_EX_HDR_TYPE_UDP' ],\r\n \t\t[ 'HAVE_RDMA_NL_NLDEV', 'rdma/rdma_netlink.h',\r\n \t\t'RDMA_NL_NLDEV' ],\r\n \t\t[ 'HAVE_RDMA_NLDEV_CMD_GET', 'rdma/rdma_netlink.h',\r\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\r\nindex 57a6221..09cc2d7 100644\r\n--- a/drivers/net/mlx5/mlx5.c\r\n+++ b/drivers/net/mlx5/mlx5.c\r\n@@ -687,8 +687,6 @@ struct mlx5_dev_spawn_data {\r\n \t\tclose(priv->nl_socket_route);\r\n \tif (priv->nl_socket_rdma >= 0)\r\n \t\tclose(priv->nl_socket_rdma);\r\n-\tif (priv->tcf_context)\r\n-\t\tmlx5_flow_tcf_context_destroy(priv->tcf_context);\r\n \tif (priv->sh) {\r\n \t\t/*\r\n \t\t * Free the shared context in last turn, because the cleanup\r\n@@ -1492,34 +1490,6 @@ struct mlx5_dev_spawn_data {\r\n \tclaim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));\r\n \tif (config.vf && config.vf_nl_en)\r\n \t\tmlx5_nl_mac_addr_sync(eth_dev);\r\n-\tpriv->tcf_context = mlx5_flow_tcf_context_create();\r\n-\tif (!priv->tcf_context) {\r\n-\t\terr = -rte_errno;\r\n-\t\tDRV_LOG(WARNING,\r\n-\t\t\t\"flow rules relying on switch offloads will not be\"\r\n-\t\t\t\" supported: cannot open libmnl socket: %s\",\r\n-\t\t\tstrerror(rte_errno));\r\n-\t} else {\r\n-\t\tstruct rte_flow_error error;\r\n-\t\tunsigned int ifindex = mlx5_ifindex(eth_dev);\r\n-\r\n-\t\tif (!ifindex) {\r\n-\t\t\terr = -rte_errno;\r\n-\t\t\terror.message =\r\n-\t\t\t\t\"cannot retrieve network interface index\";\r\n-\t\t} else {\r\n-\t\t\terr = 
mlx5_flow_tcf_init(priv->tcf_context,\r\n-\t\t\t\t\t\t ifindex, &error);\r\n-\t\t}\r\n-\t\tif (err) {\r\n-\t\t\tDRV_LOG(WARNING,\r\n-\t\t\t\t\"flow rules relying on switch offloads will\"\r\n-\t\t\t\t\" not be supported: %s: %s\",\r\n-\t\t\t\terror.message, strerror(rte_errno));\r\n-\t\t\tmlx5_flow_tcf_context_destroy(priv->tcf_context);\r\n-\t\t\tpriv->tcf_context = NULL;\r\n-\t\t}\r\n-\t}\r\n \tTAILQ_INIT(&priv->flows);\r\n \tTAILQ_INIT(&priv->ctrl_flows);\r\n \t/* Hint libmlx5 to use PMD allocator for data plane resources */\r\n@@ -1585,8 +1555,6 @@ struct mlx5_dev_spawn_data {\r\n \t\t\tclose(priv->nl_socket_route);\r\n \t\tif (priv->nl_socket_rdma >= 0)\r\n \t\t\tclose(priv->nl_socket_rdma);\r\n-\t\tif (priv->tcf_context)\r\n-\t\t\tmlx5_flow_tcf_context_destroy(priv->tcf_context);\r\n \t\tif (own_domain_id)\r\n \t\t\tclaim_zero(rte_eth_switch_domain_free(priv->domain_id));\r\n \t\trte_free(priv);\r\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\r\nindex 6738a50..27f6768 100644\r\n--- a/drivers/net/mlx5/mlx5.h\r\n+++ b/drivers/net/mlx5/mlx5.h\r\n@@ -237,8 +237,6 @@ struct mlx5_drop {\r\n \tstruct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */\r\n };\r\n \r\n-struct mlx5_flow_tcf_context;\r\n-\r\n /* Per port data of shared IB device. */\r\n struct mlx5_ibv_shared_port {\r\n \tuint32_t ih_port_id;\r\n@@ -382,7 +380,6 @@ struct mlx5_priv {\r\n \trte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];\r\n \t/* UAR same-page access control required in 32bit implementations. */\r\n #endif\r\n-\tstruct mlx5_flow_tcf_context *tcf_context; /* TC flower context. */\r\n };\r\n \r\n #define PORT_ID(priv) ((priv)->dev_data->port_id)\r\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\r\nindex e5a8e33..2f45225 100644\r\n--- a/drivers/net/mlx5/mlx5_flow.c\r\n+++ b/drivers/net/mlx5/mlx5_flow.c\r\n@@ -42,7 +42,6 @@\r\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\r\n extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;\r\n #endif\r\n-extern const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops;\r\n extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;\r\n \r\n const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;\r\n@@ -52,7 +51,6 @@\r\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\r\n \t[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,\r\n #endif\r\n-\t[MLX5_FLOW_TYPE_TCF] = &mlx5_flow_tcf_drv_ops,\r\n \t[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,\r\n \t[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops\r\n };\r\n@@ -1037,7 +1035,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,\r\n \t\treturn rte_flow_error_set(error, ENOTSUP,\r\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,\r\n \t\t\t\t\t  \"egress is not supported\");\r\n-\tif (attributes->transfer)\r\n+\tif (attributes->transfer && !priv->config.dv_esw_en)\r\n \t\treturn rte_flow_error_set(error, ENOTSUP,\r\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,\r\n \t\t\t\t\t  NULL, \"transfer is not supported\");\r\n@@ -1294,7 +1292,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,\r\n  * @param[in] target_protocol\r\n  *   The next protocol in the previous item.\r\n  * @param[in] flow_mask\r\n- *   mlx5 flow-specific (TCF, DV, verbs, etc.) supported header fields mask.\r\n+ *   mlx5 flow-specific (DV, verbs, etc.) 
supported header fields mask.\r\n  * @param[out] error\r\n  *   Pointer to error structure.\r\n  *\r\n@@ -1784,9 +1782,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,\r\n \tstruct mlx5_priv *priv = dev->data->dev_private;\r\n \tenum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;\r\n \r\n-\tif (attr->transfer && !priv->config.dv_esw_en)\r\n-\t\ttype = MLX5_FLOW_TYPE_TCF;\r\n-\telse\r\n+\tif (attr->transfer && priv->config.dv_esw_en)\r\n+\t\ttype = MLX5_FLOW_TYPE_DV;\r\n+\tif (!attr->transfer)\r\n \t\ttype = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :\r\n \t\t\t\t\t\t MLX5_FLOW_TYPE_VERBS;\r\n \treturn type;\r\n@@ -1833,7 +1831,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,\r\n  * initializes the device flow and returns the pointer.\r\n  *\r\n  * @note\r\n- *   This function initializes device flow structure such as dv, tcf or verbs in\r\n+ *   This function initializes device flow structure such as dv or verbs in\r\n  *   struct mlx5_flow. However, it is caller's responsibility to initialize the\r\n  *   rest. For example, adding returning device flow to flow->dev_flow list and\r\n  *   setting backward reference to the flow should be done out of this function.\r\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\r\nindex b665420..65cfdbd 100644\r\n--- a/drivers/net/mlx5/mlx5_flow.h\r\n+++ b/drivers/net/mlx5/mlx5_flow.h\r\n@@ -188,7 +188,6 @@\r\n enum mlx5_flow_drv_type {\r\n \tMLX5_FLOW_TYPE_MIN,\r\n \tMLX5_FLOW_TYPE_DV,\r\n-\tMLX5_FLOW_TYPE_TCF,\r\n \tMLX5_FLOW_TYPE_VERBS,\r\n \tMLX5_FLOW_TYPE_MAX,\r\n };\r\n@@ -309,22 +308,6 @@ struct mlx5_flow_dv {\r\n \tint actions_n; /**< number of actions. */\r\n };\r\n \r\n-/** Linux TC flower driver for E-Switch flow. */\r\n-struct mlx5_flow_tcf {\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct tcmsg *tcm;\r\n-\tuint32_t *ptc_flags; /**< tc rule applied flags. */\r\n-\tunion { /**< Tunnel encap/decap descriptor. */\r\n-\t\tstruct flow_tcf_tunnel_hdr *tunnel;\r\n-\t\tstruct flow_tcf_vxlan_decap *vxlan_decap;\r\n-\t\tstruct flow_tcf_vxlan_encap *vxlan_encap;\r\n-\t};\r\n-\tuint32_t applied:1; /**< Whether rule is currently applied. */\r\n-#ifndef NDEBUG\r\n-\tuint32_t nlsize; /**< Size of NL message buffer for debug check. */\r\n-#endif\r\n-};\r\n-\r\n /* Verbs specification header. 
*/\r\n struct ibv_spec_header {\r\n \tenum ibv_flow_spec_type type;\r\n@@ -355,7 +338,6 @@ struct mlx5_flow {\r\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\r\n \t\tstruct mlx5_flow_dv dv;\r\n #endif\r\n-\t\tstruct mlx5_flow_tcf tcf;\r\n \t\tstruct mlx5_flow_verbs verbs;\r\n \t};\r\n };\r\n@@ -513,11 +495,4 @@ int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,\r\n \t\t\t\t      struct rte_eth_dev *dev,\r\n \t\t\t\t      struct rte_flow_error *error);\r\n \r\n-/* mlx5_flow_tcf.c */\r\n-\r\n-int mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,\r\n-\t\t       unsigned int ifindex, struct rte_flow_error *error);\r\n-struct mlx5_flow_tcf_context *mlx5_flow_tcf_context_create(void);\r\n-void mlx5_flow_tcf_context_destroy(struct mlx5_flow_tcf_context *ctx);\r\n-\r\n #endif /* RTE_PMD_MLX5_FLOW_H_ */\r\ndiff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c\r\ndeleted file mode 100644\r\nindex 223ee4f..0000000\r\n--- a/drivers/net/mlx5/mlx5_flow_tcf.c\r\n+++ /dev/null\r\n@@ -1,6382 +0,0 @@\r\n-/* SPDX-License-Identifier: BSD-3-Clause\r\n- * Copyright 2018 6WIND S.A.\r\n- * Copyright 2018 Mellanox Technologies, Ltd\r\n- */\r\n-\r\n-#include <assert.h>\r\n-#include <errno.h>\r\n-#include <libmnl/libmnl.h>\r\n-#include <linux/gen_stats.h>\r\n-#include <linux/if_ether.h>\r\n-#include <linux/netlink.h>\r\n-#include <linux/pkt_cls.h>\r\n-#include <linux/pkt_sched.h>\r\n-#include <linux/rtnetlink.h>\r\n-#include <linux/tc_act/tc_gact.h>\r\n-#include <linux/tc_act/tc_mirred.h>\r\n-#include <netinet/in.h>\r\n-#include <stdalign.h>\r\n-#include <stdbool.h>\r\n-#include <stddef.h>\r\n-#include <stdint.h>\r\n-#include <stdlib.h>\r\n-#include <sys/socket.h>\r\n-\r\n-#include <rte_byteorder.h>\r\n-#include <rte_errno.h>\r\n-#include <rte_ether.h>\r\n-#include <rte_flow.h>\r\n-#include <rte_malloc.h>\r\n-#include <rte_common.h>\r\n-#include <rte_cycles.h>\r\n-\r\n-#include \"mlx5.h\"\r\n-#include \"mlx5_flow.h\"\r\n-#include \"mlx5_autoconf.h\"\r\n-\r\n-#ifdef HAVE_TC_ACT_VLAN\r\n-\r\n-#include <linux/tc_act/tc_vlan.h>\r\n-\r\n-#else /* HAVE_TC_ACT_VLAN */\r\n-\r\n-#define TCA_VLAN_ACT_POP 1\r\n-#define TCA_VLAN_ACT_PUSH 2\r\n-#define TCA_VLAN_ACT_MODIFY 3\r\n-#define TCA_VLAN_PARMS 2\r\n-#define TCA_VLAN_PUSH_VLAN_ID 3\r\n-#define TCA_VLAN_PUSH_VLAN_PROTOCOL 4\r\n-#define TCA_VLAN_PAD 5\r\n-#define TCA_VLAN_PUSH_VLAN_PRIORITY 6\r\n-\r\n-struct tc_vlan {\r\n-\ttc_gen;\r\n-\tint v_action;\r\n-};\r\n-\r\n-#endif /* HAVE_TC_ACT_VLAN */\r\n-\r\n-#ifdef HAVE_TC_ACT_PEDIT\r\n-\r\n-#include <linux/tc_act/tc_pedit.h>\r\n-\r\n-#else /* HAVE_TC_ACT_VLAN */\r\n-\r\n-enum {\r\n-\tTCA_PEDIT_UNSPEC,\r\n-\tTCA_PEDIT_TM,\r\n-\tTCA_PEDIT_PARMS,\r\n-\tTCA_PEDIT_PAD,\r\n-\tTCA_PEDIT_PARMS_EX,\r\n-\tTCA_PEDIT_KEYS_EX,\r\n-\tTCA_PEDIT_KEY_EX,\r\n-\t__TCA_PEDIT_MAX\r\n-};\r\n-\r\n-enum {\r\n-\tTCA_PEDIT_KEY_EX_HTYPE = 1,\r\n-\tTCA_PEDIT_KEY_EX_CMD = 2,\r\n-\t__TCA_PEDIT_KEY_EX_MAX\r\n-};\r\n-\r\n-enum pedit_header_type {\r\n-\tTCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0,\r\n-\tTCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1,\r\n-\tTCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2,\r\n-\tTCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3,\r\n-\tTCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4,\r\n-\tTCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5,\r\n-\t__PEDIT_HDR_TYPE_MAX,\r\n-};\r\n-\r\n-enum pedit_cmd {\r\n-\tTCA_PEDIT_KEY_EX_CMD_SET = 0,\r\n-\tTCA_PEDIT_KEY_EX_CMD_ADD = 1,\r\n-\t__PEDIT_CMD_MAX,\r\n-};\r\n-\r\n-struct tc_pedit_key {\r\n-\t__u32 mask; /* AND */\r\n-\t__u32 val; /*XOR */\r\n-\t__u32 off; /*offset */\r\n-\t__u32 at;\r\n-\t__u32 offmask;\r\n-\t__u32 
shift;\r\n-};\r\n-\r\n-__extension__\r\n-struct tc_pedit_sel {\r\n-\ttc_gen;\r\n-\tunsigned char nkeys;\r\n-\tunsigned char flags;\r\n-\tstruct tc_pedit_key keys[0];\r\n-};\r\n-\r\n-#endif /* HAVE_TC_ACT_VLAN */\r\n-\r\n-#ifdef HAVE_TC_ACT_TUNNEL_KEY\r\n-\r\n-#include <linux/tc_act/tc_tunnel_key.h>\r\n-\r\n-#ifndef HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT\r\n-#define TCA_TUNNEL_KEY_ENC_DST_PORT 9\r\n-#endif\r\n-\r\n-#ifndef HAVE_TCA_TUNNEL_KEY_NO_CSUM\r\n-#define TCA_TUNNEL_KEY_NO_CSUM 10\r\n-#endif\r\n-\r\n-#ifndef HAVE_TCA_TUNNEL_KEY_ENC_TOS\r\n-#define TCA_TUNNEL_KEY_ENC_TOS 12\r\n-#endif\r\n-\r\n-#ifndef\tHAVE_TCA_TUNNEL_KEY_ENC_TTL\r\n-#define TCA_TUNNEL_KEY_ENC_TTL 13\r\n-#endif\r\n-\r\n-#else /* HAVE_TC_ACT_TUNNEL_KEY */\r\n-\r\n-#define TCA_ACT_TUNNEL_KEY 17\r\n-#define TCA_TUNNEL_KEY_ACT_SET 1\r\n-#define TCA_TUNNEL_KEY_ACT_RELEASE 2\r\n-#define TCA_TUNNEL_KEY_PARMS 2\r\n-#define TCA_TUNNEL_KEY_ENC_IPV4_SRC 3\r\n-#define TCA_TUNNEL_KEY_ENC_IPV4_DST 4\r\n-#define TCA_TUNNEL_KEY_ENC_IPV6_SRC 5\r\n-#define TCA_TUNNEL_KEY_ENC_IPV6_DST 6\r\n-#define TCA_TUNNEL_KEY_ENC_KEY_ID 7\r\n-#define TCA_TUNNEL_KEY_ENC_DST_PORT 9\r\n-#define TCA_TUNNEL_KEY_NO_CSUM 10\r\n-#define TCA_TUNNEL_KEY_ENC_TOS 12\r\n-#define TCA_TUNNEL_KEY_ENC_TTL 13\r\n-\r\n-struct tc_tunnel_key {\r\n-\ttc_gen;\r\n-\tint t_action;\r\n-};\r\n-\r\n-#endif /* HAVE_TC_ACT_TUNNEL_KEY */\r\n-\r\n-/* Normally found in linux/netlink.h. */\r\n-#ifndef NETLINK_CAP_ACK\r\n-#define NETLINK_CAP_ACK 10\r\n-#endif\r\n-\r\n-/* Normally found in linux/pkt_sched.h. */\r\n-#ifndef TC_H_MIN_INGRESS\r\n-#define TC_H_MIN_INGRESS 0xfff2u\r\n-#endif\r\n-\r\n-/* Normally found in linux/pkt_cls.h. */\r\n-#ifndef TCA_CLS_FLAGS_SKIP_SW\r\n-#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)\r\n-#endif\r\n-#ifndef TCA_CLS_FLAGS_IN_HW\r\n-#define TCA_CLS_FLAGS_IN_HW (1 << 2)\r\n-#endif\r\n-#ifndef HAVE_TCA_CHAIN\r\n-#define TCA_CHAIN 11\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_ACT\r\n-#define TCA_FLOWER_ACT 3\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_FLAGS\r\n-#define TCA_FLOWER_FLAGS 22\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE\r\n-#define TCA_FLOWER_KEY_ETH_TYPE 8\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST\r\n-#define TCA_FLOWER_KEY_ETH_DST 4\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK\r\n-#define TCA_FLOWER_KEY_ETH_DST_MASK 5\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC\r\n-#define TCA_FLOWER_KEY_ETH_SRC 6\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK\r\n-#define TCA_FLOWER_KEY_ETH_SRC_MASK 7\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO\r\n-#define TCA_FLOWER_KEY_IP_PROTO 9\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC\r\n-#define TCA_FLOWER_KEY_IPV4_SRC 10\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK\r\n-#define TCA_FLOWER_KEY_IPV4_SRC_MASK 11\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST\r\n-#define TCA_FLOWER_KEY_IPV4_DST 12\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK\r\n-#define TCA_FLOWER_KEY_IPV4_DST_MASK 13\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC\r\n-#define TCA_FLOWER_KEY_IPV6_SRC 14\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK\r\n-#define TCA_FLOWER_KEY_IPV6_SRC_MASK 15\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST\r\n-#define TCA_FLOWER_KEY_IPV6_DST 16\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK\r\n-#define TCA_FLOWER_KEY_IPV6_DST_MASK 17\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC\r\n-#define TCA_FLOWER_KEY_TCP_SRC 18\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK\r\n-#define TCA_FLOWER_KEY_TCP_SRC_MASK 
35\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST\r\n-#define TCA_FLOWER_KEY_TCP_DST 19\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK\r\n-#define TCA_FLOWER_KEY_TCP_DST_MASK 36\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC\r\n-#define TCA_FLOWER_KEY_UDP_SRC 20\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK\r\n-#define TCA_FLOWER_KEY_UDP_SRC_MASK 37\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST\r\n-#define TCA_FLOWER_KEY_UDP_DST 21\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK\r\n-#define TCA_FLOWER_KEY_UDP_DST_MASK 38\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID\r\n-#define TCA_FLOWER_KEY_VLAN_ID 23\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO\r\n-#define TCA_FLOWER_KEY_VLAN_PRIO 24\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE\r\n-#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_KEY_ID\r\n-#define TCA_FLOWER_KEY_ENC_KEY_ID 26\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC\r\n-#define TCA_FLOWER_KEY_ENC_IPV4_SRC 27\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK\r\n-#define TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK 28\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST\r\n-#define TCA_FLOWER_KEY_ENC_IPV4_DST 29\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK\r\n-#define TCA_FLOWER_KEY_ENC_IPV4_DST_MASK 30\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC\r\n-#define TCA_FLOWER_KEY_ENC_IPV6_SRC 31\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK\r\n-#define TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK 32\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST\r\n-#define TCA_FLOWER_KEY_ENC_IPV6_DST 33\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK\r\n-#define TCA_FLOWER_KEY_ENC_IPV6_DST_MASK 34\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT\r\n-#define TCA_FLOWER_KEY_ENC_UDP_SRC_PORT 43\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK\r\n-#define TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK 44\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT\r\n-#define TCA_FLOWER_KEY_ENC_UDP_DST_PORT 45\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK\r\n-#define TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK 46\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS\r\n-#define TCA_FLOWER_KEY_TCP_FLAGS 71\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK\r\n-#define TCA_FLOWER_KEY_TCP_FLAGS_MASK 72\r\n-#endif\r\n-#ifndef\tHAVE_TCA_FLOWER_KEY_IP_TOS\r\n-#define\tTCA_FLOWER_KEY_IP_TOS 73\r\n-#endif\r\n-#ifndef\tHAVE_TCA_FLOWER_KEY_IP_TOS_MASK\r\n-#define TCA_FLOWER_KEY_IP_TOS_MASK 74\r\n-#endif\r\n-#ifndef\tHAVE_TCA_FLOWER_KEY_IP_TTL\r\n-#define TCA_FLOWER_KEY_IP_TTL 75\r\n-#endif\r\n-#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL_MASK\r\n-#define TCA_FLOWER_KEY_IP_TTL_MASK 76\r\n-#endif\r\n-#ifndef\tHAVE_TCA_FLOWER_KEY_ENC_IP_TOS\r\n-#define TCA_FLOWER_KEY_ENC_IP_TOS 80\r\n-#endif\r\n-#ifndef\tHAVE_TCA_FLOWER_KEY_ENC_IP_TOS_MASK\r\n-#define TCA_FLOWER_KEY_ENC_IP_TOS_MASK 81\r\n-#endif\r\n-#ifndef\tHAVE_TCA_FLOWER_KEY_ENC_IP_TTL\r\n-#define\tTCA_FLOWER_KEY_ENC_IP_TTL 82\r\n-#endif\r\n-#ifndef\tHAVE_TCA_FLOWER_KEY_ENC_IP_TTL_MASK\r\n-#define TCA_FLOWER_KEY_ENC_IP_TTL_MASK 83\r\n-#endif\r\n-\r\n-#ifndef HAVE_TC_ACT_GOTO_CHAIN\r\n-#define TC_ACT_GOTO_CHAIN 0x20000000\r\n-#endif\r\n-\r\n-#ifndef IPV6_ADDR_LEN\r\n-#define IPV6_ADDR_LEN 16\r\n-#endif\r\n-\r\n-#ifndef IPV4_ADDR_LEN\r\n-#define IPV4_ADDR_LEN 4\r\n-#endif\r\n-\r\n-#ifndef TP_PORT_LEN\r\n-#define TP_PORT_LEN 2 /* Transport Port (UDP/TCP) Length 
*/\r\n-#endif\r\n-\r\n-#ifndef TTL_LEN\r\n-#define TTL_LEN 1\r\n-#endif\r\n-\r\n-#ifndef TCA_ACT_MAX_PRIO\r\n-#define TCA_ACT_MAX_PRIO 32\r\n-#endif\r\n-\r\n-/** Parameters of VXLAN devices created by driver. */\r\n-#define MLX5_VXLAN_DEFAULT_VNI\t1\r\n-#define MLX5_VXLAN_DEVICE_PFX \"vmlx_\"\r\n-/**\r\n- * Timeout in milliseconds to wait VXLAN UDP offloaded port\r\n- * registration  completed within the mlx5 driver.\r\n- */\r\n-#define MLX5_VXLAN_WAIT_PORT_REG_MS 250\r\n-\r\n-/** Tunnel action type, used for @p type in header structure. */\r\n-enum flow_tcf_tunact_type {\r\n-\tFLOW_TCF_TUNACT_VXLAN_DECAP,\r\n-\tFLOW_TCF_TUNACT_VXLAN_ENCAP,\r\n-};\r\n-\r\n-/** Flags used for @p mask in tunnel action encap descriptors. */\r\n-#define FLOW_TCF_ENCAP_ETH_SRC (1u << 0)\r\n-#define FLOW_TCF_ENCAP_ETH_DST (1u << 1)\r\n-#define FLOW_TCF_ENCAP_IPV4_SRC (1u << 2)\r\n-#define FLOW_TCF_ENCAP_IPV4_DST (1u << 3)\r\n-#define FLOW_TCF_ENCAP_IPV6_SRC (1u << 4)\r\n-#define FLOW_TCF_ENCAP_IPV6_DST (1u << 5)\r\n-#define FLOW_TCF_ENCAP_UDP_SRC (1u << 6)\r\n-#define FLOW_TCF_ENCAP_UDP_DST (1u << 7)\r\n-#define FLOW_TCF_ENCAP_VXLAN_VNI (1u << 8)\r\n-#define FLOW_TCF_ENCAP_IP_TTL (1u << 9)\r\n-#define FLOW_TCF_ENCAP_IP_TOS (1u << 10)\r\n-\r\n-/**\r\n- * Structure for holding netlink context.\r\n- * Note the size of the message buffer which is MNL_SOCKET_BUFFER_SIZE.\r\n- * Using this (8KB) buffer size ensures that netlink messages will never be\r\n- * truncated.\r\n- */\r\n-struct mlx5_flow_tcf_context {\r\n-\tstruct mnl_socket *nl; /* NETLINK_ROUTE libmnl socket. */\r\n-\tuint32_t seq; /* Message sequence number. */\r\n-\tuint32_t buf_size; /* Message buffer size. */\r\n-\tuint8_t *buf; /* Message buffer. */\r\n-};\r\n-\r\n-/**\r\n- * Neigh rule structure. The neigh rule is applied via Netlink to\r\n- * outer tunnel iface in order to provide destination MAC address\r\n- * for the VXLAN encapsultion. The neigh rule is implicitly related\r\n- * to the Flow itself and can be shared by multiple Flows.\r\n- */\r\n-struct tcf_neigh_rule {\r\n-\tLIST_ENTRY(tcf_neigh_rule) next;\r\n-\tuint32_t refcnt;\r\n-\tstruct rte_ether_addr eth;\r\n-\tuint16_t mask;\r\n-\tunion {\r\n-\t\tstruct {\r\n-\t\t\trte_be32_t dst;\r\n-\t\t} ipv4;\r\n-\t\tstruct {\r\n-\t\t\tuint8_t dst[IPV6_ADDR_LEN];\r\n-\t\t} ipv6;\r\n-\t};\r\n-};\r\n-\r\n-/**\r\n- * Local rule structure. The local rule is applied via Netlink to\r\n- * outer tunnel iface in order to provide local and peer IP addresses\r\n- * of the VXLAN tunnel for encapsulation. The local rule is implicitly\r\n- * related to the Flow itself and can be shared by multiple Flows.\r\n- */\r\n-struct tcf_local_rule {\r\n-\tLIST_ENTRY(tcf_local_rule) next;\r\n-\tuint32_t refcnt;\r\n-\tuint16_t mask;\r\n-\tunion {\r\n-\t\tstruct {\r\n-\t\t\trte_be32_t dst;\r\n-\t\t\trte_be32_t src;\r\n-\t\t} ipv4;\r\n-\t\tstruct {\r\n-\t\t\tuint8_t dst[IPV6_ADDR_LEN];\r\n-\t\t\tuint8_t src[IPV6_ADDR_LEN];\r\n-\t\t} ipv6;\r\n-\t};\r\n-};\r\n-\r\n-/** Outer interface VXLAN encapsulation rules container. */\r\n-struct tcf_irule {\r\n-\tLIST_ENTRY(tcf_irule) next;\r\n-\tLIST_HEAD(, tcf_neigh_rule) neigh;\r\n-\tLIST_HEAD(, tcf_local_rule) local;\r\n-\tuint32_t refcnt;\r\n-\tunsigned int ifouter; /**< Own interface index. */\r\n-};\r\n-\r\n-/** VXLAN virtual netdev. */\r\n-struct tcf_vtep {\r\n-\tLIST_ENTRY(tcf_vtep) next;\r\n-\tuint32_t refcnt;\r\n-\tunsigned int ifindex; /**< Own interface index. */\r\n-\tuint16_t port;\r\n-\tuint32_t created:1; /**< Actually created by PMD. 
*/\r\n-\tuint32_t waitreg:1; /**< Wait for VXLAN UDP port registration. */\r\n-};\r\n-\r\n-/** Tunnel descriptor header, common for all tunnel types. */\r\n-struct flow_tcf_tunnel_hdr {\r\n-\tuint32_t type; /**< Tunnel action type. */\r\n-\tstruct tcf_vtep *vtep; /**< Virtual tunnel endpoint device. */\r\n-\tunsigned int ifindex_org; /**< Original dst/src interface */\r\n-\tunsigned int *ifindex_ptr; /**< Interface ptr in message. */\r\n-};\r\n-\r\n-struct flow_tcf_vxlan_decap {\r\n-\tstruct flow_tcf_tunnel_hdr hdr;\r\n-\tuint16_t udp_port;\r\n-};\r\n-\r\n-struct flow_tcf_vxlan_encap {\r\n-\tstruct flow_tcf_tunnel_hdr hdr;\r\n-\tstruct tcf_irule *iface;\r\n-\tuint32_t mask;\r\n-\tuint8_t ip_tos;\r\n-\tuint8_t ip_ttl_hop;\r\n-\tstruct {\r\n-\t\tstruct rte_ether_addr dst;\r\n-\t\tstruct rte_ether_addr src;\r\n-\t} eth;\r\n-\tunion {\r\n-\t\tstruct {\r\n-\t\t\trte_be32_t dst;\r\n-\t\t\trte_be32_t src;\r\n-\t\t} ipv4;\r\n-\t\tstruct {\r\n-\t\t\tuint8_t dst[IPV6_ADDR_LEN];\r\n-\t\t\tuint8_t src[IPV6_ADDR_LEN];\r\n-\t\t} ipv6;\r\n-\t};\r\n-\tstruct {\r\n-\t\trte_be16_t src;\r\n-\t\trte_be16_t dst;\r\n-\t} udp;\r\n-\tstruct {\r\n-\t\tuint8_t vni[3];\r\n-\t} vxlan;\r\n-};\r\n-\r\n-/** Structure used when extracting the values of a flow counters\r\n- * from a netlink message.\r\n- */\r\n-struct flow_tcf_stats_basic {\r\n-\tbool valid;\r\n-\tstruct gnet_stats_basic counters;\r\n-};\r\n-\r\n-/** Empty masks for known item types. */\r\n-static const union {\r\n-\tstruct rte_flow_item_port_id port_id;\r\n-\tstruct rte_flow_item_eth eth;\r\n-\tstruct rte_flow_item_vlan vlan;\r\n-\tstruct rte_flow_item_ipv4 ipv4;\r\n-\tstruct rte_flow_item_ipv6 ipv6;\r\n-\tstruct rte_flow_item_tcp tcp;\r\n-\tstruct rte_flow_item_udp udp;\r\n-\tstruct rte_flow_item_vxlan vxlan;\r\n-} flow_tcf_mask_empty = {\r\n-\t{0},\r\n-};\r\n-\r\n-/** Supported masks for known item types. */\r\n-static const struct {\r\n-\tstruct rte_flow_item_port_id port_id;\r\n-\tstruct rte_flow_item_eth eth;\r\n-\tstruct rte_flow_item_vlan vlan;\r\n-\tstruct rte_flow_item_ipv4 ipv4;\r\n-\tstruct rte_flow_item_ipv6 ipv6;\r\n-\tstruct rte_flow_item_tcp tcp;\r\n-\tstruct rte_flow_item_udp udp;\r\n-\tstruct rte_flow_item_vxlan vxlan;\r\n-} flow_tcf_mask_supported = {\r\n-\t.port_id = {\r\n-\t\t.id = 0xffffffff,\r\n-\t},\r\n-\t.eth = {\r\n-\t\t.type = RTE_BE16(0xffff),\r\n-\t\t.dst.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\r\n-\t\t.src.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\r\n-\t},\r\n-\t.vlan = {\r\n-\t\t/* PCP and VID only, no DEI. 
*/\r\n-\t\t.tci = RTE_BE16(0xefff),\r\n-\t\t.inner_type = RTE_BE16(0xffff),\r\n-\t},\r\n-\t.ipv4.hdr = {\r\n-\t\t.next_proto_id = 0xff,\r\n-\t\t.time_to_live = 0xff,\r\n-\t\t.type_of_service = 0xff,\r\n-\t\t.src_addr = RTE_BE32(0xffffffff),\r\n-\t\t.dst_addr = RTE_BE32(0xffffffff),\r\n-\t},\r\n-\t.ipv6.hdr = {\r\n-\t\t.proto = 0xff,\r\n-\t\t.vtc_flow = RTE_BE32(0xfful << RTE_IPV6_HDR_FL_SHIFT),\r\n-\t\t.hop_limits = 0xff,\r\n-\t\t.src_addr =\r\n-\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"\r\n-\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\",\r\n-\t\t.dst_addr =\r\n-\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"\r\n-\t\t\t\"\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\",\r\n-\t},\r\n-\t.tcp.hdr = {\r\n-\t\t.src_port = RTE_BE16(0xffff),\r\n-\t\t.dst_port = RTE_BE16(0xffff),\r\n-\t\t.tcp_flags = 0xff,\r\n-\t},\r\n-\t.udp.hdr = {\r\n-\t\t.src_port = RTE_BE16(0xffff),\r\n-\t\t.dst_port = RTE_BE16(0xffff),\r\n-\t},\r\n-\t.vxlan = {\r\n-\t       .vni = \"\\xff\\xff\\xff\",\r\n-\t},\r\n-};\r\n-\r\n-#define SZ_NLATTR_HDR MNL_ALIGN(sizeof(struct nlattr))\r\n-#define SZ_NLATTR_NEST SZ_NLATTR_HDR\r\n-#define SZ_NLATTR_DATA_OF(len) MNL_ALIGN(SZ_NLATTR_HDR + (len))\r\n-#define SZ_NLATTR_TYPE_OF(typ) SZ_NLATTR_DATA_OF(sizeof(typ))\r\n-#define SZ_NLATTR_STRZ_OF(str) SZ_NLATTR_DATA_OF(strlen(str) + 1)\r\n-\r\n-#define PTOI_TABLE_SZ_MAX(dev) (mlx5_dev_to_port_id((dev)->device, NULL, 0) + 2)\r\n-\r\n-/** DPDK port to network interface index (ifindex) conversion. */\r\n-struct flow_tcf_ptoi {\r\n-\tuint16_t port_id; /**< DPDK port ID. */\r\n-\tunsigned int ifindex; /**< Network interface index. */\r\n-};\r\n-\r\n-/* Due to a limitation on driver/FW. */\r\n-#define MLX5_TCF_GROUP_ID_MAX 3\r\n-\r\n-/*\r\n- * Due to a limitation on driver/FW, priority ranges from 1 to 16 in kernel.\r\n- * Priority in rte_flow attribute starts from 0 and is added by 1 in\r\n- * translation. This is subject to be changed to determine the max priority\r\n- * based on trial-and-error like Verbs driver once the restriction is lifted or\r\n- * the range is extended.\r\n- */\r\n-#define MLX5_TCF_GROUP_PRIORITY_MAX 15\r\n-\r\n-#define MLX5_TCF_FATE_ACTIONS \\\r\n-\t(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \\\r\n-\t MLX5_FLOW_ACTION_JUMP)\r\n-\r\n-#define MLX5_TCF_VLAN_ACTIONS \\\r\n-\t(MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN | \\\r\n-\t MLX5_FLOW_ACTION_OF_SET_VLAN_VID | MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)\r\n-\r\n-#define MLX5_TCF_VXLAN_ACTIONS \\\r\n-\t(MLX5_FLOW_ACTION_VXLAN_ENCAP | MLX5_FLOW_ACTION_VXLAN_DECAP)\r\n-\r\n-#define MLX5_TCF_PEDIT_ACTIONS \\\r\n-\t(MLX5_FLOW_ACTION_SET_IPV4_SRC | MLX5_FLOW_ACTION_SET_IPV4_DST | \\\r\n-\t MLX5_FLOW_ACTION_SET_IPV6_SRC | MLX5_FLOW_ACTION_SET_IPV6_DST | \\\r\n-\t MLX5_FLOW_ACTION_SET_TP_SRC | MLX5_FLOW_ACTION_SET_TP_DST | \\\r\n-\t MLX5_FLOW_ACTION_SET_TTL | MLX5_FLOW_ACTION_DEC_TTL | \\\r\n-\t MLX5_FLOW_ACTION_SET_MAC_SRC | MLX5_FLOW_ACTION_SET_MAC_DST)\r\n-\r\n-#define MLX5_TCF_CONFIG_ACTIONS \\\r\n-\t(MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_JUMP | \\\r\n-\t MLX5_FLOW_ACTION_OF_PUSH_VLAN | MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \\\r\n-\t MLX5_FLOW_ACTION_OF_SET_VLAN_PCP | \\\r\n-\t (MLX5_TCF_PEDIT_ACTIONS & ~MLX5_FLOW_ACTION_DEC_TTL))\r\n-\r\n-#define MAX_PEDIT_KEYS 128\r\n-#define SZ_PEDIT_KEY_VAL 4\r\n-\r\n-#define NUM_OF_PEDIT_KEYS(sz) \\\r\n-\t(((sz) / SZ_PEDIT_KEY_VAL) + (((sz) % SZ_PEDIT_KEY_VAL) ? 
1 : 0))\r\n-\r\n-struct pedit_key_ex {\r\n-\tenum pedit_header_type htype;\r\n-\tenum pedit_cmd cmd;\r\n-};\r\n-\r\n-struct pedit_parser {\r\n-\tstruct tc_pedit_sel sel;\r\n-\tstruct tc_pedit_key keys[MAX_PEDIT_KEYS];\r\n-\tstruct pedit_key_ex keys_ex[MAX_PEDIT_KEYS];\r\n-};\r\n-\r\n-/**\r\n- * Create space for using the implicitly created TC flow counter.\r\n- *\r\n- * @param[in] dev\r\n- *   Pointer to the Ethernet device structure.\r\n- *\r\n- * @return\r\n- *   A pointer to the counter data structure, NULL otherwise and\r\n- *   rte_errno is set.\r\n- */\r\n-static struct mlx5_flow_counter *\r\n-flow_tcf_counter_new(void)\r\n-{\r\n-\tstruct mlx5_flow_counter *cnt;\r\n-\r\n-\t/*\r\n-\t * eswitch counter cannot be shared and its id is unknown.\r\n-\t * currently returning all with id 0.\r\n-\t * in the future maybe better to switch to unique numbers.\r\n-\t */\r\n-\tstruct mlx5_flow_counter tmpl = {\r\n-\t\t.ref_cnt = 1,\r\n-\t};\r\n-\tcnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);\r\n-\tif (!cnt) {\r\n-\t\trte_errno = ENOMEM;\r\n-\t\treturn NULL;\r\n-\t}\r\n-\t*cnt = tmpl;\r\n-\t/* Implicit counter, do not add to list. */\r\n-\treturn cnt;\r\n-}\r\n-\r\n-/**\r\n- * Set pedit key of MAC address\r\n- *\r\n- * @param[in] actions\r\n- *   pointer to action specification\r\n- * @param[in,out] p_parser\r\n- *   pointer to pedit_parser\r\n- */\r\n-static void\r\n-flow_tcf_pedit_key_set_mac(const struct rte_flow_action *actions,\r\n-\t\t\t   struct pedit_parser *p_parser)\r\n-{\r\n-\tint idx = p_parser->sel.nkeys;\r\n-\tuint32_t off = actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?\r\n-\t\t\t\t\toffsetof(struct rte_ether_hdr, s_addr) :\r\n-\t\t\t\t\toffsetof(struct rte_ether_hdr, d_addr);\r\n-\tconst struct rte_flow_action_set_mac *conf =\r\n-\t\t(const struct rte_flow_action_set_mac *)actions->conf;\r\n-\r\n-\tp_parser->keys[idx].off = off;\r\n-\tp_parser->keys[idx].mask = ~UINT32_MAX;\r\n-\tp_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH;\r\n-\tp_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;\r\n-\tmemcpy(&p_parser->keys[idx].val,\r\n-\t\tconf->mac_addr, SZ_PEDIT_KEY_VAL);\r\n-\tidx++;\r\n-\tp_parser->keys[idx].off = off + SZ_PEDIT_KEY_VAL;\r\n-\tp_parser->keys[idx].mask = 0xFFFF0000;\r\n-\tp_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH;\r\n-\tp_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;\r\n-\tmemcpy(&p_parser->keys[idx].val,\r\n-\t\tconf->mac_addr + SZ_PEDIT_KEY_VAL,\r\n-\t\tRTE_ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);\r\n-\tp_parser->sel.nkeys = (++idx);\r\n-}\r\n-\r\n-/**\r\n- * Set pedit key of decrease/set ttl\r\n- *\r\n- * @param[in] actions\r\n- *   pointer to action specification\r\n- * @param[in,out] p_parser\r\n- *   pointer to pedit_parser\r\n- * @param[in] item_flags\r\n- *   flags of all items presented\r\n- */\r\n-static void\r\n-flow_tcf_pedit_key_set_dec_ttl(const struct rte_flow_action *actions,\r\n-\t\t\t\tstruct pedit_parser *p_parser,\r\n-\t\t\t\tuint64_t item_flags)\r\n-{\r\n-\tint idx = p_parser->sel.nkeys;\r\n-\r\n-\tp_parser->keys[idx].mask = 0xFFFFFF00;\r\n-\tif (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) {\r\n-\t\tp_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;\r\n-\t\tp_parser->keys[idx].off =\r\n-\t\t\toffsetof(struct rte_ipv4_hdr, time_to_live);\r\n-\t}\r\n-\tif (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6) {\r\n-\t\tp_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;\r\n-\t\tp_parser->keys[idx].off =\r\n-\t\t\toffsetof(struct rte_ipv6_hdr, hop_limits);\r\n-\t}\r\n-\tif (actions->type == 
RTE_FLOW_ACTION_TYPE_DEC_TTL) {\r\n-\t\tp_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_ADD;\r\n-\t\tp_parser->keys[idx].val = 0x000000FF;\r\n-\t} else {\r\n-\t\tp_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;\r\n-\t\tp_parser->keys[idx].val =\r\n-\t\t\t(__u32)((const struct rte_flow_action_set_ttl *)\r\n-\t\t\t actions->conf)->ttl_value;\r\n-\t}\r\n-\tp_parser->sel.nkeys = (++idx);\r\n-}\r\n-\r\n-/**\r\n- * Set pedit key of transport (TCP/UDP) port value\r\n- *\r\n- * @param[in] actions\r\n- *   pointer to action specification\r\n- * @param[in,out] p_parser\r\n- *   pointer to pedit_parser\r\n- * @param[in] item_flags\r\n- *   flags of all items presented\r\n- */\r\n-static void\r\n-flow_tcf_pedit_key_set_tp_port(const struct rte_flow_action *actions,\r\n-\t\t\t\tstruct pedit_parser *p_parser,\r\n-\t\t\t\tuint64_t item_flags)\r\n-{\r\n-\tint idx = p_parser->sel.nkeys;\r\n-\r\n-\tif (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)\r\n-\t\tp_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP;\r\n-\tif (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)\r\n-\t\tp_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP;\r\n-\tp_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;\r\n-\t/* offset of src/dst port is same for TCP and UDP */\r\n-\tp_parser->keys[idx].off =\r\n-\t\tactions->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?\r\n-\t\toffsetof(struct rte_tcp_hdr, src_port) :\r\n-\t\toffsetof(struct rte_tcp_hdr, dst_port);\r\n-\tp_parser->keys[idx].mask = 0xFFFF0000;\r\n-\tp_parser->keys[idx].val =\r\n-\t\t(__u32)((const struct rte_flow_action_set_tp *)\r\n-\t\t\t\tactions->conf)->port;\r\n-\tp_parser->sel.nkeys = (++idx);\r\n-}\r\n-\r\n-/**\r\n- * Set pedit key of ipv6 address\r\n- *\r\n- * @param[in] actions\r\n- *   pointer to action specification\r\n- * @param[in,out] p_parser\r\n- *   pointer to pedit_parser\r\n- */\r\n-static void\r\n-flow_tcf_pedit_key_set_ipv6_addr(const struct rte_flow_action *actions,\r\n-\t\t\t\t struct pedit_parser *p_parser)\r\n-{\r\n-\tint idx = p_parser->sel.nkeys;\r\n-\tint keys = NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);\r\n-\tint off_base =\r\n-\t\tactions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?\r\n-\t\toffsetof(struct rte_ipv6_hdr, src_addr) :\r\n-\t\toffsetof(struct rte_ipv6_hdr, dst_addr);\r\n-\tconst struct rte_flow_action_set_ipv6 *conf =\r\n-\t\t(const struct rte_flow_action_set_ipv6 *)actions->conf;\r\n-\r\n-\tfor (int i = 0; i < keys; i++, idx++) {\r\n-\t\tp_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;\r\n-\t\tp_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;\r\n-\t\tp_parser->keys[idx].off = off_base + i * SZ_PEDIT_KEY_VAL;\r\n-\t\tp_parser->keys[idx].mask = ~UINT32_MAX;\r\n-\t\tmemcpy(&p_parser->keys[idx].val,\r\n-\t\t\tconf->ipv6_addr + i *  SZ_PEDIT_KEY_VAL,\r\n-\t\t\tSZ_PEDIT_KEY_VAL);\r\n-\t}\r\n-\tp_parser->sel.nkeys += keys;\r\n-}\r\n-\r\n-/**\r\n- * Set pedit key of ipv4 address\r\n- *\r\n- * @param[in] actions\r\n- *   pointer to action specification\r\n- * @param[in,out] p_parser\r\n- *   pointer to pedit_parser\r\n- */\r\n-static void\r\n-flow_tcf_pedit_key_set_ipv4_addr(const struct rte_flow_action *actions,\r\n-\t\t\t\t struct pedit_parser *p_parser)\r\n-{\r\n-\tint idx = p_parser->sel.nkeys;\r\n-\r\n-\tp_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;\r\n-\tp_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;\r\n-\tp_parser->keys[idx].off =\r\n-\t\tactions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?\r\n-\t\toffsetof(struct rte_ipv4_hdr, src_addr) :\r\n-\t\toffsetof(struct rte_ipv4_hdr, 
dst_addr);\r\n-\tp_parser->keys[idx].mask = ~UINT32_MAX;\r\n-\tp_parser->keys[idx].val =\r\n-\t\t((const struct rte_flow_action_set_ipv4 *)\r\n-\t\t actions->conf)->ipv4_addr;\r\n-\tp_parser->sel.nkeys = (++idx);\r\n-}\r\n-\r\n-/**\r\n- * Create the pedit's na attribute in netlink message\r\n- * on pre-allocate message buffer\r\n- *\r\n- * @param[in,out] nl\r\n- *   pointer to pre-allocated netlink message buffer\r\n- * @param[in,out] actions\r\n- *   pointer to pointer of actions specification.\r\n- * @param[in,out] action_flags\r\n- *   pointer to actions flags\r\n- * @param[in] item_flags\r\n- *   flags of all item presented\r\n- */\r\n-static void\r\n-flow_tcf_create_pedit_mnl_msg(struct nlmsghdr *nl,\r\n-\t\t\t      const struct rte_flow_action **actions,\r\n-\t\t\t      uint64_t item_flags)\r\n-{\r\n-\tstruct pedit_parser p_parser;\r\n-\tstruct nlattr *na_act_options;\r\n-\tstruct nlattr *na_pedit_keys;\r\n-\r\n-\tmemset(&p_parser, 0, sizeof(p_parser));\r\n-\tmnl_attr_put_strz(nl, TCA_ACT_KIND, \"pedit\");\r\n-\tna_act_options = mnl_attr_nest_start(nl, TCA_ACT_OPTIONS);\r\n-\t/* all modify header actions should be in one tc-pedit action */\r\n-\tfor (; (*actions)->type != RTE_FLOW_ACTION_TYPE_END; (*actions)++) {\r\n-\t\tswitch ((*actions)->type) {\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:\r\n-\t\t\tflow_tcf_pedit_key_set_ipv4_addr(*actions, &p_parser);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:\r\n-\t\t\tflow_tcf_pedit_key_set_ipv6_addr(*actions, &p_parser);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_DST:\r\n-\t\t\tflow_tcf_pedit_key_set_tp_port(*actions,\r\n-\t\t\t\t\t\t\t&p_parser, item_flags);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TTL:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TTL:\r\n-\t\t\tflow_tcf_pedit_key_set_dec_ttl(*actions,\r\n-\t\t\t\t\t\t\t&p_parser, item_flags);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_DST:\r\n-\t\t\tflow_tcf_pedit_key_set_mac(*actions, &p_parser);\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\tgoto pedit_mnl_msg_done;\r\n-\t\t}\r\n-\t}\r\n-pedit_mnl_msg_done:\r\n-\tp_parser.sel.action = TC_ACT_PIPE;\r\n-\tmnl_attr_put(nl, TCA_PEDIT_PARMS_EX,\r\n-\t\t     sizeof(p_parser.sel) +\r\n-\t\t     p_parser.sel.nkeys * sizeof(struct tc_pedit_key),\r\n-\t\t     &p_parser);\r\n-\tna_pedit_keys =\r\n-\t\tmnl_attr_nest_start(nl, TCA_PEDIT_KEYS_EX | NLA_F_NESTED);\r\n-\tfor (int i = 0; i < p_parser.sel.nkeys; i++) {\r\n-\t\tstruct nlattr *na_pedit_key =\r\n-\t\t\tmnl_attr_nest_start(nl,\r\n-\t\t\t\t\t    TCA_PEDIT_KEY_EX | NLA_F_NESTED);\r\n-\t\tmnl_attr_put_u16(nl, TCA_PEDIT_KEY_EX_HTYPE,\r\n-\t\t\t\t p_parser.keys_ex[i].htype);\r\n-\t\tmnl_attr_put_u16(nl, TCA_PEDIT_KEY_EX_CMD,\r\n-\t\t\t\t p_parser.keys_ex[i].cmd);\r\n-\t\tmnl_attr_nest_end(nl, na_pedit_key);\r\n-\t}\r\n-\tmnl_attr_nest_end(nl, na_pedit_keys);\r\n-\tmnl_attr_nest_end(nl, na_act_options);\r\n-\t(*actions)--;\r\n-}\r\n-\r\n-/**\r\n- * Calculate max memory size of one TC-pedit actions.\r\n- * One TC-pedit action can contain set of keys each defining\r\n- * a rewrite element (rte_flow action)\r\n- *\r\n- * @param[in,out] actions\r\n- *   actions specification.\r\n- * @param[in,out] action_flags\r\n- *   actions flags\r\n- * @param[in,out] size\r\n- *   accumulated size\r\n- * @return\r\n- *   Max memory size of one TC-pedit action\r\n- 
*/\r\n-static int\r\n-flow_tcf_get_pedit_actions_size(const struct rte_flow_action **actions,\r\n-\t\t\t\tuint64_t *action_flags)\r\n-{\r\n-\tint pedit_size = 0;\r\n-\tint keys = 0;\r\n-\tuint64_t flags = 0;\r\n-\r\n-\tpedit_size += SZ_NLATTR_NEST + /* na_act_index. */\r\n-\t\t      SZ_NLATTR_STRZ_OF(\"pedit\") +\r\n-\t\t      SZ_NLATTR_NEST; /* TCA_ACT_OPTIONS. */\r\n-\tfor (; (*actions)->type != RTE_FLOW_ACTION_TYPE_END; (*actions)++) {\r\n-\t\tswitch ((*actions)->type) {\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:\r\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(IPV4_ADDR_LEN);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_SET_IPV4_SRC;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:\r\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(IPV4_ADDR_LEN);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_SET_IPV4_DST;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:\r\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_SET_IPV6_SRC;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:\r\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_SET_IPV6_DST;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_SRC:\r\n-\t\t\t/* TCP is as same as UDP */\r\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(TP_PORT_LEN);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_SET_TP_SRC;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_DST:\r\n-\t\t\t/* TCP is as same as UDP */\r\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(TP_PORT_LEN);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_SET_TP_DST;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TTL:\r\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(TTL_LEN);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_SET_TTL;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TTL:\r\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(TTL_LEN);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_DEC_TTL;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:\r\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_SET_MAC_SRC;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_DST:\r\n-\t\t\tkeys += NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_SET_MAC_DST;\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\tgoto get_pedit_action_size_done;\r\n-\t\t}\r\n-\t}\r\n-get_pedit_action_size_done:\r\n-\t/* TCA_PEDIT_PARAMS_EX */\r\n-\tpedit_size +=\r\n-\t\tSZ_NLATTR_DATA_OF(sizeof(struct tc_pedit_sel) +\r\n-\t\t\t\t  keys * sizeof(struct tc_pedit_key));\r\n-\tpedit_size += SZ_NLATTR_NEST; /* TCA_PEDIT_KEYS */\r\n-\tpedit_size += keys *\r\n-\t\t      /* TCA_PEDIT_KEY_EX + HTYPE + CMD */\r\n-\t\t      (SZ_NLATTR_NEST + SZ_NLATTR_DATA_OF(2) +\r\n-\t\t       SZ_NLATTR_DATA_OF(2));\r\n-\t(*action_flags) |= flags;\r\n-\t(*actions)--;\r\n-\treturn pedit_size;\r\n-}\r\n-\r\n-/**\r\n- * Retrieve mask for pattern item.\r\n- *\r\n- * This function does basic sanity checks on a pattern item in order to\r\n- * return the most appropriate mask for it.\r\n- *\r\n- * @param[in] item\r\n- *   Item specification.\r\n- * @param[in] mask_default\r\n- *   Default mask for pattern item as specified by the flow API.\r\n- * @param[in] mask_supported\r\n- *   Mask fields supported by the implementation.\r\n- * @param[in] mask_empty\r\n- *   Empty mask to return when there is no specification.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- *\r\n- * @return\r\n- *   Either @p item->mask or one of the mask parameters on success, NULL\r\n- *   otherwise and rte_errno is set.\r\n- */\r\n-static const void 
*\r\n-flow_tcf_item_mask(const struct rte_flow_item *item, const void *mask_default,\r\n-\t\t   const void *mask_supported, const void *mask_empty,\r\n-\t\t   size_t mask_size, struct rte_flow_error *error)\r\n-{\r\n-\tconst uint8_t *mask;\r\n-\tsize_t i;\r\n-\r\n-\t/* item->last and item->mask cannot exist without item->spec. */\r\n-\tif (!item->spec && (item->mask || item->last)) {\r\n-\t\trte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t   \"\\\"mask\\\" or \\\"last\\\" field provided without\"\r\n-\t\t\t\t   \" a corresponding \\\"spec\\\"\");\r\n-\t\treturn NULL;\r\n-\t}\r\n-\t/* No spec, no mask, no problem. */\r\n-\tif (!item->spec)\r\n-\t\treturn mask_empty;\r\n-\tmask = item->mask ? item->mask : mask_default;\r\n-\tassert(mask);\r\n-\t/*\r\n-\t * Single-pass check to make sure that:\r\n-\t * - Mask is supported, no bits are set outside mask_supported.\r\n-\t * - Both item->spec and item->last are included in mask.\r\n-\t */\r\n-\tfor (i = 0; i != mask_size; ++i) {\r\n-\t\tif (!mask[i])\r\n-\t\t\tcontinue;\r\n-\t\tif ((mask[i] | ((const uint8_t *)mask_supported)[i]) !=\r\n-\t\t    ((const uint8_t *)mask_supported)[i]) {\r\n-\t\t\trte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t   \"unsupported field found\"\r\n-\t\t\t\t\t   \" in \\\"mask\\\"\");\r\n-\t\t\treturn NULL;\r\n-\t\t}\r\n-\t\tif (item->last &&\r\n-\t\t    (((const uint8_t *)item->spec)[i] & mask[i]) !=\r\n-\t\t    (((const uint8_t *)item->last)[i] & mask[i])) {\r\n-\t\t\trte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM_LAST,\r\n-\t\t\t\t\t   item->last,\r\n-\t\t\t\t\t   \"range between \\\"spec\\\" and \\\"last\\\"\"\r\n-\t\t\t\t\t   \" not comprised in \\\"mask\\\"\");\r\n-\t\t\treturn NULL;\r\n-\t\t}\r\n-\t}\r\n-\treturn mask;\r\n-}\r\n-\r\n-/**\r\n- * Build a conversion table between port ID and ifindex.\r\n- *\r\n- * @param[in] dev\r\n- *   Pointer to Ethernet device.\r\n- * @param[out] ptoi\r\n- *   Pointer to ptoi table.\r\n- * @param[in] len\r\n- *   Size of ptoi table provided.\r\n- *\r\n- * @return\r\n- *   Size of ptoi table filled.\r\n- */\r\n-static unsigned int\r\n-flow_tcf_build_ptoi_table(struct rte_eth_dev *dev, struct flow_tcf_ptoi *ptoi,\r\n-\t\t\t  unsigned int len)\r\n-{\r\n-\tunsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);\r\n-\tuint16_t port_id[n + 1];\r\n-\tunsigned int i;\r\n-\tunsigned int own = 0;\r\n-\r\n-\t/* At least one port is needed when no switch domain is present. */\r\n-\tif (!n) {\r\n-\t\tn = 1;\r\n-\t\tport_id[0] = dev->data->port_id;\r\n-\t} else {\r\n-\t\tn = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);\r\n-\t}\r\n-\tif (n > len)\r\n-\t\treturn 0;\r\n-\tfor (i = 0; i != n; ++i) {\r\n-\t\tstruct rte_eth_dev_info dev_info;\r\n-\r\n-\t\trte_eth_dev_info_get(port_id[i], &dev_info);\r\n-\t\tif (port_id[i] == dev->data->port_id)\r\n-\t\t\town = i;\r\n-\t\tptoi[i].port_id = port_id[i];\r\n-\t\tptoi[i].ifindex = dev_info.if_index;\r\n-\t}\r\n-\t/* Ensure first entry of ptoi[] is the current device. */\r\n-\tif (own) {\r\n-\t\tptoi[n] = ptoi[0];\r\n-\t\tptoi[0] = ptoi[own];\r\n-\t\tptoi[own] = ptoi[n];\r\n-\t}\r\n-\t/* An entry with zero ifindex terminates ptoi[]. 
*/\r\n-\tptoi[n].port_id = 0;\r\n-\tptoi[n].ifindex = 0;\r\n-\treturn n;\r\n-}\r\n-\r\n-/**\r\n- * Verify the @p attr will be correctly understood by the E-switch.\r\n- *\r\n- * @param[in] attr\r\n- *   Pointer to flow attributes\r\n- * @param[out] error\r\n- *   Pointer to error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_validate_attributes(const struct rte_flow_attr *attr,\r\n-\t\t\t     struct rte_flow_error *error)\r\n-{\r\n-\t/*\r\n-\t * Supported attributes: groups, some priorities and ingress only.\r\n-\t * group is supported only if kernel supports chain. Don't care about\r\n-\t * transfer as it is the caller's problem.\r\n-\t */\r\n-\tif (attr->group > MLX5_TCF_GROUP_ID_MAX)\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,\r\n-\t\t\t\t\t  \"group ID larger than \"\r\n-\t\t\t\t\t  RTE_STR(MLX5_TCF_GROUP_ID_MAX)\r\n-\t\t\t\t\t  \" isn't supported\");\r\n-\telse if (attr->priority > MLX5_TCF_GROUP_PRIORITY_MAX)\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\r\n-\t\t\t\t\t  attr,\r\n-\t\t\t\t\t  \"priority more than \"\r\n-\t\t\t\t\t  RTE_STR(MLX5_TCF_GROUP_PRIORITY_MAX)\r\n-\t\t\t\t\t  \" is not supported\");\r\n-\tif (!attr->ingress)\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\r\n-\t\t\t\t\t  attr, \"only ingress is supported\");\r\n-\tif (attr->egress)\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\r\n-\t\t\t\t\t  attr, \"egress is not supported\");\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_ETH item for E-Switch.\r\n- * The routine checks the L2 fields to be used in encapsulation header.\r\n- *\r\n- * @param[in] item\r\n- *   Pointer to the item structure.\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- **/\r\n-static int\r\n-flow_tcf_validate_vxlan_encap_eth(const struct rte_flow_item *item,\r\n-\t\t\t\t  struct rte_flow_error *error)\r\n-{\r\n-\tconst struct rte_flow_item_eth *spec = item->spec;\r\n-\tconst struct rte_flow_item_eth *mask = item->mask;\r\n-\r\n-\tif (!spec) {\r\n-\t\t/*\r\n-\t\t * Specification for L2 addresses can be empty\r\n-\t\t * because these ones are optional and not\r\n-\t\t * required directly by tc rule. Kernel tries\r\n-\t\t * to resolve these ones on its own\r\n-\t\t */\r\n-\t\treturn 0;\r\n-\t}\r\n-\tif (!mask) {\r\n-\t\t/* If mask is not specified use the default one. 
*/\r\n-\t\tmask = &rte_flow_item_eth_mask;\r\n-\t}\r\n-\tif (memcmp(&mask->dst,\r\n-\t\t   &flow_tcf_mask_empty.eth.dst,\r\n-\t\t   sizeof(flow_tcf_mask_empty.eth.dst))) {\r\n-\t\tif (memcmp(&mask->dst,\r\n-\t\t\t   &rte_flow_item_eth_mask.dst,\r\n-\t\t\t   sizeof(rte_flow_item_eth_mask.dst)))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t \" \\\"eth.dst\\\" field\");\r\n-\t}\r\n-\tif (memcmp(&mask->src,\r\n-\t\t   &flow_tcf_mask_empty.eth.src,\r\n-\t\t   sizeof(flow_tcf_mask_empty.eth.src))) {\r\n-\t\tif (memcmp(&mask->src,\r\n-\t\t\t   &rte_flow_item_eth_mask.src,\r\n-\t\t\t   sizeof(rte_flow_item_eth_mask.src)))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t \" \\\"eth.src\\\" field\");\r\n-\t}\r\n-\tif (mask->type != RTE_BE16(0x0000)) {\r\n-\t\tif (mask->type != RTE_BE16(0xffff))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t \" \\\"eth.type\\\" field\");\r\n-\t\tDRV_LOG(WARNING,\r\n-\t\t\t\"outer ethernet type field\"\r\n-\t\t\t\" cannot be forced for vxlan\"\r\n-\t\t\t\" encapsulation, parameter ignored\");\r\n-\t}\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_IPV4 item for E-Switch.\r\n- * The routine checks the IPv4 fields to be used in encapsulation header.\r\n- *\r\n- * @param[in] item\r\n- *   Pointer to the item structure.\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- **/\r\n-static int\r\n-flow_tcf_validate_vxlan_encap_ipv4(const struct rte_flow_item *item,\r\n-\t\t\t\t   struct rte_flow_error *error)\r\n-{\r\n-\tconst struct rte_flow_item_ipv4 *spec = item->spec;\r\n-\tconst struct rte_flow_item_ipv4 *mask = item->mask;\r\n-\r\n-\tif (!spec) {\r\n-\t\t/*\r\n-\t\t * Specification for IP addresses cannot be empty\r\n-\t\t * because it is required by tunnel_key parameter.\r\n-\t\t */\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"NULL outer ipv4 address\"\r\n-\t\t\t\t\t  \" specification for vxlan\"\r\n-\t\t\t\t\t  \" encapsulation\");\r\n-\t}\r\n-\tif (!mask)\r\n-\t\tmask = &rte_flow_item_ipv4_mask;\r\n-\tif (mask->hdr.dst_addr != RTE_BE32(0x00000000)) {\r\n-\t\tif (mask->hdr.dst_addr != RTE_BE32(0xffffffff))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t \" \\\"ipv4.hdr.dst_addr\\\" field\"\r\n-\t\t\t\t \" for vxlan encapsulation\");\r\n-\t\t/* More IPv4 address validations can be put here. 
*/\r\n-\t} else {\r\n-\t\t/*\r\n-\t\t * Kernel uses the destination IP address to determine\r\n-\t\t * the routing path and obtain the MAC destination\r\n-\t\t * address, so IP destination address must be\r\n-\t\t * specified in the tc rule.\r\n-\t\t */\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"outer ipv4 destination address\"\r\n-\t\t\t\t\t  \" must be specified for\"\r\n-\t\t\t\t\t  \" vxlan encapsulation\");\r\n-\t}\r\n-\tif (mask->hdr.src_addr != RTE_BE32(0x00000000)) {\r\n-\t\tif (mask->hdr.src_addr != RTE_BE32(0xffffffff))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t \" \\\"ipv4.hdr.src_addr\\\" field\"\r\n-\t\t\t\t \" for vxlan encapsulation\");\r\n-\t\t/* More IPv4 address validations can be put here. */\r\n-\t} else {\r\n-\t\t/*\r\n-\t\t * Kernel uses the source IP address to select the\r\n-\t\t * interface for egress encapsulated traffic, so\r\n-\t\t * it must be specified in the tc rule.\r\n-\t\t */\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"outer ipv4 source address\"\r\n-\t\t\t\t\t  \" must be specified for\"\r\n-\t\t\t\t\t  \" vxlan encapsulation\");\r\n-\t}\r\n-\tif (mask->hdr.type_of_service &&\r\n-\t    mask->hdr.type_of_service != 0xff)\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t  \"no support for partial mask on\"\r\n-\t\t\t\t\t  \" \\\"ipv4.hdr.type_of_service\\\" field\"\r\n-\t\t\t\t\t  \" for vxlan encapsulation\");\r\n-\tif (mask->hdr.time_to_live &&\r\n-\t    mask->hdr.time_to_live != 0xff)\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t  \"no support for partial mask on\"\r\n-\t\t\t\t\t  \" \\\"ipv4.hdr.time_to_live\\\" field\"\r\n-\t\t\t\t\t  \" for vxlan encapsulation\");\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_IPV6 item for E-Switch.\r\n- * The routine checks the IPv6 fields to be used in encapsulation header.\r\n- *\r\n- * @param[in] item\r\n- *   Pointer to the item structure.\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- **/\r\n-static int\r\n-flow_tcf_validate_vxlan_encap_ipv6(const struct rte_flow_item *item,\r\n-\t\t\t\t   struct rte_flow_error *error)\r\n-{\r\n-\tconst struct rte_flow_item_ipv6 *spec = item->spec;\r\n-\tconst struct rte_flow_item_ipv6 *mask = item->mask;\r\n-\tuint8_t msk6;\r\n-\r\n-\tif (!spec) {\r\n-\t\t/*\r\n-\t\t * Specification for IP addresses cannot be empty\r\n-\t\t * because it is required by tunnel_key parameter.\r\n-\t\t */\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"NULL outer ipv6 address\"\r\n-\t\t\t\t\t  \" specification for\"\r\n-\t\t\t\t\t  \" vxlan encapsulation\");\r\n-\t}\r\n-\tif (!mask)\r\n-\t\tmask = &rte_flow_item_ipv6_mask;\r\n-\tif (memcmp(&mask->hdr.dst_addr,\r\n-\t\t   &flow_tcf_mask_empty.ipv6.hdr.dst_addr,\r\n-\t\t   IPV6_ADDR_LEN)) {\r\n-\t\tif (memcmp(&mask->hdr.dst_addr,\r\n-\t\t\t   &rte_flow_item_ipv6_mask.hdr.dst_addr,\r\n-\t\t\t   IPV6_ADDR_LEN))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t 
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t\t \" \\\"ipv6.hdr.dst_addr\\\" field\"\r\n-\t\t\t\t\t \" for vxlan encapsulation\");\r\n-\t\t/* More IPv6 address validations can be put here. */\r\n-\t} else {\r\n-\t\t/*\r\n-\t\t * Kernel uses the destination IP address to determine\r\n-\t\t * the routing path and obtain the MAC destination\r\n-\t\t * address (heigh or gate), so IP destination address\r\n-\t\t * must be specified within the tc rule.\r\n-\t\t */\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"outer ipv6 destination address\"\r\n-\t\t\t\t\t  \" must be specified for\"\r\n-\t\t\t\t\t  \" vxlan encapsulation\");\r\n-\t}\r\n-\tif (memcmp(&mask->hdr.src_addr,\r\n-\t\t   &flow_tcf_mask_empty.ipv6.hdr.src_addr,\r\n-\t\t   IPV6_ADDR_LEN)) {\r\n-\t\tif (memcmp(&mask->hdr.src_addr,\r\n-\t\t\t   &rte_flow_item_ipv6_mask.hdr.src_addr,\r\n-\t\t\t   IPV6_ADDR_LEN))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t\t \" \\\"ipv6.hdr.src_addr\\\" field\"\r\n-\t\t\t\t\t \" for vxlan encapsulation\");\r\n-\t\t/* More L3 address validation can be put here. */\r\n-\t} else {\r\n-\t\t/*\r\n-\t\t * Kernel uses the source IP address to select the\r\n-\t\t * interface for egress encapsulated traffic, so\r\n-\t\t * it must be specified in the tc rule.\r\n-\t\t */\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"outer L3 source address\"\r\n-\t\t\t\t\t  \" must be specified for\"\r\n-\t\t\t\t\t  \" vxlan encapsulation\");\r\n-\t}\r\n-\tmsk6 = (rte_be_to_cpu_32(mask->hdr.vtc_flow) >>\r\n-\t\tRTE_IPV6_HDR_TC_SHIFT) & 0xff;\r\n-\tif (msk6 && msk6 != 0xff)\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t  \"no support for partial mask on\"\r\n-\t\t\t\t\t  \" \\\"ipv6.hdr.vtc_flow.tos\\\" field\"\r\n-\t\t\t\t\t  \" for vxlan encapsulation\");\r\n-\tif (mask->hdr.hop_limits && mask->hdr.hop_limits != 0xff)\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t  \"no support for partial mask on\"\r\n-\t\t\t\t\t  \" \\\"ipv6.hdr.hop_limits\\\" field\"\r\n-\t\t\t\t\t  \" for vxlan encapsulation\");\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_UDP item for E-Switch.\r\n- * The routine checks the UDP fields to be used in encapsulation header.\r\n- *\r\n- * @param[in] item\r\n- *   Pointer to the item structure.\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- **/\r\n-static int\r\n-flow_tcf_validate_vxlan_encap_udp(const struct rte_flow_item *item,\r\n-\t\t\t\t  struct rte_flow_error *error)\r\n-{\r\n-\tconst struct rte_flow_item_udp *spec = item->spec;\r\n-\tconst struct rte_flow_item_udp *mask = item->mask;\r\n-\r\n-\tif (!spec) {\r\n-\t\t/*\r\n-\t\t * Specification for UDP ports cannot be empty\r\n-\t\t * because it is required by tunnel_key parameter.\r\n-\t\t */\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"NULL UDP port specification \"\r\n-\t\t\t\t\t  \" for vxlan encapsulation\");\r\n-\t}\r\n-\tif (!mask)\r\n-\t\tmask = 
&rte_flow_item_udp_mask;\r\n-\tif (mask->hdr.dst_port != RTE_BE16(0x0000)) {\r\n-\t\tif (mask->hdr.dst_port != RTE_BE16(0xffff))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t\t \" \\\"udp.hdr.dst_port\\\" field\"\r\n-\t\t\t\t\t \" for vxlan encapsulation\");\r\n-\t\tif (!spec->hdr.dst_port)\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t \"outer UDP remote port cannot be\"\r\n-\t\t\t\t\t \" 0 for vxlan encapsulation\");\r\n-\t} else {\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"outer UDP remote port\"\r\n-\t\t\t\t\t  \" must be specified for\"\r\n-\t\t\t\t\t  \" vxlan encapsulation\");\r\n-\t}\r\n-\tif (mask->hdr.src_port != RTE_BE16(0x0000)) {\r\n-\t\tif (mask->hdr.src_port != RTE_BE16(0xffff))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t\t \" \\\"udp.hdr.src_port\\\" field\"\r\n-\t\t\t\t\t \" for vxlan encapsulation\");\r\n-\t\tDRV_LOG(WARNING,\r\n-\t\t\t\"outer UDP source port cannot be\"\r\n-\t\t\t\" forced for vxlan encapsulation,\"\r\n-\t\t\t\" parameter ignored\");\r\n-\t}\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_VXLAN item for E-Switch.\r\n- * The routine checks the VNIP fields to be used in encapsulation header.\r\n- *\r\n- * @param[in] item\r\n- *   Pointer to the item structure.\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- **/\r\n-static int\r\n-flow_tcf_validate_vxlan_encap_vni(const struct rte_flow_item *item,\r\n-\t\t\t\t  struct rte_flow_error *error)\r\n-{\r\n-\tconst struct rte_flow_item_vxlan *spec = item->spec;\r\n-\tconst struct rte_flow_item_vxlan *mask = item->mask;\r\n-\r\n-\tif (!spec) {\r\n-\t\t/* Outer VNI is required by tunnel_key parameter. 
*/\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"NULL VNI specification\"\r\n-\t\t\t\t\t  \" for vxlan encapsulation\");\r\n-\t}\r\n-\tif (!mask)\r\n-\t\tmask = &rte_flow_item_vxlan_mask;\r\n-\tif (!mask->vni[0] && !mask->vni[1] && !mask->vni[2])\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"outer VNI must be specified \"\r\n-\t\t\t\t\t  \"for vxlan encapsulation\");\r\n-\tif (mask->vni[0] != 0xff ||\r\n-\t    mask->vni[1] != 0xff ||\r\n-\t    mask->vni[2] != 0xff)\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t  \"no support for partial mask on\"\r\n-\t\t\t\t\t  \" \\\"vxlan.vni\\\" field\");\r\n-\r\n-\tif (!spec->vni[0] && !spec->vni[1] && !spec->vni[2])\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\r\n-\t\t\t\t\t  \"vxlan vni cannot be 0\");\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Validate VXLAN_ENCAP action item list for E-Switch.\r\n- * The routine checks items to be used in encapsulation header.\r\n- *\r\n- * @param[in] action\r\n- *   Pointer to the VXLAN_ENCAP action structure.\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- **/\r\n-static int\r\n-flow_tcf_validate_vxlan_encap(const struct rte_flow_action *action,\r\n-\t\t\t      struct rte_flow_error *error)\r\n-{\r\n-\tconst struct rte_flow_item *items;\r\n-\tint ret;\r\n-\tuint32_t item_flags = 0;\r\n-\r\n-\tif (!action->conf)\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\r\n-\t\t\t\t\t  \"Missing vxlan tunnel\"\r\n-\t\t\t\t\t  \" action configuration\");\r\n-\titems = ((const struct rte_flow_action_vxlan_encap *)\r\n-\t\t\t\t\taction->conf)->definition;\r\n-\tif (!items)\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\r\n-\t\t\t\t\t  \"Missing vxlan tunnel\"\r\n-\t\t\t\t\t  \" encapsulation parameters\");\r\n-\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\r\n-\t\tswitch (items->type) {\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VOID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\r\n-\t\t\tret = mlx5_flow_validate_item_eth(items, item_flags,\r\n-\t\t\t\t\t\t\t  error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\tret = flow_tcf_validate_vxlan_encap_eth(items, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= MLX5_FLOW_LAYER_OUTER_L2;\r\n-\t\t\tbreak;\r\n-\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\r\n-\t\t\tret = mlx5_flow_validate_item_ipv4\r\n-\t\t\t\t\t(items, item_flags,\r\n-\t\t\t\t\t &flow_tcf_mask_supported.ipv4, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\tret = flow_tcf_validate_vxlan_encap_ipv4(items, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\r\n-\t\t\tret = mlx5_flow_validate_item_ipv6\r\n-\t\t\t\t\t(items, item_flags,\r\n-\t\t\t\t\t &flow_tcf_mask_supported.ipv6, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\tret = flow_tcf_validate_vxlan_encap_ipv6(items, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;\r\n-\t\t\tbreak;\r\n-\t\tcase 
RTE_FLOW_ITEM_TYPE_UDP:\r\n-\t\t\tret = mlx5_flow_validate_item_udp(items, item_flags,\r\n-\t\t\t\t\t\t\t   0xFF, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\tret = flow_tcf_validate_vxlan_encap_udp(items, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\r\n-\t\t\tret = mlx5_flow_validate_item_vxlan(items,\r\n-\t\t\t\t\t\t\t    item_flags, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\tret = flow_tcf_validate_vxlan_encap_vni(items, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= MLX5_FLOW_LAYER_VXLAN;\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM, items,\r\n-\t\t\t\t\t \"vxlan encap item not supported\");\r\n-\t\t}\r\n-\t}\r\n-\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\r\n-\t\t\t\t\t  \"no outer IP layer found\"\r\n-\t\t\t\t\t  \" for vxlan encapsulation\");\r\n-\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\r\n-\t\t\t\t\t  \"no outer UDP layer found\"\r\n-\t\t\t\t\t  \" for vxlan encapsulation\");\r\n-\tif (!(item_flags & MLX5_FLOW_LAYER_VXLAN))\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, action,\r\n-\t\t\t\t\t  \"no VXLAN VNI found\"\r\n-\t\t\t\t\t  \" for vxlan encapsulation\");\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Validate outer RTE_FLOW_ITEM_TYPE_UDP item if tunnel item\r\n- * RTE_FLOW_ITEM_TYPE_VXLAN is present in item list.\r\n- *\r\n- * @param[in] udp\r\n- *   Outer UDP layer item (if any, NULL otherwise).\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- **/\r\n-static int\r\n-flow_tcf_validate_vxlan_decap_udp(const struct rte_flow_item *udp,\r\n-\t\t\t\t  struct rte_flow_error *error)\r\n-{\r\n-\tconst struct rte_flow_item_udp *spec = udp->spec;\r\n-\tconst struct rte_flow_item_udp *mask = udp->mask;\r\n-\r\n-\tif (!spec)\r\n-\t\t/*\r\n-\t\t * Specification for UDP ports cannot be empty\r\n-\t\t * because it is required as decap parameter.\r\n-\t\t */\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, udp,\r\n-\t\t\t\t\t  \"NULL UDP port specification\"\r\n-\t\t\t\t\t  \" for VXLAN decapsulation\");\r\n-\tif (!mask)\r\n-\t\tmask = &rte_flow_item_udp_mask;\r\n-\tif (mask->hdr.dst_port != RTE_BE16(0x0000)) {\r\n-\t\tif (mask->hdr.dst_port != RTE_BE16(0xffff))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t\t \" \\\"udp.hdr.dst_port\\\" field\");\r\n-\t\tif (!spec->hdr.dst_port)\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM, udp,\r\n-\t\t\t\t\t \"zero decap local UDP port\");\r\n-\t} else {\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, udp,\r\n-\t\t\t\t\t  \"outer UDP destination port must be \"\r\n-\t\t\t\t\t  \"specified for vxlan decapsulation\");\r\n-\t}\r\n-\tif (mask->hdr.src_port != RTE_BE16(0x0000)) {\r\n-\t\tif (mask->hdr.src_port != 
RTE_BE16(0xffff))\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,\r\n-\t\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t\t \" \\\"udp.hdr.src_port\\\" field\");\r\n-\t\tDRV_LOG(WARNING,\r\n-\t\t\t\"outer UDP local port cannot be \"\r\n-\t\t\t\"forced for VXLAN encapsulation, \"\r\n-\t\t\t\"parameter ignored\");\r\n-\t}\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Validate flow for E-Switch.\r\n- *\r\n- * @param[in] priv\r\n- *   Pointer to the priv structure.\r\n- * @param[in] attr\r\n- *   Pointer to the flow attributes.\r\n- * @param[in] items\r\n- *   Pointer to the list of items.\r\n- * @param[in] actions\r\n- *   Pointer to the list of actions.\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_validate(struct rte_eth_dev *dev,\r\n-\t\t  const struct rte_flow_attr *attr,\r\n-\t\t  const struct rte_flow_item items[],\r\n-\t\t  const struct rte_flow_action actions[],\r\n-\t\t  struct rte_flow_error *error)\r\n-{\r\n-\tunion {\r\n-\t\tconst struct rte_flow_item_port_id *port_id;\r\n-\t\tconst struct rte_flow_item_eth *eth;\r\n-\t\tconst struct rte_flow_item_vlan *vlan;\r\n-\t\tconst struct rte_flow_item_ipv4 *ipv4;\r\n-\t\tconst struct rte_flow_item_ipv6 *ipv6;\r\n-\t\tconst struct rte_flow_item_tcp *tcp;\r\n-\t\tconst struct rte_flow_item_udp *udp;\r\n-\t\tconst struct rte_flow_item_vxlan *vxlan;\r\n-\t} spec, mask;\r\n-\tunion {\r\n-\t\tconst struct rte_flow_action_port_id *port_id;\r\n-\t\tconst struct rte_flow_action_jump *jump;\r\n-\t\tconst struct rte_flow_action_of_push_vlan *of_push_vlan;\r\n-\t\tconst struct rte_flow_action_of_set_vlan_vid *\r\n-\t\t\tof_set_vlan_vid;\r\n-\t\tconst struct rte_flow_action_of_set_vlan_pcp *\r\n-\t\t\tof_set_vlan_pcp;\r\n-\t\tconst struct rte_flow_action_vxlan_encap *vxlan_encap;\r\n-\t\tconst struct rte_flow_action_set_ipv4 *set_ipv4;\r\n-\t\tconst struct rte_flow_action_set_ipv6 *set_ipv6;\r\n-\t} conf;\r\n-\tconst struct rte_flow_item *outer_udp = NULL;\r\n-\trte_be16_t inner_etype = RTE_BE16(ETH_P_ALL);\r\n-\trte_be16_t outer_etype = RTE_BE16(ETH_P_ALL);\r\n-\trte_be16_t vlan_etype = RTE_BE16(ETH_P_ALL);\r\n-\tuint64_t item_flags = 0;\r\n-\tuint64_t action_flags = 0;\r\n-\tuint8_t next_protocol = 0xff;\r\n-\tunsigned int tcm_ifindex = 0;\r\n-\tuint8_t pedit_validated = 0;\r\n-\tstruct flow_tcf_ptoi ptoi[PTOI_TABLE_SZ_MAX(dev)];\r\n-\tstruct rte_eth_dev *port_id_dev = NULL;\r\n-\tbool in_port_id_set;\r\n-\tint ret;\r\n-\r\n-\tclaim_nonzero(flow_tcf_build_ptoi_table(dev, ptoi,\r\n-\t\t\t\t\t\tPTOI_TABLE_SZ_MAX(dev)));\r\n-\tret = flow_tcf_validate_attributes(attr, error);\r\n-\tif (ret < 0)\r\n-\t\treturn ret;\r\n-\tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {\r\n-\t\tunsigned int i;\r\n-\t\tuint64_t current_action_flag = 0;\r\n-\r\n-\t\tswitch (actions->type) {\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_PORT_ID:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_PORT_ID;\r\n-\t\t\tif (!actions->conf)\r\n-\t\t\t\tbreak;\r\n-\t\t\tconf.port_id = actions->conf;\r\n-\t\t\tif (conf.port_id->original)\r\n-\t\t\t\ti = 0;\r\n-\t\t\telse\r\n-\t\t\t\tfor (i = 0; ptoi[i].ifindex; ++i)\r\n-\t\t\t\t\tif (ptoi[i].port_id == conf.port_id->id)\r\n-\t\t\t\t\t\tbreak;\r\n-\t\t\tif (!ptoi[i].ifindex)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, 
ENODEV,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION_CONF,\r\n-\t\t\t\t\t conf.port_id,\r\n-\t\t\t\t\t \"missing data to convert port ID to\"\r\n-\t\t\t\t\t \" ifindex\");\r\n-\t\t\tport_id_dev = &rte_eth_devices[conf.port_id->id];\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_JUMP:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_JUMP;\r\n-\t\t\tif (!actions->conf)\r\n-\t\t\t\tbreak;\r\n-\t\t\tconf.jump = actions->conf;\r\n-\t\t\tif (attr->group >= conf.jump->group)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t actions,\r\n-\t\t\t\t\t \"can jump only to a group forward\");\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_DROP:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_DROP;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_OF_POP_VLAN;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: {\r\n-\t\t\trte_be16_t ethertype;\r\n-\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_OF_PUSH_VLAN;\r\n-\t\t\tif (!actions->conf)\r\n-\t\t\t\tbreak;\r\n-\t\t\tconf.of_push_vlan = actions->conf;\r\n-\t\t\tethertype = conf.of_push_vlan->ethertype;\r\n-\t\t\tif (ethertype != RTE_BE16(ETH_P_8021Q) &&\r\n-\t\t\t    ethertype != RTE_BE16(ETH_P_8021AD))\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION, actions,\r\n-\t\t\t\t\t \"vlan push TPID must be \"\r\n-\t\t\t\t\t \"802.1Q or 802.1AD\");\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:\r\n-\t\t\tif (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION, actions,\r\n-\t\t\t\t\t \"vlan modify is not supported,\"\r\n-\t\t\t\t\t \" set action must follow push action\");\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_OF_SET_VLAN_VID;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:\r\n-\t\t\tif (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION, actions,\r\n-\t\t\t\t\t \"vlan modify is not supported,\"\r\n-\t\t\t\t\t \" set action must follow push action\");\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_VXLAN_DECAP;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\r\n-\t\t\tret = flow_tcf_validate_vxlan_encap(actions, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_VXLAN_ENCAP;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_SET_IPV4_SRC;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_SET_IPV4_DST;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_SET_IPV6_SRC;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_SET_IPV6_DST;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_SRC:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_SET_TP_SRC;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_DST:\r\n-\t\t\tcurrent_action_flag = 
MLX5_FLOW_ACTION_SET_TP_DST;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TTL:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_SET_TTL;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TTL:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_DEC_TTL;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_SET_MAC_SRC;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_DST:\r\n-\t\t\tcurrent_action_flag = MLX5_FLOW_ACTION_SET_MAC_DST;\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"action not supported\");\r\n-\t\t}\r\n-\t\tif (current_action_flag & MLX5_TCF_CONFIG_ACTIONS) {\r\n-\t\t\tif (!actions->conf)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ACTION_CONF,\r\n-\t\t\t\t\t actions,\r\n-\t\t\t\t\t \"action configuration not set\");\r\n-\t\t}\r\n-\t\tif ((current_action_flag & MLX5_TCF_PEDIT_ACTIONS) &&\r\n-\t\t    pedit_validated)\r\n-\t\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"set actions should be \"\r\n-\t\t\t\t\t\t  \"listed successively\");\r\n-\t\tif ((current_action_flag & ~MLX5_TCF_PEDIT_ACTIONS) &&\r\n-\t\t    (action_flags & MLX5_TCF_PEDIT_ACTIONS))\r\n-\t\t\tpedit_validated = 1;\r\n-\t\tif ((current_action_flag & MLX5_TCF_FATE_ACTIONS) &&\r\n-\t\t    (action_flags & MLX5_TCF_FATE_ACTIONS))\r\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"can't have multiple fate\"\r\n-\t\t\t\t\t\t  \" actions\");\r\n-\t\tif ((current_action_flag & MLX5_TCF_VXLAN_ACTIONS) &&\r\n-\t\t    (action_flags & MLX5_TCF_VXLAN_ACTIONS))\r\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"can't have multiple vxlan\"\r\n-\t\t\t\t\t\t  \" actions\");\r\n-\t\tif ((current_action_flag & MLX5_TCF_VXLAN_ACTIONS) &&\r\n-\t\t    (action_flags & MLX5_TCF_VLAN_ACTIONS))\r\n-\t\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"can't have vxlan and vlan\"\r\n-\t\t\t\t\t\t  \" actions in the same rule\");\r\n-\t\taction_flags |= current_action_flag;\r\n-\t}\r\n-\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\r\n-\t\tunsigned int i;\r\n-\r\n-\t\tswitch (items->type) {\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VOID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_PORT_ID:\r\n-\t\t\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM, items,\r\n-\t\t\t\t\t \"inner tunnel port id\"\r\n-\t\t\t\t\t \" item is not supported\");\r\n-\t\t\tmask.port_id = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_port_id_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.port_id,\r\n-\t\t\t\t &flow_tcf_mask_empty.port_id,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.port_id),\r\n-\t\t\t\t error);\r\n-\t\t\tif (!mask.port_id)\r\n-\t\t\t\treturn -rte_errno;\r\n-\t\t\tif (mask.port_id == &flow_tcf_mask_empty.port_id) {\r\n-\t\t\t\tin_port_id_set = 1;\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t\tspec.port_id = items->spec;\r\n-\t\t\tif (mask.port_id->id && mask.port_id->id != 0xffffffff)\r\n-\t\t\t\treturn 
rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK,\r\n-\t\t\t\t\t mask.port_id,\r\n-\t\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t\t \" \\\"id\\\" field\");\r\n-\t\t\tif (!mask.port_id->id)\r\n-\t\t\t\ti = 0;\r\n-\t\t\telse\r\n-\t\t\t\tfor (i = 0; ptoi[i].ifindex; ++i)\r\n-\t\t\t\t\tif (ptoi[i].port_id == spec.port_id->id)\r\n-\t\t\t\t\t\tbreak;\r\n-\t\t\tif (!ptoi[i].ifindex)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENODEV,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_SPEC,\r\n-\t\t\t\t\t spec.port_id,\r\n-\t\t\t\t\t \"missing data to convert port ID to\"\r\n-\t\t\t\t\t \" ifindex\");\r\n-\t\t\tif (in_port_id_set && ptoi[i].ifindex != tcm_ifindex)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_SPEC,\r\n-\t\t\t\t\t spec.port_id,\r\n-\t\t\t\t\t \"cannot match traffic for\"\r\n-\t\t\t\t\t \" several port IDs through\"\r\n-\t\t\t\t\t \" a single flow rule\");\r\n-\t\t\ttcm_ifindex = ptoi[i].ifindex;\r\n-\t\t\tin_port_id_set = 1;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\r\n-\t\t\tret = mlx5_flow_validate_item_eth(items, item_flags,\r\n-\t\t\t\t\t\t\t  error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?\r\n-\t\t\t\t      MLX5_FLOW_LAYER_INNER_L2 :\r\n-\t\t\t\t      MLX5_FLOW_LAYER_OUTER_L2;\r\n-\t\t\t/* TODO:\r\n-\t\t\t * Redundant check due to different supported mask.\r\n-\t\t\t * Same for the rest of items.\r\n-\t\t\t */\r\n-\t\t\tmask.eth = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_eth_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.eth,\r\n-\t\t\t\t &flow_tcf_mask_empty.eth,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.eth),\r\n-\t\t\t\t error);\r\n-\t\t\tif (!mask.eth)\r\n-\t\t\t\treturn -rte_errno;\r\n-\t\t\tif (mask.eth->type && mask.eth->type !=\r\n-\t\t\t    RTE_BE16(0xffff))\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK,\r\n-\t\t\t\t\t mask.eth,\r\n-\t\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t\t \" \\\"type\\\" field\");\r\n-\t\t\tassert(items->spec);\r\n-\t\t\tspec.eth = items->spec;\r\n-\t\t\tif (mask.eth->type &&\r\n-\t\t\t    (item_flags & MLX5_FLOW_LAYER_TUNNEL) &&\r\n-\t\t\t    inner_etype != RTE_BE16(ETH_P_ALL) &&\r\n-\t\t\t    inner_etype != spec.eth->type)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t items,\r\n-\t\t\t\t\t \"inner eth_type conflict\");\r\n-\t\t\tif (mask.eth->type &&\r\n-\t\t\t    !(item_flags & MLX5_FLOW_LAYER_TUNNEL) &&\r\n-\t\t\t    outer_etype != RTE_BE16(ETH_P_ALL) &&\r\n-\t\t\t    outer_etype != spec.eth->type)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t items,\r\n-\t\t\t\t\t \"outer eth_type conflict\");\r\n-\t\t\tif (mask.eth->type) {\r\n-\t\t\t\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL)\r\n-\t\t\t\t\tinner_etype = spec.eth->type;\r\n-\t\t\t\telse\r\n-\t\t\t\t\touter_etype = spec.eth->type;\r\n-\t\t\t}\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\r\n-\t\t\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM, items,\r\n-\t\t\t\t\t \"inner tunnel VLAN\"\r\n-\t\t\t\t\t \" is not supported\");\r\n-\t\t\tret = mlx5_flow_validate_item_vlan(items, item_flags,\r\n-\t\t\t\t\t\t\t   error);\r\n-\t\t\tif 
(ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= MLX5_FLOW_LAYER_OUTER_VLAN;\r\n-\t\t\tmask.vlan = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_vlan_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.vlan,\r\n-\t\t\t\t &flow_tcf_mask_empty.vlan,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.vlan),\r\n-\t\t\t\t error);\r\n-\t\t\tif (!mask.vlan)\r\n-\t\t\t\treturn -rte_errno;\r\n-\t\t\tif ((mask.vlan->tci & RTE_BE16(0xe000) &&\r\n-\t\t\t     (mask.vlan->tci & RTE_BE16(0xe000)) !=\r\n-\t\t\t      RTE_BE16(0xe000)) ||\r\n-\t\t\t    (mask.vlan->tci & RTE_BE16(0x0fff) &&\r\n-\t\t\t     (mask.vlan->tci & RTE_BE16(0x0fff)) !=\r\n-\t\t\t      RTE_BE16(0x0fff)) ||\r\n-\t\t\t    (mask.vlan->inner_type &&\r\n-\t\t\t     mask.vlan->inner_type != RTE_BE16(0xffff)))\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK,\r\n-\t\t\t\t\t mask.vlan,\r\n-\t\t\t\t\t \"no support for partial masks on\"\r\n-\t\t\t\t\t \" \\\"tci\\\" (PCP and VID parts) and\"\r\n-\t\t\t\t\t \" \\\"inner_type\\\" fields\");\r\n-\t\t\tif (outer_etype != RTE_BE16(ETH_P_ALL) &&\r\n-\t\t\t    outer_etype != RTE_BE16(ETH_P_8021Q))\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t items,\r\n-\t\t\t\t\t \"outer eth_type conflict,\"\r\n-\t\t\t\t\t \" must be 802.1Q\");\r\n-\t\t\touter_etype = RTE_BE16(ETH_P_8021Q);\r\n-\t\t\tassert(items->spec);\r\n-\t\t\tspec.vlan = items->spec;\r\n-\t\t\tif (mask.vlan->inner_type &&\r\n-\t\t\t    vlan_etype != RTE_BE16(ETH_P_ALL) &&\r\n-\t\t\t    vlan_etype != spec.vlan->inner_type)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t items,\r\n-\t\t\t\t\t \"vlan eth_type conflict\");\r\n-\t\t\tif (mask.vlan->inner_type)\r\n-\t\t\t\tvlan_etype = spec.vlan->inner_type;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\r\n-\t\t\tret = mlx5_flow_validate_item_ipv4\r\n-\t\t\t\t\t(items, item_flags,\r\n-\t\t\t\t\t &flow_tcf_mask_supported.ipv4, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?\r\n-\t\t\t\t      MLX5_FLOW_LAYER_INNER_L3_IPV4 :\r\n-\t\t\t\t      MLX5_FLOW_LAYER_OUTER_L3_IPV4;\r\n-\t\t\tmask.ipv4 = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_ipv4_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.ipv4,\r\n-\t\t\t\t &flow_tcf_mask_empty.ipv4,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.ipv4),\r\n-\t\t\t\t error);\r\n-\t\t\tif (!mask.ipv4)\r\n-\t\t\t\treturn -rte_errno;\r\n-\t\t\tif (mask.ipv4->hdr.next_proto_id &&\r\n-\t\t\t    mask.ipv4->hdr.next_proto_id != 0xff)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK,\r\n-\t\t\t\t\t mask.ipv4,\r\n-\t\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t\t \" \\\"hdr.next_proto_id\\\" field\");\r\n-\t\t\telse if (mask.ipv4->hdr.next_proto_id)\r\n-\t\t\t\tnext_protocol =\r\n-\t\t\t\t\t((const struct rte_flow_item_ipv4 *)\r\n-\t\t\t\t\t (items->spec))->hdr.next_proto_id;\r\n-\t\t\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL) {\r\n-\t\t\t\tif (inner_etype != RTE_BE16(ETH_P_ALL) &&\r\n-\t\t\t\t    inner_etype != RTE_BE16(ETH_P_IP))\r\n-\t\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t\t items,\r\n-\t\t\t\t\t\t \"inner eth_type conflict,\"\r\n-\t\t\t\t\t\t \" IPv4 is required\");\r\n-\t\t\t\tinner_etype = 
RTE_BE16(ETH_P_IP);\r\n-\t\t\t} else if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN) {\r\n-\t\t\t\tif (vlan_etype != RTE_BE16(ETH_P_ALL) &&\r\n-\t\t\t\t    vlan_etype != RTE_BE16(ETH_P_IP))\r\n-\t\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t\t items,\r\n-\t\t\t\t\t\t \"vlan eth_type conflict,\"\r\n-\t\t\t\t\t\t \" IPv4 is required\");\r\n-\t\t\t\tvlan_etype = RTE_BE16(ETH_P_IP);\r\n-\t\t\t} else {\r\n-\t\t\t\tif (outer_etype != RTE_BE16(ETH_P_ALL) &&\r\n-\t\t\t\t    outer_etype != RTE_BE16(ETH_P_IP))\r\n-\t\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t\t items,\r\n-\t\t\t\t\t\t \"eth_type conflict,\"\r\n-\t\t\t\t\t\t \" IPv4 is required\");\r\n-\t\t\t\touter_etype = RTE_BE16(ETH_P_IP);\r\n-\t\t\t}\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\r\n-\t\t\tret = mlx5_flow_validate_item_ipv6\r\n-\t\t\t\t\t(items, item_flags,\r\n-\t\t\t\t\t &flow_tcf_mask_supported.ipv6, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?\r\n-\t\t\t\t      MLX5_FLOW_LAYER_INNER_L3_IPV6 :\r\n-\t\t\t\t      MLX5_FLOW_LAYER_OUTER_L3_IPV6;\r\n-\t\t\tmask.ipv6 = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_ipv6_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.ipv6,\r\n-\t\t\t\t &flow_tcf_mask_empty.ipv6,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.ipv6),\r\n-\t\t\t\t error);\r\n-\t\t\tif (!mask.ipv6)\r\n-\t\t\t\treturn -rte_errno;\r\n-\t\t\tif (mask.ipv6->hdr.proto &&\r\n-\t\t\t    mask.ipv6->hdr.proto != 0xff)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK,\r\n-\t\t\t\t\t mask.ipv6,\r\n-\t\t\t\t\t \"no support for partial mask on\"\r\n-\t\t\t\t\t \" \\\"hdr.proto\\\" field\");\r\n-\t\t\telse if (mask.ipv6->hdr.proto)\r\n-\t\t\t\tnext_protocol =\r\n-\t\t\t\t\t((const struct rte_flow_item_ipv6 *)\r\n-\t\t\t\t\t (items->spec))->hdr.proto;\r\n-\t\t\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL) {\r\n-\t\t\t\tif (inner_etype != RTE_BE16(ETH_P_ALL) &&\r\n-\t\t\t\t    inner_etype != RTE_BE16(ETH_P_IPV6))\r\n-\t\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t\t items,\r\n-\t\t\t\t\t\t \"inner eth_type conflict,\"\r\n-\t\t\t\t\t\t \" IPv6 is required\");\r\n-\t\t\t\tinner_etype = RTE_BE16(ETH_P_IPV6);\r\n-\t\t\t} else if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN) {\r\n-\t\t\t\tif (vlan_etype != RTE_BE16(ETH_P_ALL) &&\r\n-\t\t\t\t    vlan_etype != RTE_BE16(ETH_P_IPV6))\r\n-\t\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t\t items,\r\n-\t\t\t\t\t\t \"vlan eth_type conflict,\"\r\n-\t\t\t\t\t\t \" IPv6 is required\");\r\n-\t\t\t\tvlan_etype = RTE_BE16(ETH_P_IPV6);\r\n-\t\t\t} else {\r\n-\t\t\t\tif (outer_etype != RTE_BE16(ETH_P_ALL) &&\r\n-\t\t\t\t    outer_etype != RTE_BE16(ETH_P_IPV6))\r\n-\t\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t\t(error, EINVAL,\r\n-\t\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t\t items,\r\n-\t\t\t\t\t\t \"eth_type conflict,\"\r\n-\t\t\t\t\t\t \" IPv6 is required\");\r\n-\t\t\t\touter_etype = RTE_BE16(ETH_P_IPV6);\r\n-\t\t\t}\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\r\n-\t\t\tret = mlx5_flow_validate_item_udp(items, item_flags,\r\n-\t\t\t\t\t\t\t  next_protocol, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= 
(item_flags & MLX5_FLOW_LAYER_TUNNEL) ?\r\n-\t\t\t\t      MLX5_FLOW_LAYER_INNER_L4_UDP :\r\n-\t\t\t\t      MLX5_FLOW_LAYER_OUTER_L4_UDP;\r\n-\t\t\tmask.udp = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_udp_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.udp,\r\n-\t\t\t\t &flow_tcf_mask_empty.udp,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.udp),\r\n-\t\t\t\t error);\r\n-\t\t\tif (!mask.udp)\r\n-\t\t\t\treturn -rte_errno;\r\n-\t\t\t/*\r\n-\t\t\t * Save the presumed outer UDP item for extra check\r\n-\t\t\t * if the tunnel item will be found later in the list.\r\n-\t\t\t */\r\n-\t\t\tif (!(item_flags & MLX5_FLOW_LAYER_TUNNEL))\r\n-\t\t\t\touter_udp = items;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\r\n-\t\t\tret = mlx5_flow_validate_item_tcp\r\n-\t\t\t\t\t     (items, item_flags,\r\n-\t\t\t\t\t      next_protocol,\r\n-\t\t\t\t\t      &flow_tcf_mask_supported.tcp,\r\n-\t\t\t\t\t      error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?\r\n-\t\t\t\t      MLX5_FLOW_LAYER_INNER_L4_TCP :\r\n-\t\t\t\t      MLX5_FLOW_LAYER_OUTER_L4_TCP;\r\n-\t\t\tmask.tcp = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_tcp_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.tcp,\r\n-\t\t\t\t &flow_tcf_mask_empty.tcp,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.tcp),\r\n-\t\t\t\t error);\r\n-\t\t\tif (!mask.tcp)\r\n-\t\t\t\treturn -rte_errno;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\r\n-\t\t\tif (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM, items,\r\n-\t\t\t\t\t \"vxlan tunnel over vlan\"\r\n-\t\t\t\t\t \" is not supported\");\r\n-\t\t\tret = mlx5_flow_validate_item_vxlan(items,\r\n-\t\t\t\t\t\t\t    item_flags, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\titem_flags |= MLX5_FLOW_LAYER_VXLAN;\r\n-\t\t\tmask.vxlan = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_vxlan_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.vxlan,\r\n-\t\t\t\t &flow_tcf_mask_empty.vxlan,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.vxlan), error);\r\n-\t\t\tif (!mask.vxlan)\r\n-\t\t\t\treturn -rte_errno;\r\n-\t\t\tif (mask.vxlan->vni[0] != 0xff ||\r\n-\t\t\t    mask.vxlan->vni[1] != 0xff ||\r\n-\t\t\t    mask.vxlan->vni[2] != 0xff)\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t(error, ENOTSUP,\r\n-\t\t\t\t\t RTE_FLOW_ERROR_TYPE_ITEM_MASK,\r\n-\t\t\t\t\t mask.vxlan,\r\n-\t\t\t\t\t \"no support for partial or \"\r\n-\t\t\t\t\t \"empty mask on \\\"vxlan.vni\\\" field\");\r\n-\t\t\t/*\r\n-\t\t\t * The VNI item assumes the VXLAN tunnel, it requires\r\n-\t\t\t * at least the outer destination UDP port must be\r\n-\t\t\t * specified without wildcards to allow kernel select\r\n-\t\t\t * the virtual VXLAN device by port. 
Also outer IPv4\r\n-\t\t\t * or IPv6 item must be specified (wildcards or even\r\n-\t\t\t * zero mask are allowed) to let driver know the tunnel\r\n-\t\t\t * IP version and process UDP traffic correctly.\r\n-\t\t\t */\r\n-\t\t\tif (!(item_flags &\r\n-\t\t\t     (MLX5_FLOW_LAYER_OUTER_L3_IPV4 |\r\n-\t\t\t      MLX5_FLOW_LAYER_OUTER_L3_IPV6)))\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t\t (error, EINVAL,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  NULL,\r\n-\t\t\t\t\t\t  \"no outer IP pattern found\"\r\n-\t\t\t\t\t\t  \" for vxlan tunnel\");\r\n-\t\t\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))\r\n-\t\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t\t\t (error, EINVAL,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  NULL,\r\n-\t\t\t\t\t\t  \"no outer UDP pattern found\"\r\n-\t\t\t\t\t\t  \" for vxlan tunnel\");\r\n-\t\t\t/*\r\n-\t\t\t * All items preceding the tunnel item become outer\r\n-\t\t\t * ones and we should do extra validation for them\r\n-\t\t\t * due to tc limitations for tunnel outer parameters.\r\n-\t\t\t * Currently only outer UDP item requires extra check,\r\n-\t\t\t * use the saved pointer instead of item list rescan.\r\n-\t\t\t */\r\n-\t\t\tassert(outer_udp);\r\n-\t\t\tret = flow_tcf_validate_vxlan_decap_udp\r\n-\t\t\t\t\t\t(outer_udp, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\t/* Reset L4 protocol for inner parameters. */\r\n-\t\t\tnext_protocol = 0xff;\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t\t  items, \"item not supported\");\r\n-\t\t}\r\n-\t}\r\n-\tif ((action_flags & MLX5_TCF_PEDIT_ACTIONS) &&\r\n-\t    (action_flags & MLX5_FLOW_ACTION_DROP))\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t  actions,\r\n-\t\t\t\t\t  \"set action is not compatible with \"\r\n-\t\t\t\t\t  \"drop action\");\r\n-\tif ((action_flags & MLX5_TCF_PEDIT_ACTIONS) &&\r\n-\t    !(action_flags & MLX5_FLOW_ACTION_PORT_ID))\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t  actions,\r\n-\t\t\t\t\t  \"set action must be followed by \"\r\n-\t\t\t\t\t  \"port_id action\");\r\n-\tif (action_flags &\r\n-\t   (MLX5_FLOW_ACTION_SET_IPV4_SRC | MLX5_FLOW_ACTION_SET_IPV4_DST)) {\r\n-\t\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4))\r\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"no ipv4 item found in\"\r\n-\t\t\t\t\t\t  \" pattern\");\r\n-\t}\r\n-\tif (action_flags &\r\n-\t   (MLX5_FLOW_ACTION_SET_IPV6_SRC | MLX5_FLOW_ACTION_SET_IPV6_DST)) {\r\n-\t\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6))\r\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"no ipv6 item found in\"\r\n-\t\t\t\t\t\t  \" pattern\");\r\n-\t}\r\n-\tif (action_flags &\r\n-\t   (MLX5_FLOW_ACTION_SET_TP_SRC | MLX5_FLOW_ACTION_SET_TP_DST)) {\r\n-\t\tif (!(item_flags &\r\n-\t\t     (MLX5_FLOW_LAYER_OUTER_L4_UDP |\r\n-\t\t      MLX5_FLOW_LAYER_OUTER_L4_TCP)))\r\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"no TCP/UDP item found in\"\r\n-\t\t\t\t\t\t  \" pattern\");\r\n-\t}\r\n-\t/*\r\n-\t * FW syndrome (0xA9C090):\r\n-\t *     set_flow_table_entry: push vlan action fte in fdb 
can ONLY be\r\n-\t *     forward to the uplink.\r\n-\t */\r\n-\tif ((action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&\r\n-\t    (action_flags & MLX5_FLOW_ACTION_PORT_ID) &&\r\n-\t    ((struct mlx5_priv *)port_id_dev->data->dev_private)->representor)\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, actions,\r\n-\t\t\t\t\t  \"vlan push can only be applied\"\r\n-\t\t\t\t\t  \" when forwarding to uplink port\");\r\n-\t/*\r\n-\t * FW syndrome (0x294609):\r\n-\t *     set_flow_table_entry: modify/pop/push actions in fdb flow table\r\n-\t *     are supported only while forwarding to vport.\r\n-\t */\r\n-\tif ((action_flags & MLX5_TCF_VLAN_ACTIONS) &&\r\n-\t    !(action_flags & MLX5_FLOW_ACTION_PORT_ID))\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, actions,\r\n-\t\t\t\t\t  \"vlan actions are supported\"\r\n-\t\t\t\t\t  \" only with port_id action\");\r\n-\tif ((action_flags & MLX5_TCF_VXLAN_ACTIONS) &&\r\n-\t    !(action_flags & MLX5_FLOW_ACTION_PORT_ID))\r\n-\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, NULL,\r\n-\t\t\t\t\t  \"vxlan actions are supported\"\r\n-\t\t\t\t\t  \" only with port_id action\");\r\n-\tif (!(action_flags & MLX5_TCF_FATE_ACTIONS))\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION, actions,\r\n-\t\t\t\t\t  \"no fate action is found\");\r\n-\tif (action_flags &\r\n-\t   (MLX5_FLOW_ACTION_SET_TTL | MLX5_FLOW_ACTION_DEC_TTL)) {\r\n-\t\tif (!(item_flags &\r\n-\t\t     (MLX5_FLOW_LAYER_OUTER_L3_IPV4 |\r\n-\t\t      MLX5_FLOW_LAYER_OUTER_L3_IPV6)))\r\n-\t\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"no IP found in pattern\");\r\n-\t}\r\n-\tif (action_flags &\r\n-\t    (MLX5_FLOW_ACTION_SET_MAC_SRC | MLX5_FLOW_ACTION_SET_MAC_DST)) {\r\n-\t\tif (!(item_flags & MLX5_FLOW_LAYER_OUTER_L2))\r\n-\t\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"no ethernet found in\"\r\n-\t\t\t\t\t\t  \" pattern\");\r\n-\t}\r\n-\tif ((action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) &&\r\n-\t    !(item_flags & MLX5_FLOW_LAYER_VXLAN))\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t  NULL,\r\n-\t\t\t\t\t  \"no VNI pattern found\"\r\n-\t\t\t\t\t  \" for vxlan decap action\");\r\n-\tif ((action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP) &&\r\n-\t    (item_flags & MLX5_FLOW_LAYER_TUNNEL))\r\n-\t\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t  NULL,\r\n-\t\t\t\t\t  \"vxlan encap not supported\"\r\n-\t\t\t\t\t  \" for tunneled traffic\");\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Calculate maximum size of memory for flow items of Linux TC flower.\r\n- *\r\n- * @param[in] attr\r\n- *   Pointer to the flow attributes.\r\n- * @param[in] items\r\n- *   Pointer to the list of items.\r\n- * @param[out] action_flags\r\n- *   Pointer to the detected actions.\r\n- *\r\n- * @return\r\n- *   Maximum size of memory for items.\r\n- */\r\n-static int\r\n-flow_tcf_get_items_size(const struct rte_flow_attr *attr,\r\n-\t\t\tconst struct rte_flow_item items[],\r\n-\t\t\tuint64_t *action_flags)\r\n-{\r\n-\tint size = 0;\r\n-\r\n-\tsize += SZ_NLATTR_STRZ_OF(\"flower\") +\r\n-\t\tSZ_NLATTR_TYPE_OF(uint16_t) + /* Outer ether type. */\r\n-\t\tSZ_NLATTR_NEST + /* TCA_OPTIONS. 
*/\r\n-\t\tSZ_NLATTR_TYPE_OF(uint32_t); /* TCA_CLS_FLAGS_SKIP_SW. */\r\n-\tif (attr->group > 0)\r\n-\t\tsize += SZ_NLATTR_TYPE_OF(uint32_t); /* TCA_CHAIN. */\r\n-\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\r\n-\t\tswitch (items->type) {\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VOID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_PORT_ID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\r\n-\t\t\tsize += SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) * 4;\r\n-\t\t\t\t/* dst/src MAC addr and mask. */\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\r\n-\t\t\tsize +=\tSZ_NLATTR_TYPE_OF(uint16_t) +\r\n-\t\t\t\t/* VLAN Ether type. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(uint8_t) + /* VLAN prio. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(uint16_t); /* VLAN ID. */\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV4: {\r\n-\t\t\tconst struct rte_flow_item_ipv4 *ipv4 = items->mask;\r\n-\r\n-\t\t\tsize +=\tSZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(uint32_t) * 4;\r\n-\t\t\t\t/* dst/src IP addr and mask. */\r\n-\t\t\tif (ipv4 && ipv4->hdr.time_to_live)\r\n-\t\t\t\tsize += SZ_NLATTR_TYPE_OF(uint8_t) * 2;\r\n-\t\t\tif (ipv4 && ipv4->hdr.type_of_service)\r\n-\t\t\t\tsize += SZ_NLATTR_TYPE_OF(uint8_t) * 2;\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6: {\r\n-\t\t\tconst struct rte_flow_item_ipv6 *ipv6 = items->mask;\r\n-\r\n-\t\t\tsize +=\tSZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */\r\n-\t\t\t\tSZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 4;\r\n-\t\t\t\t/* dst/src IP addr and mask. */\r\n-\t\t\tif (ipv6 && ipv6->hdr.hop_limits)\r\n-\t\t\t\tsize += SZ_NLATTR_TYPE_OF(uint8_t) * 2;\r\n-\t\t\tif (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &\r\n-\t\t\t\t     (0xfful << RTE_IPV6_HDR_TC_SHIFT)))\r\n-\t\t\t\tsize += SZ_NLATTR_TYPE_OF(uint8_t) * 2;\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\r\n-\t\t\tsize += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(uint16_t) * 4;\r\n-\t\t\t\t/* dst/src port and mask. */\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\r\n-\t\t\tsize += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(uint16_t) * 4;\r\n-\t\t\t\t/* dst/src port and mask. */\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\r\n-\t\t\tsize += SZ_NLATTR_TYPE_OF(uint32_t);\r\n-\t\t\t/*\r\n-\t\t\t * There might be no VXLAN decap action in the action\r\n-\t\t\t * list, nonetheless the VXLAN tunnel flow requires\r\n-\t\t\t * the decap structure to be correctly applied to\r\n-\t\t\t * VXLAN device, set the flag to create the structure.\r\n-\t\t\t * Translation routine will not put the decap action\r\n-\t\t\t * in the Netlink message if there is no actual action\r\n-\t\t\t * in the list.\r\n-\t\t\t */\r\n-\t\t\t*action_flags |= MLX5_FLOW_ACTION_VXLAN_DECAP;\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\tDRV_LOG(WARNING,\r\n-\t\t\t\t\"unsupported item %p type %d,\"\r\n-\t\t\t\t\" items must be validated before flow creation\",\r\n-\t\t\t\t(const void *)items, items->type);\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t}\r\n-\treturn size;\r\n-}\r\n-\r\n-/**\r\n- * Calculate size of memory to store the VXLAN encapsulation\r\n- * related items in the Netlink message buffer. 
Items list\r\n- * is specified by RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action.\r\n- * The item list should be validated.\r\n- *\r\n- * @param[in] action\r\n- *   RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action object.\r\n- *   List of pattern items to scan data from.\r\n- *\r\n- * @return\r\n- *   The size the part of Netlink message buffer to store the\r\n- *   VXLAN encapsulation item attributes.\r\n- */\r\n-static int\r\n-flow_tcf_vxlan_encap_size(const struct rte_flow_action *action)\r\n-{\r\n-\tconst struct rte_flow_item *items;\r\n-\tint size = 0;\r\n-\r\n-\tassert(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP);\r\n-\tassert(action->conf);\r\n-\r\n-\titems = ((const struct rte_flow_action_vxlan_encap *)\r\n-\t\t\t\t\taction->conf)->definition;\r\n-\tassert(items);\r\n-\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\r\n-\t\tswitch (items->type) {\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VOID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\r\n-\t\t\t/* This item does not require message buffer. */\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV4: {\r\n-\t\t\tconst struct rte_flow_item_ipv4 *ipv4 = items->mask;\r\n-\r\n-\t\t\tsize += SZ_NLATTR_DATA_OF(IPV4_ADDR_LEN) * 2;\r\n-\t\t\tif (ipv4 && ipv4->hdr.time_to_live)\r\n-\t\t\t\tsize += SZ_NLATTR_TYPE_OF(uint8_t) * 2;\r\n-\t\t\tif (ipv4 && ipv4->hdr.type_of_service)\r\n-\t\t\t\tsize += SZ_NLATTR_TYPE_OF(uint8_t) * 2;\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6: {\r\n-\t\t\tconst struct rte_flow_item_ipv6 *ipv6 = items->mask;\r\n-\r\n-\t\t\tsize += SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 2;\r\n-\t\t\tif (ipv6 && ipv6->hdr.hop_limits)\r\n-\t\t\t\tsize += SZ_NLATTR_TYPE_OF(uint8_t) * 2;\r\n-\t\t\tif (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &\r\n-\t\t\t\t     (0xfful << RTE_IPV6_HDR_TC_SHIFT)))\r\n-\t\t\t\tsize += SZ_NLATTR_TYPE_OF(uint8_t) * 2;\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_UDP: {\r\n-\t\t\tconst struct rte_flow_item_udp *udp = items->mask;\r\n-\r\n-\t\t\tsize += SZ_NLATTR_TYPE_OF(uint16_t);\r\n-\t\t\tif (!udp || udp->hdr.src_port != RTE_BE16(0x0000))\r\n-\t\t\t\tsize += SZ_NLATTR_TYPE_OF(uint16_t);\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\r\n-\t\t\tsize +=\tSZ_NLATTR_TYPE_OF(uint32_t);\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\tassert(false);\r\n-\t\t\tDRV_LOG(WARNING,\r\n-\t\t\t\t\"unsupported item %p type %d,\"\r\n-\t\t\t\t\" items must be validated\"\r\n-\t\t\t\t\" before flow creation\",\r\n-\t\t\t\t(const void *)items, items->type);\r\n-\t\t\treturn 0;\r\n-\t\t}\r\n-\t}\r\n-\treturn size;\r\n-}\r\n-\r\n-/**\r\n- * Calculate maximum size of memory for flow actions of Linux TC flower and\r\n- * extract specified actions.\r\n- *\r\n- * @param[in] actions\r\n- *   Pointer to the list of actions.\r\n- * @param[out] action_flags\r\n- *   Pointer to the detected actions.\r\n- *\r\n- * @return\r\n- *   Maximum size of memory for actions.\r\n- */\r\n-static int\r\n-flow_tcf_get_actions_and_size(const struct rte_flow_action actions[],\r\n-\t\t\t      uint64_t *action_flags)\r\n-{\r\n-\tint size = 0;\r\n-\tuint64_t flags = *action_flags;\r\n-\r\n-\tsize += SZ_NLATTR_NEST; /* TCA_FLOWER_ACT. */\r\n-\tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {\r\n-\t\tswitch (actions->type) {\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_PORT_ID:\r\n-\t\t\tsize += SZ_NLATTR_NEST + /* na_act_index. */\r\n-\t\t\t\tSZ_NLATTR_STRZ_OF(\"mirred\") +\r\n-\t\t\t\tSZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. 
*/\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(struct tc_mirred);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_PORT_ID;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_JUMP:\r\n-\t\t\tsize += SZ_NLATTR_NEST + /* na_act_index. */\r\n-\t\t\t\tSZ_NLATTR_STRZ_OF(\"gact\") +\r\n-\t\t\t\tSZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(struct tc_gact);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_JUMP;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_DROP:\r\n-\t\t\tsize += SZ_NLATTR_NEST + /* na_act_index. */\r\n-\t\t\t\tSZ_NLATTR_STRZ_OF(\"gact\") +\r\n-\t\t\t\tSZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(struct tc_gact);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_DROP;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_OF_POP_VLAN;\r\n-\t\t\tgoto action_of_vlan;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;\r\n-\t\t\tgoto action_of_vlan;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;\r\n-\t\t\tgoto action_of_vlan;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;\r\n-\t\t\tgoto action_of_vlan;\r\n-action_of_vlan:\r\n-\t\t\tsize += SZ_NLATTR_NEST + /* na_act_index. */\r\n-\t\t\t\tSZ_NLATTR_STRZ_OF(\"vlan\") +\r\n-\t\t\t\tSZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(struct tc_vlan) +\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(uint16_t) +\r\n-\t\t\t\t/* VLAN protocol. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(uint16_t) + /* VLAN ID. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(uint8_t); /* VLAN prio. */\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\r\n-\t\t\tsize += SZ_NLATTR_NEST + /* na_act_index. */\r\n-\t\t\t\tSZ_NLATTR_STRZ_OF(\"tunnel_key\") +\r\n-\t\t\t\tSZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(uint8_t);\r\n-\t\t\tsize += SZ_NLATTR_TYPE_OF(struct tc_tunnel_key);\r\n-\t\t\tsize +=\tflow_tcf_vxlan_encap_size(actions) +\r\n-\t\t\t\tRTE_ALIGN_CEIL /* preceding encap params. */\r\n-\t\t\t\t(sizeof(struct flow_tcf_vxlan_encap),\r\n-\t\t\t\tMNL_ALIGNTO);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_VXLAN_ENCAP;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:\r\n-\t\t\tsize += SZ_NLATTR_NEST + /* na_act_index. */\r\n-\t\t\t\tSZ_NLATTR_STRZ_OF(\"tunnel_key\") +\r\n-\t\t\t\tSZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */\r\n-\t\t\t\tSZ_NLATTR_TYPE_OF(uint8_t);\r\n-\t\t\tsize +=\tSZ_NLATTR_TYPE_OF(struct tc_tunnel_key);\r\n-\t\t\tsize +=\tRTE_ALIGN_CEIL /* preceding decap params. 
*/\r\n-\t\t\t\t(sizeof(struct flow_tcf_vxlan_decap),\r\n-\t\t\t\tMNL_ALIGNTO);\r\n-\t\t\tflags |= MLX5_FLOW_ACTION_VXLAN_DECAP;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_DST:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TTL:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TTL:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_DST:\r\n-\t\t\tsize += flow_tcf_get_pedit_actions_size(&actions,\r\n-\t\t\t\t\t\t\t\t&flags);\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\tDRV_LOG(WARNING,\r\n-\t\t\t\t\"unsupported action %p type %d,\"\r\n-\t\t\t\t\" items must be validated before flow creation\",\r\n-\t\t\t\t(const void *)actions, actions->type);\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t}\r\n-\t*action_flags = flags;\r\n-\treturn size;\r\n-}\r\n-\r\n-/**\r\n- * Prepare a flow object for Linux TC flower. It calculates the maximum size of\r\n- * memory required, allocates the memory, initializes Netlink message headers\r\n- * and set unique TC message handle.\r\n- *\r\n- * @param[in] attr\r\n- *   Pointer to the flow attributes.\r\n- * @param[in] items\r\n- *   Pointer to the list of items.\r\n- * @param[in] actions\r\n- *   Pointer to the list of actions.\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   Pointer to mlx5_flow object on success,\r\n- *   otherwise NULL and rte_errno is set.\r\n- */\r\n-static struct mlx5_flow *\r\n-flow_tcf_prepare(const struct rte_flow_attr *attr,\r\n-\t\t const struct rte_flow_item items[],\r\n-\t\t const struct rte_flow_action actions[],\r\n-\t\t struct rte_flow_error *error)\r\n-{\r\n-\tsize_t size = RTE_ALIGN_CEIL\r\n-\t\t\t(sizeof(struct mlx5_flow),\r\n-\t\t\t alignof(struct flow_tcf_tunnel_hdr)) +\r\n-\t\t      MNL_ALIGN(sizeof(struct nlmsghdr)) +\r\n-\t\t      MNL_ALIGN(sizeof(struct tcmsg));\r\n-\tstruct mlx5_flow *dev_flow;\r\n-\tuint64_t action_flags = 0;\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct tcmsg *tcm;\r\n-\tuint8_t *sp, *tun = NULL;\r\n-\r\n-\tsize += flow_tcf_get_items_size(attr, items, &action_flags);\r\n-\tsize += flow_tcf_get_actions_and_size(actions, &action_flags);\r\n-\tdev_flow = rte_zmalloc(__func__, size, MNL_ALIGNTO);\r\n-\tif (!dev_flow) {\r\n-\t\trte_flow_error_set(error, ENOMEM,\r\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t   \"not enough memory to create E-Switch flow\");\r\n-\t\treturn NULL;\r\n-\t}\r\n-\tsp = (uint8_t *)(dev_flow + 1);\r\n-\tif (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP) {\r\n-\t\tsp = RTE_PTR_ALIGN\r\n-\t\t\t(sp, alignof(struct flow_tcf_tunnel_hdr));\r\n-\t\ttun = sp;\r\n-\t\tsp += RTE_ALIGN_CEIL\r\n-\t\t\t(sizeof(struct flow_tcf_vxlan_encap),\r\n-\t\t\tMNL_ALIGNTO);\r\n-#ifndef NDEBUG\r\n-\t\tsize -= RTE_ALIGN_CEIL\r\n-\t\t\t(sizeof(struct flow_tcf_vxlan_encap),\r\n-\t\t\tMNL_ALIGNTO);\r\n-#endif\r\n-\t} else if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) {\r\n-\t\tsp = RTE_PTR_ALIGN\r\n-\t\t\t(sp, alignof(struct flow_tcf_tunnel_hdr));\r\n-\t\ttun = sp;\r\n-\t\tsp += RTE_ALIGN_CEIL\r\n-\t\t\t(sizeof(struct flow_tcf_vxlan_decap),\r\n-\t\t\tMNL_ALIGNTO);\r\n-#ifndef NDEBUG\r\n-\t\tsize -= RTE_ALIGN_CEIL\r\n-\t\t\t(sizeof(struct flow_tcf_vxlan_decap),\r\n-\t\t\tMNL_ALIGNTO);\r\n-#endif\r\n-\t} else {\r\n-\t\tsp = RTE_PTR_ALIGN(sp, MNL_ALIGNTO);\r\n-\t}\r\n-\tnlh = 
mnl_nlmsg_put_header(sp);\r\n-\ttcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));\r\n-\t*dev_flow = (struct mlx5_flow){\r\n-\t\t.tcf = (struct mlx5_flow_tcf){\r\n-#ifndef NDEBUG\r\n-\t\t\t.nlsize = size - RTE_ALIGN_CEIL\r\n-\t\t\t\t(sizeof(struct mlx5_flow),\r\n-\t\t\t\t alignof(struct flow_tcf_tunnel_hdr)),\r\n-#endif\r\n-\t\t\t.tunnel = (struct flow_tcf_tunnel_hdr *)tun,\r\n-\t\t\t.nlh = nlh,\r\n-\t\t\t.tcm = tcm,\r\n-\t\t},\r\n-\t};\r\n-\tif (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP)\r\n-\t\tdev_flow->tcf.tunnel->type = FLOW_TCF_TUNACT_VXLAN_DECAP;\r\n-\telse if (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP)\r\n-\t\tdev_flow->tcf.tunnel->type = FLOW_TCF_TUNACT_VXLAN_ENCAP;\r\n-\treturn dev_flow;\r\n-}\r\n-\r\n-/**\r\n- * Make adjustments for supporting count actions.\r\n- *\r\n- * @param[in] dev\r\n- *   Pointer to the Ethernet device structure.\r\n- * @param[in] dev_flow\r\n- *   Pointer to mlx5_flow.\r\n- * @param[out] error\r\n- *   Pointer to error structure.\r\n- *\r\n- * @return\r\n- *   0 On success else a negative errno value is returned and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_translate_action_count(struct rte_eth_dev *dev __rte_unused,\r\n-\t\t\t\t  struct mlx5_flow *dev_flow,\r\n-\t\t\t\t  struct rte_flow_error *error)\r\n-{\r\n-\tstruct rte_flow *flow = dev_flow->flow;\r\n-\r\n-\tif (!flow->counter) {\r\n-\t\tflow->counter = flow_tcf_counter_new();\r\n-\t\tif (!flow->counter)\r\n-\t\t\treturn rte_flow_error_set(error, rte_errno,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  NULL,\r\n-\t\t\t\t\t\t  \"cannot get counter\"\r\n-\t\t\t\t\t\t  \" context.\");\r\n-\t}\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Convert VXLAN VNI to 32-bit integer.\r\n- *\r\n- * @param[in] vni\r\n- *   VXLAN VNI in 24-bit wire format.\r\n- *\r\n- * @return\r\n- *   VXLAN VNI as a 32-bit integer value in network endianness.\r\n- */\r\n-static inline rte_be32_t\r\n-vxlan_vni_as_be32(const uint8_t vni[3])\r\n-{\r\n-\tunion {\r\n-\t\tuint8_t vni[4];\r\n-\t\trte_be32_t dword;\r\n-\t} ret = {\r\n-\t\t.vni = { 0, vni[0], vni[1], vni[2] },\r\n-\t};\r\n-\treturn ret.dword;\r\n-}\r\n-\r\n-/**\r\n- * Helper function to process RTE_FLOW_ITEM_TYPE_ETH entry in configuration\r\n- * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the MAC address fields\r\n- * in the encapsulation parameters structure. The item must be prevalidated,\r\n- * no any validation checks performed by function.\r\n- *\r\n- * @param[in] spec\r\n- *   RTE_FLOW_ITEM_TYPE_ETH entry specification.\r\n- * @param[in] mask\r\n- *   RTE_FLOW_ITEM_TYPE_ETH entry mask.\r\n- * @param[out] encap\r\n- *   Structure to fill the gathered MAC address data.\r\n- */\r\n-static void\r\n-flow_tcf_parse_vxlan_encap_eth(const struct rte_flow_item_eth *spec,\r\n-\t\t\t       const struct rte_flow_item_eth *mask,\r\n-\t\t\t       struct flow_tcf_vxlan_encap *encap)\r\n-{\r\n-\t/* Item must be validated before. No redundant checks. */\r\n-\tassert(spec);\r\n-\tif (!mask || !memcmp(&mask->dst,\r\n-\t\t\t     &rte_flow_item_eth_mask.dst,\r\n-\t\t\t     sizeof(rte_flow_item_eth_mask.dst))) {\r\n-\t\t/*\r\n-\t\t * Ethernet addresses are not supported by\r\n-\t\t * tc as tunnel_key parameters. 
Destination\r\n-\t\t * address is needed to form encap packet\r\n-\t\t * header and retrieved by kernel from\r\n-\t\t * implicit sources (ARP table, etc),\r\n-\t\t * address masks are not supported at all.\r\n-\t\t */\r\n-\t\tencap->eth.dst = spec->dst;\r\n-\t\tencap->mask |= FLOW_TCF_ENCAP_ETH_DST;\r\n-\t}\r\n-\tif (!mask || !memcmp(&mask->src,\r\n-\t\t\t     &rte_flow_item_eth_mask.src,\r\n-\t\t\t     sizeof(rte_flow_item_eth_mask.src))) {\r\n-\t\t/*\r\n-\t\t * Ethernet addresses are not supported by\r\n-\t\t * tc as tunnel_key parameters. Source ethernet\r\n-\t\t * address is ignored anyway.\r\n-\t\t */\r\n-\t\tencap->eth.src = spec->src;\r\n-\t\tencap->mask |= FLOW_TCF_ENCAP_ETH_SRC;\r\n-\t}\r\n-}\r\n-\r\n-/**\r\n- * Helper function to process RTE_FLOW_ITEM_TYPE_IPV4 entry in configuration\r\n- * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the IPV4 address fields\r\n- * in the encapsulation parameters structure. The item must be prevalidated,\r\n- * no any validation checks performed by function.\r\n- *\r\n- * @param[in] spec\r\n- *   RTE_FLOW_ITEM_TYPE_IPV4 entry specification.\r\n- * @param[in] mask\r\n- *  RTE_FLOW_ITEM_TYPE_IPV4 entry mask.\r\n- * @param[out] encap\r\n- *   Structure to fill the gathered IPV4 address data.\r\n- */\r\n-static void\r\n-flow_tcf_parse_vxlan_encap_ipv4(const struct rte_flow_item_ipv4 *spec,\r\n-\t\t\t\tconst struct rte_flow_item_ipv4 *mask,\r\n-\t\t\t\tstruct flow_tcf_vxlan_encap *encap)\r\n-{\r\n-\t/* Item must be validated before. No redundant checks. */\r\n-\tassert(spec);\r\n-\tencap->ipv4.dst = spec->hdr.dst_addr;\r\n-\tencap->ipv4.src = spec->hdr.src_addr;\r\n-\tencap->mask |= FLOW_TCF_ENCAP_IPV4_SRC |\r\n-\t\t       FLOW_TCF_ENCAP_IPV4_DST;\r\n-\tif (mask && mask->hdr.type_of_service) {\r\n-\t\tencap->mask |= FLOW_TCF_ENCAP_IP_TOS;\r\n-\t\tencap->ip_tos = spec->hdr.type_of_service;\r\n-\t}\r\n-\tif (mask && mask->hdr.time_to_live) {\r\n-\t\tencap->mask |= FLOW_TCF_ENCAP_IP_TTL;\r\n-\t\tencap->ip_ttl_hop = spec->hdr.time_to_live;\r\n-\t}\r\n-}\r\n-\r\n-/**\r\n- * Helper function to process RTE_FLOW_ITEM_TYPE_IPV6 entry in configuration\r\n- * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the IPV6 address fields\r\n- * in the encapsulation parameters structure. The item must be prevalidated,\r\n- * no any validation checks performed by function.\r\n- *\r\n- * @param[in] spec\r\n- *   RTE_FLOW_ITEM_TYPE_IPV6 entry specification.\r\n- * @param[in] mask\r\n- *  RTE_FLOW_ITEM_TYPE_IPV6 entry mask.\r\n- * @param[out] encap\r\n- *   Structure to fill the gathered IPV6 address data.\r\n- */\r\n-static void\r\n-flow_tcf_parse_vxlan_encap_ipv6(const struct rte_flow_item_ipv6 *spec,\r\n-\t\t\t\tconst struct rte_flow_item_ipv6 *mask,\r\n-\t\t\t\tstruct flow_tcf_vxlan_encap *encap)\r\n-{\r\n-\t/* Item must be validated before. No redundant checks. 
*/\r\n-\tassert(spec);\r\n-\tmemcpy(encap->ipv6.dst, spec->hdr.dst_addr, IPV6_ADDR_LEN);\r\n-\tmemcpy(encap->ipv6.src, spec->hdr.src_addr, IPV6_ADDR_LEN);\r\n-\tencap->mask |= FLOW_TCF_ENCAP_IPV6_SRC |\r\n-\t\t       FLOW_TCF_ENCAP_IPV6_DST;\r\n-\tif (mask) {\r\n-\t\tif ((rte_be_to_cpu_32(mask->hdr.vtc_flow) >>\r\n-\t\t    RTE_IPV6_HDR_TC_SHIFT) & 0xff) {\r\n-\t\t\tencap->mask |= FLOW_TCF_ENCAP_IP_TOS;\r\n-\t\t\tencap->ip_tos = (rte_be_to_cpu_32\r\n-\t\t\t\t\t\t(spec->hdr.vtc_flow) >>\r\n-\t\t\t\t\t\t RTE_IPV6_HDR_TC_SHIFT) & 0xff;\r\n-\t\t}\r\n-\t\tif (mask->hdr.hop_limits) {\r\n-\t\t\tencap->mask |= FLOW_TCF_ENCAP_IP_TTL;\r\n-\t\t\tencap->ip_ttl_hop = spec->hdr.hop_limits;\r\n-\t\t}\r\n-\t}\r\n-}\r\n-\r\n-/**\r\n- * Helper function to process RTE_FLOW_ITEM_TYPE_UDP entry in configuration\r\n- * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the UDP port fields\r\n- * in the encapsulation parameters structure. The item must be prevalidated,\r\n- * no any validation checks performed by function.\r\n- *\r\n- * @param[in] spec\r\n- *   RTE_FLOW_ITEM_TYPE_UDP entry specification.\r\n- * @param[in] mask\r\n- *   RTE_FLOW_ITEM_TYPE_UDP entry mask.\r\n- * @param[out] encap\r\n- *   Structure to fill the gathered UDP port data.\r\n- */\r\n-static void\r\n-flow_tcf_parse_vxlan_encap_udp(const struct rte_flow_item_udp *spec,\r\n-\t\t\t       const struct rte_flow_item_udp *mask,\r\n-\t\t\t       struct flow_tcf_vxlan_encap *encap)\r\n-{\r\n-\tassert(spec);\r\n-\tencap->udp.dst = spec->hdr.dst_port;\r\n-\tencap->mask |= FLOW_TCF_ENCAP_UDP_DST;\r\n-\tif (!mask || mask->hdr.src_port != RTE_BE16(0x0000)) {\r\n-\t\tencap->udp.src = spec->hdr.src_port;\r\n-\t\tencap->mask |= FLOW_TCF_ENCAP_IPV4_SRC;\r\n-\t}\r\n-}\r\n-\r\n-/**\r\n- * Helper function to process RTE_FLOW_ITEM_TYPE_VXLAN entry in configuration\r\n- * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the VNI fields\r\n- * in the encapsulation parameters structure. The item must be prevalidated,\r\n- * no any validation checks performed by function.\r\n- *\r\n- * @param[in] spec\r\n- *   RTE_FLOW_ITEM_TYPE_VXLAN entry specification.\r\n- * @param[out] encap\r\n- *   Structure to fill the gathered VNI address data.\r\n- */\r\n-static void\r\n-flow_tcf_parse_vxlan_encap_vni(const struct rte_flow_item_vxlan *spec,\r\n-\t\t\t       struct flow_tcf_vxlan_encap *encap)\r\n-{\r\n-\t/* Item must be validated before. Do not redundant checks. */\r\n-\tassert(spec);\r\n-\tmemcpy(encap->vxlan.vni, spec->vni, sizeof(encap->vxlan.vni));\r\n-\tencap->mask |= FLOW_TCF_ENCAP_VXLAN_VNI;\r\n-}\r\n-\r\n-/**\r\n- * Populate consolidated encapsulation object from list of pattern items.\r\n- *\r\n- * Helper function to process configuration of action such as\r\n- * RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. 
The item list should be\r\n- * validated, there is no way to return an meaningful error.\r\n- *\r\n- * @param[in] action\r\n- *   RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action object.\r\n- *   List of pattern items to gather data from.\r\n- * @param[out] src\r\n- *   Structure to fill gathered data.\r\n- */\r\n-static void\r\n-flow_tcf_vxlan_encap_parse(const struct rte_flow_action *action,\r\n-\t\t\t   struct flow_tcf_vxlan_encap *encap)\r\n-{\r\n-\tunion {\r\n-\t\tconst struct rte_flow_item_eth *eth;\r\n-\t\tconst struct rte_flow_item_ipv4 *ipv4;\r\n-\t\tconst struct rte_flow_item_ipv6 *ipv6;\r\n-\t\tconst struct rte_flow_item_udp *udp;\r\n-\t\tconst struct rte_flow_item_vxlan *vxlan;\r\n-\t} spec, mask;\r\n-\tconst struct rte_flow_item *items;\r\n-\r\n-\tassert(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP);\r\n-\tassert(action->conf);\r\n-\r\n-\titems = ((const struct rte_flow_action_vxlan_encap *)\r\n-\t\t\t\t\taction->conf)->definition;\r\n-\tassert(items);\r\n-\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\r\n-\t\tswitch (items->type) {\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VOID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\r\n-\t\t\tmask.eth = items->mask;\r\n-\t\t\tspec.eth = items->spec;\r\n-\t\t\tflow_tcf_parse_vxlan_encap_eth(spec.eth, mask.eth,\r\n-\t\t\t\t\t\t       encap);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\r\n-\t\t\tspec.ipv4 = items->spec;\r\n-\t\t\tmask.ipv4 = items->mask;\r\n-\t\t\tflow_tcf_parse_vxlan_encap_ipv4(spec.ipv4, mask.ipv4,\r\n-\t\t\t\t\t\t\tencap);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\r\n-\t\t\tspec.ipv6 = items->spec;\r\n-\t\t\tmask.ipv6 = items->mask;\r\n-\t\t\tflow_tcf_parse_vxlan_encap_ipv6(spec.ipv6, mask.ipv6,\r\n-\t\t\t\t\t\t\tencap);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\r\n-\t\t\tmask.udp = items->mask;\r\n-\t\t\tspec.udp = items->spec;\r\n-\t\t\tflow_tcf_parse_vxlan_encap_udp(spec.udp, mask.udp,\r\n-\t\t\t\t\t\t       encap);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\r\n-\t\t\tspec.vxlan = items->spec;\r\n-\t\t\tflow_tcf_parse_vxlan_encap_vni(spec.vxlan, encap);\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\tassert(false);\r\n-\t\t\tDRV_LOG(WARNING,\r\n-\t\t\t\t\"unsupported item %p type %d,\"\r\n-\t\t\t\t\" items must be validated\"\r\n-\t\t\t\t\" before flow creation\",\r\n-\t\t\t\t(const void *)items, items->type);\r\n-\t\t\tencap->mask = 0;\r\n-\t\t\treturn;\r\n-\t\t}\r\n-\t}\r\n-}\r\n-\r\n-/**\r\n- * Translate flow for Linux TC flower and construct Netlink message.\r\n- *\r\n- * @param[in] priv\r\n- *   Pointer to the priv structure.\r\n- * @param[in, out] flow\r\n- *   Pointer to the sub flow.\r\n- * @param[in] attr\r\n- *   Pointer to the flow attributes.\r\n- * @param[in] items\r\n- *   Pointer to the list of items.\r\n- * @param[in] actions\r\n- *   Pointer to the list of actions.\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,\r\n-\t\t   const struct rte_flow_attr *attr,\r\n-\t\t   const struct rte_flow_item items[],\r\n-\t\t   const struct rte_flow_action actions[],\r\n-\t\t   struct rte_flow_error *error)\r\n-{\r\n-\tunion {\r\n-\t\tconst struct rte_flow_item_port_id *port_id;\r\n-\t\tconst struct rte_flow_item_eth *eth;\r\n-\t\tconst struct rte_flow_item_vlan *vlan;\r\n-\t\tconst struct rte_flow_item_ipv4 *ipv4;\r\n-\t\tconst struct 
rte_flow_item_ipv6 *ipv6;\r\n-\t\tconst struct rte_flow_item_tcp *tcp;\r\n-\t\tconst struct rte_flow_item_udp *udp;\r\n-\t\tconst struct rte_flow_item_vxlan *vxlan;\r\n-\t} spec, mask;\r\n-\tunion {\r\n-\t\tconst struct rte_flow_action_port_id *port_id;\r\n-\t\tconst struct rte_flow_action_jump *jump;\r\n-\t\tconst struct rte_flow_action_of_push_vlan *of_push_vlan;\r\n-\t\tconst struct rte_flow_action_of_set_vlan_vid *\r\n-\t\t\tof_set_vlan_vid;\r\n-\t\tconst struct rte_flow_action_of_set_vlan_pcp *\r\n-\t\t\tof_set_vlan_pcp;\r\n-\t} conf;\r\n-\tunion {\r\n-\t\tstruct flow_tcf_tunnel_hdr *hdr;\r\n-\t\tstruct flow_tcf_vxlan_decap *vxlan;\r\n-\t} decap = {\r\n-\t\t.hdr = NULL,\r\n-\t};\r\n-\tunion {\r\n-\t\tstruct flow_tcf_tunnel_hdr *hdr;\r\n-\t\tstruct flow_tcf_vxlan_encap *vxlan;\r\n-\t} encap = {\r\n-\t\t.hdr = NULL,\r\n-\t};\r\n-\tstruct flow_tcf_ptoi ptoi[PTOI_TABLE_SZ_MAX(dev)];\r\n-\tstruct nlmsghdr *nlh = dev_flow->tcf.nlh;\r\n-\tstruct tcmsg *tcm = dev_flow->tcf.tcm;\r\n-\tuint32_t na_act_index_cur;\r\n-\trte_be16_t inner_etype = RTE_BE16(ETH_P_ALL);\r\n-\trte_be16_t outer_etype = RTE_BE16(ETH_P_ALL);\r\n-\trte_be16_t vlan_etype = RTE_BE16(ETH_P_ALL);\r\n-\tbool ip_proto_set = 0;\r\n-\tbool tunnel_outer = 0;\r\n-\tstruct nlattr *na_flower;\r\n-\tstruct nlattr *na_flower_act;\r\n-\tstruct nlattr *na_vlan_id = NULL;\r\n-\tstruct nlattr *na_vlan_priority = NULL;\r\n-\tuint64_t item_flags = 0;\r\n-\tint ret;\r\n-\r\n-\tclaim_nonzero(flow_tcf_build_ptoi_table(dev, ptoi,\r\n-\t\t\t\t\t\tPTOI_TABLE_SZ_MAX(dev)));\r\n-\tif (dev_flow->tcf.tunnel) {\r\n-\t\tswitch (dev_flow->tcf.tunnel->type) {\r\n-\t\tcase FLOW_TCF_TUNACT_VXLAN_DECAP:\r\n-\t\t\tdecap.vxlan = dev_flow->tcf.vxlan_decap;\r\n-\t\t\ttunnel_outer = 1;\r\n-\t\t\tbreak;\r\n-\t\tcase FLOW_TCF_TUNACT_VXLAN_ENCAP:\r\n-\t\t\tencap.vxlan = dev_flow->tcf.vxlan_encap;\r\n-\t\t\tbreak;\r\n-\t\t/* New tunnel actions can be added here. */\r\n-\t\tdefault:\r\n-\t\t\tassert(false);\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t}\r\n-\tnlh = dev_flow->tcf.nlh;\r\n-\ttcm = dev_flow->tcf.tcm;\r\n-\t/* Prepare API must have been called beforehand. 
*/\r\n-\tassert(nlh != NULL && tcm != NULL);\r\n-\ttcm->tcm_family = AF_UNSPEC;\r\n-\ttcm->tcm_ifindex = ptoi[0].ifindex;\r\n-\ttcm->tcm_parent = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS);\r\n-\t/*\r\n-\t * Priority cannot be zero to prevent the kernel from picking one\r\n-\t * automatically.\r\n-\t */\r\n-\ttcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16, outer_etype);\r\n-\tif (attr->group > 0)\r\n-\t\tmnl_attr_put_u32(nlh, TCA_CHAIN, attr->group);\r\n-\tmnl_attr_put_strz(nlh, TCA_KIND, \"flower\");\r\n-\tna_flower = mnl_attr_nest_start(nlh, TCA_OPTIONS);\r\n-\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {\r\n-\t\tunsigned int i;\r\n-\r\n-\t\tswitch (items->type) {\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VOID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_PORT_ID:\r\n-\t\t\tmask.port_id = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_port_id_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.port_id,\r\n-\t\t\t\t &flow_tcf_mask_empty.port_id,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.port_id),\r\n-\t\t\t\t error);\r\n-\t\t\tassert(mask.port_id);\r\n-\t\t\tif (mask.port_id == &flow_tcf_mask_empty.port_id)\r\n-\t\t\t\tbreak;\r\n-\t\t\tspec.port_id = items->spec;\r\n-\t\t\tif (!mask.port_id->id)\r\n-\t\t\t\ti = 0;\r\n-\t\t\telse\r\n-\t\t\t\tfor (i = 0; ptoi[i].ifindex; ++i)\r\n-\t\t\t\t\tif (ptoi[i].port_id == spec.port_id->id)\r\n-\t\t\t\t\t\tbreak;\r\n-\t\t\tassert(ptoi[i].ifindex);\r\n-\t\t\ttcm->tcm_ifindex = ptoi[i].ifindex;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\r\n-\t\t\titem_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?\r\n-\t\t\t\t      MLX5_FLOW_LAYER_INNER_L2 :\r\n-\t\t\t\t      MLX5_FLOW_LAYER_OUTER_L2;\r\n-\t\t\tmask.eth = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_eth_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.eth,\r\n-\t\t\t\t &flow_tcf_mask_empty.eth,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.eth),\r\n-\t\t\t\t error);\r\n-\t\t\tassert(mask.eth);\r\n-\t\t\tif (mask.eth == &flow_tcf_mask_empty.eth)\r\n-\t\t\t\tbreak;\r\n-\t\t\tspec.eth = items->spec;\r\n-\t\t\tif (mask.eth->type) {\r\n-\t\t\t\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL)\r\n-\t\t\t\t\tinner_etype = spec.eth->type;\r\n-\t\t\t\telse\r\n-\t\t\t\t\touter_etype = spec.eth->type;\r\n-\t\t\t}\r\n-\t\t\tif (tunnel_outer) {\r\n-\t\t\t\tDRV_LOG(WARNING,\r\n-\t\t\t\t\t\"outer L2 addresses cannot be\"\r\n-\t\t\t\t\t\" forced is outer ones for tunnel,\"\r\n-\t\t\t\t\t\" parameter is ignored\");\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t\tif (!rte_is_zero_ether_addr(&mask.eth->dst)) {\r\n-\t\t\t\tmnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST,\r\n-\t\t\t\t\t     RTE_ETHER_ADDR_LEN,\r\n-\t\t\t\t\t     spec.eth->dst.addr_bytes);\r\n-\t\t\t\tmnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST_MASK,\r\n-\t\t\t\t\t     RTE_ETHER_ADDR_LEN,\r\n-\t\t\t\t\t     mask.eth->dst.addr_bytes);\r\n-\t\t\t}\r\n-\t\t\tif (!rte_is_zero_ether_addr(&mask.eth->src)) {\r\n-\t\t\t\tmnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC,\r\n-\t\t\t\t\t     RTE_ETHER_ADDR_LEN,\r\n-\t\t\t\t\t     spec.eth->src.addr_bytes);\r\n-\t\t\t\tmnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC_MASK,\r\n-\t\t\t\t\t     RTE_ETHER_ADDR_LEN,\r\n-\t\t\t\t\t     mask.eth->src.addr_bytes);\r\n-\t\t\t}\r\n-\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_VLAN:\r\n-\t\t\tassert(!encap.hdr);\r\n-\t\t\tassert(!decap.hdr);\r\n-\t\t\tassert(!tunnel_outer);\r\n-\t\t\titem_flags |= MLX5_FLOW_LAYER_OUTER_VLAN;\r\n-\t\t\tmask.vlan = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_vlan_mask,\r\n-\t\t\t\t 
&flow_tcf_mask_supported.vlan,\r\n-\t\t\t\t &flow_tcf_mask_empty.vlan,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.vlan),\r\n-\t\t\t\t error);\r\n-\t\t\tassert(mask.vlan);\r\n-\t\t\tif (mask.vlan == &flow_tcf_mask_empty.vlan)\r\n-\t\t\t\tbreak;\r\n-\t\t\tspec.vlan = items->spec;\r\n-\t\t\tassert(outer_etype == RTE_BE16(ETH_P_ALL) ||\r\n-\t\t\t       outer_etype == RTE_BE16(ETH_P_8021Q));\r\n-\t\t\touter_etype = RTE_BE16(ETH_P_8021Q);\r\n-\t\t\tif (mask.vlan->inner_type)\r\n-\t\t\t\tvlan_etype = spec.vlan->inner_type;\r\n-\t\t\tif (mask.vlan->tci & RTE_BE16(0xe000))\r\n-\t\t\t\tmnl_attr_put_u8(nlh, TCA_FLOWER_KEY_VLAN_PRIO,\r\n-\t\t\t\t\t\t(rte_be_to_cpu_16\r\n-\t\t\t\t\t\t (spec.vlan->tci) >> 13) & 0x7);\r\n-\t\t\tif (mask.vlan->tci & RTE_BE16(0x0fff))\r\n-\t\t\t\tmnl_attr_put_u16(nlh, TCA_FLOWER_KEY_VLAN_ID,\r\n-\t\t\t\t\t\t rte_be_to_cpu_16\r\n-\t\t\t\t\t\t (spec.vlan->tci &\r\n-\t\t\t\t\t\t  RTE_BE16(0x0fff)));\r\n-\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\r\n-\t\t\titem_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?\r\n-\t\t\t\t      MLX5_FLOW_LAYER_INNER_L3_IPV4 :\r\n-\t\t\t\t      MLX5_FLOW_LAYER_OUTER_L3_IPV4;\r\n-\t\t\tmask.ipv4 = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_ipv4_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.ipv4,\r\n-\t\t\t\t &flow_tcf_mask_empty.ipv4,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.ipv4),\r\n-\t\t\t\t error);\r\n-\t\t\tassert(mask.ipv4);\r\n-\t\t\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL) {\r\n-\t\t\t\tassert(inner_etype == RTE_BE16(ETH_P_ALL) ||\r\n-\t\t\t\t       inner_etype == RTE_BE16(ETH_P_IP));\r\n-\t\t\t\tinner_etype = RTE_BE16(ETH_P_IP);\r\n-\t\t\t} else if (outer_etype == RTE_BE16(ETH_P_8021Q)) {\r\n-\t\t\t\tassert(vlan_etype == RTE_BE16(ETH_P_ALL) ||\r\n-\t\t\t\t       vlan_etype == RTE_BE16(ETH_P_IP));\r\n-\t\t\t\tvlan_etype = RTE_BE16(ETH_P_IP);\r\n-\t\t\t} else {\r\n-\t\t\t\tassert(outer_etype == RTE_BE16(ETH_P_ALL) ||\r\n-\t\t\t\t       outer_etype == RTE_BE16(ETH_P_IP));\r\n-\t\t\t\touter_etype = RTE_BE16(ETH_P_IP);\r\n-\t\t\t}\r\n-\t\t\tspec.ipv4 = items->spec;\r\n-\t\t\tif (!tunnel_outer && mask.ipv4->hdr.next_proto_id) {\r\n-\t\t\t\t/*\r\n-\t\t\t\t * No way to set IP protocol for outer tunnel\r\n-\t\t\t\t * layers. Usually it is fixed, for example,\r\n-\t\t\t\t * to UDP for VXLAN/GPE.\r\n-\t\t\t\t */\r\n-\t\t\t\tassert(spec.ipv4); /* Mask is not empty. */\r\n-\t\t\t\tmnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,\r\n-\t\t\t\t\t\tspec.ipv4->hdr.next_proto_id);\r\n-\t\t\t\tip_proto_set = 1;\r\n-\t\t\t}\r\n-\t\t\tif (mask.ipv4 == &flow_tcf_mask_empty.ipv4 ||\r\n-\t\t\t     (!mask.ipv4->hdr.src_addr &&\r\n-\t\t\t      !mask.ipv4->hdr.dst_addr)) {\r\n-\t\t\t\tif (!tunnel_outer)\r\n-\t\t\t\t\tbreak;\r\n-\t\t\t\t/*\r\n-\t\t\t\t * For tunnel outer we must set outer IP key\r\n-\t\t\t\t * anyway, even if the specification/mask is\r\n-\t\t\t\t * empty. 
There is no another way to tell\r\n-\t\t\t\t * kernel about he outer layer protocol.\r\n-\t\t\t\t */\r\n-\t\t\t\tmnl_attr_put_u32\r\n-\t\t\t\t\t(nlh, TCA_FLOWER_KEY_ENC_IPV4_SRC,\r\n-\t\t\t\t\t mask.ipv4->hdr.src_addr);\r\n-\t\t\t\tmnl_attr_put_u32\r\n-\t\t\t\t\t(nlh, TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,\r\n-\t\t\t\t\t mask.ipv4->hdr.src_addr);\r\n-\t\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t\tif (mask.ipv4->hdr.src_addr) {\r\n-\t\t\t\tmnl_attr_put_u32\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IPV4_SRC :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IPV4_SRC,\r\n-\t\t\t\t\t spec.ipv4->hdr.src_addr);\r\n-\t\t\t\tmnl_attr_put_u32\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IPV4_SRC_MASK,\r\n-\t\t\t\t\t mask.ipv4->hdr.src_addr);\r\n-\t\t\t}\r\n-\t\t\tif (mask.ipv4->hdr.dst_addr) {\r\n-\t\t\t\tmnl_attr_put_u32\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IPV4_DST :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IPV4_DST,\r\n-\t\t\t\t\t spec.ipv4->hdr.dst_addr);\r\n-\t\t\t\tmnl_attr_put_u32\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IPV4_DST_MASK :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IPV4_DST_MASK,\r\n-\t\t\t\t\t mask.ipv4->hdr.dst_addr);\r\n-\t\t\t}\r\n-\t\t\tif (mask.ipv4->hdr.time_to_live) {\r\n-\t\t\t\tmnl_attr_put_u8\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IP_TTL :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IP_TTL,\r\n-\t\t\t\t\t spec.ipv4->hdr.time_to_live);\r\n-\t\t\t\tmnl_attr_put_u8\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IP_TTL_MASK :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IP_TTL_MASK,\r\n-\t\t\t\t\t mask.ipv4->hdr.time_to_live);\r\n-\t\t\t}\r\n-\t\t\tif (mask.ipv4->hdr.type_of_service) {\r\n-\t\t\t\tmnl_attr_put_u8\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IP_TOS :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IP_TOS,\r\n-\t\t\t\t\t spec.ipv4->hdr.type_of_service);\r\n-\t\t\t\tmnl_attr_put_u8\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IP_TOS_MASK :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IP_TOS_MASK,\r\n-\t\t\t\t\t mask.ipv4->hdr.type_of_service);\r\n-\t\t\t}\r\n-\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_IPV6: {\r\n-\t\t\tbool ipv6_src, ipv6_dst;\r\n-\t\t\tuint8_t msk6, tos6;\r\n-\r\n-\t\t\titem_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?\r\n-\t\t\t\t      MLX5_FLOW_LAYER_INNER_L3_IPV6 :\r\n-\t\t\t\t      MLX5_FLOW_LAYER_OUTER_L3_IPV6;\r\n-\t\t\tmask.ipv6 = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_ipv6_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.ipv6,\r\n-\t\t\t\t &flow_tcf_mask_empty.ipv6,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.ipv6),\r\n-\t\t\t\t error);\r\n-\t\t\tassert(mask.ipv6);\r\n-\t\t\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL) {\r\n-\t\t\t\tassert(inner_etype == RTE_BE16(ETH_P_ALL) ||\r\n-\t\t\t\t       inner_etype == RTE_BE16(ETH_P_IPV6));\r\n-\t\t\t\tinner_etype = RTE_BE16(ETH_P_IPV6);\r\n-\t\t\t} else if (outer_etype == RTE_BE16(ETH_P_8021Q)) {\r\n-\t\t\t\tassert(vlan_etype == RTE_BE16(ETH_P_ALL) ||\r\n-\t\t\t\t       vlan_etype == RTE_BE16(ETH_P_IPV6));\r\n-\t\t\t\tvlan_etype = RTE_BE16(ETH_P_IPV6);\r\n-\t\t\t} else {\r\n-\t\t\t\tassert(outer_etype == RTE_BE16(ETH_P_ALL) ||\r\n-\t\t\t\t       outer_etype == RTE_BE16(ETH_P_IPV6));\r\n-\t\t\t\touter_etype = RTE_BE16(ETH_P_IPV6);\r\n-\t\t\t}\r\n-\t\t\tspec.ipv6 = items->spec;\r\n-\t\t\tif (!tunnel_outer && 
mask.ipv6->hdr.proto) {\r\n-\t\t\t\t/*\r\n-\t\t\t\t * No way to set IP protocol for outer tunnel\r\n-\t\t\t\t * layers. Usually it is fixed, for example,\r\n-\t\t\t\t * to UDP for VXLAN/GPE.\r\n-\t\t\t\t */\r\n-\t\t\t\tassert(spec.ipv6); /* Mask is not empty. */\r\n-\t\t\t\tmnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,\r\n-\t\t\t\t\t\tspec.ipv6->hdr.proto);\r\n-\t\t\t\tip_proto_set = 1;\r\n-\t\t\t}\r\n-\t\t\tipv6_dst = !IN6_IS_ADDR_UNSPECIFIED\r\n-\t\t\t\t\t\t(mask.ipv6->hdr.dst_addr);\r\n-\t\t\tipv6_src = !IN6_IS_ADDR_UNSPECIFIED\r\n-\t\t\t\t\t\t(mask.ipv6->hdr.src_addr);\r\n-\t\t\tif (mask.ipv6 == &flow_tcf_mask_empty.ipv6 ||\r\n-\t\t\t     (!ipv6_dst && !ipv6_src)) {\r\n-\t\t\t\tif (!tunnel_outer)\r\n-\t\t\t\t\tbreak;\r\n-\t\t\t\t/*\r\n-\t\t\t\t * For tunnel outer we must set outer IP key\r\n-\t\t\t\t * anyway, even if the specification/mask is\r\n-\t\t\t\t * empty. There is no another way to tell\r\n-\t\t\t\t * kernel about he outer layer protocol.\r\n-\t\t\t\t */\r\n-\t\t\t\tmnl_attr_put(nlh,\r\n-\t\t\t\t\t     TCA_FLOWER_KEY_ENC_IPV6_SRC,\r\n-\t\t\t\t\t     IPV6_ADDR_LEN,\r\n-\t\t\t\t\t     mask.ipv6->hdr.src_addr);\r\n-\t\t\t\tmnl_attr_put(nlh,\r\n-\t\t\t\t\t     TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,\r\n-\t\t\t\t\t     IPV6_ADDR_LEN,\r\n-\t\t\t\t\t     mask.ipv6->hdr.src_addr);\r\n-\t\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t\tif (ipv6_src) {\r\n-\t\t\t\tmnl_attr_put(nlh, tunnel_outer ?\r\n-\t\t\t\t\t     TCA_FLOWER_KEY_ENC_IPV6_SRC :\r\n-\t\t\t\t\t     TCA_FLOWER_KEY_IPV6_SRC,\r\n-\t\t\t\t\t     IPV6_ADDR_LEN,\r\n-\t\t\t\t\t     spec.ipv6->hdr.src_addr);\r\n-\t\t\t\tmnl_attr_put(nlh, tunnel_outer ?\r\n-\t\t\t\t\t     TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK :\r\n-\t\t\t\t\t     TCA_FLOWER_KEY_IPV6_SRC_MASK,\r\n-\t\t\t\t\t     IPV6_ADDR_LEN,\r\n-\t\t\t\t\t     mask.ipv6->hdr.src_addr);\r\n-\t\t\t}\r\n-\t\t\tif (ipv6_dst) {\r\n-\t\t\t\tmnl_attr_put(nlh, tunnel_outer ?\r\n-\t\t\t\t\t     TCA_FLOWER_KEY_ENC_IPV6_DST :\r\n-\t\t\t\t\t     TCA_FLOWER_KEY_IPV6_DST,\r\n-\t\t\t\t\t     IPV6_ADDR_LEN,\r\n-\t\t\t\t\t     spec.ipv6->hdr.dst_addr);\r\n-\t\t\t\tmnl_attr_put(nlh, tunnel_outer ?\r\n-\t\t\t\t\t     TCA_FLOWER_KEY_ENC_IPV6_DST_MASK :\r\n-\t\t\t\t\t     TCA_FLOWER_KEY_IPV6_DST_MASK,\r\n-\t\t\t\t\t     IPV6_ADDR_LEN,\r\n-\t\t\t\t\t     mask.ipv6->hdr.dst_addr);\r\n-\t\t\t}\r\n-\t\t\tif (mask.ipv6->hdr.hop_limits) {\r\n-\t\t\t\tmnl_attr_put_u8\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IP_TTL :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IP_TTL,\r\n-\t\t\t\t\t spec.ipv6->hdr.hop_limits);\r\n-\t\t\t\tmnl_attr_put_u8\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IP_TTL_MASK :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IP_TTL_MASK,\r\n-\t\t\t\t\t mask.ipv6->hdr.hop_limits);\r\n-\t\t\t}\r\n-\t\t\tmsk6 = (rte_be_to_cpu_32(mask.ipv6->hdr.vtc_flow) >>\r\n-\t\t\t\tRTE_IPV6_HDR_TC_SHIFT) & 0xff;\r\n-\t\t\tif (msk6) {\r\n-\t\t\t\ttos6 = (rte_be_to_cpu_32\r\n-\t\t\t\t\t(spec.ipv6->hdr.vtc_flow) >>\r\n-\t\t\t\t\t\tRTE_IPV6_HDR_TC_SHIFT) & 0xff;\r\n-\t\t\t\tmnl_attr_put_u8\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IP_TOS :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IP_TOS, tos6);\r\n-\t\t\t\tmnl_attr_put_u8\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_IP_TOS_MASK :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_IP_TOS_MASK, msk6);\r\n-\t\t\t}\r\n-\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\r\n-\t\t\titem_flags |= (item_flags & 
MLX5_FLOW_LAYER_TUNNEL) ?\r\n-\t\t\t\t      MLX5_FLOW_LAYER_INNER_L4_UDP :\r\n-\t\t\t\t      MLX5_FLOW_LAYER_OUTER_L4_UDP;\r\n-\t\t\tmask.udp = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_udp_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.udp,\r\n-\t\t\t\t &flow_tcf_mask_empty.udp,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.udp),\r\n-\t\t\t\t error);\r\n-\t\t\tassert(mask.udp);\r\n-\t\t\tspec.udp = items->spec;\r\n-\t\t\tif (!tunnel_outer) {\r\n-\t\t\t\tif (!ip_proto_set)\r\n-\t\t\t\t\tmnl_attr_put_u8\r\n-\t\t\t\t\t\t(nlh, TCA_FLOWER_KEY_IP_PROTO,\r\n-\t\t\t\t\t\tIPPROTO_UDP);\r\n-\t\t\t\tif (mask.udp == &flow_tcf_mask_empty.udp)\r\n-\t\t\t\t\tbreak;\r\n-\t\t\t} else {\r\n-\t\t\t\tassert(mask.udp != &flow_tcf_mask_empty.udp);\r\n-\t\t\t\tdecap.vxlan->udp_port =\r\n-\t\t\t\t\trte_be_to_cpu_16\r\n-\t\t\t\t\t\t(spec.udp->hdr.dst_port);\r\n-\t\t\t}\r\n-\t\t\tif (mask.udp->hdr.src_port) {\r\n-\t\t\t\tmnl_attr_put_u16\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_UDP_SRC_PORT :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_UDP_SRC,\r\n-\t\t\t\t\t spec.udp->hdr.src_port);\r\n-\t\t\t\tmnl_attr_put_u16\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_UDP_SRC_MASK,\r\n-\t\t\t\t\t mask.udp->hdr.src_port);\r\n-\t\t\t}\r\n-\t\t\tif (mask.udp->hdr.dst_port) {\r\n-\t\t\t\tmnl_attr_put_u16\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_UDP_DST_PORT :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_UDP_DST,\r\n-\t\t\t\t\t spec.udp->hdr.dst_port);\r\n-\t\t\t\tmnl_attr_put_u16\r\n-\t\t\t\t\t(nlh, tunnel_outer ?\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK :\r\n-\t\t\t\t\t TCA_FLOWER_KEY_UDP_DST_MASK,\r\n-\t\t\t\t\t mask.udp->hdr.dst_port);\r\n-\t\t\t}\r\n-\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\r\n-\t\t\titem_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?\r\n-\t\t\t\t      MLX5_FLOW_LAYER_INNER_L4_TCP :\r\n-\t\t\t\t      MLX5_FLOW_LAYER_OUTER_L4_TCP;\r\n-\t\t\tmask.tcp = flow_tcf_item_mask\r\n-\t\t\t\t(items, &rte_flow_item_tcp_mask,\r\n-\t\t\t\t &flow_tcf_mask_supported.tcp,\r\n-\t\t\t\t &flow_tcf_mask_empty.tcp,\r\n-\t\t\t\t sizeof(flow_tcf_mask_supported.tcp),\r\n-\t\t\t\t error);\r\n-\t\t\tassert(mask.tcp);\r\n-\t\t\tif (!ip_proto_set)\r\n-\t\t\t\tmnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,\r\n-\t\t\t\t\t\tIPPROTO_TCP);\r\n-\t\t\tif (mask.tcp == &flow_tcf_mask_empty.tcp)\r\n-\t\t\t\tbreak;\r\n-\t\t\tspec.tcp = items->spec;\r\n-\t\t\tif (mask.tcp->hdr.src_port) {\r\n-\t\t\t\tmnl_attr_put_u16(nlh, TCA_FLOWER_KEY_TCP_SRC,\r\n-\t\t\t\t\t\t spec.tcp->hdr.src_port);\r\n-\t\t\t\tmnl_attr_put_u16(nlh,\r\n-\t\t\t\t\t\t TCA_FLOWER_KEY_TCP_SRC_MASK,\r\n-\t\t\t\t\t\t mask.tcp->hdr.src_port);\r\n-\t\t\t}\r\n-\t\t\tif (mask.tcp->hdr.dst_port) {\r\n-\t\t\t\tmnl_attr_put_u16(nlh, TCA_FLOWER_KEY_TCP_DST,\r\n-\t\t\t\t\t\t spec.tcp->hdr.dst_port);\r\n-\t\t\t\tmnl_attr_put_u16(nlh,\r\n-\t\t\t\t\t\t TCA_FLOWER_KEY_TCP_DST_MASK,\r\n-\t\t\t\t\t\t mask.tcp->hdr.dst_port);\r\n-\t\t\t}\r\n-\t\t\tif (mask.tcp->hdr.tcp_flags) {\r\n-\t\t\t\tmnl_attr_put_u16\r\n-\t\t\t\t\t(nlh,\r\n-\t\t\t\t\t TCA_FLOWER_KEY_TCP_FLAGS,\r\n-\t\t\t\t\t rte_cpu_to_be_16\r\n-\t\t\t\t\t\t(spec.tcp->hdr.tcp_flags));\r\n-\t\t\t\tmnl_attr_put_u16\r\n-\t\t\t\t\t(nlh,\r\n-\t\t\t\t\t TCA_FLOWER_KEY_TCP_FLAGS_MASK,\r\n-\t\t\t\t\t rte_cpu_to_be_16\r\n-\t\t\t\t\t\t(mask.tcp->hdr.tcp_flags));\r\n-\t\t\t}\r\n-\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\tbreak;\r\n-\t\tcase 
RTE_FLOW_ITEM_TYPE_VXLAN:\r\n-\t\t\tassert(decap.vxlan);\r\n-\t\t\ttunnel_outer = 0;\r\n-\t\t\titem_flags |= MLX5_FLOW_LAYER_VXLAN;\r\n-\t\t\tspec.vxlan = items->spec;\r\n-\t\t\tmnl_attr_put_u32(nlh,\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ENC_KEY_ID,\r\n-\t\t\t\t\t vxlan_vni_as_be32(spec.vxlan->vni));\r\n-\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\r\n-\t\t\t\t\t\t  NULL, \"item not supported\");\r\n-\t\t}\r\n-\t}\r\n-\t/*\r\n-\t * Set the ether_type flower key and tc rule protocol:\r\n-\t * - if there is nor VLAN neither VXLAN the key is taken from\r\n-\t *   eth item directly or deduced from L3 items.\r\n-\t * - if there is vlan item then key is fixed to 802.1q.\r\n-\t * - if there is vxlan item then key is set to inner tunnel type.\r\n-\t * - simultaneous vlan and vxlan items are prohibited.\r\n-\t */\r\n-\tif (outer_etype != RTE_BE16(ETH_P_ALL)) {\r\n-\t\ttcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16,\r\n-\t\t\t\t\t   outer_etype);\r\n-\t\tif (item_flags & MLX5_FLOW_LAYER_TUNNEL) {\r\n-\t\t\tif (inner_etype != RTE_BE16(ETH_P_ALL))\r\n-\t\t\t\tmnl_attr_put_u16(nlh,\r\n-\t\t\t\t\t\t TCA_FLOWER_KEY_ETH_TYPE,\r\n-\t\t\t\t\t\t inner_etype);\r\n-\t\t} else {\r\n-\t\t\tmnl_attr_put_u16(nlh,\r\n-\t\t\t\t\t TCA_FLOWER_KEY_ETH_TYPE,\r\n-\t\t\t\t\t outer_etype);\r\n-\t\t\tif (outer_etype == RTE_BE16(ETH_P_8021Q) &&\r\n-\t\t\t    vlan_etype != RTE_BE16(ETH_P_ALL))\r\n-\t\t\t\tmnl_attr_put_u16(nlh,\r\n-\t\t\t\t\t\t TCA_FLOWER_KEY_VLAN_ETH_TYPE,\r\n-\t\t\t\t\t\t vlan_etype);\r\n-\t\t}\r\n-\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t}\r\n-\tna_flower_act = mnl_attr_nest_start(nlh, TCA_FLOWER_ACT);\r\n-\tna_act_index_cur = 1;\r\n-\tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {\r\n-\t\tstruct nlattr *na_act_index;\r\n-\t\tstruct nlattr *na_act;\r\n-\t\tunsigned int vlan_act;\r\n-\t\tunsigned int i;\r\n-\r\n-\t\tswitch (actions->type) {\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_PORT_ID:\r\n-\t\t\tconf.port_id = actions->conf;\r\n-\t\t\tif (conf.port_id->original)\r\n-\t\t\t\ti = 0;\r\n-\t\t\telse\r\n-\t\t\t\tfor (i = 0; ptoi[i].ifindex; ++i)\r\n-\t\t\t\t\tif (ptoi[i].port_id == conf.port_id->id)\r\n-\t\t\t\t\t\tbreak;\r\n-\t\t\tassert(ptoi[i].ifindex);\r\n-\t\t\tna_act_index =\r\n-\t\t\t\tmnl_attr_nest_start(nlh, na_act_index_cur++);\r\n-\t\t\tassert(na_act_index);\r\n-\t\t\tmnl_attr_put_strz(nlh, TCA_ACT_KIND, \"mirred\");\r\n-\t\t\tna_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);\r\n-\t\t\tassert(na_act);\r\n-\t\t\tif (encap.hdr) {\r\n-\t\t\t\tassert(dev_flow->tcf.tunnel);\r\n-\t\t\t\tdev_flow->tcf.tunnel->ifindex_ptr =\r\n-\t\t\t\t\t&((struct tc_mirred *)\r\n-\t\t\t\t\tmnl_attr_get_payload\r\n-\t\t\t\t\t(mnl_nlmsg_get_payload_tail\r\n-\t\t\t\t\t\t(nlh)))->ifindex;\r\n-\t\t\t} else if (decap.hdr) {\r\n-\t\t\t\tassert(dev_flow->tcf.tunnel);\r\n-\t\t\t\tdev_flow->tcf.tunnel->ifindex_ptr =\r\n-\t\t\t\t\t(unsigned int *)&tcm->tcm_ifindex;\r\n-\t\t\t}\r\n-\t\t\tmnl_attr_put(nlh, TCA_MIRRED_PARMS,\r\n-\t\t\t\t     sizeof(struct tc_mirred),\r\n-\t\t\t\t     &(struct tc_mirred){\r\n-\t\t\t\t\t.action = TC_ACT_STOLEN,\r\n-\t\t\t\t\t.eaction = TCA_EGRESS_REDIR,\r\n-\t\t\t\t\t.ifindex = ptoi[i].ifindex,\r\n-\t\t\t\t     });\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act);\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act_index);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_JUMP:\r\n-\t\t\tconf.jump = 
actions->conf;\r\n-\t\t\tna_act_index =\r\n-\t\t\t\tmnl_attr_nest_start(nlh, na_act_index_cur++);\r\n-\t\t\tassert(na_act_index);\r\n-\t\t\tmnl_attr_put_strz(nlh, TCA_ACT_KIND, \"gact\");\r\n-\t\t\tna_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);\r\n-\t\t\tassert(na_act);\r\n-\t\t\tmnl_attr_put(nlh, TCA_GACT_PARMS,\r\n-\t\t\t\t     sizeof(struct tc_gact),\r\n-\t\t\t\t     &(struct tc_gact){\r\n-\t\t\t\t\t.action = TC_ACT_GOTO_CHAIN |\r\n-\t\t\t\t\t\t  conf.jump->group,\r\n-\t\t\t\t     });\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act);\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act_index);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_DROP:\r\n-\t\t\tna_act_index =\r\n-\t\t\t\tmnl_attr_nest_start(nlh, na_act_index_cur++);\r\n-\t\t\tassert(na_act_index);\r\n-\t\t\tmnl_attr_put_strz(nlh, TCA_ACT_KIND, \"gact\");\r\n-\t\t\tna_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);\r\n-\t\t\tassert(na_act);\r\n-\t\t\tmnl_attr_put(nlh, TCA_GACT_PARMS,\r\n-\t\t\t\t     sizeof(struct tc_gact),\r\n-\t\t\t\t     &(struct tc_gact){\r\n-\t\t\t\t\t.action = TC_ACT_SHOT,\r\n-\t\t\t\t     });\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act);\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act_index);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\r\n-\t\t\t/*\r\n-\t\t\t * Driver adds the count action implicitly for\r\n-\t\t\t * each rule it creates.\r\n-\t\t\t */\r\n-\t\t\tret = flow_tcf_translate_action_count(dev,\r\n-\t\t\t\t\t\t\t      dev_flow, error);\r\n-\t\t\tif (ret < 0)\r\n-\t\t\t\treturn ret;\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:\r\n-\t\t\tconf.of_push_vlan = NULL;\r\n-\t\t\tvlan_act = TCA_VLAN_ACT_POP;\r\n-\t\t\tgoto action_of_vlan;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:\r\n-\t\t\tconf.of_push_vlan = actions->conf;\r\n-\t\t\tvlan_act = TCA_VLAN_ACT_PUSH;\r\n-\t\t\tgoto action_of_vlan;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:\r\n-\t\t\tconf.of_set_vlan_vid = actions->conf;\r\n-\t\t\tif (na_vlan_id)\r\n-\t\t\t\tgoto override_na_vlan_id;\r\n-\t\t\tvlan_act = TCA_VLAN_ACT_MODIFY;\r\n-\t\t\tgoto action_of_vlan;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:\r\n-\t\t\tconf.of_set_vlan_pcp = actions->conf;\r\n-\t\t\tif (na_vlan_priority)\r\n-\t\t\t\tgoto override_na_vlan_priority;\r\n-\t\t\tvlan_act = TCA_VLAN_ACT_MODIFY;\r\n-\t\t\tgoto action_of_vlan;\r\n-action_of_vlan:\r\n-\t\t\tna_act_index =\r\n-\t\t\t\tmnl_attr_nest_start(nlh, na_act_index_cur++);\r\n-\t\t\tassert(na_act_index);\r\n-\t\t\tmnl_attr_put_strz(nlh, TCA_ACT_KIND, \"vlan\");\r\n-\t\t\tna_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);\r\n-\t\t\tassert(na_act);\r\n-\t\t\tmnl_attr_put(nlh, TCA_VLAN_PARMS,\r\n-\t\t\t\t     sizeof(struct tc_vlan),\r\n-\t\t\t\t     &(struct tc_vlan){\r\n-\t\t\t\t\t.action = TC_ACT_PIPE,\r\n-\t\t\t\t\t.v_action = vlan_act,\r\n-\t\t\t\t     });\r\n-\t\t\tif (vlan_act == TCA_VLAN_ACT_POP) {\r\n-\t\t\t\tmnl_attr_nest_end(nlh, na_act);\r\n-\t\t\t\tmnl_attr_nest_end(nlh, na_act_index);\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t\tif (vlan_act == TCA_VLAN_ACT_PUSH)\r\n-\t\t\t\tmnl_attr_put_u16(nlh,\r\n-\t\t\t\t\t\t TCA_VLAN_PUSH_VLAN_PROTOCOL,\r\n-\t\t\t\t\t\t conf.of_push_vlan->ethertype);\r\n-\t\t\tna_vlan_id = mnl_nlmsg_get_payload_tail(nlh);\r\n-\t\t\tmnl_attr_put_u16(nlh, TCA_VLAN_PAD, 0);\r\n-\t\t\tna_vlan_priority = mnl_nlmsg_get_payload_tail(nlh);\r\n-\t\t\tmnl_attr_put_u8(nlh, TCA_VLAN_PAD, 0);\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act);\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act_index);\r\n-\t\t\tif (actions->type ==\r\n-\t\t\t    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) 
{\r\n-override_na_vlan_id:\r\n-\t\t\t\tna_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID;\r\n-\t\t\t\t*(uint16_t *)mnl_attr_get_payload(na_vlan_id) =\r\n-\t\t\t\t\trte_be_to_cpu_16\r\n-\t\t\t\t\t(conf.of_set_vlan_vid->vlan_vid);\r\n-\t\t\t} else if (actions->type ==\r\n-\t\t\t\t   RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {\r\n-override_na_vlan_priority:\r\n-\t\t\t\tna_vlan_priority->nla_type =\r\n-\t\t\t\t\tTCA_VLAN_PUSH_VLAN_PRIORITY;\r\n-\t\t\t\t*(uint8_t *)mnl_attr_get_payload\r\n-\t\t\t\t\t(na_vlan_priority) =\r\n-\t\t\t\t\tconf.of_set_vlan_pcp->vlan_pcp;\r\n-\t\t\t}\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:\r\n-\t\t\tassert(decap.vxlan);\r\n-\t\t\tassert(dev_flow->tcf.tunnel);\r\n-\t\t\tdev_flow->tcf.tunnel->ifindex_ptr =\r\n-\t\t\t\t(unsigned int *)&tcm->tcm_ifindex;\r\n-\t\t\tna_act_index =\r\n-\t\t\t\tmnl_attr_nest_start(nlh, na_act_index_cur++);\r\n-\t\t\tassert(na_act_index);\r\n-\t\t\tmnl_attr_put_strz(nlh, TCA_ACT_KIND, \"tunnel_key\");\r\n-\t\t\tna_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);\r\n-\t\t\tassert(na_act);\r\n-\t\t\tmnl_attr_put(nlh, TCA_TUNNEL_KEY_PARMS,\r\n-\t\t\t\tsizeof(struct tc_tunnel_key),\r\n-\t\t\t\t&(struct tc_tunnel_key){\r\n-\t\t\t\t\t.action = TC_ACT_PIPE,\r\n-\t\t\t\t\t.t_action = TCA_TUNNEL_KEY_ACT_RELEASE,\r\n-\t\t\t\t\t});\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act);\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act_index);\r\n-\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\r\n-\t\t\tassert(encap.vxlan);\r\n-\t\t\tflow_tcf_vxlan_encap_parse(actions, encap.vxlan);\r\n-\t\t\tna_act_index =\r\n-\t\t\t\tmnl_attr_nest_start(nlh, na_act_index_cur++);\r\n-\t\t\tassert(na_act_index);\r\n-\t\t\tmnl_attr_put_strz(nlh, TCA_ACT_KIND, \"tunnel_key\");\r\n-\t\t\tna_act = mnl_attr_nest_start(nlh, TCA_ACT_OPTIONS);\r\n-\t\t\tassert(na_act);\r\n-\t\t\tmnl_attr_put(nlh, TCA_TUNNEL_KEY_PARMS,\r\n-\t\t\t\tsizeof(struct tc_tunnel_key),\r\n-\t\t\t\t&(struct tc_tunnel_key){\r\n-\t\t\t\t\t.action = TC_ACT_PIPE,\r\n-\t\t\t\t\t.t_action = TCA_TUNNEL_KEY_ACT_SET,\r\n-\t\t\t\t\t});\r\n-\t\t\tif (encap.vxlan->mask & FLOW_TCF_ENCAP_UDP_DST)\r\n-\t\t\t\tmnl_attr_put_u16(nlh,\r\n-\t\t\t\t\t TCA_TUNNEL_KEY_ENC_DST_PORT,\r\n-\t\t\t\t\t encap.vxlan->udp.dst);\r\n-\t\t\tif (encap.vxlan->mask & FLOW_TCF_ENCAP_IPV4_SRC)\r\n-\t\t\t\tmnl_attr_put_u32(nlh,\r\n-\t\t\t\t\t TCA_TUNNEL_KEY_ENC_IPV4_SRC,\r\n-\t\t\t\t\t encap.vxlan->ipv4.src);\r\n-\t\t\tif (encap.vxlan->mask & FLOW_TCF_ENCAP_IPV4_DST)\r\n-\t\t\t\tmnl_attr_put_u32(nlh,\r\n-\t\t\t\t\t TCA_TUNNEL_KEY_ENC_IPV4_DST,\r\n-\t\t\t\t\t encap.vxlan->ipv4.dst);\r\n-\t\t\tif (encap.vxlan->mask & FLOW_TCF_ENCAP_IPV6_SRC)\r\n-\t\t\t\tmnl_attr_put(nlh,\r\n-\t\t\t\t\t TCA_TUNNEL_KEY_ENC_IPV6_SRC,\r\n-\t\t\t\t\t sizeof(encap.vxlan->ipv6.src),\r\n-\t\t\t\t\t &encap.vxlan->ipv6.src);\r\n-\t\t\tif (encap.vxlan->mask & FLOW_TCF_ENCAP_IPV6_DST)\r\n-\t\t\t\tmnl_attr_put(nlh,\r\n-\t\t\t\t\t TCA_TUNNEL_KEY_ENC_IPV6_DST,\r\n-\t\t\t\t\t sizeof(encap.vxlan->ipv6.dst),\r\n-\t\t\t\t\t &encap.vxlan->ipv6.dst);\r\n-\t\t\tif (encap.vxlan->mask & FLOW_TCF_ENCAP_IP_TTL)\r\n-\t\t\t\tmnl_attr_put_u8(nlh,\r\n-\t\t\t\t\t TCA_TUNNEL_KEY_ENC_TTL,\r\n-\t\t\t\t\t encap.vxlan->ip_ttl_hop);\r\n-\t\t\tif (encap.vxlan->mask & FLOW_TCF_ENCAP_IP_TOS)\r\n-\t\t\t\tmnl_attr_put_u8(nlh,\r\n-\t\t\t\t\t TCA_TUNNEL_KEY_ENC_TOS,\r\n-\t\t\t\t\t encap.vxlan->ip_tos);\r\n-\t\t\tif (encap.vxlan->mask & FLOW_TCF_ENCAP_VXLAN_VNI)\r\n-\t\t\t\tmnl_attr_put_u32(nlh,\r\n-\t\t\t\t\t 
TCA_TUNNEL_KEY_ENC_KEY_ID,\r\n-\t\t\t\t\t vxlan_vni_as_be32\r\n-\t\t\t\t\t\t(encap.vxlan->vxlan.vni));\r\n-\t\t\tmnl_attr_put_u8(nlh, TCA_TUNNEL_KEY_NO_CSUM, 0);\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act);\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act_index);\r\n-\t\t\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TP_DST:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_TTL:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_DEC_TTL:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_SET_MAC_DST:\r\n-\t\t\tna_act_index =\r\n-\t\t\t\tmnl_attr_nest_start(nlh, na_act_index_cur++);\r\n-\t\t\tflow_tcf_create_pedit_mnl_msg(nlh,\r\n-\t\t\t\t\t\t      &actions, item_flags);\r\n-\t\t\tmnl_attr_nest_end(nlh, na_act_index);\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"action not supported\");\r\n-\t\t}\r\n-\t}\r\n-\tassert(na_flower);\r\n-\tassert(na_flower_act);\r\n-\tmnl_attr_nest_end(nlh, na_flower_act);\r\n-\tdev_flow->tcf.ptc_flags = mnl_attr_get_payload\r\n-\t\t\t\t\t(mnl_nlmsg_get_payload_tail(nlh));\r\n-\tmnl_attr_put_u32(nlh, TCA_FLOWER_FLAGS,\tdecap.vxlan ?\r\n-\t\t\t\t\t\t0 : TCA_CLS_FLAGS_SKIP_SW);\r\n-\tmnl_attr_nest_end(nlh, na_flower);\r\n-\tif (dev_flow->tcf.tunnel && dev_flow->tcf.tunnel->ifindex_ptr)\r\n-\t\tdev_flow->tcf.tunnel->ifindex_org =\r\n-\t\t\t*dev_flow->tcf.tunnel->ifindex_ptr;\r\n-\tassert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Send Netlink message with acknowledgment.\r\n- *\r\n- * @param tcf\r\n- *   Flow context to use.\r\n- * @param nlh\r\n- *   Message to send. This function always raises the NLM_F_ACK flag before\r\n- *   sending.\r\n- * @param[in] cb\r\n- *   Callback handler for received message.\r\n- * @param[in] arg\r\n- *   Context pointer for callback handler.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_nl_ack(struct mlx5_flow_tcf_context *tcf,\r\n-\t\tstruct nlmsghdr *nlh,\r\n-\t\tmnl_cb_t cb, void *arg)\r\n-{\r\n-\tunsigned int portid = mnl_socket_get_portid(tcf->nl);\r\n-\tuint32_t seq = tcf->seq++;\r\n-\tint ret, err = 0;\r\n-\r\n-\tassert(tcf->nl);\r\n-\tassert(tcf->buf);\r\n-\tif (!seq) {\r\n-\t\t/* seq 0 is reserved for kernel event-driven notifications. */\r\n-\t\tseq = tcf->seq++;\r\n-\t}\r\n-\tnlh->nlmsg_seq = seq;\r\n-\tnlh->nlmsg_flags |= NLM_F_ACK;\r\n-\tret = mnl_socket_sendto(tcf->nl, nlh, nlh->nlmsg_len);\r\n-\tif (ret <= 0) {\r\n-\t\t/* Message send error occurred. */\r\n-\t\trte_errno = errno;\r\n-\t\treturn -rte_errno;\r\n-\t}\r\n-\tnlh = (struct nlmsghdr *)(tcf->buf);\r\n-\t/*\r\n-\t * The following loop postpones non-fatal errors until multipart\r\n-\t * messages are complete.\r\n-\t */\r\n-\twhile (true) {\r\n-\t\tret = mnl_socket_recvfrom(tcf->nl, tcf->buf, tcf->buf_size);\r\n-\t\tif (ret < 0) {\r\n-\t\t\terr = errno;\r\n-\t\t\t/*\r\n-\t\t\t * In case of overflow Will receive till\r\n-\t\t\t * end of multipart message. 
We may lost part\r\n-\t\t\t * of reply messages but mark and return an error.\r\n-\t\t\t */\r\n-\t\t\tif (err != ENOSPC ||\r\n-\t\t\t    !(nlh->nlmsg_flags & NLM_F_MULTI) ||\r\n-\t\t\t    nlh->nlmsg_type == NLMSG_DONE)\r\n-\t\t\t\tbreak;\r\n-\t\t} else {\r\n-\t\t\tret = mnl_cb_run(nlh, ret, seq, portid, cb, arg);\r\n-\t\t\tif (!ret) {\r\n-\t\t\t\t/*\r\n-\t\t\t\t * libmnl returns 0 if DONE or\r\n-\t\t\t\t * success ACK message found.\r\n-\t\t\t\t */\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t\tif (ret < 0) {\r\n-\t\t\t\t/*\r\n-\t\t\t\t * ACK message with error found\r\n-\t\t\t\t * or some error occurred.\r\n-\t\t\t\t */\r\n-\t\t\t\terr = errno;\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t\t/* We should continue receiving. */\r\n-\t\t}\r\n-\t}\r\n-\tif (!err)\r\n-\t\treturn 0;\r\n-\trte_errno = err;\r\n-\treturn -err;\r\n-}\r\n-\r\n-#define MNL_BUF_EXTRA_SPACE 16\r\n-#define MNL_REQUEST_SIZE_MIN 256\r\n-#define MNL_REQUEST_SIZE_MAX 2048\r\n-#define MNL_REQUEST_SIZE RTE_MIN(RTE_MAX(sysconf(_SC_PAGESIZE), \\\r\n-\t\t\t\t MNL_REQUEST_SIZE_MIN), MNL_REQUEST_SIZE_MAX)\r\n-\r\n-/* Data structures used by flow_tcf_xxx_cb() routines. */\r\n-struct tcf_nlcb_buf {\r\n-\tLIST_ENTRY(tcf_nlcb_buf) next;\r\n-\tuint32_t size;\r\n-\talignas(struct nlmsghdr)\r\n-\tuint8_t msg[]; /**< Netlink message data. */\r\n-};\r\n-\r\n-struct tcf_nlcb_context {\r\n-\tunsigned int ifindex; /**< Base interface index. */\r\n-\tuint32_t bufsize;\r\n-\tLIST_HEAD(, tcf_nlcb_buf) nlbuf;\r\n-};\r\n-\r\n-/**\r\n- * Allocate space for netlink command in buffer list\r\n- *\r\n- * @param[in, out] ctx\r\n- *   Pointer to callback context with command buffers list.\r\n- * @param[in] size\r\n- *   Required size of data buffer to be allocated.\r\n- *\r\n- * @return\r\n- *   Pointer to allocated memory, aligned as message header.\r\n- *   NULL if some error occurred.\r\n- */\r\n-static struct nlmsghdr *\r\n-flow_tcf_alloc_nlcmd(struct tcf_nlcb_context *ctx, uint32_t size)\r\n-{\r\n-\tstruct tcf_nlcb_buf *buf;\r\n-\tstruct nlmsghdr *nlh;\r\n-\r\n-\tsize = NLMSG_ALIGN(size);\r\n-\tbuf = LIST_FIRST(&ctx->nlbuf);\r\n-\tif (buf && (buf->size + size) <= ctx->bufsize) {\r\n-\t\tnlh = (struct nlmsghdr *)&buf->msg[buf->size];\r\n-\t\tbuf->size += size;\r\n-\t\treturn nlh;\r\n-\t}\r\n-\tif (size > ctx->bufsize) {\r\n-\t\tDRV_LOG(WARNING, \"netlink: too long command buffer requested\");\r\n-\t\treturn NULL;\r\n-\t}\r\n-\tbuf = rte_malloc(__func__,\r\n-\t\t\tctx->bufsize + sizeof(struct tcf_nlcb_buf),\r\n-\t\t\talignof(struct tcf_nlcb_buf));\r\n-\tif (!buf) {\r\n-\t\tDRV_LOG(WARNING, \"netlink: no memory for command buffer\");\r\n-\t\treturn NULL;\r\n-\t}\r\n-\tLIST_INSERT_HEAD(&ctx->nlbuf, buf, next);\r\n-\tbuf->size = size;\r\n-\tnlh = (struct nlmsghdr *)&buf->msg[0];\r\n-\treturn nlh;\r\n-}\r\n-\r\n-/**\r\n- * Send the buffers with prepared netlink commands. Scans the list and\r\n- * sends all found buffers. 
Buffers are sent and freed anyway in order\r\n- * to prevent memory leakage if some every message in received packet.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in, out] ctx\r\n- *   Pointer to callback context with command buffers list.\r\n- *\r\n- * @return\r\n- *   Zero value on success, negative errno value otherwise\r\n- *   and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_send_nlcmd(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t    struct tcf_nlcb_context *ctx)\r\n-{\r\n-\tstruct tcf_nlcb_buf *bc = LIST_FIRST(&ctx->nlbuf);\r\n-\tint ret = 0;\r\n-\r\n-\twhile (bc) {\r\n-\t\tstruct tcf_nlcb_buf *bn = LIST_NEXT(bc, next);\r\n-\t\tstruct nlmsghdr *nlh;\r\n-\t\tuint32_t msg = 0;\r\n-\t\tint rc;\r\n-\r\n-\t\twhile (msg < bc->size) {\r\n-\t\t\t/*\r\n-\t\t\t * Send Netlink commands from buffer in one by one\r\n-\t\t\t * fashion. If we send multiple rule deletion commands\r\n-\t\t\t * in one Netlink message and some error occurs it may\r\n-\t\t\t * cause multiple ACK error messages and break sequence\r\n-\t\t\t * numbers of Netlink communication, because we expect\r\n-\t\t\t * the only one ACK reply.\r\n-\t\t\t */\r\n-\t\t\tassert((bc->size - msg) >= sizeof(struct nlmsghdr));\r\n-\t\t\tnlh = (struct nlmsghdr *)&bc->msg[msg];\r\n-\t\t\tassert((bc->size - msg) >= nlh->nlmsg_len);\r\n-\t\t\tmsg += nlh->nlmsg_len;\r\n-\t\t\trc = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);\r\n-\t\t\tif (rc) {\r\n-\t\t\t\tDRV_LOG(WARNING,\r\n-\t\t\t\t\t\"netlink: cleanup error %d\", rc);\r\n-\t\t\t\tif (!ret)\r\n-\t\t\t\t\tret = rc;\r\n-\t\t\t}\r\n-\t\t}\r\n-\t\trte_free(bc);\r\n-\t\tbc = bn;\r\n-\t}\r\n-\tLIST_INIT(&ctx->nlbuf);\r\n-\treturn ret;\r\n-}\r\n-\r\n-/**\r\n- * Collect local IP address rules with scope link attribute  on specified\r\n- * network device. This is callback routine called by libmnl mnl_cb_run()\r\n- * in loop for every message in received packet.\r\n- *\r\n- * @param[in] nlh\r\n- *   Pointer to reply header.\r\n- * @param[in, out] arg\r\n- *   Opaque data pointer for this callback.\r\n- *\r\n- * @return\r\n- *   A positive, nonzero value on success, negative errno value otherwise\r\n- *   and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_collect_local_cb(const struct nlmsghdr *nlh, void *arg)\r\n-{\r\n-\tstruct tcf_nlcb_context *ctx = arg;\r\n-\tstruct nlmsghdr *cmd;\r\n-\tstruct ifaddrmsg *ifa;\r\n-\tstruct nlattr *na;\r\n-\tstruct nlattr *na_local = NULL;\r\n-\tstruct nlattr *na_peer = NULL;\r\n-\tunsigned char family;\r\n-\tuint32_t size;\r\n-\r\n-\tif (nlh->nlmsg_type != RTM_NEWADDR) {\r\n-\t\trte_errno = EINVAL;\r\n-\t\treturn -rte_errno;\r\n-\t}\r\n-\tifa = mnl_nlmsg_get_payload(nlh);\r\n-\tfamily = ifa->ifa_family;\r\n-\tif (ifa->ifa_index != ctx->ifindex ||\r\n-\t    ifa->ifa_scope != RT_SCOPE_LINK ||\r\n-\t    !(ifa->ifa_flags & IFA_F_PERMANENT) ||\r\n-\t    (family != AF_INET && family != AF_INET6))\r\n-\t\treturn 1;\r\n-\tmnl_attr_for_each(na, nlh, sizeof(*ifa)) {\r\n-\t\tswitch (mnl_attr_get_type(na)) {\r\n-\t\tcase IFA_LOCAL:\r\n-\t\t\tna_local = na;\r\n-\t\t\tbreak;\r\n-\t\tcase IFA_ADDRESS:\r\n-\t\t\tna_peer = na;\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t\tif (na_local && na_peer)\r\n-\t\t\tbreak;\r\n-\t}\r\n-\tif (!na_local || !na_peer)\r\n-\t\treturn 1;\r\n-\t/* Local rule found with scope link, permanent and assigned peer. */\r\n-\tsize = MNL_ALIGN(sizeof(struct nlmsghdr)) +\r\n-\t       MNL_ALIGN(sizeof(struct ifaddrmsg)) +\r\n-\t       (family == AF_INET6 ? 
2 * SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)\r\n-\t\t\t\t   : 2 * SZ_NLATTR_TYPE_OF(uint32_t));\r\n-\tcmd = flow_tcf_alloc_nlcmd(ctx, size);\r\n-\tif (!cmd) {\r\n-\t\trte_errno = ENOMEM;\r\n-\t\treturn -rte_errno;\r\n-\t}\r\n-\tcmd = mnl_nlmsg_put_header(cmd);\r\n-\tcmd->nlmsg_type = RTM_DELADDR;\r\n-\tcmd->nlmsg_flags = NLM_F_REQUEST;\r\n-\tifa = mnl_nlmsg_put_extra_header(cmd, sizeof(*ifa));\r\n-\tifa->ifa_flags = IFA_F_PERMANENT;\r\n-\tifa->ifa_scope = RT_SCOPE_LINK;\r\n-\tifa->ifa_index = ctx->ifindex;\r\n-\tif (family == AF_INET) {\r\n-\t\tifa->ifa_family = AF_INET;\r\n-\t\tifa->ifa_prefixlen = 32;\r\n-\t\tmnl_attr_put_u32(cmd, IFA_LOCAL, mnl_attr_get_u32(na_local));\r\n-\t\tmnl_attr_put_u32(cmd, IFA_ADDRESS, mnl_attr_get_u32(na_peer));\r\n-\t} else {\r\n-\t\tifa->ifa_family = AF_INET6;\r\n-\t\tifa->ifa_prefixlen = 128;\r\n-\t\tmnl_attr_put(cmd, IFA_LOCAL, IPV6_ADDR_LEN,\r\n-\t\t\tmnl_attr_get_payload(na_local));\r\n-\t\tmnl_attr_put(cmd, IFA_ADDRESS, IPV6_ADDR_LEN,\r\n-\t\t\tmnl_attr_get_payload(na_peer));\r\n-\t}\r\n-\tassert(size == cmd->nlmsg_len);\r\n-\treturn 1;\r\n-}\r\n-\r\n-/**\r\n- * Cleanup the local IP addresses on outer interface.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] ifindex\r\n- *   Network interface index to perform cleanup.\r\n- */\r\n-static void\r\n-flow_tcf_encap_local_cleanup(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t\t    unsigned int ifindex)\r\n-{\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct ifaddrmsg *ifa;\r\n-\tstruct tcf_nlcb_context ctx = {\r\n-\t\t.ifindex = ifindex,\r\n-\t\t.bufsize = MNL_REQUEST_SIZE,\r\n-\t\t.nlbuf = LIST_HEAD_INITIALIZER(),\r\n-\t};\r\n-\tint ret;\r\n-\r\n-\tassert(ifindex);\r\n-\t/*\r\n-\t * Seek and destroy leftovers of local IP addresses with\r\n-\t * matching properties \"scope link\".\r\n-\t */\r\n-\tnlh = mnl_nlmsg_put_header(tcf->buf);\r\n-\tnlh->nlmsg_type = RTM_GETADDR;\r\n-\tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;\r\n-\tifa = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifa));\r\n-\tifa->ifa_family = AF_UNSPEC;\r\n-\tifa->ifa_index = ifindex;\r\n-\tifa->ifa_scope = RT_SCOPE_LINK;\r\n-\tret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_local_cb, &ctx);\r\n-\tif (ret)\r\n-\t\tDRV_LOG(WARNING, \"netlink: query device list error %d\", ret);\r\n-\tret = flow_tcf_send_nlcmd(tcf, &ctx);\r\n-\tif (ret)\r\n-\t\tDRV_LOG(WARNING, \"netlink: device delete error %d\", ret);\r\n-}\r\n-\r\n-/**\r\n- * Collect neigh permanent rules on specified network device.\r\n- * This is callback routine called by libmnl mnl_cb_run() in loop for\r\n- * every message in received packet.\r\n- *\r\n- * @param[in] nlh\r\n- *   Pointer to reply header.\r\n- * @param[in, out] arg\r\n- *   Opaque data pointer for this callback.\r\n- *\r\n- * @return\r\n- *   A positive, nonzero value on success, negative errno value otherwise\r\n- *   and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)\r\n-{\r\n-\tstruct tcf_nlcb_context *ctx = arg;\r\n-\tstruct nlmsghdr *cmd;\r\n-\tstruct ndmsg *ndm;\r\n-\tstruct nlattr *na;\r\n-\tstruct nlattr *na_ip = NULL;\r\n-\tstruct nlattr *na_mac = NULL;\r\n-\tunsigned char family;\r\n-\tuint32_t size;\r\n-\r\n-\tif (nlh->nlmsg_type != RTM_NEWNEIGH) {\r\n-\t\trte_errno = EINVAL;\r\n-\t\treturn -rte_errno;\r\n-\t}\r\n-\tndm = mnl_nlmsg_get_payload(nlh);\r\n-\tfamily = ndm->ndm_family;\r\n-\tif (ndm->ndm_ifindex != (int)ctx->ifindex ||\r\n-\t   !(ndm->ndm_state & NUD_PERMANENT) ||\r\n-\t   (family != AF_INET 
&& family != AF_INET6))\r\n-\t\treturn 1;\r\n-\tmnl_attr_for_each(na, nlh, sizeof(*ndm)) {\r\n-\t\tswitch (mnl_attr_get_type(na)) {\r\n-\t\tcase NDA_DST:\r\n-\t\t\tna_ip = na;\r\n-\t\t\tbreak;\r\n-\t\tcase NDA_LLADDR:\r\n-\t\t\tna_mac = na;\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t\tif (na_mac && na_ip)\r\n-\t\t\tbreak;\r\n-\t}\r\n-\tif (!na_mac || !na_ip)\r\n-\t\treturn 1;\r\n-\t/* Neigh rule with permanent attribute found. */\r\n-\tsize = MNL_ALIGN(sizeof(struct nlmsghdr)) +\r\n-\t       MNL_ALIGN(sizeof(struct ndmsg)) +\r\n-\t       SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) +\r\n-\t       (family == AF_INET6 ? SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)\r\n-\t\t\t\t   : SZ_NLATTR_TYPE_OF(uint32_t));\r\n-\tcmd = flow_tcf_alloc_nlcmd(ctx, size);\r\n-\tif (!cmd) {\r\n-\t\trte_errno = ENOMEM;\r\n-\t\treturn -rte_errno;\r\n-\t}\r\n-\tcmd = mnl_nlmsg_put_header(cmd);\r\n-\tcmd->nlmsg_type = RTM_DELNEIGH;\r\n-\tcmd->nlmsg_flags = NLM_F_REQUEST;\r\n-\tndm = mnl_nlmsg_put_extra_header(cmd, sizeof(*ndm));\r\n-\tndm->ndm_ifindex = ctx->ifindex;\r\n-\tndm->ndm_state = NUD_PERMANENT;\r\n-\tndm->ndm_flags = 0;\r\n-\tndm->ndm_type = 0;\r\n-\tif (family == AF_INET) {\r\n-\t\tndm->ndm_family = AF_INET;\r\n-\t\tmnl_attr_put_u32(cmd, NDA_DST, mnl_attr_get_u32(na_ip));\r\n-\t} else {\r\n-\t\tndm->ndm_family = AF_INET6;\r\n-\t\tmnl_attr_put(cmd, NDA_DST, IPV6_ADDR_LEN,\r\n-\t\t\t     mnl_attr_get_payload(na_ip));\r\n-\t}\r\n-\tmnl_attr_put(cmd, NDA_LLADDR, RTE_ETHER_ADDR_LEN,\r\n-\t\t     mnl_attr_get_payload(na_mac));\r\n-\tassert(size == cmd->nlmsg_len);\r\n-\treturn 1;\r\n-}\r\n-\r\n-/**\r\n- * Cleanup the neigh rules on outer interface.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] ifindex\r\n- *   Network interface index to perform cleanup.\r\n- */\r\n-static void\r\n-flow_tcf_encap_neigh_cleanup(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t\t    unsigned int ifindex)\r\n-{\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct ndmsg *ndm;\r\n-\tstruct tcf_nlcb_context ctx = {\r\n-\t\t.ifindex = ifindex,\r\n-\t\t.bufsize = MNL_REQUEST_SIZE,\r\n-\t\t.nlbuf = LIST_HEAD_INITIALIZER(),\r\n-\t};\r\n-\tint ret;\r\n-\r\n-\tassert(ifindex);\r\n-\t/* Seek and destroy leftovers of neigh rules. 
*/\r\n-\tnlh = mnl_nlmsg_put_header(tcf->buf);\r\n-\tnlh->nlmsg_type = RTM_GETNEIGH;\r\n-\tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;\r\n-\tndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));\r\n-\tndm->ndm_family = AF_UNSPEC;\r\n-\tndm->ndm_ifindex = ifindex;\r\n-\tndm->ndm_state = NUD_PERMANENT;\r\n-\tret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_neigh_cb, &ctx);\r\n-\tif (ret)\r\n-\t\tDRV_LOG(WARNING, \"netlink: query device list error %d\", ret);\r\n-\tret = flow_tcf_send_nlcmd(tcf, &ctx);\r\n-\tif (ret)\r\n-\t\tDRV_LOG(WARNING, \"netlink: device delete error %d\", ret);\r\n-}\r\n-\r\n-/**\r\n- * Collect indices of VXLAN encap/decap interfaces associated with device.\r\n- * This is callback routine called by libmnl mnl_cb_run() in loop for\r\n- * every message in received packet.\r\n- *\r\n- * @param[in] nlh\r\n- *   Pointer to reply header.\r\n- * @param[in, out] arg\r\n- *   Opaque data pointer for this callback.\r\n- *\r\n- * @return\r\n- *   A positive, nonzero value on success, negative errno value otherwise\r\n- *   and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_collect_vxlan_cb(const struct nlmsghdr *nlh, void *arg)\r\n-{\r\n-\tstruct tcf_nlcb_context *ctx = arg;\r\n-\tstruct nlmsghdr *cmd;\r\n-\tstruct ifinfomsg *ifm;\r\n-\tstruct nlattr *na;\r\n-\tstruct nlattr *na_info = NULL;\r\n-\tstruct nlattr *na_vxlan = NULL;\r\n-\tbool found = false;\r\n-\tunsigned int vxindex;\r\n-\tuint32_t size;\r\n-\r\n-\tif (nlh->nlmsg_type != RTM_NEWLINK) {\r\n-\t\trte_errno = EINVAL;\r\n-\t\treturn -rte_errno;\r\n-\t}\r\n-\tifm = mnl_nlmsg_get_payload(nlh);\r\n-\tif (!ifm->ifi_index) {\r\n-\t\trte_errno = EINVAL;\r\n-\t\treturn -rte_errno;\r\n-\t}\r\n-\tmnl_attr_for_each(na, nlh, sizeof(*ifm))\r\n-\t\tif (mnl_attr_get_type(na) == IFLA_LINKINFO) {\r\n-\t\t\tna_info = na;\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\tif (!na_info)\r\n-\t\treturn 1;\r\n-\tmnl_attr_for_each_nested(na, na_info) {\r\n-\t\tswitch (mnl_attr_get_type(na)) {\r\n-\t\tcase IFLA_INFO_KIND:\r\n-\t\t\tif (!strncmp(\"vxlan\", mnl_attr_get_str(na),\r\n-\t\t\t\t     mnl_attr_get_len(na)))\r\n-\t\t\t\tfound = true;\r\n-\t\t\tbreak;\r\n-\t\tcase IFLA_INFO_DATA:\r\n-\t\t\tna_vxlan = na;\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t\tif (found && na_vxlan)\r\n-\t\t\tbreak;\r\n-\t}\r\n-\tif (!found || !na_vxlan)\r\n-\t\treturn 1;\r\n-\tfound = false;\r\n-\tmnl_attr_for_each_nested(na, na_vxlan) {\r\n-\t\tif (mnl_attr_get_type(na) == IFLA_VXLAN_LINK &&\r\n-\t\t    mnl_attr_get_u32(na) == ctx->ifindex) {\r\n-\t\t\tfound = true;\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t}\r\n-\tif (!found)\r\n-\t\treturn 1;\r\n-\t/* Attached VXLAN device found, store the command to delete. */\r\n-\tvxindex = ifm->ifi_index;\r\n-\tsize = MNL_ALIGN(sizeof(struct nlmsghdr)) +\r\n-\t       MNL_ALIGN(sizeof(struct ifinfomsg));\r\n-\tcmd = flow_tcf_alloc_nlcmd(ctx, size);\r\n-\tif (!cmd) {\r\n-\t\trte_errno = ENOMEM;\r\n-\t\treturn -rte_errno;\r\n-\t}\r\n-\tcmd = mnl_nlmsg_put_header(cmd);\r\n-\tcmd->nlmsg_type = RTM_DELLINK;\r\n-\tcmd->nlmsg_flags = NLM_F_REQUEST;\r\n-\tifm = mnl_nlmsg_put_extra_header(cmd, sizeof(*ifm));\r\n-\tifm->ifi_family = AF_UNSPEC;\r\n-\tifm->ifi_index = vxindex;\r\n-\tassert(size == cmd->nlmsg_len);\r\n-\treturn 1;\r\n-}\r\n-\r\n-/**\r\n- * Cleanup the outer interface. 
Removes all found vxlan devices\r\n- * attached to specified index, flushes the neigh and local IP\r\n- * database.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] ifindex\r\n- *   Network inferface index to perform cleanup.\r\n- */\r\n-static void\r\n-flow_tcf_encap_iface_cleanup(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t\t    unsigned int ifindex)\r\n-{\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct ifinfomsg *ifm;\r\n-\tstruct tcf_nlcb_context ctx = {\r\n-\t\t.ifindex = ifindex,\r\n-\t\t.bufsize = MNL_REQUEST_SIZE,\r\n-\t\t.nlbuf = LIST_HEAD_INITIALIZER(),\r\n-\t};\r\n-\tint ret;\r\n-\r\n-\tassert(ifindex);\r\n-\t/*\r\n-\t * Seek and destroy leftover VXLAN encap/decap interfaces with\r\n-\t * matching properties.\r\n-\t */\r\n-\tnlh = mnl_nlmsg_put_header(tcf->buf);\r\n-\tnlh->nlmsg_type = RTM_GETLINK;\r\n-\tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;\r\n-\tifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));\r\n-\tifm->ifi_family = AF_UNSPEC;\r\n-\tret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_vxlan_cb, &ctx);\r\n-\tif (ret)\r\n-\t\tDRV_LOG(WARNING, \"netlink: query device list error %d\", ret);\r\n-\tret = flow_tcf_send_nlcmd(tcf, &ctx);\r\n-\tif (ret)\r\n-\t\tDRV_LOG(WARNING, \"netlink: device delete error %d\", ret);\r\n-}\r\n-\r\n-/**\r\n- * Emit Netlink message to add/remove local address to the outer device.\r\n- * The address being added is visible within the link only (scope link).\r\n- *\r\n- * Note that an implicit route is maintained by the kernel due to the\r\n- * presence of a peer address (IFA_ADDRESS).\r\n- *\r\n- * These rules are used for encapsulation only and allow to assign\r\n- * the outer tunnel source IP address.\r\n- *\r\n- * @param[in] tcf\r\n- *   Libmnl socket context object.\r\n- * @param[in] encap\r\n- *   Encapsulation properties (source address and its peer).\r\n- * @param[in] ifindex\r\n- *   Network interface to apply rule.\r\n- * @param[in] enable\r\n- *   Toggle between add and remove.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_rule_local(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t    const struct flow_tcf_vxlan_encap *encap,\r\n-\t\t    unsigned int ifindex,\r\n-\t\t    bool enable,\r\n-\t\t    struct rte_flow_error *error)\r\n-{\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct ifaddrmsg *ifa;\r\n-\talignas(struct nlmsghdr)\r\n-\tuint8_t buf[mnl_nlmsg_size(sizeof(*ifa) + 128)];\r\n-\r\n-\tnlh = mnl_nlmsg_put_header(buf);\r\n-\tnlh->nlmsg_type = enable ? RTM_NEWADDR : RTM_DELADDR;\r\n-\tnlh->nlmsg_flags =\r\n-\t\tNLM_F_REQUEST | (enable ? 
NLM_F_CREATE | NLM_F_REPLACE : 0);\r\n-\tnlh->nlmsg_seq = 0;\r\n-\tifa = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifa));\r\n-\tifa->ifa_flags = IFA_F_PERMANENT;\r\n-\tifa->ifa_scope = RT_SCOPE_LINK;\r\n-\tifa->ifa_index = ifindex;\r\n-\tif (encap->mask & FLOW_TCF_ENCAP_IPV4_SRC) {\r\n-\t\tifa->ifa_family = AF_INET;\r\n-\t\tifa->ifa_prefixlen = 32;\r\n-\t\tmnl_attr_put_u32(nlh, IFA_LOCAL, encap->ipv4.src);\r\n-\t\tif (encap->mask & FLOW_TCF_ENCAP_IPV4_DST)\r\n-\t\t\tmnl_attr_put_u32(nlh, IFA_ADDRESS,\r\n-\t\t\t\t\t      encap->ipv4.dst);\r\n-\t} else {\r\n-\t\tassert(encap->mask & FLOW_TCF_ENCAP_IPV6_SRC);\r\n-\t\tifa->ifa_family = AF_INET6;\r\n-\t\tifa->ifa_prefixlen = 128;\r\n-\t\tmnl_attr_put(nlh, IFA_LOCAL,\r\n-\t\t\t\t  sizeof(encap->ipv6.src),\r\n-\t\t\t\t  &encap->ipv6.src);\r\n-\t\tif (encap->mask & FLOW_TCF_ENCAP_IPV6_DST)\r\n-\t\t\tmnl_attr_put(nlh, IFA_ADDRESS,\r\n-\t\t\t\t\t  sizeof(encap->ipv6.dst),\r\n-\t\t\t\t\t  &encap->ipv6.dst);\r\n-\t}\r\n-\tif (!flow_tcf_nl_ack(tcf, nlh, NULL, NULL))\r\n-\t\treturn 0;\r\n-\treturn rte_flow_error_set(error, rte_errno,\r\n-\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t  \"netlink: cannot complete IFA request\"\r\n-\t\t\t\t  \" (ip addr add)\");\r\n-}\r\n-\r\n-/**\r\n- * Emit Netlink message to add/remove neighbor.\r\n- *\r\n- * @param[in] tcf\r\n- *   Libmnl socket context object.\r\n- * @param[in] encap\r\n- *   Encapsulation properties (destination address).\r\n- * @param[in] ifindex\r\n- *   Network interface.\r\n- * @param[in] enable\r\n- *   Toggle between add and remove.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_rule_neigh(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t     const struct flow_tcf_vxlan_encap *encap,\r\n-\t\t     unsigned int ifindex,\r\n-\t\t     bool enable,\r\n-\t\t     struct rte_flow_error *error)\r\n-{\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct ndmsg *ndm;\r\n-\talignas(struct nlmsghdr)\r\n-\tuint8_t buf[mnl_nlmsg_size(sizeof(*ndm) + 128)];\r\n-\r\n-\tnlh = mnl_nlmsg_put_header(buf);\r\n-\tnlh->nlmsg_type = enable ? RTM_NEWNEIGH : RTM_DELNEIGH;\r\n-\tnlh->nlmsg_flags =\r\n-\t\tNLM_F_REQUEST | (enable ? 
NLM_F_CREATE | NLM_F_REPLACE : 0);\r\n-\tnlh->nlmsg_seq = 0;\r\n-\tndm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ndm));\r\n-\tndm->ndm_ifindex = ifindex;\r\n-\tndm->ndm_state = NUD_PERMANENT;\r\n-\tndm->ndm_flags = 0;\r\n-\tndm->ndm_type = 0;\r\n-\tif (encap->mask & FLOW_TCF_ENCAP_IPV4_DST) {\r\n-\t\tndm->ndm_family = AF_INET;\r\n-\t\tmnl_attr_put_u32(nlh, NDA_DST, encap->ipv4.dst);\r\n-\t} else {\r\n-\t\tassert(encap->mask & FLOW_TCF_ENCAP_IPV6_DST);\r\n-\t\tndm->ndm_family = AF_INET6;\r\n-\t\tmnl_attr_put(nlh, NDA_DST, sizeof(encap->ipv6.dst),\r\n-\t\t\t\t\t\t &encap->ipv6.dst);\r\n-\t}\r\n-\tif (encap->mask & FLOW_TCF_ENCAP_ETH_SRC && enable)\r\n-\t\tDRV_LOG(WARNING,\r\n-\t\t\t\"outer ethernet source address cannot be \"\r\n-\t\t\t\"forced for VXLAN encapsulation\");\r\n-\tif (encap->mask & FLOW_TCF_ENCAP_ETH_DST)\r\n-\t\tmnl_attr_put(nlh, NDA_LLADDR, sizeof(encap->eth.dst),\r\n-\t\t\t\t\t\t    &encap->eth.dst);\r\n-\tif (!flow_tcf_nl_ack(tcf, nlh, NULL, NULL))\r\n-\t\treturn 0;\r\n-\treturn rte_flow_error_set(error, rte_errno,\r\n-\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t  \"netlink: cannot complete ND request\"\r\n-\t\t\t\t  \" (ip neigh)\");\r\n-}\r\n-\r\n-/**\r\n- * Manage the local IP addresses and their peers IP addresses on the\r\n- * outer interface for encapsulation purposes. The kernel searches the\r\n- * appropriate device for tunnel egress traffic using the outer source\r\n- * IP, this IP should be assigned to the outer network device, otherwise\r\n- * kernel rejects the rule.\r\n- *\r\n- * Adds or removes the addresses using the Netlink command like this:\r\n- *   ip addr add <src_ip> peer <dst_ip> scope link dev <ifouter>\r\n- *\r\n- * The addresses are local to the netdev (\"scope link\"), this reduces\r\n- * the risk of conflicts. 
Note that an implicit route is maintained by\r\n- * the kernel due to the presence of a peer address (IFA_ADDRESS).\r\n- *\r\n- * @param[in] tcf\r\n- *   Libmnl socket context object.\r\n- * @param[in] iface\r\n- *   Object, contains rule database and ifouter index.\r\n- * @param[in] dev_flow\r\n- *   Flow object, contains the tunnel parameters (for encap only).\r\n- * @param[in] enable\r\n- *   Toggle between add and remove.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_encap_local(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t     struct tcf_irule *iface,\r\n-\t\t     struct mlx5_flow *dev_flow,\r\n-\t\t     bool enable,\r\n-\t\t     struct rte_flow_error *error)\r\n-{\r\n-\tconst struct flow_tcf_vxlan_encap *encap = dev_flow->tcf.vxlan_encap;\r\n-\tstruct tcf_local_rule *rule = NULL;\r\n-\tint ret;\r\n-\r\n-\tassert(encap);\r\n-\tassert(encap->hdr.type == FLOW_TCF_TUNACT_VXLAN_ENCAP);\r\n-\tif (encap->mask & FLOW_TCF_ENCAP_IPV4_SRC) {\r\n-\t\tassert(encap->mask & FLOW_TCF_ENCAP_IPV4_DST);\r\n-\t\tLIST_FOREACH(rule, &iface->local, next) {\r\n-\t\t\tif (rule->mask & FLOW_TCF_ENCAP_IPV4_SRC &&\r\n-\t\t\t    encap->ipv4.src == rule->ipv4.src &&\r\n-\t\t\t    encap->ipv4.dst == rule->ipv4.dst) {\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t}\r\n-\t} else {\r\n-\t\tassert(encap->mask & FLOW_TCF_ENCAP_IPV6_SRC);\r\n-\t\tassert(encap->mask & FLOW_TCF_ENCAP_IPV6_DST);\r\n-\t\tLIST_FOREACH(rule, &iface->local, next) {\r\n-\t\t\tif (rule->mask & FLOW_TCF_ENCAP_IPV6_SRC &&\r\n-\t\t\t    !memcmp(&encap->ipv6.src, &rule->ipv6.src,\r\n-\t\t\t\t\t    sizeof(encap->ipv6.src)) &&\r\n-\t\t\t    !memcmp(&encap->ipv6.dst, &rule->ipv6.dst,\r\n-\t\t\t\t\t    sizeof(encap->ipv6.dst))) {\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t}\r\n-\t}\r\n-\tif (rule) {\r\n-\t\tif (enable) {\r\n-\t\t\trule->refcnt++;\r\n-\t\t\treturn 0;\r\n-\t\t}\r\n-\t\tif (!rule->refcnt || !--rule->refcnt) {\r\n-\t\t\tLIST_REMOVE(rule, next);\r\n-\t\t\treturn flow_tcf_rule_local(tcf, encap,\r\n-\t\t\t\t\tiface->ifouter, false, error);\r\n-\t\t}\r\n-\t\treturn 0;\r\n-\t}\r\n-\tif (!enable) {\r\n-\t\tDRV_LOG(WARNING, \"disabling not existing local rule\");\r\n-\t\trte_flow_error_set(error, ENOENT,\r\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t   \"disabling not existing local rule\");\r\n-\t\treturn -ENOENT;\r\n-\t}\r\n-\trule = rte_zmalloc(__func__, sizeof(struct tcf_local_rule),\r\n-\t\t\t\talignof(struct tcf_local_rule));\r\n-\tif (!rule) {\r\n-\t\trte_flow_error_set(error, ENOMEM,\r\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t   \"unable to allocate memory for local rule\");\r\n-\t\treturn -rte_errno;\r\n-\t}\r\n-\t*rule = (struct tcf_local_rule){.refcnt = 0,\r\n-\t\t\t\t\t.mask = 0,\r\n-\t\t\t\t\t};\r\n-\tif (encap->mask & FLOW_TCF_ENCAP_IPV4_SRC) {\r\n-\t\trule->mask = FLOW_TCF_ENCAP_IPV4_SRC\r\n-\t\t\t   | FLOW_TCF_ENCAP_IPV4_DST;\r\n-\t\trule->ipv4.src = encap->ipv4.src;\r\n-\t\trule->ipv4.dst = encap->ipv4.dst;\r\n-\t} else {\r\n-\t\trule->mask = FLOW_TCF_ENCAP_IPV6_SRC\r\n-\t\t\t   | FLOW_TCF_ENCAP_IPV6_DST;\r\n-\t\tmemcpy(&rule->ipv6.src, &encap->ipv6.src, IPV6_ADDR_LEN);\r\n-\t\tmemcpy(&rule->ipv6.dst, &encap->ipv6.dst, IPV6_ADDR_LEN);\r\n-\t}\r\n-\tret = flow_tcf_rule_local(tcf, encap, iface->ifouter, true, error);\r\n-\tif (ret) {\r\n-\t\trte_free(rule);\r\n-\t\treturn 
ret;\r\n-\t}\r\n-\trule->refcnt++;\r\n-\tLIST_INSERT_HEAD(&iface->local, rule, next);\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Manage the destination MAC/IP addresses neigh database, kernel uses\r\n- * this one to determine the destination MAC address within encapsulation\r\n- * header. Adds or removes the entries using the Netlink command like this:\r\n- *   ip neigh add dev <ifouter> lladdr <dst_mac> to <dst_ip> nud permanent\r\n- *\r\n- * @param[in] tcf\r\n- *   Libmnl socket context object.\r\n- * @param[in] iface\r\n- *   Object, contains rule database and ifouter index.\r\n- * @param[in] dev_flow\r\n- *   Flow object, contains the tunnel parameters (for encap only).\r\n- * @param[in] enable\r\n- *   Toggle between add and remove.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_encap_neigh(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t     struct tcf_irule *iface,\r\n-\t\t     struct mlx5_flow *dev_flow,\r\n-\t\t     bool enable,\r\n-\t\t     struct rte_flow_error *error)\r\n-{\r\n-\tconst struct flow_tcf_vxlan_encap *encap = dev_flow->tcf.vxlan_encap;\r\n-\tstruct tcf_neigh_rule *rule = NULL;\r\n-\tint ret;\r\n-\r\n-\tassert(encap);\r\n-\tassert(encap->hdr.type == FLOW_TCF_TUNACT_VXLAN_ENCAP);\r\n-\tif (encap->mask & FLOW_TCF_ENCAP_IPV4_DST) {\r\n-\t\tassert(encap->mask & FLOW_TCF_ENCAP_IPV4_SRC);\r\n-\t\tLIST_FOREACH(rule, &iface->neigh, next) {\r\n-\t\t\tif (rule->mask & FLOW_TCF_ENCAP_IPV4_DST &&\r\n-\t\t\t    encap->ipv4.dst == rule->ipv4.dst) {\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t}\r\n-\t} else {\r\n-\t\tassert(encap->mask & FLOW_TCF_ENCAP_IPV6_SRC);\r\n-\t\tassert(encap->mask & FLOW_TCF_ENCAP_IPV6_DST);\r\n-\t\tLIST_FOREACH(rule, &iface->neigh, next) {\r\n-\t\t\tif (rule->mask & FLOW_TCF_ENCAP_IPV6_DST &&\r\n-\t\t\t    !memcmp(&encap->ipv6.dst, &rule->ipv6.dst,\r\n-\t\t\t\t\t\tsizeof(encap->ipv6.dst))) {\r\n-\t\t\t\tbreak;\r\n-\t\t\t}\r\n-\t\t}\r\n-\t}\r\n-\tif (rule) {\r\n-\t\tif (memcmp(&encap->eth.dst, &rule->eth,\r\n-\t\t\t   sizeof(encap->eth.dst))) {\r\n-\t\t\tDRV_LOG(WARNING, \"Destination MAC differs\"\r\n-\t\t\t\t\t \" in neigh rule\");\r\n-\t\t\trte_flow_error_set(error, EEXIST,\r\n-\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\r\n-\t\t\t\t\t   NULL, \"Different MAC address\"\r\n-\t\t\t\t\t   \" neigh rule for the same\"\r\n-\t\t\t\t\t   \" destination IP\");\r\n-\t\t\t\t\treturn -EEXIST;\r\n-\t\t}\r\n-\t\tif (enable) {\r\n-\t\t\trule->refcnt++;\r\n-\t\t\treturn 0;\r\n-\t\t}\r\n-\t\tif (!rule->refcnt || !--rule->refcnt) {\r\n-\t\t\tLIST_REMOVE(rule, next);\r\n-\t\t\treturn flow_tcf_rule_neigh(tcf, encap,\r\n-\t\t\t\t\t\t   iface->ifouter,\r\n-\t\t\t\t\t\t   false, error);\r\n-\t\t}\r\n-\t\treturn 0;\r\n-\t}\r\n-\tif (!enable) {\r\n-\t\tDRV_LOG(WARNING, \"Disabling not existing neigh rule\");\r\n-\t\trte_flow_error_set(error, ENOENT,\r\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t   \"unable to allocate memory for neigh rule\");\r\n-\t\treturn -ENOENT;\r\n-\t}\r\n-\trule = rte_zmalloc(__func__, sizeof(struct tcf_neigh_rule),\r\n-\t\t\t\talignof(struct tcf_neigh_rule));\r\n-\tif (!rule) {\r\n-\t\trte_flow_error_set(error, ENOMEM,\r\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t   \"unable to allocate memory for neigh rule\");\r\n-\t\treturn -rte_errno;\r\n-\t}\r\n-\t*rule = (struct tcf_neigh_rule){.refcnt = 0,\r\n-\t\t\t\t\t.mask = 0,\r\n-\t\t\t\t\t};\r\n-\tif 
(encap->mask & FLOW_TCF_ENCAP_IPV4_DST) {\r\n-\t\trule->mask = FLOW_TCF_ENCAP_IPV4_DST;\r\n-\t\trule->ipv4.dst = encap->ipv4.dst;\r\n-\t} else {\r\n-\t\trule->mask = FLOW_TCF_ENCAP_IPV6_DST;\r\n-\t\tmemcpy(&rule->ipv6.dst, &encap->ipv6.dst, IPV6_ADDR_LEN);\r\n-\t}\r\n-\tmemcpy(&rule->eth, &encap->eth.dst, sizeof(rule->eth));\r\n-\tret = flow_tcf_rule_neigh(tcf, encap, iface->ifouter, true, error);\r\n-\tif (ret) {\r\n-\t\trte_free(rule);\r\n-\t\treturn ret;\r\n-\t}\r\n-\trule->refcnt++;\r\n-\tLIST_INSERT_HEAD(&iface->neigh, rule, next);\r\n-\treturn 0;\r\n-}\r\n-\r\n-/* VXLAN encap rule database for outer interfaces. */\r\n-static  LIST_HEAD(, tcf_irule) iface_list_vxlan = LIST_HEAD_INITIALIZER();\r\n-\r\n-/* VTEP device list is shared between PMD port instances. */\r\n-static LIST_HEAD(, tcf_vtep) vtep_list_vxlan = LIST_HEAD_INITIALIZER();\r\n-static pthread_mutex_t vtep_list_mutex = PTHREAD_MUTEX_INITIALIZER;\r\n-\r\n-/**\r\n- * Acquire the VXLAN encap rules container for specified interface.\r\n- * First looks for the container in the existing ones list, creates\r\n- * and initializes the new container if existing not found.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] ifouter\r\n- *   Network interface index to create VXLAN encap rules on.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- * @return\r\n- *   Rule container pointer on success,\r\n- *   NULL otherwise and rte_errno is set.\r\n- */\r\n-static struct tcf_irule*\r\n-flow_tcf_encap_irule_acquire(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t\t     unsigned int ifouter,\r\n-\t\t\t     struct rte_flow_error *error)\r\n-{\r\n-\tstruct tcf_irule *iface;\r\n-\r\n-\t/* Look whether the container for encap rules is created. */\r\n-\tassert(ifouter);\r\n-\tLIST_FOREACH(iface, &iface_list_vxlan, next) {\r\n-\t\tif (iface->ifouter == ifouter)\r\n-\t\t\tbreak;\r\n-\t}\r\n-\tif (iface) {\r\n-\t\t/* Container already exists, just increment the reference. */\r\n-\t\tiface->refcnt++;\r\n-\t\treturn iface;\r\n-\t}\r\n-\t/* Not found, we should create the new container. */\r\n-\tiface = rte_zmalloc(__func__, sizeof(*iface),\r\n-\t\t\t    alignof(struct tcf_irule));\r\n-\tif (!iface) {\r\n-\t\trte_flow_error_set(error, ENOMEM,\r\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t   \"unable to allocate memory for container\");\r\n-\t\treturn NULL;\r\n-\t}\r\n-\t*iface = (struct tcf_irule){\r\n-\t\t\t.local = LIST_HEAD_INITIALIZER(),\r\n-\t\t\t.neigh = LIST_HEAD_INITIALIZER(),\r\n-\t\t\t.ifouter = ifouter,\r\n-\t\t\t.refcnt = 1,\r\n-\t};\r\n-\t/* Interface cleanup for new container created. */\r\n-\tflow_tcf_encap_iface_cleanup(tcf, ifouter);\r\n-\tflow_tcf_encap_local_cleanup(tcf, ifouter);\r\n-\tflow_tcf_encap_neigh_cleanup(tcf, ifouter);\r\n-\tLIST_INSERT_HEAD(&iface_list_vxlan, iface, next);\r\n-\treturn iface;\r\n-}\r\n-\r\n-/**\r\n- * Releases VXLAN encap rules container by pointer. Decrements the\r\n- * reference counter and deletes the container if counter is zero.\r\n- *\r\n- * @param[in] irule\r\n- *   VXLAN rule container pointer to release.\r\n- */\r\n-static void\r\n-flow_tcf_encap_irule_release(struct tcf_irule *iface)\r\n-{\r\n-\tassert(iface->refcnt);\r\n-\tif (--iface->refcnt == 0) {\r\n-\t\t/* Reference counter is zero, delete the container. 
*/\r\n-\t\tassert(LIST_EMPTY(&iface->local));\r\n-\t\tassert(LIST_EMPTY(&iface->neigh));\r\n-\t\tLIST_REMOVE(iface, next);\r\n-\t\trte_free(iface);\r\n-\t}\r\n-}\r\n-\r\n-/**\r\n- * Deletes VTEP network device.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] vtep\r\n- *   Object representing the network device to delete. Memory\r\n- *   allocated for this object is freed by routine.\r\n- */\r\n-static void\r\n-flow_tcf_vtep_delete(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t     struct tcf_vtep *vtep)\r\n-{\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct ifinfomsg *ifm;\r\n-\talignas(struct nlmsghdr)\r\n-\tuint8_t buf[mnl_nlmsg_size(MNL_ALIGN(sizeof(*ifm))) +\r\n-\t\t    MNL_BUF_EXTRA_SPACE];\r\n-\tint ret;\r\n-\r\n-\tassert(!vtep->refcnt);\r\n-\t/* Delete only ifaces those we actually created. */\r\n-\tif (vtep->created && vtep->ifindex) {\r\n-\t\tDRV_LOG(INFO, \"VTEP delete (%d)\", vtep->ifindex);\r\n-\t\tnlh = mnl_nlmsg_put_header(buf);\r\n-\t\tnlh->nlmsg_type = RTM_DELLINK;\r\n-\t\tnlh->nlmsg_flags = NLM_F_REQUEST;\r\n-\t\tifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));\r\n-\t\tifm->ifi_family = AF_UNSPEC;\r\n-\t\tifm->ifi_index = vtep->ifindex;\r\n-\t\tassert(sizeof(buf) >= nlh->nlmsg_len);\r\n-\t\tret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);\r\n-\t\tif (ret)\r\n-\t\t\tDRV_LOG(WARNING, \"netlink: error deleting vxlan\"\r\n-\t\t\t\t\t \" encap/decap ifindex %u\",\r\n-\t\t\t\t\t ifm->ifi_index);\r\n-\t}\r\n-\trte_free(vtep);\r\n-}\r\n-\r\n-/**\r\n- * Creates VTEP network device.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] port\r\n- *   UDP port of created VTEP device.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- *\r\n- * @return\r\n- * Pointer to created device structure on success,\r\n- * NULL otherwise and rte_errno is set.\r\n- */\r\n-static struct tcf_vtep*\r\n-flow_tcf_vtep_create(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t     uint16_t port, struct rte_flow_error *error)\r\n-{\r\n-\tstruct tcf_vtep *vtep;\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct ifinfomsg *ifm;\r\n-\tchar name[sizeof(MLX5_VXLAN_DEVICE_PFX) + 24];\r\n-\talignas(struct nlmsghdr)\r\n-\tuint8_t buf[mnl_nlmsg_size(sizeof(*ifm)) +\r\n-\t\t    SZ_NLATTR_DATA_OF(sizeof(name)) +\r\n-\t\t    SZ_NLATTR_NEST * 2 +\r\n-\t\t    SZ_NLATTR_STRZ_OF(\"vxlan\") +\r\n-\t\t    SZ_NLATTR_DATA_OF(sizeof(uint32_t)) +\r\n-\t\t    SZ_NLATTR_DATA_OF(sizeof(uint16_t)) +\r\n-\t\t    SZ_NLATTR_DATA_OF(sizeof(uint8_t)) * 3 +\r\n-\t\t    MNL_BUF_EXTRA_SPACE];\r\n-\tstruct nlattr *na_info;\r\n-\tstruct nlattr *na_vxlan;\r\n-\trte_be16_t vxlan_port = rte_cpu_to_be_16(port);\r\n-\tint ret;\r\n-\r\n-\tvtep = rte_zmalloc(__func__, sizeof(*vtep), alignof(struct tcf_vtep));\r\n-\tif (!vtep) {\r\n-\t\trte_flow_error_set(error, ENOMEM,\r\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t   \"unable to allocate memory for VTEP\");\r\n-\t\treturn NULL;\r\n-\t}\r\n-\t*vtep = (struct tcf_vtep){\r\n-\t\t\t.port = port,\r\n-\t};\r\n-\tmemset(buf, 0, sizeof(buf));\r\n-\tnlh = mnl_nlmsg_put_header(buf);\r\n-\tnlh->nlmsg_type = RTM_NEWLINK;\r\n-\tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE  | NLM_F_EXCL;\r\n-\tifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));\r\n-\tifm->ifi_family = AF_UNSPEC;\r\n-\tifm->ifi_type = 0;\r\n-\tifm->ifi_index = 0;\r\n-\tifm->ifi_flags = IFF_UP;\r\n-\tifm->ifi_change = 0xffffffff;\r\n-\tsnprintf(name, sizeof(name), \"%s%u\", 
MLX5_VXLAN_DEVICE_PFX, port);\r\n-\tmnl_attr_put_strz(nlh, IFLA_IFNAME, name);\r\n-\tna_info = mnl_attr_nest_start(nlh, IFLA_LINKINFO);\r\n-\tassert(na_info);\r\n-\tmnl_attr_put_strz(nlh, IFLA_INFO_KIND, \"vxlan\");\r\n-\tna_vxlan = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);\r\n-\tassert(na_vxlan);\r\n-#ifdef HAVE_IFLA_VXLAN_COLLECT_METADATA\r\n-\t/*\r\n-\t * RH 7.2 does not support metadata for tunnel device.\r\n-\t * It does not matter because we are going to use the\r\n-\t * hardware offload by mlx5 driver.\r\n-\t */\r\n-\tmnl_attr_put_u8(nlh, IFLA_VXLAN_COLLECT_METADATA, 1);\r\n-#endif\r\n-\tmnl_attr_put_u8(nlh, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, 1);\r\n-\tmnl_attr_put_u8(nlh, IFLA_VXLAN_LEARNING, 0);\r\n-\tmnl_attr_put_u16(nlh, IFLA_VXLAN_PORT, vxlan_port);\r\n-#ifndef HAVE_IFLA_VXLAN_COLLECT_METADATA\r\n-\t/*\r\n-\t *  We must specify VNI explicitly if metadata not supported.\r\n-\t *  Note, VNI is transferred with native endianness format.\r\n-\t */\r\n-\tmnl_attr_put_u16(nlh, IFLA_VXLAN_ID, MLX5_VXLAN_DEFAULT_VNI);\r\n-#endif\r\n-\tmnl_attr_nest_end(nlh, na_vxlan);\r\n-\tmnl_attr_nest_end(nlh, na_info);\r\n-\tassert(sizeof(buf) >= nlh->nlmsg_len);\r\n-\tret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);\r\n-\tif (ret) {\r\n-\t\tDRV_LOG(WARNING,\r\n-\t\t\t\"netlink: VTEP %s create failure (%d)\",\r\n-\t\t\tname, rte_errno);\r\n-\t\tif (rte_errno != EEXIST)\r\n-\t\t\t/*\r\n-\t\t\t * Some unhandled error occurred or device is\r\n-\t\t\t * for encapsulation and cannot be shared.\r\n-\t\t\t */\r\n-\t\t\tgoto error;\r\n-\t} else {\r\n-\t\t/*\r\n-\t\t * Mark device we actually created.\r\n-\t\t * We should explicitly delete\r\n-\t\t * when we do not need it anymore.\r\n-\t\t */\r\n-\t\tvtep->created = 1;\r\n-\t\tvtep->waitreg = 1;\r\n-\t}\r\n-\t/* Try to get ifindex of created of pre-existing device. 
*/\r\n-\tret = if_nametoindex(name);\r\n-\tif (!ret) {\r\n-\t\tDRV_LOG(WARNING,\r\n-\t\t\t\"VTEP %s failed to get index (%d)\", name, errno);\r\n-\t\trte_flow_error_set\r\n-\t\t\t(error, -errno,\r\n-\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t \"netlink: failed to retrieve VTEP ifindex\");\r\n-\t\tgoto error;\r\n-\t}\r\n-\tvtep->ifindex = ret;\r\n-\tmemset(buf, 0, sizeof(buf));\r\n-\tnlh = mnl_nlmsg_put_header(buf);\r\n-\tnlh->nlmsg_type = RTM_NEWLINK;\r\n-\tnlh->nlmsg_flags = NLM_F_REQUEST;\r\n-\tifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));\r\n-\tifm->ifi_family = AF_UNSPEC;\r\n-\tifm->ifi_type = 0;\r\n-\tifm->ifi_index = vtep->ifindex;\r\n-\tifm->ifi_flags = IFF_UP;\r\n-\tifm->ifi_change = IFF_UP;\r\n-\tret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);\r\n-\tif (ret) {\r\n-\t\trte_flow_error_set(error, -errno,\r\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t   \"netlink: failed to set VTEP link up\");\r\n-\t\tDRV_LOG(WARNING, \"netlink: VTEP %s set link up failure (%d)\",\r\n-\t\t\tname, rte_errno);\r\n-\t\tgoto clean;\r\n-\t}\r\n-\tret = mlx5_flow_tcf_init(tcf, vtep->ifindex, error);\r\n-\tif (ret) {\r\n-\t\tDRV_LOG(WARNING, \"VTEP %s init failure (%d)\", name, rte_errno);\r\n-\t\tgoto clean;\r\n-\t}\r\n-\tDRV_LOG(INFO, \"VTEP create (%d, %d)\", vtep->port, vtep->ifindex);\r\n-\tvtep->refcnt = 1;\r\n-\treturn vtep;\r\n-clean:\r\n-\tflow_tcf_vtep_delete(tcf, vtep);\r\n-\treturn NULL;\r\n-error:\r\n-\trte_free(vtep);\r\n-\treturn NULL;\r\n-}\r\n-\r\n-/**\r\n- * Acquire target interface index for VXLAN tunneling decapsulation.\r\n- * In order to share the UDP port within the other interfaces the\r\n- * VXLAN device created as not attached to any interface (if created).\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] dev_flow\r\n- *   Flow tcf object with tunnel structure pointer set.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- * @return\r\n- *   Interface descriptor pointer on success,\r\n- *   NULL otherwise and rte_errno is set.\r\n- */\r\n-static struct tcf_vtep*\r\n-flow_tcf_decap_vtep_acquire(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t\t    struct mlx5_flow *dev_flow,\r\n-\t\t\t    struct rte_flow_error *error)\r\n-{\r\n-\tstruct tcf_vtep *vtep;\r\n-\tuint16_t port = dev_flow->tcf.vxlan_decap->udp_port;\r\n-\r\n-\tLIST_FOREACH(vtep, &vtep_list_vxlan, next) {\r\n-\t\tif (vtep->port == port)\r\n-\t\t\tbreak;\r\n-\t}\r\n-\tif (vtep) {\r\n-\t\t/* Device exists, just increment the reference counter. */\r\n-\t\tvtep->refcnt++;\r\n-\t\tassert(vtep->ifindex);\r\n-\t\treturn vtep;\r\n-\t}\r\n-\t/* No decapsulation device exists, try to create the new one. 
*/\r\n-\tvtep = flow_tcf_vtep_create(tcf, port, error);\r\n-\tif (vtep)\r\n-\t\tLIST_INSERT_HEAD(&vtep_list_vxlan, vtep, next);\r\n-\treturn vtep;\r\n-}\r\n-\r\n-/**\r\n- * Acquire target interface index for VXLAN tunneling encapsulation.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] ifouter\r\n- *   Network interface index to attach VXLAN encap device to.\r\n- * @param[in] dev_flow\r\n- *   Flow tcf object with tunnel structure pointer set.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- * @return\r\n- *   Interface descriptor pointer on success,\r\n- *   NULL otherwise and rte_errno is set.\r\n- */\r\n-static struct tcf_vtep*\r\n-flow_tcf_encap_vtep_acquire(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t\t    unsigned int ifouter,\r\n-\t\t\t    struct mlx5_flow *dev_flow,\r\n-\t\t\t    struct rte_flow_error *error)\r\n-{\r\n-\tstatic uint16_t port;\r\n-\tstruct tcf_vtep *vtep;\r\n-\tstruct tcf_irule *iface;\r\n-\tint ret;\r\n-\r\n-\tassert(ifouter);\r\n-\t/* Look whether the VTEP for specified port is created. */\r\n-\tport = rte_be_to_cpu_16(dev_flow->tcf.vxlan_encap->udp.dst);\r\n-\tLIST_FOREACH(vtep, &vtep_list_vxlan, next) {\r\n-\t\tif (vtep->port == port)\r\n-\t\t\tbreak;\r\n-\t}\r\n-\tif (vtep) {\r\n-\t\t/* VTEP already exists, just increment the reference. */\r\n-\t\tvtep->refcnt++;\r\n-\t} else {\r\n-\t\t/* Not found, we should create the new VTEP. */\r\n-\t\tvtep = flow_tcf_vtep_create(tcf, port, error);\r\n-\t\tif (!vtep)\r\n-\t\t\treturn NULL;\r\n-\t\tLIST_INSERT_HEAD(&vtep_list_vxlan, vtep, next);\r\n-\t}\r\n-\tassert(vtep->ifindex);\r\n-\tiface = flow_tcf_encap_irule_acquire(tcf, ifouter, error);\r\n-\tif (!iface) {\r\n-\t\tif (--vtep->refcnt == 0)\r\n-\t\t\tflow_tcf_vtep_delete(tcf, vtep);\r\n-\t\treturn NULL;\r\n-\t}\r\n-\tdev_flow->tcf.vxlan_encap->iface = iface;\r\n-\t/* Create local ipaddr with peer to specify the outer IPs. */\r\n-\tret = flow_tcf_encap_local(tcf, iface, dev_flow, true, error);\r\n-\tif (!ret) {\r\n-\t\t/* Create neigh rule to specify outer destination MAC. 
*/\r\n-\t\tret = flow_tcf_encap_neigh(tcf, iface, dev_flow, true, error);\r\n-\t\tif (ret)\r\n-\t\t\tflow_tcf_encap_local(tcf, iface,\r\n-\t\t\t\t\t     dev_flow, false, error);\r\n-\t}\r\n-\tif (ret) {\r\n-\t\tdev_flow->tcf.vxlan_encap->iface = NULL;\r\n-\t\tflow_tcf_encap_irule_release(iface);\r\n-\t\tif (--vtep->refcnt == 0)\r\n-\t\t\tflow_tcf_vtep_delete(tcf, vtep);\r\n-\t\treturn NULL;\r\n-\t}\r\n-\treturn vtep;\r\n-}\r\n-\r\n-/**\r\n- * Acquires target interface index for tunneling of any type.\r\n- * Creates the new VTEP if needed.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] ifouter\r\n- *   Network interface index to create VXLAN encap rules on.\r\n- * @param[in] dev_flow\r\n- *   Flow tcf object with tunnel structure pointer set.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- * @return\r\n- *   Interface descriptor pointer on success,\r\n- *   NULL otherwise and rte_errno is set.\r\n- */\r\n-static struct tcf_vtep*\r\n-flow_tcf_vtep_acquire(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t      unsigned int ifouter,\r\n-\t\t      struct mlx5_flow *dev_flow,\r\n-\t\t      struct rte_flow_error *error)\r\n-{\r\n-\tstruct tcf_vtep *vtep = NULL;\r\n-\r\n-\tassert(dev_flow->tcf.tunnel);\r\n-\tpthread_mutex_lock(&vtep_list_mutex);\r\n-\tswitch (dev_flow->tcf.tunnel->type) {\r\n-\tcase FLOW_TCF_TUNACT_VXLAN_ENCAP:\r\n-\t\tvtep = flow_tcf_encap_vtep_acquire(tcf, ifouter,\r\n-\t\t\t\t\t\t  dev_flow, error);\r\n-\t\tbreak;\r\n-\tcase FLOW_TCF_TUNACT_VXLAN_DECAP:\r\n-\t\tvtep = flow_tcf_decap_vtep_acquire(tcf, dev_flow, error);\r\n-\t\tbreak;\r\n-\tdefault:\r\n-\t\trte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t   \"unsupported tunnel type\");\r\n-\t\tbreak;\r\n-\t}\r\n-\tpthread_mutex_unlock(&vtep_list_mutex);\r\n-\treturn vtep;\r\n-}\r\n-\r\n-/**\r\n- * Release tunneling interface by ifindex. Decrements reference\r\n- * counter and actually removes the device if counter is zero.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] vtep\r\n- *   VTEP device descriptor structure.\r\n- * @param[in] dev_flow\r\n- *   Flow tcf object with tunnel structure pointer set.\r\n- */\r\n-static void\r\n-flow_tcf_vtep_release(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t      struct tcf_vtep *vtep,\r\n-\t\t      struct mlx5_flow *dev_flow)\r\n-{\r\n-\tassert(dev_flow->tcf.tunnel);\r\n-\tpthread_mutex_lock(&vtep_list_mutex);\r\n-\tswitch (dev_flow->tcf.tunnel->type) {\r\n-\tcase FLOW_TCF_TUNACT_VXLAN_DECAP:\r\n-\t\tbreak;\r\n-\tcase FLOW_TCF_TUNACT_VXLAN_ENCAP: {\r\n-\t\tstruct tcf_irule *iface;\r\n-\r\n-\t\t/* Remove the encap ancillary rules first. 
*/\r\n-\t\tiface = dev_flow->tcf.vxlan_encap->iface;\r\n-\t\tassert(iface);\r\n-\t\tflow_tcf_encap_neigh(tcf, iface, dev_flow, false, NULL);\r\n-\t\tflow_tcf_encap_local(tcf, iface, dev_flow, false, NULL);\r\n-\t\tflow_tcf_encap_irule_release(iface);\r\n-\t\tdev_flow->tcf.vxlan_encap->iface = NULL;\r\n-\t\tbreak;\r\n-\t}\r\n-\tdefault:\r\n-\t\tassert(false);\r\n-\t\tDRV_LOG(WARNING, \"Unsupported tunnel type\");\r\n-\t\tbreak;\r\n-\t}\r\n-\tassert(vtep->refcnt);\r\n-\tif (--vtep->refcnt == 0) {\r\n-\t\tLIST_REMOVE(vtep, next);\r\n-\t\tflow_tcf_vtep_delete(tcf, vtep);\r\n-\t}\r\n-\tpthread_mutex_unlock(&vtep_list_mutex);\r\n-}\r\n-\r\n-struct tcf_nlcb_query {\r\n-\tuint32_t handle;\r\n-\tuint32_t tc_flags;\r\n-\tuint32_t flags_valid:1;\r\n-};\r\n-\r\n-/**\r\n- * Collect queried rule attributes. This is callback routine called by\r\n- * libmnl mnl_cb_run() in loop for every message in received packet.\r\n- * Current implementation collects the flower flags only.\r\n- *\r\n- * @param[in] nlh\r\n- *   Pointer to reply header.\r\n- * @param[in, out] arg\r\n- *   Context pointer for this callback.\r\n- *\r\n- * @return\r\n- *   A positive, nonzero value on success (required by libmnl\r\n- *   to continue messages processing).\r\n- */\r\n-static int\r\n-flow_tcf_collect_query_cb(const struct nlmsghdr *nlh, void *arg)\r\n-{\r\n-\tstruct tcf_nlcb_query *query = arg;\r\n-\tstruct tcmsg *tcm = mnl_nlmsg_get_payload(nlh);\r\n-\tstruct nlattr *na, *na_opt;\r\n-\tbool flower = false;\r\n-\r\n-\tif (nlh->nlmsg_type != RTM_NEWTFILTER ||\r\n-\t    tcm->tcm_handle != query->handle)\r\n-\t\treturn 1;\r\n-\tmnl_attr_for_each(na, nlh, sizeof(*tcm)) {\r\n-\t\tswitch (mnl_attr_get_type(na)) {\r\n-\t\tcase TCA_KIND:\r\n-\t\t\tif (strcmp(mnl_attr_get_payload(na), \"flower\")) {\r\n-\t\t\t\t/* Not flower filter, drop entire message. */\r\n-\t\t\t\treturn 1;\r\n-\t\t\t}\r\n-\t\t\tflower = true;\r\n-\t\t\tbreak;\r\n-\t\tcase TCA_OPTIONS:\r\n-\t\t\tif (!flower) {\r\n-\t\t\t\t/* Not flower options, drop entire message. */\r\n-\t\t\t\treturn 1;\r\n-\t\t\t}\r\n-\t\t\t/* Check nested flower options. 
*/\r\n-\t\t\tmnl_attr_for_each_nested(na_opt, na) {\r\n-\t\t\t\tswitch (mnl_attr_get_type(na_opt)) {\r\n-\t\t\t\tcase TCA_FLOWER_FLAGS:\r\n-\t\t\t\t\tquery->flags_valid = 1;\r\n-\t\t\t\t\tquery->tc_flags =\r\n-\t\t\t\t\t\tmnl_attr_get_u32(na_opt);\r\n-\t\t\t\t\tbreak;\r\n-\t\t\t\t}\r\n-\t\t\t}\r\n-\t\t\tbreak;\r\n-\t\t}\r\n-\t}\r\n-\treturn 1;\r\n-}\r\n-\r\n-/**\r\n- * Query a TC flower rule flags via netlink.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] dev_flow\r\n- *   Pointer to the flow.\r\n- * @param[out] pflags\r\n- *   pointer to the data retrieved by the query.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise.\r\n- */\r\n-static int\r\n-flow_tcf_query_flags(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t     struct mlx5_flow *dev_flow,\r\n-\t\t     uint32_t *pflags)\r\n-{\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct tcmsg *tcm;\r\n-\tstruct tcf_nlcb_query query = {\r\n-\t\t.handle = dev_flow->tcf.tcm->tcm_handle,\r\n-\t};\r\n-\r\n-\tnlh = mnl_nlmsg_put_header(tcf->buf);\r\n-\tnlh->nlmsg_type = RTM_GETTFILTER;\r\n-\tnlh->nlmsg_flags = NLM_F_REQUEST;\r\n-\ttcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));\r\n-\tmemcpy(tcm, dev_flow->tcf.tcm, sizeof(*tcm));\r\n-\t/*\r\n-\t * Ignore Netlink error for filter query operations.\r\n-\t * The reply length is sent by kernel as errno.\r\n-\t * Just check we got the flags option.\r\n-\t */\r\n-\tflow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_query_cb, &query);\r\n-\tif (!query.flags_valid) {\r\n-\t\t*pflags = 0;\r\n-\t\treturn -ENOENT;\r\n-\t}\r\n-\t*pflags = query.tc_flags;\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Query and check the in_hw set for specified rule.\r\n- *\r\n- * @param[in] tcf\r\n- *   Context object initialized by mlx5_flow_tcf_context_create().\r\n- * @param[in] dev_flow\r\n- *   Pointer to the flow to check.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise.\r\n- */\r\n-static int\r\n-flow_tcf_check_inhw(struct mlx5_flow_tcf_context *tcf,\r\n-\t\t    struct mlx5_flow *dev_flow)\r\n-{\r\n-\tuint32_t flags;\r\n-\tint ret;\r\n-\r\n-\tret = flow_tcf_query_flags(tcf, dev_flow, &flags);\r\n-\tif (ret)\r\n-\t\treturn ret;\r\n-\treturn  (flags & TCA_CLS_FLAGS_IN_HW) ? 0 : -ENOENT;\r\n-}\r\n-\r\n-/**\r\n- * Remove flow from E-Switch by sending Netlink message.\r\n- *\r\n- * @param[in] dev\r\n- *   Pointer to Ethernet device.\r\n- * @param[in, out] flow\r\n- *   Pointer to the sub flow.\r\n- */\r\n-static void\r\n-flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)\r\n-{\r\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\r\n-\tstruct mlx5_flow_tcf_context *ctx = priv->tcf_context;\r\n-\tstruct mlx5_flow *dev_flow;\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct tcmsg *tcm;\r\n-\r\n-\tif (!flow)\r\n-\t\treturn;\r\n-\tdev_flow = LIST_FIRST(&flow->dev_flows);\r\n-\tif (!dev_flow)\r\n-\t\treturn;\r\n-\t/* E-Switch flow can't be expanded. */\r\n-\tassert(!LIST_NEXT(dev_flow, next));\r\n-\tif (dev_flow->tcf.applied) {\r\n-\t\tnlh = dev_flow->tcf.nlh;\r\n-\t\tnlh->nlmsg_type = RTM_DELTFILTER;\r\n-\t\tnlh->nlmsg_flags = NLM_F_REQUEST;\r\n-\t\tflow_tcf_nl_ack(ctx, nlh, NULL, NULL);\r\n-\t\tif (dev_flow->tcf.tunnel) {\r\n-\t\t\tassert(dev_flow->tcf.tunnel->vtep);\r\n-\t\t\tflow_tcf_vtep_release(ctx,\r\n-\t\t\t\tdev_flow->tcf.tunnel->vtep,\r\n-\t\t\t\tdev_flow);\r\n-\t\t\tdev_flow->tcf.tunnel->vtep = NULL;\r\n-\t\t}\r\n-\t\t/* Cleanup the rule handle value. 
*/\r\n-\t\ttcm = mnl_nlmsg_get_payload(nlh);\r\n-\t\ttcm->tcm_handle = 0;\r\n-\t\tdev_flow->tcf.applied = 0;\r\n-\t}\r\n-}\r\n-\r\n-/**\r\n- * Fetch the applied rule handle. This is callback routine called by\r\n- * libmnl mnl_cb_run() in loop for every message in received packet.\r\n- * When the NLM_F_ECHO flag is specified the kernel sends the created\r\n- * rule descriptor back to the application and we can retrieve the\r\n- * actual rule handle from updated descriptor.\r\n- *\r\n- * @param[in] nlh\r\n- *   Pointer to reply header.\r\n- * @param[in, out] arg\r\n- *   Context pointer for this callback.\r\n- *\r\n- * @return\r\n- *   A positive, nonzero value on success (required by libmnl\r\n- *   to continue messages processing).\r\n- */\r\n-static int\r\n-flow_tcf_collect_apply_cb(const struct nlmsghdr *nlh, void *arg)\r\n-{\r\n-\tstruct nlmsghdr *nlhrq = arg;\r\n-\tstruct tcmsg *tcmrq = mnl_nlmsg_get_payload(nlhrq);\r\n-\tstruct tcmsg *tcm = mnl_nlmsg_get_payload(nlh);\r\n-\tstruct nlattr *na;\r\n-\r\n-\tif (nlh->nlmsg_type != RTM_NEWTFILTER ||\r\n-\t    nlh->nlmsg_seq != nlhrq->nlmsg_seq)\r\n-\t\treturn 1;\r\n-\tmnl_attr_for_each(na, nlh, sizeof(*tcm)) {\r\n-\t\tswitch (mnl_attr_get_type(na)) {\r\n-\t\tcase TCA_KIND:\r\n-\t\t\tif (strcmp(mnl_attr_get_payload(na), \"flower\")) {\r\n-\t\t\t\t/* Not flower filter, drop entire message. */\r\n-\t\t\t\treturn 1;\r\n-\t\t\t}\r\n-\t\t\ttcmrq->tcm_handle = tcm->tcm_handle;\r\n-\t\t\treturn 1;\r\n-\t\t}\r\n-\t}\r\n-\treturn 1;\r\n-}\r\n-/**\r\n- * Apply flow to E-Switch by sending Netlink message.\r\n- *\r\n- * @param[in] dev\r\n- *   Pointer to Ethernet device.\r\n- * @param[in, out] flow\r\n- *   Pointer to the sub flow.\r\n- * @param[out] error\r\n- *   Pointer to the error structure.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\r\n-\t       struct rte_flow_error *error)\r\n-{\r\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\r\n-\tstruct mlx5_flow_tcf_context *ctx = priv->tcf_context;\r\n-\tstruct mlx5_flow *dev_flow;\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct tcmsg *tcm;\r\n-\tuint64_t start = 0;\r\n-\tuint64_t twait = 0;\r\n-\tint ret;\r\n-\r\n-\tdev_flow = LIST_FIRST(&flow->dev_flows);\r\n-\t/* E-Switch flow can't be expanded. */\r\n-\tassert(!LIST_NEXT(dev_flow, next));\r\n-\tif (dev_flow->tcf.applied)\r\n-\t\treturn 0;\r\n-\tnlh = dev_flow->tcf.nlh;\r\n-\tnlh->nlmsg_type = RTM_NEWTFILTER;\r\n-\tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE |\r\n-\t\t\t   NLM_F_EXCL | NLM_F_ECHO;\r\n-\ttcm = mnl_nlmsg_get_payload(nlh);\r\n-\t/* Allow kernel to assign handle on its own. */\r\n-\ttcm->tcm_handle = 0;\r\n-\tif (dev_flow->tcf.tunnel) {\r\n-\t\t/*\r\n-\t\t * Replace the interface index, target for\r\n-\t\t * encapsulation, source for decapsulation.\r\n-\t\t */\r\n-\t\tassert(!dev_flow->tcf.tunnel->vtep);\r\n-\t\tassert(dev_flow->tcf.tunnel->ifindex_ptr);\r\n-\t\t/* Acquire actual VTEP device when rule is being applied. 
*/\r\n-\t\tdev_flow->tcf.tunnel->vtep =\r\n-\t\t\tflow_tcf_vtep_acquire(ctx,\r\n-\t\t\t\t\tdev_flow->tcf.tunnel->ifindex_org,\r\n-\t\t\t\t\tdev_flow, error);\r\n-\t\tif (!dev_flow->tcf.tunnel->vtep)\r\n-\t\t\treturn -rte_errno;\r\n-\t\tDRV_LOG(INFO, \"Replace ifindex: %d->%d\",\r\n-\t\t\t\tdev_flow->tcf.tunnel->vtep->ifindex,\r\n-\t\t\t\tdev_flow->tcf.tunnel->ifindex_org);\r\n-\t\t*dev_flow->tcf.tunnel->ifindex_ptr =\r\n-\t\t\tdev_flow->tcf.tunnel->vtep->ifindex;\r\n-\t\tif (dev_flow->tcf.tunnel->vtep->waitreg) {\r\n-\t\t\t/* Clear wait flag for VXLAN port registration. */\r\n-\t\t\tdev_flow->tcf.tunnel->vtep->waitreg = 0;\r\n-\t\t\ttwait = rte_get_timer_hz();\r\n-\t\t\tassert(twait > MS_PER_S);\r\n-\t\t\ttwait = twait * MLX5_VXLAN_WAIT_PORT_REG_MS;\r\n-\t\t\ttwait = twait / MS_PER_S;\r\n-\t\t\tstart = rte_get_timer_cycles();\r\n-\t\t}\r\n-\t}\r\n-\t/*\r\n-\t * Kernel creates the VXLAN devices and registers UDP ports to\r\n-\t * be hardware offloaded within the NIC kernel drivers. The\r\n-\t * registration process is being performed into context of\r\n-\t * working kernel thread and the race conditions might happen.\r\n-\t * The VXLAN device is created and success is returned to\r\n-\t * calling application, but the UDP port registration process\r\n-\t * is not completed yet. The next applied rule may be rejected\r\n-\t * by the driver with ENOSUP code. We are going to wait a bit,\r\n-\t * allowing registration process to be completed. The waiting\r\n-\t * is performed once after device been created.\r\n-\t */\r\n-\tdo {\r\n-\t\tstruct timespec onems;\r\n-\r\n-\t\tret = flow_tcf_nl_ack(ctx, nlh,\r\n-\t\t\t\t      flow_tcf_collect_apply_cb, nlh);\r\n-\t\tif (!ret || ret != -ENOTSUP || !twait)\r\n-\t\t\tbreak;\r\n-\t\t/* Wait one millisecond and try again till timeout. */\r\n-\t\tonems.tv_sec = 0;\r\n-\t\tonems.tv_nsec = NS_PER_S / MS_PER_S;\r\n-\t\tnanosleep(&onems, 0);\r\n-\t\tif ((rte_get_timer_cycles() - start) > twait) {\r\n-\t\t\t/* Timeout elapsed, try once more and exit. */\r\n-\t\t\ttwait = 0;\r\n-\t\t}\r\n-\t} while (true);\r\n-\tif (!ret) {\r\n-\t\tif (!tcm->tcm_handle) {\r\n-\t\t\tflow_tcf_remove(dev, flow);\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t(error, ENOENT,\r\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t \"netlink: rule zero handle returned\");\r\n-\t\t}\r\n-\t\tdev_flow->tcf.applied = 1;\r\n-\t\tif (*dev_flow->tcf.ptc_flags & TCA_CLS_FLAGS_SKIP_SW)\r\n-\t\t\treturn 0;\r\n-\t\t/*\r\n-\t\t * Rule was applied without skip_sw flag set.\r\n-\t\t * We should check whether the rule was acctually\r\n-\t\t * accepted by hardware (have look at in_hw flag).\r\n-\t\t */\r\n-\t\tif (flow_tcf_check_inhw(ctx, dev_flow)) {\r\n-\t\t\tflow_tcf_remove(dev, flow);\r\n-\t\t\treturn rte_flow_error_set\r\n-\t\t\t\t(error, ENOENT,\r\n-\t\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t \"netlink: rule has no in_hw flag set\");\r\n-\t\t}\r\n-\t\treturn 0;\r\n-\t}\r\n-\tif (dev_flow->tcf.tunnel) {\r\n-\t\t/* Rollback the VTEP configuration if rule apply failed. 
*/\r\n-\t\tassert(dev_flow->tcf.tunnel->vtep);\r\n-\t\tflow_tcf_vtep_release(ctx, dev_flow->tcf.tunnel->vtep,\r\n-\t\t\t\t      dev_flow);\r\n-\t\tdev_flow->tcf.tunnel->vtep = NULL;\r\n-\t}\r\n-\treturn rte_flow_error_set(error, rte_errno,\r\n-\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t  \"netlink: failed to create TC flow rule\");\r\n-}\r\n-\r\n-/**\r\n- * Remove flow from E-Switch and release resources of the device flow.\r\n- *\r\n- * @param[in] dev\r\n- *   Pointer to Ethernet device.\r\n- * @param[in, out] flow\r\n- *   Pointer to the sub flow.\r\n- */\r\n-static void\r\n-flow_tcf_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)\r\n-{\r\n-\tstruct mlx5_flow *dev_flow;\r\n-\r\n-\tif (!flow)\r\n-\t\treturn;\r\n-\tflow_tcf_remove(dev, flow);\r\n-\tif (flow->counter) {\r\n-\t\tif (--flow->counter->ref_cnt == 0) {\r\n-\t\t\trte_free(flow->counter);\r\n-\t\t\tflow->counter = NULL;\r\n-\t\t}\r\n-\t}\r\n-\tdev_flow = LIST_FIRST(&flow->dev_flows);\r\n-\tif (!dev_flow)\r\n-\t\treturn;\r\n-\t/* E-Switch flow can't be expanded. */\r\n-\tassert(!LIST_NEXT(dev_flow, next));\r\n-\tLIST_REMOVE(dev_flow, next);\r\n-\trte_free(dev_flow);\r\n-}\r\n-\r\n-/**\r\n- * Helper routine for figuring the space size required for a parse buffer.\r\n- *\r\n- * @param array\r\n- *   array of values to use.\r\n- * @param idx\r\n- *   Current location in array.\r\n- * @param value\r\n- *   Value to compare with.\r\n- *\r\n- * @return\r\n- *   The maximum between the given value and the array value on index.\r\n- */\r\n-static uint16_t\r\n-flow_tcf_arr_val_max(uint16_t array[], int idx, uint16_t value)\r\n-{\r\n-\treturn idx < 0 ? (value) : RTE_MAX((array)[idx], value);\r\n-}\r\n-\r\n-/**\r\n- * Parse rtnetlink message attributes filling the attribute table with the info\r\n- * retrieved.\r\n- *\r\n- * @param tb\r\n- *   Attribute table to be filled.\r\n- * @param[out] max\r\n- *   Maxinum entry in the attribute table.\r\n- * @param rte\r\n- *   The attributes section in the message to be parsed.\r\n- * @param len\r\n- *   The length of the attributes section in the message.\r\n- */\r\n-static void\r\n-flow_tcf_nl_parse_rtattr(struct rtattr *tb[], int max,\r\n-\t\t\t struct rtattr *rta, int len)\r\n-{\r\n-\tunsigned short type;\r\n-\tmemset(tb, 0, sizeof(struct rtattr *) * (max + 1));\r\n-\twhile (RTA_OK(rta, len)) {\r\n-\t\ttype = rta->rta_type;\r\n-\t\tif (type <= max && !tb[type])\r\n-\t\t\ttb[type] = rta;\r\n-\t\trta = RTA_NEXT(rta, len);\r\n-\t}\r\n-}\r\n-\r\n-/**\r\n- * Extract flow counters from flower action.\r\n- *\r\n- * @param rta\r\n- *   flower action stats properties in the Netlink message received.\r\n- * @param rta_type\r\n- *   The backward sequence of rta_types, as written in the attribute table,\r\n- *   we need to traverse in order to get to the requested object.\r\n- * @param idx\r\n- *   Current location in rta_type table.\r\n- * @param[out] data\r\n- *   data holding the count statistics of the rte_flow retrieved from\r\n- *   the message.\r\n- *\r\n- * @return\r\n- *   0 if data was found and retrieved, -1 otherwise.\r\n- */\r\n-static int\r\n-flow_tcf_nl_action_stats_parse_and_get(struct rtattr *rta,\r\n-\t\t\t\t       uint16_t rta_type[], int idx,\r\n-\t\t\t\t       struct gnet_stats_basic *data)\r\n-{\r\n-\tint tca_stats_max = flow_tcf_arr_val_max(rta_type, idx,\r\n-\t\t\t\t\t\t TCA_STATS_BASIC);\r\n-\tstruct rtattr *tbs[tca_stats_max + 1];\r\n-\r\n-\tif (rta == NULL || idx < 0)\r\n-\t\treturn -1;\r\n-\tflow_tcf_nl_parse_rtattr(tbs, tca_stats_max,\r\n-\t\t\t\t 
RTA_DATA(rta), RTA_PAYLOAD(rta));\r\n-\tswitch (rta_type[idx]) {\r\n-\tcase TCA_STATS_BASIC:\r\n-\t\tif (tbs[TCA_STATS_BASIC]) {\r\n-\t\t\tmemcpy(data, RTA_DATA(tbs[TCA_STATS_BASIC]),\r\n-\t\t\t       RTE_MIN(RTA_PAYLOAD(tbs[TCA_STATS_BASIC]),\r\n-\t\t\t       sizeof(*data)));\r\n-\t\t\treturn 0;\r\n-\t\t}\r\n-\t\tbreak;\r\n-\tdefault:\r\n-\t\tbreak;\r\n-\t}\r\n-\treturn -1;\r\n-}\r\n-\r\n-/**\r\n- * Parse flower single action retrieving the requested action attribute,\r\n- * if found.\r\n- *\r\n- * @param arg\r\n- *   flower action properties in the Netlink message received.\r\n- * @param rta_type\r\n- *   The backward sequence of rta_types, as written in the attribute table,\r\n- *   we need to traverse in order to get to the requested object.\r\n- * @param idx\r\n- *   Current location in rta_type table.\r\n- * @param[out] data\r\n- *   Count statistics retrieved from the message query.\r\n- *\r\n- * @return\r\n- *   0 if data was found and retrieved, -1 otherwise.\r\n- */\r\n-static int\r\n-flow_tcf_nl_parse_one_action_and_get(struct rtattr *arg,\r\n-\t\t\t\t     uint16_t rta_type[], int idx, void *data)\r\n-{\r\n-\tint tca_act_max = flow_tcf_arr_val_max(rta_type, idx, TCA_ACT_STATS);\r\n-\tstruct rtattr *tb[tca_act_max + 1];\r\n-\r\n-\tif (arg == NULL || idx < 0)\r\n-\t\treturn -1;\r\n-\tflow_tcf_nl_parse_rtattr(tb, tca_act_max,\r\n-\t\t\t\t RTA_DATA(arg), RTA_PAYLOAD(arg));\r\n-\tif (tb[TCA_ACT_KIND] == NULL)\r\n-\t\treturn -1;\r\n-\tswitch (rta_type[idx]) {\r\n-\tcase TCA_ACT_STATS:\r\n-\t\tif (tb[TCA_ACT_STATS])\r\n-\t\t\treturn flow_tcf_nl_action_stats_parse_and_get\r\n-\t\t\t\t\t(tb[TCA_ACT_STATS],\r\n-\t\t\t\t\t rta_type, --idx,\r\n-\t\t\t\t\t (struct gnet_stats_basic *)data);\r\n-\t\tbreak;\r\n-\tdefault:\r\n-\t\tbreak;\r\n-\t}\r\n-\treturn -1;\r\n-}\r\n-\r\n-/**\r\n- * Parse flower action section in the message retrieving the requested\r\n- * attribute from the first action that provides it.\r\n- *\r\n- * @param opt\r\n- *   flower section in the Netlink message received.\r\n- * @param rta_type\r\n- *   The backward sequence of rta_types, as written in the attribute table,\r\n- *   we need to traverse in order to get to the requested object.\r\n- * @param idx\r\n- *   Current location in rta_type table.\r\n- * @param[out] data\r\n- *   data retrieved from the message query.\r\n- *\r\n- * @return\r\n- *   0 if data was found and retrieved, -1 otherwise.\r\n- */\r\n-static int\r\n-flow_tcf_nl_action_parse_and_get(struct rtattr *arg,\r\n-\t\t\t\t uint16_t rta_type[], int idx, void *data)\r\n-{\r\n-\tstruct rtattr *tb[TCA_ACT_MAX_PRIO + 1];\r\n-\tint i;\r\n-\r\n-\tif (arg == NULL || idx < 0)\r\n-\t\treturn -1;\r\n-\tflow_tcf_nl_parse_rtattr(tb, TCA_ACT_MAX_PRIO,\r\n-\t\t\t\t RTA_DATA(arg), RTA_PAYLOAD(arg));\r\n-\tswitch (rta_type[idx]) {\r\n-\t/*\r\n-\t * flow counters are stored in the actions defined by the flow\r\n-\t * and not in the flow itself, therefore we need to traverse the\r\n-\t * flower chain of actions in search for them.\r\n-\t *\r\n-\t * Note that the index is not decremented here.\r\n-\t */\r\n-\tcase TCA_ACT_STATS:\r\n-\t\tfor (i = 0; i <= TCA_ACT_MAX_PRIO; i++) {\r\n-\t\t\tif (tb[i] &&\r\n-\t\t\t!flow_tcf_nl_parse_one_action_and_get(tb[i],\r\n-\t\t\t\t\t\t\t      rta_type,\r\n-\t\t\t\t\t\t\t      idx, data))\r\n-\t\t\t\treturn 0;\r\n-\t\t}\r\n-\t\tbreak;\r\n-\tdefault:\r\n-\t\tbreak;\r\n-\t}\r\n-\treturn -1;\r\n-}\r\n-\r\n-/**\r\n- * Parse flower classifier options in the message, retrieving the requested\r\n- * attribute if found.\r\n- *\r\n- * @param opt\r\n- 
*   flower section in the Netlink message received.\r\n- * @param rta_type\r\n- *   The backward sequence of rta_types, as written in the attribute table,\r\n- *   we need to traverse in order to get to the requested object.\r\n- * @param idx\r\n- *   Current location in rta_type table.\r\n- * @param[out] data\r\n- *   data retrieved from the message query.\r\n- *\r\n- * @return\r\n- *   0 if data was found and retrieved, -1 otherwise.\r\n- */\r\n-static int\r\n-flow_tcf_nl_opts_parse_and_get(struct rtattr *opt,\r\n-\t\t\t       uint16_t rta_type[], int idx, void *data)\r\n-{\r\n-\tint tca_flower_max = flow_tcf_arr_val_max(rta_type, idx,\r\n-\t\t\t\t\t\t  TCA_FLOWER_ACT);\r\n-\tstruct rtattr *tb[tca_flower_max + 1];\r\n-\r\n-\tif (!opt || idx < 0)\r\n-\t\treturn -1;\r\n-\tflow_tcf_nl_parse_rtattr(tb, tca_flower_max,\r\n-\t\t\t\t RTA_DATA(opt), RTA_PAYLOAD(opt));\r\n-\tswitch (rta_type[idx]) {\r\n-\tcase TCA_FLOWER_ACT:\r\n-\t\tif (tb[TCA_FLOWER_ACT])\r\n-\t\t\treturn flow_tcf_nl_action_parse_and_get\r\n-\t\t\t\t\t\t\t(tb[TCA_FLOWER_ACT],\r\n-\t\t\t\t\t\t\t rta_type, --idx, data);\r\n-\t\tbreak;\r\n-\tdefault:\r\n-\t\tbreak;\r\n-\t}\r\n-\treturn -1;\r\n-}\r\n-\r\n-/**\r\n- * Parse Netlink reply on filter query, retrieving the flow counters.\r\n- *\r\n- * @param nlh\r\n- *   Message received from Netlink.\r\n- * @param rta_type\r\n- *   The backward sequence of rta_types, as written in the attribute table,\r\n- *   we need to traverse in order to get to the requested object.\r\n- * @param idx\r\n- *   Current location in rta_type table.\r\n- * @param[out] data\r\n- *   data retrieved from the message query.\r\n- *\r\n- * @return\r\n- *   0 if data was found and retrieved, -1 otherwise.\r\n- */\r\n-static int\r\n-flow_tcf_nl_filter_parse_and_get(struct nlmsghdr *cnlh,\r\n-\t\t\t\t uint16_t rta_type[], int idx, void *data)\r\n-{\r\n-\tstruct nlmsghdr *nlh = cnlh;\r\n-\tstruct tcmsg *t = NLMSG_DATA(nlh);\r\n-\tint len = nlh->nlmsg_len;\r\n-\tint tca_max = flow_tcf_arr_val_max(rta_type, idx, TCA_OPTIONS);\r\n-\tstruct rtattr *tb[tca_max + 1];\r\n-\r\n-\tif (idx < 0)\r\n-\t\treturn -1;\r\n-\tif (nlh->nlmsg_type != RTM_NEWTFILTER &&\r\n-\t    nlh->nlmsg_type != RTM_GETTFILTER &&\r\n-\t    nlh->nlmsg_type != RTM_DELTFILTER)\r\n-\t\treturn -1;\r\n-\tlen -= NLMSG_LENGTH(sizeof(*t));\r\n-\tif (len < 0)\r\n-\t\treturn -1;\r\n-\tflow_tcf_nl_parse_rtattr(tb, tca_max, TCA_RTA(t), len);\r\n-\t/* Not a TC flower flow - bail out */\r\n-\tif (!tb[TCA_KIND] ||\r\n-\t    strcmp(RTA_DATA(tb[TCA_KIND]), \"flower\"))\r\n-\t\treturn -1;\r\n-\tswitch (rta_type[idx]) {\r\n-\tcase TCA_OPTIONS:\r\n-\t\tif (tb[TCA_OPTIONS])\r\n-\t\t\treturn flow_tcf_nl_opts_parse_and_get(tb[TCA_OPTIONS],\r\n-\t\t\t\t\t\t\t      rta_type,\r\n-\t\t\t\t\t\t\t      --idx, data);\r\n-\t\tbreak;\r\n-\tdefault:\r\n-\t\tbreak;\r\n-\t}\r\n-\treturn -1;\r\n-}\r\n-\r\n-/**\r\n- * A callback to parse Netlink reply on TC flower query.\r\n- *\r\n- * @param nlh\r\n- *   Message received from Netlink.\r\n- * @param[out] data\r\n- *   Pointer to data area to be filled by the parsing routine.\r\n- *   assumed to be a pointer to struct flow_tcf_stats_basic.\r\n- *\r\n- * @return\r\n- *   MNL_CB_OK value.\r\n- */\r\n-static int\r\n-flow_tcf_nl_message_get_stats_basic(const struct nlmsghdr *nlh, void *data)\r\n-{\r\n-\t/*\r\n-\t * The backward sequence of rta_types to pass in order to get\r\n-\t *  to the counters.\r\n-\t */\r\n-\tuint16_t rta_type[] = { TCA_STATS_BASIC, TCA_ACT_STATS,\r\n-\t\t\t\tTCA_FLOWER_ACT, TCA_OPTIONS };\r\n-\tstruct 
flow_tcf_stats_basic *sb_data = data;\r\n-\tunion {\r\n-\t\tconst struct nlmsghdr *c;\r\n-\t\tstruct nlmsghdr *nc;\r\n-\t} tnlh = { .c = nlh };\r\n-\r\n-\tif (!flow_tcf_nl_filter_parse_and_get(tnlh.nc, rta_type,\r\n-\t\t\t\t\t      RTE_DIM(rta_type) - 1,\r\n-\t\t\t\t\t      (void *)&sb_data->counters))\r\n-\t\tsb_data->valid = true;\r\n-\treturn MNL_CB_OK;\r\n-}\r\n-\r\n-/**\r\n- * Query a TC flower rule for its statistics via netlink.\r\n- *\r\n- * @param[in] dev\r\n- *   Pointer to Ethernet device.\r\n- * @param[in] flow\r\n- *   Pointer to the sub flow.\r\n- * @param[out] data\r\n- *   data retrieved by the query.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-static int\r\n-flow_tcf_query_count(struct rte_eth_dev *dev,\r\n-\t\t\t  struct rte_flow *flow,\r\n-\t\t\t  void *data,\r\n-\t\t\t  struct rte_flow_error *error)\r\n-{\r\n-\tstruct flow_tcf_stats_basic sb_data;\r\n-\tstruct rte_flow_query_count *qc = data;\r\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\r\n-\tstruct mlx5_flow_tcf_context *ctx = priv->tcf_context;\r\n-\tstruct mnl_socket *nl = ctx->nl;\r\n-\tstruct mlx5_flow *dev_flow;\r\n-\tstruct nlmsghdr *nlh;\r\n-\tuint32_t seq = priv->tcf_context->seq++;\r\n-\tssize_t ret;\r\n-\tassert(qc);\r\n-\r\n-\tmemset(&sb_data, 0, sizeof(sb_data));\r\n-\tdev_flow = LIST_FIRST(&flow->dev_flows);\r\n-\t/* E-Switch flow can't be expanded. */\r\n-\tassert(!LIST_NEXT(dev_flow, next));\r\n-\tif (!dev_flow->flow->counter)\r\n-\t\tgoto notsup_exit;\r\n-\tnlh = dev_flow->tcf.nlh;\r\n-\tnlh->nlmsg_type = RTM_GETTFILTER;\r\n-\tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ECHO;\r\n-\tnlh->nlmsg_seq = seq;\r\n-\tif (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) == -1)\r\n-\t\tgoto error_exit;\r\n-\tdo {\r\n-\t\tret = mnl_socket_recvfrom(nl, ctx->buf, ctx->buf_size);\r\n-\t\tif (ret <= 0)\r\n-\t\t\tbreak;\r\n-\t\tret = mnl_cb_run(ctx->buf, ret, seq,\r\n-\t\t\t\t mnl_socket_get_portid(nl),\r\n-\t\t\t\t flow_tcf_nl_message_get_stats_basic,\r\n-\t\t\t\t (void *)&sb_data);\r\n-\t} while (ret > 0);\r\n-\t/* Return the delta from last reset. */\r\n-\tif (sb_data.valid) {\r\n-\t\t/* Return the delta from last reset. 
*/\r\n-\t\tqc->hits_set = 1;\r\n-\t\tqc->bytes_set = 1;\r\n-\t\tqc->hits = sb_data.counters.packets - flow->counter->hits;\r\n-\t\tqc->bytes = sb_data.counters.bytes - flow->counter->bytes;\r\n-\t\tif (qc->reset) {\r\n-\t\t\tflow->counter->hits = sb_data.counters.packets;\r\n-\t\t\tflow->counter->bytes = sb_data.counters.bytes;\r\n-\t\t}\r\n-\t\treturn 0;\r\n-\t}\r\n-\treturn rte_flow_error_set(error, EINVAL,\r\n-\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\r\n-\t\t\t\t  NULL,\r\n-\t\t\t\t  \"flow does not have counter\");\r\n-error_exit:\r\n-\treturn rte_flow_error_set\r\n-\t\t\t(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\r\n-\t\t\t NULL, \"netlink: failed to read flow rule counters\");\r\n-notsup_exit:\r\n-\treturn rte_flow_error_set\r\n-\t\t\t(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\r\n-\t\t\t NULL, \"counters are not available.\");\r\n-}\r\n-\r\n-/**\r\n- * Query a flow.\r\n- *\r\n- * @see rte_flow_query()\r\n- * @see rte_flow_ops\r\n- */\r\n-static int\r\n-flow_tcf_query(struct rte_eth_dev *dev,\r\n-\t       struct rte_flow *flow,\r\n-\t       const struct rte_flow_action *actions,\r\n-\t       void *data,\r\n-\t       struct rte_flow_error *error)\r\n-{\r\n-\tint ret = -EINVAL;\r\n-\r\n-\tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {\r\n-\t\tswitch (actions->type) {\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\r\n-\t\t\tbreak;\r\n-\t\tcase RTE_FLOW_ACTION_TYPE_COUNT:\r\n-\t\t\tret = flow_tcf_query_count(dev, flow, data, error);\r\n-\t\t\tbreak;\r\n-\t\tdefault:\r\n-\t\t\treturn rte_flow_error_set(error, ENOTSUP,\r\n-\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\r\n-\t\t\t\t\t\t  actions,\r\n-\t\t\t\t\t\t  \"action not supported\");\r\n-\t\t}\r\n-\t}\r\n-\treturn ret;\r\n-}\r\n-\r\n-const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops = {\r\n-\t.validate = flow_tcf_validate,\r\n-\t.prepare = flow_tcf_prepare,\r\n-\t.translate = flow_tcf_translate,\r\n-\t.apply = flow_tcf_apply,\r\n-\t.remove = flow_tcf_remove,\r\n-\t.destroy = flow_tcf_destroy,\r\n-\t.query = flow_tcf_query,\r\n-};\r\n-\r\n-/**\r\n- * Create and configure a libmnl socket for Netlink flow rules.\r\n- *\r\n- * @return\r\n- *   A valid libmnl socket object pointer on success, NULL otherwise and\r\n- *   rte_errno is set.\r\n- */\r\n-static struct mnl_socket *\r\n-flow_tcf_mnl_socket_create(void)\r\n-{\r\n-\tstruct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);\r\n-\r\n-\tif (nl) {\r\n-\t\tmnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 },\r\n-\t\t\t\t      sizeof(int));\r\n-\t\tif (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID))\r\n-\t\t\treturn nl;\r\n-\t}\r\n-\trte_errno = errno;\r\n-\tif (nl)\r\n-\t\tmnl_socket_close(nl);\r\n-\treturn NULL;\r\n-}\r\n-\r\n-/**\r\n- * Destroy a libmnl socket.\r\n- *\r\n- * @param nl\r\n- *   Libmnl socket of the @p NETLINK_ROUTE kind.\r\n- */\r\n-static void\r\n-flow_tcf_mnl_socket_destroy(struct mnl_socket *nl)\r\n-{\r\n-\tif (nl)\r\n-\t\tmnl_socket_close(nl);\r\n-}\r\n-\r\n-/**\r\n- * Initialize ingress qdisc of a given network interface.\r\n- *\r\n- * @param ctx\r\n- *   Pointer to tc-flower context to use.\r\n- * @param ifindex\r\n- *   Index of network interface to initialize.\r\n- * @param[out] error\r\n- *   Perform verbose error reporting if not NULL.\r\n- *\r\n- * @return\r\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\r\n- */\r\n-int\r\n-mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,\r\n-\t\t   unsigned int ifindex, struct rte_flow_error *error)\r\n-{\r\n-\tstruct nlmsghdr *nlh;\r\n-\tstruct tcmsg 
*tcm;\r\n-\talignas(struct nlmsghdr)\r\n-\tuint8_t buf[mnl_nlmsg_size(sizeof(*tcm)) +\r\n-\t\t    SZ_NLATTR_STRZ_OF(\"ingress\") +\r\n-\t\t    MNL_BUF_EXTRA_SPACE];\r\n-\r\n-\t/* Destroy existing ingress qdisc and everything attached to it. */\r\n-\tnlh = mnl_nlmsg_put_header(buf);\r\n-\tnlh->nlmsg_type = RTM_DELQDISC;\r\n-\tnlh->nlmsg_flags = NLM_F_REQUEST;\r\n-\ttcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));\r\n-\ttcm->tcm_family = AF_UNSPEC;\r\n-\ttcm->tcm_ifindex = ifindex;\r\n-\ttcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);\r\n-\ttcm->tcm_parent = TC_H_INGRESS;\r\n-\tassert(sizeof(buf) >= nlh->nlmsg_len);\r\n-\t/* Ignore errors when qdisc is already absent. */\r\n-\tif (flow_tcf_nl_ack(ctx, nlh, NULL, NULL) &&\r\n-\t    rte_errno != EINVAL && rte_errno != ENOENT)\r\n-\t\treturn rte_flow_error_set(error, rte_errno,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t\t  \"netlink: failed to remove ingress\"\r\n-\t\t\t\t\t  \" qdisc\");\r\n-\t/* Create fresh ingress qdisc. */\r\n-\tnlh = mnl_nlmsg_put_header(buf);\r\n-\tnlh->nlmsg_type = RTM_NEWQDISC;\r\n-\tnlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;\r\n-\ttcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));\r\n-\ttcm->tcm_family = AF_UNSPEC;\r\n-\ttcm->tcm_ifindex = ifindex;\r\n-\ttcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);\r\n-\ttcm->tcm_parent = TC_H_INGRESS;\r\n-\tmnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, \"ingress\");\r\n-\tassert(sizeof(buf) >= nlh->nlmsg_len);\r\n-\tif (flow_tcf_nl_ack(ctx, nlh, NULL, NULL))\r\n-\t\treturn rte_flow_error_set(error, rte_errno,\r\n-\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\r\n-\t\t\t\t\t  \"netlink: failed to create ingress\"\r\n-\t\t\t\t\t  \" qdisc\");\r\n-\treturn 0;\r\n-}\r\n-\r\n-/**\r\n- * Create libmnl context for Netlink flow rules.\r\n- *\r\n- * @return\r\n- *   A valid libmnl socket object pointer on success, NULL otherwise and\r\n- *   rte_errno is set.\r\n- */\r\n-struct mlx5_flow_tcf_context *\r\n-mlx5_flow_tcf_context_create(void)\r\n-{\r\n-\tstruct mlx5_flow_tcf_context *ctx = rte_zmalloc(__func__,\r\n-\t\t\t\t\t\t\tsizeof(*ctx),\r\n-\t\t\t\t\t\t\tsizeof(uint32_t));\r\n-\tif (!ctx)\r\n-\t\tgoto error;\r\n-\tctx->nl = flow_tcf_mnl_socket_create();\r\n-\tif (!ctx->nl)\r\n-\t\tgoto error;\r\n-\tctx->buf_size = MNL_SOCKET_BUFFER_SIZE;\r\n-\tctx->buf = rte_zmalloc(__func__,\r\n-\t\t\t       ctx->buf_size, sizeof(uint32_t));\r\n-\tif (!ctx->buf)\r\n-\t\tgoto error;\r\n-\tctx->seq = random();\r\n-\treturn ctx;\r\n-error:\r\n-\tmlx5_flow_tcf_context_destroy(ctx);\r\n-\treturn NULL;\r\n-}\r\n-\r\n-/**\r\n- * Destroy a libmnl context.\r\n- *\r\n- * @param ctx\r\n- *   Libmnl socket of the @p NETLINK_ROUTE kind.\r\n- */\r\n-void\r\n-mlx5_flow_tcf_context_destroy(struct mlx5_flow_tcf_context *ctx)\r\n-{\r\n-\tif (!ctx)\r\n-\t\treturn;\r\n-\tflow_tcf_mnl_socket_destroy(ctx->nl);\r\n-\trte_free(ctx->buf);\r\n-\trte_free(ctx);\r\n-}\r\ndiff --git a/mk/rte.app.mk b/mk/rte.app.mk\r\nindex 7c9b4b5..05126ee 100644\r\n--- a/mk/rte.app.mk\r\n+++ b/mk/rte.app.mk\r\n@@ -175,7 +175,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KNI)        += -lrte_pmd_kni\r\n endif\r\n _LDLIBS-$(CONFIG_RTE_LIBRTE_LIO_PMD)        += -lrte_pmd_lio\r\n _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD)       += -lrte_pmd_mlx4\r\n-_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -lrte_pmd_mlx5 -lmnl\r\n+_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -lrte_pmd_mlx5\r\n ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\r\n _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD)       += -ldl\r\n 
_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -ldl\r\n",
    "prefixes": [
        "2/2"
    ]
}