get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

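For reference, a minimal sketch of driving these endpoints from Python (an assumption for illustration: the `requests` package is installed, and for updates you hold a Patchwork API token with maintainer rights on the project — neither is shown on this page). The GET request and JSON payload below show the full patch representation the API returns.

import requests

BASE = "http://patches.dpdk.org/api"

# Read-only access needs no authentication.
patch = requests.get(f"{BASE}/patches/65000/").json()
print(patch["name"], patch["state"])

# Updates (PATCH/PUT) require token authentication and maintainer rights.
# "MY_TOKEN" is a placeholder, not a real credential.
headers = {"Authorization": "Token MY_TOKEN"}
resp = requests.patch(f"{BASE}/patches/65000/",
                      json={"state": "accepted"},
                      headers=headers)
resp.raise_for_status()
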
GET /api/patches/65000/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 65000,
    "url": "http://patches.dpdk.org/api/patches/65000/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1579539790-3882-3-git-send-email-matan@mellanox.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1579539790-3882-3-git-send-email-matan@mellanox.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1579539790-3882-3-git-send-email-matan@mellanox.com",
    "date": "2020-01-20T17:02:34",
    "name": "[v1,02/38] mlx5: prepare common library",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "af27aed892ac91ce883b3b903471c97a3e2783cf",
    "submitter": {
        "id": 796,
        "url": "http://patches.dpdk.org/api/people/796/?format=api",
        "name": "Matan Azrad",
        "email": "matan@mellanox.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1579539790-3882-3-git-send-email-matan@mellanox.com/mbox/",
    "series": [
        {
            "id": 8223,
            "url": "http://patches.dpdk.org/api/series/8223/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=8223",
            "date": "2020-01-20T17:02:37",
            "name": "Introduce mlx5 vDPA driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/8223/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/65000/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/65000/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 86FBDA04DD;\n\tTue, 21 Jan 2020 10:12:33 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 396992BF5;\n\tTue, 21 Jan 2020 10:12:33 +0100 (CET)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id 1A76D1BF87\n for <dev@dpdk.org>; Mon, 20 Jan 2020 18:03:13 +0100 (CET)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n asafp@mellanox.com)\n with ESMTPS (AES256-SHA encrypted); 20 Jan 2020 19:03:12 +0200",
            "from pegasus07.mtr.labs.mlnx (pegasus07.mtr.labs.mlnx\n [10.210.16.112])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 00KH3BGL024424;\n Mon, 20 Jan 2020 19:03:12 +0200"
        ],
        "From": "Matan Azrad <matan@mellanox.com>",
        "To": "dev@dpdk.org",
        "Cc": "Maxime Coquelin <maxime.coquelin@redhat.com>,\n Thomas Monjalon <thomas@monjalon.net>",
        "Date": "Mon, 20 Jan 2020 17:02:34 +0000",
        "Message-Id": "<1579539790-3882-3-git-send-email-matan@mellanox.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1579539790-3882-1-git-send-email-matan@mellanox.com>",
        "References": "<1579539790-3882-1-git-send-email-matan@mellanox.com>",
        "X-Mailman-Approved-At": "Tue, 21 Jan 2020 10:12:31 +0100",
        "Subject": "[dpdk-dev] [PATCH v1 02/38] mlx5: prepare common library",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "A new Mellanox vdpa PMD will be added to support vdpa operations by\nMellanox adapters.\n\nThis vdpa PMD design includes mlx5_glue and mlx5_devx operations and big\npart of them are shared with the net/mlx5 PMD.\n\nCreate a new common library in drivers/common for mlx5 PMDs.\nMove mlx5_glue, mlx5_devx_cmds and their dependencies to the new mlx5\ncommon library in drivers/common.\n\nThe files mlx5_devx_cmds.c, mlx5_devx_cmds.h, mlx5_glue.c,\nmlx5_glue.h and mlx5_prm.h,  are moved as is from drivers/net/mlx5 to\ndrivers/common/mlx5.\n\nShare the log mechanism macros.\nSeparate also the log mechanism to allow different log level control to\nthe common library.\n\nBuild files and version files are adjusted accordingly.\nIncludes lines are adjusted accordingly.\n\nSigned-off-by: Matan Azrad <matan@mellanox.com>\n---\n MAINTAINERS                                     |    1 +\n drivers/common/Makefile                         |    4 +\n drivers/common/meson.build                      |    2 +-\n drivers/common/mlx5/Makefile                    |  331 ++++\n drivers/common/mlx5/meson.build                 |  205 +++\n drivers/common/mlx5/mlx5_common.c               |   17 +\n drivers/common/mlx5/mlx5_common.h               |   87 ++\n drivers/common/mlx5/mlx5_common_utils.h         |   20 +\n drivers/common/mlx5/mlx5_devx_cmds.c            |  974 ++++++++++++\n drivers/common/mlx5/mlx5_devx_cmds.h            |  227 +++\n drivers/common/mlx5/mlx5_glue.c                 | 1138 ++++++++++++++\n drivers/common/mlx5/mlx5_glue.h                 |  265 ++++\n drivers/common/mlx5/mlx5_prm.h                  | 1884 +++++++++++++++++++++++\n drivers/common/mlx5/rte_common_mlx5_version.map |   20 +\n drivers/net/mlx5/Makefile                       |  303 +---\n drivers/net/mlx5/meson.build                    |  256 +--\n drivers/net/mlx5/mlx5.c                         |    7 +-\n drivers/net/mlx5/mlx5.h                         |    9 +-\n drivers/net/mlx5/mlx5_devx_cmds.c               |  974 ------------\n drivers/net/mlx5/mlx5_devx_cmds.h               |  227 ---\n drivers/net/mlx5/mlx5_ethdev.c                  |    5 +-\n drivers/net/mlx5/mlx5_flow.c                    |    9 +-\n drivers/net/mlx5/mlx5_flow.h                    |    3 +-\n drivers/net/mlx5/mlx5_flow_dv.c                 |    9 +-\n drivers/net/mlx5/mlx5_flow_meter.c              |    2 +\n drivers/net/mlx5/mlx5_flow_verbs.c              |    7 +-\n drivers/net/mlx5/mlx5_glue.c                    | 1150 --------------\n drivers/net/mlx5/mlx5_glue.h                    |  264 ----\n drivers/net/mlx5/mlx5_mac.c                     |    2 +-\n drivers/net/mlx5/mlx5_mr.c                      |    3 +-\n drivers/net/mlx5/mlx5_prm.h                     | 1883 ----------------------\n drivers/net/mlx5/mlx5_rss.c                     |    2 +-\n drivers/net/mlx5/mlx5_rxq.c                     |    8 +-\n drivers/net/mlx5/mlx5_rxtx.c                    |    7 +-\n drivers/net/mlx5/mlx5_rxtx.h                    |    7 +-\n drivers/net/mlx5/mlx5_rxtx_vec.c                |    5 +-\n drivers/net/mlx5/mlx5_rxtx_vec.h                |    3 +-\n drivers/net/mlx5/mlx5_rxtx_vec_altivec.h        |    5 +-\n drivers/net/mlx5/mlx5_rxtx_vec_neon.h           |    5 +-\n drivers/net/mlx5/mlx5_rxtx_vec_sse.h            |    5 +-\n drivers/net/mlx5/mlx5_stats.c                   |    2 +-\n drivers/net/mlx5/mlx5_txq.c                     |    7 +-\n drivers/net/mlx5/mlx5_utils.h                   |   79 +-\n drivers/net/mlx5/mlx5_vlan.c               
     |    5 +-\n mk/rte.app.mk                                   |    1 +\n 45 files changed, 5296 insertions(+), 5133 deletions(-)\n create mode 100644 drivers/common/mlx5/Makefile\n create mode 100644 drivers/common/mlx5/meson.build\n create mode 100644 drivers/common/mlx5/mlx5_common.c\n create mode 100644 drivers/common/mlx5/mlx5_common.h\n create mode 100644 drivers/common/mlx5/mlx5_common_utils.h\n create mode 100644 drivers/common/mlx5/mlx5_devx_cmds.c\n create mode 100644 drivers/common/mlx5/mlx5_devx_cmds.h\n create mode 100644 drivers/common/mlx5/mlx5_glue.c\n create mode 100644 drivers/common/mlx5/mlx5_glue.h\n create mode 100644 drivers/common/mlx5/mlx5_prm.h\n create mode 100644 drivers/common/mlx5/rte_common_mlx5_version.map\n delete mode 100644 drivers/net/mlx5/mlx5_devx_cmds.c\n delete mode 100644 drivers/net/mlx5/mlx5_devx_cmds.h\n delete mode 100644 drivers/net/mlx5/mlx5_glue.c\n delete mode 100644 drivers/net/mlx5/mlx5_glue.h\n delete mode 100644 drivers/net/mlx5/mlx5_prm.h",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex 8cd037c..4b0d524 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -736,6 +736,7 @@ M: Matan Azrad <matan@mellanox.com>\n M: Shahaf Shuler <shahafs@mellanox.com>\n M: Viacheslav Ovsiienko <viacheslavo@mellanox.com>\n T: git://dpdk.org/next/dpdk-next-net-mlx\n+F: drivers/common/mlx5/\n F: drivers/net/mlx5/\n F: buildtools/options-ibverbs-static.sh\n F: doc/guides/nics/mlx5.rst\ndiff --git a/drivers/common/Makefile b/drivers/common/Makefile\nindex 3254c52..4775d4b 100644\n--- a/drivers/common/Makefile\n+++ b/drivers/common/Makefile\n@@ -35,4 +35,8 @@ ifneq (,$(findstring y,$(IAVF-y)))\n DIRS-y += iavf\n endif\n \n+ifeq ($(CONFIG_RTE_LIBRTE_MLX5_PMD),y)\n+DIRS-y += mlx5\n+endif\n+\n include $(RTE_SDK)/mk/rte.subdir.mk\ndiff --git a/drivers/common/meson.build b/drivers/common/meson.build\nindex fc620f7..ffd06e2 100644\n--- a/drivers/common/meson.build\n+++ b/drivers/common/meson.build\n@@ -2,6 +2,6 @@\n # Copyright(c) 2018 Cavium, Inc\n \n std_deps = ['eal']\n-drivers = ['cpt', 'dpaax', 'iavf', 'mvep', 'octeontx', 'octeontx2', 'qat']\n+drivers = ['cpt', 'dpaax', 'iavf', 'mlx5', 'mvep', 'octeontx', 'octeontx2', 'qat']\n config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'\n driver_name_fmt = 'rte_common_@0@'\ndiff --git a/drivers/common/mlx5/Makefile b/drivers/common/mlx5/Makefile\nnew file mode 100644\nindex 0000000..b94d3c0\n--- /dev/null\n+++ b/drivers/common/mlx5/Makefile\n@@ -0,0 +1,331 @@\n+#   SPDX-License-Identifier: BSD-3-Clause\n+#   Copyright 2019 Mellanox Technologies, Ltd\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+# Library name.\n+LIB = librte_common_mlx5.a\n+LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)\n+LIB_GLUE_BASE = librte_pmd_mlx5_glue.so\n+LIB_GLUE_VERSION = 20.02.0\n+\n+# Sources.\n+ifneq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\n+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_glue.c\n+endif\n+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_devx_cmds.c\n+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_common.c\n+\n+ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\n+INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)\n+endif\n+\n+# Basic CFLAGS.\n+CFLAGS += -O3\n+CFLAGS += -std=c11 -Wall -Wextra\n+CFLAGS += -g\n+CFLAGS += -I.\n+CFLAGS += -D_BSD_SOURCE\n+CFLAGS += -D_DEFAULT_SOURCE\n+CFLAGS += -D_XOPEN_SOURCE=600\n+CFLAGS += $(WERROR_FLAGS)\n+CFLAGS += -Wno-strict-prototypes\n+ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\n+CFLAGS += -DMLX5_GLUE='\"$(LIB_GLUE)\"'\n+CFLAGS += -DMLX5_GLUE_VERSION='\"$(LIB_GLUE_VERSION)\"'\n+CFLAGS_mlx5_glue.o += -fPIC\n+LDLIBS += -ldl\n+else ifeq ($(CONFIG_RTE_IBVERBS_LINK_STATIC),y)\n+LDLIBS += $(shell $(RTE_SDK)/buildtools/options-ibverbs-static.sh)\n+else\n+LDLIBS += -libverbs -lmlx5\n+endif\n+\n+LDLIBS += -lrte_eal\n+\n+# A few warnings cannot be avoided in external headers.\n+CFLAGS += -Wno-error=cast-qual -DNDEBUG -UPEDANTIC\n+\n+EXPORT_MAP := rte_common_mlx5_version.map\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\n+\n+# Generate and clean-up mlx5_autoconf.h.\n+\n+export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS\n+export AUTO_CONFIG_CFLAGS = -Wno-error\n+\n+ifndef V\n+AUTOCONF_OUTPUT := >/dev/null\n+endif\n+\n+mlx5_autoconf.h.new: FORCE\n+\n+mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh\n+\t$Q $(RM) -f -- '$@'\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tenum MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_DEVICE_TUNNEL_SUPPORT \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tenum 
MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_DEVICE_MPLS_SUPPORT \\\n+\t\tinfiniband/verbs.h \\\n+\t\tenum IBV_FLOW_SPEC_MPLS \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING \\\n+\t\tinfiniband/verbs.h \\\n+\t\tenum IBV_WQ_FLAGS_PCI_WRITE_END_PADDING \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_WQ_FLAG_RX_END_PADDING \\\n+\t\tinfiniband/verbs.h \\\n+\t\tenum IBV_WQ_FLAG_RX_END_PADDING \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_MLX5_MOD_SWP \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\ttype 'struct mlx5dv_sw_parsing_caps' \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_MLX5_MOD_MPW \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tenum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_MLX5_MOD_CQE_128B_COMP \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tenum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_MLX5_MOD_CQE_128B_PAD \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tenum MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_FLOW_DV_SUPPORT \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tfunc mlx5dv_create_flow_action_packet_reformat \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_MLX5DV_DR \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tenum MLX5DV_DR_DOMAIN_TYPE_NIC_RX \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_MLX5DV_DR_ESWITCH \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tenum MLX5DV_DR_DOMAIN_TYPE_FDB \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_MLX5DV_DR_VLAN \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tfunc mlx5dv_dr_action_create_push_vlan \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_MLX5DV_DR_DEVX_PORT \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tfunc mlx5dv_query_devx_port \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_DEVX_OBJ \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tfunc mlx5dv_devx_obj_create \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_FLOW_DEVX_COUNTERS \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tenum MLX5DV_FLOW_ACTION_COUNTERS_DEVX \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_DEVX_ASYNC \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tfunc mlx5dv_devx_obj_query_async \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tfunc mlx5dv_dr_action_create_dest_devx_tir \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_MLX5_DR_CREATE_ACTION_FLOW_METER \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tfunc mlx5dv_dr_action_create_flow_meter \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_MLX5_DR_FLOW_DUMP \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tfunc mlx5dv_dump_dr_domain \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tenum MLX5_MMAP_GET_NC_PAGES_CMD \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_ETHTOOL_LINK_MODE_25G \\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tenum ETHTOOL_LINK_MODE_25000baseCR_Full_BIT \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_ETHTOOL_LINK_MODE_50G \\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tenum ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_ETHTOOL_LINK_MODE_100G 
\\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tenum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_DEVICE_COUNTERS_SET_V42 \\\n+\t\tinfiniband/verbs.h \\\n+\t\ttype 'struct ibv_counter_set_init_attr' \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IBV_DEVICE_COUNTERS_SET_V45 \\\n+\t\tinfiniband/verbs.h \\\n+\t\ttype 'struct ibv_counters_init_attr' \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_RDMA_NL_NLDEV \\\n+\t\trdma/rdma_netlink.h \\\n+\t\tenum RDMA_NL_NLDEV \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_RDMA_NLDEV_CMD_GET \\\n+\t\trdma/rdma_netlink.h \\\n+\t\tenum RDMA_NLDEV_CMD_GET \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_RDMA_NLDEV_CMD_PORT_GET \\\n+\t\trdma/rdma_netlink.h \\\n+\t\tenum RDMA_NLDEV_CMD_PORT_GET \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_RDMA_NLDEV_ATTR_DEV_INDEX \\\n+\t\trdma/rdma_netlink.h \\\n+\t\tenum RDMA_NLDEV_ATTR_DEV_INDEX \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_RDMA_NLDEV_ATTR_DEV_NAME \\\n+\t\trdma/rdma_netlink.h \\\n+\t\tenum RDMA_NLDEV_ATTR_DEV_NAME \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_RDMA_NLDEV_ATTR_PORT_INDEX \\\n+\t\trdma/rdma_netlink.h \\\n+\t\tenum RDMA_NLDEV_ATTR_PORT_INDEX \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_RDMA_NLDEV_ATTR_NDEV_INDEX \\\n+\t\trdma/rdma_netlink.h \\\n+\t\tenum RDMA_NLDEV_ATTR_NDEV_INDEX \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IFLA_NUM_VF \\\n+\t\tlinux/if_link.h \\\n+\t\tenum IFLA_NUM_VF \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IFLA_EXT_MASK \\\n+\t\tlinux/if_link.h \\\n+\t\tenum IFLA_EXT_MASK \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IFLA_PHYS_SWITCH_ID \\\n+\t\tlinux/if_link.h \\\n+\t\tenum IFLA_PHYS_SWITCH_ID \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_IFLA_PHYS_PORT_NAME \\\n+\t\tlinux/if_link.h \\\n+\t\tenum IFLA_PHYS_PORT_NAME \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_SUPPORTED_40000baseKR4_Full \\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tdefine SUPPORTED_40000baseKR4_Full \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_SUPPORTED_40000baseCR4_Full \\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tdefine SUPPORTED_40000baseCR4_Full \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_SUPPORTED_40000baseSR4_Full \\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tdefine SUPPORTED_40000baseSR4_Full \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_SUPPORTED_40000baseLR4_Full \\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tdefine SUPPORTED_40000baseLR4_Full \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_SUPPORTED_56000baseKR4_Full \\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tdefine SUPPORTED_56000baseKR4_Full \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_SUPPORTED_56000baseCR4_Full \\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tdefine SUPPORTED_56000baseCR4_Full \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_SUPPORTED_56000baseSR4_Full \\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tdefine SUPPORTED_56000baseSR4_Full \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_SUPPORTED_56000baseLR4_Full \\\n+\t\t/usr/include/linux/ethtool.h \\\n+\t\tdefine SUPPORTED_56000baseLR4_Full \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\t$Q sh -- '$<' '$@' \\\n+\t\tHAVE_STATIC_ASSERT 
\\\n+\t\t/usr/include/assert.h \\\n+\t\tdefine static_assert \\\n+\t\t$(AUTOCONF_OUTPUT)\n+\n+# Create mlx5_autoconf.h or update it in case it differs from the new one.\n+\n+mlx5_autoconf.h: mlx5_autoconf.h.new\n+\t$Q [ -f '$@' ] && \\\n+\t\tcmp '$<' '$@' $(AUTOCONF_OUTPUT) || \\\n+\t\tmv '$<' '$@'\n+\n+$(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h\n+\n+# Generate dependency plug-in for rdma-core when the PMD must not be linked\n+# directly, so that applications do not inherit this dependency.\n+\n+ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\n+\n+$(LIB): $(LIB_GLUE)\n+\n+ifeq ($(LINK_USING_CC),1)\n+GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))\n+else\n+GLUE_LDFLAGS := $(LDFLAGS)\n+endif\n+$(LIB_GLUE): mlx5_glue.o\n+\t$Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \\\n+\t\t-Wl,-h,$(LIB_GLUE) \\\n+\t\t-shared -o $@ $< -libverbs -lmlx5\n+\n+mlx5_glue.o: mlx5_autoconf.h\n+\n+endif\n+\n+clean_mlx5: FORCE\n+\t$Q rm -f -- mlx5_autoconf.h mlx5_autoconf.h.new\n+\t$Q rm -f -- mlx5_glue.o $(LIB_GLUE_BASE)*\n+\n+clean: clean_mlx5\ndiff --git a/drivers/common/mlx5/meson.build b/drivers/common/mlx5/meson.build\nnew file mode 100644\nindex 0000000..d2eeb45\n--- /dev/null\n+++ b/drivers/common/mlx5/meson.build\n@@ -0,0 +1,205 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright 2019 Mellanox Technologies, Ltd\n+\n+if not is_linux\n+\tbuild = false\n+\treason = 'only supported on Linux'\n+\tsubdir_done()\n+endif\n+build = true\n+\n+pmd_dlopen = (get_option('ibverbs_link') == 'dlopen')\n+LIB_GLUE_BASE = 'librte_pmd_mlx5_glue.so'\n+LIB_GLUE_VERSION = '20.02.0'\n+LIB_GLUE = LIB_GLUE_BASE + '.' + LIB_GLUE_VERSION\n+if pmd_dlopen\n+\tdpdk_conf.set('RTE_IBVERBS_LINK_DLOPEN', 1)\n+\tcflags += [\n+\t\t'-DMLX5_GLUE=\"@0@\"'.format(LIB_GLUE),\n+\t\t'-DMLX5_GLUE_VERSION=\"@0@\"'.format(LIB_GLUE_VERSION),\n+\t]\n+endif\n+\n+libnames = [ 'mlx5', 'ibverbs' ]\n+libs = []\n+foreach libname:libnames\n+\tlib = dependency('lib' + libname, required:false)\n+\tif not lib.found()\n+\t\tlib = cc.find_library(libname, required:false)\n+\tendif\n+\tif lib.found()\n+\t\tlibs += [ lib ]\n+\telse\n+\t\tbuild = false\n+\t\treason = 'missing dependency, \"' + libname + '\"'\n+\tendif\n+endforeach\n+\n+if build\n+\tallow_experimental_apis = true\n+\tdeps += ['hash', 'pci', 'net', 'eal']\n+\text_deps += libs\n+\tsources = files(\n+\t\t'mlx5_devx_cmds.c',\n+\t\t'mlx5_common.c',\n+\t)\n+\tif not pmd_dlopen\n+\t\tsources += files('mlx5_glue.c')\n+\tendif\n+\tcflags_options = [\n+\t\t'-std=c11',\n+\t\t'-Wno-strict-prototypes',\n+\t\t'-D_BSD_SOURCE',\n+\t\t'-D_DEFAULT_SOURCE',\n+\t\t'-D_XOPEN_SOURCE=600'\n+\t]\n+\tforeach option:cflags_options\n+\t\tif cc.has_argument(option)\n+\t\t\tcflags += option\n+\t\tendif\n+\tendforeach\n+\tif get_option('buildtype').contains('debug')\n+\t\tcflags += [ '-pedantic', '-UNDEBUG', '-DPEDANTIC' ]\n+\telse\n+\t\tcflags += [ '-DNDEBUG', '-UPEDANTIC' ]\n+\tendif\n+\t# To maintain the compatibility with the make build system\n+\t# mlx5_autoconf.h file is still generated.\n+\t# input array for meson member search:\n+\t# [ \"MACRO to define if found\", \"header for the search\",\n+\t#   \"symbol to search\", \"struct member to search\" ]\n+\thas_member_args = [\n+\t\t[ 'HAVE_IBV_MLX5_MOD_SWP', 'infiniband/mlx5dv.h',\n+\t\t'struct mlx5dv_sw_parsing_caps', 'sw_parsing_offloads' ],\n+\t\t[ 'HAVE_IBV_DEVICE_COUNTERS_SET_V42', 'infiniband/verbs.h',\n+\t\t'struct ibv_counter_set_init_attr', 'counter_set_id' ],\n+\t\t[ 'HAVE_IBV_DEVICE_COUNTERS_SET_V45', 'infiniband/verbs.h',\n+\t\t'struct 
ibv_counters_init_attr', 'comp_mask' ],\n+\t]\n+\t# input array for meson symbol search:\n+\t# [ \"MACRO to define if found\", \"header for the search\",\n+\t#   \"symbol to search\" ]\n+\thas_sym_args = [\n+\t\t[ 'HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT', 'infiniband/mlx5dv.h',\n+\t\t'MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX' ],\n+\t\t[ 'HAVE_IBV_DEVICE_TUNNEL_SUPPORT', 'infiniband/mlx5dv.h',\n+\t\t'MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS' ],\n+\t\t[ 'HAVE_IBV_MLX5_MOD_MPW', 'infiniband/mlx5dv.h',\n+\t\t'MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED' ],\n+\t\t[ 'HAVE_IBV_MLX5_MOD_CQE_128B_COMP', 'infiniband/mlx5dv.h',\n+\t\t'MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP' ],\n+\t\t[ 'HAVE_IBV_MLX5_MOD_CQE_128B_PAD', 'infiniband/mlx5dv.h',\n+\t\t'MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD' ],\n+\t\t[ 'HAVE_IBV_FLOW_DV_SUPPORT', 'infiniband/mlx5dv.h',\n+\t\t'mlx5dv_create_flow_action_packet_reformat' ],\n+\t\t[ 'HAVE_IBV_DEVICE_MPLS_SUPPORT', 'infiniband/verbs.h',\n+\t\t'IBV_FLOW_SPEC_MPLS' ],\n+\t\t[ 'HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING', 'infiniband/verbs.h',\n+\t\t'IBV_WQ_FLAGS_PCI_WRITE_END_PADDING' ],\n+\t\t[ 'HAVE_IBV_WQ_FLAG_RX_END_PADDING', 'infiniband/verbs.h',\n+\t\t'IBV_WQ_FLAG_RX_END_PADDING' ],\n+\t\t[ 'HAVE_MLX5DV_DR_DEVX_PORT', 'infiniband/mlx5dv.h',\n+\t\t'mlx5dv_query_devx_port' ],\n+\t\t[ 'HAVE_IBV_DEVX_OBJ', 'infiniband/mlx5dv.h',\n+\t\t'mlx5dv_devx_obj_create' ],\n+\t\t[ 'HAVE_IBV_FLOW_DEVX_COUNTERS', 'infiniband/mlx5dv.h',\n+\t\t'MLX5DV_FLOW_ACTION_COUNTERS_DEVX' ],\n+\t\t[ 'HAVE_IBV_DEVX_ASYNC', 'infiniband/mlx5dv.h',\n+\t\t'mlx5dv_devx_obj_query_async' ],\n+\t\t[ 'HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR', 'infiniband/mlx5dv.h',\n+\t\t'mlx5dv_dr_action_create_dest_devx_tir' ],\n+\t\t[ 'HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER', 'infiniband/mlx5dv.h',\n+\t\t'mlx5dv_dr_action_create_flow_meter' ],\n+\t\t[ 'HAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD', 'infiniband/mlx5dv.h',\n+\t\t'MLX5_MMAP_GET_NC_PAGES_CMD' ],\n+\t\t[ 'HAVE_MLX5DV_DR', 'infiniband/mlx5dv.h',\n+\t\t'MLX5DV_DR_DOMAIN_TYPE_NIC_RX' ],\n+\t\t[ 'HAVE_MLX5DV_DR_ESWITCH', 'infiniband/mlx5dv.h',\n+\t\t'MLX5DV_DR_DOMAIN_TYPE_FDB' ],\n+\t\t[ 'HAVE_MLX5DV_DR_VLAN', 'infiniband/mlx5dv.h',\n+\t\t'mlx5dv_dr_action_create_push_vlan' ],\n+\t\t[ 'HAVE_SUPPORTED_40000baseKR4_Full', 'linux/ethtool.h',\n+\t\t'SUPPORTED_40000baseKR4_Full' ],\n+\t\t[ 'HAVE_SUPPORTED_40000baseCR4_Full', 'linux/ethtool.h',\n+\t\t'SUPPORTED_40000baseCR4_Full' ],\n+\t\t[ 'HAVE_SUPPORTED_40000baseSR4_Full', 'linux/ethtool.h',\n+\t\t'SUPPORTED_40000baseSR4_Full' ],\n+\t\t[ 'HAVE_SUPPORTED_40000baseLR4_Full', 'linux/ethtool.h',\n+\t\t'SUPPORTED_40000baseLR4_Full' ],\n+\t\t[ 'HAVE_SUPPORTED_56000baseKR4_Full', 'linux/ethtool.h',\n+\t\t'SUPPORTED_56000baseKR4_Full' ],\n+\t\t[ 'HAVE_SUPPORTED_56000baseCR4_Full', 'linux/ethtool.h',\n+\t\t'SUPPORTED_56000baseCR4_Full' ],\n+\t\t[ 'HAVE_SUPPORTED_56000baseSR4_Full', 'linux/ethtool.h',\n+\t\t'SUPPORTED_56000baseSR4_Full' ],\n+\t\t[ 'HAVE_SUPPORTED_56000baseLR4_Full', 'linux/ethtool.h',\n+\t\t'SUPPORTED_56000baseLR4_Full' ],\n+\t\t[ 'HAVE_ETHTOOL_LINK_MODE_25G', 'linux/ethtool.h',\n+\t\t'ETHTOOL_LINK_MODE_25000baseCR_Full_BIT' ],\n+\t\t[ 'HAVE_ETHTOOL_LINK_MODE_50G', 'linux/ethtool.h',\n+\t\t'ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT' ],\n+\t\t[ 'HAVE_ETHTOOL_LINK_MODE_100G', 'linux/ethtool.h',\n+\t\t'ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT' ],\n+\t\t[ 'HAVE_IFLA_NUM_VF', 'linux/if_link.h',\n+\t\t'IFLA_NUM_VF' ],\n+\t\t[ 'HAVE_IFLA_EXT_MASK', 'linux/if_link.h',\n+\t\t'IFLA_EXT_MASK' ],\n+\t\t[ 'HAVE_IFLA_PHYS_SWITCH_ID', 'linux/if_link.h',\n+\t\t'IFLA_PHYS_SWITCH_ID' 
],\n+\t\t[ 'HAVE_IFLA_PHYS_PORT_NAME', 'linux/if_link.h',\n+\t\t'IFLA_PHYS_PORT_NAME' ],\n+\t\t[ 'HAVE_RDMA_NL_NLDEV', 'rdma/rdma_netlink.h',\n+\t\t'RDMA_NL_NLDEV' ],\n+\t\t[ 'HAVE_RDMA_NLDEV_CMD_GET', 'rdma/rdma_netlink.h',\n+\t\t'RDMA_NLDEV_CMD_GET' ],\n+\t\t[ 'HAVE_RDMA_NLDEV_CMD_PORT_GET', 'rdma/rdma_netlink.h',\n+\t\t'RDMA_NLDEV_CMD_PORT_GET' ],\n+\t\t[ 'HAVE_RDMA_NLDEV_ATTR_DEV_INDEX', 'rdma/rdma_netlink.h',\n+\t\t'RDMA_NLDEV_ATTR_DEV_INDEX' ],\n+\t\t[ 'HAVE_RDMA_NLDEV_ATTR_DEV_NAME', 'rdma/rdma_netlink.h',\n+\t\t'RDMA_NLDEV_ATTR_DEV_NAME' ],\n+\t\t[ 'HAVE_RDMA_NLDEV_ATTR_PORT_INDEX', 'rdma/rdma_netlink.h',\n+\t\t'RDMA_NLDEV_ATTR_PORT_INDEX' ],\n+\t\t[ 'HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX', 'rdma/rdma_netlink.h',\n+\t\t'RDMA_NLDEV_ATTR_NDEV_INDEX' ],\n+\t\t[ 'HAVE_MLX5_DR_FLOW_DUMP', 'infiniband/mlx5dv.h',\n+\t\t'mlx5dv_dump_dr_domain'],\n+\t]\n+\tconfig = configuration_data()\n+\tforeach arg:has_sym_args\n+\t\tconfig.set(arg[0], cc.has_header_symbol(arg[1], arg[2],\n+\t\t\tdependencies: libs))\n+\tendforeach\n+\tforeach arg:has_member_args\n+\t\tfile_prefix = '#include <' + arg[1] + '>'\n+\t\tconfig.set(arg[0], cc.has_member(arg[2], arg[3],\n+\t\t\tprefix : file_prefix, dependencies: libs))\n+\tendforeach\n+\tconfigure_file(output : 'mlx5_autoconf.h', configuration : config)\n+endif\n+# Build Glue Library\n+if pmd_dlopen and build\n+\tdlopen_name = 'mlx5_glue'\n+\tdlopen_lib_name = driver_name_fmt.format(dlopen_name)\n+\tdlopen_so_version = LIB_GLUE_VERSION\n+\tdlopen_sources = files('mlx5_glue.c')\n+\tdlopen_install_dir = [ eal_pmd_path + '-glue' ]\n+\tdlopen_includes = [global_inc]\n+\tdlopen_includes += include_directories(\n+\t\t'../../../lib/librte_eal/common/include/generic',\n+\t)\n+\tshared_lib = shared_library(\n+\t\tdlopen_lib_name,\n+\t\tdlopen_sources,\n+\t\tinclude_directories: dlopen_includes,\n+\t\tc_args: cflags,\n+\t\tdependencies: libs,\n+\t\tlink_args: [\n+\t\t'-Wl,-export-dynamic',\n+\t\t'-Wl,-h,@0@'.format(LIB_GLUE),\n+\t\t],\n+\t\tsoversion: dlopen_so_version,\n+\t\tinstall: true,\n+\t\tinstall_dir: dlopen_install_dir,\n+\t)\n+endif\ndiff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c\nnew file mode 100644\nindex 0000000..14ebd30\n--- /dev/null\n+++ b/drivers/common/mlx5/mlx5_common.c\n@@ -0,0 +1,17 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2019 Mellanox Technologies, Ltd\n+ */\n+\n+#include \"mlx5_common.h\"\n+\n+\n+int mlx5_common_logtype;\n+\n+\n+RTE_INIT(rte_mlx5_common_pmd_init)\n+{\n+\t/* Initialize driver log type. */\n+\tmlx5_common_logtype = rte_log_register(\"pmd.common.mlx5\");\n+\tif (mlx5_common_logtype >= 0)\n+\t\trte_log_set_level(mlx5_common_logtype, RTE_LOG_NOTICE);\n+}\ndiff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h\nnew file mode 100644\nindex 0000000..9f10def\n--- /dev/null\n+++ b/drivers/common/mlx5/mlx5_common.h\n@@ -0,0 +1,87 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2019 Mellanox Technologies, Ltd\n+ */\n+\n+#ifndef RTE_PMD_MLX5_COMMON_H_\n+#define RTE_PMD_MLX5_COMMON_H_\n+\n+#include <assert.h>\n+\n+#include <rte_log.h>\n+\n+\n+/*\n+ * Helper macros to work around __VA_ARGS__ limitations in a C99 compliant\n+ * manner.\n+ */\n+#define PMD_DRV_LOG_STRIP(a, b) a\n+#define PMD_DRV_LOG_OPAREN (\n+#define PMD_DRV_LOG_CPAREN )\n+#define PMD_DRV_LOG_COMMA ,\n+\n+/* Return the file name part of a path. 
*/\n+static inline const char *\n+pmd_drv_log_basename(const char *s)\n+{\n+\tconst char *n = s;\n+\n+\twhile (*n)\n+\t\tif (*(n++) == '/')\n+\t\t\ts = n;\n+\treturn s;\n+}\n+\n+#define PMD_DRV_LOG___(level, type, name, ...) \\\n+\trte_log(RTE_LOG_ ## level, \\\n+\t\ttype, \\\n+\t\tRTE_FMT(name \": \" \\\n+\t\t\tRTE_FMT_HEAD(__VA_ARGS__,), \\\n+\t\tRTE_FMT_TAIL(__VA_ARGS__,)))\n+\n+/*\n+ * When debugging is enabled (NDEBUG not defined), file, line and function\n+ * information replace the driver name (MLX5_DRIVER_NAME) in log messages.\n+ */\n+#ifndef NDEBUG\n+\n+#define PMD_DRV_LOG__(level, type, name, ...) \\\n+\tPMD_DRV_LOG___(level, type, name, \"%s:%u: %s(): \" __VA_ARGS__)\n+#define PMD_DRV_LOG_(level, type, name, s, ...) \\\n+\tPMD_DRV_LOG__(level, type, name,\\\n+\t\ts \"\\n\" PMD_DRV_LOG_COMMA \\\n+\t\tpmd_drv_log_basename(__FILE__) PMD_DRV_LOG_COMMA \\\n+\t\t__LINE__ PMD_DRV_LOG_COMMA \\\n+\t\t__func__, \\\n+\t\t__VA_ARGS__)\n+\n+#else /* NDEBUG */\n+#define PMD_DRV_LOG__(level, type, name, ...) \\\n+\tPMD_DRV_LOG___(level, type, name, __VA_ARGS__)\n+#define PMD_DRV_LOG_(level, type, name, s, ...) \\\n+\tPMD_DRV_LOG__(level, type, name, s \"\\n\", __VA_ARGS__)\n+\n+#endif /* NDEBUG */\n+\n+/* claim_zero() does not perform any check when debugging is disabled. */\n+#ifndef NDEBUG\n+\n+#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)\n+#define claim_zero(...) assert((__VA_ARGS__) == 0)\n+#define claim_nonzero(...) assert((__VA_ARGS__) != 0)\n+\n+#else /* NDEBUG */\n+\n+#define DEBUG(...) (void)0\n+#define claim_zero(...) (__VA_ARGS__)\n+#define claim_nonzero(...) (__VA_ARGS__)\n+\n+#endif /* NDEBUG */\n+\n+/* Allocate a buffer on the stack and fill it with a printf format string. */\n+#define MKSTR(name, ...) \\\n+\tint mkstr_size_##name = snprintf(NULL, 0, \"\" __VA_ARGS__); \\\n+\tchar name[mkstr_size_##name + 1]; \\\n+\t\\\n+\tsnprintf(name, sizeof(name), \"\" __VA_ARGS__)\n+\n+#endif /* RTE_PMD_MLX5_COMMON_H_ */\ndiff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h\nnew file mode 100644\nindex 0000000..32c3adf\n--- /dev/null\n+++ b/drivers/common/mlx5/mlx5_common_utils.h\n@@ -0,0 +1,20 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2019 Mellanox Technologies, Ltd\n+ */\n+\n+#ifndef RTE_PMD_MLX5_COMMON_UTILS_H_\n+#define RTE_PMD_MLX5_COMMON_UTILS_H_\n+\n+#include \"mlx5_common.h\"\n+\n+\n+extern int mlx5_common_logtype;\n+\n+#define MLX5_COMMON_LOG_PREFIX \"common_mlx5\"\n+/* Generic printf()-like logging macro with automatic line feed. */\n+#define DRV_LOG(level, ...) 
\\\n+\tPMD_DRV_LOG_(level, mlx5_common_logtype, MLX5_COMMON_LOG_PREFIX, \\\n+\t\t__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \\\n+\t\tPMD_DRV_LOG_CPAREN)\n+\n+#endif /* RTE_PMD_MLX5_COMMON_UTILS_H_ */\ndiff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c\nnew file mode 100644\nindex 0000000..67e5929\n--- /dev/null\n+++ b/drivers/common/mlx5/mlx5_devx_cmds.c\n@@ -0,0 +1,974 @@\n+// SPDX-License-Identifier: BSD-3-Clause\n+/* Copyright 2018 Mellanox Technologies, Ltd */\n+\n+#include <unistd.h>\n+\n+#include <rte_errno.h>\n+#include <rte_malloc.h>\n+\n+#include \"mlx5_prm.h\"\n+#include \"mlx5_devx_cmds.h\"\n+#include \"mlx5_common_utils.h\"\n+\n+\n+/**\n+ * Allocate flow counters via devx interface.\n+ *\n+ * @param[in] ctx\n+ *   ibv contexts returned from mlx5dv_open_device.\n+ * @param dcs\n+ *   Pointer to counters properties structure to be filled by the routine.\n+ * @param bulk_n_128\n+ *   Bulk counter numbers in 128 counters units.\n+ *\n+ * @return\n+ *   Pointer to counter object on success, a negative value otherwise and\n+ *   rte_errno is set.\n+ */\n+struct mlx5_devx_obj *\n+mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx, uint32_t bulk_n_128)\n+{\n+\tstruct mlx5_devx_obj *dcs = rte_zmalloc(\"dcs\", sizeof(*dcs), 0);\n+\tuint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};\n+\n+\tif (!dcs) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tMLX5_SET(alloc_flow_counter_in, in, opcode,\n+\t\t MLX5_CMD_OP_ALLOC_FLOW_COUNTER);\n+\tMLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);\n+\tdcs->obj = mlx5_glue->devx_obj_create(ctx, in,\n+\t\t\t\t\t      sizeof(in), out, sizeof(out));\n+\tif (!dcs->obj) {\n+\t\tDRV_LOG(ERR, \"Can't allocate counters - error %d\", errno);\n+\t\trte_errno = errno;\n+\t\trte_free(dcs);\n+\t\treturn NULL;\n+\t}\n+\tdcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);\n+\treturn dcs;\n+}\n+\n+/**\n+ * Query flow counters values.\n+ *\n+ * @param[in] dcs\n+ *   devx object that was obtained from mlx5_devx_cmd_fc_alloc.\n+ * @param[in] clear\n+ *   Whether hardware should clear the counters after the query or not.\n+ * @param[in] n_counters\n+ *   0 in case of 1 counter to read, otherwise the counter number to read.\n+ *  @param pkts\n+ *   The number of packets that matched the flow.\n+ *  @param bytes\n+ *    The number of bytes that matched the flow.\n+ *  @param mkey\n+ *   The mkey key for batch query.\n+ *  @param addr\n+ *    The address in the mkey range for batch query.\n+ *  @param cmd_comp\n+ *   The completion object for asynchronous batch query.\n+ *  @param async_id\n+ *    The ID to be returned in the asynchronous batch query response.\n+ *\n+ * @return\n+ *   0 on success, a negative value otherwise.\n+ */\n+int\n+mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,\n+\t\t\t\t int clear, uint32_t n_counters,\n+\t\t\t\t uint64_t *pkts, uint64_t *bytes,\n+\t\t\t\t uint32_t mkey, void *addr,\n+\t\t\t\t struct mlx5dv_devx_cmd_comp *cmd_comp,\n+\t\t\t\t uint64_t async_id)\n+{\n+\tint out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +\n+\t\t\tMLX5_ST_SZ_BYTES(traffic_counter);\n+\tuint32_t out[out_len];\n+\tuint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};\n+\tvoid *stats;\n+\tint rc;\n+\n+\tMLX5_SET(query_flow_counter_in, in, opcode,\n+\t\t MLX5_CMD_OP_QUERY_FLOW_COUNTER);\n+\tMLX5_SET(query_flow_counter_in, in, op_mod, 0);\n+\tMLX5_SET(query_flow_counter_in, in, flow_counter_id, 
dcs->id);\n+\tMLX5_SET(query_flow_counter_in, in, clear, !!clear);\n+\n+\tif (n_counters) {\n+\t\tMLX5_SET(query_flow_counter_in, in, num_of_counters,\n+\t\t\t n_counters);\n+\t\tMLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);\n+\t\tMLX5_SET(query_flow_counter_in, in, mkey, mkey);\n+\t\tMLX5_SET64(query_flow_counter_in, in, address,\n+\t\t\t   (uint64_t)(uintptr_t)addr);\n+\t}\n+\tif (!cmd_comp)\n+\t\trc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,\n+\t\t\t\t\t       out_len);\n+\telse\n+\t\trc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),\n+\t\t\t\t\t\t     out_len, async_id,\n+\t\t\t\t\t\t     cmd_comp);\n+\tif (rc) {\n+\t\tDRV_LOG(ERR, \"Failed to query devx counters with rc %d\", rc);\n+\t\trte_errno = rc;\n+\t\treturn -rc;\n+\t}\n+\tif (!n_counters) {\n+\t\tstats = MLX5_ADDR_OF(query_flow_counter_out,\n+\t\t\t\t     out, flow_statistics);\n+\t\t*pkts = MLX5_GET64(traffic_counter, stats, packets);\n+\t\t*bytes = MLX5_GET64(traffic_counter, stats, octets);\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * Create a new mkey.\n+ *\n+ * @param[in] ctx\n+ *   ibv contexts returned from mlx5dv_open_device.\n+ * @param[in] attr\n+ *   Attributes of the requested mkey.\n+ *\n+ * @return\n+ *   Pointer to Devx mkey on success, a negative value otherwise and rte_errno\n+ *   is set.\n+ */\n+struct mlx5_devx_obj *\n+mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,\n+\t\t\t  struct mlx5_devx_mkey_attr *attr)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(create_mkey_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};\n+\tvoid *mkc;\n+\tstruct mlx5_devx_obj *mkey = rte_zmalloc(\"mkey\", sizeof(*mkey), 0);\n+\tsize_t pgsize;\n+\tuint32_t translation_size;\n+\n+\tif (!mkey) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tpgsize = sysconf(_SC_PAGESIZE);\n+\ttranslation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;\n+\tMLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);\n+\tMLX5_SET(create_mkey_in, in, translations_octword_actual_size,\n+\t\t translation_size);\n+\tMLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);\n+\tmkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);\n+\tMLX5_SET(mkc, mkc, lw, 0x1);\n+\tMLX5_SET(mkc, mkc, lr, 0x1);\n+\tMLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);\n+\tMLX5_SET(mkc, mkc, qpn, 0xffffff);\n+\tMLX5_SET(mkc, mkc, pd, attr->pd);\n+\tMLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);\n+\tMLX5_SET(mkc, mkc, translations_octword_size, translation_size);\n+\tMLX5_SET64(mkc, mkc, start_addr, attr->addr);\n+\tMLX5_SET64(mkc, mkc, len, attr->size);\n+\tMLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));\n+\tmkey->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,\n+\t\t\t\t\t       sizeof(out));\n+\tif (!mkey->obj) {\n+\t\tDRV_LOG(ERR, \"Can't create mkey - error %d\", errno);\n+\t\trte_errno = errno;\n+\t\trte_free(mkey);\n+\t\treturn NULL;\n+\t}\n+\tmkey->id = MLX5_GET(create_mkey_out, out, mkey_index);\n+\tmkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);\n+\treturn mkey;\n+}\n+\n+/**\n+ * Get status of devx command response.\n+ * Mainly used for asynchronous commands.\n+ *\n+ * @param[in] out\n+ *   The out response buffer.\n+ *\n+ * @return\n+ *   0 on success, non-zero value otherwise.\n+ */\n+int\n+mlx5_devx_get_out_command_status(void *out)\n+{\n+\tint status;\n+\n+\tif (!out)\n+\t\treturn -EINVAL;\n+\tstatus = MLX5_GET(query_flow_counter_out, out, status);\n+\tif (status) {\n+\t\tint syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);\n+\n+\t\tDRV_LOG(ERR, 
\"Bad devX status %x, syndrome = %x\", status,\n+\t\t\tsyndrome);\n+\t}\n+\treturn status;\n+}\n+\n+/**\n+ * Destroy any object allocated by a Devx API.\n+ *\n+ * @param[in] obj\n+ *   Pointer to a general object.\n+ *\n+ * @return\n+ *   0 on success, a negative value otherwise.\n+ */\n+int\n+mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)\n+{\n+\tint ret;\n+\n+\tif (!obj)\n+\t\treturn 0;\n+\tret =  mlx5_glue->devx_obj_destroy(obj->obj);\n+\trte_free(obj);\n+\treturn ret;\n+}\n+\n+/**\n+ * Query NIC vport context.\n+ * Fills minimal inline attribute.\n+ *\n+ * @param[in] ctx\n+ *   ibv contexts returned from mlx5dv_open_device.\n+ * @param[in] vport\n+ *   vport index\n+ * @param[out] attr\n+ *   Attributes device values.\n+ *\n+ * @return\n+ *   0 on success, a negative value otherwise.\n+ */\n+static int\n+mlx5_devx_cmd_query_nic_vport_context(struct ibv_context *ctx,\n+\t\t\t\t      unsigned int vport,\n+\t\t\t\t      struct mlx5_hca_attr *attr)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};\n+\tvoid *vctx;\n+\tint status, syndrome, rc;\n+\n+\t/* Query NIC vport context to determine inline mode. */\n+\tMLX5_SET(query_nic_vport_context_in, in, opcode,\n+\t\t MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);\n+\tMLX5_SET(query_nic_vport_context_in, in, vport_number, vport);\n+\tif (vport)\n+\t\tMLX5_SET(query_nic_vport_context_in, in, other_vport, 1);\n+\trc = mlx5_glue->devx_general_cmd(ctx,\n+\t\t\t\t\t in, sizeof(in),\n+\t\t\t\t\t out, sizeof(out));\n+\tif (rc)\n+\t\tgoto error;\n+\tstatus = MLX5_GET(query_nic_vport_context_out, out, status);\n+\tsyndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);\n+\tif (status) {\n+\t\tDRV_LOG(DEBUG, \"Failed to query NIC vport context, \"\n+\t\t\t\"status %x, syndrome = %x\",\n+\t\t\tstatus, syndrome);\n+\t\treturn -1;\n+\t}\n+\tvctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,\n+\t\t\t    nic_vport_context);\n+\tattr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,\n+\t\t\t\t\t   min_wqe_inline_mode);\n+\treturn 0;\n+error:\n+\trc = (rc > 0) ? 
-rc : rc;\n+\treturn rc;\n+}\n+\n+/**\n+ * Query HCA attributes.\n+ * Using those attributes we can check on run time if the device\n+ * is having the required capabilities.\n+ *\n+ * @param[in] ctx\n+ *   ibv contexts returned from mlx5dv_open_device.\n+ * @param[out] attr\n+ *   Attributes device values.\n+ *\n+ * @return\n+ *   0 on success, a negative value otherwise.\n+ */\n+int\n+mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,\n+\t\t\t     struct mlx5_hca_attr *attr)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};\n+\tvoid *hcattr;\n+\tint status, syndrome, rc;\n+\n+\tMLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);\n+\tMLX5_SET(query_hca_cap_in, in, op_mod,\n+\t\t MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |\n+\t\t MLX5_HCA_CAP_OPMOD_GET_CUR);\n+\n+\trc = mlx5_glue->devx_general_cmd(ctx,\n+\t\t\t\t\t in, sizeof(in), out, sizeof(out));\n+\tif (rc)\n+\t\tgoto error;\n+\tstatus = MLX5_GET(query_hca_cap_out, out, status);\n+\tsyndrome = MLX5_GET(query_hca_cap_out, out, syndrome);\n+\tif (status) {\n+\t\tDRV_LOG(DEBUG, \"Failed to query devx HCA capabilities, \"\n+\t\t\t\"status %x, syndrome = %x\",\n+\t\t\tstatus, syndrome);\n+\t\treturn -1;\n+\t}\n+\thcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);\n+\tattr->flow_counter_bulk_alloc_bitmap =\n+\t\t\tMLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);\n+\tattr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,\n+\t\t\t\t\t    flow_counters_dump);\n+\tattr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);\n+\tattr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);\n+\tattr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,\n+\t\t\t\t\t\tlog_max_hairpin_queues);\n+\tattr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,\n+\t\t\t\t\t\t    log_max_hairpin_wq_data_sz);\n+\tattr->log_max_hairpin_num_packets = MLX5_GET\n+\t\t(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);\n+\tattr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);\n+\tattr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,\n+\t\t\t\t\t  eth_net_offloads);\n+\tattr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);\n+\tattr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,\n+\t\t\t\t\t       flex_parser_protocols);\n+\tattr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);\n+\tif (attr->qos.sup) {\n+\t\tMLX5_SET(query_hca_cap_in, in, op_mod,\n+\t\t\t MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |\n+\t\t\t MLX5_HCA_CAP_OPMOD_GET_CUR);\n+\t\trc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),\n+\t\t\t\t\t\t out, sizeof(out));\n+\t\tif (rc)\n+\t\t\tgoto error;\n+\t\tif (status) {\n+\t\t\tDRV_LOG(DEBUG, \"Failed to query devx QOS capabilities,\"\n+\t\t\t\t\" status %x, syndrome = %x\",\n+\t\t\t\tstatus, syndrome);\n+\t\t\treturn -1;\n+\t\t}\n+\t\thcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);\n+\t\tattr->qos.srtcm_sup =\n+\t\t\t\tMLX5_GET(qos_cap, hcattr, flow_meter_srtcm);\n+\t\tattr->qos.log_max_flow_meter =\n+\t\t\t\tMLX5_GET(qos_cap, hcattr, log_max_flow_meter);\n+\t\tattr->qos.flow_meter_reg_c_ids =\n+\t\t\tMLX5_GET(qos_cap, hcattr, flow_meter_reg_id);\n+\t}\n+\tif (!attr->eth_net_offloads)\n+\t\treturn 0;\n+\n+\t/* Query HCA offloads for Ethernet protocol. 
*/\n+\tmemset(in, 0, sizeof(in));\n+\tmemset(out, 0, sizeof(out));\n+\tMLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);\n+\tMLX5_SET(query_hca_cap_in, in, op_mod,\n+\t\t MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |\n+\t\t MLX5_HCA_CAP_OPMOD_GET_CUR);\n+\n+\trc = mlx5_glue->devx_general_cmd(ctx,\n+\t\t\t\t\t in, sizeof(in),\n+\t\t\t\t\t out, sizeof(out));\n+\tif (rc) {\n+\t\tattr->eth_net_offloads = 0;\n+\t\tgoto error;\n+\t}\n+\tstatus = MLX5_GET(query_hca_cap_out, out, status);\n+\tsyndrome = MLX5_GET(query_hca_cap_out, out, syndrome);\n+\tif (status) {\n+\t\tDRV_LOG(DEBUG, \"Failed to query devx HCA capabilities, \"\n+\t\t\t\"status %x, syndrome = %x\",\n+\t\t\tstatus, syndrome);\n+\t\tattr->eth_net_offloads = 0;\n+\t\treturn -1;\n+\t}\n+\thcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);\n+\tattr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,\n+\t\t\t\t\t hcattr, wqe_vlan_insert);\n+\tattr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,\n+\t\t\t\t lro_cap);\n+\tattr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,\n+\t\t\t\t\thcattr, tunnel_lro_gre);\n+\tattr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,\n+\t\t\t\t\t  hcattr, tunnel_lro_vxlan);\n+\tattr->lro_max_msg_sz_mode = MLX5_GET\n+\t\t\t\t\t(per_protocol_networking_offload_caps,\n+\t\t\t\t\t hcattr, lro_max_msg_sz_mode);\n+\tfor (int i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {\n+\t\tattr->lro_timer_supported_periods[i] =\n+\t\t\tMLX5_GET(per_protocol_networking_offload_caps, hcattr,\n+\t\t\t\t lro_timer_supported_periods[i]);\n+\t}\n+\tattr->tunnel_stateless_geneve_rx =\n+\t\t\t    MLX5_GET(per_protocol_networking_offload_caps,\n+\t\t\t\t     hcattr, tunnel_stateless_geneve_rx);\n+\tattr->geneve_max_opt_len =\n+\t\t    MLX5_GET(per_protocol_networking_offload_caps,\n+\t\t\t     hcattr, max_geneve_opt_len);\n+\tattr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,\n+\t\t\t\t\t hcattr, wqe_inline_mode);\n+\tattr->tunnel_stateless_gtp = MLX5_GET\n+\t\t\t\t\t(per_protocol_networking_offload_caps,\n+\t\t\t\t\t hcattr, tunnel_stateless_gtp);\n+\tif (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)\n+\t\treturn 0;\n+\tif (attr->eth_virt) {\n+\t\trc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);\n+\t\tif (rc) {\n+\t\t\tattr->eth_virt = 0;\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n+\treturn 0;\n+error:\n+\trc = (rc > 0) ? 
-rc : rc;\n+\treturn rc;\n+}\n+\n+/**\n+ * Query TIS transport domain from QP verbs object using DevX API.\n+ *\n+ * @param[in] qp\n+ *   Pointer to verbs QP returned by ibv_create_qp .\n+ * @param[in] tis_num\n+ *   TIS number of TIS to query.\n+ * @param[out] tis_td\n+ *   Pointer to TIS transport domain variable, to be set by the routine.\n+ *\n+ * @return\n+ *   0 on success, a negative value otherwise.\n+ */\n+int\n+mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,\n+\t\t\t      uint32_t *tis_td)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};\n+\tint rc;\n+\tvoid *tis_ctx;\n+\n+\tMLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);\n+\tMLX5_SET(query_tis_in, in, tisn, tis_num);\n+\trc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));\n+\tif (rc) {\n+\t\tDRV_LOG(ERR, \"Failed to query QP using DevX\");\n+\t\treturn -rc;\n+\t};\n+\ttis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);\n+\t*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);\n+\treturn 0;\n+}\n+\n+/**\n+ * Fill WQ data for DevX API command.\n+ * Utility function for use when creating DevX objects containing a WQ.\n+ *\n+ * @param[in] wq_ctx\n+ *   Pointer to WQ context to fill with data.\n+ * @param [in] wq_attr\n+ *   Pointer to WQ attributes structure to fill in WQ context.\n+ */\n+static void\n+devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)\n+{\n+\tMLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);\n+\tMLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);\n+\tMLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);\n+\tMLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);\n+\tMLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);\n+\tMLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);\n+\tMLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);\n+\tMLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);\n+\tMLX5_SET(wq, wq_ctx, pd, wq_attr->pd);\n+\tMLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);\n+\tMLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);\n+\tMLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);\n+\tMLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);\n+\tMLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);\n+\tMLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);\n+\tMLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);\n+\tMLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);\n+\tMLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);\n+\tMLX5_SET(wq, wq_ctx, log_hairpin_num_packets,\n+\t\t wq_attr->log_hairpin_num_packets);\n+\tMLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);\n+\tMLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,\n+\t\t wq_attr->single_wqe_log_num_of_strides);\n+\tMLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);\n+\tMLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,\n+\t\t wq_attr->single_stride_log_num_of_bytes);\n+\tMLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);\n+\tMLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);\n+\tMLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);\n+}\n+\n+/**\n+ * Create RQ using DevX API.\n+ *\n+ * @param[in] ctx\n+ *   ibv_context returned from mlx5dv_open_device.\n+ * @param [in] rq_attr\n+ *   Pointer to create RQ attributes structure.\n+ * @param [in] socket\n+ *   CPU socket ID for allocations.\n+ *\n+ * @return\n+ *   The DevX object created, NULL otherwise and rte_errno is set.\n+ */\n+struct 
mlx5_devx_obj *\n+mlx5_devx_cmd_create_rq(struct ibv_context *ctx,\n+\t\t\tstruct mlx5_devx_create_rq_attr *rq_attr,\n+\t\t\tint socket)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};\n+\tvoid *rq_ctx, *wq_ctx;\n+\tstruct mlx5_devx_wq_attr *wq_attr;\n+\tstruct mlx5_devx_obj *rq = NULL;\n+\n+\trq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);\n+\tif (!rq) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate RQ data\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tMLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);\n+\trq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);\n+\tMLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);\n+\tMLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);\n+\tMLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);\n+\tMLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);\n+\tMLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);\n+\tMLX5_SET(rqc, rq_ctx, state, rq_attr->state);\n+\tMLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);\n+\tMLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);\n+\tMLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);\n+\tMLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);\n+\tMLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);\n+\tMLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);\n+\twq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);\n+\twq_attr = &rq_attr->wq_attr;\n+\tdevx_cmd_fill_wq_data(wq_ctx, wq_attr);\n+\trq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),\n+\t\t\t\t\t\t  out, sizeof(out));\n+\tif (!rq->obj) {\n+\t\tDRV_LOG(ERR, \"Failed to create RQ using DevX\");\n+\t\trte_errno = errno;\n+\t\trte_free(rq);\n+\t\treturn NULL;\n+\t}\n+\trq->id = MLX5_GET(create_rq_out, out, rqn);\n+\treturn rq;\n+}\n+\n+/**\n+ * Modify RQ using DevX API.\n+ *\n+ * @param[in] rq\n+ *   Pointer to RQ object structure.\n+ * @param [in] rq_attr\n+ *   Pointer to modify RQ attributes structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,\n+\t\t\tstruct mlx5_devx_modify_rq_attr *rq_attr)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};\n+\tvoid *rq_ctx, *wq_ctx;\n+\tint ret;\n+\n+\tMLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);\n+\tMLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);\n+\tMLX5_SET(modify_rq_in, in, rqn, rq->id);\n+\tMLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);\n+\trq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);\n+\tMLX5_SET(rqc, rq_ctx, state, rq_attr->state);\n+\tif (rq_attr->modify_bitmask &\n+\t\t\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)\n+\t\tMLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);\n+\tif (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)\n+\t\tMLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);\n+\tif (rq_attr->modify_bitmask &\n+\t\t\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)\n+\t\tMLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);\n+\tMLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);\n+\tMLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);\n+\tif (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {\n+\t\twq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);\n+\t\tMLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);\n+\t}\n+\tret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),\n+\t\t\t\t\t out, sizeof(out));\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to modify RQ 
using DevX\");\n+\t\trte_errno = errno;\n+\t\treturn -errno;\n+\t}\n+\treturn ret;\n+}\n+\n+/**\n+ * Create TIR using DevX API.\n+ *\n+ * @param[in] ctx\n+ *   ibv_context returned from mlx5dv_open_device.\n+ * @param [in] tir_attr\n+ *   Pointer to TIR attributes structure.\n+ *\n+ * @return\n+ *   The DevX object created, NULL otherwise and rte_errno is set.\n+ */\n+struct mlx5_devx_obj *\n+mlx5_devx_cmd_create_tir(struct ibv_context *ctx,\n+\t\t\t struct mlx5_devx_tir_attr *tir_attr)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};\n+\tvoid *tir_ctx, *outer, *inner;\n+\tstruct mlx5_devx_obj *tir = NULL;\n+\tint i;\n+\n+\ttir = rte_calloc(__func__, 1, sizeof(*tir), 0);\n+\tif (!tir) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate TIR data\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tMLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);\n+\ttir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);\n+\tMLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);\n+\tMLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,\n+\t\t tir_attr->lro_timeout_period_usecs);\n+\tMLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);\n+\tMLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);\n+\tMLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);\n+\tMLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);\n+\tMLX5_SET(tirc, tir_ctx, tunneled_offload_en,\n+\t\t tir_attr->tunneled_offload_en);\n+\tMLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);\n+\tMLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);\n+\tMLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);\n+\tMLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);\n+\tfor (i = 0; i < 10; i++) {\n+\t\tMLX5_SET(tirc, tir_ctx, rx_hash_toeplitz_key[i],\n+\t\t\t tir_attr->rx_hash_toeplitz_key[i]);\n+\t}\n+\touter = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);\n+\tMLX5_SET(rx_hash_field_select, outer, l3_prot_type,\n+\t\t tir_attr->rx_hash_field_selector_outer.l3_prot_type);\n+\tMLX5_SET(rx_hash_field_select, outer, l4_prot_type,\n+\t\t tir_attr->rx_hash_field_selector_outer.l4_prot_type);\n+\tMLX5_SET(rx_hash_field_select, outer, selected_fields,\n+\t\t tir_attr->rx_hash_field_selector_outer.selected_fields);\n+\tinner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);\n+\tMLX5_SET(rx_hash_field_select, inner, l3_prot_type,\n+\t\t tir_attr->rx_hash_field_selector_inner.l3_prot_type);\n+\tMLX5_SET(rx_hash_field_select, inner, l4_prot_type,\n+\t\t tir_attr->rx_hash_field_selector_inner.l4_prot_type);\n+\tMLX5_SET(rx_hash_field_select, inner, selected_fields,\n+\t\t tir_attr->rx_hash_field_selector_inner.selected_fields);\n+\ttir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),\n+\t\t\t\t\t\t   out, sizeof(out));\n+\tif (!tir->obj) {\n+\t\tDRV_LOG(ERR, \"Failed to create TIR using DevX\");\n+\t\trte_errno = errno;\n+\t\trte_free(tir);\n+\t\treturn NULL;\n+\t}\n+\ttir->id = MLX5_GET(create_tir_out, out, tirn);\n+\treturn tir;\n+}\n+\n+/**\n+ * Create RQT using DevX API.\n+ *\n+ * @param[in] ctx\n+ *   ibv_context returned from mlx5dv_open_device.\n+ * @param [in] rqt_attr\n+ *   Pointer to RQT attributes structure.\n+ *\n+ * @return\n+ *   The DevX object created, NULL otherwise and rte_errno is set.\n+ */\n+struct mlx5_devx_obj *\n+mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,\n+\t\t\t struct mlx5_devx_rqt_attr *rqt_attr)\n+{\n+\tuint32_t *in = NULL;\n+\tuint32_t inlen 
= MLX5_ST_SZ_BYTES(create_rqt_in) +\n+\t\t\t rqt_attr->rqt_actual_size * sizeof(uint32_t);\n+\tuint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};\n+\tvoid *rqt_ctx;\n+\tstruct mlx5_devx_obj *rqt = NULL;\n+\tint i;\n+\n+\tin = rte_calloc(__func__, 1, inlen, 0);\n+\tif (!in) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate RQT IN data\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\trqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);\n+\tif (!rqt) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate RQT data\");\n+\t\trte_errno = ENOMEM;\n+\t\trte_free(in);\n+\t\treturn NULL;\n+\t}\n+\tMLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);\n+\trqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);\n+\tMLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);\n+\tMLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);\n+\tfor (i = 0; i < rqt_attr->rqt_actual_size; i++)\n+\t\tMLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);\n+\trqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));\n+\trte_free(in);\n+\tif (!rqt->obj) {\n+\t\tDRV_LOG(ERR, \"Failed to create RQT using DevX\");\n+\t\trte_errno = errno;\n+\t\trte_free(rqt);\n+\t\treturn NULL;\n+\t}\n+\trqt->id = MLX5_GET(create_rqt_out, out, rqtn);\n+\treturn rqt;\n+}\n+\n+/**\n+ * Create SQ using DevX API.\n+ *\n+ * @param[in] ctx\n+ *   ibv_context returned from mlx5dv_open_device.\n+ * @param [in] sq_attr\n+ *   Pointer to SQ attributes structure.\n+ *\n+ * @return\n+ *   The DevX object created, NULL otherwise and rte_errno is set.\n+ */\n+struct mlx5_devx_obj *\n+mlx5_devx_cmd_create_sq(struct ibv_context *ctx,\n+\t\t\tstruct mlx5_devx_create_sq_attr *sq_attr)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};\n+\tvoid *sq_ctx;\n+\tvoid *wq_ctx;\n+\tstruct mlx5_devx_wq_attr *wq_attr;\n+\tstruct mlx5_devx_obj *sq = NULL;\n+\n+\tsq = rte_calloc(__func__, 1, sizeof(*sq), 0);\n+\tif (!sq) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate SQ data\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tMLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);\n+\tsq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);\n+\tMLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);\n+\tMLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);\n+\tMLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);\n+\tMLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);\n+\tMLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,\n+\t\t sq_attr->allow_multi_pkt_send_wqe);\n+\tMLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,\n+\t\t sq_attr->min_wqe_inline_mode);\n+\tMLX5_SET(sqc, sq_ctx, state, sq_attr->state);\n+\tMLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);\n+\tMLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);\n+\tMLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);\n+\tMLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);\n+\tMLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);\n+\tMLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,\n+\t\t sq_attr->packet_pacing_rate_limit_index);\n+\tMLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);\n+\tMLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);\n+\twq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);\n+\twq_attr = &sq_attr->wq_attr;\n+\tdevx_cmd_fill_wq_data(wq_ctx, wq_attr);\n+\tsq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),\n+\t\t\t\t\t     out, sizeof(out));\n+\tif (!sq->obj) {\n+\t\tDRV_LOG(ERR, \"Failed to create SQ using DevX\");\n+\t\trte_errno = errno;\n+\t\trte_free(sq);\n+\t\treturn NULL;\n+\t}\n+\tsq->id = MLX5_GET(create_sq_out, out, sqn);\n+\treturn sq;\n+}\n+\n+/**\n+ * Modify SQ using DevX API.\n+ *\n+ * @param[in] sq\n+ *   Pointer to SQ object structure.\n+ * @param [in] sq_attr\n+ *   Pointer to SQ attributes structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+int\n+mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,\n+\t\t\tstruct mlx5_devx_modify_sq_attr *sq_attr)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};\n+\tvoid *sq_ctx;\n+\tint ret;\n+\n+\tMLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);\n+\tMLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);\n+\tMLX5_SET(modify_sq_in, in, sqn, sq->id);\n+\tsq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);\n+\tMLX5_SET(sqc, sq_ctx, state, sq_attr->state);\n+\tMLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);\n+\tMLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);\n+\tret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),\n+\t\t\t\t\t out, sizeof(out));\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to modify SQ using DevX\");\n+\t\trte_errno = errno;\n+\t\treturn -errno;\n+\t}\n+\treturn ret;\n+}\n+\n+/**\n+ * Create TIS using DevX API.\n+ *\n+ * @param[in] ctx\n+ *   ibv_context returned from mlx5dv_open_device.\n+ * @param [in] tis_attr\n+ *   Pointer to TIS attributes structure.\n+ *\n+ * @return\n+ *   The DevX object created, NULL otherwise and rte_errno is set.\n+ */\n+struct mlx5_devx_obj *\n+mlx5_devx_cmd_create_tis(struct ibv_context *ctx,\n+\t\t\t struct mlx5_devx_tis_attr *tis_attr)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};\n+\tstruct mlx5_devx_obj *tis = NULL;\n+\tvoid *tis_ctx;\n+\n+\ttis = rte_calloc(__func__, 1, sizeof(*tis), 0);\n+\tif (!tis) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate TIS object\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tMLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);\n+\ttis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);\n+\tMLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,\n+\t\t tis_attr->strict_lag_tx_port_affinity);\n+\tMLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);\n+\tMLX5_SET(tisc, tis_ctx, transport_domain,\n+\t\t tis_attr->transport_domain);\n+\ttis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),\n+\t\t\t\t\t      out, sizeof(out));\n+\tif (!tis->obj) {\n+\t\tDRV_LOG(ERR, \"Failed to create TIS using DevX\");\n+\t\trte_errno = errno;\n+\t\trte_free(tis);\n+\t\treturn NULL;\n+\t}\n+\ttis->id = MLX5_GET(create_tis_out, out, tisn);\n+\treturn tis;\n+}\n+\n+/**\n+ * Create transport domain using DevX API.\n+ *\n+ * @param[in] ctx\n+ *   ibv_context returned from mlx5dv_open_device.\n+ *\n+ * @return\n+ *   The DevX object created, NULL otherwise and rte_errno is set.\n+ */\n+struct mlx5_devx_obj *\n+mlx5_devx_cmd_create_td(struct ibv_context *ctx)\n+{\n+\tuint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};\n+\tuint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};\n+\tstruct mlx5_devx_obj *td = NULL;\n+\n+\ttd = rte_calloc(__func__, 1, sizeof(*td), 0);\n+\tif (!td) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate TD object\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\tMLX5_SET(alloc_transport_domain_in, in, opcode,\n+\t\t MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);\n+\ttd->obj = 
mlx5_glue->devx_obj_create(ctx, in, sizeof(in),\n+\t\t\t\t\t     out, sizeof(out));\n+\tif (!td->obj) {\n+\t\tDRV_LOG(ERR, \"Failed to create TD using DevX\");\n+\t\trte_errno = errno;\n+\t\trte_free(td);\n+\t\treturn NULL;\n+\t}\n+\ttd->id = MLX5_GET(alloc_transport_domain_out, out,\n+\t\t\t   transport_domain);\n+\treturn td;\n+}\n+\n+/**\n+ * Dump all flows to file.\n+ *\n+ * @param[in] fdb_domain\n+ *   FDB domain.\n+ * @param[in] rx_domain\n+ *   RX domain.\n+ * @param[in] tx_domain\n+ *   TX domain.\n+ * @param[out] file\n+ *   Pointer to file stream.\n+ *\n+ * @return\n+ *   0 on success, a negative value otherwise.\n+ */\n+int\n+mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,\n+\t\t\tvoid *rx_domain __rte_unused,\n+\t\t\tvoid *tx_domain __rte_unused, FILE *file __rte_unused)\n+{\n+\tint ret = 0;\n+\n+#ifdef HAVE_MLX5_DR_FLOW_DUMP\n+\tif (fdb_domain) {\n+\t\tret = mlx5_glue->dr_dump_domain(file, fdb_domain);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\tassert(rx_domain);\n+\tret = mlx5_glue->dr_dump_domain(file, rx_domain);\n+\tif (ret)\n+\t\treturn ret;\n+\tassert(tx_domain);\n+\tret = mlx5_glue->dr_dump_domain(file, tx_domain);\n+#else\n+\tret = ENOTSUP;\n+#endif\n+\treturn -ret;\n+}\ndiff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h\nnew file mode 100644\nindex 0000000..0c5afde\n--- /dev/null\n+++ b/drivers/common/mlx5/mlx5_devx_cmds.h\n@@ -0,0 +1,227 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2019 Mellanox Technologies, Ltd\n+ */\n+\n+#ifndef RTE_PMD_MLX5_DEVX_CMDS_H_\n+#define RTE_PMD_MLX5_DEVX_CMDS_H_\n+\n+#include \"mlx5_glue.h\"\n+\n+/* devX creation object */\n+struct mlx5_devx_obj {\n+\tstruct mlx5dv_devx_obj *obj; /* The DV object. */\n+\tint id; /* The object ID. */\n+};\n+\n+struct mlx5_devx_mkey_attr {\n+\tuint64_t addr;\n+\tuint64_t size;\n+\tuint32_t umem_id;\n+\tuint32_t pd;\n+};\n+\n+/* HCA qos attributes. */\n+struct mlx5_hca_qos_attr {\n+\tuint32_t sup:1;\t/* Whether QOS is supported. */\n+\tuint32_t srtcm_sup:1; /* Whether srTCM mode is supported. */\n+\tuint8_t log_max_flow_meter;\n+\t/* Power of the maximum supported meters. */\n+\tuint8_t flow_meter_reg_c_ids;\n+\t/* Bitmap of the reg_Cs available for flow meter to use. */\n+\n+};\n+\n+/* HCA supports this number of time periods for LRO. 
*/\n+#define MLX5_LRO_NUM_SUPP_PERIODS 4\n+\n+struct mlx5_hca_attr {\n+\tuint32_t eswitch_manager:1;\n+\tuint32_t flow_counters_dump:1;\n+\tuint8_t flow_counter_bulk_alloc_bitmap;\n+\tuint32_t eth_net_offloads:1;\n+\tuint32_t eth_virt:1;\n+\tuint32_t wqe_vlan_insert:1;\n+\tuint32_t wqe_inline_mode:2;\n+\tuint32_t vport_inline_mode:3;\n+\tuint32_t tunnel_stateless_geneve_rx:1;\n+\tuint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */\n+\tuint32_t tunnel_stateless_gtp:1;\n+\tuint32_t lro_cap:1;\n+\tuint32_t tunnel_lro_gre:1;\n+\tuint32_t tunnel_lro_vxlan:1;\n+\tuint32_t lro_max_msg_sz_mode:2;\n+\tuint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];\n+\tuint32_t flex_parser_protocols;\n+\tuint32_t hairpin:1;\n+\tuint32_t log_max_hairpin_queues:5;\n+\tuint32_t log_max_hairpin_wq_data_sz:5;\n+\tuint32_t log_max_hairpin_num_packets:5;\n+\tuint32_t vhca_id:16;\n+\tstruct mlx5_hca_qos_attr qos;\n+};\n+\n+struct mlx5_devx_wq_attr {\n+\tuint32_t wq_type:4;\n+\tuint32_t wq_signature:1;\n+\tuint32_t end_padding_mode:2;\n+\tuint32_t cd_slave:1;\n+\tuint32_t hds_skip_first_sge:1;\n+\tuint32_t log2_hds_buf_size:3;\n+\tuint32_t page_offset:5;\n+\tuint32_t lwm:16;\n+\tuint32_t pd:24;\n+\tuint32_t uar_page:24;\n+\tuint64_t dbr_addr;\n+\tuint32_t hw_counter;\n+\tuint32_t sw_counter;\n+\tuint32_t log_wq_stride:4;\n+\tuint32_t log_wq_pg_sz:5;\n+\tuint32_t log_wq_sz:5;\n+\tuint32_t dbr_umem_valid:1;\n+\tuint32_t wq_umem_valid:1;\n+\tuint32_t log_hairpin_num_packets:5;\n+\tuint32_t log_hairpin_data_sz:5;\n+\tuint32_t single_wqe_log_num_of_strides:4;\n+\tuint32_t two_byte_shift_en:1;\n+\tuint32_t single_stride_log_num_of_bytes:3;\n+\tuint32_t dbr_umem_id;\n+\tuint32_t wq_umem_id;\n+\tuint64_t wq_umem_offset;\n+};\n+\n+/* Create RQ attributes structure, used by create RQ operation. */\n+struct mlx5_devx_create_rq_attr {\n+\tuint32_t rlky:1;\n+\tuint32_t delay_drop_en:1;\n+\tuint32_t scatter_fcs:1;\n+\tuint32_t vsd:1;\n+\tuint32_t mem_rq_type:4;\n+\tuint32_t state:4;\n+\tuint32_t flush_in_error_en:1;\n+\tuint32_t hairpin:1;\n+\tuint32_t user_index:24;\n+\tuint32_t cqn:24;\n+\tuint32_t counter_set_id:8;\n+\tuint32_t rmpn:24;\n+\tstruct mlx5_devx_wq_attr wq_attr;\n+};\n+\n+/* Modify RQ attributes structure, used by modify RQ operation. */\n+struct mlx5_devx_modify_rq_attr {\n+\tuint32_t rqn:24;\n+\tuint32_t rq_state:4; /* Current RQ state. */\n+\tuint32_t state:4; /* Required RQ state. */\n+\tuint32_t scatter_fcs:1;\n+\tuint32_t vsd:1;\n+\tuint32_t counter_set_id:8;\n+\tuint32_t hairpin_peer_sq:24;\n+\tuint32_t hairpin_peer_vhca:16;\n+\tuint64_t modify_bitmask;\n+\tuint32_t lwm:16; /* Contained WQ lwm. */\n+};\n+\n+struct mlx5_rx_hash_field_select {\n+\tuint32_t l3_prot_type:1;\n+\tuint32_t l4_prot_type:1;\n+\tuint32_t selected_fields:30;\n+};\n+\n+/* TIR attributes structure, used by TIR operations. */\n+struct mlx5_devx_tir_attr {\n+\tuint32_t disp_type:4;\n+\tuint32_t lro_timeout_period_usecs:16;\n+\tuint32_t lro_enable_mask:4;\n+\tuint32_t lro_max_msg_sz:8;\n+\tuint32_t inline_rqn:24;\n+\tuint32_t rx_hash_symmetric:1;\n+\tuint32_t tunneled_offload_en:1;\n+\tuint32_t indirect_table:24;\n+\tuint32_t rx_hash_fn:4;\n+\tuint32_t self_lb_block:2;\n+\tuint32_t transport_domain:24;\n+\tuint32_t rx_hash_toeplitz_key[10];\n+\tstruct mlx5_rx_hash_field_select rx_hash_field_selector_outer;\n+\tstruct mlx5_rx_hash_field_select rx_hash_field_selector_inner;\n+};\n+\n+/* RQT attributes structure, used by RQT operations. 
*/\n+struct mlx5_devx_rqt_attr {\n+\tuint32_t rqt_max_size:16;\n+\tuint32_t rqt_actual_size:16;\n+\tuint32_t rq_list[];\n+};\n+\n+/* TIS attributes structure. */\n+struct mlx5_devx_tis_attr {\n+\tuint32_t strict_lag_tx_port_affinity:1;\n+\tuint32_t tls_en:1;\n+\tuint32_t lag_tx_port_affinity:4;\n+\tuint32_t prio:4;\n+\tuint32_t transport_domain:24;\n+};\n+\n+/* SQ attributes structure, used by SQ create operation. */\n+struct mlx5_devx_create_sq_attr {\n+\tuint32_t rlky:1;\n+\tuint32_t cd_master:1;\n+\tuint32_t fre:1;\n+\tuint32_t flush_in_error_en:1;\n+\tuint32_t allow_multi_pkt_send_wqe:1;\n+\tuint32_t min_wqe_inline_mode:3;\n+\tuint32_t state:4;\n+\tuint32_t reg_umr:1;\n+\tuint32_t allow_swp:1;\n+\tuint32_t hairpin:1;\n+\tuint32_t user_index:24;\n+\tuint32_t cqn:24;\n+\tuint32_t packet_pacing_rate_limit_index:16;\n+\tuint32_t tis_lst_sz:16;\n+\tuint32_t tis_num:24;\n+\tstruct mlx5_devx_wq_attr wq_attr;\n+};\n+\n+/* SQ attributes structure, used by SQ modify operation. */\n+struct mlx5_devx_modify_sq_attr {\n+\tuint32_t sq_state:4;\n+\tuint32_t state:4;\n+\tuint32_t hairpin_peer_rq:24;\n+\tuint32_t hairpin_peer_vhca:16;\n+};\n+\n+/* mlx5_devx_cmds.c */\n+\n+struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx,\n+\t\t\t\t\t\t       uint32_t bulk_sz);\n+int mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj);\n+int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,\n+\t\t\t\t     int clear, uint32_t n_counters,\n+\t\t\t\t     uint64_t *pkts, uint64_t *bytes,\n+\t\t\t\t     uint32_t mkey, void *addr,\n+\t\t\t\t     struct mlx5dv_devx_cmd_comp *cmd_comp,\n+\t\t\t\t     uint64_t async_id);\n+int mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,\n+\t\t\t\t struct mlx5_hca_attr *attr);\n+struct mlx5_devx_obj *mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,\n+\t\t\t\t\t      struct mlx5_devx_mkey_attr *attr);\n+int mlx5_devx_get_out_command_status(void *out);\n+int mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,\n+\t\t\t\t  uint32_t *tis_td);\n+struct mlx5_devx_obj *mlx5_devx_cmd_create_rq(struct ibv_context *ctx,\n+\t\t\t\t       struct mlx5_devx_create_rq_attr *rq_attr,\n+\t\t\t\t       int socket);\n+int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,\n+\t\t\t    struct mlx5_devx_modify_rq_attr *rq_attr);\n+struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(struct ibv_context *ctx,\n+\t\t\t\t\t   struct mlx5_devx_tir_attr *tir_attr);\n+struct mlx5_devx_obj *mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,\n+\t\t\t\t\t   struct mlx5_devx_rqt_attr *rqt_attr);\n+struct mlx5_devx_obj *mlx5_devx_cmd_create_sq(struct ibv_context *ctx,\n+\t\t\t\t      struct mlx5_devx_create_sq_attr *sq_attr);\n+int mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,\n+\t\t\t    struct mlx5_devx_modify_sq_attr *sq_attr);\n+struct mlx5_devx_obj *mlx5_devx_cmd_create_tis(struct ibv_context *ctx,\n+\t\t\t\t\t   struct mlx5_devx_tis_attr *tis_attr);\n+struct mlx5_devx_obj *mlx5_devx_cmd_create_td(struct ibv_context *ctx);\n+int mlx5_devx_cmd_flow_dump(void *fdb_domain, void *rx_domain, void *tx_domain,\n+\t\t\t    FILE *file);\n+#endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */\ndiff --git a/drivers/common/mlx5/mlx5_glue.c b/drivers/common/mlx5/mlx5_glue.c\nnew file mode 100644\nindex 0000000..d5bc84e\n--- /dev/null\n+++ b/drivers/common/mlx5/mlx5_glue.c\n@@ -0,0 +1,1138 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2018 6WIND S.A.\n+ * Copyright 2018 Mellanox Technologies, Ltd\n+ */\n+\n+#include <errno.h>\n+#include <stdalign.h>\n+#include 
<stddef.h>\n+#include <stdint.h>\n+#include <stdlib.h>\n+/*\n+ * Not needed by this file; included to work around the lack of off_t\n+ * definition for mlx5dv.h with unpatched rdma-core versions.\n+ */\n+#include <sys/types.h>\n+\n+#include <rte_config.h>\n+\n+#include \"mlx5_glue.h\"\n+\n+static int\n+mlx5_glue_fork_init(void)\n+{\n+\treturn ibv_fork_init();\n+}\n+\n+static struct ibv_pd *\n+mlx5_glue_alloc_pd(struct ibv_context *context)\n+{\n+\treturn ibv_alloc_pd(context);\n+}\n+\n+static int\n+mlx5_glue_dealloc_pd(struct ibv_pd *pd)\n+{\n+\treturn ibv_dealloc_pd(pd);\n+}\n+\n+static struct ibv_device **\n+mlx5_glue_get_device_list(int *num_devices)\n+{\n+\treturn ibv_get_device_list(num_devices);\n+}\n+\n+static void\n+mlx5_glue_free_device_list(struct ibv_device **list)\n+{\n+\tibv_free_device_list(list);\n+}\n+\n+static struct ibv_context *\n+mlx5_glue_open_device(struct ibv_device *device)\n+{\n+\treturn ibv_open_device(device);\n+}\n+\n+static int\n+mlx5_glue_close_device(struct ibv_context *context)\n+{\n+\treturn ibv_close_device(context);\n+}\n+\n+static int\n+mlx5_glue_query_device(struct ibv_context *context,\n+\t\t       struct ibv_device_attr *device_attr)\n+{\n+\treturn ibv_query_device(context, device_attr);\n+}\n+\n+static int\n+mlx5_glue_query_device_ex(struct ibv_context *context,\n+\t\t\t  const struct ibv_query_device_ex_input *input,\n+\t\t\t  struct ibv_device_attr_ex *attr)\n+{\n+\treturn ibv_query_device_ex(context, input, attr);\n+}\n+\n+static int\n+mlx5_glue_query_rt_values_ex(struct ibv_context *context,\n+\t\t\t  struct ibv_values_ex *values)\n+{\n+\treturn ibv_query_rt_values_ex(context, values);\n+}\n+\n+static int\n+mlx5_glue_query_port(struct ibv_context *context, uint8_t port_num,\n+\t\t     struct ibv_port_attr *port_attr)\n+{\n+\treturn ibv_query_port(context, port_num, port_attr);\n+}\n+\n+static struct ibv_comp_channel *\n+mlx5_glue_create_comp_channel(struct ibv_context *context)\n+{\n+\treturn ibv_create_comp_channel(context);\n+}\n+\n+static int\n+mlx5_glue_destroy_comp_channel(struct ibv_comp_channel *channel)\n+{\n+\treturn ibv_destroy_comp_channel(channel);\n+}\n+\n+static struct ibv_cq *\n+mlx5_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,\n+\t\t    struct ibv_comp_channel *channel, int comp_vector)\n+{\n+\treturn ibv_create_cq(context, cqe, cq_context, channel, comp_vector);\n+}\n+\n+static int\n+mlx5_glue_destroy_cq(struct ibv_cq *cq)\n+{\n+\treturn ibv_destroy_cq(cq);\n+}\n+\n+static int\n+mlx5_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,\n+\t\t       void **cq_context)\n+{\n+\treturn ibv_get_cq_event(channel, cq, cq_context);\n+}\n+\n+static void\n+mlx5_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)\n+{\n+\tibv_ack_cq_events(cq, nevents);\n+}\n+\n+static struct ibv_rwq_ind_table *\n+mlx5_glue_create_rwq_ind_table(struct ibv_context *context,\n+\t\t\t       struct ibv_rwq_ind_table_init_attr *init_attr)\n+{\n+\treturn ibv_create_rwq_ind_table(context, init_attr);\n+}\n+\n+static int\n+mlx5_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)\n+{\n+\treturn ibv_destroy_rwq_ind_table(rwq_ind_table);\n+}\n+\n+static struct ibv_wq *\n+mlx5_glue_create_wq(struct ibv_context *context,\n+\t\t    struct ibv_wq_init_attr *wq_init_attr)\n+{\n+\treturn ibv_create_wq(context, wq_init_attr);\n+}\n+\n+static int\n+mlx5_glue_destroy_wq(struct ibv_wq *wq)\n+{\n+\treturn ibv_destroy_wq(wq);\n+}\n+static int\n+mlx5_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr 
*wq_attr)\n+{\n+\treturn ibv_modify_wq(wq, wq_attr);\n+}\n+\n+static struct ibv_flow *\n+mlx5_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)\n+{\n+\treturn ibv_create_flow(qp, flow);\n+}\n+\n+static int\n+mlx5_glue_destroy_flow(struct ibv_flow *flow_id)\n+{\n+\treturn ibv_destroy_flow(flow_id);\n+}\n+\n+static int\n+mlx5_glue_destroy_flow_action(void *action)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_action_destroy(action);\n+#else\n+\tstruct mlx5dv_flow_action_attr *attr = action;\n+\tint res = 0;\n+\tswitch (attr->type) {\n+\tcase MLX5DV_FLOW_ACTION_TAG:\n+\t\tbreak;\n+\tdefault:\n+\t\tres = ibv_destroy_flow_action(attr->action);\n+\t\tbreak;\n+\t}\n+\tfree(action);\n+\treturn res;\n+#endif\n+#else\n+\t(void)action;\n+\treturn ENOTSUP;\n+#endif\n+}\n+\n+static struct ibv_qp *\n+mlx5_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)\n+{\n+\treturn ibv_create_qp(pd, qp_init_attr);\n+}\n+\n+static struct ibv_qp *\n+mlx5_glue_create_qp_ex(struct ibv_context *context,\n+\t\t       struct ibv_qp_init_attr_ex *qp_init_attr_ex)\n+{\n+\treturn ibv_create_qp_ex(context, qp_init_attr_ex);\n+}\n+\n+static int\n+mlx5_glue_destroy_qp(struct ibv_qp *qp)\n+{\n+\treturn ibv_destroy_qp(qp);\n+}\n+\n+static int\n+mlx5_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)\n+{\n+\treturn ibv_modify_qp(qp, attr, attr_mask);\n+}\n+\n+static struct ibv_mr *\n+mlx5_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)\n+{\n+\treturn ibv_reg_mr(pd, addr, length, access);\n+}\n+\n+static int\n+mlx5_glue_dereg_mr(struct ibv_mr *mr)\n+{\n+\treturn ibv_dereg_mr(mr);\n+}\n+\n+static struct ibv_counter_set *\n+mlx5_glue_create_counter_set(struct ibv_context *context,\n+\t\t\t     struct ibv_counter_set_init_attr *init_attr)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n+\t(void)context;\n+\t(void)init_attr;\n+\treturn NULL;\n+#else\n+\treturn ibv_create_counter_set(context, init_attr);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_destroy_counter_set(struct ibv_counter_set *cs)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n+\t(void)cs;\n+\treturn ENOTSUP;\n+#else\n+\treturn ibv_destroy_counter_set(cs);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_describe_counter_set(struct ibv_context *context,\n+\t\t\t       uint16_t counter_set_id,\n+\t\t\t       struct ibv_counter_set_description *cs_desc)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n+\t(void)context;\n+\t(void)counter_set_id;\n+\t(void)cs_desc;\n+\treturn ENOTSUP;\n+#else\n+\treturn ibv_describe_counter_set(context, counter_set_id, cs_desc);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,\n+\t\t\t    struct ibv_counter_set_data *cs_data)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n+\t(void)query_attr;\n+\t(void)cs_data;\n+\treturn ENOTSUP;\n+#else\n+\treturn ibv_query_counter_set(query_attr, cs_data);\n+#endif\n+}\n+\n+static struct ibv_counters *\n+mlx5_glue_create_counters(struct ibv_context *context,\n+\t\t\t  struct ibv_counters_init_attr *init_attr)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n+\t(void)context;\n+\t(void)init_attr;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#else\n+\treturn ibv_create_counters(context, init_attr);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_destroy_counters(struct ibv_counters *counters)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n+\t(void)counters;\n+\treturn ENOTSUP;\n+#else\n+\treturn ibv_destroy_counters(counters);\n+#endif\n+}\n+\n+static 
int\n+mlx5_glue_attach_counters(struct ibv_counters *counters,\n+\t\t\t  struct ibv_counter_attach_attr *attr,\n+\t\t\t  struct ibv_flow *flow)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n+\t(void)counters;\n+\t(void)attr;\n+\t(void)flow;\n+\treturn ENOTSUP;\n+#else\n+\treturn ibv_attach_counters_point_flow(counters, attr, flow);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_query_counters(struct ibv_counters *counters,\n+\t\t\t uint64_t *counters_value,\n+\t\t\t uint32_t ncounters,\n+\t\t\t uint32_t flags)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n+\t(void)counters;\n+\t(void)counters_value;\n+\t(void)ncounters;\n+\t(void)flags;\n+\treturn ENOTSUP;\n+#else\n+\treturn ibv_read_counters(counters, counters_value, ncounters, flags);\n+#endif\n+}\n+\n+static void\n+mlx5_glue_ack_async_event(struct ibv_async_event *event)\n+{\n+\tibv_ack_async_event(event);\n+}\n+\n+static int\n+mlx5_glue_get_async_event(struct ibv_context *context,\n+\t\t\t  struct ibv_async_event *event)\n+{\n+\treturn ibv_get_async_event(context, event);\n+}\n+\n+static const char *\n+mlx5_glue_port_state_str(enum ibv_port_state port_state)\n+{\n+\treturn ibv_port_state_str(port_state);\n+}\n+\n+static struct ibv_cq *\n+mlx5_glue_cq_ex_to_cq(struct ibv_cq_ex *cq)\n+{\n+\treturn ibv_cq_ex_to_cq(cq);\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_dest_flow_tbl(void *tbl)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_action_create_dest_table(tbl);\n+#else\n+\t(void)tbl;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_dest_port(void *domain, uint32_t port)\n+{\n+#ifdef HAVE_MLX5DV_DR_DEVX_PORT\n+\treturn mlx5dv_dr_action_create_dest_ib_port(domain, port);\n+#else\n+#ifdef HAVE_MLX5DV_DR_ESWITCH\n+\treturn mlx5dv_dr_action_create_dest_vport(domain, port);\n+#else\n+\t(void)domain;\n+\t(void)port;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_drop(void)\n+{\n+#ifdef HAVE_MLX5DV_DR_ESWITCH\n+\treturn mlx5dv_dr_action_create_drop();\n+#else\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_push_vlan(struct mlx5dv_dr_domain *domain,\n+\t\t\t\t\t  rte_be32_t vlan_tag)\n+{\n+#ifdef HAVE_MLX5DV_DR_VLAN\n+\treturn mlx5dv_dr_action_create_push_vlan(domain, vlan_tag);\n+#else\n+\t(void)domain;\n+\t(void)vlan_tag;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_pop_vlan(void)\n+{\n+#ifdef HAVE_MLX5DV_DR_VLAN\n+\treturn mlx5dv_dr_action_create_pop_vlan();\n+#else\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_tbl(void *domain, uint32_t level)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_table_create(domain, level);\n+#else\n+\t(void)domain;\n+\t(void)level;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dr_destroy_flow_tbl(void *tbl)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_table_destroy(tbl);\n+#else\n+\t(void)tbl;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_domain(struct ibv_context *ctx,\n+\t\t\t   enum  mlx5dv_dr_domain_type domain)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_domain_create(ctx, domain);\n+#else\n+\t(void)ctx;\n+\t(void)domain;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dr_destroy_domain(void *domain)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn 
mlx5dv_dr_domain_destroy(domain);\n+#else\n+\t(void)domain;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static struct ibv_cq_ex *\n+mlx5_glue_dv_create_cq(struct ibv_context *context,\n+\t\t       struct ibv_cq_init_attr_ex *cq_attr,\n+\t\t       struct mlx5dv_cq_init_attr *mlx5_cq_attr)\n+{\n+\treturn mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);\n+}\n+\n+static struct ibv_wq *\n+mlx5_glue_dv_create_wq(struct ibv_context *context,\n+\t\t       struct ibv_wq_init_attr *wq_attr,\n+\t\t       struct mlx5dv_wq_init_attr *mlx5_wq_attr)\n+{\n+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT\n+\t(void)context;\n+\t(void)wq_attr;\n+\t(void)mlx5_wq_attr;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#else\n+\treturn mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dv_query_device(struct ibv_context *ctx,\n+\t\t\t  struct mlx5dv_context *attrs_out)\n+{\n+\treturn mlx5dv_query_device(ctx, attrs_out);\n+}\n+\n+static int\n+mlx5_glue_dv_set_context_attr(struct ibv_context *ibv_ctx,\n+\t\t\t      enum mlx5dv_set_ctx_attr_type type, void *attr)\n+{\n+\treturn mlx5dv_set_context_attr(ibv_ctx, type, attr);\n+}\n+\n+static int\n+mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)\n+{\n+\treturn mlx5dv_init_obj(obj, obj_type);\n+}\n+\n+static struct ibv_qp *\n+mlx5_glue_dv_create_qp(struct ibv_context *context,\n+\t\t       struct ibv_qp_init_attr_ex *qp_init_attr_ex,\n+\t\t       struct mlx5dv_qp_init_attr *dv_qp_init_attr)\n+{\n+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n+\treturn mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr);\n+#else\n+\t(void)context;\n+\t(void)qp_init_attr_ex;\n+\t(void)dv_qp_init_attr;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_matcher(struct ibv_context *context,\n+\t\t\t\t struct mlx5dv_flow_matcher_attr *matcher_attr,\n+\t\t\t\t void *tbl)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\t(void)context;\n+\treturn mlx5dv_dr_matcher_create(tbl, matcher_attr->priority,\n+\t\t\t\t\tmatcher_attr->match_criteria_enable,\n+\t\t\t\t\tmatcher_attr->match_mask);\n+#else\n+\t(void)tbl;\n+\treturn mlx5dv_create_flow_matcher(context, matcher_attr);\n+#endif\n+#else\n+\t(void)context;\n+\t(void)matcher_attr;\n+\t(void)tbl;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow(void *matcher,\n+\t\t\t void *match_value,\n+\t\t\t size_t num_actions,\n+\t\t\t void *actions[])\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_rule_create(matcher, match_value, num_actions,\n+\t\t\t\t     (struct mlx5dv_dr_action **)actions);\n+#else\n+\tstruct mlx5dv_flow_action_attr actions_attr[8];\n+\n+\tif (num_actions > 8)\n+\t\treturn NULL;\n+\tfor (size_t i = 0; i < num_actions; i++)\n+\t\tactions_attr[i] =\n+\t\t\t*((struct mlx5dv_flow_action_attr *)(actions[i]));\n+\treturn mlx5dv_create_flow(matcher, match_value,\n+\t\t\t\t  num_actions, actions_attr);\n+#endif\n+#else\n+\t(void)matcher;\n+\t(void)match_value;\n+\t(void)num_actions;\n+\t(void)actions;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_counter(void *counter_obj, uint32_t offset)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_action_create_flow_counter(counter_obj, offset);\n+#else\n+\tstruct mlx5dv_flow_action_attr *action;\n+\n+\t(void)offset;\n+\taction = malloc(sizeof(*action));\n+\tif (!action)\n+\t\treturn NULL;\n+\taction->type = 
MLX5DV_FLOW_ACTION_COUNTERS_DEVX;\n+\taction->obj = counter_obj;\n+\treturn action;\n+#endif\n+#else\n+\t(void)counter_obj;\n+\t(void)offset;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_dest_ibv_qp(void *qp)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_action_create_dest_ibv_qp(qp);\n+#else\n+\tstruct mlx5dv_flow_action_attr *action;\n+\n+\taction = malloc(sizeof(*action));\n+\tif (!action)\n+\t\treturn NULL;\n+\taction->type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;\n+\taction->obj = qp;\n+\treturn action;\n+#endif\n+#else\n+\t(void)qp;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_dest_devx_tir(void *tir)\n+{\n+#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR\n+\treturn mlx5dv_dr_action_create_dest_devx_tir(tir);\n+#else\n+\t(void)tir;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_modify_header\n+\t\t\t\t\t(struct ibv_context *ctx,\n+\t\t\t\t\t enum mlx5dv_flow_table_type ft_type,\n+\t\t\t\t\t void *domain, uint64_t flags,\n+\t\t\t\t\t size_t actions_sz,\n+\t\t\t\t\t uint64_t actions[])\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\t(void)ctx;\n+\t(void)ft_type;\n+\treturn mlx5dv_dr_action_create_modify_header(domain, flags, actions_sz,\n+\t\t\t\t\t\t     (__be64 *)actions);\n+#else\n+\tstruct mlx5dv_flow_action_attr *action;\n+\n+\t(void)domain;\n+\t(void)flags;\n+\taction = malloc(sizeof(*action));\n+\tif (!action)\n+\t\treturn NULL;\n+\taction->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;\n+\taction->action = mlx5dv_create_flow_action_modify_header\n+\t\t(ctx, actions_sz, actions, ft_type);\n+\treturn action;\n+#endif\n+#else\n+\t(void)ctx;\n+\t(void)ft_type;\n+\t(void)domain;\n+\t(void)flags;\n+\t(void)actions_sz;\n+\t(void)actions;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_packet_reformat\n+\t\t(struct ibv_context *ctx,\n+\t\t enum mlx5dv_flow_action_packet_reformat_type reformat_type,\n+\t\t enum mlx5dv_flow_table_type ft_type,\n+\t\t struct mlx5dv_dr_domain *domain,\n+\t\t uint32_t flags, size_t data_sz, void *data)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\t(void)ctx;\n+\t(void)ft_type;\n+\treturn mlx5dv_dr_action_create_packet_reformat(domain, flags,\n+\t\t\t\t\t\t       reformat_type, data_sz,\n+\t\t\t\t\t\t       data);\n+#else\n+\t(void)domain;\n+\t(void)flags;\n+\tstruct mlx5dv_flow_action_attr *action;\n+\n+\taction = malloc(sizeof(*action));\n+\tif (!action)\n+\t\treturn NULL;\n+\taction->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;\n+\taction->action = mlx5dv_create_flow_action_packet_reformat\n+\t\t(ctx, data_sz, data, reformat_type, ft_type);\n+\treturn action;\n+#endif\n+#else\n+\t(void)ctx;\n+\t(void)reformat_type;\n+\t(void)ft_type;\n+\t(void)domain;\n+\t(void)flags;\n+\t(void)data_sz;\n+\t(void)data;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_tag(uint32_t tag)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_action_create_tag(tag);\n+#else\n+\tstruct mlx5dv_flow_action_attr *action;\n+\taction = malloc(sizeof(*action));\n+\tif (!action)\n+\t\treturn NULL;\n+\taction->type = MLX5DV_FLOW_ACTION_TAG;\n+\taction->tag_value = tag;\n+\treturn action;\n+#endif\n+#endif\n+\t(void)tag;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+}\n+\n+static void 
*\n+mlx5_glue_dv_create_flow_action_meter(struct mlx5dv_dr_flow_meter_attr *attr)\n+{\n+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)\n+\treturn mlx5dv_dr_action_create_flow_meter(attr);\n+#else\n+\t(void)attr;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dv_modify_flow_action_meter(void *action,\n+\t\t\t\t      struct mlx5dv_dr_flow_meter_attr *attr,\n+\t\t\t\t      uint64_t modify_bits)\n+{\n+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)\n+\treturn mlx5dv_dr_action_modify_flow_meter(action, attr, modify_bits);\n+#else\n+\t(void)action;\n+\t(void)attr;\n+\t(void)modify_bits;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dv_destroy_flow(void *flow_id)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_rule_destroy(flow_id);\n+#else\n+\treturn ibv_destroy_flow(flow_id);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dv_destroy_flow_matcher(void *matcher)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_matcher_destroy(matcher);\n+#else\n+\treturn mlx5dv_destroy_flow_matcher(matcher);\n+#endif\n+#else\n+\t(void)matcher;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static struct ibv_context *\n+mlx5_glue_dv_open_device(struct ibv_device *device)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_open_device(device,\n+\t\t\t\t  &(struct mlx5dv_context_attr){\n+\t\t\t\t\t.flags = MLX5DV_CONTEXT_FLAGS_DEVX,\n+\t\t\t\t  });\n+#else\n+\t(void)device;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static struct mlx5dv_devx_obj *\n+mlx5_glue_devx_obj_create(struct ibv_context *ctx,\n+\t\t\t  const void *in, size_t inlen,\n+\t\t\t  void *out, size_t outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_obj_create(ctx, in, inlen, out, outlen);\n+#else\n+\t(void)ctx;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_obj_destroy(struct mlx5dv_devx_obj *obj)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_obj_destroy(obj);\n+#else\n+\t(void)obj;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_obj_query(struct mlx5dv_devx_obj *obj,\n+\t\t\t const void *in, size_t inlen,\n+\t\t\t void *out, size_t outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_obj_query(obj, in, inlen, out, outlen);\n+#else\n+\t(void)obj;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_obj_modify(struct mlx5dv_devx_obj *obj,\n+\t\t\t  const void *in, size_t inlen,\n+\t\t\t  void *out, size_t outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_obj_modify(obj, in, inlen, out, outlen);\n+#else\n+\t(void)obj;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_general_cmd(struct ibv_context *ctx,\n+\t\t\t   const void *in, size_t inlen,\n+\t\t\t   void *out, size_t outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_general_cmd(ctx, in, inlen, out, outlen);\n+#else\n+\t(void)ctx;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static struct mlx5dv_devx_cmd_comp *\n+mlx5_glue_devx_create_cmd_comp(struct ibv_context *ctx)\n+{\n+#ifdef HAVE_IBV_DEVX_ASYNC\n+\treturn mlx5dv_devx_create_cmd_comp(ctx);\n+#else\n+\t(void)ctx;\n+\terrno = -ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static 
void\n+mlx5_glue_devx_destroy_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp)\n+{\n+#ifdef HAVE_IBV_DEVX_ASYNC\n+\tmlx5dv_devx_destroy_cmd_comp(cmd_comp);\n+#else\n+\t(void)cmd_comp;\n+\terrno = -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_obj_query_async(struct mlx5dv_devx_obj *obj, const void *in,\n+\t\t\t       size_t inlen, size_t outlen, uint64_t wr_id,\n+\t\t\t       struct mlx5dv_devx_cmd_comp *cmd_comp)\n+{\n+#ifdef HAVE_IBV_DEVX_ASYNC\n+\treturn mlx5dv_devx_obj_query_async(obj, in, inlen, outlen, wr_id,\n+\t\t\t\t\t   cmd_comp);\n+#else\n+\t(void)obj;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)outlen;\n+\t(void)wr_id;\n+\t(void)cmd_comp;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_get_async_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp,\n+\t\t\t\t  struct mlx5dv_devx_async_cmd_hdr *cmd_resp,\n+\t\t\t\t  size_t cmd_resp_len)\n+{\n+#ifdef HAVE_IBV_DEVX_ASYNC\n+\treturn mlx5dv_devx_get_async_cmd_comp(cmd_comp, cmd_resp,\n+\t\t\t\t\t      cmd_resp_len);\n+#else\n+\t(void)cmd_comp;\n+\t(void)cmd_resp;\n+\t(void)cmd_resp_len;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static struct mlx5dv_devx_umem *\n+mlx5_glue_devx_umem_reg(struct ibv_context *context, void *addr, size_t size,\n+\t\t\tuint32_t access)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_umem_reg(context, addr, size, access);\n+#else\n+\t(void)context;\n+\t(void)addr;\n+\t(void)size;\n+\t(void)access;\n+\terrno = -ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_umem_dereg(struct mlx5dv_devx_umem *dv_devx_umem)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_umem_dereg(dv_devx_umem);\n+#else\n+\t(void)dv_devx_umem;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_qp_query(struct ibv_qp *qp,\n+\t\t\tconst void *in, size_t inlen,\n+\t\t\tvoid *out, size_t outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_qp_query(qp, in, inlen, out, outlen);\n+#else\n+\t(void)qp;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_port_query(struct ibv_context *ctx,\n+\t\t\t  uint32_t port_num,\n+\t\t\t  struct mlx5dv_devx_port *mlx5_devx_port)\n+{\n+#ifdef HAVE_MLX5DV_DR_DEVX_PORT\n+\treturn mlx5dv_query_devx_port(ctx, port_num, mlx5_devx_port);\n+#else\n+\t(void)ctx;\n+\t(void)port_num;\n+\t(void)mlx5_devx_port;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dr_dump_domain(FILE *file, void *domain)\n+{\n+#ifdef HAVE_MLX5_DR_FLOW_DUMP\n+\treturn mlx5dv_dump_dr_domain(file, domain);\n+#else\n+\tRTE_SET_USED(file);\n+\tRTE_SET_USED(domain);\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+alignas(RTE_CACHE_LINE_SIZE)\n+const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){\n+\t.version = MLX5_GLUE_VERSION,\n+\t.fork_init = mlx5_glue_fork_init,\n+\t.alloc_pd = mlx5_glue_alloc_pd,\n+\t.dealloc_pd = mlx5_glue_dealloc_pd,\n+\t.get_device_list = mlx5_glue_get_device_list,\n+\t.free_device_list = mlx5_glue_free_device_list,\n+\t.open_device = mlx5_glue_open_device,\n+\t.close_device = mlx5_glue_close_device,\n+\t.query_device = mlx5_glue_query_device,\n+\t.query_device_ex = mlx5_glue_query_device_ex,\n+\t.query_rt_values_ex = mlx5_glue_query_rt_values_ex,\n+\t.query_port = mlx5_glue_query_port,\n+\t.create_comp_channel = mlx5_glue_create_comp_channel,\n+\t.destroy_comp_channel = mlx5_glue_destroy_comp_channel,\n+\t.create_cq = mlx5_glue_create_cq,\n+\t.destroy_cq = mlx5_glue_destroy_cq,\n+\t.get_cq_event = 
mlx5_glue_get_cq_event,\n+\t.ack_cq_events = mlx5_glue_ack_cq_events,\n+\t.create_rwq_ind_table = mlx5_glue_create_rwq_ind_table,\n+\t.destroy_rwq_ind_table = mlx5_glue_destroy_rwq_ind_table,\n+\t.create_wq = mlx5_glue_create_wq,\n+\t.destroy_wq = mlx5_glue_destroy_wq,\n+\t.modify_wq = mlx5_glue_modify_wq,\n+\t.create_flow = mlx5_glue_create_flow,\n+\t.destroy_flow = mlx5_glue_destroy_flow,\n+\t.destroy_flow_action = mlx5_glue_destroy_flow_action,\n+\t.create_qp = mlx5_glue_create_qp,\n+\t.create_qp_ex = mlx5_glue_create_qp_ex,\n+\t.destroy_qp = mlx5_glue_destroy_qp,\n+\t.modify_qp = mlx5_glue_modify_qp,\n+\t.reg_mr = mlx5_glue_reg_mr,\n+\t.dereg_mr = mlx5_glue_dereg_mr,\n+\t.create_counter_set = mlx5_glue_create_counter_set,\n+\t.destroy_counter_set = mlx5_glue_destroy_counter_set,\n+\t.describe_counter_set = mlx5_glue_describe_counter_set,\n+\t.query_counter_set = mlx5_glue_query_counter_set,\n+\t.create_counters = mlx5_glue_create_counters,\n+\t.destroy_counters = mlx5_glue_destroy_counters,\n+\t.attach_counters = mlx5_glue_attach_counters,\n+\t.query_counters = mlx5_glue_query_counters,\n+\t.ack_async_event = mlx5_glue_ack_async_event,\n+\t.get_async_event = mlx5_glue_get_async_event,\n+\t.port_state_str = mlx5_glue_port_state_str,\n+\t.cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,\n+\t.dr_create_flow_action_dest_flow_tbl =\n+\t\tmlx5_glue_dr_create_flow_action_dest_flow_tbl,\n+\t.dr_create_flow_action_dest_port =\n+\t\tmlx5_glue_dr_create_flow_action_dest_port,\n+\t.dr_create_flow_action_drop =\n+\t\tmlx5_glue_dr_create_flow_action_drop,\n+\t.dr_create_flow_action_push_vlan =\n+\t\tmlx5_glue_dr_create_flow_action_push_vlan,\n+\t.dr_create_flow_action_pop_vlan =\n+\t\tmlx5_glue_dr_create_flow_action_pop_vlan,\n+\t.dr_create_flow_tbl = mlx5_glue_dr_create_flow_tbl,\n+\t.dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,\n+\t.dr_create_domain = mlx5_glue_dr_create_domain,\n+\t.dr_destroy_domain = mlx5_glue_dr_destroy_domain,\n+\t.dv_create_cq = mlx5_glue_dv_create_cq,\n+\t.dv_create_wq = mlx5_glue_dv_create_wq,\n+\t.dv_query_device = mlx5_glue_dv_query_device,\n+\t.dv_set_context_attr = mlx5_glue_dv_set_context_attr,\n+\t.dv_init_obj = mlx5_glue_dv_init_obj,\n+\t.dv_create_qp = mlx5_glue_dv_create_qp,\n+\t.dv_create_flow_matcher = mlx5_glue_dv_create_flow_matcher,\n+\t.dv_create_flow = mlx5_glue_dv_create_flow,\n+\t.dv_create_flow_action_counter =\n+\t\tmlx5_glue_dv_create_flow_action_counter,\n+\t.dv_create_flow_action_dest_ibv_qp =\n+\t\tmlx5_glue_dv_create_flow_action_dest_ibv_qp,\n+\t.dv_create_flow_action_dest_devx_tir =\n+\t\tmlx5_glue_dv_create_flow_action_dest_devx_tir,\n+\t.dv_create_flow_action_modify_header =\n+\t\tmlx5_glue_dv_create_flow_action_modify_header,\n+\t.dv_create_flow_action_packet_reformat =\n+\t\tmlx5_glue_dv_create_flow_action_packet_reformat,\n+\t.dv_create_flow_action_tag =  mlx5_glue_dv_create_flow_action_tag,\n+\t.dv_create_flow_action_meter = mlx5_glue_dv_create_flow_action_meter,\n+\t.dv_modify_flow_action_meter = mlx5_glue_dv_modify_flow_action_meter,\n+\t.dv_destroy_flow = mlx5_glue_dv_destroy_flow,\n+\t.dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,\n+\t.dv_open_device = mlx5_glue_dv_open_device,\n+\t.devx_obj_create = mlx5_glue_devx_obj_create,\n+\t.devx_obj_destroy = mlx5_glue_devx_obj_destroy,\n+\t.devx_obj_query = mlx5_glue_devx_obj_query,\n+\t.devx_obj_modify = mlx5_glue_devx_obj_modify,\n+\t.devx_general_cmd = mlx5_glue_devx_general_cmd,\n+\t.devx_create_cmd_comp = mlx5_glue_devx_create_cmd_comp,\n+\t.devx_destroy_cmd_comp = 
mlx5_glue_devx_destroy_cmd_comp,\n+\t.devx_obj_query_async = mlx5_glue_devx_obj_query_async,\n+\t.devx_get_async_cmd_comp = mlx5_glue_devx_get_async_cmd_comp,\n+\t.devx_umem_reg = mlx5_glue_devx_umem_reg,\n+\t.devx_umem_dereg = mlx5_glue_devx_umem_dereg,\n+\t.devx_qp_query = mlx5_glue_devx_qp_query,\n+\t.devx_port_query = mlx5_glue_devx_port_query,\n+\t.dr_dump_domain = mlx5_glue_dr_dump_domain,\n+};\ndiff --git a/drivers/common/mlx5/mlx5_glue.h b/drivers/common/mlx5/mlx5_glue.h\nnew file mode 100644\nindex 0000000..f4c3180\n--- /dev/null\n+++ b/drivers/common/mlx5/mlx5_glue.h\n@@ -0,0 +1,265 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2018 6WIND S.A.\n+ * Copyright 2018 Mellanox Technologies, Ltd\n+ */\n+\n+#ifndef MLX5_GLUE_H_\n+#define MLX5_GLUE_H_\n+\n+#include <stddef.h>\n+#include <stdint.h>\n+/* Verbs headers do not support -pedantic. */\n+#ifdef PEDANTIC\n+#pragma GCC diagnostic ignored \"-Wpedantic\"\n+#endif\n+#include <infiniband/mlx5dv.h>\n+#include <infiniband/verbs.h>\n+#ifdef PEDANTIC\n+#pragma GCC diagnostic error \"-Wpedantic\"\n+#endif\n+\n+#include <rte_byteorder.h>\n+\n+#include \"mlx5_autoconf.h\"\n+\n+#ifndef MLX5_GLUE_VERSION\n+#define MLX5_GLUE_VERSION \"\"\n+#endif\n+\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n+struct ibv_counter_set;\n+struct ibv_counter_set_data;\n+struct ibv_counter_set_description;\n+struct ibv_counter_set_init_attr;\n+struct ibv_query_counter_set_attr;\n+#endif\n+\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n+struct ibv_counters;\n+struct ibv_counters_init_attr;\n+struct ibv_counter_attach_attr;\n+#endif\n+\n+#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n+struct mlx5dv_qp_init_attr;\n+#endif\n+\n+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT\n+struct mlx5dv_wq_init_attr;\n+#endif\n+\n+#ifndef HAVE_IBV_FLOW_DV_SUPPORT\n+struct mlx5dv_flow_matcher;\n+struct mlx5dv_flow_matcher_attr;\n+struct mlx5dv_flow_action_attr;\n+struct mlx5dv_flow_match_parameters;\n+struct mlx5dv_dr_flow_meter_attr;\n+struct ibv_flow_action;\n+enum mlx5dv_flow_action_packet_reformat_type { packet_reformat_type = 0, };\n+enum mlx5dv_flow_table_type { flow_table_type = 0, };\n+#endif\n+\n+#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS\n+#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0\n+#endif\n+\n+#ifndef HAVE_IBV_DEVX_OBJ\n+struct mlx5dv_devx_obj;\n+struct mlx5dv_devx_umem { uint32_t umem_id; };\n+#endif\n+\n+#ifndef HAVE_IBV_DEVX_ASYNC\n+struct mlx5dv_devx_cmd_comp;\n+struct mlx5dv_devx_async_cmd_hdr;\n+#endif\n+\n+#ifndef HAVE_MLX5DV_DR\n+enum  mlx5dv_dr_domain_type { unused, };\n+struct mlx5dv_dr_domain;\n+#endif\n+\n+#ifndef HAVE_MLX5DV_DR_DEVX_PORT\n+struct mlx5dv_devx_port;\n+#endif\n+\n+#ifndef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER\n+struct mlx5dv_dr_flow_meter_attr;\n+#endif\n+\n+/* LIB_GLUE_VERSION must be updated every time this structure is modified. 
*/\n+struct mlx5_glue {\n+\tconst char *version;\n+\tint (*fork_init)(void);\n+\tstruct ibv_pd *(*alloc_pd)(struct ibv_context *context);\n+\tint (*dealloc_pd)(struct ibv_pd *pd);\n+\tstruct ibv_device **(*get_device_list)(int *num_devices);\n+\tvoid (*free_device_list)(struct ibv_device **list);\n+\tstruct ibv_context *(*open_device)(struct ibv_device *device);\n+\tint (*close_device)(struct ibv_context *context);\n+\tint (*query_device)(struct ibv_context *context,\n+\t\t\t    struct ibv_device_attr *device_attr);\n+\tint (*query_device_ex)(struct ibv_context *context,\n+\t\t\t       const struct ibv_query_device_ex_input *input,\n+\t\t\t       struct ibv_device_attr_ex *attr);\n+\tint (*query_rt_values_ex)(struct ibv_context *context,\n+\t\t\t       struct ibv_values_ex *values);\n+\tint (*query_port)(struct ibv_context *context, uint8_t port_num,\n+\t\t\t  struct ibv_port_attr *port_attr);\n+\tstruct ibv_comp_channel *(*create_comp_channel)\n+\t\t(struct ibv_context *context);\n+\tint (*destroy_comp_channel)(struct ibv_comp_channel *channel);\n+\tstruct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,\n+\t\t\t\t    void *cq_context,\n+\t\t\t\t    struct ibv_comp_channel *channel,\n+\t\t\t\t    int comp_vector);\n+\tint (*destroy_cq)(struct ibv_cq *cq);\n+\tint (*get_cq_event)(struct ibv_comp_channel *channel,\n+\t\t\t    struct ibv_cq **cq, void **cq_context);\n+\tvoid (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);\n+\tstruct ibv_rwq_ind_table *(*create_rwq_ind_table)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_rwq_ind_table_init_attr *init_attr);\n+\tint (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);\n+\tstruct ibv_wq *(*create_wq)(struct ibv_context *context,\n+\t\t\t\t    struct ibv_wq_init_attr *wq_init_attr);\n+\tint (*destroy_wq)(struct ibv_wq *wq);\n+\tint (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);\n+\tstruct ibv_flow *(*create_flow)(struct ibv_qp *qp,\n+\t\t\t\t\tstruct ibv_flow_attr *flow);\n+\tint (*destroy_flow)(struct ibv_flow *flow_id);\n+\tint (*destroy_flow_action)(void *action);\n+\tstruct ibv_qp *(*create_qp)(struct ibv_pd *pd,\n+\t\t\t\t    struct ibv_qp_init_attr *qp_init_attr);\n+\tstruct ibv_qp *(*create_qp_ex)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_qp_init_attr_ex *qp_init_attr_ex);\n+\tint (*destroy_qp)(struct ibv_qp *qp);\n+\tint (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,\n+\t\t\t int attr_mask);\n+\tstruct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,\n+\t\t\t\t size_t length, int access);\n+\tint (*dereg_mr)(struct ibv_mr *mr);\n+\tstruct ibv_counter_set *(*create_counter_set)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_counter_set_init_attr *init_attr);\n+\tint (*destroy_counter_set)(struct ibv_counter_set *cs);\n+\tint (*describe_counter_set)\n+\t\t(struct ibv_context *context,\n+\t\t uint16_t counter_set_id,\n+\t\t struct ibv_counter_set_description *cs_desc);\n+\tint (*query_counter_set)(struct ibv_query_counter_set_attr *query_attr,\n+\t\t\t\t struct ibv_counter_set_data *cs_data);\n+\tstruct ibv_counters *(*create_counters)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_counters_init_attr *init_attr);\n+\tint (*destroy_counters)(struct ibv_counters *counters);\n+\tint (*attach_counters)(struct ibv_counters *counters,\n+\t\t\t       struct ibv_counter_attach_attr *attr,\n+\t\t\t       struct ibv_flow *flow);\n+\tint (*query_counters)(struct ibv_counters *counters,\n+\t\t\t      uint64_t *counters_value,\n+\t\t\t      uint32_t 
ncounters,\n+\t\t\t      uint32_t flags);\n+\tvoid (*ack_async_event)(struct ibv_async_event *event);\n+\tint (*get_async_event)(struct ibv_context *context,\n+\t\t\t       struct ibv_async_event *event);\n+\tconst char *(*port_state_str)(enum ibv_port_state port_state);\n+\tstruct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);\n+\tvoid *(*dr_create_flow_action_dest_flow_tbl)(void *tbl);\n+\tvoid *(*dr_create_flow_action_dest_port)(void *domain,\n+\t\t\t\t\t\t uint32_t port);\n+\tvoid *(*dr_create_flow_action_drop)();\n+\tvoid *(*dr_create_flow_action_push_vlan)\n+\t\t\t\t\t(struct mlx5dv_dr_domain *domain,\n+\t\t\t\t\t rte_be32_t vlan_tag);\n+\tvoid *(*dr_create_flow_action_pop_vlan)();\n+\tvoid *(*dr_create_flow_tbl)(void *domain, uint32_t level);\n+\tint (*dr_destroy_flow_tbl)(void *tbl);\n+\tvoid *(*dr_create_domain)(struct ibv_context *ctx,\n+\t\t\t\t  enum mlx5dv_dr_domain_type domain);\n+\tint (*dr_destroy_domain)(void *domain);\n+\tstruct ibv_cq_ex *(*dv_create_cq)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_cq_init_attr_ex *cq_attr,\n+\t\t struct mlx5dv_cq_init_attr *mlx5_cq_attr);\n+\tstruct ibv_wq *(*dv_create_wq)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_wq_init_attr *wq_attr,\n+\t\t struct mlx5dv_wq_init_attr *mlx5_wq_attr);\n+\tint (*dv_query_device)(struct ibv_context *ctx_in,\n+\t\t\t       struct mlx5dv_context *attrs_out);\n+\tint (*dv_set_context_attr)(struct ibv_context *ibv_ctx,\n+\t\t\t\t   enum mlx5dv_set_ctx_attr_type type,\n+\t\t\t\t   void *attr);\n+\tint (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);\n+\tstruct ibv_qp *(*dv_create_qp)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_qp_init_attr_ex *qp_init_attr_ex,\n+\t\t struct mlx5dv_qp_init_attr *dv_qp_init_attr);\n+\tvoid *(*dv_create_flow_matcher)\n+\t\t(struct ibv_context *context,\n+\t\t struct mlx5dv_flow_matcher_attr *matcher_attr,\n+\t\t void *tbl);\n+\tvoid *(*dv_create_flow)(void *matcher, void *match_value,\n+\t\t\t  size_t num_actions, void *actions[]);\n+\tvoid *(*dv_create_flow_action_counter)(void *obj, uint32_t  offset);\n+\tvoid *(*dv_create_flow_action_dest_ibv_qp)(void *qp);\n+\tvoid *(*dv_create_flow_action_dest_devx_tir)(void *tir);\n+\tvoid *(*dv_create_flow_action_modify_header)\n+\t\t(struct ibv_context *ctx, enum mlx5dv_flow_table_type ft_type,\n+\t\t void *domain, uint64_t flags, size_t actions_sz,\n+\t\t uint64_t actions[]);\n+\tvoid *(*dv_create_flow_action_packet_reformat)\n+\t\t(struct ibv_context *ctx,\n+\t\t enum mlx5dv_flow_action_packet_reformat_type reformat_type,\n+\t\t enum mlx5dv_flow_table_type ft_type,\n+\t\t struct mlx5dv_dr_domain *domain,\n+\t\t uint32_t flags, size_t data_sz, void *data);\n+\tvoid *(*dv_create_flow_action_tag)(uint32_t tag);\n+\tvoid *(*dv_create_flow_action_meter)\n+\t\t(struct mlx5dv_dr_flow_meter_attr *attr);\n+\tint (*dv_modify_flow_action_meter)(void *action,\n+\t\tstruct mlx5dv_dr_flow_meter_attr *attr, uint64_t modify_bits);\n+\tint (*dv_destroy_flow)(void *flow);\n+\tint (*dv_destroy_flow_matcher)(void *matcher);\n+\tstruct ibv_context *(*dv_open_device)(struct ibv_device *device);\n+\tstruct mlx5dv_devx_obj *(*devx_obj_create)\n+\t\t\t\t\t(struct ibv_context *ctx,\n+\t\t\t\t\t const void *in, size_t inlen,\n+\t\t\t\t\t void *out, size_t outlen);\n+\tint (*devx_obj_destroy)(struct mlx5dv_devx_obj *obj);\n+\tint (*devx_obj_query)(struct mlx5dv_devx_obj *obj,\n+\t\t\t      const void *in, size_t inlen,\n+\t\t\t      void *out, size_t outlen);\n+\tint (*devx_obj_modify)(struct mlx5dv_devx_obj *obj,\n+\t\t\t  
     const void *in, size_t inlen,\n+\t\t\t       void *out, size_t outlen);\n+\tint (*devx_general_cmd)(struct ibv_context *context,\n+\t\t\t\tconst void *in, size_t inlen,\n+\t\t\t\tvoid *out, size_t outlen);\n+\tstruct mlx5dv_devx_cmd_comp *(*devx_create_cmd_comp)\n+\t\t\t\t\t(struct ibv_context *context);\n+\tvoid (*devx_destroy_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp);\n+\tint (*devx_obj_query_async)(struct mlx5dv_devx_obj *obj,\n+\t\t\t\t    const void *in, size_t inlen,\n+\t\t\t\t    size_t outlen, uint64_t wr_id,\n+\t\t\t\t    struct mlx5dv_devx_cmd_comp *cmd_comp);\n+\tint (*devx_get_async_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp,\n+\t\t\t\t       struct mlx5dv_devx_async_cmd_hdr *resp,\n+\t\t\t\t       size_t cmd_resp_len);\n+\tstruct mlx5dv_devx_umem *(*devx_umem_reg)(struct ibv_context *context,\n+\t\t\t\t\t\t  void *addr, size_t size,\n+\t\t\t\t\t\t  uint32_t access);\n+\tint (*devx_umem_dereg)(struct mlx5dv_devx_umem *dv_devx_umem);\n+\tint (*devx_qp_query)(struct ibv_qp *qp,\n+\t\t\t     const void *in, size_t inlen,\n+\t\t\t     void *out, size_t outlen);\n+\tint (*devx_port_query)(struct ibv_context *ctx,\n+\t\t\t       uint32_t port_num,\n+\t\t\t       struct mlx5dv_devx_port *mlx5_devx_port);\n+\tint (*dr_dump_domain)(FILE *file, void *domain);\n+};\n+\n+const struct mlx5_glue *mlx5_glue;\n+\n+#endif /* MLX5_GLUE_H_ */\ndiff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h\nnew file mode 100644\nindex 0000000..4b521b2\n--- /dev/null\n+++ b/drivers/common/mlx5/mlx5_prm.h\n@@ -0,0 +1,1884 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2016 6WIND S.A.\n+ * Copyright 2016 Mellanox Technologies, Ltd\n+ */\n+\n+#ifndef RTE_PMD_MLX5_PRM_H_\n+#define RTE_PMD_MLX5_PRM_H_\n+\n+#include <assert.h>\n+/* Verbs header. */\n+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */\n+#ifdef PEDANTIC\n+#pragma GCC diagnostic ignored \"-Wpedantic\"\n+#endif\n+#include <infiniband/mlx5dv.h>\n+#ifdef PEDANTIC\n+#pragma GCC diagnostic error \"-Wpedantic\"\n+#endif\n+\n+#include <rte_vect.h>\n+#include <rte_byteorder.h>\n+\n+#include \"mlx5_autoconf.h\"\n+\n+/* RSS hash key size. */\n+#define MLX5_RSS_HASH_KEY_LEN 40\n+\n+/* Get CQE owner bit. */\n+#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)\n+\n+/* Get CQE format. */\n+#define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2)\n+\n+/* Get CQE opcode. */\n+#define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4)\n+\n+/* Get CQE solicited event. */\n+#define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1)\n+\n+/* Invalidate a CQE. */\n+#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)\n+\n+/* WQE Segment sizes in bytes. */\n+#define MLX5_WSEG_SIZE 16u\n+#define MLX5_WQE_CSEG_SIZE sizeof(struct mlx5_wqe_cseg)\n+#define MLX5_WQE_DSEG_SIZE sizeof(struct mlx5_wqe_dseg)\n+#define MLX5_WQE_ESEG_SIZE sizeof(struct mlx5_wqe_eseg)\n+\n+/* WQE/WQEBB size in bytes. 
*/\n+#define MLX5_WQE_SIZE sizeof(struct mlx5_wqe)\n+\n+/*\n+ * Max size of a WQE session.\n+ * Absolute maximum size is 63 (MLX5_DSEG_MAX) segments,\n+ * the WQE size field in Control Segment is 6 bits wide.\n+ */\n+#define MLX5_WQE_SIZE_MAX (60 * MLX5_WSEG_SIZE)\n+\n+/*\n+ * Default minimum number of Tx queues for inlining packets.\n+ * If there are less queues as specified we assume we have\n+ * no enough CPU resources (cycles) to perform inlining,\n+ * the PCIe throughput is not supposed as bottleneck and\n+ * inlining is disabled.\n+ */\n+#define MLX5_INLINE_MAX_TXQS 8u\n+#define MLX5_INLINE_MAX_TXQS_BLUEFIELD 16u\n+\n+/*\n+ * Default packet length threshold to be inlined with\n+ * enhanced MPW. If packet length exceeds the threshold\n+ * the data are not inlined. Should be aligned in WQEBB\n+ * boundary with accounting the title Control and Ethernet\n+ * segments.\n+ */\n+#define MLX5_EMPW_DEF_INLINE_LEN (4u * MLX5_WQE_SIZE + \\\n+\t\t\t\t  MLX5_DSEG_MIN_INLINE_SIZE)\n+/*\n+ * Maximal inline data length sent with enhanced MPW.\n+ * Is based on maximal WQE size.\n+ */\n+#define MLX5_EMPW_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \\\n+\t\t\t\t  MLX5_WQE_CSEG_SIZE - \\\n+\t\t\t\t  MLX5_WQE_ESEG_SIZE - \\\n+\t\t\t\t  MLX5_WQE_DSEG_SIZE + \\\n+\t\t\t\t  MLX5_DSEG_MIN_INLINE_SIZE)\n+/*\n+ * Minimal amount of packets to be sent with EMPW.\n+ * This limits the minimal required size of sent EMPW.\n+ * If there are no enough resources to built minimal\n+ * EMPW the sending loop exits.\n+ */\n+#define MLX5_EMPW_MIN_PACKETS (2u + 3u * 4u)\n+/*\n+ * Maximal amount of packets to be sent with EMPW.\n+ * This value is not recommended to exceed MLX5_TX_COMP_THRESH,\n+ * otherwise there might be up to MLX5_EMPW_MAX_PACKETS mbufs\n+ * without CQE generation request, being multiplied by\n+ * MLX5_TX_COMP_MAX_CQE it may cause significant latency\n+ * in tx burst routine at the moment of freeing multiple mbufs.\n+ */\n+#define MLX5_EMPW_MAX_PACKETS MLX5_TX_COMP_THRESH\n+#define MLX5_MPW_MAX_PACKETS 6\n+#define MLX5_MPW_INLINE_MAX_PACKETS 2\n+\n+/*\n+ * Default packet length threshold to be inlined with\n+ * ordinary SEND. Inlining saves the MR key search\n+ * and extra PCIe data fetch transaction, but eats the\n+ * CPU cycles.\n+ */\n+#define MLX5_SEND_DEF_INLINE_LEN (5U * MLX5_WQE_SIZE + \\\n+\t\t\t\t  MLX5_ESEG_MIN_INLINE_SIZE - \\\n+\t\t\t\t  MLX5_WQE_CSEG_SIZE - \\\n+\t\t\t\t  MLX5_WQE_ESEG_SIZE - \\\n+\t\t\t\t  MLX5_WQE_DSEG_SIZE)\n+/*\n+ * Maximal inline data length sent with ordinary SEND.\n+ * Is based on maximal WQE size.\n+ */\n+#define MLX5_SEND_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \\\n+\t\t\t\t  MLX5_WQE_CSEG_SIZE - \\\n+\t\t\t\t  MLX5_WQE_ESEG_SIZE - \\\n+\t\t\t\t  MLX5_WQE_DSEG_SIZE + \\\n+\t\t\t\t  MLX5_ESEG_MIN_INLINE_SIZE)\n+\n+/* Missed in mlv5dv.h, should define here. */\n+#define MLX5_OPCODE_ENHANCED_MPSW 0x29u\n+\n+/* CQE value to inform that VLAN is stripped. */\n+#define MLX5_CQE_VLAN_STRIPPED (1u << 0)\n+\n+/* IPv4 options. */\n+#define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)\n+\n+/* IPv6 packet. */\n+#define MLX5_CQE_RX_IPV6_PACKET (1u << 2)\n+\n+/* IPv4 packet. */\n+#define MLX5_CQE_RX_IPV4_PACKET (1u << 3)\n+\n+/* TCP packet. */\n+#define MLX5_CQE_RX_TCP_PACKET (1u << 4)\n+\n+/* UDP packet. */\n+#define MLX5_CQE_RX_UDP_PACKET (1u << 5)\n+\n+/* IP is fragmented. */\n+#define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)\n+\n+/* L2 header is valid. */\n+#define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)\n+\n+/* L3 header is valid. 
*/\n+#define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)\n+\n+/* L4 header is valid. */\n+#define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)\n+\n+/* Outer packet, 0 IPv4, 1 IPv6. */\n+#define MLX5_CQE_RX_OUTER_PACKET (1u << 1)\n+\n+/* Tunnel packet bit in the CQE. */\n+#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)\n+\n+/* Mask for LRO push flag in the CQE lro_tcppsh_abort_dupack field. */\n+#define MLX5_CQE_LRO_PUSH_MASK 0x40\n+\n+/* Mask for L4 type in the CQE hdr_type_etc field. */\n+#define MLX5_CQE_L4_TYPE_MASK 0x70\n+\n+/* The bit index of L4 type in CQE hdr_type_etc field. */\n+#define MLX5_CQE_L4_TYPE_SHIFT 0x4\n+\n+/* L4 type to indicate TCP packet without acknowledgment. */\n+#define MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK 0x3\n+\n+/* L4 type to indicate TCP packet with acknowledgment. */\n+#define MLX5_L4_HDR_TYPE_TCP_WITH_ACL 0x4\n+\n+/* Inner L3 checksum offload (Tunneled packets only). */\n+#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)\n+\n+/* Inner L4 checksum offload (Tunneled packets only). */\n+#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)\n+\n+/* Outer L4 type is TCP. */\n+#define MLX5_ETH_WQE_L4_OUTER_TCP  (0u << 5)\n+\n+/* Outer L4 type is UDP. */\n+#define MLX5_ETH_WQE_L4_OUTER_UDP  (1u << 5)\n+\n+/* Outer L3 type is IPV4. */\n+#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)\n+\n+/* Outer L3 type is IPV6. */\n+#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)\n+\n+/* Inner L4 type is TCP. */\n+#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)\n+\n+/* Inner L4 type is UDP. */\n+#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)\n+\n+/* Inner L3 type is IPV4. */\n+#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)\n+\n+/* Inner L3 type is IPV6. */\n+#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)\n+\n+/* VLAN insertion flag. */\n+#define MLX5_ETH_WQE_VLAN_INSERT (1u << 31)\n+\n+/* Data inline segment flag. */\n+#define MLX5_ETH_WQE_DATA_INLINE (1u << 31)\n+\n+/* Is flow mark valid. */\n+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)\n+#else\n+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)\n+#endif\n+\n+/* INVALID is used by packets matching no flow rules. */\n+#define MLX5_FLOW_MARK_INVALID 0\n+\n+/* Maximum allowed value to mark a packet. */\n+#define MLX5_FLOW_MARK_MAX 0xfffff0\n+\n+/* Default mark value used when none is provided. */\n+#define MLX5_FLOW_MARK_DEFAULT 0xffffff\n+\n+/* Default mark mask for metadata legacy mode. */\n+#define MLX5_FLOW_MARK_MASK 0xffffff\n+\n+/* Maximum number of DS in WQE. Limited by 6-bit field. */\n+#define MLX5_DSEG_MAX 63\n+\n+/* The completion mode offset in the WQE control segment line 2. */\n+#define MLX5_COMP_MODE_OFFSET 2\n+\n+/* Amount of data bytes in minimal inline data segment. */\n+#define MLX5_DSEG_MIN_INLINE_SIZE 12u\n+\n+/* Amount of data bytes in minimal inline eth segment. */\n+#define MLX5_ESEG_MIN_INLINE_SIZE 18u\n+\n+/* Amount of data bytes after eth data segment. */\n+#define MLX5_ESEG_EXTRA_DATA_SIZE 32u\n+\n+/* The maximum log value of segments per RQ WQE. */\n+#define MLX5_MAX_LOG_RQ_SEGS 5u\n+\n+/* The alignment needed for WQ buffer. */\n+#define MLX5_WQE_BUF_ALIGNMENT 512\n+\n+/* Completion mode. */\n+enum mlx5_completion_mode {\n+\tMLX5_COMP_ONLY_ERR = 0x0,\n+\tMLX5_COMP_ONLY_FIRST_ERR = 0x1,\n+\tMLX5_COMP_ALWAYS = 0x2,\n+\tMLX5_COMP_CQE_AND_EQE = 0x3,\n+};\n+\n+/* MPW mode. */\n+enum mlx5_mpw_mode {\n+\tMLX5_MPW_DISABLED,\n+\tMLX5_MPW,\n+\tMLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */\n+};\n+\n+/* WQE Control segment. 
*/\n+struct mlx5_wqe_cseg {\n+\tuint32_t opcode;\n+\tuint32_t sq_ds;\n+\tuint32_t flags;\n+\tuint32_t misc;\n+} __rte_packed __rte_aligned(MLX5_WSEG_SIZE);\n+\n+/* Header of data segment. Minimal size Data Segment */\n+struct mlx5_wqe_dseg {\n+\tuint32_t bcount;\n+\tunion {\n+\t\tuint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];\n+\t\tstruct {\n+\t\t\tuint32_t lkey;\n+\t\t\tuint64_t pbuf;\n+\t\t} __rte_packed;\n+\t};\n+} __rte_packed;\n+\n+/* Subset of struct WQE Ethernet Segment. */\n+struct mlx5_wqe_eseg {\n+\tunion {\n+\t\tstruct {\n+\t\t\tuint32_t swp_offs;\n+\t\t\tuint8_t\tcs_flags;\n+\t\t\tuint8_t\tswp_flags;\n+\t\t\tuint16_t mss;\n+\t\t\tuint32_t metadata;\n+\t\t\tuint16_t inline_hdr_sz;\n+\t\t\tunion {\n+\t\t\t\tuint16_t inline_data;\n+\t\t\t\tuint16_t vlan_tag;\n+\t\t\t};\n+\t\t} __rte_packed;\n+\t\tstruct {\n+\t\t\tuint32_t offsets;\n+\t\t\tuint32_t flags;\n+\t\t\tuint32_t flow_metadata;\n+\t\t\tuint32_t inline_hdr;\n+\t\t} __rte_packed;\n+\t};\n+} __rte_packed;\n+\n+/* The title WQEBB, header of WQE. */\n+struct mlx5_wqe {\n+\tunion {\n+\t\tstruct mlx5_wqe_cseg cseg;\n+\t\tuint32_t ctrl[4];\n+\t};\n+\tstruct mlx5_wqe_eseg eseg;\n+\tunion {\n+\t\tstruct mlx5_wqe_dseg dseg[2];\n+\t\tuint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];\n+\t};\n+} __rte_packed;\n+\n+/* WQE for Multi-Packet RQ. */\n+struct mlx5_wqe_mprq {\n+\tstruct mlx5_wqe_srq_next_seg next_seg;\n+\tstruct mlx5_wqe_data_seg dseg;\n+};\n+\n+#define MLX5_MPRQ_LEN_MASK 0x000ffff\n+#define MLX5_MPRQ_LEN_SHIFT 0\n+#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000\n+#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16\n+#define MLX5_MPRQ_FILLER_MASK 0x80000000\n+#define MLX5_MPRQ_FILLER_SHIFT 31\n+\n+#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2\n+\n+/* CQ element structure - should be equal to the cache line size */\n+struct mlx5_cqe {\n+#if (RTE_CACHE_LINE_SIZE == 128)\n+\tuint8_t padding[64];\n+#endif\n+\tuint8_t pkt_info;\n+\tuint8_t rsvd0;\n+\tuint16_t wqe_id;\n+\tuint8_t lro_tcppsh_abort_dupack;\n+\tuint8_t lro_min_ttl;\n+\tuint16_t lro_tcp_win;\n+\tuint32_t lro_ack_seq_num;\n+\tuint32_t rx_hash_res;\n+\tuint8_t rx_hash_type;\n+\tuint8_t rsvd1[3];\n+\tuint16_t csum;\n+\tuint8_t rsvd2[6];\n+\tuint16_t hdr_type_etc;\n+\tuint16_t vlan_info;\n+\tuint8_t lro_num_seg;\n+\tuint8_t rsvd3[3];\n+\tuint32_t flow_table_metadata;\n+\tuint8_t rsvd4[4];\n+\tuint32_t byte_cnt;\n+\tuint64_t timestamp;\n+\tuint32_t sop_drop_qpn;\n+\tuint16_t wqe_counter;\n+\tuint8_t rsvd5;\n+\tuint8_t op_own;\n+};\n+\n+/* Adding direct verbs to data-path. */\n+\n+/* CQ sequence number mask. */\n+#define MLX5_CQ_SQN_MASK 0x3\n+\n+/* CQ sequence number index. */\n+#define MLX5_CQ_SQN_OFFSET 28\n+\n+/* CQ doorbell index mask. */\n+#define MLX5_CI_MASK 0xffffff\n+\n+/* CQ doorbell offset. */\n+#define MLX5_CQ_ARM_DB 1\n+\n+/* CQ doorbell offset*/\n+#define MLX5_CQ_DOORBELL 0x20\n+\n+/* CQE format value. */\n+#define MLX5_COMPRESSED 0x3\n+\n+/* Action type of header modification. */\n+enum {\n+\tMLX5_MODIFICATION_TYPE_SET = 0x1,\n+\tMLX5_MODIFICATION_TYPE_ADD = 0x2,\n+\tMLX5_MODIFICATION_TYPE_COPY = 0x3,\n+};\n+\n+/* The field of packet to be modified. 
*/\n+enum mlx5_modification_field {\n+\tMLX5_MODI_OUT_NONE = -1,\n+\tMLX5_MODI_OUT_SMAC_47_16 = 1,\n+\tMLX5_MODI_OUT_SMAC_15_0,\n+\tMLX5_MODI_OUT_ETHERTYPE,\n+\tMLX5_MODI_OUT_DMAC_47_16,\n+\tMLX5_MODI_OUT_DMAC_15_0,\n+\tMLX5_MODI_OUT_IP_DSCP,\n+\tMLX5_MODI_OUT_TCP_FLAGS,\n+\tMLX5_MODI_OUT_TCP_SPORT,\n+\tMLX5_MODI_OUT_TCP_DPORT,\n+\tMLX5_MODI_OUT_IPV4_TTL,\n+\tMLX5_MODI_OUT_UDP_SPORT,\n+\tMLX5_MODI_OUT_UDP_DPORT,\n+\tMLX5_MODI_OUT_SIPV6_127_96,\n+\tMLX5_MODI_OUT_SIPV6_95_64,\n+\tMLX5_MODI_OUT_SIPV6_63_32,\n+\tMLX5_MODI_OUT_SIPV6_31_0,\n+\tMLX5_MODI_OUT_DIPV6_127_96,\n+\tMLX5_MODI_OUT_DIPV6_95_64,\n+\tMLX5_MODI_OUT_DIPV6_63_32,\n+\tMLX5_MODI_OUT_DIPV6_31_0,\n+\tMLX5_MODI_OUT_SIPV4,\n+\tMLX5_MODI_OUT_DIPV4,\n+\tMLX5_MODI_OUT_FIRST_VID,\n+\tMLX5_MODI_IN_SMAC_47_16 = 0x31,\n+\tMLX5_MODI_IN_SMAC_15_0,\n+\tMLX5_MODI_IN_ETHERTYPE,\n+\tMLX5_MODI_IN_DMAC_47_16,\n+\tMLX5_MODI_IN_DMAC_15_0,\n+\tMLX5_MODI_IN_IP_DSCP,\n+\tMLX5_MODI_IN_TCP_FLAGS,\n+\tMLX5_MODI_IN_TCP_SPORT,\n+\tMLX5_MODI_IN_TCP_DPORT,\n+\tMLX5_MODI_IN_IPV4_TTL,\n+\tMLX5_MODI_IN_UDP_SPORT,\n+\tMLX5_MODI_IN_UDP_DPORT,\n+\tMLX5_MODI_IN_SIPV6_127_96,\n+\tMLX5_MODI_IN_SIPV6_95_64,\n+\tMLX5_MODI_IN_SIPV6_63_32,\n+\tMLX5_MODI_IN_SIPV6_31_0,\n+\tMLX5_MODI_IN_DIPV6_127_96,\n+\tMLX5_MODI_IN_DIPV6_95_64,\n+\tMLX5_MODI_IN_DIPV6_63_32,\n+\tMLX5_MODI_IN_DIPV6_31_0,\n+\tMLX5_MODI_IN_SIPV4,\n+\tMLX5_MODI_IN_DIPV4,\n+\tMLX5_MODI_OUT_IPV6_HOPLIMIT,\n+\tMLX5_MODI_IN_IPV6_HOPLIMIT,\n+\tMLX5_MODI_META_DATA_REG_A,\n+\tMLX5_MODI_META_DATA_REG_B = 0x50,\n+\tMLX5_MODI_META_REG_C_0,\n+\tMLX5_MODI_META_REG_C_1,\n+\tMLX5_MODI_META_REG_C_2,\n+\tMLX5_MODI_META_REG_C_3,\n+\tMLX5_MODI_META_REG_C_4,\n+\tMLX5_MODI_META_REG_C_5,\n+\tMLX5_MODI_META_REG_C_6,\n+\tMLX5_MODI_META_REG_C_7,\n+\tMLX5_MODI_OUT_TCP_SEQ_NUM,\n+\tMLX5_MODI_IN_TCP_SEQ_NUM,\n+\tMLX5_MODI_OUT_TCP_ACK_NUM,\n+\tMLX5_MODI_IN_TCP_ACK_NUM = 0x5C,\n+};\n+\n+/* Total number of metadata reg_c's. */\n+#define MLX5_MREG_C_NUM (MLX5_MODI_META_REG_C_7 - MLX5_MODI_META_REG_C_0 + 1)\n+\n+enum modify_reg {\n+\tREG_NONE = 0,\n+\tREG_A,\n+\tREG_B,\n+\tREG_C_0,\n+\tREG_C_1,\n+\tREG_C_2,\n+\tREG_C_3,\n+\tREG_C_4,\n+\tREG_C_5,\n+\tREG_C_6,\n+\tREG_C_7,\n+};\n+\n+/* Modification sub command. 
*/\n+struct mlx5_modification_cmd {\n+\tunion {\n+\t\tuint32_t data0;\n+\t\tstruct {\n+\t\t\tunsigned int length:5;\n+\t\t\tunsigned int rsvd0:3;\n+\t\t\tunsigned int offset:5;\n+\t\t\tunsigned int rsvd1:3;\n+\t\t\tunsigned int field:12;\n+\t\t\tunsigned int action_type:4;\n+\t\t};\n+\t};\n+\tunion {\n+\t\tuint32_t data1;\n+\t\tuint8_t data[4];\n+\t\tstruct {\n+\t\t\tunsigned int rsvd2:8;\n+\t\t\tunsigned int dst_offset:5;\n+\t\t\tunsigned int rsvd3:3;\n+\t\t\tunsigned int dst_field:12;\n+\t\t\tunsigned int rsvd4:4;\n+\t\t};\n+\t};\n+};\n+\n+typedef uint32_t u32;\n+typedef uint16_t u16;\n+typedef uint8_t u8;\n+\n+#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)\n+#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)\n+#define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \\\n+\t\t\t\t  (&(__mlx5_nullp(typ)->fld)))\n+#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \\\n+\t\t\t\t    (__mlx5_bit_off(typ, fld) & 0x1f))\n+#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)\n+#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)\n+#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << \\\n+\t\t\t\t  __mlx5_dw_bit_off(typ, fld))\n+#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))\n+#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)\n+#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - \\\n+\t\t\t\t    (__mlx5_bit_off(typ, fld) & 0xf))\n+#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))\n+#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)\n+#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)\n+#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)\n+#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))\n+\n+/* insert a value to a struct */\n+#define MLX5_SET(typ, p, fld, v) \\\n+\tdo { \\\n+\t\tu32 _v = v; \\\n+\t\t*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \\\n+\t\trte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \\\n+\t\t\t\t  __mlx5_dw_off(typ, fld))) & \\\n+\t\t\t\t  (~__mlx5_dw_mask(typ, fld))) | \\\n+\t\t\t\t (((_v) & __mlx5_mask(typ, fld)) << \\\n+\t\t\t\t   __mlx5_dw_bit_off(typ, fld))); \\\n+\t} while (0)\n+\n+#define MLX5_SET64(typ, p, fld, v) \\\n+\tdo { \\\n+\t\tassert(__mlx5_bit_sz(typ, fld) == 64); \\\n+\t\t*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = \\\n+\t\t\trte_cpu_to_be_64(v); \\\n+\t} while (0)\n+\n+#define MLX5_GET(typ, p, fld) \\\n+\t((rte_be_to_cpu_32(*((__be32 *)(p) +\\\n+\t__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \\\n+\t__mlx5_mask(typ, fld))\n+#define MLX5_GET16(typ, p, fld) \\\n+\t((rte_be_to_cpu_16(*((__be16 *)(p) + \\\n+\t  __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \\\n+\t __mlx5_mask16(typ, fld))\n+#define MLX5_GET64(typ, p, fld) rte_be_to_cpu_64(*((__be64 *)(p) + \\\n+\t\t\t\t\t\t   __mlx5_64_off(typ, fld)))\n+#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)\n+\n+struct mlx5_ifc_fte_match_set_misc_bits {\n+\tu8 gre_c_present[0x1];\n+\tu8 reserved_at_1[0x1];\n+\tu8 gre_k_present[0x1];\n+\tu8 gre_s_present[0x1];\n+\tu8 source_vhci_port[0x4];\n+\tu8 source_sqn[0x18];\n+\tu8 reserved_at_20[0x10];\n+\tu8 source_port[0x10];\n+\tu8 outer_second_prio[0x3];\n+\tu8 outer_second_cfi[0x1];\n+\tu8 outer_second_vid[0xc];\n+\tu8 inner_second_prio[0x3];\n+\tu8 inner_second_cfi[0x1];\n+\tu8 inner_second_vid[0xc];\n+\tu8 outer_second_cvlan_tag[0x1];\n+\tu8 inner_second_cvlan_tag[0x1];\n+\tu8 
outer_second_svlan_tag[0x1];\n+\tu8 inner_second_svlan_tag[0x1];\n+\tu8 reserved_at_64[0xc];\n+\tu8 gre_protocol[0x10];\n+\tu8 gre_key_h[0x18];\n+\tu8 gre_key_l[0x8];\n+\tu8 vxlan_vni[0x18];\n+\tu8 reserved_at_b8[0x8];\n+\tu8 geneve_vni[0x18];\n+\tu8 reserved_at_e4[0x7];\n+\tu8 geneve_oam[0x1];\n+\tu8 reserved_at_e0[0xc];\n+\tu8 outer_ipv6_flow_label[0x14];\n+\tu8 reserved_at_100[0xc];\n+\tu8 inner_ipv6_flow_label[0x14];\n+\tu8 reserved_at_120[0xa];\n+\tu8 geneve_opt_len[0x6];\n+\tu8 geneve_protocol_type[0x10];\n+\tu8 reserved_at_140[0xc0];\n+};\n+\n+struct mlx5_ifc_ipv4_layout_bits {\n+\tu8 reserved_at_0[0x60];\n+\tu8 ipv4[0x20];\n+};\n+\n+struct mlx5_ifc_ipv6_layout_bits {\n+\tu8 ipv6[16][0x8];\n+};\n+\n+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {\n+\tstruct mlx5_ifc_ipv6_layout_bits ipv6_layout;\n+\tstruct mlx5_ifc_ipv4_layout_bits ipv4_layout;\n+\tu8 reserved_at_0[0x80];\n+};\n+\n+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {\n+\tu8 smac_47_16[0x20];\n+\tu8 smac_15_0[0x10];\n+\tu8 ethertype[0x10];\n+\tu8 dmac_47_16[0x20];\n+\tu8 dmac_15_0[0x10];\n+\tu8 first_prio[0x3];\n+\tu8 first_cfi[0x1];\n+\tu8 first_vid[0xc];\n+\tu8 ip_protocol[0x8];\n+\tu8 ip_dscp[0x6];\n+\tu8 ip_ecn[0x2];\n+\tu8 cvlan_tag[0x1];\n+\tu8 svlan_tag[0x1];\n+\tu8 frag[0x1];\n+\tu8 ip_version[0x4];\n+\tu8 tcp_flags[0x9];\n+\tu8 tcp_sport[0x10];\n+\tu8 tcp_dport[0x10];\n+\tu8 reserved_at_c0[0x20];\n+\tu8 udp_sport[0x10];\n+\tu8 udp_dport[0x10];\n+\tunion mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;\n+\tunion mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;\n+};\n+\n+struct mlx5_ifc_fte_match_mpls_bits {\n+\tu8 mpls_label[0x14];\n+\tu8 mpls_exp[0x3];\n+\tu8 mpls_s_bos[0x1];\n+\tu8 mpls_ttl[0x8];\n+};\n+\n+struct mlx5_ifc_fte_match_set_misc2_bits {\n+\tstruct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;\n+\tstruct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;\n+\tstruct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;\n+\tstruct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;\n+\tu8 metadata_reg_c_7[0x20];\n+\tu8 metadata_reg_c_6[0x20];\n+\tu8 metadata_reg_c_5[0x20];\n+\tu8 metadata_reg_c_4[0x20];\n+\tu8 metadata_reg_c_3[0x20];\n+\tu8 metadata_reg_c_2[0x20];\n+\tu8 metadata_reg_c_1[0x20];\n+\tu8 metadata_reg_c_0[0x20];\n+\tu8 metadata_reg_a[0x20];\n+\tu8 metadata_reg_b[0x20];\n+\tu8 reserved_at_1c0[0x40];\n+};\n+\n+struct mlx5_ifc_fte_match_set_misc3_bits {\n+\tu8 inner_tcp_seq_num[0x20];\n+\tu8 outer_tcp_seq_num[0x20];\n+\tu8 inner_tcp_ack_num[0x20];\n+\tu8 outer_tcp_ack_num[0x20];\n+\tu8 reserved_at_auto1[0x8];\n+\tu8 outer_vxlan_gpe_vni[0x18];\n+\tu8 outer_vxlan_gpe_next_protocol[0x8];\n+\tu8 outer_vxlan_gpe_flags[0x8];\n+\tu8 reserved_at_a8[0x10];\n+\tu8 icmp_header_data[0x20];\n+\tu8 icmpv6_header_data[0x20];\n+\tu8 icmp_type[0x8];\n+\tu8 icmp_code[0x8];\n+\tu8 icmpv6_type[0x8];\n+\tu8 icmpv6_code[0x8];\n+\tu8 reserved_at_120[0x20];\n+\tu8 gtpu_teid[0x20];\n+\tu8 gtpu_msg_type[0x08];\n+\tu8 gtpu_msg_flags[0x08];\n+\tu8 reserved_at_170[0x90];\n+};\n+\n+/* Flow matcher. 
*/\n+struct mlx5_ifc_fte_match_param_bits {\n+\tstruct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;\n+\tstruct mlx5_ifc_fte_match_set_misc_bits misc_parameters;\n+\tstruct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;\n+\tstruct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;\n+\tstruct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;\n+};\n+\n+enum {\n+\tMLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,\n+\tMLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,\n+\tMLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,\n+\tMLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT,\n+\tMLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT\n+};\n+\n+enum {\n+\tMLX5_CMD_OP_QUERY_HCA_CAP = 0x100,\n+\tMLX5_CMD_OP_CREATE_MKEY = 0x200,\n+\tMLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,\n+\tMLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,\n+\tMLX5_CMD_OP_CREATE_TIR = 0x900,\n+\tMLX5_CMD_OP_CREATE_SQ = 0X904,\n+\tMLX5_CMD_OP_MODIFY_SQ = 0X905,\n+\tMLX5_CMD_OP_CREATE_RQ = 0x908,\n+\tMLX5_CMD_OP_MODIFY_RQ = 0x909,\n+\tMLX5_CMD_OP_CREATE_TIS = 0x912,\n+\tMLX5_CMD_OP_QUERY_TIS = 0x915,\n+\tMLX5_CMD_OP_CREATE_RQT = 0x916,\n+\tMLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,\n+\tMLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,\n+};\n+\n+enum {\n+\tMLX5_MKC_ACCESS_MODE_MTT   = 0x1,\n+};\n+\n+/* Flow counters. */\n+struct mlx5_ifc_alloc_flow_counter_out_bits {\n+\tu8         status[0x8];\n+\tu8         reserved_at_8[0x18];\n+\tu8         syndrome[0x20];\n+\tu8         flow_counter_id[0x20];\n+\tu8         reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_alloc_flow_counter_in_bits {\n+\tu8         opcode[0x10];\n+\tu8         reserved_at_10[0x10];\n+\tu8         reserved_at_20[0x10];\n+\tu8         op_mod[0x10];\n+\tu8         flow_counter_id[0x20];\n+\tu8         reserved_at_40[0x18];\n+\tu8         flow_counter_bulk[0x8];\n+};\n+\n+struct mlx5_ifc_dealloc_flow_counter_out_bits {\n+\tu8         status[0x8];\n+\tu8         reserved_at_8[0x18];\n+\tu8         syndrome[0x20];\n+\tu8         reserved_at_40[0x40];\n+};\n+\n+struct mlx5_ifc_dealloc_flow_counter_in_bits {\n+\tu8         opcode[0x10];\n+\tu8         reserved_at_10[0x10];\n+\tu8         reserved_at_20[0x10];\n+\tu8         op_mod[0x10];\n+\tu8         flow_counter_id[0x20];\n+\tu8         reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_traffic_counter_bits {\n+\tu8         packets[0x40];\n+\tu8         octets[0x40];\n+};\n+\n+struct mlx5_ifc_query_flow_counter_out_bits {\n+\tu8         status[0x8];\n+\tu8         reserved_at_8[0x18];\n+\tu8         syndrome[0x20];\n+\tu8         reserved_at_40[0x40];\n+\tstruct mlx5_ifc_traffic_counter_bits flow_statistics[];\n+};\n+\n+struct mlx5_ifc_query_flow_counter_in_bits {\n+\tu8         opcode[0x10];\n+\tu8         reserved_at_10[0x10];\n+\tu8         reserved_at_20[0x10];\n+\tu8         op_mod[0x10];\n+\tu8         reserved_at_40[0x20];\n+\tu8         mkey[0x20];\n+\tu8         address[0x40];\n+\tu8         clear[0x1];\n+\tu8         dump_to_memory[0x1];\n+\tu8         num_of_counters[0x1e];\n+\tu8         flow_counter_id[0x20];\n+};\n+\n+struct mlx5_ifc_mkc_bits {\n+\tu8         reserved_at_0[0x1];\n+\tu8         free[0x1];\n+\tu8         reserved_at_2[0x1];\n+\tu8         access_mode_4_2[0x3];\n+\tu8         reserved_at_6[0x7];\n+\tu8         relaxed_ordering_write[0x1];\n+\tu8         reserved_at_e[0x1];\n+\tu8         small_fence_on_rdma_read_response[0x1];\n+\tu8         umr_en[0x1];\n+\tu8         a[0x1];\n+\tu8         rw[0x1];\n+\tu8         rr[0x1];\n+\tu8         lw[0x1];\n+\tu8         lr[0x1];\n+\tu8         access_mode_1_0[0x2];\n+\tu8         reserved_at_18[0x8];\n+\n+\tu8         
qpn[0x18];\n+\tu8         mkey_7_0[0x8];\n+\n+\tu8         reserved_at_40[0x20];\n+\n+\tu8         length64[0x1];\n+\tu8         bsf_en[0x1];\n+\tu8         sync_umr[0x1];\n+\tu8         reserved_at_63[0x2];\n+\tu8         expected_sigerr_count[0x1];\n+\tu8         reserved_at_66[0x1];\n+\tu8         en_rinval[0x1];\n+\tu8         pd[0x18];\n+\n+\tu8         start_addr[0x40];\n+\n+\tu8         len[0x40];\n+\n+\tu8         bsf_octword_size[0x20];\n+\n+\tu8         reserved_at_120[0x80];\n+\n+\tu8         translations_octword_size[0x20];\n+\n+\tu8         reserved_at_1c0[0x1b];\n+\tu8         log_page_size[0x5];\n+\n+\tu8         reserved_at_1e0[0x20];\n+};\n+\n+struct mlx5_ifc_create_mkey_out_bits {\n+\tu8         status[0x8];\n+\tu8         reserved_at_8[0x18];\n+\n+\tu8         syndrome[0x20];\n+\n+\tu8         reserved_at_40[0x8];\n+\tu8         mkey_index[0x18];\n+\n+\tu8         reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_create_mkey_in_bits {\n+\tu8         opcode[0x10];\n+\tu8         reserved_at_10[0x10];\n+\n+\tu8         reserved_at_20[0x10];\n+\tu8         op_mod[0x10];\n+\n+\tu8         reserved_at_40[0x20];\n+\n+\tu8         pg_access[0x1];\n+\tu8         reserved_at_61[0x1f];\n+\n+\tstruct mlx5_ifc_mkc_bits memory_key_mkey_entry;\n+\n+\tu8         reserved_at_280[0x80];\n+\n+\tu8         translations_octword_actual_size[0x20];\n+\n+\tu8         mkey_umem_id[0x20];\n+\n+\tu8         mkey_umem_offset[0x40];\n+\n+\tu8         reserved_at_380[0x500];\n+\n+\tu8         klm_pas_mtt[][0x20];\n+};\n+\n+enum {\n+\tMLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0 << 1,\n+\tMLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS = 0x1 << 1,\n+\tMLX5_GET_HCA_CAP_OP_MOD_QOS_CAP = 0xc << 1,\n+};\n+\n+enum {\n+\tMLX5_HCA_CAP_OPMOD_GET_MAX   = 0,\n+\tMLX5_HCA_CAP_OPMOD_GET_CUR   = 1,\n+};\n+\n+enum {\n+\tMLX5_CAP_INLINE_MODE_L2,\n+\tMLX5_CAP_INLINE_MODE_VPORT_CONTEXT,\n+\tMLX5_CAP_INLINE_MODE_NOT_REQUIRED,\n+};\n+\n+enum {\n+\tMLX5_INLINE_MODE_NONE,\n+\tMLX5_INLINE_MODE_L2,\n+\tMLX5_INLINE_MODE_IP,\n+\tMLX5_INLINE_MODE_TCP_UDP,\n+\tMLX5_INLINE_MODE_RESERVED4,\n+\tMLX5_INLINE_MODE_INNER_L2,\n+\tMLX5_INLINE_MODE_INNER_IP,\n+\tMLX5_INLINE_MODE_INNER_TCP_UDP,\n+};\n+\n+/* HCA bit masks indicating which Flex parser protocols are already enabled. 
*/\n+#define MLX5_HCA_FLEX_IPV4_OVER_VXLAN_ENABLED (1UL << 0)\n+#define MLX5_HCA_FLEX_IPV6_OVER_VXLAN_ENABLED (1UL << 1)\n+#define MLX5_HCA_FLEX_IPV6_OVER_IP_ENABLED (1UL << 2)\n+#define MLX5_HCA_FLEX_GENEVE_ENABLED (1UL << 3)\n+#define MLX5_HCA_FLEX_CW_MPLS_OVER_GRE_ENABLED (1UL << 4)\n+#define MLX5_HCA_FLEX_CW_MPLS_OVER_UDP_ENABLED (1UL << 5)\n+#define MLX5_HCA_FLEX_P_BIT_VXLAN_GPE_ENABLED (1UL << 6)\n+#define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7)\n+#define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8)\n+#define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9)\n+\n+struct mlx5_ifc_cmd_hca_cap_bits {\n+\tu8 reserved_at_0[0x30];\n+\tu8 vhca_id[0x10];\n+\tu8 reserved_at_40[0x40];\n+\tu8 log_max_srq_sz[0x8];\n+\tu8 log_max_qp_sz[0x8];\n+\tu8 reserved_at_90[0xb];\n+\tu8 log_max_qp[0x5];\n+\tu8 reserved_at_a0[0xb];\n+\tu8 log_max_srq[0x5];\n+\tu8 reserved_at_b0[0x10];\n+\tu8 reserved_at_c0[0x8];\n+\tu8 log_max_cq_sz[0x8];\n+\tu8 reserved_at_d0[0xb];\n+\tu8 log_max_cq[0x5];\n+\tu8 log_max_eq_sz[0x8];\n+\tu8 reserved_at_e8[0x2];\n+\tu8 log_max_mkey[0x6];\n+\tu8 reserved_at_f0[0x8];\n+\tu8 dump_fill_mkey[0x1];\n+\tu8 reserved_at_f9[0x3];\n+\tu8 log_max_eq[0x4];\n+\tu8 max_indirection[0x8];\n+\tu8 fixed_buffer_size[0x1];\n+\tu8 log_max_mrw_sz[0x7];\n+\tu8 force_teardown[0x1];\n+\tu8 reserved_at_111[0x1];\n+\tu8 log_max_bsf_list_size[0x6];\n+\tu8 umr_extended_translation_offset[0x1];\n+\tu8 null_mkey[0x1];\n+\tu8 log_max_klm_list_size[0x6];\n+\tu8 reserved_at_120[0xa];\n+\tu8 log_max_ra_req_dc[0x6];\n+\tu8 reserved_at_130[0xa];\n+\tu8 log_max_ra_res_dc[0x6];\n+\tu8 reserved_at_140[0xa];\n+\tu8 log_max_ra_req_qp[0x6];\n+\tu8 reserved_at_150[0xa];\n+\tu8 log_max_ra_res_qp[0x6];\n+\tu8 end_pad[0x1];\n+\tu8 cc_query_allowed[0x1];\n+\tu8 cc_modify_allowed[0x1];\n+\tu8 start_pad[0x1];\n+\tu8 cache_line_128byte[0x1];\n+\tu8 reserved_at_165[0xa];\n+\tu8 qcam_reg[0x1];\n+\tu8 gid_table_size[0x10];\n+\tu8 out_of_seq_cnt[0x1];\n+\tu8 vport_counters[0x1];\n+\tu8 retransmission_q_counters[0x1];\n+\tu8 debug[0x1];\n+\tu8 modify_rq_counter_set_id[0x1];\n+\tu8 rq_delay_drop[0x1];\n+\tu8 max_qp_cnt[0xa];\n+\tu8 pkey_table_size[0x10];\n+\tu8 vport_group_manager[0x1];\n+\tu8 vhca_group_manager[0x1];\n+\tu8 ib_virt[0x1];\n+\tu8 eth_virt[0x1];\n+\tu8 vnic_env_queue_counters[0x1];\n+\tu8 ets[0x1];\n+\tu8 nic_flow_table[0x1];\n+\tu8 eswitch_manager[0x1];\n+\tu8 device_memory[0x1];\n+\tu8 mcam_reg[0x1];\n+\tu8 pcam_reg[0x1];\n+\tu8 local_ca_ack_delay[0x5];\n+\tu8 port_module_event[0x1];\n+\tu8 enhanced_error_q_counters[0x1];\n+\tu8 ports_check[0x1];\n+\tu8 reserved_at_1b3[0x1];\n+\tu8 disable_link_up[0x1];\n+\tu8 beacon_led[0x1];\n+\tu8 port_type[0x2];\n+\tu8 num_ports[0x8];\n+\tu8 reserved_at_1c0[0x1];\n+\tu8 pps[0x1];\n+\tu8 pps_modify[0x1];\n+\tu8 log_max_msg[0x5];\n+\tu8 reserved_at_1c8[0x4];\n+\tu8 max_tc[0x4];\n+\tu8 temp_warn_event[0x1];\n+\tu8 dcbx[0x1];\n+\tu8 general_notification_event[0x1];\n+\tu8 reserved_at_1d3[0x2];\n+\tu8 fpga[0x1];\n+\tu8 rol_s[0x1];\n+\tu8 rol_g[0x1];\n+\tu8 reserved_at_1d8[0x1];\n+\tu8 wol_s[0x1];\n+\tu8 wol_g[0x1];\n+\tu8 wol_a[0x1];\n+\tu8 wol_b[0x1];\n+\tu8 wol_m[0x1];\n+\tu8 wol_u[0x1];\n+\tu8 wol_p[0x1];\n+\tu8 stat_rate_support[0x10];\n+\tu8 reserved_at_1f0[0xc];\n+\tu8 cqe_version[0x4];\n+\tu8 compact_address_vector[0x1];\n+\tu8 striding_rq[0x1];\n+\tu8 reserved_at_202[0x1];\n+\tu8 ipoib_enhanced_offloads[0x1];\n+\tu8 ipoib_basic_offloads[0x1];\n+\tu8 reserved_at_205[0x1];\n+\tu8 repeated_block_disabled[0x1];\n+\tu8 umr_modify_entity_size_disabled[0x1];\n+\tu8 
umr_modify_atomic_disabled[0x1];\n+\tu8 umr_indirect_mkey_disabled[0x1];\n+\tu8 umr_fence[0x2];\n+\tu8 reserved_at_20c[0x3];\n+\tu8 drain_sigerr[0x1];\n+\tu8 cmdif_checksum[0x2];\n+\tu8 sigerr_cqe[0x1];\n+\tu8 reserved_at_213[0x1];\n+\tu8 wq_signature[0x1];\n+\tu8 sctr_data_cqe[0x1];\n+\tu8 reserved_at_216[0x1];\n+\tu8 sho[0x1];\n+\tu8 tph[0x1];\n+\tu8 rf[0x1];\n+\tu8 dct[0x1];\n+\tu8 qos[0x1];\n+\tu8 eth_net_offloads[0x1];\n+\tu8 roce[0x1];\n+\tu8 atomic[0x1];\n+\tu8 reserved_at_21f[0x1];\n+\tu8 cq_oi[0x1];\n+\tu8 cq_resize[0x1];\n+\tu8 cq_moderation[0x1];\n+\tu8 reserved_at_223[0x3];\n+\tu8 cq_eq_remap[0x1];\n+\tu8 pg[0x1];\n+\tu8 block_lb_mc[0x1];\n+\tu8 reserved_at_229[0x1];\n+\tu8 scqe_break_moderation[0x1];\n+\tu8 cq_period_start_from_cqe[0x1];\n+\tu8 cd[0x1];\n+\tu8 reserved_at_22d[0x1];\n+\tu8 apm[0x1];\n+\tu8 vector_calc[0x1];\n+\tu8 umr_ptr_rlky[0x1];\n+\tu8 imaicl[0x1];\n+\tu8 reserved_at_232[0x4];\n+\tu8 qkv[0x1];\n+\tu8 pkv[0x1];\n+\tu8 set_deth_sqpn[0x1];\n+\tu8 reserved_at_239[0x3];\n+\tu8 xrc[0x1];\n+\tu8 ud[0x1];\n+\tu8 uc[0x1];\n+\tu8 rc[0x1];\n+\tu8 uar_4k[0x1];\n+\tu8 reserved_at_241[0x9];\n+\tu8 uar_sz[0x6];\n+\tu8 reserved_at_250[0x8];\n+\tu8 log_pg_sz[0x8];\n+\tu8 bf[0x1];\n+\tu8 driver_version[0x1];\n+\tu8 pad_tx_eth_packet[0x1];\n+\tu8 reserved_at_263[0x8];\n+\tu8 log_bf_reg_size[0x5];\n+\tu8 reserved_at_270[0xb];\n+\tu8 lag_master[0x1];\n+\tu8 num_lag_ports[0x4];\n+\tu8 reserved_at_280[0x10];\n+\tu8 max_wqe_sz_sq[0x10];\n+\tu8 reserved_at_2a0[0x10];\n+\tu8 max_wqe_sz_rq[0x10];\n+\tu8 max_flow_counter_31_16[0x10];\n+\tu8 max_wqe_sz_sq_dc[0x10];\n+\tu8 reserved_at_2e0[0x7];\n+\tu8 max_qp_mcg[0x19];\n+\tu8 reserved_at_300[0x10];\n+\tu8 flow_counter_bulk_alloc[0x08];\n+\tu8 log_max_mcg[0x8];\n+\tu8 reserved_at_320[0x3];\n+\tu8 log_max_transport_domain[0x5];\n+\tu8 reserved_at_328[0x3];\n+\tu8 log_max_pd[0x5];\n+\tu8 reserved_at_330[0xb];\n+\tu8 log_max_xrcd[0x5];\n+\tu8 nic_receive_steering_discard[0x1];\n+\tu8 receive_discard_vport_down[0x1];\n+\tu8 transmit_discard_vport_down[0x1];\n+\tu8 reserved_at_343[0x5];\n+\tu8 log_max_flow_counter_bulk[0x8];\n+\tu8 max_flow_counter_15_0[0x10];\n+\tu8 modify_tis[0x1];\n+\tu8 flow_counters_dump[0x1];\n+\tu8 reserved_at_360[0x1];\n+\tu8 log_max_rq[0x5];\n+\tu8 reserved_at_368[0x3];\n+\tu8 log_max_sq[0x5];\n+\tu8 reserved_at_370[0x3];\n+\tu8 log_max_tir[0x5];\n+\tu8 reserved_at_378[0x3];\n+\tu8 log_max_tis[0x5];\n+\tu8 basic_cyclic_rcv_wqe[0x1];\n+\tu8 reserved_at_381[0x2];\n+\tu8 log_max_rmp[0x5];\n+\tu8 reserved_at_388[0x3];\n+\tu8 log_max_rqt[0x5];\n+\tu8 reserved_at_390[0x3];\n+\tu8 log_max_rqt_size[0x5];\n+\tu8 reserved_at_398[0x3];\n+\tu8 log_max_tis_per_sq[0x5];\n+\tu8 ext_stride_num_range[0x1];\n+\tu8 reserved_at_3a1[0x2];\n+\tu8 log_max_stride_sz_rq[0x5];\n+\tu8 reserved_at_3a8[0x3];\n+\tu8 log_min_stride_sz_rq[0x5];\n+\tu8 reserved_at_3b0[0x3];\n+\tu8 log_max_stride_sz_sq[0x5];\n+\tu8 reserved_at_3b8[0x3];\n+\tu8 log_min_stride_sz_sq[0x5];\n+\tu8 hairpin[0x1];\n+\tu8 reserved_at_3c1[0x2];\n+\tu8 log_max_hairpin_queues[0x5];\n+\tu8 reserved_at_3c8[0x3];\n+\tu8 log_max_hairpin_wq_data_sz[0x5];\n+\tu8 reserved_at_3d0[0x3];\n+\tu8 log_max_hairpin_num_packets[0x5];\n+\tu8 reserved_at_3d8[0x3];\n+\tu8 log_max_wq_sz[0x5];\n+\tu8 nic_vport_change_event[0x1];\n+\tu8 disable_local_lb_uc[0x1];\n+\tu8 disable_local_lb_mc[0x1];\n+\tu8 log_min_hairpin_wq_data_sz[0x5];\n+\tu8 reserved_at_3e8[0x3];\n+\tu8 log_max_vlan_list[0x5];\n+\tu8 reserved_at_3f0[0x3];\n+\tu8 log_max_current_mc_list[0x5];\n+\tu8 reserved_at_3f8[0x3];\n+\tu8 
log_max_current_uc_list[0x5];\n+\tu8 general_obj_types[0x40];\n+\tu8 reserved_at_440[0x20];\n+\tu8 reserved_at_460[0x10];\n+\tu8 max_num_eqs[0x10];\n+\tu8 reserved_at_480[0x3];\n+\tu8 log_max_l2_table[0x5];\n+\tu8 reserved_at_488[0x8];\n+\tu8 log_uar_page_sz[0x10];\n+\tu8 reserved_at_4a0[0x20];\n+\tu8 device_frequency_mhz[0x20];\n+\tu8 device_frequency_khz[0x20];\n+\tu8 reserved_at_500[0x20];\n+\tu8 num_of_uars_per_page[0x20];\n+\tu8 flex_parser_protocols[0x20];\n+\tu8 reserved_at_560[0x20];\n+\tu8 reserved_at_580[0x3c];\n+\tu8 mini_cqe_resp_stride_index[0x1];\n+\tu8 cqe_128_always[0x1];\n+\tu8 cqe_compression_128[0x1];\n+\tu8 cqe_compression[0x1];\n+\tu8 cqe_compression_timeout[0x10];\n+\tu8 cqe_compression_max_num[0x10];\n+\tu8 reserved_at_5e0[0x10];\n+\tu8 tag_matching[0x1];\n+\tu8 rndv_offload_rc[0x1];\n+\tu8 rndv_offload_dc[0x1];\n+\tu8 log_tag_matching_list_sz[0x5];\n+\tu8 reserved_at_5f8[0x3];\n+\tu8 log_max_xrq[0x5];\n+\tu8 affiliate_nic_vport_criteria[0x8];\n+\tu8 native_port_num[0x8];\n+\tu8 num_vhca_ports[0x8];\n+\tu8 reserved_at_618[0x6];\n+\tu8 sw_owner_id[0x1];\n+\tu8 reserved_at_61f[0x1e1];\n+};\n+\n+struct mlx5_ifc_qos_cap_bits {\n+\tu8 packet_pacing[0x1];\n+\tu8 esw_scheduling[0x1];\n+\tu8 esw_bw_share[0x1];\n+\tu8 esw_rate_limit[0x1];\n+\tu8 reserved_at_4[0x1];\n+\tu8 packet_pacing_burst_bound[0x1];\n+\tu8 packet_pacing_typical_size[0x1];\n+\tu8 flow_meter_srtcm[0x1];\n+\tu8 reserved_at_8[0x8];\n+\tu8 log_max_flow_meter[0x8];\n+\tu8 flow_meter_reg_id[0x8];\n+\tu8 reserved_at_25[0x20];\n+\tu8 packet_pacing_max_rate[0x20];\n+\tu8 packet_pacing_min_rate[0x20];\n+\tu8 reserved_at_80[0x10];\n+\tu8 packet_pacing_rate_table_size[0x10];\n+\tu8 esw_element_type[0x10];\n+\tu8 esw_tsar_type[0x10];\n+\tu8 reserved_at_c0[0x10];\n+\tu8 max_qos_para_vport[0x10];\n+\tu8 max_tsar_bw_share[0x20];\n+\tu8 reserved_at_100[0x6e8];\n+};\n+\n+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {\n+\tu8 csum_cap[0x1];\n+\tu8 vlan_cap[0x1];\n+\tu8 lro_cap[0x1];\n+\tu8 lro_psh_flag[0x1];\n+\tu8 lro_time_stamp[0x1];\n+\tu8 lro_max_msg_sz_mode[0x2];\n+\tu8 wqe_vlan_insert[0x1];\n+\tu8 self_lb_en_modifiable[0x1];\n+\tu8 self_lb_mc[0x1];\n+\tu8 self_lb_uc[0x1];\n+\tu8 max_lso_cap[0x5];\n+\tu8 multi_pkt_send_wqe[0x2];\n+\tu8 wqe_inline_mode[0x2];\n+\tu8 rss_ind_tbl_cap[0x4];\n+\tu8 reg_umr_sq[0x1];\n+\tu8 scatter_fcs[0x1];\n+\tu8 enhanced_multi_pkt_send_wqe[0x1];\n+\tu8 tunnel_lso_const_out_ip_id[0x1];\n+\tu8 tunnel_lro_gre[0x1];\n+\tu8 tunnel_lro_vxlan[0x1];\n+\tu8 tunnel_stateless_gre[0x1];\n+\tu8 tunnel_stateless_vxlan[0x1];\n+\tu8 swp[0x1];\n+\tu8 swp_csum[0x1];\n+\tu8 swp_lso[0x1];\n+\tu8 reserved_at_23[0x8];\n+\tu8 tunnel_stateless_gtp[0x1];\n+\tu8 reserved_at_25[0x4];\n+\tu8 max_vxlan_udp_ports[0x8];\n+\tu8 reserved_at_38[0x6];\n+\tu8 max_geneve_opt_len[0x1];\n+\tu8 tunnel_stateless_geneve_rx[0x1];\n+\tu8 reserved_at_40[0x10];\n+\tu8 lro_min_mss_size[0x10];\n+\tu8 reserved_at_60[0x120];\n+\tu8 lro_timer_supported_periods[4][0x20];\n+\tu8 reserved_at_200[0x600];\n+};\n+\n+union mlx5_ifc_hca_cap_union_bits {\n+\tstruct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;\n+\tstruct mlx5_ifc_per_protocol_networking_offload_caps_bits\n+\t       per_protocol_networking_offload_caps;\n+\tstruct mlx5_ifc_qos_cap_bits qos_cap;\n+\tu8 reserved_at_0[0x8000];\n+};\n+\n+struct mlx5_ifc_query_hca_cap_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x40];\n+\tunion mlx5_ifc_hca_cap_union_bits capability;\n+};\n+\n+struct mlx5_ifc_query_hca_cap_in_bits {\n+\tu8 
opcode[0x10];\n+\tu8 reserved_at_10[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0x40];\n+};\n+\n+struct mlx5_ifc_mac_address_layout_bits {\n+\tu8 reserved_at_0[0x10];\n+\tu8 mac_addr_47_32[0x10];\n+\tu8 mac_addr_31_0[0x20];\n+};\n+\n+struct mlx5_ifc_nic_vport_context_bits {\n+\tu8 reserved_at_0[0x5];\n+\tu8 min_wqe_inline_mode[0x3];\n+\tu8 reserved_at_8[0x15];\n+\tu8 disable_mc_local_lb[0x1];\n+\tu8 disable_uc_local_lb[0x1];\n+\tu8 roce_en[0x1];\n+\tu8 arm_change_event[0x1];\n+\tu8 reserved_at_21[0x1a];\n+\tu8 event_on_mtu[0x1];\n+\tu8 event_on_promisc_change[0x1];\n+\tu8 event_on_vlan_change[0x1];\n+\tu8 event_on_mc_address_change[0x1];\n+\tu8 event_on_uc_address_change[0x1];\n+\tu8 reserved_at_40[0xc];\n+\tu8 affiliation_criteria[0x4];\n+\tu8 affiliated_vhca_id[0x10];\n+\tu8 reserved_at_60[0xd0];\n+\tu8 mtu[0x10];\n+\tu8 system_image_guid[0x40];\n+\tu8 port_guid[0x40];\n+\tu8 node_guid[0x40];\n+\tu8 reserved_at_200[0x140];\n+\tu8 qkey_violation_counter[0x10];\n+\tu8 reserved_at_350[0x430];\n+\tu8 promisc_uc[0x1];\n+\tu8 promisc_mc[0x1];\n+\tu8 promisc_all[0x1];\n+\tu8 reserved_at_783[0x2];\n+\tu8 allowed_list_type[0x3];\n+\tu8 reserved_at_788[0xc];\n+\tu8 allowed_list_size[0xc];\n+\tstruct mlx5_ifc_mac_address_layout_bits permanent_address;\n+\tu8 reserved_at_7e0[0x20];\n+};\n+\n+struct mlx5_ifc_query_nic_vport_context_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x40];\n+\tstruct mlx5_ifc_nic_vport_context_bits nic_vport_context;\n+};\n+\n+struct mlx5_ifc_query_nic_vport_context_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 reserved_at_10[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 other_vport[0x1];\n+\tu8 reserved_at_41[0xf];\n+\tu8 vport_number[0x10];\n+\tu8 reserved_at_60[0x5];\n+\tu8 allowed_list_type[0x3];\n+\tu8 reserved_at_68[0x18];\n+};\n+\n+struct mlx5_ifc_tisc_bits {\n+\tu8 strict_lag_tx_port_affinity[0x1];\n+\tu8 reserved_at_1[0x3];\n+\tu8 lag_tx_port_affinity[0x04];\n+\tu8 reserved_at_8[0x4];\n+\tu8 prio[0x4];\n+\tu8 reserved_at_10[0x10];\n+\tu8 reserved_at_20[0x100];\n+\tu8 reserved_at_120[0x8];\n+\tu8 transport_domain[0x18];\n+\tu8 reserved_at_140[0x8];\n+\tu8 underlay_qpn[0x18];\n+\tu8 reserved_at_160[0x3a0];\n+};\n+\n+struct mlx5_ifc_query_tis_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x40];\n+\tstruct mlx5_ifc_tisc_bits tis_context;\n+};\n+\n+struct mlx5_ifc_query_tis_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 reserved_at_10[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0x8];\n+\tu8 tisn[0x18];\n+\tu8 reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_alloc_transport_domain_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x8];\n+\tu8 transport_domain[0x18];\n+\tu8 reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_alloc_transport_domain_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 reserved_at_10[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0x40];\n+};\n+\n+enum {\n+\tMLX5_WQ_TYPE_LINKED_LIST                = 0x0,\n+\tMLX5_WQ_TYPE_CYCLIC                     = 0x1,\n+\tMLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ    = 0x2,\n+\tMLX5_WQ_TYPE_CYCLIC_STRIDING_RQ         = 0x3,\n+};\n+\n+enum {\n+\tMLX5_WQ_END_PAD_MODE_NONE  = 0x0,\n+\tMLX5_WQ_END_PAD_MODE_ALIGN = 0x1,\n+};\n+\n+struct mlx5_ifc_wq_bits {\n+\tu8 wq_type[0x4];\n+\tu8 wq_signature[0x1];\n+\tu8 end_padding_mode[0x2];\n+\tu8 cd_slave[0x1];\n+\tu8 
reserved_at_8[0x18];\n+\tu8 hds_skip_first_sge[0x1];\n+\tu8 log2_hds_buf_size[0x3];\n+\tu8 reserved_at_24[0x7];\n+\tu8 page_offset[0x5];\n+\tu8 lwm[0x10];\n+\tu8 reserved_at_40[0x8];\n+\tu8 pd[0x18];\n+\tu8 reserved_at_60[0x8];\n+\tu8 uar_page[0x18];\n+\tu8 dbr_addr[0x40];\n+\tu8 hw_counter[0x20];\n+\tu8 sw_counter[0x20];\n+\tu8 reserved_at_100[0xc];\n+\tu8 log_wq_stride[0x4];\n+\tu8 reserved_at_110[0x3];\n+\tu8 log_wq_pg_sz[0x5];\n+\tu8 reserved_at_118[0x3];\n+\tu8 log_wq_sz[0x5];\n+\tu8 dbr_umem_valid[0x1];\n+\tu8 wq_umem_valid[0x1];\n+\tu8 reserved_at_122[0x1];\n+\tu8 log_hairpin_num_packets[0x5];\n+\tu8 reserved_at_128[0x3];\n+\tu8 log_hairpin_data_sz[0x5];\n+\tu8 reserved_at_130[0x4];\n+\tu8 single_wqe_log_num_of_strides[0x4];\n+\tu8 two_byte_shift_en[0x1];\n+\tu8 reserved_at_139[0x4];\n+\tu8 single_stride_log_num_of_bytes[0x3];\n+\tu8 dbr_umem_id[0x20];\n+\tu8 wq_umem_id[0x20];\n+\tu8 wq_umem_offset[0x40];\n+\tu8 reserved_at_1c0[0x440];\n+};\n+\n+enum {\n+\tMLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE  = 0x0,\n+\tMLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP     = 0x1,\n+};\n+\n+enum {\n+\tMLX5_RQC_STATE_RST  = 0x0,\n+\tMLX5_RQC_STATE_RDY  = 0x1,\n+\tMLX5_RQC_STATE_ERR  = 0x3,\n+};\n+\n+struct mlx5_ifc_rqc_bits {\n+\tu8 rlky[0x1];\n+\tu8 delay_drop_en[0x1];\n+\tu8 scatter_fcs[0x1];\n+\tu8 vsd[0x1];\n+\tu8 mem_rq_type[0x4];\n+\tu8 state[0x4];\n+\tu8 reserved_at_c[0x1];\n+\tu8 flush_in_error_en[0x1];\n+\tu8 hairpin[0x1];\n+\tu8 reserved_at_f[0x11];\n+\tu8 reserved_at_20[0x8];\n+\tu8 user_index[0x18];\n+\tu8 reserved_at_40[0x8];\n+\tu8 cqn[0x18];\n+\tu8 counter_set_id[0x8];\n+\tu8 reserved_at_68[0x18];\n+\tu8 reserved_at_80[0x8];\n+\tu8 rmpn[0x18];\n+\tu8 reserved_at_a0[0x8];\n+\tu8 hairpin_peer_sq[0x18];\n+\tu8 reserved_at_c0[0x10];\n+\tu8 hairpin_peer_vhca[0x10];\n+\tu8 reserved_at_e0[0xa0];\n+\tstruct mlx5_ifc_wq_bits wq; /* Not used in LRO RQ. 
*/\n+};\n+\n+struct mlx5_ifc_create_rq_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x8];\n+\tu8 rqn[0x18];\n+\tu8 reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_create_rq_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 uid[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0xc0];\n+\tstruct mlx5_ifc_rqc_bits ctx;\n+};\n+\n+struct mlx5_ifc_modify_rq_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x40];\n+};\n+\n+struct mlx5_ifc_create_tis_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x8];\n+\tu8 tisn[0x18];\n+\tu8 reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_create_tis_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 uid[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0xc0];\n+\tstruct mlx5_ifc_tisc_bits ctx;\n+};\n+\n+enum {\n+\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM = 1ULL << 0,\n+\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,\n+\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,\n+\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,\n+};\n+\n+struct mlx5_ifc_modify_rq_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 uid[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 rq_state[0x4];\n+\tu8 reserved_at_44[0x4];\n+\tu8 rqn[0x18];\n+\tu8 reserved_at_60[0x20];\n+\tu8 modify_bitmask[0x40];\n+\tu8 reserved_at_c0[0x40];\n+\tstruct mlx5_ifc_rqc_bits ctx;\n+};\n+\n+enum {\n+\tMLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP     = 0x0,\n+\tMLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP     = 0x1,\n+\tMLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT   = 0x2,\n+\tMLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT   = 0x3,\n+\tMLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI  = 0x4,\n+};\n+\n+struct mlx5_ifc_rx_hash_field_select_bits {\n+\tu8 l3_prot_type[0x1];\n+\tu8 l4_prot_type[0x1];\n+\tu8 selected_fields[0x1e];\n+};\n+\n+enum {\n+\tMLX5_TIRC_DISP_TYPE_DIRECT    = 0x0,\n+\tMLX5_TIRC_DISP_TYPE_INDIRECT  = 0x1,\n+};\n+\n+enum {\n+\tMLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO  = 0x1,\n+\tMLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO  = 0x2,\n+};\n+\n+enum {\n+\tMLX5_RX_HASH_FN_NONE           = 0x0,\n+\tMLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,\n+\tMLX5_RX_HASH_FN_TOEPLITZ       = 0x2,\n+};\n+\n+enum {\n+\tMLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST    = 0x1,\n+\tMLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST  = 0x2,\n+};\n+\n+enum {\n+\tMLX5_LRO_MAX_MSG_SIZE_START_FROM_L4    = 0x0,\n+\tMLX5_LRO_MAX_MSG_SIZE_START_FROM_L2  = 0x1,\n+};\n+\n+struct mlx5_ifc_tirc_bits {\n+\tu8 reserved_at_0[0x20];\n+\tu8 disp_type[0x4];\n+\tu8 reserved_at_24[0x1c];\n+\tu8 reserved_at_40[0x40];\n+\tu8 reserved_at_80[0x4];\n+\tu8 lro_timeout_period_usecs[0x10];\n+\tu8 lro_enable_mask[0x4];\n+\tu8 lro_max_msg_sz[0x8];\n+\tu8 reserved_at_a0[0x40];\n+\tu8 reserved_at_e0[0x8];\n+\tu8 inline_rqn[0x18];\n+\tu8 rx_hash_symmetric[0x1];\n+\tu8 reserved_at_101[0x1];\n+\tu8 tunneled_offload_en[0x1];\n+\tu8 reserved_at_103[0x5];\n+\tu8 indirect_table[0x18];\n+\tu8 rx_hash_fn[0x4];\n+\tu8 reserved_at_124[0x2];\n+\tu8 self_lb_block[0x2];\n+\tu8 transport_domain[0x18];\n+\tu8 rx_hash_toeplitz_key[10][0x20];\n+\tstruct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;\n+\tstruct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;\n+\tu8 reserved_at_2c0[0x4c0];\n+};\n+\n+struct mlx5_ifc_create_tir_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 
reserved_at_40[0x8];\n+\tu8 tirn[0x18];\n+\tu8 reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_create_tir_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 uid[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0xc0];\n+\tstruct mlx5_ifc_tirc_bits ctx;\n+};\n+\n+struct mlx5_ifc_rq_num_bits {\n+\tu8 reserved_at_0[0x8];\n+\tu8 rq_num[0x18];\n+};\n+\n+struct mlx5_ifc_rqtc_bits {\n+\tu8 reserved_at_0[0xa0];\n+\tu8 reserved_at_a0[0x10];\n+\tu8 rqt_max_size[0x10];\n+\tu8 reserved_at_c0[0x10];\n+\tu8 rqt_actual_size[0x10];\n+\tu8 reserved_at_e0[0x6a0];\n+\tstruct mlx5_ifc_rq_num_bits rq_num[];\n+};\n+\n+struct mlx5_ifc_create_rqt_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x8];\n+\tu8 rqtn[0x18];\n+\tu8 reserved_at_60[0x20];\n+};\n+\n+#ifdef PEDANTIC\n+#pragma GCC diagnostic ignored \"-Wpedantic\"\n+#endif\n+struct mlx5_ifc_create_rqt_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 uid[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0xc0];\n+\tstruct mlx5_ifc_rqtc_bits rqt_context;\n+};\n+#ifdef PEDANTIC\n+#pragma GCC diagnostic error \"-Wpedantic\"\n+#endif\n+\n+enum {\n+\tMLX5_SQC_STATE_RST  = 0x0,\n+\tMLX5_SQC_STATE_RDY  = 0x1,\n+\tMLX5_SQC_STATE_ERR  = 0x3,\n+};\n+\n+struct mlx5_ifc_sqc_bits {\n+\tu8 rlky[0x1];\n+\tu8 cd_master[0x1];\n+\tu8 fre[0x1];\n+\tu8 flush_in_error_en[0x1];\n+\tu8 allow_multi_pkt_send_wqe[0x1];\n+\tu8 min_wqe_inline_mode[0x3];\n+\tu8 state[0x4];\n+\tu8 reg_umr[0x1];\n+\tu8 allow_swp[0x1];\n+\tu8 hairpin[0x1];\n+\tu8 reserved_at_f[0x11];\n+\tu8 reserved_at_20[0x8];\n+\tu8 user_index[0x18];\n+\tu8 reserved_at_40[0x8];\n+\tu8 cqn[0x18];\n+\tu8 reserved_at_60[0x8];\n+\tu8 hairpin_peer_rq[0x18];\n+\tu8 reserved_at_80[0x10];\n+\tu8 hairpin_peer_vhca[0x10];\n+\tu8 reserved_at_a0[0x50];\n+\tu8 packet_pacing_rate_limit_index[0x10];\n+\tu8 tis_lst_sz[0x10];\n+\tu8 reserved_at_110[0x10];\n+\tu8 reserved_at_120[0x40];\n+\tu8 reserved_at_160[0x8];\n+\tu8 tis_num_0[0x18];\n+\tstruct mlx5_ifc_wq_bits wq;\n+};\n+\n+struct mlx5_ifc_query_sq_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 reserved_at_10[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0x8];\n+\tu8 sqn[0x18];\n+\tu8 reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_modify_sq_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x40];\n+};\n+\n+struct mlx5_ifc_modify_sq_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 uid[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 sq_state[0x4];\n+\tu8 reserved_at_44[0x4];\n+\tu8 sqn[0x18];\n+\tu8 reserved_at_60[0x20];\n+\tu8 modify_bitmask[0x40];\n+\tu8 reserved_at_c0[0x40];\n+\tstruct mlx5_ifc_sqc_bits ctx;\n+};\n+\n+struct mlx5_ifc_create_sq_out_bits {\n+\tu8 status[0x8];\n+\tu8 reserved_at_8[0x18];\n+\tu8 syndrome[0x20];\n+\tu8 reserved_at_40[0x8];\n+\tu8 sqn[0x18];\n+\tu8 reserved_at_60[0x20];\n+};\n+\n+struct mlx5_ifc_create_sq_in_bits {\n+\tu8 opcode[0x10];\n+\tu8 uid[0x10];\n+\tu8 reserved_at_20[0x10];\n+\tu8 op_mod[0x10];\n+\tu8 reserved_at_40[0xc0];\n+\tstruct mlx5_ifc_sqc_bits ctx;\n+};\n+\n+enum {\n+\tMLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE = (1ULL << 0),\n+\tMLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS = (1ULL << 1),\n+\tMLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR = (1ULL << 2),\n+\tMLX5_FLOW_METER_OBJ_MODIFY_FIELD_EBS = (1ULL << 3),\n+\tMLX5_FLOW_METER_OBJ_MODIFY_FIELD_EIR = (1ULL << 4),\n+};\n+\n+struct mlx5_ifc_flow_meter_parameters_bits {\n+\tu8         valid[0x1];\t\t\t// 00h\n+\tu8         
bucket_overflow[0x1];\n+\tu8         start_color[0x2];\n+\tu8         both_buckets_on_green[0x1];\n+\tu8         meter_mode[0x2];\n+\tu8         reserved_at_1[0x19];\n+\tu8         reserved_at_2[0x20]; //04h\n+\tu8         reserved_at_3[0x3];\n+\tu8         cbs_exponent[0x5];\t\t// 08h\n+\tu8         cbs_mantissa[0x8];\n+\tu8         reserved_at_4[0x3];\n+\tu8         cir_exponent[0x5];\n+\tu8         cir_mantissa[0x8];\n+\tu8         reserved_at_5[0x20];\t\t// 0Ch\n+\tu8         reserved_at_6[0x3];\n+\tu8         ebs_exponent[0x5];\t\t// 10h\n+\tu8         ebs_mantissa[0x8];\n+\tu8         reserved_at_7[0x3];\n+\tu8         eir_exponent[0x5];\n+\tu8         eir_mantissa[0x8];\n+\tu8         reserved_at_8[0x60];\t\t// 14h-1Ch\n+};\n+\n+/* CQE format mask. */\n+#define MLX5E_CQE_FORMAT_MASK 0xc\n+\n+/* MPW opcode. */\n+#define MLX5_OPC_MOD_MPW 0x01\n+\n+/* Compressed Rx CQE structure. */\n+struct mlx5_mini_cqe8 {\n+\tunion {\n+\t\tuint32_t rx_hash_result;\n+\t\tstruct {\n+\t\t\tuint16_t checksum;\n+\t\t\tuint16_t stride_idx;\n+\t\t};\n+\t\tstruct {\n+\t\t\tuint16_t wqe_counter;\n+\t\t\tuint8_t  s_wqe_opcode;\n+\t\t\tuint8_t  reserved;\n+\t\t} s_wqe_info;\n+\t};\n+\tuint32_t byte_cnt;\n+};\n+\n+/* srTCM PRM flow meter parameters. */\n+enum {\n+\tMLX5_FLOW_COLOR_RED = 0,\n+\tMLX5_FLOW_COLOR_YELLOW,\n+\tMLX5_FLOW_COLOR_GREEN,\n+\tMLX5_FLOW_COLOR_UNDEFINED,\n+};\n+\n+/* Maximum value of srTCM metering parameters. */\n+#define MLX5_SRTCM_CBS_MAX (0xFF * (1ULL << 0x1F))\n+#define MLX5_SRTCM_CIR_MAX (8 * (1ULL << 30) * 0xFF)\n+#define MLX5_SRTCM_EBS_MAX 0\n+\n+/**\n+ * Convert a user mark to flow mark.\n+ *\n+ * @param val\n+ *   Mark value to convert.\n+ *\n+ * @return\n+ *   Converted mark value.\n+ */\n+static inline uint32_t\n+mlx5_flow_mark_set(uint32_t val)\n+{\n+\tuint32_t ret;\n+\n+\t/*\n+\t * Add one to the user value to differentiate un-marked flows from\n+\t * marked flows, if the ID is equal to MLX5_FLOW_MARK_DEFAULT it\n+\t * remains untouched.\n+\t */\n+\tif (val != MLX5_FLOW_MARK_DEFAULT)\n+\t\t++val;\n+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n+\t/*\n+\t * Mark is 24 bits (minus reserved values) but is stored on a 32 bit\n+\t * word, byte-swapped by the kernel on little-endian systems. In this\n+\t * case, left-shifting the resulting big-endian value ensures the\n+\t * least significant 24 bits are retained when converting it back.\n+\t */\n+\tret = rte_cpu_to_be_32(val) >> 8;\n+#else\n+\tret = val;\n+#endif\n+\treturn ret;\n+}\n+\n+/**\n+ * Convert a mark to user mark.\n+ *\n+ * @param val\n+ *   Mark value to convert.\n+ *\n+ * @return\n+ *   Converted mark value.\n+ */\n+static inline uint32_t\n+mlx5_flow_mark_get(uint32_t val)\n+{\n+\t/*\n+\t * Subtract one from the retrieved value. 
It was added by\n+\t * mlx5_flow_mark_set() to distinguish unmarked flows.\n+\t */\n+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n+\treturn (val >> 8) - 1;\n+#else\n+\treturn val - 1;\n+#endif\n+}\n+\n+#endif /* RTE_PMD_MLX5_PRM_H_ */\ndiff --git a/drivers/common/mlx5/rte_common_mlx5_version.map b/drivers/common/mlx5/rte_common_mlx5_version.map\nnew file mode 100644\nindex 0000000..e4f85e2\n--- /dev/null\n+++ b/drivers/common/mlx5/rte_common_mlx5_version.map\n@@ -0,0 +1,20 @@\n+DPDK_20.02 {\n+\tglobal:\n+\n+\tmlx5_devx_cmd_create_rq;\n+\tmlx5_devx_cmd_create_rqt;\n+\tmlx5_devx_cmd_create_sq;\n+\tmlx5_devx_cmd_create_tir;\n+\tmlx5_devx_cmd_create_td;\n+\tmlx5_devx_cmd_create_tis;\n+\tmlx5_devx_cmd_destroy;\n+\tmlx5_devx_cmd_flow_counter_alloc;\n+\tmlx5_devx_cmd_flow_counter_query;\n+\tmlx5_devx_cmd_flow_dump;\n+\tmlx5_devx_cmd_mkey_create;\n+\tmlx5_devx_cmd_modify_rq;\n+\tmlx5_devx_cmd_modify_sq;\n+\tmlx5_devx_cmd_qp_query_tis_td;\n+\tmlx5_devx_cmd_query_hca_attr;\n+\tmlx5_devx_get_out_command_status;\n+};\ndiff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile\nindex 0466d9d..88ce197 100644\n--- a/drivers/net/mlx5/Makefile\n+++ b/drivers/net/mlx5/Makefile\n@@ -12,9 +12,6 @@ LIB_GLUE_VERSION = 20.02.0\n \n # Sources.\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c\n-ifneq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\n-SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_glue.c\n-endif\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxq.c\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_txq.c\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx.c\n@@ -37,34 +34,22 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_dv.c\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_verbs.c\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mp.c\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c\n-SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_devx_cmds.c\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_utils.c\n SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c\n \n-ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\n-INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)\n-endif\n-\n # Basic CFLAGS.\n CFLAGS += -O3\n CFLAGS += -std=c11 -Wall -Wextra\n CFLAGS += -g\n-CFLAGS += -I.\n+CFLAGS += -I$(RTE_SDK)/drivers/common/mlx5\n+CFLAGS += -I$(RTE_SDK)/drivers/net/mlx5\n+CFLAGS += -I$(BUILDDIR)/drivers/common/mlx5\n CFLAGS += -D_BSD_SOURCE\n CFLAGS += -D_DEFAULT_SOURCE\n CFLAGS += -D_XOPEN_SOURCE=600\n CFLAGS += $(WERROR_FLAGS)\n CFLAGS += -Wno-strict-prototypes\n-ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\n-CFLAGS += -DMLX5_GLUE='\"$(LIB_GLUE)\"'\n-CFLAGS += -DMLX5_GLUE_VERSION='\"$(LIB_GLUE_VERSION)\"'\n-CFLAGS_mlx5_glue.o += -fPIC\n-LDLIBS += -ldl\n-else ifeq ($(CONFIG_RTE_IBVERBS_LINK_STATIC),y)\n-LDLIBS += $(shell $(RTE_SDK)/buildtools/options-ibverbs-static.sh)\n-else\n-LDLIBS += -libverbs -lmlx5\n-endif\n+LDLIBS += -lrte_common_mlx5\n LDLIBS += -lm\n LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring\n LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs\n@@ -74,6 +59,7 @@ LDLIBS += -lrte_bus_pci\n CFLAGS += -Wno-error=cast-qual\n \n EXPORT_MAP := rte_pmd_mlx5_version.map\n+\n # memseg walk is not part of stable API\n CFLAGS += -DALLOW_EXPERIMENTAL_API\n \n@@ -96,282 +82,3 @@ endif\n \n include $(RTE_SDK)/mk/rte.lib.mk\n \n-# Generate and clean-up mlx5_autoconf.h.\n-\n-export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS\n-export AUTO_CONFIG_CFLAGS += -Wno-error\n-\n-ifndef V\n-AUTOCONF_OUTPUT := >/dev/null\n-endif\n-\n-mlx5_autoconf.h.new: FORCE\n-\n-mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh\n-\t$Q $(RM) -f -- '$@'\n-\t$Q 
sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tenum MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_DEVICE_TUNNEL_SUPPORT \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tenum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_DEVICE_MPLS_SUPPORT \\\n-\t\tinfiniband/verbs.h \\\n-\t\tenum IBV_FLOW_SPEC_MPLS \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING \\\n-\t\tinfiniband/verbs.h \\\n-\t\tenum IBV_WQ_FLAGS_PCI_WRITE_END_PADDING \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_WQ_FLAG_RX_END_PADDING \\\n-\t\tinfiniband/verbs.h \\\n-\t\tenum IBV_WQ_FLAG_RX_END_PADDING \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_MLX5_MOD_SWP \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\ttype 'struct mlx5dv_sw_parsing_caps' \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_MLX5_MOD_MPW \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tenum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_MLX5_MOD_CQE_128B_COMP \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tenum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_MLX5_MOD_CQE_128B_PAD \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tenum MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_FLOW_DV_SUPPORT \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tfunc mlx5dv_create_flow_action_packet_reformat \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_MLX5DV_DR \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tenum MLX5DV_DR_DOMAIN_TYPE_NIC_RX \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_MLX5DV_DR_ESWITCH \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tenum MLX5DV_DR_DOMAIN_TYPE_FDB \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_MLX5DV_DR_VLAN \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tfunc mlx5dv_dr_action_create_push_vlan \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_MLX5DV_DR_DEVX_PORT \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tfunc mlx5dv_query_devx_port \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_DEVX_OBJ \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tfunc mlx5dv_devx_obj_create \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_FLOW_DEVX_COUNTERS \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tenum MLX5DV_FLOW_ACTION_COUNTERS_DEVX \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_DEVX_ASYNC \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tfunc mlx5dv_devx_obj_query_async \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tfunc mlx5dv_dr_action_create_dest_devx_tir \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_MLX5_DR_CREATE_ACTION_FLOW_METER \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tfunc mlx5dv_dr_action_create_flow_meter \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_MLX5_DR_FLOW_DUMP \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tfunc mlx5dv_dump_dr_domain \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD \\\n-\t\tinfiniband/mlx5dv.h \\\n-\t\tenum MLX5_MMAP_GET_NC_PAGES_CMD \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_ETHTOOL_LINK_MODE_25G \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tenum ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 
\\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_ETHTOOL_LINK_MODE_50G \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tenum ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_ETHTOOL_LINK_MODE_100G \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tenum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_DEVICE_COUNTERS_SET_V42 \\\n-\t\tinfiniband/verbs.h \\\n-\t\ttype 'struct ibv_counter_set_init_attr' \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IBV_DEVICE_COUNTERS_SET_V45 \\\n-\t\tinfiniband/verbs.h \\\n-\t\ttype 'struct ibv_counters_init_attr' \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_RDMA_NL_NLDEV \\\n-\t\trdma/rdma_netlink.h \\\n-\t\tenum RDMA_NL_NLDEV \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_RDMA_NLDEV_CMD_GET \\\n-\t\trdma/rdma_netlink.h \\\n-\t\tenum RDMA_NLDEV_CMD_GET \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_RDMA_NLDEV_CMD_PORT_GET \\\n-\t\trdma/rdma_netlink.h \\\n-\t\tenum RDMA_NLDEV_CMD_PORT_GET \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_RDMA_NLDEV_ATTR_DEV_INDEX \\\n-\t\trdma/rdma_netlink.h \\\n-\t\tenum RDMA_NLDEV_ATTR_DEV_INDEX \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_RDMA_NLDEV_ATTR_DEV_NAME \\\n-\t\trdma/rdma_netlink.h \\\n-\t\tenum RDMA_NLDEV_ATTR_DEV_NAME \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_RDMA_NLDEV_ATTR_PORT_INDEX \\\n-\t\trdma/rdma_netlink.h \\\n-\t\tenum RDMA_NLDEV_ATTR_PORT_INDEX \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_RDMA_NLDEV_ATTR_NDEV_INDEX \\\n-\t\trdma/rdma_netlink.h \\\n-\t\tenum RDMA_NLDEV_ATTR_NDEV_INDEX \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IFLA_NUM_VF \\\n-\t\tlinux/if_link.h \\\n-\t\tenum IFLA_NUM_VF \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IFLA_EXT_MASK \\\n-\t\tlinux/if_link.h \\\n-\t\tenum IFLA_EXT_MASK \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IFLA_PHYS_SWITCH_ID \\\n-\t\tlinux/if_link.h \\\n-\t\tenum IFLA_PHYS_SWITCH_ID \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_IFLA_PHYS_PORT_NAME \\\n-\t\tlinux/if_link.h \\\n-\t\tenum IFLA_PHYS_PORT_NAME \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_SUPPORTED_40000baseKR4_Full \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tdefine SUPPORTED_40000baseKR4_Full \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_SUPPORTED_40000baseCR4_Full \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tdefine SUPPORTED_40000baseCR4_Full \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_SUPPORTED_40000baseSR4_Full \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tdefine SUPPORTED_40000baseSR4_Full \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_SUPPORTED_40000baseLR4_Full \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tdefine SUPPORTED_40000baseLR4_Full \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_SUPPORTED_56000baseKR4_Full \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tdefine SUPPORTED_56000baseKR4_Full \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_SUPPORTED_56000baseCR4_Full \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tdefine SUPPORTED_56000baseCR4_Full \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_SUPPORTED_56000baseSR4_Full \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tdefine SUPPORTED_56000baseSR4_Full 
\\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_SUPPORTED_56000baseLR4_Full \\\n-\t\t/usr/include/linux/ethtool.h \\\n-\t\tdefine SUPPORTED_56000baseLR4_Full \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_STATIC_ASSERT \\\n-\t\t/usr/include/assert.h \\\n-\t\tdefine static_assert \\\n-\t\t$(AUTOCONF_OUTPUT)\n-\n-# Create mlx5_autoconf.h or update it in case it differs from the new one.\n-\n-mlx5_autoconf.h: mlx5_autoconf.h.new\n-\t$Q [ -f '$@' ] && \\\n-\t\tcmp '$<' '$@' $(AUTOCONF_OUTPUT) || \\\n-\t\tmv '$<' '$@'\n-\n-$(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h\n-\n-# Generate dependency plug-in for rdma-core when the PMD must not be linked\n-# directly, so that applications do not inherit this dependency.\n-\n-ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\n-\n-$(LIB): $(LIB_GLUE)\n-\n-ifeq ($(LINK_USING_CC),1)\n-GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))\n-else\n-GLUE_LDFLAGS := $(LDFLAGS)\n-endif\n-$(LIB_GLUE): mlx5_glue.o\n-\t$Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \\\n-\t\t-Wl,-h,$(LIB_GLUE) \\\n-\t\t-shared -o $@ $< -libverbs -lmlx5\n-\n-mlx5_glue.o: mlx5_autoconf.h\n-\n-endif\n-\n-clean_mlx5: FORCE\n-\t$Q rm -f -- mlx5_autoconf.h mlx5_autoconf.h.new\n-\t$Q rm -f -- mlx5_glue.o $(LIB_GLUE_BASE)*\n-\n-clean: clean_mlx5\ndiff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build\nindex 3ad4f02..f6d0db9 100644\n--- a/drivers/net/mlx5/meson.build\n+++ b/drivers/net/mlx5/meson.build\n@@ -7,224 +7,54 @@ if not is_linux\n \treason = 'only supported on Linux'\n \tsubdir_done()\n endif\n-build = true\n \n-pmd_dlopen = (get_option('ibverbs_link') == 'dlopen')\n LIB_GLUE_BASE = 'librte_pmd_mlx5_glue.so'\n LIB_GLUE_VERSION = '20.02.0'\n LIB_GLUE = LIB_GLUE_BASE + '.' + LIB_GLUE_VERSION\n-if pmd_dlopen\n-\tdpdk_conf.set('RTE_IBVERBS_LINK_DLOPEN', 1)\n-\tcflags += [\n-\t\t'-DMLX5_GLUE=\"@0@\"'.format(LIB_GLUE),\n-\t\t'-DMLX5_GLUE_VERSION=\"@0@\"'.format(LIB_GLUE_VERSION),\n-\t]\n-endif\n \n-libnames = [ 'mlx5', 'ibverbs' ]\n-libs = []\n-foreach libname:libnames\n-\tlib = dependency('lib' + libname, required:false)\n-\tif not lib.found()\n-\t\tlib = cc.find_library(libname, required:false)\n-\tendif\n-\tif lib.found()\n-\t\tlibs += [ lib ]\n-\telse\n-\t\tbuild = false\n-\t\treason = 'missing dependency, \"' + libname + '\"'\n+allow_experimental_apis = true\n+deps += ['hash', 'common_mlx5']\n+sources = files(\n+\t'mlx5.c',\n+\t'mlx5_ethdev.c',\n+\t'mlx5_flow.c',\n+\t'mlx5_flow_meter.c',\n+\t'mlx5_flow_dv.c',\n+\t'mlx5_flow_verbs.c',\n+\t'mlx5_mac.c',\n+\t'mlx5_mr.c',\n+\t'mlx5_nl.c',\n+\t'mlx5_rss.c',\n+\t'mlx5_rxmode.c',\n+\t'mlx5_rxq.c',\n+\t'mlx5_rxtx.c',\n+\t'mlx5_mp.c',\n+\t'mlx5_stats.c',\n+\t'mlx5_trigger.c',\n+\t'mlx5_txq.c',\n+\t'mlx5_vlan.c',\n+\t'mlx5_utils.c',\n+\t'mlx5_socket.c',\n+)\n+if (dpdk_conf.has('RTE_ARCH_X86_64')\n+\tor dpdk_conf.has('RTE_ARCH_ARM64')\n+\tor dpdk_conf.has('RTE_ARCH_PPC_64'))\n+\tsources += files('mlx5_rxtx_vec.c')\n+endif\n+cflags_options = [\n+\t'-std=c11',\n+\t'-Wno-strict-prototypes',\n+\t'-D_BSD_SOURCE',\n+\t'-D_DEFAULT_SOURCE',\n+\t'-D_XOPEN_SOURCE=600'\n+]\n+foreach option:cflags_options\n+\tif cc.has_argument(option)\n+\t\tcflags += option\n \tendif\n endforeach\n-\n-if build\n-\tallow_experimental_apis = true\n-\tdeps += ['hash']\n-\text_deps += libs\n-\tsources = 
files(\n-\t\t'mlx5.c',\n-\t\t'mlx5_ethdev.c',\n-\t\t'mlx5_flow.c',\n-\t\t'mlx5_flow_meter.c',\n-\t\t'mlx5_flow_dv.c',\n-\t\t'mlx5_flow_verbs.c',\n-\t\t'mlx5_mac.c',\n-\t\t'mlx5_mr.c',\n-\t\t'mlx5_nl.c',\n-\t\t'mlx5_rss.c',\n-\t\t'mlx5_rxmode.c',\n-\t\t'mlx5_rxq.c',\n-\t\t'mlx5_rxtx.c',\n-\t\t'mlx5_mp.c',\n-\t\t'mlx5_stats.c',\n-\t\t'mlx5_trigger.c',\n-\t\t'mlx5_txq.c',\n-\t\t'mlx5_vlan.c',\n-\t\t'mlx5_devx_cmds.c',\n-\t\t'mlx5_utils.c',\n-\t\t'mlx5_socket.c',\n-\t)\n-\tif (dpdk_conf.has('RTE_ARCH_X86_64')\n-\t\tor dpdk_conf.has('RTE_ARCH_ARM64')\n-\t\tor dpdk_conf.has('RTE_ARCH_PPC_64'))\n-\t\tsources += files('mlx5_rxtx_vec.c')\n-\tendif\n-\tif not pmd_dlopen\n-\t\tsources += files('mlx5_glue.c')\n-\tendif\n-\tcflags_options = [\n-\t\t'-std=c11',\n-\t\t'-Wno-strict-prototypes',\n-\t\t'-D_BSD_SOURCE',\n-\t\t'-D_DEFAULT_SOURCE',\n-\t\t'-D_XOPEN_SOURCE=600'\n-\t]\n-\tforeach option:cflags_options\n-\t\tif cc.has_argument(option)\n-\t\t\tcflags += option\n-\t\tendif\n-\tendforeach\n-\tif get_option('buildtype').contains('debug')\n-\t\tcflags += [ '-pedantic', '-UNDEBUG', '-DPEDANTIC' ]\n-\telse\n-\t\tcflags += [ '-DNDEBUG', '-UPEDANTIC' ]\n-\tendif\n-\t# To maintain the compatibility with the make build system\n-\t# mlx5_autoconf.h file is still generated.\n-\t# input array for meson member search:\n-\t# [ \"MACRO to define if found\", \"header for the search\",\n-\t#   \"symbol to search\", \"struct member to search\" ]\n-\thas_member_args = [\n-\t\t[ 'HAVE_IBV_MLX5_MOD_SWP', 'infiniband/mlx5dv.h',\n-\t\t'struct mlx5dv_sw_parsing_caps', 'sw_parsing_offloads' ],\n-\t\t[ 'HAVE_IBV_DEVICE_COUNTERS_SET_V42', 'infiniband/verbs.h',\n-\t\t'struct ibv_counter_set_init_attr', 'counter_set_id' ],\n-\t\t[ 'HAVE_IBV_DEVICE_COUNTERS_SET_V45', 'infiniband/verbs.h',\n-\t\t'struct ibv_counters_init_attr', 'comp_mask' ],\n-\t]\n-\t# input array for meson symbol search:\n-\t# [ \"MACRO to define if found\", \"header for the search\",\n-\t#   \"symbol to search\" ]\n-\thas_sym_args = [\n-\t\t[ 'HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT', 'infiniband/mlx5dv.h',\n-\t\t'MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX' ],\n-\t\t[ 'HAVE_IBV_DEVICE_TUNNEL_SUPPORT', 'infiniband/mlx5dv.h',\n-\t\t'MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS' ],\n-\t\t[ 'HAVE_IBV_MLX5_MOD_MPW', 'infiniband/mlx5dv.h',\n-\t\t'MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED' ],\n-\t\t[ 'HAVE_IBV_MLX5_MOD_CQE_128B_COMP', 'infiniband/mlx5dv.h',\n-\t\t'MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP' ],\n-\t\t[ 'HAVE_IBV_MLX5_MOD_CQE_128B_PAD', 'infiniband/mlx5dv.h',\n-\t\t'MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD' ],\n-\t\t[ 'HAVE_IBV_FLOW_DV_SUPPORT', 'infiniband/mlx5dv.h',\n-\t\t'mlx5dv_create_flow_action_packet_reformat' ],\n-\t\t[ 'HAVE_IBV_DEVICE_MPLS_SUPPORT', 'infiniband/verbs.h',\n-\t\t'IBV_FLOW_SPEC_MPLS' ],\n-\t\t[ 'HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING', 'infiniband/verbs.h',\n-\t\t'IBV_WQ_FLAGS_PCI_WRITE_END_PADDING' ],\n-\t\t[ 'HAVE_IBV_WQ_FLAG_RX_END_PADDING', 'infiniband/verbs.h',\n-\t\t'IBV_WQ_FLAG_RX_END_PADDING' ],\n-\t\t[ 'HAVE_MLX5DV_DR_DEVX_PORT', 'infiniband/mlx5dv.h',\n-\t\t'mlx5dv_query_devx_port' ],\n-\t\t[ 'HAVE_IBV_DEVX_OBJ', 'infiniband/mlx5dv.h',\n-\t\t'mlx5dv_devx_obj_create' ],\n-\t\t[ 'HAVE_IBV_FLOW_DEVX_COUNTERS', 'infiniband/mlx5dv.h',\n-\t\t'MLX5DV_FLOW_ACTION_COUNTERS_DEVX' ],\n-\t\t[ 'HAVE_IBV_DEVX_ASYNC', 'infiniband/mlx5dv.h',\n-\t\t'mlx5dv_devx_obj_query_async' ],\n-\t\t[ 'HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR', 'infiniband/mlx5dv.h',\n-\t\t'mlx5dv_dr_action_create_dest_devx_tir' ],\n-\t\t[ 'HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER', 
'infiniband/mlx5dv.h',\n-\t\t'mlx5dv_dr_action_create_flow_meter' ],\n-\t\t[ 'HAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD', 'infiniband/mlx5dv.h',\n-\t\t'MLX5_MMAP_GET_NC_PAGES_CMD' ],\n-\t\t[ 'HAVE_MLX5DV_DR', 'infiniband/mlx5dv.h',\n-\t\t'MLX5DV_DR_DOMAIN_TYPE_NIC_RX' ],\n-\t\t[ 'HAVE_MLX5DV_DR_ESWITCH', 'infiniband/mlx5dv.h',\n-\t\t'MLX5DV_DR_DOMAIN_TYPE_FDB' ],\n-\t\t[ 'HAVE_MLX5DV_DR_VLAN', 'infiniband/mlx5dv.h',\n-\t\t'mlx5dv_dr_action_create_push_vlan' ],\n-\t\t[ 'HAVE_SUPPORTED_40000baseKR4_Full', 'linux/ethtool.h',\n-\t\t'SUPPORTED_40000baseKR4_Full' ],\n-\t\t[ 'HAVE_SUPPORTED_40000baseCR4_Full', 'linux/ethtool.h',\n-\t\t'SUPPORTED_40000baseCR4_Full' ],\n-\t\t[ 'HAVE_SUPPORTED_40000baseSR4_Full', 'linux/ethtool.h',\n-\t\t'SUPPORTED_40000baseSR4_Full' ],\n-\t\t[ 'HAVE_SUPPORTED_40000baseLR4_Full', 'linux/ethtool.h',\n-\t\t'SUPPORTED_40000baseLR4_Full' ],\n-\t\t[ 'HAVE_SUPPORTED_56000baseKR4_Full', 'linux/ethtool.h',\n-\t\t'SUPPORTED_56000baseKR4_Full' ],\n-\t\t[ 'HAVE_SUPPORTED_56000baseCR4_Full', 'linux/ethtool.h',\n-\t\t'SUPPORTED_56000baseCR4_Full' ],\n-\t\t[ 'HAVE_SUPPORTED_56000baseSR4_Full', 'linux/ethtool.h',\n-\t\t'SUPPORTED_56000baseSR4_Full' ],\n-\t\t[ 'HAVE_SUPPORTED_56000baseLR4_Full', 'linux/ethtool.h',\n-\t\t'SUPPORTED_56000baseLR4_Full' ],\n-\t\t[ 'HAVE_ETHTOOL_LINK_MODE_25G', 'linux/ethtool.h',\n-\t\t'ETHTOOL_LINK_MODE_25000baseCR_Full_BIT' ],\n-\t\t[ 'HAVE_ETHTOOL_LINK_MODE_50G', 'linux/ethtool.h',\n-\t\t'ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT' ],\n-\t\t[ 'HAVE_ETHTOOL_LINK_MODE_100G', 'linux/ethtool.h',\n-\t\t'ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT' ],\n-\t\t[ 'HAVE_IFLA_NUM_VF', 'linux/if_link.h',\n-\t\t'IFLA_NUM_VF' ],\n-\t\t[ 'HAVE_IFLA_EXT_MASK', 'linux/if_link.h',\n-\t\t'IFLA_EXT_MASK' ],\n-\t\t[ 'HAVE_IFLA_PHYS_SWITCH_ID', 'linux/if_link.h',\n-\t\t'IFLA_PHYS_SWITCH_ID' ],\n-\t\t[ 'HAVE_IFLA_PHYS_PORT_NAME', 'linux/if_link.h',\n-\t\t'IFLA_PHYS_PORT_NAME' ],\n-\t\t[ 'HAVE_RDMA_NL_NLDEV', 'rdma/rdma_netlink.h',\n-\t\t'RDMA_NL_NLDEV' ],\n-\t\t[ 'HAVE_RDMA_NLDEV_CMD_GET', 'rdma/rdma_netlink.h',\n-\t\t'RDMA_NLDEV_CMD_GET' ],\n-\t\t[ 'HAVE_RDMA_NLDEV_CMD_PORT_GET', 'rdma/rdma_netlink.h',\n-\t\t'RDMA_NLDEV_CMD_PORT_GET' ],\n-\t\t[ 'HAVE_RDMA_NLDEV_ATTR_DEV_INDEX', 'rdma/rdma_netlink.h',\n-\t\t'RDMA_NLDEV_ATTR_DEV_INDEX' ],\n-\t\t[ 'HAVE_RDMA_NLDEV_ATTR_DEV_NAME', 'rdma/rdma_netlink.h',\n-\t\t'RDMA_NLDEV_ATTR_DEV_NAME' ],\n-\t\t[ 'HAVE_RDMA_NLDEV_ATTR_PORT_INDEX', 'rdma/rdma_netlink.h',\n-\t\t'RDMA_NLDEV_ATTR_PORT_INDEX' ],\n-\t\t[ 'HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX', 'rdma/rdma_netlink.h',\n-\t\t'RDMA_NLDEV_ATTR_NDEV_INDEX' ],\n-\t\t[ 'HAVE_MLX5_DR_FLOW_DUMP', 'infiniband/mlx5dv.h',\n-\t\t'mlx5dv_dump_dr_domain'],\n-\t]\n-\tconfig = configuration_data()\n-\tforeach arg:has_sym_args\n-\t\tconfig.set(arg[0], cc.has_header_symbol(arg[1], arg[2],\n-\t\t\tdependencies: libs))\n-\tendforeach\n-\tforeach arg:has_member_args\n-\t\tfile_prefix = '#include <' + arg[1] + '>'\n-\t\tconfig.set(arg[0], cc.has_member(arg[2], arg[3],\n-\t\t\tprefix : file_prefix, dependencies: libs))\n-\tendforeach\n-\tconfigure_file(output : 'mlx5_autoconf.h', configuration : config)\n-endif\n-# Build Glue Library\n-if pmd_dlopen and build\n-\tdlopen_name = 'mlx5_glue'\n-\tdlopen_lib_name = driver_name_fmt.format(dlopen_name)\n-\tdlopen_so_version = LIB_GLUE_VERSION\n-\tdlopen_sources = files('mlx5_glue.c')\n-\tdlopen_install_dir = [ eal_pmd_path + '-glue' ]\n-\tdlopen_includes = [global_inc]\n-\tdlopen_includes += 
include_directories(\n-\t\t'../../../lib/librte_eal/common/include/generic',\n-\t)\n-\tshared_lib = shared_library(\n-\t\tdlopen_lib_name,\n-\t\tdlopen_sources,\n-\t\tinclude_directories: dlopen_includes,\n-\t\tc_args: cflags,\n-\t\tdependencies: libs,\n-\t\tlink_args: [\n-\t\t'-Wl,-export-dynamic',\n-\t\t'-Wl,-h,@0@'.format(LIB_GLUE),\n-\t\t],\n-\t\tsoversion: dlopen_so_version,\n-\t\tinstall: true,\n-\t\tinstall_dir: dlopen_install_dir,\n-\t)\n+if get_option('buildtype').contains('debug')\n+\tcflags += [ '-pedantic', '-UNDEBUG', '-DPEDANTIC' ]\n+else\n+\tcflags += [ '-DNDEBUG', '-UPEDANTIC' ]\n endif\ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 2f91e50..1cb8374 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -38,15 +38,16 @@\n #include <rte_string_fns.h>\n #include <rte_alarm.h>\n \n+#include <mlx5_glue.h>\n+#include <mlx5_devx_cmds.h>\n+\n+#include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_defs.h\"\n-#include \"mlx5_glue.h\"\n #include \"mlx5_mr.h\"\n #include \"mlx5_flow.h\"\n-#include \"mlx5_devx_cmds.h\"\n \n /* Device parameter to enable RX completion queue compression. */\n #define MLX5_RXQ_CQE_COMP_EN \"rxq_cqe_comp_en\"\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 0b8b1b6..29c0a06 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -32,13 +32,14 @@\n #include <rte_errno.h>\n #include <rte_flow.h>\n \n+#include <mlx5_glue.h>\n+#include <mlx5_devx_cmds.h>\n+#include <mlx5_prm.h>\n+\n+#include \"mlx5_defs.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_mr.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_defs.h\"\n-#include \"mlx5_glue.h\"\n-#include \"mlx5_prm.h\"\n-#include \"mlx5_devx_cmds.h\"\n \n enum {\n \tPCI_VENDOR_ID_MELLANOX = 0x15b3,\ndiff --git a/drivers/net/mlx5/mlx5_devx_cmds.c b/drivers/net/mlx5/mlx5_devx_cmds.c\ndeleted file mode 100644\nindex 1302919..0000000\n--- a/drivers/net/mlx5/mlx5_devx_cmds.c\n+++ /dev/null\n@@ -1,974 +0,0 @@\n-// SPDX-License-Identifier: BSD-3-Clause\n-/* Copyright 2018 Mellanox Technologies, Ltd */\n-\n-#include <unistd.h>\n-\n-#include <rte_flow_driver.h>\n-#include <rte_malloc.h>\n-\n-#include \"mlx5_prm.h\"\n-#include \"mlx5_devx_cmds.h\"\n-#include \"mlx5_utils.h\"\n-\n-\n-/**\n- * Allocate flow counters via devx interface.\n- *\n- * @param[in] ctx\n- *   ibv contexts returned from mlx5dv_open_device.\n- * @param dcs\n- *   Pointer to counters properties structure to be filled by the routine.\n- * @param bulk_n_128\n- *   Bulk counter numbers in 128 counters units.\n- *\n- * @return\n- *   Pointer to counter object on success, a negative value otherwise and\n- *   rte_errno is set.\n- */\n-struct mlx5_devx_obj *\n-mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx, uint32_t bulk_n_128)\n-{\n-\tstruct mlx5_devx_obj *dcs = rte_zmalloc(\"dcs\", sizeof(*dcs), 0);\n-\tuint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};\n-\n-\tif (!dcs) {\n-\t\trte_errno = ENOMEM;\n-\t\treturn NULL;\n-\t}\n-\tMLX5_SET(alloc_flow_counter_in, in, opcode,\n-\t\t MLX5_CMD_OP_ALLOC_FLOW_COUNTER);\n-\tMLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);\n-\tdcs->obj = mlx5_glue->devx_obj_create(ctx, in,\n-\t\t\t\t\t      sizeof(in), out, sizeof(out));\n-\tif (!dcs->obj) {\n-\t\tDRV_LOG(ERR, \"Can't allocate counters - error %d\", errno);\n-\t\trte_errno = 
errno;\n-\t\trte_free(dcs);\n-\t\treturn NULL;\n-\t}\n-\tdcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);\n-\treturn dcs;\n-}\n-\n-/**\n- * Query flow counters values.\n- *\n- * @param[in] dcs\n- *   devx object that was obtained from mlx5_devx_cmd_fc_alloc.\n- * @param[in] clear\n- *   Whether hardware should clear the counters after the query or not.\n- * @param[in] n_counters\n- *   0 in case of 1 counter to read, otherwise the counter number to read.\n- *  @param pkts\n- *   The number of packets that matched the flow.\n- *  @param bytes\n- *    The number of bytes that matched the flow.\n- *  @param mkey\n- *   The mkey key for batch query.\n- *  @param addr\n- *    The address in the mkey range for batch query.\n- *  @param cmd_comp\n- *   The completion object for asynchronous batch query.\n- *  @param async_id\n- *    The ID to be returned in the asynchronous batch query response.\n- *\n- * @return\n- *   0 on success, a negative value otherwise.\n- */\n-int\n-mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,\n-\t\t\t\t int clear, uint32_t n_counters,\n-\t\t\t\t uint64_t *pkts, uint64_t *bytes,\n-\t\t\t\t uint32_t mkey, void *addr,\n-\t\t\t\t struct mlx5dv_devx_cmd_comp *cmd_comp,\n-\t\t\t\t uint64_t async_id)\n-{\n-\tint out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +\n-\t\t\tMLX5_ST_SZ_BYTES(traffic_counter);\n-\tuint32_t out[out_len];\n-\tuint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};\n-\tvoid *stats;\n-\tint rc;\n-\n-\tMLX5_SET(query_flow_counter_in, in, opcode,\n-\t\t MLX5_CMD_OP_QUERY_FLOW_COUNTER);\n-\tMLX5_SET(query_flow_counter_in, in, op_mod, 0);\n-\tMLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);\n-\tMLX5_SET(query_flow_counter_in, in, clear, !!clear);\n-\n-\tif (n_counters) {\n-\t\tMLX5_SET(query_flow_counter_in, in, num_of_counters,\n-\t\t\t n_counters);\n-\t\tMLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);\n-\t\tMLX5_SET(query_flow_counter_in, in, mkey, mkey);\n-\t\tMLX5_SET64(query_flow_counter_in, in, address,\n-\t\t\t   (uint64_t)(uintptr_t)addr);\n-\t}\n-\tif (!cmd_comp)\n-\t\trc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,\n-\t\t\t\t\t       out_len);\n-\telse\n-\t\trc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),\n-\t\t\t\t\t\t     out_len, async_id,\n-\t\t\t\t\t\t     cmd_comp);\n-\tif (rc) {\n-\t\tDRV_LOG(ERR, \"Failed to query devx counters with rc %d\", rc);\n-\t\trte_errno = rc;\n-\t\treturn -rc;\n-\t}\n-\tif (!n_counters) {\n-\t\tstats = MLX5_ADDR_OF(query_flow_counter_out,\n-\t\t\t\t     out, flow_statistics);\n-\t\t*pkts = MLX5_GET64(traffic_counter, stats, packets);\n-\t\t*bytes = MLX5_GET64(traffic_counter, stats, octets);\n-\t}\n-\treturn 0;\n-}\n-\n-/**\n- * Create a new mkey.\n- *\n- * @param[in] ctx\n- *   ibv contexts returned from mlx5dv_open_device.\n- * @param[in] attr\n- *   Attributes of the requested mkey.\n- *\n- * @return\n- *   Pointer to Devx mkey on success, a negative value otherwise and rte_errno\n- *   is set.\n- */\n-struct mlx5_devx_obj *\n-mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,\n-\t\t\t  struct mlx5_devx_mkey_attr *attr)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(create_mkey_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};\n-\tvoid *mkc;\n-\tstruct mlx5_devx_obj *mkey = rte_zmalloc(\"mkey\", sizeof(*mkey), 0);\n-\tsize_t pgsize;\n-\tuint32_t translation_size;\n-\n-\tif (!mkey) {\n-\t\trte_errno = ENOMEM;\n-\t\treturn NULL;\n-\t}\n-\tpgsize = sysconf(_SC_PAGESIZE);\n-\ttranslation_size = (RTE_ALIGN(attr->size, pgsize) 
* 8) / 16;\n-\tMLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);\n-\tMLX5_SET(create_mkey_in, in, translations_octword_actual_size,\n-\t\t translation_size);\n-\tMLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);\n-\tmkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);\n-\tMLX5_SET(mkc, mkc, lw, 0x1);\n-\tMLX5_SET(mkc, mkc, lr, 0x1);\n-\tMLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);\n-\tMLX5_SET(mkc, mkc, qpn, 0xffffff);\n-\tMLX5_SET(mkc, mkc, pd, attr->pd);\n-\tMLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);\n-\tMLX5_SET(mkc, mkc, translations_octword_size, translation_size);\n-\tMLX5_SET64(mkc, mkc, start_addr, attr->addr);\n-\tMLX5_SET64(mkc, mkc, len, attr->size);\n-\tMLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));\n-\tmkey->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,\n-\t\t\t\t\t       sizeof(out));\n-\tif (!mkey->obj) {\n-\t\tDRV_LOG(ERR, \"Can't create mkey - error %d\", errno);\n-\t\trte_errno = errno;\n-\t\trte_free(mkey);\n-\t\treturn NULL;\n-\t}\n-\tmkey->id = MLX5_GET(create_mkey_out, out, mkey_index);\n-\tmkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);\n-\treturn mkey;\n-}\n-\n-/**\n- * Get status of devx command response.\n- * Mainly used for asynchronous commands.\n- *\n- * @param[in] out\n- *   The out response buffer.\n- *\n- * @return\n- *   0 on success, non-zero value otherwise.\n- */\n-int\n-mlx5_devx_get_out_command_status(void *out)\n-{\n-\tint status;\n-\n-\tif (!out)\n-\t\treturn -EINVAL;\n-\tstatus = MLX5_GET(query_flow_counter_out, out, status);\n-\tif (status) {\n-\t\tint syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);\n-\n-\t\tDRV_LOG(ERR, \"Bad devX status %x, syndrome = %x\", status,\n-\t\t\tsyndrome);\n-\t}\n-\treturn status;\n-}\n-\n-/**\n- * Destroy any object allocated by a Devx API.\n- *\n- * @param[in] obj\n- *   Pointer to a general object.\n- *\n- * @return\n- *   0 on success, a negative value otherwise.\n- */\n-int\n-mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)\n-{\n-\tint ret;\n-\n-\tif (!obj)\n-\t\treturn 0;\n-\tret =  mlx5_glue->devx_obj_destroy(obj->obj);\n-\trte_free(obj);\n-\treturn ret;\n-}\n-\n-/**\n- * Query NIC vport context.\n- * Fills minimal inline attribute.\n- *\n- * @param[in] ctx\n- *   ibv contexts returned from mlx5dv_open_device.\n- * @param[in] vport\n- *   vport index\n- * @param[out] attr\n- *   Attributes device values.\n- *\n- * @return\n- *   0 on success, a negative value otherwise.\n- */\n-static int\n-mlx5_devx_cmd_query_nic_vport_context(struct ibv_context *ctx,\n-\t\t\t\t      unsigned int vport,\n-\t\t\t\t      struct mlx5_hca_attr *attr)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};\n-\tvoid *vctx;\n-\tint status, syndrome, rc;\n-\n-\t/* Query NIC vport context to determine inline mode. 
*/\n-\tMLX5_SET(query_nic_vport_context_in, in, opcode,\n-\t\t MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);\n-\tMLX5_SET(query_nic_vport_context_in, in, vport_number, vport);\n-\tif (vport)\n-\t\tMLX5_SET(query_nic_vport_context_in, in, other_vport, 1);\n-\trc = mlx5_glue->devx_general_cmd(ctx,\n-\t\t\t\t\t in, sizeof(in),\n-\t\t\t\t\t out, sizeof(out));\n-\tif (rc)\n-\t\tgoto error;\n-\tstatus = MLX5_GET(query_nic_vport_context_out, out, status);\n-\tsyndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);\n-\tif (status) {\n-\t\tDRV_LOG(DEBUG, \"Failed to query NIC vport context, \"\n-\t\t\t\"status %x, syndrome = %x\",\n-\t\t\tstatus, syndrome);\n-\t\treturn -1;\n-\t}\n-\tvctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,\n-\t\t\t    nic_vport_context);\n-\tattr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,\n-\t\t\t\t\t   min_wqe_inline_mode);\n-\treturn 0;\n-error:\n-\trc = (rc > 0) ? -rc : rc;\n-\treturn rc;\n-}\n-\n-/**\n- * Query HCA attributes.\n- * Using those attributes we can check on run time if the device\n- * is having the required capabilities.\n- *\n- * @param[in] ctx\n- *   ibv contexts returned from mlx5dv_open_device.\n- * @param[out] attr\n- *   Attributes device values.\n- *\n- * @return\n- *   0 on success, a negative value otherwise.\n- */\n-int\n-mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,\n-\t\t\t     struct mlx5_hca_attr *attr)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};\n-\tvoid *hcattr;\n-\tint status, syndrome, rc;\n-\n-\tMLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);\n-\tMLX5_SET(query_hca_cap_in, in, op_mod,\n-\t\t MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |\n-\t\t MLX5_HCA_CAP_OPMOD_GET_CUR);\n-\n-\trc = mlx5_glue->devx_general_cmd(ctx,\n-\t\t\t\t\t in, sizeof(in), out, sizeof(out));\n-\tif (rc)\n-\t\tgoto error;\n-\tstatus = MLX5_GET(query_hca_cap_out, out, status);\n-\tsyndrome = MLX5_GET(query_hca_cap_out, out, syndrome);\n-\tif (status) {\n-\t\tDRV_LOG(DEBUG, \"Failed to query devx HCA capabilities, \"\n-\t\t\t\"status %x, syndrome = %x\",\n-\t\t\tstatus, syndrome);\n-\t\treturn -1;\n-\t}\n-\thcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);\n-\tattr->flow_counter_bulk_alloc_bitmap =\n-\t\t\tMLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);\n-\tattr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,\n-\t\t\t\t\t    flow_counters_dump);\n-\tattr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);\n-\tattr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);\n-\tattr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,\n-\t\t\t\t\t\tlog_max_hairpin_queues);\n-\tattr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,\n-\t\t\t\t\t\t    log_max_hairpin_wq_data_sz);\n-\tattr->log_max_hairpin_num_packets = MLX5_GET\n-\t\t(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);\n-\tattr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);\n-\tattr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,\n-\t\t\t\t\t  eth_net_offloads);\n-\tattr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);\n-\tattr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,\n-\t\t\t\t\t       flex_parser_protocols);\n-\tattr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);\n-\tif (attr->qos.sup) {\n-\t\tMLX5_SET(query_hca_cap_in, in, op_mod,\n-\t\t\t MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |\n-\t\t\t MLX5_HCA_CAP_OPMOD_GET_CUR);\n-\t\trc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),\n-\t\t\t\t\t\t out, sizeof(out));\n-\t\tif (rc)\n-\t\t\tgoto 
error;\n-\t\tif (status) {\n-\t\t\tDRV_LOG(DEBUG, \"Failed to query devx QOS capabilities,\"\n-\t\t\t\t\" status %x, syndrome = %x\",\n-\t\t\t\tstatus, syndrome);\n-\t\t\treturn -1;\n-\t\t}\n-\t\thcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);\n-\t\tattr->qos.srtcm_sup =\n-\t\t\t\tMLX5_GET(qos_cap, hcattr, flow_meter_srtcm);\n-\t\tattr->qos.log_max_flow_meter =\n-\t\t\t\tMLX5_GET(qos_cap, hcattr, log_max_flow_meter);\n-\t\tattr->qos.flow_meter_reg_c_ids =\n-\t\t\tMLX5_GET(qos_cap, hcattr, flow_meter_reg_id);\n-\t}\n-\tif (!attr->eth_net_offloads)\n-\t\treturn 0;\n-\n-\t/* Query HCA offloads for Ethernet protocol. */\n-\tmemset(in, 0, sizeof(in));\n-\tmemset(out, 0, sizeof(out));\n-\tMLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);\n-\tMLX5_SET(query_hca_cap_in, in, op_mod,\n-\t\t MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |\n-\t\t MLX5_HCA_CAP_OPMOD_GET_CUR);\n-\n-\trc = mlx5_glue->devx_general_cmd(ctx,\n-\t\t\t\t\t in, sizeof(in),\n-\t\t\t\t\t out, sizeof(out));\n-\tif (rc) {\n-\t\tattr->eth_net_offloads = 0;\n-\t\tgoto error;\n-\t}\n-\tstatus = MLX5_GET(query_hca_cap_out, out, status);\n-\tsyndrome = MLX5_GET(query_hca_cap_out, out, syndrome);\n-\tif (status) {\n-\t\tDRV_LOG(DEBUG, \"Failed to query devx HCA capabilities, \"\n-\t\t\t\"status %x, syndrome = %x\",\n-\t\t\tstatus, syndrome);\n-\t\tattr->eth_net_offloads = 0;\n-\t\treturn -1;\n-\t}\n-\thcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);\n-\tattr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,\n-\t\t\t\t\t hcattr, wqe_vlan_insert);\n-\tattr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,\n-\t\t\t\t lro_cap);\n-\tattr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,\n-\t\t\t\t\thcattr, tunnel_lro_gre);\n-\tattr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,\n-\t\t\t\t\t  hcattr, tunnel_lro_vxlan);\n-\tattr->lro_max_msg_sz_mode = MLX5_GET\n-\t\t\t\t\t(per_protocol_networking_offload_caps,\n-\t\t\t\t\t hcattr, lro_max_msg_sz_mode);\n-\tfor (int i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {\n-\t\tattr->lro_timer_supported_periods[i] =\n-\t\t\tMLX5_GET(per_protocol_networking_offload_caps, hcattr,\n-\t\t\t\t lro_timer_supported_periods[i]);\n-\t}\n-\tattr->tunnel_stateless_geneve_rx =\n-\t\t\t    MLX5_GET(per_protocol_networking_offload_caps,\n-\t\t\t\t     hcattr, tunnel_stateless_geneve_rx);\n-\tattr->geneve_max_opt_len =\n-\t\t    MLX5_GET(per_protocol_networking_offload_caps,\n-\t\t\t     hcattr, max_geneve_opt_len);\n-\tattr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,\n-\t\t\t\t\t hcattr, wqe_inline_mode);\n-\tattr->tunnel_stateless_gtp = MLX5_GET\n-\t\t\t\t\t(per_protocol_networking_offload_caps,\n-\t\t\t\t\t hcattr, tunnel_stateless_gtp);\n-\tif (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)\n-\t\treturn 0;\n-\tif (attr->eth_virt) {\n-\t\trc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);\n-\t\tif (rc) {\n-\t\t\tattr->eth_virt = 0;\n-\t\t\tgoto error;\n-\t\t}\n-\t}\n-\treturn 0;\n-error:\n-\trc = (rc > 0) ? 
-rc : rc;\n-\treturn rc;\n-}\n-\n-/**\n- * Query TIS transport domain from QP verbs object using DevX API.\n- *\n- * @param[in] qp\n- *   Pointer to verbs QP returned by ibv_create_qp .\n- * @param[in] tis_num\n- *   TIS number of TIS to query.\n- * @param[out] tis_td\n- *   Pointer to TIS transport domain variable, to be set by the routine.\n- *\n- * @return\n- *   0 on success, a negative value otherwise.\n- */\n-int\n-mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,\n-\t\t\t      uint32_t *tis_td)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};\n-\tint rc;\n-\tvoid *tis_ctx;\n-\n-\tMLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);\n-\tMLX5_SET(query_tis_in, in, tisn, tis_num);\n-\trc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));\n-\tif (rc) {\n-\t\tDRV_LOG(ERR, \"Failed to query QP using DevX\");\n-\t\treturn -rc;\n-\t};\n-\ttis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);\n-\t*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);\n-\treturn 0;\n-}\n-\n-/**\n- * Fill WQ data for DevX API command.\n- * Utility function for use when creating DevX objects containing a WQ.\n- *\n- * @param[in] wq_ctx\n- *   Pointer to WQ context to fill with data.\n- * @param [in] wq_attr\n- *   Pointer to WQ attributes structure to fill in WQ context.\n- */\n-static void\n-devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)\n-{\n-\tMLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);\n-\tMLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);\n-\tMLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);\n-\tMLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);\n-\tMLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);\n-\tMLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);\n-\tMLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);\n-\tMLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);\n-\tMLX5_SET(wq, wq_ctx, pd, wq_attr->pd);\n-\tMLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);\n-\tMLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);\n-\tMLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);\n-\tMLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);\n-\tMLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);\n-\tMLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);\n-\tMLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);\n-\tMLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);\n-\tMLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);\n-\tMLX5_SET(wq, wq_ctx, log_hairpin_num_packets,\n-\t\t wq_attr->log_hairpin_num_packets);\n-\tMLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);\n-\tMLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,\n-\t\t wq_attr->single_wqe_log_num_of_strides);\n-\tMLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);\n-\tMLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,\n-\t\t wq_attr->single_stride_log_num_of_bytes);\n-\tMLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);\n-\tMLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);\n-\tMLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);\n-}\n-\n-/**\n- * Create RQ using DevX API.\n- *\n- * @param[in] ctx\n- *   ibv_context returned from mlx5dv_open_device.\n- * @param [in] rq_attr\n- *   Pointer to create RQ attributes structure.\n- * @param [in] socket\n- *   CPU socket ID for allocations.\n- *\n- * @return\n- *   The DevX object created, NULL otherwise and rte_errno is set.\n- */\n-struct 
mlx5_devx_obj *\n-mlx5_devx_cmd_create_rq(struct ibv_context *ctx,\n-\t\t\tstruct mlx5_devx_create_rq_attr *rq_attr,\n-\t\t\tint socket)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};\n-\tvoid *rq_ctx, *wq_ctx;\n-\tstruct mlx5_devx_wq_attr *wq_attr;\n-\tstruct mlx5_devx_obj *rq = NULL;\n-\n-\trq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);\n-\tif (!rq) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate RQ data\");\n-\t\trte_errno = ENOMEM;\n-\t\treturn NULL;\n-\t}\n-\tMLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);\n-\trq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);\n-\tMLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);\n-\tMLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);\n-\tMLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);\n-\tMLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);\n-\tMLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);\n-\tMLX5_SET(rqc, rq_ctx, state, rq_attr->state);\n-\tMLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);\n-\tMLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);\n-\tMLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);\n-\tMLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);\n-\tMLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);\n-\tMLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);\n-\twq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);\n-\twq_attr = &rq_attr->wq_attr;\n-\tdevx_cmd_fill_wq_data(wq_ctx, wq_attr);\n-\trq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),\n-\t\t\t\t\t\t  out, sizeof(out));\n-\tif (!rq->obj) {\n-\t\tDRV_LOG(ERR, \"Failed to create RQ using DevX\");\n-\t\trte_errno = errno;\n-\t\trte_free(rq);\n-\t\treturn NULL;\n-\t}\n-\trq->id = MLX5_GET(create_rq_out, out, rqn);\n-\treturn rq;\n-}\n-\n-/**\n- * Modify RQ using DevX API.\n- *\n- * @param[in] rq\n- *   Pointer to RQ object structure.\n- * @param [in] rq_attr\n- *   Pointer to modify RQ attributes structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-int\n-mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,\n-\t\t\tstruct mlx5_devx_modify_rq_attr *rq_attr)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};\n-\tvoid *rq_ctx, *wq_ctx;\n-\tint ret;\n-\n-\tMLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);\n-\tMLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);\n-\tMLX5_SET(modify_rq_in, in, rqn, rq->id);\n-\tMLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);\n-\trq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);\n-\tMLX5_SET(rqc, rq_ctx, state, rq_attr->state);\n-\tif (rq_attr->modify_bitmask &\n-\t\t\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)\n-\t\tMLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);\n-\tif (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)\n-\t\tMLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);\n-\tif (rq_attr->modify_bitmask &\n-\t\t\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)\n-\t\tMLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);\n-\tMLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);\n-\tMLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);\n-\tif (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {\n-\t\twq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);\n-\t\tMLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);\n-\t}\n-\tret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),\n-\t\t\t\t\t out, sizeof(out));\n-\tif (ret) {\n-\t\tDRV_LOG(ERR, \"Failed to modify RQ 
using DevX\");\n-\t\trte_errno = errno;\n-\t\treturn -errno;\n-\t}\n-\treturn ret;\n-}\n-\n-/**\n- * Create TIR using DevX API.\n- *\n- * @param[in] ctx\n- *   ibv_context returned from mlx5dv_open_device.\n- * @param [in] tir_attr\n- *   Pointer to TIR attributes structure.\n- *\n- * @return\n- *   The DevX object created, NULL otherwise and rte_errno is set.\n- */\n-struct mlx5_devx_obj *\n-mlx5_devx_cmd_create_tir(struct ibv_context *ctx,\n-\t\t\t struct mlx5_devx_tir_attr *tir_attr)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};\n-\tvoid *tir_ctx, *outer, *inner;\n-\tstruct mlx5_devx_obj *tir = NULL;\n-\tint i;\n-\n-\ttir = rte_calloc(__func__, 1, sizeof(*tir), 0);\n-\tif (!tir) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate TIR data\");\n-\t\trte_errno = ENOMEM;\n-\t\treturn NULL;\n-\t}\n-\tMLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);\n-\ttir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);\n-\tMLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);\n-\tMLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,\n-\t\t tir_attr->lro_timeout_period_usecs);\n-\tMLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);\n-\tMLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);\n-\tMLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);\n-\tMLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);\n-\tMLX5_SET(tirc, tir_ctx, tunneled_offload_en,\n-\t\t tir_attr->tunneled_offload_en);\n-\tMLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);\n-\tMLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);\n-\tMLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);\n-\tMLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);\n-\tfor (i = 0; i < 10; i++) {\n-\t\tMLX5_SET(tirc, tir_ctx, rx_hash_toeplitz_key[i],\n-\t\t\t tir_attr->rx_hash_toeplitz_key[i]);\n-\t}\n-\touter = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);\n-\tMLX5_SET(rx_hash_field_select, outer, l3_prot_type,\n-\t\t tir_attr->rx_hash_field_selector_outer.l3_prot_type);\n-\tMLX5_SET(rx_hash_field_select, outer, l4_prot_type,\n-\t\t tir_attr->rx_hash_field_selector_outer.l4_prot_type);\n-\tMLX5_SET(rx_hash_field_select, outer, selected_fields,\n-\t\t tir_attr->rx_hash_field_selector_outer.selected_fields);\n-\tinner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);\n-\tMLX5_SET(rx_hash_field_select, inner, l3_prot_type,\n-\t\t tir_attr->rx_hash_field_selector_inner.l3_prot_type);\n-\tMLX5_SET(rx_hash_field_select, inner, l4_prot_type,\n-\t\t tir_attr->rx_hash_field_selector_inner.l4_prot_type);\n-\tMLX5_SET(rx_hash_field_select, inner, selected_fields,\n-\t\t tir_attr->rx_hash_field_selector_inner.selected_fields);\n-\ttir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),\n-\t\t\t\t\t\t   out, sizeof(out));\n-\tif (!tir->obj) {\n-\t\tDRV_LOG(ERR, \"Failed to create TIR using DevX\");\n-\t\trte_errno = errno;\n-\t\trte_free(tir);\n-\t\treturn NULL;\n-\t}\n-\ttir->id = MLX5_GET(create_tir_out, out, tirn);\n-\treturn tir;\n-}\n-\n-/**\n- * Create RQT using DevX API.\n- *\n- * @param[in] ctx\n- *   ibv_context returned from mlx5dv_open_device.\n- * @param [in] rqt_attr\n- *   Pointer to RQT attributes structure.\n- *\n- * @return\n- *   The DevX object created, NULL otherwise and rte_errno is set.\n- */\n-struct mlx5_devx_obj *\n-mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,\n-\t\t\t struct mlx5_devx_rqt_attr *rqt_attr)\n-{\n-\tuint32_t *in = NULL;\n-\tuint32_t inlen 
= MLX5_ST_SZ_BYTES(create_rqt_in) +\n-\t\t\t rqt_attr->rqt_actual_size * sizeof(uint32_t);\n-\tuint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};\n-\tvoid *rqt_ctx;\n-\tstruct mlx5_devx_obj *rqt = NULL;\n-\tint i;\n-\n-\tin = rte_calloc(__func__, 1, inlen, 0);\n-\tif (!in) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate RQT IN data\");\n-\t\trte_errno = ENOMEM;\n-\t\treturn NULL;\n-\t}\n-\trqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);\n-\tif (!rqt) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate RQT data\");\n-\t\trte_errno = ENOMEM;\n-\t\trte_free(in);\n-\t\treturn NULL;\n-\t}\n-\tMLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);\n-\trqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);\n-\tMLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);\n-\tMLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);\n-\tfor (i = 0; i < rqt_attr->rqt_actual_size; i++)\n-\t\tMLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);\n-\trqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));\n-\trte_free(in);\n-\tif (!rqt->obj) {\n-\t\tDRV_LOG(ERR, \"Failed to create RQT using DevX\");\n-\t\trte_errno = errno;\n-\t\trte_free(rqt);\n-\t\treturn NULL;\n-\t}\n-\trqt->id = MLX5_GET(create_rqt_out, out, rqtn);\n-\treturn rqt;\n-}\n-\n-/**\n- * Create SQ using DevX API.\n- *\n- * @param[in] ctx\n- *   ibv_context returned from mlx5dv_open_device.\n- * @param [in] sq_attr\n- *   Pointer to SQ attributes structure.\n- * @param [in] socket\n- *   CPU socket ID for allocations.\n- *\n- * @return\n- *   The DevX object created, NULL otherwise and rte_errno is set.\n- **/\n-struct mlx5_devx_obj *\n-mlx5_devx_cmd_create_sq(struct ibv_context *ctx,\n-\t\t\tstruct mlx5_devx_create_sq_attr *sq_attr)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};\n-\tvoid *sq_ctx;\n-\tvoid *wq_ctx;\n-\tstruct mlx5_devx_wq_attr *wq_attr;\n-\tstruct mlx5_devx_obj *sq = NULL;\n-\n-\tsq = rte_calloc(__func__, 1, sizeof(*sq), 0);\n-\tif (!sq) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate SQ data\");\n-\t\trte_errno = ENOMEM;\n-\t\treturn NULL;\n-\t}\n-\tMLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);\n-\tsq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);\n-\tMLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);\n-\tMLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);\n-\tMLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);\n-\tMLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);\n-\tMLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,\n-\t\t sq_attr->flush_in_error_en);\n-\tMLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,\n-\t\t sq_attr->min_wqe_inline_mode);\n-\tMLX5_SET(sqc, sq_ctx, state, sq_attr->state);\n-\tMLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);\n-\tMLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);\n-\tMLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);\n-\tMLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);\n-\tMLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);\n-\tMLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,\n-\t\t sq_attr->packet_pacing_rate_limit_index);\n-\tMLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);\n-\tMLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);\n-\twq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);\n-\twq_attr = &sq_attr->wq_attr;\n-\tdevx_cmd_fill_wq_data(wq_ctx, wq_attr);\n-\tsq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),\n-\t\t\t\t\t     out, sizeof(out));\n-\tif (!sq->obj) {\n-\t\tDRV_LOG(ERR, \"Failed to create SQ using DevX\");\n-\t\trte_errno = errno;\n-\t\trte_free(sq);\n-\t\treturn 
NULL;\n-\t}\n-\tsq->id = MLX5_GET(create_sq_out, out, sqn);\n-\treturn sq;\n-}\n-\n-/**\n- * Modify SQ using DevX API.\n- *\n- * @param[in] sq\n- *   Pointer to SQ object structure.\n- * @param [in] sq_attr\n- *   Pointer to SQ attributes structure.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-int\n-mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,\n-\t\t\tstruct mlx5_devx_modify_sq_attr *sq_attr)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};\n-\tvoid *sq_ctx;\n-\tint ret;\n-\n-\tMLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);\n-\tMLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);\n-\tMLX5_SET(modify_sq_in, in, sqn, sq->id);\n-\tsq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);\n-\tMLX5_SET(sqc, sq_ctx, state, sq_attr->state);\n-\tMLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);\n-\tMLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);\n-\tret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),\n-\t\t\t\t\t out, sizeof(out));\n-\tif (ret) {\n-\t\tDRV_LOG(ERR, \"Failed to modify SQ using DevX\");\n-\t\trte_errno = errno;\n-\t\treturn -errno;\n-\t}\n-\treturn ret;\n-}\n-\n-/**\n- * Create TIS using DevX API.\n- *\n- * @param[in] ctx\n- *   ibv_context returned from mlx5dv_open_device.\n- * @param [in] tis_attr\n- *   Pointer to TIS attributes structure.\n- *\n- * @return\n- *   The DevX object created, NULL otherwise and rte_errno is set.\n- */\n-struct mlx5_devx_obj *\n-mlx5_devx_cmd_create_tis(struct ibv_context *ctx,\n-\t\t\t struct mlx5_devx_tis_attr *tis_attr)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};\n-\tstruct mlx5_devx_obj *tis = NULL;\n-\tvoid *tis_ctx;\n-\n-\ttis = rte_calloc(__func__, 1, sizeof(*tis), 0);\n-\tif (!tis) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate TIS object\");\n-\t\trte_errno = ENOMEM;\n-\t\treturn NULL;\n-\t}\n-\tMLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);\n-\ttis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);\n-\tMLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,\n-\t\t tis_attr->strict_lag_tx_port_affinity);\n-\tMLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,\n-\t\t tis_attr->strict_lag_tx_port_affinity);\n-\tMLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);\n-\tMLX5_SET(tisc, tis_ctx, transport_domain,\n-\t\t tis_attr->transport_domain);\n-\ttis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),\n-\t\t\t\t\t      out, sizeof(out));\n-\tif (!tis->obj) {\n-\t\tDRV_LOG(ERR, \"Failed to create TIS using DevX\");\n-\t\trte_errno = errno;\n-\t\trte_free(tis);\n-\t\treturn NULL;\n-\t}\n-\ttis->id = MLX5_GET(create_tis_out, out, tisn);\n-\treturn tis;\n-}\n-\n-/**\n- * Create transport domain using DevX API.\n- *\n- * @param[in] ctx\n- *   ibv_context returned from mlx5dv_open_device.\n- *\n- * @return\n- *   The DevX object created, NULL otherwise and rte_errno is set.\n- */\n-struct mlx5_devx_obj *\n-mlx5_devx_cmd_create_td(struct ibv_context *ctx)\n-{\n-\tuint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};\n-\tuint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};\n-\tstruct mlx5_devx_obj *td = NULL;\n-\n-\ttd = rte_calloc(__func__, 1, sizeof(*td), 0);\n-\tif (!td) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate TD object\");\n-\t\trte_errno = ENOMEM;\n-\t\treturn NULL;\n-\t}\n-\tMLX5_SET(alloc_transport_domain_in, in, opcode,\n-\t\t MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);\n-\ttd->obj = 
mlx5_glue->devx_obj_create(ctx, in, sizeof(in),\n-\t\t\t\t\t     out, sizeof(out));\n-\tif (!td->obj) {\n-\t\tDRV_LOG(ERR, \"Failed to create TIS using DevX\");\n-\t\trte_errno = errno;\n-\t\trte_free(td);\n-\t\treturn NULL;\n-\t}\n-\ttd->id = MLX5_GET(alloc_transport_domain_out, out,\n-\t\t\t   transport_domain);\n-\treturn td;\n-}\n-\n-/**\n- * Dump all flows to file.\n- *\n- * @param[in] fdb_domain\n- *   FDB domain.\n- * @param[in] rx_domain\n- *   RX domain.\n- * @param[in] tx_domain\n- *   TX domain.\n- * @param[out] file\n- *   Pointer to file stream.\n- *\n- * @return\n- *   0 on success, a nagative value otherwise.\n- */\n-int\n-mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,\n-\t\t\tvoid *rx_domain __rte_unused,\n-\t\t\tvoid *tx_domain __rte_unused, FILE *file __rte_unused)\n-{\n-\tint ret = 0;\n-\n-#ifdef HAVE_MLX5_DR_FLOW_DUMP\n-\tif (fdb_domain) {\n-\t\tret = mlx5_glue->dr_dump_domain(file, fdb_domain);\n-\t\tif (ret)\n-\t\t\treturn ret;\n-\t}\n-\tassert(rx_domain);\n-\tret = mlx5_glue->dr_dump_domain(file, rx_domain);\n-\tif (ret)\n-\t\treturn ret;\n-\tassert(tx_domain);\n-\tret = mlx5_glue->dr_dump_domain(file, tx_domain);\n-#else\n-\tret = ENOTSUP;\n-#endif\n-\treturn -ret;\n-}\ndiff --git a/drivers/net/mlx5/mlx5_devx_cmds.h b/drivers/net/mlx5/mlx5_devx_cmds.h\ndeleted file mode 100644\nindex 0c5afde..0000000\n--- a/drivers/net/mlx5/mlx5_devx_cmds.h\n+++ /dev/null\n@@ -1,227 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright 2019 Mellanox Technologies, Ltd\n- */\n-\n-#ifndef RTE_PMD_MLX5_DEVX_CMDS_H_\n-#define RTE_PMD_MLX5_DEVX_CMDS_H_\n-\n-#include \"mlx5_glue.h\"\n-\n-/* devX creation object */\n-struct mlx5_devx_obj {\n-\tstruct mlx5dv_devx_obj *obj; /* The DV object. */\n-\tint id; /* The object ID. */\n-};\n-\n-struct mlx5_devx_mkey_attr {\n-\tuint64_t addr;\n-\tuint64_t size;\n-\tuint32_t umem_id;\n-\tuint32_t pd;\n-};\n-\n-/* HCA qos attributes. */\n-struct mlx5_hca_qos_attr {\n-\tuint32_t sup:1;\t/* Whether QOS is supported. */\n-\tuint32_t srtcm_sup:1; /* Whether srTCM mode is supported. */\n-\tuint8_t log_max_flow_meter;\n-\t/* Power of the maximum supported meters. */\n-\tuint8_t flow_meter_reg_c_ids;\n-\t/* Bitmap of the reg_Cs available for flow meter to use. */\n-\n-};\n-\n-/* HCA supports this number of time periods for LRO. 
*/\n-#define MLX5_LRO_NUM_SUPP_PERIODS 4\n-\n-struct mlx5_hca_attr {\n-\tuint32_t eswitch_manager:1;\n-\tuint32_t flow_counters_dump:1;\n-\tuint8_t flow_counter_bulk_alloc_bitmap;\n-\tuint32_t eth_net_offloads:1;\n-\tuint32_t eth_virt:1;\n-\tuint32_t wqe_vlan_insert:1;\n-\tuint32_t wqe_inline_mode:2;\n-\tuint32_t vport_inline_mode:3;\n-\tuint32_t tunnel_stateless_geneve_rx:1;\n-\tuint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */\n-\tuint32_t tunnel_stateless_gtp:1;\n-\tuint32_t lro_cap:1;\n-\tuint32_t tunnel_lro_gre:1;\n-\tuint32_t tunnel_lro_vxlan:1;\n-\tuint32_t lro_max_msg_sz_mode:2;\n-\tuint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];\n-\tuint32_t flex_parser_protocols;\n-\tuint32_t hairpin:1;\n-\tuint32_t log_max_hairpin_queues:5;\n-\tuint32_t log_max_hairpin_wq_data_sz:5;\n-\tuint32_t log_max_hairpin_num_packets:5;\n-\tuint32_t vhca_id:16;\n-\tstruct mlx5_hca_qos_attr qos;\n-};\n-\n-struct mlx5_devx_wq_attr {\n-\tuint32_t wq_type:4;\n-\tuint32_t wq_signature:1;\n-\tuint32_t end_padding_mode:2;\n-\tuint32_t cd_slave:1;\n-\tuint32_t hds_skip_first_sge:1;\n-\tuint32_t log2_hds_buf_size:3;\n-\tuint32_t page_offset:5;\n-\tuint32_t lwm:16;\n-\tuint32_t pd:24;\n-\tuint32_t uar_page:24;\n-\tuint64_t dbr_addr;\n-\tuint32_t hw_counter;\n-\tuint32_t sw_counter;\n-\tuint32_t log_wq_stride:4;\n-\tuint32_t log_wq_pg_sz:5;\n-\tuint32_t log_wq_sz:5;\n-\tuint32_t dbr_umem_valid:1;\n-\tuint32_t wq_umem_valid:1;\n-\tuint32_t log_hairpin_num_packets:5;\n-\tuint32_t log_hairpin_data_sz:5;\n-\tuint32_t single_wqe_log_num_of_strides:4;\n-\tuint32_t two_byte_shift_en:1;\n-\tuint32_t single_stride_log_num_of_bytes:3;\n-\tuint32_t dbr_umem_id;\n-\tuint32_t wq_umem_id;\n-\tuint64_t wq_umem_offset;\n-};\n-\n-/* Create RQ attributes structure, used by create RQ operation. */\n-struct mlx5_devx_create_rq_attr {\n-\tuint32_t rlky:1;\n-\tuint32_t delay_drop_en:1;\n-\tuint32_t scatter_fcs:1;\n-\tuint32_t vsd:1;\n-\tuint32_t mem_rq_type:4;\n-\tuint32_t state:4;\n-\tuint32_t flush_in_error_en:1;\n-\tuint32_t hairpin:1;\n-\tuint32_t user_index:24;\n-\tuint32_t cqn:24;\n-\tuint32_t counter_set_id:8;\n-\tuint32_t rmpn:24;\n-\tstruct mlx5_devx_wq_attr wq_attr;\n-};\n-\n-/* Modify RQ attributes structure, used by modify RQ operation. */\n-struct mlx5_devx_modify_rq_attr {\n-\tuint32_t rqn:24;\n-\tuint32_t rq_state:4; /* Current RQ state. */\n-\tuint32_t state:4; /* Required RQ state. */\n-\tuint32_t scatter_fcs:1;\n-\tuint32_t vsd:1;\n-\tuint32_t counter_set_id:8;\n-\tuint32_t hairpin_peer_sq:24;\n-\tuint32_t hairpin_peer_vhca:16;\n-\tuint64_t modify_bitmask;\n-\tuint32_t lwm:16; /* Contained WQ lwm. */\n-};\n-\n-struct mlx5_rx_hash_field_select {\n-\tuint32_t l3_prot_type:1;\n-\tuint32_t l4_prot_type:1;\n-\tuint32_t selected_fields:30;\n-};\n-\n-/* TIR attributes structure, used by TIR operations. */\n-struct mlx5_devx_tir_attr {\n-\tuint32_t disp_type:4;\n-\tuint32_t lro_timeout_period_usecs:16;\n-\tuint32_t lro_enable_mask:4;\n-\tuint32_t lro_max_msg_sz:8;\n-\tuint32_t inline_rqn:24;\n-\tuint32_t rx_hash_symmetric:1;\n-\tuint32_t tunneled_offload_en:1;\n-\tuint32_t indirect_table:24;\n-\tuint32_t rx_hash_fn:4;\n-\tuint32_t self_lb_block:2;\n-\tuint32_t transport_domain:24;\n-\tuint32_t rx_hash_toeplitz_key[10];\n-\tstruct mlx5_rx_hash_field_select rx_hash_field_selector_outer;\n-\tstruct mlx5_rx_hash_field_select rx_hash_field_selector_inner;\n-};\n-\n-/* RQT attributes structure, used by RQT operations. 
*/\n-struct mlx5_devx_rqt_attr {\n-\tuint32_t rqt_max_size:16;\n-\tuint32_t rqt_actual_size:16;\n-\tuint32_t rq_list[];\n-};\n-\n-/* TIS attributes structure. */\n-struct mlx5_devx_tis_attr {\n-\tuint32_t strict_lag_tx_port_affinity:1;\n-\tuint32_t tls_en:1;\n-\tuint32_t lag_tx_port_affinity:4;\n-\tuint32_t prio:4;\n-\tuint32_t transport_domain:24;\n-};\n-\n-/* SQ attributes structure, used by SQ create operation. */\n-struct mlx5_devx_create_sq_attr {\n-\tuint32_t rlky:1;\n-\tuint32_t cd_master:1;\n-\tuint32_t fre:1;\n-\tuint32_t flush_in_error_en:1;\n-\tuint32_t allow_multi_pkt_send_wqe:1;\n-\tuint32_t min_wqe_inline_mode:3;\n-\tuint32_t state:4;\n-\tuint32_t reg_umr:1;\n-\tuint32_t allow_swp:1;\n-\tuint32_t hairpin:1;\n-\tuint32_t user_index:24;\n-\tuint32_t cqn:24;\n-\tuint32_t packet_pacing_rate_limit_index:16;\n-\tuint32_t tis_lst_sz:16;\n-\tuint32_t tis_num:24;\n-\tstruct mlx5_devx_wq_attr wq_attr;\n-};\n-\n-/* SQ attributes structure, used by SQ modify operation. */\n-struct mlx5_devx_modify_sq_attr {\n-\tuint32_t sq_state:4;\n-\tuint32_t state:4;\n-\tuint32_t hairpin_peer_rq:24;\n-\tuint32_t hairpin_peer_vhca:16;\n-};\n-\n-/* mlx5_devx_cmds.c */\n-\n-struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx,\n-\t\t\t\t\t\t       uint32_t bulk_sz);\n-int mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj);\n-int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,\n-\t\t\t\t     int clear, uint32_t n_counters,\n-\t\t\t\t     uint64_t *pkts, uint64_t *bytes,\n-\t\t\t\t     uint32_t mkey, void *addr,\n-\t\t\t\t     struct mlx5dv_devx_cmd_comp *cmd_comp,\n-\t\t\t\t     uint64_t async_id);\n-int mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,\n-\t\t\t\t struct mlx5_hca_attr *attr);\n-struct mlx5_devx_obj *mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,\n-\t\t\t\t\t      struct mlx5_devx_mkey_attr *attr);\n-int mlx5_devx_get_out_command_status(void *out);\n-int mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,\n-\t\t\t\t  uint32_t *tis_td);\n-struct mlx5_devx_obj *mlx5_devx_cmd_create_rq(struct ibv_context *ctx,\n-\t\t\t\t       struct mlx5_devx_create_rq_attr *rq_attr,\n-\t\t\t\t       int socket);\n-int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,\n-\t\t\t    struct mlx5_devx_modify_rq_attr *rq_attr);\n-struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(struct ibv_context *ctx,\n-\t\t\t\t\t   struct mlx5_devx_tir_attr *tir_attr);\n-struct mlx5_devx_obj *mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,\n-\t\t\t\t\t   struct mlx5_devx_rqt_attr *rqt_attr);\n-struct mlx5_devx_obj *mlx5_devx_cmd_create_sq(struct ibv_context *ctx,\n-\t\t\t\t      struct mlx5_devx_create_sq_attr *sq_attr);\n-int mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,\n-\t\t\t    struct mlx5_devx_modify_sq_attr *sq_attr);\n-struct mlx5_devx_obj *mlx5_devx_cmd_create_tis(struct ibv_context *ctx,\n-\t\t\t\t\t   struct mlx5_devx_tis_attr *tis_attr);\n-struct mlx5_devx_obj *mlx5_devx_cmd_create_td(struct ibv_context *ctx);\n-int mlx5_devx_cmd_flow_dump(void *fdb_domain, void *rx_domain, void *tx_domain,\n-\t\t\t    FILE *file);\n-#endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c\nindex ce0109c..eddf888 100644\n--- a/drivers/net/mlx5/mlx5_ethdev.c\n+++ b/drivers/net/mlx5/mlx5_ethdev.c\n@@ -36,9 +36,10 @@\n #include <rte_rwlock.h>\n #include <rte_cycles.h>\n \n+#include <mlx5_glue.h>\n+#include <mlx5_devx_cmds.h>\n+\n #include \"mlx5.h\"\n-#include \"mlx5_glue.h\"\n-#include \"mlx5_devx_cmds.h\"\n 
#include \"mlx5_rxtx.h\"\n #include \"mlx5_utils.h\"\n \ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 34f3a53..a2c07f5 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -27,12 +27,13 @@\n #include <rte_malloc.h>\n #include <rte_ip.h>\n \n-#include \"mlx5.h\"\n+#include <mlx5_glue.h>\n+#include <mlx5_devx_cmds.h>\n+#include <mlx5_prm.h>\n+\n #include \"mlx5_defs.h\"\n+#include \"mlx5.h\"\n #include \"mlx5_flow.h\"\n-#include \"mlx5_glue.h\"\n-#include \"mlx5_devx_cmds.h\"\n-#include \"mlx5_prm.h\"\n #include \"mlx5_rxtx.h\"\n \n /* Dev ops structure defined in mlx5.c */\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 9832542..55f9a5a 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -25,8 +25,9 @@\n #include <rte_alarm.h>\n #include <rte_mtr.h>\n \n+#include <mlx5_prm.h>\n+\n #include \"mlx5.h\"\n-#include \"mlx5_prm.h\"\n \n /* Private rte flow items. */\n enum mlx5_rte_flow_item_type {\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex d70dd4f..50d1078 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -29,12 +29,13 @@\n #include <rte_vxlan.h>\n #include <rte_gtp.h>\n \n-#include \"mlx5.h\"\n+#include <mlx5_glue.h>\n+#include <mlx5_devx_cmds.h>\n+#include <mlx5_prm.h>\n+\n #include \"mlx5_defs.h\"\n-#include \"mlx5_glue.h\"\n-#include \"mlx5_devx_cmds.h\"\n+#include \"mlx5.h\"\n #include \"mlx5_flow.h\"\n-#include \"mlx5_prm.h\"\n #include \"mlx5_rxtx.h\"\n \n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\ndiff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c\nindex c4d28b2..32d51c0 100644\n--- a/drivers/net/mlx5/mlx5_flow_meter.c\n+++ b/drivers/net/mlx5/mlx5_flow_meter.c\n@@ -9,6 +9,8 @@\n #include <rte_mtr.h>\n #include <rte_mtr_driver.h>\n \n+#include <mlx5_devx_cmds.h>\n+\n #include \"mlx5.h\"\n #include \"mlx5_flow.h\"\n \ndiff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c\nindex c787c98..8922bac 100644\n--- a/drivers/net/mlx5/mlx5_flow_verbs.c\n+++ b/drivers/net/mlx5/mlx5_flow_verbs.c\n@@ -26,11 +26,12 @@\n #include <rte_malloc.h>\n #include <rte_ip.h>\n \n-#include \"mlx5.h\"\n+#include <mlx5_glue.h>\n+#include <mlx5_prm.h>\n+\n #include \"mlx5_defs.h\"\n+#include \"mlx5.h\"\n #include \"mlx5_flow.h\"\n-#include \"mlx5_glue.h\"\n-#include \"mlx5_prm.h\"\n #include \"mlx5_rxtx.h\"\n \n #define VERBS_SPEC_INNER(item_flags) \\\ndiff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c\ndeleted file mode 100644\nindex 4906eeb..0000000\n--- a/drivers/net/mlx5/mlx5_glue.c\n+++ /dev/null\n@@ -1,1150 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright 2018 6WIND S.A.\n- * Copyright 2018 Mellanox Technologies, Ltd\n- */\n-\n-#include <errno.h>\n-#include <stdalign.h>\n-#include <stddef.h>\n-#include <stdint.h>\n-#include <stdlib.h>\n-\n-/*\n- * Not needed by this file; included to work around the lack of off_t\n- * definition for mlx5dv.h with unpatched rdma-core versions.\n- */\n-#include <sys/types.h>\n-\n-/* Verbs headers do not support -pedantic. 
*/\n-#ifdef PEDANTIC\n-#pragma GCC diagnostic ignored \"-Wpedantic\"\n-#endif\n-#include <infiniband/mlx5dv.h>\n-#include <infiniband/verbs.h>\n-#ifdef PEDANTIC\n-#pragma GCC diagnostic error \"-Wpedantic\"\n-#endif\n-\n-#include <rte_config.h>\n-\n-#include \"mlx5_autoconf.h\"\n-#include \"mlx5_glue.h\"\n-\n-static int\n-mlx5_glue_fork_init(void)\n-{\n-\treturn ibv_fork_init();\n-}\n-\n-static struct ibv_pd *\n-mlx5_glue_alloc_pd(struct ibv_context *context)\n-{\n-\treturn ibv_alloc_pd(context);\n-}\n-\n-static int\n-mlx5_glue_dealloc_pd(struct ibv_pd *pd)\n-{\n-\treturn ibv_dealloc_pd(pd);\n-}\n-\n-static struct ibv_device **\n-mlx5_glue_get_device_list(int *num_devices)\n-{\n-\treturn ibv_get_device_list(num_devices);\n-}\n-\n-static void\n-mlx5_glue_free_device_list(struct ibv_device **list)\n-{\n-\tibv_free_device_list(list);\n-}\n-\n-static struct ibv_context *\n-mlx5_glue_open_device(struct ibv_device *device)\n-{\n-\treturn ibv_open_device(device);\n-}\n-\n-static int\n-mlx5_glue_close_device(struct ibv_context *context)\n-{\n-\treturn ibv_close_device(context);\n-}\n-\n-static int\n-mlx5_glue_query_device(struct ibv_context *context,\n-\t\t       struct ibv_device_attr *device_attr)\n-{\n-\treturn ibv_query_device(context, device_attr);\n-}\n-\n-static int\n-mlx5_glue_query_device_ex(struct ibv_context *context,\n-\t\t\t  const struct ibv_query_device_ex_input *input,\n-\t\t\t  struct ibv_device_attr_ex *attr)\n-{\n-\treturn ibv_query_device_ex(context, input, attr);\n-}\n-\n-static int\n-mlx5_glue_query_rt_values_ex(struct ibv_context *context,\n-\t\t\t  struct ibv_values_ex *values)\n-{\n-\treturn ibv_query_rt_values_ex(context, values);\n-}\n-\n-static int\n-mlx5_glue_query_port(struct ibv_context *context, uint8_t port_num,\n-\t\t     struct ibv_port_attr *port_attr)\n-{\n-\treturn ibv_query_port(context, port_num, port_attr);\n-}\n-\n-static struct ibv_comp_channel *\n-mlx5_glue_create_comp_channel(struct ibv_context *context)\n-{\n-\treturn ibv_create_comp_channel(context);\n-}\n-\n-static int\n-mlx5_glue_destroy_comp_channel(struct ibv_comp_channel *channel)\n-{\n-\treturn ibv_destroy_comp_channel(channel);\n-}\n-\n-static struct ibv_cq *\n-mlx5_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,\n-\t\t    struct ibv_comp_channel *channel, int comp_vector)\n-{\n-\treturn ibv_create_cq(context, cqe, cq_context, channel, comp_vector);\n-}\n-\n-static int\n-mlx5_glue_destroy_cq(struct ibv_cq *cq)\n-{\n-\treturn ibv_destroy_cq(cq);\n-}\n-\n-static int\n-mlx5_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,\n-\t\t       void **cq_context)\n-{\n-\treturn ibv_get_cq_event(channel, cq, cq_context);\n-}\n-\n-static void\n-mlx5_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)\n-{\n-\tibv_ack_cq_events(cq, nevents);\n-}\n-\n-static struct ibv_rwq_ind_table *\n-mlx5_glue_create_rwq_ind_table(struct ibv_context *context,\n-\t\t\t       struct ibv_rwq_ind_table_init_attr *init_attr)\n-{\n-\treturn ibv_create_rwq_ind_table(context, init_attr);\n-}\n-\n-static int\n-mlx5_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)\n-{\n-\treturn ibv_destroy_rwq_ind_table(rwq_ind_table);\n-}\n-\n-static struct ibv_wq *\n-mlx5_glue_create_wq(struct ibv_context *context,\n-\t\t    struct ibv_wq_init_attr *wq_init_attr)\n-{\n-\treturn ibv_create_wq(context, wq_init_attr);\n-}\n-\n-static int\n-mlx5_glue_destroy_wq(struct ibv_wq *wq)\n-{\n-\treturn ibv_destroy_wq(wq);\n-}\n-static int\n-mlx5_glue_modify_wq(struct ibv_wq *wq, struct 
ibv_wq_attr *wq_attr)\n-{\n-\treturn ibv_modify_wq(wq, wq_attr);\n-}\n-\n-static struct ibv_flow *\n-mlx5_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)\n-{\n-\treturn ibv_create_flow(qp, flow);\n-}\n-\n-static int\n-mlx5_glue_destroy_flow(struct ibv_flow *flow_id)\n-{\n-\treturn ibv_destroy_flow(flow_id);\n-}\n-\n-static int\n-mlx5_glue_destroy_flow_action(void *action)\n-{\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_action_destroy(action);\n-#else\n-\tstruct mlx5dv_flow_action_attr *attr = action;\n-\tint res = 0;\n-\tswitch (attr->type) {\n-\tcase MLX5DV_FLOW_ACTION_TAG:\n-\t\tbreak;\n-\tdefault:\n-\t\tres = ibv_destroy_flow_action(attr->action);\n-\t\tbreak;\n-\t}\n-\tfree(action);\n-\treturn res;\n-#endif\n-#else\n-\t(void)action;\n-\treturn ENOTSUP;\n-#endif\n-}\n-\n-static struct ibv_qp *\n-mlx5_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)\n-{\n-\treturn ibv_create_qp(pd, qp_init_attr);\n-}\n-\n-static struct ibv_qp *\n-mlx5_glue_create_qp_ex(struct ibv_context *context,\n-\t\t       struct ibv_qp_init_attr_ex *qp_init_attr_ex)\n-{\n-\treturn ibv_create_qp_ex(context, qp_init_attr_ex);\n-}\n-\n-static int\n-mlx5_glue_destroy_qp(struct ibv_qp *qp)\n-{\n-\treturn ibv_destroy_qp(qp);\n-}\n-\n-static int\n-mlx5_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)\n-{\n-\treturn ibv_modify_qp(qp, attr, attr_mask);\n-}\n-\n-static struct ibv_mr *\n-mlx5_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)\n-{\n-\treturn ibv_reg_mr(pd, addr, length, access);\n-}\n-\n-static int\n-mlx5_glue_dereg_mr(struct ibv_mr *mr)\n-{\n-\treturn ibv_dereg_mr(mr);\n-}\n-\n-static struct ibv_counter_set *\n-mlx5_glue_create_counter_set(struct ibv_context *context,\n-\t\t\t     struct ibv_counter_set_init_attr *init_attr)\n-{\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n-\t(void)context;\n-\t(void)init_attr;\n-\treturn NULL;\n-#else\n-\treturn ibv_create_counter_set(context, init_attr);\n-#endif\n-}\n-\n-static int\n-mlx5_glue_destroy_counter_set(struct ibv_counter_set *cs)\n-{\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n-\t(void)cs;\n-\treturn ENOTSUP;\n-#else\n-\treturn ibv_destroy_counter_set(cs);\n-#endif\n-}\n-\n-static int\n-mlx5_glue_describe_counter_set(struct ibv_context *context,\n-\t\t\t       uint16_t counter_set_id,\n-\t\t\t       struct ibv_counter_set_description *cs_desc)\n-{\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n-\t(void)context;\n-\t(void)counter_set_id;\n-\t(void)cs_desc;\n-\treturn ENOTSUP;\n-#else\n-\treturn ibv_describe_counter_set(context, counter_set_id, cs_desc);\n-#endif\n-}\n-\n-static int\n-mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,\n-\t\t\t    struct ibv_counter_set_data *cs_data)\n-{\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n-\t(void)query_attr;\n-\t(void)cs_data;\n-\treturn ENOTSUP;\n-#else\n-\treturn ibv_query_counter_set(query_attr, cs_data);\n-#endif\n-}\n-\n-static struct ibv_counters *\n-mlx5_glue_create_counters(struct ibv_context *context,\n-\t\t\t  struct ibv_counters_init_attr *init_attr)\n-{\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n-\t(void)context;\n-\t(void)init_attr;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#else\n-\treturn ibv_create_counters(context, init_attr);\n-#endif\n-}\n-\n-static int\n-mlx5_glue_destroy_counters(struct ibv_counters *counters)\n-{\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n-\t(void)counters;\n-\treturn ENOTSUP;\n-#else\n-\treturn 
ibv_destroy_counters(counters);\n-#endif\n-}\n-\n-static int\n-mlx5_glue_attach_counters(struct ibv_counters *counters,\n-\t\t\t  struct ibv_counter_attach_attr *attr,\n-\t\t\t  struct ibv_flow *flow)\n-{\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n-\t(void)counters;\n-\t(void)attr;\n-\t(void)flow;\n-\treturn ENOTSUP;\n-#else\n-\treturn ibv_attach_counters_point_flow(counters, attr, flow);\n-#endif\n-}\n-\n-static int\n-mlx5_glue_query_counters(struct ibv_counters *counters,\n-\t\t\t uint64_t *counters_value,\n-\t\t\t uint32_t ncounters,\n-\t\t\t uint32_t flags)\n-{\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n-\t(void)counters;\n-\t(void)counters_value;\n-\t(void)ncounters;\n-\t(void)flags;\n-\treturn ENOTSUP;\n-#else\n-\treturn ibv_read_counters(counters, counters_value, ncounters, flags);\n-#endif\n-}\n-\n-static void\n-mlx5_glue_ack_async_event(struct ibv_async_event *event)\n-{\n-\tibv_ack_async_event(event);\n-}\n-\n-static int\n-mlx5_glue_get_async_event(struct ibv_context *context,\n-\t\t\t  struct ibv_async_event *event)\n-{\n-\treturn ibv_get_async_event(context, event);\n-}\n-\n-static const char *\n-mlx5_glue_port_state_str(enum ibv_port_state port_state)\n-{\n-\treturn ibv_port_state_str(port_state);\n-}\n-\n-static struct ibv_cq *\n-mlx5_glue_cq_ex_to_cq(struct ibv_cq_ex *cq)\n-{\n-\treturn ibv_cq_ex_to_cq(cq);\n-}\n-\n-static void *\n-mlx5_glue_dr_create_flow_action_dest_flow_tbl(void *tbl)\n-{\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_action_create_dest_table(tbl);\n-#else\n-\t(void)tbl;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dr_create_flow_action_dest_port(void *domain, uint32_t port)\n-{\n-#ifdef HAVE_MLX5DV_DR_DEVX_PORT\n-\treturn mlx5dv_dr_action_create_dest_ib_port(domain, port);\n-#else\n-#ifdef HAVE_MLX5DV_DR_ESWITCH\n-\treturn mlx5dv_dr_action_create_dest_vport(domain, port);\n-#else\n-\t(void)domain;\n-\t(void)port;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dr_create_flow_action_drop(void)\n-{\n-#ifdef HAVE_MLX5DV_DR_ESWITCH\n-\treturn mlx5dv_dr_action_create_drop();\n-#else\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dr_create_flow_action_push_vlan(struct mlx5dv_dr_domain *domain,\n-\t\t\t\t\t  rte_be32_t vlan_tag)\n-{\n-#ifdef HAVE_MLX5DV_DR_VLAN\n-\treturn mlx5dv_dr_action_create_push_vlan(domain, vlan_tag);\n-#else\n-\t(void)domain;\n-\t(void)vlan_tag;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dr_create_flow_action_pop_vlan(void)\n-{\n-#ifdef HAVE_MLX5DV_DR_VLAN\n-\treturn mlx5dv_dr_action_create_pop_vlan();\n-#else\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dr_create_flow_tbl(void *domain, uint32_t level)\n-{\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_table_create(domain, level);\n-#else\n-\t(void)domain;\n-\t(void)level;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_dr_destroy_flow_tbl(void *tbl)\n-{\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_table_destroy(tbl);\n-#else\n-\t(void)tbl;\n-\terrno = ENOTSUP;\n-\treturn errno;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dr_create_domain(struct ibv_context *ctx,\n-\t\t\t   enum  mlx5dv_dr_domain_type domain)\n-{\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_domain_create(ctx, domain);\n-#else\n-\t(void)ctx;\n-\t(void)domain;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_dr_destroy_domain(void *domain)\n-{\n-#ifdef HAVE_MLX5DV_DR\n-\treturn 
mlx5dv_dr_domain_destroy(domain);\n-#else\n-\t(void)domain;\n-\terrno = ENOTSUP;\n-\treturn errno;\n-#endif\n-}\n-\n-static struct ibv_cq_ex *\n-mlx5_glue_dv_create_cq(struct ibv_context *context,\n-\t\t       struct ibv_cq_init_attr_ex *cq_attr,\n-\t\t       struct mlx5dv_cq_init_attr *mlx5_cq_attr)\n-{\n-\treturn mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);\n-}\n-\n-static struct ibv_wq *\n-mlx5_glue_dv_create_wq(struct ibv_context *context,\n-\t\t       struct ibv_wq_init_attr *wq_attr,\n-\t\t       struct mlx5dv_wq_init_attr *mlx5_wq_attr)\n-{\n-#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT\n-\t(void)context;\n-\t(void)wq_attr;\n-\t(void)mlx5_wq_attr;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#else\n-\treturn mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr);\n-#endif\n-}\n-\n-static int\n-mlx5_glue_dv_query_device(struct ibv_context *ctx,\n-\t\t\t  struct mlx5dv_context *attrs_out)\n-{\n-\treturn mlx5dv_query_device(ctx, attrs_out);\n-}\n-\n-static int\n-mlx5_glue_dv_set_context_attr(struct ibv_context *ibv_ctx,\n-\t\t\t      enum mlx5dv_set_ctx_attr_type type, void *attr)\n-{\n-\treturn mlx5dv_set_context_attr(ibv_ctx, type, attr);\n-}\n-\n-static int\n-mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)\n-{\n-\treturn mlx5dv_init_obj(obj, obj_type);\n-}\n-\n-static struct ibv_qp *\n-mlx5_glue_dv_create_qp(struct ibv_context *context,\n-\t\t       struct ibv_qp_init_attr_ex *qp_init_attr_ex,\n-\t\t       struct mlx5dv_qp_init_attr *dv_qp_init_attr)\n-{\n-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n-\treturn mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr);\n-#else\n-\t(void)context;\n-\t(void)qp_init_attr_ex;\n-\t(void)dv_qp_init_attr;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dv_create_flow_matcher(struct ibv_context *context,\n-\t\t\t\t struct mlx5dv_flow_matcher_attr *matcher_attr,\n-\t\t\t\t void *tbl)\n-{\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-#ifdef HAVE_MLX5DV_DR\n-\t(void)context;\n-\treturn mlx5dv_dr_matcher_create(tbl, matcher_attr->priority,\n-\t\t\t\t\tmatcher_attr->match_criteria_enable,\n-\t\t\t\t\tmatcher_attr->match_mask);\n-#else\n-\t(void)tbl;\n-\treturn mlx5dv_create_flow_matcher(context, matcher_attr);\n-#endif\n-#else\n-\t(void)context;\n-\t(void)matcher_attr;\n-\t(void)tbl;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dv_create_flow(void *matcher,\n-\t\t\t void *match_value,\n-\t\t\t size_t num_actions,\n-\t\t\t void *actions[])\n-{\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_rule_create(matcher, match_value, num_actions,\n-\t\t\t\t     (struct mlx5dv_dr_action **)actions);\n-#else\n-\tstruct mlx5dv_flow_action_attr actions_attr[8];\n-\n-\tif (num_actions > 8)\n-\t\treturn NULL;\n-\tfor (size_t i = 0; i < num_actions; i++)\n-\t\tactions_attr[i] =\n-\t\t\t*((struct mlx5dv_flow_action_attr *)(actions[i]));\n-\treturn mlx5dv_create_flow(matcher, match_value,\n-\t\t\t\t  num_actions, actions_attr);\n-#endif\n-#else\n-\t(void)matcher;\n-\t(void)match_value;\n-\t(void)num_actions;\n-\t(void)actions;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dv_create_flow_action_counter(void *counter_obj, uint32_t offset)\n-{\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_action_create_flow_counter(counter_obj, offset);\n-#else\n-\tstruct mlx5dv_flow_action_attr *action;\n-\n-\t(void)offset;\n-\taction = malloc(sizeof(*action));\n-\tif (!action)\n-\t\treturn NULL;\n-\taction->type = 
MLX5DV_FLOW_ACTION_COUNTERS_DEVX;\n-\taction->obj = counter_obj;\n-\treturn action;\n-#endif\n-#else\n-\t(void)counter_obj;\n-\t(void)offset;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dv_create_flow_action_dest_ibv_qp(void *qp)\n-{\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_action_create_dest_ibv_qp(qp);\n-#else\n-\tstruct mlx5dv_flow_action_attr *action;\n-\n-\taction = malloc(sizeof(*action));\n-\tif (!action)\n-\t\treturn NULL;\n-\taction->type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;\n-\taction->obj = qp;\n-\treturn action;\n-#endif\n-#else\n-\t(void)qp;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dv_create_flow_action_dest_devx_tir(void *tir)\n-{\n-#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR\n-\treturn mlx5dv_dr_action_create_dest_devx_tir(tir);\n-#else\n-\t(void)tir;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dv_create_flow_action_modify_header\n-\t\t\t\t\t(struct ibv_context *ctx,\n-\t\t\t\t\t enum mlx5dv_flow_table_type ft_type,\n-\t\t\t\t\t void *domain, uint64_t flags,\n-\t\t\t\t\t size_t actions_sz,\n-\t\t\t\t\t uint64_t actions[])\n-{\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-#ifdef HAVE_MLX5DV_DR\n-\t(void)ctx;\n-\t(void)ft_type;\n-\treturn mlx5dv_dr_action_create_modify_header(domain, flags, actions_sz,\n-\t\t\t\t\t\t     (__be64 *)actions);\n-#else\n-\tstruct mlx5dv_flow_action_attr *action;\n-\n-\t(void)domain;\n-\t(void)flags;\n-\taction = malloc(sizeof(*action));\n-\tif (!action)\n-\t\treturn NULL;\n-\taction->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;\n-\taction->action = mlx5dv_create_flow_action_modify_header\n-\t\t(ctx, actions_sz, actions, ft_type);\n-\treturn action;\n-#endif\n-#else\n-\t(void)ctx;\n-\t(void)ft_type;\n-\t(void)domain;\n-\t(void)flags;\n-\t(void)actions_sz;\n-\t(void)actions;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dv_create_flow_action_packet_reformat\n-\t\t(struct ibv_context *ctx,\n-\t\t enum mlx5dv_flow_action_packet_reformat_type reformat_type,\n-\t\t enum mlx5dv_flow_table_type ft_type,\n-\t\t struct mlx5dv_dr_domain *domain,\n-\t\t uint32_t flags, size_t data_sz, void *data)\n-{\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-#ifdef HAVE_MLX5DV_DR\n-\t(void)ctx;\n-\t(void)ft_type;\n-\treturn mlx5dv_dr_action_create_packet_reformat(domain, flags,\n-\t\t\t\t\t\t       reformat_type, data_sz,\n-\t\t\t\t\t\t       data);\n-#else\n-\t(void)domain;\n-\t(void)flags;\n-\tstruct mlx5dv_flow_action_attr *action;\n-\n-\taction = malloc(sizeof(*action));\n-\tif (!action)\n-\t\treturn NULL;\n-\taction->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;\n-\taction->action = mlx5dv_create_flow_action_packet_reformat\n-\t\t(ctx, data_sz, data, reformat_type, ft_type);\n-\treturn action;\n-#endif\n-#else\n-\t(void)ctx;\n-\t(void)reformat_type;\n-\t(void)ft_type;\n-\t(void)domain;\n-\t(void)flags;\n-\t(void)data_sz;\n-\t(void)data;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static void *\n-mlx5_glue_dv_create_flow_action_tag(uint32_t tag)\n-{\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_action_create_tag(tag);\n-#else\n-\tstruct mlx5dv_flow_action_attr *action;\n-\taction = malloc(sizeof(*action));\n-\tif (!action)\n-\t\treturn NULL;\n-\taction->type = MLX5DV_FLOW_ACTION_TAG;\n-\taction->tag_value = tag;\n-\treturn action;\n-#endif\n-#endif\n-\t(void)tag;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-}\n-\n-static void 
*\n-mlx5_glue_dv_create_flow_action_meter(struct mlx5dv_dr_flow_meter_attr *attr)\n-{\n-#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)\n-\treturn mlx5dv_dr_action_create_flow_meter(attr);\n-#else\n-\t(void)attr;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_dv_modify_flow_action_meter(void *action,\n-\t\t\t\t      struct mlx5dv_dr_flow_meter_attr *attr,\n-\t\t\t\t      uint64_t modify_bits)\n-{\n-#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)\n-\treturn mlx5dv_dr_action_modify_flow_meter(action, attr, modify_bits);\n-#else\n-\t(void)action;\n-\t(void)attr;\n-\t(void)modify_bits;\n-\terrno = ENOTSUP;\n-\treturn errno;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_dv_destroy_flow(void *flow_id)\n-{\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_rule_destroy(flow_id);\n-#else\n-\treturn ibv_destroy_flow(flow_id);\n-#endif\n-}\n-\n-static int\n-mlx5_glue_dv_destroy_flow_matcher(void *matcher)\n-{\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-#ifdef HAVE_MLX5DV_DR\n-\treturn mlx5dv_dr_matcher_destroy(matcher);\n-#else\n-\treturn mlx5dv_destroy_flow_matcher(matcher);\n-#endif\n-#else\n-\t(void)matcher;\n-\terrno = ENOTSUP;\n-\treturn errno;\n-#endif\n-}\n-\n-static struct ibv_context *\n-mlx5_glue_dv_open_device(struct ibv_device *device)\n-{\n-#ifdef HAVE_IBV_DEVX_OBJ\n-\treturn mlx5dv_open_device(device,\n-\t\t\t\t  &(struct mlx5dv_context_attr){\n-\t\t\t\t\t.flags = MLX5DV_CONTEXT_FLAGS_DEVX,\n-\t\t\t\t  });\n-#else\n-\t(void)device;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static struct mlx5dv_devx_obj *\n-mlx5_glue_devx_obj_create(struct ibv_context *ctx,\n-\t\t\t  const void *in, size_t inlen,\n-\t\t\t  void *out, size_t outlen)\n-{\n-#ifdef HAVE_IBV_DEVX_OBJ\n-\treturn mlx5dv_devx_obj_create(ctx, in, inlen, out, outlen);\n-#else\n-\t(void)ctx;\n-\t(void)in;\n-\t(void)inlen;\n-\t(void)out;\n-\t(void)outlen;\n-\terrno = ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_devx_obj_destroy(struct mlx5dv_devx_obj *obj)\n-{\n-#ifdef HAVE_IBV_DEVX_OBJ\n-\treturn mlx5dv_devx_obj_destroy(obj);\n-#else\n-\t(void)obj;\n-\treturn -ENOTSUP;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_devx_obj_query(struct mlx5dv_devx_obj *obj,\n-\t\t\t const void *in, size_t inlen,\n-\t\t\t void *out, size_t outlen)\n-{\n-#ifdef HAVE_IBV_DEVX_OBJ\n-\treturn mlx5dv_devx_obj_query(obj, in, inlen, out, outlen);\n-#else\n-\t(void)obj;\n-\t(void)in;\n-\t(void)inlen;\n-\t(void)out;\n-\t(void)outlen;\n-\treturn -ENOTSUP;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_devx_obj_modify(struct mlx5dv_devx_obj *obj,\n-\t\t\t  const void *in, size_t inlen,\n-\t\t\t  void *out, size_t outlen)\n-{\n-#ifdef HAVE_IBV_DEVX_OBJ\n-\treturn mlx5dv_devx_obj_modify(obj, in, inlen, out, outlen);\n-#else\n-\t(void)obj;\n-\t(void)in;\n-\t(void)inlen;\n-\t(void)out;\n-\t(void)outlen;\n-\treturn -ENOTSUP;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_devx_general_cmd(struct ibv_context *ctx,\n-\t\t\t   const void *in, size_t inlen,\n-\t\t\t   void *out, size_t outlen)\n-{\n-#ifdef HAVE_IBV_DEVX_OBJ\n-\treturn mlx5dv_devx_general_cmd(ctx, in, inlen, out, outlen);\n-#else\n-\t(void)ctx;\n-\t(void)in;\n-\t(void)inlen;\n-\t(void)out;\n-\t(void)outlen;\n-\treturn -ENOTSUP;\n-#endif\n-}\n-\n-static struct mlx5dv_devx_cmd_comp *\n-mlx5_glue_devx_create_cmd_comp(struct ibv_context *ctx)\n-{\n-#ifdef HAVE_IBV_DEVX_ASYNC\n-\treturn mlx5dv_devx_create_cmd_comp(ctx);\n-#else\n-\t(void)ctx;\n-\terrno = -ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static 
void\n-mlx5_glue_devx_destroy_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp)\n-{\n-#ifdef HAVE_IBV_DEVX_ASYNC\n-\tmlx5dv_devx_destroy_cmd_comp(cmd_comp);\n-#else\n-\t(void)cmd_comp;\n-\terrno = -ENOTSUP;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_devx_obj_query_async(struct mlx5dv_devx_obj *obj, const void *in,\n-\t\t\t       size_t inlen, size_t outlen, uint64_t wr_id,\n-\t\t\t       struct mlx5dv_devx_cmd_comp *cmd_comp)\n-{\n-#ifdef HAVE_IBV_DEVX_ASYNC\n-\treturn mlx5dv_devx_obj_query_async(obj, in, inlen, outlen, wr_id,\n-\t\t\t\t\t   cmd_comp);\n-#else\n-\t(void)obj;\n-\t(void)in;\n-\t(void)inlen;\n-\t(void)outlen;\n-\t(void)wr_id;\n-\t(void)cmd_comp;\n-\treturn -ENOTSUP;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_devx_get_async_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp,\n-\t\t\t\t  struct mlx5dv_devx_async_cmd_hdr *cmd_resp,\n-\t\t\t\t  size_t cmd_resp_len)\n-{\n-#ifdef HAVE_IBV_DEVX_ASYNC\n-\treturn mlx5dv_devx_get_async_cmd_comp(cmd_comp, cmd_resp,\n-\t\t\t\t\t      cmd_resp_len);\n-#else\n-\t(void)cmd_comp;\n-\t(void)cmd_resp;\n-\t(void)cmd_resp_len;\n-\treturn -ENOTSUP;\n-#endif\n-}\n-\n-static struct mlx5dv_devx_umem *\n-mlx5_glue_devx_umem_reg(struct ibv_context *context, void *addr, size_t size,\n-\t\t\tuint32_t access)\n-{\n-#ifdef HAVE_IBV_DEVX_OBJ\n-\treturn mlx5dv_devx_umem_reg(context, addr, size, access);\n-#else\n-\t(void)context;\n-\t(void)addr;\n-\t(void)size;\n-\t(void)access;\n-\terrno = -ENOTSUP;\n-\treturn NULL;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_devx_umem_dereg(struct mlx5dv_devx_umem *dv_devx_umem)\n-{\n-#ifdef HAVE_IBV_DEVX_OBJ\n-\treturn mlx5dv_devx_umem_dereg(dv_devx_umem);\n-#else\n-\t(void)dv_devx_umem;\n-\treturn -ENOTSUP;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_devx_qp_query(struct ibv_qp *qp,\n-\t\t\tconst void *in, size_t inlen,\n-\t\t\tvoid *out, size_t outlen)\n-{\n-#ifdef HAVE_IBV_DEVX_OBJ\n-\treturn mlx5dv_devx_qp_query(qp, in, inlen, out, outlen);\n-#else\n-\t(void)qp;\n-\t(void)in;\n-\t(void)inlen;\n-\t(void)out;\n-\t(void)outlen;\n-\terrno = ENOTSUP;\n-\treturn errno;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_devx_port_query(struct ibv_context *ctx,\n-\t\t\t  uint32_t port_num,\n-\t\t\t  struct mlx5dv_devx_port *mlx5_devx_port)\n-{\n-#ifdef HAVE_MLX5DV_DR_DEVX_PORT\n-\treturn mlx5dv_query_devx_port(ctx, port_num, mlx5_devx_port);\n-#else\n-\t(void)ctx;\n-\t(void)port_num;\n-\t(void)mlx5_devx_port;\n-\terrno = ENOTSUP;\n-\treturn errno;\n-#endif\n-}\n-\n-static int\n-mlx5_glue_dr_dump_domain(FILE *file, void *domain)\n-{\n-#ifdef HAVE_MLX5_DR_FLOW_DUMP\n-\treturn mlx5dv_dump_dr_domain(file, domain);\n-#else\n-\tRTE_SET_USED(file);\n-\tRTE_SET_USED(domain);\n-\treturn -ENOTSUP;\n-#endif\n-}\n-\n-alignas(RTE_CACHE_LINE_SIZE)\n-const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){\n-\t.version = MLX5_GLUE_VERSION,\n-\t.fork_init = mlx5_glue_fork_init,\n-\t.alloc_pd = mlx5_glue_alloc_pd,\n-\t.dealloc_pd = mlx5_glue_dealloc_pd,\n-\t.get_device_list = mlx5_glue_get_device_list,\n-\t.free_device_list = mlx5_glue_free_device_list,\n-\t.open_device = mlx5_glue_open_device,\n-\t.close_device = mlx5_glue_close_device,\n-\t.query_device = mlx5_glue_query_device,\n-\t.query_device_ex = mlx5_glue_query_device_ex,\n-\t.query_rt_values_ex = mlx5_glue_query_rt_values_ex,\n-\t.query_port = mlx5_glue_query_port,\n-\t.create_comp_channel = mlx5_glue_create_comp_channel,\n-\t.destroy_comp_channel = mlx5_glue_destroy_comp_channel,\n-\t.create_cq = mlx5_glue_create_cq,\n-\t.destroy_cq = mlx5_glue_destroy_cq,\n-\t.get_cq_event = 
mlx5_glue_get_cq_event,\n-\t.ack_cq_events = mlx5_glue_ack_cq_events,\n-\t.create_rwq_ind_table = mlx5_glue_create_rwq_ind_table,\n-\t.destroy_rwq_ind_table = mlx5_glue_destroy_rwq_ind_table,\n-\t.create_wq = mlx5_glue_create_wq,\n-\t.destroy_wq = mlx5_glue_destroy_wq,\n-\t.modify_wq = mlx5_glue_modify_wq,\n-\t.create_flow = mlx5_glue_create_flow,\n-\t.destroy_flow = mlx5_glue_destroy_flow,\n-\t.destroy_flow_action = mlx5_glue_destroy_flow_action,\n-\t.create_qp = mlx5_glue_create_qp,\n-\t.create_qp_ex = mlx5_glue_create_qp_ex,\n-\t.destroy_qp = mlx5_glue_destroy_qp,\n-\t.modify_qp = mlx5_glue_modify_qp,\n-\t.reg_mr = mlx5_glue_reg_mr,\n-\t.dereg_mr = mlx5_glue_dereg_mr,\n-\t.create_counter_set = mlx5_glue_create_counter_set,\n-\t.destroy_counter_set = mlx5_glue_destroy_counter_set,\n-\t.describe_counter_set = mlx5_glue_describe_counter_set,\n-\t.query_counter_set = mlx5_glue_query_counter_set,\n-\t.create_counters = mlx5_glue_create_counters,\n-\t.destroy_counters = mlx5_glue_destroy_counters,\n-\t.attach_counters = mlx5_glue_attach_counters,\n-\t.query_counters = mlx5_glue_query_counters,\n-\t.ack_async_event = mlx5_glue_ack_async_event,\n-\t.get_async_event = mlx5_glue_get_async_event,\n-\t.port_state_str = mlx5_glue_port_state_str,\n-\t.cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,\n-\t.dr_create_flow_action_dest_flow_tbl =\n-\t\tmlx5_glue_dr_create_flow_action_dest_flow_tbl,\n-\t.dr_create_flow_action_dest_port =\n-\t\tmlx5_glue_dr_create_flow_action_dest_port,\n-\t.dr_create_flow_action_drop =\n-\t\tmlx5_glue_dr_create_flow_action_drop,\n-\t.dr_create_flow_action_push_vlan =\n-\t\tmlx5_glue_dr_create_flow_action_push_vlan,\n-\t.dr_create_flow_action_pop_vlan =\n-\t\tmlx5_glue_dr_create_flow_action_pop_vlan,\n-\t.dr_create_flow_tbl = mlx5_glue_dr_create_flow_tbl,\n-\t.dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,\n-\t.dr_create_domain = mlx5_glue_dr_create_domain,\n-\t.dr_destroy_domain = mlx5_glue_dr_destroy_domain,\n-\t.dv_create_cq = mlx5_glue_dv_create_cq,\n-\t.dv_create_wq = mlx5_glue_dv_create_wq,\n-\t.dv_query_device = mlx5_glue_dv_query_device,\n-\t.dv_set_context_attr = mlx5_glue_dv_set_context_attr,\n-\t.dv_init_obj = mlx5_glue_dv_init_obj,\n-\t.dv_create_qp = mlx5_glue_dv_create_qp,\n-\t.dv_create_flow_matcher = mlx5_glue_dv_create_flow_matcher,\n-\t.dv_create_flow = mlx5_glue_dv_create_flow,\n-\t.dv_create_flow_action_counter =\n-\t\tmlx5_glue_dv_create_flow_action_counter,\n-\t.dv_create_flow_action_dest_ibv_qp =\n-\t\tmlx5_glue_dv_create_flow_action_dest_ibv_qp,\n-\t.dv_create_flow_action_dest_devx_tir =\n-\t\tmlx5_glue_dv_create_flow_action_dest_devx_tir,\n-\t.dv_create_flow_action_modify_header =\n-\t\tmlx5_glue_dv_create_flow_action_modify_header,\n-\t.dv_create_flow_action_packet_reformat =\n-\t\tmlx5_glue_dv_create_flow_action_packet_reformat,\n-\t.dv_create_flow_action_tag =  mlx5_glue_dv_create_flow_action_tag,\n-\t.dv_create_flow_action_meter = mlx5_glue_dv_create_flow_action_meter,\n-\t.dv_modify_flow_action_meter = mlx5_glue_dv_modify_flow_action_meter,\n-\t.dv_destroy_flow = mlx5_glue_dv_destroy_flow,\n-\t.dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,\n-\t.dv_open_device = mlx5_glue_dv_open_device,\n-\t.devx_obj_create = mlx5_glue_devx_obj_create,\n-\t.devx_obj_destroy = mlx5_glue_devx_obj_destroy,\n-\t.devx_obj_query = mlx5_glue_devx_obj_query,\n-\t.devx_obj_modify = mlx5_glue_devx_obj_modify,\n-\t.devx_general_cmd = mlx5_glue_devx_general_cmd,\n-\t.devx_create_cmd_comp = mlx5_glue_devx_create_cmd_comp,\n-\t.devx_destroy_cmd_comp = 
mlx5_glue_devx_destroy_cmd_comp,\n-\t.devx_obj_query_async = mlx5_glue_devx_obj_query_async,\n-\t.devx_get_async_cmd_comp = mlx5_glue_devx_get_async_cmd_comp,\n-\t.devx_umem_reg = mlx5_glue_devx_umem_reg,\n-\t.devx_umem_dereg = mlx5_glue_devx_umem_dereg,\n-\t.devx_qp_query = mlx5_glue_devx_qp_query,\n-\t.devx_port_query = mlx5_glue_devx_port_query,\n-\t.dr_dump_domain = mlx5_glue_dr_dump_domain,\n-};\ndiff --git a/drivers/net/mlx5/mlx5_glue.h b/drivers/net/mlx5/mlx5_glue.h\ndeleted file mode 100644\nindex 6771a18..0000000\n--- a/drivers/net/mlx5/mlx5_glue.h\n+++ /dev/null\n@@ -1,264 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright 2018 6WIND S.A.\n- * Copyright 2018 Mellanox Technologies, Ltd\n- */\n-\n-#ifndef MLX5_GLUE_H_\n-#define MLX5_GLUE_H_\n-\n-#include <stddef.h>\n-#include <stdint.h>\n-\n-#include \"rte_byteorder.h\"\n-\n-/* Verbs headers do not support -pedantic. */\n-#ifdef PEDANTIC\n-#pragma GCC diagnostic ignored \"-Wpedantic\"\n-#endif\n-#include <infiniband/mlx5dv.h>\n-#include <infiniband/verbs.h>\n-#ifdef PEDANTIC\n-#pragma GCC diagnostic error \"-Wpedantic\"\n-#endif\n-\n-#ifndef MLX5_GLUE_VERSION\n-#define MLX5_GLUE_VERSION \"\"\n-#endif\n-\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n-struct ibv_counter_set;\n-struct ibv_counter_set_data;\n-struct ibv_counter_set_description;\n-struct ibv_counter_set_init_attr;\n-struct ibv_query_counter_set_attr;\n-#endif\n-\n-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n-struct ibv_counters;\n-struct ibv_counters_init_attr;\n-struct ibv_counter_attach_attr;\n-#endif\n-\n-#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n-struct mlx5dv_qp_init_attr;\n-#endif\n-\n-#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT\n-struct mlx5dv_wq_init_attr;\n-#endif\n-\n-#ifndef HAVE_IBV_FLOW_DV_SUPPORT\n-struct mlx5dv_flow_matcher;\n-struct mlx5dv_flow_matcher_attr;\n-struct mlx5dv_flow_action_attr;\n-struct mlx5dv_flow_match_parameters;\n-struct mlx5dv_dr_flow_meter_attr;\n-struct ibv_flow_action;\n-enum mlx5dv_flow_action_packet_reformat_type { packet_reformat_type = 0, };\n-enum mlx5dv_flow_table_type { flow_table_type = 0, };\n-#endif\n-\n-#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS\n-#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0\n-#endif\n-\n-#ifndef HAVE_IBV_DEVX_OBJ\n-struct mlx5dv_devx_obj;\n-struct mlx5dv_devx_umem { uint32_t umem_id; };\n-#endif\n-\n-#ifndef HAVE_IBV_DEVX_ASYNC\n-struct mlx5dv_devx_cmd_comp;\n-struct mlx5dv_devx_async_cmd_hdr;\n-#endif\n-\n-#ifndef HAVE_MLX5DV_DR\n-enum  mlx5dv_dr_domain_type { unused, };\n-struct mlx5dv_dr_domain;\n-#endif\n-\n-#ifndef HAVE_MLX5DV_DR_DEVX_PORT\n-struct mlx5dv_devx_port;\n-#endif\n-\n-#ifndef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER\n-struct mlx5dv_dr_flow_meter_attr;\n-#endif\n-\n-/* LIB_GLUE_VERSION must be updated every time this structure is modified. 
*/\n-struct mlx5_glue {\n-\tconst char *version;\n-\tint (*fork_init)(void);\n-\tstruct ibv_pd *(*alloc_pd)(struct ibv_context *context);\n-\tint (*dealloc_pd)(struct ibv_pd *pd);\n-\tstruct ibv_device **(*get_device_list)(int *num_devices);\n-\tvoid (*free_device_list)(struct ibv_device **list);\n-\tstruct ibv_context *(*open_device)(struct ibv_device *device);\n-\tint (*close_device)(struct ibv_context *context);\n-\tint (*query_device)(struct ibv_context *context,\n-\t\t\t    struct ibv_device_attr *device_attr);\n-\tint (*query_device_ex)(struct ibv_context *context,\n-\t\t\t       const struct ibv_query_device_ex_input *input,\n-\t\t\t       struct ibv_device_attr_ex *attr);\n-\tint (*query_rt_values_ex)(struct ibv_context *context,\n-\t\t\t       struct ibv_values_ex *values);\n-\tint (*query_port)(struct ibv_context *context, uint8_t port_num,\n-\t\t\t  struct ibv_port_attr *port_attr);\n-\tstruct ibv_comp_channel *(*create_comp_channel)\n-\t\t(struct ibv_context *context);\n-\tint (*destroy_comp_channel)(struct ibv_comp_channel *channel);\n-\tstruct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,\n-\t\t\t\t    void *cq_context,\n-\t\t\t\t    struct ibv_comp_channel *channel,\n-\t\t\t\t    int comp_vector);\n-\tint (*destroy_cq)(struct ibv_cq *cq);\n-\tint (*get_cq_event)(struct ibv_comp_channel *channel,\n-\t\t\t    struct ibv_cq **cq, void **cq_context);\n-\tvoid (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);\n-\tstruct ibv_rwq_ind_table *(*create_rwq_ind_table)\n-\t\t(struct ibv_context *context,\n-\t\t struct ibv_rwq_ind_table_init_attr *init_attr);\n-\tint (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);\n-\tstruct ibv_wq *(*create_wq)(struct ibv_context *context,\n-\t\t\t\t    struct ibv_wq_init_attr *wq_init_attr);\n-\tint (*destroy_wq)(struct ibv_wq *wq);\n-\tint (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);\n-\tstruct ibv_flow *(*create_flow)(struct ibv_qp *qp,\n-\t\t\t\t\tstruct ibv_flow_attr *flow);\n-\tint (*destroy_flow)(struct ibv_flow *flow_id);\n-\tint (*destroy_flow_action)(void *action);\n-\tstruct ibv_qp *(*create_qp)(struct ibv_pd *pd,\n-\t\t\t\t    struct ibv_qp_init_attr *qp_init_attr);\n-\tstruct ibv_qp *(*create_qp_ex)\n-\t\t(struct ibv_context *context,\n-\t\t struct ibv_qp_init_attr_ex *qp_init_attr_ex);\n-\tint (*destroy_qp)(struct ibv_qp *qp);\n-\tint (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,\n-\t\t\t int attr_mask);\n-\tstruct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,\n-\t\t\t\t size_t length, int access);\n-\tint (*dereg_mr)(struct ibv_mr *mr);\n-\tstruct ibv_counter_set *(*create_counter_set)\n-\t\t(struct ibv_context *context,\n-\t\t struct ibv_counter_set_init_attr *init_attr);\n-\tint (*destroy_counter_set)(struct ibv_counter_set *cs);\n-\tint (*describe_counter_set)\n-\t\t(struct ibv_context *context,\n-\t\t uint16_t counter_set_id,\n-\t\t struct ibv_counter_set_description *cs_desc);\n-\tint (*query_counter_set)(struct ibv_query_counter_set_attr *query_attr,\n-\t\t\t\t struct ibv_counter_set_data *cs_data);\n-\tstruct ibv_counters *(*create_counters)\n-\t\t(struct ibv_context *context,\n-\t\t struct ibv_counters_init_attr *init_attr);\n-\tint (*destroy_counters)(struct ibv_counters *counters);\n-\tint (*attach_counters)(struct ibv_counters *counters,\n-\t\t\t       struct ibv_counter_attach_attr *attr,\n-\t\t\t       struct ibv_flow *flow);\n-\tint (*query_counters)(struct ibv_counters *counters,\n-\t\t\t      uint64_t *counters_value,\n-\t\t\t      uint32_t 
ncounters,\n-\t\t\t      uint32_t flags);\n-\tvoid (*ack_async_event)(struct ibv_async_event *event);\n-\tint (*get_async_event)(struct ibv_context *context,\n-\t\t\t       struct ibv_async_event *event);\n-\tconst char *(*port_state_str)(enum ibv_port_state port_state);\n-\tstruct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);\n-\tvoid *(*dr_create_flow_action_dest_flow_tbl)(void *tbl);\n-\tvoid *(*dr_create_flow_action_dest_port)(void *domain,\n-\t\t\t\t\t\t uint32_t port);\n-\tvoid *(*dr_create_flow_action_drop)();\n-\tvoid *(*dr_create_flow_action_push_vlan)\n-\t\t\t\t\t(struct mlx5dv_dr_domain *domain,\n-\t\t\t\t\t rte_be32_t vlan_tag);\n-\tvoid *(*dr_create_flow_action_pop_vlan)();\n-\tvoid *(*dr_create_flow_tbl)(void *domain, uint32_t level);\n-\tint (*dr_destroy_flow_tbl)(void *tbl);\n-\tvoid *(*dr_create_domain)(struct ibv_context *ctx,\n-\t\t\t\t  enum mlx5dv_dr_domain_type domain);\n-\tint (*dr_destroy_domain)(void *domain);\n-\tstruct ibv_cq_ex *(*dv_create_cq)\n-\t\t(struct ibv_context *context,\n-\t\t struct ibv_cq_init_attr_ex *cq_attr,\n-\t\t struct mlx5dv_cq_init_attr *mlx5_cq_attr);\n-\tstruct ibv_wq *(*dv_create_wq)\n-\t\t(struct ibv_context *context,\n-\t\t struct ibv_wq_init_attr *wq_attr,\n-\t\t struct mlx5dv_wq_init_attr *mlx5_wq_attr);\n-\tint (*dv_query_device)(struct ibv_context *ctx_in,\n-\t\t\t       struct mlx5dv_context *attrs_out);\n-\tint (*dv_set_context_attr)(struct ibv_context *ibv_ctx,\n-\t\t\t\t   enum mlx5dv_set_ctx_attr_type type,\n-\t\t\t\t   void *attr);\n-\tint (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);\n-\tstruct ibv_qp *(*dv_create_qp)\n-\t\t(struct ibv_context *context,\n-\t\t struct ibv_qp_init_attr_ex *qp_init_attr_ex,\n-\t\t struct mlx5dv_qp_init_attr *dv_qp_init_attr);\n-\tvoid *(*dv_create_flow_matcher)\n-\t\t(struct ibv_context *context,\n-\t\t struct mlx5dv_flow_matcher_attr *matcher_attr,\n-\t\t void *tbl);\n-\tvoid *(*dv_create_flow)(void *matcher, void *match_value,\n-\t\t\t  size_t num_actions, void *actions[]);\n-\tvoid *(*dv_create_flow_action_counter)(void *obj, uint32_t  offset);\n-\tvoid *(*dv_create_flow_action_dest_ibv_qp)(void *qp);\n-\tvoid *(*dv_create_flow_action_dest_devx_tir)(void *tir);\n-\tvoid *(*dv_create_flow_action_modify_header)\n-\t\t(struct ibv_context *ctx, enum mlx5dv_flow_table_type ft_type,\n-\t\t void *domain, uint64_t flags, size_t actions_sz,\n-\t\t uint64_t actions[]);\n-\tvoid *(*dv_create_flow_action_packet_reformat)\n-\t\t(struct ibv_context *ctx,\n-\t\t enum mlx5dv_flow_action_packet_reformat_type reformat_type,\n-\t\t enum mlx5dv_flow_table_type ft_type,\n-\t\t struct mlx5dv_dr_domain *domain,\n-\t\t uint32_t flags, size_t data_sz, void *data);\n-\tvoid *(*dv_create_flow_action_tag)(uint32_t tag);\n-\tvoid *(*dv_create_flow_action_meter)\n-\t\t(struct mlx5dv_dr_flow_meter_attr *attr);\n-\tint (*dv_modify_flow_action_meter)(void *action,\n-\t\tstruct mlx5dv_dr_flow_meter_attr *attr, uint64_t modify_bits);\n-\tint (*dv_destroy_flow)(void *flow);\n-\tint (*dv_destroy_flow_matcher)(void *matcher);\n-\tstruct ibv_context *(*dv_open_device)(struct ibv_device *device);\n-\tstruct mlx5dv_devx_obj *(*devx_obj_create)\n-\t\t\t\t\t(struct ibv_context *ctx,\n-\t\t\t\t\t const void *in, size_t inlen,\n-\t\t\t\t\t void *out, size_t outlen);\n-\tint (*devx_obj_destroy)(struct mlx5dv_devx_obj *obj);\n-\tint (*devx_obj_query)(struct mlx5dv_devx_obj *obj,\n-\t\t\t      const void *in, size_t inlen,\n-\t\t\t      void *out, size_t outlen);\n-\tint (*devx_obj_modify)(struct mlx5dv_devx_obj *obj,\n-\t\t\t  
     const void *in, size_t inlen,\n-\t\t\t       void *out, size_t outlen);\n-\tint (*devx_general_cmd)(struct ibv_context *context,\n-\t\t\t\tconst void *in, size_t inlen,\n-\t\t\t\tvoid *out, size_t outlen);\n-\tstruct mlx5dv_devx_cmd_comp *(*devx_create_cmd_comp)\n-\t\t\t\t\t(struct ibv_context *context);\n-\tvoid (*devx_destroy_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp);\n-\tint (*devx_obj_query_async)(struct mlx5dv_devx_obj *obj,\n-\t\t\t\t    const void *in, size_t inlen,\n-\t\t\t\t    size_t outlen, uint64_t wr_id,\n-\t\t\t\t    struct mlx5dv_devx_cmd_comp *cmd_comp);\n-\tint (*devx_get_async_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp,\n-\t\t\t\t       struct mlx5dv_devx_async_cmd_hdr *resp,\n-\t\t\t\t       size_t cmd_resp_len);\n-\tstruct mlx5dv_devx_umem *(*devx_umem_reg)(struct ibv_context *context,\n-\t\t\t\t\t\t  void *addr, size_t size,\n-\t\t\t\t\t\t  uint32_t access);\n-\tint (*devx_umem_dereg)(struct mlx5dv_devx_umem *dv_devx_umem);\n-\tint (*devx_qp_query)(struct ibv_qp *qp,\n-\t\t\t     const void *in, size_t inlen,\n-\t\t\t     void *out, size_t outlen);\n-\tint (*devx_port_query)(struct ibv_context *ctx,\n-\t\t\t       uint32_t port_num,\n-\t\t\t       struct mlx5dv_devx_port *mlx5_devx_port);\n-\tint (*dr_dump_domain)(FILE *file, void *domain);\n-};\n-\n-const struct mlx5_glue *mlx5_glue;\n-\n-#endif /* MLX5_GLUE_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c\nindex 7bdaa2a..a646b90 100644\n--- a/drivers/net/mlx5/mlx5_mac.c\n+++ b/drivers/net/mlx5/mlx5_mac.c\n@@ -27,10 +27,10 @@\n #include <rte_ethdev_driver.h>\n #include <rte_common.h>\n \n+#include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_rxtx.h\"\n-#include \"mlx5_defs.h\"\n \n /**\n  * Get MAC address by querying netdevice.\ndiff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c\nindex 0d549b6..b1cd9f7 100644\n--- a/drivers/net/mlx5/mlx5_mr.c\n+++ b/drivers/net/mlx5/mlx5_mr.c\n@@ -17,10 +17,11 @@\n #include <rte_rwlock.h>\n #include <rte_bus_pci.h>\n \n+#include <mlx5_glue.h>\n+\n #include \"mlx5.h\"\n #include \"mlx5_mr.h\"\n #include \"mlx5_rxtx.h\"\n-#include \"mlx5_glue.h\"\n \n struct mr_find_contig_memsegs_data {\n \tuintptr_t addr;\ndiff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h\ndeleted file mode 100644\nindex 6ad214b..0000000\n--- a/drivers/net/mlx5/mlx5_prm.h\n+++ /dev/null\n@@ -1,1883 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright 2016 6WIND S.A.\n- * Copyright 2016 Mellanox Technologies, Ltd\n- */\n-\n-#ifndef RTE_PMD_MLX5_PRM_H_\n-#define RTE_PMD_MLX5_PRM_H_\n-\n-#include <assert.h>\n-\n-/* Verbs header. */\n-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */\n-#ifdef PEDANTIC\n-#pragma GCC diagnostic ignored \"-Wpedantic\"\n-#endif\n-#include <infiniband/mlx5dv.h>\n-#ifdef PEDANTIC\n-#pragma GCC diagnostic error \"-Wpedantic\"\n-#endif\n-\n-#include <rte_vect.h>\n-#include \"mlx5_autoconf.h\"\n-\n-/* RSS hash key size. */\n-#define MLX5_RSS_HASH_KEY_LEN 40\n-\n-/* Get CQE owner bit. */\n-#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)\n-\n-/* Get CQE format. */\n-#define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2)\n-\n-/* Get CQE opcode. */\n-#define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4)\n-\n-/* Get CQE solicited event. */\n-#define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1)\n-\n-/* Invalidate a CQE. */\n-#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)\n-\n-/* WQE Segment sizes in bytes. 
*/\n-#define MLX5_WSEG_SIZE 16u\n-#define MLX5_WQE_CSEG_SIZE sizeof(struct mlx5_wqe_cseg)\n-#define MLX5_WQE_DSEG_SIZE sizeof(struct mlx5_wqe_dseg)\n-#define MLX5_WQE_ESEG_SIZE sizeof(struct mlx5_wqe_eseg)\n-\n-/* WQE/WQEBB size in bytes. */\n-#define MLX5_WQE_SIZE sizeof(struct mlx5_wqe)\n-\n-/*\n- * Max size of a WQE session.\n- * Absolute maximum size is 63 (MLX5_DSEG_MAX) segments,\n- * the WQE size field in Control Segment is 6 bits wide.\n- */\n-#define MLX5_WQE_SIZE_MAX (60 * MLX5_WSEG_SIZE)\n-\n-/*\n- * Default minimum number of Tx queues for inlining packets.\n- * If there are less queues as specified we assume we have\n- * no enough CPU resources (cycles) to perform inlining,\n- * the PCIe throughput is not supposed as bottleneck and\n- * inlining is disabled.\n- */\n-#define MLX5_INLINE_MAX_TXQS 8u\n-#define MLX5_INLINE_MAX_TXQS_BLUEFIELD 16u\n-\n-/*\n- * Default packet length threshold to be inlined with\n- * enhanced MPW. If packet length exceeds the threshold\n- * the data are not inlined. Should be aligned in WQEBB\n- * boundary with accounting the title Control and Ethernet\n- * segments.\n- */\n-#define MLX5_EMPW_DEF_INLINE_LEN (4u * MLX5_WQE_SIZE + \\\n-\t\t\t\t  MLX5_DSEG_MIN_INLINE_SIZE)\n-/*\n- * Maximal inline data length sent with enhanced MPW.\n- * Is based on maximal WQE size.\n- */\n-#define MLX5_EMPW_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \\\n-\t\t\t\t  MLX5_WQE_CSEG_SIZE - \\\n-\t\t\t\t  MLX5_WQE_ESEG_SIZE - \\\n-\t\t\t\t  MLX5_WQE_DSEG_SIZE + \\\n-\t\t\t\t  MLX5_DSEG_MIN_INLINE_SIZE)\n-/*\n- * Minimal amount of packets to be sent with EMPW.\n- * This limits the minimal required size of sent EMPW.\n- * If there are no enough resources to built minimal\n- * EMPW the sending loop exits.\n- */\n-#define MLX5_EMPW_MIN_PACKETS (2u + 3u * 4u)\n-/*\n- * Maximal amount of packets to be sent with EMPW.\n- * This value is not recommended to exceed MLX5_TX_COMP_THRESH,\n- * otherwise there might be up to MLX5_EMPW_MAX_PACKETS mbufs\n- * without CQE generation request, being multiplied by\n- * MLX5_TX_COMP_MAX_CQE it may cause significant latency\n- * in tx burst routine at the moment of freeing multiple mbufs.\n- */\n-#define MLX5_EMPW_MAX_PACKETS MLX5_TX_COMP_THRESH\n-#define MLX5_MPW_MAX_PACKETS 6\n-#define MLX5_MPW_INLINE_MAX_PACKETS 2\n-\n-/*\n- * Default packet length threshold to be inlined with\n- * ordinary SEND. Inlining saves the MR key search\n- * and extra PCIe data fetch transaction, but eats the\n- * CPU cycles.\n- */\n-#define MLX5_SEND_DEF_INLINE_LEN (5U * MLX5_WQE_SIZE + \\\n-\t\t\t\t  MLX5_ESEG_MIN_INLINE_SIZE - \\\n-\t\t\t\t  MLX5_WQE_CSEG_SIZE - \\\n-\t\t\t\t  MLX5_WQE_ESEG_SIZE - \\\n-\t\t\t\t  MLX5_WQE_DSEG_SIZE)\n-/*\n- * Maximal inline data length sent with ordinary SEND.\n- * Is based on maximal WQE size.\n- */\n-#define MLX5_SEND_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \\\n-\t\t\t\t  MLX5_WQE_CSEG_SIZE - \\\n-\t\t\t\t  MLX5_WQE_ESEG_SIZE - \\\n-\t\t\t\t  MLX5_WQE_DSEG_SIZE + \\\n-\t\t\t\t  MLX5_ESEG_MIN_INLINE_SIZE)\n-\n-/* Missed in mlv5dv.h, should define here. */\n-#define MLX5_OPCODE_ENHANCED_MPSW 0x29u\n-\n-/* CQE value to inform that VLAN is stripped. */\n-#define MLX5_CQE_VLAN_STRIPPED (1u << 0)\n-\n-/* IPv4 options. */\n-#define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)\n-\n-/* IPv6 packet. */\n-#define MLX5_CQE_RX_IPV6_PACKET (1u << 2)\n-\n-/* IPv4 packet. */\n-#define MLX5_CQE_RX_IPV4_PACKET (1u << 3)\n-\n-/* TCP packet. */\n-#define MLX5_CQE_RX_TCP_PACKET (1u << 4)\n-\n-/* UDP packet. 
*/\n-#define MLX5_CQE_RX_UDP_PACKET (1u << 5)\n-\n-/* IP is fragmented. */\n-#define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)\n-\n-/* L2 header is valid. */\n-#define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)\n-\n-/* L3 header is valid. */\n-#define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)\n-\n-/* L4 header is valid. */\n-#define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)\n-\n-/* Outer packet, 0 IPv4, 1 IPv6. */\n-#define MLX5_CQE_RX_OUTER_PACKET (1u << 1)\n-\n-/* Tunnel packet bit in the CQE. */\n-#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)\n-\n-/* Mask for LRO push flag in the CQE lro_tcppsh_abort_dupack field. */\n-#define MLX5_CQE_LRO_PUSH_MASK 0x40\n-\n-/* Mask for L4 type in the CQE hdr_type_etc field. */\n-#define MLX5_CQE_L4_TYPE_MASK 0x70\n-\n-/* The bit index of L4 type in CQE hdr_type_etc field. */\n-#define MLX5_CQE_L4_TYPE_SHIFT 0x4\n-\n-/* L4 type to indicate TCP packet without acknowledgment. */\n-#define MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK 0x3\n-\n-/* L4 type to indicate TCP packet with acknowledgment. */\n-#define MLX5_L4_HDR_TYPE_TCP_WITH_ACL 0x4\n-\n-/* Inner L3 checksum offload (Tunneled packets only). */\n-#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)\n-\n-/* Inner L4 checksum offload (Tunneled packets only). */\n-#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)\n-\n-/* Outer L4 type is TCP. */\n-#define MLX5_ETH_WQE_L4_OUTER_TCP  (0u << 5)\n-\n-/* Outer L4 type is UDP. */\n-#define MLX5_ETH_WQE_L4_OUTER_UDP  (1u << 5)\n-\n-/* Outer L3 type is IPV4. */\n-#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)\n-\n-/* Outer L3 type is IPV6. */\n-#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)\n-\n-/* Inner L4 type is TCP. */\n-#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)\n-\n-/* Inner L4 type is UDP. */\n-#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)\n-\n-/* Inner L3 type is IPV4. */\n-#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)\n-\n-/* Inner L3 type is IPV6. */\n-#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)\n-\n-/* VLAN insertion flag. */\n-#define MLX5_ETH_WQE_VLAN_INSERT (1u << 31)\n-\n-/* Data inline segment flag. */\n-#define MLX5_ETH_WQE_DATA_INLINE (1u << 31)\n-\n-/* Is flow mark valid. */\n-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)\n-#else\n-#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)\n-#endif\n-\n-/* INVALID is used by packets matching no flow rules. */\n-#define MLX5_FLOW_MARK_INVALID 0\n-\n-/* Maximum allowed value to mark a packet. */\n-#define MLX5_FLOW_MARK_MAX 0xfffff0\n-\n-/* Default mark value used when none is provided. */\n-#define MLX5_FLOW_MARK_DEFAULT 0xffffff\n-\n-/* Default mark mask for metadata legacy mode. */\n-#define MLX5_FLOW_MARK_MASK 0xffffff\n-\n-/* Maximum number of DS in WQE. Limited by 6-bit field. */\n-#define MLX5_DSEG_MAX 63\n-\n-/* The completion mode offset in the WQE control segment line 2. */\n-#define MLX5_COMP_MODE_OFFSET 2\n-\n-/* Amount of data bytes in minimal inline data segment. */\n-#define MLX5_DSEG_MIN_INLINE_SIZE 12u\n-\n-/* Amount of data bytes in minimal inline eth segment. */\n-#define MLX5_ESEG_MIN_INLINE_SIZE 18u\n-\n-/* Amount of data bytes after eth data segment. */\n-#define MLX5_ESEG_EXTRA_DATA_SIZE 32u\n-\n-/* The maximum log value of segments per RQ WQE. */\n-#define MLX5_MAX_LOG_RQ_SEGS 5u\n-\n-/* The alignment needed for WQ buffer. */\n-#define MLX5_WQE_BUF_ALIGNMENT 512\n-\n-/* Completion mode. */\n-enum mlx5_completion_mode {\n-\tMLX5_COMP_ONLY_ERR = 0x0,\n-\tMLX5_COMP_ONLY_FIRST_ERR = 0x1,\n-\tMLX5_COMP_ALWAYS = 0x2,\n-\tMLX5_COMP_CQE_AND_EQE = 0x3,\n-};\n-\n-/* MPW mode. 
*/\n-enum mlx5_mpw_mode {\n-\tMLX5_MPW_DISABLED,\n-\tMLX5_MPW,\n-\tMLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */\n-};\n-\n-/* WQE Control segment. */\n-struct mlx5_wqe_cseg {\n-\tuint32_t opcode;\n-\tuint32_t sq_ds;\n-\tuint32_t flags;\n-\tuint32_t misc;\n-} __rte_packed __rte_aligned(MLX5_WSEG_SIZE);\n-\n-/* Header of data segment. Minimal size Data Segment */\n-struct mlx5_wqe_dseg {\n-\tuint32_t bcount;\n-\tunion {\n-\t\tuint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];\n-\t\tstruct {\n-\t\t\tuint32_t lkey;\n-\t\t\tuint64_t pbuf;\n-\t\t} __rte_packed;\n-\t};\n-} __rte_packed;\n-\n-/* Subset of struct WQE Ethernet Segment. */\n-struct mlx5_wqe_eseg {\n-\tunion {\n-\t\tstruct {\n-\t\t\tuint32_t swp_offs;\n-\t\t\tuint8_t\tcs_flags;\n-\t\t\tuint8_t\tswp_flags;\n-\t\t\tuint16_t mss;\n-\t\t\tuint32_t metadata;\n-\t\t\tuint16_t inline_hdr_sz;\n-\t\t\tunion {\n-\t\t\t\tuint16_t inline_data;\n-\t\t\t\tuint16_t vlan_tag;\n-\t\t\t};\n-\t\t} __rte_packed;\n-\t\tstruct {\n-\t\t\tuint32_t offsets;\n-\t\t\tuint32_t flags;\n-\t\t\tuint32_t flow_metadata;\n-\t\t\tuint32_t inline_hdr;\n-\t\t} __rte_packed;\n-\t};\n-} __rte_packed;\n-\n-/* The title WQEBB, header of WQE. */\n-struct mlx5_wqe {\n-\tunion {\n-\t\tstruct mlx5_wqe_cseg cseg;\n-\t\tuint32_t ctrl[4];\n-\t};\n-\tstruct mlx5_wqe_eseg eseg;\n-\tunion {\n-\t\tstruct mlx5_wqe_dseg dseg[2];\n-\t\tuint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];\n-\t};\n-} __rte_packed;\n-\n-/* WQE for Multi-Packet RQ. */\n-struct mlx5_wqe_mprq {\n-\tstruct mlx5_wqe_srq_next_seg next_seg;\n-\tstruct mlx5_wqe_data_seg dseg;\n-};\n-\n-#define MLX5_MPRQ_LEN_MASK 0x000ffff\n-#define MLX5_MPRQ_LEN_SHIFT 0\n-#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000\n-#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16\n-#define MLX5_MPRQ_FILLER_MASK 0x80000000\n-#define MLX5_MPRQ_FILLER_SHIFT 31\n-\n-#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2\n-\n-/* CQ element structure - should be equal to the cache line size */\n-struct mlx5_cqe {\n-#if (RTE_CACHE_LINE_SIZE == 128)\n-\tuint8_t padding[64];\n-#endif\n-\tuint8_t pkt_info;\n-\tuint8_t rsvd0;\n-\tuint16_t wqe_id;\n-\tuint8_t lro_tcppsh_abort_dupack;\n-\tuint8_t lro_min_ttl;\n-\tuint16_t lro_tcp_win;\n-\tuint32_t lro_ack_seq_num;\n-\tuint32_t rx_hash_res;\n-\tuint8_t rx_hash_type;\n-\tuint8_t rsvd1[3];\n-\tuint16_t csum;\n-\tuint8_t rsvd2[6];\n-\tuint16_t hdr_type_etc;\n-\tuint16_t vlan_info;\n-\tuint8_t lro_num_seg;\n-\tuint8_t rsvd3[3];\n-\tuint32_t flow_table_metadata;\n-\tuint8_t rsvd4[4];\n-\tuint32_t byte_cnt;\n-\tuint64_t timestamp;\n-\tuint32_t sop_drop_qpn;\n-\tuint16_t wqe_counter;\n-\tuint8_t rsvd5;\n-\tuint8_t op_own;\n-};\n-\n-/* Adding direct verbs to data-path. */\n-\n-/* CQ sequence number mask. */\n-#define MLX5_CQ_SQN_MASK 0x3\n-\n-/* CQ sequence number index. */\n-#define MLX5_CQ_SQN_OFFSET 28\n-\n-/* CQ doorbell index mask. */\n-#define MLX5_CI_MASK 0xffffff\n-\n-/* CQ doorbell offset. */\n-#define MLX5_CQ_ARM_DB 1\n-\n-/* CQ doorbell offset*/\n-#define MLX5_CQ_DOORBELL 0x20\n-\n-/* CQE format value. */\n-#define MLX5_COMPRESSED 0x3\n-\n-/* Action type of header modification. */\n-enum {\n-\tMLX5_MODIFICATION_TYPE_SET = 0x1,\n-\tMLX5_MODIFICATION_TYPE_ADD = 0x2,\n-\tMLX5_MODIFICATION_TYPE_COPY = 0x3,\n-};\n-\n-/* The field of packet to be modified. 
*/\n-enum mlx5_modification_field {\n-\tMLX5_MODI_OUT_NONE = -1,\n-\tMLX5_MODI_OUT_SMAC_47_16 = 1,\n-\tMLX5_MODI_OUT_SMAC_15_0,\n-\tMLX5_MODI_OUT_ETHERTYPE,\n-\tMLX5_MODI_OUT_DMAC_47_16,\n-\tMLX5_MODI_OUT_DMAC_15_0,\n-\tMLX5_MODI_OUT_IP_DSCP,\n-\tMLX5_MODI_OUT_TCP_FLAGS,\n-\tMLX5_MODI_OUT_TCP_SPORT,\n-\tMLX5_MODI_OUT_TCP_DPORT,\n-\tMLX5_MODI_OUT_IPV4_TTL,\n-\tMLX5_MODI_OUT_UDP_SPORT,\n-\tMLX5_MODI_OUT_UDP_DPORT,\n-\tMLX5_MODI_OUT_SIPV6_127_96,\n-\tMLX5_MODI_OUT_SIPV6_95_64,\n-\tMLX5_MODI_OUT_SIPV6_63_32,\n-\tMLX5_MODI_OUT_SIPV6_31_0,\n-\tMLX5_MODI_OUT_DIPV6_127_96,\n-\tMLX5_MODI_OUT_DIPV6_95_64,\n-\tMLX5_MODI_OUT_DIPV6_63_32,\n-\tMLX5_MODI_OUT_DIPV6_31_0,\n-\tMLX5_MODI_OUT_SIPV4,\n-\tMLX5_MODI_OUT_DIPV4,\n-\tMLX5_MODI_OUT_FIRST_VID,\n-\tMLX5_MODI_IN_SMAC_47_16 = 0x31,\n-\tMLX5_MODI_IN_SMAC_15_0,\n-\tMLX5_MODI_IN_ETHERTYPE,\n-\tMLX5_MODI_IN_DMAC_47_16,\n-\tMLX5_MODI_IN_DMAC_15_0,\n-\tMLX5_MODI_IN_IP_DSCP,\n-\tMLX5_MODI_IN_TCP_FLAGS,\n-\tMLX5_MODI_IN_TCP_SPORT,\n-\tMLX5_MODI_IN_TCP_DPORT,\n-\tMLX5_MODI_IN_IPV4_TTL,\n-\tMLX5_MODI_IN_UDP_SPORT,\n-\tMLX5_MODI_IN_UDP_DPORT,\n-\tMLX5_MODI_IN_SIPV6_127_96,\n-\tMLX5_MODI_IN_SIPV6_95_64,\n-\tMLX5_MODI_IN_SIPV6_63_32,\n-\tMLX5_MODI_IN_SIPV6_31_0,\n-\tMLX5_MODI_IN_DIPV6_127_96,\n-\tMLX5_MODI_IN_DIPV6_95_64,\n-\tMLX5_MODI_IN_DIPV6_63_32,\n-\tMLX5_MODI_IN_DIPV6_31_0,\n-\tMLX5_MODI_IN_SIPV4,\n-\tMLX5_MODI_IN_DIPV4,\n-\tMLX5_MODI_OUT_IPV6_HOPLIMIT,\n-\tMLX5_MODI_IN_IPV6_HOPLIMIT,\n-\tMLX5_MODI_META_DATA_REG_A,\n-\tMLX5_MODI_META_DATA_REG_B = 0x50,\n-\tMLX5_MODI_META_REG_C_0,\n-\tMLX5_MODI_META_REG_C_1,\n-\tMLX5_MODI_META_REG_C_2,\n-\tMLX5_MODI_META_REG_C_3,\n-\tMLX5_MODI_META_REG_C_4,\n-\tMLX5_MODI_META_REG_C_5,\n-\tMLX5_MODI_META_REG_C_6,\n-\tMLX5_MODI_META_REG_C_7,\n-\tMLX5_MODI_OUT_TCP_SEQ_NUM,\n-\tMLX5_MODI_IN_TCP_SEQ_NUM,\n-\tMLX5_MODI_OUT_TCP_ACK_NUM,\n-\tMLX5_MODI_IN_TCP_ACK_NUM = 0x5C,\n-};\n-\n-/* Total number of metadata reg_c's. */\n-#define MLX5_MREG_C_NUM (MLX5_MODI_META_REG_C_7 - MLX5_MODI_META_REG_C_0 + 1)\n-\n-enum modify_reg {\n-\tREG_NONE = 0,\n-\tREG_A,\n-\tREG_B,\n-\tREG_C_0,\n-\tREG_C_1,\n-\tREG_C_2,\n-\tREG_C_3,\n-\tREG_C_4,\n-\tREG_C_5,\n-\tREG_C_6,\n-\tREG_C_7,\n-};\n-\n-/* Modification sub command. 
*/\n-struct mlx5_modification_cmd {\n-\tunion {\n-\t\tuint32_t data0;\n-\t\tstruct {\n-\t\t\tunsigned int length:5;\n-\t\t\tunsigned int rsvd0:3;\n-\t\t\tunsigned int offset:5;\n-\t\t\tunsigned int rsvd1:3;\n-\t\t\tunsigned int field:12;\n-\t\t\tunsigned int action_type:4;\n-\t\t};\n-\t};\n-\tunion {\n-\t\tuint32_t data1;\n-\t\tuint8_t data[4];\n-\t\tstruct {\n-\t\t\tunsigned int rsvd2:8;\n-\t\t\tunsigned int dst_offset:5;\n-\t\t\tunsigned int rsvd3:3;\n-\t\t\tunsigned int dst_field:12;\n-\t\t\tunsigned int rsvd4:4;\n-\t\t};\n-\t};\n-};\n-\n-typedef uint32_t u32;\n-typedef uint16_t u16;\n-typedef uint8_t u8;\n-\n-#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)\n-#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)\n-#define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \\\n-\t\t\t\t  (&(__mlx5_nullp(typ)->fld)))\n-#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \\\n-\t\t\t\t    (__mlx5_bit_off(typ, fld) & 0x1f))\n-#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)\n-#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)\n-#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << \\\n-\t\t\t\t  __mlx5_dw_bit_off(typ, fld))\n-#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))\n-#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)\n-#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - \\\n-\t\t\t\t    (__mlx5_bit_off(typ, fld) & 0xf))\n-#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))\n-#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)\n-#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)\n-#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)\n-#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))\n-\n-/* insert a value to a struct */\n-#define MLX5_SET(typ, p, fld, v) \\\n-\tdo { \\\n-\t\tu32 _v = v; \\\n-\t\t*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \\\n-\t\trte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \\\n-\t\t\t\t  __mlx5_dw_off(typ, fld))) & \\\n-\t\t\t\t  (~__mlx5_dw_mask(typ, fld))) | \\\n-\t\t\t\t (((_v) & __mlx5_mask(typ, fld)) << \\\n-\t\t\t\t   __mlx5_dw_bit_off(typ, fld))); \\\n-\t} while (0)\n-\n-#define MLX5_SET64(typ, p, fld, v) \\\n-\tdo { \\\n-\t\tassert(__mlx5_bit_sz(typ, fld) == 64); \\\n-\t\t*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = \\\n-\t\t\trte_cpu_to_be_64(v); \\\n-\t} while (0)\n-\n-#define MLX5_GET(typ, p, fld) \\\n-\t((rte_be_to_cpu_32(*((__be32 *)(p) +\\\n-\t__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \\\n-\t__mlx5_mask(typ, fld))\n-#define MLX5_GET16(typ, p, fld) \\\n-\t((rte_be_to_cpu_16(*((__be16 *)(p) + \\\n-\t  __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \\\n-\t __mlx5_mask16(typ, fld))\n-#define MLX5_GET64(typ, p, fld) rte_be_to_cpu_64(*((__be64 *)(p) + \\\n-\t\t\t\t\t\t   __mlx5_64_off(typ, fld)))\n-#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)\n-\n-struct mlx5_ifc_fte_match_set_misc_bits {\n-\tu8 gre_c_present[0x1];\n-\tu8 reserved_at_1[0x1];\n-\tu8 gre_k_present[0x1];\n-\tu8 gre_s_present[0x1];\n-\tu8 source_vhci_port[0x4];\n-\tu8 source_sqn[0x18];\n-\tu8 reserved_at_20[0x10];\n-\tu8 source_port[0x10];\n-\tu8 outer_second_prio[0x3];\n-\tu8 outer_second_cfi[0x1];\n-\tu8 outer_second_vid[0xc];\n-\tu8 inner_second_prio[0x3];\n-\tu8 inner_second_cfi[0x1];\n-\tu8 inner_second_vid[0xc];\n-\tu8 outer_second_cvlan_tag[0x1];\n-\tu8 inner_second_cvlan_tag[0x1];\n-\tu8 
outer_second_svlan_tag[0x1];\n-\tu8 inner_second_svlan_tag[0x1];\n-\tu8 reserved_at_64[0xc];\n-\tu8 gre_protocol[0x10];\n-\tu8 gre_key_h[0x18];\n-\tu8 gre_key_l[0x8];\n-\tu8 vxlan_vni[0x18];\n-\tu8 reserved_at_b8[0x8];\n-\tu8 geneve_vni[0x18];\n-\tu8 reserved_at_e4[0x7];\n-\tu8 geneve_oam[0x1];\n-\tu8 reserved_at_e0[0xc];\n-\tu8 outer_ipv6_flow_label[0x14];\n-\tu8 reserved_at_100[0xc];\n-\tu8 inner_ipv6_flow_label[0x14];\n-\tu8 reserved_at_120[0xa];\n-\tu8 geneve_opt_len[0x6];\n-\tu8 geneve_protocol_type[0x10];\n-\tu8 reserved_at_140[0xc0];\n-};\n-\n-struct mlx5_ifc_ipv4_layout_bits {\n-\tu8 reserved_at_0[0x60];\n-\tu8 ipv4[0x20];\n-};\n-\n-struct mlx5_ifc_ipv6_layout_bits {\n-\tu8 ipv6[16][0x8];\n-};\n-\n-union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {\n-\tstruct mlx5_ifc_ipv6_layout_bits ipv6_layout;\n-\tstruct mlx5_ifc_ipv4_layout_bits ipv4_layout;\n-\tu8 reserved_at_0[0x80];\n-};\n-\n-struct mlx5_ifc_fte_match_set_lyr_2_4_bits {\n-\tu8 smac_47_16[0x20];\n-\tu8 smac_15_0[0x10];\n-\tu8 ethertype[0x10];\n-\tu8 dmac_47_16[0x20];\n-\tu8 dmac_15_0[0x10];\n-\tu8 first_prio[0x3];\n-\tu8 first_cfi[0x1];\n-\tu8 first_vid[0xc];\n-\tu8 ip_protocol[0x8];\n-\tu8 ip_dscp[0x6];\n-\tu8 ip_ecn[0x2];\n-\tu8 cvlan_tag[0x1];\n-\tu8 svlan_tag[0x1];\n-\tu8 frag[0x1];\n-\tu8 ip_version[0x4];\n-\tu8 tcp_flags[0x9];\n-\tu8 tcp_sport[0x10];\n-\tu8 tcp_dport[0x10];\n-\tu8 reserved_at_c0[0x20];\n-\tu8 udp_sport[0x10];\n-\tu8 udp_dport[0x10];\n-\tunion mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;\n-\tunion mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;\n-};\n-\n-struct mlx5_ifc_fte_match_mpls_bits {\n-\tu8 mpls_label[0x14];\n-\tu8 mpls_exp[0x3];\n-\tu8 mpls_s_bos[0x1];\n-\tu8 mpls_ttl[0x8];\n-};\n-\n-struct mlx5_ifc_fte_match_set_misc2_bits {\n-\tstruct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;\n-\tstruct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;\n-\tstruct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;\n-\tstruct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;\n-\tu8 metadata_reg_c_7[0x20];\n-\tu8 metadata_reg_c_6[0x20];\n-\tu8 metadata_reg_c_5[0x20];\n-\tu8 metadata_reg_c_4[0x20];\n-\tu8 metadata_reg_c_3[0x20];\n-\tu8 metadata_reg_c_2[0x20];\n-\tu8 metadata_reg_c_1[0x20];\n-\tu8 metadata_reg_c_0[0x20];\n-\tu8 metadata_reg_a[0x20];\n-\tu8 metadata_reg_b[0x20];\n-\tu8 reserved_at_1c0[0x40];\n-};\n-\n-struct mlx5_ifc_fte_match_set_misc3_bits {\n-\tu8 inner_tcp_seq_num[0x20];\n-\tu8 outer_tcp_seq_num[0x20];\n-\tu8 inner_tcp_ack_num[0x20];\n-\tu8 outer_tcp_ack_num[0x20];\n-\tu8 reserved_at_auto1[0x8];\n-\tu8 outer_vxlan_gpe_vni[0x18];\n-\tu8 outer_vxlan_gpe_next_protocol[0x8];\n-\tu8 outer_vxlan_gpe_flags[0x8];\n-\tu8 reserved_at_a8[0x10];\n-\tu8 icmp_header_data[0x20];\n-\tu8 icmpv6_header_data[0x20];\n-\tu8 icmp_type[0x8];\n-\tu8 icmp_code[0x8];\n-\tu8 icmpv6_type[0x8];\n-\tu8 icmpv6_code[0x8];\n-\tu8 reserved_at_120[0x20];\n-\tu8 gtpu_teid[0x20];\n-\tu8 gtpu_msg_type[0x08];\n-\tu8 gtpu_msg_flags[0x08];\n-\tu8 reserved_at_170[0x90];\n-};\n-\n-/* Flow matcher. 
*/\n-struct mlx5_ifc_fte_match_param_bits {\n-\tstruct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;\n-\tstruct mlx5_ifc_fte_match_set_misc_bits misc_parameters;\n-\tstruct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;\n-\tstruct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;\n-\tstruct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;\n-};\n-\n-enum {\n-\tMLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,\n-\tMLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,\n-\tMLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,\n-\tMLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT,\n-\tMLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT\n-};\n-\n-enum {\n-\tMLX5_CMD_OP_QUERY_HCA_CAP = 0x100,\n-\tMLX5_CMD_OP_CREATE_MKEY = 0x200,\n-\tMLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,\n-\tMLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,\n-\tMLX5_CMD_OP_CREATE_TIR = 0x900,\n-\tMLX5_CMD_OP_CREATE_SQ = 0X904,\n-\tMLX5_CMD_OP_MODIFY_SQ = 0X905,\n-\tMLX5_CMD_OP_CREATE_RQ = 0x908,\n-\tMLX5_CMD_OP_MODIFY_RQ = 0x909,\n-\tMLX5_CMD_OP_CREATE_TIS = 0x912,\n-\tMLX5_CMD_OP_QUERY_TIS = 0x915,\n-\tMLX5_CMD_OP_CREATE_RQT = 0x916,\n-\tMLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,\n-\tMLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,\n-};\n-\n-enum {\n-\tMLX5_MKC_ACCESS_MODE_MTT   = 0x1,\n-};\n-\n-/* Flow counters. */\n-struct mlx5_ifc_alloc_flow_counter_out_bits {\n-\tu8         status[0x8];\n-\tu8         reserved_at_8[0x18];\n-\tu8         syndrome[0x20];\n-\tu8         flow_counter_id[0x20];\n-\tu8         reserved_at_60[0x20];\n-};\n-\n-struct mlx5_ifc_alloc_flow_counter_in_bits {\n-\tu8         opcode[0x10];\n-\tu8         reserved_at_10[0x10];\n-\tu8         reserved_at_20[0x10];\n-\tu8         op_mod[0x10];\n-\tu8         flow_counter_id[0x20];\n-\tu8         reserved_at_40[0x18];\n-\tu8         flow_counter_bulk[0x8];\n-};\n-\n-struct mlx5_ifc_dealloc_flow_counter_out_bits {\n-\tu8         status[0x8];\n-\tu8         reserved_at_8[0x18];\n-\tu8         syndrome[0x20];\n-\tu8         reserved_at_40[0x40];\n-};\n-\n-struct mlx5_ifc_dealloc_flow_counter_in_bits {\n-\tu8         opcode[0x10];\n-\tu8         reserved_at_10[0x10];\n-\tu8         reserved_at_20[0x10];\n-\tu8         op_mod[0x10];\n-\tu8         flow_counter_id[0x20];\n-\tu8         reserved_at_60[0x20];\n-};\n-\n-struct mlx5_ifc_traffic_counter_bits {\n-\tu8         packets[0x40];\n-\tu8         octets[0x40];\n-};\n-\n-struct mlx5_ifc_query_flow_counter_out_bits {\n-\tu8         status[0x8];\n-\tu8         reserved_at_8[0x18];\n-\tu8         syndrome[0x20];\n-\tu8         reserved_at_40[0x40];\n-\tstruct mlx5_ifc_traffic_counter_bits flow_statistics[];\n-};\n-\n-struct mlx5_ifc_query_flow_counter_in_bits {\n-\tu8         opcode[0x10];\n-\tu8         reserved_at_10[0x10];\n-\tu8         reserved_at_20[0x10];\n-\tu8         op_mod[0x10];\n-\tu8         reserved_at_40[0x20];\n-\tu8         mkey[0x20];\n-\tu8         address[0x40];\n-\tu8         clear[0x1];\n-\tu8         dump_to_memory[0x1];\n-\tu8         num_of_counters[0x1e];\n-\tu8         flow_counter_id[0x20];\n-};\n-\n-struct mlx5_ifc_mkc_bits {\n-\tu8         reserved_at_0[0x1];\n-\tu8         free[0x1];\n-\tu8         reserved_at_2[0x1];\n-\tu8         access_mode_4_2[0x3];\n-\tu8         reserved_at_6[0x7];\n-\tu8         relaxed_ordering_write[0x1];\n-\tu8         reserved_at_e[0x1];\n-\tu8         small_fence_on_rdma_read_response[0x1];\n-\tu8         umr_en[0x1];\n-\tu8         a[0x1];\n-\tu8         rw[0x1];\n-\tu8         rr[0x1];\n-\tu8         lw[0x1];\n-\tu8         lr[0x1];\n-\tu8         access_mode_1_0[0x2];\n-\tu8         reserved_at_18[0x8];\n-\n-\tu8         
qpn[0x18];\n-\tu8         mkey_7_0[0x8];\n-\n-\tu8         reserved_at_40[0x20];\n-\n-\tu8         length64[0x1];\n-\tu8         bsf_en[0x1];\n-\tu8         sync_umr[0x1];\n-\tu8         reserved_at_63[0x2];\n-\tu8         expected_sigerr_count[0x1];\n-\tu8         reserved_at_66[0x1];\n-\tu8         en_rinval[0x1];\n-\tu8         pd[0x18];\n-\n-\tu8         start_addr[0x40];\n-\n-\tu8         len[0x40];\n-\n-\tu8         bsf_octword_size[0x20];\n-\n-\tu8         reserved_at_120[0x80];\n-\n-\tu8         translations_octword_size[0x20];\n-\n-\tu8         reserved_at_1c0[0x1b];\n-\tu8         log_page_size[0x5];\n-\n-\tu8         reserved_at_1e0[0x20];\n-};\n-\n-struct mlx5_ifc_create_mkey_out_bits {\n-\tu8         status[0x8];\n-\tu8         reserved_at_8[0x18];\n-\n-\tu8         syndrome[0x20];\n-\n-\tu8         reserved_at_40[0x8];\n-\tu8         mkey_index[0x18];\n-\n-\tu8         reserved_at_60[0x20];\n-};\n-\n-struct mlx5_ifc_create_mkey_in_bits {\n-\tu8         opcode[0x10];\n-\tu8         reserved_at_10[0x10];\n-\n-\tu8         reserved_at_20[0x10];\n-\tu8         op_mod[0x10];\n-\n-\tu8         reserved_at_40[0x20];\n-\n-\tu8         pg_access[0x1];\n-\tu8         reserved_at_61[0x1f];\n-\n-\tstruct mlx5_ifc_mkc_bits memory_key_mkey_entry;\n-\n-\tu8         reserved_at_280[0x80];\n-\n-\tu8         translations_octword_actual_size[0x20];\n-\n-\tu8         mkey_umem_id[0x20];\n-\n-\tu8         mkey_umem_offset[0x40];\n-\n-\tu8         reserved_at_380[0x500];\n-\n-\tu8         klm_pas_mtt[][0x20];\n-};\n-\n-enum {\n-\tMLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0 << 1,\n-\tMLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS = 0x1 << 1,\n-\tMLX5_GET_HCA_CAP_OP_MOD_QOS_CAP = 0xc << 1,\n-};\n-\n-enum {\n-\tMLX5_HCA_CAP_OPMOD_GET_MAX   = 0,\n-\tMLX5_HCA_CAP_OPMOD_GET_CUR   = 1,\n-};\n-\n-enum {\n-\tMLX5_CAP_INLINE_MODE_L2,\n-\tMLX5_CAP_INLINE_MODE_VPORT_CONTEXT,\n-\tMLX5_CAP_INLINE_MODE_NOT_REQUIRED,\n-};\n-\n-enum {\n-\tMLX5_INLINE_MODE_NONE,\n-\tMLX5_INLINE_MODE_L2,\n-\tMLX5_INLINE_MODE_IP,\n-\tMLX5_INLINE_MODE_TCP_UDP,\n-\tMLX5_INLINE_MODE_RESERVED4,\n-\tMLX5_INLINE_MODE_INNER_L2,\n-\tMLX5_INLINE_MODE_INNER_IP,\n-\tMLX5_INLINE_MODE_INNER_TCP_UDP,\n-};\n-\n-/* HCA bit masks indicating which Flex parser protocols are already enabled. 
*/\n-#define MLX5_HCA_FLEX_IPV4_OVER_VXLAN_ENABLED (1UL << 0)\n-#define MLX5_HCA_FLEX_IPV6_OVER_VXLAN_ENABLED (1UL << 1)\n-#define MLX5_HCA_FLEX_IPV6_OVER_IP_ENABLED (1UL << 2)\n-#define MLX5_HCA_FLEX_GENEVE_ENABLED (1UL << 3)\n-#define MLX5_HCA_FLEX_CW_MPLS_OVER_GRE_ENABLED (1UL << 4)\n-#define MLX5_HCA_FLEX_CW_MPLS_OVER_UDP_ENABLED (1UL << 5)\n-#define MLX5_HCA_FLEX_P_BIT_VXLAN_GPE_ENABLED (1UL << 6)\n-#define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7)\n-#define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8)\n-#define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9)\n-\n-struct mlx5_ifc_cmd_hca_cap_bits {\n-\tu8 reserved_at_0[0x30];\n-\tu8 vhca_id[0x10];\n-\tu8 reserved_at_40[0x40];\n-\tu8 log_max_srq_sz[0x8];\n-\tu8 log_max_qp_sz[0x8];\n-\tu8 reserved_at_90[0xb];\n-\tu8 log_max_qp[0x5];\n-\tu8 reserved_at_a0[0xb];\n-\tu8 log_max_srq[0x5];\n-\tu8 reserved_at_b0[0x10];\n-\tu8 reserved_at_c0[0x8];\n-\tu8 log_max_cq_sz[0x8];\n-\tu8 reserved_at_d0[0xb];\n-\tu8 log_max_cq[0x5];\n-\tu8 log_max_eq_sz[0x8];\n-\tu8 reserved_at_e8[0x2];\n-\tu8 log_max_mkey[0x6];\n-\tu8 reserved_at_f0[0x8];\n-\tu8 dump_fill_mkey[0x1];\n-\tu8 reserved_at_f9[0x3];\n-\tu8 log_max_eq[0x4];\n-\tu8 max_indirection[0x8];\n-\tu8 fixed_buffer_size[0x1];\n-\tu8 log_max_mrw_sz[0x7];\n-\tu8 force_teardown[0x1];\n-\tu8 reserved_at_111[0x1];\n-\tu8 log_max_bsf_list_size[0x6];\n-\tu8 umr_extended_translation_offset[0x1];\n-\tu8 null_mkey[0x1];\n-\tu8 log_max_klm_list_size[0x6];\n-\tu8 reserved_at_120[0xa];\n-\tu8 log_max_ra_req_dc[0x6];\n-\tu8 reserved_at_130[0xa];\n-\tu8 log_max_ra_res_dc[0x6];\n-\tu8 reserved_at_140[0xa];\n-\tu8 log_max_ra_req_qp[0x6];\n-\tu8 reserved_at_150[0xa];\n-\tu8 log_max_ra_res_qp[0x6];\n-\tu8 end_pad[0x1];\n-\tu8 cc_query_allowed[0x1];\n-\tu8 cc_modify_allowed[0x1];\n-\tu8 start_pad[0x1];\n-\tu8 cache_line_128byte[0x1];\n-\tu8 reserved_at_165[0xa];\n-\tu8 qcam_reg[0x1];\n-\tu8 gid_table_size[0x10];\n-\tu8 out_of_seq_cnt[0x1];\n-\tu8 vport_counters[0x1];\n-\tu8 retransmission_q_counters[0x1];\n-\tu8 debug[0x1];\n-\tu8 modify_rq_counter_set_id[0x1];\n-\tu8 rq_delay_drop[0x1];\n-\tu8 max_qp_cnt[0xa];\n-\tu8 pkey_table_size[0x10];\n-\tu8 vport_group_manager[0x1];\n-\tu8 vhca_group_manager[0x1];\n-\tu8 ib_virt[0x1];\n-\tu8 eth_virt[0x1];\n-\tu8 vnic_env_queue_counters[0x1];\n-\tu8 ets[0x1];\n-\tu8 nic_flow_table[0x1];\n-\tu8 eswitch_manager[0x1];\n-\tu8 device_memory[0x1];\n-\tu8 mcam_reg[0x1];\n-\tu8 pcam_reg[0x1];\n-\tu8 local_ca_ack_delay[0x5];\n-\tu8 port_module_event[0x1];\n-\tu8 enhanced_error_q_counters[0x1];\n-\tu8 ports_check[0x1];\n-\tu8 reserved_at_1b3[0x1];\n-\tu8 disable_link_up[0x1];\n-\tu8 beacon_led[0x1];\n-\tu8 port_type[0x2];\n-\tu8 num_ports[0x8];\n-\tu8 reserved_at_1c0[0x1];\n-\tu8 pps[0x1];\n-\tu8 pps_modify[0x1];\n-\tu8 log_max_msg[0x5];\n-\tu8 reserved_at_1c8[0x4];\n-\tu8 max_tc[0x4];\n-\tu8 temp_warn_event[0x1];\n-\tu8 dcbx[0x1];\n-\tu8 general_notification_event[0x1];\n-\tu8 reserved_at_1d3[0x2];\n-\tu8 fpga[0x1];\n-\tu8 rol_s[0x1];\n-\tu8 rol_g[0x1];\n-\tu8 reserved_at_1d8[0x1];\n-\tu8 wol_s[0x1];\n-\tu8 wol_g[0x1];\n-\tu8 wol_a[0x1];\n-\tu8 wol_b[0x1];\n-\tu8 wol_m[0x1];\n-\tu8 wol_u[0x1];\n-\tu8 wol_p[0x1];\n-\tu8 stat_rate_support[0x10];\n-\tu8 reserved_at_1f0[0xc];\n-\tu8 cqe_version[0x4];\n-\tu8 compact_address_vector[0x1];\n-\tu8 striding_rq[0x1];\n-\tu8 reserved_at_202[0x1];\n-\tu8 ipoib_enhanced_offloads[0x1];\n-\tu8 ipoib_basic_offloads[0x1];\n-\tu8 reserved_at_205[0x1];\n-\tu8 repeated_block_disabled[0x1];\n-\tu8 umr_modify_entity_size_disabled[0x1];\n-\tu8 
umr_modify_atomic_disabled[0x1];\n-\tu8 umr_indirect_mkey_disabled[0x1];\n-\tu8 umr_fence[0x2];\n-\tu8 reserved_at_20c[0x3];\n-\tu8 drain_sigerr[0x1];\n-\tu8 cmdif_checksum[0x2];\n-\tu8 sigerr_cqe[0x1];\n-\tu8 reserved_at_213[0x1];\n-\tu8 wq_signature[0x1];\n-\tu8 sctr_data_cqe[0x1];\n-\tu8 reserved_at_216[0x1];\n-\tu8 sho[0x1];\n-\tu8 tph[0x1];\n-\tu8 rf[0x1];\n-\tu8 dct[0x1];\n-\tu8 qos[0x1];\n-\tu8 eth_net_offloads[0x1];\n-\tu8 roce[0x1];\n-\tu8 atomic[0x1];\n-\tu8 reserved_at_21f[0x1];\n-\tu8 cq_oi[0x1];\n-\tu8 cq_resize[0x1];\n-\tu8 cq_moderation[0x1];\n-\tu8 reserved_at_223[0x3];\n-\tu8 cq_eq_remap[0x1];\n-\tu8 pg[0x1];\n-\tu8 block_lb_mc[0x1];\n-\tu8 reserved_at_229[0x1];\n-\tu8 scqe_break_moderation[0x1];\n-\tu8 cq_period_start_from_cqe[0x1];\n-\tu8 cd[0x1];\n-\tu8 reserved_at_22d[0x1];\n-\tu8 apm[0x1];\n-\tu8 vector_calc[0x1];\n-\tu8 umr_ptr_rlky[0x1];\n-\tu8 imaicl[0x1];\n-\tu8 reserved_at_232[0x4];\n-\tu8 qkv[0x1];\n-\tu8 pkv[0x1];\n-\tu8 set_deth_sqpn[0x1];\n-\tu8 reserved_at_239[0x3];\n-\tu8 xrc[0x1];\n-\tu8 ud[0x1];\n-\tu8 uc[0x1];\n-\tu8 rc[0x1];\n-\tu8 uar_4k[0x1];\n-\tu8 reserved_at_241[0x9];\n-\tu8 uar_sz[0x6];\n-\tu8 reserved_at_250[0x8];\n-\tu8 log_pg_sz[0x8];\n-\tu8 bf[0x1];\n-\tu8 driver_version[0x1];\n-\tu8 pad_tx_eth_packet[0x1];\n-\tu8 reserved_at_263[0x8];\n-\tu8 log_bf_reg_size[0x5];\n-\tu8 reserved_at_270[0xb];\n-\tu8 lag_master[0x1];\n-\tu8 num_lag_ports[0x4];\n-\tu8 reserved_at_280[0x10];\n-\tu8 max_wqe_sz_sq[0x10];\n-\tu8 reserved_at_2a0[0x10];\n-\tu8 max_wqe_sz_rq[0x10];\n-\tu8 max_flow_counter_31_16[0x10];\n-\tu8 max_wqe_sz_sq_dc[0x10];\n-\tu8 reserved_at_2e0[0x7];\n-\tu8 max_qp_mcg[0x19];\n-\tu8 reserved_at_300[0x10];\n-\tu8 flow_counter_bulk_alloc[0x08];\n-\tu8 log_max_mcg[0x8];\n-\tu8 reserved_at_320[0x3];\n-\tu8 log_max_transport_domain[0x5];\n-\tu8 reserved_at_328[0x3];\n-\tu8 log_max_pd[0x5];\n-\tu8 reserved_at_330[0xb];\n-\tu8 log_max_xrcd[0x5];\n-\tu8 nic_receive_steering_discard[0x1];\n-\tu8 receive_discard_vport_down[0x1];\n-\tu8 transmit_discard_vport_down[0x1];\n-\tu8 reserved_at_343[0x5];\n-\tu8 log_max_flow_counter_bulk[0x8];\n-\tu8 max_flow_counter_15_0[0x10];\n-\tu8 modify_tis[0x1];\n-\tu8 flow_counters_dump[0x1];\n-\tu8 reserved_at_360[0x1];\n-\tu8 log_max_rq[0x5];\n-\tu8 reserved_at_368[0x3];\n-\tu8 log_max_sq[0x5];\n-\tu8 reserved_at_370[0x3];\n-\tu8 log_max_tir[0x5];\n-\tu8 reserved_at_378[0x3];\n-\tu8 log_max_tis[0x5];\n-\tu8 basic_cyclic_rcv_wqe[0x1];\n-\tu8 reserved_at_381[0x2];\n-\tu8 log_max_rmp[0x5];\n-\tu8 reserved_at_388[0x3];\n-\tu8 log_max_rqt[0x5];\n-\tu8 reserved_at_390[0x3];\n-\tu8 log_max_rqt_size[0x5];\n-\tu8 reserved_at_398[0x3];\n-\tu8 log_max_tis_per_sq[0x5];\n-\tu8 ext_stride_num_range[0x1];\n-\tu8 reserved_at_3a1[0x2];\n-\tu8 log_max_stride_sz_rq[0x5];\n-\tu8 reserved_at_3a8[0x3];\n-\tu8 log_min_stride_sz_rq[0x5];\n-\tu8 reserved_at_3b0[0x3];\n-\tu8 log_max_stride_sz_sq[0x5];\n-\tu8 reserved_at_3b8[0x3];\n-\tu8 log_min_stride_sz_sq[0x5];\n-\tu8 hairpin[0x1];\n-\tu8 reserved_at_3c1[0x2];\n-\tu8 log_max_hairpin_queues[0x5];\n-\tu8 reserved_at_3c8[0x3];\n-\tu8 log_max_hairpin_wq_data_sz[0x5];\n-\tu8 reserved_at_3d0[0x3];\n-\tu8 log_max_hairpin_num_packets[0x5];\n-\tu8 reserved_at_3d8[0x3];\n-\tu8 log_max_wq_sz[0x5];\n-\tu8 nic_vport_change_event[0x1];\n-\tu8 disable_local_lb_uc[0x1];\n-\tu8 disable_local_lb_mc[0x1];\n-\tu8 log_min_hairpin_wq_data_sz[0x5];\n-\tu8 reserved_at_3e8[0x3];\n-\tu8 log_max_vlan_list[0x5];\n-\tu8 reserved_at_3f0[0x3];\n-\tu8 log_max_current_mc_list[0x5];\n-\tu8 reserved_at_3f8[0x3];\n-\tu8 
log_max_current_uc_list[0x5];\n-\tu8 general_obj_types[0x40];\n-\tu8 reserved_at_440[0x20];\n-\tu8 reserved_at_460[0x10];\n-\tu8 max_num_eqs[0x10];\n-\tu8 reserved_at_480[0x3];\n-\tu8 log_max_l2_table[0x5];\n-\tu8 reserved_at_488[0x8];\n-\tu8 log_uar_page_sz[0x10];\n-\tu8 reserved_at_4a0[0x20];\n-\tu8 device_frequency_mhz[0x20];\n-\tu8 device_frequency_khz[0x20];\n-\tu8 reserved_at_500[0x20];\n-\tu8 num_of_uars_per_page[0x20];\n-\tu8 flex_parser_protocols[0x20];\n-\tu8 reserved_at_560[0x20];\n-\tu8 reserved_at_580[0x3c];\n-\tu8 mini_cqe_resp_stride_index[0x1];\n-\tu8 cqe_128_always[0x1];\n-\tu8 cqe_compression_128[0x1];\n-\tu8 cqe_compression[0x1];\n-\tu8 cqe_compression_timeout[0x10];\n-\tu8 cqe_compression_max_num[0x10];\n-\tu8 reserved_at_5e0[0x10];\n-\tu8 tag_matching[0x1];\n-\tu8 rndv_offload_rc[0x1];\n-\tu8 rndv_offload_dc[0x1];\n-\tu8 log_tag_matching_list_sz[0x5];\n-\tu8 reserved_at_5f8[0x3];\n-\tu8 log_max_xrq[0x5];\n-\tu8 affiliate_nic_vport_criteria[0x8];\n-\tu8 native_port_num[0x8];\n-\tu8 num_vhca_ports[0x8];\n-\tu8 reserved_at_618[0x6];\n-\tu8 sw_owner_id[0x1];\n-\tu8 reserved_at_61f[0x1e1];\n-};\n-\n-struct mlx5_ifc_qos_cap_bits {\n-\tu8 packet_pacing[0x1];\n-\tu8 esw_scheduling[0x1];\n-\tu8 esw_bw_share[0x1];\n-\tu8 esw_rate_limit[0x1];\n-\tu8 reserved_at_4[0x1];\n-\tu8 packet_pacing_burst_bound[0x1];\n-\tu8 packet_pacing_typical_size[0x1];\n-\tu8 flow_meter_srtcm[0x1];\n-\tu8 reserved_at_8[0x8];\n-\tu8 log_max_flow_meter[0x8];\n-\tu8 flow_meter_reg_id[0x8];\n-\tu8 reserved_at_25[0x20];\n-\tu8 packet_pacing_max_rate[0x20];\n-\tu8 packet_pacing_min_rate[0x20];\n-\tu8 reserved_at_80[0x10];\n-\tu8 packet_pacing_rate_table_size[0x10];\n-\tu8 esw_element_type[0x10];\n-\tu8 esw_tsar_type[0x10];\n-\tu8 reserved_at_c0[0x10];\n-\tu8 max_qos_para_vport[0x10];\n-\tu8 max_tsar_bw_share[0x20];\n-\tu8 reserved_at_100[0x6e8];\n-};\n-\n-struct mlx5_ifc_per_protocol_networking_offload_caps_bits {\n-\tu8 csum_cap[0x1];\n-\tu8 vlan_cap[0x1];\n-\tu8 lro_cap[0x1];\n-\tu8 lro_psh_flag[0x1];\n-\tu8 lro_time_stamp[0x1];\n-\tu8 lro_max_msg_sz_mode[0x2];\n-\tu8 wqe_vlan_insert[0x1];\n-\tu8 self_lb_en_modifiable[0x1];\n-\tu8 self_lb_mc[0x1];\n-\tu8 self_lb_uc[0x1];\n-\tu8 max_lso_cap[0x5];\n-\tu8 multi_pkt_send_wqe[0x2];\n-\tu8 wqe_inline_mode[0x2];\n-\tu8 rss_ind_tbl_cap[0x4];\n-\tu8 reg_umr_sq[0x1];\n-\tu8 scatter_fcs[0x1];\n-\tu8 enhanced_multi_pkt_send_wqe[0x1];\n-\tu8 tunnel_lso_const_out_ip_id[0x1];\n-\tu8 tunnel_lro_gre[0x1];\n-\tu8 tunnel_lro_vxlan[0x1];\n-\tu8 tunnel_stateless_gre[0x1];\n-\tu8 tunnel_stateless_vxlan[0x1];\n-\tu8 swp[0x1];\n-\tu8 swp_csum[0x1];\n-\tu8 swp_lso[0x1];\n-\tu8 reserved_at_23[0x8];\n-\tu8 tunnel_stateless_gtp[0x1];\n-\tu8 reserved_at_25[0x4];\n-\tu8 max_vxlan_udp_ports[0x8];\n-\tu8 reserved_at_38[0x6];\n-\tu8 max_geneve_opt_len[0x1];\n-\tu8 tunnel_stateless_geneve_rx[0x1];\n-\tu8 reserved_at_40[0x10];\n-\tu8 lro_min_mss_size[0x10];\n-\tu8 reserved_at_60[0x120];\n-\tu8 lro_timer_supported_periods[4][0x20];\n-\tu8 reserved_at_200[0x600];\n-};\n-\n-union mlx5_ifc_hca_cap_union_bits {\n-\tstruct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;\n-\tstruct mlx5_ifc_per_protocol_networking_offload_caps_bits\n-\t       per_protocol_networking_offload_caps;\n-\tstruct mlx5_ifc_qos_cap_bits qos_cap;\n-\tu8 reserved_at_0[0x8000];\n-};\n-\n-struct mlx5_ifc_query_hca_cap_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 reserved_at_40[0x40];\n-\tunion mlx5_ifc_hca_cap_union_bits capability;\n-};\n-\n-struct mlx5_ifc_query_hca_cap_in_bits {\n-\tu8 
opcode[0x10];\n-\tu8 reserved_at_10[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 reserved_at_40[0x40];\n-};\n-\n-struct mlx5_ifc_mac_address_layout_bits {\n-\tu8 reserved_at_0[0x10];\n-\tu8 mac_addr_47_32[0x10];\n-\tu8 mac_addr_31_0[0x20];\n-};\n-\n-struct mlx5_ifc_nic_vport_context_bits {\n-\tu8 reserved_at_0[0x5];\n-\tu8 min_wqe_inline_mode[0x3];\n-\tu8 reserved_at_8[0x15];\n-\tu8 disable_mc_local_lb[0x1];\n-\tu8 disable_uc_local_lb[0x1];\n-\tu8 roce_en[0x1];\n-\tu8 arm_change_event[0x1];\n-\tu8 reserved_at_21[0x1a];\n-\tu8 event_on_mtu[0x1];\n-\tu8 event_on_promisc_change[0x1];\n-\tu8 event_on_vlan_change[0x1];\n-\tu8 event_on_mc_address_change[0x1];\n-\tu8 event_on_uc_address_change[0x1];\n-\tu8 reserved_at_40[0xc];\n-\tu8 affiliation_criteria[0x4];\n-\tu8 affiliated_vhca_id[0x10];\n-\tu8 reserved_at_60[0xd0];\n-\tu8 mtu[0x10];\n-\tu8 system_image_guid[0x40];\n-\tu8 port_guid[0x40];\n-\tu8 node_guid[0x40];\n-\tu8 reserved_at_200[0x140];\n-\tu8 qkey_violation_counter[0x10];\n-\tu8 reserved_at_350[0x430];\n-\tu8 promisc_uc[0x1];\n-\tu8 promisc_mc[0x1];\n-\tu8 promisc_all[0x1];\n-\tu8 reserved_at_783[0x2];\n-\tu8 allowed_list_type[0x3];\n-\tu8 reserved_at_788[0xc];\n-\tu8 allowed_list_size[0xc];\n-\tstruct mlx5_ifc_mac_address_layout_bits permanent_address;\n-\tu8 reserved_at_7e0[0x20];\n-};\n-\n-struct mlx5_ifc_query_nic_vport_context_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 reserved_at_40[0x40];\n-\tstruct mlx5_ifc_nic_vport_context_bits nic_vport_context;\n-};\n-\n-struct mlx5_ifc_query_nic_vport_context_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 reserved_at_10[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 other_vport[0x1];\n-\tu8 reserved_at_41[0xf];\n-\tu8 vport_number[0x10];\n-\tu8 reserved_at_60[0x5];\n-\tu8 allowed_list_type[0x3];\n-\tu8 reserved_at_68[0x18];\n-};\n-\n-struct mlx5_ifc_tisc_bits {\n-\tu8 strict_lag_tx_port_affinity[0x1];\n-\tu8 reserved_at_1[0x3];\n-\tu8 lag_tx_port_affinity[0x04];\n-\tu8 reserved_at_8[0x4];\n-\tu8 prio[0x4];\n-\tu8 reserved_at_10[0x10];\n-\tu8 reserved_at_20[0x100];\n-\tu8 reserved_at_120[0x8];\n-\tu8 transport_domain[0x18];\n-\tu8 reserved_at_140[0x8];\n-\tu8 underlay_qpn[0x18];\n-\tu8 reserved_at_160[0x3a0];\n-};\n-\n-struct mlx5_ifc_query_tis_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 reserved_at_40[0x40];\n-\tstruct mlx5_ifc_tisc_bits tis_context;\n-};\n-\n-struct mlx5_ifc_query_tis_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 reserved_at_10[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 reserved_at_40[0x8];\n-\tu8 tisn[0x18];\n-\tu8 reserved_at_60[0x20];\n-};\n-\n-struct mlx5_ifc_alloc_transport_domain_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 reserved_at_40[0x8];\n-\tu8 transport_domain[0x18];\n-\tu8 reserved_at_60[0x20];\n-};\n-\n-struct mlx5_ifc_alloc_transport_domain_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 reserved_at_10[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 reserved_at_40[0x40];\n-};\n-\n-enum {\n-\tMLX5_WQ_TYPE_LINKED_LIST                = 0x0,\n-\tMLX5_WQ_TYPE_CYCLIC                     = 0x1,\n-\tMLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ    = 0x2,\n-\tMLX5_WQ_TYPE_CYCLIC_STRIDING_RQ         = 0x3,\n-};\n-\n-enum {\n-\tMLX5_WQ_END_PAD_MODE_NONE  = 0x0,\n-\tMLX5_WQ_END_PAD_MODE_ALIGN = 0x1,\n-};\n-\n-struct mlx5_ifc_wq_bits {\n-\tu8 wq_type[0x4];\n-\tu8 wq_signature[0x1];\n-\tu8 end_padding_mode[0x2];\n-\tu8 cd_slave[0x1];\n-\tu8 
reserved_at_8[0x18];\n-\tu8 hds_skip_first_sge[0x1];\n-\tu8 log2_hds_buf_size[0x3];\n-\tu8 reserved_at_24[0x7];\n-\tu8 page_offset[0x5];\n-\tu8 lwm[0x10];\n-\tu8 reserved_at_40[0x8];\n-\tu8 pd[0x18];\n-\tu8 reserved_at_60[0x8];\n-\tu8 uar_page[0x18];\n-\tu8 dbr_addr[0x40];\n-\tu8 hw_counter[0x20];\n-\tu8 sw_counter[0x20];\n-\tu8 reserved_at_100[0xc];\n-\tu8 log_wq_stride[0x4];\n-\tu8 reserved_at_110[0x3];\n-\tu8 log_wq_pg_sz[0x5];\n-\tu8 reserved_at_118[0x3];\n-\tu8 log_wq_sz[0x5];\n-\tu8 dbr_umem_valid[0x1];\n-\tu8 wq_umem_valid[0x1];\n-\tu8 reserved_at_122[0x1];\n-\tu8 log_hairpin_num_packets[0x5];\n-\tu8 reserved_at_128[0x3];\n-\tu8 log_hairpin_data_sz[0x5];\n-\tu8 reserved_at_130[0x4];\n-\tu8 single_wqe_log_num_of_strides[0x4];\n-\tu8 two_byte_shift_en[0x1];\n-\tu8 reserved_at_139[0x4];\n-\tu8 single_stride_log_num_of_bytes[0x3];\n-\tu8 dbr_umem_id[0x20];\n-\tu8 wq_umem_id[0x20];\n-\tu8 wq_umem_offset[0x40];\n-\tu8 reserved_at_1c0[0x440];\n-};\n-\n-enum {\n-\tMLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE  = 0x0,\n-\tMLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP     = 0x1,\n-};\n-\n-enum {\n-\tMLX5_RQC_STATE_RST  = 0x0,\n-\tMLX5_RQC_STATE_RDY  = 0x1,\n-\tMLX5_RQC_STATE_ERR  = 0x3,\n-};\n-\n-struct mlx5_ifc_rqc_bits {\n-\tu8 rlky[0x1];\n-\tu8 delay_drop_en[0x1];\n-\tu8 scatter_fcs[0x1];\n-\tu8 vsd[0x1];\n-\tu8 mem_rq_type[0x4];\n-\tu8 state[0x4];\n-\tu8 reserved_at_c[0x1];\n-\tu8 flush_in_error_en[0x1];\n-\tu8 hairpin[0x1];\n-\tu8 reserved_at_f[0x11];\n-\tu8 reserved_at_20[0x8];\n-\tu8 user_index[0x18];\n-\tu8 reserved_at_40[0x8];\n-\tu8 cqn[0x18];\n-\tu8 counter_set_id[0x8];\n-\tu8 reserved_at_68[0x18];\n-\tu8 reserved_at_80[0x8];\n-\tu8 rmpn[0x18];\n-\tu8 reserved_at_a0[0x8];\n-\tu8 hairpin_peer_sq[0x18];\n-\tu8 reserved_at_c0[0x10];\n-\tu8 hairpin_peer_vhca[0x10];\n-\tu8 reserved_at_e0[0xa0];\n-\tstruct mlx5_ifc_wq_bits wq; /* Not used in LRO RQ. 
*/\n-};\n-\n-struct mlx5_ifc_create_rq_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 reserved_at_40[0x8];\n-\tu8 rqn[0x18];\n-\tu8 reserved_at_60[0x20];\n-};\n-\n-struct mlx5_ifc_create_rq_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 uid[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 reserved_at_40[0xc0];\n-\tstruct mlx5_ifc_rqc_bits ctx;\n-};\n-\n-struct mlx5_ifc_modify_rq_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 reserved_at_40[0x40];\n-};\n-\n-struct mlx5_ifc_create_tis_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 reserved_at_40[0x8];\n-\tu8 tisn[0x18];\n-\tu8 reserved_at_60[0x20];\n-};\n-\n-struct mlx5_ifc_create_tis_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 uid[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 reserved_at_40[0xc0];\n-\tstruct mlx5_ifc_tisc_bits ctx;\n-};\n-\n-enum {\n-\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM = 1ULL << 0,\n-\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,\n-\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,\n-\tMLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,\n-};\n-\n-struct mlx5_ifc_modify_rq_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 uid[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 rq_state[0x4];\n-\tu8 reserved_at_44[0x4];\n-\tu8 rqn[0x18];\n-\tu8 reserved_at_60[0x20];\n-\tu8 modify_bitmask[0x40];\n-\tu8 reserved_at_c0[0x40];\n-\tstruct mlx5_ifc_rqc_bits ctx;\n-};\n-\n-enum {\n-\tMLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP     = 0x0,\n-\tMLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP     = 0x1,\n-\tMLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT   = 0x2,\n-\tMLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT   = 0x3,\n-\tMLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI  = 0x4,\n-};\n-\n-struct mlx5_ifc_rx_hash_field_select_bits {\n-\tu8 l3_prot_type[0x1];\n-\tu8 l4_prot_type[0x1];\n-\tu8 selected_fields[0x1e];\n-};\n-\n-enum {\n-\tMLX5_TIRC_DISP_TYPE_DIRECT    = 0x0,\n-\tMLX5_TIRC_DISP_TYPE_INDIRECT  = 0x1,\n-};\n-\n-enum {\n-\tMLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO  = 0x1,\n-\tMLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO  = 0x2,\n-};\n-\n-enum {\n-\tMLX5_RX_HASH_FN_NONE           = 0x0,\n-\tMLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,\n-\tMLX5_RX_HASH_FN_TOEPLITZ       = 0x2,\n-};\n-\n-enum {\n-\tMLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST    = 0x1,\n-\tMLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST  = 0x2,\n-};\n-\n-enum {\n-\tMLX5_LRO_MAX_MSG_SIZE_START_FROM_L4    = 0x0,\n-\tMLX5_LRO_MAX_MSG_SIZE_START_FROM_L2  = 0x1,\n-};\n-\n-struct mlx5_ifc_tirc_bits {\n-\tu8 reserved_at_0[0x20];\n-\tu8 disp_type[0x4];\n-\tu8 reserved_at_24[0x1c];\n-\tu8 reserved_at_40[0x40];\n-\tu8 reserved_at_80[0x4];\n-\tu8 lro_timeout_period_usecs[0x10];\n-\tu8 lro_enable_mask[0x4];\n-\tu8 lro_max_msg_sz[0x8];\n-\tu8 reserved_at_a0[0x40];\n-\tu8 reserved_at_e0[0x8];\n-\tu8 inline_rqn[0x18];\n-\tu8 rx_hash_symmetric[0x1];\n-\tu8 reserved_at_101[0x1];\n-\tu8 tunneled_offload_en[0x1];\n-\tu8 reserved_at_103[0x5];\n-\tu8 indirect_table[0x18];\n-\tu8 rx_hash_fn[0x4];\n-\tu8 reserved_at_124[0x2];\n-\tu8 self_lb_block[0x2];\n-\tu8 transport_domain[0x18];\n-\tu8 rx_hash_toeplitz_key[10][0x20];\n-\tstruct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;\n-\tstruct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;\n-\tu8 reserved_at_2c0[0x4c0];\n-};\n-\n-struct mlx5_ifc_create_tir_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 
reserved_at_40[0x8];\n-\tu8 tirn[0x18];\n-\tu8 reserved_at_60[0x20];\n-};\n-\n-struct mlx5_ifc_create_tir_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 uid[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 reserved_at_40[0xc0];\n-\tstruct mlx5_ifc_tirc_bits ctx;\n-};\n-\n-struct mlx5_ifc_rq_num_bits {\n-\tu8 reserved_at_0[0x8];\n-\tu8 rq_num[0x18];\n-};\n-\n-struct mlx5_ifc_rqtc_bits {\n-\tu8 reserved_at_0[0xa0];\n-\tu8 reserved_at_a0[0x10];\n-\tu8 rqt_max_size[0x10];\n-\tu8 reserved_at_c0[0x10];\n-\tu8 rqt_actual_size[0x10];\n-\tu8 reserved_at_e0[0x6a0];\n-\tstruct mlx5_ifc_rq_num_bits rq_num[];\n-};\n-\n-struct mlx5_ifc_create_rqt_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 reserved_at_40[0x8];\n-\tu8 rqtn[0x18];\n-\tu8 reserved_at_60[0x20];\n-};\n-\n-#ifdef PEDANTIC\n-#pragma GCC diagnostic ignored \"-Wpedantic\"\n-#endif\n-struct mlx5_ifc_create_rqt_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 uid[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 reserved_at_40[0xc0];\n-\tstruct mlx5_ifc_rqtc_bits rqt_context;\n-};\n-#ifdef PEDANTIC\n-#pragma GCC diagnostic error \"-Wpedantic\"\n-#endif\n-\n-enum {\n-\tMLX5_SQC_STATE_RST  = 0x0,\n-\tMLX5_SQC_STATE_RDY  = 0x1,\n-\tMLX5_SQC_STATE_ERR  = 0x3,\n-};\n-\n-struct mlx5_ifc_sqc_bits {\n-\tu8 rlky[0x1];\n-\tu8 cd_master[0x1];\n-\tu8 fre[0x1];\n-\tu8 flush_in_error_en[0x1];\n-\tu8 allow_multi_pkt_send_wqe[0x1];\n-\tu8 min_wqe_inline_mode[0x3];\n-\tu8 state[0x4];\n-\tu8 reg_umr[0x1];\n-\tu8 allow_swp[0x1];\n-\tu8 hairpin[0x1];\n-\tu8 reserved_at_f[0x11];\n-\tu8 reserved_at_20[0x8];\n-\tu8 user_index[0x18];\n-\tu8 reserved_at_40[0x8];\n-\tu8 cqn[0x18];\n-\tu8 reserved_at_60[0x8];\n-\tu8 hairpin_peer_rq[0x18];\n-\tu8 reserved_at_80[0x10];\n-\tu8 hairpin_peer_vhca[0x10];\n-\tu8 reserved_at_a0[0x50];\n-\tu8 packet_pacing_rate_limit_index[0x10];\n-\tu8 tis_lst_sz[0x10];\n-\tu8 reserved_at_110[0x10];\n-\tu8 reserved_at_120[0x40];\n-\tu8 reserved_at_160[0x8];\n-\tu8 tis_num_0[0x18];\n-\tstruct mlx5_ifc_wq_bits wq;\n-};\n-\n-struct mlx5_ifc_query_sq_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 reserved_at_10[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 reserved_at_40[0x8];\n-\tu8 sqn[0x18];\n-\tu8 reserved_at_60[0x20];\n-};\n-\n-struct mlx5_ifc_modify_sq_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 reserved_at_40[0x40];\n-};\n-\n-struct mlx5_ifc_modify_sq_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 uid[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 sq_state[0x4];\n-\tu8 reserved_at_44[0x4];\n-\tu8 sqn[0x18];\n-\tu8 reserved_at_60[0x20];\n-\tu8 modify_bitmask[0x40];\n-\tu8 reserved_at_c0[0x40];\n-\tstruct mlx5_ifc_sqc_bits ctx;\n-};\n-\n-struct mlx5_ifc_create_sq_out_bits {\n-\tu8 status[0x8];\n-\tu8 reserved_at_8[0x18];\n-\tu8 syndrome[0x20];\n-\tu8 reserved_at_40[0x8];\n-\tu8 sqn[0x18];\n-\tu8 reserved_at_60[0x20];\n-};\n-\n-struct mlx5_ifc_create_sq_in_bits {\n-\tu8 opcode[0x10];\n-\tu8 uid[0x10];\n-\tu8 reserved_at_20[0x10];\n-\tu8 op_mod[0x10];\n-\tu8 reserved_at_40[0xc0];\n-\tstruct mlx5_ifc_sqc_bits ctx;\n-};\n-\n-enum {\n-\tMLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE = (1ULL << 0),\n-\tMLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS = (1ULL << 1),\n-\tMLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR = (1ULL << 2),\n-\tMLX5_FLOW_METER_OBJ_MODIFY_FIELD_EBS = (1ULL << 3),\n-\tMLX5_FLOW_METER_OBJ_MODIFY_FIELD_EIR = (1ULL << 4),\n-};\n-\n-struct mlx5_ifc_flow_meter_parameters_bits {\n-\tu8         valid[0x1];\t\t\t// 00h\n-\tu8         
bucket_overflow[0x1];\n-\tu8         start_color[0x2];\n-\tu8         both_buckets_on_green[0x1];\n-\tu8         meter_mode[0x2];\n-\tu8         reserved_at_1[0x19];\n-\tu8         reserved_at_2[0x20]; //04h\n-\tu8         reserved_at_3[0x3];\n-\tu8         cbs_exponent[0x5];\t\t// 08h\n-\tu8         cbs_mantissa[0x8];\n-\tu8         reserved_at_4[0x3];\n-\tu8         cir_exponent[0x5];\n-\tu8         cir_mantissa[0x8];\n-\tu8         reserved_at_5[0x20];\t\t// 0Ch\n-\tu8         reserved_at_6[0x3];\n-\tu8         ebs_exponent[0x5];\t\t// 10h\n-\tu8         ebs_mantissa[0x8];\n-\tu8         reserved_at_7[0x3];\n-\tu8         eir_exponent[0x5];\n-\tu8         eir_mantissa[0x8];\n-\tu8         reserved_at_8[0x60];\t\t// 14h-1Ch\n-};\n-\n-/* CQE format mask. */\n-#define MLX5E_CQE_FORMAT_MASK 0xc\n-\n-/* MPW opcode. */\n-#define MLX5_OPC_MOD_MPW 0x01\n-\n-/* Compressed Rx CQE structure. */\n-struct mlx5_mini_cqe8 {\n-\tunion {\n-\t\tuint32_t rx_hash_result;\n-\t\tstruct {\n-\t\t\tuint16_t checksum;\n-\t\t\tuint16_t stride_idx;\n-\t\t};\n-\t\tstruct {\n-\t\t\tuint16_t wqe_counter;\n-\t\t\tuint8_t  s_wqe_opcode;\n-\t\t\tuint8_t  reserved;\n-\t\t} s_wqe_info;\n-\t};\n-\tuint32_t byte_cnt;\n-};\n-\n-/* srTCM PRM flow meter parameters. */\n-enum {\n-\tMLX5_FLOW_COLOR_RED = 0,\n-\tMLX5_FLOW_COLOR_YELLOW,\n-\tMLX5_FLOW_COLOR_GREEN,\n-\tMLX5_FLOW_COLOR_UNDEFINED,\n-};\n-\n-/* Maximum value of srTCM metering parameters. */\n-#define MLX5_SRTCM_CBS_MAX (0xFF * (1ULL << 0x1F))\n-#define MLX5_SRTCM_CIR_MAX (8 * (1ULL << 30) * 0xFF)\n-#define MLX5_SRTCM_EBS_MAX 0\n-\n-/**\n- * Convert a user mark to flow mark.\n- *\n- * @param val\n- *   Mark value to convert.\n- *\n- * @return\n- *   Converted mark value.\n- */\n-static inline uint32_t\n-mlx5_flow_mark_set(uint32_t val)\n-{\n-\tuint32_t ret;\n-\n-\t/*\n-\t * Add one to the user value to differentiate un-marked flows from\n-\t * marked flows, if the ID is equal to MLX5_FLOW_MARK_DEFAULT it\n-\t * remains untouched.\n-\t */\n-\tif (val != MLX5_FLOW_MARK_DEFAULT)\n-\t\t++val;\n-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-\t/*\n-\t * Mark is 24 bits (minus reserved values) but is stored on a 32 bit\n-\t * word, byte-swapped by the kernel on little-endian systems. In this\n-\t * case, left-shifting the resulting big-endian value ensures the\n-\t * least significant 24 bits are retained when converting it back.\n-\t */\n-\tret = rte_cpu_to_be_32(val) >> 8;\n-#else\n-\tret = val;\n-#endif\n-\treturn ret;\n-}\n-\n-/**\n- * Convert a mark to user mark.\n- *\n- * @param val\n- *   Mark value to convert.\n- *\n- * @return\n- *   Converted mark value.\n- */\n-static inline uint32_t\n-mlx5_flow_mark_get(uint32_t val)\n-{\n-\t/*\n-\t * Subtract one from the retrieved value. 
It was added by\n-\t * mlx5_flow_mark_set() to distinguish unmarked flows.\n-\t */\n-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-\treturn (val >> 8) - 1;\n-#else\n-\treturn val - 1;\n-#endif\n-}\n-\n-#endif /* RTE_PMD_MLX5_PRM_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c\nindex 1028264..345ce3a 100644\n--- a/drivers/net/mlx5/mlx5_rss.c\n+++ b/drivers/net/mlx5/mlx5_rss.c\n@@ -22,8 +22,8 @@\n #include <rte_malloc.h>\n #include <rte_ethdev_driver.h>\n \n-#include \"mlx5.h\"\n #include \"mlx5_defs.h\"\n+#include \"mlx5.h\"\n #include \"mlx5_rxtx.h\"\n \n /**\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 89168cd..62fdbe6 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -30,14 +30,16 @@\n #include <rte_debug.h>\n #include <rte_io.h>\n \n+#include <mlx5_glue.h>\n+#include <mlx5_devx_cmds.h>\n+\n+#include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_defs.h\"\n-#include \"mlx5_glue.h\"\n #include \"mlx5_flow.h\"\n-#include \"mlx5_devx_cmds.h\"\n+\n \n /* Default RSS hash key also used for ConnectX-3. */\n uint8_t rss_hash_default_key[] = {\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c\nindex 2eede1b..a845f67 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.c\n+++ b/drivers/net/mlx5/mlx5_rxtx.c\n@@ -28,13 +28,14 @@\n #include <rte_cycles.h>\n #include <rte_flow.h>\n \n+#include <mlx5_devx_cmds.h>\n+#include <mlx5_prm.h>\n+\n+#include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n-#include \"mlx5_devx_cmds.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_defs.h\"\n-#include \"mlx5_prm.h\"\n \n /* TX burst subroutines return codes. */\n enum mlx5_txcmp_code {\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex b6a33c5..84b1fce 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -31,13 +31,14 @@\n #include <rte_bus_pci.h>\n #include <rte_malloc.h>\n \n+#include <mlx5_glue.h>\n+#include <mlx5_prm.h>\n+\n+#include \"mlx5_defs.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5.h\"\n #include \"mlx5_mr.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_defs.h\"\n-#include \"mlx5_prm.h\"\n-#include \"mlx5_glue.h\"\n \n /* Support tunnel matching. */\n #define MLX5_FLOW_TUNNEL 10\ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c\nindex d85f908..5505762 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec.c\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c\n@@ -23,13 +23,14 @@\n #include <rte_mempool.h>\n #include <rte_prefetch.h>\n \n+#include <mlx5_prm.h>\n+\n+#include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_rxtx_vec.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_defs.h\"\n-#include \"mlx5_prm.h\"\n \n #if defined RTE_ARCH_X86_64\n #include \"mlx5_rxtx_vec_sse.h\"\ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h\nindex 85e0bd5..39aefc3 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h\n@@ -9,8 +9,9 @@\n #include <rte_common.h>\n #include <rte_mbuf.h>\n \n+#include <mlx5_prm.h>\n+\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_prm.h\"\n \n /* HW checksum offload capabilities of vectorized Tx. 
*/\n #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \\\ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h\nindex 8e79883..cd1b65f 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h\n@@ -17,13 +17,14 @@\n #include <rte_mempool.h>\n #include <rte_prefetch.h>\n \n+#include <mlx5_prm.h>\n+\n+#include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_rxtx_vec.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_defs.h\"\n-#include \"mlx5_prm.h\"\n \n #ifndef __INTEL_COMPILER\n #pragma GCC diagnostic ignored \"-Wcast-qual\"\ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\nindex 86785c7..9fd8429 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h\n@@ -16,13 +16,14 @@\n #include <rte_mempool.h>\n #include <rte_prefetch.h>\n \n+#include <mlx5_prm.h>\n+\n+#include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_rxtx_vec.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_defs.h\"\n-#include \"mlx5_prm.h\"\n \n #pragma GCC diagnostic ignored \"-Wcast-qual\"\n \ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\nindex 35b7761..f281b9e 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h\n@@ -16,13 +16,14 @@\n #include <rte_mempool.h>\n #include <rte_prefetch.h>\n \n+#include <mlx5_prm.h>\n+\n+#include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_rxtx_vec.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_defs.h\"\n-#include \"mlx5_prm.h\"\n \n #ifndef __INTEL_COMPILER\n #pragma GCC diagnostic ignored \"-Wcast-qual\"\ndiff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c\nindex 205e4fe..0ed7170 100644\n--- a/drivers/net/mlx5/mlx5_stats.c\n+++ b/drivers/net/mlx5/mlx5_stats.c\n@@ -13,9 +13,9 @@\n #include <rte_common.h>\n #include <rte_malloc.h>\n \n+#include \"mlx5_defs.h\"\n #include \"mlx5.h\"\n #include \"mlx5_rxtx.h\"\n-#include \"mlx5_defs.h\"\n \n static const struct mlx5_counter_ctrl mlx5_counters_init[] = {\n \t{\ndiff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c\nindex 5adb4dc..1d2ba8a 100644\n--- a/drivers/net/mlx5/mlx5_txq.c\n+++ b/drivers/net/mlx5/mlx5_txq.c\n@@ -28,13 +28,14 @@\n #include <rte_ethdev_driver.h>\n #include <rte_common.h>\n \n-#include \"mlx5_utils.h\"\n+#include <mlx5_glue.h>\n+#include <mlx5_devx_cmds.h>\n+\n #include \"mlx5_defs.h\"\n+#include \"mlx5_utils.h\"\n #include \"mlx5.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_glue.h\"\n-#include \"mlx5_devx_cmds.h\"\n \n /**\n  * Allocate TX queue elements.\ndiff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h\nindex ebf79b8..c868aee 100644\n--- a/drivers/net/mlx5/mlx5_utils.h\n+++ b/drivers/net/mlx5/mlx5_utils.h\n@@ -13,8 +13,11 @@\n #include <assert.h>\n #include <errno.h>\n \n+#include <mlx5_common.h>\n+\n #include \"mlx5_defs.h\"\n \n+\n /*\n  * Compilation workaround for PPC64 when AltiVec is fully enabled, e.g. std=c11.\n  * Otherwise there would be a type conflict between stdbool and altivec.\n@@ -50,81 +53,14 @@\n /* Save and restore errno around argument evaluation. 
*/\n #define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))\n \n-/*\n- * Helper macros to work around __VA_ARGS__ limitations in a C99 compliant\n- * manner.\n- */\n-#define PMD_DRV_LOG_STRIP(a, b) a\n-#define PMD_DRV_LOG_OPAREN (\n-#define PMD_DRV_LOG_CPAREN )\n-#define PMD_DRV_LOG_COMMA ,\n-\n-/* Return the file name part of a path. */\n-static inline const char *\n-pmd_drv_log_basename(const char *s)\n-{\n-\tconst char *n = s;\n-\n-\twhile (*n)\n-\t\tif (*(n++) == '/')\n-\t\t\ts = n;\n-\treturn s;\n-}\n-\n extern int mlx5_logtype;\n \n-#define PMD_DRV_LOG___(level, ...) \\\n-\trte_log(RTE_LOG_ ## level, \\\n-\t\tmlx5_logtype, \\\n-\t\tRTE_FMT(MLX5_DRIVER_NAME \": \" \\\n-\t\t\tRTE_FMT_HEAD(__VA_ARGS__,), \\\n-\t\tRTE_FMT_TAIL(__VA_ARGS__,)))\n-\n-/*\n- * When debugging is enabled (NDEBUG not defined), file, line and function\n- * information replace the driver name (MLX5_DRIVER_NAME) in log messages.\n- */\n-#ifndef NDEBUG\n-\n-#define PMD_DRV_LOG__(level, ...) \\\n-\tPMD_DRV_LOG___(level, \"%s:%u: %s(): \" __VA_ARGS__)\n-#define PMD_DRV_LOG_(level, s, ...) \\\n-\tPMD_DRV_LOG__(level, \\\n-\t\ts \"\\n\" PMD_DRV_LOG_COMMA \\\n-\t\tpmd_drv_log_basename(__FILE__) PMD_DRV_LOG_COMMA \\\n-\t\t__LINE__ PMD_DRV_LOG_COMMA \\\n-\t\t__func__, \\\n-\t\t__VA_ARGS__)\n-\n-#else /* NDEBUG */\n-#define PMD_DRV_LOG__(level, ...) \\\n-\tPMD_DRV_LOG___(level, __VA_ARGS__)\n-#define PMD_DRV_LOG_(level, s, ...) \\\n-\tPMD_DRV_LOG__(level, s \"\\n\", __VA_ARGS__)\n-\n-#endif /* NDEBUG */\n-\n /* Generic printf()-like logging macro with automatic line feed. */\n #define DRV_LOG(level, ...) \\\n-\tPMD_DRV_LOG_(level, \\\n+\tPMD_DRV_LOG_(level, mlx5_logtype, MLX5_DRIVER_NAME, \\\n \t\t__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \\\n \t\tPMD_DRV_LOG_CPAREN)\n \n-/* claim_zero() does not perform any check when debugging is disabled. */\n-#ifndef NDEBUG\n-\n-#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)\n-#define claim_zero(...) assert((__VA_ARGS__) == 0)\n-#define claim_nonzero(...) assert((__VA_ARGS__) != 0)\n-\n-#else /* NDEBUG */\n-\n-#define DEBUG(...) (void)0\n-#define claim_zero(...) (__VA_ARGS__)\n-#define claim_nonzero(...) (__VA_ARGS__)\n-\n-#endif /* NDEBUG */\n-\n #define INFO(...) DRV_LOG(INFO, __VA_ARGS__)\n #define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)\n #define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)\n@@ -144,13 +80,6 @@\n \t (((val) & (from)) / ((from) / (to))) : \\\n \t (((val) & (from)) * ((to) / (from))))\n \n-/* Allocate a buffer on the stack and fill it with a printf format string. */\n-#define MKSTR(name, ...) 
\\\n-\tint mkstr_size_##name = snprintf(NULL, 0, \"\" __VA_ARGS__); \\\n-\tchar name[mkstr_size_##name + 1]; \\\n-\t\\\n-\tsnprintf(name, sizeof(name), \"\" __VA_ARGS__)\n-\n /**\n  * Return logarithm of the nearest power of two above input value.\n  *\ndiff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c\nindex feac0f1..b0fa31a 100644\n--- a/drivers/net/mlx5/mlx5_vlan.c\n+++ b/drivers/net/mlx5/mlx5_vlan.c\n@@ -27,10 +27,11 @@\n #include <rte_ethdev_driver.h>\n #include <rte_common.h>\n \n+#include <mlx5_glue.h>\n+#include <mlx5_devx_cmds.h>\n+\n #include \"mlx5.h\"\n #include \"mlx5_autoconf.h\"\n-#include \"mlx5_glue.h\"\n-#include \"mlx5_devx_cmds.h\"\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_utils.h\"\n \ndiff --git a/mk/rte.app.mk b/mk/rte.app.mk\nindex 1169dd8..d90f14d 100644\n--- a/mk/rte.app.mk\n+++ b/mk/rte.app.mk\n@@ -196,6 +196,7 @@ endif\n _LDLIBS-$(CONFIG_RTE_LIBRTE_LIO_PMD)        += -lrte_pmd_lio\n _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_MEMIF)      += -lrte_pmd_memif\n _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD)       += -lrte_pmd_mlx4\n+_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -lrte_common_mlx5\n _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -lrte_pmd_mlx5\n ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)\n _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD)       += -ldl\n",
    "prefixes": [
        "v1",
        "02/38"
    ]
}