get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied are changed).

put:
Update a patch (full update).
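
For example, the endpoint can be driven from any HTTP client. Below is a
minimal sketch using Python's third-party "requests" library; the token value
is a placeholder, and write access (PUT/PATCH) assumes an account with
maintainer rights on the project, while reads need no authentication.

    import requests

    URL = "http://patches.dpdk.org/api/patches/29198/"

    # GET: show a patch (no authentication needed).
    patch = requests.get(URL).json()
    print(patch["name"], patch["state"])

    # PATCH: partial update; only the fields sent are changed.
    resp = requests.patch(
        URL,
        json={"state": "accepted", "archived": True},
        headers={"Authorization": "Token REPLACE_WITH_API_TOKEN"},
    )
    resp.raise_for_status()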

GET /api/patches/29198/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 29198,
    "url": "http://patches.dpdk.org/api/patches/29198/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1506440304-28795-1-git-send-email-nelio.laranjeiro@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1506440304-28795-1-git-send-email-nelio.laranjeiro@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1506440304-28795-1-git-send-email-nelio.laranjeiro@6wind.com",
    "date": "2017-09-26T15:38:24",
    "name": "[dpdk-dev,v8] net/mlx5: support upstream rdma-core",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "dac4e556a2934f1e540fb379fae234aabdfdfb98",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/?format=api",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1506440304-28795-1-git-send-email-nelio.laranjeiro@6wind.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/29198/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/29198/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1B60A1B1AF;\n\tTue, 26 Sep 2017 17:38:52 +0200 (CEST)",
            "from mail-wr0-f169.google.com (mail-wr0-f169.google.com\n\t[209.85.128.169]) by dpdk.org (Postfix) with ESMTP id 3BE261B19F\n\tfor <dev@dpdk.org>; Tue, 26 Sep 2017 17:38:50 +0200 (CEST)",
            "by mail-wr0-f169.google.com with SMTP id v109so13591759wrc.1\n\tfor <dev@dpdk.org>; Tue, 26 Sep 2017 08:38:50 -0700 (PDT)",
            "from ping.dev.6wind.com (host.78.145.23.62.rev.coltfrance.com.\n\t[62.23.145.78]) by smtp.gmail.com with ESMTPSA id\n\tq19sm12030047wrb.17.2017.09.26.08.38.47\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);\n\tTue, 26 Sep 2017 08:38:47 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=1pkVfY+6KzkwvneqO2I+voSYZalHRRGSZTJcjVDnAlQ=;\n\tb=1wmN4XM2lmblM5Jr/Y85ZPSe0wbHu58rdX1WfbmLAqRznfGt1ds7iepMKk4juPIv5v\n\t7mVB3p+OkFh3ff745zG5I/QXZuDhP7D0IFNNTI0j4H9BXZ1D/eNCfI8D77K2qZO2hNtP\n\tAA8zuFF0sceLl3cp0Kw/ibQz7+x57GQNtgCER+M2wXKTnhPhxjzZcjfFaUTmfkvh+sEw\n\tng2LlWvD+YUMfUIrsvCf7PwuWSfSZvNuW6qN+QYwCcTh/IC7+ErugoqgA4InCWZ0iDx2\n\tBDKQbeaqME5rQBEbDgjLvHb0sEqnsRLRvJNrHT862PV2eO0IeR2MyLdo1al9qYQbR9lc\n\tt0Wg==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=1pkVfY+6KzkwvneqO2I+voSYZalHRRGSZTJcjVDnAlQ=;\n\tb=lM7JDrNqUVckkFKLVMiZA0LJvTPDBjSwPOPpE6ZzsvSXoaMfN/BLwklHTgR0mQKiCL\n\tcwv1XuCSuiiRrof/hDaAw54x9zlotdgCnyhAHabsdhhKW+5oxG3mevsJMsdnfSc22Cs1\n\taY5t/Xxy3fmwzQZp+Fa5mfSa0+iDwA1II+YzJAZPD9XBuYuD5FWUyliynF/GgVPHvqyF\n\th4B4lS8ZKwGI3AE7ILmRHKKSZhZ1+tW/+vPqe7ndc6vK96N3RIBdg5dG5UBvR46P5/Me\n\tTrp+1foXiUI2QZsLsYMCPKNWC411F+cXq+68Hq7YTbbpL36rlZyvuCJL6TNW2OV0wKvo\n\tkI5A==",
        "X-Gm-Message-State": "AHPjjUgSm5AURJwPEllrgoKKf/bmETmlO2S3GWjMDCHrMor40CNSlHYj\n\t0fb5ch1wcHl+TaBjNVwCjcm6ij/1QA==",
        "X-Google-Smtp-Source": "AOwi7QAEhr7PjmnwVfCnfNKH0D2JmH9WKrdsJnNkNx7AFW+5qeJCnhZwB273/xzdYEKh9WRAQ8i4cA==",
        "X-Received": "by 10.223.163.154 with SMTP id l26mr9668913wrb.42.1506440328026; \n\tTue, 26 Sep 2017 08:38:48 -0700 (PDT)",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "To": "dev@dpdk.org,\n\tFerruh Yigit <ferruh.yigit@intel.com>",
        "Cc": "Shachar Beiser <shacharbe@mellanox.com>",
        "Date": "Tue, 26 Sep 2017 17:38:24 +0200",
        "Message-Id": "<1506440304-28795-1-git-send-email-nelio.laranjeiro@6wind.com>",
        "X-Mailer": "git-send-email 2.1.4",
        "In-Reply-To": "<0c4ff3cb5a1608e9c2b5717ea3d31e315bb3a6c8.1505744936.git.shacharbe@mellanox.com>",
        "References": "<0c4ff3cb5a1608e9c2b5717ea3d31e315bb3a6c8.1505744936.git.shacharbe@mellanox.com>",
        "Subject": "[dpdk-dev] [PATCH v8] net/mlx5: support upstream rdma-core",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Shachar Beiser <shacharbe@mellanox.com>\n\nThis removes the dependency on specific Mellanox OFED libraries by\nusing the upstream rdma-core and linux upstream community code.\nBoth rdma-core upstream and Mellanox OFED are Linux user-space packages:\n  1. Rdma-core is Linux upstream user-space package.(Generic)\n  2. Mellanox OFED is Mellanox's Linux user-space package.(Proprietary)\nThe difference between the two are the APIs towards the kernel.\n\nSupport for x86-32 is removed due to issues in rdma-core library.\nICC compilation will be supported as soon as the following patch is\nintegrated in rdma-core:\nhttps://marc.info/?l=linux-rdma&m=150643474705690&w=2\n\nSigned-off-by: Shachar Beiser <shacharbe@mellanox.com>\nSigned-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\n---\n\nChanges in v8:\n\n - Remove support for 32 bits (not supported by RDMA core)\n - Add link with libmlx5 for shared library support\n - Documentation rewording\n---\n doc/guides/nics/features/mlx5.ini    |   1 -\n doc/guides/nics/mlx5.rst             |  30 +++--\n drivers/net/mlx5/Makefile            |  25 ++--\n drivers/net/mlx5/mlx5.c              |  98 ++++++++-------\n drivers/net/mlx5/mlx5.h              |   4 +-\n drivers/net/mlx5/mlx5_ethdev.c       |   4 +-\n drivers/net/mlx5/mlx5_fdir.c         | 103 ++++++++--------\n drivers/net/mlx5/mlx5_flow.c         | 230 +++++++++++++++++------------------\n drivers/net/mlx5/mlx5_mac.c          |  18 +--\n drivers/net/mlx5/mlx5_prm.h          |  42 ++++++-\n drivers/net/mlx5/mlx5_rxmode.c       |  18 +--\n drivers/net/mlx5/mlx5_rxq.c          | 228 +++++++++++++++++++---------------\n drivers/net/mlx5/mlx5_rxtx.c         |   3 +-\n drivers/net/mlx5/mlx5_rxtx.h         |  33 ++---\n drivers/net/mlx5/mlx5_rxtx_vec_sse.c |   3 +-\n drivers/net/mlx5/mlx5_txq.c          |  73 ++++++-----\n drivers/net/mlx5/mlx5_vlan.c         |  13 +-\n mk/rte.app.mk                        |   2 +-\n 18 files changed, 502 insertions(+), 426 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini\nindex d506249..4a2c3a6 100644\n--- a/doc/guides/nics/features/mlx5.ini\n+++ b/doc/guides/nics/features/mlx5.ini\n@@ -38,6 +38,5 @@ Stats per queue      = Y\n Other kdrv           = Y\n ARMv8                = Y\n Power8               = Y\n-x86-32               = Y\n x86-64               = Y\n Usage doc            = Y\ndiff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst\nindex c6a196c..be0e91c 100644\n--- a/doc/guides/nics/mlx5.rst\n+++ b/doc/guides/nics/mlx5.rst\n@@ -297,7 +297,7 @@ DPDK and must be installed separately:\n   This library basically implements send/receive calls to the hardware\n   queues.\n \n-- **Kernel modules** (mlnx-ofed-kernel)\n+- **Kernel modules**\n \n   They provide the kernel-side Verbs API and low level device drivers that\n   manage actual hardware initialization and resources sharing with user\n@@ -324,9 +324,26 @@ DPDK and must be installed separately:\n    Both libraries are BSD and GPL licensed. Linux kernel modules are GPL\n    licensed.\n \n-Currently supported by DPDK:\n+Installation\n+~~~~~~~~~~~~\n \n-- Mellanox OFED version: **4.1**.\n+Either RDMA Core library with a recent enough Linux kernel release\n+(recommended) or Mellanox OFED, which provides compatibility with older\n+releases.\n+\n+RMDA Core with Linux Kernel\n+^^^^^^^^^^^^^^^^^^^^^^^^^^^\n+\n+- Minimal kernel version : 4.13-rc4 (see `Linux installation documentation`_)\n+- Minimal rdma-core version: v15 (see `RDMA Core installation documentation`_)\n+\n+.. _`Linux installation documentation`: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/plain/Documentation/admin-guide/README.rst\n+.. _`RDMA Core installation documentation`: https://raw.githubusercontent.com/linux-rdma/rdma-core/master/README.md\n+\n+Mellanox OFED\n+^^^^^^^^^^^^^\n+\n+- Mellanox OFED version: **4.2**.\n - firmware version:\n \n   - ConnectX-4: **12.20.1010** and above.\n@@ -334,9 +351,6 @@ Currently supported by DPDK:\n   - ConnectX-5: **16.20.1010** and above.\n   - ConnectX-5 Ex: **16.20.1010** and above.\n \n-Getting Mellanox OFED\n-~~~~~~~~~~~~~~~~~~~~~\n-\n While these libraries and kernel modules are available on OpenFabrics\n Alliance's `website <https://www.openfabrics.org/>`__ and provided by package\n managers on most distributions, this PMD requires Ethernet extensions that\n@@ -377,8 +391,8 @@ Supported NICs\n * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)\n * Mellanox(R) ConnectX(R)-5 Ex EN 100G MCX516A-CDAT (2x100G)\n \n-Quick Start Guide\n------------------\n+Quick Start Guide on OFED\n+-------------------------\n \n 1. Download latest Mellanox OFED. 
For more info check the  `prerequisites`_.\n \ndiff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile\nindex 14b739a..f75d344 100644\n--- a/drivers/net/mlx5/Makefile\n+++ b/drivers/net/mlx5/Makefile\n@@ -63,7 +63,7 @@ CFLAGS += -D_DEFAULT_SOURCE\n CFLAGS += -D_XOPEN_SOURCE=600\n CFLAGS += $(WERROR_FLAGS)\n CFLAGS += -Wno-strict-prototypes\n-LDLIBS += -libverbs\n+LDLIBS += -libverbs -lmlx5\n \n # A few warnings cannot be avoided in external headers.\n CFLAGS += -Wno-error=cast-qual\n@@ -104,19 +104,19 @@ mlx5_autoconf.h.new: FORCE\n mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh\n \t$Q $(RM) -f -- '$@'\n \t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \\\n-\t\tinfiniband/verbs_exp.h \\\n-\t\tenum IBV_EXP_CQ_COMPRESSED_CQE \\\n+\t\tHAVE_IBV_DEVICE_VXLAN_SUPPORT \\\n+\t\tinfiniband/verbs.h \\\n+\t\tenum IBV_DEVICE_VXLAN_SUPPORT \\\n \t\t$(AUTOCONF_OUTPUT)\n \t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE \\\n-\t\tinfiniband/mlx5_hw.h \\\n-\t\tenum MLX5_ETH_VLAN_INLINE_HEADER_SIZE \\\n+\t\tHAVE_IBV_WQ_FLAG_RX_END_PADDING \\\n+\t\tinfiniband/verbs.h \\\n+\t\tenum IBV_WQ_FLAG_RX_END_PADDING \\\n \t\t$(AUTOCONF_OUTPUT)\n \t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_VERBS_MLX5_OPCODE_TSO \\\n-\t\tinfiniband/mlx5_hw.h \\\n-\t\tenum MLX5_OPCODE_TSO \\\n+\t\tHAVE_IBV_MLX5_MOD_MPW \\\n+\t\tinfiniband/mlx5dv.h \\\n+\t\tenum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \\\n \t\t$(AUTOCONF_OUTPUT)\n \t$Q sh -- '$<' '$@' \\\n \t\tHAVE_ETHTOOL_LINK_MODE_25G \\\n@@ -133,11 +133,6 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh\n \t\t/usr/include/linux/ethtool.h \\\n \t\tenum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \\\n \t\t$(AUTOCONF_OUTPUT)\n-\t$Q sh -- '$<' '$@' \\\n-\t\tHAVE_UPDATE_CQ_CI \\\n-\t\tinfiniband/mlx5_hw.h \\\n-\t\tfunc ibv_mlx5_exp_update_cq_ci \\\n-\t\t$(AUTOCONF_OUTPUT)\n \n # Create mlx5_autoconf.h or update it in case it differs from the new one.\n \ndiff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 32e22df..229b824 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -96,6 +96,11 @@\n /* Default PMD specific parameter value. 
*/\n #define MLX5_ARG_UNSET (-1)\n \n+#ifndef HAVE_IBV_MLX5_MOD_MPW\n+#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)\n+#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)\n+#endif\n+\n struct mlx5_args {\n \tint cqe_comp;\n \tint txq_inline;\n@@ -247,10 +252,8 @@ static const struct eth_dev_ops mlx5_dev_ops = {\n \t.filter_ctrl = mlx5_dev_filter_ctrl,\n \t.rx_descriptor_status = mlx5_rx_descriptor_status,\n \t.tx_descriptor_status = mlx5_tx_descriptor_status,\n-#ifdef HAVE_UPDATE_CQ_CI\n \t.rx_queue_intr_enable = mlx5_rx_intr_enable,\n \t.rx_queue_intr_disable = mlx5_rx_intr_disable,\n-#endif\n };\n \n static struct {\n@@ -442,12 +445,13 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)\n \tstruct ibv_device *ibv_dev;\n \tint err = 0;\n \tstruct ibv_context *attr_ctx = NULL;\n-\tstruct ibv_device_attr device_attr;\n+\tstruct ibv_device_attr_ex device_attr;\n \tunsigned int sriov;\n \tunsigned int mps;\n \tunsigned int tunnel_en = 0;\n \tint idx;\n \tint i;\n+\tstruct mlx5dv_context attrs_out;\n \n \t(void)pci_drv;\n \tassert(pci_drv == &mlx5_driver);\n@@ -493,35 +497,24 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)\n \t\t       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||\n \t\t      (pci_dev->id.device_id ==\n \t\t       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));\n-\t\t/*\n-\t\t * Multi-packet send is supported by ConnectX-4 Lx PF as well\n-\t\t * as all ConnectX-5 devices.\n-\t\t */\n \t\tswitch (pci_dev->id.device_id) {\n \t\tcase PCI_DEVICE_ID_MELLANOX_CONNECTX4:\n \t\t\ttunnel_en = 1;\n-\t\t\tmps = MLX5_MPW_DISABLED;\n \t\t\tbreak;\n \t\tcase PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:\n-\t\t\ttunnel_en = 1;\n-\t\t\tmps = MLX5_MPW;\n-\t\t\tbreak;\n \t\tcase PCI_DEVICE_ID_MELLANOX_CONNECTX5:\n \t\tcase PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:\n \t\tcase PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:\n \t\tcase PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:\n \t\t\ttunnel_en = 1;\n-\t\t\tmps = MLX5_MPW_ENHANCED;\n \t\t\tbreak;\n \t\tdefault:\n-\t\t\tmps = MLX5_MPW_DISABLED;\n+\t\t\tbreak;\n \t\t}\n \t\tINFO(\"PCI information matches, using device \\\"%s\\\"\"\n-\t\t     \" (SR-IOV: %s, %sMPS: %s)\",\n+\t\t     \" (SR-IOV: %s)\",\n \t\t     list[i]->name,\n-\t\t     sriov ? \"true\" : \"false\",\n-\t\t     mps == MLX5_MPW_ENHANCED ? \"Enhanced \" : \"\",\n-\t\t     mps != MLX5_MPW_DISABLED ? \"true\" : \"false\");\n+\t\t     sriov ? 
\"true\" : \"false\");\n \t\tattr_ctx = ibv_open_device(list[i]);\n \t\terr = errno;\n \t\tbreak;\n@@ -542,11 +535,27 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)\n \tibv_dev = list[i];\n \n \tDEBUG(\"device opened\");\n-\tif (ibv_query_device(attr_ctx, &device_attr))\n+\t/*\n+\t * Multi-packet send is supported by ConnectX-4 Lx PF as well\n+\t * as all ConnectX-5 devices.\n+\t */\n+\tmlx5dv_query_device(attr_ctx, &attrs_out);\n+\tif (attrs_out.flags & (MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW |\n+\t\t\t       MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED)) {\n+\t\tINFO(\"Enhanced MPW is detected\\n\");\n+\t\tmps = MLX5_MPW_ENHANCED;\n+\t} else if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {\n+\t\tINFO(\"MPW is detected\\n\");\n+\t\tmps = MLX5_MPW;\n+\t} else {\n+\t\tINFO(\"MPW is disabled\\n\");\n+\t\tmps = MLX5_MPW_DISABLED;\n+\t}\n+\tif (ibv_query_device_ex(attr_ctx, NULL, &device_attr))\n \t\tgoto error;\n-\tINFO(\"%u port(s) detected\", device_attr.phys_port_cnt);\n+\tINFO(\"%u port(s) detected\", device_attr.orig_attr.phys_port_cnt);\n \n-\tfor (i = 0; i < device_attr.phys_port_cnt; i++) {\n+\tfor (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {\n \t\tuint32_t port = i + 1; /* ports are indexed from one */\n \t\tuint32_t test = (1 << i);\n \t\tstruct ibv_context *ctx = NULL;\n@@ -554,7 +563,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)\n \t\tstruct ibv_pd *pd = NULL;\n \t\tstruct priv *priv = NULL;\n \t\tstruct rte_eth_dev *eth_dev;\n-\t\tstruct ibv_exp_device_attr exp_device_attr;\n+\t\tstruct ibv_device_attr_ex device_attr_ex;\n \t\tstruct ether_addr mac;\n \t\tuint16_t num_vfs = 0;\n \t\tstruct mlx5_args args = {\n@@ -569,14 +578,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)\n \t\t\t.rx_vec_en = MLX5_ARG_UNSET,\n \t\t};\n \n-\t\texp_device_attr.comp_mask =\n-\t\t\tIBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |\n-\t\t\tIBV_EXP_DEVICE_ATTR_RX_HASH |\n-\t\t\tIBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |\n-\t\t\tIBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |\n-\t\t\tIBV_EXP_DEVICE_ATTR_TSO_CAPS |\n-\t\t\t0;\n-\n \t\tDEBUG(\"using port %u (%08\" PRIx32 \")\", port, test);\n \n \t\tctx = ibv_open_device(ibv_dev);\n@@ -642,26 +643,26 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)\n \t\t\tgoto port_error;\n \t\t}\n \t\tmlx5_args_assign(priv, &args);\n-\t\tif (ibv_exp_query_device(ctx, &exp_device_attr)) {\n-\t\t\tERROR(\"ibv_exp_query_device() failed\");\n-\t\t\terr = ENODEV;\n+\t\tif (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {\n+\t\t\tERROR(\"ibv_query_device_ex() failed\");\n \t\t\tgoto port_error;\n \t\t}\n \n \t\tpriv->hw_csum =\n-\t\t\t((exp_device_attr.exp_device_cap_flags &\n-\t\t\t  IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) &&\n-\t\t\t (exp_device_attr.exp_device_cap_flags &\n-\t\t\t  IBV_EXP_DEVICE_RX_CSUM_IP_PKT));\n+\t\t\t((device_attr_ex.device_cap_flags_ex &\n+\t\t\t  IBV_DEVICE_UD_IP_CSUM));\n \t\tDEBUG(\"checksum offloading is %ssupported\",\n \t\t      (priv->hw_csum ? \"\" : \"not \"));\n \n+#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT\n \t\tpriv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &\n-\t\t\t\t\t IBV_EXP_DEVICE_VXLAN_SUPPORT);\n+\t\t\t\t\t IBV_DEVICE_VXLAN_SUPPORT);\n+#endif\n \t\tDEBUG(\"L2 tunnel checksum offloads are %ssupported\",\n \t\t      (priv->hw_csum_l2tun ? 
\"\" : \"not \"));\n \n-\t\tpriv->ind_table_max_size = exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size;\n+\t\tpriv->ind_table_max_size =\n+\t\t\tdevice_attr_ex.rss_caps.max_rwq_indirection_table_size;\n \t\t/* Remove this check once DPDK supports larger/variable\n \t\t * indirection tables. */\n \t\tif (priv->ind_table_max_size >\n@@ -669,29 +670,32 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)\n \t\t\tpriv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;\n \t\tDEBUG(\"maximum RX indirection table size is %u\",\n \t\t      priv->ind_table_max_size);\n-\t\tpriv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap &\n-\t\t\t\t\t IBV_EXP_RECEIVE_WQ_CVLAN_STRIP);\n+\t\tpriv->hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &\n+\t\t\t\t\t IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);\n \t\tDEBUG(\"VLAN stripping is %ssupported\",\n \t\t      (priv->hw_vlan_strip ? \"\" : \"not \"));\n \n-\t\tpriv->hw_fcs_strip = !!(exp_device_attr.exp_device_cap_flags &\n-\t\t\t\t\tIBV_EXP_DEVICE_SCATTER_FCS);\n+\t\tpriv->hw_fcs_strip =\n+\t\t\t\t!!(device_attr_ex.orig_attr.device_cap_flags &\n+\t\t\t\tIBV_WQ_FLAGS_SCATTER_FCS);\n \t\tDEBUG(\"FCS stripping configuration is %ssupported\",\n \t\t      (priv->hw_fcs_strip ? \"\" : \"not \"));\n \n-\t\tpriv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align;\n+#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING\n+\t\tpriv->hw_padding = !!device_attr_ex.rx_pad_end_addr_align;\n+#endif\n \t\tDEBUG(\"hardware RX end alignment padding is %ssupported\",\n \t\t      (priv->hw_padding ? \"\" : \"not \"));\n \n \t\tpriv_get_num_vfs(priv, &num_vfs);\n \t\tpriv->sriov = (num_vfs || sriov);\n \t\tpriv->tso = ((priv->tso) &&\n-\t\t\t    (exp_device_attr.tso_caps.max_tso > 0) &&\n-\t\t\t    (exp_device_attr.tso_caps.supported_qpts &\n-\t\t\t    (1 << IBV_QPT_RAW_ETH)));\n+\t\t\t    (device_attr_ex.tso_caps.max_tso > 0) &&\n+\t\t\t    (device_attr_ex.tso_caps.supported_qpts &\n+\t\t\t    (1 << IBV_QPT_RAW_PACKET)));\n \t\tif (priv->tso)\n \t\t\tpriv->max_tso_payload_sz =\n-\t\t\t\texp_device_attr.tso_caps.max_tso;\n+\t\t\t\tdevice_attr_ex.tso_caps.max_tso;\n \t\tif (priv->mps && !mps) {\n \t\t\tERROR(\"multi-packet send not supported on this device\"\n \t\t\t      \" (\" MLX5_TXQ_MPW_EN \")\");\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex e89aba8..ab03fe0 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -89,7 +89,7 @@ struct mlx5_xstats_ctrl {\n struct priv {\n \tstruct rte_eth_dev *dev; /* Ethernet device. */\n \tstruct ibv_context *ctx; /* Verbs context. */\n-\tstruct ibv_device_attr device_attr; /* Device properties. */\n+\tstruct ibv_device_attr_ex device_attr; /* Device properties. */\n \tstruct ibv_pd *pd; /* Protection Domain. */\n \t/*\n \t * MAC addresses array and configuration bit-field.\n@@ -132,7 +132,7 @@ struct priv {\n \tstruct rxq *(*rxqs)[]; /* RX queues. */\n \tstruct txq *(*txqs)[]; /* TX queues. */\n \t/* Indirection tables referencing all RX WQs. */\n-\tstruct ibv_exp_rwq_ind_table *(*ind_tables)[];\n+\tstruct ibv_rwq_ind_table *(*ind_tables)[];\n \tunsigned int ind_tables_n; /* Number of indirection tables. */\n \tunsigned int ind_table_max_size; /* Maximum indirection table size. */\n \t/* Hash RX QPs feeding the indirection table. 
*/\ndiff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c\nindex a47fd2e..b87eb09 100644\n--- a/drivers/net/mlx5/mlx5_ethdev.c\n+++ b/drivers/net/mlx5/mlx5_ethdev.c\n@@ -660,8 +660,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)\n \t * Since we need one CQ per QP, the limit is the minimum number\n \t * between the two values.\n \t */\n-\tmax = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?\n-\t       priv->device_attr.max_qp : priv->device_attr.max_cq);\n+\tmax = RTE_MIN(priv->device_attr.orig_attr.max_cq,\n+\t\t      priv->device_attr.orig_attr.max_qp);\n \t/* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */\n \tif (max >= 65535)\n \t\tmax = 65535;\ndiff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c\nindex ad256e4..acae668 100644\n--- a/drivers/net/mlx5/mlx5_fdir.c\n+++ b/drivers/net/mlx5/mlx5_fdir.c\n@@ -72,7 +72,7 @@ struct mlx5_fdir_filter {\n \tuint16_t queue; /* Queue assigned to if FDIR match. */\n \tenum rte_eth_fdir_behavior behavior;\n \tstruct fdir_flow_desc desc;\n-\tstruct ibv_exp_flow *flow;\n+\tstruct ibv_flow *flow;\n };\n \n LIST_HEAD(fdir_filter_list, mlx5_fdir_filter);\n@@ -238,19 +238,19 @@ priv_fdir_flow_add(struct priv *priv,\n \t\t   struct mlx5_fdir_filter *mlx5_fdir_filter,\n \t\t   struct fdir_queue *fdir_queue)\n {\n-\tstruct ibv_exp_flow *flow;\n+\tstruct ibv_flow *flow;\n \tstruct fdir_flow_desc *desc = &mlx5_fdir_filter->desc;\n \tenum rte_fdir_mode fdir_mode =\n \t\tpriv->dev->data->dev_conf.fdir_conf.mode;\n \tstruct rte_eth_fdir_masks *mask =\n \t\t&priv->dev->data->dev_conf.fdir_conf.mask;\n \tFLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, desc->type));\n-\tstruct ibv_exp_flow_attr *attr = &data->attr;\n+\tstruct ibv_flow_attr *attr = &data->attr;\n \tuintptr_t spec_offset = (uintptr_t)&data->spec;\n-\tstruct ibv_exp_flow_spec_eth *spec_eth;\n-\tstruct ibv_exp_flow_spec_ipv4 *spec_ipv4;\n-\tstruct ibv_exp_flow_spec_ipv6 *spec_ipv6;\n-\tstruct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;\n+\tstruct ibv_flow_spec_eth *spec_eth;\n+\tstruct ibv_flow_spec_ipv4 *spec_ipv4;\n+\tstruct ibv_flow_spec_ipv6 *spec_ipv6;\n+\tstruct ibv_flow_spec_tcp_udp *spec_tcp_udp;\n \tstruct mlx5_fdir_filter *iter_fdir_filter;\n \tunsigned int i;\n \n@@ -272,10 +272,10 @@ priv_fdir_flow_add(struct priv *priv,\n \tpriv_flow_attr(priv, attr, sizeof(data), desc->type);\n \n \t/* Set Ethernet spec */\n-\tspec_eth = (struct ibv_exp_flow_spec_eth *)spec_offset;\n+\tspec_eth = (struct ibv_flow_spec_eth *)spec_offset;\n \n \t/* The first specification must be Ethernet. */\n-\tassert(spec_eth->type == IBV_EXP_FLOW_SPEC_ETH);\n+\tassert(spec_eth->type == IBV_FLOW_SPEC_ETH);\n \tassert(spec_eth->size == sizeof(*spec_eth));\n \n \t/* VLAN ID */\n@@ -302,10 +302,10 @@ priv_fdir_flow_add(struct priv *priv,\n \t\tspec_offset += spec_eth->size;\n \n \t\t/* Set IP spec */\n-\t\tspec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;\n+\t\tspec_ipv4 = (struct ibv_flow_spec_ipv4 *)spec_offset;\n \n \t\t/* The second specification must be IP. 
*/\n-\t\tassert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);\n+\t\tassert(spec_ipv4->type == IBV_FLOW_SPEC_IPV4);\n \t\tassert(spec_ipv4->size == sizeof(*spec_ipv4));\n \n \t\tspec_ipv4->val.src_ip =\n@@ -329,10 +329,10 @@ priv_fdir_flow_add(struct priv *priv,\n \t\tspec_offset += spec_eth->size;\n \n \t\t/* Set IP spec */\n-\t\tspec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;\n+\t\tspec_ipv6 = (struct ibv_flow_spec_ipv6 *)spec_offset;\n \n \t\t/* The second specification must be IP. */\n-\t\tassert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);\n+\t\tassert(spec_ipv6->type == IBV_FLOW_SPEC_IPV6);\n \t\tassert(spec_ipv6->size == sizeof(*spec_ipv6));\n \n \t\tfor (i = 0; i != RTE_DIM(desc->src_ip); ++i) {\n@@ -362,11 +362,11 @@ priv_fdir_flow_add(struct priv *priv,\n \t}\n \n \t/* Set TCP/UDP flow specification. */\n-\tspec_tcp_udp = (struct ibv_exp_flow_spec_tcp_udp *)spec_offset;\n+\tspec_tcp_udp = (struct ibv_flow_spec_tcp_udp *)spec_offset;\n \n \t/* The third specification must be TCP/UDP. */\n-\tassert(spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_TCP ||\n-\t       spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_UDP);\n+\tassert(spec_tcp_udp->type == IBV_FLOW_SPEC_TCP ||\n+\t       spec_tcp_udp->type == IBV_FLOW_SPEC_UDP);\n \tassert(spec_tcp_udp->size == sizeof(*spec_tcp_udp));\n \n \tspec_tcp_udp->val.src_port = desc->src_port & mask->src_port_mask;\n@@ -380,7 +380,7 @@ priv_fdir_flow_add(struct priv *priv,\n create_flow:\n \n \terrno = 0;\n-\tflow = ibv_exp_create_flow(fdir_queue->qp, attr);\n+\tflow = ibv_create_flow(fdir_queue->qp, attr);\n \tif (flow == NULL) {\n \t\t/* It's not clear whether errno is always set in this case. */\n \t\tERROR(\"%p: flow director configuration failed, errno=%d: %s\",\n@@ -416,16 +416,16 @@ priv_fdir_queue_destroy(struct priv *priv, struct fdir_queue *fdir_queue)\n \t\tassert(idx < priv->rxqs_n);\n \t\tif (fdir_queue == rxq_ctrl->fdir_queue &&\n \t\t    fdir_filter->flow != NULL) {\n-\t\t\tclaim_zero(ibv_exp_destroy_flow(fdir_filter->flow));\n+\t\t\tclaim_zero(ibv_destroy_flow(fdir_filter->flow));\n \t\t\tfdir_filter->flow = NULL;\n \t\t}\n \t}\n \tassert(fdir_queue->qp);\n \tclaim_zero(ibv_destroy_qp(fdir_queue->qp));\n \tassert(fdir_queue->ind_table);\n-\tclaim_zero(ibv_exp_destroy_rwq_ind_table(fdir_queue->ind_table));\n+\tclaim_zero(ibv_destroy_rwq_ind_table(fdir_queue->ind_table));\n \tif (fdir_queue->wq)\n-\t\tclaim_zero(ibv_exp_destroy_wq(fdir_queue->wq));\n+\t\tclaim_zero(ibv_destroy_wq(fdir_queue->wq));\n \tif (fdir_queue->cq)\n \t\tclaim_zero(ibv_destroy_cq(fdir_queue->cq));\n #ifndef NDEBUG\n@@ -447,7 +447,7 @@ priv_fdir_queue_destroy(struct priv *priv, struct fdir_queue *fdir_queue)\n  *   Related flow director queue on success, NULL otherwise.\n  */\n static struct fdir_queue *\n-priv_fdir_queue_create(struct priv *priv, struct ibv_exp_wq *wq,\n+priv_fdir_queue_create(struct priv *priv, struct ibv_wq *wq,\n \t\t       unsigned int socket)\n {\n \tstruct fdir_queue *fdir_queue;\n@@ -461,21 +461,18 @@ priv_fdir_queue_create(struct priv *priv, struct ibv_exp_wq *wq,\n \tassert(priv->pd);\n \tassert(priv->ctx);\n \tif (!wq) {\n-\t\tfdir_queue->cq = ibv_exp_create_cq(\n-\t\t\tpriv->ctx, 1, NULL, NULL, 0,\n-\t\t\t&(struct ibv_exp_cq_init_attr){\n-\t\t\t\t.comp_mask = 0,\n-\t\t\t});\n+\t\tfdir_queue->cq = ibv_create_cq(\n+\t\t\tpriv->ctx, 1, NULL, NULL, 0);\n \t\tif (!fdir_queue->cq) {\n \t\t\tERROR(\"cannot create flow director CQ\");\n \t\t\tgoto error;\n \t\t}\n-\t\tfdir_queue->wq = ibv_exp_create_wq(\n+\t\tfdir_queue->wq = ibv_create_wq(\n 
\t\t\tpriv->ctx,\n-\t\t\t&(struct ibv_exp_wq_init_attr){\n-\t\t\t\t.wq_type = IBV_EXP_WQT_RQ,\n-\t\t\t\t.max_recv_wr = 1,\n-\t\t\t\t.max_recv_sge = 1,\n+\t\t\t&(struct ibv_wq_init_attr){\n+\t\t\t\t.wq_type = IBV_WQT_RQ,\n+\t\t\t\t.max_wr = 1,\n+\t\t\t\t.max_sge = 1,\n \t\t\t\t.pd = priv->pd,\n \t\t\t\t.cq = fdir_queue->cq,\n \t\t\t});\n@@ -485,10 +482,9 @@ priv_fdir_queue_create(struct priv *priv, struct ibv_exp_wq *wq,\n \t\t}\n \t\twq = fdir_queue->wq;\n \t}\n-\tfdir_queue->ind_table = ibv_exp_create_rwq_ind_table(\n+\tfdir_queue->ind_table = ibv_create_rwq_ind_table(\n \t\tpriv->ctx,\n-\t\t&(struct ibv_exp_rwq_ind_table_init_attr){\n-\t\t\t.pd = priv->pd,\n+\t\t&(struct ibv_rwq_ind_table_init_attr){\n \t\t\t.log_ind_tbl_size = 0,\n \t\t\t.ind_tbl = &wq,\n \t\t\t.comp_mask = 0,\n@@ -497,24 +493,23 @@ priv_fdir_queue_create(struct priv *priv, struct ibv_exp_wq *wq,\n \t\tERROR(\"cannot create flow director indirection table\");\n \t\tgoto error;\n \t}\n-\tfdir_queue->qp = ibv_exp_create_qp(\n+\tfdir_queue->qp = ibv_create_qp_ex(\n \t\tpriv->ctx,\n-\t\t&(struct ibv_exp_qp_init_attr){\n+\t\t&(struct ibv_qp_init_attr_ex){\n \t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n \t\t\t.comp_mask =\n-\t\t\t\tIBV_EXP_QP_INIT_ATTR_PD |\n-\t\t\t\tIBV_EXP_QP_INIT_ATTR_PORT |\n-\t\t\t\tIBV_EXP_QP_INIT_ATTR_RX_HASH,\n-\t\t\t.pd = priv->pd,\n-\t\t\t.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){\n+\t\t\t\tIBV_QP_INIT_ATTR_PD |\n+\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE |\n+\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n+\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n \t\t\t\t.rx_hash_function =\n-\t\t\t\t\tIBV_EXP_RX_HASH_FUNC_TOEPLITZ,\n+\t\t\t\t\tIBV_RX_HASH_FUNC_TOEPLITZ,\n \t\t\t\t.rx_hash_key_len = rss_hash_default_key_len,\n \t\t\t\t.rx_hash_key = rss_hash_default_key,\n \t\t\t\t.rx_hash_fields_mask = 0,\n-\t\t\t\t.rwq_ind_tbl = fdir_queue->ind_table,\n \t\t\t},\n-\t\t\t.port_num = priv->port,\n+\t\t\t.rwq_ind_tbl = fdir_queue->ind_table,\n+\t\t\t.pd = priv->pd,\n \t\t});\n \tif (!fdir_queue->qp) {\n \t\tERROR(\"cannot create flow director hash RX QP\");\n@@ -525,10 +520,10 @@ priv_fdir_queue_create(struct priv *priv, struct ibv_exp_wq *wq,\n \tassert(fdir_queue);\n \tassert(!fdir_queue->qp);\n \tif (fdir_queue->ind_table)\n-\t\tclaim_zero(ibv_exp_destroy_rwq_ind_table\n+\t\tclaim_zero(ibv_destroy_rwq_ind_table\n \t\t\t   (fdir_queue->ind_table));\n \tif (fdir_queue->wq)\n-\t\tclaim_zero(ibv_exp_destroy_wq(fdir_queue->wq));\n+\t\tclaim_zero(ibv_destroy_wq(fdir_queue->wq));\n \tif (fdir_queue->cq)\n \t\tclaim_zero(ibv_destroy_cq(fdir_queue->cq));\n \trte_free(fdir_queue);\n@@ -673,13 +668,13 @@ priv_fdir_filter_flush(struct priv *priv)\n \tstruct mlx5_fdir_filter *mlx5_fdir_filter;\n \n \twhile ((mlx5_fdir_filter = LIST_FIRST(priv->fdir_filter_list))) {\n-\t\tstruct ibv_exp_flow *flow = mlx5_fdir_filter->flow;\n+\t\tstruct ibv_flow *flow = mlx5_fdir_filter->flow;\n \n \t\tDEBUG(\"%p: flushing flow director filter %p\",\n \t\t      (void *)priv, (void *)mlx5_fdir_filter);\n \t\tLIST_REMOVE(mlx5_fdir_filter, next);\n \t\tif (flow != NULL)\n-\t\t\tclaim_zero(ibv_exp_destroy_flow(flow));\n+\t\t\tclaim_zero(ibv_destroy_flow(flow));\n \t\trte_free(mlx5_fdir_filter);\n \t}\n }\n@@ -712,7 +707,7 @@ priv_fdir_disable(struct priv *priv)\n \n \t/* Run on every flow director filter and destroy flow handle. 
*/\n \tLIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {\n-\t\tstruct ibv_exp_flow *flow;\n+\t\tstruct ibv_flow *flow;\n \n \t\t/* Only valid elements should be in the list */\n \t\tassert(mlx5_fdir_filter != NULL);\n@@ -720,7 +715,7 @@ priv_fdir_disable(struct priv *priv)\n \n \t\t/* Destroy flow handle */\n \t\tif (flow != NULL) {\n-\t\t\tclaim_zero(ibv_exp_destroy_flow(flow));\n+\t\t\tclaim_zero(ibv_destroy_flow(flow));\n \t\t\tmlx5_fdir_filter->flow = NULL;\n \t\t}\n \t}\n@@ -887,7 +882,7 @@ priv_fdir_filter_update(struct priv *priv,\n \n \tmlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);\n \tif (mlx5_fdir_filter != NULL) {\n-\t\tstruct ibv_exp_flow *flow = mlx5_fdir_filter->flow;\n+\t\tstruct ibv_flow *flow = mlx5_fdir_filter->flow;\n \t\tint err = 0;\n \n \t\t/* Update queue number. */\n@@ -895,7 +890,7 @@ priv_fdir_filter_update(struct priv *priv,\n \n \t\t/* Destroy flow handle. */\n \t\tif (flow != NULL) {\n-\t\t\tclaim_zero(ibv_exp_destroy_flow(flow));\n+\t\t\tclaim_zero(ibv_destroy_flow(flow));\n \t\t\tmlx5_fdir_filter->flow = NULL;\n \t\t}\n \t\tDEBUG(\"%p: flow director filter %p updated\",\n@@ -933,14 +928,14 @@ priv_fdir_filter_delete(struct priv *priv,\n \n \tmlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);\n \tif (mlx5_fdir_filter != NULL) {\n-\t\tstruct ibv_exp_flow *flow = mlx5_fdir_filter->flow;\n+\t\tstruct ibv_flow *flow = mlx5_fdir_filter->flow;\n \n \t\t/* Remove element from list. */\n \t\tLIST_REMOVE(mlx5_fdir_filter, next);\n \n \t\t/* Destroy flow handle. */\n \t\tif (flow != NULL) {\n-\t\t\tclaim_zero(ibv_exp_destroy_flow(flow));\n+\t\t\tclaim_zero(ibv_destroy_flow(flow));\n \t\t\tmlx5_fdir_filter->flow = NULL;\n \t\t}\n \ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 7dd3ebb..dbd241f 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -89,11 +89,11 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,\n \n struct rte_flow {\n \tTAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */\n-\tstruct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */\n-\tstruct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */\n+\tstruct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */\n+\tstruct ibv_rwq_ind_table *ind_table; /**< Indirection table. */\n \tstruct ibv_qp *qp; /**< Verbs queue pair. */\n-\tstruct ibv_exp_flow *ibv_flow; /**< Verbs flow. */\n-\tstruct ibv_exp_wq *wq; /**< Verbs work queue. */\n+\tstruct ibv_flow *ibv_flow; /**< Verbs flow. */\n+\tstruct ibv_wq *wq; /**< Verbs work queue. */\n \tstruct ibv_cq *cq; /**< Verbs completion queue. */\n \tuint16_t rxqs_n; /**< Number of queues in this flow, 0 if drop queue. */\n \tuint32_t mark:1; /**< Set if the flow is marked. 
*/\n@@ -172,7 +172,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {\n \t\t.default_mask = &rte_flow_item_eth_mask,\n \t\t.mask_sz = sizeof(struct rte_flow_item_eth),\n \t\t.convert = mlx5_flow_create_eth,\n-\t\t.dst_sz = sizeof(struct ibv_exp_flow_spec_eth),\n+\t\t.dst_sz = sizeof(struct ibv_flow_spec_eth),\n \t},\n \t[RTE_FLOW_ITEM_TYPE_VLAN] = {\n \t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,\n@@ -201,7 +201,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {\n \t\t.default_mask = &rte_flow_item_ipv4_mask,\n \t\t.mask_sz = sizeof(struct rte_flow_item_ipv4),\n \t\t.convert = mlx5_flow_create_ipv4,\n-\t\t.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4_ext),\n+\t\t.dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext),\n \t},\n \t[RTE_FLOW_ITEM_TYPE_IPV6] = {\n \t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,\n@@ -229,7 +229,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {\n \t\t.default_mask = &rte_flow_item_ipv6_mask,\n \t\t.mask_sz = sizeof(struct rte_flow_item_ipv6),\n \t\t.convert = mlx5_flow_create_ipv6,\n-\t\t.dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6_ext),\n+\t\t.dst_sz = sizeof(struct ibv_flow_spec_ipv6),\n \t},\n \t[RTE_FLOW_ITEM_TYPE_UDP] = {\n \t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),\n@@ -243,7 +243,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {\n \t\t.default_mask = &rte_flow_item_udp_mask,\n \t\t.mask_sz = sizeof(struct rte_flow_item_udp),\n \t\t.convert = mlx5_flow_create_udp,\n-\t\t.dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),\n+\t\t.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),\n \t},\n \t[RTE_FLOW_ITEM_TYPE_TCP] = {\n \t\t.actions = valid_actions,\n@@ -256,7 +256,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {\n \t\t.default_mask = &rte_flow_item_tcp_mask,\n \t\t.mask_sz = sizeof(struct rte_flow_item_tcp),\n \t\t.convert = mlx5_flow_create_tcp,\n-\t\t.dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),\n+\t\t.dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),\n \t},\n \t[RTE_FLOW_ITEM_TYPE_VXLAN] = {\n \t\t.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),\n@@ -267,13 +267,13 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {\n \t\t.default_mask = &rte_flow_item_vxlan_mask,\n \t\t.mask_sz = sizeof(struct rte_flow_item_vxlan),\n \t\t.convert = mlx5_flow_create_vxlan,\n-\t\t.dst_sz = sizeof(struct ibv_exp_flow_spec_tunnel),\n+\t\t.dst_sz = sizeof(struct ibv_flow_spec_tunnel),\n \t},\n };\n \n /** Structure to pass to the conversion function. */\n struct mlx5_flow {\n-\tstruct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */\n+\tstruct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */\n \tunsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */\n \tuint32_t inner; /**< Set once VXLAN is encountered. */\n \tuint64_t hash_fields; /**< Fields that participate in the hash. */\n@@ -281,9 +281,9 @@ struct mlx5_flow {\n \n /** Structure for Drop queue. */\n struct rte_flow_drop {\n-\tstruct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */\n+\tstruct ibv_rwq_ind_table *ind_table; /**< Indirection table. */\n \tstruct ibv_qp *qp; /**< Verbs queue pair. */\n-\tstruct ibv_exp_wq *wq; /**< Verbs work queue. */\n+\tstruct ibv_wq *wq; /**< Verbs work queue. */\n \tstruct ibv_cq *cq; /**< Verbs completion queue. 
*/\n };\n \n@@ -572,9 +572,9 @@ priv_flow_validate(struct priv *priv,\n \t\t}\n \t}\n \tif (action->mark && !flow->ibv_attr && !action->drop)\n-\t\tflow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);\n+\t\tflow->offset += sizeof(struct ibv_flow_spec_action_tag);\n \tif (!flow->ibv_attr && action->drop)\n-\t\tflow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);\n+\t\tflow->offset += sizeof(struct ibv_flow_spec_action_drop);\n \tif (!action->queue && !action->drop) {\n \t\trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,\n \t\t\t\t   NULL, \"no valid action\");\n@@ -606,7 +606,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev,\n {\n \tstruct priv *priv = dev->data->dev_private;\n \tint ret;\n-\tstruct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };\n+\tstruct mlx5_flow flow = { .offset = sizeof(struct ibv_flow_attr) };\n \tstruct mlx5_flow_action action = {\n \t\t.queue = 0,\n \t\t.drop = 0,\n@@ -640,16 +640,16 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,\n \tconst struct rte_flow_item_eth *spec = item->spec;\n \tconst struct rte_flow_item_eth *mask = item->mask;\n \tstruct mlx5_flow *flow = (struct mlx5_flow *)data;\n-\tstruct ibv_exp_flow_spec_eth *eth;\n-\tconst unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);\n+\tstruct ibv_flow_spec_eth *eth;\n+\tconst unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);\n \tunsigned int i;\n \n \t++flow->ibv_attr->num_of_specs;\n \tflow->ibv_attr->priority = 2;\n \tflow->hash_fields = 0;\n \teth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);\n-\t*eth = (struct ibv_exp_flow_spec_eth) {\n-\t\t.type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,\n+\t*eth = (struct ibv_flow_spec_eth) {\n+\t\t.type = flow->inner | IBV_FLOW_SPEC_ETH,\n \t\t.size = eth_size,\n \t};\n \tif (!spec)\n@@ -689,8 +689,8 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,\n \tconst struct rte_flow_item_vlan *spec = item->spec;\n \tconst struct rte_flow_item_vlan *mask = item->mask;\n \tstruct mlx5_flow *flow = (struct mlx5_flow *)data;\n-\tstruct ibv_exp_flow_spec_eth *eth;\n-\tconst unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);\n+\tstruct ibv_flow_spec_eth *eth;\n+\tconst unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);\n \n \teth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);\n \tif (!spec)\n@@ -721,29 +721,29 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,\n \tconst struct rte_flow_item_ipv4 *spec = item->spec;\n \tconst struct rte_flow_item_ipv4 *mask = item->mask;\n \tstruct mlx5_flow *flow = (struct mlx5_flow *)data;\n-\tstruct ibv_exp_flow_spec_ipv4_ext *ipv4;\n-\tunsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext);\n+\tstruct ibv_flow_spec_ipv4_ext *ipv4;\n+\tunsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);\n \n \t++flow->ibv_attr->num_of_specs;\n \tflow->ibv_attr->priority = 1;\n-\tflow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |\n-\t\t\t     IBV_EXP_RX_HASH_DST_IPV4);\n+\tflow->hash_fields = (IBV_RX_HASH_SRC_IPV4 |\n+\t\t\t     IBV_RX_HASH_DST_IPV4);\n \tipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);\n-\t*ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {\n-\t\t.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,\n+\t*ipv4 = (struct ibv_flow_spec_ipv4_ext) {\n+\t\t.type = flow->inner | IBV_FLOW_SPEC_IPV4_EXT,\n \t\t.size = ipv4_size,\n \t};\n \tif (!spec)\n \t\treturn 0;\n \tif (!mask)\n \t\tmask = default_mask;\n-\tipv4->val = (struct ibv_exp_flow_ipv4_ext_filter){\n+\tipv4->val = (struct 
ibv_flow_ipv4_ext_filter){\n \t\t.src_ip = spec->hdr.src_addr,\n \t\t.dst_ip = spec->hdr.dst_addr,\n \t\t.proto = spec->hdr.next_proto_id,\n \t\t.tos = spec->hdr.type_of_service,\n \t};\n-\tipv4->mask = (struct ibv_exp_flow_ipv4_ext_filter){\n+\tipv4->mask = (struct ibv_flow_ipv4_ext_filter){\n \t\t.src_ip = mask->hdr.src_addr,\n \t\t.dst_ip = mask->hdr.dst_addr,\n \t\t.proto = mask->hdr.next_proto_id,\n@@ -775,17 +775,17 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,\n \tconst struct rte_flow_item_ipv6 *spec = item->spec;\n \tconst struct rte_flow_item_ipv6 *mask = item->mask;\n \tstruct mlx5_flow *flow = (struct mlx5_flow *)data;\n-\tstruct ibv_exp_flow_spec_ipv6_ext *ipv6;\n-\tunsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6_ext);\n+\tstruct ibv_flow_spec_ipv6 *ipv6;\n+\tunsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);\n \tunsigned int i;\n \n \t++flow->ibv_attr->num_of_specs;\n \tflow->ibv_attr->priority = 1;\n-\tflow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |\n-\t\t\t     IBV_EXP_RX_HASH_DST_IPV6);\n+\tflow->hash_fields = (IBV_RX_HASH_SRC_IPV6 |\n+\t\t\t     IBV_RX_HASH_DST_IPV6);\n \tipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);\n-\t*ipv6 = (struct ibv_exp_flow_spec_ipv6_ext) {\n-\t\t.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6_EXT,\n+\t*ipv6 = (struct ibv_flow_spec_ipv6) {\n+\t\t.type = flow->inner | IBV_FLOW_SPEC_IPV6,\n \t\t.size = ipv6_size,\n \t};\n \tif (!spec)\n@@ -832,16 +832,16 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,\n \tconst struct rte_flow_item_udp *spec = item->spec;\n \tconst struct rte_flow_item_udp *mask = item->mask;\n \tstruct mlx5_flow *flow = (struct mlx5_flow *)data;\n-\tstruct ibv_exp_flow_spec_tcp_udp *udp;\n-\tunsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);\n+\tstruct ibv_flow_spec_tcp_udp *udp;\n+\tunsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);\n \n \t++flow->ibv_attr->num_of_specs;\n \tflow->ibv_attr->priority = 0;\n-\tflow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_UDP |\n-\t\t\t      IBV_EXP_RX_HASH_DST_PORT_UDP);\n+\tflow->hash_fields |= (IBV_RX_HASH_SRC_PORT_UDP |\n+\t\t\t      IBV_RX_HASH_DST_PORT_UDP);\n \tudp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);\n-\t*udp = (struct ibv_exp_flow_spec_tcp_udp) {\n-\t\t.type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,\n+\t*udp = (struct ibv_flow_spec_tcp_udp) {\n+\t\t.type = flow->inner | IBV_FLOW_SPEC_UDP,\n \t\t.size = udp_size,\n \t};\n \tif (!spec)\n@@ -876,16 +876,16 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,\n \tconst struct rte_flow_item_tcp *spec = item->spec;\n \tconst struct rte_flow_item_tcp *mask = item->mask;\n \tstruct mlx5_flow *flow = (struct mlx5_flow *)data;\n-\tstruct ibv_exp_flow_spec_tcp_udp *tcp;\n-\tunsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);\n+\tstruct ibv_flow_spec_tcp_udp *tcp;\n+\tunsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);\n \n \t++flow->ibv_attr->num_of_specs;\n \tflow->ibv_attr->priority = 0;\n-\tflow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_TCP |\n-\t\t\t      IBV_EXP_RX_HASH_DST_PORT_TCP);\n+\tflow->hash_fields |= (IBV_RX_HASH_SRC_PORT_TCP |\n+\t\t\t      IBV_RX_HASH_DST_PORT_TCP);\n \ttcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);\n-\t*tcp = (struct ibv_exp_flow_spec_tcp_udp) {\n-\t\t.type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,\n+\t*tcp = (struct ibv_flow_spec_tcp_udp) {\n+\t\t.type = flow->inner | IBV_FLOW_SPEC_TCP,\n \t\t.size = tcp_size,\n \t};\n \tif (!spec)\n@@ -920,8 +920,8 @@ mlx5_flow_create_vxlan(const 
struct rte_flow_item *item,\n \tconst struct rte_flow_item_vxlan *spec = item->spec;\n \tconst struct rte_flow_item_vxlan *mask = item->mask;\n \tstruct mlx5_flow *flow = (struct mlx5_flow *)data;\n-\tstruct ibv_exp_flow_spec_tunnel *vxlan;\n-\tunsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel);\n+\tstruct ibv_flow_spec_tunnel *vxlan;\n+\tunsigned int size = sizeof(struct ibv_flow_spec_tunnel);\n \tunion vni {\n \t\tuint32_t vlan_id;\n \t\tuint8_t vni[4];\n@@ -931,11 +931,11 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,\n \tflow->ibv_attr->priority = 0;\n \tid.vni[0] = 0;\n \tvxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);\n-\t*vxlan = (struct ibv_exp_flow_spec_tunnel) {\n-\t\t.type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL,\n+\t*vxlan = (struct ibv_flow_spec_tunnel) {\n+\t\t.type = flow->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,\n \t\t.size = size,\n \t};\n-\tflow->inner = IBV_EXP_FLOW_SPEC_INNER;\n+\tflow->inner = IBV_FLOW_SPEC_INNER;\n \tif (!spec)\n \t\treturn 0;\n \tif (!mask)\n@@ -960,12 +960,12 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,\n static int\n mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)\n {\n-\tstruct ibv_exp_flow_spec_action_tag *tag;\n-\tunsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag);\n+\tstruct ibv_flow_spec_action_tag *tag;\n+\tunsigned int size = sizeof(struct ibv_flow_spec_action_tag);\n \n \ttag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);\n-\t*tag = (struct ibv_exp_flow_spec_action_tag){\n-\t\t.type = IBV_EXP_FLOW_SPEC_ACTION_TAG,\n+\t*tag = (struct ibv_flow_spec_action_tag){\n+\t\t.type = IBV_FLOW_SPEC_ACTION_TAG,\n \t\t.size = size,\n \t\t.tag_id = mlx5_flow_mark_set(mark_id),\n \t};\n@@ -992,8 +992,8 @@ priv_flow_create_action_queue_drop(struct priv *priv,\n \t\t\t\t   struct rte_flow_error *error)\n {\n \tstruct rte_flow *rte_flow;\n-\tstruct ibv_exp_flow_spec_action_drop *drop;\n-\tunsigned int size = sizeof(struct ibv_exp_flow_spec_action_drop);\n+\tstruct ibv_flow_spec_action_drop *drop;\n+\tunsigned int size = sizeof(struct ibv_flow_spec_action_drop);\n \n \tassert(priv->pd);\n \tassert(priv->ctx);\n@@ -1005,18 +1005,18 @@ priv_flow_create_action_queue_drop(struct priv *priv,\n \t}\n \trte_flow->drop = 1;\n \tdrop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);\n-\t*drop = (struct ibv_exp_flow_spec_action_drop){\n-\t\t\t.type = IBV_EXP_FLOW_SPEC_ACTION_DROP,\n+\t*drop = (struct ibv_flow_spec_action_drop){\n+\t\t\t.type = IBV_FLOW_SPEC_ACTION_DROP,\n \t\t\t.size = size,\n \t};\n \t++flow->ibv_attr->num_of_specs;\n-\tflow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);\n+\tflow->offset += sizeof(struct ibv_flow_spec_action_drop);\n \trte_flow->ibv_attr = flow->ibv_attr;\n \tif (!priv->started)\n \t\treturn rte_flow;\n \trte_flow->qp = priv->flow_drop_queue->qp;\n-\trte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,\n-\t\t\t\t\t\t rte_flow->ibv_attr);\n+\trte_flow->ibv_flow = ibv_create_flow(rte_flow->qp,\n+\t\t\t\t\t     rte_flow->ibv_attr);\n \tif (!rte_flow->ibv_flow) {\n \t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,\n \t\t\t\t   NULL, \"flow rule creation failure\");\n@@ -1054,7 +1054,7 @@ priv_flow_create_action_queue(struct priv *priv,\n \tunsigned int i;\n \tunsigned int j;\n \tconst unsigned int wqs_n = 1 << log2above(action->queues_n);\n-\tstruct ibv_exp_wq *wqs[wqs_n];\n+\tstruct ibv_wq *wqs[wqs_n];\n \n \tassert(priv->pd);\n \tassert(priv->ctx);\n@@ -1085,10 +1085,9 @@ priv_flow_create_action_queue(struct priv 
*priv,\n \trte_flow->mark = action->mark;\n \trte_flow->ibv_attr = flow->ibv_attr;\n \trte_flow->hash_fields = flow->hash_fields;\n-\trte_flow->ind_table = ibv_exp_create_rwq_ind_table(\n+\trte_flow->ind_table = ibv_create_rwq_ind_table(\n \t\tpriv->ctx,\n-\t\t&(struct ibv_exp_rwq_ind_table_init_attr){\n-\t\t\t.pd = priv->pd,\n+\t\t&(struct ibv_rwq_ind_table_init_attr){\n \t\t\t.log_ind_tbl_size = log2above(action->queues_n),\n \t\t\t.ind_tbl = wqs,\n \t\t\t.comp_mask = 0,\n@@ -1098,24 +1097,23 @@ priv_flow_create_action_queue(struct priv *priv,\n \t\t\t\t   NULL, \"cannot allocate indirection table\");\n \t\tgoto error;\n \t}\n-\trte_flow->qp = ibv_exp_create_qp(\n+\trte_flow->qp = ibv_create_qp_ex(\n \t\tpriv->ctx,\n-\t\t&(struct ibv_exp_qp_init_attr){\n+\t\t&(struct ibv_qp_init_attr_ex){\n \t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n \t\t\t.comp_mask =\n-\t\t\t\tIBV_EXP_QP_INIT_ATTR_PD |\n-\t\t\t\tIBV_EXP_QP_INIT_ATTR_PORT |\n-\t\t\t\tIBV_EXP_QP_INIT_ATTR_RX_HASH,\n-\t\t\t.pd = priv->pd,\n-\t\t\t.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){\n+\t\t\t\tIBV_QP_INIT_ATTR_PD |\n+\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE |\n+\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n+\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n \t\t\t\t.rx_hash_function =\n-\t\t\t\t\tIBV_EXP_RX_HASH_FUNC_TOEPLITZ,\n+\t\t\t\t\tIBV_RX_HASH_FUNC_TOEPLITZ,\n \t\t\t\t.rx_hash_key_len = rss_hash_default_key_len,\n \t\t\t\t.rx_hash_key = rss_hash_default_key,\n \t\t\t\t.rx_hash_fields_mask = rte_flow->hash_fields,\n-\t\t\t\t.rwq_ind_tbl = rte_flow->ind_table,\n \t\t\t},\n-\t\t\t.port_num = priv->port,\n+\t\t\t.rwq_ind_tbl = rte_flow->ind_table,\n+\t\t\t.pd = priv->pd\n \t\t});\n \tif (!rte_flow->qp) {\n \t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,\n@@ -1124,8 +1122,8 @@ priv_flow_create_action_queue(struct priv *priv,\n \t}\n \tif (!priv->started)\n \t\treturn rte_flow;\n-\trte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,\n-\t\t\t\t\t\t rte_flow->ibv_attr);\n+\trte_flow->ibv_flow = ibv_create_flow(rte_flow->qp,\n+\t\t\t\t\t     rte_flow->ibv_attr);\n \tif (!rte_flow->ibv_flow) {\n \t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,\n \t\t\t\t   NULL, \"flow rule creation failure\");\n@@ -1137,7 +1135,7 @@ priv_flow_create_action_queue(struct priv *priv,\n \tif (rte_flow->qp)\n \t\tibv_destroy_qp(rte_flow->qp);\n \tif (rte_flow->ind_table)\n-\t\tibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);\n+\t\tibv_destroy_rwq_ind_table(rte_flow->ind_table);\n \trte_free(rte_flow);\n \treturn NULL;\n }\n@@ -1167,7 +1165,7 @@ priv_flow_create(struct priv *priv,\n \t\t struct rte_flow_error *error)\n {\n \tstruct rte_flow *rte_flow;\n-\tstruct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };\n+\tstruct mlx5_flow flow = { .offset = sizeof(struct ibv_flow_attr), };\n \tstruct mlx5_flow_action action = {\n \t\t.queue = 0,\n \t\t.drop = 0,\n@@ -1182,20 +1180,19 @@ priv_flow_create(struct priv *priv,\n \tif (err)\n \t\tgoto exit;\n \tflow.ibv_attr = rte_malloc(__func__, flow.offset, 0);\n-\tflow.offset = sizeof(struct ibv_exp_flow_attr);\n+\tflow.offset = sizeof(struct ibv_flow_attr);\n \tif (!flow.ibv_attr) {\n \t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,\n \t\t\t\t   NULL, \"cannot allocate ibv_attr memory\");\n \t\tgoto exit;\n \t}\n-\t*flow.ibv_attr = (struct ibv_exp_flow_attr){\n-\t\t.type = IBV_EXP_FLOW_ATTR_NORMAL,\n-\t\t.size = sizeof(struct ibv_exp_flow_attr),\n+\t*flow.ibv_attr = (struct ibv_flow_attr){\n+\t\t.type = IBV_FLOW_ATTR_NORMAL,\n+\t\t.size = sizeof(struct 
ibv_flow_attr),\n \t\t.priority = attr->priority,\n \t\t.num_of_specs = 0,\n \t\t.port = 0,\n \t\t.flags = 0,\n-\t\t.reserved = 0,\n \t};\n \tflow.inner = 0;\n \tflow.hash_fields = 0;\n@@ -1203,7 +1200,7 @@ priv_flow_create(struct priv *priv,\n \t\t\t\t      error, &flow, &action));\n \tif (action.mark && !action.drop) {\n \t\tmlx5_flow_create_flag_mark(&flow, action.mark_id);\n-\t\tflow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);\n+\t\tflow.offset += sizeof(struct ibv_flow_spec_action_tag);\n \t}\n \tif (action.drop)\n \t\trte_flow =\n@@ -1259,13 +1256,13 @@ priv_flow_destroy(struct priv *priv,\n {\n \tTAILQ_REMOVE(&priv->flows, flow, next);\n \tif (flow->ibv_flow)\n-\t\tclaim_zero(ibv_exp_destroy_flow(flow->ibv_flow));\n+\t\tclaim_zero(ibv_destroy_flow(flow->ibv_flow));\n \tif (flow->drop)\n \t\tgoto free;\n \tif (flow->qp)\n \t\tclaim_zero(ibv_destroy_qp(flow->qp));\n \tif (flow->ind_table)\n-\t\tclaim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));\n+\t\tclaim_zero(ibv_destroy_rwq_ind_table(flow->ind_table));\n \tif (flow->mark) {\n \t\tstruct rte_flow *tmp;\n \t\tstruct rxq *rxq;\n@@ -1381,19 +1378,16 @@ priv_flow_create_drop_queue(struct priv *priv)\n \t\tWARN(\"cannot allocate memory for drop queue\");\n \t\tgoto error;\n \t}\n-\tfdq->cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,\n-\t\t\t&(struct ibv_exp_cq_init_attr){\n-\t\t\t.comp_mask = 0,\n-\t\t\t});\n+\tfdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);\n \tif (!fdq->cq) {\n \t\tWARN(\"cannot allocate CQ for drop queue\");\n \t\tgoto error;\n \t}\n-\tfdq->wq = ibv_exp_create_wq(priv->ctx,\n-\t\t\t&(struct ibv_exp_wq_init_attr){\n-\t\t\t.wq_type = IBV_EXP_WQT_RQ,\n-\t\t\t.max_recv_wr = 1,\n-\t\t\t.max_recv_sge = 1,\n+\tfdq->wq = ibv_create_wq(priv->ctx,\n+\t\t\t&(struct ibv_wq_init_attr){\n+\t\t\t.wq_type = IBV_WQT_RQ,\n+\t\t\t.max_wr = 1,\n+\t\t\t.max_sge = 1,\n \t\t\t.pd = priv->pd,\n \t\t\t.cq = fdq->cq,\n \t\t\t});\n@@ -1401,9 +1395,8 @@ priv_flow_create_drop_queue(struct priv *priv)\n \t\tWARN(\"cannot allocate WQ for drop queue\");\n \t\tgoto error;\n \t}\n-\tfdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,\n-\t\t\t&(struct ibv_exp_rwq_ind_table_init_attr){\n-\t\t\t.pd = priv->pd,\n+\tfdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,\n+\t\t\t&(struct ibv_rwq_ind_table_init_attr){\n \t\t\t.log_ind_tbl_size = 0,\n \t\t\t.ind_tbl = &fdq->wq,\n \t\t\t.comp_mask = 0,\n@@ -1412,24 +1405,23 @@ priv_flow_create_drop_queue(struct priv *priv)\n \t\tWARN(\"cannot allocate indirection table for drop queue\");\n \t\tgoto error;\n \t}\n-\tfdq->qp = ibv_exp_create_qp(priv->ctx,\n-\t\t&(struct ibv_exp_qp_init_attr){\n+\tfdq->qp = ibv_create_qp_ex(priv->ctx,\n+\t\t&(struct ibv_qp_init_attr_ex){\n \t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n \t\t\t.comp_mask =\n-\t\t\t\tIBV_EXP_QP_INIT_ATTR_PD |\n-\t\t\t\tIBV_EXP_QP_INIT_ATTR_PORT |\n-\t\t\t\tIBV_EXP_QP_INIT_ATTR_RX_HASH,\n-\t\t\t.pd = priv->pd,\n-\t\t\t.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){\n+\t\t\t\tIBV_QP_INIT_ATTR_PD |\n+\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE |\n+\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n+\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n \t\t\t\t.rx_hash_function =\n-\t\t\t\t\tIBV_EXP_RX_HASH_FUNC_TOEPLITZ,\n+\t\t\t\t\tIBV_RX_HASH_FUNC_TOEPLITZ,\n \t\t\t\t.rx_hash_key_len = rss_hash_default_key_len,\n \t\t\t\t.rx_hash_key = rss_hash_default_key,\n \t\t\t\t.rx_hash_fields_mask = 0,\n-\t\t\t\t.rwq_ind_tbl = fdq->ind_table,\n \t\t\t\t},\n-\t\t\t.port_num = priv->port,\n-\t\t\t});\n+\t\t\t.rwq_ind_tbl = fdq->ind_table,\n+\t\t\t.pd = 
priv->pd\n+\t\t});\n \tif (!fdq->qp) {\n \t\tWARN(\"cannot allocate QP for drop queue\");\n \t\tgoto error;\n@@ -1440,9 +1432,9 @@ priv_flow_create_drop_queue(struct priv *priv)\n \tif (fdq->qp)\n \t\tclaim_zero(ibv_destroy_qp(fdq->qp));\n \tif (fdq->ind_table)\n-\t\tclaim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));\n+\t\tclaim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));\n \tif (fdq->wq)\n-\t\tclaim_zero(ibv_exp_destroy_wq(fdq->wq));\n+\t\tclaim_zero(ibv_destroy_wq(fdq->wq));\n \tif (fdq->cq)\n \t\tclaim_zero(ibv_destroy_cq(fdq->cq));\n \tif (fdq)\n@@ -1467,9 +1459,9 @@ priv_flow_delete_drop_queue(struct priv *priv)\n \tif (fdq->qp)\n \t\tclaim_zero(ibv_destroy_qp(fdq->qp));\n \tif (fdq->ind_table)\n-\t\tclaim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));\n+\t\tclaim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));\n \tif (fdq->wq)\n-\t\tclaim_zero(ibv_exp_destroy_wq(fdq->wq));\n+\t\tclaim_zero(ibv_destroy_wq(fdq->wq));\n \tif (fdq->cq)\n \t\tclaim_zero(ibv_destroy_cq(fdq->cq));\n \trte_free(fdq);\n@@ -1490,7 +1482,7 @@ priv_flow_stop(struct priv *priv)\n \tstruct rte_flow *flow;\n \n \tTAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {\n-\t\tclaim_zero(ibv_exp_destroy_flow(flow->ibv_flow));\n+\t\tclaim_zero(ibv_destroy_flow(flow->ibv_flow));\n \t\tflow->ibv_flow = NULL;\n \t\tif (flow->mark) {\n \t\t\tunsigned int n;\n@@ -1528,7 +1520,7 @@ priv_flow_start(struct priv *priv)\n \t\t\tqp = priv->flow_drop_queue->qp;\n \t\telse\n \t\t\tqp = flow->qp;\n-\t\tflow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr);\n+\t\tflow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);\n \t\tif (!flow->ibv_flow) {\n \t\t\tDEBUG(\"Flow %p cannot be applied\", (void *)flow);\n \t\t\trte_errno = EINVAL;\ndiff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c\nindex b3c3fa2..086af58 100644\n--- a/drivers/net/mlx5/mlx5_mac.c\n+++ b/drivers/net/mlx5/mlx5_mac.c\n@@ -112,8 +112,8 @@ hash_rxq_del_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index,\n \t      (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],\n \t      mac_index,\n \t      vlan_index);\n-\tclaim_zero(ibv_exp_destroy_flow(hash_rxq->mac_flow\n-\t\t\t\t\t[mac_index][vlan_index]));\n+\tclaim_zero(ibv_destroy_flow(hash_rxq->mac_flow\n+\t\t\t\t    [mac_index][vlan_index]));\n \thash_rxq->mac_flow[mac_index][vlan_index] = NULL;\n }\n \n@@ -231,14 +231,14 @@ static int\n hash_rxq_add_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index,\n \t\t      unsigned int vlan_index)\n {\n-\tstruct ibv_exp_flow *flow;\n+\tstruct ibv_flow *flow;\n \tstruct priv *priv = hash_rxq->priv;\n \tconst uint8_t (*mac)[ETHER_ADDR_LEN] =\n \t\t\t(const uint8_t (*)[ETHER_ADDR_LEN])\n \t\t\tpriv->mac[mac_index].addr_bytes;\n \tFLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));\n-\tstruct ibv_exp_flow_attr *attr = &data->attr;\n-\tstruct ibv_exp_flow_spec_eth *spec = &data->spec;\n+\tstruct ibv_flow_attr *attr = &data->attr;\n+\tstruct ibv_flow_spec_eth *spec = &data->spec;\n \tunsigned int vlan_enabled = !!priv->vlan_filter_n;\n \tunsigned int vlan_id = priv->vlan_filter[vlan_index];\n \n@@ -253,10 +253,10 @@ hash_rxq_add_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index,\n \tassert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);\n \tpriv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);\n \t/* The first specification must be Ethernet. 
*/\n-\tassert(spec->type == IBV_EXP_FLOW_SPEC_ETH);\n+\tassert(spec->type == IBV_FLOW_SPEC_ETH);\n \tassert(spec->size == sizeof(*spec));\n-\t*spec = (struct ibv_exp_flow_spec_eth){\n-\t\t.type = IBV_EXP_FLOW_SPEC_ETH,\n+\t*spec = (struct ibv_flow_spec_eth){\n+\t\t.type = IBV_FLOW_SPEC_ETH,\n \t\t.size = sizeof(*spec),\n \t\t.val = {\n \t\t\t.dst_mac = {\n@@ -284,7 +284,7 @@ hash_rxq_add_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index,\n \t      vlan_id);\n \t/* Create related flow. */\n \terrno = 0;\n-\tflow = ibv_exp_create_flow(hash_rxq->qp, attr);\n+\tflow = ibv_create_flow(hash_rxq->qp, attr);\n \tif (flow == NULL) {\n \t\t/* It's not clear whether errno is always set in this case. */\n \t\tERROR(\"%p: flow configuration failed, errno=%d: %s\",\ndiff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h\nindex e00be81..2de310b 100644\n--- a/drivers/net/mlx5/mlx5_prm.h\n+++ b/drivers/net/mlx5/mlx5_prm.h\n@@ -41,7 +41,7 @@\n #ifdef PEDANTIC\n #pragma GCC diagnostic ignored \"-Wpedantic\"\n #endif\n-#include <infiniband/mlx5_hw.h>\n+#include <infiniband/mlx5dv.h>\n #ifdef PEDANTIC\n #pragma GCC diagnostic error \"-Wpedantic\"\n #endif\n@@ -244,6 +244,46 @@ struct mlx5_cqe {\n \tuint8_t op_own;\n };\n \n+/* Adding direct verbs to data-path. */\n+\n+/* CQ sequence number mask. */\n+#define MLX5_CQ_SQN_MASK 0x3\n+\n+/* CQ sequence number index. */\n+#define MLX5_CQ_SQN_OFFSET 28\n+\n+/* CQ doorbell index mask. */\n+#define MLX5_CI_MASK 0xffffff\n+\n+/* CQ doorbell offset. */\n+#define MLX5_CQ_ARM_DB 1\n+\n+/* CQ doorbell offset. */\n+#define MLX5_CQ_DOORBELL 0x20\n+\n+/* CQE format value. */\n+#define MLX5_COMPRESSED 0x3\n+\n+/* CQE format mask. */\n+#define MLX5E_CQE_FORMAT_MASK 0xc\n+\n+/* MPW opcode. */\n+#define MLX5_OPC_MOD_MPW 0x01\n+\n+/* Compressed Rx CQE structure. */\n+struct mlx5_mini_cqe8 {\n+\tunion {\n+\t\tuint32_t rx_hash_result;\n+\t\tuint32_t checksum;\n+\t\tstruct {\n+\t\t\tuint16_t wqe_counter;\n+\t\t\tuint8_t  s_wqe_opcode;\n+\t\t\tuint8_t  reserved;\n+\t\t} s_wqe_info;\n+\t};\n+\tuint32_t byte_cnt;\n+};\n+\n /**\n  * Convert a user mark to flow mark.\n  *\ndiff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c\nindex db2e05b..e9ea2aa 100644\n--- a/drivers/net/mlx5/mlx5_rxmode.c\n+++ b/drivers/net/mlx5/mlx5_rxmode.c\n@@ -122,10 +122,10 @@ hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,\n \t\t\t\t  unsigned int vlan_index)\n {\n \tstruct priv *priv = hash_rxq->priv;\n-\tstruct ibv_exp_flow *flow;\n+\tstruct ibv_flow *flow;\n \tFLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));\n-\tstruct ibv_exp_flow_attr *attr = &data->attr;\n-\tstruct ibv_exp_flow_spec_eth *spec = &data->spec;\n+\tstruct ibv_flow_attr *attr = &data->attr;\n+\tstruct ibv_flow_spec_eth *spec = &data->spec;\n \tconst uint8_t *mac;\n \tconst uint8_t *mask;\n \tunsigned int vlan_enabled = (priv->vlan_filter_n &&\n@@ -146,13 +146,13 @@ hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,\n \tassert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);\n \tpriv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);\n \t/* The first specification must be Ethernet. 
*/\n-\tassert(spec->type == IBV_EXP_FLOW_SPEC_ETH);\n+\tassert(spec->type == IBV_FLOW_SPEC_ETH);\n \tassert(spec->size == sizeof(*spec));\n \n \tmac = special_flow_init[flow_type].dst_mac_val;\n \tmask = special_flow_init[flow_type].dst_mac_mask;\n-\t*spec = (struct ibv_exp_flow_spec_eth){\n-\t\t.type = IBV_EXP_FLOW_SPEC_ETH,\n+\t*spec = (struct ibv_flow_spec_eth){\n+\t\t.type = IBV_FLOW_SPEC_ETH,\n \t\t.size = sizeof(*spec),\n \t\t.val = {\n \t\t\t.dst_mac = {\n@@ -175,7 +175,7 @@ hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,\n \t};\n \n \terrno = 0;\n-\tflow = ibv_exp_create_flow(hash_rxq->qp, attr);\n+\tflow = ibv_create_flow(hash_rxq->qp, attr);\n \tif (flow == NULL) {\n \t\t/* It's not clear whether errno is always set in this case. */\n \t\tERROR(\"%p: flow configuration failed, errno=%d: %s\",\n@@ -207,12 +207,12 @@ hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq,\n \t\t\t\t   enum hash_rxq_flow_type flow_type,\n \t\t\t\t   unsigned int vlan_index)\n {\n-\tstruct ibv_exp_flow *flow =\n+\tstruct ibv_flow *flow =\n \t\thash_rxq->special_flow[flow_type][vlan_index];\n \n \tif (flow == NULL)\n \t\treturn;\n-\tclaim_zero(ibv_exp_destroy_flow(flow));\n+\tclaim_zero(ibv_destroy_flow(flow));\n \thash_rxq->special_flow[flow_type][vlan_index] = NULL;\n \tDEBUG(\"%p: special flow %s (index %d) VLAN %u (index %u) disabled\",\n \t      (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 437dc02..22448c9 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -44,8 +44,7 @@\n #pragma GCC diagnostic ignored \"-Wpedantic\"\n #endif\n #include <infiniband/verbs.h>\n-#include <infiniband/arch.h>\n-#include <infiniband/mlx5_hw.h>\n+#include <infiniband/mlx5dv.h>\n #ifdef PEDANTIC\n #pragma GCC diagnostic error \"-Wpedantic\"\n #endif\n@@ -56,6 +55,7 @@\n #include <rte_common.h>\n #include <rte_interrupts.h>\n #include <rte_debug.h>\n+#include <rte_io.h>\n \n #include \"mlx5.h\"\n #include \"mlx5_rxtx.h\"\n@@ -66,77 +66,77 @@\n /* Initialization data for hash RX queues. 
*/\n const struct hash_rxq_init hash_rxq_init[] = {\n \t[HASH_RXQ_TCPV4] = {\n-\t\t.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |\n-\t\t\t\tIBV_EXP_RX_HASH_DST_IPV4 |\n-\t\t\t\tIBV_EXP_RX_HASH_SRC_PORT_TCP |\n-\t\t\t\tIBV_EXP_RX_HASH_DST_PORT_TCP),\n+\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV4 |\n+\t\t\t\tIBV_RX_HASH_DST_IPV4 |\n+\t\t\t\tIBV_RX_HASH_SRC_PORT_TCP |\n+\t\t\t\tIBV_RX_HASH_DST_PORT_TCP),\n \t\t.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,\n \t\t.flow_priority = 0,\n \t\t.flow_spec.tcp_udp = {\n-\t\t\t.type = IBV_EXP_FLOW_SPEC_TCP,\n+\t\t\t.type = IBV_FLOW_SPEC_TCP,\n \t\t\t.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),\n \t\t},\n \t\t.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],\n \t},\n \t[HASH_RXQ_UDPV4] = {\n-\t\t.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |\n-\t\t\t\tIBV_EXP_RX_HASH_DST_IPV4 |\n-\t\t\t\tIBV_EXP_RX_HASH_SRC_PORT_UDP |\n-\t\t\t\tIBV_EXP_RX_HASH_DST_PORT_UDP),\n+\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV4 |\n+\t\t\t\tIBV_RX_HASH_DST_IPV4 |\n+\t\t\t\tIBV_RX_HASH_SRC_PORT_UDP |\n+\t\t\t\tIBV_RX_HASH_DST_PORT_UDP),\n \t\t.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,\n \t\t.flow_priority = 0,\n \t\t.flow_spec.tcp_udp = {\n-\t\t\t.type = IBV_EXP_FLOW_SPEC_UDP,\n+\t\t\t.type = IBV_FLOW_SPEC_UDP,\n \t\t\t.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),\n \t\t},\n \t\t.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],\n \t},\n \t[HASH_RXQ_IPV4] = {\n-\t\t.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |\n-\t\t\t\tIBV_EXP_RX_HASH_DST_IPV4),\n+\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV4 |\n+\t\t\t\tIBV_RX_HASH_DST_IPV4),\n \t\t.dpdk_rss_hf = (ETH_RSS_IPV4 |\n \t\t\t\tETH_RSS_FRAG_IPV4),\n \t\t.flow_priority = 1,\n \t\t.flow_spec.ipv4 = {\n-\t\t\t.type = IBV_EXP_FLOW_SPEC_IPV4,\n+\t\t\t.type = IBV_FLOW_SPEC_IPV4,\n \t\t\t.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),\n \t\t},\n \t\t.underlayer = &hash_rxq_init[HASH_RXQ_ETH],\n \t},\n \t[HASH_RXQ_TCPV6] = {\n-\t\t.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |\n-\t\t\t\tIBV_EXP_RX_HASH_DST_IPV6 |\n-\t\t\t\tIBV_EXP_RX_HASH_SRC_PORT_TCP |\n-\t\t\t\tIBV_EXP_RX_HASH_DST_PORT_TCP),\n+\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV6 |\n+\t\t\t\tIBV_RX_HASH_DST_IPV6 |\n+\t\t\t\tIBV_RX_HASH_SRC_PORT_TCP |\n+\t\t\t\tIBV_RX_HASH_DST_PORT_TCP),\n \t\t.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,\n \t\t.flow_priority = 0,\n \t\t.flow_spec.tcp_udp = {\n-\t\t\t.type = IBV_EXP_FLOW_SPEC_TCP,\n+\t\t\t.type = IBV_FLOW_SPEC_TCP,\n \t\t\t.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),\n \t\t},\n \t\t.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],\n \t},\n \t[HASH_RXQ_UDPV6] = {\n-\t\t.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |\n-\t\t\t\tIBV_EXP_RX_HASH_DST_IPV6 |\n-\t\t\t\tIBV_EXP_RX_HASH_SRC_PORT_UDP |\n-\t\t\t\tIBV_EXP_RX_HASH_DST_PORT_UDP),\n+\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV6 |\n+\t\t\t\tIBV_RX_HASH_DST_IPV6 |\n+\t\t\t\tIBV_RX_HASH_SRC_PORT_UDP |\n+\t\t\t\tIBV_RX_HASH_DST_PORT_UDP),\n \t\t.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,\n \t\t.flow_priority = 0,\n \t\t.flow_spec.tcp_udp = {\n-\t\t\t.type = IBV_EXP_FLOW_SPEC_UDP,\n+\t\t\t.type = IBV_FLOW_SPEC_UDP,\n \t\t\t.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),\n \t\t},\n \t\t.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],\n \t},\n \t[HASH_RXQ_IPV6] = {\n-\t\t.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |\n-\t\t\t\tIBV_EXP_RX_HASH_DST_IPV6),\n+\t\t.hash_fields = (IBV_RX_HASH_SRC_IPV6 |\n+\t\t\t\tIBV_RX_HASH_DST_IPV6),\n \t\t.dpdk_rss_hf = (ETH_RSS_IPV6 |\n \t\t\t\tETH_RSS_FRAG_IPV6),\n \t\t.flow_priority = 1,\n \t\t.flow_spec.ipv6 = {\n-\t\t\t.type = IBV_EXP_FLOW_SPEC_IPV6,\n+\t\t\t.type = IBV_FLOW_SPEC_IPV6,\n \t\t\t.size = 
sizeof(hash_rxq_init[0].flow_spec.ipv6),\n \t\t},\n \t\t.underlayer = &hash_rxq_init[HASH_RXQ_ETH],\n@@ -146,7 +146,7 @@ const struct hash_rxq_init hash_rxq_init[] = {\n \t\t.dpdk_rss_hf = 0,\n \t\t.flow_priority = 2,\n \t\t.flow_spec.eth = {\n-\t\t\t.type = IBV_EXP_FLOW_SPEC_ETH,\n+\t\t\t.type = IBV_FLOW_SPEC_ETH,\n \t\t\t.size = sizeof(hash_rxq_init[0].flow_spec.eth),\n \t\t},\n \t\t.underlayer = NULL,\n@@ -215,7 +215,7 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);\n  *   Total size of the flow attribute buffer. No errors are defined.\n  */\n size_t\n-priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,\n+priv_flow_attr(struct priv *priv, struct ibv_flow_attr *flow_attr,\n \t       size_t flow_attr_size, enum hash_rxq_type type)\n {\n \tsize_t offset = sizeof(*flow_attr);\n@@ -231,8 +231,8 @@ priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,\n \t\treturn offset;\n \tflow_attr_size = offset;\n \tinit = &hash_rxq_init[type];\n-\t*flow_attr = (struct ibv_exp_flow_attr){\n-\t\t.type = IBV_EXP_FLOW_ATTR_NORMAL,\n+\t*flow_attr = (struct ibv_flow_attr){\n+\t\t.type = IBV_FLOW_ATTR_NORMAL,\n \t\t/* Priorities < 3 are reserved for flow director. */\n \t\t.priority = init->flow_priority + 3,\n \t\t.num_of_specs = 0,\n@@ -338,13 +338,13 @@ priv_make_ind_table_init(struct priv *priv,\n int\n priv_create_hash_rxqs(struct priv *priv)\n {\n-\tstruct ibv_exp_wq *wqs[priv->reta_idx_n];\n+\tstruct ibv_wq *wqs[priv->reta_idx_n];\n \tstruct ind_table_init ind_table_init[IND_TABLE_INIT_N];\n \tunsigned int ind_tables_n =\n \t\tpriv_make_ind_table_init(priv, &ind_table_init);\n \tunsigned int hash_rxqs_n = 0;\n \tstruct hash_rxq (*hash_rxqs)[] = NULL;\n-\tstruct ibv_exp_rwq_ind_table *(*ind_tables)[] = NULL;\n+\tstruct ibv_rwq_ind_table *(*ind_tables)[] = NULL;\n \tunsigned int i;\n \tunsigned int j;\n \tunsigned int k;\n@@ -395,21 +395,20 @@ priv_create_hash_rxqs(struct priv *priv)\n \t\tgoto error;\n \t}\n \tfor (i = 0; (i != ind_tables_n); ++i) {\n-\t\tstruct ibv_exp_rwq_ind_table_init_attr ind_init_attr = {\n-\t\t\t.pd = priv->pd,\n+\t\tstruct ibv_rwq_ind_table_init_attr ind_init_attr = {\n \t\t\t.log_ind_tbl_size = 0, /* Set below. 
*/\n \t\t\t.ind_tbl = wqs,\n \t\t\t.comp_mask = 0,\n \t\t};\n \t\tunsigned int ind_tbl_size = ind_table_init[i].max_size;\n-\t\tstruct ibv_exp_rwq_ind_table *ind_table;\n+\t\tstruct ibv_rwq_ind_table *ind_table;\n \n \t\tif (priv->reta_idx_n < ind_tbl_size)\n \t\t\tind_tbl_size = priv->reta_idx_n;\n \t\tind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);\n \t\terrno = 0;\n-\t\tind_table = ibv_exp_create_rwq_ind_table(priv->ctx,\n-\t\t\t\t\t\t\t &ind_init_attr);\n+\t\tind_table = ibv_create_rwq_ind_table(priv->ctx,\n+\t\t\t\t\t\t     &ind_init_attr);\n \t\tif (ind_table != NULL) {\n \t\t\t(*ind_tables)[i] = ind_table;\n \t\t\tcontinue;\n@@ -437,8 +436,8 @@ priv_create_hash_rxqs(struct priv *priv)\n \t\t\thash_rxq_type_from_pos(&ind_table_init[j], k);\n \t\tstruct rte_eth_rss_conf *priv_rss_conf =\n \t\t\t(*priv->rss_conf)[type];\n-\t\tstruct ibv_exp_rx_hash_conf hash_conf = {\n-\t\t\t.rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,\n+\t\tstruct ibv_rx_hash_conf hash_conf = {\n+\t\t\t.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,\n \t\t\t.rx_hash_key_len = (priv_rss_conf ?\n \t\t\t\t\t    priv_rss_conf->rss_key_len :\n \t\t\t\t\t    rss_hash_default_key_len),\n@@ -446,23 +445,22 @@ priv_create_hash_rxqs(struct priv *priv)\n \t\t\t\t\tpriv_rss_conf->rss_key :\n \t\t\t\t\trss_hash_default_key),\n \t\t\t.rx_hash_fields_mask = hash_rxq_init[type].hash_fields,\n-\t\t\t.rwq_ind_tbl = (*ind_tables)[j],\n \t\t};\n-\t\tstruct ibv_exp_qp_init_attr qp_init_attr = {\n-\t\t\t.max_inl_recv = 0, /* Currently not supported. */\n+\t\tstruct ibv_qp_init_attr_ex qp_init_attr = {\n \t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n-\t\t\t.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |\n-\t\t\t\t      IBV_EXP_QP_INIT_ATTR_RX_HASH),\n+\t\t\t.comp_mask = (IBV_QP_INIT_ATTR_PD |\n+\t\t\t\t      IBV_QP_INIT_ATTR_IND_TABLE |\n+\t\t\t\t      IBV_QP_INIT_ATTR_RX_HASH),\n+\t\t\t.rx_hash_conf = hash_conf,\n+\t\t\t.rwq_ind_tbl = (*ind_tables)[j],\n \t\t\t.pd = priv->pd,\n-\t\t\t.rx_hash_conf = &hash_conf,\n-\t\t\t.port_num = priv->port,\n \t\t};\n \n \t\tDEBUG(\"using indirection table %u for hash RX queue %u type %d\",\n \t\t      j, i, type);\n \t\t*hash_rxq = (struct hash_rxq){\n \t\t\t.priv = priv,\n-\t\t\t.qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr),\n+\t\t\t.qp = ibv_create_qp_ex(priv->ctx, &qp_init_attr),\n \t\t\t.type = type,\n \t\t};\n \t\tif (hash_rxq->qp == NULL) {\n@@ -497,12 +495,12 @@ priv_create_hash_rxqs(struct priv *priv)\n \t}\n \tif (ind_tables != NULL) {\n \t\tfor (j = 0; (j != ind_tables_n); ++j) {\n-\t\t\tstruct ibv_exp_rwq_ind_table *ind_table =\n+\t\t\tstruct ibv_rwq_ind_table *ind_table =\n \t\t\t\t(*ind_tables)[j];\n \n \t\t\tif (ind_table == NULL)\n \t\t\t\tcontinue;\n-\t\t\tclaim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));\n+\t\t\tclaim_zero(ibv_destroy_rwq_ind_table(ind_table));\n \t\t}\n \t\trte_free(ind_tables);\n \t}\n@@ -547,11 +545,11 @@ priv_destroy_hash_rxqs(struct priv *priv)\n \trte_free(priv->hash_rxqs);\n \tpriv->hash_rxqs = NULL;\n \tfor (i = 0; (i != priv->ind_tables_n); ++i) {\n-\t\tstruct ibv_exp_rwq_ind_table *ind_table =\n+\t\tstruct ibv_rwq_ind_table *ind_table =\n \t\t\t(*priv->ind_tables)[i];\n \n \t\tassert(ind_table != NULL);\n-\t\tclaim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));\n+\t\tclaim_zero(ibv_destroy_rwq_ind_table(ind_table));\n \t}\n \tpriv->ind_tables_n = 0;\n \trte_free(priv->ind_tables);\n@@ -765,7 +763,7 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)\n \tif (rxq_ctrl->fdir_queue != NULL)\n \t\tpriv_fdir_queue_destroy(rxq_ctrl->priv, rxq_ctrl->fdir_queue);\n \tif 
(rxq_ctrl->wq != NULL)\n-\t\tclaim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq));\n+\t\tclaim_zero(ibv_destroy_wq(rxq_ctrl->wq));\n \tif (rxq_ctrl->cq != NULL)\n \t\tclaim_zero(ibv_destroy_cq(rxq_ctrl->cq));\n \tif (rxq_ctrl->channel != NULL)\n@@ -788,16 +786,23 @@ static inline int\n rxq_setup(struct rxq_ctrl *tmpl)\n {\n \tstruct ibv_cq *ibcq = tmpl->cq;\n-\tstruct ibv_mlx5_cq_info cq_info;\n-\tstruct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);\n+\tstruct mlx5dv_cq cq_info;\n+\tstruct mlx5dv_rwq rwq;\n \tconst uint16_t desc_n =\n \t\t(1 << tmpl->rxq.elts_n) + tmpl->priv->rx_vec_en *\n \t\tMLX5_VPMD_DESCS_PER_LOOP;\n \tstruct rte_mbuf *(*elts)[desc_n] =\n \t\trte_calloc_socket(\"RXQ\", 1, sizeof(*elts), 0, tmpl->socket);\n-\tif (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {\n-\t\tERROR(\"Unable to query CQ info. check your OFED.\");\n-\t\treturn ENOTSUP;\n+\tstruct mlx5dv_obj obj;\n+\tint ret = 0;\n+\n+\tobj.cq.in = ibcq;\n+\tobj.cq.out = &cq_info;\n+\tobj.rwq.in = tmpl->wq;\n+\tobj.rwq.out = &rwq;\n+\tret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);\n+\tif (ret != 0) {\n+\t\treturn -EINVAL;\n \t}\n \tif (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {\n \t\tERROR(\"Wrong MLX5_CQE_SIZE environment variable value: \"\n@@ -806,7 +811,7 @@ rxq_setup(struct rxq_ctrl *tmpl)\n \t}\n \tif (elts == NULL)\n \t\treturn ENOMEM;\n-\ttmpl->rxq.rq_db = rwq->rq.db;\n+\ttmpl->rxq.rq_db = rwq.dbrec;\n \ttmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);\n \ttmpl->rxq.cq_ci = 0;\n \ttmpl->rxq.rq_ci = 0;\n@@ -814,11 +819,14 @@ rxq_setup(struct rxq_ctrl *tmpl)\n \ttmpl->rxq.cq_db = cq_info.dbrec;\n \ttmpl->rxq.wqes =\n \t\t(volatile struct mlx5_wqe_data_seg (*)[])\n-\t\t(uintptr_t)rwq->rq.buff;\n+\t\t(uintptr_t)rwq.buf;\n \ttmpl->rxq.cqes =\n \t\t(volatile struct mlx5_cqe (*)[])\n \t\t(uintptr_t)cq_info.buf;\n \ttmpl->rxq.elts = elts;\n+\ttmpl->rxq.cq_uar = cq_info.cq_uar;\n+\ttmpl->rxq.cqn = cq_info.cqn;\n+\ttmpl->rxq.cq_arm_sn = 0;\n \treturn 0;\n }\n \n@@ -856,11 +864,11 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,\n \t\t\t.rss_hash = priv->rxqs_n > 1,\n \t\t},\n \t};\n-\tstruct ibv_exp_wq_attr mod;\n+\tstruct ibv_wq_attr mod;\n \tunion {\n-\t\tstruct ibv_exp_cq_init_attr cq;\n-\t\tstruct ibv_exp_wq_init_attr wq;\n-\t\tstruct ibv_exp_cq_attr cq_attr;\n+\t\tstruct ibv_cq_init_attr_ex cq;\n+\t\tstruct ibv_wq_init_attr wq;\n+\t\tstruct ibv_cq_ex cq_attr;\n \t} attr;\n \tunsigned int mb_len = rte_pktmbuf_data_room_size(mp);\n \tunsigned int cqe_n = desc - 1;\n@@ -940,12 +948,12 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,\n \t\t\tgoto error;\n \t\t}\n \t}\n-\tattr.cq = (struct ibv_exp_cq_init_attr){\n+\tattr.cq = (struct ibv_cq_init_attr_ex){\n \t\t.comp_mask = 0,\n \t};\n \tif (priv->cqe_comp) {\n-\t\tattr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS;\n-\t\tattr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;\n+\t\tattr.cq.comp_mask |= IBV_CQ_INIT_ATTR_MASK_FLAGS;\n+\t\tattr.cq.flags |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;\n \t\t/*\n \t\t * For vectorized Rx, it must not be doubled in order to\n \t\t * make cq_ci and rq_ci aligned.\n@@ -953,8 +961,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,\n \t\tif (rxq_check_vec_support(&tmpl.rxq) < 0)\n \t\t\tcqe_n = (desc * 2) - 1; /* Double the number of CQEs. 
*/\n \t}\n-\ttmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0,\n-\t\t\t\t    &attr.cq);\n+\ttmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0);\n \tif (tmpl.cq == NULL) {\n \t\tret = ENOMEM;\n \t\tERROR(\"%p: CQ creation failure: %s\",\n@@ -962,35 +969,35 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,\n \t\tgoto error;\n \t}\n \tDEBUG(\"priv->device_attr.max_qp_wr is %d\",\n-\t      priv->device_attr.max_qp_wr);\n+\t      priv->device_attr.orig_attr.max_qp_wr);\n \tDEBUG(\"priv->device_attr.max_sge is %d\",\n-\t      priv->device_attr.max_sge);\n+\t      priv->device_attr.orig_attr.max_sge);\n \t/* Configure VLAN stripping. */\n \ttmpl.rxq.vlan_strip = (priv->hw_vlan_strip &&\n \t\t\t       !!dev->data->dev_conf.rxmode.hw_vlan_strip);\n-\tattr.wq = (struct ibv_exp_wq_init_attr){\n+\tattr.wq = (struct ibv_wq_init_attr){\n \t\t.wq_context = NULL, /* Could be useful in the future. */\n-\t\t.wq_type = IBV_EXP_WQT_RQ,\n+\t\t.wq_type = IBV_WQT_RQ,\n \t\t/* Max number of outstanding WRs. */\n-\t\t.max_recv_wr = desc >> tmpl.rxq.sges_n,\n+\t\t.max_wr = desc >> tmpl.rxq.sges_n,\n \t\t/* Max number of scatter/gather elements in a WR. */\n-\t\t.max_recv_sge = 1 << tmpl.rxq.sges_n,\n+\t\t.max_sge = 1 << tmpl.rxq.sges_n,\n \t\t.pd = priv->pd,\n \t\t.cq = tmpl.cq,\n \t\t.comp_mask =\n-\t\t\tIBV_EXP_CREATE_WQ_VLAN_OFFLOADS |\n+\t\t\tIBV_WQ_FLAGS_CVLAN_STRIPPING |\n \t\t\t0,\n-\t\t.vlan_offloads = (tmpl.rxq.vlan_strip ?\n-\t\t\t\t  IBV_EXP_RECEIVE_WQ_CVLAN_STRIP :\n-\t\t\t\t  0),\n+\t\t.create_flags = (tmpl.rxq.vlan_strip ?\n+\t\t\t\t IBV_WQ_FLAGS_CVLAN_STRIPPING :\n+\t\t\t\t 0),\n \t};\n \t/* By default, FCS (CRC) is stripped by hardware. */\n \tif (dev->data->dev_conf.rxmode.hw_strip_crc) {\n \t\ttmpl.rxq.crc_present = 0;\n \t} else if (priv->hw_fcs_strip) {\n \t\t/* Ask HW/Verbs to leave CRC in place when supported. */\n-\t\tattr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS;\n-\t\tattr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS;\n+\t\tattr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;\n+\t\tattr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;\n \t\ttmpl.rxq.crc_present = 1;\n \t} else {\n \t\tWARN(\"%p: CRC stripping has been disabled but will still\"\n@@ -1004,20 +1011,22 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,\n \t      (void *)dev,\n \t      tmpl.rxq.crc_present ? \"disabled\" : \"enabled\",\n \t      tmpl.rxq.crc_present << 2);\n+#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING\n \tif (!mlx5_getenv_int(\"MLX5_PMD_ENABLE_PADDING\"))\n \t\t; /* Nothing else to do. */\n \telse if (priv->hw_padding) {\n \t\tINFO(\"%p: enabling packet padding on queue %p\",\n \t\t     (void *)dev, (void *)rxq_ctrl);\n-\t\tattr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING;\n-\t\tattr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS;\n+\t\tattr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;\n+\t\tattr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;\n \t} else\n \t\tWARN(\"%p: packet padding has been requested but is not\"\n \t\t     \" supported, make sure MLNX_OFED and firmware are\"\n \t\t     \" up to date\",\n \t\t     (void *)dev);\n+#endif\n \n-\ttmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq);\n+\ttmpl.wq = ibv_create_wq(priv->ctx, &attr.wq);\n \tif (tmpl.wq == NULL) {\n \t\tret = (errno ? 
errno : EINVAL);\n \t\tERROR(\"%p: WQ creation failure: %s\",\n@@ -1028,12 +1037,12 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,\n \t * Make sure number of WRs*SGEs match expectations since a queue\n \t * cannot allocate more than \"desc\" buffers.\n \t */\n-\tif (((int)attr.wq.max_recv_wr != (desc >> tmpl.rxq.sges_n)) ||\n-\t    ((int)attr.wq.max_recv_sge != (1 << tmpl.rxq.sges_n))) {\n+\tif (((int)attr.wq.max_wr != (desc >> tmpl.rxq.sges_n)) ||\n+\t    ((int)attr.wq.max_sge != (1 << tmpl.rxq.sges_n))) {\n \t\tERROR(\"%p: requested %u*%u but got %u*%u WRs*SGEs\",\n \t\t      (void *)dev,\n \t\t      (desc >> tmpl.rxq.sges_n), (1 << tmpl.rxq.sges_n),\n-\t\t      attr.wq.max_recv_wr, attr.wq.max_recv_sge);\n+\t\t      attr.wq.max_wr, attr.wq.max_sge);\n \t\tret = EINVAL;\n \t\tgoto error;\n \t}\n@@ -1041,13 +1050,13 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,\n \ttmpl.rxq.port_id = dev->data->port_id;\n \tDEBUG(\"%p: RTE port ID: %u\", (void *)rxq_ctrl, tmpl.rxq.port_id);\n \t/* Change queue state to ready. */\n-\tmod = (struct ibv_exp_wq_attr){\n-\t\t.attr_mask = IBV_EXP_WQ_ATTR_STATE,\n-\t\t.wq_state = IBV_EXP_WQS_RDY,\n+\tmod = (struct ibv_wq_attr){\n+\t\t.attr_mask = IBV_WQ_ATTR_STATE,\n+\t\t.wq_state = IBV_WQS_RDY,\n \t};\n-\tret = ibv_exp_modify_wq(tmpl.wq, &mod);\n+\tret = ibv_modify_wq(tmpl.wq, &mod);\n \tif (ret) {\n-\t\tERROR(\"%p: WQ state to IBV_EXP_WQS_RDY failed: %s\",\n+\t\tERROR(\"%p: WQ state to IBV_WQS_RDY failed: %s\",\n \t\t      (void *)dev, strerror(ret));\n \t\tgoto error;\n \t}\n@@ -1311,7 +1320,30 @@ priv_rx_intr_vec_disable(struct priv *priv)\n \tintr_handle->intr_vec = NULL;\n }\n \n-#ifdef HAVE_UPDATE_CQ_CI\n+/**\n+ * MLX5 CQ notification.\n+ *\n+ * @param rxq\n+ *   Pointer to receive queue structure.\n+ * @param sq_n_rxq\n+ *   Sequence number per receive queue.\n+ */\n+static inline void\n+mlx5_arm_cq(struct rxq *rxq, int sq_n_rxq)\n+{\n+\tint sq_n = 0;\n+\tuint32_t doorbell_hi;\n+\tuint64_t doorbell;\n+\tvoid *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;\n+\n+\tsq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;\n+\tdoorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);\n+\tdoorbell = (uint64_t)doorbell_hi << 32;\n+\tdoorbell |= rxq->cqn;\n+\trxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);\n+\trte_wmb();\n+\trte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);\n+}\n \n /**\n  * DPDK callback for Rx queue interrupt enable.\n@@ -1330,13 +1362,12 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \tstruct priv *priv = mlx5_get_priv(dev);\n \tstruct rxq *rxq = (*priv->rxqs)[rx_queue_id];\n \tstruct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);\n-\tint ret;\n+\tint ret = 0;\n \n \tif (!rxq || !rxq_ctrl->channel) {\n \t\tret = EINVAL;\n \t} else {\n-\t\tibv_mlx5_exp_update_cq_ci(rxq_ctrl->cq, rxq->cq_ci);\n-\t\tret = ibv_req_notify_cq(rxq_ctrl->cq, 0);\n+\t\tmlx5_arm_cq(rxq, rxq->cq_arm_sn);\n \t}\n \tif (ret)\n \t\tWARN(\"unable to arm interrupt on rx queue %d\", rx_queue_id);\n@@ -1368,6 +1399,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \t\tret = EINVAL;\n \t} else {\n \t\tret = ibv_get_cq_event(rxq_ctrl->cq->channel, &ev_cq, &ev_ctx);\n+\t\trxq->cq_arm_sn++;\n \t\tif (ret || ev_cq != rxq_ctrl->cq)\n \t\t\tret = EINVAL;\n \t}\n@@ -1378,5 +1410,3 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \t\tibv_ack_cq_events(rxq_ctrl->cq, 1);\n \treturn -ret;\n }\n-\n-#endif /* HAVE_UPDATE_CQ_CI */\ndiff 
--git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c\nindex 674cce0..c45ebee 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.c\n+++ b/drivers/net/mlx5/mlx5_rxtx.c\n@@ -42,8 +42,7 @@\n #pragma GCC diagnostic ignored \"-Wpedantic\"\n #endif\n #include <infiniband/verbs.h>\n-#include <infiniband/mlx5_hw.h>\n-#include <infiniband/arch.h>\n+#include <infiniband/mlx5dv.h>\n #ifdef PEDANTIC\n #pragma GCC diagnostic error \"-Wpedantic\"\n #endif\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex 9375aa8..342c933 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -43,7 +43,7 @@\n #pragma GCC diagnostic ignored \"-Wpedantic\"\n #endif\n #include <infiniband/verbs.h>\n-#include <infiniband/mlx5_hw.h>\n+#include <infiniband/mlx5dv.h>\n #ifdef PEDANTIC\n #pragma GCC diagnostic error \"-Wpedantic\"\n #endif\n@@ -81,8 +81,8 @@ struct mlx5_txq_stats {\n /* Flow director queue structure. */\n struct fdir_queue {\n \tstruct ibv_qp *qp; /* Associated RX QP. */\n-\tstruct ibv_exp_rwq_ind_table *ind_table; /* Indirection table. */\n-\tstruct ibv_exp_wq *wq; /* Work queue. */\n+\tstruct ibv_rwq_ind_table *ind_table; /* Indirection table. */\n+\tstruct ibv_wq *wq; /* Work queue. */\n \tstruct ibv_cq *cq; /* Completion queue. */\n };\n \n@@ -124,13 +124,16 @@ struct rxq {\n \tstruct mlx5_rxq_stats stats;\n \tuint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */\n \tstruct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */\n+\tvoid *cq_uar; /* CQ user access region. */\n+\tuint32_t cqn; /* CQ number. */\n+\tuint8_t cq_arm_sn; /* CQ arm seq number. */\n } __rte_cache_aligned;\n \n /* RX queue control descriptor. */\n struct rxq_ctrl {\n \tstruct priv *priv; /* Back pointer to private data. */\n \tstruct ibv_cq *cq; /* Completion Queue. */\n-\tstruct ibv_exp_wq *wq; /* Work Queue. */\n+\tstruct ibv_wq *wq; /* Work Queue. */\n \tstruct fdir_queue *fdir_queue; /* Flow director queue. */\n \tstruct ibv_mr *mr; /* Memory Region (for mp). */\n \tstruct ibv_comp_channel *channel;\n@@ -152,8 +155,8 @@ enum hash_rxq_type {\n /* Flow structure with Ethernet specification. It is packed to prevent padding\n  * between attr and spec as this layout is expected by libibverbs. */\n struct flow_attr_spec_eth {\n-\tstruct ibv_exp_flow_attr attr;\n-\tstruct ibv_exp_flow_spec_eth spec;\n+\tstruct ibv_flow_attr attr;\n+\tstruct ibv_flow_spec_eth spec;\n } __attribute__((packed));\n \n /* Define a struct flow_attr_spec_eth object as an array of at least\n@@ -171,13 +174,13 @@ struct hash_rxq_init {\n \tunsigned int flow_priority; /* Flow priority to use. */\n \tunion {\n \t\tstruct {\n-\t\t\tenum ibv_exp_flow_spec_type type;\n+\t\t\tenum ibv_flow_spec_type type;\n \t\t\tuint16_t size;\n \t\t} hdr;\n-\t\tstruct ibv_exp_flow_spec_tcp_udp tcp_udp;\n-\t\tstruct ibv_exp_flow_spec_ipv4 ipv4;\n-\t\tstruct ibv_exp_flow_spec_ipv6 ipv6;\n-\t\tstruct ibv_exp_flow_spec_eth eth;\n+\t\tstruct ibv_flow_spec_tcp_udp tcp_udp;\n+\t\tstruct ibv_flow_spec_ipv4 ipv4;\n+\t\tstruct ibv_flow_spec_ipv6 ipv6;\n+\t\tstruct ibv_flow_spec_eth eth;\n \t} flow_spec; /* Flow specification template. */\n \tconst struct hash_rxq_init *underlayer; /* Pointer to underlayer. */\n };\n@@ -231,9 +234,9 @@ struct hash_rxq {\n \tstruct ibv_qp *qp; /* Hash RX QP. */\n \tenum hash_rxq_type type; /* Hash RX queue type. */\n \t/* MAC flow steering rules, one per VLAN ID. 
*/\n-\tstruct ibv_exp_flow *mac_flow\n+\tstruct ibv_flow *mac_flow\n \t\t[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];\n-\tstruct ibv_exp_flow *special_flow\n+\tstruct ibv_flow *special_flow\n \t\t[MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS];\n };\n \n@@ -293,7 +296,7 @@ extern const unsigned int hash_rxq_init_n;\n extern uint8_t rss_hash_default_key[];\n extern const size_t rss_hash_default_key_len;\n \n-size_t priv_flow_attr(struct priv *, struct ibv_exp_flow_attr *,\n+size_t priv_flow_attr(struct priv *, struct ibv_flow_attr *,\n \t\t      size_t, enum hash_rxq_type);\n int priv_create_hash_rxqs(struct priv *);\n void priv_destroy_hash_rxqs(struct priv *);\n@@ -305,10 +308,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,\n void mlx5_rx_queue_release(void *);\n int priv_rx_intr_vec_enable(struct priv *priv);\n void priv_rx_intr_vec_disable(struct priv *priv);\n-#ifdef HAVE_UPDATE_CQ_CI\n int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n-#endif /* HAVE_UPDATE_CQ_CI */\n \n /* mlx5_txq.c */\n \ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c\nindex aff3359..33988e3 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c\n@@ -43,8 +43,7 @@\n #pragma GCC diagnostic ignored \"-Wpedantic\"\n #endif\n #include <infiniband/verbs.h>\n-#include <infiniband/mlx5_hw.h>\n-#include <infiniband/arch.h>\n+#include <infiniband/mlx5dv.h>\n #ifdef PEDANTIC\n #pragma GCC diagnostic error \"-Wpedantic\"\n #endif\ndiff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c\nindex b4c5b10..39a38c1 100644\n--- a/drivers/net/mlx5/mlx5_txq.c\n+++ b/drivers/net/mlx5/mlx5_txq.c\n@@ -162,13 +162,19 @@ txq_cleanup(struct txq_ctrl *txq_ctrl)\n static inline int\n txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)\n {\n-\tstruct mlx5_qp *qp = to_mqp(tmpl->qp);\n+\tstruct mlx5dv_qp qp;\n \tstruct ibv_cq *ibcq = tmpl->cq;\n-\tstruct ibv_mlx5_cq_info cq_info;\n+\tstruct mlx5dv_cq cq_info;\n+\tstruct mlx5dv_obj obj;\n+\tint ret = 0;\n \n-\tif (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {\n-\t\tERROR(\"Unable to query CQ info. 
check your OFED.\");\n-\t\treturn ENOTSUP;\n+\tobj.cq.in = ibcq;\n+\tobj.cq.out = &cq_info;\n+\tobj.qp.in = tmpl->qp;\n+\tobj.qp.out = &qp;\n+\tret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);\n+\tif (ret != 0) {\n+\t\treturn -EINVAL;\n \t}\n \tif (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {\n \t\tERROR(\"Wrong MLX5_CQE_SIZE environment variable value: \"\n@@ -176,11 +182,11 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)\n \t\treturn EINVAL;\n \t}\n \ttmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);\n-\ttmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;\n-\ttmpl->txq.wqes = qp->gen_data.sqstart;\n-\ttmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);\n-\ttmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];\n-\ttmpl->txq.bf_reg = qp->gen_data.bf->reg;\n+\ttmpl->txq.qp_num_8s = tmpl->qp->qp_num << 8;\n+\ttmpl->txq.wqes = qp.sq.buf;\n+\ttmpl->txq.wqe_n = log2above(qp.sq.wqe_cnt);\n+\ttmpl->txq.qp_db = &qp.dbrec[MLX5_SND_DBR];\n+\ttmpl->txq.bf_reg = qp.bf.reg;\n \ttmpl->txq.cq_db = cq_info.dbrec;\n \ttmpl->txq.cqes =\n \t\t(volatile struct mlx5_cqe (*)[])\n@@ -219,10 +225,10 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,\n \t\t.socket = socket,\n \t};\n \tunion {\n-\t\tstruct ibv_exp_qp_init_attr init;\n-\t\tstruct ibv_exp_cq_init_attr cq;\n-\t\tstruct ibv_exp_qp_attr mod;\n-\t\tstruct ibv_exp_cq_attr cq_attr;\n+\t\tstruct ibv_qp_init_attr_ex init;\n+\t\tstruct ibv_cq_init_attr_ex cq;\n+\t\tstruct ibv_qp_attr mod;\n+\t\tstruct ibv_cq_ex cq_attr;\n \t} attr;\n \tunsigned int cqe_n;\n \tconst unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER +\n@@ -241,16 +247,16 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,\n \tif (priv->mps == MLX5_MPW_ENHANCED)\n \t\ttmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;\n \t/* MRs will be registered in mp2mr[] later. */\n-\tattr.cq = (struct ibv_exp_cq_init_attr){\n+\tattr.cq = (struct ibv_cq_init_attr_ex){\n \t\t.comp_mask = 0,\n \t};\n \tcqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?\n \t\t((desc / MLX5_TX_COMP_THRESH) - 1) : 1;\n \tif (priv->mps == MLX5_MPW_ENHANCED)\n \t\tcqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;\n-\ttmpl.cq = ibv_exp_create_cq(priv->ctx,\n-\t\t\t\t    cqe_n,\n-\t\t\t\t    NULL, NULL, 0, &attr.cq);\n+\ttmpl.cq = ibv_create_cq(priv->ctx,\n+\t\t\t\tcqe_n,\n+\t\t\t\tNULL, NULL, 0);\n \tif (tmpl.cq == NULL) {\n \t\tret = ENOMEM;\n \t\tERROR(\"%p: CQ creation failure: %s\",\n@@ -258,19 +264,20 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,\n \t\tgoto error;\n \t}\n \tDEBUG(\"priv->device_attr.max_qp_wr is %d\",\n-\t      priv->device_attr.max_qp_wr);\n+\t      priv->device_attr.orig_attr.max_qp_wr);\n \tDEBUG(\"priv->device_attr.max_sge is %d\",\n-\t      priv->device_attr.max_sge);\n-\tattr.init = (struct ibv_exp_qp_init_attr){\n+\t      priv->device_attr.orig_attr.max_sge);\n+\tattr.init = (struct ibv_qp_init_attr_ex){\n \t\t/* CQ to be associated with the send queue. */\n \t\t.send_cq = tmpl.cq,\n \t\t/* CQ to be associated with the receive queue. */\n \t\t.recv_cq = tmpl.cq,\n \t\t.cap = {\n \t\t\t/* Max number of outstanding WRs. 
*/\n-\t\t\t.max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?\n-\t\t\t\t\tpriv->device_attr.max_qp_wr :\n-\t\t\t\t\tdesc),\n+\t\t\t.max_send_wr =\n+\t\t\t ((priv->device_attr.orig_attr.max_qp_wr < desc) ?\n+\t\t\t   priv->device_attr.orig_attr.max_qp_wr :\n+\t\t\t   desc),\n \t\t\t/*\n \t\t\t * Max number of scatter/gather elements in a WR,\n \t\t\t * must be 1 to prevent libmlx5 from trying to affect\n@@ -285,7 +292,7 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,\n \t\t * TX burst. */\n \t\t.sq_sig_all = 0,\n \t\t.pd = priv->pd,\n-\t\t.comp_mask = IBV_EXP_QP_INIT_ATTR_PD,\n+\t\t.comp_mask = IBV_QP_INIT_ATTR_PD,\n \t};\n \tif (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {\n \t\tunsigned int ds_cnt;\n@@ -348,14 +355,14 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,\n \tif (priv->tso) {\n \t\tattr.init.max_tso_header =\n \t\t\tmax_tso_inline * RTE_CACHE_LINE_SIZE;\n-\t\tattr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;\n+\t\tattr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;\n \t\ttmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,\n \t\t\t\t\t      max_tso_inline);\n \t\ttmpl.txq.tso_en = 1;\n \t}\n \tif (priv->tunnel_en)\n \t\ttmpl.txq.tunnel_en = 1;\n-\ttmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);\n+\ttmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);\n \tif (tmpl.qp == NULL) {\n \t\tret = (errno ? errno : EINVAL);\n \t\tERROR(\"%p: QP creation failure: %s\",\n@@ -367,14 +374,14 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,\n \t      attr.init.cap.max_send_wr,\n \t      attr.init.cap.max_send_sge,\n \t      attr.init.cap.max_inline_data);\n-\tattr.mod = (struct ibv_exp_qp_attr){\n+\tattr.mod = (struct ibv_qp_attr){\n \t\t/* Move the QP to this state. */\n \t\t.qp_state = IBV_QPS_INIT,\n \t\t/* Primary port number. */\n \t\t.port_num = priv->port\n \t};\n-\tret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,\n-\t\t\t\t(IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));\n+\tret = ibv_modify_qp(tmpl.qp, &attr.mod,\n+\t\t\t    (IBV_QP_STATE | IBV_QP_PORT));\n \tif (ret) {\n \t\tERROR(\"%p: QP state to IBV_QPS_INIT failed: %s\",\n \t\t      (void *)dev, strerror(ret));\n@@ -387,17 +394,17 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,\n \t\tgoto error;\n \t}\n \ttxq_alloc_elts(&tmpl, desc);\n-\tattr.mod = (struct ibv_exp_qp_attr){\n+\tattr.mod = (struct ibv_qp_attr){\n \t\t.qp_state = IBV_QPS_RTR\n \t};\n-\tret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);\n+\tret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);\n \tif (ret) {\n \t\tERROR(\"%p: QP state to IBV_QPS_RTR failed: %s\",\n \t\t      (void *)dev, strerror(ret));\n \t\tgoto error;\n \t}\n \tattr.mod.qp_state = IBV_QPS_RTS;\n-\tret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);\n+\tret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);\n \tif (ret) {\n \t\tERROR(\"%p: QP state to IBV_QPS_RTS failed: %s\",\n \t\t      (void *)dev, strerror(ret));\ndiff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c\nindex 353ae49..36ffbba 100644\n--- a/drivers/net/mlx5/mlx5_vlan.c\n+++ b/drivers/net/mlx5/mlx5_vlan.c\n@@ -139,20 +139,21 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)\n {\n \tstruct rxq *rxq = (*priv->rxqs)[idx];\n \tstruct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);\n-\tstruct ibv_exp_wq_attr mod;\n+\tstruct ibv_wq_attr mod;\n \tuint16_t vlan_offloads =\n-\t\t(on ? IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : 0) |\n+\t\t(on ? 
IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |\n \t\t0;\n \tint err;\n \n \tDEBUG(\"set VLAN offloads 0x%x for port %d queue %d\",\n \t      vlan_offloads, rxq->port_id, idx);\n-\tmod = (struct ibv_exp_wq_attr){\n-\t\t.attr_mask = IBV_EXP_WQ_ATTR_VLAN_OFFLOADS,\n-\t\t.vlan_offloads = vlan_offloads,\n+\tmod = (struct ibv_wq_attr){\n+\t\t.attr_mask = IBV_WQ_ATTR_FLAGS,\n+\t\t.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,\n+\t\t.flags = vlan_offloads,\n \t};\n \n-\terr = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);\n+\terr = ibv_modify_wq(rxq_ctrl->wq, &mod);\n \tif (err) {\n \t\tERROR(\"%p: failed to modified stripping mode: %s\",\n \t\t      (void *)priv, strerror(err));\ndiff --git a/mk/rte.app.mk b/mk/rte.app.mk\nindex c25fdd9..9415537 100644\n--- a/mk/rte.app.mk\n+++ b/mk/rte.app.mk\n@@ -129,7 +129,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KNI)        += -lrte_pmd_kni\n endif\n _LDLIBS-$(CONFIG_RTE_LIBRTE_LIO_PMD)        += -lrte_pmd_lio\n _LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD)       += -lrte_pmd_mlx4 -libverbs\n-_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -lrte_pmd_mlx5 -libverbs\n+_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD)       += -lrte_pmd_mlx5 -libverbs -lmlx5\n _LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD)        += -lrte_pmd_nfp\n _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL)       += -lrte_pmd_null\n _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP)       += -lrte_pmd_pcap -lpcap\n",
    "prefixes": [
        "dpdk-dev",
        "v8"
    ]
}