get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
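
For illustration, a minimal sketch of fetching this resource with Python and the requests package (any HTTP client works; the endpoint URL and field names are the ones shown in the response below, and ?format=json is assumed to return plain JSON in the same way that ?format=api returns the browsable view):

import requests

# Fetch the patch shown below as JSON (hypothetical client code,
# not part of the Patchwork API itself).
url = "http://patches.dpdk.org/api/patches/100769/"
resp = requests.get(url, params={"format": "json"}, timeout=30)
resp.raise_for_status()

patch = resp.json()
print(patch["name"])                # "[v2,01/41] common/mlx5: add glue files for FreeBSD"
print(patch["state"])               # "deferred"
print(patch["submitter"]["email"])  # "srikanth.k@oneconvergence.com"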

GET /api/patches/100769/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 100769,
    "url": "http://patches.dpdk.org/api/patches/100769/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211007184350.73858-2-srikanth.k@oneconvergence.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211007184350.73858-2-srikanth.k@oneconvergence.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211007184350.73858-2-srikanth.k@oneconvergence.com",
    "date": "2021-10-07T18:43:10",
    "name": "[v2,01/41] common/mlx5: add glue files for FreeBSD",
    "commit_ref": null,
    "pull_url": null,
    "state": "deferred",
    "archived": false,
    "hash": "f5a2d7dbffac9b4ddc38005282d19fa7a1f36c2e",
    "submitter": {
        "id": 2368,
        "url": "http://patches.dpdk.org/api/people/2368/?format=api",
        "name": "Srikanth Kaka",
        "email": "srikanth.k@oneconvergence.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211007184350.73858-2-srikanth.k@oneconvergence.com/mbox/",
    "series": [
        {
            "id": 19455,
            "url": "http://patches.dpdk.org/api/series/19455/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=19455",
            "date": "2021-10-07T18:43:09",
            "name": "add MLX5 FreeBSD support",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/19455/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/100769/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/100769/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B908BA034F;\n\tFri,  8 Oct 2021 12:56:00 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 333D741101;\n\tFri,  8 Oct 2021 12:55:57 +0200 (CEST)",
            "from mail-pf1-f180.google.com (mail-pf1-f180.google.com\n [209.85.210.180])\n by mails.dpdk.org (Postfix) with ESMTP id 8A118411E0\n for <dev@dpdk.org>; Thu,  7 Oct 2021 20:44:16 +0200 (CEST)",
            "by mail-pf1-f180.google.com with SMTP id g2so6083460pfc.6\n for <dev@dpdk.org>; Thu, 07 Oct 2021 11:44:16 -0700 (PDT)",
            "from srikanth-ThinkPad-T450.domain.name ([136.185.113.102])\n by smtp.gmail.com with ESMTPSA id c11sm3311586pji.38.2021.10.07.11.44.12\n (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n Thu, 07 Oct 2021 11:44:14 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=oneconvergence.com; s=google;\n h=from:to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding;\n bh=dAiJSB8zH93VnvBKsPULPZ9FnLTZ+j01cQKQm5GVZsQ=;\n b=ZZnU1+8IJ8fD/WKIxhe3XHz/TX156lVcQPttNYE01bui9PEFuQKKWicOC90rKv45Zr\n jq3eoCMweb6fGlF/nnjKImvM/zF5Mi4bHZt+U20lfxudCeR3Vqr3VU9feVutDb59d0Cd\n bg0l3mUjvDuRzAw7NYD8ByzAQhv5N1iT0yV1w=",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20210112;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references:mime-version:content-transfer-encoding;\n bh=dAiJSB8zH93VnvBKsPULPZ9FnLTZ+j01cQKQm5GVZsQ=;\n b=iAUBbppEyZHa5+gD7dMXk+xq4Uco126GXg4QVQZwUj6v9sNqMeLCnuGVxVRaqW2PCs\n VOrS72s7vatYwMCvkXPeim/lxpTX8wQQaNEQgLM8WkdDEFc+hT8P4GkPHyFSir4PwjXG\n eMJ2OtgocVLnFCGXoJ+jMt9aE4z3o5TtS/tXQj9YefyXrHQSp6kdEFdMa29r9QhpvuG8\n Fo5+Iy2k74WXVCxo9ob6ZR/U03enYUoeAng+UiEYYm8Adm02WDVF1HjTedHOm4w9DP7o\n mvJ9n1FcjDpte41zSYSun7dnJQ605m9IDVsykSZeN4dBg8XtB8JqZaOymoQ4eB+6tBas\n qd+Q==",
        "X-Gm-Message-State": "AOAM530VI4FfGJJamxU8rmEC3UZSkD6omtc5gSP5tG19oVlQ6tr5YCww\n 2NmXvkFfagi1W4t1Crv6e2SsMg==",
        "X-Google-Smtp-Source": "\n ABdhPJyFmpDsLrvtEP3PUIg7Y5lJYlZ7zyhlNRI6oHcn63+yJjXQaVGrhTAnxAE2WBo/wDnC6AdWKQ==",
        "X-Received": "by 2002:a63:ac1a:: with SMTP id v26mr977450pge.105.1633632255366;\n Thu, 07 Oct 2021 11:44:15 -0700 (PDT)",
        "From": "Srikanth Kaka <srikanth.k@oneconvergence.com>",
        "To": "Matan Azrad <matan@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Cc": "dev@dpdk.org, Vag Singh <vag.singh@oneconvergence.com>,\n Anand Thulasiram <avelu@juniper.net>,\n Srikanth Kaka <srikanth.k@oneconvergence.com>",
        "Date": "Fri,  8 Oct 2021 00:13:10 +0530",
        "Message-Id": "<20211007184350.73858-2-srikanth.k@oneconvergence.com>",
        "X-Mailer": "git-send-email 2.30.2",
        "In-Reply-To": "<20211007184350.73858-1-srikanth.k@oneconvergence.com>",
        "References": "<20211007184350.73858-1-srikanth.k@oneconvergence.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-Mailman-Approved-At": "Fri, 08 Oct 2021 12:55:54 +0200",
        "Subject": "[dpdk-dev] [PATCH v2 01/41] common/mlx5: add glue files for FreeBSD",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "- Added mlx5_glue.h. It is same as its Linux counterpart\n- Added mlx5_glue.c. It is same as its Linux counterpart\n\nSigned-off-by: Srikanth Kaka <srikanth.k@oneconvergence.com>\nSigned-off-by: Vag Singh <vag.singh@oneconvergence.com>\nSigned-off-by: Anand Thulasiram <avelu@juniper.net>\n---\n drivers/common/mlx5/freebsd/mlx5_glue.c | 1505 +++++++++++++++++++++++\n drivers/common/mlx5/freebsd/mlx5_glue.h |  374 ++++++\n 2 files changed, 1879 insertions(+)\n create mode 100644 drivers/common/mlx5/freebsd/mlx5_glue.c\n create mode 100644 drivers/common/mlx5/freebsd/mlx5_glue.h",
    "diff": "diff --git a/drivers/common/mlx5/freebsd/mlx5_glue.c b/drivers/common/mlx5/freebsd/mlx5_glue.c\nnew file mode 100644\nindex 0000000000..037ca961a0\n--- /dev/null\n+++ b/drivers/common/mlx5/freebsd/mlx5_glue.c\n@@ -0,0 +1,1505 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2018 6WIND S.A.\n+ * Copyright 2018 Mellanox Technologies, Ltd\n+ */\n+\n+#include <errno.h>\n+#include <stdalign.h>\n+#include <stddef.h>\n+#include <stdint.h>\n+#include <stdlib.h>\n+/*\n+ * Not needed by this file; included to work around the lack of off_t\n+ * definition for mlx5dv.h with unpatched rdma-core versions.\n+ */\n+#include <sys/types.h>\n+\n+#include \"mlx5_glue.h\"\n+\n+static int\n+mlx5_glue_fork_init(void)\n+{\n+\treturn ibv_fork_init();\n+}\n+\n+static struct ibv_pd *\n+mlx5_glue_alloc_pd(struct ibv_context *context)\n+{\n+\treturn ibv_alloc_pd(context);\n+}\n+\n+static int\n+mlx5_glue_dealloc_pd(struct ibv_pd *pd)\n+{\n+\treturn ibv_dealloc_pd(pd);\n+}\n+\n+static struct ibv_device **\n+mlx5_glue_get_device_list(int *num_devices)\n+{\n+\treturn ibv_get_device_list(num_devices);\n+}\n+\n+static void\n+mlx5_glue_free_device_list(struct ibv_device **list)\n+{\n+\tibv_free_device_list(list);\n+}\n+\n+static struct ibv_context *\n+mlx5_glue_open_device(struct ibv_device *device)\n+{\n+\treturn ibv_open_device(device);\n+}\n+\n+static int\n+mlx5_glue_close_device(struct ibv_context *context)\n+{\n+\treturn ibv_close_device(context);\n+}\n+\n+static int\n+mlx5_glue_query_device(struct ibv_context *context,\n+\t\t       struct ibv_device_attr *device_attr)\n+{\n+\treturn ibv_query_device(context, device_attr);\n+}\n+\n+static int\n+mlx5_glue_query_device_ex(struct ibv_context *context,\n+\t\t\t  const struct ibv_query_device_ex_input *input,\n+\t\t\t  struct ibv_device_attr_ex *attr)\n+{\n+\treturn ibv_query_device_ex(context, input, attr);\n+}\n+\n+static int\n+mlx5_glue_query_rt_values_ex(struct ibv_context *context,\n+\t\t\t  struct ibv_values_ex *values)\n+{\n+\treturn ibv_query_rt_values_ex(context, values);\n+}\n+\n+static int\n+mlx5_glue_query_port(struct ibv_context *context, uint8_t port_num,\n+\t\t     struct ibv_port_attr *port_attr)\n+{\n+\treturn ibv_query_port(context, port_num, port_attr);\n+}\n+\n+static struct ibv_comp_channel *\n+mlx5_glue_create_comp_channel(struct ibv_context *context)\n+{\n+\treturn ibv_create_comp_channel(context);\n+}\n+\n+static int\n+mlx5_glue_destroy_comp_channel(struct ibv_comp_channel *channel)\n+{\n+\treturn ibv_destroy_comp_channel(channel);\n+}\n+\n+static struct ibv_cq *\n+mlx5_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,\n+\t\t    struct ibv_comp_channel *channel, int comp_vector)\n+{\n+\treturn ibv_create_cq(context, cqe, cq_context, channel, comp_vector);\n+}\n+\n+static int\n+mlx5_glue_destroy_cq(struct ibv_cq *cq)\n+{\n+\treturn ibv_destroy_cq(cq);\n+}\n+\n+static int\n+mlx5_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,\n+\t\t       void **cq_context)\n+{\n+\treturn ibv_get_cq_event(channel, cq, cq_context);\n+}\n+\n+static void\n+mlx5_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)\n+{\n+\tibv_ack_cq_events(cq, nevents);\n+}\n+\n+static struct ibv_rwq_ind_table *\n+mlx5_glue_create_rwq_ind_table(struct ibv_context *context,\n+\t\t\t       struct ibv_rwq_ind_table_init_attr *init_attr)\n+{\n+\treturn ibv_create_rwq_ind_table(context, init_attr);\n+}\n+\n+static int\n+mlx5_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)\n+{\n+\treturn 
ibv_destroy_rwq_ind_table(rwq_ind_table);\n+}\n+\n+static struct ibv_wq *\n+mlx5_glue_create_wq(struct ibv_context *context,\n+\t\t    struct ibv_wq_init_attr *wq_init_attr)\n+{\n+\treturn ibv_create_wq(context, wq_init_attr);\n+}\n+\n+static int\n+mlx5_glue_destroy_wq(struct ibv_wq *wq)\n+{\n+\treturn ibv_destroy_wq(wq);\n+}\n+static int\n+mlx5_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)\n+{\n+\treturn ibv_modify_wq(wq, wq_attr);\n+}\n+\n+static struct ibv_flow *\n+mlx5_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)\n+{\n+\treturn ibv_create_flow(qp, flow);\n+}\n+\n+static int\n+mlx5_glue_destroy_flow(struct ibv_flow *flow_id)\n+{\n+\treturn ibv_destroy_flow(flow_id);\n+}\n+\n+static int\n+mlx5_glue_destroy_flow_action(void *action)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_action_destroy(action);\n+#else\n+\tstruct mlx5dv_flow_action_attr *attr = action;\n+\tint res = 0;\n+\tswitch (attr->type) {\n+\tcase MLX5DV_FLOW_ACTION_TAG:\n+\t\tbreak;\n+\tdefault:\n+\t\tres = ibv_destroy_flow_action(attr->action);\n+\t\tbreak;\n+\t}\n+\tfree(action);\n+\treturn res;\n+#endif\n+#else\n+\t(void)action;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static struct ibv_qp *\n+mlx5_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)\n+{\n+\treturn ibv_create_qp(pd, qp_init_attr);\n+}\n+\n+static struct ibv_qp *\n+mlx5_glue_create_qp_ex(struct ibv_context *context,\n+\t\t       struct ibv_qp_init_attr_ex *qp_init_attr_ex)\n+{\n+\treturn ibv_create_qp_ex(context, qp_init_attr_ex);\n+}\n+\n+static int\n+mlx5_glue_destroy_qp(struct ibv_qp *qp)\n+{\n+\treturn ibv_destroy_qp(qp);\n+}\n+\n+static int\n+mlx5_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)\n+{\n+\treturn ibv_modify_qp(qp, attr, attr_mask);\n+}\n+\n+static struct ibv_mr *\n+mlx5_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)\n+{\n+\treturn ibv_reg_mr(pd, addr, length, access);\n+}\n+\n+static struct ibv_mr *\n+mlx5_glue_alloc_null_mr(struct ibv_pd *pd)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn ibv_alloc_null_mr(pd);\n+#else\n+\t(void)pd;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dereg_mr(struct ibv_mr *mr)\n+{\n+\treturn ibv_dereg_mr(mr);\n+}\n+\n+static struct ibv_counter_set *\n+mlx5_glue_create_counter_set(struct ibv_context *context,\n+\t\t\t     struct ibv_counter_set_init_attr *init_attr)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n+\t(void)context;\n+\t(void)init_attr;\n+\treturn NULL;\n+#else\n+\treturn ibv_create_counter_set(context, init_attr);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_destroy_counter_set(struct ibv_counter_set *cs)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n+\t(void)cs;\n+\treturn -ENOTSUP;\n+#else\n+\treturn ibv_destroy_counter_set(cs);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_describe_counter_set(struct ibv_context *context,\n+\t\t\t       uint16_t counter_set_id,\n+\t\t\t       struct ibv_counter_set_description *cs_desc)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n+\t(void)context;\n+\t(void)counter_set_id;\n+\t(void)cs_desc;\n+\treturn -ENOTSUP;\n+#else\n+\treturn ibv_describe_counter_set(context, counter_set_id, cs_desc);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,\n+\t\t\t    struct ibv_counter_set_data *cs_data)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n+\t(void)query_attr;\n+\t(void)cs_data;\n+\treturn -ENOTSUP;\n+#else\n+\treturn 
ibv_query_counter_set(query_attr, cs_data);\n+#endif\n+}\n+\n+static struct ibv_counters *\n+mlx5_glue_create_counters(struct ibv_context *context,\n+\t\t\t  struct ibv_counters_init_attr *init_attr)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n+\t(void)context;\n+\t(void)init_attr;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#else\n+\treturn ibv_create_counters(context, init_attr);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_destroy_counters(struct ibv_counters *counters)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n+\t(void)counters;\n+\treturn -ENOTSUP;\n+#else\n+\treturn ibv_destroy_counters(counters);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_attach_counters(struct ibv_counters *counters,\n+\t\t\t  struct ibv_counter_attach_attr *attr,\n+\t\t\t  struct ibv_flow *flow)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n+\t(void)counters;\n+\t(void)attr;\n+\t(void)flow;\n+\treturn -ENOTSUP;\n+#else\n+\treturn ibv_attach_counters_point_flow(counters, attr, flow);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_query_counters(struct ibv_counters *counters,\n+\t\t\t uint64_t *counters_value,\n+\t\t\t uint32_t ncounters,\n+\t\t\t uint32_t flags)\n+{\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n+\t(void)counters;\n+\t(void)counters_value;\n+\t(void)ncounters;\n+\t(void)flags;\n+\treturn -ENOTSUP;\n+#else\n+\treturn ibv_read_counters(counters, counters_value, ncounters, flags);\n+#endif\n+}\n+\n+static void\n+mlx5_glue_ack_async_event(struct ibv_async_event *event)\n+{\n+\tibv_ack_async_event(event);\n+}\n+\n+static int\n+mlx5_glue_get_async_event(struct ibv_context *context,\n+\t\t\t  struct ibv_async_event *event)\n+{\n+\treturn ibv_get_async_event(context, event);\n+}\n+\n+static const char *\n+mlx5_glue_port_state_str(enum ibv_port_state port_state)\n+{\n+\treturn ibv_port_state_str(port_state);\n+}\n+\n+static struct ibv_cq *\n+mlx5_glue_cq_ex_to_cq(struct ibv_cq_ex *cq)\n+{\n+\treturn ibv_cq_ex_to_cq(cq);\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_dest_flow_tbl(void *tbl)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_action_create_dest_table(tbl);\n+#else\n+\t(void)tbl;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_dest_port(void *domain, uint32_t port)\n+{\n+#ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT\n+\treturn mlx5dv_dr_action_create_dest_ib_port(domain, port);\n+#else\n+#ifdef HAVE_MLX5DV_DR_ESWITCH\n+\treturn mlx5dv_dr_action_create_dest_vport(domain, port);\n+#else\n+\t(void)domain;\n+\t(void)port;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_drop(void)\n+{\n+#ifdef HAVE_MLX5DV_DR_ESWITCH\n+\treturn mlx5dv_dr_action_create_drop();\n+#else\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_push_vlan(struct mlx5dv_dr_domain *domain,\n+\t\t\t\t\t  rte_be32_t vlan_tag)\n+{\n+#ifdef HAVE_MLX5DV_DR_VLAN\n+\treturn mlx5dv_dr_action_create_push_vlan(domain, vlan_tag);\n+#else\n+\t(void)domain;\n+\t(void)vlan_tag;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_pop_vlan(void)\n+{\n+#ifdef HAVE_MLX5DV_DR_VLAN\n+\treturn mlx5dv_dr_action_create_pop_vlan();\n+#else\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_tbl(void *domain, uint32_t level)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_table_create(domain, level);\n+#else\n+\t(void)domain;\n+\t(void)level;\n+\terrno = ENOTSUP;\n+\treturn 
NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dr_destroy_flow_tbl(void *tbl)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_table_destroy(tbl);\n+#else\n+\t(void)tbl;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_domain(struct ibv_context *ctx,\n+\t\t\t   enum  mlx5dv_dr_domain_type domain)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_domain_create(ctx, domain);\n+#else\n+\t(void)ctx;\n+\t(void)domain;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dr_destroy_domain(void *domain)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_domain_destroy(domain);\n+#else\n+\t(void)domain;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dr_sync_domain(void *domain, uint32_t flags)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_domain_sync(domain, flags);\n+#else\n+\t(void)domain;\n+\t(void)flags;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static struct ibv_cq_ex *\n+mlx5_glue_dv_create_cq(struct ibv_context *context,\n+\t\t       struct ibv_cq_init_attr_ex *cq_attr,\n+\t\t       struct mlx5dv_cq_init_attr *mlx5_cq_attr)\n+{\n+\treturn mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);\n+}\n+\n+static struct ibv_wq *\n+mlx5_glue_dv_create_wq(struct ibv_context *context,\n+\t\t       struct ibv_wq_init_attr *wq_attr,\n+\t\t       struct mlx5dv_wq_init_attr *mlx5_wq_attr)\n+{\n+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT\n+\t(void)context;\n+\t(void)wq_attr;\n+\t(void)mlx5_wq_attr;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#else\n+\treturn mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dv_query_device(struct ibv_context *ctx,\n+\t\t\t  struct mlx5dv_context *attrs_out)\n+{\n+\treturn mlx5dv_query_device(ctx, attrs_out);\n+}\n+\n+static int\n+mlx5_glue_dv_set_context_attr(struct ibv_context *ibv_ctx,\n+\t\t\t      enum mlx5dv_set_ctx_attr_type type, void *attr)\n+{\n+\treturn mlx5dv_set_context_attr(ibv_ctx, type, attr);\n+}\n+\n+static int\n+mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)\n+{\n+\treturn mlx5dv_init_obj(obj, obj_type);\n+}\n+\n+static struct ibv_qp *\n+mlx5_glue_dv_create_qp(struct ibv_context *context,\n+\t\t       struct ibv_qp_init_attr_ex *qp_init_attr_ex,\n+\t\t       struct mlx5dv_qp_init_attr *dv_qp_init_attr)\n+{\n+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n+\treturn mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr);\n+#else\n+\t(void)context;\n+\t(void)qp_init_attr_ex;\n+\t(void)dv_qp_init_attr;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_matcher(struct ibv_context *context,\n+\t\t\t\t struct mlx5dv_flow_matcher_attr *matcher_attr,\n+\t\t\t\t void *tbl)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\t(void)context;\n+\treturn mlx5dv_dr_matcher_create(tbl, matcher_attr->priority,\n+\t\t\t\t\tmatcher_attr->match_criteria_enable,\n+\t\t\t\t\tmatcher_attr->match_mask);\n+#else\n+\t(void)tbl;\n+\treturn mlx5dv_create_flow_matcher(context, matcher_attr);\n+#endif\n+#else\n+\t(void)context;\n+\t(void)matcher_attr;\n+\t(void)tbl;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow(void *matcher,\n+\t\t\t void *match_value,\n+\t\t\t size_t num_actions,\n+\t\t\t void *actions[])\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_rule_create(matcher, match_value, num_actions,\n+\t\t\t\t     (struct mlx5dv_dr_action **)actions);\n+#else\n+\tsize_t 
i;\n+\tstruct mlx5dv_flow_action_attr actions_attr[8];\n+\n+\tif (num_actions > 8)\n+\t\treturn NULL;\n+\tfor (i = 0; i < num_actions; i++)\n+\t\tactions_attr[i] =\n+\t\t\t*((struct mlx5dv_flow_action_attr *)(actions[i]));\n+\treturn mlx5dv_create_flow(matcher, match_value,\n+\t\t\t\t  num_actions, actions_attr);\n+#endif\n+#else\n+\t(void)matcher;\n+\t(void)match_value;\n+\t(void)num_actions;\n+\t(void)actions;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_counter(void *counter_obj, uint32_t offset)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_action_create_flow_counter(counter_obj, offset);\n+#else\n+\tstruct mlx5dv_flow_action_attr *action;\n+\n+\t(void)offset;\n+\taction = malloc(sizeof(*action));\n+\tif (!action)\n+\t\treturn NULL;\n+\taction->type = MLX5DV_FLOW_ACTION_COUNTERS_DEVX;\n+\taction->obj = counter_obj;\n+\treturn action;\n+#endif\n+#else\n+\t(void)counter_obj;\n+\t(void)offset;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_dest_ibv_qp(void *qp)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_action_create_dest_ibv_qp(qp);\n+#else\n+\tstruct mlx5dv_flow_action_attr *action;\n+\n+\taction = malloc(sizeof(*action));\n+\tif (!action)\n+\t\treturn NULL;\n+\taction->type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;\n+\taction->obj = qp;\n+\treturn action;\n+#endif\n+#else\n+\t(void)qp;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_dest_devx_tir(void *tir)\n+{\n+#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR\n+\treturn mlx5dv_dr_action_create_dest_devx_tir(tir);\n+#else\n+\t(void)tir;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_modify_header\n+\t\t\t\t\t(struct ibv_context *ctx,\n+\t\t\t\t\t enum mlx5dv_flow_table_type ft_type,\n+\t\t\t\t\t void *domain, uint64_t flags,\n+\t\t\t\t\t size_t actions_sz,\n+\t\t\t\t\t uint64_t actions[])\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\t(void)ctx;\n+\t(void)ft_type;\n+\treturn mlx5dv_dr_action_create_modify_header(domain, flags, actions_sz,\n+\t\t\t\t\t\t     (__be64 *)actions);\n+#else\n+\tstruct mlx5dv_flow_action_attr *action;\n+\n+\t(void)domain;\n+\t(void)flags;\n+\taction = malloc(sizeof(*action));\n+\tif (!action)\n+\t\treturn NULL;\n+\taction->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;\n+\taction->action = mlx5dv_create_flow_action_modify_header\n+\t\t(ctx, actions_sz, actions, ft_type);\n+\treturn action;\n+#endif\n+#else\n+\t(void)ctx;\n+\t(void)ft_type;\n+\t(void)domain;\n+\t(void)flags;\n+\t(void)actions_sz;\n+\t(void)actions;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_packet_reformat\n+\t\t(struct ibv_context *ctx,\n+\t\t enum mlx5dv_flow_action_packet_reformat_type reformat_type,\n+\t\t enum mlx5dv_flow_table_type ft_type,\n+\t\t struct mlx5dv_dr_domain *domain,\n+\t\t uint32_t flags, size_t data_sz, void *data)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\t(void)ctx;\n+\t(void)ft_type;\n+\treturn mlx5dv_dr_action_create_packet_reformat(domain, flags,\n+\t\t\t\t\t\t       reformat_type, data_sz,\n+\t\t\t\t\t\t       data);\n+#else\n+\t(void)domain;\n+\t(void)flags;\n+\tstruct mlx5dv_flow_action_attr *action;\n+\n+\taction = malloc(sizeof(*action));\n+\tif (!action)\n+\t\treturn NULL;\n+\taction->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;\n+\taction->action = 
mlx5dv_create_flow_action_packet_reformat\n+\t\t(ctx, data_sz, data, reformat_type, ft_type);\n+\treturn action;\n+#endif\n+#else\n+\t(void)ctx;\n+\t(void)reformat_type;\n+\t(void)ft_type;\n+\t(void)domain;\n+\t(void)flags;\n+\t(void)data_sz;\n+\t(void)data;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_tag(uint32_t tag)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_action_create_tag(tag);\n+#else /* HAVE_MLX5DV_DR */\n+\tstruct mlx5dv_flow_action_attr *action;\n+\n+\taction = malloc(sizeof(*action));\n+\tif (!action)\n+\t\treturn NULL;\n+\taction->type = MLX5DV_FLOW_ACTION_TAG;\n+\taction->tag_value = tag;\n+\treturn action;\n+#endif /* HAVE_MLX5DV_DR */\n+#else /* HAVE_IBV_FLOW_DV_SUPPORT */\n+\t(void)tag;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_meter(struct mlx5dv_dr_flow_meter_attr *attr)\n+{\n+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)\n+\treturn mlx5dv_dr_action_create_flow_meter(attr);\n+#else\n+\t(void)attr;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dv_modify_flow_action_meter(void *action,\n+\t\t\t\t      struct mlx5dv_dr_flow_meter_attr *attr,\n+\t\t\t\t      uint64_t modify_bits)\n+{\n+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)\n+\treturn mlx5dv_dr_action_modify_flow_meter(action, attr, modify_bits);\n+#else\n+\t(void)action;\n+\t(void)attr;\n+\t(void)modify_bits;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dv_create_flow_action_aso(struct mlx5dv_dr_domain *domain,\n+\t\t\t\t    void *aso_obj,\n+\t\t\t\t    uint32_t offset,\n+\t\t\t\t    uint32_t flags,\n+\t\t\t\t    uint8_t return_reg_c)\n+{\n+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_ASO)\n+\treturn mlx5dv_dr_action_create_aso(domain, aso_obj, offset,\n+\t\t\t\t\t   flags, return_reg_c);\n+#else\n+\t(void)domain;\n+\t(void)aso_obj;\n+\t(void)offset;\n+\t(void)flags;\n+\t(void)return_reg_c;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_default_miss(void)\n+{\n+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_DEFAULT_MISS)\n+\treturn mlx5dv_dr_action_create_default_miss();\n+#else\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dv_destroy_flow(void *flow_id)\n+{\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_rule_destroy(flow_id);\n+#else\n+\treturn ibv_destroy_flow(flow_id);\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dv_destroy_flow_matcher(void *matcher)\n+{\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+#ifdef HAVE_MLX5DV_DR\n+\treturn mlx5dv_dr_matcher_destroy(matcher);\n+#else\n+\treturn mlx5dv_destroy_flow_matcher(matcher);\n+#endif\n+#else\n+\t(void)matcher;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static struct ibv_context *\n+mlx5_glue_dv_open_device(struct ibv_device *device)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_open_device(device,\n+\t\t\t\t  &(struct mlx5dv_context_attr){\n+\t\t\t\t\t.flags = MLX5DV_CONTEXT_FLAGS_DEVX,\n+\t\t\t\t  });\n+#else\n+\t(void)device;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static struct mlx5dv_devx_obj *\n+mlx5_glue_devx_obj_create(struct ibv_context *ctx,\n+\t\t\t  const void *in, size_t inlen,\n+\t\t\t  void *out, size_t outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_obj_create(ctx, in, 
inlen, out, outlen);\n+#else\n+\t(void)ctx;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_obj_destroy(struct mlx5dv_devx_obj *obj)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_obj_destroy(obj);\n+#else\n+\t(void)obj;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_obj_query(struct mlx5dv_devx_obj *obj,\n+\t\t\t const void *in, size_t inlen,\n+\t\t\t void *out, size_t outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_obj_query(obj, in, inlen, out, outlen);\n+#else\n+\t(void)obj;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_obj_modify(struct mlx5dv_devx_obj *obj,\n+\t\t\t  const void *in, size_t inlen,\n+\t\t\t  void *out, size_t outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_obj_modify(obj, in, inlen, out, outlen);\n+#else\n+\t(void)obj;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_general_cmd(struct ibv_context *ctx,\n+\t\t\t   const void *in, size_t inlen,\n+\t\t\t   void *out, size_t outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_general_cmd(ctx, in, inlen, out, outlen);\n+#else\n+\t(void)ctx;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static struct mlx5dv_devx_cmd_comp *\n+mlx5_glue_devx_create_cmd_comp(struct ibv_context *ctx)\n+{\n+#ifdef HAVE_IBV_DEVX_ASYNC\n+\treturn mlx5dv_devx_create_cmd_comp(ctx);\n+#else\n+\t(void)ctx;\n+\terrno = -ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void\n+mlx5_glue_devx_destroy_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp)\n+{\n+#ifdef HAVE_IBV_DEVX_ASYNC\n+\tmlx5dv_devx_destroy_cmd_comp(cmd_comp);\n+#else\n+\t(void)cmd_comp;\n+\terrno = -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_obj_query_async(struct mlx5dv_devx_obj *obj, const void *in,\n+\t\t\t       size_t inlen, size_t outlen, uint64_t wr_id,\n+\t\t\t       struct mlx5dv_devx_cmd_comp *cmd_comp)\n+{\n+#ifdef HAVE_IBV_DEVX_ASYNC\n+\treturn mlx5dv_devx_obj_query_async(obj, in, inlen, outlen, wr_id,\n+\t\t\t\t\t   cmd_comp);\n+#else\n+\t(void)obj;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)outlen;\n+\t(void)wr_id;\n+\t(void)cmd_comp;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_get_async_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp,\n+\t\t\t\t  struct mlx5dv_devx_async_cmd_hdr *cmd_resp,\n+\t\t\t\t  size_t cmd_resp_len)\n+{\n+#ifdef HAVE_IBV_DEVX_ASYNC\n+\treturn mlx5dv_devx_get_async_cmd_comp(cmd_comp, cmd_resp,\n+\t\t\t\t\t      cmd_resp_len);\n+#else\n+\t(void)cmd_comp;\n+\t(void)cmd_resp;\n+\t(void)cmd_resp_len;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static struct mlx5dv_devx_umem *\n+mlx5_glue_devx_umem_reg(struct ibv_context *context, void *addr, size_t size,\n+\t\t\tuint32_t access)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_umem_reg(context, addr, size, access);\n+#else\n+\t(void)context;\n+\t(void)addr;\n+\t(void)size;\n+\t(void)access;\n+\terrno = -ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_umem_dereg(struct mlx5dv_devx_umem *dv_devx_umem)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_umem_dereg(dv_devx_umem);\n+#else\n+\t(void)dv_devx_umem;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_qp_query(struct ibv_qp *qp,\n+\t\t\tconst void *in, size_t inlen,\n+\t\t\tvoid *out, size_t 
outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_QP\n+\treturn mlx5dv_devx_qp_query(qp, in, inlen, out, outlen);\n+#else\n+\t(void)qp;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_wq_query(struct ibv_wq *wq, const void *in, size_t inlen,\n+\t\t\tvoid *out, size_t outlen)\n+{\n+#ifdef HAVE_IBV_DEVX_QP\n+\treturn mlx5dv_devx_wq_query(wq, in, inlen, out, outlen);\n+#else\n+\t(void)wq;\n+\t(void)in;\n+\t(void)inlen;\n+\t(void)out;\n+\t(void)outlen;\n+\terrno = ENOTSUP;\n+\treturn errno;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_port_query(struct ibv_context *ctx,\n+\t\t\t  uint32_t port_num,\n+\t\t\t  struct mlx5_port_info *info)\n+{\n+\tint err = 0;\n+\n+\tinfo->query_flags = 0;\n+#ifdef HAVE_MLX5DV_DR_DEVX_PORT_V35\n+\t/* The DevX port query API is implemented (rdma-core v35 and above). */\n+\tstruct mlx5_ib_uapi_query_port devx_port;\n+\n+\tmemset(&devx_port, 0, sizeof(devx_port));\n+\terr = mlx5dv_query_port(ctx, port_num, &devx_port);\n+\tif (err)\n+\t\treturn err;\n+\tif (devx_port.flags & MLX5DV_QUERY_PORT_VPORT_REG_C0) {\n+\t\tinfo->vport_meta_tag = devx_port.reg_c0.value;\n+\t\tinfo->vport_meta_mask = devx_port.reg_c0.mask;\n+\t\tinfo->query_flags |= MLX5_PORT_QUERY_REG_C0;\n+\t}\n+\tif (devx_port.flags & MLX5DV_QUERY_PORT_VPORT) {\n+\t\tinfo->vport_id = devx_port.vport;\n+\t\tinfo->query_flags |= MLX5_PORT_QUERY_VPORT;\n+\t}\n+#else\n+#ifdef HAVE_MLX5DV_DR_DEVX_PORT\n+\t/* The legacy DevX port query API is implemented (prior v35). */\n+\tstruct mlx5dv_devx_port devx_port = {\n+\t\t.comp_mask = MLX5DV_DEVX_PORT_VPORT |\n+\t\t\t     MLX5DV_DEVX_PORT_MATCH_REG_C_0\n+\t};\n+\n+\terr = mlx5dv_query_devx_port(ctx, port_num, &devx_port);\n+\tif (err)\n+\t\treturn err;\n+\tif (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {\n+\t\tinfo->vport_meta_tag = devx_port.reg_c_0.value;\n+\t\tinfo->vport_meta_mask = devx_port.reg_c_0.mask;\n+\t\tinfo->query_flags |= MLX5_PORT_QUERY_REG_C0;\n+\t}\n+\tif (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {\n+\t\tinfo->vport_id = devx_port.vport_num;\n+\t\tinfo->query_flags |= MLX5_PORT_QUERY_VPORT;\n+\t}\n+#else\n+\tRTE_SET_USED(ctx);\n+\tRTE_SET_USED(port_num);\n+#endif /* HAVE_MLX5DV_DR_DEVX_PORT */\n+#endif /* HAVE_MLX5DV_DR_DEVX_PORT_V35 */\n+\treturn err;\n+}\n+\n+static int\n+mlx5_glue_dr_dump_single_rule(FILE *file, void *rule)\n+{\n+#ifdef HAVE_MLX5_DR_FLOW_DUMP_RULE\n+\treturn mlx5dv_dump_dr_rule(file, rule);\n+#else\n+\tRTE_SET_USED(file);\n+\tRTE_SET_USED(rule);\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_dr_dump_domain(FILE *file, void *domain)\n+{\n+#ifdef HAVE_MLX5_DR_FLOW_DUMP\n+\treturn mlx5dv_dump_dr_domain(file, domain);\n+#else\n+\tRTE_SET_USED(file);\n+\tRTE_SET_USED(domain);\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_create_flow_action_sampler\n+\t\t\t(struct mlx5dv_dr_flow_sampler_attr *attr)\n+{\n+#ifdef HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE\n+\treturn mlx5dv_dr_action_create_flow_sampler(attr);\n+#else\n+\t(void)attr;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void *\n+mlx5_glue_dr_action_create_dest_array\n+\t\t\t(void *domain,\n+\t\t\t size_t num_dest,\n+\t\t\t struct mlx5dv_dr_action_dest_attr *dests[])\n+{\n+#ifdef HAVE_MLX5_DR_CREATE_ACTION_DEST_ARRAY\n+\treturn mlx5dv_dr_action_create_dest_array\n+\t\t\t\t(domain,\n+\t\t\t\tnum_dest,\n+\t\t\t\tdests);\n+#else\n+\t(void)domain;\n+\t(void)num_dest;\n+\t(void)dests;\n+\terrno = ENOTSUP;\n+\treturn 
NULL;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_query_eqn(struct ibv_context *ctx, uint32_t cpus,\n+\t\t\t uint32_t *eqn)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_query_eqn(ctx, cpus, eqn);\n+#else\n+\t(void)ctx;\n+\t(void)cpus;\n+\t(void)eqn;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static struct mlx5dv_devx_event_channel *\n+mlx5_glue_devx_create_event_channel(struct ibv_context *ctx, int flags)\n+{\n+#ifdef HAVE_IBV_DEVX_EVENT\n+\treturn mlx5dv_devx_create_event_channel(ctx, flags);\n+#else\n+\t(void)ctx;\n+\t(void)flags;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void\n+mlx5_glue_devx_destroy_event_channel(struct mlx5dv_devx_event_channel *eventc)\n+{\n+#ifdef HAVE_IBV_DEVX_EVENT\n+\tmlx5dv_devx_destroy_event_channel(eventc);\n+#else\n+\t(void)eventc;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_subscribe_devx_event(struct mlx5dv_devx_event_channel *eventc,\n+\t\t\t\t    struct mlx5dv_devx_obj *obj,\n+\t\t\t\t    uint16_t events_sz, uint16_t events_num[],\n+\t\t\t\t    uint64_t cookie)\n+{\n+#ifdef HAVE_IBV_DEVX_EVENT\n+\treturn mlx5dv_devx_subscribe_devx_event(eventc, obj, events_sz,\n+\t\t\t\t\t\tevents_num, cookie);\n+#else\n+\t(void)eventc;\n+\t(void)obj;\n+\t(void)events_sz;\n+\t(void)events_num;\n+\t(void)cookie;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static int\n+mlx5_glue_devx_subscribe_devx_event_fd(struct mlx5dv_devx_event_channel *eventc,\n+\t\t\t\t       int fd, struct mlx5dv_devx_obj *obj,\n+\t\t\t\t       uint16_t event_num)\n+{\n+#ifdef HAVE_IBV_DEVX_EVENT\n+\treturn mlx5dv_devx_subscribe_devx_event_fd(eventc, fd, obj, event_num);\n+#else\n+\t(void)eventc;\n+\t(void)fd;\n+\t(void)obj;\n+\t(void)event_num;\n+\treturn -ENOTSUP;\n+#endif\n+}\n+\n+static ssize_t\n+mlx5_glue_devx_get_event(struct mlx5dv_devx_event_channel *eventc,\n+\t\t\t struct mlx5dv_devx_async_event_hdr *event_data,\n+\t\t\t size_t event_resp_len)\n+{\n+#ifdef HAVE_IBV_DEVX_EVENT\n+\treturn mlx5dv_devx_get_event(eventc, event_data, event_resp_len);\n+#else\n+\t(void)eventc;\n+\t(void)event_data;\n+\t(void)event_resp_len;\n+\terrno = ENOTSUP;\n+\treturn -1;\n+#endif\n+}\n+\n+static struct mlx5dv_devx_uar *\n+mlx5_glue_devx_alloc_uar(struct ibv_context *context, uint32_t flags)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\treturn mlx5dv_devx_alloc_uar(context, flags);\n+#else\n+\t(void)context;\n+\t(void)flags;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void\n+mlx5_glue_devx_free_uar(struct mlx5dv_devx_uar *devx_uar)\n+{\n+#ifdef HAVE_IBV_DEVX_OBJ\n+\tmlx5dv_devx_free_uar(devx_uar);\n+#else\n+\t(void)devx_uar;\n+#endif\n+}\n+\n+static struct mlx5dv_var *\n+mlx5_glue_dv_alloc_var(struct ibv_context *context, uint32_t flags)\n+{\n+#ifdef HAVE_IBV_VAR\n+\treturn mlx5dv_alloc_var(context, flags);\n+#else\n+\t(void)context;\n+\t(void)flags;\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void\n+mlx5_glue_dv_free_var(struct mlx5dv_var *var)\n+{\n+#ifdef HAVE_IBV_VAR\n+\tmlx5dv_free_var(var);\n+#else\n+\t(void)var;\n+\terrno = ENOTSUP;\n+#endif\n+}\n+\n+static void\n+mlx5_glue_dr_reclaim_domain_memory(void *domain, uint32_t enable)\n+{\n+#ifdef HAVE_MLX5DV_DR_MEM_RECLAIM\n+\tmlx5dv_dr_domain_set_reclaim_device_memory(domain, enable);\n+#else\n+\t(void)(enable);\n+\t(void)(domain);\n+#endif\n+}\n+\n+static struct mlx5dv_pp *\n+mlx5_glue_dv_alloc_pp(struct ibv_context *context,\n+\t\t      size_t pp_context_sz,\n+\t\t      const void *pp_context,\n+\t\t      uint32_t flags)\n+{\n+#ifdef HAVE_MLX5DV_PP_ALLOC\n+\treturn mlx5dv_pp_alloc(context, pp_context_sz, 
pp_context, flags);\n+#else\n+\tRTE_SET_USED(context);\n+\tRTE_SET_USED(pp_context_sz);\n+\tRTE_SET_USED(pp_context);\n+\tRTE_SET_USED(flags);\n+\terrno = ENOTSUP;\n+\treturn NULL;\n+#endif\n+}\n+\n+static void\n+mlx5_glue_dr_allow_duplicate_rules(void *domain, uint32_t allow)\n+{\n+#ifdef HAVE_MLX5_DR_ALLOW_DUPLICATE\n+\tmlx5dv_dr_domain_allow_duplicate_rules(domain, allow);\n+#else\n+\t(void)(allow);\n+\t(void)(domain);\n+#endif\n+}\n+\n+static void\n+mlx5_glue_dv_free_pp(struct mlx5dv_pp *pp)\n+{\n+#ifdef HAVE_MLX5DV_PP_ALLOC\n+\tmlx5dv_pp_free(pp);\n+#else\n+\tRTE_SET_USED(pp);\n+#endif\n+}\n+\n+__rte_cache_aligned\n+const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) {\n+\t.version = MLX5_GLUE_VERSION,\n+\t.fork_init = mlx5_glue_fork_init,\n+\t.alloc_pd = mlx5_glue_alloc_pd,\n+\t.dealloc_pd = mlx5_glue_dealloc_pd,\n+\t.get_device_list = mlx5_glue_get_device_list,\n+\t.free_device_list = mlx5_glue_free_device_list,\n+\t.open_device = mlx5_glue_open_device,\n+\t.close_device = mlx5_glue_close_device,\n+\t.query_device = mlx5_glue_query_device,\n+\t.query_device_ex = mlx5_glue_query_device_ex,\n+\t.query_rt_values_ex = mlx5_glue_query_rt_values_ex,\n+\t.query_port = mlx5_glue_query_port,\n+\t.create_comp_channel = mlx5_glue_create_comp_channel,\n+\t.destroy_comp_channel = mlx5_glue_destroy_comp_channel,\n+\t.create_cq = mlx5_glue_create_cq,\n+\t.destroy_cq = mlx5_glue_destroy_cq,\n+\t.get_cq_event = mlx5_glue_get_cq_event,\n+\t.ack_cq_events = mlx5_glue_ack_cq_events,\n+\t.create_rwq_ind_table = mlx5_glue_create_rwq_ind_table,\n+\t.destroy_rwq_ind_table = mlx5_glue_destroy_rwq_ind_table,\n+\t.create_wq = mlx5_glue_create_wq,\n+\t.destroy_wq = mlx5_glue_destroy_wq,\n+\t.modify_wq = mlx5_glue_modify_wq,\n+\t.create_flow = mlx5_glue_create_flow,\n+\t.destroy_flow = mlx5_glue_destroy_flow,\n+\t.destroy_flow_action = mlx5_glue_destroy_flow_action,\n+\t.create_qp = mlx5_glue_create_qp,\n+\t.create_qp_ex = mlx5_glue_create_qp_ex,\n+\t.destroy_qp = mlx5_glue_destroy_qp,\n+\t.modify_qp = mlx5_glue_modify_qp,\n+\t.reg_mr = mlx5_glue_reg_mr,\n+\t.alloc_null_mr = mlx5_glue_alloc_null_mr,\n+\t.dereg_mr = mlx5_glue_dereg_mr,\n+\t.create_counter_set = mlx5_glue_create_counter_set,\n+\t.destroy_counter_set = mlx5_glue_destroy_counter_set,\n+\t.describe_counter_set = mlx5_glue_describe_counter_set,\n+\t.query_counter_set = mlx5_glue_query_counter_set,\n+\t.create_counters = mlx5_glue_create_counters,\n+\t.destroy_counters = mlx5_glue_destroy_counters,\n+\t.attach_counters = mlx5_glue_attach_counters,\n+\t.query_counters = mlx5_glue_query_counters,\n+\t.ack_async_event = mlx5_glue_ack_async_event,\n+\t.get_async_event = mlx5_glue_get_async_event,\n+\t.port_state_str = mlx5_glue_port_state_str,\n+\t.cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,\n+\t.dr_create_flow_action_dest_flow_tbl =\n+\t\tmlx5_glue_dr_create_flow_action_dest_flow_tbl,\n+\t.dr_create_flow_action_dest_port =\n+\t\tmlx5_glue_dr_create_flow_action_dest_port,\n+\t.dr_create_flow_action_drop =\n+\t\tmlx5_glue_dr_create_flow_action_drop,\n+\t.dr_create_flow_action_push_vlan =\n+\t\tmlx5_glue_dr_create_flow_action_push_vlan,\n+\t.dr_create_flow_action_pop_vlan =\n+\t\tmlx5_glue_dr_create_flow_action_pop_vlan,\n+\t.dr_create_flow_tbl = mlx5_glue_dr_create_flow_tbl,\n+\t.dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,\n+\t.dr_create_domain = mlx5_glue_dr_create_domain,\n+\t.dr_destroy_domain = mlx5_glue_dr_destroy_domain,\n+\t.dr_sync_domain = mlx5_glue_dr_sync_domain,\n+\t.dv_create_cq = mlx5_glue_dv_create_cq,\n+\t.dv_create_wq = 
mlx5_glue_dv_create_wq,\n+\t.dv_query_device = mlx5_glue_dv_query_device,\n+\t.dv_set_context_attr = mlx5_glue_dv_set_context_attr,\n+\t.dv_init_obj = mlx5_glue_dv_init_obj,\n+\t.dv_create_qp = mlx5_glue_dv_create_qp,\n+\t.dv_create_flow_matcher = mlx5_glue_dv_create_flow_matcher,\n+\t.dv_create_flow = mlx5_glue_dv_create_flow,\n+\t.dv_create_flow_action_counter =\n+\t\tmlx5_glue_dv_create_flow_action_counter,\n+\t.dv_create_flow_action_dest_ibv_qp =\n+\t\tmlx5_glue_dv_create_flow_action_dest_ibv_qp,\n+\t.dv_create_flow_action_dest_devx_tir =\n+\t\tmlx5_glue_dv_create_flow_action_dest_devx_tir,\n+\t.dv_create_flow_action_modify_header =\n+\t\tmlx5_glue_dv_create_flow_action_modify_header,\n+\t.dv_create_flow_action_packet_reformat =\n+\t\tmlx5_glue_dv_create_flow_action_packet_reformat,\n+\t.dv_create_flow_action_tag =  mlx5_glue_dv_create_flow_action_tag,\n+\t.dv_create_flow_action_meter = mlx5_glue_dv_create_flow_action_meter,\n+\t.dv_modify_flow_action_meter = mlx5_glue_dv_modify_flow_action_meter,\n+\t.dv_create_flow_action_aso = mlx5_glue_dv_create_flow_action_aso,\n+\t.dr_create_flow_action_default_miss =\n+\t\tmlx5_glue_dr_create_flow_action_default_miss,\n+\t.dv_destroy_flow = mlx5_glue_dv_destroy_flow,\n+\t.dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,\n+\t.dv_open_device = mlx5_glue_dv_open_device,\n+\t.devx_obj_create = mlx5_glue_devx_obj_create,\n+\t.devx_obj_destroy = mlx5_glue_devx_obj_destroy,\n+\t.devx_obj_query = mlx5_glue_devx_obj_query,\n+\t.devx_obj_modify = mlx5_glue_devx_obj_modify,\n+\t.devx_general_cmd = mlx5_glue_devx_general_cmd,\n+\t.devx_create_cmd_comp = mlx5_glue_devx_create_cmd_comp,\n+\t.devx_destroy_cmd_comp = mlx5_glue_devx_destroy_cmd_comp,\n+\t.devx_obj_query_async = mlx5_glue_devx_obj_query_async,\n+\t.devx_get_async_cmd_comp = mlx5_glue_devx_get_async_cmd_comp,\n+\t.devx_umem_reg = mlx5_glue_devx_umem_reg,\n+\t.devx_umem_dereg = mlx5_glue_devx_umem_dereg,\n+\t.devx_qp_query = mlx5_glue_devx_qp_query,\n+\t.devx_wq_query = mlx5_glue_devx_wq_query,\n+\t.devx_port_query = mlx5_glue_devx_port_query,\n+\t.dr_dump_domain = mlx5_glue_dr_dump_domain,\n+\t.dr_dump_rule = mlx5_glue_dr_dump_single_rule,\n+\t.dr_reclaim_domain_memory = mlx5_glue_dr_reclaim_domain_memory,\n+\t.dr_create_flow_action_sampler =\n+\t\tmlx5_glue_dr_create_flow_action_sampler,\n+\t.dr_create_flow_action_dest_array =\n+\t\tmlx5_glue_dr_action_create_dest_array,\n+\t.dr_allow_duplicate_rules = mlx5_glue_dr_allow_duplicate_rules,\n+\t.devx_query_eqn = mlx5_glue_devx_query_eqn,\n+\t.devx_create_event_channel = mlx5_glue_devx_create_event_channel,\n+\t.devx_destroy_event_channel = mlx5_glue_devx_destroy_event_channel,\n+\t.devx_subscribe_devx_event = mlx5_glue_devx_subscribe_devx_event,\n+\t.devx_subscribe_devx_event_fd = mlx5_glue_devx_subscribe_devx_event_fd,\n+\t.devx_get_event = mlx5_glue_devx_get_event,\n+\t.devx_alloc_uar = mlx5_glue_devx_alloc_uar,\n+\t.devx_free_uar = mlx5_glue_devx_free_uar,\n+\t.dv_alloc_var = mlx5_glue_dv_alloc_var,\n+\t.dv_free_var = mlx5_glue_dv_free_var,\n+\t.dv_alloc_pp = mlx5_glue_dv_alloc_pp,\n+\t.dv_free_pp = mlx5_glue_dv_free_pp,\n+};\ndiff --git a/drivers/common/mlx5/freebsd/mlx5_glue.h b/drivers/common/mlx5/freebsd/mlx5_glue.h\nnew file mode 100644\nindex 0000000000..9f12a5d71e\n--- /dev/null\n+++ b/drivers/common/mlx5/freebsd/mlx5_glue.h\n@@ -0,0 +1,374 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2018 6WIND S.A.\n+ * Copyright 2018 Mellanox Technologies, Ltd\n+ */\n+\n+#ifndef MLX5_GLUE_H_\n+#define 
MLX5_GLUE_H_\n+\n+#include <stdio.h>\n+#include <stddef.h>\n+#include <stdint.h>\n+/* Verbs headers do not support -pedantic. */\n+#ifdef PEDANTIC\n+#pragma GCC diagnostic ignored \"-Wpedantic\"\n+#endif\n+#include <infiniband/mlx5dv.h>\n+#include <infiniband/verbs.h>\n+#ifdef PEDANTIC\n+#pragma GCC diagnostic error \"-Wpedantic\"\n+#endif\n+\n+#include <rte_byteorder.h>\n+\n+#include \"mlx5_autoconf.h\"\n+\n+#ifndef MLX5_GLUE_VERSION\n+#define MLX5_GLUE_VERSION \"\"\n+#endif\n+\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42\n+struct ibv_counter_set;\n+struct ibv_counter_set_data;\n+struct ibv_counter_set_description;\n+struct ibv_counter_set_init_attr;\n+struct ibv_query_counter_set_attr;\n+#endif\n+\n+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45\n+struct ibv_counters;\n+struct ibv_counters_init_attr;\n+struct ibv_counter_attach_attr;\n+#endif\n+\n+#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n+struct mlx5dv_qp_init_attr;\n+#endif\n+\n+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT\n+struct mlx5dv_wq_init_attr;\n+#endif\n+\n+#ifndef HAVE_IBV_FLOW_DV_SUPPORT\n+struct mlx5dv_flow_matcher;\n+struct mlx5dv_flow_matcher_attr;\n+struct mlx5dv_flow_action_attr;\n+struct mlx5dv_flow_match_parameters;\n+struct mlx5dv_dr_flow_meter_attr;\n+struct ibv_flow_action;\n+enum mlx5dv_flow_action_packet_reformat_type { packet_reformat_type = 0, };\n+enum mlx5dv_flow_table_type { flow_table_type = 0, };\n+#endif\n+\n+#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS\n+#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0\n+#endif\n+\n+#ifndef HAVE_IBV_DEVX_OBJ\n+struct mlx5dv_devx_obj;\n+struct mlx5dv_devx_umem { uint32_t umem_id; };\n+struct mlx5dv_devx_uar { void *reg_addr; void *base_addr; uint32_t page_id; };\n+#endif\n+\n+#ifndef HAVE_IBV_DEVX_ASYNC\n+struct mlx5dv_devx_cmd_comp;\n+struct mlx5dv_devx_async_cmd_hdr;\n+#endif\n+\n+#ifndef HAVE_MLX5DV_DR\n+enum  mlx5dv_dr_domain_type { unused, };\n+struct mlx5dv_dr_domain;\n+struct mlx5dv_dr_action;\n+#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1\n+#endif\n+\n+#ifndef HAVE_MLX5DV_DR_DEVX_PORT\n+struct mlx5dv_devx_port;\n+#endif\n+\n+#ifndef HAVE_MLX5DV_DR_DEVX_PORT_V35\n+struct mlx5dv_port;\n+#endif\n+\n+#define MLX5_PORT_QUERY_VPORT (1u << 0)\n+#define MLX5_PORT_QUERY_REG_C0 (1u << 1)\n+\n+struct mlx5_port_info {\n+\tuint16_t query_flags;\n+\tuint16_t vport_id; /* Associated VF vport index (if any). */\n+\tuint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */\n+\tuint32_t vport_meta_mask; /* Used for vport index field match mask. 
*/\n+};\n+\n+#ifndef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER\n+struct mlx5dv_dr_flow_meter_attr;\n+#endif\n+\n+#ifndef HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE\n+struct mlx5dv_dr_flow_sampler_attr {\n+\tuint32_t sample_ratio;\n+\tvoid *default_next_table;\n+\tsize_t num_sample_actions;\n+\tstruct mlx5dv_dr_action **sample_actions;\n+\tuint64_t action;\n+};\n+#endif\n+\n+#ifndef HAVE_MLX5_DR_CREATE_ACTION_DEST_ARRAY\n+enum mlx5dv_dr_action_dest_type {\n+\tMLX5DV_DR_ACTION_DEST,\n+\tMLX5DV_DR_ACTION_DEST_REFORMAT,\n+};\n+struct mlx5dv_dr_action_dest_reformat {\n+\tstruct mlx5dv_dr_action *reformat;\n+\tstruct mlx5dv_dr_action *dest;\n+};\n+struct mlx5dv_dr_action_dest_attr {\n+\tenum mlx5dv_dr_action_dest_type type;\n+\tunion {\n+\t\tstruct mlx5dv_dr_action *dest;\n+\t\tstruct mlx5dv_dr_action_dest_reformat *dest_reformat;\n+\t};\n+};\n+#endif\n+\n+#ifndef HAVE_IBV_DEVX_EVENT\n+struct mlx5dv_devx_event_channel { int fd; };\n+struct mlx5dv_devx_async_event_hdr;\n+#define MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA 1\n+#endif\n+\n+#ifndef HAVE_IBV_VAR\n+struct mlx5dv_var { uint32_t page_id; uint32_t length; off_t mmap_off;\n+\t\t\tuint64_t comp_mask; };\n+#endif\n+\n+#ifndef HAVE_IBV_RELAXED_ORDERING\n+#define IBV_ACCESS_RELAXED_ORDERING 0\n+#endif\n+\n+struct mlx5_glue {\n+\tconst char *version;\n+\tint (*fork_init)(void);\n+\tstruct ibv_pd *(*alloc_pd)(struct ibv_context *context);\n+\tint (*dealloc_pd)(struct ibv_pd *pd);\n+\tstruct ibv_device **(*get_device_list)(int *num_devices);\n+\tvoid (*free_device_list)(struct ibv_device **list);\n+\tstruct ibv_context *(*open_device)(struct ibv_device *device);\n+\tint (*close_device)(struct ibv_context *context);\n+\tint (*query_device)(struct ibv_context *context,\n+\t\t\t    struct ibv_device_attr *device_attr);\n+\tint (*query_device_ex)(struct ibv_context *context,\n+\t\t\t       const struct ibv_query_device_ex_input *input,\n+\t\t\t       struct ibv_device_attr_ex *attr);\n+\tint (*query_rt_values_ex)(struct ibv_context *context,\n+\t\t\t       struct ibv_values_ex *values);\n+\tint (*query_port)(struct ibv_context *context, uint8_t port_num,\n+\t\t\t  struct ibv_port_attr *port_attr);\n+\tstruct ibv_comp_channel *(*create_comp_channel)\n+\t\t(struct ibv_context *context);\n+\tint (*destroy_comp_channel)(struct ibv_comp_channel *channel);\n+\tstruct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,\n+\t\t\t\t    void *cq_context,\n+\t\t\t\t    struct ibv_comp_channel *channel,\n+\t\t\t\t    int comp_vector);\n+\tint (*destroy_cq)(struct ibv_cq *cq);\n+\tint (*get_cq_event)(struct ibv_comp_channel *channel,\n+\t\t\t    struct ibv_cq **cq, void **cq_context);\n+\tvoid (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);\n+\tstruct ibv_rwq_ind_table *(*create_rwq_ind_table)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_rwq_ind_table_init_attr *init_attr);\n+\tint (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);\n+\tstruct ibv_wq *(*create_wq)(struct ibv_context *context,\n+\t\t\t\t    struct ibv_wq_init_attr *wq_init_attr);\n+\tint (*destroy_wq)(struct ibv_wq *wq);\n+\tint (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);\n+\tstruct ibv_flow *(*create_flow)(struct ibv_qp *qp,\n+\t\t\t\t\tstruct ibv_flow_attr *flow);\n+\tint (*destroy_flow)(struct ibv_flow *flow_id);\n+\tint (*destroy_flow_action)(void *action);\n+\tstruct ibv_qp *(*create_qp)(struct ibv_pd *pd,\n+\t\t\t\t    struct ibv_qp_init_attr *qp_init_attr);\n+\tstruct ibv_qp *(*create_qp_ex)\n+\t\t(struct ibv_context 
*context,\n+\t\t struct ibv_qp_init_attr_ex *qp_init_attr_ex);\n+\tint (*destroy_qp)(struct ibv_qp *qp);\n+\tint (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,\n+\t\t\t int attr_mask);\n+\tstruct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,\n+\t\t\t\t size_t length, int access);\n+\tstruct ibv_mr *(*alloc_null_mr)(struct ibv_pd *pd);\n+\tint (*dereg_mr)(struct ibv_mr *mr);\n+\tstruct ibv_counter_set *(*create_counter_set)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_counter_set_init_attr *init_attr);\n+\tint (*destroy_counter_set)(struct ibv_counter_set *cs);\n+\tint (*describe_counter_set)\n+\t\t(struct ibv_context *context,\n+\t\t uint16_t counter_set_id,\n+\t\t struct ibv_counter_set_description *cs_desc);\n+\tint (*query_counter_set)(struct ibv_query_counter_set_attr *query_attr,\n+\t\t\t\t struct ibv_counter_set_data *cs_data);\n+\tstruct ibv_counters *(*create_counters)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_counters_init_attr *init_attr);\n+\tint (*destroy_counters)(struct ibv_counters *counters);\n+\tint (*attach_counters)(struct ibv_counters *counters,\n+\t\t\t       struct ibv_counter_attach_attr *attr,\n+\t\t\t       struct ibv_flow *flow);\n+\tint (*query_counters)(struct ibv_counters *counters,\n+\t\t\t      uint64_t *counters_value,\n+\t\t\t      uint32_t ncounters,\n+\t\t\t      uint32_t flags);\n+\tvoid (*ack_async_event)(struct ibv_async_event *event);\n+\tint (*get_async_event)(struct ibv_context *context,\n+\t\t\t       struct ibv_async_event *event);\n+\tconst char *(*port_state_str)(enum ibv_port_state port_state);\n+\tstruct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);\n+\tvoid *(*dr_create_flow_action_dest_flow_tbl)(void *tbl);\n+\tvoid *(*dr_create_flow_action_dest_port)(void *domain,\n+\t\t\t\t\t\t uint32_t port);\n+\tvoid *(*dr_create_flow_action_drop)(void);\n+\tvoid *(*dr_create_flow_action_push_vlan)\n+\t\t\t\t\t(struct mlx5dv_dr_domain *domain,\n+\t\t\t\t\t rte_be32_t vlan_tag);\n+\tvoid *(*dr_create_flow_action_pop_vlan)(void);\n+\tvoid *(*dr_create_flow_tbl)(void *domain, uint32_t level);\n+\tint (*dr_destroy_flow_tbl)(void *tbl);\n+\tvoid *(*dr_create_domain)(struct ibv_context *ctx,\n+\t\t\t\t  enum mlx5dv_dr_domain_type domain);\n+\tint (*dr_destroy_domain)(void *domain);\n+\tint (*dr_sync_domain)(void *domain, uint32_t flags);\n+\tstruct ibv_cq_ex *(*dv_create_cq)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_cq_init_attr_ex *cq_attr,\n+\t\t struct mlx5dv_cq_init_attr *mlx5_cq_attr);\n+\tstruct ibv_wq *(*dv_create_wq)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_wq_init_attr *wq_attr,\n+\t\t struct mlx5dv_wq_init_attr *mlx5_wq_attr);\n+\tint (*dv_query_device)(struct ibv_context *ctx_in,\n+\t\t\t       struct mlx5dv_context *attrs_out);\n+\tint (*dv_set_context_attr)(struct ibv_context *ibv_ctx,\n+\t\t\t\t   enum mlx5dv_set_ctx_attr_type type,\n+\t\t\t\t   void *attr);\n+\tint (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);\n+\tstruct ibv_qp *(*dv_create_qp)\n+\t\t(struct ibv_context *context,\n+\t\t struct ibv_qp_init_attr_ex *qp_init_attr_ex,\n+\t\t struct mlx5dv_qp_init_attr *dv_qp_init_attr);\n+\tvoid *(*dv_create_flow_matcher)\n+\t\t(struct ibv_context *context,\n+\t\t struct mlx5dv_flow_matcher_attr *matcher_attr,\n+\t\t void *tbl);\n+\tvoid *(*dv_create_flow)(void *matcher, void *match_value,\n+\t\t\t  size_t num_actions, void *actions[]);\n+\tvoid *(*dv_create_flow_action_counter)(void *obj, uint32_t  offset);\n+\tvoid *(*dv_create_flow_action_dest_ibv_qp)(void *qp);\n+\tvoid 
*(*dv_create_flow_action_dest_devx_tir)(void *tir);\n+\tvoid *(*dv_create_flow_action_modify_header)\n+\t\t(struct ibv_context *ctx, enum mlx5dv_flow_table_type ft_type,\n+\t\t void *domain, uint64_t flags, size_t actions_sz,\n+\t\t uint64_t actions[]);\n+\tvoid *(*dv_create_flow_action_packet_reformat)\n+\t\t(struct ibv_context *ctx,\n+\t\t enum mlx5dv_flow_action_packet_reformat_type reformat_type,\n+\t\t enum mlx5dv_flow_table_type ft_type,\n+\t\t struct mlx5dv_dr_domain *domain,\n+\t\t uint32_t flags, size_t data_sz, void *data);\n+\tvoid *(*dv_create_flow_action_tag)(uint32_t tag);\n+\tvoid *(*dv_create_flow_action_meter)\n+\t\t(struct mlx5dv_dr_flow_meter_attr *attr);\n+\tint (*dv_modify_flow_action_meter)(void *action,\n+\t\tstruct mlx5dv_dr_flow_meter_attr *attr, uint64_t modify_bits);\n+\tvoid *(*dr_create_flow_action_default_miss)(void);\n+\tint (*dv_destroy_flow)(void *flow);\n+\tint (*dv_destroy_flow_matcher)(void *matcher);\n+\tstruct ibv_context *(*dv_open_device)(struct ibv_device *device);\n+\tstruct mlx5dv_var *(*dv_alloc_var)(struct ibv_context *context,\n+\t\t\t\t\t   uint32_t flags);\n+\tvoid (*dv_free_var)(struct mlx5dv_var *var);\n+\tstruct mlx5dv_devx_uar *(*devx_alloc_uar)(struct ibv_context *context,\n+\t\t\t\t\t\t  uint32_t flags);\n+\tvoid (*devx_free_uar)(struct mlx5dv_devx_uar *devx_uar);\n+\tstruct mlx5dv_devx_obj *(*devx_obj_create)\n+\t\t\t\t\t(struct ibv_context *ctx,\n+\t\t\t\t\t const void *in, size_t inlen,\n+\t\t\t\t\t void *out, size_t outlen);\n+\tint (*devx_obj_destroy)(struct mlx5dv_devx_obj *obj);\n+\tint (*devx_obj_query)(struct mlx5dv_devx_obj *obj,\n+\t\t\t      const void *in, size_t inlen,\n+\t\t\t      void *out, size_t outlen);\n+\tint (*devx_obj_modify)(struct mlx5dv_devx_obj *obj,\n+\t\t\t       const void *in, size_t inlen,\n+\t\t\t       void *out, size_t outlen);\n+\tint (*devx_general_cmd)(struct ibv_context *context,\n+\t\t\t\tconst void *in, size_t inlen,\n+\t\t\t\tvoid *out, size_t outlen);\n+\tstruct mlx5dv_devx_cmd_comp *(*devx_create_cmd_comp)\n+\t\t\t\t\t(struct ibv_context *context);\n+\tvoid (*devx_destroy_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp);\n+\tint (*devx_obj_query_async)(struct mlx5dv_devx_obj *obj,\n+\t\t\t\t    const void *in, size_t inlen,\n+\t\t\t\t    size_t outlen, uint64_t wr_id,\n+\t\t\t\t    struct mlx5dv_devx_cmd_comp *cmd_comp);\n+\tint (*devx_get_async_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp,\n+\t\t\t\t       struct mlx5dv_devx_async_cmd_hdr *resp,\n+\t\t\t\t       size_t cmd_resp_len);\n+\tstruct mlx5dv_devx_umem *(*devx_umem_reg)(struct ibv_context *context,\n+\t\t\t\t\t\t  void *addr, size_t size,\n+\t\t\t\t\t\t  uint32_t access);\n+\tint (*devx_umem_dereg)(struct mlx5dv_devx_umem *dv_devx_umem);\n+\tint (*devx_qp_query)(struct ibv_qp *qp,\n+\t\t\t     const void *in, size_t inlen,\n+\t\t\t     void *out, size_t outlen);\n+\tint (*devx_wq_query)(struct ibv_wq *wq, const void *in, size_t inlen,\n+\t\t\t     void *out, size_t outlen);\n+\tint (*devx_port_query)(struct ibv_context *ctx,\n+\t\t\t       uint32_t port_num,\n+\t\t\t       struct mlx5_port_info *info);\n+\tint (*dr_dump_domain)(FILE *file, void *domain);\n+\tint (*dr_dump_rule)(FILE *file, void *rule);\n+\tint (*devx_query_eqn)(struct ibv_context *context, uint32_t cpus,\n+\t\t\t      uint32_t *eqn);\n+\tstruct mlx5dv_devx_event_channel *(*devx_create_event_channel)\n+\t\t\t\t(struct ibv_context *context, int flags);\n+\tvoid (*devx_destroy_event_channel)\n+\t\t\t(struct mlx5dv_devx_event_channel *event_channel);\n+\tint 
(*devx_subscribe_devx_event)\n+\t\t\t(struct mlx5dv_devx_event_channel *event_channel,\n+\t\t\t struct mlx5dv_devx_obj *obj,\n+\t\t\t uint16_t events_sz,\n+\t\t\t uint16_t events_num[],\n+\t\t\t uint64_t cookie);\n+\tint (*devx_subscribe_devx_event_fd)\n+\t\t\t(struct mlx5dv_devx_event_channel *event_channel,\n+\t\t\t int fd,\n+\t\t\t struct mlx5dv_devx_obj *obj,\n+\t\t\t uint16_t event_num);\n+\tssize_t (*devx_get_event)\n+\t\t\t(struct mlx5dv_devx_event_channel *event_channel,\n+\t\t\t struct mlx5dv_devx_async_event_hdr *event_data,\n+\t\t\t size_t event_resp_len);\n+\tvoid (*dr_reclaim_domain_memory)(void *domain, uint32_t enable);\n+\tvoid (*dr_allow_duplicate_rules)(void *domain, uint32_t allow);\n+\tstruct mlx5dv_pp *(*dv_alloc_pp)(struct ibv_context *context,\n+\t\t\t\t\t size_t pp_context_sz,\n+\t\t\t\t\t const void *pp_context,\n+\t\t\t\t\t uint32_t flags);\n+\tvoid (*dv_free_pp)(struct mlx5dv_pp *pp);\n+\tvoid *(*dr_create_flow_action_sampler)\n+\t\t\t(struct mlx5dv_dr_flow_sampler_attr *attr);\n+\tvoid *(*dr_create_flow_action_dest_array)\n+\t\t\t(void *domain,\n+\t\t\t size_t num_dest,\n+\t\t\t struct mlx5dv_dr_action_dest_attr *dests[]);\n+\tvoid *(*dv_create_flow_action_aso)\n+\t\t\t(struct mlx5dv_dr_domain *domain, void *aso_obj,\n+\t\t\t uint32_t offset, uint32_t flags, uint8_t return_reg_c);\n+};\n+\n+extern const struct mlx5_glue *mlx5_glue;\n+\n+#endif /* MLX5_GLUE_H_ */\n",
    "prefixes": [
        "v2",
        "01/41"
    ]
}
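
As a complementary sketch, updating this patch through the same endpoint with PATCH (partial update). The Allow header above confirms PATCH is permitted, but the writable fields ("state", "archived"), the need for a maintainer API token on the DPDK project, and the example state value are assumptions; the token is a placeholder.

import requests

# Change the patch state and archive it (hypothetical values; assumes
# maintainer permissions and a valid API token).
url = "http://patches.dpdk.org/api/patches/100769/"
headers = {"Authorization": "Token <your-api-token>"}

resp = requests.patch(url, headers=headers,
                      json={"state": "accepted", "archived": True},
                      timeout=30)
resp.raise_for_status()
print(resp.json()["state"])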