get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch.

GET /api/patches/56852/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 56852,
    "url": "https://patches.dpdk.org/api/patches/56852/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1563786795-14027-22-git-send-email-matan@mellanox.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1563786795-14027-22-git-send-email-matan@mellanox.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1563786795-14027-22-git-send-email-matan@mellanox.com",
    "date": "2019-07-22T09:13:08",
    "name": "[21/28] net/mlx5: create advanced RxQ using new API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f65b6e1626b0628f0ba865586caf36c43d8ce491",
    "submitter": {
        "id": 796,
        "url": "https://patches.dpdk.org/api/people/796/?format=api",
        "name": "Matan Azrad",
        "email": "matan@mellanox.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1563786795-14027-22-git-send-email-matan@mellanox.com/mbox/",
    "series": [
        {
            "id": 5639,
            "url": "https://patches.dpdk.org/api/series/5639/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=5639",
            "date": "2019-07-22T09:12:48",
            "name": "net/mlx5: support LRO",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/5639/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/56852/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/56852/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 18C1B1BE9B;\n\tMon, 22 Jul 2019 11:14:30 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n\tby dpdk.org (Postfix) with ESMTP id 262401BDF1\n\tfor <dev@dpdk.org>; Mon, 22 Jul 2019 11:13:31 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE2 (envelope-from\n\tmatan@mellanox.com)\n\twith ESMTPS (AES256-SHA encrypted); 22 Jul 2019 12:13:24 +0300",
            "from pegasus07.mtr.labs.mlnx (pegasus07.mtr.labs.mlnx\n\t[10.210.16.112])\n\tby labmailer.mlnx (8.13.8/8.13.8) with ESMTP id x6M9DMjk010084;\n\tMon, 22 Jul 2019 12:13:24 +0300"
        ],
        "From": "Matan Azrad <matan@mellanox.com>",
        "To": "Shahaf Shuler <shahafs@mellanox.com>, Yongseok Koh <yskoh@mellanox.com>, \n\tViacheslav Ovsiienko <viacheslavo@mellanox.com>",
        "Cc": "dev@dpdk.org, Dekel Peled <dekelp@mellanox.com>",
        "Date": "Mon, 22 Jul 2019 09:13:08 +0000",
        "Message-Id": "<1563786795-14027-22-git-send-email-matan@mellanox.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1563786795-14027-1-git-send-email-matan@mellanox.com>",
        "References": "<1563786795-14027-1-git-send-email-matan@mellanox.com>",
        "Subject": "[dpdk-dev] [PATCH 21/28] net/mlx5: create advanced RxQ using new API",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Dekel Peled <dekelp@mellanox.com>\n\nFunction mlx5_rxq_obj_new(), previously called mlx5_rxq_ibv_new(),\nsupports creating Rx queue objects using verbs.\nThis patch expands the relevant functions, to support creating\nverbs or DevX Rx queue objects:\nFunction mlx5_rxq_obj_new() updated to create RQ object using DevX.\nFunction mlx5_ind_table_obj_new() updated to create RQT object using DevX.\nFunction mlx5_hrxq_new() updated to create TIR object using DevX.\nNew utility functions added to perform specific operations:\nmlx5_devx_rq_new(),  mlx5_devx_wq_attr_fill(),\nmlx5_devx_create_rq_attr_fill().\n\nSigned-off-by: Dekel Peled <dekelp@mellanox.com>\nAcked-by: Matan Azrad <matan@mellanox.com>\n---\n drivers/net/mlx5/mlx5_glue.h    |   2 +-\n drivers/net/mlx5/mlx5_rxq.c     | 550 +++++++++++++++++++++++++++++++---------\n drivers/net/mlx5/mlx5_rxtx.h    |   9 +-\n drivers/net/mlx5/mlx5_trigger.c |   3 +-\n drivers/net/mlx5/mlx5_vlan.c    |  30 ++-\n 5 files changed, 456 insertions(+), 138 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_glue.h b/drivers/net/mlx5/mlx5_glue.h\nindex f8e2b9a..6b5dadf 100644\n--- a/drivers/net/mlx5/mlx5_glue.h\n+++ b/drivers/net/mlx5/mlx5_glue.h\n@@ -61,7 +61,7 @@\n \n #ifndef HAVE_IBV_DEVX_OBJ\n struct mlx5dv_devx_obj;\n-struct mlx5dv_devx_umem;\n+struct mlx5dv_devx_umem { uint32_t umem_id; };\n #endif\n \n #ifndef HAVE_IBV_DEVX_ASYNC\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 9d859df..9712db4 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -561,6 +561,23 @@\n }\n \n /**\n+ * Release the resources allocated for an RQ DevX object.\n+ *\n+ * @param rxq_ctrl\n+ *   DevX Rx queue object.\n+ */\n+static void\n+rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)\n+{\n+\tif (rxq_ctrl->rxq.wqes) {\n+\t\trte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);\n+\t\trxq_ctrl->rxq.wqes = NULL;\n+\t}\n+\tif (rxq_ctrl->wq_umem)\n+\t\tmlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);\n+}\n+\n+/**\n  * Release an Rx verbs/DevX queue object.\n  *\n  * @param rxq_obj\n@@ -573,11 +590,17 @@\n mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)\n {\n \tassert(rxq_obj);\n-\tassert(rxq_obj->wq);\n+\tif (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV)\n+\t\tassert(rxq_obj->wq);\n \tassert(rxq_obj->cq);\n \tif (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {\n \t\trxq_free_elts(rxq_obj->rxq_ctrl);\n-\t\tclaim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));\n+\t\tif (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {\n+\t\t\tclaim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));\n+\t\t} else if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {\n+\t\t\tclaim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));\n+\t\t\trxq_release_rq_resources(rxq_obj->rxq_ctrl);\n+\t\t}\n \t\tclaim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));\n \t\tif (rxq_obj->channel)\n \t\t\tclaim_zero(mlx5_glue->destroy_comp_channel\n@@ -1000,18 +1023,147 @@\n }\n \n /**\n+ * Fill common fields of create RQ attributes structure.\n+ *\n+ * @param rxq_data\n+ * 
  Pointer to Rx queue data.\n+ * @param cqn\n+ *   CQ number to use with this RQ.\n+ * @param rq_attr\n+ *   RQ attributes structure to fill..\n+ */\n+static void\n+mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,\n+\t\t\t      struct mlx5_devx_create_rq_attr *rq_attr)\n+{\n+\trq_attr->state = MLX5_RQC_STATE_RST;\n+\trq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;\n+\trq_attr->cqn = cqn;\n+\trq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;\n+}\n+\n+/**\n+ * Fill common fields of DevX WQ attributes structure.\n+ *\n+ * @param priv\n+ *   Pointer to device private data.\n+ * @param rxq_ctrl\n+ *   Pointer to Rx queue control structure.\n+ * @param wq_attr\n+ *   WQ attributes structure to fill..\n+ */\n+static void\n+mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,\n+\t\t       struct mlx5_devx_wq_attr *wq_attr)\n+{\n+\twq_attr->end_padding_mode = priv->config.cqe_pad ?\n+\t\t\t\t\tMLX5_WQ_END_PAD_MODE_ALIGN :\n+\t\t\t\t\tMLX5_WQ_END_PAD_MODE_NONE;\n+\twq_attr->pd = priv->sh->pdn;\n+\twq_attr->dbr_addr = rxq_ctrl->dbr_offset;\n+\twq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;\n+\twq_attr->dbr_umem_valid = 1;\n+\twq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;\n+\twq_attr->wq_umem_valid = 1;\n+}\n+\n+/**\n+ * Create a RQ object using DevX.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param idx\n+ *   Queue index in DPDK Rx queue array\n+ * @param cqn\n+ *   CQ number to use with this RQ.\n+ *\n+ * @return\n+ *   The DevX object initialised, NULL otherwise and rte_errno is set.\n+ */\n+static struct mlx5_devx_obj *\n+mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n+\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\tstruct mlx5_devx_create_rq_attr rq_attr;\n+\tuint32_t wqe_n = 1 << rxq_data->elts_n;\n+\tuint32_t 
wq_size = 0;\n+\tuint32_t wqe_size = 0;\n+\tuint32_t log_wqe_size = 0;\n+\tvoid *buf = NULL;\n+\tstruct mlx5_devx_obj *rq;\n+\n+\tmemset(&rq_attr, 0, sizeof(rq_attr));\n+\t/* Fill RQ attributes. */\n+\trq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;\n+\trq_attr.flush_in_error_en = 1;\n+\tmlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);\n+\t/* Fill WQ attributes for this RQ. */\n+\tif (mlx5_rxq_mprq_enabled(rxq_data)) {\n+\t\trq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;\n+\t\t/*\n+\t\t * Number of strides in each WQE:\n+\t\t * 512*2^single_wqe_log_num_of_strides.\n+\t\t */\n+\t\trq_attr.wq_attr.single_wqe_log_num_of_strides =\n+\t\t\t\trxq_data->strd_num_n -\n+\t\t\t\tMLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;\n+\t\t/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */\n+\t\trq_attr.wq_attr.single_stride_log_num_of_bytes =\n+\t\t\t\trxq_data->strd_sz_n -\n+\t\t\t\tMLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;\n+\t\twqe_size = sizeof(struct mlx5_wqe_mprq);\n+\t} else {\n+\t\tint max_sge = 0;\n+\t\tint num_scatter = 0;\n+\n+\t\trq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;\n+\t\tmax_sge = 1 << rxq_data->sges_n;\n+\t\tnum_scatter = RTE_MAX(max_sge, 1);\n+\t\twqe_size = sizeof(struct mlx5_wqe_data_seg) * num_scatter;\n+\t}\n+\tlog_wqe_size = log2above(wqe_size);\n+\trq_attr.wq_attr.log_wq_stride = log_wqe_size;\n+\trq_attr.wq_attr.log_wq_sz = rxq_data->elts_n;\n+\t/* Calculate and allocate WQ memory space. 
*/\n+\twqe_size = 1 << log_wqe_size; /* round up power of two.*/\n+\twq_size = wqe_n * wqe_size;\n+\tbuf = rte_calloc_socket(__func__, 1, wq_size, RTE_CACHE_LINE_SIZE,\n+\t\t\t\trxq_ctrl->socket);\n+\tif (!buf)\n+\t\treturn NULL;\n+\trxq_data->wqes = buf;\n+\trxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,\n+\t\t\t\t\t\t     buf, wq_size, 0);\n+\tif (!rxq_ctrl->wq_umem) {\n+\t\trte_free(buf);\n+\t\treturn NULL;\n+\t}\n+\tmlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);\n+\trq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);\n+\tif (!rq)\n+\t\trxq_release_rq_resources(rxq_ctrl);\n+\treturn rq;\n+}\n+\n+/**\n  * Create the Rx queue Verbs/DevX object.\n  *\n  * @param dev\n  *   Pointer to Ethernet device.\n  * @param idx\n  *   Queue index in DPDK Rx queue array\n+ * @param type\n+ *   Type of Rx queue object to create.\n  *\n  * @return\n  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.\n  */\n struct mlx5_rxq_obj *\n-mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx)\n+mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,\n+\t\t enum mlx5_rxq_obj_type type)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];\n@@ -1039,6 +1191,7 @@ struct mlx5_rxq_obj *\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n+\ttmpl->type = type;\n \ttmpl->rxq_ctrl = rxq_ctrl;\n \tif (rxq_ctrl->irq) {\n \t\ttmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);\n@@ -1060,35 +1213,9 @@ struct mlx5_rxq_obj *\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\tDRV_LOG(DEBUG, \"port %u device_attr.max_qp_wr is %d\",\n-\t\tdev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);\n-\tDRV_LOG(DEBUG, \"port %u device_attr.max_sge is %d\",\n-\t\tdev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);\n-\ttmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n, tmpl);\n-\tif (!tmpl->wq) {\n-\t\tDRV_LOG(ERR, \"port %u Rx queue %u WQ 
creation failure\",\n-\t\t\tdev->data->port_id, idx);\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error;\n-\t}\n-\t/* Change queue state to ready. */\n-\tmod = (struct ibv_wq_attr){\n-\t\t.attr_mask = IBV_WQ_ATTR_STATE,\n-\t\t.wq_state = IBV_WQS_RDY,\n-\t};\n-\tret = mlx5_glue->modify_wq(tmpl->wq, &mod);\n-\tif (ret) {\n-\t\tDRV_LOG(ERR,\n-\t\t\t\"port %u Rx queue %u WQ state to IBV_WQS_RDY failed\",\n-\t\t\tdev->data->port_id, idx);\n-\t\trte_errno = ret;\n-\t\tgoto error;\n-\t}\n \tobj.cq.in = tmpl->cq;\n \tobj.cq.out = &cq_info;\n-\tobj.rwq.in = tmpl->wq;\n-\tobj.rwq.out = &rwq;\n-\tret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);\n+\tret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);\n \tif (ret) {\n \t\trte_errno = ret;\n \t\tgoto error;\n@@ -1101,9 +1228,76 @@ struct mlx5_rxq_obj *\n \t\trte_errno = EINVAL;\n \t\tgoto error;\n \t}\n+\tDRV_LOG(DEBUG, \"port %u device_attr.max_qp_wr is %d\",\n+\t\tdev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);\n+\tDRV_LOG(DEBUG, \"port %u device_attr.max_sge is %d\",\n+\t\tdev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);\n+\t/* Allocate door-bell for types created with DevX. 
*/\n+\tif (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {\n+\t\tstruct mlx5_devx_dbr_page *dbr_page;\n+\t\tint64_t dbr_offset;\n+\n+\t\tdbr_offset = mlx5_get_dbr(dev, &dbr_page);\n+\t\tif (dbr_offset < 0)\n+\t\t\tgoto error;\n+\t\trxq_ctrl->dbr_offset = dbr_offset;\n+\t\trxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;\n+\t\trxq_data->rq_db = (uint32_t *)RTE_PTR_ADD(dbr_page->dbrs,\n+\t\t\t\t\t\t\t  rxq_ctrl->dbr_offset);\n+\t\trxq_data->rq_db = (uint32_t *)(uintptr_t)RTE_PTR_ADD\n+\t\t\t\t\t\t\t(dbr_page->dbrs,\n+\t\t\t\t\t\t\t rxq_ctrl->dbr_offset);\n+\t}\n+\tif (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {\n+\t\ttmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,\n+\t\t\t\t\t   tmpl);\n+\t\tif (!tmpl->wq) {\n+\t\t\tDRV_LOG(ERR, \"port %u Rx queue %u WQ creation failure\",\n+\t\t\t\tdev->data->port_id, idx);\n+\t\t\trte_errno = ENOMEM;\n+\t\t\tgoto error;\n+\t\t}\n+\t\t/* Change queue state to ready. */\n+\t\tmod = (struct ibv_wq_attr){\n+\t\t\t.attr_mask = IBV_WQ_ATTR_STATE,\n+\t\t\t.wq_state = IBV_WQS_RDY,\n+\t\t};\n+\t\tret = mlx5_glue->modify_wq(tmpl->wq, &mod);\n+\t\tif (ret) {\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\"port %u Rx queue %u WQ state to IBV_WQS_RDY\"\n+\t\t\t\t\" failed\", dev->data->port_id, idx);\n+\t\t\trte_errno = ret;\n+\t\t\tgoto error;\n+\t\t}\n+\t\tobj.rwq.in = tmpl->wq;\n+\t\tobj.rwq.out = &rwq;\n+\t\tret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);\n+\t\tif (ret) {\n+\t\t\trte_errno = ret;\n+\t\t\tgoto error;\n+\t\t}\n+\t\trxq_data->wqes = rwq.buf;\n+\t\trxq_data->rq_db = rwq.dbrec;\n+\t} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {\n+\t\tstruct mlx5_devx_modify_rq_attr rq_attr;\n+\n+\t\tmemset(&rq_attr, 0, sizeof(rq_attr));\n+\t\ttmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);\n+\t\tif (!tmpl->rq) {\n+\t\t\tDRV_LOG(ERR, \"port %u Rx queue %u RQ creation failure\",\n+\t\t\t\tdev->data->port_id, idx);\n+\t\t\trte_errno = ENOMEM;\n+\t\t\tgoto error;\n+\t\t}\n+\t\t/* Change queue state to ready. 
*/\n+\t\trq_attr.rq_state = MLX5_RQC_STATE_RST;\n+\t\trq_attr.state = MLX5_RQC_STATE_RDY;\n+\t\tret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);\n+\t\tif (ret)\n+\t\t\tgoto error;\n+\t}\n \t/* Fill the rings. */\n-\trxq_data->wqes = rwq.buf;\n-\trxq_data->rq_db = rwq.dbrec;\n \trxq_data->cqe_n = log2above(cq_info.cqe_cnt);\n \trxq_data->cq_db = cq_info.dbrec;\n \trxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;\n@@ -1121,8 +1315,10 @@ struct mlx5_rxq_obj *\n error:\n \tif (tmpl) {\n \t\tret = rte_errno; /* Save rte_errno before cleanup. */\n-\t\tif (tmpl->wq)\n+\t\tif (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)\n \t\t\tclaim_zero(mlx5_glue->destroy_wq(tmpl->wq));\n+\t\telse if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)\n+\t\t\tclaim_zero(mlx5_devx_cmd_destroy(tmpl->rq));\n \t\tif (tmpl->cq)\n \t\t\tclaim_zero(mlx5_glue->destroy_cq(tmpl->cq));\n \t\tif (tmpl->channel)\n@@ -1131,6 +1327,8 @@ struct mlx5_rxq_obj *\n \t\trte_free(tmpl);\n \t\trte_errno = ret; /* Restore rte_errno. 
*/\n \t}\n+\tif (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)\n+\t\trxq_release_rq_resources(rxq_ctrl);\n \tpriv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;\n \treturn NULL;\n }\n@@ -1585,6 +1783,8 @@ struct mlx5_rxq_ctrl *\n \tif (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))\n \t\trxq_ctrl->obj = NULL;\n \tif (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {\n+\t\tclaim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,\n+\t\t\t\t\t    rxq_ctrl->dbr_offset));\n \t\tmlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);\n \t\tLIST_REMOVE(rxq_ctrl, next);\n \t\trte_free(rxq_ctrl);\n@@ -1633,16 +1833,11 @@ struct mlx5_rxq_ctrl *\n  */\n static struct mlx5_ind_table_obj *\n mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,\n-\t\t       uint32_t queues_n)\n+\t\t       uint32_t queues_n, enum mlx5_ind_tbl_type type)\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_ind_table_obj *ind_tbl;\n-\tconst unsigned int wq_n = rte_is_power_of_2(queues_n) ?\n-\t\tlog2above(queues_n) :\n-\t\tlog2above(priv->config.ind_table_max_size);\n-\tstruct ibv_wq *wq[1 << wq_n];\n-\tunsigned int i;\n-\tunsigned int j;\n+\tunsigned int i = 0, j = 0, k = 0;\n \n \tind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +\n \t\t\t     queues_n * sizeof(uint16_t), 0);\n@@ -1650,33 +1845,75 @@ struct mlx5_rxq_ctrl *\n \t\trte_errno = ENOMEM;\n \t\treturn NULL;\n \t}\n-\tfor (i = 0; i != queues_n; ++i) {\n-\t\tstruct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);\n+\tind_tbl->type = type;\n+\tif (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {\n+\t\tconst unsigned int wq_n = rte_is_power_of_2(queues_n) ?\n+\t\t\tlog2above(queues_n) :\n+\t\t\tlog2above(priv->config.ind_table_max_size);\n+\t\tstruct ibv_wq *wq[1 << wq_n];\n+\n+\t\tfor (i = 0; i != queues_n; ++i) {\n+\t\t\tstruct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,\n+\t\t\t\t\t\t\t\t queues[i]);\n+\t\t\tif (!rxq)\n+\t\t\t\tgoto error;\n+\t\t\twq[i] = rxq->obj->wq;\n+\t\t\tind_tbl->queues[i] = 
queues[i];\n+\t\t}\n+\t\tind_tbl->queues_n = queues_n;\n+\t\t/* Finalise indirection table. */\n+\t\tk = i; /* Retain value of i for use in error case. */\n+\t\tfor (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)\n+\t\t\twq[k] = wq[j];\n+\t\tind_tbl->ind_table = mlx5_glue->create_rwq_ind_table\n+\t\t\t(priv->sh->ctx,\n+\t\t\t &(struct ibv_rwq_ind_table_init_attr){\n+\t\t\t\t.log_ind_tbl_size = wq_n,\n+\t\t\t\t.ind_tbl = wq,\n+\t\t\t\t.comp_mask = 0,\n+\t\t\t});\n+\t\tif (!ind_tbl->ind_table) {\n+\t\t\trte_errno = errno;\n+\t\t\tgoto error;\n+\t\t}\n+\t} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */\n+\t\tstruct mlx5_devx_rqt_attr *rqt_attr = NULL;\n \n-\t\tif (!rxq)\n+\t\trqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +\n+\t\t\t\t      queues_n * sizeof(uint16_t), 0);\n+\t\tif (!rqt_attr) {\n+\t\t\tDRV_LOG(ERR, \"port %u cannot allocate RQT resources\",\n+\t\t\t\tdev->data->port_id);\n+\t\t\trte_errno = ENOMEM;\n \t\t\tgoto error;\n-\t\twq[i] = rxq->obj->wq;\n-\t\tind_tbl->queues[i] = queues[i];\n-\t}\n-\tind_tbl->queues_n = queues_n;\n-\t/* Finalise indirection table. 
*/\n-\tfor (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)\n-\t\twq[i] = wq[j];\n-\tind_tbl->ind_table = mlx5_glue->create_rwq_ind_table\n-\t\t(priv->sh->ctx,\n-\t\t &(struct ibv_rwq_ind_table_init_attr){\n-\t\t\t.log_ind_tbl_size = wq_n,\n-\t\t\t.ind_tbl = wq,\n-\t\t\t.comp_mask = 0,\n-\t\t });\n-\tif (!ind_tbl->ind_table) {\n-\t\trte_errno = errno;\n-\t\tgoto error;\n+\t\t}\n+\t\trqt_attr->rqt_max_size = priv->config.ind_table_max_size;\n+\t\trqt_attr->rqt_actual_size = queues_n;\n+\t\tfor (i = 0; i != queues_n; ++i) {\n+\t\t\tstruct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,\n+\t\t\t\t\t\t\t\t queues[i]);\n+\t\t\tif (!rxq)\n+\t\t\t\tgoto error;\n+\t\t\trqt_attr->rq_list[i] = rxq->obj->rq->id;\n+\t\t\tind_tbl->queues[i] = queues[i];\n+\t\t}\n+\t\tind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,\n+\t\t\t\t\t\t\trqt_attr);\n+\t\trte_free(rqt_attr);\n+\t\tif (!ind_tbl->rqt) {\n+\t\t\tDRV_LOG(ERR, \"port %u cannot create DevX RQT\",\n+\t\t\t\tdev->data->port_id);\n+\t\t\trte_errno = errno;\n+\t\t\tgoto error;\n+\t\t}\n+\t\tind_tbl->queues_n = queues_n;\n \t}\n \trte_atomic32_inc(&ind_tbl->refcnt);\n \tLIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);\n \treturn ind_tbl;\n error:\n+\tfor (j = 0; j < i; j++)\n+\t\tmlx5_rxq_release(dev, ind_tbl->queues[j]);\n \trte_free(ind_tbl);\n \tDEBUG(\"port %u cannot create indirection table\", dev->data->port_id);\n \treturn NULL;\n@@ -1736,9 +1973,13 @@ struct mlx5_rxq_ctrl *\n {\n \tunsigned int i;\n \n-\tif (rte_atomic32_dec_and_test(&ind_tbl->refcnt))\n-\t\tclaim_zero(mlx5_glue->destroy_rwq_ind_table\n-\t\t\t   (ind_tbl->ind_table));\n+\tif (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {\n+\t\tif (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)\n+\t\t\tclaim_zero(mlx5_glue->destroy_rwq_ind_table\n+\t\t\t\t\t\t\t(ind_tbl->ind_table));\n+\t\telse if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)\n+\t\t\tclaim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));\n+\t}\n \tfor (i = 0; i != ind_tbl->queues_n; ++i)\n 
\t\tclaim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));\n \tif (!rte_atomic32_read(&ind_tbl->refcnt)) {\n@@ -1805,93 +2046,145 @@ struct mlx5_hrxq *\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_hrxq *hrxq;\n-\tstruct mlx5_ind_table_obj *ind_tbl;\n \tstruct ibv_qp *qp;\n-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n-\tstruct mlx5dv_qp_init_attr qp_init_attr;\n-#endif\n+\tstruct mlx5_ind_table_obj *ind_tbl;\n \tint err;\n+\tstruct mlx5_devx_obj *tir;\n \n \tqueues_n = hash_fields ? queues_n : 1;\n \tind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);\n-\tif (!ind_tbl)\n-\t\tind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);\n+\tif (!ind_tbl) {\n+\t\tstruct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];\n+\t\tstruct mlx5_rxq_ctrl *rxq_ctrl =\n+\t\t\tcontainer_of(rxq_data, struct mlx5_rxq_ctrl, rxq);\n+\t\tenum mlx5_ind_tbl_type type;\n+\n+\t\ttype = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?\n+\t\t\t\tMLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;\n+\t\tind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);\n+\t}\n \tif (!ind_tbl) {\n \t\trte_errno = ENOMEM;\n \t\treturn NULL;\n \t}\n+\tif (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {\n #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT\n-\tmemset(&qp_init_attr, 0, sizeof(qp_init_attr));\n-\tif (tunnel) {\n-\t\tqp_init_attr.comp_mask =\n+\t\tstruct mlx5dv_qp_init_attr qp_init_attr;\n+\n+\t\tmemset(&qp_init_attr, 0, sizeof(qp_init_attr));\n+\t\tif (tunnel) {\n+\t\t\tqp_init_attr.comp_mask =\n \t\t\t\tMLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;\n-\t\tqp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;\n-\t}\n+\t\t\tqp_init_attr.create_flags =\n+\t\t\t\tMLX5DV_QP_CREATE_TUNNEL_OFFLOADS;\n+\t\t}\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\tif (dev->data->dev_conf.lpbk_mode) {\n-\t\t/* Allow packet sent from NIC loop back w/o source MAC check. 
*/\n-\t\tqp_init_attr.comp_mask |=\n+\t\tif (dev->data->dev_conf.lpbk_mode) {\n+\t\t\t/*\n+\t\t\t * Allow packet sent from NIC loop back\n+\t\t\t * w/o source MAC check.\n+\t\t\t */\n+\t\t\tqp_init_attr.comp_mask |=\n \t\t\t\tMLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;\n-\t\tqp_init_attr.create_flags |=\n+\t\t\tqp_init_attr.create_flags |=\n \t\t\t\tMLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;\n-\t}\n+\t\t}\n #endif\n-\tqp = mlx5_glue->dv_create_qp\n-\t\t(priv->sh->ctx,\n-\t\t &(struct ibv_qp_init_attr_ex){\n-\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n-\t\t\t.comp_mask =\n-\t\t\t\tIBV_QP_INIT_ATTR_PD |\n-\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE |\n-\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n-\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n-\t\t\t\t.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,\n-\t\t\t\t.rx_hash_key_len = rss_key_len,\n-\t\t\t\t.rx_hash_key = (void *)(uintptr_t)rss_key,\n-\t\t\t\t.rx_hash_fields_mask = hash_fields,\n-\t\t\t},\n-\t\t\t.rwq_ind_tbl = ind_tbl->ind_table,\n-\t\t\t.pd = priv->sh->pd,\n-\t\t },\n-\t\t &qp_init_attr);\n+\t\tqp = mlx5_glue->dv_create_qp\n+\t\t\t(priv->sh->ctx,\n+\t\t\t &(struct ibv_qp_init_attr_ex){\n+\t\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n+\t\t\t\t.comp_mask =\n+\t\t\t\t\tIBV_QP_INIT_ATTR_PD |\n+\t\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE |\n+\t\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n+\t\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n+\t\t\t\t\t.rx_hash_function =\n+\t\t\t\t\t\tIBV_RX_HASH_FUNC_TOEPLITZ,\n+\t\t\t\t\t.rx_hash_key_len = rss_key_len,\n+\t\t\t\t\t.rx_hash_key =\n+\t\t\t\t\t\t(void *)(uintptr_t)rss_key,\n+\t\t\t\t\t.rx_hash_fields_mask = hash_fields,\n+\t\t\t\t},\n+\t\t\t\t.rwq_ind_tbl = ind_tbl->ind_table,\n+\t\t\t\t.pd = priv->sh->pd,\n+\t\t\t  },\n+\t\t\t  &qp_init_attr);\n #else\n-\tqp = mlx5_glue->create_qp_ex\n-\t\t(priv->sh->ctx,\n-\t\t &(struct ibv_qp_init_attr_ex){\n-\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n-\t\t\t.comp_mask =\n-\t\t\t\tIBV_QP_INIT_ATTR_PD |\n-\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE 
|\n-\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n-\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n-\t\t\t\t.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,\n-\t\t\t\t.rx_hash_key_len = rss_key_len,\n-\t\t\t\t.rx_hash_key = (void *)(uintptr_t)rss_key,\n-\t\t\t\t.rx_hash_fields_mask = hash_fields,\n-\t\t\t},\n-\t\t\t.rwq_ind_tbl = ind_tbl->ind_table,\n-\t\t\t.pd = priv->sh->pd,\n-\t\t });\n+\t\tqp = mlx5_glue->create_qp_ex\n+\t\t\t(priv->sh->ctx,\n+\t\t\t &(struct ibv_qp_init_attr_ex){\n+\t\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n+\t\t\t\t.comp_mask =\n+\t\t\t\t\tIBV_QP_INIT_ATTR_PD |\n+\t\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE |\n+\t\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n+\t\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n+\t\t\t\t\t.rx_hash_function =\n+\t\t\t\t\t\tIBV_RX_HASH_FUNC_TOEPLITZ,\n+\t\t\t\t\t.rx_hash_key_len = rss_key_len,\n+\t\t\t\t\t.rx_hash_key =\n+\t\t\t\t\t\t(void *)(uintptr_t)rss_key,\n+\t\t\t\t\t.rx_hash_fields_mask = hash_fields,\n+\t\t\t\t},\n+\t\t\t\t.rwq_ind_tbl = ind_tbl->ind_table,\n+\t\t\t\t.pd = priv->sh->pd,\n+\t\t\t });\n #endif\n-\tif (!qp) {\n-\t\trte_errno = errno;\n-\t\tgoto error;\n+\t\tif (!qp) {\n+\t\t\trte_errno = errno;\n+\t\t\tgoto error;\n+\t\t}\n+\t} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */\n+\t\tstruct mlx5_devx_tir_attr tir_attr;\n+\n+\t\tmemset(&tir_attr, 0, sizeof(tir_attr));\n+\t\ttir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;\n+\t\ttir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;\n+\t\tmemcpy(&tir_attr.rx_hash_field_selector_outer, &hash_fields,\n+\t\t       sizeof(uint64_t));\n+\t\ttir_attr.transport_domain = priv->sh->tdn;\n+\t\tmemcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);\n+\t\ttir_attr.indirect_table = ind_tbl->rqt->id;\n+\t\tif (dev->data->dev_conf.lpbk_mode)\n+\t\t\ttir_attr.self_lb_block =\n+\t\t\t\t\tMLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;\n+\t\ttir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);\n+\t\tif (!tir) {\n+\t\t\tDRV_LOG(ERR, \"port %u cannot create DevX 
TIR\",\n+\t\t\t\tdev->data->port_id);\n+\t\t\trte_errno = errno;\n+\t\t\tgoto error;\n+\t\t}\n \t}\n \thrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);\n \tif (!hrxq)\n \t\tgoto error;\n \thrxq->ind_table = ind_tbl;\n-\thrxq->qp = qp;\n+\tif (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {\n+\t\thrxq->qp = qp;\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+\t\thrxq->action =\n+\t\t\tmlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);\n+\t\tif (!hrxq->action) {\n+\t\t\trte_errno = errno;\n+\t\t\tgoto error;\n+\t\t}\n+#endif\n+\t} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */\n+\t\thrxq->tir = tir;\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+\t\thrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir\n+\t\t\t\t\t\t\t(hrxq->tir->obj);\n+\t\tif (!hrxq->action) {\n+\t\t\trte_errno = errno;\n+\t\t\tgoto error;\n+\t\t}\n+#endif\n+\t}\n \thrxq->rss_key_len = rss_key_len;\n \thrxq->hash_fields = hash_fields;\n \tmemcpy(hrxq->rss_key, rss_key, rss_key_len);\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\thrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);\n-\tif (!hrxq->action) {\n-\t\trte_errno = errno;\n-\t\tgoto error;\n-\t}\n-#endif\n \trte_atomic32_inc(&hrxq->refcnt);\n \tLIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);\n \treturn hrxq;\n@@ -1900,6 +2193,8 @@ struct mlx5_hrxq *\n \tmlx5_ind_table_obj_release(dev, ind_tbl);\n \tif (qp)\n \t\tclaim_zero(mlx5_glue->destroy_qp(qp));\n+\telse if (tir)\n+\t\tclaim_zero(mlx5_devx_cmd_destroy(tir));\n \trte_errno = err; /* Restore rte_errno. 
*/\n \treturn NULL;\n }\n@@ -1970,7 +2265,10 @@ struct mlx5_hrxq *\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n \t\tmlx5_glue->destroy_flow_action(hrxq->action);\n #endif\n-\t\tclaim_zero(mlx5_glue->destroy_qp(hrxq->qp));\n+\t\tif (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)\n+\t\t\tclaim_zero(mlx5_glue->destroy_qp(hrxq->qp));\n+\t\telse /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */\n+\t\t\tclaim_zero(mlx5_devx_cmd_destroy(hrxq->tir));\n \t\tmlx5_ind_table_obj_release(dev, hrxq->ind_table);\n \t\tLIST_REMOVE(hrxq, next);\n \t\trte_free(hrxq);\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex f4f5c0d..bd4ae80 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -80,6 +80,9 @@ struct mlx5_mprq_buf {\n /* Get pointer to the first stride. */\n #define mlx5_mprq_buf_addr(ptr) ((ptr) + 1)\n \n+#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6\n+#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9\n+\n enum mlx5_rxq_err_state {\n \tMLX5_RXQ_ERR_STATE_NO_ERROR = 0,\n \tMLX5_RXQ_ERR_STATE_NEED_RESET,\n@@ -174,6 +177,9 @@ struct mlx5_rxq_ctrl {\n \tuint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */\n \tuint32_t wqn; /* WQ number. */\n \tuint16_t dump_file_n; /* Number of dump files. */\n+\tuint32_t dbr_umem_id; /* Storing door-bell information, */\n+\tuint64_t dbr_offset;  /* needed when freeing door-bell. */\n+\tstruct mlx5dv_devx_umem *wq_umem; /* WQ buffer registration info. 
*/\n };\n \n enum mlx5_ind_tbl_type {\n@@ -324,7 +330,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);\n int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n-struct mlx5_rxq_obj *mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx);\n+struct mlx5_rxq_obj *mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,\n+\t\t\t\t      enum mlx5_rxq_obj_type type);\n int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);\n struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,\n \t\t\t\t   uint16_t desc, unsigned int socket,\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex 54353ee..acd2902 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -123,7 +123,8 @@\n \t\tret = rxq_alloc_elts(rxq_ctrl);\n \t\tif (ret)\n \t\t\tgoto error;\n-\t\trxq_ctrl->obj = mlx5_rxq_obj_new(dev, i);\n+\t\trxq_ctrl->obj = mlx5_rxq_obj_new(dev, i,\n+\t\t\t\t\t\t MLX5_RXQ_OBJ_TYPE_DEVX_RQ);\n \t\tif (!rxq_ctrl->obj)\n \t\t\tgoto error;\n \t\trxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;\ndiff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c\nindex 67518c2..5f6554a 100644\n--- a/drivers/net/mlx5/mlx5_vlan.c\n+++ b/drivers/net/mlx5/mlx5_vlan.c\n@@ -111,7 +111,7 @@\n \tuint16_t vlan_offloads =\n \t\t(on ? 
IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |\n \t\t0;\n-\tint ret;\n+\tint ret = 0;\n \n \t/* Validate hw support */\n \tif (!priv->config.hw_vlan_strip) {\n@@ -132,15 +132,27 @@\n \t\trxq->vlan_strip = !!on;\n \t\treturn;\n \t}\n-\tmod = (struct ibv_wq_attr){\n-\t\t.attr_mask = IBV_WQ_ATTR_FLAGS,\n-\t\t.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,\n-\t\t.flags = vlan_offloads,\n-\t};\n-\tret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);\n+\tif (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {\n+\t\tmod = (struct ibv_wq_attr){\n+\t\t\t.attr_mask = IBV_WQ_ATTR_FLAGS,\n+\t\t\t.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,\n+\t\t\t.flags = vlan_offloads,\n+\t\t};\n+\t\tret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);\n+\t} else if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {\n+\t\tstruct mlx5_devx_modify_rq_attr rq_attr;\n+\n+\t\tmemset(&rq_attr, 0, sizeof(rq_attr));\n+\t\trq_attr.rq_state = MLX5_RQC_STATE_RDY;\n+\t\trq_attr.state = MLX5_RQC_STATE_RDY;\n+\t\trq_attr.vsd = (on ? 0 : 1);\n+\t\trq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;\n+\t\tret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);\n+\t}\n \tif (ret) {\n-\t\tDRV_LOG(ERR, \"port %u failed to modified stripping mode: %s\",\n-\t\t\tdev->data->port_id, strerror(rte_errno));\n+\t\tDRV_LOG(ERR, \"port %u failed to modify object %d stripping \"\n+\t\t\t\"mode: %s\", dev->data->port_id,\n+\t\t\trxq_ctrl->obj->type, strerror(rte_errno));\n \t\treturn;\n \t}\n \t/* Update related bits in RX queue. */\n",
    "prefixes": [
        "21/28"
    ]
}