get:
Show a patch.

patch:
Partially update a patch — only the fields supplied in the request body are changed.

put:
Fully update a patch — the request body replaces the entire writable representation.

GET /api/patches/76400/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 76400,
    "url": "http://patches.dpdk.org/api/patches/76400/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1599128029-2092-18-git-send-email-michaelba@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599128029-2092-18-git-send-email-michaelba@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1599128029-2092-18-git-send-email-michaelba@nvidia.com",
    "date": "2020-09-03T10:13:48",
    "name": "[v1,17/18] net/mlx5: separate Rx queue drop",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "f56327b758951a9ce0473a7b92d191071b4581db",
    "submitter": {
        "id": 1949,
        "url": "http://patches.dpdk.org/api/people/1949/?format=api",
        "name": "Michael Baum",
        "email": "michaelba@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1599128029-2092-18-git-send-email-michaelba@nvidia.com/mbox/",
    "series": [
        {
            "id": 11924,
            "url": "http://patches.dpdk.org/api/series/11924/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11924",
            "date": "2020-09-03T10:13:31",
            "name": "mlx5 Rx DevX/Verbs separation",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/11924/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/76400/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/76400/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 82A1DA04DB;\n\tThu,  3 Sep 2020 12:17:41 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id BFD041C1BC;\n\tThu,  3 Sep 2020 12:15:17 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id 4410F1C1B6\n for <dev@dpdk.org>; Thu,  3 Sep 2020 12:15:15 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n michaelba@nvidia.com) with SMTP; 3 Sep 2020 13:15:14 +0300",
            "from nvidia.com (pegasus07.mtr.labs.mlnx [10.210.16.112])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 083AEP9F031645;\n Thu, 3 Sep 2020 13:15:14 +0300"
        ],
        "From": "Michael Baum <michaelba@nvidia.com>",
        "To": "dev@dpdk.org",
        "Cc": "Matan Azrad <matan@nvidia.com>, Raslan Darawsheh <rasland@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Date": "Thu,  3 Sep 2020 10:13:48 +0000",
        "Message-Id": "<1599128029-2092-18-git-send-email-michaelba@nvidia.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1599128029-2092-1-git-send-email-michaelba@nvidia.com>",
        "References": "<1599128029-2092-1-git-send-email-michaelba@nvidia.com>",
        "Subject": "[dpdk-dev] [PATCH v1 17/18] net/mlx5: separate Rx queue drop",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Separate Rx queue drop creation into both Verbs and DevX modules.\n\nSigned-off-by: Michael Baum <michaelba@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/linux/mlx5_os.c    |  11 +-\n drivers/net/mlx5/linux/mlx5_verbs.c | 252 +++++++++++++++++++++++++++++++++\n drivers/net/mlx5/mlx5.h             |   4 +-\n drivers/net/mlx5/mlx5_devx.c        |  34 +++++\n drivers/net/mlx5/mlx5_flow_dv.c     |   8 +-\n drivers/net/mlx5/mlx5_flow_verbs.c  |  10 +-\n drivers/net/mlx5/mlx5_rxq.c         | 271 ++----------------------------------\n drivers/net/mlx5/mlx5_trigger.c     |   2 +-\n drivers/net/mlx5/mlx5_vlan.c        |   2 +-\n 9 files changed, 316 insertions(+), 278 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex 694fbd3..505e7d9 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -1267,6 +1267,13 @@\n \t\t\tgoto error;\n \t\t}\n \t}\n+\tif (config->devx && config->dv_flow_en) {\n+\t\tpriv->obj_ops = devx_obj_ops;\n+\t\tpriv->obj_ops.hrxq_drop_new = ibv_obj_ops.hrxq_drop_new;\n+\t\tpriv->obj_ops.hrxq_drop_release = ibv_obj_ops.hrxq_drop_release;\n+\t} else {\n+\t\tpriv->obj_ops = ibv_obj_ops;\n+\t}\n \t/* Supported Verbs flow priority number detection. */\n \terr = mlx5_flow_discover_priorities(eth_dev);\n \tif (err < 0) {\n@@ -1323,10 +1330,6 @@\n \t\t\tgoto error;\n \t\t}\n \t}\n-\tif (config->devx && config->dv_flow_en)\n-\t\tpriv->obj_ops = &devx_obj_ops;\n-\telse\n-\t\tpriv->obj_ops = &ibv_obj_ops;\n \treturn eth_dev;\n error:\n \tif (priv) {\ndiff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c\nindex 0745da9..0a8ae65 100644\n--- a/drivers/net/mlx5/linux/mlx5_verbs.c\n+++ b/drivers/net/mlx5/linux/mlx5_verbs.c\n@@ -613,6 +613,256 @@\n \tclaim_zero(mlx5_glue->destroy_qp(hrxq->qp));\n }\n \n+/**\n+ * Create a drop Rx queue Verbs object.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   The Verbs object initialized, NULL otherwise and rte_errno is set.\n+ */\n+static struct mlx5_rxq_obj *\n+mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct ibv_context *ctx = priv->sh->ctx;\n+\tstruct ibv_cq *cq;\n+\tstruct ibv_wq *wq = NULL;\n+\tstruct mlx5_rxq_obj *rxq;\n+\n+\tif (priv->drop_queue.rxq)\n+\t\treturn priv->drop_queue.rxq;\n+\tcq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);\n+\tif (!cq) {\n+\t\tDEBUG(\"Port %u cannot allocate CQ for drop queue.\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+\twq = mlx5_glue->create_wq(ctx,\n+\t\t &(struct 
ibv_wq_init_attr){\n+\t\t\t.wq_type = IBV_WQT_RQ,\n+\t\t\t.max_wr = 1,\n+\t\t\t.max_sge = 1,\n+\t\t\t.pd = priv->sh->pd,\n+\t\t\t.cq = cq,\n+\t\t });\n+\tif (!wq) {\n+\t\tDEBUG(\"Port %u cannot allocate WQ for drop queue.\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+\trxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);\n+\tif (!rxq) {\n+\t\tDEBUG(\"Port %u cannot allocate drop Rx queue memory.\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\trxq->ibv_cq = cq;\n+\trxq->wq = wq;\n+\tpriv->drop_queue.rxq = rxq;\n+\treturn rxq;\n+error:\n+\tif (wq)\n+\t\tclaim_zero(mlx5_glue->destroy_wq(wq));\n+\tif (cq)\n+\t\tclaim_zero(mlx5_glue->destroy_cq(cq));\n+\treturn NULL;\n+}\n+\n+/**\n+ * Release a drop Rx queue Verbs object.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ */\n+static void\n+mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;\n+\n+\tif (rxq->wq)\n+\t\tclaim_zero(mlx5_glue->destroy_wq(rxq->wq));\n+\tif (rxq->ibv_cq)\n+\t\tclaim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));\n+\tmlx5_free(rxq);\n+\tpriv->drop_queue.rxq = NULL;\n+}\n+\n+/**\n+ * Create a drop indirection table.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   The Verbs object initialized, NULL otherwise and rte_errno is set.\n+ */\n+static struct mlx5_ind_table_obj *\n+mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ind_table_obj *ind_tbl;\n+\tstruct mlx5_rxq_obj *rxq;\n+\tstruct mlx5_ind_table_obj tmpl;\n+\n+\trxq = mlx5_rxq_obj_drop_new(dev);\n+\tif (!rxq)\n+\t\treturn NULL;\n+\ttmpl.ind_table = mlx5_glue->create_rwq_ind_table\n+\t\t(priv->sh->ctx,\n+\t\t &(struct ibv_rwq_ind_table_init_attr){\n+\t\t\t.log_ind_tbl_size = 0,\n+\t\t\t.ind_tbl = (struct ibv_wq 
**)&rxq->wq,\n+\t\t\t.comp_mask = 0,\n+\t\t });\n+\tif (!tmpl.ind_table) {\n+\t\tDEBUG(\"Port %u cannot allocate indirection table for drop\"\n+\t\t      \" queue.\", dev->data->port_id);\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+\tind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,\n+\t\t\t      SOCKET_ID_ANY);\n+\tif (!ind_tbl) {\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\tind_tbl->ind_table = tmpl.ind_table;\n+\treturn ind_tbl;\n+error:\n+\tmlx5_rxq_obj_drop_release(dev);\n+\treturn NULL;\n+}\n+\n+/**\n+ * Release a drop indirection table.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ */\n+static void\n+mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;\n+\n+\tclaim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));\n+\tmlx5_rxq_obj_drop_release(dev);\n+\tmlx5_free(ind_tbl);\n+\tpriv->drop_queue.hrxq->ind_table = NULL;\n+}\n+\n+/**\n+ * Create a drop Rx Hash queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   The Verbs object initialized, NULL otherwise and rte_errno is set.\n+ */\n+static struct mlx5_hrxq *\n+mlx5_ibv_hrxq_drop_new(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ind_table_obj *ind_tbl = NULL;\n+\tstruct ibv_qp *qp = NULL;\n+\tstruct mlx5_hrxq *hrxq = NULL;\n+\n+\tif (priv->drop_queue.hrxq) {\n+\t\trte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);\n+\t\treturn priv->drop_queue.hrxq;\n+\t}\n+\thrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);\n+\tif (!hrxq) {\n+\t\tDRV_LOG(WARNING,\n+\t\t\t\"Port %u cannot allocate memory for drop queue.\",\n+\t\t\tdev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\tpriv->drop_queue.hrxq = hrxq;\n+\tind_tbl = mlx5_ind_table_obj_drop_new(dev);\n+\tif (!ind_tbl)\n+\t\tgoto error;\n+\thrxq->ind_table = 
ind_tbl;\n+\tqp = mlx5_glue->create_qp_ex(priv->sh->ctx,\n+\t\t &(struct ibv_qp_init_attr_ex){\n+\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n+\t\t\t.comp_mask =\n+\t\t\t\tIBV_QP_INIT_ATTR_PD |\n+\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE |\n+\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n+\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n+\t\t\t\t.rx_hash_function =\n+\t\t\t\t\tIBV_RX_HASH_FUNC_TOEPLITZ,\n+\t\t\t\t.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,\n+\t\t\t\t.rx_hash_key = rss_hash_default_key,\n+\t\t\t\t.rx_hash_fields_mask = 0,\n+\t\t\t\t},\n+\t\t\t.rwq_ind_tbl = ind_tbl->ind_table,\n+\t\t\t.pd = priv->sh->pd\n+\t\t });\n+\tif (!qp) {\n+\t\tDEBUG(\"Port %u cannot allocate QP for drop queue.\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+\thrxq->qp = qp;\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+\thrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);\n+\tif (!hrxq->action) {\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+#endif\n+\trte_atomic32_set(&hrxq->refcnt, 1);\n+\treturn hrxq;\n+error:\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+\tif (hrxq && hrxq->action)\n+\t\tmlx5_glue->destroy_flow_action(hrxq->action);\n+#endif\n+\tif (qp)\n+\t\tclaim_zero(mlx5_glue->destroy_qp(hrxq->qp));\n+\tif (ind_tbl)\n+\t\tmlx5_ind_table_obj_drop_release(dev);\n+\tif (hrxq) {\n+\t\tpriv->drop_queue.hrxq = NULL;\n+\t\tmlx5_free(hrxq);\n+\t}\n+\treturn NULL;\n+}\n+\n+/**\n+ * Release a drop hash Rx queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ */\n+static void\n+mlx5_ibv_hrxq_drop_release(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;\n+\n+\tif (rte_atomic32_dec_and_test(&hrxq->refcnt)) {\n+#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n+\t\tmlx5_glue->destroy_flow_action(hrxq->action);\n+#endif\n+\t\tclaim_zero(mlx5_glue->destroy_qp(hrxq->qp));\n+\t\tmlx5_ind_table_obj_drop_release(dev);\n+\t\tmlx5_free(hrxq);\n+\t\tpriv->drop_queue.hrxq = NULL;\n+\t}\n+}\n+\n 
struct mlx5_obj_ops ibv_obj_ops = {\n \t.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,\n \t.rxq_obj_new = mlx5_rxq_ibv_obj_new,\n@@ -623,4 +873,6 @@ struct mlx5_obj_ops ibv_obj_ops = {\n \t.ind_table_destroy = mlx5_ibv_ind_table_destroy,\n \t.hrxq_new = mlx5_ibv_hrxq_new,\n \t.hrxq_destroy = mlx5_ibv_qp_destroy,\n+\t.hrxq_drop_new = mlx5_ibv_hrxq_drop_new,\n+\t.hrxq_drop_release = mlx5_ibv_hrxq_drop_release,\n };\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 579c961..8cef097 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -748,6 +748,8 @@ struct mlx5_obj_ops {\n \tint (*hrxq_new)(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,\n \t\t\tint tunnel __rte_unused);\n \tvoid (*hrxq_destroy)(struct mlx5_hrxq *hrxq);\n+\tstruct mlx5_hrxq *(*hrxq_drop_new)(struct rte_eth_dev *dev);\n+\tvoid (*hrxq_drop_release)(struct rte_eth_dev *dev);\n };\n \n struct mlx5_priv {\n@@ -793,7 +795,7 @@ struct mlx5_priv {\n \tvoid *rss_desc; /* Intermediate rss description resources. */\n \tint flow_idx; /* Intermediate device flow index. */\n \tint flow_nested_idx; /* Intermediate device flow index, nested. */\n-\tstruct mlx5_obj_ops *obj_ops; /* HW objects operations. */\n+\tstruct mlx5_obj_ops obj_ops; /* HW objects operations. */\n \tLIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */\n \tLIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */\n \tuint32_t hrxqs; /* Verbs Hash Rx queues. 
*/\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex cfb9264..ddaab83 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -791,6 +791,38 @@\n \tclaim_zero(mlx5_devx_cmd_destroy(hrxq->tir));\n }\n \n+/**\n+ * Create a drop Rx Hash queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   The DevX object initialized, NULL otherwise and rte_errno is set.\n+ */\n+static struct mlx5_hrxq *\n+mlx5_devx_hrxq_drop_new(struct rte_eth_dev *dev)\n+{\n+\t(void)dev;\n+\tDRV_LOG(ERR, \"DevX drop action is not supported yet\");\n+\trte_errno = ENOTSUP;\n+\treturn NULL;\n+}\n+\n+/**\n+ * Release a drop hash Rx queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ */\n+static void\n+mlx5_devx_hrxq_drop_release(struct rte_eth_dev *dev)\n+{\n+\t(void)dev;\n+\tDRV_LOG(ERR, \"DevX drop action is not supported yet\");\n+\trte_errno = ENOTSUP;\n+}\n+\n struct mlx5_obj_ops devx_obj_ops = {\n \t.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,\n \t.rxq_obj_new = mlx5_rxq_devx_obj_new,\n@@ -801,4 +833,6 @@ struct mlx5_obj_ops devx_obj_ops = {\n \t.ind_table_destroy = mlx5_devx_ind_table_destroy,\n \t.hrxq_new = mlx5_devx_hrxq_new,\n \t.hrxq_destroy = mlx5_devx_tir_destroy,\n+\t.hrxq_drop_new = mlx5_devx_hrxq_drop_new,\n+\t.hrxq_drop_release = mlx5_devx_hrxq_drop_release,\n };\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex d636c57..f953a2d 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -8917,7 +8917,7 @@ struct field_modify_info modify_tcp[] = {\n \t\t\t\tdv->actions[n++] = priv->sh->esw_drop_action;\n \t\t\t} else {\n \t\t\t\tstruct mlx5_hrxq *drop_hrxq;\n-\t\t\t\tdrop_hrxq = mlx5_hrxq_drop_new(dev);\n+\t\t\t\tdrop_hrxq = priv->obj_ops.hrxq_drop_new(dev);\n \t\t\t\tif (!drop_hrxq) {\n \t\t\t\t\trte_flow_error_set\n \t\t\t\t\t\t(error, errno,\n@@ -9013,7 +9013,7 @@ struct field_modify_info 
modify_tcp[] = {\n \t\t/* hrxq is union, don't clear it if the flag is not set. */\n \t\tif (dh->rix_hrxq) {\n \t\t\tif (dh->fate_action == MLX5_FLOW_FATE_DROP) {\n-\t\t\t\tmlx5_hrxq_drop_release(dev);\n+\t\t\t\tpriv->obj_ops.hrxq_drop_release(dev);\n \t\t\t\tdh->rix_hrxq = 0;\n \t\t\t} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {\n \t\t\t\tmlx5_hrxq_release(dev, dh->rix_hrxq);\n@@ -9303,11 +9303,13 @@ struct field_modify_info modify_tcp[] = {\n flow_dv_fate_resource_release(struct rte_eth_dev *dev,\n \t\t\t       struct mlx5_flow_handle *handle)\n {\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n \tif (!handle->rix_fate)\n \t\treturn;\n \tswitch (handle->fate_action) {\n \tcase MLX5_FLOW_FATE_DROP:\n-\t\tmlx5_hrxq_drop_release(dev);\n+\t\tpriv->obj_ops.hrxq_drop_release(dev);\n \t\tbreak;\n \tcase MLX5_FLOW_FATE_QUEUE:\n \t\tmlx5_hrxq_release(dev, handle->rix_hrxq);\ndiff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c\nindex 2ce91f7..e5fc278 100644\n--- a/drivers/net/mlx5/mlx5_flow_verbs.c\n+++ b/drivers/net/mlx5/mlx5_flow_verbs.c\n@@ -72,7 +72,7 @@\n \t\t},\n \t};\n \tstruct ibv_flow *flow;\n-\tstruct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);\n+\tstruct mlx5_hrxq *drop = priv->obj_ops.hrxq_drop_new(dev);\n \tuint16_t vprio[] = { 8, 16 };\n \tint i;\n \tint priority = 0;\n@@ -89,7 +89,7 @@\n \t\tclaim_zero(mlx5_glue->destroy_flow(flow));\n \t\tpriority = vprio[i];\n \t}\n-\tmlx5_hrxq_drop_release(dev);\n+\tpriv->obj_ops.hrxq_drop_release(dev);\n \tswitch (priority) {\n \tcase 8:\n \t\tpriority = RTE_DIM(priority_map_3);\n@@ -1889,7 +1889,7 @@\n \t\t/* hrxq is union, don't touch it only the flag is set. 
*/\n \t\tif (handle->rix_hrxq) {\n \t\t\tif (handle->fate_action == MLX5_FLOW_FATE_DROP) {\n-\t\t\t\tmlx5_hrxq_drop_release(dev);\n+\t\t\t\tpriv->obj_ops.hrxq_drop_release(dev);\n \t\t\t\thandle->rix_hrxq = 0;\n \t\t\t} else if (handle->fate_action ==\n \t\t\t\t   MLX5_FLOW_FATE_QUEUE) {\n@@ -1965,7 +1965,7 @@\n \t\tdev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];\n \t\thandle = dev_flow->handle;\n \t\tif (handle->fate_action == MLX5_FLOW_FATE_DROP) {\n-\t\t\thrxq = mlx5_hrxq_drop_new(dev);\n+\t\t\thrxq = priv->obj_ops.hrxq_drop_new(dev);\n \t\t\tif (!hrxq) {\n \t\t\t\trte_flow_error_set\n \t\t\t\t\t(error, errno,\n@@ -2034,7 +2034,7 @@\n \t\t/* hrxq is union, don't touch it only the flag is set. */\n \t\tif (handle->rix_hrxq) {\n \t\t\tif (handle->fate_action == MLX5_FLOW_FATE_DROP) {\n-\t\t\t\tmlx5_hrxq_drop_release(dev);\n+\t\t\t\tpriv->obj_ops.hrxq_drop_release(dev);\n \t\t\t\thandle->rix_hrxq = 0;\n \t\t\t} else if (handle->fate_action ==\n \t\t\t\t   MLX5_FLOW_FATE_QUEUE) {\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 234ee28..99b32f6 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -513,7 +513,7 @@\n \tint ret;\n \n \tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n-\tret = priv->obj_ops->rxq_obj_modify(rxq_ctrl->obj, false);\n+\tret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, false);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Cannot change Rx WQ state to RESET:  %s\",\n \t\t\tstrerror(errno));\n@@ -612,7 +612,7 @@\n \t/* Reset RQ consumer before moving queue to READY state. 
*/\n \t*rxq->rq_db = rte_cpu_to_be_32(0);\n \trte_cio_wmb();\n-\tret = priv->obj_ops->rxq_obj_modify(rxq_ctrl->obj, true);\n+\tret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, true);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"Cannot change Rx WQ state to READY:  %s\",\n \t\t\tstrerror(errno));\n@@ -1027,7 +1027,7 @@\n \tif (!rxq_ctrl->obj)\n \t\tgoto error;\n \tif (rxq_ctrl->irq) {\n-\t\tret = priv->obj_ops->rxq_event_get(rxq_ctrl->obj);\n+\t\tret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);\n \t\tif (ret < 0)\n \t\t\tgoto error;\n \t\trxq_ctrl->rxq.cq_arm_sn++;\n@@ -1641,7 +1641,7 @@ struct mlx5_rxq_ctrl *\n \tif (!rte_atomic32_dec_and_test(&rxq_ctrl->refcnt))\n \t\treturn 1;\n \tif (rxq_ctrl->obj) {\n-\t\tpriv->obj_ops->rxq_obj_release(rxq_ctrl->obj);\n+\t\tpriv->obj_ops.rxq_obj_release(rxq_ctrl->obj);\n \t\tLIST_REMOVE(rxq_ctrl->obj, next);\n \t\tmlx5_free(rxq_ctrl->obj);\n \t\trxq_ctrl->obj = NULL;\n@@ -1762,7 +1762,7 @@ struct mlx5_ind_table_obj *\n \tunsigned int i;\n \n \tif (rte_atomic32_dec_and_test(&ind_tbl->refcnt))\n-\t\tpriv->obj_ops->ind_table_destroy(ind_tbl);\n+\t\tpriv->obj_ops.ind_table_destroy(ind_tbl);\n \tfor (i = 0; i != ind_tbl->queues_n; ++i)\n \t\tclaim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));\n \tif (!rte_atomic32_read(&ind_tbl->refcnt)) {\n@@ -1836,7 +1836,7 @@ struct mlx5_ind_table_obj *\n \t\t\tgoto error;\n \t\tind_tbl->queues[i] = queues[i];\n \t}\n-\tret = priv->obj_ops->ind_table_new(dev, n, ind_tbl);\n+\tret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);\n \tif (ret < 0)\n \t\tgoto error;\n \trte_atomic32_inc(&ind_tbl->refcnt);\n@@ -1926,7 +1926,7 @@ struct mlx5_ind_table_obj *\n #ifdef HAVE_IBV_FLOW_DV_SUPPORT\n \t\tmlx5_glue->destroy_flow_action(hrxq->action);\n #endif\n-\t\tpriv->obj_ops->hrxq_destroy(hrxq);\n+\t\tpriv->obj_ops.hrxq_destroy(hrxq);\n \t\tmlx5_ind_table_obj_release(dev, hrxq->ind_table);\n \t\tILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,\n \t\t\t     hrxq_idx, hrxq, next);\n@@ -1987,7 
+1987,7 @@ struct mlx5_ind_table_obj *\n \thrxq->rss_key_len = rss_key_len;\n \thrxq->hash_fields = hash_fields;\n \tmemcpy(hrxq->rss_key, rss_key, rss_key_len);\n-\tret = priv->obj_ops->hrxq_new(dev, hrxq, tunnel);\n+\tret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);\n \tif (ret < 0) {\n \t\trte_errno = errno;\n \t\tgoto error;\n@@ -2033,261 +2033,6 @@ struct mlx5_ind_table_obj *\n }\n \n /**\n- * Create a drop Rx queue Verbs/DevX object.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- *\n- * @return\n- *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.\n- */\n-static struct mlx5_rxq_obj *\n-mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct ibv_context *ctx = priv->sh->ctx;\n-\tstruct ibv_cq *cq;\n-\tstruct ibv_wq *wq = NULL;\n-\tstruct mlx5_rxq_obj *rxq;\n-\n-\tif (priv->drop_queue.rxq)\n-\t\treturn priv->drop_queue.rxq;\n-\tcq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);\n-\tif (!cq) {\n-\t\tDEBUG(\"port %u cannot allocate CQ for drop queue\",\n-\t\t      dev->data->port_id);\n-\t\trte_errno = errno;\n-\t\tgoto error;\n-\t}\n-\twq = mlx5_glue->create_wq(ctx,\n-\t\t &(struct ibv_wq_init_attr){\n-\t\t\t.wq_type = IBV_WQT_RQ,\n-\t\t\t.max_wr = 1,\n-\t\t\t.max_sge = 1,\n-\t\t\t.pd = priv->sh->pd,\n-\t\t\t.cq = cq,\n-\t\t });\n-\tif (!wq) {\n-\t\tDEBUG(\"port %u cannot allocate WQ for drop queue\",\n-\t\t      dev->data->port_id);\n-\t\trte_errno = errno;\n-\t\tgoto error;\n-\t}\n-\trxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);\n-\tif (!rxq) {\n-\t\tDEBUG(\"port %u cannot allocate drop Rx queue memory\",\n-\t\t      dev->data->port_id);\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error;\n-\t}\n-\trxq->ibv_cq = cq;\n-\trxq->wq = wq;\n-\tpriv->drop_queue.rxq = rxq;\n-\treturn rxq;\n-error:\n-\tif (wq)\n-\t\tclaim_zero(mlx5_glue->destroy_wq(wq));\n-\tif (cq)\n-\t\tclaim_zero(mlx5_glue->destroy_cq(cq));\n-\treturn NULL;\n-}\n-\n-/**\n- * Release a drop Rx 
queue Verbs/DevX object.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- *\n- * @return\n- *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.\n- */\n-static void\n-mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;\n-\n-\tif (rxq->wq)\n-\t\tclaim_zero(mlx5_glue->destroy_wq(rxq->wq));\n-\tif (rxq->ibv_cq)\n-\t\tclaim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));\n-\tmlx5_free(rxq);\n-\tpriv->drop_queue.rxq = NULL;\n-}\n-\n-/**\n- * Create a drop indirection table.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- *\n- * @return\n- *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.\n- */\n-static struct mlx5_ind_table_obj *\n-mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_ind_table_obj *ind_tbl;\n-\tstruct mlx5_rxq_obj *rxq;\n-\tstruct mlx5_ind_table_obj tmpl;\n-\n-\trxq = mlx5_rxq_obj_drop_new(dev);\n-\tif (!rxq)\n-\t\treturn NULL;\n-\ttmpl.ind_table = mlx5_glue->create_rwq_ind_table\n-\t\t(priv->sh->ctx,\n-\t\t &(struct ibv_rwq_ind_table_init_attr){\n-\t\t\t.log_ind_tbl_size = 0,\n-\t\t\t.ind_tbl = (struct ibv_wq **)&rxq->wq,\n-\t\t\t.comp_mask = 0,\n-\t\t });\n-\tif (!tmpl.ind_table) {\n-\t\tDEBUG(\"port %u cannot allocate indirection table for drop\"\n-\t\t      \" queue\",\n-\t\t      dev->data->port_id);\n-\t\trte_errno = errno;\n-\t\tgoto error;\n-\t}\n-\tind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,\n-\t\t\t      SOCKET_ID_ANY);\n-\tif (!ind_tbl) {\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error;\n-\t}\n-\tind_tbl->ind_table = tmpl.ind_table;\n-\treturn ind_tbl;\n-error:\n-\tmlx5_rxq_obj_drop_release(dev);\n-\treturn NULL;\n-}\n-\n-/**\n- * Release a drop indirection table.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- */\n-static void\n-mlx5_ind_table_obj_drop_release(struct rte_eth_dev 
*dev)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;\n-\n-\tclaim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));\n-\tmlx5_rxq_obj_drop_release(dev);\n-\tmlx5_free(ind_tbl);\n-\tpriv->drop_queue.hrxq->ind_table = NULL;\n-}\n-\n-/**\n- * Create a drop Rx Hash queue.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- *\n- * @return\n- *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.\n- */\n-struct mlx5_hrxq *\n-mlx5_hrxq_drop_new(struct rte_eth_dev *dev)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_ind_table_obj *ind_tbl = NULL;\n-\tstruct ibv_qp *qp = NULL;\n-\tstruct mlx5_hrxq *hrxq = NULL;\n-\n-\tif (priv->drop_queue.hrxq) {\n-\t\trte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);\n-\t\treturn priv->drop_queue.hrxq;\n-\t}\n-\thrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);\n-\tif (!hrxq) {\n-\t\tDRV_LOG(WARNING,\n-\t\t\t\"port %u cannot allocate memory for drop queue\",\n-\t\t\tdev->data->port_id);\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error;\n-\t}\n-\tpriv->drop_queue.hrxq = hrxq;\n-\tind_tbl = mlx5_ind_table_obj_drop_new(dev);\n-\tif (!ind_tbl)\n-\t\tgoto error;\n-\thrxq->ind_table = ind_tbl;\n-\tqp = mlx5_glue->create_qp_ex(priv->sh->ctx,\n-\t\t &(struct ibv_qp_init_attr_ex){\n-\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n-\t\t\t.comp_mask =\n-\t\t\t\tIBV_QP_INIT_ATTR_PD |\n-\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE |\n-\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n-\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n-\t\t\t\t.rx_hash_function =\n-\t\t\t\t\tIBV_RX_HASH_FUNC_TOEPLITZ,\n-\t\t\t\t.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,\n-\t\t\t\t.rx_hash_key = rss_hash_default_key,\n-\t\t\t\t.rx_hash_fields_mask = 0,\n-\t\t\t\t},\n-\t\t\t.rwq_ind_tbl = ind_tbl->ind_table,\n-\t\t\t.pd = priv->sh->pd\n-\t\t });\n-\tif (!qp) {\n-\t\tDEBUG(\"port %u cannot allocate QP for drop queue\",\n-\t\t      
dev->data->port_id);\n-\t\trte_errno = errno;\n-\t\tgoto error;\n-\t}\n-\thrxq->qp = qp;\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\thrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);\n-\tif (!hrxq->action) {\n-\t\trte_errno = errno;\n-\t\tgoto error;\n-\t}\n-#endif\n-\trte_atomic32_set(&hrxq->refcnt, 1);\n-\treturn hrxq;\n-error:\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\tif (hrxq && hrxq->action)\n-\t\tmlx5_glue->destroy_flow_action(hrxq->action);\n-#endif\n-\tif (qp)\n-\t\tclaim_zero(mlx5_glue->destroy_qp(hrxq->qp));\n-\tif (ind_tbl)\n-\t\tmlx5_ind_table_obj_drop_release(dev);\n-\tif (hrxq) {\n-\t\tpriv->drop_queue.hrxq = NULL;\n-\t\tmlx5_free(hrxq);\n-\t}\n-\treturn NULL;\n-}\n-\n-/**\n- * Release a drop hash Rx queue.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- */\n-void\n-mlx5_hrxq_drop_release(struct rte_eth_dev *dev)\n-{\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;\n-\n-\tif (rte_atomic32_dec_and_test(&hrxq->refcnt)) {\n-#ifdef HAVE_IBV_FLOW_DV_SUPPORT\n-\t\tmlx5_glue->destroy_flow_action(hrxq->action);\n-#endif\n-\t\tclaim_zero(mlx5_glue->destroy_qp(hrxq->qp));\n-\t\tmlx5_ind_table_obj_drop_release(dev);\n-\t\tmlx5_free(hrxq);\n-\t\tpriv->drop_queue.hrxq = NULL;\n-\t}\n-}\n-\n-\n-/**\n  * Set the Rx queue timestamp conversion parameters\n  *\n  * @param[in] dev\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex 43eff93..0f4d031 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -150,7 +150,7 @@\n \t\t\trte_errno = ENOMEM;\n \t\t\tgoto error;\n \t\t}\n-\t\tret = priv->obj_ops->rxq_obj_new(dev, i);\n+\t\tret = priv->obj_ops.rxq_obj_new(dev, i);\n \t\tif (ret) {\n \t\t\tmlx5_free(rxq_ctrl->obj);\n \t\t\tgoto error;\ndiff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c\nindex 4bcd3e2..290503a 100644\n--- a/drivers/net/mlx5/mlx5_vlan.c\n+++ b/drivers/net/mlx5/mlx5_vlan.c\n@@ -114,7 
+114,7 @@\n \t\trxq->vlan_strip = !!on;\n \t\treturn;\n \t}\n-\tret = priv->obj_ops->rxq_obj_modify_vlan_strip(rxq_ctrl->obj, on);\n+\tret = priv->obj_ops.rxq_obj_modify_vlan_strip(rxq_ctrl->obj, on);\n \tif (ret) {\n \t\tDRV_LOG(ERR, \"port %u failed to modify object %d stripping \"\n \t\t\t\"mode: %s\", dev->data->port_id,\n",
    "prefixes": [
        "v1",
        "17/18"
    ]
}