Show the details of a single patch, identified by its ID.

GET /api/patches/42948/
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 42948,
    "url": "http://patches.dpdk.org/api/patches/42948/",
    "web_url": "http://patches.dpdk.org/patch/42948/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk"
    },
    "msgid": "<7382d91764548ce8955382111b84cb67a5db2012.1531387413.git.nelio.laranjeiro@6wind.com>",
    "date": "2018-07-12T09:30:48",
    "name": "[v4,02/21] net/mlx5: handle drop queues as regular queues",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "e88f2927b613bc043a28900265f4e6787c201c64",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 6624,
        "url": "http://patches.dpdk.org/api/users/6624/",
        "username": "shahafs",
        "first_name": "Shahaf",
        "last_name": "Shuler",
        "email": "shahafs@mellanox.com"
    },
    "mbox": "http://patches.dpdk.org/patch/42948/mbox/",
    "series": [
        {
            "id": 544,
            "url": "http://patches.dpdk.org/api/series/544/",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=544",
            "date": "2018-07-12T09:30:46",
            "name": "net/mlx5: flow rework",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/544/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/42948/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/42948/checks/",
    "tags": {},
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "References": "<cover.1531293415.git.nelio.laranjeiro@6wind.com>\n\t<cover.1531387413.git.nelio.laranjeiro@6wind.com>",
        "X-Mailman-Version": "2.1.15",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "X-BeenThere": "dev@dpdk.org",
        "Message-Id": "<7382d91764548ce8955382111b84cb67a5db2012.1531387413.git.nelio.laranjeiro@6wind.com>",
        "X-Original-To": "patchwork@dpdk.org",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=M7mQVmhGuoQr2Ao/o5cwihUqPGGiFK5S+N3Nicdet/8=;\n\tb=osCBDIr9zhaNWR0VrhN6Ow3LKibYf63njiZfby1wc9Jf39XWRV+CXCd38+xHkol9e4\n\tZkMArWmC32UTdvHO0NJcE31dFI7pAnJ59mpndbGk46arZmJtvzj24seX9R/iU/ioMnl4\n\txC+qJqIrIkCGoFFQwl9zxv28LW8wgGS16DVE2uWlG9brbBIg+Xo9tA+NuVOgtziwnCym\n\tu0yrivm7zN8cqheI7oaGFVAog1AO3ewGUzD/XA6mdX8s1KEtZjXhtV7l9ZSwMqLCJmjW\n\t11uYYgXwKjzyLby6pLScDzwHyKA7GLbUSrLTQ4US3+FSk6DWTJcaA0neM/XBDYJaKuAq\n\t5Naw==",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 5C62B1B559;\n\tThu, 12 Jul 2018 11:31:30 +0200 (CEST)",
            "from mail-wr1-f66.google.com (mail-wr1-f66.google.com\n\t[209.85.221.66]) by dpdk.org (Postfix) with ESMTP id 470771B521\n\tfor <dev@dpdk.org>; Thu, 12 Jul 2018 11:31:27 +0200 (CEST)",
            "by mail-wr1-f66.google.com with SMTP id g6-v6so12048090wrp.0\n\tfor <dev@dpdk.org>; Thu, 12 Jul 2018 02:31:27 -0700 (PDT)",
            "from laranjeiro-vm.dev.6wind.com\n\t(host.78.145.23.62.rev.coltfrance.com. [62.23.145.78])\n\tby smtp.gmail.com with ESMTPSA id\n\ts2-v6sm18717603wrn.75.2018.07.12.02.31.25\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tThu, 12 Jul 2018 02:31:25 -0700 (PDT)"
        ],
        "X-Mailer": "git-send-email 2.18.0",
        "X-Received": "by 2002:adf:b8ad:: with SMTP id\n\ti42-v6mr1049801wrf.163.1531387886552; \n\tThu, 12 Jul 2018 02:31:26 -0700 (PDT)",
        "Subject": "[dpdk-dev] [PATCH v4 02/21] net/mlx5: handle drop queues as regular\n\tqueues",
        "Precedence": "list",
        "X-Gm-Message-State": "AOUpUlGtEWeauB95zpEzbzPEI66JyrgmAO4t1v/YyHAhll+sxVDo/q8i\n\tt3RbLwLAhdM6ulRjbtNOWcItycbuuQ==",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=M7mQVmhGuoQr2Ao/o5cwihUqPGGiFK5S+N3Nicdet/8=;\n\tb=mHvk+xhiNbsmmIMh8A7oej5fBTLZ4PMav36eBMOQ7z2Ooxibts9bAYdjmqrPZchmg3\n\tXcLCx9ND4UOStl3rxkOKmn7OGz7hvvVY+25E1IztJkY15kOQFdZUEKRQ/DQPKsNZTZq2\n\tjw/hXosu6GPZCET/JNrhyeLEJU8nYHRMigxd++Gp7DMCMH9Ksy640etd3NYRqE7WF3N4\n\tRDreaIbiW0PSJUT5BEV2MDAvLWuY7BROzlY3DEb9pZb6rQKdAi+ATOlHn/2ocLTl100u\n\trX9HN2m4/FXEfeXHFR2vYw0tLiV00K5VddoOc07yy6CZl/C0QUYX5IEtmhOkjIdtWgr0\n\tQ1jQ==",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "Date": "Thu, 12 Jul 2018 11:30:48 +0200",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>",
        "Errors-To": "dev-bounces@dpdk.org",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "X-Google-Smtp-Source": "AAOMgpc09hKMJJl5Ywb2992Mt0tPfVIs49ubXOXAPFFGMxNBN2gpjIb5fUim7j5QKMyayiHgXPIJXA==",
        "Cc": "Adrien Mazarguil <adrien.mazarguil@6wind.com>",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "Delivered-To": "patchwork@dpdk.org",
        "In-Reply-To": "<cover.1531387413.git.nelio.laranjeiro@6wind.com>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "To": "dev@dpdk.org,\n\tYongseok Koh <yskoh@mellanox.com>"
    },
    "content": "Drop queues are essentially used in flows due to Verbs API, the\ninformation if the fate of the flow is a drop or not is already present\nin the flow.  Due to this, drop queues can be fully mapped on regular\nqueues.\n\nSigned-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\nAcked-by: Yongseok Koh <yskoh@mellanox.com>\n---\n drivers/net/mlx5/mlx5.c      |  24 ++--\n drivers/net/mlx5/mlx5.h      |  14 ++-\n drivers/net/mlx5/mlx5_flow.c |  94 +++++++-------\n drivers/net/mlx5/mlx5_rxq.c  | 232 +++++++++++++++++++++++++++++++++++\n drivers/net/mlx5/mlx5_rxtx.h |   6 +\n 5 files changed, 308 insertions(+), 62 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex df7f39844..e9780ac8f 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -261,7 +261,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \t\tpriv->txqs_n = 0;\n \t\tpriv->txqs = NULL;\n \t}\n-\tmlx5_flow_delete_drop_queue(dev);\n \tmlx5_mprq_free_mp(dev);\n \tmlx5_mr_release(dev);\n \tif (priv->pd != NULL) {\n@@ -1139,22 +1138,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,\n \tmlx5_link_update(eth_dev, 0);\n \t/* Store device configuration on private structure. */\n \tpriv->config = config;\n-\t/* Create drop queue. */\n-\terr = mlx5_flow_create_drop_queue(eth_dev);\n-\tif (err) {\n-\t\tDRV_LOG(ERR, \"port %u drop queue allocation failed: %s\",\n-\t\t\teth_dev->data->port_id, strerror(rte_errno));\n-\t\terr = rte_errno;\n-\t\tgoto error;\n-\t}\n \t/* Supported Verbs flow priority number detection. */\n-\tif (verb_priorities == 0)\n-\t\tverb_priorities = mlx5_get_max_verbs_prio(eth_dev);\n-\tif (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {\n-\t\tDRV_LOG(ERR, \"port %u wrong Verbs flow priorities: %u\",\n-\t\t\teth_dev->data->port_id, verb_priorities);\n-\t\terr = ENOTSUP;\n-\t\tgoto error;\n+\tif (verb_priorities == 0) {\n+\t\terr = mlx5_verbs_max_prio(eth_dev);\n+\t\tif (err < 0) {\n+\t\t\tDRV_LOG(ERR, \"port %u wrong Verbs flow priorities\",\n+\t\t\t\teth_dev->data->port_id);\n+\t\t\tgoto error;\n+\t\t}\n+\t\tverb_priorities = err;\n \t}\n \tpriv->config.max_verbs_prio = verb_priorities;\n \t/*\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex cc01310e0..227429848 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -139,9 +139,6 @@ enum mlx5_verbs_alloc_type {\n \tMLX5_VERBS_ALLOC_TYPE_RX_QUEUE,\n };\n \n-/* 8 Verbs priorities. 
*/\n-#define MLX5_VERBS_FLOW_PRIO_8 8\n-\n /**\n  * Verbs allocator needs a context to know in the callback which kind of\n  * resources it is allocating.\n@@ -153,6 +150,12 @@ struct mlx5_verbs_alloc_ctx {\n \n LIST_HEAD(mlx5_mr_list, mlx5_mr);\n \n+/* Flow drop context necessary due to Verbs API. */\n+struct mlx5_drop {\n+\tstruct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */\n+\tstruct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */\n+};\n+\n struct priv {\n \tLIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */\n \tstruct rte_eth_dev_data *dev_data;  /* Pointer to device data. */\n@@ -182,7 +185,7 @@ struct priv {\n \tstruct rte_intr_handle intr_handle; /* Interrupt handler. */\n \tunsigned int (*reta_idx)[]; /* RETA index table. */\n \tunsigned int reta_idx_n; /* RETA index size. */\n-\tstruct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */\n+\tstruct mlx5_drop drop_queue; /* Flow drop queues. */\n \tstruct mlx5_flows flows; /* RTE Flow rules. */\n \tstruct mlx5_flows ctrl_flows; /* Control flow rules. 
*/\n \tstruct {\n@@ -314,7 +317,8 @@ int mlx5_traffic_restart(struct rte_eth_dev *dev);\n \n /* mlx5_flow.c */\n \n-unsigned int mlx5_get_max_verbs_prio(struct rte_eth_dev *dev);\n+int mlx5_verbs_max_prio(struct rte_eth_dev *dev);\n+void mlx5_flow_print(struct rte_flow *flow);\n int mlx5_flow_validate(struct rte_eth_dev *dev,\n \t\t       const struct rte_flow_attr *attr,\n \t\t       const struct rte_flow_item items[],\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex a45cb06e1..5e325be37 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -75,6 +75,58 @@ struct ibv_spec_header {\n \tuint16_t size;\n };\n \n+ /**\n+  * Get the maximum number of priority available.\n+  *\n+  * @param[in] dev\n+  *   Pointer to Ethernet device.\n+  *\n+  * @return\n+  *   number of supported Verbs flow priority on success, a negative errno\n+  *   value otherwise and rte_errno is set.\n+  */\n+int\n+mlx5_verbs_max_prio(struct rte_eth_dev *dev)\n+{\n+\tstruct {\n+\t\tstruct ibv_flow_attr attr;\n+\t\tstruct ibv_flow_spec_eth eth;\n+\t\tstruct ibv_flow_spec_action_drop drop;\n+\t} flow_attr = {\n+\t\t.attr = {\n+\t\t\t.num_of_specs = 2,\n+\t\t},\n+\t\t.eth = {\n+\t\t\t.type = IBV_FLOW_SPEC_ETH,\n+\t\t\t.size = sizeof(struct ibv_flow_spec_eth),\n+\t\t},\n+\t\t.drop = {\n+\t\t\t.size = sizeof(struct ibv_flow_spec_action_drop),\n+\t\t\t.type = IBV_FLOW_SPEC_ACTION_DROP,\n+\t\t},\n+\t};\n+\tstruct ibv_flow *flow;\n+\tuint32_t verb_priorities;\n+\tstruct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);\n+\n+\tif (!drop) {\n+\t\trte_errno = ENOTSUP;\n+\t\treturn -rte_errno;\n+\t}\n+\tfor (verb_priorities = 0; 1; verb_priorities++) {\n+\t\tflow_attr.attr.priority = verb_priorities;\n+\t\tflow = mlx5_glue->create_flow(drop->qp,\n+\t\t\t\t\t      &flow_attr.attr);\n+\t\tif (!flow)\n+\t\t\tbreak;\n+\t\tclaim_zero(mlx5_glue->destroy_flow(flow));\n+\t}\n+\tmlx5_hrxq_drop_release(dev);\n+\tDRV_LOG(INFO, \"port %u flow maximum priority: 
%d\",\n+\t\tdev->data->port_id, verb_priorities);\n+\treturn verb_priorities;\n+}\n+\n /**\n  * Convert a flow.\n  *\n@@ -184,32 +236,6 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)\n \t}\n }\n \n-/**\n- * Create drop queue.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- *\n- * @return\n- *   0 on success, a negative errno value otherwise and rte_errno is set.\n- */\n-int\n-mlx5_flow_create_drop_queue(struct rte_eth_dev *dev __rte_unused)\n-{\n-\treturn 0;\n-}\n-\n-/**\n- * Delete drop queue.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- */\n-void\n-mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev __rte_unused)\n-{\n-}\n-\n /**\n  * Remove all flows.\n  *\n@@ -292,6 +318,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,\n \tstruct priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_attr attr = {\n \t\t.ingress = 1,\n+\t\t.priority = priv->config.max_verbs_prio - 1,\n \t};\n \tstruct rte_flow_item items[] = {\n \t\t{\n@@ -830,18 +857,3 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,\n \t}\n \treturn 0;\n }\n-\n-/**\n- * Detect number of Verbs flow priorities supported.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- *\n- * @return\n- *   number of supported Verbs flow priority.\n- */\n-unsigned int\n-mlx5_get_max_verbs_prio(struct rte_eth_dev *dev __rte_unused)\n-{\n-\treturn 8;\n-}\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex fd0df177e..d960daa43 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -1957,3 +1957,235 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)\n \t}\n \treturn ret;\n }\n+\n+/**\n+ * Create a drop Rx queue Verbs object.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.\n+ */\n+struct mlx5_rxq_ibv *\n+mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tstruct 
ibv_cq *cq;\n+\tstruct ibv_wq *wq = NULL;\n+\tstruct mlx5_rxq_ibv *rxq;\n+\n+\tif (priv->drop_queue.rxq)\n+\t\treturn priv->drop_queue.rxq;\n+\tcq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);\n+\tif (!cq) {\n+\t\tDEBUG(\"port %u cannot allocate CQ for drop queue\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+\twq = mlx5_glue->create_wq(priv->ctx,\n+\t\t &(struct ibv_wq_init_attr){\n+\t\t\t.wq_type = IBV_WQT_RQ,\n+\t\t\t.max_wr = 1,\n+\t\t\t.max_sge = 1,\n+\t\t\t.pd = priv->pd,\n+\t\t\t.cq = cq,\n+\t\t });\n+\tif (!wq) {\n+\t\tDEBUG(\"port %u cannot allocate WQ for drop queue\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+\trxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);\n+\tif (!rxq) {\n+\t\tDEBUG(\"port %u cannot allocate drop Rx queue memory\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\trxq->cq = cq;\n+\trxq->wq = wq;\n+\tpriv->drop_queue.rxq = rxq;\n+\treturn rxq;\n+error:\n+\tif (wq)\n+\t\tclaim_zero(mlx5_glue->destroy_wq(wq));\n+\tif (cq)\n+\t\tclaim_zero(mlx5_glue->destroy_cq(cq));\n+\treturn NULL;\n+}\n+\n+/**\n+ * Release a drop Rx queue Verbs object.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.\n+ */\n+void\n+mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tstruct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;\n+\n+\tif (rxq->wq)\n+\t\tclaim_zero(mlx5_glue->destroy_wq(rxq->wq));\n+\tif (rxq->cq)\n+\t\tclaim_zero(mlx5_glue->destroy_cq(rxq->cq));\n+\trte_free(rxq);\n+\tpriv->drop_queue.rxq = NULL;\n+}\n+\n+/**\n+ * Create a drop indirection table.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.\n+ */\n+struct mlx5_ind_table_ibv *\n+mlx5_ind_table_ibv_drop_new(struct rte_eth_dev 
*dev)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ind_table_ibv *ind_tbl;\n+\tstruct mlx5_rxq_ibv *rxq;\n+\tstruct mlx5_ind_table_ibv tmpl;\n+\n+\trxq = mlx5_rxq_ibv_drop_new(dev);\n+\tif (!rxq)\n+\t\treturn NULL;\n+\ttmpl.ind_table = mlx5_glue->create_rwq_ind_table\n+\t\t(priv->ctx,\n+\t\t &(struct ibv_rwq_ind_table_init_attr){\n+\t\t\t.log_ind_tbl_size = 0,\n+\t\t\t.ind_tbl = &rxq->wq,\n+\t\t\t.comp_mask = 0,\n+\t\t });\n+\tif (!tmpl.ind_table) {\n+\t\tDEBUG(\"port %u cannot allocate indirection table for drop\"\n+\t\t      \" queue\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+\tind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);\n+\tif (!ind_tbl) {\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\tind_tbl->ind_table = tmpl.ind_table;\n+\treturn ind_tbl;\n+error:\n+\tmlx5_rxq_ibv_drop_release(dev);\n+\treturn NULL;\n+}\n+\n+/**\n+ * Release a drop indirection table.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ */\n+void\n+mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;\n+\n+\tclaim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));\n+\tmlx5_rxq_ibv_drop_release(dev);\n+\trte_free(ind_tbl);\n+\tpriv->drop_queue.hrxq->ind_table = NULL;\n+}\n+\n+/**\n+ * Create a drop Rx Hash queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ *\n+ * @return\n+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.\n+ */\n+struct mlx5_hrxq *\n+mlx5_hrxq_drop_new(struct rte_eth_dev *dev)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tstruct mlx5_ind_table_ibv *ind_tbl;\n+\tstruct ibv_qp *qp;\n+\tstruct mlx5_hrxq *hrxq;\n+\n+\tif (priv->drop_queue.hrxq) {\n+\t\trte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);\n+\t\treturn priv->drop_queue.hrxq;\n+\t}\n+\tind_tbl = mlx5_ind_table_ibv_drop_new(dev);\n+\tif 
(!ind_tbl)\n+\t\treturn NULL;\n+\tqp = mlx5_glue->create_qp_ex(priv->ctx,\n+\t\t &(struct ibv_qp_init_attr_ex){\n+\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n+\t\t\t.comp_mask =\n+\t\t\t\tIBV_QP_INIT_ATTR_PD |\n+\t\t\t\tIBV_QP_INIT_ATTR_IND_TABLE |\n+\t\t\t\tIBV_QP_INIT_ATTR_RX_HASH,\n+\t\t\t.rx_hash_conf = (struct ibv_rx_hash_conf){\n+\t\t\t\t.rx_hash_function =\n+\t\t\t\t\tIBV_RX_HASH_FUNC_TOEPLITZ,\n+\t\t\t\t.rx_hash_key_len = rss_hash_default_key_len,\n+\t\t\t\t.rx_hash_key = rss_hash_default_key,\n+\t\t\t\t.rx_hash_fields_mask = 0,\n+\t\t\t\t},\n+\t\t\t.rwq_ind_tbl = ind_tbl->ind_table,\n+\t\t\t.pd = priv->pd\n+\t\t });\n+\tif (!qp) {\n+\t\tDEBUG(\"port %u cannot allocate QP for drop queue\",\n+\t\t      dev->data->port_id);\n+\t\trte_errno = errno;\n+\t\tgoto error;\n+\t}\n+\thrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);\n+\tif (!hrxq) {\n+\t\tDRV_LOG(WARNING,\n+\t\t\t\"port %u cannot allocate memory for drop queue\",\n+\t\t\tdev->data->port_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\thrxq->ind_table = ind_tbl;\n+\thrxq->qp = qp;\n+\tpriv->drop_queue.hrxq = hrxq;\n+\trte_atomic32_set(&hrxq->refcnt, 1);\n+\treturn hrxq;\n+error:\n+\tif (ind_tbl)\n+\t\tmlx5_ind_table_ibv_drop_release(dev);\n+\treturn NULL;\n+}\n+\n+/**\n+ * Release a drop hash Rx queue.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ */\n+void\n+mlx5_hrxq_drop_release(struct rte_eth_dev *dev)\n+{\n+\tstruct priv *priv = dev->data->dev_private;\n+\tstruct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;\n+\n+\tif (rte_atomic32_dec_and_test(&hrxq->refcnt)) {\n+\t\tclaim_zero(mlx5_glue->destroy_qp(hrxq->qp));\n+\t\tmlx5_ind_table_ibv_drop_release(dev);\n+\t\trte_free(hrxq);\n+\t\tpriv->drop_queue.hrxq = NULL;\n+\t}\n+}\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex 97b4d9eb6..708fdd4fa 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -244,6 +244,8 @@ struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, 
uint16_t idx);\n struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);\n int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);\n int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);\n+struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev);\n+void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev);\n int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);\n struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,\n \t\t\t\t   uint16_t desc, unsigned int socket,\n@@ -264,6 +266,8 @@ struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,\n int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,\n \t\t\t       struct mlx5_ind_table_ibv *ind_tbl);\n int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);\n+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev);\n+void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev);\n struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,\n \t\t\t\tconst uint8_t *rss_key, uint32_t rss_key_len,\n \t\t\t\tuint64_t hash_fields,\n@@ -276,6 +280,8 @@ struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,\n \t\t\t\tuint32_t tunnel, uint32_t rss_level);\n int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);\n int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);\n+struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);\n+void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);\n uint64_t mlx5_get_rx_port_offloads(void);\n uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);\n \n",
    "prefixes": [
        "v4",
        "02/21"
    ]
}