get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/113616/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 113616,
    "url": "http://patches.dpdk.org/api/patches/113616/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1656666167-26035-14-git-send-email-longli@linuxonhyperv.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1656666167-26035-14-git-send-email-longli@linuxonhyperv.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1656666167-26035-14-git-send-email-longli@linuxonhyperv.com",
    "date": "2022-07-01T09:02:43",
    "name": "[13/17] net/mana: add function to start/stop RX queues",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "fc65dbc76ad2565764cc3580967b990374db6cb4",
    "submitter": {
        "id": 1784,
        "url": "http://patches.dpdk.org/api/people/1784/?format=api",
        "name": "Long Li",
        "email": "longli@linuxonhyperv.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1656666167-26035-14-git-send-email-longli@linuxonhyperv.com/mbox/",
    "series": [
        {
            "id": 23855,
            "url": "http://patches.dpdk.org/api/series/23855/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=23855",
            "date": "2022-07-01T09:02:30",
            "name": "Introduce Microsoft Azure Network Adatper (MANA) PMD",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/23855/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/113616/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/113616/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 538C2A00C2;\n\tFri,  1 Jul 2022 11:04:34 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8F12A42BAC;\n\tFri,  1 Jul 2022 11:03:18 +0200 (CEST)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id 93ACF42B75\n for <dev@dpdk.org>; Fri,  1 Jul 2022 11:03:09 +0200 (CEST)",
            "by linux.microsoft.com (Postfix, from userid 1004)\n id 4EBC620D4D79; Fri,  1 Jul 2022 02:03:09 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com 4EBC620D4D79",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linuxonhyperv.com;\n s=default; t=1656666189;\n bh=83sYIDdBeMnZnOUZwA2igKK7PWw7eLl8XHgySVR8BOU=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:Reply-To:From;\n b=H40+sFSSz4Wna9NVyVMdzbH5gAzG21Ok02DUu21G/i46qk0hzLndJhgw79koc8S7b\n 4vNH4KAafUin1DwqffgU4vR73j09SgL/1+47qLjorJmSX6b4rBMu748JPDlzG1JM4P\n 35HYfACI2qY5bAGqMjIN1rAHh2IpsZtcU+cduLtI=",
        "From": "longli@linuxonhyperv.com",
        "To": "Ferruh Yigit <ferruh.yigit@intel.com>",
        "Cc": "dev@dpdk.org, Ajay Sharma <sharmaajay@microsoft.com>,\n Stephen Hemminger <sthemmin@microsoft.com>, Long Li <longli@microsoft.com>",
        "Subject": "[PATCH 13/17] net/mana: add function to start/stop RX queues",
        "Date": "Fri,  1 Jul 2022 02:02:43 -0700",
        "Message-Id": "<1656666167-26035-14-git-send-email-longli@linuxonhyperv.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1656666167-26035-1-git-send-email-longli@linuxonhyperv.com>",
        "References": "<1656666167-26035-1-git-send-email-longli@linuxonhyperv.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Reply-To": "longli@microsoft.com",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Long Li <longli@microsoft.com>\n\nMANA allocates device queues through the IB layer when starting RX queues. When\ndevice is stopped all the queues are unmapped and freed.\n\nSigned-off-by: Long Li <longli@microsoft.com>\n---\n drivers/net/mana/mana.h      |   5 +\n drivers/net/mana/meson.build |   1 +\n drivers/net/mana/rx.c        | 369 +++++++++++++++++++++++++++++++++++\n 3 files changed, 375 insertions(+)\n create mode 100644 drivers/net/mana/rx.c",
    "diff": "diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h\nindex fef646a9a7..5052ec9061 100644\n--- a/drivers/net/mana/mana.h\n+++ b/drivers/net/mana/mana.h\n@@ -364,6 +364,7 @@ extern int mana_logtype_init;\n \n int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,\n \t\t       uint32_t queue_id, uint32_t tail);\n+int rq_ring_doorbell(struct mana_rxq *rxq);\n \n int gdma_post_work_request(struct mana_gdma_queue *queue,\n \t\t\t   struct gdma_work_request *work_req,\n@@ -379,10 +380,14 @@ uint16_t mana_tx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,\n int gdma_poll_completion_queue(struct mana_gdma_queue *cq,\n \t\t\t       struct gdma_comp *comp);\n \n+int start_rx_queues(struct rte_eth_dev *dev);\n int start_tx_queues(struct rte_eth_dev *dev);\n \n+int stop_rx_queues(struct rte_eth_dev *dev);\n int stop_tx_queues(struct rte_eth_dev *dev);\n \n+int alloc_and_post_rx_wqe(struct mana_rxq *rxq);\n+\n struct mana_mr_cache *find_pmd_mr(struct mana_mr_btree *local_tree,\n \t\t\t\t  struct mana_priv *priv,\n \t\t\t\t  struct rte_mbuf *mbuf);\ndiff --git a/drivers/net/mana/meson.build b/drivers/net/mana/meson.build\nindex 34bb9c6b2f..8233c04eee 100644\n--- a/drivers/net/mana/meson.build\n+++ b/drivers/net/mana/meson.build\n@@ -11,6 +11,7 @@ deps += ['pci', 'bus_pci', 'net', 'eal', 'kvargs']\n \n sources += files(\n \t'mana.c',\n+\t'rx.c',\n \t'tx.c',\n \t'mr.c',\n \t'gdma.c',\ndiff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c\nnew file mode 100644\nindex 0000000000..bcc9f308f3\n--- /dev/null\n+++ b/drivers/net/mana/rx.c\n@@ -0,0 +1,369 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2022 Microsoft Corporation\n+ */\n+\n+#include <stddef.h>\n+#include <unistd.h>\n+#include <string.h>\n+#include <stdint.h>\n+#include <stdlib.h>\n+#include <errno.h>\n+#include <stdio.h>\n+#include <sys/types.h>\n+#include <dirent.h>\n+\n+#include <rte_malloc.h>\n+#include <ethdev_driver.h>\n+#include <ethdev_pci.h>\n+#include 
<rte_pci.h>\n+#include <rte_bus_pci.h>\n+#include <rte_common.h>\n+#include <rte_kvargs.h>\n+#include <rte_rwlock.h>\n+#include <rte_spinlock.h>\n+#include <rte_string_fns.h>\n+#include <rte_alarm.h>\n+#include <rte_log.h>\n+#include <rte_eal_paging.h>\n+#include <rte_io.h>\n+\n+#include <infiniband/verbs.h>\n+#include <infiniband/manadv.h>\n+\n+#include \"mana.h\"\n+\n+static uint8_t mana_rss_hash_key_default[TOEPLITZ_HASH_KEY_SIZE_IN_BYTES] = {\n+\t0x2c, 0xc6, 0x81, 0xd1,\n+\t0x5b, 0xdb, 0xf4, 0xf7,\n+\t0xfc, 0xa2, 0x83, 0x19,\n+\t0xdb, 0x1a, 0x3e, 0x94,\n+\t0x6b, 0x9e, 0x38, 0xd9,\n+\t0x2c, 0x9c, 0x03, 0xd1,\n+\t0xad, 0x99, 0x44, 0xa7,\n+\t0xd9, 0x56, 0x3d, 0x59,\n+\t0x06, 0x3c, 0x25, 0xf3,\n+\t0xfc, 0x1f, 0xdc, 0x2a,\n+};\n+\n+int rq_ring_doorbell(struct mana_rxq *rxq)\n+{\n+\tstruct mana_priv *priv = rxq->priv;\n+\tint ret;\n+\tvoid *db_page = priv->db_page;\n+\n+\tif (rte_eal_process_type() == RTE_PROC_SECONDARY) {\n+\t\tstruct rte_eth_dev *dev =\n+\t\t\t&rte_eth_devices[priv->dev_data->port_id];\n+\t\tstruct mana_process_priv *process_priv = dev->process_private;\n+\n+\t\tdb_page = process_priv->db_page;\n+\t}\n+\n+\tret = mana_ring_doorbell(db_page, gdma_queue_receive,\n+\t\t\t\t rxq->gdma_rq.id,\n+\t\t\t\t rxq->gdma_rq.head *\n+\t\t\t\t\tGDMA_WQE_ALIGNMENT_UNIT_SIZE);\n+\n+\tif (ret)\n+\t\tDRV_LOG(ERR, \"failed to ring RX doorbell ret %d\", ret);\n+\n+\treturn ret;\n+}\n+\n+int alloc_and_post_rx_wqe(struct mana_rxq *rxq)\n+{\n+\tstruct rte_mbuf *mbuf = NULL;\n+\tstruct gdma_sgl_element sgl[1];\n+\tstruct gdma_work_request request = {0};\n+\tstruct gdma_posted_wqe_info wqe_info = {0};\n+\tstruct mana_priv *priv = rxq->priv;\n+\tint ret;\n+\tstruct mana_mr_cache *mr;\n+\n+\tmbuf = rte_pktmbuf_alloc(rxq->mp);\n+\tif (!mbuf) {\n+\t\trxq->stats.nombuf++;\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tmr = find_pmd_mr(&rxq->mr_btree, priv, mbuf);\n+\tif (!mr) {\n+\t\tDRV_LOG(ERR, \"failed to register RX MR\");\n+\t\trte_pktmbuf_free(mbuf);\n+\t\treturn 
-ENOMEM;\n+\t}\n+\n+\trequest.gdma_header.struct_size = sizeof(request);\n+\twqe_info.gdma_header.struct_size = sizeof(wqe_info);\n+\n+\tsgl[0].address = rte_cpu_to_le_64(rte_pktmbuf_mtod(mbuf, uint64_t));\n+\tsgl[0].memory_key = mr->lkey;\n+\tsgl[0].size =\n+\t\trte_pktmbuf_data_room_size(rxq->mp) -\n+\t\tRTE_PKTMBUF_HEADROOM;\n+\n+\trequest.sgl = sgl;\n+\trequest.num_sgl_elements = 1;\n+\trequest.inline_oob_data = NULL;\n+\trequest.inline_oob_size_in_bytes = 0;\n+\trequest.flags = 0;\n+\trequest.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;\n+\n+\tret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_info);\n+\tif (!ret) {\n+\t\tstruct mana_rxq_desc *desc =\n+\t\t\t&rxq->desc_ring[rxq->desc_ring_head];\n+\n+\t\t/* update queue for tracking pending packets */\n+\t\tdesc->pkt = mbuf;\n+\t\tdesc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;\n+\t\trxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;\n+\t} else {\n+\t\tDRV_LOG(ERR, \"failed to post recv ret %d\", ret);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int alloc_and_post_rx_wqes(struct mana_rxq *rxq)\n+{\n+\tint ret;\n+\n+\tfor (uint32_t i = 0; i < rxq->num_desc; i++) {\n+\t\tret = alloc_and_post_rx_wqe(rxq);\n+\t\tif (ret) {\n+\t\t\tDRV_LOG(ERR, \"failed to post RX ret = %d\", ret);\n+\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\trq_ring_doorbell(rxq);\n+\n+\treturn ret;\n+}\n+\n+int stop_rx_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct mana_priv *priv = dev->data->dev_private;\n+\tint ret, i;\n+\n+\tif (priv->rwq_qp) {\n+\t\tret = ibv_destroy_qp(priv->rwq_qp);\n+\t\tif (ret)\n+\t\t\tDRV_LOG(ERR, \"rx_queue destory_qp failed %d\", ret);\n+\t\tpriv->rwq_qp = NULL;\n+\t}\n+\n+\tif (priv->ind_table) {\n+\t\tret = ibv_destroy_rwq_ind_table(priv->ind_table);\n+\t\tif (ret)\n+\t\t\tDRV_LOG(ERR, \"destroy rwq ind table failed %d\", ret);\n+\t\tpriv->ind_table = NULL;\n+\t}\n+\n+\tfor (i = 0; i < priv->num_queues; i++) {\n+\t\tstruct mana_rxq *rxq = dev->data->rx_queues[i];\n+\n+\t\tif 
(rxq->wq) {\n+\t\t\tret = ibv_destroy_wq(rxq->wq);\n+\t\t\tif (ret)\n+\t\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\t\"rx_queue destroy_wq failed %d\", ret);\n+\t\t\trxq->wq = NULL;\n+\t\t}\n+\n+\t\tif (rxq->cq) {\n+\t\t\tret = ibv_destroy_cq(rxq->cq);\n+\t\t\tif (ret)\n+\t\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\t\"rx_queue destroy_cq failed %d\", ret);\n+\t\t\trxq->cq = NULL;\n+\t\t}\n+\n+\t\t/* Drain and free posted WQEs */\n+\t\twhile (rxq->desc_ring_tail != rxq->desc_ring_head) {\n+\t\t\tstruct mana_rxq_desc *desc =\n+\t\t\t\t&rxq->desc_ring[rxq->desc_ring_tail];\n+\n+\t\t\trte_pktmbuf_free(desc->pkt);\n+\n+\t\t\trxq->desc_ring_tail =\n+\t\t\t\t(rxq->desc_ring_tail + 1) % rxq->num_desc;\n+\t\t}\n+\t\trxq->desc_ring_head = 0;\n+\t\trxq->desc_ring_tail = 0;\n+\n+\t\tmemset(&rxq->gdma_rq, 0, sizeof(rxq->gdma_rq));\n+\t\tmemset(&rxq->gdma_cq, 0, sizeof(rxq->gdma_cq));\n+\t}\n+\treturn 0;\n+}\n+\n+int start_rx_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct mana_priv *priv = dev->data->dev_private;\n+\tint ret, i;\n+\tstruct ibv_wq *ind_tbl[priv->num_queues];\n+\n+\tDRV_LOG(INFO, \"start rx queues\");\n+\tfor (i = 0; i < priv->num_queues; i++) {\n+\t\tstruct mana_rxq *rxq = dev->data->rx_queues[i];\n+\t\tstruct ibv_wq_init_attr wq_attr = {};\n+\n+\t\tmanadv_set_context_attr(priv->ib_ctx,\n+\t\t\tMANADV_CTX_ATTR_BUF_ALLOCATORS,\n+\t\t\t(void *)((uintptr_t)&(struct manadv_ctx_allocators){\n+\t\t\t\t.alloc = &mana_alloc_verbs_buf,\n+\t\t\t\t.free = &mana_free_verbs_buf,\n+\t\t\t\t.data = (void *)(uintptr_t)rxq->socket,\n+\t\t\t}));\n+\n+\t\trxq->cq = ibv_create_cq(priv->ib_ctx, rxq->num_desc,\n+\t\t\t\t\tNULL, NULL, 0);\n+\t\tif (!rxq->cq) {\n+\t\t\tret = -errno;\n+\t\t\tDRV_LOG(ERR, \"failed to create rx cq queue %d\", i);\n+\t\t\tgoto fail;\n+\t\t}\n+\n+\t\twq_attr.wq_type = IBV_WQT_RQ;\n+\t\twq_attr.max_wr = rxq->num_desc;\n+\t\twq_attr.max_sge = 1;\n+\t\twq_attr.pd = priv->ib_parent_pd;\n+\t\twq_attr.cq = rxq->cq;\n+\n+\t\trxq->wq = ibv_create_wq(priv->ib_ctx, &wq_attr);\n+\t\tif 
(!rxq->wq) {\n+\t\t\tret = -errno;\n+\t\t\tDRV_LOG(ERR, \"failed to create rx wq %d\", i);\n+\t\t\tgoto fail;\n+\t\t}\n+\n+\t\tind_tbl[i] = rxq->wq;\n+\t}\n+\n+\tstruct ibv_rwq_ind_table_init_attr ind_table_attr = {\n+\t\t.log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),\n+\t\t.ind_tbl = ind_tbl,\n+\t\t.comp_mask = 0,\n+\t};\n+\n+\tpriv->ind_table = ibv_create_rwq_ind_table(priv->ib_ctx,\n+\t\t\t\t\t\t   &ind_table_attr);\n+\tif (!priv->ind_table) {\n+\t\tret = -errno;\n+\t\tDRV_LOG(ERR, \"failed to create ind_table ret %d\", ret);\n+\t\tgoto fail;\n+\t}\n+\n+\tDRV_LOG(INFO, \"ind_table handle %d num %d\",\n+\t\tpriv->ind_table->ind_tbl_handle,\n+\t\tpriv->ind_table->ind_tbl_num);\n+\n+\tstruct ibv_qp_init_attr_ex qp_attr_ex = {\n+\t\t.comp_mask = IBV_QP_INIT_ATTR_PD |\n+\t\t\t     IBV_QP_INIT_ATTR_RX_HASH |\n+\t\t\t     IBV_QP_INIT_ATTR_IND_TABLE,\n+\t\t.qp_type = IBV_QPT_RAW_PACKET,\n+\t\t.pd = priv->ib_parent_pd,\n+\t\t.rwq_ind_tbl = priv->ind_table,\n+\t\t.rx_hash_conf = {\n+\t\t\t.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,\n+\t\t\t.rx_hash_key_len = TOEPLITZ_HASH_KEY_SIZE_IN_BYTES,\n+\t\t\t.rx_hash_key = mana_rss_hash_key_default,\n+\t\t\t.rx_hash_fields_mask =\n+\t\t\t\tIBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,\n+\t\t},\n+\n+\t};\n+\n+\t/* overwrite default if rss key is set */\n+\tif (priv->rss_conf.rss_key_len && priv->rss_conf.rss_key)\n+\t\tqp_attr_ex.rx_hash_conf.rx_hash_key =\n+\t\t\tpriv->rss_conf.rss_key;\n+\n+\t/* overwrite default if rss hash fields are set */\n+\tif (priv->rss_conf.rss_hf) {\n+\t\tqp_attr_ex.rx_hash_conf.rx_hash_fields_mask = 0;\n+\n+\t\tif (priv->rss_conf.rss_hf & ETH_RSS_IPV4)\n+\t\t\tqp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=\n+\t\t\t\tIBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;\n+\n+\t\tif (priv->rss_conf.rss_hf & ETH_RSS_IPV6)\n+\t\t\tqp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=\n+\t\t\t\tIBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_SRC_IPV6;\n+\n+\t\tif (priv->rss_conf.rss_hf &\n+\t\t    (ETH_RSS_NONFRAG_IPV4_TCP | 
ETH_RSS_NONFRAG_IPV6_TCP))\n+\t\t\tqp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=\n+\t\t\t\tIBV_RX_HASH_SRC_PORT_TCP |\n+\t\t\t\tIBV_RX_HASH_DST_PORT_TCP;\n+\n+\t\tif (priv->rss_conf.rss_hf &\n+\t\t    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))\n+\t\t\tqp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=\n+\t\t\t\tIBV_RX_HASH_SRC_PORT_UDP |\n+\t\t\t\tIBV_RX_HASH_DST_PORT_UDP;\n+\t}\n+\n+\tpriv->rwq_qp = ibv_create_qp_ex(priv->ib_ctx, &qp_attr_ex);\n+\tif (!priv->rwq_qp) {\n+\t\tret = -errno;\n+\t\tDRV_LOG(ERR, \"rx ibv_create_qp_ex failed\");\n+\t\tgoto fail;\n+\t}\n+\n+\tfor (i = 0; i < priv->num_queues; i++) {\n+\t\tstruct mana_rxq *rxq = dev->data->rx_queues[i];\n+\t\tstruct manadv_obj obj = {};\n+\t\tstruct manadv_cq dv_cq;\n+\t\tstruct manadv_rwq dv_wq;\n+\n+\t\tobj.cq.in = rxq->cq;\n+\t\tobj.cq.out = &dv_cq;\n+\t\tobj.rwq.in = rxq->wq;\n+\t\tobj.rwq.out = &dv_wq;\n+\t\tret = manadv_init_obj(&obj, MANADV_OBJ_CQ | MANADV_OBJ_RWQ);\n+\t\tif (ret) {\n+\t\t\tDRV_LOG(ERR, \"manadv_init_obj failed ret %d\", ret);\n+\t\t\tgoto fail;\n+\t\t}\n+\n+\t\trxq->gdma_cq.buffer = obj.cq.out->buf;\n+\t\trxq->gdma_cq.count = obj.cq.out->count;\n+\t\trxq->gdma_cq.size = rxq->gdma_cq.count * COMP_ENTRY_SIZE;\n+\t\trxq->gdma_cq.id = obj.cq.out->cq_id;\n+\n+\t\t/* CQ head starts with count */\n+\t\trxq->gdma_cq.head = rxq->gdma_cq.count;\n+\n+\t\tDRV_LOG(INFO, \"rxq cq id %u buf %px count %u size %u\",\n+\t\t\trxq->gdma_cq.id, rxq->gdma_cq.buffer,\n+\t\t\trxq->gdma_cq.count, rxq->gdma_cq.size);\n+\n+\t\tpriv->db_page = obj.rwq.out->db_page;\n+\n+\t\trxq->gdma_rq.buffer = obj.rwq.out->buf;\n+\t\trxq->gdma_rq.count = obj.rwq.out->count;\n+\t\trxq->gdma_rq.size = obj.rwq.out->size;\n+\t\trxq->gdma_rq.id = obj.rwq.out->wq_id;\n+\n+\t\tDRV_LOG(INFO, \"rxq rq id %u buf %px count %u size %u\",\n+\t\t\trxq->gdma_rq.id, rxq->gdma_rq.buffer,\n+\t\t\trxq->gdma_rq.count, rxq->gdma_rq.size);\n+\t}\n+\n+\tfor (i = 0; i < priv->num_queues; i++) {\n+\t\tret = 
alloc_and_post_rx_wqes(dev->data->rx_queues[i]);\n+\t\tif (ret)\n+\t\t\tgoto fail;\n+\t}\n+\n+\treturn 0;\n+\n+fail:\n+\tstop_rx_queues(dev);\n+\treturn ret;\n+}\n",
    "prefixes": [
        "13/17"
    ]
}