get:
Show a patch.

patch:
Partially update a patch (only the supplied fields change).

put:
Fully update a patch.
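For example, a minimal sketch of the read path in Python, assuming the third-party requests library; the URL and field names come from the response shown below, and "?format=json" selects the JSON renderer rather than this browsable "?format=api" view:

import requests

# Fetch the patch below as plain JSON; GET needs no authentication.
resp = requests.get(
    "https://patches.dpdk.org/api/patches/115676/",
    params={"format": "json"},
    timeout=30,
)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])  # "[v6,11/18] net/mana: ..." / "superseded"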

GET /api/patches/115676/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 115676,
    "url": "https://patches.dpdk.org/api/patches/115676/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1661899911-13086-12-git-send-email-longli@linuxonhyperv.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1661899911-13086-12-git-send-email-longli@linuxonhyperv.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1661899911-13086-12-git-send-email-longli@linuxonhyperv.com",
    "date": "2022-08-30T22:51:44",
    "name": "[v6,11/18] net/mana: implement the hardware layer operations",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "7655cabf7c2f38ad22c0896536cd66c05e9e9ac6",
    "submitter": {
        "id": 1784,
        "url": "https://patches.dpdk.org/api/people/1784/?format=api",
        "name": "Long Li",
        "email": "longli@linuxonhyperv.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1661899911-13086-12-git-send-email-longli@linuxonhyperv.com/mbox/",
    "series": [
        {
            "id": 24471,
            "url": "https://patches.dpdk.org/api/series/24471/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=24471",
            "date": "2022-08-30T22:51:33",
            "name": "Introduce Microsoft Azure Network Adatper (MANA) PMD",
            "version": 6,
            "mbox": "https://patches.dpdk.org/series/24471/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/115676/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/115676/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CB8E6A00C5;\n\tWed, 31 Aug 2022 00:53:05 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3E79D42B94;\n\tWed, 31 Aug 2022 00:52:09 +0200 (CEST)",
            "from linux.microsoft.com (linux.microsoft.com [13.77.154.182])\n by mails.dpdk.org (Postfix) with ESMTP id 365BB400D4\n for <dev@dpdk.org>; Wed, 31 Aug 2022 00:52:04 +0200 (CEST)",
            "by linux.microsoft.com (Postfix, from userid 1004)\n id A0D3A2045E24; Tue, 30 Aug 2022 15:52:03 -0700 (PDT)"
        ],
        "DKIM-Filter": "OpenDKIM Filter v2.11.0 linux.microsoft.com A0D3A2045E24",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=linuxonhyperv.com;\n s=default; t=1661899923;\n bh=1C+rKu0KMVv3GBSVb93HukSBvhVZpAuR0QUqpCzxf6U=;\n h=From:To:Cc:Subject:Date:In-Reply-To:References:Reply-To:From;\n b=KQDd4bM8MnO0UkNSzbYLeMPE4FlxClTO/6i4OMtzJc3tSNPzgR0Esi+ifUnGUtxdf\n R7MWTt/CdSCExcK6jugMiG/v3ChpJLHcaS19pcVvQCjzJUOO5aHncitltHTTl5UyXS\n 0Uko86ZH0K1XJhpnU7JoTiy9LNevPvtDNYptUqYI=",
        "From": "longli@linuxonhyperv.com",
        "To": "Ferruh Yigit <ferruh.yigit@xilinx.com>",
        "Cc": "dev@dpdk.org, Ajay Sharma <sharmaajay@microsoft.com>,\n Stephen Hemminger <sthemmin@microsoft.com>, Long Li <longli@microsoft.com>",
        "Subject": "[Patch v6 11/18] net/mana: implement the hardware layer operations",
        "Date": "Tue, 30 Aug 2022 15:51:44 -0700",
        "Message-Id": "<1661899911-13086-12-git-send-email-longli@linuxonhyperv.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1661899911-13086-1-git-send-email-longli@linuxonhyperv.com>",
        "References": "<1661899911-13086-1-git-send-email-longli@linuxonhyperv.com>",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Reply-To": "longli@microsoft.com",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Long Li <longli@microsoft.com>\n\nThe hardware layer of MANA understands the device queue and doorbell\nformats. Those functions are implemented for use by packet RX/TX code.\n\nSigned-off-by: Long Li <longli@microsoft.com>\n---\nChange log:\nv2:\nRemove unused header files.\nRename a camel case.\nv5:\nUse RTE_BIT32() instead of defining a new BIT()\nv6:\nadd rte_rmb() after reading owner bits\n\n drivers/net/mana/gdma.c      | 289 +++++++++++++++++++++++++++++++++++\n drivers/net/mana/mana.h      | 183 ++++++++++++++++++++++\n drivers/net/mana/meson.build |   1 +\n 3 files changed, 473 insertions(+)\n create mode 100644 drivers/net/mana/gdma.c",
    "diff": "diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c\nnew file mode 100644\nindex 0000000000..7ad175651e\n--- /dev/null\n+++ b/drivers/net/mana/gdma.c\n@@ -0,0 +1,289 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2022 Microsoft Corporation\n+ */\n+\n+#include <ethdev_driver.h>\n+#include <rte_io.h>\n+\n+#include \"mana.h\"\n+\n+uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue)\n+{\n+\tuint32_t offset_in_bytes =\n+\t\t(queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &\n+\t\t(queue->size - 1);\n+\n+\tDRV_LOG(DEBUG, \"txq sq_head %u sq_size %u offset_in_bytes %u\",\n+\t\tqueue->head, queue->size, offset_in_bytes);\n+\n+\tif (offset_in_bytes + GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue->size)\n+\t\tDRV_LOG(ERR, \"fatal error: offset_in_bytes %u too big\",\n+\t\t\toffset_in_bytes);\n+\n+\treturn ((uint8_t *)queue->buffer) + offset_in_bytes;\n+}\n+\n+static uint32_t\n+write_dma_client_oob(uint8_t *work_queue_buffer_pointer,\n+\t\t     const struct gdma_work_request *work_request,\n+\t\t     uint32_t client_oob_size)\n+{\n+\tuint8_t *p = work_queue_buffer_pointer;\n+\n+\tstruct gdma_wqe_dma_oob *header = (struct gdma_wqe_dma_oob *)p;\n+\n+\tmemset(header, 0, sizeof(struct gdma_wqe_dma_oob));\n+\theader->num_sgl_entries = work_request->num_sgl_elements;\n+\theader->inline_client_oob_size_in_dwords =\n+\t\tclient_oob_size / sizeof(uint32_t);\n+\theader->client_data_unit = work_request->client_data_unit;\n+\n+\tDRV_LOG(DEBUG, \"queue buf %p sgl %u oob_h %u du %u oob_buf %p oob_b %u\",\n+\t\twork_queue_buffer_pointer, header->num_sgl_entries,\n+\t\theader->inline_client_oob_size_in_dwords,\n+\t\theader->client_data_unit, work_request->inline_oob_data,\n+\t\twork_request->inline_oob_size_in_bytes);\n+\n+\tp += sizeof(struct gdma_wqe_dma_oob);\n+\tif (work_request->inline_oob_data &&\n+\t    work_request->inline_oob_size_in_bytes > 0) {\n+\t\tmemcpy(p, work_request->inline_oob_data,\n+\t\t       work_request->inline_oob_size_in_bytes);\n+\t\tif (client_oob_size > work_request->inline_oob_size_in_bytes)\n+\t\t\tmemset(p + work_request->inline_oob_size_in_bytes, 0,\n+\t\t\t       client_oob_size -\n+\t\t\t       work_request->inline_oob_size_in_bytes);\n+\t}\n+\n+\treturn sizeof(struct gdma_wqe_dma_oob) + client_oob_size;\n+}\n+\n+static uint32_t\n+write_scatter_gather_list(uint8_t *work_queue_head_pointer,\n+\t\t\t  uint8_t *work_queue_end_pointer,\n+\t\t\t  uint8_t *work_queue_cur_pointer,\n+\t\t\t  struct gdma_work_request *work_request)\n+{\n+\tstruct gdma_sgl_element *sge_list;\n+\tstruct gdma_sgl_element dummy_sgl[1];\n+\tuint8_t *address;\n+\tuint32_t size;\n+\tuint32_t num_sge;\n+\tuint32_t size_to_queue_end;\n+\tuint32_t sge_list_size;\n+\n+\tDRV_LOG(DEBUG, \"work_queue_cur_pointer %p work_request->flags %x\",\n+\t\twork_queue_cur_pointer, work_request->flags);\n+\n+\tnum_sge = work_request->num_sgl_elements;\n+\tsge_list = work_request->sgl;\n+\tsize_to_queue_end = (uint32_t)(work_queue_end_pointer -\n+\t\t\t\t       work_queue_cur_pointer);\n+\n+\tif (num_sge == 0) {\n+\t\t/* Per spec, the case of an empty SGL should be handled as\n+\t\t * follows to avoid corrupted WQE errors:\n+\t\t * Write one dummy SGL entry\n+\t\t * Set the address to 1, leave the rest as 0\n+\t\t */\n+\t\tdummy_sgl[num_sge].address = 1;\n+\t\tdummy_sgl[num_sge].size = 0;\n+\t\tdummy_sgl[num_sge].memory_key = 0;\n+\t\tnum_sge++;\n+\t\tsge_list = dummy_sgl;\n+\t}\n+\n+\tsge_list_size = 0;\n+\t{\n+\t\taddress = (uint8_t *)sge_list;\n+\t\tsize = sizeof(struct gdma_sgl_element) * 
num_sge;\n+\t\tif (size_to_queue_end < size) {\n+\t\t\tmemcpy(work_queue_cur_pointer, address,\n+\t\t\t       size_to_queue_end);\n+\t\t\twork_queue_cur_pointer = work_queue_head_pointer;\n+\t\t\taddress += size_to_queue_end;\n+\t\t\tsize -= size_to_queue_end;\n+\t\t}\n+\n+\t\tmemcpy(work_queue_cur_pointer, address, size);\n+\t\tsge_list_size = size;\n+\t}\n+\n+\tDRV_LOG(DEBUG, \"sge %u address 0x%\" PRIx64 \" size %u key %u list_s %u\",\n+\t\tnum_sge, sge_list->address, sge_list->size,\n+\t\tsge_list->memory_key, sge_list_size);\n+\n+\treturn sge_list_size;\n+}\n+\n+int gdma_post_work_request(struct mana_gdma_queue *queue,\n+\t\t\t   struct gdma_work_request *work_req,\n+\t\t\t   struct gdma_posted_wqe_info *wqe_info)\n+{\n+\tuint32_t client_oob_size =\n+\t\twork_req->inline_oob_size_in_bytes >\n+\t\t\t\tINLINE_OOB_SMALL_SIZE_IN_BYTES ?\n+\t\t\tINLINE_OOB_LARGE_SIZE_IN_BYTES :\n+\t\t\tINLINE_OOB_SMALL_SIZE_IN_BYTES;\n+\n+\tuint32_t sgl_data_size = sizeof(struct gdma_sgl_element) *\n+\t\t\tRTE_MAX((uint32_t)1, work_req->num_sgl_elements);\n+\tuint32_t wqe_size =\n+\t\tRTE_ALIGN(sizeof(struct gdma_wqe_dma_oob) +\n+\t\t\t\tclient_oob_size + sgl_data_size,\n+\t\t\t  GDMA_WQE_ALIGNMENT_UNIT_SIZE);\n+\tuint8_t *wq_buffer_pointer;\n+\tuint32_t queue_free_units = queue->count - (queue->head - queue->tail);\n+\n+\tif (wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE > queue_free_units) {\n+\t\tDRV_LOG(DEBUG, \"WQE size %u queue count %u head %u tail %u\",\n+\t\t\twqe_size, queue->count, queue->head, queue->tail);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tDRV_LOG(DEBUG, \"client_oob_size %u sgl_data_size %u wqe_size %u\",\n+\t\tclient_oob_size, sgl_data_size, wqe_size);\n+\n+\tif (wqe_info) {\n+\t\twqe_info->wqe_index =\n+\t\t\t((queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &\n+\t\t\t (queue->size - 1)) / GDMA_WQE_ALIGNMENT_UNIT_SIZE;\n+\t\twqe_info->unmasked_queue_offset = queue->head;\n+\t\twqe_info->wqe_size_in_bu =\n+\t\t\twqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;\n+\t}\n+\n+\twq_buffer_pointer = gdma_get_wqe_pointer(queue);\n+\twq_buffer_pointer += write_dma_client_oob(wq_buffer_pointer, work_req,\n+\t\t\t\t\t\t  client_oob_size);\n+\tif (wq_buffer_pointer >= ((uint8_t *)queue->buffer) + queue->size)\n+\t\twq_buffer_pointer -= queue->size;\n+\n+\twrite_scatter_gather_list((uint8_t *)queue->buffer,\n+\t\t\t\t  (uint8_t *)queue->buffer + queue->size,\n+\t\t\t\t  wq_buffer_pointer, work_req);\n+\n+\tqueue->head += wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;\n+\n+\treturn 0;\n+}\n+\n+union gdma_doorbell_entry {\n+\tuint64_t     as_uint64;\n+\n+\tstruct {\n+\t\tuint64_t id\t  : 24;\n+\t\tuint64_t reserved    : 8;\n+\t\tuint64_t tail_ptr    : 31;\n+\t\tuint64_t arm\t : 1;\n+\t} cq;\n+\n+\tstruct {\n+\t\tuint64_t id\t  : 24;\n+\t\tuint64_t wqe_cnt     : 8;\n+\t\tuint64_t tail_ptr    : 32;\n+\t} rq;\n+\n+\tstruct {\n+\t\tuint64_t id\t  : 24;\n+\t\tuint64_t reserved    : 8;\n+\t\tuint64_t tail_ptr    : 32;\n+\t} sq;\n+\n+\tstruct {\n+\t\tuint64_t id\t  : 16;\n+\t\tuint64_t reserved    : 16;\n+\t\tuint64_t tail_ptr    : 31;\n+\t\tuint64_t arm\t : 1;\n+\t} eq;\n+}; /* HW DATA */\n+\n+#define DOORBELL_OFFSET_SQ      0x0\n+#define DOORBELL_OFFSET_RQ      0x400\n+#define DOORBELL_OFFSET_CQ      0x800\n+#define DOORBELL_OFFSET_EQ      0xFF8\n+\n+int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,\n+\t\t       uint32_t queue_id, uint32_t tail)\n+{\n+\tuint8_t *addr = db_page;\n+\tunion gdma_doorbell_entry e = {};\n+\n+\tswitch (queue_type) {\n+\tcase gdma_queue_send:\n+\t\te.sq.id = 
queue_id;\n+\t\te.sq.tail_ptr = tail;\n+\t\taddr += DOORBELL_OFFSET_SQ;\n+\t\tbreak;\n+\n+\tcase gdma_queue_receive:\n+\t\te.rq.id = queue_id;\n+\t\te.rq.tail_ptr = tail;\n+\t\te.rq.wqe_cnt = 1;\n+\t\taddr += DOORBELL_OFFSET_RQ;\n+\t\tbreak;\n+\n+\tcase gdma_queue_completion:\n+\t\te.cq.id = queue_id;\n+\t\te.cq.tail_ptr = tail;\n+\t\te.cq.arm = 1;\n+\t\taddr += DOORBELL_OFFSET_CQ;\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tDRV_LOG(ERR, \"Unsupported queue type %d\", queue_type);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Ensure all writes are done before ringing doorbell */\n+\trte_wmb();\n+\n+\tDRV_LOG(DEBUG, \"db_page %p addr %p queue_id %u type %u tail %u\",\n+\t\tdb_page, addr, queue_id, queue_type, tail);\n+\n+\trte_write64(e.as_uint64, addr);\n+\treturn 0;\n+}\n+\n+int gdma_poll_completion_queue(struct mana_gdma_queue *cq,\n+\t\t\t       struct gdma_comp *comp)\n+{\n+\tstruct gdma_hardware_completion_entry *cqe;\n+\tuint32_t head = cq->head % cq->count;\n+\tuint32_t new_owner_bits, old_owner_bits;\n+\tuint32_t cqe_owner_bits;\n+\tstruct gdma_hardware_completion_entry *buffer = cq->buffer;\n+\n+\tcqe = &buffer[head];\n+\tnew_owner_bits = (cq->head / cq->count) & COMPLETION_QUEUE_OWNER_MASK;\n+\told_owner_bits = (cq->head / cq->count - 1) &\n+\t\t\t\tCOMPLETION_QUEUE_OWNER_MASK;\n+\tcqe_owner_bits = cqe->owner_bits;\n+\n+\tDRV_LOG(DEBUG, \"comp cqe bits 0x%x owner bits 0x%x\",\n+\t\tcqe_owner_bits, old_owner_bits);\n+\n+\tif (cqe_owner_bits == old_owner_bits)\n+\t\treturn 0; /* No new entry */\n+\n+\tif (cqe_owner_bits != new_owner_bits) {\n+\t\tDRV_LOG(ERR, \"CQ overflowed, ID %u cqe 0x%x new 0x%x\",\n+\t\t\tcq->id, cqe_owner_bits, new_owner_bits);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Ensure checking owner bits happens before reading from CQE */\n+\trte_rmb();\n+\n+\tcomp->work_queue_number = cqe->wq_num;\n+\tcomp->send_work_queue = cqe->is_sq;\n+\n+\tmemcpy(comp->completion_data, cqe->dma_client_data, GDMA_COMP_DATA_SIZE);\n+\n+\tcq->head++;\n+\n+\tDRV_LOG(DEBUG, \"comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u\",\n+\t\tnew_owner_bits, old_owner_bits, cqe_owner_bits,\n+\t\tcomp->work_queue_number, comp->send_work_queue, cq->head);\n+\treturn 1;\n+}\ndiff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h\nindex 9e15b43275..d87358ab15 100644\n--- a/drivers/net/mana/mana.h\n+++ b/drivers/net/mana/mana.h\n@@ -50,6 +50,178 @@ struct mana_shared_data {\n #define MAX_RECEIVE_BUFFERS_PER_QUEUE\t256\n #define MAX_SEND_BUFFERS_PER_QUEUE\t256\n \n+#define GDMA_WQE_ALIGNMENT_UNIT_SIZE 32\n+\n+#define COMP_ENTRY_SIZE 64\n+#define MAX_TX_WQE_SIZE 512\n+#define MAX_RX_WQE_SIZE 256\n+\n+/* Values from the GDMA specification document, WQE format description */\n+#define INLINE_OOB_SMALL_SIZE_IN_BYTES 8\n+#define INLINE_OOB_LARGE_SIZE_IN_BYTES 24\n+\n+#define NOT_USING_CLIENT_DATA_UNIT 0\n+\n+enum gdma_queue_types {\n+\tgdma_queue_type_invalid = 0,\n+\tgdma_queue_send,\n+\tgdma_queue_receive,\n+\tgdma_queue_completion,\n+\tgdma_queue_event,\n+\tgdma_queue_type_max = 16,\n+\t/*Room for expansion */\n+\n+\t/* This enum can be expanded to add more queue types but\n+\t * it's expected to be done in a contiguous manner.\n+\t * Failing that will result in unexpected behavior.\n+\t */\n+};\n+\n+#define WORK_QUEUE_NUMBER_BASE_BITS 10\n+\n+struct gdma_header {\n+\t/* size of the entire gdma structure, including the entire length of\n+\t * the struct that is formed by extending other gdma struct. 
i.e.\n+\t * GDMA_BASE_SPEC extends gdma_header, GDMA_EVENT_QUEUE_SPEC extends\n+\t * GDMA_BASE_SPEC, StructSize for GDMA_EVENT_QUEUE_SPEC will be size of\n+\t * GDMA_EVENT_QUEUE_SPEC which includes size of GDMA_BASE_SPEC and size\n+\t * of gdma_header.\n+\t * Above example is for illustration purpose and is not in code\n+\t */\n+\tsize_t struct_size;\n+};\n+\n+/* The following macros are from GDMA SPEC 3.6, \"Table 2: CQE data structure\"\n+ * and \"Table 4: Event Queue Entry (EQE) data format\"\n+ */\n+#define GDMA_COMP_DATA_SIZE 0x3C /* Must be a multiple of 4 */\n+#define GDMA_COMP_DATA_SIZE_IN_UINT32 (GDMA_COMP_DATA_SIZE / 4)\n+\n+#define COMPLETION_QUEUE_ENTRY_WORK_QUEUE_INDEX 0\n+#define COMPLETION_QUEUE_ENTRY_WORK_QUEUE_SIZE 24\n+#define COMPLETION_QUEUE_ENTRY_SEND_WORK_QUEUE_INDEX 24\n+#define COMPLETION_QUEUE_ENTRY_SEND_WORK_QUEUE_SIZE 1\n+#define COMPLETION_QUEUE_ENTRY_OWNER_BITS_INDEX 29\n+#define COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE 3\n+\n+#define COMPLETION_QUEUE_OWNER_MASK \\\n+\t((1 << (COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE)) - 1)\n+\n+struct gdma_comp {\n+\tstruct gdma_header gdma_header;\n+\n+\t/* Filled by GDMA core */\n+\tuint32_t completion_data[GDMA_COMP_DATA_SIZE_IN_UINT32];\n+\n+\t/* Filled by GDMA core */\n+\tuint32_t work_queue_number;\n+\n+\t/* Filled by GDMA core */\n+\tbool send_work_queue;\n+};\n+\n+struct gdma_hardware_completion_entry {\n+\tchar dma_client_data[GDMA_COMP_DATA_SIZE];\n+\tunion {\n+\t\tuint32_t work_queue_owner_bits;\n+\t\tstruct {\n+\t\t\tuint32_t wq_num\t\t: 24;\n+\t\t\tuint32_t is_sq\t\t: 1;\n+\t\t\tuint32_t reserved\t: 4;\n+\t\t\tuint32_t owner_bits\t: 3;\n+\t\t};\n+\t};\n+}; /* HW DATA */\n+\n+struct gdma_posted_wqe_info {\n+\tstruct gdma_header gdma_header;\n+\n+\t/* size of the written wqe in basic units (32B), filled by GDMA core.\n+\t * Use this value to progress the work queue after the wqe is processed\n+\t * by hardware.\n+\t */\n+\tuint32_t wqe_size_in_bu;\n+\n+\t/* At the time of writing the wqe to the work queue, the offset in the\n+\t * work queue buffer where by the wqe will be written. 
Each unit\n+\t * represents 32B of buffer space.\n+\t */\n+\tuint32_t wqe_index;\n+\n+\t/* Unmasked offset in the queue to which the WQE was written.\n+\t * In 32 byte units.\n+\t */\n+\tuint32_t unmasked_queue_offset;\n+};\n+\n+struct gdma_sgl_element {\n+\tuint64_t address;\n+\tuint32_t memory_key;\n+\tuint32_t size;\n+};\n+\n+#define MAX_SGL_ENTRIES_FOR_TRANSMIT 30\n+\n+struct one_sgl {\n+\tstruct gdma_sgl_element gdma_sgl[MAX_SGL_ENTRIES_FOR_TRANSMIT];\n+};\n+\n+struct gdma_work_request {\n+\tstruct gdma_header gdma_header;\n+\tstruct gdma_sgl_element *sgl;\n+\tuint32_t num_sgl_elements;\n+\tuint32_t inline_oob_size_in_bytes;\n+\tvoid *inline_oob_data;\n+\tuint32_t flags; /* From _gdma_work_request_FLAGS */\n+\tuint32_t client_data_unit; /* For LSO, this is the MTU of the data */\n+};\n+\n+enum mana_cqe_type {\n+\tCQE_INVALID                     = 0,\n+};\n+\n+struct mana_cqe_header {\n+\tuint32_t cqe_type    : 6;\n+\tuint32_t client_type : 2;\n+\tuint32_t vendor_err  : 24;\n+}; /* HW DATA */\n+\n+/* NDIS HASH Types */\n+#define BIT(nr)\t\t(1 << (nr))\n+#define NDIS_HASH_IPV4          BIT(0)\n+#define NDIS_HASH_TCP_IPV4      BIT(1)\n+#define NDIS_HASH_UDP_IPV4      BIT(2)\n+#define NDIS_HASH_IPV6          BIT(3)\n+#define NDIS_HASH_TCP_IPV6      BIT(4)\n+#define NDIS_HASH_UDP_IPV6      BIT(5)\n+#define NDIS_HASH_IPV6_EX       BIT(6)\n+#define NDIS_HASH_TCP_IPV6_EX   BIT(7)\n+#define NDIS_HASH_UDP_IPV6_EX   BIT(8)\n+\n+#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)\n+#define MANA_HASH_L4                                                         \\\n+\t(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \\\n+\t NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)\n+\n+struct gdma_wqe_dma_oob {\n+\tuint32_t reserved:24;\n+\tuint32_t last_v_bytes:8;\n+\tunion {\n+\t\tuint32_t flags;\n+\t\tstruct {\n+\t\t\tuint32_t num_sgl_entries:8;\n+\t\t\tuint32_t inline_client_oob_size_in_dwords:3;\n+\t\t\tuint32_t client_oob_in_sgl:1;\n+\t\t\tuint32_t consume_credit:1;\n+\t\t\tuint32_t fence:1;\n+\t\t\tuint32_t reserved1:2;\n+\t\t\tuint32_t client_data_unit:14;\n+\t\t\tuint32_t check_sn:1;\n+\t\t\tuint32_t sgl_direct:1;\n+\t\t};\n+\t};\n+};\n+\n struct mana_mr_cache {\n \tuint32_t\tlkey;\n \tuintptr_t\taddr;\n@@ -190,12 +362,23 @@ extern int mana_logtype_init;\n \n #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, \" >>\")\n \n+int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,\n+\t\t       uint32_t queue_id, uint32_t tail);\n+\n+int gdma_post_work_request(struct mana_gdma_queue *queue,\n+\t\t\t   struct gdma_work_request *work_req,\n+\t\t\t   struct gdma_posted_wqe_info *wqe_info);\n+uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue);\n+\n uint16_t mana_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,\n \t\t\t       uint16_t pkts_n);\n \n uint16_t mana_tx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,\n \t\t\t       uint16_t pkts_n);\n \n+int gdma_poll_completion_queue(struct mana_gdma_queue *cq,\n+\t\t\t       struct gdma_comp *comp);\n+\n struct mana_mr_cache *mana_find_pmd_mr(struct mana_mr_btree *local_tree,\n \t\t\t\t       struct mana_priv *priv,\n \t\t\t\t       struct rte_mbuf *mbuf);\ndiff --git a/drivers/net/mana/meson.build b/drivers/net/mana/meson.build\nindex 9771394370..364d57a619 100644\n--- a/drivers/net/mana/meson.build\n+++ b/drivers/net/mana/meson.build\n@@ -12,6 +12,7 @@ deps += ['pci', 'bus_pci', 'net', 'eal', 'kvargs']\n sources += files(\n \t'mana.c',\n 
\t'mr.c',\n+\t'gdma.c',\n \t'mp.c',\n )\n \n",
    "prefixes": [
        "v6",
        "11/18"
    ]
}
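
The put/patch methods above require authentication. A minimal sketch of a partial update, assuming a Patchwork API token with maintainer rights on the project; the token value and the target state "accepted" are placeholders, not values from this response:

import requests

# Partial update via HTTP PATCH; assumes DRF-style token authentication.
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/115676/",
    headers={"Authorization": "Token <maintainer-api-token>"},  # placeholder token
    json={"state": "accepted"},  # placeholder target state
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"])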