get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

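A patch can be retrieved read-only with a plain GET request, as in the captured exchange below. A minimal sketch in Python, assuming the third-party requests package is installed; the field names are taken from the JSON response shown below:

    import requests

    # Fetch the patch as JSON; no authentication is needed for read access.
    resp = requests.get("http://patches.dpdk.org/api/patches/75648/")
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])   # "[v2,04/11] baseband/acc100: add queue configuration"
    print(patch["state"])  # "superseded"
    print(patch["mbox"])   # mbox URL, suitable for feeding to `git am`
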
GET /api/patches/75648/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 75648,
    "url": "http://patches.dpdk.org/api/patches/75648/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1597791894-37041-5-git-send-email-nicolas.chautru@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1597791894-37041-5-git-send-email-nicolas.chautru@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1597791894-37041-5-git-send-email-nicolas.chautru@intel.com",
    "date": "2020-08-18T23:04:47",
    "name": "[v2,04/11] baseband/acc100: add queue configuration",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "e0c08db378afc1e56e112e2e2c2ad13112749bcc",
    "submitter": {
        "id": 1314,
        "url": "http://patches.dpdk.org/api/people/1314/?format=api",
        "name": "Chautru, Nicolas",
        "email": "nicolas.chautru@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1597791894-37041-5-git-send-email-nicolas.chautru@intel.com/mbox/",
    "series": [
        {
            "id": 11695,
            "url": "http://patches.dpdk.org/api/series/11695/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11695",
            "date": "2020-08-18T23:04:43",
            "name": "bbdev PMD ACC100",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/11695/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/75648/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/75648/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id DA911A04AF;\n\tWed, 19 Aug 2020 01:07:32 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 2459D1C0BE;\n\tWed, 19 Aug 2020 01:06:53 +0200 (CEST)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n by dpdk.org (Postfix) with ESMTP id C5C3D1C014\n for <dev@dpdk.org>; Wed, 19 Aug 2020 01:06:46 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n by fmsmga103.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Aug 2020 16:06:44 -0700",
            "from skx-5gnr-sc12-4.sc.intel.com ([172.25.69.210])\n by orsmga004.jf.intel.com with ESMTP; 18 Aug 2020 16:06:43 -0700"
        ],
        "IronPort-SDR": [
            "\n iDl1JlT2Yfl8Ddl2BdT3jXeJ8z1dwU5lnsMum/k6F4gJBsuEv8wSCNlOqHcGDmqZC+R7sqpa69\n G8qe85jz2jJw==",
            "\n aM4cJIIiTa3dSLGvjfGixjTmYHYmH+QO30QY3ST6oR3YtfVOCJeWy71NX5v4zsic9crCn893Xo\n MKW02taXN7bQ=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9717\"; a=\"154281358\"",
            "E=Sophos;i=\"5.76,329,1592895600\"; d=\"scan'208\";a=\"154281358\"",
            "E=Sophos;i=\"5.76,329,1592895600\"; d=\"scan'208\";a=\"441400698\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Nicolas Chautru <nicolas.chautru@intel.com>",
        "To": "dev@dpdk.org,\n\takhil.goyal@nxp.com",
        "Cc": "bruce.richardson@intel.com,\n\tNicolas Chautru <nicolas.chautru@intel.com>",
        "Date": "Tue, 18 Aug 2020 16:04:47 -0700",
        "Message-Id": "<1597791894-37041-5-git-send-email-nicolas.chautru@intel.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1597791894-37041-1-git-send-email-nicolas.chautru@intel.com>",
        "References": "<1597791894-37041-1-git-send-email-nicolas.chautru@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 04/11] baseband/acc100: add queue configuration",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Adding function to create and configure queues for\nthe device. Still no capability.\n\nSigned-off-by: Nicolas Chautru <nicolas.chautru@intel.com>\n---\n drivers/baseband/acc100/rte_acc100_pmd.c | 420 ++++++++++++++++++++++++++++++-\n drivers/baseband/acc100/rte_acc100_pmd.h |  45 ++++\n 2 files changed, 464 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/baseband/acc100/rte_acc100_pmd.c b/drivers/baseband/acc100/rte_acc100_pmd.c\nindex 7807a30..7a21c57 100644\n--- a/drivers/baseband/acc100/rte_acc100_pmd.c\n+++ b/drivers/baseband/acc100/rte_acc100_pmd.c\n@@ -26,6 +26,22 @@\n RTE_LOG_REGISTER(acc100_logtype, pmd.bb.acc100, NOTICE);\n #endif\n \n+/* Write to MMIO register address */\n+static inline void\n+mmio_write(void *addr, uint32_t value)\n+{\n+\t*((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);\n+}\n+\n+/* Write a register of a ACC100 device */\n+static inline void\n+acc100_reg_write(struct acc100_device *d, uint32_t offset, uint32_t payload)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);\n+\tmmio_write(reg_addr, payload);\n+\tusleep(1000);\n+}\n+\n /* Read a register of a ACC100 device */\n static inline uint32_t\n acc100_reg_read(struct acc100_device *d, uint32_t offset)\n@@ -36,6 +52,22 @@\n \treturn rte_le_to_cpu_32(ret);\n }\n \n+/* Basic Implementation of Log2 for exact 2^N */\n+static inline uint32_t\n+log2_basic(uint32_t value)\n+{\n+\treturn (value == 0) ? 0 : __builtin_ctz(value);\n+}\n+\n+/* Calculate memory alignment offset assuming alignment is 2^N */\n+static inline uint32_t\n+calc_mem_alignment_offset(void *unaligned_virt_mem, uint32_t alignment)\n+{\n+\trte_iova_t unaligned_phy_mem = rte_malloc_virt2iova(unaligned_virt_mem);\n+\treturn (uint32_t)(alignment -\n+\t\t\t(unaligned_phy_mem & (alignment-1)));\n+}\n+\n /* Calculate the offset of the enqueue register */\n static inline uint32_t\n queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)\n@@ -204,10 +236,393 @@\n \t\t\tacc100_conf->q_dl_5g.aq_depth_log2);\n }\n \n+static void\n+free_base_addresses(void **base_addrs, int size)\n+{\n+\tint i;\n+\tfor (i = 0; i < size; i++)\n+\t\trte_free(base_addrs[i]);\n+}\n+\n+static inline uint32_t\n+get_desc_len(void)\n+{\n+\treturn sizeof(union acc100_dma_desc);\n+}\n+\n+/* Allocate the 2 * 64MB block for the sw rings */\n+static int\n+alloc_2x64mb_sw_rings_mem(struct rte_bbdev *dev, struct acc100_device *d,\n+\t\tint socket)\n+{\n+\tuint32_t sw_ring_size = ACC100_SIZE_64MBYTE;\n+\td->sw_rings_base = rte_zmalloc_socket(dev->device->driver->name,\n+\t\t\t2 * sw_ring_size, RTE_CACHE_LINE_SIZE, socket);\n+\tif (d->sw_rings_base == NULL) {\n+\t\trte_bbdev_log(ERR, \"Failed to allocate memory for %s:%u\",\n+\t\t\t\tdev->device->driver->name,\n+\t\t\t\tdev->data->dev_id);\n+\t\treturn -ENOMEM;\n+\t}\n+\tmemset(d->sw_rings_base, 0, ACC100_SIZE_64MBYTE);\n+\tuint32_t next_64mb_align_offset = calc_mem_alignment_offset(\n+\t\t\td->sw_rings_base, ACC100_SIZE_64MBYTE);\n+\td->sw_rings = RTE_PTR_ADD(d->sw_rings_base, next_64mb_align_offset);\n+\td->sw_rings_phys = rte_malloc_virt2iova(d->sw_rings_base) +\n+\t\t\tnext_64mb_align_offset;\n+\td->sw_ring_size = MAX_QUEUE_DEPTH * get_desc_len();\n+\td->sw_ring_max_depth = d->sw_ring_size / get_desc_len();\n+\n+\treturn 0;\n+}\n+\n+/* Attempt to allocate minimised memory space for sw rings */\n+static void\n+alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc100_device *d,\n+\t\tuint16_t num_queues, int socket)\n+{\n+\trte_iova_t sw_rings_base_phy, next_64mb_align_addr_phy;\n+\tuint32_t next_64mb_align_offset;\n+\trte_iova_t sw_ring_phys_end_addr;\n+\tvoid *base_addrs[SW_RING_MEM_ALLOC_ATTEMPTS];\n+\tvoid *sw_rings_base;\n+\tint i = 0;\n+\tuint32_t q_sw_ring_size = MAX_QUEUE_DEPTH * get_desc_len();\n+\tuint32_t dev_sw_ring_size = q_sw_ring_size * num_queues;\n+\n+\t/* Find an aligned block of memory to store sw rings 
*/\n+\twhile (i < SW_RING_MEM_ALLOC_ATTEMPTS) {\n+\t\t/*\n+\t\t * sw_ring allocated memory is guaranteed to be aligned to\n+\t\t * q_sw_ring_size at the condition that the requested size is\n+\t\t * less than the page size\n+\t\t */\n+\t\tsw_rings_base = rte_zmalloc_socket(\n+\t\t\t\tdev->device->driver->name,\n+\t\t\t\tdev_sw_ring_size, q_sw_ring_size, socket);\n+\n+\t\tif (sw_rings_base == NULL) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"Failed to allocate memory for %s:%u\",\n+\t\t\t\t\tdev->device->driver->name,\n+\t\t\t\t\tdev->data->dev_id);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tsw_rings_base_phy = rte_malloc_virt2iova(sw_rings_base);\n+\t\tnext_64mb_align_offset = calc_mem_alignment_offset(\n+\t\t\t\tsw_rings_base, ACC100_SIZE_64MBYTE);\n+\t\tnext_64mb_align_addr_phy = sw_rings_base_phy +\n+\t\t\t\tnext_64mb_align_offset;\n+\t\tsw_ring_phys_end_addr = sw_rings_base_phy + dev_sw_ring_size;\n+\n+\t\t/* Check if the end of the sw ring memory block is before the\n+\t\t * start of next 64MB aligned mem address\n+\t\t */\n+\t\tif (sw_ring_phys_end_addr < next_64mb_align_addr_phy) {\n+\t\t\td->sw_rings_phys = sw_rings_base_phy;\n+\t\t\td->sw_rings = sw_rings_base;\n+\t\t\td->sw_rings_base = sw_rings_base;\n+\t\t\td->sw_ring_size = q_sw_ring_size;\n+\t\t\td->sw_ring_max_depth = MAX_QUEUE_DEPTH;\n+\t\t\tbreak;\n+\t\t}\n+\t\t/* Store the address of the unaligned mem block */\n+\t\tbase_addrs[i] = sw_rings_base;\n+\t\ti++;\n+\t}\n+\n+\t/* Free all unaligned blocks of mem allocated in the loop */\n+\tfree_base_addresses(base_addrs, i);\n+}\n+\n+\n+/* Allocate 64MB memory used for all software rings */\n+static int\n+acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n+{\n+\tuint32_t phys_low, phys_high, payload;\n+\tstruct acc100_device *d = dev->data->dev_private;\n+\tconst struct acc100_registry_addr *reg_addr;\n+\n+\tif (d->pf_device && !d->acc100_conf.pf_mode_en) {\n+\t\trte_bbdev_log(NOTICE,\n+\t\t\t\t\"%s has PF mode disabled. 
This PF can't be used.\",\n+\t\t\t\tdev->data->name);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\talloc_sw_rings_min_mem(dev, d, num_queues, socket_id);\n+\n+\t/* If minimal memory space approach failed, then allocate\n+\t * the 2 * 64MB block for the sw rings\n+\t */\n+\tif (d->sw_rings == NULL)\n+\t\talloc_2x64mb_sw_rings_mem(dev, d, socket_id);\n+\n+\t/* Configure ACC100 with the base address for DMA descriptor rings\n+\t * Same descriptor rings used for UL and DL DMA Engines\n+\t * Note : Assuming only VF0 bundle is used for PF mode\n+\t */\n+\tphys_high = (uint32_t)(d->sw_rings_phys >> 32);\n+\tphys_low  = (uint32_t)(d->sw_rings_phys & ~(ACC100_SIZE_64MBYTE-1));\n+\n+\t/* Choose correct registry addresses for the device type */\n+\tif (d->pf_device)\n+\t\treg_addr = &pf_reg_addr;\n+\telse\n+\t\treg_addr = &vf_reg_addr;\n+\n+\t/* Read the populated cfg from ACC100 registers */\n+\tfetch_acc100_config(dev);\n+\n+\t/* Mark as configured properly */\n+\td->configured = true;\n+\n+\t/* Release AXI from PF */\n+\tif (d->pf_device)\n+\t\tacc100_reg_write(d, HWPfDmaAxiControl, 1);\n+\n+\tacc100_reg_write(d, reg_addr->dma_ring_ul5g_hi, phys_high);\n+\tacc100_reg_write(d, reg_addr->dma_ring_ul5g_lo, phys_low);\n+\tacc100_reg_write(d, reg_addr->dma_ring_dl5g_hi, phys_high);\n+\tacc100_reg_write(d, reg_addr->dma_ring_dl5g_lo, phys_low);\n+\tacc100_reg_write(d, reg_addr->dma_ring_ul4g_hi, phys_high);\n+\tacc100_reg_write(d, reg_addr->dma_ring_ul4g_lo, phys_low);\n+\tacc100_reg_write(d, reg_addr->dma_ring_dl4g_hi, phys_high);\n+\tacc100_reg_write(d, reg_addr->dma_ring_dl4g_lo, phys_low);\n+\n+\t/*\n+\t * Configure Ring Size to the max queue ring size\n+\t * (used for wrapping purpose)\n+\t */\n+\tpayload = log2_basic(d->sw_ring_size / 64);\n+\tacc100_reg_write(d, reg_addr->ring_size, payload);\n+\n+\t/* Configure tail pointer for use when SDONE enabled */\n+\td->tail_ptrs = rte_zmalloc_socket(\n+\t\t\tdev->device->driver->name,\n+\t\t\tACC100_NUM_QGRPS * ACC100_NUM_AQS * sizeof(uint32_t),\n+\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n+\tif (d->tail_ptrs == NULL) {\n+\t\trte_bbdev_log(ERR, \"Failed to allocate tail ptr for %s:%u\",\n+\t\t\t\tdev->device->driver->name,\n+\t\t\t\tdev->data->dev_id);\n+\t\trte_free(d->sw_rings);\n+\t\treturn -ENOMEM;\n+\t}\n+\td->tail_ptr_phys = rte_malloc_virt2iova(d->tail_ptrs);\n+\n+\tphys_high = (uint32_t)(d->tail_ptr_phys >> 32);\n+\tphys_low  = (uint32_t)(d->tail_ptr_phys);\n+\tacc100_reg_write(d, reg_addr->tail_ptrs_ul5g_hi, phys_high);\n+\tacc100_reg_write(d, reg_addr->tail_ptrs_ul5g_lo, phys_low);\n+\tacc100_reg_write(d, reg_addr->tail_ptrs_dl5g_hi, phys_high);\n+\tacc100_reg_write(d, reg_addr->tail_ptrs_dl5g_lo, phys_low);\n+\tacc100_reg_write(d, reg_addr->tail_ptrs_ul4g_hi, phys_high);\n+\tacc100_reg_write(d, reg_addr->tail_ptrs_ul4g_lo, phys_low);\n+\tacc100_reg_write(d, reg_addr->tail_ptrs_dl4g_hi, phys_high);\n+\tacc100_reg_write(d, reg_addr->tail_ptrs_dl4g_lo, phys_low);\n+\n+\td->harq_layout = rte_zmalloc_socket(\"HARQ Layout\",\n+\t\t\tACC100_HARQ_LAYOUT * sizeof(*d->harq_layout),\n+\t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n+\n+\trte_bbdev_log_debug(\n+\t\t\t\"ACC100 (%s) configured  sw_rings = %p, sw_rings_phys = %#\"\n+\t\t\tPRIx64, dev->data->name, d->sw_rings, d->sw_rings_phys);\n+\n+\treturn 0;\n+}\n+\n /* Free 64MB memory used for software rings */\n static int\n-acc100_dev_close(struct rte_bbdev *dev  __rte_unused)\n+acc100_dev_close(struct rte_bbdev *dev)\n {\n+\tstruct acc100_device *d = dev->data->dev_private;\n+\tif (d->sw_rings_base != NULL) 
{\n+\t\trte_free(d->tail_ptrs);\n+\t\trte_free(d->sw_rings_base);\n+\t\td->sw_rings_base = NULL;\n+\t}\n+\tusleep(1000);\n+\treturn 0;\n+}\n+\n+\n+/**\n+ * Report a ACC100 queue index which is free\n+ * Return 0 to 16k for a valid queue_idx or -1 when no queue is available\n+ * Note : Only supporting VF0 Bundle for PF mode\n+ */\n+static int\n+acc100_find_free_queue_idx(struct rte_bbdev *dev,\n+\t\tconst struct rte_bbdev_queue_conf *conf)\n+{\n+\tstruct acc100_device *d = dev->data->dev_private;\n+\tint op_2_acc[5] = {0, UL_4G, DL_4G, UL_5G, DL_5G};\n+\tint acc = op_2_acc[conf->op_type];\n+\tstruct rte_q_topology_t *qtop = NULL;\n+\tqtopFromAcc(&qtop, acc, &(d->acc100_conf));\n+\tif (qtop == NULL)\n+\t\treturn -1;\n+\t/* Identify matching QGroup Index which are sorted in priority order */\n+\tuint16_t group_idx = qtop->first_qgroup_index;\n+\tgroup_idx += conf->priority;\n+\tif (group_idx >= ACC100_NUM_QGRPS ||\n+\t\t\tconf->priority >= qtop->num_qgroups) {\n+\t\trte_bbdev_log(INFO, \"Invalid Priority on %s, priority %u\",\n+\t\t\t\tdev->data->name, conf->priority);\n+\t\treturn -1;\n+\t}\n+\t/* Find a free AQ_idx  */\n+\tuint16_t aq_idx;\n+\tfor (aq_idx = 0; aq_idx < qtop->num_aqs_per_groups; aq_idx++) {\n+\t\tif (((d->q_assigned_bit_map[group_idx] >> aq_idx) & 0x1) == 0) {\n+\t\t\t/* Mark the Queue as assigned */\n+\t\t\td->q_assigned_bit_map[group_idx] |= (1 << aq_idx);\n+\t\t\t/* Report the AQ Index */\n+\t\t\treturn (group_idx << GRP_ID_SHIFT) + aq_idx;\n+\t\t}\n+\t}\n+\trte_bbdev_log(INFO, \"Failed to find free queue on %s, priority %u\",\n+\t\t\tdev->data->name, conf->priority);\n+\treturn -1;\n+}\n+\n+/* Setup ACC100 queue */\n+static int\n+acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,\n+\t\tconst struct rte_bbdev_queue_conf *conf)\n+{\n+\tstruct acc100_device *d = dev->data->dev_private;\n+\tstruct acc100_queue *q;\n+\tint16_t q_idx;\n+\n+\t/* Allocate the queue data structure. 
*/\n+\tq = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),\n+\t\t\tRTE_CACHE_LINE_SIZE, conf->socket);\n+\tif (q == NULL) {\n+\t\trte_bbdev_log(ERR, \"Failed to allocate queue memory\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tq->d = d;\n+\tq->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));\n+\tq->ring_addr_phys = d->sw_rings_phys + (d->sw_ring_size * queue_id);\n+\n+\t/* Prepare the Ring with default descriptor format */\n+\tunion acc100_dma_desc *desc = NULL;\n+\tunsigned int desc_idx, b_idx;\n+\tint fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?\n+\t\tACC100_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?\n+\t\tACC100_FCW_TD_BLEN : ACC100_FCW_LD_BLEN));\n+\n+\tfor (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {\n+\t\tdesc = q->ring_addr + desc_idx;\n+\t\tdesc->req.word0 = ACC100_DMA_DESC_TYPE;\n+\t\tdesc->req.word1 = 0; /**< Timestamp */\n+\t\tdesc->req.word2 = 0;\n+\t\tdesc->req.word3 = 0;\n+\t\tuint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;\n+\t\tdesc->req.data_ptrs[0].address = q->ring_addr_phys + fcw_offset;\n+\t\tdesc->req.data_ptrs[0].blen = fcw_len;\n+\t\tdesc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;\n+\t\tdesc->req.data_ptrs[0].last = 0;\n+\t\tdesc->req.data_ptrs[0].dma_ext = 0;\n+\t\tfor (b_idx = 1; b_idx < ACC100_DMA_MAX_NUM_POINTERS - 1;\n+\t\t\t\tb_idx++) {\n+\t\t\tdesc->req.data_ptrs[b_idx].blkid = ACC100_DMA_BLKID_IN;\n+\t\t\tdesc->req.data_ptrs[b_idx].last = 1;\n+\t\t\tdesc->req.data_ptrs[b_idx].dma_ext = 0;\n+\t\t\tb_idx++;\n+\t\t\tdesc->req.data_ptrs[b_idx].blkid =\n+\t\t\t\t\tACC100_DMA_BLKID_OUT_ENC;\n+\t\t\tdesc->req.data_ptrs[b_idx].last = 1;\n+\t\t\tdesc->req.data_ptrs[b_idx].dma_ext = 0;\n+\t\t}\n+\t\t/* Preset some fields of LDPC FCW */\n+\t\tdesc->req.fcw_ld.FCWversion = ACC100_FCW_VER;\n+\t\tdesc->req.fcw_ld.gain_i = 1;\n+\t\tdesc->req.fcw_ld.gain_h = 1;\n+\t}\n+\n+\tq->lb_in = rte_zmalloc_socket(dev->device->driver->name,\n+\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\tRTE_CACHE_LINE_SIZE, conf->socket);\n+\tif (q->lb_in == NULL) {\n+\t\trte_bbdev_log(ERR, \"Failed to allocate lb_in memory\");\n+\t\treturn -ENOMEM;\n+\t}\n+\tq->lb_in_addr_phys = rte_malloc_virt2iova(q->lb_in);\n+\tq->lb_out = rte_zmalloc_socket(dev->device->driver->name,\n+\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\tRTE_CACHE_LINE_SIZE, conf->socket);\n+\tif (q->lb_out == NULL) {\n+\t\trte_bbdev_log(ERR, \"Failed to allocate lb_out memory\");\n+\t\treturn -ENOMEM;\n+\t}\n+\tq->lb_out_addr_phys = rte_malloc_virt2iova(q->lb_out);\n+\n+\t/*\n+\t * Software queue ring wraps synchronously with the HW when it reaches\n+\t * the boundary of the maximum allocated queue size, no matter what the\n+\t * sw queue size is. 
This wrapping is guarded by setting the wrap_mask\n+\t * to represent the maximum queue size as allocated at the time when\n+\t * the device has been setup (in configure()).\n+\t *\n+\t * The queue depth is set to the queue size value (conf->queue_size).\n+\t * This limits the occupancy of the queue at any point of time, so that\n+\t * the queue does not get swamped with enqueue requests.\n+\t */\n+\tq->sw_ring_depth = conf->queue_size;\n+\tq->sw_ring_wrap_mask = d->sw_ring_max_depth - 1;\n+\n+\tq->op_type = conf->op_type;\n+\n+\tq_idx = acc100_find_free_queue_idx(dev, conf);\n+\tif (q_idx == -1) {\n+\t\trte_free(q);\n+\t\treturn -1;\n+\t}\n+\n+\tq->qgrp_id = (q_idx >> GRP_ID_SHIFT) & 0xF;\n+\tq->vf_id = (q_idx >> VF_ID_SHIFT)  & 0x3F;\n+\tq->aq_id = q_idx & 0xF;\n+\tq->aq_depth = (conf->op_type ==  RTE_BBDEV_OP_TURBO_DEC) ?\n+\t\t\t(1 << d->acc100_conf.q_ul_4g.aq_depth_log2) :\n+\t\t\t(1 << d->acc100_conf.q_dl_4g.aq_depth_log2);\n+\n+\tq->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,\n+\t\t\tqueue_offset(d->pf_device,\n+\t\t\t\t\tq->vf_id, q->qgrp_id, q->aq_id));\n+\n+\trte_bbdev_log_debug(\n+\t\t\t\"Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p\",\n+\t\t\tdev->data->dev_id, queue_id, q->qgrp_id, q->vf_id,\n+\t\t\tq->aq_id, q->aq_depth, q->mmio_reg_enqueue);\n+\n+\tdev->data->queues[queue_id].queue_private = q;\n+\treturn 0;\n+}\n+\n+/* Release ACC100 queue */\n+static int\n+acc100_queue_release(struct rte_bbdev *dev, uint16_t q_id)\n+{\n+\tstruct acc100_device *d = dev->data->dev_private;\n+\tstruct acc100_queue *q = dev->data->queues[q_id].queue_private;\n+\n+\tif (q != NULL) {\n+\t\t/* Mark the Queue as un-assigned */\n+\t\td->q_assigned_bit_map[q->qgrp_id] &= (0xFFFFFFFF -\n+\t\t\t\t(1 << q->aq_id));\n+\t\trte_free(q->lb_in);\n+\t\trte_free(q->lb_out);\n+\t\trte_free(q);\n+\t\tdev->data->queues[q_id].queue_private = NULL;\n+\t}\n+\n \treturn 0;\n }\n \n@@ -258,8 +673,11 @@\n }\n \n static const struct rte_bbdev_ops acc100_bbdev_ops = {\n+\t.setup_queues = acc100_setup_queues,\n \t.close = acc100_dev_close,\n \t.info_get = acc100_dev_info_get,\n+\t.queue_setup = acc100_queue_setup,\n+\t.queue_release = acc100_queue_release,\n };\n \n /* ACC100 PCI PF address map */\ndiff --git a/drivers/baseband/acc100/rte_acc100_pmd.h b/drivers/baseband/acc100/rte_acc100_pmd.h\nindex 3e2397c..be699e5 100644\n--- a/drivers/baseband/acc100/rte_acc100_pmd.h\n+++ b/drivers/baseband/acc100/rte_acc100_pmd.h\n@@ -518,11 +518,56 @@ struct acc100_registry_addr {\n \t.ddr_range = HWVfDmaDdrBaseRangeRoVf,\n };\n \n+/* Structure associated with each queue. 
*/\n+struct __rte_cache_aligned acc100_queue {\n+\tunion acc100_dma_desc *ring_addr;  /* Virtual address of sw ring */\n+\trte_iova_t ring_addr_phys;  /* Physical address of software ring */\n+\tuint32_t sw_ring_head;  /* software ring head */\n+\tuint32_t sw_ring_tail;  /* software ring tail */\n+\t/* software ring size (descriptors, not bytes) */\n+\tuint32_t sw_ring_depth;\n+\t/* mask used to wrap enqueued descriptors on the sw ring */\n+\tuint32_t sw_ring_wrap_mask;\n+\t/* MMIO register used to enqueue descriptors */\n+\tvoid *mmio_reg_enqueue;\n+\tuint8_t vf_id;  /* VF ID (max = 63) */\n+\tuint8_t qgrp_id;  /* Queue Group ID */\n+\tuint16_t aq_id;  /* Atomic Queue ID */\n+\tuint16_t aq_depth;  /* Depth of atomic queue */\n+\tuint32_t aq_enqueued;  /* Count how many \"batches\" have been enqueued */\n+\tuint32_t aq_dequeued;  /* Count how many \"batches\" have been dequeued */\n+\tuint32_t irq_enable;  /* Enable ops dequeue interrupts if set to 1 */\n+\tstruct rte_mempool *fcw_mempool;  /* FCW mempool */\n+\tenum rte_bbdev_op_type op_type;  /* Type of this Queue: TE or TD */\n+\t/* Internal Buffers for loopback input */\n+\tuint8_t *lb_in;\n+\tuint8_t *lb_out;\n+\trte_iova_t lb_in_addr_phys;\n+\trte_iova_t lb_out_addr_phys;\n+\tstruct acc100_device *d;\n+};\n+\n /* Private data structure for each ACC100 device */\n struct acc100_device {\n \tvoid *mmio_base;  /**< Base address of MMIO registers (BAR0) */\n+\tvoid *sw_rings_base;  /* Base addr of un-aligned memory for sw rings */\n+\tvoid *sw_rings;  /* 64MBs of 64MB aligned memory for sw rings */\n+\trte_iova_t sw_rings_phys;  /* Physical address of sw_rings */\n+\t/* Virtual address of the info memory routed to the this function under\n+\t * operation, whether it is PF or VF.\n+\t */\n+\tunion acc100_harq_layout_data *harq_layout;\n+\tuint32_t sw_ring_size;\n \tuint32_t ddr_size; /* Size in kB */\n+\tuint32_t *tail_ptrs; /* Base address of response tail pointer buffer */\n+\trte_iova_t tail_ptr_phys; /* Physical address of tail pointers */\n+\t/* Max number of entries available for each queue in device, depending\n+\t * on how many queues are enabled with configure()\n+\t */\n+\tuint32_t sw_ring_max_depth;\n \tstruct acc100_conf acc100_conf; /* ACC100 Initial configuration */\n+\t/* Bitmap capturing which Queues have already been assigned */\n+\tuint16_t q_assigned_bit_map[ACC100_NUM_QGRPS];\n \tbool pf_device; /**< True if this is a PF ACC100 device */\n \tbool configured; /**< True if this ACC100 device is configured */\n };\n",
    "prefixes": [
        "v2",
        "04/11"
    ]
}
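
As the Allow header above indicates, an authenticated user can also modify the patch with PUT or PATCH. A hedged sketch, assuming a Patchwork API token sent in a DRF-style "Authorization: Token ..." header; the token value and the new state are placeholders, and "state" is the field shown in the response above:

    import requests

    # Hypothetical token; replace with a real API token for a user with
    # maintainer rights on the project.
    headers = {"Authorization": "Token 0123456789abcdef"}

    # Partially update the patch: change only its state.
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/75648/",
        headers=headers,
        json={"state": "accepted"},  # placeholder state value
    )
    resp.raise_for_status()
    print(resp.json()["state"])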