get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch.
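
For reference, a minimal sketch of reading this endpoint with Python and the requests library (read-only GET requests to the public patches.dpdk.org instance need no authentication; the field names used below appear in the response shown further down):

import requests

# Fetch the patch object shown below as JSON.
resp = requests.get("http://patches.dpdk.org/api/patches/76670/",
                    params={"format": "json"}, timeout=30)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # [3/7] raw/dpaa2_qdma: refactor the code
print(patch["state"])  # superseded
print(patch["mbox"])   # URL of the raw patch mbox

Updating a patch via PATCH or PUT, by contrast, requires an authenticated request (typically a maintainer API token sent in the Authorization header) and appropriate permissions on the project.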

GET /api/patches/76670/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 76670,
    "url": "http://patches.dpdk.org/api/patches/76670/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1599470764-30569-4-git-send-email-g.singh@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599470764-30569-4-git-send-email-g.singh@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1599470764-30569-4-git-send-email-g.singh@nxp.com",
    "date": "2020-09-07T09:26:00",
    "name": "[3/7] raw/dpaa2_qdma: refactor the code",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "20d526d5060fd97994a385664b118307aff78d39",
    "submitter": {
        "id": 1068,
        "url": "http://patches.dpdk.org/api/people/1068/?format=api",
        "name": "Gagandeep Singh",
        "email": "g.singh@nxp.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1599470764-30569-4-git-send-email-g.singh@nxp.com/mbox/",
    "series": [
        {
            "id": 11988,
            "url": "http://patches.dpdk.org/api/series/11988/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11988",
            "date": "2020-09-07T09:25:57",
            "name": "raw/dpaa2_qdma: driver enhancement",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/11988/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/76670/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/76670/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B1093A04B9;\n\tMon,  7 Sep 2020 11:28:01 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 10C541C12C;\n\tMon,  7 Sep 2020 11:26:55 +0200 (CEST)",
            "from inva020.nxp.com (inva020.nxp.com [92.121.34.13])\n by dpdk.org (Postfix) with ESMTP id AA8FF1C0DA\n for <dev@dpdk.org>; Mon,  7 Sep 2020 11:26:53 +0200 (CEST)",
            "from inva020.nxp.com (localhost [127.0.0.1])\n by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 83D781A1029;\n Mon,  7 Sep 2020 11:26:53 +0200 (CEST)",
            "from invc005.ap-rdc01.nxp.com (invc005.ap-rdc01.nxp.com\n [165.114.16.14])\n by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 978CC1A1053;\n Mon,  7 Sep 2020 11:26:50 +0200 (CEST)",
            "from lsv11086.swis.cn-sha01.nxp.com (lsv11086.swis.cn-sha01.nxp.com\n [92.121.210.87])\n by invc005.ap-rdc01.nxp.com (Postfix) with ESMTP id 10C2B402D7;\n Mon,  7 Sep 2020 11:26:43 +0200 (CEST)"
        ],
        "From": "Gagandeep Singh <g.singh@nxp.com>",
        "To": "dev@dpdk.org,\n\tnipun.gupta@nxp.com,\n\themant.agrawal@nxp.com",
        "Cc": "thomas.monjalon@6wind.com,\n\tJun Yang <jun.yang@nxp.com>",
        "Date": "Mon,  7 Sep 2020 17:26:00 +0800",
        "Message-Id": "<1599470764-30569-4-git-send-email-g.singh@nxp.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1599470764-30569-1-git-send-email-g.singh@nxp.com>",
        "References": "<1599470764-30569-1-git-send-email-g.singh@nxp.com>",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH 3/7] raw/dpaa2_qdma: refactor the code",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Jun Yang <jun.yang@nxp.com>\n\nThis patch moves qdma queue specific configurations from driver\nglobal configuration to per-queue setup. This is required\nas each queue can be configured differently.\n\nSigned-off-by: Jun Yang <jun.yang@nxp.com>\n---\n drivers/raw/dpaa2_qdma/dpaa2_qdma.c         | 1259 +++++++++++++--------------\n drivers/raw/dpaa2_qdma/dpaa2_qdma.h         |   39 +-\n drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h |   13 +-\n 3 files changed, 670 insertions(+), 641 deletions(-)",
    "diff": "diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\nindex 57194db..ae45ec3 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n@@ -26,6 +26,9 @@\n \n #define DPAA2_QDMA_NO_PREFETCH \"no_prefetch\"\n \n+/* Dynamic log type identifier */\n+int dpaa2_qdma_logtype;\n+\n uint32_t dpaa2_coherent_no_alloc_cache;\n uint32_t dpaa2_coherent_alloc_cache;\n \n@@ -37,31 +40,9 @@ TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);\n static struct qdma_hw_queue_list qdma_queue_list\n \t= TAILQ_HEAD_INITIALIZER(qdma_queue_list);\n \n-/* QDMA Virtual Queues */\n-static struct qdma_virt_queue *qdma_vqs;\n-\n /* QDMA per core data */\n static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];\n \n-typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,\n-\t\t\t\t\t    uint16_t rxq_id,\n-\t\t\t\t\t    uint16_t *vq_id,\n-\t\t\t\t\t    struct rte_qdma_job **job,\n-\t\t\t\t\t    uint16_t nb_jobs);\n-\n-dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;\n-\n-typedef uint16_t (dpdmai_dev_get_job_t)(struct qdma_device *qdma_dev,\n-\t\t\t\t\tconst struct qbman_fd *fd,\n-\t\t\t\t\tstruct rte_qdma_job **job);\n-typedef int (dpdmai_dev_set_fd_t)(struct qdma_device *qdma_dev,\n-\t\t\t\t  struct qbman_fd *fd,\n-\t\t\t\t  struct rte_qdma_job *job,\n-\t\t\t\t  struct rte_qdma_rbp *rbp,\n-\t\t\t\t  uint16_t vq_id);\n-dpdmai_dev_get_job_t *dpdmai_dev_get_job;\n-dpdmai_dev_set_fd_t *dpdmai_dev_set_fd;\n-\n static inline int\n qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,\n \t\t\tuint32_t len, struct qbman_fd *fd,\n@@ -114,7 +95,7 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,\n \t/**\n \t * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011\n \t * Coherent copy of cacheable memory,\n-\t * lookup in downstream cache, no allocate\n+\t* lookup in downstream cache, no allocate\n \t * on miss\n \t */\n \tfd->simple_ddr.rns = 0;\n@@ -204,12 +185,11 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,\n }\n \n static inline int dpdmai_dev_set_fd_us(\n-\t\t\t\tstruct qdma_device *qdma_dev __rte_unused,\n-\t\t\t\tstruct qbman_fd *fd,\n-\t\t\t\tstruct rte_qdma_job *job,\n-\t\t\t\tstruct rte_qdma_rbp *rbp,\n-\t\t\t\tuint16_t vq_id)\n+\t\tstruct qdma_virt_queue *qdma_vq,\n+\t\tstruct qbman_fd *fd,\n+\t\tstruct rte_qdma_job *job)\n {\n+\tstruct rte_qdma_rbp *rbp = &qdma_vq->rbp;\n \tstruct rte_qdma_job **ppjob;\n \tsize_t iova;\n \tint ret = 0;\n@@ -220,7 +200,7 @@ static inline int dpdmai_dev_set_fd_us(\n \t\tiova = (size_t)job->src;\n \n \t/* Set the metadata */\n-\tjob->vq_id = vq_id;\n+\tjob->vq_id = qdma_vq->vq_id;\n \tppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;\n \t*ppjob = job;\n \n@@ -234,15 +214,17 @@ static inline int dpdmai_dev_set_fd_us(\n \t\t\t\t\t   job->len, fd);\n \treturn ret;\n }\n-static inline int dpdmai_dev_set_fd_lf(struct qdma_device *qdma_dev,\n-\t\t\t\t\tstruct qbman_fd *fd,\n-\t\t\t\t\tstruct rte_qdma_job *job,\n-\t\t\t\t\tstruct rte_qdma_rbp *rbp,\n-\t\t\t\t\tuint16_t vq_id)\n+static inline int dpdmai_dev_set_fd_lf(\n+\t\tstruct qdma_virt_queue *qdma_vq,\n+\t\tstruct qbman_fd *fd,\n+\t\tstruct rte_qdma_job *job)\n {\n+\tstruct rte_qdma_rbp *rbp = &qdma_vq->rbp;\n \tstruct rte_qdma_job **ppjob;\n \tstruct qbman_fle *fle;\n \tint ret = 0;\n+\tstruct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);\n+\n \t/*\n \t * Get an FLE/SDD from FLE pool.\n \t * Note: IO metadata is before the FLE and SDD memory.\n@@ -254,7 +236,7 @@ static inline int 
dpdmai_dev_set_fd_lf(struct qdma_device *qdma_dev,\n \t}\n \n \t/* Set the metadata */\n-\tjob->vq_id = vq_id;\n+\tjob->vq_id = qdma_vq->vq_id;\n \t*ppjob = job;\n \n \tfle = (struct qbman_fle *)(ppjob + 1);\n@@ -272,7 +254,7 @@ static inline int dpdmai_dev_set_fd_lf(struct qdma_device *qdma_dev,\n }\n \n static inline uint16_t dpdmai_dev_get_job_us(\n-\t\t\t\tstruct qdma_device *qdma_dev __rte_unused,\n+\t\t\t\tstruct qdma_virt_queue *qdma_vq __rte_unused,\n \t\t\t\tconst struct qbman_fd *fd,\n \t\t\t\tstruct rte_qdma_job **job)\n {\n@@ -281,7 +263,7 @@ static inline uint16_t dpdmai_dev_get_job_us(\n \tstruct rte_qdma_job **ppjob;\n \n \tif (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))\n-\t\tiova = (size_t) (((uint64_t)fd->simple_pci.daddr_hi) << 32\n+\t\tiova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32\n \t\t\t\t| (uint64_t)fd->simple_pci.daddr_lo);\n \telse\n \t\tiova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32\n@@ -289,18 +271,22 @@ static inline uint16_t dpdmai_dev_get_job_us(\n \n \tppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;\n \t*job = (struct rte_qdma_job *)*ppjob;\n-\t(*job)->status = (fd->simple_pci.acc_err << 8) | (fd->simple_pci.error);\n+\t(*job)->status = (fd->simple_pci.acc_err << 8) |\n+\t\t\t\t\t(fd->simple_pci.error);\n \tvqid = (*job)->vq_id;\n \n \treturn vqid;\n }\n \n-static inline uint16_t dpdmai_dev_get_job_lf(struct qdma_device *qdma_dev,\n-\t\t\t\t\t     const struct qbman_fd *fd,\n-\t\t\t\t\t     struct rte_qdma_job **job)\n+static inline uint16_t dpdmai_dev_get_job_lf(\n+\t\t\t\t\t\tstruct qdma_virt_queue *qdma_vq,\n+\t\t\t\t\t\tconst struct qbman_fd *fd,\n+\t\t\t\t\t\tstruct rte_qdma_job **job)\n {\n \tstruct rte_qdma_job **ppjob;\n \tuint16_t vqid;\n+\tstruct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);\n+\n \t/*\n \t * Fetch metadata from FLE. 
job and vq_id were set\n \t * in metadata in the enqueue operation.\n@@ -320,342 +306,268 @@ static inline uint16_t dpdmai_dev_get_job_lf(struct qdma_device *qdma_dev,\n \treturn vqid;\n }\n \n-static struct qdma_hw_queue *\n-alloc_hw_queue(uint32_t lcore_id)\n+/* Function to receive a QDMA job for a given device and queue*/\n+static int\n+dpdmai_dev_dequeue_multijob_prefetch(\n+\t\t\tstruct qdma_virt_queue *qdma_vq,\n+\t\t\tuint16_t *vq_id,\n+\t\t\tstruct rte_qdma_job **job,\n+\t\t\tuint16_t nb_jobs)\n {\n-\tstruct qdma_hw_queue *queue = NULL;\n+\tstruct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;\n+\tuint16_t rxq_id = qdma_pq->queue_id;\n \n-\tDPAA2_QDMA_FUNC_TRACE();\n+\tstruct dpaa2_queue *rxq;\n+\tstruct qbman_result *dq_storage, *dq_storage1 = NULL;\n+\tstruct qbman_pull_desc pulldesc;\n+\tstruct qbman_swp *swp;\n+\tstruct queue_storage_info_t *q_storage;\n+\tuint32_t fqid;\n+\tuint8_t status, pending;\n+\tuint8_t num_rx = 0;\n+\tconst struct qbman_fd *fd;\n+\tuint16_t vqid;\n+\tint ret, pull_size;\n \n-\t/* Get a free queue from the list */\n-\tTAILQ_FOREACH(queue, &qdma_queue_list, next) {\n-\t\tif (queue->num_users == 0) {\n-\t\t\tqueue->lcore_id = lcore_id;\n-\t\t\tqueue->num_users++;\n-\t\t\tbreak;\n+\tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n+\t\tret = dpaa2_affine_qbman_swp();\n+\t\tif (ret) {\n+\t\t\tDPAA2_QDMA_ERR(\n+\t\t\t\t\"Failed to allocate IO portal, tid: %d\\n\",\n+\t\t\t\trte_gettid());\n+\t\t\treturn 0;\n \t\t}\n \t}\n+\tswp = DPAA2_PER_LCORE_PORTAL;\n \n-\treturn queue;\n-}\n-\n-static void\n-free_hw_queue(struct qdma_hw_queue *queue)\n-{\n-\tDPAA2_QDMA_FUNC_TRACE();\n+\tpull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;\n+\trxq = &(dpdmai_dev->rx_queue[rxq_id]);\n+\tfqid = rxq->fqid;\n+\tq_storage = rxq->q_storage;\n \n-\tqueue->num_users--;\n-}\n+\tif (unlikely(!q_storage->active_dqs)) {\n+\t\tq_storage->toggle = 0;\n+\t\tdq_storage = q_storage->dq_storage[q_storage->toggle];\n+\t\tq_storage->last_num_pkts = pull_size;\n+\t\tqbman_pull_desc_clear(&pulldesc);\n+\t\tqbman_pull_desc_set_numframes(&pulldesc,\n+\t\t\t\t\t      q_storage->last_num_pkts);\n+\t\tqbman_pull_desc_set_fq(&pulldesc, fqid);\n+\t\tqbman_pull_desc_set_storage(&pulldesc, dq_storage,\n+\t\t\t\t(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);\n+\t\tif (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {\n+\t\t\twhile (!qbman_check_command_complete(\n+\t\t\t       get_swp_active_dqs(\n+\t\t\t       DPAA2_PER_LCORE_DPIO->index)))\n+\t\t\t\t;\n+\t\t\tclear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);\n+\t\t}\n+\t\twhile (1) {\n+\t\t\tif (qbman_swp_pull(swp, &pulldesc)) {\n+\t\t\t\tDPAA2_QDMA_DP_WARN(\n+\t\t\t\t\t\"VDQ command not issued.QBMAN busy\\n\");\n+\t\t\t\t\t/* Portal was busy, try again */\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\t}\n+\t\tq_storage->active_dqs = dq_storage;\n+\t\tq_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;\n+\t\tset_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,\n+\t\t\t\t   dq_storage);\n+\t}\n \n+\tdq_storage = q_storage->active_dqs;\n+\trte_prefetch0((void *)(size_t)(dq_storage));\n+\trte_prefetch0((void *)(size_t)(dq_storage + 1));\n \n-static struct qdma_hw_queue *\n-get_hw_queue(struct qdma_device *qdma_dev, uint32_t lcore_id)\n-{\n-\tstruct qdma_per_core_info *core_info;\n-\tstruct qdma_hw_queue *queue, *temp;\n-\tuint32_t least_num_users;\n-\tint num_hw_queues, i;\n+\t/* Prepare next pull descriptor. 
This will give space for the\n+\t * prefething done on DQRR entries\n+\t */\n+\tq_storage->toggle ^= 1;\n+\tdq_storage1 = q_storage->dq_storage[q_storage->toggle];\n+\tqbman_pull_desc_clear(&pulldesc);\n+\tqbman_pull_desc_set_numframes(&pulldesc, pull_size);\n+\tqbman_pull_desc_set_fq(&pulldesc, fqid);\n+\tqbman_pull_desc_set_storage(&pulldesc, dq_storage1,\n+\t\t(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);\n \n-\tDPAA2_QDMA_FUNC_TRACE();\n+\t/* Check if the previous issued command is completed.\n+\t * Also seems like the SWP is shared between the Ethernet Driver\n+\t * and the SEC driver.\n+\t */\n+\twhile (!qbman_check_command_complete(dq_storage))\n+\t\t;\n+\tif (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))\n+\t\tclear_swp_active_dqs(q_storage->active_dpio_id);\n \n-\tcore_info = &qdma_core_info[lcore_id];\n-\tnum_hw_queues = core_info->num_hw_queues;\n+\tpending = 1;\n \n-\t/*\n-\t * Allocate a HW queue if there are less queues\n-\t * than maximum per core queues configured\n-\t */\n-\tif (num_hw_queues < qdma_dev->max_hw_queues_per_core) {\n-\t\tqueue = alloc_hw_queue(lcore_id);\n-\t\tif (queue) {\n-\t\t\tcore_info->hw_queues[num_hw_queues] = queue;\n-\t\t\tcore_info->num_hw_queues++;\n-\t\t\treturn queue;\n+\tdo {\n+\t\t/* Loop until the dq_storage is updated with\n+\t\t * new token by QBMAN\n+\t\t */\n+\t\twhile (!qbman_check_new_result(dq_storage))\n+\t\t\t;\n+\t\trte_prefetch0((void *)((size_t)(dq_storage + 2)));\n+\t\t/* Check whether Last Pull command is Expired and\n+\t\t * setting Condition for Loop termination\n+\t\t */\n+\t\tif (qbman_result_DQ_is_pull_complete(dq_storage)) {\n+\t\t\tpending = 0;\n+\t\t\t/* Check for valid frame. */\n+\t\t\tstatus = qbman_result_DQ_flags(dq_storage);\n+\t\t\tif (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))\n+\t\t\t\tcontinue;\n \t\t}\n-\t}\n+\t\tfd = qbman_result_DQ_fd(dq_storage);\n \n-\tqueue = core_info->hw_queues[0];\n-\t/* In case there is no queue associated with the core return NULL */\n-\tif (!queue)\n-\t\treturn NULL;\n+\t\tvqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx]);\n+\t\tif (vq_id)\n+\t\t\tvq_id[num_rx] = vqid;\n \n-\t/* Fetch the least loaded H/W queue */\n-\tleast_num_users = core_info->hw_queues[0]->num_users;\n-\tfor (i = 0; i < num_hw_queues; i++) {\n-\t\ttemp = core_info->hw_queues[i];\n-\t\tif (temp->num_users < least_num_users)\n-\t\t\tqueue = temp;\n+\t\tdq_storage++;\n+\t\tnum_rx++;\n+\t} while (pending);\n+\n+\tif (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {\n+\t\twhile (!qbman_check_command_complete(\n+\t\t       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))\n+\t\t\t;\n+\t\tclear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);\n+\t}\n+\t/* issue a volatile dequeue command for next pull */\n+\twhile (1) {\n+\t\tif (qbman_swp_pull(swp, &pulldesc)) {\n+\t\t\tDPAA2_QDMA_DP_WARN(\n+\t\t\t\t\"VDQ command is not issued. 
QBMAN is busy (2)\\n\");\n+\t\t\tcontinue;\n+\t\t}\n+\t\tbreak;\n \t}\n \n-\tif (queue)\n-\t\tqueue->num_users++;\n+\tq_storage->active_dqs = dq_storage1;\n+\tq_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;\n+\tset_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);\n \n-\treturn queue;\n+\treturn num_rx;\n }\n \n-static void\n-put_hw_queue(struct qdma_hw_queue *queue)\n+static int\n+dpdmai_dev_dequeue_multijob_no_prefetch(\n+\t\tstruct qdma_virt_queue *qdma_vq,\n+\t\tuint16_t *vq_id,\n+\t\tstruct rte_qdma_job **job,\n+\t\tuint16_t nb_jobs)\n {\n-\tstruct qdma_per_core_info *core_info;\n-\tint lcore_id, num_hw_queues, i;\n-\n-\tDPAA2_QDMA_FUNC_TRACE();\n+\tstruct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;\n+\tuint16_t rxq_id = qdma_pq->queue_id;\n \n-\t/*\n-\t * If this is the last user of the queue free it.\n-\t * Also remove it from QDMA core info.\n-\t */\n-\tif (queue->num_users == 1) {\n-\t\tfree_hw_queue(queue);\n+\tstruct dpaa2_queue *rxq;\n+\tstruct qbman_result *dq_storage;\n+\tstruct qbman_pull_desc pulldesc;\n+\tstruct qbman_swp *swp;\n+\tuint32_t fqid;\n+\tuint8_t status, pending;\n+\tuint8_t num_rx = 0;\n+\tconst struct qbman_fd *fd;\n+\tuint16_t vqid;\n+\tint ret, next_pull = nb_jobs, num_pulled = 0;\n \n-\t\t/* Remove the physical queue from core info */\n-\t\tlcore_id = queue->lcore_id;\n-\t\tcore_info = &qdma_core_info[lcore_id];\n-\t\tnum_hw_queues = core_info->num_hw_queues;\n-\t\tfor (i = 0; i < num_hw_queues; i++) {\n-\t\t\tif (queue == core_info->hw_queues[i])\n-\t\t\t\tbreak;\n+\tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n+\t\tret = dpaa2_affine_qbman_swp();\n+\t\tif (ret) {\n+\t\t\tDPAA2_QDMA_ERR(\n+\t\t\t\t\"Failed to allocate IO portal, tid: %d\\n\",\n+\t\t\t\trte_gettid());\n+\t\t\treturn 0;\n \t\t}\n-\t\tfor (; i < num_hw_queues - 1; i++)\n-\t\t\tcore_info->hw_queues[i] = core_info->hw_queues[i + 1];\n-\t\tcore_info->hw_queues[i] = NULL;\n-\t} else {\n-\t\tqueue->num_users--;\n \t}\n-}\n-\n-static int\n-dpaa2_qdma_attr_get(struct rte_rawdev *rawdev,\n-\t\t    __rte_unused const char *attr_name,\n-\t\t    uint64_t *attr_value)\n-{\n-\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n-\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n-\tstruct rte_qdma_attr *qdma_attr = (struct rte_qdma_attr *)attr_value;\n-\n-\tDPAA2_QDMA_FUNC_TRACE();\n+\tswp = DPAA2_PER_LCORE_PORTAL;\n \n-\tqdma_attr->num_hw_queues = qdma_dev->num_hw_queues;\n+\trxq = &(dpdmai_dev->rx_queue[rxq_id]);\n+\tfqid = rxq->fqid;\n \n-\treturn 0;\n-}\n+\tdo {\n+\t\tdq_storage = rxq->q_storage->dq_storage[0];\n+\t\t/* Prepare dequeue descriptor */\n+\t\tqbman_pull_desc_clear(&pulldesc);\n+\t\tqbman_pull_desc_set_fq(&pulldesc, fqid);\n+\t\tqbman_pull_desc_set_storage(&pulldesc, dq_storage,\n+\t\t\t(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);\n \n-static int\n-dpaa2_qdma_reset(struct rte_rawdev *rawdev)\n-{\n-\tstruct qdma_hw_queue *queue;\n-\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n-\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n-\tint i;\n-\n-\tDPAA2_QDMA_FUNC_TRACE();\n-\n-\t/* In case QDMA device is not in stopped state, return -EBUSY */\n-\tif (qdma_dev->state == 1) {\n-\t\tDPAA2_QDMA_ERR(\n-\t\t\t\"Device is in running state. 
Stop before reset.\");\n-\t\treturn -EBUSY;\n-\t}\n-\n-\t/* In case there are pending jobs on any VQ, return -EBUSY */\n-\tfor (i = 0; i < qdma_dev->max_vqs; i++) {\n-\t\tif (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=\n-\t\t    qdma_vqs[i].num_dequeues))\n-\t\t\tDPAA2_QDMA_ERR(\"Jobs are still pending on VQ: %d\", i);\n-\t\t\treturn -EBUSY;\n-\t}\n-\n-\t/* Reset HW queues */\n-\tTAILQ_FOREACH(queue, &qdma_queue_list, next)\n-\t\tqueue->num_users = 0;\n-\n-\t/* Reset and free virtual queues */\n-\tfor (i = 0; i < qdma_dev->max_vqs; i++) {\n-\t\tif (qdma_vqs[i].status_ring)\n-\t\t\trte_ring_free(qdma_vqs[i].status_ring);\n-\t}\n-\tif (qdma_vqs)\n-\t\trte_free(qdma_vqs);\n-\tqdma_vqs = NULL;\n-\n-\t/* Reset per core info */\n-\tmemset(&qdma_core_info, 0,\n-\t\tsizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);\n-\n-\t/* Free the FLE pool */\n-\tif (qdma_dev->fle_pool)\n-\t\trte_mempool_free(qdma_dev->fle_pool);\n-\n-\t/* Reset QDMA device structure */\n-\tqdma_dev->mode = RTE_QDMA_MODE_HW;\n-\tqdma_dev->max_hw_queues_per_core = 0;\n-\tqdma_dev->fle_pool = NULL;\n-\tqdma_dev->fle_pool_count = 0;\n-\tqdma_dev->max_vqs = 0;\n-\n-\treturn 0;\n-}\n-\n-static int\n-dpaa2_qdma_configure(const struct rte_rawdev *rawdev,\n-\t\t\t rte_rawdev_obj_t config)\n-{\n-\tchar fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */\n-\tstruct rte_qdma_config *qdma_config = (struct rte_qdma_config *)config;\n-\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n-\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n-\n-\tDPAA2_QDMA_FUNC_TRACE();\n-\n-\t/* In case QDMA device is not in stopped state, return -EBUSY */\n-\tif (qdma_dev->state == 1) {\n-\t\tDPAA2_QDMA_ERR(\n-\t\t\t\"Device is in running state. Stop before config.\");\n-\t\treturn -1;\n-\t}\n-\n-\t/* Set mode */\n-\tqdma_dev->mode = qdma_config->mode;\n-\n-\t/* Set max HW queue per core */\n-\tif (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {\n-\t\tDPAA2_QDMA_ERR(\"H/W queues per core is more than: %d\",\n-\t\t\t       MAX_HW_QUEUE_PER_CORE);\n-\t\treturn -EINVAL;\n-\t}\n-\tqdma_dev->max_hw_queues_per_core =\n-\t\tqdma_config->max_hw_queues_per_core;\n-\n-\t/* Allocate Virtual Queues */\n-\tqdma_vqs = rte_malloc(\"qdma_virtual_queues\",\n-\t\t\t(sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),\n-\t\t\tRTE_CACHE_LINE_SIZE);\n-\tif (!qdma_vqs) {\n-\t\tDPAA2_QDMA_ERR(\"qdma_virtual_queues allocation failed\");\n-\t\treturn -ENOMEM;\n-\t}\n-\tqdma_dev->max_vqs = qdma_config->max_vqs;\n-\n-\t/* Allocate FLE pool; just append PID so that in case of\n-\t * multiprocess, the pool's don't collide.\n-\t */\n-\tsnprintf(fle_pool_name, sizeof(fle_pool_name), \"qdma_fle_pool%u\",\n-\t\t getpid());\n-\tqdma_dev->fle_pool = rte_mempool_create(fle_pool_name,\n-\t\t\tqdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,\n-\t\t\tQDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,\n-\t\t\tNULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);\n-\tif (!qdma_dev->fle_pool) {\n-\t\tDPAA2_QDMA_ERR(\"qdma_fle_pool create failed\");\n-\t\trte_free(qdma_vqs);\n-\t\tqdma_vqs = NULL;\n-\t\treturn -ENOMEM;\n-\t}\n-\tqdma_dev->fle_pool_count = qdma_config->fle_pool_count;\n-\n-\tif (qdma_config->format == RTE_QDMA_ULTRASHORT_FORMAT) {\n-\t\tdpdmai_dev_get_job = dpdmai_dev_get_job_us;\n-\t\tdpdmai_dev_set_fd = dpdmai_dev_set_fd_us;\n-\t} else {\n-\t\tdpdmai_dev_get_job = dpdmai_dev_get_job_lf;\n-\t\tdpdmai_dev_set_fd = dpdmai_dev_set_fd_lf;\n-\t}\n-\treturn 0;\n-}\n-\n-static int\n-dpaa2_qdma_start(struct rte_rawdev *rawdev)\n-{\n-\tstruct dpaa2_dpdmai_dev 
*dpdmai_dev = rawdev->dev_private;\n-\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n-\n-\tDPAA2_QDMA_FUNC_TRACE();\n-\n-\tqdma_dev->state = 1;\n-\n-\treturn 0;\n-}\n-\n-static int\n-dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,\n-\t\t\t  __rte_unused uint16_t queue_id,\n-\t\t\t  rte_rawdev_obj_t queue_conf)\n-{\n-\tchar ring_name[32];\n-\tint i;\n-\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n-\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n-\tstruct rte_qdma_queue_config *q_config =\n-\t\t(struct rte_qdma_queue_config *)queue_conf;\n-\n-\tDPAA2_QDMA_FUNC_TRACE();\n-\n-\trte_spinlock_lock(&qdma_dev->lock);\n+\t\tif (next_pull > dpaa2_dqrr_size) {\n+\t\t\tqbman_pull_desc_set_numframes(&pulldesc,\n+\t\t\t\t\tdpaa2_dqrr_size);\n+\t\t\tnext_pull -= dpaa2_dqrr_size;\n+\t\t} else {\n+\t\t\tqbman_pull_desc_set_numframes(&pulldesc, next_pull);\n+\t\t\tnext_pull = 0;\n+\t\t}\n \n-\t/* Get a free Virtual Queue */\n-\tfor (i = 0; i < qdma_dev->max_vqs; i++) {\n-\t\tif (qdma_vqs[i].in_use == 0)\n+\t\twhile (1) {\n+\t\t\tif (qbman_swp_pull(swp, &pulldesc)) {\n+\t\t\t\tDPAA2_QDMA_DP_WARN(\n+\t\t\t\t\t\"VDQ command not issued. QBMAN busy\");\n+\t\t\t\t/* Portal was busy, try again */\n+\t\t\t\tcontinue;\n+\t\t\t}\n \t\t\tbreak;\n-\t}\n+\t\t}\n \n-\t/* Return in case no VQ is free */\n-\tif (i == qdma_dev->max_vqs) {\n-\t\trte_spinlock_unlock(&qdma_dev->lock);\n-\t\tDPAA2_QDMA_ERR(\"Unable to get lock on QDMA device\");\n-\t\treturn -ENODEV;\n-\t}\n+\t\trte_prefetch0((void *)((size_t)(dq_storage + 1)));\n+\t\t/* Check if the previous issued command is completed. */\n+\t\twhile (!qbman_check_command_complete(dq_storage))\n+\t\t\t;\n \n-\tif (qdma_dev->mode == RTE_QDMA_MODE_HW ||\n-\t\t\t(q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {\n-\t\t/* Allocate HW queue for a VQ */\n-\t\tqdma_vqs[i].hw_queue = alloc_hw_queue(q_config->lcore_id);\n-\t\tqdma_vqs[i].exclusive_hw_queue = 1;\n-\t} else {\n-\t\t/* Allocate a Ring for Virutal Queue in VQ mode */\n-\t\tsnprintf(ring_name, sizeof(ring_name), \"status ring %d\", i);\n-\t\tqdma_vqs[i].status_ring = rte_ring_create(ring_name,\n-\t\t\tqdma_dev->fle_pool_count, rte_socket_id(), 0);\n-\t\tif (!qdma_vqs[i].status_ring) {\n-\t\t\tDPAA2_QDMA_ERR(\"Status ring creation failed for vq\");\n-\t\t\trte_spinlock_unlock(&qdma_dev->lock);\n-\t\t\treturn rte_errno;\n-\t\t}\n+\t\tnum_pulled = 0;\n+\t\tpending = 1;\n \n-\t\t/* Get a HW queue (shared) for a VQ */\n-\t\tqdma_vqs[i].hw_queue = get_hw_queue(qdma_dev,\n-\t\t\t\t\t\t    q_config->lcore_id);\n-\t\tqdma_vqs[i].exclusive_hw_queue = 0;\n-\t}\n+\t\tdo {\n+\t\t\t/* Loop until dq_storage is updated\n+\t\t\t * with new token by QBMAN\n+\t\t\t */\n+\t\t\twhile (!qbman_check_new_result(dq_storage))\n+\t\t\t\t;\n+\t\t\trte_prefetch0((void *)((size_t)(dq_storage + 2)));\n \n-\tif (qdma_vqs[i].hw_queue == NULL) {\n-\t\tDPAA2_QDMA_ERR(\"No H/W queue available for VQ\");\n-\t\tif (qdma_vqs[i].status_ring)\n-\t\t\trte_ring_free(qdma_vqs[i].status_ring);\n-\t\tqdma_vqs[i].status_ring = NULL;\n-\t\trte_spinlock_unlock(&qdma_dev->lock);\n-\t\treturn -ENODEV;\n-\t}\n+\t\t\tif (qbman_result_DQ_is_pull_complete(dq_storage)) {\n+\t\t\t\tpending = 0;\n+\t\t\t\t/* Check for valid frame. 
*/\n+\t\t\t\tstatus = qbman_result_DQ_flags(dq_storage);\n+\t\t\t\tif (unlikely((status &\n+\t\t\t\t\tQBMAN_DQ_STAT_VALIDFRAME) == 0))\n+\t\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tfd = qbman_result_DQ_fd(dq_storage);\n \n-\tqdma_vqs[i].in_use = 1;\n-\tqdma_vqs[i].lcore_id = q_config->lcore_id;\n-\tmemset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));\n-\trte_spinlock_unlock(&qdma_dev->lock);\n+\t\t\tvqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx]);\n+\t\t\tif (vq_id)\n+\t\t\t\tvq_id[num_rx] = vqid;\n \n-\tif (q_config->rbp != NULL)\n-\t\tmemcpy(&qdma_vqs[i].rbp, q_config->rbp,\n-\t\t       sizeof(struct rte_qdma_rbp));\n+\t\t\tdq_storage++;\n+\t\t\tnum_rx++;\n+\t\t\tnum_pulled++;\n \n-\treturn i;\n+\t\t} while (pending);\n+\t/* Last VDQ provided all packets and more packets are requested */\n+\t} while (next_pull && num_pulled == dpaa2_dqrr_size);\n+\n+\treturn num_rx;\n }\n \n static int\n-dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,\n-\t\t\tuint16_t txq_id,\n-\t\t\tuint16_t vq_id,\n-\t\t\tstruct rte_qdma_rbp *rbp,\n+dpdmai_dev_enqueue_multi(\n+\t\t\tstruct qdma_virt_queue *qdma_vq,\n \t\t\tstruct rte_qdma_job **job,\n \t\t\tuint16_t nb_jobs)\n {\n+\tstruct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;\n+\tuint16_t txq_id = qdma_pq->queue_id;\n+\n \tstruct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];\n \tstruct dpaa2_queue *txq;\n \tstruct qbman_eq_desc eqdesc;\n@@ -692,8 +604,7 @@ dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,\n \t\t\tdpaa2_eqcr_size : nb_jobs;\n \n \t\tfor (loop = 0; loop < num_to_send; loop++) {\n-\t\t\tret = dpdmai_dev_set_fd(dpdmai_dev->qdma_dev, &fd[loop],\n-\t\t\t\t\t\tjob[num_tx], rbp, vq_id);\n+\t\t\tret = qdma_vq->set_fd(qdma_vq, &fd[loop], job[num_tx]);\n \t\t\tif (ret < 0) {\n \t\t\t\t/* Set nb_jobs to loop, so outer while loop\n \t\t\t\t * breaks out.\n@@ -707,6 +618,7 @@ dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,\n \n \t\t/* Enqueue the packet to the QBMAN */\n \t\tuint32_t enqueue_loop = 0, retry_count = 0;\n+\n \t\twhile (enqueue_loop < loop) {\n \t\t\tret = qbman_swp_enqueue_multiple(swp,\n \t\t\t\t\t\t&eqdesc,\n@@ -727,299 +639,426 @@ dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,\n \treturn num_tx;\n }\n \n-static int\n-dpaa2_qdma_enqueue(__rte_unused struct rte_rawdev *rawdev,\n-\t\t  __rte_unused struct rte_rawdev_buf **buffers,\n-\t\t  unsigned int nb_jobs,\n-\t\t  rte_rawdev_obj_t context)\n+static struct qdma_hw_queue *\n+alloc_hw_queue(uint32_t lcore_id)\n {\n-\tstruct rte_qdma_enqdeq *e_context = (struct rte_qdma_enqdeq *)context;\n-\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[e_context->vq_id];\n-\tstruct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;\n-\tstruct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;\n-\tint ret;\n+\tstruct qdma_hw_queue *queue = NULL;\n \n-\t/* Return error in case of wrong lcore_id */\n-\tif (rte_lcore_id() != qdma_vq->lcore_id) {\n-\t\tDPAA2_QDMA_ERR(\"QDMA enqueue for vqid %d on wrong core\",\n-\t\t\t\te_context->vq_id);\n-\t\treturn -EINVAL;\n-\t}\n+\tDPAA2_QDMA_FUNC_TRACE();\n \n-\tret = dpdmai_dev_enqueue_multi(dpdmai_dev,\n-\t\t\t\t qdma_pq->queue_id,\n-\t\t\t\t e_context->vq_id,\n-\t\t\t\t &qdma_vq->rbp,\n-\t\t\t\t e_context->job,\n-\t\t\t\t nb_jobs);\n-\tif (ret < 0) {\n-\t\tDPAA2_QDMA_ERR(\"DPDMAI device enqueue failed: %d\", ret);\n-\t\treturn ret;\n+\t/* Get a free queue from the list */\n+\tTAILQ_FOREACH(queue, &qdma_queue_list, next) {\n+\t\tif (queue->num_users == 0) 
{\n+\t\t\tqueue->lcore_id = lcore_id;\n+\t\t\tqueue->num_users++;\n+\t\t\tbreak;\n+\t\t}\n \t}\n \n-\tqdma_vq->num_enqueues += ret;\n+\treturn queue;\n+}\n \n-\treturn ret;\n+static void\n+free_hw_queue(struct qdma_hw_queue *queue)\n+{\n+\tDPAA2_QDMA_FUNC_TRACE();\n+\n+\tqueue->num_users--;\n }\n \n-/* Function to receive a QDMA job for a given device and queue*/\n-static int\n-dpdmai_dev_dequeue_multijob_prefetch(\n-\t\t\tstruct dpaa2_dpdmai_dev *dpdmai_dev,\n-\t\t\tuint16_t rxq_id,\n-\t\t\tuint16_t *vq_id,\n-\t\t\tstruct rte_qdma_job **job,\n-\t\t\tuint16_t nb_jobs)\n+\n+static struct qdma_hw_queue *\n+get_hw_queue(struct qdma_device *qdma_dev, uint32_t lcore_id)\n {\n-\tstruct dpaa2_queue *rxq;\n-\tstruct qbman_result *dq_storage, *dq_storage1 = NULL;\n-\tstruct qbman_pull_desc pulldesc;\n-\tstruct qbman_swp *swp;\n-\tstruct queue_storage_info_t *q_storage;\n-\tuint32_t fqid;\n-\tuint8_t status, pending;\n-\tuint8_t num_rx = 0;\n-\tconst struct qbman_fd *fd;\n-\tuint16_t vqid;\n-\tint ret, pull_size;\n+\tstruct qdma_per_core_info *core_info;\n+\tstruct qdma_hw_queue *queue, *temp;\n+\tuint32_t least_num_users;\n+\tint num_hw_queues, i;\n \n-\tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n-\t\tret = dpaa2_affine_qbman_swp();\n-\t\tif (ret) {\n-\t\t\tDPAA2_QDMA_ERR(\n-\t\t\t\t\"Failed to allocate IO portal, tid: %d\\n\",\n-\t\t\t\trte_gettid());\n-\t\t\treturn 0;\n+\tDPAA2_QDMA_FUNC_TRACE();\n+\n+\tcore_info = &qdma_core_info[lcore_id];\n+\tnum_hw_queues = core_info->num_hw_queues;\n+\n+\t/*\n+\t * Allocate a HW queue if there are less queues\n+\t * than maximum per core queues configured\n+\t */\n+\tif (num_hw_queues < qdma_dev->max_hw_queues_per_core) {\n+\t\tqueue = alloc_hw_queue(lcore_id);\n+\t\tif (queue) {\n+\t\t\tcore_info->hw_queues[num_hw_queues] = queue;\n+\t\t\tcore_info->num_hw_queues++;\n+\t\t\treturn queue;\n \t\t}\n \t}\n-\tswp = DPAA2_PER_LCORE_PORTAL;\n \n-\tpull_size = (nb_jobs > dpaa2_dqrr_size) ? 
dpaa2_dqrr_size : nb_jobs;\n-\trxq = &(dpdmai_dev->rx_queue[rxq_id]);\n-\tfqid = rxq->fqid;\n-\tq_storage = rxq->q_storage;\n+\tqueue = core_info->hw_queues[0];\n+\t/* In case there is no queue associated with the core return NULL */\n+\tif (!queue)\n+\t\treturn NULL;\n \n-\tif (unlikely(!q_storage->active_dqs)) {\n-\t\tq_storage->toggle = 0;\n-\t\tdq_storage = q_storage->dq_storage[q_storage->toggle];\n-\t\tq_storage->last_num_pkts = pull_size;\n-\t\tqbman_pull_desc_clear(&pulldesc);\n-\t\tqbman_pull_desc_set_numframes(&pulldesc,\n-\t\t\t\t\t      q_storage->last_num_pkts);\n-\t\tqbman_pull_desc_set_fq(&pulldesc, fqid);\n-\t\tqbman_pull_desc_set_storage(&pulldesc, dq_storage,\n-\t\t\t\t(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);\n-\t\tif (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {\n-\t\t\twhile (!qbman_check_command_complete(\n-\t\t\t       get_swp_active_dqs(\n-\t\t\t       DPAA2_PER_LCORE_DPIO->index)))\n-\t\t\t\t;\n-\t\t\tclear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);\n+\t/* Fetch the least loaded H/W queue */\n+\tleast_num_users = core_info->hw_queues[0]->num_users;\n+\tfor (i = 0; i < num_hw_queues; i++) {\n+\t\ttemp = core_info->hw_queues[i];\n+\t\tif (temp->num_users < least_num_users)\n+\t\t\tqueue = temp;\n+\t}\n+\n+\tif (queue)\n+\t\tqueue->num_users++;\n+\n+\treturn queue;\n+}\n+\n+static void\n+put_hw_queue(struct qdma_hw_queue *queue)\n+{\n+\tstruct qdma_per_core_info *core_info;\n+\tint lcore_id, num_hw_queues, i;\n+\n+\tDPAA2_QDMA_FUNC_TRACE();\n+\n+\t/*\n+\t * If this is the last user of the queue free it.\n+\t * Also remove it from QDMA core info.\n+\t */\n+\tif (queue->num_users == 1) {\n+\t\tfree_hw_queue(queue);\n+\n+\t\t/* Remove the physical queue from core info */\n+\t\tlcore_id = queue->lcore_id;\n+\t\tcore_info = &qdma_core_info[lcore_id];\n+\t\tnum_hw_queues = core_info->num_hw_queues;\n+\t\tfor (i = 0; i < num_hw_queues; i++) {\n+\t\t\tif (queue == core_info->hw_queues[i])\n+\t\t\t\tbreak;\n \t\t}\n-\t\twhile (1) {\n-\t\t\tif (qbman_swp_pull(swp, &pulldesc)) {\n-\t\t\t\tDPAA2_QDMA_DP_WARN(\n-\t\t\t\t\t\"VDQ command not issued.QBMAN busy\\n\");\n-\t\t\t\t\t/* Portal was busy, try again */\n-\t\t\t\tcontinue;\n-\t\t\t}\n-\t\t\tbreak;\n+\t\tfor (; i < num_hw_queues - 1; i++)\n+\t\t\tcore_info->hw_queues[i] = core_info->hw_queues[i + 1];\n+\t\tcore_info->hw_queues[i] = NULL;\n+\t} else {\n+\t\tqueue->num_users--;\n+\t}\n+}\n+\n+static int\n+dpaa2_qdma_attr_get(struct rte_rawdev *rawdev,\n+\t\t    __rte_unused const char *attr_name,\n+\t\t    uint64_t *attr_value)\n+{\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\tstruct rte_qdma_attr *qdma_attr = (struct rte_qdma_attr *)attr_value;\n+\n+\tDPAA2_QDMA_FUNC_TRACE();\n+\n+\tqdma_attr->num_hw_queues = qdma_dev->num_hw_queues;\n+\n+\treturn 0;\n+}\n+\n+static int\n+dpaa2_qdma_reset(struct rte_rawdev *rawdev)\n+{\n+\tstruct qdma_hw_queue *queue;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\tint i;\n+\n+\tDPAA2_QDMA_FUNC_TRACE();\n+\n+\t/* In case QDMA device is not in stopped state, return -EBUSY */\n+\tif (qdma_dev->state == 1) {\n+\t\tDPAA2_QDMA_ERR(\n+\t\t\t\"Device is in running state. 
Stop before reset.\");\n+\t\treturn -EBUSY;\n+\t}\n+\n+\t/* In case there are pending jobs on any VQ, return -EBUSY */\n+\tfor (i = 0; i < qdma_dev->max_vqs; i++) {\n+\t\tif (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=\n+\t\t    qdma_dev->vqs[i].num_dequeues)) {\n+\t\t\tDPAA2_QDMA_ERR(\"Jobs are still pending on VQ: %d\", i);\n+\t\t\treturn -EBUSY;\n \t\t}\n-\t\tq_storage->active_dqs = dq_storage;\n-\t\tq_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;\n-\t\tset_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,\n-\t\t\t\t   dq_storage);\n \t}\n \n-\tdq_storage = q_storage->active_dqs;\n-\trte_prefetch0((void *)(size_t)(dq_storage));\n-\trte_prefetch0((void *)(size_t)(dq_storage + 1));\n+\t/* Reset HW queues */\n+\tTAILQ_FOREACH(queue, &qdma_queue_list, next)\n+\t\tqueue->num_users = 0;\n \n-\t/* Prepare next pull descriptor. This will give space for the\n-\t * prefething done on DQRR entries\n-\t */\n-\tq_storage->toggle ^= 1;\n-\tdq_storage1 = q_storage->dq_storage[q_storage->toggle];\n-\tqbman_pull_desc_clear(&pulldesc);\n-\tqbman_pull_desc_set_numframes(&pulldesc, pull_size);\n-\tqbman_pull_desc_set_fq(&pulldesc, fqid);\n-\tqbman_pull_desc_set_storage(&pulldesc, dq_storage1,\n-\t\t(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);\n+\t/* Reset and free virtual queues */\n+\tfor (i = 0; i < qdma_dev->max_vqs; i++) {\n+\t\tif (qdma_dev->vqs[i].status_ring)\n+\t\t\trte_ring_free(qdma_dev->vqs[i].status_ring);\n+\t}\n+\tif (qdma_dev->vqs)\n+\t\trte_free(qdma_dev->vqs);\n+\tqdma_dev->vqs = NULL;\n \n-\t/* Check if the previous issued command is completed.\n-\t * Also seems like the SWP is shared between the Ethernet Driver\n-\t * and the SEC driver.\n+\t/* Reset per core info */\n+\tmemset(&qdma_core_info, 0,\n+\t\tsizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);\n+\n+\t/* Free the FLE pool */\n+\tif (qdma_dev->fle_pool)\n+\t\trte_mempool_free(qdma_dev->fle_pool);\n+\n+\t/* Reset QDMA device structure */\n+\tqdma_dev->max_hw_queues_per_core = 0;\n+\tqdma_dev->fle_pool = NULL;\n+\tqdma_dev->fle_pool_count = 0;\n+\tqdma_dev->max_vqs = 0;\n+\n+\treturn 0;\n+}\n+\n+static int\n+dpaa2_qdma_configure(const struct rte_rawdev *rawdev,\n+\t\t\t rte_rawdev_obj_t config)\n+{\n+\tchar name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */\n+\tstruct rte_qdma_config *qdma_config = (struct rte_qdma_config *)config;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\n+\tDPAA2_QDMA_FUNC_TRACE();\n+\n+\t/* In case QDMA device is not in stopped state, return -EBUSY */\n+\tif (qdma_dev->state == 1) {\n+\t\tDPAA2_QDMA_ERR(\n+\t\t\t\"Device is in running state. 
Stop before config.\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* Set max HW queue per core */\n+\tif (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {\n+\t\tDPAA2_QDMA_ERR(\"H/W queues per core is more than: %d\",\n+\t\t\t       MAX_HW_QUEUE_PER_CORE);\n+\t\treturn -EINVAL;\n+\t}\n+\tqdma_dev->max_hw_queues_per_core =\n+\t\tqdma_config->max_hw_queues_per_core;\n+\n+\t/* Allocate Virtual Queues */\n+\tsprintf(name, \"qdma_%d_vq\", rawdev->dev_id);\n+\tqdma_dev->vqs = rte_malloc(name,\n+\t\t\t(sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),\n+\t\t\tRTE_CACHE_LINE_SIZE);\n+\tif (!qdma_dev->vqs) {\n+\t\tDPAA2_QDMA_ERR(\"qdma_virtual_queues allocation failed\");\n+\t\treturn -ENOMEM;\n+\t}\n+\tqdma_dev->max_vqs = qdma_config->max_vqs;\n+\n+\t/* Allocate FLE pool; just append PID so that in case of\n+\t * multiprocess, the pool's don't collide.\n \t */\n-\twhile (!qbman_check_command_complete(dq_storage))\n-\t\t;\n-\tif (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))\n-\t\tclear_swp_active_dqs(q_storage->active_dpio_id);\n+\tsnprintf(name, sizeof(name), \"qdma_fle_pool%u\",\n+\t\t getpid());\n+\tqdma_dev->fle_pool = rte_mempool_create(name,\n+\t\t\tqdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,\n+\t\t\tQDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,\n+\t\t\tNULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);\n+\tif (!qdma_dev->fle_pool) {\n+\t\tDPAA2_QDMA_ERR(\"qdma_fle_pool create failed\");\n+\t\trte_free(qdma_dev->vqs);\n+\t\tqdma_dev->vqs = NULL;\n+\t\treturn -ENOMEM;\n+\t}\n+\tqdma_dev->fle_pool_count = qdma_config->fle_pool_count;\n \n-\tpending = 1;\n+\treturn 0;\n+}\n \n-\tdo {\n-\t\t/* Loop until the dq_storage is updated with\n-\t\t * new token by QBMAN\n-\t\t */\n-\t\twhile (!qbman_check_new_result(dq_storage))\n-\t\t\t;\n-\t\trte_prefetch0((void *)((size_t)(dq_storage + 2)));\n-\t\t/* Check whether Last Pull command is Expired and\n-\t\t * setting Condition for Loop termination\n-\t\t */\n-\t\tif (qbman_result_DQ_is_pull_complete(dq_storage)) {\n-\t\t\tpending = 0;\n-\t\t\t/* Check for valid frame. 
*/\n-\t\t\tstatus = qbman_result_DQ_flags(dq_storage);\n-\t\t\tif (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))\n-\t\t\t\tcontinue;\n-\t\t}\n-\t\tfd = qbman_result_DQ_fd(dq_storage);\n+static int\n+dpaa2_qdma_start(struct rte_rawdev *rawdev)\n+{\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\n+\tDPAA2_QDMA_FUNC_TRACE();\n \n-\t\tvqid = dpdmai_dev_get_job(dpdmai_dev->qdma_dev, fd,\n-\t\t\t\t\t  &job[num_rx]);\n-\t\tif (vq_id)\n-\t\t\tvq_id[num_rx] = vqid;\n+\tqdma_dev->state = 1;\n \n-\t\tdq_storage++;\n-\t\tnum_rx++;\n-\t} while (pending);\n+\treturn 0;\n+}\n \n-\tif (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {\n-\t\twhile (!qbman_check_command_complete(\n-\t\t       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))\n-\t\t\t;\n-\t\tclear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);\n-\t}\n-\t/* issue a volatile dequeue command for next pull */\n-\twhile (1) {\n-\t\tif (qbman_swp_pull(swp, &pulldesc)) {\n-\t\t\tDPAA2_QDMA_DP_WARN(\"VDQ command is not issued.\"\n-\t\t\t\t\t  \"QBMAN is busy (2)\\n\");\n-\t\t\tcontinue;\n-\t\t}\n-\t\tbreak;\n+static int\n+check_devargs_handler(__rte_unused const char *key, const char *value,\n+\t\t      __rte_unused void *opaque)\n+{\n+\tif (strcmp(value, \"1\"))\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+static int\n+dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)\n+{\n+\tstruct rte_kvargs *kvlist;\n+\n+\tif (!devargs)\n+\t\treturn 0;\n+\n+\tkvlist = rte_kvargs_parse(devargs->args, NULL);\n+\tif (!kvlist)\n+\t\treturn 0;\n+\n+\tif (!rte_kvargs_count(kvlist, key)) {\n+\t\trte_kvargs_free(kvlist);\n+\t\treturn 0;\n \t}\n \n-\tq_storage->active_dqs = dq_storage1;\n-\tq_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;\n-\tset_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);\n+\tif (rte_kvargs_process(kvlist, key,\n+\t\t\t       check_devargs_handler, NULL) < 0) {\n+\t\trte_kvargs_free(kvlist);\n+\t\treturn 0;\n+\t}\n+\trte_kvargs_free(kvlist);\n \n-\treturn num_rx;\n+\treturn 1;\n }\n \n static int\n-dpdmai_dev_dequeue_multijob_no_prefetch(\n-\t\tstruct dpaa2_dpdmai_dev *dpdmai_dev,\n-\t\tuint16_t rxq_id,\n-\t\tuint16_t *vq_id,\n-\t\tstruct rte_qdma_job **job,\n-\t\tuint16_t nb_jobs)\n+dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,\n+\t\t\t  __rte_unused uint16_t queue_id,\n+\t\t\t  rte_rawdev_obj_t queue_conf)\n {\n-\tstruct dpaa2_queue *rxq;\n-\tstruct qbman_result *dq_storage;\n-\tstruct qbman_pull_desc pulldesc;\n-\tstruct qbman_swp *swp;\n-\tuint32_t fqid;\n-\tuint8_t status, pending;\n-\tuint8_t num_rx = 0;\n-\tconst struct qbman_fd *fd;\n-\tuint16_t vqid;\n-\tint ret, next_pull = nb_jobs, num_pulled = 0;\n+\tchar ring_name[32];\n+\tint i;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\tstruct rte_qdma_queue_config *q_config =\n+\t\t(struct rte_qdma_queue_config *)queue_conf;\n \n-\tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n-\t\tret = dpaa2_affine_qbman_swp();\n-\t\tif (ret) {\n-\t\t\tDPAA2_QDMA_ERR(\n-\t\t\t\t\"Failed to allocate IO portal, tid: %d\\n\",\n-\t\t\t\trte_gettid());\n-\t\t\treturn 0;\n+\tDPAA2_QDMA_FUNC_TRACE();\n+\n+\trte_spinlock_lock(&qdma_dev->lock);\n+\n+\t/* Get a free Virtual Queue */\n+\tfor (i = 0; i < qdma_dev->max_vqs; i++) {\n+\t\tif (qdma_dev->vqs[i].in_use == 0)\n+\t\t\tbreak;\n+\t}\n+\n+\t/* Return in case no VQ is free */\n+\tif (i == qdma_dev->max_vqs) {\n+\t\trte_spinlock_unlock(&qdma_dev->lock);\n+\t\tDPAA2_QDMA_ERR(\"Unable to 
get lock on QDMA device\");\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tif (q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ) {\n+\t\t/* Allocate HW queue for a VQ */\n+\t\tqdma_dev->vqs[i].hw_queue = alloc_hw_queue(q_config->lcore_id);\n+\t\tqdma_dev->vqs[i].exclusive_hw_queue = 1;\n+\t} else {\n+\t\t/* Allocate a Ring for Virtual Queue in VQ mode */\n+\t\tsnprintf(ring_name, sizeof(ring_name), \"status ring %d\", i);\n+\t\tqdma_dev->vqs[i].status_ring = rte_ring_create(ring_name,\n+\t\t\tqdma_dev->fle_pool_count, rte_socket_id(), 0);\n+\t\tif (!qdma_dev->vqs[i].status_ring) {\n+\t\t\tDPAA2_QDMA_ERR(\"Status ring creation failed for vq\");\n+\t\t\trte_spinlock_unlock(&qdma_dev->lock);\n+\t\t\treturn rte_errno;\n \t\t}\n+\n+\t\t/* Get a HW queue (shared) for a VQ */\n+\t\tqdma_dev->vqs[i].hw_queue = get_hw_queue(qdma_dev,\n+\t\t\t\t\t\t    q_config->lcore_id);\n+\t\tqdma_dev->vqs[i].exclusive_hw_queue = 0;\n \t}\n-\tswp = DPAA2_PER_LCORE_PORTAL;\n \n-\trxq = &(dpdmai_dev->rx_queue[rxq_id]);\n-\tfqid = rxq->fqid;\n+\tif (qdma_dev->vqs[i].hw_queue == NULL) {\n+\t\tDPAA2_QDMA_ERR(\"No H/W queue available for VQ\");\n+\t\tif (qdma_dev->vqs[i].status_ring)\n+\t\t\trte_ring_free(qdma_dev->vqs[i].status_ring);\n+\t\tqdma_dev->vqs[i].status_ring = NULL;\n+\t\trte_spinlock_unlock(&qdma_dev->lock);\n+\t\treturn -ENODEV;\n+\t}\n \n-\tdo {\n-\t\tdq_storage = rxq->q_storage->dq_storage[0];\n-\t\t/* Prepare dequeue descriptor */\n-\t\tqbman_pull_desc_clear(&pulldesc);\n-\t\tqbman_pull_desc_set_fq(&pulldesc, fqid);\n-\t\tqbman_pull_desc_set_storage(&pulldesc, dq_storage,\n-\t\t\t(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);\n+\tqdma_dev->vqs[i].in_use = 1;\n+\tqdma_dev->vqs[i].lcore_id = q_config->lcore_id;\n+\tmemset(&qdma_dev->vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));\n \n-\t\tif (next_pull > dpaa2_dqrr_size) {\n-\t\t\tqbman_pull_desc_set_numframes(&pulldesc,\n-\t\t\t\t\tdpaa2_dqrr_size);\n-\t\t\tnext_pull -= dpaa2_dqrr_size;\n-\t\t} else {\n-\t\t\tqbman_pull_desc_set_numframes(&pulldesc, next_pull);\n-\t\t\tnext_pull = 0;\n-\t\t}\n+\tif (q_config->flags & RTE_QDMA_VQ_FD_LONG_FORMAT) {\n+\t\tqdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_lf;\n+\t\tqdma_dev->vqs[i].get_job = dpdmai_dev_get_job_lf;\n+\t} else {\n+\t\tqdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_us;\n+\t\tqdma_dev->vqs[i].get_job = dpdmai_dev_get_job_us;\n+\t}\n+\tif (dpaa2_get_devargs(rawdev->device->devargs,\n+\t\t\tDPAA2_QDMA_NO_PREFETCH) ||\n+\t\t\t(getenv(\"DPAA2_NO_QDMA_PREFETCH_RX\"))) {\n+\t\t/* If no prefetch is configured. */\n+\t\tqdma_dev->vqs[i].dequeue_job =\n+\t\t\t\tdpdmai_dev_dequeue_multijob_no_prefetch;\n+\t\tDPAA2_QDMA_INFO(\"No Prefetch RX Mode enabled\");\n+\t} else {\n+\t\tqdma_dev->vqs[i].dequeue_job =\n+\t\t\tdpdmai_dev_dequeue_multijob_prefetch;\n+\t}\n \n-\t\twhile (1) {\n-\t\t\tif (qbman_swp_pull(swp, &pulldesc)) {\n-\t\t\t\tDPAA2_QDMA_DP_WARN(\"VDQ command not issued. QBMAN busy\");\n-\t\t\t\t/* Portal was busy, try again */\n-\t\t\t\tcontinue;\n-\t\t\t}\n-\t\t\tbreak;\n-\t\t}\n+\tqdma_dev->vqs[i].enqueue_job = dpdmai_dev_enqueue_multi;\n \n-\t\trte_prefetch0((void *)((size_t)(dq_storage + 1)));\n-\t\t/* Check if the previous issued command is completed. 
*/\n-\t\twhile (!qbman_check_command_complete(dq_storage))\n-\t\t\t;\n+\tif (q_config->rbp != NULL)\n+\t\tmemcpy(&qdma_dev->vqs[i].rbp, q_config->rbp,\n+\t\t\t\tsizeof(struct rte_qdma_rbp));\n \n-\t\tnum_pulled = 0;\n-\t\tpending = 1;\n+\trte_spinlock_unlock(&qdma_dev->lock);\n \n-\t\tdo {\n-\t\t\t/* Loop until dq_storage is updated\n-\t\t\t * with new token by QBMAN\n-\t\t\t */\n-\t\t\twhile (!qbman_check_new_result(dq_storage))\n-\t\t\t\t;\n-\t\t\trte_prefetch0((void *)((size_t)(dq_storage + 2)));\n+\treturn i;\n+}\n \n-\t\t\tif (qbman_result_DQ_is_pull_complete(dq_storage)) {\n-\t\t\t\tpending = 0;\n-\t\t\t\t/* Check for valid frame. */\n-\t\t\t\tstatus = qbman_result_DQ_flags(dq_storage);\n-\t\t\t\tif (unlikely((status &\n-\t\t\t\t\tQBMAN_DQ_STAT_VALIDFRAME) == 0))\n-\t\t\t\t\tcontinue;\n-\t\t\t}\n-\t\t\tfd = qbman_result_DQ_fd(dq_storage);\n+static int\n+dpaa2_qdma_enqueue(struct rte_rawdev *rawdev,\n+\t\t  __rte_unused struct rte_rawdev_buf **buffers,\n+\t\t  unsigned int nb_jobs,\n+\t\t  rte_rawdev_obj_t context)\n+{\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct rte_qdma_enqdeq *e_context =\n+\t\t(struct rte_qdma_enqdeq *)context;\n+\tstruct qdma_virt_queue *qdma_vq =\n+\t\t&dpdmai_dev->qdma_dev->vqs[e_context->vq_id];\n+\tint ret;\n \n-\t\t\tvqid = dpdmai_dev_get_job(dpdmai_dev->qdma_dev, fd,\n-\t\t\t\t\t\t  &job[num_rx]);\n-\t\t\tif (vq_id)\n-\t\t\t\tvq_id[num_rx] = vqid;\n+\t/* Return error in case of wrong lcore_id */\n+\tif (rte_lcore_id() != qdma_vq->lcore_id) {\n+\t\tDPAA2_QDMA_ERR(\"QDMA enqueue for vqid %d on wrong core\",\n+\t\t\t\te_context->vq_id);\n+\t\treturn -EINVAL;\n+\t}\n \n-\t\t\tdq_storage++;\n-\t\t\tnum_rx++;\n-\t\t\tnum_pulled++;\n+\tret = qdma_vq->enqueue_job(qdma_vq, e_context->job, nb_jobs);\n+\tif (ret < 0) {\n+\t\tDPAA2_QDMA_ERR(\"DPDMAI device enqueue failed: %d\", ret);\n+\t\treturn ret;\n+\t}\n \n-\t\t} while (pending);\n-\t/* Last VDQ provided all packets and more packets are requested */\n-\t} while (next_pull && num_pulled == dpaa2_dqrr_size);\n+\tqdma_vq->num_enqueues += ret;\n \n-\treturn num_rx;\n+\treturn ret;\n }\n \n static int\n-dpaa2_qdma_dequeue(__rte_unused struct rte_rawdev *rawdev,\n+dpaa2_qdma_dequeue(struct rte_rawdev *rawdev,\n \t\t   __rte_unused struct rte_rawdev_buf **buffers,\n \t\t   unsigned int nb_jobs,\n \t\t   rte_rawdev_obj_t cntxt)\n {\n-\tstruct rte_qdma_enqdeq *context = (struct rte_qdma_enqdeq *)cntxt;\n-\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[context->vq_id];\n-\tstruct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\tstruct rte_qdma_enqdeq *context =\n+\t\t(struct rte_qdma_enqdeq *)cntxt;\n+\tstruct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[context->vq_id];\n \tstruct qdma_virt_queue *temp_qdma_vq;\n-\tstruct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;\n \tint ret = 0, i;\n \tunsigned int ring_count;\n \n@@ -1035,12 +1074,12 @@ dpaa2_qdma_dequeue(__rte_unused struct rte_rawdev *rawdev,\n \t\treturn 0;\n \n \tif (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))\n-\t\tnb_jobs = (qdma_vq->num_enqueues -  qdma_vq->num_dequeues);\n+\t\tnb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);\n \n \tif (qdma_vq->exclusive_hw_queue) {\n \t\t/* In case of exclusive queue directly fetch from HW queue */\n-\t\tret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,\n-\t\t\t\t\t NULL, context->job, nb_jobs);\n+\t\tret = qdma_vq->dequeue_job(qdma_vq, 
NULL,\n+\t\t\t\t\tcontext->job, nb_jobs);\n \t\tif (ret < 0) {\n \t\t\tDPAA2_QDMA_ERR(\n \t\t\t\t\"Dequeue from DPDMAI device failed: %d\", ret);\n@@ -1057,11 +1096,10 @@ dpaa2_qdma_dequeue(__rte_unused struct rte_rawdev *rawdev,\n \t\tring_count = rte_ring_count(qdma_vq->status_ring);\n \t\tif (ring_count < nb_jobs) {\n \t\t\t/* TODO - How to have right budget */\n-\t\t\tret = dpdmai_dev_dequeue_multijob(dpdmai_dev,\n-\t\t\t\t\tqdma_pq->queue_id,\n+\t\t\tret = qdma_vq->dequeue_job(qdma_vq,\n \t\t\t\t\ttemp_vq_id, context->job, nb_jobs);\n \t\t\tfor (i = 0; i < ret; i++) {\n-\t\t\t\ttemp_qdma_vq = &qdma_vqs[temp_vq_id[i]];\n+\t\t\t\ttemp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];\n \t\t\t\trte_ring_enqueue(temp_qdma_vq->status_ring,\n \t\t\t\t\t(void *)(context->job[i]));\n \t\t\t}\n@@ -1085,10 +1123,13 @@ dpaa2_qdma_dequeue(__rte_unused struct rte_rawdev *rawdev,\n }\n \n void\n-rte_qdma_vq_stats(uint16_t vq_id,\n-\t\t  struct rte_qdma_vq_stats *vq_status)\n+rte_qdma_vq_stats(struct rte_rawdev *rawdev,\n+\t\tuint16_t vq_id,\n+\t\tstruct rte_qdma_vq_stats *vq_status)\n {\n-\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\tstruct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vq_id];\n \n \tif (qdma_vq->in_use) {\n \t\tvq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;\n@@ -1107,7 +1148,7 @@ dpaa2_qdma_queue_release(struct rte_rawdev *rawdev,\n \tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n \tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n \n-\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];\n+\tstruct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vq_id];\n \n \tDPAA2_QDMA_FUNC_TRACE();\n \n@@ -1120,8 +1161,8 @@ dpaa2_qdma_queue_release(struct rte_rawdev *rawdev,\n \tif (qdma_vq->exclusive_hw_queue)\n \t\tfree_hw_queue(qdma_vq->hw_queue);\n \telse {\n-\t\tif (qdma_vqs->status_ring)\n-\t\t\trte_ring_free(qdma_vqs->status_ring);\n+\t\tif (qdma_vq->status_ring)\n+\t\t\trte_ring_free(qdma_vq->status_ring);\n \n \t\tput_hw_queue(qdma_vq->hw_queue);\n \t}\n@@ -1245,43 +1286,6 @@ dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)\n }\n \n static int\n-check_devargs_handler(__rte_unused const char *key, const char *value,\n-\t\t      __rte_unused void *opaque)\n-{\n-\tif (strcmp(value, \"1\"))\n-\t\treturn -1;\n-\n-\treturn 0;\n-}\n-\n-static int\n-dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)\n-{\n-\tstruct rte_kvargs *kvlist;\n-\n-\tif (!devargs)\n-\t\treturn 0;\n-\n-\tkvlist = rte_kvargs_parse(devargs->args, NULL);\n-\tif (!kvlist)\n-\t\treturn 0;\n-\n-\tif (!rte_kvargs_count(kvlist, key)) {\n-\t\trte_kvargs_free(kvlist);\n-\t\treturn 0;\n-\t}\n-\n-\tif (rte_kvargs_process(kvlist, key,\n-\t\t\t       check_devargs_handler, NULL) < 0) {\n-\t\trte_kvargs_free(kvlist);\n-\t\treturn 0;\n-\t}\n-\trte_kvargs_free(kvlist);\n-\n-\treturn 1;\n-}\n-\n-static int\n dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)\n {\n \tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n@@ -1384,17 +1388,6 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)\n \t\tgoto init_err;\n \t}\n \n-\tif (dpaa2_get_devargs(rawdev->device->devargs,\n-\t\tDPAA2_QDMA_NO_PREFETCH)) {\n-\t\t/* If no prefetch is configured. 
*/\n-\t\tdpdmai_dev_dequeue_multijob =\n-\t\t\t\tdpdmai_dev_dequeue_multijob_no_prefetch;\n-\t\tDPAA2_QDMA_INFO(\"No Prefetch RX Mode enabled\");\n-\t} else {\n-\t\tdpdmai_dev_dequeue_multijob =\n-\t\t\tdpdmai_dev_dequeue_multijob_prefetch;\n-\t}\n-\n \tif (!dpaa2_coherent_no_alloc_cache) {\n \t\tif (dpaa2_svr_family == SVR_LX2160A) {\n \t\t\tdpaa2_coherent_no_alloc_cache =\ndiff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\nindex 3c112d2..4265ee8 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\n@@ -52,10 +52,11 @@ struct qdma_device {\n \t * This is limited by MAX_HW_QUEUE_PER_CORE\n \t */\n \tuint16_t max_hw_queues_per_core;\n+\n+\t/** VQ's of this device */\n+\tstruct qdma_virt_queue *vqs;\n \t/** Maximum number of VQ's */\n \tuint16_t max_vqs;\n-\t/** mode of operation - physical(h/w) or virtual */\n-\tuint8_t mode;\n \t/** Device state - started or stopped */\n \tuint8_t state;\n \t/** FLE pool for the device */\n@@ -80,6 +81,26 @@ struct qdma_hw_queue {\n \tuint32_t num_users;\n };\n \n+struct qdma_virt_queue;\n+\n+typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,\n+\t\t\t\t\tconst struct qbman_fd *fd,\n+\t\t\t\t\tstruct rte_qdma_job **job);\n+typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,\n+\t\t\t\t\tstruct qbman_fd *fd,\n+\t\t\t\t\tstruct rte_qdma_job *job);\n+\n+typedef int (qdma_dequeue_multijob_t)(\n+\t\t\t\tstruct qdma_virt_queue *qdma_vq,\n+\t\t\t\tuint16_t *vq_id,\n+\t\t\t\tstruct rte_qdma_job **job,\n+\t\t\t\tuint16_t nb_jobs);\n+\n+typedef int (qdma_enqueue_multijob_t)(\n+\t\t\tstruct qdma_virt_queue *qdma_vq,\n+\t\t\tstruct rte_qdma_job **job,\n+\t\t\tuint16_t nb_jobs);\n+\n /** Represents a QDMA virtual queue */\n struct qdma_virt_queue {\n \t/** Status ring of the virtual queue */\n@@ -98,6 +119,14 @@ struct qdma_virt_queue {\n \tuint64_t num_enqueues;\n \t/* Total number of dequeues from this VQ */\n \tuint64_t num_dequeues;\n+\n+\tuint16_t vq_id;\n+\n+\tqdma_set_fd_t *set_fd;\n+\tqdma_get_job_t *get_job;\n+\n+\tqdma_dequeue_multijob_t *dequeue_job;\n+\tqdma_enqueue_multijob_t *enqueue_job;\n };\n \n /** Represents a QDMA per core hw queues allocation in virtual mode */\n@@ -176,4 +205,10 @@ struct dpaa2_dpdmai_dev {\n \tstruct qdma_device *qdma_dev;\n };\n \n+static inline struct qdma_device *\n+QDMA_DEV_OF_VQ(struct qdma_virt_queue *vq)\n+{\n+\treturn vq->hw_queue->dpdmai_dev->qdma_dev;\n+}\n+\n #endif /* __DPAA2_QDMA_H__ */\ndiff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\nindex 71894d3..ff4fc1d 100644\n--- a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\n+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\n@@ -50,6 +50,8 @@ enum {\n  */\n #define RTE_QDMA_VQ_EXCLUSIVE_PQ\t(1ULL)\n \n+#define RTE_QDMA_VQ_FD_LONG_FORMAT\t\t(1ULL << 1)\n+\n /** States if the source addresses is physical. */\n #define RTE_QDMA_JOB_SRC_PHY\t\t(1ULL)\n \n@@ -68,10 +70,6 @@ struct rte_qdma_config {\n \tuint16_t max_hw_queues_per_core;\n \t/** Maximum number of VQ's to be used. 
*/\n \tuint16_t max_vqs;\n-\t/** mode of operation - physical(h/w) or virtual */\n-\tuint8_t mode;\n-\t/** FD format */\n-\tuint8_t format;\n \t/**\n \t * User provides this as input to the driver as a size of the FLE pool.\n \t * FLE's (and corresponding source/destination descriptors) are\n@@ -182,13 +180,16 @@ struct rte_qdma_queue_config {\n /**\n  * Get a Virtual Queue statistics.\n  *\n+ * @param rawdev\n+ *   Raw Device.\n  * @param vq_id\n  *   Virtual Queue ID.\n  * @param vq_stats\n  *   VQ statistics structure which will be filled in by the driver.\n  */\n void\n-rte_qdma_vq_stats(uint16_t vq_id,\n-\t\t  struct rte_qdma_vq_stats *vq_stats);\n+rte_qdma_vq_stats(struct rte_rawdev *rawdev,\n+\t\tuint16_t vq_id,\n+\t\tstruct rte_qdma_vq_stats *vq_stats);\n \n #endif /* __RTE_PMD_DPAA2_QDMA_H__*/\n",
    "prefixes": [
        "3/7"
    ]
}
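
As a follow-up sketch of one common use of this response, the "mbox" field points at the raw patch in mbox form; the snippet below (local file name is illustrative only) downloads it so it can be applied with, for example, git am 76670.mbox:

import requests

# Resolve the mbox URL from the patch object and save it locally.
patch = requests.get("http://patches.dpdk.org/api/patches/76670/",
                     params={"format": "json"}, timeout=30).json()
mbox = requests.get(patch["mbox"], timeout=30)
mbox.raise_for_status()

with open("76670.mbox", "wb") as fh:
    fh.write(mbox.content)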