get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch in full; all writable fields are replaced.
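
Any HTTP client can drive these operations. As a minimal sketch of the read side, using Python's requests library (the requests dependency and the printed fields are illustrative; the field names are taken from the response shown below):

import requests

# Fetch this patch as JSON from the Patchwork REST API.
resp = requests.get("http://patches.dpdk.org/api/patches/76668/")
resp.raise_for_status()
patch = resp.json()

# A few of the fields carried by the response below:
print(patch["name"])   # patch subject
print(patch["state"])  # e.g. "superseded"
print(patch["mbox"])   # raw mbox URL, suitable for `git am`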

GET /api/patches/76668/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 76668,
    "url": "http://patches.dpdk.org/api/patches/76668/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1599470764-30569-2-git-send-email-g.singh@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599470764-30569-2-git-send-email-g.singh@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1599470764-30569-2-git-send-email-g.singh@nxp.com",
    "date": "2020-09-07T09:25:58",
    "name": "[1/7] raw/dpaa2_qdma: change DPAA2 QDMA APIs to rawdev ops",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a05158c7d511e19ae35573e3847dfe487326318b",
    "submitter": {
        "id": 1068,
        "url": "http://patches.dpdk.org/api/people/1068/?format=api",
        "name": "Gagandeep Singh",
        "email": "g.singh@nxp.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1599470764-30569-2-git-send-email-g.singh@nxp.com/mbox/",
    "series": [
        {
            "id": 11988,
            "url": "http://patches.dpdk.org/api/series/11988/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11988",
            "date": "2020-09-07T09:25:57",
            "name": "raw/dpaa2_qdma: driver enhancement",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/11988/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/76668/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/76668/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5AA95A04B9;\n\tMon,  7 Sep 2020 11:27:41 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 6E3411C126;\n\tMon,  7 Sep 2020 11:26:46 +0200 (CEST)",
            "from inva021.nxp.com (inva021.nxp.com [92.121.34.21])\n by dpdk.org (Postfix) with ESMTP id D426B1C11E\n for <dev@dpdk.org>; Mon,  7 Sep 2020 11:26:44 +0200 (CEST)",
            "from inva021.nxp.com (localhost [127.0.0.1])\n by inva021.eu-rdc02.nxp.com (Postfix) with ESMTP id A735B200D95;\n Mon,  7 Sep 2020 11:26:44 +0200 (CEST)",
            "from invc005.ap-rdc01.nxp.com (invc005.ap-rdc01.nxp.com\n [165.114.16.14])\n by inva021.eu-rdc02.nxp.com (Postfix) with ESMTP id E8986200112;\n Mon,  7 Sep 2020 11:26:41 +0200 (CEST)",
            "from lsv11086.swis.cn-sha01.nxp.com (lsv11086.swis.cn-sha01.nxp.com\n [92.121.210.87])\n by invc005.ap-rdc01.nxp.com (Postfix) with ESMTP id 39A4F402E2;\n Mon,  7 Sep 2020 11:26:35 +0200 (CEST)"
        ],
        "From": "Gagandeep Singh <g.singh@nxp.com>",
        "To": "dev@dpdk.org,\n\tnipun.gupta@nxp.com,\n\themant.agrawal@nxp.com",
        "Cc": "thomas.monjalon@6wind.com,\n\tGagandeep Singh <g.singh@nxp.com>",
        "Date": "Mon,  7 Sep 2020 17:25:58 +0800",
        "Message-Id": "<1599470764-30569-2-git-send-email-g.singh@nxp.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1599470764-30569-1-git-send-email-g.singh@nxp.com>",
        "References": "<1599470764-30569-1-git-send-email-g.singh@nxp.com>",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH 1/7] raw/dpaa2_qdma: change DPAA2 QDMA APIs to\n\trawdev ops",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "dpaa2_qdma was partially using direct pmd APIs.\nThis patch changes that and adapt the driver to use\nmore of the rawdev APIs\n\nSigned-off-by: Gagandeep Singh <g.singh@nxp.com>\n---\n drivers/raw/dpaa2_qdma/dpaa2_qdma.c         | 331 ++++++++++++++--------------\n drivers/raw/dpaa2_qdma/dpaa2_qdma.h         |   3 +-\n drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h | 207 ++---------------\n 3 files changed, 187 insertions(+), 354 deletions(-)",
    "diff": "diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\nindex 0b9c4e3..a2ee6cc 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n@@ -1,5 +1,5 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright 2018-2019 NXP\n+ * Copyright 2018-2020 NXP\n  */\n \n #include <string.h>\n@@ -30,7 +30,7 @@ uint32_t dpaa2_coherent_no_alloc_cache;\n uint32_t dpaa2_coherent_alloc_cache;\n \n /* QDMA device */\n-static struct qdma_device qdma_dev;\n+static struct qdma_device q_dev;\n \n /* QDMA H/W queues list */\n TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);\n@@ -51,9 +51,11 @@ typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,\n \n dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;\n \n-typedef uint16_t (dpdmai_dev_get_job_t)(const struct qbman_fd *fd,\n+typedef uint16_t (dpdmai_dev_get_job_t)(struct qdma_device *qdma_dev,\n+\t\t\t\t\tconst struct qbman_fd *fd,\n \t\t\t\t\tstruct rte_qdma_job **job);\n-typedef int (dpdmai_dev_set_fd_t)(struct qbman_fd *fd,\n+typedef int (dpdmai_dev_set_fd_t)(struct qdma_device *qdma_dev,\n+\t\t\t\t  struct qbman_fd *fd,\n \t\t\t\t  struct rte_qdma_job *job,\n \t\t\t\t  struct rte_qdma_rbp *rbp,\n \t\t\t\t  uint16_t vq_id);\n@@ -201,10 +203,12 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,\n \tDPAA2_SET_FLE_FIN(fle);\n }\n \n-static inline int dpdmai_dev_set_fd_us(struct qbman_fd *fd,\n-\t\t\t\t\tstruct rte_qdma_job *job,\n-\t\t\t\t\tstruct rte_qdma_rbp *rbp,\n-\t\t\t\t\tuint16_t vq_id)\n+static inline int dpdmai_dev_set_fd_us(\n+\t\t\t\tstruct qdma_device *qdma_dev __rte_unused,\n+\t\t\t\tstruct qbman_fd *fd,\n+\t\t\t\tstruct rte_qdma_job *job,\n+\t\t\t\tstruct rte_qdma_rbp *rbp,\n+\t\t\t\tuint16_t vq_id)\n {\n \tstruct rte_qdma_job **ppjob;\n \tsize_t iova;\n@@ -230,7 +234,8 @@ static inline int dpdmai_dev_set_fd_us(struct qbman_fd *fd,\n \t\t\t\t\t   job->len, fd);\n \treturn ret;\n }\n-static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,\n+static inline int dpdmai_dev_set_fd_lf(struct qdma_device *qdma_dev,\n+\t\t\t\t\tstruct qbman_fd *fd,\n \t\t\t\t\tstruct rte_qdma_job *job,\n \t\t\t\t\tstruct rte_qdma_rbp *rbp,\n \t\t\t\t\tuint16_t vq_id)\n@@ -242,7 +247,7 @@ static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,\n \t * Get an FLE/SDD from FLE pool.\n \t * Note: IO metadata is before the FLE and SDD memory.\n \t */\n-\tret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&ppjob));\n+\tret = rte_mempool_get(qdma_dev->fle_pool, (void **)(&ppjob));\n \tif (ret) {\n \t\tDPAA2_QDMA_DP_DEBUG(\"Memory alloc failed for FLE\");\n \t\treturn ret;\n@@ -266,8 +271,10 @@ static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,\n \treturn 0;\n }\n \n-static inline uint16_t dpdmai_dev_get_job_us(const struct qbman_fd *fd,\n-\t\t\t\t\tstruct rte_qdma_job **job)\n+static inline uint16_t dpdmai_dev_get_job_us(\n+\t\t\t\tstruct qdma_device *qdma_dev __rte_unused,\n+\t\t\t\tconst struct qbman_fd *fd,\n+\t\t\t\tstruct rte_qdma_job **job)\n {\n \tuint16_t vqid;\n \tsize_t iova;\n@@ -288,8 +295,9 @@ static inline uint16_t dpdmai_dev_get_job_us(const struct qbman_fd *fd,\n \treturn vqid;\n }\n \n-static inline uint16_t dpdmai_dev_get_job_lf(const struct qbman_fd *fd,\n-\t\t\t\t\tstruct rte_qdma_job **job)\n+static inline uint16_t dpdmai_dev_get_job_lf(struct qdma_device *qdma_dev,\n+\t\t\t\t\t     const struct qbman_fd *fd,\n+\t\t\t\t\t     struct rte_qdma_job **job)\n {\n \tstruct rte_qdma_job **ppjob;\n \tuint16_t vqid;\n@@ -307,7 +315,7 @@ 
static inline uint16_t dpdmai_dev_get_job_lf(const struct qbman_fd *fd,\n \tvqid = (*job)->vq_id;\n \n \t/* Free FLE to the pool */\n-\trte_mempool_put(qdma_dev.fle_pool, (void *)ppjob);\n+\trte_mempool_put(qdma_dev->fle_pool, (void *)ppjob);\n \n \treturn vqid;\n }\n@@ -341,7 +349,7 @@ free_hw_queue(struct qdma_hw_queue *queue)\n \n \n static struct qdma_hw_queue *\n-get_hw_queue(uint32_t lcore_id)\n+get_hw_queue(struct qdma_device *qdma_dev, uint32_t lcore_id)\n {\n \tstruct qdma_per_core_info *core_info;\n \tstruct qdma_hw_queue *queue, *temp;\n@@ -357,7 +365,7 @@ get_hw_queue(uint32_t lcore_id)\n \t * Allocate a HW queue if there are less queues\n \t * than maximum per core queues configured\n \t */\n-\tif (num_hw_queues < qdma_dev.max_hw_queues_per_core) {\n+\tif (num_hw_queues < qdma_dev->max_hw_queues_per_core) {\n \t\tqueue = alloc_hw_queue(lcore_id);\n \t\tif (queue) {\n \t\t\tcore_info->hw_queues[num_hw_queues] = queue;\n@@ -416,41 +424,41 @@ put_hw_queue(struct qdma_hw_queue *queue)\n \t}\n }\n \n-int\n-rte_qdma_init(void)\n+static int\n+dpaa2_qdma_attr_get(struct rte_rawdev *rawdev,\n+\t\t    __rte_unused const char *attr_name,\n+\t\t    uint64_t *attr_value)\n {\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\tstruct rte_qdma_attr *qdma_attr = (struct rte_qdma_attr *)attr_value;\n+\n \tDPAA2_QDMA_FUNC_TRACE();\n \n-\trte_spinlock_init(&qdma_dev.lock);\n+\tqdma_attr->num_hw_queues = qdma_dev->num_hw_queues;\n \n \treturn 0;\n }\n \n-void\n-rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)\n-{\n-\tDPAA2_QDMA_FUNC_TRACE();\n-\n-\tqdma_attr->num_hw_queues = qdma_dev.num_hw_queues;\n-}\n-\n-int\n-rte_qdma_reset(void)\n+static int\n+dpaa2_qdma_reset(struct rte_rawdev *rawdev)\n {\n \tstruct qdma_hw_queue *queue;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n \tint i;\n \n \tDPAA2_QDMA_FUNC_TRACE();\n \n \t/* In case QDMA device is not in stopped state, return -EBUSY */\n-\tif (qdma_dev.state == 1) {\n+\tif (qdma_dev->state == 1) {\n \t\tDPAA2_QDMA_ERR(\n \t\t\t\"Device is in running state. 
Stop before reset.\");\n \t\treturn -EBUSY;\n \t}\n \n \t/* In case there are pending jobs on any VQ, return -EBUSY */\n-\tfor (i = 0; i < qdma_dev.max_vqs; i++) {\n+\tfor (i = 0; i < qdma_dev->max_vqs; i++) {\n \t\tif (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=\n \t\t    qdma_vqs[i].num_dequeues))\n \t\t\tDPAA2_QDMA_ERR(\"Jobs are still pending on VQ: %d\", i);\n@@ -462,7 +470,7 @@ rte_qdma_reset(void)\n \t\tqueue->num_users = 0;\n \n \t/* Reset and free virtual queues */\n-\tfor (i = 0; i < qdma_dev.max_vqs; i++) {\n+\tfor (i = 0; i < qdma_dev->max_vqs; i++) {\n \t\tif (qdma_vqs[i].status_ring)\n \t\t\trte_ring_free(qdma_vqs[i].status_ring);\n \t}\n@@ -475,43 +483,39 @@ rte_qdma_reset(void)\n \t\tsizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);\n \n \t/* Free the FLE pool */\n-\tif (qdma_dev.fle_pool)\n-\t\trte_mempool_free(qdma_dev.fle_pool);\n+\tif (qdma_dev->fle_pool)\n+\t\trte_mempool_free(qdma_dev->fle_pool);\n \n \t/* Reset QDMA device structure */\n-\tqdma_dev.mode = RTE_QDMA_MODE_HW;\n-\tqdma_dev.max_hw_queues_per_core = 0;\n-\tqdma_dev.fle_pool = NULL;\n-\tqdma_dev.fle_pool_count = 0;\n-\tqdma_dev.max_vqs = 0;\n+\tqdma_dev->mode = RTE_QDMA_MODE_HW;\n+\tqdma_dev->max_hw_queues_per_core = 0;\n+\tqdma_dev->fle_pool = NULL;\n+\tqdma_dev->fle_pool_count = 0;\n+\tqdma_dev->max_vqs = 0;\n \n \treturn 0;\n }\n \n-int\n-rte_qdma_configure(struct rte_qdma_config *qdma_config)\n+static int\n+dpaa2_qdma_configure(const struct rte_rawdev *rawdev,\n+\t\t\t rte_rawdev_obj_t config)\n {\n-\tint ret;\n \tchar fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */\n+\tstruct rte_qdma_config *qdma_config = (struct rte_qdma_config *)config;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n \n \tDPAA2_QDMA_FUNC_TRACE();\n \n \t/* In case QDMA device is not in stopped state, return -EBUSY */\n-\tif (qdma_dev.state == 1) {\n+\tif (qdma_dev->state == 1) {\n \t\tDPAA2_QDMA_ERR(\n \t\t\t\"Device is in running state. 
Stop before config.\");\n \t\treturn -1;\n \t}\n \n-\t/* Reset the QDMA device */\n-\tret = rte_qdma_reset();\n-\tif (ret) {\n-\t\tDPAA2_QDMA_ERR(\"Resetting QDMA failed\");\n-\t\treturn ret;\n-\t}\n-\n \t/* Set mode */\n-\tqdma_dev.mode = qdma_config->mode;\n+\tqdma_dev->mode = qdma_config->mode;\n \n \t/* Set max HW queue per core */\n \tif (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {\n@@ -519,7 +523,7 @@ rte_qdma_configure(struct rte_qdma_config *qdma_config)\n \t\t\t       MAX_HW_QUEUE_PER_CORE);\n \t\treturn -EINVAL;\n \t}\n-\tqdma_dev.max_hw_queues_per_core =\n+\tqdma_dev->max_hw_queues_per_core =\n \t\tqdma_config->max_hw_queues_per_core;\n \n \t/* Allocate Virtual Queues */\n@@ -530,24 +534,24 @@ rte_qdma_configure(struct rte_qdma_config *qdma_config)\n \t\tDPAA2_QDMA_ERR(\"qdma_virtual_queues allocation failed\");\n \t\treturn -ENOMEM;\n \t}\n-\tqdma_dev.max_vqs = qdma_config->max_vqs;\n+\tqdma_dev->max_vqs = qdma_config->max_vqs;\n \n \t/* Allocate FLE pool; just append PID so that in case of\n \t * multiprocess, the pool's don't collide.\n \t */\n \tsnprintf(fle_pool_name, sizeof(fle_pool_name), \"qdma_fle_pool%u\",\n \t\t getpid());\n-\tqdma_dev.fle_pool = rte_mempool_create(fle_pool_name,\n+\tqdma_dev->fle_pool = rte_mempool_create(fle_pool_name,\n \t\t\tqdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,\n \t\t\tQDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,\n \t\t\tNULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);\n-\tif (!qdma_dev.fle_pool) {\n+\tif (!qdma_dev->fle_pool) {\n \t\tDPAA2_QDMA_ERR(\"qdma_fle_pool create failed\");\n \t\trte_free(qdma_vqs);\n \t\tqdma_vqs = NULL;\n \t\treturn -ENOMEM;\n \t}\n-\tqdma_dev.fle_pool_count = qdma_config->fle_pool_count;\n+\tqdma_dev->fle_pool_count = qdma_config->fle_pool_count;\n \n \tif (qdma_config->format == RTE_QDMA_ULTRASHORT_FORMAT) {\n \t\tdpdmai_dev_get_job = dpdmai_dev_get_job_us;\n@@ -559,57 +563,67 @@ rte_qdma_configure(struct rte_qdma_config *qdma_config)\n \treturn 0;\n }\n \n-int\n-rte_qdma_start(void)\n+static int\n+dpaa2_qdma_start(struct rte_rawdev *rawdev)\n {\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\n \tDPAA2_QDMA_FUNC_TRACE();\n \n-\tqdma_dev.state = 1;\n+\tqdma_dev->state = 1;\n \n \treturn 0;\n }\n \n-int\n-rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)\n+static int\n+dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,\n+\t\t\t  __rte_unused uint16_t queue_id,\n+\t\t\t  rte_rawdev_obj_t queue_conf)\n {\n \tchar ring_name[32];\n \tint i;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\tstruct rte_qdma_queue_config *q_config =\n+\t\t(struct rte_qdma_queue_config *)queue_conf;\n \n \tDPAA2_QDMA_FUNC_TRACE();\n \n-\trte_spinlock_lock(&qdma_dev.lock);\n+\trte_spinlock_lock(&qdma_dev->lock);\n \n \t/* Get a free Virtual Queue */\n-\tfor (i = 0; i < qdma_dev.max_vqs; i++) {\n+\tfor (i = 0; i < qdma_dev->max_vqs; i++) {\n \t\tif (qdma_vqs[i].in_use == 0)\n \t\t\tbreak;\n \t}\n \n \t/* Return in case no VQ is free */\n-\tif (i == qdma_dev.max_vqs) {\n-\t\trte_spinlock_unlock(&qdma_dev.lock);\n+\tif (i == qdma_dev->max_vqs) {\n+\t\trte_spinlock_unlock(&qdma_dev->lock);\n \t\tDPAA2_QDMA_ERR(\"Unable to get lock on QDMA device\");\n \t\treturn -ENODEV;\n \t}\n \n-\tif (qdma_dev.mode == RTE_QDMA_MODE_HW ||\n-\t\t\t(flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {\n+\tif (qdma_dev->mode == RTE_QDMA_MODE_HW ||\n+\t\t\t(q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) 
{\n \t\t/* Allocate HW queue for a VQ */\n-\t\tqdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);\n+\t\tqdma_vqs[i].hw_queue = alloc_hw_queue(q_config->lcore_id);\n \t\tqdma_vqs[i].exclusive_hw_queue = 1;\n \t} else {\n \t\t/* Allocate a Ring for Virutal Queue in VQ mode */\n \t\tsnprintf(ring_name, sizeof(ring_name), \"status ring %d\", i);\n \t\tqdma_vqs[i].status_ring = rte_ring_create(ring_name,\n-\t\t\tqdma_dev.fle_pool_count, rte_socket_id(), 0);\n+\t\t\tqdma_dev->fle_pool_count, rte_socket_id(), 0);\n \t\tif (!qdma_vqs[i].status_ring) {\n \t\t\tDPAA2_QDMA_ERR(\"Status ring creation failed for vq\");\n-\t\t\trte_spinlock_unlock(&qdma_dev.lock);\n+\t\t\trte_spinlock_unlock(&qdma_dev->lock);\n \t\t\treturn rte_errno;\n \t\t}\n \n \t\t/* Get a HW queue (shared) for a VQ */\n-\t\tqdma_vqs[i].hw_queue = get_hw_queue(lcore_id);\n+\t\tqdma_vqs[i].hw_queue = get_hw_queue(qdma_dev,\n+\t\t\t\t\t\t    q_config->lcore_id);\n \t\tqdma_vqs[i].exclusive_hw_queue = 0;\n \t}\n \n@@ -618,28 +632,18 @@ rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)\n \t\tif (qdma_vqs[i].status_ring)\n \t\t\trte_ring_free(qdma_vqs[i].status_ring);\n \t\tqdma_vqs[i].status_ring = NULL;\n-\t\trte_spinlock_unlock(&qdma_dev.lock);\n+\t\trte_spinlock_unlock(&qdma_dev->lock);\n \t\treturn -ENODEV;\n \t}\n \n \tqdma_vqs[i].in_use = 1;\n-\tqdma_vqs[i].lcore_id = lcore_id;\n+\tqdma_vqs[i].lcore_id = q_config->lcore_id;\n \tmemset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));\n-\trte_spinlock_unlock(&qdma_dev.lock);\n-\n-\treturn i;\n-}\n-\n-/*create vq for route-by-port*/\n-int\n-rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,\n-\t\t\tstruct rte_qdma_rbp *rbp)\n-{\n-\tint i;\n-\n-\ti = rte_qdma_vq_create(lcore_id, flags);\n+\trte_spinlock_unlock(&qdma_dev->lock);\n \n-\tmemcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));\n+\tif (q_config->rbp != NULL)\n+\t\tmemcpy(&qdma_vqs[i].rbp, q_config->rbp,\n+\t\t       sizeof(struct rte_qdma_rbp));\n \n \treturn i;\n }\n@@ -688,7 +692,7 @@ dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,\n \t\t\tdpaa2_eqcr_size : nb_jobs;\n \n \t\tfor (loop = 0; loop < num_to_send; loop++) {\n-\t\t\tret = dpdmai_dev_set_fd(&fd[loop],\n+\t\t\tret = dpdmai_dev_set_fd(dpdmai_dev->qdma_dev, &fd[loop],\n \t\t\t\t\t\tjob[num_tx], rbp, vq_id);\n \t\t\tif (ret < 0) {\n \t\t\t\t/* Set nb_jobs to loop, so outer while loop\n@@ -723,12 +727,14 @@ dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,\n \treturn num_tx;\n }\n \n-int\n-rte_qdma_vq_enqueue_multi(uint16_t vq_id,\n-\t\t\t  struct rte_qdma_job **job,\n-\t\t\t  uint16_t nb_jobs)\n+static int\n+dpaa2_qdma_enqueue(__rte_unused struct rte_rawdev *rawdev,\n+\t\t  __rte_unused struct rte_rawdev_buf **buffers,\n+\t\t  unsigned int nb_jobs,\n+\t\t  rte_rawdev_obj_t context)\n {\n-\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];\n+\tstruct rte_qdma_enqdeq *e_context = (struct rte_qdma_enqdeq *)context;\n+\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[e_context->vq_id];\n \tstruct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;\n \tstruct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;\n \tint ret;\n@@ -736,15 +742,15 @@ rte_qdma_vq_enqueue_multi(uint16_t vq_id,\n \t/* Return error in case of wrong lcore_id */\n \tif (rte_lcore_id() != qdma_vq->lcore_id) {\n \t\tDPAA2_QDMA_ERR(\"QDMA enqueue for vqid %d on wrong core\",\n-\t\t\t\tvq_id);\n+\t\t\t\te_context->vq_id);\n \t\treturn -EINVAL;\n \t}\n \n \tret = dpdmai_dev_enqueue_multi(dpdmai_dev,\n \t\t\t\t qdma_pq->queue_id,\n-\t\t\t\t vq_id,\n+\t\t\t\t 
e_context->vq_id,\n \t\t\t\t &qdma_vq->rbp,\n-\t\t\t\t job,\n+\t\t\t\t e_context->job,\n \t\t\t\t nb_jobs);\n \tif (ret < 0) {\n \t\tDPAA2_QDMA_ERR(\"DPDMAI device enqueue failed: %d\", ret);\n@@ -756,13 +762,6 @@ rte_qdma_vq_enqueue_multi(uint16_t vq_id,\n \treturn ret;\n }\n \n-int\n-rte_qdma_vq_enqueue(uint16_t vq_id,\n-\t\t    struct rte_qdma_job *job)\n-{\n-\treturn rte_qdma_vq_enqueue_multi(vq_id, &job, 1);\n-}\n-\n /* Function to receive a QDMA job for a given device and queue*/\n static int\n dpdmai_dev_dequeue_multijob_prefetch(\n@@ -877,7 +876,8 @@ dpdmai_dev_dequeue_multijob_prefetch(\n \t\t}\n \t\tfd = qbman_result_DQ_fd(dq_storage);\n \n-\t\tvqid = dpdmai_dev_get_job(fd, &job[num_rx]);\n+\t\tvqid = dpdmai_dev_get_job(dpdmai_dev->qdma_dev, fd,\n+\t\t\t\t\t  &job[num_rx]);\n \t\tif (vq_id)\n \t\t\tvq_id[num_rx] = vqid;\n \n@@ -993,7 +993,8 @@ dpdmai_dev_dequeue_multijob_no_prefetch(\n \t\t\t}\n \t\t\tfd = qbman_result_DQ_fd(dq_storage);\n \n-\t\t\tvqid = dpdmai_dev_get_job(fd, &job[num_rx]);\n+\t\t\tvqid = dpdmai_dev_get_job(dpdmai_dev->qdma_dev, fd,\n+\t\t\t\t\t\t  &job[num_rx]);\n \t\t\tif (vq_id)\n \t\t\t\tvq_id[num_rx] = vqid;\n \n@@ -1008,21 +1009,24 @@ dpdmai_dev_dequeue_multijob_no_prefetch(\n \treturn num_rx;\n }\n \n-int\n-rte_qdma_vq_dequeue_multi(uint16_t vq_id,\n-\t\t\t  struct rte_qdma_job **job,\n-\t\t\t  uint16_t nb_jobs)\n+static int\n+dpaa2_qdma_dequeue(__rte_unused struct rte_rawdev *rawdev,\n+\t\t   __rte_unused struct rte_rawdev_buf **buffers,\n+\t\t   unsigned int nb_jobs,\n+\t\t   rte_rawdev_obj_t cntxt)\n {\n-\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];\n+\tstruct rte_qdma_enqdeq *context = (struct rte_qdma_enqdeq *)cntxt;\n+\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[context->vq_id];\n \tstruct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;\n \tstruct qdma_virt_queue *temp_qdma_vq;\n \tstruct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;\n-\tint ring_count, ret = 0, i;\n+\tint ret = 0, i;\n+\tunsigned int ring_count;\n \n \t/* Return error in case of wrong lcore_id */\n \tif (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {\n \t\tDPAA2_QDMA_WARN(\"QDMA dequeue for vqid %d on wrong core\",\n-\t\t\t\tvq_id);\n+\t\t\t\tcontext->vq_id);\n \t\treturn -1;\n \t}\n \n@@ -1036,7 +1040,7 @@ rte_qdma_vq_dequeue_multi(uint16_t vq_id,\n \tif (qdma_vq->exclusive_hw_queue) {\n \t\t/* In case of exclusive queue directly fetch from HW queue */\n \t\tret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,\n-\t\t\t\t\t NULL, job, nb_jobs);\n+\t\t\t\t\t NULL, context->job, nb_jobs);\n \t\tif (ret < 0) {\n \t\t\tDPAA2_QDMA_ERR(\n \t\t\t\t\"Dequeue from DPDMAI device failed: %d\", ret);\n@@ -1055,11 +1059,11 @@ rte_qdma_vq_dequeue_multi(uint16_t vq_id,\n \t\t\t/* TODO - How to have right budget */\n \t\t\tret = dpdmai_dev_dequeue_multijob(dpdmai_dev,\n \t\t\t\t\tqdma_pq->queue_id,\n-\t\t\t\t\ttemp_vq_id, job, nb_jobs);\n+\t\t\t\t\ttemp_vq_id, context->job, nb_jobs);\n \t\t\tfor (i = 0; i < ret; i++) {\n \t\t\t\ttemp_qdma_vq = &qdma_vqs[temp_vq_id[i]];\n \t\t\t\trte_ring_enqueue(temp_qdma_vq->status_ring,\n-\t\t\t\t\t(void *)(job[i]));\n+\t\t\t\t\t(void *)(context->job[i]));\n \t\t\t}\n \t\t\tring_count = rte_ring_count(\n \t\t\t\t\tqdma_vq->status_ring);\n@@ -1070,7 +1074,8 @@ rte_qdma_vq_dequeue_multi(uint16_t vq_id,\n \t\t\t * to provide to the user\n \t\t\t */\n \t\t\tret = rte_ring_dequeue_bulk(qdma_vq->status_ring,\n-\t\t\t\t\t(void **)job, ring_count, NULL);\n+\t\t\t\t\t\t    (void **)context->job,\n+\t\t\t\t\t\t    ring_count, NULL);\n 
\t\t\tif (ret)\n \t\t\t\tqdma_vq->num_dequeues += ret;\n \t\t}\n@@ -1079,19 +1084,6 @@ rte_qdma_vq_dequeue_multi(uint16_t vq_id,\n \treturn ret;\n }\n \n-struct rte_qdma_job *\n-rte_qdma_vq_dequeue(uint16_t vq_id)\n-{\n-\tint ret;\n-\tstruct rte_qdma_job *job = NULL;\n-\n-\tret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);\n-\tif (ret < 0)\n-\t\tDPAA2_QDMA_DP_WARN(\"DPDMAI device dequeue failed: %d\", ret);\n-\n-\treturn job;\n-}\n-\n void\n rte_qdma_vq_stats(uint16_t vq_id,\n \t\t  struct rte_qdma_vq_stats *vq_status)\n@@ -1108,9 +1100,13 @@ rte_qdma_vq_stats(uint16_t vq_id,\n \t}\n }\n \n-int\n-rte_qdma_vq_destroy(uint16_t vq_id)\n+static int\n+dpaa2_qdma_queue_release(struct rte_rawdev *rawdev,\n+\t\t\t uint16_t vq_id)\n {\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n+\n \tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];\n \n \tDPAA2_QDMA_FUNC_TRACE();\n@@ -1119,7 +1115,7 @@ rte_qdma_vq_destroy(uint16_t vq_id)\n \tif (qdma_vq->num_enqueues != qdma_vq->num_dequeues)\n \t\treturn -EBUSY;\n \n-\trte_spinlock_lock(&qdma_dev.lock);\n+\trte_spinlock_lock(&qdma_dev->lock);\n \n \tif (qdma_vq->exclusive_hw_queue)\n \t\tfree_hw_queue(qdma_vq->hw_queue);\n@@ -1132,57 +1128,44 @@ rte_qdma_vq_destroy(uint16_t vq_id)\n \n \tmemset(qdma_vq, 0, sizeof(struct qdma_virt_queue));\n \n-\trte_spinlock_unlock(&qdma_dev.lock);\n+\trte_spinlock_unlock(&qdma_dev->lock);\n \n \treturn 0;\n }\n \n-int\n-rte_qdma_vq_destroy_rbp(uint16_t vq_id)\n+static void\n+dpaa2_qdma_stop(struct rte_rawdev *rawdev)\n {\n-\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;\n+\tstruct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;\n \n \tDPAA2_QDMA_FUNC_TRACE();\n \n-\t/* In case there are pending jobs on any VQ, return -EBUSY */\n-\tif (qdma_vq->num_enqueues != qdma_vq->num_dequeues)\n-\t\treturn -EBUSY;\n-\n-\trte_spinlock_lock(&qdma_dev.lock);\n-\n-\tif (qdma_vq->exclusive_hw_queue) {\n-\t\tfree_hw_queue(qdma_vq->hw_queue);\n-\t} else {\n-\t\tif (qdma_vqs->status_ring)\n-\t\t\trte_ring_free(qdma_vqs->status_ring);\n-\n-\t\tput_hw_queue(qdma_vq->hw_queue);\n-\t}\n-\n-\tmemset(qdma_vq, 0, sizeof(struct qdma_virt_queue));\n-\n-\trte_spinlock_unlock(&qdma_dev.lock);\n-\n-\treturn 0;\n+\tqdma_dev->state = 0;\n }\n \n-void\n-rte_qdma_stop(void)\n+static int\n+dpaa2_qdma_close(struct rte_rawdev *rawdev)\n {\n \tDPAA2_QDMA_FUNC_TRACE();\n \n-\tqdma_dev.state = 0;\n-}\n-\n-void\n-rte_qdma_destroy(void)\n-{\n-\tDPAA2_QDMA_FUNC_TRACE();\n+\tdpaa2_qdma_reset(rawdev);\n \n-\trte_qdma_reset();\n+\treturn 0;\n }\n \n-static const struct rte_rawdev_ops dpaa2_qdma_ops;\n+static struct rte_rawdev_ops dpaa2_qdma_ops = {\n+\t.dev_configure            = dpaa2_qdma_configure,\n+\t.dev_start                = dpaa2_qdma_start,\n+\t.dev_stop                 = dpaa2_qdma_stop,\n+\t.dev_reset                = dpaa2_qdma_reset,\n+\t.dev_close                = dpaa2_qdma_close,\n+\t.queue_setup\t\t  = dpaa2_qdma_queue_setup,\n+\t.queue_release\t\t  = dpaa2_qdma_queue_release,\n+\t.attr_get\t\t  = dpaa2_qdma_attr_get,\n+\t.enqueue_bufs\t\t  = dpaa2_qdma_enqueue,\n+\t.dequeue_bufs\t\t  = dpaa2_qdma_dequeue,\n+};\n \n static int\n add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)\n@@ -1204,7 +1187,7 @@ add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)\n \t\tqueue->queue_id = i;\n \n \t\tTAILQ_INSERT_TAIL(&qdma_queue_list, queue, 
next);\n-\t\tqdma_dev.num_hw_queues++;\n+\t\tdpdmai_dev->qdma_dev->num_hw_queues++;\n \t}\n \n \treturn 0;\n@@ -1313,6 +1296,7 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)\n \t/* Open DPDMAI device */\n \tdpdmai_dev->dpdmai_id = dpdmai_id;\n \tdpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);\n+\tdpdmai_dev->qdma_dev = &q_dev;\n \tret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,\n \t\t\t  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);\n \tif (ret) {\n@@ -1427,6 +1411,8 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)\n \n \tDPAA2_QDMA_DEBUG(\"Initialized dpdmai object successfully\");\n \n+\trte_spinlock_init(&dpdmai_dev->qdma_dev->lock);\n+\n \treturn 0;\n init_err:\n \tdpaa2_dpdmai_dev_uninit(rawdev);\n@@ -1462,6 +1448,13 @@ rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,\n \t\treturn ret;\n \t}\n \n+\t/* Reset the QDMA device */\n+\tret = dpaa2_qdma_reset(rawdev);\n+\tif (ret) {\n+\t\tDPAA2_QDMA_ERR(\"Resetting QDMA failed\");\n+\t\treturn ret;\n+\t}\n+\n \treturn 0;\n }\n \ndiff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\nindex 0176380..3c112d2 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\n@@ -1,5 +1,5 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright 2018-2019 NXP\n+ * Copyright 2018-2020 NXP\n  */\n \n #ifndef __DPAA2_QDMA_H__\n@@ -173,6 +173,7 @@ struct dpaa2_dpdmai_dev {\n \tstruct dpaa2_queue rx_queue[DPAA2_DPDMAI_MAX_QUEUES];\n \t/** TX queues */\n \tstruct dpaa2_queue tx_queue[DPAA2_DPDMAI_MAX_QUEUES];\n+\tstruct qdma_device *qdma_dev;\n };\n \n #endif /* __DPAA2_QDMA_H__ */\ndiff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\nindex 4e1268c..71894d3 100644\n--- a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\n+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\n@@ -1,10 +1,12 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright 2018-2019 NXP\n+ * Copyright 2018-2020 NXP\n  */\n \n #ifndef __RTE_PMD_DPAA2_QDMA_H__\n #define __RTE_PMD_DPAA2_QDMA_H__\n \n+#include <rte_rawdev.h>\n+\n /**\n  * @file\n  *\n@@ -154,150 +156,29 @@ struct rte_qdma_job {\n \tuint16_t vq_id;\n };\n \n-/**\n- * Initialize the QDMA device.\n- *\n- * @returns\n- *   - 0: Success.\n- *   - <0: Error code.\n- */\n-int\n-rte_qdma_init(void);\n-\n-/**\n- * Get the QDMA attributes.\n- *\n- * @param qdma_attr\n- *   QDMA attributes providing total number of hw queues etc.\n- */\n-void\n-rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr);\n-\n-/**\n- * Reset the QDMA device. This API will completely reset the QDMA\n- * device, bringing it to original state as if only rte_qdma_init() API\n- * has been called.\n- *\n- * @returns\n- *   - 0: Success.\n- *   - <0: Error code.\n- */\n-int\n-rte_qdma_reset(void);\n-\n-/**\n- * Configure the QDMA device.\n- *\n- * @returns\n- *   - 0: Success.\n- *   - <0: Error code.\n- */\n-int\n-rte_qdma_configure(struct rte_qdma_config *qdma_config);\n-\n-/**\n- * Start the QDMA device.\n- *\n- * @returns\n- *   - 0: Success.\n- *   - <0: Error code.\n- */\n-int\n-rte_qdma_start(void);\n-\n-/**\n- * Create a Virtual Queue on a particular lcore id.\n- * This API can be called from any thread/core. User can create/destroy\n- * VQ's at runtime.\n- *\n- * @param lcore_id\n- *   LCORE ID on which this particular queue would be associated with.\n- * @param flags\n- *  RTE_QDMA_VQ_ flags. 
See macro definitions.\n- *\n- * @returns\n- *   - >= 0: Virtual queue ID.\n- *   - <0: Error code.\n- */\n-int\n-rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags);\n-\n-/*create vq for route-by-port*/\n-int\n-rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,\n-\t\t\tstruct rte_qdma_rbp *rbp);\n-\n-/**\n- * Enqueue multiple jobs to a Virtual Queue.\n- * If the enqueue is successful, the H/W will perform DMA operations\n- * on the basis of the QDMA jobs provided.\n- *\n- * @param vq_id\n- *   Virtual Queue ID.\n- * @param job\n- *   List of QDMA Jobs containing relevant information related to DMA.\n- * @param nb_jobs\n- *   Number of QDMA jobs provided by the user.\n- *\n- * @returns\n- *   - >=0: Number of jobs successfully submitted\n- *   - <0: Error code.\n- */\n-int\n-rte_qdma_vq_enqueue_multi(uint16_t vq_id,\n-\t\t\t  struct rte_qdma_job **job,\n-\t\t\t  uint16_t nb_jobs);\n-\n-/**\n- * Enqueue a single job to a Virtual Queue.\n- * If the enqueue is successful, the H/W will perform DMA operations\n- * on the basis of the QDMA job provided.\n- *\n- * @param vq_id\n- *   Virtual Queue ID.\n- * @param job\n- *   A QDMA Job containing relevant information related to DMA.\n- *\n- * @returns\n- *   - >=0: Number of jobs successfully submitted\n- *   - <0: Error code.\n- */\n-int\n-rte_qdma_vq_enqueue(uint16_t vq_id,\n-\t\t    struct rte_qdma_job *job);\n+struct rte_qdma_enqdeq {\n+\tuint16_t vq_id;\n+\tstruct rte_qdma_job **job;\n+};\n \n-/**\n- * Dequeue multiple completed jobs from a Virtual Queue.\n- * Provides the list of completed jobs capped by nb_jobs.\n- *\n- * @param vq_id\n- *   Virtual Queue ID.\n- * @param job\n- *   List of QDMA Jobs returned from the API.\n- * @param nb_jobs\n- *   Number of QDMA jobs requested for dequeue by the user.\n- *\n- * @returns\n- *   - >=0: Number of jobs successfully received\n- *   - <0: Error code.\n- */\n-int\n-rte_qdma_vq_dequeue_multi(uint16_t vq_id,\n-\t\t\t  struct rte_qdma_job **job,\n-\t\t\t  uint16_t nb_jobs);\n+struct rte_qdma_queue_config {\n+\tuint32_t lcore_id;\n+\tuint32_t flags;\n+\tstruct rte_qdma_rbp *rbp;\n+};\n \n-/**\n- * Dequeue a single completed jobs from a Virtual Queue.\n- *\n- * @param vq_id\n- *   Virtual Queue ID.\n- *\n- * @returns\n- *   - A completed job or NULL if no job is there.\n- */\n-struct rte_qdma_job *\n-rte_qdma_vq_dequeue(uint16_t vq_id);\n+#define rte_qdma_info rte_rawdev_info\n+#define rte_qdma_start(id) rte_rawdev_start(id)\n+#define rte_qdma_reset(id) rte_rawdev_reset(id)\n+#define rte_qdma_configure(id, cf) rte_rawdev_configure(id, cf)\n+#define rte_qdma_dequeue_buffers(id, buf, num, ctxt) \\\n+\trte_rawdev_dequeue_buffers(id, buf, num, ctxt)\n+#define rte_qdma_enqueue_buffers(id, buf, num, ctxt) \\\n+\trte_rawdev_enqueue_buffers(id, buf, num, ctxt)\n+#define rte_qdma_queue_setup(id, qid, cfg) \\\n+\trte_rawdev_queue_setup(id, qid, cfg)\n \n+/*TODO introduce per queue stats API in rawdew */\n /**\n  * Get a Virtual Queue statistics.\n  *\n@@ -310,46 +191,4 @@ void\n rte_qdma_vq_stats(uint16_t vq_id,\n \t\t  struct rte_qdma_vq_stats *vq_stats);\n \n-/**\n- * Destroy the Virtual Queue specified by vq_id.\n- * This API can be called from any thread/core. 
User can create/destroy\n- * VQ's at runtime.\n- *\n- * @param vq_id\n- *   Virtual Queue ID which needs to be uninitialized.\n- *\n- * @returns\n- *   - 0: Success.\n- *   - <0: Error code.\n- */\n-int\n-rte_qdma_vq_destroy(uint16_t vq_id);\n-\n-/**\n- * Destroy the RBP specific Virtual Queue specified by vq_id.\n- * This API can be called from any thread/core. User can create/destroy\n- * VQ's at runtime.\n- *\n- * @param vq_id\n- *   RBP based Virtual Queue ID which needs to be uninitialized.\n- *\n- * @returns\n- *   - 0: Success.\n- *   - <0: Error code.\n- */\n-\n-int\n-rte_qdma_vq_destroy_rbp(uint16_t vq_id);\n-/**\n- * Stop QDMA device.\n- */\n-void\n-rte_qdma_stop(void);\n-\n-/**\n- * Destroy the QDMA device.\n- */\n-void\n-rte_qdma_destroy(void);\n-\n #endif /* __RTE_PMD_DPAA2_QDMA_H__*/\n",
    "prefixes": [
        "1/7"
    ]
}
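
The put and patch operations described at the top modify this same resource and require write access. A hedged sketch of a partial update with HTTP PATCH, assuming the account holds an API token issued by the Patchwork instance and has maintainer rights on the project (the token value and target state below are hypothetical):

import requests

# Hypothetical token; real tokens are issued per user by the Patchwork instance.
headers = {"Authorization": "Token 0123456789abcdef"}

# PATCH is a partial update: only the fields supplied in the body change.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/76668/",
    headers=headers,
    json={"state": "accepted"},
)
resp.raise_for_status()
print(resp.json()["state"])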