get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.
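Below is a sample GET transcript for this endpoint. As a minimal sketch of driving the same endpoint programmatically (assuming the third-party Python "requests" package; the field names follow the response shown below):

    import requests

    # Read access to the public Patchwork instance needs no authentication.
    resp = requests.get("http://patches.dpdk.org/api/patches/76672/")
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields visible in the response below.
    print(patch["name"])     # "[5/7] raw/dpaa2_qdma: support scatter gather in enqueue"
    print(patch["state"])    # "superseded"
    print(patch["submitter"]["email"])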

GET /api/patches/76672/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 76672,
    "url": "http://patches.dpdk.org/api/patches/76672/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1599470764-30569-6-git-send-email-g.singh@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599470764-30569-6-git-send-email-g.singh@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1599470764-30569-6-git-send-email-g.singh@nxp.com",
    "date": "2020-09-07T09:26:02",
    "name": "[5/7] raw/dpaa2_qdma: support scatter gather in enqueue",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "25b134e0568acf3b9d6fafc1d68ed7a9b518276c",
    "submitter": {
        "id": 1068,
        "url": "http://patches.dpdk.org/api/people/1068/?format=api",
        "name": "Gagandeep Singh",
        "email": "g.singh@nxp.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1599470764-30569-6-git-send-email-g.singh@nxp.com/mbox/",
    "series": [
        {
            "id": 11988,
            "url": "http://patches.dpdk.org/api/series/11988/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11988",
            "date": "2020-09-07T09:25:57",
            "name": "raw/dpaa2_qdma: driver enhancement",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/11988/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/76672/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/76672/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 50962A04B9;\n\tMon,  7 Sep 2020 11:28:24 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 69B8E1C13A;\n\tMon,  7 Sep 2020 11:27:02 +0200 (CEST)",
            "from inva020.nxp.com (inva020.nxp.com [92.121.34.13])\n by dpdk.org (Postfix) with ESMTP id 398FE1BE0C\n for <dev@dpdk.org>; Mon,  7 Sep 2020 11:27:01 +0200 (CEST)",
            "from inva020.nxp.com (localhost [127.0.0.1])\n by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 18F281A016B;\n Mon,  7 Sep 2020 11:27:01 +0200 (CEST)",
            "from invc005.ap-rdc01.nxp.com (invc005.ap-rdc01.nxp.com\n [165.114.16.14])\n by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 5BC941A1028;\n Mon,  7 Sep 2020 11:26:58 +0200 (CEST)",
            "from lsv11086.swis.cn-sha01.nxp.com (lsv11086.swis.cn-sha01.nxp.com\n [92.121.210.87])\n by invc005.ap-rdc01.nxp.com (Postfix) with ESMTP id 174C1402CA;\n Mon,  7 Sep 2020 11:26:51 +0200 (CEST)"
        ],
        "From": "Gagandeep Singh <g.singh@nxp.com>",
        "To": "dev@dpdk.org,\n\tnipun.gupta@nxp.com,\n\themant.agrawal@nxp.com",
        "Cc": "thomas.monjalon@6wind.com,\n\tJun Yang <jun.yang@nxp.com>",
        "Date": "Mon,  7 Sep 2020 17:26:02 +0800",
        "Message-Id": "<1599470764-30569-6-git-send-email-g.singh@nxp.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1599470764-30569-1-git-send-email-g.singh@nxp.com>",
        "References": "<1599470764-30569-1-git-send-email-g.singh@nxp.com>",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH 5/7] raw/dpaa2_qdma: support scatter gather in\n\tenqueue",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Jun Yang <jun.yang@nxp.com>\n\nThis patch add support to add Scatter Gather support\nfor different jobs for qdma queues.\nIt also supports gathering  multiple enqueue jobs into SG enqueue job(s).\n\nSigned-off-by: Jun Yang <jun.yang@nxp.com>\n---\n drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     |  18 +-\n drivers/raw/dpaa2_qdma/dpaa2_qdma.c         | 397 +++++++++++++++++++++++-----\n drivers/raw/dpaa2_qdma/dpaa2_qdma.h         |  75 +++++-\n drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h |   2 +\n 4 files changed, 411 insertions(+), 81 deletions(-)",
    "diff": "diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h\nindex 35423df..e540759 100644\n--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h\n+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h\n@@ -210,12 +210,28 @@ struct dpaa2_dpcon_dev {\n };\n \n /* Refer to Table 7-3 in SEC BG */\n+#define QBMAN_FLE_WORD4_FMT_SBF 0x0    /* Single buffer frame */\n+#define QBMAN_FLE_WORD4_FMT_SGE 0x2 /* Scatter gather frame */\n+\n+struct qbman_fle_word4 {\n+\tuint32_t bpid:14; /* Frame buffer pool ID */\n+\tuint32_t ivp:1; /* Invalid Pool ID. */\n+\tuint32_t bmt:1; /* Bypass Memory Translation */\n+\tuint32_t offset:12; /* Frame offset */\n+\tuint32_t fmt:2; /* Frame Format */\n+\tuint32_t sl:1; /* Short Length */\n+\tuint32_t f:1; /* Final bit */\n+};\n+\n struct qbman_fle {\n \tuint32_t addr_lo;\n \tuint32_t addr_hi;\n \tuint32_t length;\n \t/* FMT must be 00, MSB is final bit  */\n-\tuint32_t fin_bpid_offset;\n+\tunion {\n+\t\tuint32_t fin_bpid_offset;\n+\t\tstruct qbman_fle_word4 word4;\n+\t};\n \tuint32_t frc;\n \tuint32_t reserved[3]; /* Not used currently */\n };\ndiff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\nindex 6b4d080..0c56a04 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n@@ -116,17 +116,21 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,\n \n static void\n dpaa2_qdma_populate_fle(struct qbman_fle *fle,\n+\t\t\tuint64_t fle_iova,\n \t\t\tstruct rte_qdma_rbp *rbp,\n \t\t\tuint64_t src, uint64_t dest,\n-\t\t\tsize_t len, uint32_t flags)\n+\t\t\tsize_t len, uint32_t flags, uint32_t fmt)\n {\n \tstruct qdma_sdd *sdd;\n+\tuint64_t sdd_iova;\n \n-\tsdd = (struct qdma_sdd *)((uint8_t *)(fle) +\n-\t\t(DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));\n+\tsdd = (struct qdma_sdd *)\n+\t\t\t((uint64_t)fle - QDMA_FLE_FLE_OFFSET +\n+\t\t\tQDMA_FLE_SDD_OFFSET);\n+\tsdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET;\n \n \t/* first frame list to source descriptor */\n-\tDPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));\n+\tDPAA2_SET_FLE_ADDR(fle, sdd_iova);\n \tDPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));\n \n \t/* source and destination descriptor */\n@@ -164,20 +168,26 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,\n \t/* source frame list to source buffer */\n \tif (flags & RTE_QDMA_JOB_SRC_PHY) {\n \t\tDPAA2_SET_FLE_ADDR(fle, src);\n+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA\n \t\tDPAA2_SET_FLE_BMT(fle);\n+#endif\n \t} else {\n \t\tDPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));\n \t}\n+\tfle->word4.fmt = fmt;\n \tDPAA2_SET_FLE_LEN(fle, len);\n \n \tfle++;\n \t/* destination frame list to destination buffer */\n \tif (flags & RTE_QDMA_JOB_DEST_PHY) {\n+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA\n \t\tDPAA2_SET_FLE_BMT(fle);\n+#endif\n \t\tDPAA2_SET_FLE_ADDR(fle, dest);\n \t} else {\n \t\tDPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));\n \t}\n+\tfle->word4.fmt = fmt;\n \tDPAA2_SET_FLE_LEN(fle, len);\n \n \t/* Final bit: 1, for last frame list */\n@@ -187,44 +197,169 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,\n static inline int dpdmai_dev_set_fd_us(\n \t\tstruct qdma_virt_queue *qdma_vq,\n \t\tstruct qbman_fd *fd,\n-\t\tstruct rte_qdma_job *job)\n+\t\tstruct rte_qdma_job **job,\n+\t\tuint16_t nb_jobs)\n {\n \tstruct rte_qdma_rbp *rbp = &qdma_vq->rbp;\n \tstruct rte_qdma_job **ppjob;\n \tsize_t iova;\n-\tint ret = 0;\n+\tint ret = 0, loop;\n+\n+\tfor (loop = 0; loop < nb_jobs; loop++) {\n+\t\tif (job[loop]->src & 
QDMA_RBP_UPPER_ADDRESS_MASK)\n+\t\t\tiova = (size_t)job[loop]->dest;\n+\t\telse\n+\t\t\tiova = (size_t)job[loop]->src;\n+\n+\t\t/* Set the metadata */\n+\t\tjob[loop]->vq_id = qdma_vq->vq_id;\n+\t\tppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;\n+\t\t*ppjob = job[loop];\n+\n+\t\tif ((rbp->drbp == 1) || (rbp->srbp == 1))\n+\t\t\tret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,\n+\t\t\t\t\t\t(phys_addr_t)job[loop]->dest,\n+\t\t\t\t\t\tjob[loop]->len, &fd[loop], rbp);\n+\t\telse\n+\t\t\tret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,\n+\t\t\t\t\t\t(phys_addr_t)job[loop]->dest,\n+\t\t\t\t\t\tjob[loop]->len, &fd[loop]);\n+\t}\n \n-\tif (job->src & QDMA_RBP_UPPER_ADDRESS_MASK)\n-\t\tiova = (size_t)job->dest;\n-\telse\n-\t\tiova = (size_t)job->src;\n+\treturn ret;\n+}\n \n-\t/* Set the metadata */\n-\tjob->vq_id = qdma_vq->vq_id;\n-\tppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;\n-\t*ppjob = job;\n+static uint32_t qdma_populate_sg_entry(\n+\t\tstruct rte_qdma_job **jobs,\n+\t\tstruct qdma_sg_entry *src_sge,\n+\t\tstruct qdma_sg_entry *dst_sge,\n+\t\tuint16_t nb_jobs)\n+{\n+\tuint16_t i;\n+\tuint32_t total_len = 0;\n+\tuint64_t iova;\n+\n+\tfor (i = 0; i < nb_jobs; i++) {\n+\t\t/* source SG */\n+\t\tif (likely(jobs[i]->flags & RTE_QDMA_JOB_SRC_PHY)) {\n+\t\t\tsrc_sge->addr_lo = (uint32_t)jobs[i]->src;\n+\t\t\tsrc_sge->addr_hi = (jobs[i]->src >> 32);\n+\t\t} else {\n+\t\t\tiova = DPAA2_VADDR_TO_IOVA(jobs[i]->src);\n+\t\t\tsrc_sge->addr_lo = (uint32_t)iova;\n+\t\t\tsrc_sge->addr_hi = iova >> 32;\n+\t\t}\n+\t\tsrc_sge->data_len.data_len_sl0 = jobs[i]->len;\n+\t\tsrc_sge->ctrl.sl = QDMA_SG_SL_LONG;\n+\t\tsrc_sge->ctrl.fmt = QDMA_SG_FMT_SDB;\n+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA\n+\t\tsrc_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;\n+#else\n+\t\tsrc_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;\n+#endif\n+\t\t/* destination SG */\n+\t\tif (likely(jobs[i]->flags & RTE_QDMA_JOB_DEST_PHY)) {\n+\t\t\tdst_sge->addr_lo = (uint32_t)jobs[i]->dest;\n+\t\t\tdst_sge->addr_hi = (jobs[i]->dest >> 32);\n+\t\t} else {\n+\t\t\tiova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest);\n+\t\t\tdst_sge->addr_lo = (uint32_t)iova;\n+\t\t\tdst_sge->addr_hi = iova >> 32;\n+\t\t}\n+\t\tdst_sge->data_len.data_len_sl0 = jobs[i]->len;\n+\t\tdst_sge->ctrl.sl = QDMA_SG_SL_LONG;\n+\t\tdst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;\n+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA\n+\t\tdst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;\n+#else\n+\t\tdst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;\n+#endif\n+\t\ttotal_len += jobs[i]->len;\n \n-\tif ((rbp->drbp == 1) || (rbp->srbp == 1))\n-\t\tret = qdma_populate_fd_pci((phys_addr_t) job->src,\n-\t\t\t\t\t   (phys_addr_t) job->dest,\n-\t\t\t\t\t   job->len, fd, rbp);\n-\telse\n-\t\tret = qdma_populate_fd_ddr((phys_addr_t) job->src,\n-\t\t\t\t\t   (phys_addr_t) job->dest,\n-\t\t\t\t\t   job->len, fd);\n-\treturn ret;\n+\t\tif (i == (nb_jobs - 1)) {\n+\t\t\tsrc_sge->ctrl.f = QDMA_SG_F;\n+\t\t\tdst_sge->ctrl.f = QDMA_SG_F;\n+\t\t} else {\n+\t\t\tsrc_sge->ctrl.f = 0;\n+\t\t\tdst_sge->ctrl.f = 0;\n+\t\t}\n+\t\tsrc_sge++;\n+\t\tdst_sge++;\n+\t}\n+\n+\treturn total_len;\n }\n-static inline int dpdmai_dev_set_fd_lf(\n+\n+static inline int dpdmai_dev_set_multi_fd_lf(\n \t\tstruct qdma_virt_queue *qdma_vq,\n \t\tstruct qbman_fd *fd,\n-\t\tstruct rte_qdma_job *job)\n+\t\tstruct rte_qdma_job **job,\n+\t\tuint16_t nb_jobs)\n {\n \tstruct rte_qdma_rbp *rbp = &qdma_vq->rbp;\n \tstruct rte_qdma_job **ppjob;\n-\tvoid *elem;\n+\tuint16_t i;\n+\tint ret;\n+\tstruct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);\n+\tvoid 
*elem[RTE_QDMA_BURST_NB_MAX];\n \tstruct qbman_fle *fle;\n \tuint64_t elem_iova, fle_iova;\n-\tint ret = 0;\n+\n+\tret = rte_mempool_get_bulk(qdma_dev->fle_pool, elem, nb_jobs);\n+\tif (ret) {\n+\t\tDPAA2_QDMA_DP_DEBUG(\"Memory alloc failed for FLE\");\n+\t\treturn ret;\n+\t}\n+\n+\tfor (i = 0; i < nb_jobs; i++) {\n+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA\n+\t\telem_iova = rte_mempool_virt2iova(elem[i]);\n+#else\n+\t\telem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);\n+#endif\n+\n+\t\t*((uint16_t *)\n+\t\t((uint64_t)elem[i] + QDMA_FLE_JOB_NB_OFFSET)) = 1;\n+\n+\t\tppjob = (struct rte_qdma_job **)\n+\t\t\t\t((uint64_t)elem[i] + QDMA_FLE_JOBS_OFFSET);\n+\t\t*ppjob = job[i];\n+\n+\t\tjob[i]->vq_id = qdma_vq->vq_id;\n+\n+\t\tfle = (struct qbman_fle *)\n+\t\t\t\t((uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET);\n+\t\tfle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;\n+\n+\t\tDPAA2_SET_FD_ADDR(&fd[i], fle_iova);\n+\t\tDPAA2_SET_FD_COMPOUND_FMT(&fd[i]);\n+\t\tDPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX);\n+\n+\t\tmemset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +\n+\t\t\tDPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));\n+\n+\t\tdpaa2_qdma_populate_fle(fle, fle_iova, rbp,\n+\t\t\t\tjob[i]->src, job[i]->dest, job[i]->len,\n+\t\t\t\tjob[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline int dpdmai_dev_set_sg_fd_lf(\n+\t\tstruct qdma_virt_queue *qdma_vq,\n+\t\tstruct qbman_fd *fd,\n+\t\tstruct rte_qdma_job **job,\n+\t\tuint16_t nb_jobs)\n+{\n+\tstruct rte_qdma_rbp *rbp = &qdma_vq->rbp;\n+\tstruct rte_qdma_job **ppjob;\n+\tvoid *elem;\n+\tstruct qbman_fle *fle;\n+\tuint64_t elem_iova, fle_iova, src, dst;\n+\tint ret = 0, i;\n+\tstruct qdma_sg_entry *src_sge, *dst_sge;\n+\tuint32_t len, fmt, flags;\n \tstruct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);\n \n \t/*\n@@ -244,10 +379,14 @@ static inline int dpdmai_dev_set_fd_lf(\n #endif\n \n \t/* Set the metadata */\n-\tjob->vq_id = qdma_vq->vq_id;\n+\t/* Save job context. 
*/\n+\t*((uint16_t *)((uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;\n \tppjob = (struct rte_qdma_job **)\n-\t\t\t((uint64_t)elem + QDMA_FLE_JOB_OFFSET);\n-\t*ppjob = job;\n+\t\t\t((uint64_t)elem + QDMA_FLE_JOBS_OFFSET);\n+\tfor (i = 0; i < nb_jobs; i++)\n+\t\tppjob[i] = job[i];\n+\n+\tppjob[0]->vq_id = qdma_vq->vq_id;\n \n \tfle = (struct qbman_fle *)\n \t\t\t((uint64_t)elem + QDMA_FLE_FLE_OFFSET);\n@@ -258,9 +397,29 @@ static inline int dpdmai_dev_set_fd_lf(\n \tDPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);\n \n \t/* Populate FLE */\n-\tmemset(fle, 0, QDMA_FLE_POOL_SIZE);\n-\tdpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,\n-\t\t\t\tjob->len, job->flags);\n+\tif (likely(nb_jobs > 1)) {\n+\t\tsrc_sge = (struct qdma_sg_entry *)\n+\t\t\t\t((uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET);\n+\t\tdst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB;\n+\t\tsrc = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET;\n+\t\tdst = src +\n+\t\t\tDPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry);\n+\t\tlen = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs);\n+\t\tfmt = QBMAN_FLE_WORD4_FMT_SGE;\n+\t\tflags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY;\n+\t} else {\n+\t\tsrc = job[0]->src;\n+\t\tdst = job[0]->dest;\n+\t\tlen = job[0]->len;\n+\t\tfmt = QBMAN_FLE_WORD4_FMT_SBF;\n+\t\tflags = job[0]->flags;\n+\t}\n+\n+\tmemset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +\n+\t\t\tDPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));\n+\n+\tdpaa2_qdma_populate_fle(fle, fle_iova, rbp,\n+\t\t\t\t\tsrc, dst, len, flags, fmt);\n \n \treturn 0;\n }\n@@ -268,7 +427,7 @@ static inline int dpdmai_dev_set_fd_lf(\n static inline uint16_t dpdmai_dev_get_job_us(\n \t\t\t\tstruct qdma_virt_queue *qdma_vq __rte_unused,\n \t\t\t\tconst struct qbman_fd *fd,\n-\t\t\t\tstruct rte_qdma_job **job)\n+\t\t\t\tstruct rte_qdma_job **job, uint16_t *nb_jobs)\n {\n \tuint16_t vqid;\n \tsize_t iova;\n@@ -286,6 +445,7 @@ static inline uint16_t dpdmai_dev_get_job_us(\n \t(*job)->status = (fd->simple_pci.acc_err << 8) |\n \t\t\t\t\t(fd->simple_pci.error);\n \tvqid = (*job)->vq_id;\n+\t*nb_jobs = 1;\n \n \treturn vqid;\n }\n@@ -293,12 +453,12 @@ static inline uint16_t dpdmai_dev_get_job_us(\n static inline uint16_t dpdmai_dev_get_job_lf(\n \t\t\t\t\t\tstruct qdma_virt_queue *qdma_vq,\n \t\t\t\t\t\tconst struct qbman_fd *fd,\n-\t\t\t\t\t\tstruct rte_qdma_job **job)\n+\t\t\t\t\t\tstruct rte_qdma_job **job,\n+\t\t\t\t\t\tuint16_t *nb_jobs)\n {\n-\tvoid *elem;\n \tstruct qbman_fle *fle;\n-\tstruct rte_qdma_job **ppjob;\n-\tuint16_t vqid;\n+\tstruct rte_qdma_job **ppjob = NULL;\n+\tuint16_t i, status;\n \tstruct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);\n \n \t/*\n@@ -307,20 +467,24 @@ static inline uint16_t dpdmai_dev_get_job_lf(\n \t */\n \tfle = (struct qbman_fle *)\n \t\t\tDPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));\n-\telem = (void *)((uint64_t)fle - QDMA_FLE_FLE_OFFSET);\n \n-\tppjob = (struct rte_qdma_job **)\n-\t\t\t((uint64_t)elem + QDMA_FLE_JOB_OFFSET);\n+\t*nb_jobs = *((uint16_t *)((uint64_t)fle -\n+\t\t\t\tQDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));\n+\tstatus = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);\n \n-\t*job = (struct rte_qdma_job *)*ppjob;\n-\t(*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |\n-\t\t\t (DPAA2_GET_FD_FRC(fd) & 0xFF);\n-\tvqid = (*job)->vq_id;\n+\tppjob = (struct rte_qdma_job **)((uint64_t)fle -\n+\t\t\t\tQDMA_FLE_FLE_OFFSET + QDMA_FLE_JOBS_OFFSET);\n+\n+\tfor (i = 0; i < (*nb_jobs); i++) {\n+\t\tjob[i] = ppjob[i];\n+\t\tjob[i]->status = status;\n+\t}\n \n \t/* Free FLE to the pool 
*/\n-\trte_mempool_put(qdma_dev->fle_pool, elem);\n+\trte_mempool_put(qdma_dev->fle_pool,\n+\t\t\t\t(void *)((uint64_t)fle - QDMA_FLE_FLE_OFFSET));\n \n-\treturn vqid;\n+\treturn job[0]->vq_id;\n }\n \n /* Function to receive a QDMA job for a given device and queue*/\n@@ -344,9 +508,16 @@ dpdmai_dev_dequeue_multijob_prefetch(\n \tuint8_t status, pending;\n \tuint8_t num_rx = 0;\n \tconst struct qbman_fd *fd;\n-\tuint16_t vqid;\n+\tuint16_t vqid, num_rx_ret;\n \tint ret, pull_size;\n \n+\tif (qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {\n+\t\t/** Make sure there are enough space to get jobs.*/\n+\t\tif (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))\n+\t\t\treturn -EINVAL;\n+\t\tnb_jobs = 1;\n+\t}\n+\n \tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n \t\tret = dpaa2_affine_qbman_swp();\n \t\tif (ret) {\n@@ -440,12 +611,13 @@ dpdmai_dev_dequeue_multijob_prefetch(\n \t\t}\n \t\tfd = qbman_result_DQ_fd(dq_storage);\n \n-\t\tvqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx]);\n+\t\tvqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx],\n+\t\t\t\t\t\t\t\t&num_rx_ret);\n \t\tif (vq_id)\n \t\t\tvq_id[num_rx] = vqid;\n \n \t\tdq_storage++;\n-\t\tnum_rx++;\n+\t\tnum_rx += num_rx_ret;\n \t} while (pending);\n \n \tif (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {\n@@ -490,8 +662,17 @@ dpdmai_dev_dequeue_multijob_no_prefetch(\n \tuint8_t status, pending;\n \tuint8_t num_rx = 0;\n \tconst struct qbman_fd *fd;\n-\tuint16_t vqid;\n-\tint ret, next_pull = nb_jobs, num_pulled = 0;\n+\tuint16_t vqid, num_rx_ret;\n+\tint ret, next_pull, num_pulled = 0;\n+\n+\tif (qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {\n+\t\t/** Make sure there are enough space to get jobs.*/\n+\t\tif (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))\n+\t\t\treturn -EINVAL;\n+\t\tnb_jobs = 1;\n+\t}\n+\n+\tnext_pull = nb_jobs;\n \n \tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n \t\tret = dpaa2_affine_qbman_swp();\n@@ -560,12 +741,13 @@ dpdmai_dev_dequeue_multijob_no_prefetch(\n \t\t\t}\n \t\t\tfd = qbman_result_DQ_fd(dq_storage);\n \n-\t\t\tvqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx]);\n+\t\t\tvqid = qdma_vq->get_job(qdma_vq, fd,\n+\t\t\t\t\t\t&job[num_rx], &num_rx_ret);\n \t\t\tif (vq_id)\n \t\t\t\tvq_id[num_rx] = vqid;\n \n \t\t\tdq_storage++;\n-\t\t\tnum_rx++;\n+\t\t\tnum_rx += num_rx_ret;\n \t\t\tnum_pulled++;\n \n \t\t} while (pending);\n@@ -592,6 +774,7 @@ dpdmai_dev_enqueue_multi(\n \tint ret;\n \tuint32_t num_to_send = 0;\n \tuint16_t num_tx = 0;\n+\tuint32_t enqueue_loop, retry_count, loop;\n \n \tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n \t\tret = dpaa2_affine_qbman_swp();\n@@ -612,45 +795,87 @@ dpdmai_dev_enqueue_multi(\n \tqbman_eq_desc_set_no_orp(&eqdesc, 0);\n \tqbman_eq_desc_set_response(&eqdesc, 0, 0);\n \n+\tif (qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {\n+\t\tuint16_t fd_nb;\n+\t\tuint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?\n+\t\t\t\t\t\tDPAA2_QDMA_MAX_SG_NB : nb_jobs;\n+\t\tuint16_t job_idx = 0;\n+\t\tuint16_t fd_sg_nb[8];\n+\t\tuint16_t nb_jobs_ret = 0;\n+\n+\t\tif (nb_jobs % DPAA2_QDMA_MAX_SG_NB)\n+\t\t\tfd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;\n+\t\telse\n+\t\t\tfd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;\n+\n+\t\tmemset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);\n+\n+\t\tfor (loop = 0; loop < fd_nb; loop++) {\n+\t\t\tret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],\n+\t\t\t\t\t      sg_entry_nb);\n+\t\t\tif (unlikely(ret < 0))\n+\t\t\t\treturn 0;\n+\t\t\tfd_sg_nb[loop] = sg_entry_nb;\n+\t\t\tnb_jobs -= sg_entry_nb;\n+\t\t\tjob_idx += sg_entry_nb;\n+\t\t\tsg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB 
?\n+\t\t\t\t\t\tDPAA2_QDMA_MAX_SG_NB : nb_jobs;\n+\t\t}\n+\n+\t\t/* Enqueue the packet to the QBMAN */\n+\t\tenqueue_loop = 0; retry_count = 0;\n+\n+\t\twhile (enqueue_loop < fd_nb) {\n+\t\t\tret = qbman_swp_enqueue_multiple(swp,\n+\t\t\t\t\t&eqdesc, &fd[enqueue_loop],\n+\t\t\t\t\tNULL, fd_nb - enqueue_loop);\n+\t\t\tif (unlikely(ret < 0)) {\n+\t\t\t\tretry_count++;\n+\t\t\t\tif (retry_count > DPAA2_MAX_TX_RETRY_COUNT)\n+\t\t\t\t\treturn nb_jobs_ret;\n+\t\t\t} else {\n+\t\t\t\tfor (loop = 0; loop < (uint32_t)ret; loop++)\n+\t\t\t\t\tnb_jobs_ret +=\n+\t\t\t\t\t\tfd_sg_nb[enqueue_loop + loop];\n+\t\t\t\tenqueue_loop += ret;\n+\t\t\t\tretry_count = 0;\n+\t\t\t}\n+\t\t}\n+\n+\t\treturn nb_jobs_ret;\n+\t}\n+\n \tmemset(fd, 0, nb_jobs * sizeof(struct qbman_fd));\n \n \twhile (nb_jobs > 0) {\n-\t\tuint32_t loop;\n-\n \t\tnum_to_send = (nb_jobs > dpaa2_eqcr_size) ?\n \t\t\tdpaa2_eqcr_size : nb_jobs;\n \n-\t\tfor (loop = 0; loop < num_to_send; loop++) {\n-\t\t\tret = qdma_vq->set_fd(qdma_vq, &fd[loop], job[num_tx]);\n-\t\t\tif (ret < 0) {\n-\t\t\t\t/* Set nb_jobs to loop, so outer while loop\n-\t\t\t\t * breaks out.\n-\t\t\t\t */\n-\t\t\t\tnb_jobs = loop;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\n-\t\t\tnum_tx++;\n-\t\t}\n+\t\tret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],\n+\t\t\t\t\t\t&job[num_tx], num_to_send);\n+\t\tif (unlikely(ret < 0))\n+\t\t\tbreak;\n \n \t\t/* Enqueue the packet to the QBMAN */\n-\t\tuint32_t enqueue_loop = 0, retry_count = 0;\n+\t\tenqueue_loop = 0; retry_count = 0;\n+\t\tloop = num_to_send;\n \n \t\twhile (enqueue_loop < loop) {\n \t\t\tret = qbman_swp_enqueue_multiple(swp,\n \t\t\t\t\t\t&eqdesc,\n-\t\t\t\t\t\t&fd[enqueue_loop],\n+\t\t\t\t\t\t&fd[num_tx + enqueue_loop],\n \t\t\t\t\t\tNULL,\n \t\t\t\t\t\tloop - enqueue_loop);\n \t\t\tif (unlikely(ret < 0)) {\n \t\t\t\tretry_count++;\n \t\t\t\tif (retry_count > DPAA2_MAX_TX_RETRY_COUNT)\n-\t\t\t\t\treturn num_tx - (loop - enqueue_loop);\n+\t\t\t\t\treturn num_tx;\n \t\t\t} else {\n \t\t\t\tenqueue_loop += ret;\n \t\t\t\tretry_count = 0;\n \t\t\t}\n \t\t}\n+\t\tnum_tx += num_to_send;\n \t\tnb_jobs -= loop;\n \t}\n \treturn num_tx;\n@@ -969,6 +1194,21 @@ dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,\n \t\treturn -ENODEV;\n \t}\n \n+\tif (q_config->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {\n+\t\tif (!(q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {\n+\t\t\tDPAA2_QDMA_ERR(\n+\t\t\t\t\"qDMA SG format only supports physical queue!\");\n+\t\t\trte_spinlock_unlock(&qdma_dev->lock);\n+\t\t\treturn -ENODEV;\n+\t\t}\n+\t\tif (!(q_config->flags & RTE_QDMA_VQ_FD_LONG_FORMAT)) {\n+\t\t\tDPAA2_QDMA_ERR(\n+\t\t\t\t\"qDMA SG format only supports long FD format!\");\n+\t\t\trte_spinlock_unlock(&qdma_dev->lock);\n+\t\t\treturn -ENODEV;\n+\t\t}\n+\t}\n+\n \tif (q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ) {\n \t\t/* Allocate HW queue for a VQ */\n \t\tqdma_dev->vqs[i].hw_queue = alloc_hw_queue(q_config->lcore_id);\n@@ -999,12 +1239,16 @@ dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,\n \t\treturn -ENODEV;\n \t}\n \n+\tqdma_dev->vqs[i].flags = q_config->flags;\n \tqdma_dev->vqs[i].in_use = 1;\n \tqdma_dev->vqs[i].lcore_id = q_config->lcore_id;\n \tmemset(&qdma_dev->vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));\n \n \tif (q_config->flags & RTE_QDMA_VQ_FD_LONG_FORMAT) {\n-\t\tqdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_lf;\n+\t\tif (q_config->flags & RTE_QDMA_VQ_FD_SG_FORMAT)\n+\t\t\tqdma_dev->vqs[i].set_fd = dpdmai_dev_set_sg_fd_lf;\n+\t\telse\n+\t\t\tqdma_dev->vqs[i].set_fd = dpdmai_dev_set_multi_fd_lf;\n \t\tqdma_dev->vqs[i].get_job = 
dpdmai_dev_get_job_lf;\n \t} else {\n \t\tqdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_us;\n@@ -1079,6 +1323,12 @@ dpaa2_qdma_dequeue(struct rte_rawdev *rawdev,\n \tint ret = 0, i;\n \tunsigned int ring_count;\n \n+\tif (qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {\n+\t\t/** Make sure there are enough space to get jobs.*/\n+\t\tif (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))\n+\t\t\treturn -EINVAL;\n+\t}\n+\n \t/* Return error in case of wrong lcore_id */\n \tif (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {\n \t\tDPAA2_QDMA_WARN(\"QDMA dequeue for vqid %d on wrong core\",\n@@ -1090,7 +1340,8 @@ dpaa2_qdma_dequeue(struct rte_rawdev *rawdev,\n \tif (qdma_vq->num_enqueues == qdma_vq->num_dequeues)\n \t\treturn 0;\n \n-\tif (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))\n+\tif (!(qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) &&\n+\t\tqdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))\n \t\tnb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);\n \n \tif (qdma_vq->exclusive_hw_queue) {\ndiff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\nindex ff7743f..43a01d5 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\n@@ -11,16 +11,37 @@ struct rte_qdma_job;\n #define DPAA2_QDMA_MAX_FLE 3\n #define DPAA2_QDMA_MAX_SDD 2\n \n+#define DPAA2_QDMA_MAX_SG_NB 64\n+\n #define DPAA2_DPDMAI_MAX_QUEUES\t8\n \n-/** FLE pool size: 3 Frame list + 2 source/destination descriptor */\n-#define QDMA_FLE_POOL_SIZE (sizeof(struct rte_qdma_job *) + \\\n+/** FLE pool size: job number(uint64_t) +\n+ * 3 Frame list + 2 source/destination descriptor  +\n+ * 32 (src + dst) sg entries + 32 jobs pointers.\n+ */\n+\n+#define QDMA_FLE_POOL_SIZE (sizeof(uint64_t) + \\\n \t\tsizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \\\n-\t\tsizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)\n+\t\tsizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \\\n+\t\tsizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2 + \\\n+\t\tsizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)\n+\n+#define QDMA_FLE_JOB_NB_OFFSET 0\n \n-#define QDMA_FLE_JOB_OFFSET 0\n #define QDMA_FLE_FLE_OFFSET \\\n-\t\t(QDMA_FLE_JOB_OFFSET + sizeof(struct rte_qdma_job *))\n+\t\t(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))\n+\n+#define QDMA_FLE_SDD_OFFSET \\\n+\t\t(QDMA_FLE_FLE_OFFSET + \\\n+\t\tsizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)\n+\n+#define QDMA_FLE_SG_ENTRY_OFFSET \\\n+\t\t(QDMA_FLE_SDD_OFFSET + \\\n+\t\tsizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)\n+\n+#define QDMA_FLE_JOBS_OFFSET \\\n+\t\t(QDMA_FLE_SG_ENTRY_OFFSET + \\\n+\t\tsizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)\n \n /** FLE pool cache size */\n #define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))\n@@ -90,10 +111,12 @@ struct qdma_virt_queue;\n \n typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,\n \t\t\t\t\tconst struct qbman_fd *fd,\n-\t\t\t\t\tstruct rte_qdma_job **job);\n+\t\t\t\t\tstruct rte_qdma_job **job,\n+\t\t\t\t\tuint16_t *nb_jobs);\n typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,\n \t\t\t\t\tstruct qbman_fd *fd,\n-\t\t\t\t\tstruct rte_qdma_job *job);\n+\t\t\t\t\tstruct rte_qdma_job **job,\n+\t\t\t\t\tuint16_t nb_jobs);\n \n typedef int (qdma_dequeue_multijob_t)(\n \t\t\t\tstruct qdma_virt_queue *qdma_vq,\n@@ -126,6 +149,7 @@ struct qdma_virt_queue {\n \tuint64_t num_dequeues;\n \n \tuint16_t vq_id;\n+\tuint32_t flags;\n \n \tqdma_set_fd_t *set_fd;\n \tqdma_get_job_t *get_job;\n@@ -191,6 +215,43 @@ struct qdma_sdd {\n \t};\n } __rte_packed;\n 
\n+#define QDMA_SG_FMT_SDB\t0x0 /* single data buffer */\n+#define QDMA_SG_FMT_FDS\t0x1 /* frame data section */\n+#define QDMA_SG_FMT_SGTE\t0x2 /* SGT extension */\n+#define QDMA_SG_SL_SHORT\t0x1 /* short length */\n+#define QDMA_SG_SL_LONG\t0x0 /* long length */\n+#define QDMA_SG_F\t0x1 /* last sg entry */\n+#define QDMA_SG_BMT_ENABLE 0x1\n+#define QDMA_SG_BMT_DISABLE 0x0\n+\n+struct qdma_sg_entry {\n+\tuint32_t addr_lo;\t\t/* address 0:31 */\n+\tuint32_t addr_hi:17;\t/* address 32:48 */\n+\tuint32_t rsv:15;\n+\tunion {\n+\t\tuint32_t data_len_sl0;\t/* SL=0, the long format */\n+\t\tstruct {\n+\t\t\tuint32_t len:17;\t/* SL=1, the short format */\n+\t\t\tuint32_t reserve:3;\n+\t\t\tuint32_t sf:1;\n+\t\t\tuint32_t sr:1;\n+\t\t\tuint32_t size:10;\t/* buff size */\n+\t\t} data_len_sl1;\n+\t} data_len;\t\t\t\t\t/* AVAIL_LENGTH */\n+\tunion {\n+\t\tuint32_t ctrl_fields;\n+\t\tstruct {\n+\t\t\tuint32_t bpid:14;\n+\t\t\tuint32_t ivp:1;\n+\t\t\tuint32_t bmt:1;\n+\t\t\tuint32_t offset:12;\n+\t\t\tuint32_t fmt:2;\n+\t\t\tuint32_t sl:1;\n+\t\t\tuint32_t f:1;\n+\t\t} ctrl;\n+\t};\n+} __attribute__((__packed__));\n+\n /** Represents a DPDMAI raw device */\n struct dpaa2_dpdmai_dev {\n \t/** Pointer to Next device instance */\ndiff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\nindex ff4fc1d..cfec303 100644\n--- a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\n+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\n@@ -52,6 +52,8 @@ enum {\n \n #define RTE_QDMA_VQ_FD_LONG_FORMAT\t\t(1ULL << 1)\n \n+#define RTE_QDMA_VQ_FD_SG_FORMAT\t\t(1ULL << 2)\n+\n /** States if the source addresses is physical. */\n #define RTE_QDMA_JOB_SRC_PHY\t\t(1ULL)\n \n",
    "prefixes": [
        "5/7"
    ]
}
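
The patch and put methods listed at the top update this same resource and require authentication. A sketch of a partial update via PATCH, assuming a valid Patchwork API token ("REPLACE_WITH_TOKEN" is a placeholder) and that the writable fields include "state", "archived", and "delegate" per the Patchwork REST API; the other fields shown above are read-only:

    import requests

    # PATCH updates only the fields given; PUT replaces all writable fields.
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/76672/",
        headers={"Authorization": "Token REPLACE_WITH_TOKEN"},
        json={"state": "accepted", "archived": False},
    )
    resp.raise_for_status()
    print(resp.json()["state"])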