get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (replaces the writable fields of the resource).

GET /api/patches/76674/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 76674,
    "url": "http://patches.dpdk.org/api/patches/76674/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1599470764-30569-8-git-send-email-g.singh@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599470764-30569-8-git-send-email-g.singh@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1599470764-30569-8-git-send-email-g.singh@nxp.com",
    "date": "2020-09-07T09:26:04",
    "name": "[7/7] raw/dpaa2_qdma: support enqueue without response wait",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "c0317e41a80c33e48a41d3f9381024dc63cc7c91",
    "submitter": {
        "id": 1068,
        "url": "http://patches.dpdk.org/api/people/1068/?format=api",
        "name": "Gagandeep Singh",
        "email": "g.singh@nxp.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1599470764-30569-8-git-send-email-g.singh@nxp.com/mbox/",
    "series": [
        {
            "id": 11988,
            "url": "http://patches.dpdk.org/api/series/11988/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11988",
            "date": "2020-09-07T09:25:57",
            "name": "raw/dpaa2_qdma: driver enhancement",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/11988/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/76674/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/76674/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 4AF93A04B9;\n\tMon,  7 Sep 2020 11:28:45 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 051C61C1A9;\n\tMon,  7 Sep 2020 11:27:09 +0200 (CEST)",
            "from inva020.nxp.com (inva020.nxp.com [92.121.34.13])\n by dpdk.org (Postfix) with ESMTP id D90681C125\n for <dev@dpdk.org>; Mon,  7 Sep 2020 11:27:07 +0200 (CEST)",
            "from inva020.nxp.com (localhost [127.0.0.1])\n by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id BAC911A016B;\n Mon,  7 Sep 2020 11:27:07 +0200 (CEST)",
            "from invc005.ap-rdc01.nxp.com (invc005.ap-rdc01.nxp.com\n [165.114.16.14])\n by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 3833D1A1028;\n Mon,  7 Sep 2020 11:27:05 +0200 (CEST)",
            "from lsv11086.swis.cn-sha01.nxp.com (lsv11086.swis.cn-sha01.nxp.com\n [92.121.210.87])\n by invc005.ap-rdc01.nxp.com (Postfix) with ESMTP id 67DA0402D7;\n Mon,  7 Sep 2020 11:26:59 +0200 (CEST)"
        ],
        "From": "Gagandeep Singh <g.singh@nxp.com>",
        "To": "dev@dpdk.org,\n\tnipun.gupta@nxp.com,\n\themant.agrawal@nxp.com",
        "Cc": "thomas.monjalon@6wind.com,\n\tJun Yang <jun.yang@nxp.com>",
        "Date": "Mon,  7 Sep 2020 17:26:04 +0800",
        "Message-Id": "<1599470764-30569-8-git-send-email-g.singh@nxp.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1599470764-30569-1-git-send-email-g.singh@nxp.com>",
        "References": "<1599470764-30569-1-git-send-email-g.singh@nxp.com>",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH 7/7] raw/dpaa2_qdma: support enqueue without\n\tresponse wait",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Jun Yang <jun.yang@nxp.com>\n\nIn this condition, user needs to check if dma transfer is completed\nby its own logic.\n\nqDMA FLE pool is not used in this condition since there is no chance to put\nFLE back to pool without dequeue response.\n\nUser application is responsible to transfer FLE memory to qDMA driver\nby qdma job descriptor and maintain it as well.\n\nSigned-off-by: Jun Yang <jun.yang@nxp.com>\n---\n drivers/raw/dpaa2_qdma/dpaa2_qdma.c         | 85 ++++++++++++++++++++++++-----\n drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h |  7 +++\n 2 files changed, 78 insertions(+), 14 deletions(-)",
    "diff": "diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\nindex ba46ed0..845aa53 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n@@ -46,7 +46,7 @@ static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];\n static inline int\n qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,\n \t\t\tuint32_t len, struct qbman_fd *fd,\n-\t\t\tstruct rte_qdma_rbp *rbp)\n+\t\t\tstruct rte_qdma_rbp *rbp, int ser)\n {\n \tfd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));\n \tfd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));\n@@ -56,7 +56,7 @@ qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,\n \tfd->simple_pci.bmt = 1;\n \tfd->simple_pci.fmt = 3;\n \tfd->simple_pci.sl = 1;\n-\tfd->simple_pci.ser = 1;\n+\tfd->simple_pci.ser = ser;\n \n \tfd->simple_pci.sportid = rbp->sportid;\t/*pcie 3 */\n \tfd->simple_pci.srbp = rbp->srbp;\n@@ -81,7 +81,7 @@ qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,\n \n static inline int\n qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,\n-\t\t\tuint32_t len, struct qbman_fd *fd)\n+\t\t\tuint32_t len, struct qbman_fd *fd, int ser)\n {\n \tfd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));\n \tfd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));\n@@ -91,7 +91,7 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,\n \tfd->simple_ddr.bmt = 1;\n \tfd->simple_ddr.fmt = 3;\n \tfd->simple_ddr.sl = 1;\n-\tfd->simple_ddr.ser = 1;\n+\tfd->simple_ddr.ser = ser;\n \t/**\n \t * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011\n \t * Coherent copy of cacheable memory,\n@@ -204,6 +204,8 @@ static inline int dpdmai_dev_set_fd_us(\n \tstruct rte_qdma_job **ppjob;\n \tsize_t iova;\n \tint ret = 0, loop;\n+\tint ser = (qdma_vq->flags & RTE_QDMA_VQ_NO_RESPONSE) ?\n+\t\t\t\t0 : 1;\n \n \tfor (loop = 0; loop < nb_jobs; loop++) {\n \t\tif (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK)\n@@ -218,12 +220,12 @@ static inline int 
dpdmai_dev_set_fd_us(\n \n \t\tif ((rbp->drbp == 1) || (rbp->srbp == 1))\n \t\t\tret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,\n-\t\t\t\t\t\t(phys_addr_t)job[loop]->dest,\n-\t\t\t\t\t\tjob[loop]->len, &fd[loop], rbp);\n+\t\t\t\t\t(phys_addr_t)job[loop]->dest,\n+\t\t\t\t\tjob[loop]->len, &fd[loop], rbp, ser);\n \t\telse\n \t\t\tret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,\n-\t\t\t\t\t\t(phys_addr_t)job[loop]->dest,\n-\t\t\t\t\t\tjob[loop]->len, &fd[loop]);\n+\t\t\t\t\t(phys_addr_t)job[loop]->dest,\n+\t\t\t\t\tjob[loop]->len, &fd[loop], ser);\n \t}\n \n \treturn ret;\n@@ -290,6 +292,51 @@ static uint32_t qdma_populate_sg_entry(\n \treturn total_len;\n }\n \n+static inline int dpdmai_dev_set_multi_fd_lf_no_rsp(\n+\t\tstruct qdma_virt_queue *qdma_vq,\n+\t\tstruct qbman_fd *fd,\n+\t\tstruct rte_qdma_job **job,\n+\t\tuint16_t nb_jobs)\n+{\n+\tstruct rte_qdma_rbp *rbp = &qdma_vq->rbp;\n+\tstruct rte_qdma_job **ppjob;\n+\tuint16_t i;\n+\tvoid *elem;\n+\tstruct qbman_fle *fle;\n+\tuint64_t elem_iova, fle_iova;\n+\n+\tfor (i = 0; i < nb_jobs; i++) {\n+\t\telem = job[i]->usr_elem;\n+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA\n+\t\telem_iova = rte_mempool_virt2iova(elem);\n+#else\n+\t\telem_iova = DPAA2_VADDR_TO_IOVA(elem);\n+#endif\n+\n+\t\tppjob = (struct rte_qdma_job **)\n+\t\t\t\t((uint64_t)elem + QDMA_FLE_SINGLE_JOB_OFFSET);\n+\t\t*ppjob = job[i];\n+\n+\t\tjob[i]->vq_id = qdma_vq->vq_id;\n+\n+\t\tfle = (struct qbman_fle *)\n+\t\t\t\t((uint64_t)elem + QDMA_FLE_FLE_OFFSET);\n+\t\tfle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;\n+\n+\t\tDPAA2_SET_FD_ADDR(&fd[i], fle_iova);\n+\t\tDPAA2_SET_FD_COMPOUND_FMT(&fd[i]);\n+\n+\t\tmemset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +\n+\t\t\t\tDPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));\n+\n+\t\tdpaa2_qdma_populate_fle(fle, fle_iova, rbp,\n+\t\t\tjob[i]->src, job[i]->dest, job[i]->len,\n+\t\t\tjob[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);\n+\t}\n+\n+\treturn 0;\n+}\n+\n static inline int 
dpdmai_dev_set_multi_fd_lf(\n \t\tstruct qdma_virt_queue *qdma_vq,\n \t\tstruct qbman_fd *fd,\n@@ -361,10 +408,14 @@ static inline int dpdmai_dev_set_sg_fd_lf(\n \t * Get an FLE/SDD from FLE pool.\n \t * Note: IO metadata is before the FLE and SDD memory.\n \t */\n-\tret = rte_mempool_get(qdma_vq->fle_pool, (void **)(&elem));\n-\tif (ret) {\n-\t\tDPAA2_QDMA_DP_DEBUG(\"Memory alloc failed for FLE\");\n-\t\treturn ret;\n+\tif (qdma_vq->flags & RTE_QDMA_VQ_NO_RESPONSE) {\n+\t\telem = job[0]->usr_elem;\n+\t} else {\n+\t\tret = rte_mempool_get(qdma_vq->fle_pool, &elem);\n+\t\tif (ret) {\n+\t\t\tDPAA2_QDMA_DP_DEBUG(\"Memory alloc failed for FLE\");\n+\t\t\treturn ret;\n+\t\t}\n \t}\n \n #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA\n@@ -389,7 +440,8 @@ static inline int dpdmai_dev_set_sg_fd_lf(\n \n \tDPAA2_SET_FD_ADDR(fd, fle_iova);\n \tDPAA2_SET_FD_COMPOUND_FMT(fd);\n-\tDPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);\n+\tif (!(qdma_vq->flags & RTE_QDMA_VQ_NO_RESPONSE))\n+\t\tDPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);\n \n \t/* Populate FLE */\n \tif (likely(nb_jobs > 1)) {\n@@ -1271,7 +1323,12 @@ dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,\n \t\t\tqdma_dev->vqs[i].set_fd = dpdmai_dev_set_sg_fd_lf;\n \t\t\tqdma_dev->vqs[i].get_job = dpdmai_dev_get_sg_job_lf;\n \t\t} else {\n-\t\t\tqdma_dev->vqs[i].set_fd = dpdmai_dev_set_multi_fd_lf;\n+\t\t\tif (q_config->flags & RTE_QDMA_VQ_NO_RESPONSE)\n+\t\t\t\tqdma_dev->vqs[i].set_fd =\n+\t\t\t\t\tdpdmai_dev_set_multi_fd_lf_no_rsp;\n+\t\t\telse\n+\t\t\t\tqdma_dev->vqs[i].set_fd =\n+\t\t\t\t\tdpdmai_dev_set_multi_fd_lf;\n \t\t\tqdma_dev->vqs[i].get_job = dpdmai_dev_get_single_job_lf;\n \t\t}\n \t} else {\ndiff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\nindex 3cd4167..cc1ac25 100644\n--- a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\n+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\n@@ -54,6 +54,8 @@ enum {\n \n #define RTE_QDMA_VQ_FD_SG_FORMAT\t\t(1ULL << 2)\n \n+#define 
RTE_QDMA_VQ_NO_RESPONSE\t\t\t(1ULL << 3)\n+\n /** States if the source addresses is physical. */\n #define RTE_QDMA_JOB_SRC_PHY\t\t(1ULL)\n \n@@ -154,6 +156,11 @@ struct rte_qdma_job {\n \t */\n \tuint16_t status;\n \tuint16_t vq_id;\n+\t/**\n+\t * FLE pool element maintained by user, in case no qDMA response.\n+\t * Note: the address must be allocated from DPDK memory pool.\n+\t */\n+\tvoid *usr_elem;\n };\n \n struct rte_qdma_enqdeq {\n",
    "prefixes": [
        "7/7"
    ]
}