get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/52294/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 52294,
    "url": "http://patches.dpdk.org/api/patches/52294/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190404114818.21286-5-hemant.agrawal@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190404114818.21286-5-hemant.agrawal@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190404114818.21286-5-hemant.agrawal@nxp.com",
    "date": "2019-04-04T11:50:25",
    "name": "[v3,5/7] raw/dpaa2_qdma: add rbp mode support",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "bfdbbf4fec45476e4fa09f1a83e32deed828ba6d",
    "submitter": {
        "id": 477,
        "url": "http://patches.dpdk.org/api/people/477/?format=api",
        "name": "Hemant Agrawal",
        "email": "hemant.agrawal@nxp.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190404114818.21286-5-hemant.agrawal@nxp.com/mbox/",
    "series": [
        {
            "id": 4115,
            "url": "http://patches.dpdk.org/api/series/4115/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4115",
            "date": "2019-04-04T11:50:18",
            "name": "[v3,1/7] config: increase the num of rawdev to be 64",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/4115/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/52294/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/52294/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 6BBED1B3C0;\n\tThu,  4 Apr 2019 13:50:35 +0200 (CEST)",
            "from EUR02-AM5-obe.outbound.protection.outlook.com\n\t(mail-eopbgr00051.outbound.protection.outlook.com [40.107.0.51])\n\tby dpdk.org (Postfix) with ESMTP id 01F811B3A0\n\tfor <dev@dpdk.org>; Thu,  4 Apr 2019 13:50:26 +0200 (CEST)",
            "from VI1PR0401MB2541.eurprd04.prod.outlook.com (10.168.65.19) by\n\tVI1PR0401MB1965.eurprd04.prod.outlook.com (10.166.140.155) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n\t15.20.1750.15; Thu, 4 Apr 2019 11:50:25 +0000",
            "from VI1PR0401MB2541.eurprd04.prod.outlook.com\n\t([fe80::18e3:39b6:c61d:3f18]) by\n\tVI1PR0401MB2541.eurprd04.prod.outlook.com\n\t([fe80::18e3:39b6:c61d:3f18%12]) with mapi id 15.20.1750.017;\n\tThu, 4 Apr 2019 11:50:25 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=nxp.com; s=selector1;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n\tbh=vn1U+zzG049sHExzz/S/ee/M7JES0DW224cmLdzdoLI=;\n\tb=hQXvQg3YzErNNQZkk2HQI5+ZSXYk/Zepb2h3TFLuDBC5nTI/PLSnU63xbf4OGeeJ9RIp+aAMR7wFFINLwgod58Bolao7sAzulsWxX2Dujli5bs6gGO69GDgCNfDY/aHT36PHEGJDYuiZd23Gsc237thsGqJmmWDX7Yv8lZLsvxY=",
        "From": "Hemant Agrawal <hemant.agrawal@nxp.com>",
        "To": "\"dev@dpdk.org\" <dev@dpdk.org>",
        "CC": "\"thomas@monjalon.net\" <thomas@monjalon.net>, Shreyansh Jain\n\t<shreyansh.jain@nxp.com>, \"M.h. Lian\" <minghuan.lian@nxp.com>,\n\tSachin Saxena <sachin.saxena@nxp.com>",
        "Thread-Topic": "[PATCH v3 5/7] raw/dpaa2_qdma: add rbp mode support",
        "Thread-Index": "AQHU6tyXfwI5ddNp10uTODiSwSnSXg==",
        "Date": "Thu, 4 Apr 2019 11:50:25 +0000",
        "Message-ID": "<20190404114818.21286-5-hemant.agrawal@nxp.com>",
        "References": "<20190404110215.14410-1-hemant.agrawal@nxp.com>\n\t<20190404114818.21286-1-hemant.agrawal@nxp.com>",
        "In-Reply-To": "<20190404114818.21286-1-hemant.agrawal@nxp.com>",
        "Accept-Language": "en-US",
        "Content-Language": "en-US",
        "X-MS-Has-Attach": "",
        "X-MS-TNEF-Correlator": "",
        "x-originating-ip": "[92.120.1.72]",
        "x-mailer": "git-send-email 2.17.1",
        "x-clientproxiedby": "LO2P265CA0082.GBRP265.PROD.OUTLOOK.COM\n\t(2603:10a6:600:8::22) To VI1PR0401MB2541.eurprd04.prod.outlook.com\n\t(2603:10a6:800:56::19)",
        "authentication-results": "spf=none (sender IP is )\n\tsmtp.mailfrom=hemant.agrawal@nxp.com; ",
        "x-ms-exchange-messagesentrepresentingtype": "1",
        "x-ms-publictraffictype": "Email",
        "x-ms-office365-filtering-correlation-id": "dd4d5781-18cb-4c14-752c-08d6b8f3b974",
        "x-ms-office365-filtering-ht": "Tenant",
        "x-microsoft-antispam": "BCL:0; PCL:0;\n\tRULEID:(2390118)(7020095)(4652040)(8989299)(5600139)(711020)(4605104)(4618075)(4534185)(4627221)(201703031133081)(201702281549075)(8990200)(2017052603328)(7193020);\n\tSRVR:VI1PR0401MB1965; ",
        "x-ms-traffictypediagnostic": "VI1PR0401MB1965:",
        "x-microsoft-antispam-prvs": "<VI1PR0401MB196516D7EECBFC206EC7AF9089500@VI1PR0401MB1965.eurprd04.prod.outlook.com>",
        "x-forefront-prvs": "0997523C40",
        "x-forefront-antispam-report": "SFV:NSPM;\n\tSFS:(10009020)(346002)(376002)(136003)(39860400002)(396003)(366004)(199004)(189003)(44832011)(8936002)(71190400001)(25786009)(305945005)(6512007)(52116002)(478600001)(68736007)(105586002)(4326008)(30864003)(53946003)(7736002)(486006)(53936002)(2906002)(1076003)(5660300002)(102836004)(5640700003)(76176011)(446003)(186003)(476003)(386003)(66066001)(11346002)(26005)(316002)(106356001)(2616005)(6916009)(2501003)(14454004)(3846002)(81166006)(6486002)(71200400001)(6436002)(99286004)(6116002)(6506007)(81156014)(8676002)(97736004)(50226002)(54906003)(1730700003)(86362001)(256004)(14444005)(36756003)(2351001)(579004);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:VI1PR0401MB1965;\n\tH:VI1PR0401MB2541.eurprd04.prod.outlook.com; FPR:; SPF:None; LANG:en; \n\tPTR:InfoNoRecords; A:1; MX:1; ",
        "received-spf": "None (protection.outlook.com: nxp.com does not designate\n\tpermitted sender hosts)",
        "x-ms-exchange-senderadcheck": "1",
        "x-microsoft-antispam-message-info": "SDKDLqydaK/pf7pcramPRUZqCnTngBl8x6kzkAENOIifjJUonPrr6KKrm04uuIg3c8xnzTF72KdhSbzOgrgM8KyMVfVxFkI/UkuRjXrwhmtIcXq9oN4bZxMTjun9bEl+4RfoYYIWa5JJelvIt/XJFOaZNdBWEys65VbVq4EIBC5nMj91ZBfP/BOhxkVhor3EDlCjMl3a33PS4TYulTPPkAvHpkZWwuU7D/JXm+zqtB5qpPFXIrpcgj5cHEA1tkEG79U0ADOTeiR4MI2XVX2EFWJ3nu/J55fD9ldr9rLxUZqXF3rUJvzZ6SxULSuy6AA4EIplF6ThSzOWi5WntogpqmGaeKyv6Pt1adh0xKHSU/ZUXPmfiAYp2oOzPWeruWOpyBK2LiR6fD2Hj2zH28USVmanr1LSRBaC6yppvOABD74=",
        "Content-Type": "text/plain; charset=\"iso-8859-1\"",
        "Content-Transfer-Encoding": "quoted-printable",
        "MIME-Version": "1.0",
        "X-OriginatorOrg": "nxp.com",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "dd4d5781-18cb-4c14-752c-08d6b8f3b974",
        "X-MS-Exchange-CrossTenant-originalarrivaltime": "04 Apr 2019 11:50:25.4200\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-fromentityheader": "Hosted",
        "X-MS-Exchange-CrossTenant-id": "686ea1d3-bc2b-4c6f-a92c-d99c5c301635",
        "X-MS-Exchange-CrossTenant-mailboxtype": "HOSTED",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "VI1PR0401MB1965",
        "Subject": "[dpdk-dev] [PATCH v3 5/7] raw/dpaa2_qdma: add rbp mode support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support for route by port mode. The route by port\nfeature in HW helps in translating the PCI address\nof connected device.\n\nSigned-off-by: Minghuan Lian <Minghuan.Lian@nxp.com>\nSigned-off-by: Sachin Saxena <sachin.saxena@nxp.com>\nSigned-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>\n---\n drivers/raw/dpaa2_qdma/Makefile             |   2 +-\n drivers/raw/dpaa2_qdma/dpaa2_qdma.c         | 403 +++++++++++++-------\n drivers/raw/dpaa2_qdma/dpaa2_qdma.h         |  65 +++-\n drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h |  60 ++-\n 4 files changed, 383 insertions(+), 147 deletions(-)",
    "diff": "diff --git a/drivers/raw/dpaa2_qdma/Makefile b/drivers/raw/dpaa2_qdma/Makefile\nindex 5c75f5fa0..ee95662f1 100644\n--- a/drivers/raw/dpaa2_qdma/Makefile\n+++ b/drivers/raw/dpaa2_qdma/Makefile\n@@ -26,7 +26,7 @@ LDLIBS += -lrte_common_dpaax\n \n EXPORT_MAP := rte_pmd_dpaa2_qdma_version.map\n \n-LIBABIVER := 2\n+LIBABIVER := 3\n \n #\n # all source are stored in SRCS-y\ndiff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\nindex a1351e648..cf1a1aaa6 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c\n@@ -19,13 +19,16 @@\n #include <portal/dpaa2_hw_pvt.h>\n #include <portal/dpaa2_hw_dpio.h>\n \n+#include \"rte_pmd_dpaa2_qdma.h\"\n #include \"dpaa2_qdma.h\"\n #include \"dpaa2_qdma_logs.h\"\n-#include \"rte_pmd_dpaa2_qdma.h\"\n \n /* Dynamic log type identifier */\n int dpaa2_qdma_logtype;\n \n+uint32_t dpaa2_coherent_no_alloc_cache;\n+uint32_t dpaa2_coherent_alloc_cache;\n+\n /* QDMA device */\n static struct qdma_device qdma_dev;\n \n@@ -345,14 +348,29 @@ rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)\n \n \tqdma_vqs[i].in_use = 1;\n \tqdma_vqs[i].lcore_id = lcore_id;\n-\n+\tmemset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));\n \trte_spinlock_unlock(&qdma_dev.lock);\n \n \treturn i;\n }\n \n+/*create vq for route-by-port*/\n+int\n+rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,\n+\t\t\tstruct rte_qdma_rbp *rbp)\n+{\n+\tint i;\n+\n+\ti = rte_qdma_vq_create(lcore_id, flags);\n+\n+\tmemcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));\n+\n+\treturn i;\n+}\n+\n static void\n dpaa2_qdma_populate_fle(struct qbman_fle *fle,\n+\t\t\tstruct rte_qdma_rbp *rbp,\n \t\t\tuint64_t src, uint64_t dest,\n \t\t\tsize_t len, uint32_t flags)\n {\n@@ -368,10 +386,36 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,\n \tDPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));\n \n \t/* source and destination descriptor */\n-\tDPAA2_SET_SDD_RD_COHERENT(sdd); /* source 
descriptor CMD */\n-\tsdd++;\n-\tDPAA2_SET_SDD_WR_COHERENT(sdd); /* dest descriptor CMD */\n+\tif (rbp && rbp->enable) {\n+\t\t/* source */\n+\t\tsdd->read_cmd.portid = rbp->sportid;\n+\t\tsdd->rbpcmd_simple.pfid = rbp->spfid;\n+\t\tsdd->rbpcmd_simple.vfid = rbp->svfid;\n+\n+\t\tif (rbp->srbp) {\n+\t\t\tsdd->read_cmd.rbp = rbp->srbp;\n+\t\t\tsdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;\n+\t\t} else {\n+\t\t\tsdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;\n+\t\t}\n+\t\tsdd++;\n+\t\t/* destination */\n+\t\tsdd->write_cmd.portid = rbp->dportid;\n+\t\tsdd->rbpcmd_simple.pfid = rbp->dpfid;\n+\t\tsdd->rbpcmd_simple.vfid = rbp->dvfid;\n+\n+\t\tif (rbp->drbp) {\n+\t\t\tsdd->write_cmd.rbp = rbp->drbp;\n+\t\t\tsdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;\n+\t\t} else {\n+\t\t\tsdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;\n+\t\t}\n \n+\t} else {\n+\t\tsdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;\n+\t\tsdd++;\n+\t\tsdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;\n+\t}\n \tfle++;\n \t/* source frame list to source buffer */\n \tif (flags & RTE_QDMA_JOB_SRC_PHY) {\n@@ -396,31 +440,57 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,\n \tDPAA2_SET_FLE_FIN(fle);\n }\n \n-int\n-rte_qdma_vq_enqueue_multi(uint16_t vq_id,\n-\t\t\t  struct rte_qdma_job **job,\n-\t\t\t  uint16_t nb_jobs)\n+static inline uint16_t dpdmai_dev_set_fd(struct qbman_fd *fd,\n+\t\t\t\t\tstruct rte_qdma_job *job,\n+\t\t\t\t\tstruct rte_qdma_rbp *rbp,\n+\t\t\t\t\tuint16_t vq_id)\n {\n-\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];\n-\tstruct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;\n-\tstruct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;\n \tstruct qdma_io_meta *io_meta;\n-\tstruct qbman_fd fd_arr[MAX_TX_RING_SLOTS];\n-\tstruct dpaa2_queue *txq;\n \tstruct qbman_fle *fle;\n+\tint ret = 0;\n+\t/*\n+\t * Get an FLE/SDD from FLE pool.\n+\t * Note: IO metadata is before the FLE and SDD memory.\n+\t */\n+\tret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));\n+\tif 
(ret) {\n+\t\tDPAA2_QDMA_DP_DEBUG(\"Memory alloc failed for FLE\");\n+\t\treturn ret;\n+\t}\n+\n+\t/* Set the metadata */\n+\tio_meta->cnxt = (size_t)job;\n+\tio_meta->id = vq_id;\n+\n+\tfle = (struct qbman_fle *)(io_meta + 1);\n+\n+\tDPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));\n+\tDPAA2_SET_FD_COMPOUND_FMT(fd);\n+\tDPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);\n+\n+\t/* Populate FLE */\n+\tmemset(fle, 0, QDMA_FLE_POOL_SIZE);\n+\tdpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,\n+\t\t\t\tjob->len, job->flags);\n+\n+\treturn 0;\n+}\n+\n+static int\n+dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,\n+\t\t\tuint16_t txq_id,\n+\t\t\tuint16_t vq_id,\n+\t\t\tstruct rte_qdma_rbp *rbp,\n+\t\t\tstruct rte_qdma_job **job,\n+\t\t\tuint16_t nb_jobs)\n+{\n+\tstruct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];\n+\tstruct dpaa2_queue *txq;\n \tstruct qbman_eq_desc eqdesc;\n \tstruct qbman_swp *swp;\n \tint ret;\n \tuint32_t num_to_send = 0;\n \tuint16_t num_tx = 0;\n-\tuint16_t num_txed = 0;\n-\n-\t/* Return error in case of wrong lcore_id */\n-\tif (rte_lcore_id() != qdma_vq->lcore_id) {\n-\t\tDPAA2_QDMA_ERR(\"QDMA enqueue for vqid %d on wrong core\",\n-\t\t\t\tvq_id);\n-\t\treturn -1;\n-\t}\n \n \tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n \t\tret = dpaa2_affine_qbman_swp();\n@@ -431,7 +501,7 @@ rte_qdma_vq_enqueue_multi(uint16_t vq_id,\n \t}\n \tswp = DPAA2_PER_LCORE_PORTAL;\n \n-\ttxq = &(dpdmai_dev->tx_queue[qdma_pq->queue_id]);\n+\ttxq = &(dpdmai_dev->tx_queue[txq_id]);\n \n \t/* Prepare enqueue descriptor */\n \tqbman_eq_desc_clear(&eqdesc);\n@@ -439,6 +509,8 @@ rte_qdma_vq_enqueue_multi(uint16_t vq_id,\n \tqbman_eq_desc_set_no_orp(&eqdesc, 0);\n \tqbman_eq_desc_set_response(&eqdesc, 0, 0);\n \n+\tmemset(fd, 0, RTE_QDMA_BURST_NB_MAX * sizeof(struct qbman_fd));\n+\n \twhile (nb_jobs > 0) {\n \t\tuint32_t loop;\n \n@@ -446,73 +518,100 @@ rte_qdma_vq_enqueue_multi(uint16_t vq_id,\n \t\t\tdpaa2_eqcr_size : nb_jobs;\n \n \t\tfor (loop = 0; loop < num_to_send; loop++) 
{\n-\t\t\t/*\n-\t\t\t * Get an FLE/SDD from FLE pool.\n-\t\t\t * Note: IO metadata is before the FLE and SDD memory.\n-\t\t\t */\n-\t\t\tret = rte_mempool_get(qdma_dev.fle_pool,\n-\t\t\t\t\t(void **)(&io_meta));\n-\t\t\tif (ret) {\n-\t\t\t\tDPAA2_QDMA_DP_WARN(\"Me alloc failed for FLE\");\n-\t\t\t\treturn ret;\n+\t\t\tret = dpdmai_dev_set_fd(&fd[loop],\n+\t\t\t\t\t\tjob[num_tx], rbp, vq_id);\n+\t\t\tif (ret < 0) {\n+\t\t\t\t/* Set nb_jobs to loop, so outer while loop\n+\t\t\t\t * breaks out.\n+\t\t\t\t */\n+\t\t\t\tnb_jobs = loop;\n+\t\t\t\tbreak;\n \t\t\t}\n \n-\t\t\t/* Set the metadata */\n-\t\t\tio_meta->cnxt = (size_t)job[num_tx];\n-\t\t\tio_meta->id = vq_id;\n-\n-\t\t\tfle = (struct qbman_fle *)(io_meta + 1);\n-\n-\t\t\t/* populate Frame descriptor */\n-\t\t\tmemset(&fd_arr[loop], 0, sizeof(struct qbman_fd));\n-\t\t\tDPAA2_SET_FD_ADDR(&fd_arr[loop],\n-\t\t\t\t\tDPAA2_VADDR_TO_IOVA(fle));\n-\t\t\tDPAA2_SET_FD_COMPOUND_FMT(&fd_arr[loop]);\n-\t\t\tDPAA2_SET_FD_FRC(&fd_arr[loop], QDMA_SER_CTX);\n-\n-\t\t\t/* Populate FLE */\n-\t\t\tmemset(fle, 0, QDMA_FLE_POOL_SIZE);\n-\t\t\tdpaa2_qdma_populate_fle(fle, job[num_tx]->src,\n-\t\t\t\t\t\tjob[num_tx]->dest,\n-\t\t\t\t\t\tjob[num_tx]->len,\n-\t\t\t\t\t\tjob[num_tx]->flags);\n-\n \t\t\tnum_tx++;\n \t\t}\n \n \t\t/* Enqueue the packet to the QBMAN */\n \t\tuint32_t enqueue_loop = 0;\n-\t\twhile (enqueue_loop < num_to_send) {\n+\t\twhile (enqueue_loop < loop) {\n \t\t\tenqueue_loop += qbman_swp_enqueue_multiple(swp,\n \t\t\t\t\t\t&eqdesc,\n-\t\t\t\t\t\t&fd_arr[enqueue_loop],\n+\t\t\t\t\t\t&fd[enqueue_loop],\n \t\t\t\t\t\tNULL,\n-\t\t\t\t\t\tnum_to_send - enqueue_loop);\n+\t\t\t\t\t\tloop - enqueue_loop);\n \t\t}\n-\n-\t\tnum_txed += num_to_send;\n-\t\tnb_jobs -= num_to_send;\n+\t\tnb_jobs -= loop;\n \t}\n-\tqdma_vq->num_enqueues += num_txed;\n-\treturn num_txed;\n+\treturn num_tx;\n }\n \n int\n-rte_qdma_vq_enqueue(uint16_t vq_id,\n-\t\t    struct rte_qdma_job *job)\n+rte_qdma_vq_enqueue_multi(uint16_t vq_id,\n+\t\t\t  
struct rte_qdma_job **job,\n+\t\t\t  uint16_t nb_jobs)\n {\n+\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];\n+\tstruct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;\n+\tstruct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;\n \tint ret;\n \n-\tret = rte_qdma_vq_enqueue_multi(vq_id, &job, 1);\n+\tDPAA2_QDMA_FUNC_TRACE();\n+\n+\t/* Return error in case of wrong lcore_id */\n+\tif (rte_lcore_id() != qdma_vq->lcore_id) {\n+\t\tDPAA2_QDMA_ERR(\"QDMA enqueue for vqid %d on wrong core\",\n+\t\t\t\tvq_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tret = dpdmai_dev_enqueue_multi(dpdmai_dev,\n+\t\t\t\t qdma_pq->queue_id,\n+\t\t\t\t vq_id,\n+\t\t\t\t &qdma_vq->rbp,\n+\t\t\t\t job,\n+\t\t\t\t nb_jobs);\n \tif (ret < 0) {\n \t\tDPAA2_QDMA_ERR(\"DPDMAI device enqueue failed: %d\", ret);\n \t\treturn ret;\n \t}\n \n-\treturn 1;\n+\tqdma_vq->num_enqueues += ret;\n+\n+\treturn ret;\n+}\n+\n+int\n+rte_qdma_vq_enqueue(uint16_t vq_id,\n+\t\t    struct rte_qdma_job *job)\n+{\n+\treturn rte_qdma_vq_enqueue_multi(vq_id, &job, 1);\n+}\n+\n+static inline uint16_t dpdmai_dev_get_job(const struct qbman_fd *fd,\n+\t\t\t\t\tstruct rte_qdma_job **job)\n+{\n+\tstruct qbman_fle *fle;\n+\tstruct qdma_io_meta *io_meta;\n+\tuint16_t vqid;\n+\t/*\n+\t * Fetch metadata from FLE. 
job and vq_id were set\n+\t * in metadata in the enqueue operation.\n+\t */\n+\tfle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));\n+\tio_meta = (struct qdma_io_meta *)(fle) - 1;\n+\n+\t*job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;\n+\t(*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |\n+\t\t\t (DPAA2_GET_FD_FRC(fd) & 0xFF);\n+\n+\tvqid = io_meta->id;\n+\n+\t/* Free FLE to the pool */\n+\trte_mempool_put(qdma_dev.fle_pool, io_meta);\n+\n+\treturn vqid;\n }\n \n-/* Function to receive a QDMA job for a given device and queue*/\n static int\n dpdmai_dev_dequeue_multijob(struct dpaa2_dpdmai_dev *dpdmai_dev,\n \t\t   uint16_t rxq_id,\n@@ -520,16 +619,18 @@ dpdmai_dev_dequeue_multijob(struct dpaa2_dpdmai_dev *dpdmai_dev,\n \t\t   struct rte_qdma_job **job,\n \t\t   uint16_t nb_jobs)\n {\n-\tstruct qdma_io_meta *io_meta;\n \tstruct dpaa2_queue *rxq;\n \tstruct qbman_result *dq_storage;\n \tstruct qbman_pull_desc pulldesc;\n-\tconst struct qbman_fd *fd;\n \tstruct qbman_swp *swp;\n-\tstruct qbman_fle *fle;\n \tuint32_t fqid;\n-\tuint8_t status;\n-\tint ret;\n+\tuint8_t status, pending;\n+\tuint8_t num_rx = 0;\n+\tconst struct qbman_fd *fd;\n+\tuint16_t vqid;\n+\tint ret, next_pull = nb_jobs, num_pulled = 0;\n+\n+\tDPAA2_QDMA_FUNC_TRACE();\n \n \tif (unlikely(!DPAA2_PER_LCORE_DPIO)) {\n \t\tret = dpaa2_affine_qbman_swp();\n@@ -539,77 +640,75 @@ dpdmai_dev_dequeue_multijob(struct dpaa2_dpdmai_dev *dpdmai_dev,\n \t\t}\n \t}\n \tswp = DPAA2_PER_LCORE_PORTAL;\n+\n \trxq = &(dpdmai_dev->rx_queue[rxq_id]);\n-\tdq_storage = rxq->q_storage->dq_storage[0];\n \tfqid = rxq->fqid;\n \n-\t/* Prepare dequeue descriptor */\n-\tqbman_pull_desc_clear(&pulldesc);\n-\tqbman_pull_desc_set_fq(&pulldesc, fqid);\n-\tqbman_pull_desc_set_storage(&pulldesc, dq_storage,\n-\t\t(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);\n-\tif (nb_jobs > dpaa2_dqrr_size)\n-\t\tqbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);\n-\telse\n-\t\tqbman_pull_desc_set_numframes(&pulldesc, 
nb_jobs);\n-\n-\twhile (1) {\n-\t\tif (qbman_swp_pull(swp, &pulldesc)) {\n-\t\t\tDPAA2_QDMA_DP_WARN(\"VDQ command not issued. QBMAN busy\");\n-\t\t\tcontinue;\n+\tdo {\n+\t\tdq_storage = rxq->q_storage->dq_storage[0];\n+\t\t/* Prepare dequeue descriptor */\n+\t\tqbman_pull_desc_clear(&pulldesc);\n+\t\tqbman_pull_desc_set_fq(&pulldesc, fqid);\n+\t\tqbman_pull_desc_set_storage(&pulldesc, dq_storage,\n+\t\t\t(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);\n+\n+\t\tif (next_pull > dpaa2_dqrr_size) {\n+\t\t\tqbman_pull_desc_set_numframes(&pulldesc,\n+\t\t\t\t\tdpaa2_dqrr_size);\n+\t\t\tnext_pull -= dpaa2_dqrr_size;\n+\t\t} else {\n+\t\t\tqbman_pull_desc_set_numframes(&pulldesc, next_pull);\n+\t\t\tnext_pull = 0;\n \t\t}\n-\t\tbreak;\n-\t}\n \n-\trte_prefetch0((void *)((size_t)(dq_storage + 1)));\n-\t/* Check if the previous issued command is completed. */\n-\twhile (!qbman_check_command_complete(dq_storage))\n-\t\t;\n+\t\twhile (1) {\n+\t\t\tif (qbman_swp_pull(swp, &pulldesc)) {\n+\t\t\t\tDPAA2_QDMA_DP_WARN(\"VDQ command not issued. QBMAN busy\");\n+\t\t\t\t/* Portal was busy, try again */\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\t}\n \n-\tint num_pulled = 0;\n-\tint pending = 1;\n-\tdo {\n-\t\t/* Loop until the dq_storage is updated with\n-\t\t * new token by QBMAN\n-\t\t */\n-\t\twhile (!qbman_check_new_result(dq_storage))\n+\t\trte_prefetch0((void *)((size_t)(dq_storage + 1)));\n+\t\t/* Check if the previous issued command is completed. */\n+\t\twhile (!qbman_check_command_complete(dq_storage))\n \t\t\t;\n \n-\t\trte_prefetch0((void *)((size_t)(dq_storage + 2)));\n-\t\t/* Check whether Last Pull command is Expired and\n-\t\t * setting Condition for Loop termination\n-\t\t */\n-\t\tif (qbman_result_DQ_is_pull_complete(dq_storage)) {\n-\t\t\tpending = 0;\n-\t\t\t/* Check for valid frame. 
*/\n-\t\t\tstatus = qbman_result_DQ_flags(dq_storage);\n-\t\t\tif (unlikely((status &\n-\t\t\t\tQBMAN_DQ_STAT_VALIDFRAME) == 0))\n-\t\t\t\tcontinue;\n-\t\t}\n-\t\tfd = qbman_result_DQ_fd(dq_storage);\n+\t\tnum_pulled = 0;\n+\t\tpending = 1;\n \n-\t\t/*\n-\t\t * Fetch metadata from FLE. job and vq_id were set\n-\t\t * in metadata in the enqueue operation.\n-\t\t */\n-\t\tfle = (struct qbman_fle *)\n-\t\t\t\tDPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));\n-\t\tio_meta = (struct qdma_io_meta *)(fle) - 1;\n-\t\tif (vq_id)\n-\t\t\tvq_id[num_pulled] = io_meta->id;\n+\t\tdo {\n+\t\t\t/* Loop until dq_storage is updated\n+\t\t\t * with new token by QBMAN\n+\t\t\t */\n+\t\t\twhile (!qbman_check_new_result(dq_storage))\n+\t\t\t\t;\n+\t\t\trte_prefetch0((void *)((size_t)(dq_storage + 2)));\n+\n+\t\t\tif (qbman_result_DQ_is_pull_complete(dq_storage)) {\n+\t\t\t\tpending = 0;\n+\t\t\t\t/* Check for valid frame. */\n+\t\t\t\tstatus = qbman_result_DQ_flags(dq_storage);\n+\t\t\t\tif (unlikely((status &\n+\t\t\t\t\tQBMAN_DQ_STAT_VALIDFRAME) == 0))\n+\t\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tfd = qbman_result_DQ_fd(dq_storage);\n \n-\t\tjob[num_pulled] = (struct rte_qdma_job *)(size_t)io_meta->cnxt;\n-\t\tjob[num_pulled]->status = DPAA2_GET_FD_ERR(fd);\n+\t\t\tvqid = dpdmai_dev_get_job(fd, &job[num_rx]);\n+\t\t\tif (vq_id)\n+\t\t\t\tvq_id[num_rx] = vqid;\n \n-\t\t/* Free FLE to the pool */\n-\t\trte_mempool_put(qdma_dev.fle_pool, io_meta);\n+\t\t\tdq_storage++;\n+\t\t\tnum_rx++;\n+\t\t\tnum_pulled++;\n \n-\t\tdq_storage++;\n-\t\tnum_pulled++;\n-\t} while (pending && (num_pulled <= dpaa2_dqrr_size));\n+\t\t} while (pending);\n+\t/* Last VDQ provided all packets and more packets are requested */\n+\t} while (next_pull && num_pulled == dpaa2_dqrr_size);\n \n-\treturn num_pulled;\n+\treturn num_rx;\n }\n \n int\n@@ -664,9 +763,9 @@ rte_qdma_vq_dequeue_multi(uint16_t vq_id,\n \t\t\t\ttemp_qdma_vq = &qdma_vqs[temp_vq_id[i]];\n \t\t\t\trte_ring_enqueue(temp_qdma_vq->status_ring,\n \t\t\t\t\t(void 
*)(job[i]));\n-\t\t\t\tring_count = rte_ring_count(\n-\t\t\t\t\tqdma_vq->status_ring);\n \t\t\t}\n+\t\t\tring_count = rte_ring_count(\n+\t\t\t\t\tqdma_vq->status_ring);\n \t\t}\n \n \t\tif (ring_count) {\n@@ -743,6 +842,35 @@ rte_qdma_vq_destroy(uint16_t vq_id)\n \treturn 0;\n }\n \n+int\n+rte_qdma_vq_destroy_rbp(uint16_t vq_id)\n+{\n+\tstruct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];\n+\n+\tDPAA2_QDMA_FUNC_TRACE();\n+\n+\t/* In case there are pending jobs on any VQ, return -EBUSY */\n+\tif (qdma_vq->num_enqueues != qdma_vq->num_dequeues)\n+\t\treturn -EBUSY;\n+\n+\trte_spinlock_lock(&qdma_dev.lock);\n+\n+\tif (qdma_vq->exclusive_hw_queue) {\n+\t\tfree_hw_queue(qdma_vq->hw_queue);\n+\t} else {\n+\t\tif (qdma_vqs->status_ring)\n+\t\t\trte_ring_free(qdma_vqs->status_ring);\n+\n+\t\tput_hw_queue(qdma_vq->hw_queue);\n+\t}\n+\n+\tmemset(qdma_vq, 0, sizeof(struct qdma_virt_queue));\n+\n+\trte_spinlock_lock(&qdma_dev.lock);\n+\n+\treturn 0;\n+}\n+\n void\n rte_qdma_stop(void)\n {\n@@ -939,6 +1067,21 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)\n \t\tDPAA2_QDMA_ERR(\"Adding H/W queue to list failed\");\n \t\tgoto init_err;\n \t}\n+\n+\tif (!dpaa2_coherent_no_alloc_cache) {\n+\t\tif (dpaa2_svr_family == SVR_LX2160A) {\n+\t\t\tdpaa2_coherent_no_alloc_cache =\n+\t\t\t\tDPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;\n+\t\t\tdpaa2_coherent_alloc_cache =\n+\t\t\t\tDPAA2_LX2_COHERENT_ALLOCATE_CACHE;\n+\t\t} else {\n+\t\t\tdpaa2_coherent_no_alloc_cache =\n+\t\t\t\tDPAA2_COHERENT_NO_ALLOCATE_CACHE;\n+\t\t\tdpaa2_coherent_alloc_cache =\n+\t\t\t\tDPAA2_COHERENT_ALLOCATE_CACHE;\n+\t\t}\n+\t}\n+\n \tDPAA2_QDMA_DEBUG(\"Initialized dpdmai object successfully\");\n \n \treturn 0;\ndiff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\nindex 0cbe90255..f15dda694 100644\n--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\n+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h\n@@ -22,28 +22,24 @@ struct qdma_io_meta;\n \n /** Notification by FQD_CTX[fqid] */\n 
#define QDMA_SER_CTX (1 << 8)\n-\n+#define DPAA2_RBP_MEM_RW            0x0\n /**\n  * Source descriptor command read transaction type for RBP=0:\n  * coherent copy of cacheable memory\n  */\n-#define DPAA2_SET_SDD_RD_COHERENT(sdd) ((sdd)->cmd = (0xb << 28))\n+#define DPAA2_COHERENT_NO_ALLOCATE_CACHE\t0xb\n+#define DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE\t0x7\n /**\n  * Destination descriptor command write transaction type for RBP=0:\n  * coherent copy of cacheable memory\n  */\n-#define DPAA2_SET_SDD_WR_COHERENT(sdd) ((sdd)->cmd = (0x6 << 28))\n+#define DPAA2_COHERENT_ALLOCATE_CACHE\t\t0x6\n+#define DPAA2_LX2_COHERENT_ALLOCATE_CACHE\t0xb\n \n /** Maximum possible H/W Queues on each core */\n #define MAX_HW_QUEUE_PER_CORE\t\t64\n \n-/**\n- * In case of Virtual Queue mode, this specifies the number of\n- * dequeue the 'qdma_vq_dequeue/multi' API does from the H/W Queue\n- * in case there is no job present on the Virtual Queue ring.\n- */\n-#define QDMA_DEQUEUE_BUDGET\t\t64\n-\n+#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)\n /**\n  * Represents a QDMA device.\n  * A single QDMA device exists which is combination of multiple DPDMAI rawdev's.\n@@ -90,6 +86,8 @@ struct qdma_virt_queue {\n \tstruct rte_ring *status_ring;\n \t/** Associated hw queue */\n \tstruct qdma_hw_queue *hw_queue;\n+\t/** Route by port */\n+\tstruct rte_qdma_rbp rbp;\n \t/** Associated lcore id */\n \tuint32_t lcore_id;\n \t/** States if this vq is in use or not */\n@@ -118,7 +116,7 @@ struct qdma_io_meta {\n \t */\n \tuint64_t cnxt;\n \t/** VQ ID is stored as a part of metadata of the enqueue command */\n-\t uint64_t id;\n+\tuint64_t id;\n };\n \n /** Source/Destination Descriptor */\n@@ -127,9 +125,48 @@ struct qdma_sdd {\n \t/** Stride configuration */\n \tuint32_t stride;\n \t/** Route-by-port command */\n-\tuint32_t rbpcmd;\n-\tuint32_t cmd;\n-} __attribute__((__packed__));\n+\tunion {\n+\t\tuint32_t rbpcmd;\n+\t\tstruct rbpcmd_st {\n+\t\t\tuint32_t vfid:6;\n+\t\t\tuint32_t 
rsv4:2;\n+\t\t\tuint32_t pfid:1;\n+\t\t\tuint32_t rsv3:7;\n+\t\t\tuint32_t attr:3;\n+\t\t\tuint32_t rsv2:1;\n+\t\t\tuint32_t at:2;\n+\t\t\tuint32_t vfa:1;\n+\t\t\tuint32_t ca:1;\n+\t\t\tuint32_t tc:3;\n+\t\t\tuint32_t rsv1:5;\n+\t\t} rbpcmd_simple;\n+\t};\n+\tunion {\n+\t\tuint32_t cmd;\n+\t\tstruct rcmd_simple {\n+\t\t\tuint32_t portid:4;\n+\t\t\tuint32_t rsv1:14;\n+\t\t\tuint32_t rbp:1;\n+\t\t\tuint32_t ssen:1;\n+\t\t\tuint32_t rthrotl:4;\n+\t\t\tuint32_t sqos:3;\n+\t\t\tuint32_t ns:1;\n+\t\t\tuint32_t rdtype:4;\n+\t\t} read_cmd;\n+\t\tstruct wcmd_simple {\n+\t\t\tuint32_t portid:4;\n+\t\t\tuint32_t rsv3:10;\n+\t\t\tuint32_t rsv2:2;\n+\t\t\tuint32_t lwc:2;\n+\t\t\tuint32_t rbp:1;\n+\t\t\tuint32_t dsen:1;\n+\t\t\tuint32_t rsv1:4;\n+\t\t\tuint32_t dqos:3;\n+\t\t\tuint32_t ns:1;\n+\t\t\tuint32_t wrttype:4;\n+\t\t} write_cmd;\n+\t};\n+} __attribute__ ((__packed__));\n \n /** Represents a DPDMAI raw device */\n struct dpaa2_dpdmai_dev {\ndiff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\nindex e1ccc19e8..bbc66a286 100644\n--- a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\n+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h\n@@ -13,7 +13,7 @@\n  */\n \n /** Maximum qdma burst size */\n-#define RTE_QDMA_BURST_NB_MAX 32\n+#define RTE_QDMA_BURST_NB_MAX 256\n \n /** Determines the mode of operation */\n enum {\n@@ -73,6 +73,40 @@ struct rte_qdma_config {\n \tint fle_pool_count;\n };\n \n+struct rte_qdma_rbp {\n+\tuint32_t use_ultrashort:1;\n+\tuint32_t enable:1;\n+\t/**\n+\t * dportid:\n+\t * 0000 PCI-Express 1\n+\t * 0001 PCI-Express 2\n+\t * 0010 PCI-Express 3\n+\t * 0011 PCI-Express 4\n+\t * 0100 PCI-Express 5\n+\t * 0101 PCI-Express 6\n+\t */\n+\tuint32_t dportid:4;\n+\tuint32_t dpfid:2;\n+\tuint32_t dvfid:6;\n+\t/*using route by port for destination */\n+\tuint32_t drbp:1;\n+\t/**\n+\t * sportid:\n+\t * 0000 PCI-Express 1\n+\t * 0001 PCI-Express 2\n+\t * 0010 PCI-Express 3\n+\t * 0011 PCI-Express 4\n+\t * 0100 
PCI-Express 5\n+\t * 0101 PCI-Express 6\n+\t */\n+\tuint32_t sportid:4;\n+\tuint32_t spfid:2;\n+\tuint32_t svfid:6;\n+\t/* using route by port for source */\n+\tuint32_t srbp:1;\n+\tuint32_t rsv:4;\n+};\n+\n /** Provides QDMA device statistics */\n struct rte_qdma_vq_stats {\n \t/** States if this vq has exclusively associated hw queue */\n@@ -105,8 +139,10 @@ struct rte_qdma_job {\n \t/**\n \t * Status of the transaction.\n \t * This is filled in the dequeue operation by the driver.\n+\t * upper 8bits acc_err for route by port.\n+\t * lower 8bits fd error\n \t */\n-\tuint8_t status;\n+\tuint16_t status;\n };\n \n /**\n@@ -177,6 +213,11 @@ rte_qdma_start(void);\n int\n rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags);\n \n+/*create vq for route-by-port*/\n+int\n+rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,\n+\t\t\tstruct rte_qdma_rbp *rbp);\n+\n /**\n  * Enqueue multiple jobs to a Virtual Queue.\n  * If the enqueue is successful, the H/W will perform DMA operations\n@@ -275,6 +316,21 @@ rte_qdma_vq_stats(uint16_t vq_id,\n int\n rte_qdma_vq_destroy(uint16_t vq_id);\n \n+/**\n+ * Destroy the RBP specific Virtual Queue specified by vq_id.\n+ * This API can be called from any thread/core. User can create/destroy\n+ * VQ's at runtime.\n+ *\n+ * @param vq_id\n+ *   RBP based Virtual Queue ID which needs to be deinialized.\n+ *\n+ * @returns\n+ *   - 0: Success.\n+ *   - <0: Error code.\n+ */\n+\n+int __rte_experimental\n+rte_qdma_vq_destroy_rbp(uint16_t vq_id);\n /**\n  * Stop QDMA device.\n  */\n",
    "prefixes": [
        "v3",
        "5/7"
    ]
}