get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

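As a rough sketch of how these three operations map to HTTP calls, the Python snippet below uses the requests library against this endpoint. The Authorization token and the "state" value being written are placeholders, not taken from this page: updating a patch requires an authenticated account with sufficient rights on the Patchwork instance, and exactly which fields are writable depends on the server's configuration.

# Illustrative sketch only; API_TOKEN and the new "state" value are placeholders.
import requests

url = "http://patches.dpdk.org/api/patches/29268/"
auth = {"Authorization": "Token API_TOKEN"}  # write access is needed for PUT/PATCH

# get: fetch the patch shown below
patch = requests.get(url).json()

# patch: partial update -- only the supplied fields change
requests.patch(url, headers=auth, json={"state": "accepted"})

# put: full update -- send a complete representation back
patch["state"] = "accepted"
requests.put(url, headers=auth, json=patch)
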
GET /api/patches/29268/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 29268,
    "url": "http://patches.dpdk.org/api/patches/29268/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20170928113344.12248-25-shreyansh.jain@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20170928113344.12248-25-shreyansh.jain@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20170928113344.12248-25-shreyansh.jain@nxp.com",
    "date": "2017-09-28T11:33:28",
    "name": "[dpdk-dev,v5,24/40] net/dpaa: support Tx and Rx queue setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "636cac15823d98948cc740e9e6dac67d646cf15f",
    "submitter": {
        "id": 497,
        "url": "http://patches.dpdk.org/api/people/497/?format=api",
        "name": "Shreyansh Jain",
        "email": "shreyansh.jain@nxp.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20170928113344.12248-25-shreyansh.jain@nxp.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/29268/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/29268/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id C25551B21F;\n\tThu, 28 Sep 2017 13:24:26 +0200 (CEST)",
            "from NAM01-BY2-obe.outbound.protection.outlook.com\n\t(mail-by2nam01on0055.outbound.protection.outlook.com [104.47.34.55])\n\tby dpdk.org (Postfix) with ESMTP id 0D72D1B194\n\tfor <dev@dpdk.org>; Thu, 28 Sep 2017 13:23:50 +0200 (CEST)",
            "from DM5PR03CA0026.namprd03.prod.outlook.com (10.174.189.143) by\n\tMWHPR03MB2703.namprd03.prod.outlook.com (10.168.207.137) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384_P256) id\n\t15.20.77.7; Thu, 28 Sep 2017 11:23:49 +0000",
            "from BN1AFFO11FD006.protection.gbl (2a01:111:f400:7c10::159) by\n\tDM5PR03CA0026.outlook.office365.com (2603:10b6:4:3b::15) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384_P256) id\n\t15.20.56.8 via Frontend Transport; Thu, 28 Sep 2017 11:23:49 +0000",
            "from tx30smr01.am.freescale.net (192.88.168.50) by\n\tBN1AFFO11FD006.mail.protection.outlook.com (10.58.52.66) with\n\tMicrosoft SMTP Server (version=TLS1_0,\n\tcipher=TLS_RSA_WITH_AES_256_CBC_SHA) id 15.20.56.11\n\tvia Frontend Transport; Thu, 28 Sep 2017 11:23:48 +0000",
            "from Tophie.ap.freescale.net ([10.232.14.39])\n\tby tx30smr01.am.freescale.net (8.14.3/8.14.0) with ESMTP id\n\tv8SBMpG0016035; Thu, 28 Sep 2017 04:23:46 -0700"
        ],
        "Authentication-Results": "spf=fail (sender IP is 192.88.168.50)\n\tsmtp.mailfrom=nxp.com; nxp.com; dkim=none (message not signed)\n\theader.d=none;nxp.com; dmarc=fail action=none header.from=nxp.com;",
        "Received-SPF": "Fail (protection.outlook.com: domain of nxp.com does not\n\tdesignate 192.88.168.50 as permitted sender)\n\treceiver=protection.outlook.com; \n\tclient-ip=192.88.168.50; helo=tx30smr01.am.freescale.net;",
        "From": "Shreyansh Jain <shreyansh.jain@nxp.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<ferruh.yigit@intel.com>, <hemant.agrawal@nxp.com>",
        "Date": "Thu, 28 Sep 2017 17:03:28 +0530",
        "Message-ID": "<20170928113344.12248-25-shreyansh.jain@nxp.com>",
        "X-Mailer": "git-send-email 2.9.3",
        "In-Reply-To": "<20170928113344.12248-1-shreyansh.jain@nxp.com>",
        "References": "<20170909112132.13936-1-shreyansh.jain@nxp.com>\n\t<20170928113344.12248-1-shreyansh.jain@nxp.com>",
        "X-EOPAttributedMessage": "0",
        "X-Matching-Connectors": "131510714288036459;\n\t(91ab9b29-cfa4-454e-5278-08d120cd25b8); ()",
        "X-Forefront-Antispam-Report": "CIP:192.88.168.50; IPV:NLI; CTRY:US; EFV:NLI;\n\tSFV:NSPM;\n\tSFS:(10009020)(6009001)(7966004)(336005)(346002)(376002)(39860400002)(39380400002)(2980300002)(1110001)(1109001)(339900001)(199003)(189002)(51234002)(1076002)(498600001)(54906003)(50226002)(68736007)(2351001)(2906002)(106466001)(305945005)(36756003)(5003940100001)(33646002)(316002)(8656003)(5660300001)(85426001)(8936002)(16586007)(86362001)(48376002)(76176999)(50986999)(6666003)(104016004)(81166006)(50466002)(81156014)(77096006)(47776003)(53936002)(6916009)(2950100002)(105606002)(189998001)(97736004)(8676002)(356003)(4326008);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:MWHPR03MB2703;\n\tH:tx30smr01.am.freescale.net; \n\tFPR:; SPF:Fail; PTR:InfoDomainNonexistent; A:1; MX:1; LANG:en; ",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; BN1AFFO11FD006;\n\t1:ZmEwclGdFaCjzsWqM8ciVEDP2aWirFU2DAVgB1b0t2iwhQi3Re2VhedtDJXO08JfwdfoPLXNLU98HD6jDw7TumSIPoIft2v98oWDq/XmUL7z29jITipNd8hJnIhUP4e3",
            "1; MWHPR03MB2703;\n\t3:FwKIe56bx8wBU9mJdnjCeVunHfn8GPNufa1pYcEpI+leX2zfCBZGL1M6wNqdaVMrT3+WaY2qiff/5lMAnOfO/fmIn0vnwMLEpsBF7A29sF8Kdbx8xRpQf4YyoY7ehA/JFODnzRz/w2JLVz7+i+aMhboarS9raEwlbV4Qu7CeSLQNRElEu7E+O505WyulvT86fXdhckljUv//2pN3lEJx/Qz2Ku+qYqDPLcUiyMvN/Xng+TkStVIoe1dcHOyRlkI36azKfmnQk74XCokWOuB3X6DB48XV7+NHzpPf5kRB4xYi1DbO1twTle2eBKmhTdTq5ZTOOPjXG0igl+qJ3LW9eHkl12e7lem0Dd0+VKthR4w=;\n\t25:UUNEkTjmPamjQGa5r8tiUnYJr/G31On2qET8Eccp4mGBYruUj/bHylzFKyJMNmTQ0uFNdIossBDjjDSIZhHFhynlGKgxn47UtF7ls1OicbvIIOdT9v+5iwhQoqU+0rZQdsCGR3M/w8MbkeI3370onEHAM5hRth7f+t8RRWjQ9CUqxf6rk0gIAkriBgwib/EKpJVh8YAGV3/KpUy22aCHihDUDCfbopT4fp5Md0deaV9qDq+klzMPssQS1KSihZSs8KTgISa0PAI2yUPHmUWAgtP/ZsHpPp9Lp/wlmSNCZnt7l3dHLfImuQgcPSJYgEqLt5JsKlPEpBdcmugL13WVGg==",
            "1; MWHPR03MB2703;\n\t31:VBMub/sVFlzj4Y6eTZ48JebbCWP8JsFzh3mslwzIJheubonEsl2oE24F/IUCrc6DqKDdeqf3Lr3Jw3cdn467zwSLF8Tas1bJbp/o80i83o2orHKhrVHPzn5JkkpZffOEcrTkA2EOBp7xHCY5hREzCz7yE4FI9mYxgccURqpzaPMe70QZED/JQZCZBKWXsVyS3K8XbG/thSbflmpaP5NSflo8Br8oesk7Np0Qhqlwe0A=;\n\t4:0VuxCMm9PETAbcQxPj+Ev+2AmbZ4Qwx8y04EtyuuCc9qCd1K5c/Aiit4uZ6KUXaWPyahhX9J/P7dC+99zUn7xenGmhDJHBMZkMKkYhw8/+Ahd5RFh6mkV1innUafV3xOC92w/LvdH1poMGsE1LNJNGilCXH6bXS4/gw5w28kHn+19XfDeeMW/QBtnB7cj9QQN0MaA9qD5I+LYs5gQfNcT8sFvQSu014TEY0rsvuE0IJCndCZn+qRMErBXVDe7F+P0/aWl1MHWhDNG/+8nSfMCXE63puOnaAq6ganSgY7+Se1JDG3eEHStGKSnPOo2Yas/y/G0uHj0cADCG8F8KC0mw==",
            "=?us-ascii?Q?1; MWHPR03MB2703;\n\t23:p1oyDh1OjN0EtqDhrcycw7H6VHNUC6sMwEep06sIb?=\n\t9s+t6EeG1rwyT62s/6NsPBu42X0jLxnEI7mWiEr2nLLJ/Wai6Fu7n+lAyqyDD+jd4fjmUUFs/oPPdunDGTwEL2NN7cW9VOK/OJLryYWRwn1+IXaW7eG/ViDMHrMNqbWt86zAKhcporBxiSC51GGPDFfMiFaFlofW/Io/bkQuk1ow9ILM7KSj00bSh0FUceiFprL4APnD3zq8AtH0H3om0GfYI9T/VzRrY/n/fOdxb6ZAUmu4YbOh0uZsA50D/75NBrTd+SyicJrf5bgiOPXsatLGzlLJH1IqCK0N56ReAx5GojInVJ/kNSSBA5+sqyLL7PZPiS1P3SQTvZDCdhDXzm5I7Qm6oYpGVKLSH1DEWhg0S43cwwXAH3sGG7EejXZalXeKdJEKMxlF1hBDazBh7XYfPPzuzrju/OtUz4Iu57jQr+n5b3Ed+YL3/OYtxf13sG1G8ppLS3wGe4zu927hwfB2aLW+E1XE6lg8CX4RIsrcJ2XPibAPyUhvpDErd8Fj4+gQ4cb1dj6ZoqnQ1buw3kj+fvPcg5oqfK57p4YArrTPmnywo8alXpWWvJuqvecNjLfJDy/Ao2KoWMzRBFjW5zPpQBXjrtDu14FkHkKzRF11SMZtJDERu67/BPa1sO5J4yw2zeA3OWZp9Q5bEympR4qZDwE7cl69oaIGgfkTOu6vb0OaDS98QeHiednTLaxEAWmFS2YMdU2N387AmmBzpuBedC5ZrHeWDqcKc1y5fNOK+PH7S6CBFnoCzgPS3Ay/QuNx5qoU/fBXBxTOXD7NbzvmM2sUufFiEhjMigaOD0agcNPfVYzP/2T0CMvUZT+FK/riAEkxdFvfVgFsquGGCTcByMRcysuqtifNj1cfB7spFhZ8tkmQFfIAFu66s4EUeFhuK5t89EPnjQDc335+LGaI6HrfGTL4jJx3i4KYg5KKE/ENls8YETHF2Vnb+2j0d8WsydbhqaS+EBearIYeu3WWChsLZjz8B6bOYJuqjobQArrIAzvJuEIdab+ShFpqWta96bXjyADysNIvVTgvZE70wfxAKMx5/oAjvHETHQwmW3R5JK+6GXw8/uxCNMdXYaqxDB5Ow/ZS1/dFcLQq7qwtvAITjxzZ+1N8ptImy4+DVUgV0S4cbpOcCHzjB1AdTtrmch+eaS56/71Aqc1dzhVEJAYJrJ3aEzqIGqdZgzgEg==",
            "1; MWHPR03MB2703;\n\t6:LjQW1GsAPIXLn3ZxSgqRnBgtT6HCOfu+d3JtqusCK+1d+5trYj9yJEuewYMJlWZrivFCfLhaoT9M2aX9N2qg+I968RsBKiEzAKDHxmPakam7nRfTWLxcqO1UDd8ZQjVSowcQ+UHiOmHeKUiQYi4QnDzv/q0szmMvYxZ3jXTfmeOEQQiyv1i2PamexvdcENRPe2yYTRHGyVfZRchp/M+qByEzd7BB0cKox1A1FzJ+oLlaUPZmLemsx+yfnJ0VRn1NL9a8K048enVqRz34M4IsY61iXuFxIGchFrUN7EnCcT7aHIta9SXzYOy0CXod4qhtbkXMx3Sd+w7oWhr5MkJt5g==;\n\t5:j7uXvJBYAJ/lwMTDJCSnFoNJnnH+FR5I9Hza6h+tJX77lkBegZS2x5evgbIZGjgxFOXJVSTo5/an+JKWxkTRcx1UlNvoZfJBtiX2VFmSxfYuN2X3mqjdamlxqgomcF6yw0onLifvKZ51bf0nHSJ8Ow==;\n\t24:myf8xolyc0wDN2Rte2rJ/j0iD1VhX3f2ThmokdkyTiRmQFZnOlmTAKnLe5msztly4W3nOVoPTC08iyy9M8h7m5ZN44voThwduF/zXj3LaVI=;\n\t7:kIBnioh0R5ZTR4h21lLmhP20K8toeHyqbVdXzwTZCS1IXYeZUV6CxuDnAWzAP2x/n7MPpvcAxnGOVaP0U+QlLl2VN9QWfflycXJrmMRzvQtTGQaKOUvT54pTj9cEHKj/tWc+ro9tYws1plkR3dZFtsR/SuGUgMcsXhPSbnIOratE2rxRqjYaVPTcUKtAEB8NWU47qeWNrz20DVnAbZCJhYflc3Qf8V8vypaClHGfABc="
        ],
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "137a5305-fae2-44ed-cf63-08d5066363af",
        "X-Microsoft-Antispam": "UriScan:; BCL:0; PCL:0;\n\tRULEID:(22001)(2017052603199)(201703131430075)(201703131517081);\n\tSRVR:MWHPR03MB2703; ",
        "X-MS-TrafficTypeDiagnostic": "MWHPR03MB2703:",
        "X-Exchange-Antispam-Report-Test": "UriScan:(185117386973197)(275809806118684); ",
        "X-Microsoft-Antispam-PRVS": "<MWHPR03MB27038E85EEB3B44F3DECE93890790@MWHPR03MB2703.namprd03.prod.outlook.com>",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(100000700101)(100105000095)(100000701101)(100105300095)(100000702101)(100105100095)(6095135)(2401047)(8121501046)(5005006)(100000703101)(100105400095)(10201501046)(3002001)(93006095)(93001095)(6055026)(6096035)(20161123561025)(20161123563025)(20161123559100)(20161123565025)(201703131430075)(201703131441075)(201703131448075)(201703131433075)(201703161259150)(20161123556025)(201708071742011)(100000704101)(100105200095)(100000705101)(100105500095);\n\tSRVR:MWHPR03MB2703; BCL:0; PCL:0;\n\tRULEID:(100000800101)(100110000095)(100000801101)(100110300095)(100000802101)(100110100095)(100000803101)(100110400095)(400006)(100000804101)(100110200095)(100000805101)(100110500095);\n\tSRVR:MWHPR03MB2703; ",
        "X-Forefront-PRVS": "0444EB1997",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "28 Sep 2017 11:23:48.5072\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-Id": "5afe0b00-7697-4969-b663-5eab37d5f47e",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "TenantId=5afe0b00-7697-4969-b663-5eab37d5f47e;\n\tIp=[192.88.168.50]; \n\tHelo=[tx30smr01.am.freescale.net]",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MWHPR03MB2703",
        "Subject": "[dpdk-dev] [PATCH v5 24/40] net/dpaa: support Tx and Rx queue setup",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>\nSigned-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>\n---\n drivers/net/dpaa/Makefile      |   4 +\n drivers/net/dpaa/dpaa_ethdev.c | 296 ++++++++++++++++++++++++++++++++-\n drivers/net/dpaa/dpaa_rxtx.c   | 370 +++++++++++++++++++++++++++++++++++++++++\n drivers/net/dpaa/dpaa_rxtx.h   |  61 +++++++\n mk/rte.app.mk                  |   1 +\n 5 files changed, 729 insertions(+), 3 deletions(-)\n create mode 100644 drivers/net/dpaa/dpaa_rxtx.c\n create mode 100644 drivers/net/dpaa/dpaa_rxtx.h",
    "diff": "diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile\nindex bb305ca..c77384c 100644\n--- a/drivers/net/dpaa/Makefile\n+++ b/drivers/net/dpaa/Makefile\n@@ -38,10 +38,12 @@ LIB = librte_pmd_dpaa.a\n \n CFLAGS := -I$(SRCDIR) $(CFLAGS)\n CFLAGS += -O3 $(WERROR_FLAGS)\n+CFLAGS += -Wno-pointer-arith\n CFLAGS += -I$(RTE_SDK_DPAA)/\n CFLAGS += -I$(RTE_SDK_DPAA)/include\n CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa\n CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/\n+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa\n CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include\n CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include\n \n@@ -51,7 +53,9 @@ LIBABIVER := 1\n \n # Interfaces with DPDK\n SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c\n+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c\n \n LDLIBS += -lrte_bus_dpaa\n+LDLIBS += -lrte_mempool_dpaa\n \n include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c\nindex 4543dfc..2db7d99 100644\n--- a/drivers/net/dpaa/dpaa_ethdev.c\n+++ b/drivers/net/dpaa/dpaa_ethdev.c\n@@ -62,8 +62,15 @@\n \n #include <rte_dpaa_bus.h>\n #include <rte_dpaa_logs.h>\n+#include <dpaa_mempool.h>\n \n #include <dpaa_ethdev.h>\n+#include <dpaa_rxtx.h>\n+\n+#include <fsl_usd.h>\n+#include <fsl_qman.h>\n+#include <fsl_bman.h>\n+#include <fsl_fman.h>\n \n /* Keep track of whether QMAN and BMAN have been globally initialized */\n static int is_global_init;\n@@ -78,20 +85,104 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)\n \n static int dpaa_eth_dev_start(struct rte_eth_dev *dev)\n {\n+\tstruct dpaa_if *dpaa_intf = dev->data->dev_private;\n+\n \tPMD_INIT_FUNC_TRACE();\n \n \t/* Change tx callback to the real one */\n-\tdev->tx_pkt_burst = NULL;\n+\tdev->tx_pkt_burst = dpaa_eth_queue_tx;\n+\tfman_if_enable_rx(dpaa_intf->fif);\n \n \treturn 0;\n }\n \n static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)\n {\n-\tdev->tx_pkt_burst = NULL;\n+\tstruct dpaa_if *dpaa_intf = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfman_if_disable_rx(dpaa_intf->fif);\n+\tdev->tx_pkt_burst = dpaa_eth_tx_drop_all;\n }\n \n-static void dpaa_eth_dev_close(struct rte_eth_dev *dev __rte_unused)\n+static void dpaa_eth_dev_close(struct rte_eth_dev *dev)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tdpaa_eth_dev_stop(dev);\n+}\n+\n+static\n+int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t    uint16_t nb_desc __rte_unused,\n+\t\t\t    unsigned int socket_id __rte_unused,\n+\t\t\t    const struct rte_eth_rxconf *rx_conf __rte_unused,\n+\t\t\t    struct rte_mempool *mp)\n+{\n+\tstruct dpaa_if *dpaa_intf = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tDPAA_PMD_INFO(\"Rx queue setup for queue index: %d\", queue_idx);\n+\n+\tif (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {\n+\t\tstruct fman_if_ic_params icp;\n+\t\tuint32_t fd_offset;\n+\t\tuint32_t bp_size;\n+\n+\t\tif (!mp->pool_data) {\n+\t\t\tDPAA_PMD_ERR(\"Not an offloaded buffer pool!\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tdpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);\n+\n+\t\tmemset(&icp, 0, sizeof(icp));\n+\t\t/* set ICEOF for to the default value , which is 0*/\n+\t\ticp.iciof = DEFAULT_ICIOF;\n+\t\ticp.iceof = DEFAULT_RX_ICEOF;\n+\t\ticp.icsz = DEFAULT_ICSZ;\n+\t\tfman_if_set_ic_params(dpaa_intf->fif, &icp);\n+\n+\t\tfd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;\n+\t\tfman_if_set_fdoff(dpaa_intf->fif, fd_offset);\n+\n+\t\t/* Buffer pool size should be equal to Dataroom 
Size*/\n+\t\tbp_size = rte_pktmbuf_data_room_size(mp);\n+\t\tfman_if_set_bp(dpaa_intf->fif, mp->size,\n+\t\t\t       dpaa_intf->bp_info->bpid, bp_size);\n+\t\tdpaa_intf->valid = 1;\n+\t\tDPAA_PMD_INFO(\"if =%s - fd_offset = %d offset = %d\",\n+\t\t\t    dpaa_intf->name, fd_offset,\n+\t\t\tfman_if_get_fdoff(dpaa_intf->fif));\n+\t}\n+\tdev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];\n+\n+\treturn 0;\n+}\n+\n+static\n+void dpaa_eth_rx_queue_release(void *rxq __rte_unused)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+}\n+\n+static\n+int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t    uint16_t nb_desc __rte_unused,\n+\t\tunsigned int socket_id __rte_unused,\n+\t\tconst struct rte_eth_txconf *tx_conf __rte_unused)\n+{\n+\tstruct dpaa_if *dpaa_intf = dev->data->dev_private;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tDPAA_PMD_INFO(\"Tx queue setup for queue index: %d\", queue_idx);\n+\tdev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];\n+\treturn 0;\n+}\n+\n+static void dpaa_eth_tx_queue_release(void *txq __rte_unused)\n {\n \tPMD_INIT_FUNC_TRACE();\n }\n@@ -101,15 +192,102 @@ static struct eth_dev_ops dpaa_devops = {\n \t.dev_start\t\t  = dpaa_eth_dev_start,\n \t.dev_stop\t\t  = dpaa_eth_dev_stop,\n \t.dev_close\t\t  = dpaa_eth_dev_close,\n+\n+\t.rx_queue_setup\t\t  = dpaa_eth_rx_queue_setup,\n+\t.tx_queue_setup\t\t  = dpaa_eth_tx_queue_setup,\n+\t.rx_queue_release\t  = dpaa_eth_rx_queue_release,\n+\t.tx_queue_release\t  = dpaa_eth_tx_queue_release,\n };\n \n+/* Initialise an Rx FQ */\n+static int dpaa_rx_queue_init(struct qman_fq *fq,\n+\t\t\t      uint32_t fqid)\n+{\n+\tstruct qm_mcc_initfq opts;\n+\tint ret;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tret = qman_reserve_fqid(fqid);\n+\tif (ret) {\n+\t\tDPAA_PMD_ERR(\"reserve rx fqid %d failed with ret: %d\",\n+\t\t\t     fqid, ret);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tDPAA_PMD_DEBUG(\"creating rx fq %p, fqid %d\", fq, fqid);\n+\tret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);\n+\tif (ret) {\n+\t\tDPAA_PMD_ERR(\"create rx fqid %d failed with ret: %d\",\n+\t\t\tfqid, ret);\n+\t\treturn ret;\n+\t}\n+\n+\topts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |\n+\t\t       QM_INITFQ_WE_CONTEXTA;\n+\n+\topts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;\n+\topts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |\n+\t\t\t   QM_FQCTRL_PREFERINCACHE;\n+\topts.fqd.context_a.stashing.exclusive = 0;\n+\topts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;\n+\topts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;\n+\topts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;\n+\n+\t/*Enable tail drop */\n+\topts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH;\n+\topts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE;\n+\tqm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);\n+\n+\tret = qman_init_fq(fq, 0, &opts);\n+\tif (ret)\n+\t\tDPAA_PMD_ERR(\"init rx fqid %d failed with ret: %d\", fqid, ret);\n+\treturn ret;\n+}\n+\n+/* Initialise a Tx FQ */\n+static int dpaa_tx_queue_init(struct qman_fq *fq,\n+\t\t\t      struct fman_if *fman_intf)\n+{\n+\tstruct qm_mcc_initfq opts;\n+\tint ret;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |\n+\t\t\t     QMAN_FQ_FLAG_TO_DCPORTAL, fq);\n+\tif (ret) {\n+\t\tDPAA_PMD_ERR(\"create tx fq failed with ret: %d\", ret);\n+\t\treturn ret;\n+\t}\n+\topts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |\n+\t\t       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;\n+\topts.fqd.dest.channel = 
fman_intf->tx_channel_id;\n+\topts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;\n+\topts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;\n+\topts.fqd.context_b = 0;\n+\t/* no tx-confirmation */\n+\topts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;\n+\topts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;\n+\tDPAA_PMD_DEBUG(\"init tx fq %p, fqid %d\", fq, fq->fqid);\n+\tret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);\n+\tif (ret)\n+\t\tDPAA_PMD_ERR(\"init tx fqid %d failed %d\", fq->fqid, ret);\n+\treturn ret;\n+}\n+\n /* Initialise a network interface */\n static int\n dpaa_dev_init(struct rte_eth_dev *eth_dev)\n {\n+\tint num_cores, num_rx_fqs, fqid;\n+\tint loop, ret = 0;\n \tint dev_id;\n \tstruct rte_dpaa_device *dpaa_device;\n \tstruct dpaa_if *dpaa_intf;\n+\tstruct fm_eth_port_cfg *cfg;\n+\tstruct fman_if *fman_intf;\n+\tstruct fman_if_bpool *bp, *tmp_bp;\n \n \tPMD_INIT_FUNC_TRACE();\n \n@@ -120,12 +298,110 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)\n \tdpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);\n \tdev_id = dpaa_device->id.dev_id;\n \tdpaa_intf = eth_dev->data->dev_private;\n+\tcfg = &dpaa_netcfg->port_cfg[dev_id];\n+\tfman_intf = cfg->fman_if;\n \n \tdpaa_intf->name = dpaa_device->name;\n \n+\t/* save fman_if & cfg in the interface struture */\n+\tdpaa_intf->fif = fman_intf;\n \tdpaa_intf->ifid = dev_id;\n+\tdpaa_intf->cfg = cfg;\n+\n+\t/* Initialize Rx FQ's */\n+\tif (getenv(\"DPAA_NUM_RX_QUEUES\"))\n+\t\tnum_rx_fqs = atoi(getenv(\"DPAA_NUM_RX_QUEUES\"));\n+\telse\n+\t\tnum_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;\n \n+\t/* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX\n+\t * queues.\n+\t */\n+\tif (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {\n+\t\tDPAA_PMD_ERR(\"Invalid number of RX queues\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdpaa_intf->rx_queues = rte_zmalloc(NULL,\n+\t\tsizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);\n+\tfor (loop = 0; loop < num_rx_fqs; loop++) {\n+\t\tfqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *\n+\t\t\tDPAA_PCD_FQID_MULTIPLIER + loop;\n+\t\tret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tdpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;\n+\t}\n+\tdpaa_intf->nb_rx_queues = num_rx_fqs;\n+\n+\t/* Initialise Tx FQs. 
Have as many Tx FQ's as number of cores */\n+\tnum_cores = rte_lcore_count();\n+\tdpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *\n+\t\tnum_cores, MAX_CACHELINE);\n+\tif (!dpaa_intf->tx_queues)\n+\t\treturn -ENOMEM;\n+\n+\tfor (loop = 0; loop < num_cores; loop++) {\n+\t\tret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],\n+\t\t\t\t\t fman_intf);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tdpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;\n+\t}\n+\tdpaa_intf->nb_tx_queues = num_cores;\n+\n+\tDPAA_PMD_DEBUG(\"All frame queues created\");\n+\n+\t/* reset bpool list, initialize bpool dynamically */\n+\tlist_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {\n+\t\tlist_del(&bp->node);\n+\t\trte_free(bp);\n+\t}\n+\n+\t/* Populate ethdev structure */\n \teth_dev->dev_ops = &dpaa_devops;\n+\teth_dev->rx_pkt_burst = dpaa_eth_queue_rx;\n+\teth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;\n+\n+\t/* Allocate memory for storing MAC addresses */\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"mac_addr\",\n+\t\tETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);\n+\tif (eth_dev->data->mac_addrs == NULL) {\n+\t\tDPAA_PMD_ERR(\"Failed to allocate %d bytes needed to \"\n+\t\t\t\t\t\t\"store MAC addresses\",\n+\t\t\t\tETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);\n+\t\trte_free(dpaa_intf->rx_queues);\n+\t\trte_free(dpaa_intf->tx_queues);\n+\t\tdpaa_intf->rx_queues = NULL;\n+\t\tdpaa_intf->tx_queues = NULL;\n+\t\tdpaa_intf->nb_rx_queues = 0;\n+\t\tdpaa_intf->nb_tx_queues = 0;\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* copy the primary mac address */\n+\tmemcpy(eth_dev->data->mac_addrs[0].addr_bytes,\n+\t\tfman_intf->mac_addr.addr_bytes,\n+\t\tETHER_ADDR_LEN);\n+\n+\tRTE_LOG(INFO, PMD, \"net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\\n\",\n+\t\tdpaa_device->name,\n+\t\tfman_intf->mac_addr.addr_bytes[0],\n+\t\tfman_intf->mac_addr.addr_bytes[1],\n+\t\tfman_intf->mac_addr.addr_bytes[2],\n+\t\tfman_intf->mac_addr.addr_bytes[3],\n+\t\tfman_intf->mac_addr.addr_bytes[4],\n+\t\tfman_intf->mac_addr.addr_bytes[5]);\n+\n+\t/* Disable RX mode */\n+\tfman_if_discard_rx_errors(fman_intf);\n+\tfman_if_disable_rx(fman_intf);\n+\t/* Disable promiscuous mode */\n+\tfman_if_promiscuous_disable(fman_intf);\n+\t/* Disable multicast */\n+\tfman_if_reset_mcast_filter_table(fman_intf);\n+\t/* Reset interface statistics */\n+\tfman_if_stats_reset(fman_intf);\n \n \treturn 0;\n }\n@@ -147,6 +423,20 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)\n \n \tdpaa_eth_dev_close(dev);\n \n+\t/* release configuration memory */\n+\tif (dpaa_intf->fc_conf)\n+\t\trte_free(dpaa_intf->fc_conf);\n+\n+\trte_free(dpaa_intf->rx_queues);\n+\tdpaa_intf->rx_queues = NULL;\n+\n+\trte_free(dpaa_intf->tx_queues);\n+\tdpaa_intf->tx_queues = NULL;\n+\n+\t/* free memory for storing MAC addresses */\n+\trte_free(dev->data->mac_addrs);\n+\tdev->data->mac_addrs = NULL;\n+\n \tdev->dev_ops = NULL;\n \tdev->rx_pkt_burst = NULL;\n \tdev->tx_pkt_burst = NULL;\ndiff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c\nnew file mode 100644\nindex 0000000..c4e67f5\n--- /dev/null\n+++ b/drivers/net/dpaa/dpaa_rxtx.c\n@@ -0,0 +1,370 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright 2016 Freescale Semiconductor, Inc. 
All rights reserved.\n+ *   Copyright 2017 NXP.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/* System headers */\n+#include <stdio.h>\n+#include <inttypes.h>\n+#include <unistd.h>\n+#include <stdio.h>\n+#include <limits.h>\n+#include <sched.h>\n+#include <pthread.h>\n+\n+#include <rte_config.h>\n+#include <rte_byteorder.h>\n+#include <rte_common.h>\n+#include <rte_interrupts.h>\n+#include <rte_log.h>\n+#include <rte_debug.h>\n+#include <rte_pci.h>\n+#include <rte_atomic.h>\n+#include <rte_branch_prediction.h>\n+#include <rte_memory.h>\n+#include <rte_memzone.h>\n+#include <rte_tailq.h>\n+#include <rte_eal.h>\n+#include <rte_alarm.h>\n+#include <rte_ether.h>\n+#include <rte_ethdev.h>\n+#include <rte_atomic.h>\n+#include <rte_malloc.h>\n+#include <rte_ring.h>\n+#include <rte_ip.h>\n+#include <rte_tcp.h>\n+#include <rte_udp.h>\n+\n+#include \"dpaa_ethdev.h\"\n+#include \"dpaa_rxtx.h\"\n+#include <rte_dpaa_bus.h>\n+#include <dpaa_mempool.h>\n+\n+#include <fsl_usd.h>\n+#include <fsl_qman.h>\n+#include <fsl_bman.h>\n+#include <of.h>\n+#include <netcfg.h>\n+\n+#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \\\n+\tdo { \\\n+\t\t(_fd)->cmd = 0; \\\n+\t\t(_fd)->opaque_addr = 0; \\\n+\t\t(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \\\n+\t\t(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \\\n+\t\t(_fd)->opaque |= (_mbuf)->pkt_len; \\\n+\t\t(_fd)->addr = (_mbuf)->buf_physaddr; \\\n+\t\t(_fd)->bpid = _bpid; \\\n+\t} while (0)\n+\n+static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,\n+\t\t\t\t\t\t\tuint32_t ifid)\n+{\n+\tstruct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);\n+\tstruct rte_mbuf *mbuf;\n+\tvoid *ptr;\n+\tuint16_t offset =\n+\t\t(fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;\n+\tuint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;\n+\n+\tDPAA_DP_LOG(DEBUG, \" FD--->MBUF\");\n+\n+\t/* Ignoring case when format != qm_fd_contig */\n+\tptr = 
rte_dpaa_mem_ptov(fd->addr);\n+\t/* Ignoring case when ptr would be NULL. That is only possible incase\n+\t * of a corrupted packet\n+\t */\n+\n+\tmbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);\n+\t/* Prefetch the Parse results and packet data to L1 */\n+\trte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));\n+\trte_prefetch0((void *)((uint8_t *)ptr + offset));\n+\n+\tmbuf->data_off = offset;\n+\tmbuf->data_len = length;\n+\tmbuf->pkt_len = length;\n+\n+\tmbuf->port = ifid;\n+\tmbuf->nb_segs = 1;\n+\tmbuf->ol_flags = 0;\n+\tmbuf->next = NULL;\n+\trte_mbuf_refcnt_set(mbuf, 1);\n+\n+\treturn mbuf;\n+}\n+\n+uint16_t dpaa_eth_queue_rx(void *q,\n+\t\t\t   struct rte_mbuf **bufs,\n+\t\t\t   uint16_t nb_bufs)\n+{\n+\tstruct qman_fq *fq = q;\n+\tstruct qm_dqrr_entry *dq;\n+\tuint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;\n+\tint ret;\n+\n+\tret = rte_dpaa_portal_init((void *)0);\n+\tif (ret) {\n+\t\tDPAA_PMD_ERR(\"Failure in affining portal\");\n+\t\treturn 0;\n+\t}\n+\n+\tret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?\n+\t\t\t\tDPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);\n+\tif (ret)\n+\t\treturn 0;\n+\n+\tdo {\n+\t\tdq = qman_dequeue(fq);\n+\t\tif (!dq)\n+\t\t\tcontinue;\n+\t\tbufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);\n+\t\tqman_dqrr_consume(fq, dq);\n+\t} while (fq->flags & QMAN_FQ_STATE_VDQCR);\n+\n+\treturn num_rx;\n+}\n+\n+static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)\n+{\n+\tint ret;\n+\tuint64_t buf = 0;\n+\tstruct bm_buffer bufs;\n+\n+\tret = bman_acquire(bp_info->bp, &bufs, 1, 0);\n+\tif (ret <= 0) {\n+\t\tDPAA_PMD_WARN(\"Failed to allocate buffers %d\", ret);\n+\t\treturn (void *)buf;\n+\t}\n+\n+\tDPAA_DP_LOG(DEBUG, \"got buffer 0x%lx from pool %d\",\n+\t\t    (uint64_t)bufs.addr, bufs.bpid);\n+\n+\tbuf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size;\n+\tif (!buf)\n+\t\tgoto out;\n+\n+out:\n+\treturn (void *)buf;\n+}\n+\n+static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,\n+\t\t\t\t\t     struct dpaa_if *dpaa_intf)\n+{\n+\tstruct rte_mbuf *dpaa_mbuf;\n+\n+\t/* allocate pktbuffer on bpid for dpaa port */\n+\tdpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);\n+\tif (!dpaa_mbuf)\n+\t\treturn NULL;\n+\n+\tmemcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)\n+\t\t((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);\n+\n+\t/* Copy only the required fields */\n+\tdpaa_mbuf->data_off = mbuf->data_off;\n+\tdpaa_mbuf->pkt_len = mbuf->pkt_len;\n+\tdpaa_mbuf->ol_flags = mbuf->ol_flags;\n+\tdpaa_mbuf->packet_type = mbuf->packet_type;\n+\tdpaa_mbuf->tx_offload = mbuf->tx_offload;\n+\trte_pktmbuf_free(mbuf);\n+\treturn dpaa_mbuf;\n+}\n+\n+/* Handle mbufs which are not segmented (non SG) */\n+static inline void\n+tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,\n+\t\t\t    struct dpaa_bp_info *bp_info,\n+\t\t\t    struct qm_fd *fd_arr)\n+{\n+\tstruct rte_mbuf *mi = NULL;\n+\n+\tif (RTE_MBUF_DIRECT(mbuf)) {\n+\t\tif (rte_mbuf_refcnt_read(mbuf) > 1) {\n+\t\t\t/* In case of direct mbuf and mbuf being cloned,\n+\t\t\t * BMAN should _not_ release buffer.\n+\t\t\t */\n+\t\t\tDPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);\n+\t\t\t/* Buffer should be releasd by EAL */\n+\t\t\trte_mbuf_refcnt_update(mbuf, -1);\n+\t\t} else {\n+\t\t\t/* In case of direct mbuf and no cloning, mbuf can be\n+\t\t\t * released by BMAN.\n+\t\t\t */\n+\t\t\tDPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);\n+\t\t}\n+\t} else {\n+\t\t/* This is data-containing core mbuf: 'mi' */\n+\t\tmi 
= rte_mbuf_from_indirect(mbuf);\n+\t\tif (rte_mbuf_refcnt_read(mi) > 1) {\n+\t\t\t/* In case of indirect mbuf, and mbuf being cloned,\n+\t\t\t * BMAN should _not_ release it and let EAL release\n+\t\t\t * it through pktmbuf_free below.\n+\t\t\t */\n+\t\t\tDPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);\n+\t\t} else {\n+\t\t\t/* In case of indirect mbuf, and no cloning, core mbuf\n+\t\t\t * should be released by BMAN.\n+\t\t\t * Increate refcnt of core mbuf so that when\n+\t\t\t * pktmbuf_free is called and mbuf is released, EAL\n+\t\t\t * doesn't try to release core mbuf which would have\n+\t\t\t * been released by BMAN.\n+\t\t\t */\n+\t\t\trte_mbuf_refcnt_update(mi, 1);\n+\t\t\tDPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);\n+\t\t}\n+\t\trte_pktmbuf_free(mbuf);\n+\t}\n+}\n+\n+/* Handle all mbufs on dpaa BMAN managed pool */\n+static inline uint16_t\n+tx_on_dpaa_pool(struct rte_mbuf *mbuf,\n+\t\tstruct dpaa_bp_info *bp_info,\n+\t\tstruct qm_fd *fd_arr)\n+{\n+\tDPAA_DP_LOG(DEBUG, \"BMAN offloaded buffer, mbuf: %p\", mbuf);\n+\n+\tif (mbuf->nb_segs == 1) {\n+\t\t/* Case for non-segmented buffers */\n+\t\ttx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);\n+\t} else {\n+\t\tDPAA_PMD_DEBUG(\"Number of Segments not supported\");\n+\t\treturn 1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Handle all mbufs on an external pool (non-dpaa) */\n+static inline uint16_t\n+tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,\n+\t\t    struct qm_fd *fd_arr)\n+{\n+\tstruct dpaa_if *dpaa_intf = txq->dpaa_intf;\n+\tstruct rte_mbuf *dmable_mbuf;\n+\n+\tDPAA_DP_LOG(DEBUG, \"Non-BMAN offloaded buffer.\"\n+\t\t    \"Allocating an offloaded buffer\");\n+\tdmable_mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);\n+\tif (!dmable_mbuf) {\n+\t\tDPAA_DP_LOG(DEBUG, \"no dpaa buffers.\");\n+\t\treturn 1;\n+\t}\n+\n+\tDPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, dpaa_intf->bp_info->bpid);\n+\n+\treturn 0;\n+}\n+\n+uint16_t\n+dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)\n+{\n+\tstruct rte_mbuf *mbuf, *mi = NULL;\n+\tstruct rte_mempool *mp;\n+\tstruct dpaa_bp_info *bp_info;\n+\tstruct qm_fd fd_arr[MAX_TX_RING_SLOTS];\n+\tuint32_t frames_to_send, loop, i = 0;\n+\tuint16_t state;\n+\tint ret;\n+\n+\tret = rte_dpaa_portal_init((void *)0);\n+\tif (ret) {\n+\t\tDPAA_PMD_ERR(\"Failure in affining portal\");\n+\t\treturn 0;\n+\t}\n+\n+\tDPAA_DP_LOG(DEBUG, \"Transmitting %d buffers on queue: %p\", nb_bufs, q);\n+\n+\twhile (nb_bufs) {\n+\t\tframes_to_send = (nb_bufs >> 3) ? 
MAX_TX_RING_SLOTS : nb_bufs;\n+\t\tfor (loop = 0; loop < frames_to_send; loop++, i++) {\n+\t\t\tmbuf = bufs[i];\n+\t\t\tif (RTE_MBUF_DIRECT(mbuf)) {\n+\t\t\t\tmp = mbuf->pool;\n+\t\t\t} else {\n+\t\t\t\tmi = rte_mbuf_from_indirect(mbuf);\n+\t\t\t\tmp = mi->pool;\n+\t\t\t}\n+\n+\t\t\tbp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);\n+\t\t\tif (likely(mp->ops_index == bp_info->dpaa_ops_index)) {\n+\t\t\t\tstate = tx_on_dpaa_pool(mbuf, bp_info,\n+\t\t\t\t\t\t\t&fd_arr[loop]);\n+\t\t\t\tif (unlikely(state)) {\n+\t\t\t\t\t/* Set frames_to_send & nb_bufs so\n+\t\t\t\t\t * that packets are transmitted till\n+\t\t\t\t\t * previous frame.\n+\t\t\t\t\t */\n+\t\t\t\t\tframes_to_send = loop;\n+\t\t\t\t\tnb_bufs = loop;\n+\t\t\t\t\tgoto send_pkts;\n+\t\t\t\t}\n+\t\t\t} else {\n+\t\t\t\tstate = tx_on_external_pool(q, mbuf,\n+\t\t\t\t\t\t\t    &fd_arr[loop]);\n+\t\t\t\tif (unlikely(state)) {\n+\t\t\t\t\t/* Set frames_to_send & nb_bufs so\n+\t\t\t\t\t * that packets are transmitted till\n+\t\t\t\t\t * previous frame.\n+\t\t\t\t\t */\n+\t\t\t\t\tframes_to_send = loop;\n+\t\t\t\t\tnb_bufs = loop;\n+\t\t\t\t\tgoto send_pkts;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\n+send_pkts:\n+\t\tloop = 0;\n+\t\twhile (loop < frames_to_send) {\n+\t\t\tloop += qman_enqueue_multi(q, &fd_arr[loop],\n+\t\t\t\t\tframes_to_send - loop);\n+\t\t}\n+\t\tnb_bufs -= frames_to_send;\n+\t}\n+\n+\tDPAA_DP_LOG(DEBUG, \"Transmitted %d buffers on queue: %p\", i, q);\n+\n+\treturn i;\n+}\n+\n+uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,\n+\t\t\t      struct rte_mbuf **bufs __rte_unused,\n+\t\tuint16_t nb_bufs __rte_unused)\n+{\n+\tDPAA_DP_LOG(DEBUG, \"Drop all packets\");\n+\n+\t/* Drop all incoming packets. No need to free packets here\n+\t * because the rte_eth f/w frees up the packets through tx_buffer\n+\t * callback in case this functions returns count less than nb_bufs\n+\t */\n+\treturn 0;\n+}\ndiff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h\nnew file mode 100644\nindex 0000000..45bfae8\n--- /dev/null\n+++ b/drivers/net/dpaa/dpaa_rxtx.h\n@@ -0,0 +1,61 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.\n+ *   Copyright 2017 NXP.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef __DPDK_RXTX_H__\n+#define __DPDK_RXTX_H__\n+\n+/* internal offset from where IC is copied to packet buffer*/\n+#define DEFAULT_ICIOF          32\n+/* IC transfer size */\n+#define DEFAULT_ICSZ\t48\n+\n+/* IC offsets from buffer header address */\n+#define DEFAULT_RX_ICEOF\t16\n+\n+#define DPAA_MAX_DEQUEUE_NUM_FRAMES    63\n+\t/** <Maximum number of frames to be dequeued in a single rx call*/\n+/* FD structure masks and offset */\n+#define DPAA_FD_FORMAT_MASK 0xE0000000\n+#define DPAA_FD_OFFSET_MASK 0x1FF00000\n+#define DPAA_FD_LENGTH_MASK 0xFFFFF\n+#define DPAA_FD_FORMAT_SHIFT 29\n+#define DPAA_FD_OFFSET_SHIFT 20\n+\n+uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);\n+\n+uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);\n+\n+uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,\n+\t\t\t      struct rte_mbuf **bufs __rte_unused,\n+\t\t\t      uint16_t nb_bufs __rte_unused);\n+#endif\ndiff --git a/mk/rte.app.mk b/mk/rte.app.mk\nindex 9c5a171..7440848 100644\n--- a/mk/rte.app.mk\n+++ b/mk/rte.app.mk\n@@ -185,6 +185,7 @@ endif # CONFIG_RTE_LIBRTE_DPAA2_PMD\n \n ifeq ($(CONFIG_RTE_LIBRTE_DPAA_PMD),y)\n _LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_PMD)       += -lrte_bus_dpaa\n+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_PMD)       += -lrte_mempool_dpaa\n endif\n \n endif # !CONFIG_RTE_BUILD_SHARED_LIBS\n",
    "prefixes": [
        "dpdk-dev",
        "v5",
        "24/40"
    ]
}
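
For reference, the response above can also be consumed from a script. A minimal sketch, assuming the requests library is installed and using an arbitrary local filename, that reads a few of the fields shown above and downloads the patch via its "mbox" URL (which serves the raw mail, suitable for git am):

# Minimal sketch: read a few fields and save the patch as an mbox file.
# Assumptions: python-requests is available; "29268.mbox" is an arbitrary name.
import requests

patch = requests.get("http://patches.dpdk.org/api/patches/29268/").json()

print(patch["name"])    # "[dpdk-dev,v5,24/40] net/dpaa: support Tx and Rx queue setup"
print(patch["state"])   # "superseded"
print(patch["check"])   # aggregated check result, "warning" for this patch

# The "mbox" URL returns the full mail; the "comments" and "checks" URLs are
# further API resources that can be fetched the same way.
with open("29268.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"]).content)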