get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).
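The exchange below shows a plain GET against this endpoint. As a minimal sketch, the same data can be fetched programmatically with the Python standard library; this assumes patches.dpdk.org is reachable, that read access needs no authentication (PUT/PATCH writes normally require an API token), and uses only field names visible in the JSON response below.

    import json
    from urllib.request import urlopen

    # Endpoint shown in the example GET response below.
    url = "http://patches.dpdk.org/api/patches/27781/"
    with urlopen(url) as resp:
        patch = json.load(resp)

    # Fields taken from the response body shown below.
    print(patch["name"])                                # patch subject
    print(patch["state"], patch["archived"])            # e.g. "superseded", True
    print(patch["submitter"]["name"], patch["submitter"]["email"])
    print(patch["mbox"])                                # mbox URL, e.g. for applying with git am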

GET /api/patches/27781/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 27781,
    "url": "http://patches.dpdk.org/api/patches/27781/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20170823141213.25476-11-shreyansh.jain@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20170823141213.25476-11-shreyansh.jain@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20170823141213.25476-11-shreyansh.jain@nxp.com",
    "date": "2017-08-23T14:11:43",
    "name": "[dpdk-dev,v3,10/40] bus/dpaa: add QMAN interface driver",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "e7f1dd5016f5c158435429d997a04af3a0a559d6",
    "submitter": {
        "id": 497,
        "url": "http://patches.dpdk.org/api/people/497/?format=api",
        "name": "Shreyansh Jain",
        "email": "shreyansh.jain@nxp.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20170823141213.25476-11-shreyansh.jain@nxp.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/27781/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/27781/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 868417D7A;\n\tWed, 23 Aug 2017 16:02:55 +0200 (CEST)",
            "from NAM03-BY2-obe.outbound.protection.outlook.com\n\t(mail-by2nam03on0077.outbound.protection.outlook.com [104.47.42.77])\n\tby dpdk.org (Postfix) with ESMTP id 4A3269105\n\tfor <dev@dpdk.org>; Wed, 23 Aug 2017 16:02:52 +0200 (CEST)",
            "from BN3PR03CA0085.namprd03.prod.outlook.com\n\t(2a01:111:e400:7a4d::45) by SN2PR03MB2269.namprd03.prod.outlook.com\n\t(2603:10b6:804:d::14) with Microsoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.1385.9;\n\tWed, 23 Aug 2017 14:02:49 +0000",
            "from BL2FFO11FD026.protection.gbl (2a01:111:f400:7c09::124) by\n\tBN3PR03CA0085.outlook.office365.com (2a01:111:e400:7a4d::45) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256_P256) id 15.1.1362.18\n\tvia Frontend Transport; Wed, 23 Aug 2017 14:02:49 +0000",
            "from tx30smr01.am.freescale.net (192.88.168.50) by\n\tBL2FFO11FD026.mail.protection.outlook.com (10.173.161.105) with\n\tMicrosoft\n\tSMTP Server (version=TLS1_0, cipher=TLS_RSA_WITH_AES_256_CBC_SHA) id\n\t15.1.1341.15 via Frontend Transport; Wed, 23 Aug 2017 14:02:48 +0000",
            "from Tophie.ap.freescale.net ([10.232.14.39])\n\tby tx30smr01.am.freescale.net (8.14.3/8.14.0) with ESMTP id\n\tv7NE2Q2u004389; Wed, 23 Aug 2017 07:02:46 -0700"
        ],
        "Authentication-Results": "spf=fail (sender IP is 192.88.168.50)\n\tsmtp.mailfrom=nxp.com; nxp.com; dkim=none (message not signed)\n\theader.d=none;nxp.com; dmarc=fail action=none header.from=nxp.com;",
        "Received-SPF": "Fail (protection.outlook.com: domain of nxp.com does not\n\tdesignate 192.88.168.50 as permitted sender)\n\treceiver=protection.outlook.com; \n\tclient-ip=192.88.168.50; helo=tx30smr01.am.freescale.net;",
        "From": "Shreyansh Jain <shreyansh.jain@nxp.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<ferruh.yigit@intel.com>, <hemant.agrawal@nxp.com>",
        "Date": "Wed, 23 Aug 2017 19:41:43 +0530",
        "Message-ID": "<20170823141213.25476-11-shreyansh.jain@nxp.com>",
        "X-Mailer": "git-send-email 2.9.3",
        "In-Reply-To": "<20170823141213.25476-1-shreyansh.jain@nxp.com>",
        "References": "<1499179471-19145-1-git-send-email-shreyansh.jain@nxp.com>\n\t<20170823141213.25476-1-shreyansh.jain@nxp.com>",
        "X-EOPAttributedMessage": "0",
        "X-Matching-Connectors": "131479705691047502;\n\t(91ab9b29-cfa4-454e-5278-08d120cd25b8); ()",
        "X-Forefront-Antispam-Report": "CIP:192.88.168.50; IPV:NLI; CTRY:US; EFV:NLI;\n\tSFV:NSPM;\n\tSFS:(10009020)(6009001)(336005)(39860400002)(39380400002)(2980300002)(1109001)(1110001)(339900001)(189002)(199003)(6666003)(5660300001)(97736004)(105606002)(8656003)(626005)(68736007)(6916009)(4326008)(2950100002)(575784001)(36756003)(86362001)(85426001)(2906002)(77096006)(356003)(305945005)(106466001)(53946003)(16200700003)(53936002)(1076002)(8676002)(81166006)(50986999)(50226002)(76176999)(50466002)(8936002)(81156014)(5003940100001)(48376002)(104016004)(54906002)(2351001)(33646002)(110136004)(498600001)(189998001)(47776003)(2004002)(579004)(559001)(569006);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:SN2PR03MB2269;\n\tH:tx30smr01.am.freescale.net; \n\tFPR:; SPF:Fail; PTR:InfoDomainNonexistent; MX:1; A:1; LANG:en; ",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; BL2FFO11FD026;\n\t1:60ss49lead9/PiFcV94FJbPRjYY3htprtbUxxFkcyQLQt4GkTguMsGk2BsP17QEuCgMPmMSUe7CvDgxXIeY2Rsi+P4qd/7RohoVrt2i7JlcK3sniZXRjsCEG/tXjSjQJ",
            "1; SN2PR03MB2269;\n\t3:i0NyvDRixt6L+a/2vlLiU5od5AlcNepa44YGD2aBJJ5B1ylNXgAa+CXWsODEumnlrzkm1JCYl8FcvqHVFGz3NKc9Nrw9G0Qi4HH0rvrSWFa5Mbw+WNFMjDLO3SJk7le95qib6UNxopk8/buERS+k/kRNrUIUkFK+MXOXgOFX9tInInVT5CNeVocv6OHyPSjCuBMaG6Oga6Nkww/9rzO4INaKjJmO6tbjU1BP4wNmUOGi6yQXSMLd2JuaY1wTaUOPdr7GPCGY9MPy4PXWxrYpYGgPqpotIyOAhuka5uhNsp8yzVcph7l/w9j6j262xY5v8yMXopHrcpgmQ9Z9i56E+/rLV8TDzmyVQvznO2kNLo0=;\n\t25:SUVucVYFxLFKK0I/ALZvEJV1j7rOiRmyVPwLPN5ymkY7L06oEjqBpxGc48CSSaVLVSmvawad9XGImNI5n5UStiX4WJAifmuW3jf4+uByIAwQNTFhldFq27LY6BbUodBJsQRcderIGd97nZWklcJPA9Yt6GW391JDF3opVvt72jkOqrU2x5AaemQwW5dZO5C5SKC2mph5Y357VqmMs9f3xpI6YrrQzXdIHBE1jYnag6I3nfkFYAxHh8UldLXPhKKp57T9RXn3wsQx7tZdM9W3h9a/aiJktAIA0QSYFi/jOrcbDDZEv7BGPOTUq50bB8s1VtGB6Ogzp/9o3tVM+ylY8Q==",
            "1; SN2PR03MB2269;\n\t31:oXqasxMMLQsWVSvWuiHd87sMR7dPPQJrXbxR1kk4MBTSfjUDt4QktMYD98xj/3h6ckJ7Ad+Nb4KLuS7kHKg9TiFyBNR73mqS4xwvgknqdRRawRUk7J4RxYYYj3F5b5FrEj8jW5N6RW/4Q/3ai/9fQuRTZg4RRv2wkZPhXZS6oT+Bls+eAWq2U/QT4FD22C1oA2ExzSZE1isnR7RJED0JjA5nw1cLR0rPheFPj8dGKbw=;\n\t4:4KZtA6eafrbbYXfwecheZftmp3Sm6ebT49ILBOvKwj2fwgdQcfmJW/DhcpfkTaQIUwa/BzT4nAN6tls9QqozJ4YB5UdK2harm+1NgYpglQOprgOw8EgTf6L9BD0EwE/7X594yKd8d5m2W7kyI+/HHb0/ij+3+b1GBEkjfkZk1nHUQ1iZArvRwCu70jgYnz5GCOlL3fCuXkwVXug4V4i5hlDOYbTyHTuweHdvh4mhvH+wJvuKJkxGgvtl3Vam83TIg/WB26PY16wHY4mEIahUeTb3fD8ibRBHBSZzToWGIt3gihUBJ1BLBgHTfY1qLd7Z5yDqqk/9N5g6mSthm/RCCWQ4QICrvUv+WXJO8E0cZN3UXb7JxTOmI6yZrQaQf+9i",
            "=?us-ascii?Q?1; SN2PR03MB2269;\n\t23:PCAcsfkpizHIanhN8vjYpybloWBBmPckyAWswd+uZ?=\n\tvxvJkHlLUUMobFGKD9JB5BtTCTkDzYN/s6nbUh7Drj/lJ5aJK+2pvJJF5W8L4gW9+4Jj235RwxeDeKklsPCCjNRS5ZiVWdOIe7oFU6ZWMQaQH23VgG1zdN1pf8vBtlPSskcQls5X4CIabHGTnSWHZSv7PzljEdkDcckFpj/Me+CCyp4IXQr2O17OXDEqwvZ0hd+LcPSvNIYBtrPa9EC14Oo2UgtbyjTbpe6YsZ5aZpRhDRXycRbpUhP2DuHmCHgY1wfG5lORlhlM3gFUJMF8LUhetvRVwQQ4IbvpHJkISa37rby+skYP8XhPpUIVP6aV6JdVKK5jz9B/GQaXoECoqzbd8/vnmET/KtAUw+P6dz6GdOpJZgcjlYtYLEoEpdH7Wl5jrCkjg8C0YTw//gSYUc+DIkvpX0A8Yf81N1E6YfjGIDpnwaIbcnTfpkbnc7dWBIFAul1ZjJ7lrJQAVWTJuzaI7taTtYaMU7JJ5xgDv5PyB5Qsow57p6nNvZr0kaNerivSPdZtyRosMLMB2sOml1k8d9nReOij+oWD8cxYf1hmF6UiIEQdrseZO5+M/6IP8GOwlWgWI6EUbz1D/ARISXEv7F11swl+X/ps4EgvOTOYJl+4Lm6+AjwO+2SF54wpviuZEfWLw0HDtaH2bz6vE34Yxrbv/UDAYTmqSdr10emF7yrqocYtOfu3cg07rqhgHSdVWopi+iDDe/0YJfRXLsunbw3GLRS/JUUbbipXgmc5VZTyVXf+ipfAJ3YTj/imJreiP3n52IgcZ9w3FZaVhBXzV05tYKorkrcqmfqlkM+SzWo5VHpnQjZg07h2YruKpBb8iKS+rCTxDQnl5ynooc5lm44XZUqkYs7cBKi6QhjC2C2ygq1WnEQ1ShUEl2lDWYvRTYjDLxZRJiE5KOUcXBB2z7sqOL0fS6LHKcKKuWFmMc+yky4Uh6328oHHSb0QCVrq+BqRcPOuOu9udpeqFHwtv7lIlN63wt5fsyBndWctW65zqJz48Scw1SuNOjxQfZigyjvpAVO2I8bIYiU5f2XK1o7ioI2RYNYo0QYm2vae85+isI/mlBBEkhgovLCm81C2VKbSiigG3qSc2BAwAIO3P8Pf0zMG33+7To0Yc6OrM+KSp+Z6EfS/MJ5IsXUmGeRMmz+sjWRn4UtrIDCqEgfR43BNPC5HLDJmKosyM3C02VlWHEdFHGPEkvwmjaoTspX/XkUQ/wLNCGnYomxFQVgo2YBcAF5e7ybywfOovHw6g==",
            "1; SN2PR03MB2269;\n\t6:bChSm/zXCJg3RHoOn3V5G1q0YJoeNbSBjtDDzlfIsCiEKsFeU5hYggvUbNJkmycXX+t7U5tDQzRWdW6uLG9YAJM1OGglPwUgByZCpGJGbZDkl3MdFJPTADz2Y9sU9+oWYm3r+MvxYC0zR9y8rvEpQB+BQs8HD6AMCuVELxasjN295hQ3bYqj1HLMLGh9YnZrmk+BvxFVEM6sBz3WC5+3H/nfg2OZ0Ms50CN/KYpXnHvAPok9TITIn2j6dUPXHWA9sv5e1sImGNHsgyxrE/JloRTO7ecAV5NKMoKuv5Deh9nCAsigDtTXfKWa6wiG2BfXypAJ03JJKPlwfFZW2XqhmA==;\n\t5:xdjWTb0Y71ZxqWW9oNUADFvtfpOenb4jr04uz2xgQCX8UJEgqOxya3sTaopZCP+e9+aC+NA9XGN/MgnpYSxRBCNG35HGefAJ4AxuWe4cfibEaqX8G2ZS0TMFic/stqsJv2zb5qSKW1c6LJAlO0bygg==;\n\t24:h7fs+90GvHl4WegvHWipyQl0l0Ltiaz6DX3sBwAxZLWOCl3cVq80hvSf1OA6dvvdOiz73zib6xKTsE0EQzsjiS61mOtRP9crH47dLvO4fmA=;\n\t7:BxCPH6FJyUhzZivurHsPt+lDmCjoG1IZJmddqvHatP2ePwHYo+L2jGVi7PNDU9PzdpigQcLrsmgrmlxhlpt2kWNo7V5IvwXjT2GgSZP5ouhxkyOKy265febmgbzX0Zlpedai9amtf2wRXHEOG5k0igqXB6gBUmrSOkW9QRGdL8yL/Gz88NbobIGSYtR+BLEJyK83GvKEoa1qhI8aRpgV3Xwj89TbQtMzIzp1gbuxE1E="
        ],
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "de1d3104-ca29-46cc-464e-08d4ea2fa35a",
        "X-Microsoft-Antispam": "UriScan:; BCL:0; PCL:0;\n\tRULEID:(300000500095)(300135000095)(300000501095)(300135300095)(22001)(300000502095)(300135100095)(300000503095)(300135400095)(2017052603185)(201703131430075)(201703131517081)(300000504095)(300135200095)(300000505095)(300135600095)(300000506095)(300135500095);\n\tSRVR:SN2PR03MB2269; ",
        "X-MS-TrafficTypeDiagnostic": "SN2PR03MB2269:",
        "X-Exchange-Antispam-Report-Test": "UriScan:(185117386973197)(227817650892897)(275809806118684); ",
        "X-Microsoft-Antispam-PRVS": "<SN2PR03MB22694AF7D1F95F3BEA1C903190850@SN2PR03MB2269.namprd03.prod.outlook.com>",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(100000700101)(100105000095)(100000701101)(100105300095)(100000702101)(100105100095)(6095135)(601004)(2401047)(13018025)(13016025)(5005006)(8121501046)(10201501046)(3002001)(93006095)(93001095)(100000703101)(100105400095)(6055026)(6096035)(201703131430075)(201703131433075)(201703131441075)(201703131448075)(201703161259150)(20161123561025)(20161123563025)(20161123559100)(20161123556025)(20161123565025)(201708071742011)(100000704101)(100105200095)(100000705101)(100105500095);\n\tSRVR:SN2PR03MB2269; BCL:0; PCL:0;\n\tRULEID:(100000800101)(100110000095)(100000801101)(100110300095)(100000802101)(100110100095)(100000803101)(100110400095)(400006)(100000804101)(100110200095)(100000805101)(100110500095);\n\tSRVR:SN2PR03MB2269; ",
        "X-Forefront-PRVS": "040866B734",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "23 Aug 2017 14:02:48.7615\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-Id": "5afe0b00-7697-4969-b663-5eab37d5f47e",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "TenantId=5afe0b00-7697-4969-b663-5eab37d5f47e;\n\tIp=[192.88.168.50]; \n\tHelo=[tx30smr01.am.freescale.net]",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SN2PR03MB2269",
        "Subject": "[dpdk-dev] [PATCH v3 10/40] bus/dpaa: add QMAN interface driver",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The Queue Manager (QMan) is a hardware queue management block that\nallows software and accelerators on the datapath to enqueue and dequeue\nframes in order to communicate.\n\nThis part of QBMAN DPAA Block.\n\nSigned-off-by: Geoff Thorpe <geoff.thorpe@nxp.com>\nSigned-off-by: Roy Pledge <roy.pledge@nxp.com>\nSigned-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>\nSigned-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>\n---\n drivers/bus/dpaa/Makefile                 |    4 +\n drivers/bus/dpaa/base/qbman/qman_driver.c |  271 +++++++\n drivers/bus/dpaa/base/qbman/qman_priv.h   |  303 +++++++\n drivers/bus/dpaa/include/fsl_qman.h       | 1254 +++++++++++++++++++++++++++++\n drivers/bus/dpaa/include/fsl_usd.h        |   13 +\n 5 files changed, 1845 insertions(+)\n create mode 100644 drivers/bus/dpaa/base/qbman/qman_driver.c\n create mode 100644 drivers/bus/dpaa/base/qbman/qman_priv.h\n create mode 100644 drivers/bus/dpaa/include/fsl_qman.h",
    "diff": "diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile\nindex ad6f8c0..29f01df 100644\n--- a/drivers/bus/dpaa/Makefile\n+++ b/drivers/bus/dpaa/Makefile\n@@ -70,6 +70,10 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += \\\n \tbase/fman/of.c \\\n \tbase/fman/netcfg_layer.c \\\n \tbase/qbman/process.c \\\n+\tbase/qbman/qman_driver.c \\\n \tbase/qbman/dpaa_sys.c\n \n+# Link Pthread\n+LDLIBS += -lpthread\n+\n include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c b/drivers/bus/dpaa/base/qbman/qman_driver.c\nnew file mode 100644\nindex 0000000..80dde20\n--- /dev/null\n+++ b/drivers/bus/dpaa/base/qbman/qman_driver.c\n@@ -0,0 +1,271 @@\n+/*-\n+ * This file is provided under a dual BSD/GPLv2 license. When using or\n+ * redistributing this file, you may do so under either license.\n+ *\n+ *   BSD LICENSE\n+ *\n+ * Copyright 2008-2016 Freescale Semiconductor Inc.\n+ * Copyright 2017 NXP.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ * * Redistributions of source code must retain the above copyright\n+ * notice, this list of conditions and the following disclaimer.\n+ * * Redistributions in binary form must reproduce the above copyright\n+ * notice, this list of conditions and the following disclaimer in the\n+ * documentation and/or other materials provided with the distribution.\n+ * * Neither the name of the above-listed copyright holders nor the\n+ * names of any contributors may be used to endorse or promote products\n+ * derived from this software without specific prior written permission.\n+ *\n+ *   GPL LICENSE SUMMARY\n+ *\n+ * ALTERNATIVELY, this software may be distributed under the terms of the\n+ * GNU General Public License (\"GPL\") as published by the Free Software\n+ * Foundation, either version 2 of that License or (at your option) any\n+ * later version.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE\n+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n+ * POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <fsl_usd.h>\n+#include <process.h>\n+#include \"qman_priv.h\"\n+#include <sys/ioctl.h>\n+#include <rte_branch_prediction.h>\n+\n+/* Global variable containing revision id (even on non-control plane systems\n+ * where CCSR isn't available).\n+ */\n+u16 qman_ip_rev;\n+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;\n+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;\n+u16 qm_channel_pme = QMAN_CHANNEL_PME;\n+\n+/* Ccsr map address to access ccsrbased register */\n+void *qman_ccsr_map;\n+/* The qman clock frequency */\n+u32 qman_clk;\n+\n+static __thread int fd = -1;\n+static __thread struct qm_portal_config pcfg;\n+static __thread struct dpaa_ioctl_portal_map map = {\n+\t.type = dpaa_portal_qman\n+};\n+\n+static int fsl_qman_portal_init(uint32_t index, int is_shared)\n+{\n+\tcpu_set_t cpuset;\n+\tint loop, ret;\n+\tstruct dpaa_ioctl_irq_map irq_map;\n+\n+\t/* Verify the thread's cpu-affinity */\n+\tret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),\n+\t\t\t\t     &cpuset);\n+\tif (ret) {\n+\t\terror(0, ret, \"pthread_getaffinity_np()\");\n+\t\treturn ret;\n+\t}\n+\tpcfg.cpu = -1;\n+\tfor (loop = 0; loop < CPU_SETSIZE; loop++)\n+\t\tif (CPU_ISSET(loop, &cpuset)) {\n+\t\t\tif (pcfg.cpu != -1) {\n+\t\t\t\tpr_err(\"Thread is not affine to 1 cpu\\n\");\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\t\t\tpcfg.cpu = loop;\n+\t\t}\n+\tif (pcfg.cpu == -1) {\n+\t\tpr_err(\"Bug in getaffinity handling!\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Allocate and map a qman portal */\n+\tmap.index = index;\n+\tret = process_portal_map(&map);\n+\tif (ret) {\n+\t\terror(0, ret, \"process_portal_map()\");\n+\t\treturn ret;\n+\t}\n+\tpcfg.channel = map.channel;\n+\tpcfg.pools = map.pools;\n+\tpcfg.index = map.index;\n+\n+\t/* Make the portal's cache-[enabled|inhibited] regions */\n+\tpcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;\n+\tpcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;\n+\n+\tfd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);\n+\tif (fd == -1) {\n+\t\tpr_err(\"QMan irq init failed\\n\");\n+\t\tprocess_portal_unmap(&map.addr);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tpcfg.is_shared = is_shared;\n+\tpcfg.node = NULL;\n+\tpcfg.irq = fd;\n+\n+\tirq_map.type = dpaa_portal_qman;\n+\tirq_map.portal_cinh = map.addr.cinh;\n+\tprocess_portal_irq_map(fd, &irq_map);\n+\treturn 0;\n+}\n+\n+static int fsl_qman_portal_finish(void)\n+{\n+\tint ret;\n+\n+\tprocess_portal_irq_unmap(fd);\n+\n+\tret = process_portal_unmap(&map.addr);\n+\tif (ret)\n+\t\terror(0, ret, \"process_portal_unmap()\");\n+\treturn ret;\n+}\n+\n+int qman_thread_init(void)\n+{\n+\t/* Convert from contiguous/virtual cpu numbering to real cpu when\n+\t * calling into the code that is dependent on the device naming.\n+\t */\n+\treturn fsl_qman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);\n+}\n+\n+int qman_thread_finish(void)\n+{\n+\treturn fsl_qman_portal_finish();\n+}\n+\n+void qman_thread_irq(void)\n+{\n+\tqbman_invoke_irq(pcfg.irq);\n+\n+\t/* Now we need to uninhibit interrupts. 
This is the only code outside\n+\t * the regular portal driver that manipulates any portal register, so\n+\t * rather than breaking that encapsulation I am simply hard-coding the\n+\t * offset to the inhibit register here.\n+\t */\n+\tout_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);\n+}\n+\n+int qman_global_init(void)\n+{\n+\tconst struct device_node *dt_node;\n+\tint ret = 0;\n+\tsize_t lenp;\n+\tconst u32 *chanid;\n+\tstatic int ccsr_map_fd;\n+\tconst uint32_t *qman_addr;\n+\tuint64_t phys_addr;\n+\tuint64_t regs_size;\n+\tconst u32 *clk;\n+\n+\tstatic int done;\n+\n+\tif (done)\n+\t\treturn -EBUSY;\n+\n+\t/* Use the device-tree to determine IP revision until something better\n+\t * is devised.\n+\t */\n+\tdt_node = of_find_compatible_node(NULL, NULL, \"fsl,qman-portal\");\n+\tif (!dt_node) {\n+\t\tpr_err(\"No qman portals available for any CPU\\n\");\n+\t\treturn -ENODEV;\n+\t}\n+\tif (of_device_is_compatible(dt_node, \"fsl,qman-portal-1.0\") ||\n+\t    of_device_is_compatible(dt_node, \"fsl,qman-portal-1.0.0\"))\n+\t\tpr_err(\"QMan rev1.0 on P4080 rev1 is not supported!\\n\");\n+\telse if (of_device_is_compatible(dt_node, \"fsl,qman-portal-1.1\") ||\n+\t\t of_device_is_compatible(dt_node, \"fsl,qman-portal-1.1.0\"))\n+\t\tqman_ip_rev = QMAN_REV11;\n+\telse if\t(of_device_is_compatible(dt_node, \"fsl,qman-portal-1.2\") ||\n+\t\t of_device_is_compatible(dt_node, \"fsl,qman-portal-1.2.0\"))\n+\t\tqman_ip_rev = QMAN_REV12;\n+\telse if (of_device_is_compatible(dt_node, \"fsl,qman-portal-2.0\") ||\n+\t\t of_device_is_compatible(dt_node, \"fsl,qman-portal-2.0.0\"))\n+\t\tqman_ip_rev = QMAN_REV20;\n+\telse if (of_device_is_compatible(dt_node, \"fsl,qman-portal-3.0.0\") ||\n+\t\t of_device_is_compatible(dt_node, \"fsl,qman-portal-3.0.1\"))\n+\t\tqman_ip_rev = QMAN_REV30;\n+\telse if (of_device_is_compatible(dt_node, \"fsl,qman-portal-3.1.0\") ||\n+\t\t of_device_is_compatible(dt_node, \"fsl,qman-portal-3.1.1\") ||\n+\t\tof_device_is_compatible(dt_node, \"fsl,qman-portal-3.1.2\") ||\n+\t\tof_device_is_compatible(dt_node, \"fsl,qman-portal-3.1.3\"))\n+\t\tqman_ip_rev = QMAN_REV31;\n+\telse if (of_device_is_compatible(dt_node, \"fsl,qman-portal-3.2.0\") ||\n+\t\t of_device_is_compatible(dt_node, \"fsl,qman-portal-3.2.1\"))\n+\t\tqman_ip_rev = QMAN_REV32;\n+\telse\n+\t\tqman_ip_rev = QMAN_REV11;\n+\n+\tif (!qman_ip_rev) {\n+\t\tpr_err(\"Unknown qman portal version\\n\");\n+\t\treturn -ENODEV;\n+\t}\n+\tif ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {\n+\t\tqm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;\n+\t\tqm_channel_caam = QMAN_CHANNEL_CAAM_REV3;\n+\t\tqm_channel_pme = QMAN_CHANNEL_PME_REV3;\n+\t}\n+\n+\tdt_node = of_find_compatible_node(NULL, NULL, \"fsl,pool-channel-range\");\n+\tif (!dt_node) {\n+\t\tpr_err(\"No qman pool channel range available\\n\");\n+\t\treturn -ENODEV;\n+\t}\n+\tchanid = of_get_property(dt_node, \"fsl,pool-channel-range\", &lenp);\n+\tif (!chanid) {\n+\t\tpr_err(\"Can not get pool-channel-range property\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* get ccsr base */\n+\tdt_node = of_find_compatible_node(NULL, NULL, \"fsl,qman\");\n+\tif (!dt_node) {\n+\t\tpr_err(\"No qman device node available\\n\");\n+\t\treturn -ENODEV;\n+\t}\n+\tqman_addr = of_get_address(dt_node, 0, &regs_size, NULL);\n+\tif (!qman_addr) {\n+\t\tpr_err(\"of_get_address cannot return qman address\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\tphys_addr = of_translate_address(dt_node, qman_addr);\n+\tif (!phys_addr) {\n+\t\tpr_err(\"of_translate_address failed\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tccsr_map_fd = 
open(\"/dev/mem\", O_RDWR);\n+\tif (unlikely(ccsr_map_fd < 0)) {\n+\t\tpr_err(\"Can not open /dev/mem for qman ccsr map\\n\");\n+\t\treturn ccsr_map_fd;\n+\t}\n+\n+\tqman_ccsr_map = mmap(NULL, regs_size, PROT_READ | PROT_WRITE,\n+\t\t\t     MAP_SHARED, ccsr_map_fd, phys_addr);\n+\tif (qman_ccsr_map == MAP_FAILED) {\n+\t\tpr_err(\"Can not map qman ccsr base\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tclk = of_get_property(dt_node, \"clock-frequency\", NULL);\n+\tif (!clk)\n+\t\tpr_warn(\"Can't find Qman clock frequency\\n\");\n+\telse\n+\t\tqman_clk = be32_to_cpu(*clk);\n+\n+\treturn ret;\n+}\ndiff --git a/drivers/bus/dpaa/base/qbman/qman_priv.h b/drivers/bus/dpaa/base/qbman/qman_priv.h\nnew file mode 100644\nindex 0000000..4a11e40\n--- /dev/null\n+++ b/drivers/bus/dpaa/base/qbman/qman_priv.h\n@@ -0,0 +1,303 @@\n+/*-\n+ * This file is provided under a dual BSD/GPLv2 license. When using or\n+ * redistributing this file, you may do so under either license.\n+ *\n+ *   BSD LICENSE\n+ *\n+ * Copyright 2008-2016 Freescale Semiconductor Inc.\n+ * Copyright 2017 NXP.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ * * Redistributions of source code must retain the above copyright\n+ * notice, this list of conditions and the following disclaimer.\n+ * * Redistributions in binary form must reproduce the above copyright\n+ * notice, this list of conditions and the following disclaimer in the\n+ * documentation and/or other materials provided with the distribution.\n+ * * Neither the name of the above-listed copyright holders nor the\n+ * names of any contributors may be used to endorse or promote products\n+ * derived from this software without specific prior written permission.\n+ *\n+ *   GPL LICENSE SUMMARY\n+ *\n+ * ALTERNATIVELY, this software may be distributed under the terms of the\n+ * GNU General Public License (\"GPL\") as published by the Free Software\n+ * Foundation, either version 2 of that License or (at your option) any\n+ * later version.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE\n+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n+ * POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef __QMAN_PRIV_H\n+#define __QMAN_PRIV_H\n+\n+#include \"dpaa_sys.h\"\n+#include <fsl_qman.h>\n+\n+/* Congestion Groups */\n+/*\n+ * This wrapper represents a bit-array for the state of the 256 QMan congestion\n+ * groups. Is also used as a *mask* for congestion groups, eg. so we ignore\n+ * those that don't concern us. 
We harness the structure and accessor details\n+ * already used in the management command to query congestion groups.\n+ */\n+struct qman_cgrs {\n+\tstruct __qm_mcr_querycongestion q;\n+};\n+\n+static inline void qman_cgrs_init(struct qman_cgrs *c)\n+{\n+\tmemset(c, 0, sizeof(*c));\n+}\n+\n+static inline void qman_cgrs_fill(struct qman_cgrs *c)\n+{\n+\tmemset(c, 0xff, sizeof(*c));\n+}\n+\n+static inline int qman_cgrs_get(struct qman_cgrs *c, int num)\n+{\n+\treturn QM_MCR_QUERYCONGESTION(&c->q, num);\n+}\n+\n+static inline void qman_cgrs_set(struct qman_cgrs *c, int num)\n+{\n+\tc->q.state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));\n+}\n+\n+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)\n+{\n+\tc->q.state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));\n+}\n+\n+static inline int qman_cgrs_next(struct qman_cgrs *c, int num)\n+{\n+\twhile ((++num < (int)__CGR_NUM) && !qman_cgrs_get(c, num))\n+\t\t;\n+\treturn num;\n+}\n+\n+static inline void qman_cgrs_cp(struct qman_cgrs *dest,\n+\t\t\t\tconst struct qman_cgrs *src)\n+{\n+\tmemcpy(dest, src, sizeof(*dest));\n+}\n+\n+static inline void qman_cgrs_and(struct qman_cgrs *dest,\n+\t\t\t\t const struct qman_cgrs *a,\n+\t\t\t\t const struct qman_cgrs *b)\n+{\n+\tint ret;\n+\tu32 *_d = dest->q.state;\n+\tconst u32 *_a = a->q.state;\n+\tconst u32 *_b = b->q.state;\n+\n+\tfor (ret = 0; ret < 8; ret++)\n+\t\t*(_d++) = *(_a++) & *(_b++);\n+}\n+\n+static inline void qman_cgrs_xor(struct qman_cgrs *dest,\n+\t\t\t\t const struct qman_cgrs *a,\n+\t\t\t\t const struct qman_cgrs *b)\n+{\n+\tint ret;\n+\tu32 *_d = dest->q.state;\n+\tconst u32 *_a = a->q.state;\n+\tconst u32 *_b = b->q.state;\n+\n+\tfor (ret = 0; ret < 8; ret++)\n+\t\t*(_d++) = *(_a++) ^ *(_b++);\n+}\n+\n+/* used by CCSR and portal interrupt code */\n+enum qm_isr_reg {\n+\tqm_isr_status = 0,\n+\tqm_isr_enable = 1,\n+\tqm_isr_disable = 2,\n+\tqm_isr_inhibit = 3\n+};\n+\n+struct qm_portal_config {\n+\t/*\n+\t * Corenet portal addresses;\n+\t * [0]==cache-enabled, [1]==cache-inhibited.\n+\t */\n+\tvoid __iomem *addr_virt[2];\n+\tstruct device_node *node;\n+\t/* Allow these to be joined in lists */\n+\tstruct list_head list;\n+\t/* User-visible portal configuration settings */\n+\t/* If the caller enables DQRR stashing (and thus wishes to operate the\n+\t * portal from only one cpu), this is the logical CPU that the portal\n+\t * will stash to. Whether stashing is enabled or not, this setting is\n+\t * also used for any \"core-affine\" portals, ie. default portals\n+\t * associated to the corresponding cpu. -1 implies that there is no\n+\t * core affinity configured.\n+\t */\n+\tint cpu;\n+\t/* portal interrupt line */\n+\tint irq;\n+\t/* the unique index of this portal */\n+\tu32 index;\n+\t/* Is this portal shared? 
(If so, it has coarser locking and demuxes\n+\t * processing on behalf of other CPUs.).\n+\t */\n+\tint is_shared;\n+\t/* The portal's dedicated channel id, use this value for initialising\n+\t * frame queues to target this portal when scheduled.\n+\t */\n+\tu16 channel;\n+\t/* A mask of which pool channels this portal has dequeue access to\n+\t * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask).\n+\t */\n+\tu32 pools;\n+\n+};\n+\n+/* Revision info (for errata and feature handling) */\n+#define QMAN_REV11 0x0101\n+#define QMAN_REV12 0x0102\n+#define QMAN_REV20 0x0200\n+#define QMAN_REV30 0x0300\n+#define QMAN_REV31 0x0301\n+#define QMAN_REV32 0x0302\n+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */\n+extern u32 qman_clk;\n+\n+int qm_set_wpm(int wpm);\n+int qm_get_wpm(int *wpm);\n+\n+struct qman_portal *qman_create_affine_portal(\n+\t\t\tconst struct qm_portal_config *config,\n+\t\t\tconst struct qman_cgrs *cgrs);\n+const struct qm_portal_config *qman_destroy_affine_portal(void);\n+\n+struct qm_portal_config *qm_get_unused_portal(void);\n+struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);\n+\n+void qm_put_unused_portal(struct qm_portal_config *pcfg);\n+void qm_set_liodns(struct qm_portal_config *pcfg);\n+\n+/* This CGR feature is supported by h/w and required by unit-tests and the\n+ * debugfs hooks, so is implemented in the driver. However it allows an explicit\n+ * corruption of h/w fields by s/w that are usually incorruptible (because the\n+ * counters are usually maintained entirely within h/w). As such, we declare\n+ * this API internally.\n+ */\n+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,\n+\t\t       struct qm_mcr_cgrtestwrite *result);\n+\n+/*   QMan s/w corenet portal, low-level i/face\t */\n+\n+/*\n+ * For Choose one SOURCE. Choose one COUNT. Choose one\n+ * dequeue TYPE. 
Choose TOKEN (8-bit).\n+ * If SOURCE == CHANNELS,\n+ *   Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).\n+ *   You can choose DEDICATED_PRECEDENCE if the portal channel should have\n+ *   priority.\n+ * If SOURCE == SPECIFICWQ,\n+ *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the\n+ *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the\n+ *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the\n+ *     same value.\n+ */\n+#define QM_SDQCR_SOURCE_CHANNELS\t0x0\n+#define QM_SDQCR_SOURCE_SPECIFICWQ\t0x40000000\n+#define QM_SDQCR_COUNT_EXACT1\t\t0x0\n+#define QM_SDQCR_COUNT_UPTO3\t\t0x20000000\n+#define QM_SDQCR_DEDICATED_PRECEDENCE\t0x10000000\n+#define QM_SDQCR_TYPE_MASK\t\t0x03000000\n+#define QM_SDQCR_TYPE_NULL\t\t0x0\n+#define QM_SDQCR_TYPE_PRIO_QOS\t\t0x01000000\n+#define QM_SDQCR_TYPE_ACTIVE_QOS\t0x02000000\n+#define QM_SDQCR_TYPE_ACTIVE\t\t0x03000000\n+#define QM_SDQCR_TOKEN_MASK\t\t0x00ff0000\n+#define QM_SDQCR_TOKEN_SET(v)\t\t(((v) & 0xff) << 16)\n+#define QM_SDQCR_TOKEN_GET(v)\t\t(((v) >> 16) & 0xff)\n+#define QM_SDQCR_CHANNELS_DEDICATED\t0x00008000\n+#define QM_SDQCR_SPECIFICWQ_MASK\t0x000000f7\n+#define QM_SDQCR_SPECIFICWQ_DEDICATED\t0x00000000\n+#define QM_SDQCR_SPECIFICWQ_POOL(n)\t((n) << 4)\n+#define QM_SDQCR_SPECIFICWQ_WQ(n)\t(n)\n+\n+#define QM_VDQCR_FQID_MASK\t\t0x00ffffff\n+#define QM_VDQCR_FQID(n)\t\t((n) & QM_VDQCR_FQID_MASK)\n+\n+#define QM_EQCR_VERB_VBIT\t\t0x80\n+#define QM_EQCR_VERB_CMD_MASK\t\t0x61\t/* but only one value; */\n+#define QM_EQCR_VERB_CMD_ENQUEUE\t0x01\n+#define QM_EQCR_VERB_COLOUR_MASK\t0x18\t/* 4 possible values; */\n+#define QM_EQCR_VERB_COLOUR_GREEN\t0x00\n+#define QM_EQCR_VERB_COLOUR_YELLOW\t0x08\n+#define QM_EQCR_VERB_COLOUR_RED\t\t0x10\n+#define QM_EQCR_VERB_COLOUR_OVERRIDE\t0x18\n+#define QM_EQCR_VERB_INTERRUPT\t\t0x04\t/* on command consumption */\n+#define QM_EQCR_VERB_ORP\t\t0x02\t/* enable order restoration */\n+#define QM_EQCR_DCA_ENABLE\t\t0x80\n+#define QM_EQCR_DCA_PARK\t\t0x40\n+#define QM_EQCR_DCA_IDXMASK\t\t0x0f\t/* \"DQRR::idx\" goes here */\n+#define QM_EQCR_SEQNUM_NESN\t\t0x8000\t/* Advance NESN */\n+#define QM_EQCR_SEQNUM_NLIS\t\t0x4000\t/* More fragments to come */\n+#define QM_EQCR_SEQNUM_SEQMASK\t\t0x3fff\t/* sequence number goes here */\n+#define QM_EQCR_FQID_NULL\t\t0\t/* eg. 
for an ORP seqnum hole */\n+\n+#define QM_MCC_VERB_VBIT\t\t0x80\n+#define QM_MCC_VERB_MASK\t\t0x7f\t/* where the verb contains; */\n+#define QM_MCC_VERB_INITFQ_PARKED\t0x40\n+#define QM_MCC_VERB_INITFQ_SCHED\t0x41\n+#define QM_MCC_VERB_QUERYFQ\t\t0x44\n+#define QM_MCC_VERB_QUERYFQ_NP\t\t0x45\t/* \"non-programmable\" fields */\n+#define QM_MCC_VERB_QUERYWQ\t\t0x46\n+#define QM_MCC_VERB_QUERYWQ_DEDICATED\t0x47\n+#define QM_MCC_VERB_ALTER_SCHED\t\t0x48\t/* Schedule FQ */\n+#define QM_MCC_VERB_ALTER_FE\t\t0x49\t/* Force Eligible FQ */\n+#define QM_MCC_VERB_ALTER_RETIRE\t0x4a\t/* Retire FQ */\n+#define QM_MCC_VERB_ALTER_OOS\t\t0x4b\t/* Take FQ out of service */\n+#define QM_MCC_VERB_ALTER_FQXON\t\t0x4d\t/* FQ XON */\n+#define QM_MCC_VERB_ALTER_FQXOFF\t0x4e\t/* FQ XOFF */\n+#define QM_MCC_VERB_INITCGR\t\t0x50\n+#define QM_MCC_VERB_MODIFYCGR\t\t0x51\n+#define QM_MCC_VERB_CGRTESTWRITE\t0x52\n+#define QM_MCC_VERB_QUERYCGR\t\t0x58\n+#define QM_MCC_VERB_QUERYCONGESTION\t0x59\n+\n+/*\n+ * Used by all portal interrupt registers except 'inhibit'\n+ * Channels with frame availability\n+ */\n+#define QM_PIRQ_DQAVAIL\t0x0000ffff\n+\n+/* The DQAVAIL interrupt fields break down into these bits; */\n+#define QM_DQAVAIL_PORTAL\t0x8000\t\t/* Portal channel */\n+#define QM_DQAVAIL_POOL(n)\t(0x8000 >> (n))\t/* Pool channel, n==[1..15] */\n+#define QM_DQAVAIL_MASK\t\t0xffff\n+/* This mask contains all the \"irqsource\" bits visible to API users */\n+#define QM_PIRQ_VISIBLE\t(QM_PIRQ_SLOW | QM_PIRQ_DQRI)\n+\n+/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means \"write\n+ * the disable register\" rather than \"disable the ability to write\".\n+ */\n+#define qm_isr_status_read(qm)\t\t__qm_isr_read(qm, qm_isr_status)\n+#define qm_isr_status_clear(qm, m)\t__qm_isr_write(qm, qm_isr_status, m)\n+#define qm_isr_enable_read(qm)\t\t__qm_isr_read(qm, qm_isr_enable)\n+#define qm_isr_enable_write(qm, v)\t__qm_isr_write(qm, qm_isr_enable, v)\n+#define qm_isr_disable_read(qm)\t\t__qm_isr_read(qm, qm_isr_disable)\n+#define qm_isr_disable_write(qm, v)\t__qm_isr_write(qm, qm_isr_disable, v)\n+/* TODO: unfortunate name-clash here, reword? */\n+#define qm_isr_inhibit(qm)\t\t__qm_isr_write(qm, qm_isr_inhibit, 1)\n+#define qm_isr_uninhibit(qm)\t\t__qm_isr_write(qm, qm_isr_inhibit, 0)\n+\n+#define QMAN_PORTAL_IRQ_PATH \"/dev/fsl-usdpaa-irq\"\n+\n+#endif /* _QMAN_PRIV_H */\ndiff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h\nnew file mode 100644\nindex 0000000..784fe60\n--- /dev/null\n+++ b/drivers/bus/dpaa/include/fsl_qman.h\n@@ -0,0 +1,1254 @@\n+/*-\n+ * This file is provided under a dual BSD/GPLv2 license. 
When using or\n+ * redistributing this file, you may do so under either license.\n+ *\n+ *   BSD LICENSE\n+ *\n+ * Copyright 2008-2012 Freescale Semiconductor, Inc.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ * * Redistributions of source code must retain the above copyright\n+ * notice, this list of conditions and the following disclaimer.\n+ * * Redistributions in binary form must reproduce the above copyright\n+ * notice, this list of conditions and the following disclaimer in the\n+ * documentation and/or other materials provided with the distribution.\n+ * * Neither the name of the above-listed copyright holders nor the\n+ * names of any contributors may be used to endorse or promote products\n+ * derived from this software without specific prior written permission.\n+ *\n+ *   GPL LICENSE SUMMARY\n+ *\n+ * ALTERNATIVELY, this software may be distributed under the terms of the\n+ * GNU General Public License (\"GPL\") as published by the Free Software\n+ * Foundation, either version 2 of that License or (at your option) any\n+ * later version.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE\n+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n+ * POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef __FSL_QMAN_H\n+#define __FSL_QMAN_H\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#include <dpaa_rbtree.h>\n+\n+/* Last updated for v00.800 of the BG */\n+\n+/* Hardware constants */\n+#define QM_CHANNEL_SWPORTAL0 0\n+#define QMAN_CHANNEL_POOL1 0x21\n+#define QMAN_CHANNEL_CAAM 0x80\n+#define QMAN_CHANNEL_PME 0xa0\n+#define QMAN_CHANNEL_POOL1_REV3 0x401\n+#define QMAN_CHANNEL_CAAM_REV3 0x840\n+#define QMAN_CHANNEL_PME_REV3 0x860\n+extern u16 qm_channel_pool1;\n+extern u16 qm_channel_caam;\n+extern u16 qm_channel_pme;\n+enum qm_dc_portal {\n+\tqm_dc_portal_fman0 = 0,\n+\tqm_dc_portal_fman1 = 1,\n+\tqm_dc_portal_caam = 2,\n+\tqm_dc_portal_pme = 3\n+};\n+\n+/* Portal processing (interrupt) sources */\n+#define QM_PIRQ_CCSCI\t0x00200000\t/* CEETM Congestion State Change */\n+#define QM_PIRQ_CSCI\t0x00100000\t/* Congestion State Change */\n+#define QM_PIRQ_EQCI\t0x00080000\t/* Enqueue Command Committed */\n+#define QM_PIRQ_EQRI\t0x00040000\t/* EQCR Ring (below threshold) */\n+#define QM_PIRQ_DQRI\t0x00020000\t/* DQRR Ring (non-empty) */\n+#define QM_PIRQ_MRI\t0x00010000\t/* MR Ring (non-empty) */\n+/*\n+ * This mask contains all the interrupt sources that need handling except DQRI,\n+ * ie. 
that if present should trigger slow-path processing.\n+ */\n+#define QM_PIRQ_SLOW\t(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \\\n+\t\t\tQM_PIRQ_MRI | QM_PIRQ_CCSCI)\n+\n+/* For qman_static_dequeue_*** APIs */\n+#define QM_SDQCR_CHANNELS_POOL_MASK\t0x00007fff\n+/* for n in [1,15] */\n+#define QM_SDQCR_CHANNELS_POOL(n)\t(0x00008000 >> (n))\n+/* for conversion from n of qm_channel */\n+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)\n+{\n+\treturn QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);\n+}\n+\n+/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use\n+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use\n+ * FQID(n) to fill in the frame queue ID.\n+ */\n+#define QM_VDQCR_PRECEDENCE_VDQCR\t0x0\n+#define QM_VDQCR_PRECEDENCE_SDQCR\t0x80000000\n+#define QM_VDQCR_EXACT\t\t\t0x40000000\n+#define QM_VDQCR_NUMFRAMES_MASK\t\t0x3f000000\n+#define QM_VDQCR_NUMFRAMES_SET(n)\t(((n) & 0x3f) << 24)\n+#define QM_VDQCR_NUMFRAMES_GET(n)\t(((n) >> 24) & 0x3f)\n+#define QM_VDQCR_NUMFRAMES_TILLEMPTY\tQM_VDQCR_NUMFRAMES_SET(0)\n+\n+/* --- QMan data structures (and associated constants) --- */\n+\n+/* Represents s/w corenet portal mapped data structures */\n+struct qm_eqcr_entry;\t/* EQCR (EnQueue Command Ring) entries */\n+struct qm_dqrr_entry;\t/* DQRR (DeQueue Response Ring) entries */\n+struct qm_mr_entry;\t/* MR (Message Ring) entries */\n+struct qm_mc_command;\t/* MC (Management Command) command */\n+struct qm_mc_result;\t/* MC result */\n+\n+#define QM_FD_FORMAT_SG\t\t0x4\n+#define QM_FD_FORMAT_LONG\t0x2\n+#define QM_FD_FORMAT_COMPOUND\t0x1\n+enum qm_fd_format {\n+\t/*\n+\t * 'contig' implies a contiguous buffer, whereas 'sg' implies a\n+\t * scatter-gather table. 'big' implies a 29-bit length with no offset\n+\t * field, otherwise length is 20-bit and offset is 9-bit. 'compound'\n+\t * implies a s/g-like table, where each entry itself represents a frame\n+\t * (contiguous or scatter-gather) and the 29-bit \"length\" is\n+\t * interpreted purely for congestion calculations, ie. 
a \"congestion\n+\t * weight\".\n+\t */\n+\tqm_fd_contig = 0,\n+\tqm_fd_contig_big = QM_FD_FORMAT_LONG,\n+\tqm_fd_sg = QM_FD_FORMAT_SG,\n+\tqm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,\n+\tqm_fd_compound = QM_FD_FORMAT_COMPOUND\n+};\n+\n+/* Capitalised versions are un-typed but can be used in static expressions */\n+#define QM_FD_CONTIG\t0\n+#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG\n+#define QM_FD_SG\tQM_FD_FORMAT_SG\n+#define QM_FD_SG_BIG\t(QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)\n+#define QM_FD_COMPOUND\tQM_FD_FORMAT_COMPOUND\n+\n+/* \"Frame Descriptor (FD)\" */\n+struct qm_fd {\n+\tunion {\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu8 dd:2;\t/* dynamic debug */\n+\t\t\tu8 liodn_offset:6;\n+\t\t\tu8 bpid:8;\t/* Buffer Pool ID */\n+\t\t\tu8 eliodn_offset:4;\n+\t\t\tu8 __reserved:4;\n+\t\t\tu8 addr_hi;\t/* high 8-bits of 40-bit address */\n+\t\t\tu32 addr_lo;\t/* low 32-bits of 40-bit address */\n+#else\n+\t\t\tu8 liodn_offset:6;\n+\t\t\tu8 dd:2;\t/* dynamic debug */\n+\t\t\tu8 bpid:8;\t/* Buffer Pool ID */\n+\t\t\tu8 __reserved:4;\n+\t\t\tu8 eliodn_offset:4;\n+\t\t\tu8 addr_hi;\t/* high 8-bits of 40-bit address */\n+\t\t\tu32 addr_lo;\t/* low 32-bits of 40-bit address */\n+#endif\n+\t\t};\n+\t\tstruct {\n+\t\t\tu64 __notaddress:24;\n+\t\t\t/* More efficient address accessor */\n+\t\t\tu64 addr:40;\n+\t\t};\n+\t\tu64 opaque_addr;\n+\t};\n+\t/* The 'format' field indicates the interpretation of the remaining 29\n+\t * bits of the 32-bit word. For packing reasons, it is duplicated in the\n+\t * other union elements. Note, union'd structs are difficult to use with\n+\t * static initialisation under gcc, in which case use the \"opaque\" form\n+\t * with one of the macros.\n+\t */\n+\tunion {\n+\t\t/* For easier/faster copying of this part of the fd (eg. 
from a\n+\t\t * DQRR entry to an EQCR entry) copy 'opaque'\n+\t\t */\n+\t\tu32 opaque;\n+\t\t/* If 'format' is _contig or _sg, 20b length and 9b offset */\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tenum qm_fd_format format:3;\n+\t\t\tu16 offset:9;\n+\t\t\tu32 length20:20;\n+#else\n+\t\t\tu32 length20:20;\n+\t\t\tu16 offset:9;\n+\t\t\tenum qm_fd_format format:3;\n+#endif\n+\t\t};\n+\t\t/* If 'format' is _contig_big or _sg_big, 29b length */\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tenum qm_fd_format _format1:3;\n+\t\t\tu32 length29:29;\n+#else\n+\t\t\tu32 length29:29;\n+\t\t\tenum qm_fd_format _format1:3;\n+#endif\n+\t\t};\n+\t\t/* If 'format' is _compound, 29b \"congestion weight\" */\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tenum qm_fd_format _format2:3;\n+\t\t\tu32 cong_weight:29;\n+#else\n+\t\t\tu32 cong_weight:29;\n+\t\t\tenum qm_fd_format _format2:3;\n+#endif\n+\t\t};\n+\t};\n+\tunion {\n+\t\tu32 cmd;\n+\t\tu32 status;\n+\t};\n+} __attribute__((aligned(8)));\n+#define QM_FD_DD_NULL\t\t0x00\n+#define QM_FD_PID_MASK\t\t0x3f\n+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)\n+{\n+\treturn fd->addr;\n+}\n+\n+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)\n+{\n+\treturn (dma_addr_t)fd->addr;\n+}\n+\n+/* Macro, so we compile better if 'v' isn't always 64-bit */\n+#define qm_fd_addr_set64(fd, v) \\\n+\tdo { \\\n+\t\tstruct qm_fd *__fd931 = (fd); \\\n+\t\t__fd931->addr = v; \\\n+\t} while (0)\n+\n+/* Scatter/Gather table entry */\n+struct qm_sg_entry {\n+\tunion {\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu8 __reserved1[3];\n+\t\t\tu8 addr_hi;\t/* high 8-bits of 40-bit address */\n+\t\t\tu32 addr_lo;\t/* low 32-bits of 40-bit address */\n+#else\n+\t\t\tu32 addr_lo;\t/* low 32-bits of 40-bit address */\n+\t\t\tu8 addr_hi;\t/* high 8-bits of 40-bit address */\n+\t\t\tu8 __reserved1[3];\n+#endif\n+\t\t};\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu64 __notaddress:24;\n+\t\t\tu64 addr:40;\n+#else\n+\t\t\tu64 addr:40;\n+\t\t\tu64 __notaddress:24;\n+#endif\n+\t\t};\n+\t\tu64 opaque;\n+\t};\n+\tunion {\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu32 extension:1;\t/* Extension bit */\n+\t\t\tu32 final:1;\t\t/* Final bit */\n+\t\t\tu32 length:30;\n+#else\n+\t\t\tu32 length:30;\n+\t\t\tu32 final:1;\t\t/* Final bit */\n+\t\t\tu32 extension:1;\t/* Extension bit */\n+#endif\n+\t\t};\n+\t\tu32 val;\n+\t};\n+\tu8 __reserved2;\n+\tu8 bpid;\n+\tunion {\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu16 __reserved3:3;\n+\t\t\tu16 offset:13;\n+#else\n+\t\t\tu16 offset:13;\n+\t\t\tu16 __reserved3:3;\n+#endif\n+\t\t};\n+\t\tu16 val_off;\n+\t};\n+} __packed;\n+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)\n+{\n+\treturn sg->addr;\n+}\n+\n+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)\n+{\n+\treturn (dma_addr_t)sg->addr;\n+}\n+\n+/* Macro, so we compile better if 'v' isn't always 64-bit */\n+#define qm_sg_entry_set64(sg, v) \\\n+\tdo { \\\n+\t\tstruct qm_sg_entry *__sg931 = (sg); \\\n+\t\t__sg931->addr = v; \\\n+\t} while (0)\n+\n+/* See 1.5.8.1: \"Enqueue Command\" */\n+struct qm_eqcr_entry {\n+\tu8 __dont_write_directly__verb;\n+\tu8 dca;\n+\tu16 seqnum;\n+\tu32 orp;\t/* 24-bit */\n+\tu32 fqid;\t/* 24-bit */\n+\tu32 tag;\n+\tstruct qm_fd fd;\n+\tu8 __reserved3[32];\n+} __packed;\n+\n+\n+/* \"Frame Dequeue Response\" */\n+struct qm_dqrr_entry {\n+\tu8 verb;\n+\tu8 stat;\n+\tu16 
seqnum;\t/* 15-bit */\n+\tu8 tok;\n+\tu8 __reserved2[3];\n+\tu32 fqid;\t/* 24-bit */\n+\tu32 contextB;\n+\tstruct qm_fd fd;\n+\tu8 __reserved4[32];\n+};\n+\n+#define QM_DQRR_VERB_VBIT\t\t0x80\n+#define QM_DQRR_VERB_MASK\t\t0x7f\t/* where the verb contains; */\n+#define QM_DQRR_VERB_FRAME_DEQUEUE\t0x60\t/* \"this format\" */\n+#define QM_DQRR_STAT_FQ_EMPTY\t\t0x80\t/* FQ empty */\n+#define QM_DQRR_STAT_FQ_HELDACTIVE\t0x40\t/* FQ held active */\n+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE\t0x20\t/* FQ was force-eligible'd */\n+#define QM_DQRR_STAT_FD_VALID\t\t0x10\t/* has a non-NULL FD */\n+#define QM_DQRR_STAT_UNSCHEDULED\t0x02\t/* Unscheduled dequeue */\n+#define QM_DQRR_STAT_DQCR_EXPIRED\t0x01\t/* VDQCR or PDQCR expired*/\n+\n+\n+/* \"ERN Message Response\" */\n+/* \"FQ State Change Notification\" */\n+struct qm_mr_entry {\n+\tu8 verb;\n+\tunion {\n+\t\tstruct {\n+\t\t\tu8 dca;\n+\t\t\tu16 seqnum;\n+\t\t\tu8 rc;\t\t/* Rejection Code */\n+\t\t\tu32 orp:24;\n+\t\t\tu32 fqid;\t/* 24-bit */\n+\t\t\tu32 tag;\n+\t\t\tstruct qm_fd fd;\n+\t\t} __packed ern;\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu8 colour:2;\t/* See QM_MR_DCERN_COLOUR_* */\n+\t\t\tu8 __reserved1:4;\n+\t\t\tenum qm_dc_portal portal:2;\n+#else\n+\t\t\tenum qm_dc_portal portal:3;\n+\t\t\tu8 __reserved1:3;\n+\t\t\tu8 colour:2;\t/* See QM_MR_DCERN_COLOUR_* */\n+#endif\n+\t\t\tu16 __reserved2;\n+\t\t\tu8 rc;\t\t/* Rejection Code */\n+\t\t\tu32 __reserved3:24;\n+\t\t\tu32 fqid;\t/* 24-bit */\n+\t\t\tu32 tag;\n+\t\t\tstruct qm_fd fd;\n+\t\t} __packed dcern;\n+\t\tstruct {\n+\t\t\tu8 fqs;\t\t/* Frame Queue Status */\n+\t\t\tu8 __reserved1[6];\n+\t\t\tu32 fqid;\t/* 24-bit */\n+\t\t\tu32 contextB;\n+\t\t\tu8 __reserved2[16];\n+\t\t} __packed fq;\t\t/* FQRN/FQRNI/FQRL/FQPN */\n+\t};\n+\tu8 __reserved2[32];\n+} __packed;\n+#define QM_MR_VERB_VBIT\t\t\t0x80\n+/*\n+ * ERNs originating from direct-connect portals (\"dcern\") use 0x20 as a verb\n+ * which would be invalid as a s/w enqueue verb. 
A s/w ERN can be distinguished\n+ * from the other MR types by noting if the 0x20 bit is unset.\n+ */\n+#define QM_MR_VERB_TYPE_MASK\t\t0x27\n+#define QM_MR_VERB_DC_ERN\t\t0x20\n+#define QM_MR_VERB_FQRN\t\t\t0x21\n+#define QM_MR_VERB_FQRNI\t\t0x22\n+#define QM_MR_VERB_FQRL\t\t\t0x23\n+#define QM_MR_VERB_FQPN\t\t\t0x24\n+#define QM_MR_RC_MASK\t\t\t0xf0\t/* contains one of; */\n+#define QM_MR_RC_CGR_TAILDROP\t\t0x00\n+#define QM_MR_RC_WRED\t\t\t0x10\n+#define QM_MR_RC_ERROR\t\t\t0x20\n+#define QM_MR_RC_ORPWINDOW_EARLY\t0x30\n+#define QM_MR_RC_ORPWINDOW_LATE\t\t0x40\n+#define QM_MR_RC_FQ_TAILDROP\t\t0x50\n+#define QM_MR_RC_ORPWINDOW_RETIRED\t0x60\n+#define QM_MR_RC_ORP_ZERO\t\t0x70\n+#define QM_MR_FQS_ORLPRESENT\t\t0x02\t/* ORL fragments to come */\n+#define QM_MR_FQS_NOTEMPTY\t\t0x01\t/* FQ has enqueued frames */\n+#define QM_MR_DCERN_COLOUR_GREEN\t0x00\n+#define QM_MR_DCERN_COLOUR_YELLOW\t0x01\n+#define QM_MR_DCERN_COLOUR_RED\t\t0x02\n+#define QM_MR_DCERN_COLOUR_OVERRIDE\t0x03\n+/*\n+ * An identical structure of FQD fields is present in the \"Init FQ\" command and\n+ * the \"Query FQ\" result, it's suctioned out into the \"struct qm_fqd\" type.\n+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the\n+ * latter has two inlines to assist with converting to/from the mant+exp\n+ * representation.\n+ */\n+struct qm_fqd_stashing {\n+\t/* See QM_STASHING_EXCL_<...> */\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\tu8 exclusive;\n+\tu8 __reserved1:2;\n+\t/* Numbers of cachelines */\n+\tu8 annotation_cl:2;\n+\tu8 data_cl:2;\n+\tu8 context_cl:2;\n+#else\n+\tu8 context_cl:2;\n+\tu8 data_cl:2;\n+\tu8 annotation_cl:2;\n+\tu8 __reserved1:2;\n+\tu8 exclusive;\n+#endif\n+} __packed;\n+struct qm_fqd_taildrop {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\tu16 __reserved1:3;\n+\tu16 mant:8;\n+\tu16 exp:5;\n+#else\n+\tu16 exp:5;\n+\tu16 mant:8;\n+\tu16 __reserved1:3;\n+#endif\n+} __packed;\n+struct qm_fqd_oac {\n+\t/* \"Overhead Accounting Control\", see QM_OAC_<...> */\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\tu8 oac:2; /* \"Overhead Accounting Control\" */\n+\tu8 __reserved1:6;\n+#else\n+\tu8 __reserved1:6;\n+\tu8 oac:2; /* \"Overhead Accounting Control\" */\n+#endif\n+\t/* Two's-complement value (-128 to +127) */\n+\tsigned char oal; /* \"Overhead Accounting Length\" */\n+} __packed;\n+struct qm_fqd {\n+\tunion {\n+\t\tu8 orpc;\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu8 __reserved1:2;\n+\t\t\tu8 orprws:3;\n+\t\t\tu8 oa:1;\n+\t\t\tu8 olws:2;\n+#else\n+\t\t\tu8 olws:2;\n+\t\t\tu8 oa:1;\n+\t\t\tu8 orprws:3;\n+\t\t\tu8 __reserved1:2;\n+#endif\n+\t\t} __packed;\n+\t};\n+\tu8 cgid;\n+\tu16 fq_ctrl;\t/* See QM_FQCTRL_<...> */\n+\tunion {\n+\t\tu16 dest_wq;\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu16 channel:13; /* qm_channel */\n+\t\t\tu16 wq:3;\n+#else\n+\t\t\tu16 wq:3;\n+\t\t\tu16 channel:13; /* qm_channel */\n+#endif\n+\t\t} __packed dest;\n+\t};\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\tu16 __reserved2:1;\n+\tu16 ics_cred:15;\n+#else\n+\tu16 __reserved2:1;\n+\tu16 ics_cred:15;\n+#endif\n+\t/*\n+\t * For \"Initialize Frame Queue\" commands, the write-enable mask\n+\t * determines whether 'td' or 'oac_init' is observed. 
For query\n+\t * commands, this field is always 'td', and 'oac_query' (below) reflects\n+\t * the Overhead ACcounting values.\n+\t */\n+\tunion {\n+\t\tuint16_t opaque_td;\n+\t\tstruct qm_fqd_taildrop td;\n+\t\tstruct qm_fqd_oac oac_init;\n+\t};\n+\tu32 context_b;\n+\tunion {\n+\t\t/* Treat it as 64-bit opaque */\n+\t\tu64 opaque;\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu32 hi;\n+\t\t\tu32 lo;\n+#else\n+\t\t\tu32 lo;\n+\t\t\tu32 hi;\n+#endif\n+\t\t};\n+\t\t/* Treat it as s/w portal stashing config */\n+\t\t/* see \"FQD Context_A field used for [...]\" */\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tstruct qm_fqd_stashing stashing;\n+\t\t\t/*\n+\t\t\t * 48-bit address of FQ context to\n+\t\t\t * stash, must be cacheline-aligned\n+\t\t\t */\n+\t\t\tu16 context_hi;\n+\t\t\tu32 context_lo;\n+#else\n+\t\t\tu32 context_lo;\n+\t\t\tu16 context_hi;\n+\t\t\tstruct qm_fqd_stashing stashing;\n+#endif\n+\t\t} __packed;\n+\t} context_a;\n+\tstruct qm_fqd_oac oac_query;\n+} __packed;\n+/* 64-bit converters for context_hi/lo */\n+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)\n+{\n+\treturn ((u64)fqd->context_a.context_hi << 32) |\n+\t\t(u64)fqd->context_a.context_lo;\n+}\n+\n+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)\n+{\n+\treturn (dma_addr_t)qm_fqd_stashing_get64(fqd);\n+}\n+\n+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)\n+{\n+\treturn ((u64)fqd->context_a.hi << 32) |\n+\t\t(u64)fqd->context_a.lo;\n+}\n+\n+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)\n+{\n+\t\tfqd->context_a.context_hi = upper_32_bits(addr);\n+\t\tfqd->context_a.context_lo = lower_32_bits(addr);\n+}\n+\n+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)\n+{\n+\tfqd->context_a.hi = upper_32_bits(addr);\n+\tfqd->context_a.lo = lower_32_bits(addr);\n+}\n+\n+/* convert a threshold value into mant+exp representation */\n+static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,\n+\t\t\t\t      int roundup)\n+{\n+\tu32 e = 0;\n+\tint oddbit = 0;\n+\n+\tif (val > 0xe0000000)\n+\t\treturn -ERANGE;\n+\twhile (val > 0xff) {\n+\t\toddbit = val & 1;\n+\t\tval >>= 1;\n+\t\te++;\n+\t\tif (roundup && oddbit)\n+\t\t\tval++;\n+\t}\n+\ttd->exp = e;\n+\ttd->mant = val;\n+\treturn 0;\n+}\n+\n+/* and the other direction */\n+static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)\n+{\n+\treturn (u32)td->mant << td->exp;\n+}\n+\n+\n+/* See \"Frame Queue Descriptor (FQD)\" */\n+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */\n+#define QM_FQCTRL_MASK\t\t0x07ff\t/* 'fq_ctrl' flags; */\n+#define QM_FQCTRL_CGE\t\t0x0400\t/* Congestion Group Enable */\n+#define QM_FQCTRL_TDE\t\t0x0200\t/* Tail-Drop Enable */\n+#define QM_FQCTRL_ORP\t\t0x0100\t/* ORP Enable */\n+#define QM_FQCTRL_CTXASTASHING\t0x0080\t/* Context-A stashing */\n+#define QM_FQCTRL_CPCSTASH\t0x0040\t/* CPC Stash Enable */\n+#define QM_FQCTRL_FORCESFDR\t0x0008\t/* High-priority SFDRs */\n+#define QM_FQCTRL_AVOIDBLOCK\t0x0004\t/* Don't block active */\n+#define QM_FQCTRL_HOLDACTIVE\t0x0002\t/* Hold active in portal */\n+#define QM_FQCTRL_PREFERINCACHE\t0x0001\t/* Aggressively cache FQD */\n+#define QM_FQCTRL_LOCKINCACHE\tQM_FQCTRL_PREFERINCACHE /* older naming */\n+\n+/* See \"FQD Context_A field used for [...] 
*/\n+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */\n+#define QM_STASHING_EXCL_ANNOTATION\t0x04\n+#define QM_STASHING_EXCL_DATA\t\t0x02\n+#define QM_STASHING_EXCL_CTX\t\t0x01\n+\n+/* See \"Intra Class Scheduling\" */\n+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */\n+#define QM_OAC_ICS\t\t0x2 /* Accounting for Intra-Class Scheduling */\n+#define QM_OAC_CG\t\t0x1 /* Accounting for Congestion Groups */\n+\n+/*\n+ * This struct represents the 32-bit \"WR_PARM_[GYR]\" parameters in CGR fields\n+ * and associated commands/responses. The WRED parameters are calculated from\n+ * these fields as follows;\n+ *   MaxTH = MA * (2 ^ Mn)\n+ *   Slope = SA / (2 ^ Sn)\n+ *    MaxP = 4 * (Pn + 1)\n+ */\n+struct qm_cgr_wr_parm {\n+\tunion {\n+\t\tu32 word;\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu32 MA:8;\n+\t\t\tu32 Mn:5;\n+\t\t\tu32 SA:7; /* must be between 64-127 */\n+\t\t\tu32 Sn:6;\n+\t\t\tu32 Pn:6;\n+#else\n+\t\t\tu32 Pn:6;\n+\t\t\tu32 Sn:6;\n+\t\t\tu32 SA:7; /* must be between 64-127 */\n+\t\t\tu32 Mn:5;\n+\t\t\tu32 MA:8;\n+#endif\n+\t\t} __packed;\n+\t};\n+} __packed;\n+/*\n+ * This struct represents the 13-bit \"CS_THRES\" CGR field. In the corresponding\n+ * management commands, this is padded to a 16-bit structure field, so that's\n+ * how we represent it here. The congestion state threshold is calculated from\n+ * these fields as follows;\n+ *   CS threshold = TA * (2 ^ Tn)\n+ */\n+struct qm_cgr_cs_thres {\n+\tunion {\n+\t\tu16 hword;\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu16 __reserved:3;\n+\t\t\tu16 TA:8;\n+\t\t\tu16 Tn:5;\n+#else\n+\t\t\tu16 Tn:5;\n+\t\t\tu16 TA:8;\n+\t\t\tu16 __reserved:3;\n+#endif\n+\t\t} __packed;\n+\t};\n+} __packed;\n+/*\n+ * This identical structure of CGR fields is present in the \"Init/Modify CGR\"\n+ * commands and the \"Query CGR\" result. 
It's suctioned out here into its own\n+ * struct.\n+ */\n+struct __qm_mc_cgr {\n+\tstruct qm_cgr_wr_parm wr_parm_g;\n+\tstruct qm_cgr_wr_parm wr_parm_y;\n+\tstruct qm_cgr_wr_parm wr_parm_r;\n+\tu8 wr_en_g;\t/* boolean, use QM_CGR_EN */\n+\tu8 wr_en_y;\t/* boolean, use QM_CGR_EN */\n+\tu8 wr_en_r;\t/* boolean, use QM_CGR_EN */\n+\tu8 cscn_en;\t/* boolean, use QM_CGR_EN */\n+\tunion {\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */\n+\t\t\tu16 cscn_targ_dcp_low;  /* CSCN_TARG_DCP low-16bits */\n+#else\n+\t\t\tu16 cscn_targ_dcp_low;  /* CSCN_TARG_DCP low-16bits */\n+\t\t\tu16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */\n+#endif\n+\t\t};\n+\t\tu32 cscn_targ;\t/* use QM_CGR_TARG_* */\n+\t};\n+\tu8 cstd_en;\t/* boolean, use QM_CGR_EN */\n+\tu8 cs;\t\t/* boolean, only used in query response */\n+\tunion {\n+\t\tstruct qm_cgr_cs_thres cs_thres;\n+\t\t/* use qm_cgr_cs_thres_set64() */\n+\t\tu16 __cs_thres;\n+\t};\n+\tu8 mode;\t/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */\n+} __packed;\n+#define QM_CGR_EN\t\t0x01 /* For wr_en_*, cscn_en, cstd_en */\n+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT\t0x8000 /* value written to portal bit*/\n+#define QM_CGR_TARG_UDP_CTRL_DCP\t0x4000 /* 0: SWP, 1: DCP */\n+#define QM_CGR_TARG_PORTAL(n)\t(0x80000000 >> (n)) /* s/w portal, 0-9 */\n+#define QM_CGR_TARG_FMAN0\t0x00200000 /* direct-connect portal: fman0 */\n+#define QM_CGR_TARG_FMAN1\t0x00100000 /*\t\t\t   : fman1 */\n+/* Convert CGR thresholds to/from \"cs_thres\" format */\n+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)\n+{\n+\treturn (u64)th->TA << th->Tn;\n+}\n+\n+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,\n+\t\t\t\t\tint roundup)\n+{\n+\tu32 e = 0;\n+\tint oddbit = 0;\n+\n+\twhile (val > 0xff) {\n+\t\toddbit = val & 1;\n+\t\tval >>= 1;\n+\t\te++;\n+\t\tif (roundup && oddbit)\n+\t\t\tval++;\n+\t}\n+\tth->Tn = e;\n+\tth->TA = val;\n+\treturn 0;\n+}\n+\n+/* See 1.5.8.5.1: \"Initialize FQ\" */\n+/* See 1.5.8.5.2: \"Query FQ\" */\n+/* See 1.5.8.5.3: \"Query FQ Non-Programmable Fields\" */\n+/* See 1.5.8.5.4: \"Alter FQ State Commands \" */\n+/* See 1.5.8.6.1: \"Initialize/Modify CGR\" */\n+/* See 1.5.8.6.2: \"CGR Test Write\" */\n+/* See 1.5.8.6.3: \"Query CGR\" */\n+/* See 1.5.8.6.4: \"Query Congestion Group State\" */\n+struct qm_mcc_initfq {\n+\tu8 __reserved1;\n+\tu16 we_mask;\t/* Write Enable Mask */\n+\tu32 fqid;\t/* 24-bit */\n+\tu16 count;\t/* Initialises 'count+1' FQDs */\n+\tstruct qm_fqd fqd; /* the FQD fields go here */\n+\tu8 __reserved3[30];\n+} __packed;\n+struct qm_mcc_queryfq {\n+\tu8 __reserved1[3];\n+\tu32 fqid;\t/* 24-bit */\n+\tu8 __reserved2[56];\n+} __packed;\n+struct qm_mcc_queryfq_np {\n+\tu8 __reserved1[3];\n+\tu32 fqid;\t/* 24-bit */\n+\tu8 __reserved2[56];\n+} __packed;\n+struct qm_mcc_alterfq {\n+\tu8 __reserved1[3];\n+\tu32 fqid;\t/* 24-bit */\n+\tu8 __reserved2;\n+\tu8 count;\t/* number of consecutive FQID */\n+\tu8 __reserved3[10];\n+\tu32 context_b;\t/* frame queue context b */\n+\tu8 __reserved4[40];\n+} __packed;\n+struct qm_mcc_initcgr {\n+\tu8 __reserved1;\n+\tu16 we_mask;\t/* Write Enable Mask */\n+\tstruct __qm_mc_cgr cgr;\t/* CGR fields */\n+\tu8 __reserved2[2];\n+\tu8 cgid;\n+\tu8 __reserved4[32];\n+} __packed;\n+struct qm_mcc_cgrtestwrite {\n+\tu8 __reserved1[2];\n+\tu8 i_bcnt_hi:8;/* high 8-bits of 40-bit \"Instant\" */\n+\tu32 i_bcnt_lo;\t/* low 32-bits of 40-bit */\n+\tu8 __reserved2[23];\n+\tu8 cgid;\n+\tu8 __reserved3[32];\n+} 
__packed;\n+struct qm_mcc_querycgr {\n+\tu8 __reserved1[30];\n+\tu8 cgid;\n+\tu8 __reserved2[32];\n+} __packed;\n+struct qm_mcc_querycongestion {\n+\tu8 __reserved[63];\n+} __packed;\n+struct qm_mcc_querywq {\n+\tu8 __reserved;\n+\t/* select channel if verb != QUERYWQ_DEDICATED */\n+\tunion {\n+\t\tu16 channel_wq; /* ignores wq (3 lsbits) */\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu16 id:13; /* qm_channel */\n+\t\t\tu16 __reserved1:3;\n+#else\n+\t\t\tu16 __reserved1:3;\n+\t\t\tu16 id:13; /* qm_channel */\n+#endif\n+\t\t} __packed channel;\n+\t};\n+\tu8 __reserved2[60];\n+} __packed;\n+\n+struct qm_mc_command {\n+\tu8 __dont_write_directly__verb;\n+\tunion {\n+\t\tstruct qm_mcc_initfq initfq;\n+\t\tstruct qm_mcc_queryfq queryfq;\n+\t\tstruct qm_mcc_queryfq_np queryfq_np;\n+\t\tstruct qm_mcc_alterfq alterfq;\n+\t\tstruct qm_mcc_initcgr initcgr;\n+\t\tstruct qm_mcc_cgrtestwrite cgrtestwrite;\n+\t\tstruct qm_mcc_querycgr querycgr;\n+\t\tstruct qm_mcc_querycongestion querycongestion;\n+\t\tstruct qm_mcc_querywq querywq;\n+\t};\n+} __packed;\n+\n+/* INITFQ-specific flags */\n+#define QM_INITFQ_WE_MASK\t\t0x01ff\t/* 'Write Enable' flags; */\n+#define QM_INITFQ_WE_OAC\t\t0x0100\n+#define QM_INITFQ_WE_ORPC\t\t0x0080\n+#define QM_INITFQ_WE_CGID\t\t0x0040\n+#define QM_INITFQ_WE_FQCTRL\t\t0x0020\n+#define QM_INITFQ_WE_DESTWQ\t\t0x0010\n+#define QM_INITFQ_WE_ICSCRED\t\t0x0008\n+#define QM_INITFQ_WE_TDTHRESH\t\t0x0004\n+#define QM_INITFQ_WE_CONTEXTB\t\t0x0002\n+#define QM_INITFQ_WE_CONTEXTA\t\t0x0001\n+/* INITCGR/MODIFYCGR-specific flags */\n+#define QM_CGR_WE_MASK\t\t\t0x07ff\t/* 'Write Enable Mask'; */\n+#define QM_CGR_WE_WR_PARM_G\t\t0x0400\n+#define QM_CGR_WE_WR_PARM_Y\t\t0x0200\n+#define QM_CGR_WE_WR_PARM_R\t\t0x0100\n+#define QM_CGR_WE_WR_EN_G\t\t0x0080\n+#define QM_CGR_WE_WR_EN_Y\t\t0x0040\n+#define QM_CGR_WE_WR_EN_R\t\t0x0020\n+#define QM_CGR_WE_CSCN_EN\t\t0x0010\n+#define QM_CGR_WE_CSCN_TARG\t\t0x0008\n+#define QM_CGR_WE_CSTD_EN\t\t0x0004\n+#define QM_CGR_WE_CS_THRES\t\t0x0002\n+#define QM_CGR_WE_MODE\t\t\t0x0001\n+\n+struct qm_mcr_initfq {\n+\tu8 __reserved1[62];\n+} __packed;\n+struct qm_mcr_queryfq {\n+\tu8 __reserved1[8];\n+\tstruct qm_fqd fqd;\t/* the FQD fields are here */\n+\tu8 __reserved2[30];\n+} __packed;\n+struct qm_mcr_queryfq_np {\n+\tu8 __reserved1;\n+\tu8 state;\t/* QM_MCR_NP_STATE_*** */\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\tu8 __reserved2;\n+\tu32 fqd_link:24;\n+\tu16 __reserved3:2;\n+\tu16 odp_seq:14;\n+\tu16 __reserved4:2;\n+\tu16 orp_nesn:14;\n+\tu16 __reserved5:1;\n+\tu16 orp_ea_hseq:15;\n+\tu16 __reserved6:1;\n+\tu16 orp_ea_tseq:15;\n+\tu8 __reserved7;\n+\tu32 orp_ea_hptr:24;\n+\tu8 __reserved8;\n+\tu32 orp_ea_tptr:24;\n+\tu8 __reserved9;\n+\tu32 pfdr_hptr:24;\n+\tu8 __reserved10;\n+\tu32 pfdr_tptr:24;\n+\tu8 __reserved11[5];\n+\tu8 __reserved12:7;\n+\tu8 is:1;\n+\tu16 ics_surp;\n+\tu32 byte_cnt;\n+\tu8 __reserved13;\n+\tu32 frm_cnt:24;\n+\tu32 __reserved14;\n+\tu16 ra1_sfdr;\t/* QM_MCR_NP_RA1_*** */\n+\tu16 ra2_sfdr;\t/* QM_MCR_NP_RA2_*** */\n+\tu16 __reserved15;\n+\tu16 od1_sfdr;\t/* QM_MCR_NP_OD1_*** */\n+\tu16 od2_sfdr;\t/* QM_MCR_NP_OD2_*** */\n+\tu16 od3_sfdr;\t/* QM_MCR_NP_OD3_*** */\n+#else\n+\tu8 __reserved2;\n+\tu32 fqd_link:24;\n+\n+\tu16 odp_seq:14;\n+\tu16 __reserved3:2;\n+\n+\tu16 orp_nesn:14;\n+\tu16 __reserved4:2;\n+\n+\tu16 orp_ea_hseq:15;\n+\tu16 __reserved5:1;\n+\n+\tu16 orp_ea_tseq:15;\n+\tu16 __reserved6:1;\n+\n+\tu8 __reserved7;\n+\tu32 orp_ea_hptr:24;\n+\n+\tu8 __reserved8;\n+\tu32 orp_ea_tptr:24;\n+\n+\tu8 
__reserved9;\n+\tu32 pfdr_hptr:24;\n+\n+\tu8 __reserved10;\n+\tu32 pfdr_tptr:24;\n+\n+\tu8 __reserved11[5];\n+\tu8 is:1;\n+\tu8 __reserved12:7;\n+\tu16 ics_surp;\n+\tu32 byte_cnt;\n+\tu8 __reserved13;\n+\tu32 frm_cnt:24;\n+\tu32 __reserved14;\n+\tu16 ra1_sfdr;\t/* QM_MCR_NP_RA1_*** */\n+\tu16 ra2_sfdr;\t/* QM_MCR_NP_RA2_*** */\n+\tu16 __reserved15;\n+\tu16 od1_sfdr;\t/* QM_MCR_NP_OD1_*** */\n+\tu16 od2_sfdr;\t/* QM_MCR_NP_OD2_*** */\n+\tu16 od3_sfdr;\t/* QM_MCR_NP_OD3_*** */\n+#endif\n+} __packed;\n+\n+struct qm_mcr_alterfq {\n+\tu8 fqs;\t\t/* Frame Queue Status */\n+\tu8 __reserved1[61];\n+} __packed;\n+struct qm_mcr_initcgr {\n+\tu8 __reserved1[62];\n+} __packed;\n+struct qm_mcr_cgrtestwrite {\n+\tu16 __reserved1;\n+\tstruct __qm_mc_cgr cgr; /* CGR fields */\n+\tu8 __reserved2[3];\n+\tu32 __reserved3:24;\n+\tu32 i_bcnt_hi:8;/* high 8-bits of 40-bit \"Instant\" */\n+\tu32 i_bcnt_lo;\t/* low 32-bits of 40-bit */\n+\tu32 __reserved4:24;\n+\tu32 a_bcnt_hi:8;/* high 8-bits of 40-bit \"Average\" */\n+\tu32 a_bcnt_lo;\t/* low 32-bits of 40-bit */\n+\tu16 lgt;\t/* Last Group Tick */\n+\tu16 wr_prob_g;\n+\tu16 wr_prob_y;\n+\tu16 wr_prob_r;\n+\tu8 __reserved5[8];\n+} __packed;\n+struct qm_mcr_querycgr {\n+\tu16 __reserved1;\n+\tstruct __qm_mc_cgr cgr; /* CGR fields */\n+\tu8 __reserved2[3];\n+\tunion {\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu32 __reserved3:24;\n+\t\t\tu32 i_bcnt_hi:8;/* high 8-bits of 40-bit \"Instant\" */\n+\t\t\tu32 i_bcnt_lo;\t/* low 32-bits of 40-bit */\n+#else\n+\t\t\tu32 i_bcnt_lo;\t/* low 32-bits of 40-bit */\n+\t\t\tu32 i_bcnt_hi:8;/* high 8-bits of 40-bit \"Instant\" */\n+\t\t\tu32 __reserved3:24;\n+#endif\n+\t\t};\n+\t\tu64 i_bcnt;\n+\t};\n+\tunion {\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu32 __reserved4:24;\n+\t\t\tu32 a_bcnt_hi:8;/* high 8-bits of 40-bit \"Average\" */\n+\t\t\tu32 a_bcnt_lo;\t/* low 32-bits of 40-bit */\n+#else\n+\t\t\tu32 a_bcnt_lo;\t/* low 32-bits of 40-bit */\n+\t\t\tu32 a_bcnt_hi:8;/* high 8-bits of 40-bit \"Average\" */\n+\t\t\tu32 __reserved4:24;\n+#endif\n+\t\t};\n+\t\tu64 a_bcnt;\n+\t};\n+\tunion {\n+\t\tu32 cscn_targ_swp[4];\n+\t\tu8 __reserved5[16];\n+\t};\n+} __packed;\n+\n+struct __qm_mcr_querycongestion {\n+\tu32 state[8];\n+};\n+\n+struct qm_mcr_querycongestion {\n+\tu8 __reserved[30];\n+\t/* Access this struct using QM_MCR_QUERYCONGESTION() */\n+\tstruct __qm_mcr_querycongestion state;\n+} __packed;\n+struct qm_mcr_querywq {\n+\tunion {\n+\t\tu16 channel_wq; /* ignores wq (3 lsbits) */\n+\t\tstruct {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t\tu16 id:13; /* qm_channel */\n+\t\t\tu16 __reserved:3;\n+#else\n+\t\t\tu16 __reserved:3;\n+\t\t\tu16 id:13; /* qm_channel */\n+#endif\n+\t\t} __packed channel;\n+\t};\n+\tu8 __reserved[28];\n+\tu32 wq_len[8];\n+} __packed;\n+\n+struct qm_mc_result {\n+\tu8 verb;\n+\tu8 result;\n+\tunion {\n+\t\tstruct qm_mcr_initfq initfq;\n+\t\tstruct qm_mcr_queryfq queryfq;\n+\t\tstruct qm_mcr_queryfq_np queryfq_np;\n+\t\tstruct qm_mcr_alterfq alterfq;\n+\t\tstruct qm_mcr_initcgr initcgr;\n+\t\tstruct qm_mcr_cgrtestwrite cgrtestwrite;\n+\t\tstruct qm_mcr_querycgr querycgr;\n+\t\tstruct qm_mcr_querycongestion querycongestion;\n+\t\tstruct qm_mcr_querywq querywq;\n+\t};\n+} __packed;\n+\n+#define QM_MCR_VERB_RRID\t\t0x80\n+#define QM_MCR_VERB_MASK\t\tQM_MCC_VERB_MASK\n+#define QM_MCR_VERB_INITFQ_PARKED\tQM_MCC_VERB_INITFQ_PARKED\n+#define QM_MCR_VERB_INITFQ_SCHED\tQM_MCC_VERB_INITFQ_SCHED\n+#define QM_MCR_VERB_QUERYFQ\t\tQM_MCC_VERB_QUERYFQ\n+#define 
QM_MCR_VERB_QUERYFQ_NP\t\tQM_MCC_VERB_QUERYFQ_NP\n+#define QM_MCR_VERB_QUERYWQ\t\tQM_MCC_VERB_QUERYWQ\n+#define QM_MCR_VERB_QUERYWQ_DEDICATED\tQM_MCC_VERB_QUERYWQ_DEDICATED\n+#define QM_MCR_VERB_ALTER_SCHED\t\tQM_MCC_VERB_ALTER_SCHED\n+#define QM_MCR_VERB_ALTER_FE\t\tQM_MCC_VERB_ALTER_FE\n+#define QM_MCR_VERB_ALTER_RETIRE\tQM_MCC_VERB_ALTER_RETIRE\n+#define QM_MCR_VERB_ALTER_OOS\t\tQM_MCC_VERB_ALTER_OOS\n+#define QM_MCR_RESULT_NULL\t\t0x00\n+#define QM_MCR_RESULT_OK\t\t0xf0\n+#define QM_MCR_RESULT_ERR_FQID\t\t0xf1\n+#define QM_MCR_RESULT_ERR_FQSTATE\t0xf2\n+#define QM_MCR_RESULT_ERR_NOTEMPTY\t0xf3\t/* OOS fails if FQ is !empty */\n+#define QM_MCR_RESULT_ERR_BADCHANNEL\t0xf4\n+#define QM_MCR_RESULT_PENDING\t\t0xf8\n+#define QM_MCR_RESULT_ERR_BADCOMMAND\t0xff\n+#define QM_MCR_NP_STATE_FE\t\t0x10\n+#define QM_MCR_NP_STATE_R\t\t0x08\n+#define QM_MCR_NP_STATE_MASK\t\t0x07\t/* Reads FQD::STATE; */\n+#define QM_MCR_NP_STATE_OOS\t\t0x00\n+#define QM_MCR_NP_STATE_RETIRED\t\t0x01\n+#define QM_MCR_NP_STATE_TEN_SCHED\t0x02\n+#define QM_MCR_NP_STATE_TRU_SCHED\t0x03\n+#define QM_MCR_NP_STATE_PARKED\t\t0x04\n+#define QM_MCR_NP_STATE_ACTIVE\t\t0x05\n+#define QM_MCR_NP_PTR_MASK\t\t0x07ff\t/* for RA[12] & OD[123] */\n+#define QM_MCR_NP_RA1_NRA(v)\t\t(((v) >> 14) & 0x3)\t/* FQD::NRA */\n+#define QM_MCR_NP_RA2_IT(v)\t\t(((v) >> 14) & 0x1)\t/* FQD::IT */\n+#define QM_MCR_NP_OD1_NOD(v)\t\t(((v) >> 14) & 0x3)\t/* FQD::NOD */\n+#define QM_MCR_NP_OD3_NPC(v)\t\t(((v) >> 14) & 0x3)\t/* FQD::NPC */\n+#define QM_MCR_FQS_ORLPRESENT\t\t0x02\t/* ORL fragments to come */\n+#define QM_MCR_FQS_NOTEMPTY\t\t0x01\t/* FQ has enqueued frames */\n+/* This extracts the state for congestion group 'n' from a query response.\n+ * Eg.\n+ *   u8 cgr = [...];\n+ *   struct qm_mc_result *res = [...];\n+ *   printf(\"congestion group %d congestion state: %d\\n\", cgr,\n+ *       QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));\n+ */\n+#define __CGR_WORD(num)\t\t(num >> 5)\n+#define __CGR_SHIFT(num)\t(num & 0x1f)\n+#define __CGR_NUM\t\t(sizeof(struct __qm_mcr_querycongestion) << 3)\n+static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,\n+\t\t\t\t\t u8 cgr)\n+{\n+\treturn p->state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));\n+}\n+\n+\t/* Portal and Frame Queues */\n+/* Represents a managed portal */\n+struct qman_portal;\n+\n+/*\n+ * This object type represents QMan frame queue descriptors (FQD), it is\n+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is\n+ * defined further down.\n+ */\n+struct qman_fq;\n+\n+/*\n+ * This object type represents a QMan congestion group, it is defined further\n+ * down.\n+ */\n+struct qman_cgr;\n+\n+/*\n+ * This enum, and the callback type that returns it, are used when handling\n+ * dequeued frames via DQRR. Note that for \"null\" callbacks registered with the\n+ * portal object (for handling dequeues that do not demux because context_b is\n+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.\n+ */\n+enum qman_cb_dqrr_result {\n+\t/* DQRR entry can be consumed */\n+\tqman_cb_dqrr_consume,\n+\t/* Like _consume, but requests parking - FQ must be held-active */\n+\tqman_cb_dqrr_park,\n+\t/* Does not consume, for DCA mode only. This allows out-of-order\n+\t * consumes by explicit calls to qman_dca() and/or the use of implicit\n+\t * DCA via EQCR entries.\n+\t */\n+\tqman_cb_dqrr_defer,\n+\t/*\n+\t * Stop processing without consuming this ring entry. Exits the current\n+\t * qman_p_poll_dqrr() or interrupt-handling, as appropriate. 
If within\n+\t * an interrupt handler, the callback would typically call\n+\t * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,\n+\t * otherwise the interrupt will reassert immediately.\n+\t */\n+\tqman_cb_dqrr_stop,\n+\t/* Like qman_cb_dqrr_stop, but consumes the current entry. */\n+\tqman_cb_dqrr_consume_stop\n+};\n+\n+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,\n+\t\t\t\t\tstruct qman_fq *fq,\n+\t\t\t\t\tconst struct qm_dqrr_entry *dqrr);\n+\n+/*\n+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They\n+ * are always consumed after the callback returns.\n+ */\n+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,\n+\t\t\t\tconst struct qm_mr_entry *msg);\n+\n+/* This callback type is used when handling DCP ERNs */\n+typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,\n+\t\t\t\tconst struct qm_mr_entry *msg);\n+/*\n+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +\n+ * held-active + held-suspended are just \"sched\". Things like \"retired\" will not\n+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until\n+ * then, to indicate it's completing and to gate attempts to retry the retire\n+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's\n+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring\n+ * index rather than the FQ that ring entry corresponds to), so repeated park\n+ * commands are allowed (if you're silly enough to try) but won't change FQ\n+ * state, and the resulting park notifications move FQs from \"sched\" to\n+ * \"parked\".\n+ */\n+enum qman_fq_state {\n+\tqman_fq_state_oos,\n+\tqman_fq_state_parked,\n+\tqman_fq_state_sched,\n+\tqman_fq_state_retired\n+};\n+\n+\n+/*\n+ * Frame queue objects (struct qman_fq) are stored within memory passed to\n+ * qman_create_fq(), as this allows stashing of caller-provided demux callback\n+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the\n+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,\n+ * they should;\n+ *\n+ * (a) extend the qman_fq structure with their state; eg.\n+ *\n+ *     // myfq is allocated and driver_fq callbacks filled in;\n+ *     struct my_fq {\n+ *\t   struct qman_fq base;\n+ *\t   int an_extra_field;\n+ *\t   [ ... add other fields to be associated with each FQ ...]\n+ *     } *myfq = some_my_fq_allocator();\n+ *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);\n+ *\n+ *     // in a dequeue callback, access extra fields from 'fq' via a cast;\n+ *     struct my_fq *myfq = (struct my_fq *)fq;\n+ *     do_something_with(myfq->an_extra_field);\n+ *     [...]\n+ *\n+ * (b) when and if configuring the FQ for context stashing, specify how ever\n+ *     many cachelines are required to stash 'struct my_fq', to accelerate not\n+ *     only the QMan driver but the callback as well.\n+ */\n+\n+struct qman_fq_cb {\n+\tqman_cb_dqrr dqrr;\t/* for dequeued frames */\n+\tqman_cb_mr ern;\t\t/* for s/w ERNs */\n+\tqman_cb_mr fqs;\t\t/* frame-queue state changes*/\n+};\n+\n+struct qman_fq {\n+\t/* Caller of qman_create_fq() provides these demux callbacks */\n+\tstruct qman_fq_cb cb;\n+\t/*\n+\t * These are internal to the driver, don't touch. 
In particular, they\n+\t * may change, be removed, or extended (so you shouldn't rely on\n+\t * sizeof(qman_fq) being a constant).\n+\t */\n+\tspinlock_t fqlock;\n+\tu32 fqid;\n+\t/* DPDK Interface */\n+\tvoid *dpaa_intf;\n+\n+\tvolatile unsigned long flags;\n+\tenum qman_fq_state state;\n+\tint cgr_groupid;\n+\tstruct rb_node node;\n+};\n+\n+/*\n+ * This callback type is used when handling congestion group entry/exit.\n+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.\n+ */\n+typedef void (*qman_cb_cgr)(struct qman_portal *qm,\n+\t\t\t    struct qman_cgr *cgr, int congested);\n+\n+struct qman_cgr {\n+\t/* Set these prior to qman_create_cgr() */\n+\tu32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/\n+\tqman_cb_cgr cb;\n+\t/* These are private to the driver */\n+\tu16 chan; /* portal channel this object is created on */\n+\tstruct list_head node;\n+};\n+\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* __FSL_QMAN_H */\ndiff --git a/drivers/bus/dpaa/include/fsl_usd.h b/drivers/bus/dpaa/include/fsl_usd.h\nindex 4ff48c6..b0d953f 100644\n--- a/drivers/bus/dpaa/include/fsl_usd.h\n+++ b/drivers/bus/dpaa/include/fsl_usd.h\n@@ -47,6 +47,10 @@\n extern \"C\" {\n #endif\n \n+/* Thread-entry/exit hooks; */\n+int qman_thread_init(void);\n+int qman_thread_finish(void);\n+\n #define QBMAN_ANY_PORTAL_IDX 0xffffffff\n \n /* Obtain and free raw (unitialized) portals */\n@@ -81,6 +85,15 @@ int qman_free_raw_portal(struct dpaa_raw_portal *portal);\n int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);\n int bman_free_raw_portal(struct dpaa_raw_portal *portal);\n \n+/* Post-process interrupts. NB, the kernel IRQ handler disables the interrupt\n+ * line before notifying us, and this post-processing re-enables it once\n+ * processing is complete. As such, it is essential to call this before going\n+ * into another blocking read/select/poll.\n+ */\n+void qman_thread_irq(void);\n+\n+/* Global setup */\n+int qman_global_init(void);\n #ifdef __cplusplus\n }\n #endif\n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "10/40"
    ]
}