get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (a full update; all writable fields are expected).
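
A minimal sketch of driving these operations over HTTP with Python's requests library. The read is anonymous; the update assumes a Patchwork API token (the value shown is a placeholder) belonging to a maintainer of the project, and a target state the project actually defines:

    import requests

    URL = "http://patches.dpdk.org/api/patches/18164/"

    # GET: reads are anonymous; ?format=json selects the JSON renderer.
    patch = requests.get(URL, params={"format": "json"}).json()
    print(patch["name"], "->", patch["state"])

    # PATCH: a partial update, here changing only the state field.
    # The token is a placeholder; "accepted" assumes the project
    # defines such a state (this patch is shown as "superseded").
    resp = requests.patch(
        URL,
        json={"state": "accepted"},
        headers={"Authorization": "Token 0123456789abcdef0123456789abcdef"},
    )
    resp.raise_for_status()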

GET /api/patches/18164/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 18164,
    "url": "http://patches.dpdk.org/api/patches/18164/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1482180853-18823-5-git-send-email-hemant.agrawal@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1482180853-18823-5-git-send-email-hemant.agrawal@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1482180853-18823-5-git-send-email-hemant.agrawal@nxp.com",
    "date": "2016-12-19T20:53:43",
    "name": "[dpdk-dev,PATCHv2,04/34] drivers/common/dpaa2: adding qbman driver",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "3a44d35a8837019392fe0bd4963e0facfe9a01f4",
    "submitter": {
        "id": 477,
        "url": "http://patches.dpdk.org/api/people/477/?format=api",
        "name": "Hemant Agrawal",
        "email": "hemant.agrawal@nxp.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1482180853-18823-5-git-send-email-hemant.agrawal@nxp.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/18164/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/18164/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 66A6BF98A;\n\tMon, 19 Dec 2016 16:21:13 +0100 (CET)",
            "from NAM02-CY1-obe.outbound.protection.outlook.com\n\t(mail-cys01nam02on0080.outbound.protection.outlook.com\n\t[104.47.37.80]) by dpdk.org (Postfix) with ESMTP id 9EFB0F96C\n\tfor <dev@dpdk.org>; Mon, 19 Dec 2016 16:20:47 +0100 (CET)",
            "from CY1PR03CA0042.namprd03.prod.outlook.com (10.174.128.52) by\n\tDM5PR03MB2473.namprd03.prod.outlook.com (10.168.233.19) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384_P384) id\n\t15.1.789.14; Mon, 19 Dec 2016 15:20:45 +0000",
            "from BL2FFO11FD012.protection.gbl (2a01:111:f400:7c09::132) by\n\tCY1PR03CA0042.outlook.office365.com (2603:10b6:600::52) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384_P384) id\n\t15.1.789.14 via Frontend Transport; Mon, 19 Dec 2016 15:20:45 +0000",
            "from az84smr01.freescale.net (192.88.158.2) by\n\tBL2FFO11FD012.mail.protection.outlook.com (10.173.161.18) with\n\tMicrosoft SMTP Server (version=TLS1_0,\n\tcipher=TLS_RSA_WITH_AES_256_CBC_SHA) id 15.1.789.10\n\tvia Frontend Transport; Mon, 19 Dec 2016 15:20:44 +0000",
            "from bf-netperf1.idc ([10.232.134.28])\n\tby az84smr01.freescale.net (8.14.3/8.14.0) with ESMTP id\n\tuBJFKMhT029110; Mon, 19 Dec 2016 08:20:39 -0700"
        ],
        "Authentication-Results": "spf=fail (sender IP is 192.88.158.2)\n\tsmtp.mailfrom=nxp.com; nxp.com; dkim=none (message not signed)\n\theader.d=none; nxp.com; dmarc=fail action=none header.from=nxp.com;\n\tnxp.com; \n\tdkim=none (message not signed) header.d=none;",
        "Received-SPF": "Fail (protection.outlook.com: domain of nxp.com does not\n\tdesignate 192.88.158.2 as permitted sender)\n\treceiver=protection.outlook.com; \n\tclient-ip=192.88.158.2; helo=az84smr01.freescale.net;",
        "From": "Hemant Agrawal <hemant.agrawal@nxp.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<thomas.monjalon@6wind.com>, <bruce.richardson@intel.com>,\n\t<shreyansh.jain@nxp.com>, <john.mcnamara@intel.com>,\n\t<ferruh.yigit@intel.com>, <jerin.jacob@caviumnetworks.com>, Geoff Thorpe\n\t<Geoff.Thorpe@nxp.com>, Roy Pledge <Roy.Pledge@nxp.com>, Hemant Agrawal\n\t<hemant.agrawal@nxp.com>",
        "Date": "Tue, 20 Dec 2016 02:23:43 +0530",
        "Message-ID": "<1482180853-18823-5-git-send-email-hemant.agrawal@nxp.com>",
        "X-Mailer": "git-send-email 1.9.1",
        "In-Reply-To": "<1482180853-18823-1-git-send-email-hemant.agrawal@nxp.com>",
        "References": "<1480875447-23680-1-git-send-email-hemant.agrawal@nxp.com>\n\t<1482180853-18823-1-git-send-email-hemant.agrawal@nxp.com>",
        "X-EOPAttributedMessage": "0",
        "X-Matching-Connectors": "131266344447446089;\n\t(91ab9b29-cfa4-454e-5278-08d120cd25b8); ()",
        "X-Forefront-Antispam-Report": "CIP:192.88.158.2; IPV:NLI; CTRY:US; EFV:NLI;\n\tSFV:NSPM;\n\tSFS:(10009020)(6009001)(336005)(7916002)(39860400002)(39400400002)(39410400002)(39850400002)(39380400002)(39450400003)(39840400002)(2980300002)(1110001)(1109001)(339900001)(189002)(199003)(626004)(92566002)(5003940100001)(105606002)(104016004)(6916009)(305945005)(77096006)(189998001)(48376002)(76176999)(5660300001)(85426001)(33646002)(106466001)(36756003)(50986999)(7416002)(50466002)(86362001)(97736004)(2906002)(110136003)(6666003)(2950100002)(8936002)(2351001)(50226002)(38730400001)(8676002)(69596002)(356003)(68736007)(4326007)(81156014)(47776003)(81166006)(8666005)(183404003)(7059030)(579004)(559001)(569005);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:DM5PR03MB2473;\n\tH:az84smr01.freescale.net; FPR:; \n\tSPF:Fail; PTR:InfoDomainNonexistent; MX:1; A:1; LANG:en; ",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; BL2FFO11FD012;\n\t1:VCo0kp/FFw/L7II/uLkKF6y2496XUVWEGHNa8/oDsrYn1/bHK3HqVWjDohPvZBv58p1wG9nmh0aPzA1r/nRLpjOahrkoyFCN7L3T5z/ly+mSWW4L8Y1HtSlTref+kUNzL6PfnIVLj9Ax4ckwFFpg2XpU1XWnUz6J4f3bb8zrCw1/GthwQLGvrj2WxNJIqYPVc7Gj3IyH/xx+FXTuXiiX8TSQhyTz4nGcrY6PtdEy30U5EDVBmlhclUWUTOc1ZZcG0nNV5MdJHiK/lUNAIJ3YTAlcAJcPmdhrpRZJc40o349yHCJwOYkyvygoHzm8kvSVTT7bc5sQ+KzOAKFKnneMXfHX9CJDRL4A/VWTs7wcZPiVY60gq2Sm7AWR9NFGzYA/BwYDk1+mUaMCstD2lC264sF7G8o5o1w25H5AAtw5kAvVJhEZRtsmtPKbHUceIcJunnykvQdPVzG45zn9sz7N11NHTnsu9apNpFiklwzawdbL3KJ3xk94nq5iuKBxEfFIEIHiPcBDt0Ys1VZZZOe4a6JuJd4am+Ju9dlpVhH5iLmS+HubcKekb9JZU9oE6gAOQzhXwikKtwHSjWntcm9p4OxAe5LHEbs1qVaXmQaUc0DOe/LzT/OegHrVwaKNbGMFDWmoMlWVNNK9iDsR3fP56ItZxgTXRRTpSEnWSxdDRag2RNM4hF9kl9BSQpngqeet8rU9MeUBhS62jqMJTHtPcQh9twGrMJwhSKGWpYQ52i+1uyDU34139P8GM8nc6avD0mnymS8Cp/vbuP1IQT9wLg==",
            "1; DM5PR03MB2473;\n\t3:hX8pgyiTyZVD6oN9Qsd0ap1A9KDglF1oq//yR2HLebjbBdq564jxNfuvZ5jP6qE/26A4or7eZuQM6agKoPPESLe6g0lpYof/3Pek8NpuEE7Y8Rsn16Iab3FlF2TYlB24z+AxTo1wEERdJQrp5vaLaAfmCq1IZEnMmKUbLfMXTxTP53rjlZ8I6fJ+1mvxg4iyGmMDNOvp7JnVvEi6BzX60iBePODCpmhVpPsIAryqejxswwbkSbZUeTePQNHJ+1bQ5KvQajtpaV4/NhN4M8jZQBQ6zwvYbevce65Gzk3ZTKwxzit79Zpjh9lE84hxah6KzoJ0SSuVlTtBeFk7CaeBKTzMWJ7M31WLJYrMTBndQkeGbSuii8L6sheX60cWzDWx",
            "1; DM5PR03MB2473;\n\t25:JupwVXynFumO0S/ptwJRm4ehpkSdYAjPBLAxAbxov/5vR3uE0rKzWAj5CcxOG3gTJaVQ/RoA2aR4rS4QD2SAuhim5ZlZlCy6q1tG5Nqmt5R6GmNI8yA34krlNI/9XesBr04GynXnc/v1mNySvm/nEHQ5loIA3o9QEJQFv77ptqk4UdKlCBjiNKRrZdXnWnxDyXIadqJQ0uHSKjvlAz6zDkyqW1pR/9UvFs+4ZTz4tcspPS6JLG2uAOt3RdrKUwnsp0+niACqg2f+zbJLfqoHpSy/QqCf4mCNdBZGWtfHldzTR7Z6Yfg4bldWngBhw2jU0c2ye50geCpLrizeDubzSmRUGhiMpOwRsrQvFku6c9dcM6Tl8Hfalb7q6nXsUnVB5fflFUzjvc+Y+eBvQhpHj/HXYWCoQ9bKKnYdY53NCcbqZEzy/fF7E24LOl0tIo/jDEtPWfGRwaTpSZU2Azhh1amt14zocvxy8uRQehmnm9RIbTDCdefte7naJFRUbPqFVv/mMzZIh3fjlo20xh7rhcRMG3aJXKYf17T/p2UrDvmFNRUDIMfY1Q/4HzHdIs+etEAq44gek0CXcThqh8kMwgFZg+NL6Ph5Ss42Lx0SL/1viNsooIWICneEw60xLHMgYU12nYcSox14mifJBLqMjJXlow0K5g5jp0otD10OJBYAIDNR5AdN5jdjIBKz0Jy4bd3OOA+b4GVUKKP5TWkcjA==",
            "1; DM5PR03MB2473;\n\t31:HoPXpbAp6Hdk8Die+cxR2ebR5N+H9GV+lO43sv2RnnwueSMVU2sBbbfNWvQ1exOlU07u0I6t8BUs/TfDBpN2+79TqKIA7np7sLeBFX+SNQQpjl45h7B+EdfgNtaXYsfZKelpSog0gcF8f6ghVhubMZjZ4FIzXztaVGScywmlkOTO433mlUlk7m4Jvd2LoLm9V6If1Wc8FBW8UEr6OHcNPt1NBLTFs0fWEIw9pcw1D2gU5z9ik98UXPY3YkqN8QTusiVlZM/1TVVrYvpkYdgOsg==",
            "1; DM5PR03MB2473;\n\t4:fEErBITziPDndP+E4TMgECnX0vXSu5BK7MEEozry9brB6MkvMzvqJdHcJz335oQd6O0fcp9EkoYMmqOmGgWn7Og7K1ewH5sQlHHY68XxSIHyyWVcvRqgsKlQyXYr4ezE8lYcybHfHFmBWWqOz3PCtGDBBeU3a9cOJZchkO6XjWOZhAm2q1ACjzUcWmcuBVrMLIkKZeFrpfZ9As31jaI9NVRz0MlXIS7jZauJRH58eD/7q6W7Ls5fDl+CtL0w+QvIzouIpRlx82AiwNxMaYUxpix1P33bCxUQWC5nOjLi5xq9uPuq2wWDI3NJyFwe1o6DRhwBX/6ZGXOi75nAT9WYjdJnaChGSzDpjJRGgyhQAQUwrOhykuVv3pnrhUa2Ql7dEJqH24/M8pXJm4c0KKcjfkZtITk13pZIp+KodnuG7DoCRA6oR70Jmi1Ho9BDwTqtK4WdGqYhUcSEdRB9JBFYm2V66QZgfgbd5GCs1Epqd7DKXrWOqGrOCuAGh0w0o0JFrwTw/Fhzf3MbWUs8FyJFLqFCttLHtPfZdRJQW0cO7Fs0axzlqdaciRp9hBUimWaMWa7QRen6LAexBoHYwiaU6pldhWSIs53fnpyNeSfNtnq6u/8xIFFGqCTyhM87W5o6/cYo0ZBaZKVpcg2VSukr56ea9++HpyJS2ifiCnKStdToWCuSguyAO19/LOJy1uDlsNg9elNpI5jcMdDun5C7RaW5lTW/Qypvo3ulsOQPFlSwJwoQhCdT2XuDnDCF2YxHU6l/82BVhe9UN74BoxR3B8ZF/NPhGHF2N9GHqr8/gCn6PBXwyYY+hmvZHka9zK+lwTuww+hfLaL+dmA6RQsD/Q==",
            "=?us-ascii?Q?1; DM5PR03MB2473;\n\t23:XPaItJikGe0dM/mfRmZ6CMt9JhVEy/vS577ja2CgW?=\n\t5Itb1mOqJNldOJfyspOfHypVRKoMZPiU/zRFy8YVYZ4n5EvaBWrX1C/PYjtIFml6+jUk039h6xJdXBb092210BooukNJ2sEIGv8g00OvdZPFZW314C/KDEiSPsivnZjOUpxtLphT660/36evEA6o/uQK2Oi1Mj0tO6i1cJa1RcDfVRA4k8Yv2Ol9jaQ8gDnHIuhRxl/ftWLNKEipBKtWPn4uDzv1EGTGuhgL/BL6XW1tPK6UwFoAUJ7unCspy8kI/XgCrA02k7trMbS+yiSizcwpajl7+DJLTqXSgLZ9rQ5wuw6CPdJzuOtY+eyfnid3t0oOt4OHpMdtCPqihNvCLqrAEBa2dqh3IRkcEzY3VIXq/P3XUa6By/E45XhJLOJ2eBGUqYshPakv4Zlj1amJisd5qHxkg2+d7ktIR0HNbn/YT/kZM06o5xQv8J/8SFu3VrtIbzvqbFiCtZI7ZkS2fSXltu8v5FIVpz7a1Lff0B+BO4Mo/BbayAdbujsecg7KHgOZVY3GWaO/JySTstU5NM/JVAma3FrNmKsm82ymgiYzd31WUR0MrYFDynnXVjP2w1ldYU65t3FBA+pcuNe3oG9QKo3kcfbLPwza585OHosgTrM/CnvX4bZk9c86hvsjDPG4QeIkoVVAxZw5/Gy4Dx1M086EPWDz0F68to5V59U0B9bBRO962vzaczqWYiFbxPsRj/qee1jLiE3IOIZr8hbqOx8zDIHGiYWd7zWuxD5QBxSSEyeA220vglwr31QAPoEX0kAulnlGdzr4x8cSL7oGEAqhr+HOyJ2oULJenuy/bV3xG7fNLH+GG+deok3aqsKUpjEqavfzTwgk3B9LPlKlasyuGZ+msdkEUp40iE/yi/N5vnmAbH5c9jgUorzEPTBl1/hXZxo/IZ97G91NYIXo2S9rYaSQpQ/hBCrwGVonXp6DZilDFxMhSDuE4U/YoRz1JdDDdpSLGTVVyOtcXBG2gceYYeoTjl3L0pueIO0mWnMJmHbpyR6xxF4murhdL23JvAWB3k+CfkXwLYl2GcUSQtsg+dulH/9bzEnvpmSYTymsfoXTjpM6MmKXTvLoN+D1tOFfAdiJRVI/+B63zukZ+zt1oez2mQXcKhARa86hC889b8VqPWlwoDtERUge5s8t0HR9FiAFwmjChnPQiUs+S8kkIYDjVnq1vSZ7M+NWQYnE4jOvTlKb22FCjyeHoMgGo1UKTmPDQadEgCQQv3mimbt8hk8b1VjXPEDv/X0R6MAZRNp7sU5QTsIcw+5h+IuNpRV+s7jpTa3ehchbDaFh2qyoLBwUwbQ45F75q9Yd+NT97DzQz17FNFXgFSqez1E0jT+bYWKK7kZesA94+vD",
            "1; DM5PR03MB2473;\n\t6:atW7eUsOx5C221cV9zLAl5VLy6eYZIingiiT2fuuwUWxWLvPiAp90V8EoHUcwWy2NEvvMQ4xjNHFg6io9kyPIooY4/7xzjJu7sIhr6MzbYF3Kiwk9POlJcRGfaZVoTnsYxyyMfKnumSUpjwVuKHPyo4u1G06jSaoVd0sg4AP8K/6tJNCOjI1Qo018SvHJ4+m8KB1tZ3HFr2XrK6szK6vFF+PWbSNIaRoVtpx+8VP4YP3mREygdfSe1VWMHyZ7ccjFnRZ+ZpY1Ps9mp9Necyi3a5eQm99WH4+dAhIaUR6cryRH6NgGtzlEchrOJLVRFt76dCTx0pypf+0LCHrlmieZzx5HIF8q6cQCXrU6/dAgDcjYhH48gtslBJq7H+7EnDdC31k+VpVER7JL+46cE8WNlA9jIKTVDRzVmiqNyFMP4kXApIXEvi/KMZtNVUCTFC+;\n\t5:BuYJ3nCdLPGEKyzRr7IBdTgm3APEOqLD9Rgg5nsIiKj7UebehK/7txdRFSF9UqJ0LX4qvAU2AAXLNihXqTalJVcCtR40kUfT80QSgjALttJicmQGl3gvsrtTbo1b7+sx0zufv8/kCkQfAbIJKvE3BwWkT+JwbgNW9YTisX5K7bO/WhGfPM8o7WTRef5X9CSf;\n\t24:9rScFaqLgCJzCn3Wr7pZ/AOjPGS8whh+Fnv97QTkiBnCMBbN54HRlRYQqvthO9p9FUOIZ+UL+yxbcjl0OIggiXy21fioZXY+xqE7XaxyuPc=",
            "1; DM5PR03MB2473;\n\t7:N1+4Aw1RJ3WUd8KO4vezB5rE1ltidrFWp0UAkcDu8cl2asdsKbxfZac/BgZLxUnna+tAfyww5R/CT2YF+Gl+jfXorRvM58uMH8npf6CqHXZx5J0MScC9efWckpbbY2nLu52GFD3264LD3XNr8JGooKGnUnQy1sIlWeoTQkkL0GTkjXumAiv+pqPWLk7opjJ+5JTrBwpqURyjrlkcC+keMuEjnfc9R85CzURz5v1zuNG0TF6b26mKwBmluJBuf6lIiidUle4sj0Cv+ah3IGsQ2rcCo1qUGFKNfELBmcfgGzHPfdFM5YJvSfN3gTPco2UTxchHm54+CKF5XtLvhDsinGnQDEDscc9jnUPytsWJe0FsYcKADMgzuXL6J5lJmSBaDoiuQiD0mVx/vEHe5ey+CZwwKuxO1OVccSwwpkH8OefbTKWkNrUNntyL7nQCnuilBUF38t/3wkvmKl7Q2oMAMg=="
        ],
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-MS-Office365-Filtering-Correlation-Id": "e89deafa-80b6-4dcf-0141-08d428229a25",
        "X-Microsoft-Antispam": "UriScan:; BCL:0; PCL:0; RULEID:(22001);\n\tSRVR:DM5PR03MB2473; ",
        "X-Microsoft-Antispam-PRVS": "<DM5PR03MB2473C6BA99F2EB9DE0A1003789910@DM5PR03MB2473.namprd03.prod.outlook.com>",
        "X-Exchange-Antispam-Report-Test": "UriScan:(185117386973197)(227817650892897)(788757137089)(275809806118684);",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(6095060)(601004)(2401047)(5005006)(8121501046)(13023025)(13024025)(13015025)(13017025)(13018025)(3002001)(10201501046)(6055026)(6096035)(20161123563025)(20161123561025)(20161123556025)(20161123565025)(20161123559025);\n\tSRVR:DM5PR03MB2473; BCL:0; PCL:0; RULEID:(400006); SRVR:DM5PR03MB2473;",
        "X-Forefront-PRVS": "01613DFDC8",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "19 Dec 2016 15:20:44.3858\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-Id": "5afe0b00-7697-4969-b663-5eab37d5f47e",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "TenantId=5afe0b00-7697-4969-b663-5eab37d5f47e;\n\tIp=[192.88.158.2]; \n\tHelo=[az84smr01.freescale.net]",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM5PR03MB2473",
        "Subject": "[dpdk-dev] [PATCHv2 04/34] drivers/common/dpaa2: adding qbman driver",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "QBMAN, is a hardware block which interfaces with the other\naccelerating hardware blocks (For e.g., WRIOP) on NXP's DPAA2\nSoC for queue, buffer and packet scheduling.\n\nThis patch introduces a userspace driver for interfacing with\nthe QBMAN hw block.\n\nThe qbman-portal component provides APIs to do the low level\nhardware bit twiddling for operations such as:\n      -initializing Qman software portals\n      -building and sending portal commands\n      -portal interrupt configuration and processing\n\nThis same/similar code is used in kernel and compat file is used\nto make it working in user space.\n\nSigned-off-by: Geoff Thorpe <Geoff.Thorpe@nxp.com>\nSigned-off-by: Roy Pledge <Roy.Pledge@nxp.com>\nSigned-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>\n---\n config/common_base                                 |    3 +\n config/defconfig_arm64-dpaa2-linuxapp-gcc          |    8 +-\n drivers/Makefile                                   |    1 +\n drivers/common/Makefile                            |   36 +\n drivers/common/dpaa2/Makefile                      |   36 +\n drivers/common/dpaa2/qbman/Makefile                |   53 +\n drivers/common/dpaa2/qbman/include/compat.h        |  405 ++++++\n .../common/dpaa2/qbman/include/fsl_qbman_base.h    |  157 ++\n .../common/dpaa2/qbman/include/fsl_qbman_portal.h  | 1090 ++++++++++++++\n drivers/common/dpaa2/qbman/qbman_portal.c          | 1492 ++++++++++++++++++++\n drivers/common/dpaa2/qbman/qbman_portal.h          |  274 ++++\n drivers/common/dpaa2/qbman/qbman_private.h         |  167 +++\n drivers/common/dpaa2/qbman/qbman_sys.h             |  380 +++++\n drivers/common/dpaa2/qbman/qbman_sys_decl.h        |   70 +\n .../dpaa2/qbman/rte_pmd_dpaa2_qbman_version.map    |   21 +\n 15 files changed, 4192 insertions(+), 1 deletion(-)\n create mode 100644 drivers/common/Makefile\n create mode 100644 drivers/common/dpaa2/Makefile\n create mode 100644 drivers/common/dpaa2/qbman/Makefile\n create mode 100644 drivers/common/dpaa2/qbman/include/compat.h\n create mode 100644 drivers/common/dpaa2/qbman/include/fsl_qbman_base.h\n create mode 100644 drivers/common/dpaa2/qbman/include/fsl_qbman_portal.h\n create mode 100644 drivers/common/dpaa2/qbman/qbman_portal.c\n create mode 100644 drivers/common/dpaa2/qbman/qbman_portal.h\n create mode 100644 drivers/common/dpaa2/qbman/qbman_private.h\n create mode 100644 drivers/common/dpaa2/qbman/qbman_sys.h\n create mode 100644 drivers/common/dpaa2/qbman/qbman_sys_decl.h\n create mode 100644 drivers/common/dpaa2/qbman/rte_pmd_dpaa2_qbman_version.map",
    "diff": "diff --git a/config/common_base b/config/common_base\nindex edb6a54..68cd51a 100644\n--- a/config/common_base\n+++ b/config/common_base\n@@ -273,6 +273,9 @@ CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER=n\n CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n\n \n #\n+# Compile Support Libraries for NXP DPAA2\n+#\n+CONFIG_RTE_LIBRTE_DPAA2_COMMON=n\n # Compile burst-oriented VIRTIO PMD driver\n #\n CONFIG_RTE_LIBRTE_VIRTIO_PMD=y\ndiff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc\nindex 66df54c..c57c340 100644\n--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc\n+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc\n@@ -1,6 +1,7 @@\n #   BSD LICENSE\n #\n-#   Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.\n+#   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.\n+#   Copyright (c) 2016 NXP. All rights reserved.\n #\n #   Redistribution and use in source and binary forms, with or without\n #   modification, are permitted provided that the following conditions\n@@ -40,3 +41,8 @@ CONFIG_RTE_ARCH_ARM_TUNE=\"cortex-a57+fp+simd\"\n #\n CONFIG_RTE_MAX_LCORE=8\n CONFIG_RTE_MAX_NUMA_NODES=1\n+\n+#\n+# Compile Support Libraries for DPAA2\n+#\n+CONFIG_RTE_LIBRTE_DPAA2_COMMON=y\ndiff --git a/drivers/Makefile b/drivers/Makefile\nindex 81c03a8..d5580f6 100644\n--- a/drivers/Makefile\n+++ b/drivers/Makefile\n@@ -31,6 +31,7 @@\n \n include $(RTE_SDK)/mk/rte.vars.mk\n \n+DIRS-y += common\n DIRS-y += net\n DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto\n \ndiff --git a/drivers/common/Makefile b/drivers/common/Makefile\nnew file mode 100644\nindex 0000000..e5bfecb\n--- /dev/null\n+++ b/drivers/common/Makefile\n@@ -0,0 +1,36 @@\n+#   BSD LICENSE\n+#\n+#   Copyright(c) 2016 NXP. All rights reserved.\n+#   All rights reserved.\n+#\n+#   Redistribution and use in source and binary forms, with or without\n+#   modification, are permitted provided that the following conditions\n+#   are met:\n+#\n+#     * Redistributions of source code must retain the above copyright\n+#       notice, this list of conditions and the following disclaimer.\n+#     * Redistributions in binary form must reproduce the above copyright\n+#       notice, this list of conditions and the following disclaimer in\n+#       the documentation and/or other materials provided with the\n+#       distribution.\n+#     * Neither the name of NXP nor the names of its\n+#       contributors may be used to endorse or promote products derived\n+#       from this software without specific prior written permission.\n+#\n+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+#   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_COMMON) += dpaa2\n+\n+include $(RTE_SDK)/mk/rte.subdir.mk\ndiff --git a/drivers/common/dpaa2/Makefile b/drivers/common/dpaa2/Makefile\nnew file mode 100644\nindex 0000000..4960ebe\n--- /dev/null\n+++ b/drivers/common/dpaa2/Makefile\n@@ -0,0 +1,36 @@\n+#   BSD LICENSE\n+#\n+#   Copyright(c) 2016 NXP. All rights reserved.\n+#   All rights reserved.\n+#\n+#   Redistribution and use in source and binary forms, with or without\n+#   modification, are permitted provided that the following conditions\n+#   are met:\n+#\n+#     * Redistributions of source code must retain the above copyright\n+#       notice, this list of conditions and the following disclaimer.\n+#     * Redistributions in binary form must reproduce the above copyright\n+#       notice, this list of conditions and the following disclaimer in\n+#       the documentation and/or other materials provided with the\n+#       distribution.\n+#     * Neither the name of NXP nor the names of its\n+#       contributors may be used to endorse or promote products derived\n+#       from this software without specific prior written permission.\n+#\n+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_COMMON) += qbman\n+\n+include $(RTE_SDK)/mk/rte.subdir.mk\ndiff --git a/drivers/common/dpaa2/qbman/Makefile b/drivers/common/dpaa2/qbman/Makefile\nnew file mode 100644\nindex 0000000..a6f7ece\n--- /dev/null\n+++ b/drivers/common/dpaa2/qbman/Makefile\n@@ -0,0 +1,53 @@\n+#   BSD LICENSE\n+#\n+#   Copyright(c) 2016 NXP. 
All rights reserved.\n+#   All rights reserved.\n+#\n+#   Redistribution and use in source and binary forms, with or without\n+#   modification, are permitted provided that the following conditions\n+#   are met:\n+#\n+#     * Redistributions of source code must retain the above copyright\n+#       notice, this list of conditions and the following disclaimer.\n+#     * Redistributions in binary form must reproduce the above copyright\n+#       notice, this list of conditions and the following disclaimer in\n+#       the documentation and/or other materials provided with the\n+#       distribution.\n+#     * Neither the name of NXP nor the names of its\n+#       contributors may be used to endorse or promote products derived\n+#       from this software without specific prior written permission.\n+#\n+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+#\n+# library name\n+#\n+LIB = librte_pmd_dpaa2_qbman.a\n+\n+CFLAGS += -O3\n+CFLAGS += $(WERROR_FLAGS)\n+\n+CFLAGS += -I$(RTE_SDK)/drivers/common/dpaa2/qbman/include\n+\n+EXPORT_MAP := rte_pmd_dpaa2_qbman_version.map\n+\n+LIBABIVER := 1\n+\n+# all source are stored in SRCS-y\n+#\n+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_COMMON) += \\\n+\tqbman_portal.c\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/common/dpaa2/qbman/include/compat.h b/drivers/common/dpaa2/qbman/include/compat.h\nnew file mode 100644\nindex 0000000..3e1c7a0\n--- /dev/null\n+++ b/drivers/common/dpaa2/qbman/include/compat.h\n@@ -0,0 +1,405 @@\n+/* Copyright (c) 2008-2016 Freescale Semiconductor, Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *     * Redistributions of source code must retain the above copyright\n+ *\t notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *\t notice, this list of conditions and the following disclaimer in the\n+ *\t documentation and/or other materials provided with the distribution.\n+ *     * Neither the name of Freescale Semiconductor nor the\n+ *\t names of its contributors may be used to endorse or promote products\n+ *\t derived from this software without specific prior written permission.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY\n+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY\n+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef HEADER_COMPAT_H\n+#define HEADER_COMPAT_H\n+\n+#include <sched.h>\n+\n+#ifndef _GNU_SOURCE\n+#define _GNU_SOURCE\n+#endif\n+#include <stdint.h>\n+#include <stdlib.h>\n+#include <stddef.h>\n+#include <errno.h>\n+#include <string.h>\n+#include <pthread.h>\n+#include <net/ethernet.h>\n+#include <stdio.h>\n+#include <stdbool.h>\n+#include <ctype.h>\n+#include <malloc.h>\n+#include <sys/types.h>\n+#include <sys/stat.h>\n+#include <fcntl.h>\n+#include <unistd.h>\n+#include <sys/mman.h>\n+#include <limits.h>\n+#include <assert.h>\n+#include <dirent.h>\n+#include <inttypes.h>\n+#include <error.h>\n+#include <rte_atomic.h>\n+\n+/* The following definitions are primarily to allow the single-source driver\n+ * interfaces to be included by arbitrary program code. Ie. for interfaces that\n+ * are also available in kernel-space, these definitions provide compatibility\n+ * with certain attributes and types used in those interfaces.\n+ */\n+\n+/* Required compiler attributes */\n+#define __user\n+#define likely(x)\t__builtin_expect(!!(x), 1)\n+#define unlikely(x)\t__builtin_expect(!!(x), 0)\n+#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))\n+#undef container_of\n+#define container_of(ptr, type, member) ({ \\\n+\t\ttypeof(((type *)0)->member)(*__mptr) = (ptr); \\\n+\t\t(type *)((char *)__mptr - offsetof(type, member)); })\n+#define __stringify_1(x) #x\n+#define __stringify(x)\t__stringify_1(x)\n+\n+#ifdef ARRAY_SIZE\n+#undef ARRAY_SIZE\n+#endif\n+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))\n+\n+/* Required types */\n+typedef uint8_t\t\tu8;\n+typedef uint16_t\tu16;\n+typedef uint32_t\tu32;\n+typedef uint64_t\tu64;\n+typedef uint64_t\tdma_addr_t;\n+typedef cpu_set_t\tcpumask_t;\n+typedef\tu32\t\tcompat_uptr_t;\n+\n+static inline void __user *compat_ptr(compat_uptr_t uptr)\n+{\n+\treturn (void __user *)(unsigned long)uptr;\n+}\n+\n+static inline compat_uptr_t ptr_to_compat(void __user *uptr)\n+{\n+\treturn (u32)(unsigned long)uptr;\n+}\n+\n+/* I/O operations */\n+static inline u32 in_be32(volatile void *__p)\n+{\n+\tvolatile u32 *p = __p;\n+\treturn *p;\n+}\n+\n+static inline void out_be32(volatile void *__p, u32 val)\n+{\n+\tvolatile u32 *p = __p;\n+\t*p = val;\n+}\n+\n+/* Debugging */\n+#define prflush(fmt, args...) \\\n+\tdo { \\\n+\t\tprintf(fmt, ##args); \\\n+\t\tfflush(stdout); \\\n+\t} while (0)\n+#define pr_crit(fmt, args...)\t prflush(\"CRIT:\" fmt, ##args)\n+#define pr_err(fmt, args...)\t prflush(\"ERR:\" fmt, ##args)\n+#define pr_warn(fmt, args...)\t prflush(\"WARN:\" fmt, ##args)\n+#define pr_info(fmt, args...)\t prflush(fmt, ##args)\n+\n+#ifdef pr_debug\n+#undef pr_debug\n+#endif\n+#define pr_debug(fmt, args...) 
{}\n+#define might_sleep_if(c) {}\n+#define msleep(x) {}\n+#define WARN_ON(c, str) \\\n+do { \\\n+\tstatic int warned_##__LINE__; \\\n+\tif ((c) && !warned_##__LINE__) { \\\n+\t\tpr_warn(\"%s\\n\", str); \\\n+\t\tpr_warn(\"(%s:%d)\\n\", __FILE__, __LINE__); \\\n+\t\twarned_##__LINE__ = 1; \\\n+\t} \\\n+} while (0)\n+#define QBMAN_BUG_ON(c) WARN_ON(c, \"BUG\")\n+\n+#define ALIGN(x, a) (((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1))\n+\n+/****************/\n+/* Linked-lists */\n+/****************/\n+\n+struct list_head {\n+\tstruct list_head *prev;\n+\tstruct list_head *next;\n+};\n+\n+#define LIST_HEAD(n) \\\n+struct list_head n = { \\\n+\t.prev = &n, \\\n+\t.next = &n \\\n+}\n+\n+#define INIT_LIST_HEAD(p) \\\n+do { \\\n+\tstruct list_head *__p298 = (p); \\\n+\t__p298->next = __p298; \\\n+\t__p298->prev = __p298->next; \\\n+} while (0)\n+#define list_entry(node, type, member) \\\n+\t(type *)((void *)node - offsetof(type, member))\n+#define list_empty(p) \\\n+({ \\\n+\tconst struct list_head *__p298 = (p); \\\n+\t((__p298->next == __p298) && (__p298->prev == __p298)); \\\n+})\n+#define list_add(p, l) \\\n+do { \\\n+\tstruct list_head *__p298 = (p); \\\n+\tstruct list_head *__l298 = (l); \\\n+\t__p298->next = __l298->next; \\\n+\t__p298->prev = __l298; \\\n+\t__l298->next->prev = __p298; \\\n+\t__l298->next = __p298; \\\n+} while (0)\n+#define list_add_tail(p, l) \\\n+do { \\\n+\tstruct list_head *__p298 = (p); \\\n+\tstruct list_head *__l298 = (l); \\\n+\t__p298->prev = __l298->prev; \\\n+\t__p298->next = __l298; \\\n+\t__l298->prev->next = __p298; \\\n+\t__l298->prev = __p298; \\\n+} while (0)\n+#define list_for_each(i, l)\t\t\t\t\\\n+\tfor (i = (l)->next; i != (l); i = i->next)\n+#define list_for_each_safe(i, j, l)\t\t\t\\\n+\tfor (i = (l)->next, j = i->next; i != (l);\t\\\n+\t     i = j, j = i->next)\n+#define list_for_each_entry(i, l, name) \\\n+\tfor (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \\\n+\t\ti = list_entry(i->name.next, typeof(*i), name))\n+#define list_for_each_entry_safe(i, j, l, name) \\\n+\tfor (i = list_entry((l)->next, typeof(*i), name), \\\n+\t\tj = list_entry(i->name.next, typeof(*j), name); \\\n+\t\t&i->name != (l); \\\n+\t\ti = j, j = list_entry(j->name.next, typeof(*j), name))\n+#define list_del(i) \\\n+do { \\\n+\t(i)->next->prev = (i)->prev; \\\n+\t(i)->prev->next = (i)->next; \\\n+} while (0)\n+\n+/* Other miscellaneous interfaces our APIs depend on; */\n+\n+#define lower_32_bits(x) ((u32)(x))\n+#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))\n+\n+/* Compiler/type stuff */\n+typedef unsigned int\tgfp_t;\n+typedef uint32_t\tphandle;\n+\n+#define __iomem\n+#define EINTR\t\t4\n+#define ENODEV\t\t19\n+#define GFP_KERNEL\t0\n+#define __raw_readb(p)\t(*(const volatile unsigned char *)(p))\n+#define __raw_readl(p)\t(*(const volatile unsigned int *)(p))\n+#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }\n+\n+\n+\n+/* memcpy() stuff - when you know alignments in advance */\n+#ifdef CONFIG_TRY_BETTER_MEMCPY\n+static inline void copy_words(void *dest, const void *src, size_t sz)\n+{\n+\tu32 *__dest = dest;\n+\tconst u32 *__src = src;\n+\tsize_t __sz = sz >> 2;\n+\n+\tQBMAN_BUG_ON((unsigned long)dest & 0x3);\n+\tQBMAN_BUG_ON((unsigned long)src & 0x3);\n+\tQBMAN_BUG_ON(sz & 0x3);\n+\twhile (__sz--)\n+\t\t*(__dest++) = *(__src++);\n+}\n+\n+static inline void copy_shorts(void *dest, const void *src, size_t sz)\n+{\n+\tu16 *__dest = dest;\n+\tconst u16 *__src = src;\n+\tsize_t __sz = sz >> 1;\n+\n+\tQBMAN_BUG_ON((unsigned 
long)dest & 0x1);\n+\tQBMAN_BUG_ON((unsigned long)src & 0x1);\n+\tQBMAN_BUG_ON(sz & 0x1);\n+\twhile (__sz--)\n+\t\t*(__dest++) = *(__src++);\n+}\n+\n+static inline void copy_bytes(void *dest, const void *src, size_t sz)\n+{\n+\tu8 *__dest = dest;\n+\tconst u8 *__src = src;\n+\n+\twhile (sz--)\n+\t\t*(__dest++) = *(__src++);\n+}\n+#else\n+#define copy_words memcpy\n+#define copy_shorts memcpy\n+#define copy_bytes memcpy\n+#endif\n+\n+/* Completion stuff */\n+#define DECLARE_COMPLETION(n) int n = 0\n+#define complete(n) { *n = 1; }\n+#define wait_for_completion(n) \\\n+do { \\\n+\twhile (!*n) { \\\n+\t\tbman_poll(); \\\n+\t\tqman_poll(); \\\n+\t} \\\n+\t*n = 0; \\\n+} while (0)\n+\n+\n+/* Allocator stuff */\n+#define kmalloc(sz, t)\tmalloc(sz)\n+#define vmalloc(sz)\tmalloc(sz)\n+#define kfree(p)\t{ if (p) free(p); }\n+static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)\n+{\n+\tvoid *ptr = malloc(sz);\n+\n+\tif (ptr)\n+\t\tmemset(ptr, 0, sz);\n+\treturn ptr;\n+}\n+\n+static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)\n+{\n+\tvoid *p;\n+\n+\tif (posix_memalign(&p, 4096, 4096))\n+\t\treturn 0;\n+\tmemset(p, 0, 4096);\n+\treturn (unsigned long)p;\n+}\n+\n+static inline void free_page(unsigned long p)\n+{\n+\tfree((void *)p);\n+}\n+\n+/* Bitfield stuff. */\n+#define BITS_PER_ULONG\t(sizeof(unsigned long) << 3)\n+#define SHIFT_PER_ULONG\t(((1 << 5) == BITS_PER_ULONG) ? 5 : 6)\n+#define BITS_MASK(idx)\t((unsigned long)1 << ((idx) & (BITS_PER_ULONG - 1)))\n+#define BITS_IDX(idx)\t((idx) >> SHIFT_PER_ULONG)\n+static inline unsigned long test_bits(unsigned long mask,\n+\t\t\t\t      volatile unsigned long *p)\n+{\n+\treturn *p & mask;\n+}\n+\n+static inline int test_bit(int idx, volatile unsigned long *bits)\n+{\n+\treturn test_bits(BITS_MASK(idx), bits + BITS_IDX(idx));\n+}\n+\n+static inline void set_bits(unsigned long mask, volatile unsigned long *p)\n+{\n+\t*p |= mask;\n+}\n+\n+static inline void set_bit(int idx, volatile unsigned long *bits)\n+{\n+\tset_bits(BITS_MASK(idx), bits + BITS_IDX(idx));\n+}\n+\n+static inline void clear_bits(unsigned long mask, volatile unsigned long *p)\n+{\n+\t*p &= ~mask;\n+}\n+\n+static inline void clear_bit(int idx, volatile unsigned long *bits)\n+{\n+\tclear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));\n+}\n+\n+static inline unsigned long test_and_set_bits(unsigned long mask,\n+\t\t\t\t\t      volatile unsigned long *p)\n+{\n+\tunsigned long ret = test_bits(mask, p);\n+\n+\tset_bits(mask, p);\n+\treturn ret;\n+}\n+\n+static inline int test_and_set_bit(int idx, volatile unsigned long *bits)\n+{\n+\tint ret = test_bit(idx, bits);\n+\n+\tset_bit(idx, bits);\n+\treturn ret;\n+}\n+\n+static inline int test_and_clear_bit(int idx, volatile unsigned long *bits)\n+{\n+\tint ret = test_bit(idx, bits);\n+\n+\tclear_bit(idx, bits);\n+\treturn ret;\n+}\n+\n+static inline int find_next_zero_bit(unsigned long *bits, int limit, int idx)\n+{\n+\twhile ((++idx < limit) && test_bit(idx, bits))\n+\t\t;\n+\treturn idx;\n+}\n+\n+static inline int find_first_zero_bit(unsigned long *bits, int limit)\n+{\n+\tint idx = 0;\n+\n+\twhile (test_bit(idx, bits) && (++idx < limit))\n+\t\t;\n+\treturn idx;\n+}\n+\n+static inline u64 div64_u64(u64 n, u64 d)\n+{\n+\treturn n / d;\n+}\n+#define atomic_t                rte_atomic32_t\n+#define atomic_read(v)          rte_atomic32_read(v)\n+#define atomic_set(v, i)        rte_atomic32_set(v, i)\n+\n+#define atomic_inc(v)           rte_atomic32_add(v, 1)\n+#define atomic_dec(v)           rte_atomic32_sub(v, 1)\n+\n+#define 
atomic_inc_and_test(v)  rte_atomic32_inc_and_test(v)\n+#define atomic_dec_and_test(v)  rte_atomic32_dec_and_test(v)\n+\n+#define atomic_inc_return(v)    rte_atomic32_add_return(v, 1)\n+#define atomic_dec_return(v)    rte_atomic32_sub_return(v, 1)\n+#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)\n+\n+#endif /* HEADER_COMPAT_H */\ndiff --git a/drivers/common/dpaa2/qbman/include/fsl_qbman_base.h b/drivers/common/dpaa2/qbman/include/fsl_qbman_base.h\nnew file mode 100644\nindex 0000000..bae019f\n--- /dev/null\n+++ b/drivers/common/dpaa2/qbman/include/fsl_qbman_base.h\n@@ -0,0 +1,157 @@\n+/* Copyright (C) 2014 Freescale Semiconductor, Inc.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in the\n+ *       documentation and/or other materials provided with the distribution.\n+ *     * Neither the name of Freescale Semiconductor nor the\n+ *       names of its contributors may be used to endorse or promote products\n+ *       derived from this software without specific prior written permission.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY\n+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY\n+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+#ifndef _FSL_QBMAN_BASE_H\n+#define _FSL_QBMAN_BASE_H\n+\n+typedef uint64_t  dma_addr_t;\n+\n+/**\n+ * DOC: QBMan basic structures\n+ *\n+ * The QBMan block descriptor, software portal descriptor and Frame descriptor\n+ * are defined here.\n+ *\n+ */\n+\n+#define QMAN_REV_4000   0x04000000\n+#define QMAN_REV_4100   0x04010000\n+#define QMAN_REV_4101   0x04010001\n+\n+/**\n+ * struct qbman_block_desc - qbman block descriptor structure\n+ * @ccsr_reg_bar: CCSR register map.\n+ * @irq_rerr: Recoverable error interrupt line.\n+ * @irq_nrerr: Non-recoverable error interrupt line\n+ *\n+ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not\n+ * control this QBMan instance, these values may simply be place-holders. The\n+ * idea is simply that we be able to distinguish between them, eg. 
so that SWP\n+ * descriptors can identify which QBMan instance they belong to.\n+ */\n+struct qbman_block_desc {\n+\tvoid *ccsr_reg_bar;\n+\tint irq_rerr;\n+\tint irq_nrerr;\n+};\n+\n+enum qbman_eqcr_mode {\n+\tqman_eqcr_vb_ring = 2, /* Valid bit, with eqcr in ring mode */\n+\tqman_eqcr_vb_array, /* Valid bit, with eqcr in array mode */\n+};\n+\n+/**\n+ * struct qbman_swp_desc - qbman software portal descriptor structure\n+ * @block: The QBMan instance.\n+ * @cena_bar: Cache-enabled portal register map.\n+ * @cinh_bar: Cache-inhibited portal register map.\n+ * @irq: -1 if unused (or unassigned)\n+ * @idx: SWPs within a QBMan are indexed. -1 if opaque to the user.\n+ * @qman_version: the qman version.\n+ * @eqcr_mode: Select the eqcr mode, currently only valid bit ring mode and\n+ * valid bit array mode are supported.\n+ *\n+ * Descriptor for a QBMan software portal, expressed in terms that make sense to\n+ * the user context. Ie. on MC, this information is likely to be true-physical,\n+ * and instantiated statically at compile-time. On GPP, this information is\n+ * likely to be obtained via \"discovery\" over a partition's \"MC bus\"\n+ * (ie. in response to a MC portal command), and would take into account any\n+ * virtualisation of the GPP user's address space and/or interrupt numbering.\n+ */\n+struct qbman_swp_desc {\n+\tconst struct qbman_block_desc *block;\n+\tuint8_t *cena_bar;\n+\tuint8_t *cinh_bar;\n+\tint irq;\n+\tint idx;\n+\tuint32_t qman_version;\n+\tenum qbman_eqcr_mode eqcr_mode;\n+};\n+\n+/* Driver object for managing a QBMan portal */\n+struct qbman_swp;\n+\n+/**\n+ * struct qbman_fd - basci structure for qbman frame descriptor\n+ * @words: for easier/faster copying the whole FD structure.\n+ * @addr_lo: the lower 32 bits of the address in FD.\n+ * @addr_hi: the upper 32 bits of the address in FD.\n+ * @len: the length field in FD.\n+ * @bpid_offset: represent the bpid and offset fields in FD. offset in\n+ * the MS 16 bits, BPID in the LS 16 bits.\n+ * @frc: frame context\n+ * @ctrl: the 32bit control bits including dd, sc,... va, err.\n+ * @flc_lo: the lower 32bit of flow context.\n+ * @flc_hi: the upper 32bits of flow context.\n+ *\n+ * Place-holder for FDs, we represent it via the simplest form that we need for\n+ * now. Different overlays may be needed to support different options, etc. (It\n+ * is impractical to define One True Struct, because the resulting encoding\n+ * routines (lots of read-modify-writes) would be worst-case performance whether\n+ * or not circumstances required them.)\n+ *\n+ * Note, as with all data-structures exchanged between software and hardware (be\n+ * they located in the portal register map or DMA'd to and from main-memory),\n+ * the driver ensures that the caller of the driver API sees the data-structures\n+ * in host-endianness. \"struct qbman_fd\" is no exception. The 32-bit words\n+ * contained within this structure are represented in host-endianness, even if\n+ * hardware always treats them as little-endian. As such, if any of these fields\n+ * are interpreted in a binary (rather than numerical) fashion by hardware\n+ * blocks (eg. accelerators), then the user should be careful. We illustrate\n+ * with an example;\n+ *\n+ * Suppose the desired behaviour of an accelerator is controlled by the \"frc\"\n+ * field of the FDs that are sent to it. Suppose also that the behaviour desired\n+ * by the user corresponds to an \"frc\" value which is expressed as the literal\n+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. 
So \"frc\" should be the 32-bit\n+ * value in which 0xfe is the first byte and 0xba is the last byte, and as\n+ * hardware is little-endian, this amounts to a 32-bit \"value\" of 0xbaabedfe. If\n+ * the software is little-endian also, this can simply be achieved by setting\n+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set\n+ * frc=0xfeedabba! The best away of avoiding trouble with this sort of thing is\n+ * to treat the 32-bit words as numerical values, in which the offset of a field\n+ * from the beginning of the first byte (as required or generated by hardware)\n+ * is numerically encoded by a left-shift (ie. by raising the field to a\n+ * corresponding power of 2).  Ie. in the current example, software could set\n+ * \"frc\" in the following way, and it would work correctly on both little-endian\n+ * and big-endian operation;\n+ *    fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24);\n+ */\n+struct qbman_fd {\n+\tunion {\n+\t\tuint32_t words[8];\n+\t\tstruct qbman_fd_simple {\n+\t\t\tuint32_t addr_lo;\n+\t\t\tuint32_t addr_hi;\n+\t\t\tuint32_t len;\n+\t\t\tuint32_t bpid_offset;\n+\t\t\tuint32_t frc;\n+\t\t\tuint32_t ctrl;\n+\t\t\tuint32_t flc_lo;\n+\t\t\tuint32_t flc_hi;\n+\t\t} simple;\n+\t};\n+};\n+\n+#endif /* !_FSL_QBMAN_BASE_H */\ndiff --git a/drivers/common/dpaa2/qbman/include/fsl_qbman_portal.h b/drivers/common/dpaa2/qbman/include/fsl_qbman_portal.h\nnew file mode 100644\nindex 0000000..a86ab31\n--- /dev/null\n+++ b/drivers/common/dpaa2/qbman/include/fsl_qbman_portal.h\n@@ -0,0 +1,1090 @@\n+/* Copyright (C) 2014 Freescale Semiconductor, Inc.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in the\n+ *       documentation and/or other materials provided with the distribution.\n+ *     * Neither the name of Freescale Semiconductor nor the\n+ *       names of its contributors may be used to endorse or promote products\n+ *       derived from this software without specific prior written permission.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY\n+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY\n+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+#ifndef _FSL_QBMAN_PORTAL_H\n+#define _FSL_QBMAN_PORTAL_H\n+\n+#include <fsl_qbman_base.h>\n+\n+/**\n+ * DOC - QBMan portal APIs to implement the following functions:\n+ * - Initialize and destroy Software portal object.\n+ * - Read and write Software portal interrupt registers.\n+ * - Enqueue, including setting the enqueue descriptor, and issuing enqueue\n+ *   command etc.\n+ * - Dequeue, including setting the dequeue descriptor, issuing dequeue command,\n+ *   parsing the dequeue response in DQRR and memeory, parsing the state change\n+ *   notifications etc.\n+ * - Release, including setting the release descriptor, and issuing the buffer\n+ *   release command.\n+ * - Acquire, acquire the buffer from the given buffer pool.\n+ * - FQ management.\n+ * - Channel management, enable/disable CDAN with or without context.\n+ */\n+\n+/**\n+ * qbman_swp_init() - Create a functional object representing the given\n+ * QBMan portal descriptor.\n+ * @d: the given qbman swp descriptor\n+ *\n+ * Return qbman_swp portal object for success, NULL if the object cannot\n+ * be created.\n+ */\n+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);\n+\n+/**\n+ * qbman_swp_finish() - Create and destroy a functional object representing\n+ * the given QBMan portal descriptor.\n+ * @p: the qbman_swp object to be destroyed.\n+ *\n+ */\n+void qbman_swp_finish(struct qbman_swp *p);\n+\n+/**\n+ * qbman_swp_get_desc() - Get the descriptor of the given portal object.\n+ * @p: the given portal object.\n+ *\n+ * Return the descriptor for this portal.\n+ */\n+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);\n+\n+\t/**************/\n+\t/* Interrupts */\n+\t/**************/\n+\n+/* EQCR ring interrupt */\n+#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)\n+/* Enqueue command dispatched interrupt */\n+#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)\n+/* DQRR non-empty interrupt */\n+#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)\n+/* RCR ring interrupt */\n+#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)\n+/* Release command dispatched interrupt */\n+#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)\n+/* Volatile dequeue command interrupt */\n+#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)\n+\n+/**\n+ * qbman_swp_interrupt_get_vanish() - Get the data in software portal\n+ * interrupt status disable register.\n+ * @p: the given software portal object.\n+ *\n+ * Return the settings in SWP_ISDR register.\n+ */\n+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);\n+\n+/**\n+ * qbman_swp_interrupt_set_vanish() - Set the data in software portal\n+ * interrupt status disable register.\n+ * @p: the given software portal object.\n+ * @mask: The value to set in SWP_IDSR register.\n+ */\n+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);\n+\n+/**\n+ * qbman_swp_interrupt_read_status() - Get the data in software portal\n+ * interrupt status register.\n+ * @p: the given software portal 
object.\n+ *\n+ * Return the settings in SWP_ISR register.\n+ */\n+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);\n+\n+/**\n+ * qbman_swp_interrupt_clear_status() - Set the data in software portal\n+ * interrupt status register.\n+ * @p: the given software portal object.\n+ * @mask: The value to set in SWP_ISR register.\n+ */\n+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);\n+\n+/**\n+ * qbman_swp_interrupt_get_trigger() - Get the data in software portal\n+ * interrupt enable register.\n+ * @p: the given software portal object.\n+ *\n+ * Return the settings in SWP_IER register.\n+ */\n+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);\n+\n+/**\n+ * qbman_swp_interrupt_set_trigger() - Set the data in software portal\n+ * interrupt enable register.\n+ * @p: the given software portal object.\n+ * @mask: The value to set in SWP_IER register.\n+ */\n+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);\n+\n+/**\n+ * qbman_swp_interrupt_get_inhibit() - Get the data in software portal\n+ * interrupt inhibit register.\n+ * @p: the given software portal object.\n+ *\n+ * Return the settings in SWP_IIR register.\n+ */\n+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);\n+\n+/**\n+ * qbman_swp_interrupt_set_inhibit() - Set the data in software portal\n+ * interrupt inhibit register.\n+ * @p: the given software portal object.\n+ * @mask: The value to set in SWP_IIR register.\n+ */\n+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);\n+\n+\t/************/\n+\t/* Dequeues */\n+\t/************/\n+\n+/**\n+ * struct qbman_result - structure for qbman dequeue response and/or\n+ * notification.\n+ * @dont_manipulate_directly: the 16 32bit data to represent the whole\n+ * possible qbman dequeue result.\n+ */\n+struct qbman_result {\n+\tuint32_t dont_manipulate_directly[16];\n+};\n+\n+/* TODO:\n+ *A DQRI interrupt can be generated when there are dequeue results on the\n+ * portal's DQRR (this mechanism does not deal with \"pull\" dequeues to\n+ * user-supplied 'storage' addresses). There are two parameters to this\n+ * interrupt source, one is a threshold and the other is a timeout. The\n+ * interrupt will fire if either the fill-level of the ring exceeds 'thresh', or\n+ * if the ring has been non-empty for been longer than 'timeout' nanoseconds.\n+ * For timeout, an approximation to the desired nanosecond-granularity value is\n+ * made, so there are get and set APIs to allow the user to see what actual\n+ * timeout is set (compared to the timeout that was requested).\n+ */\n+int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);\n+int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);\n+int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);\n+\n+/* ------------------- */\n+/* Push-mode dequeuing */\n+/* ------------------- */\n+\n+/* The user of a portal can enable and disable push-mode dequeuing of up to 16\n+ * channels independently. 
It does not specify this toggling by channel IDs, but\n+ * rather by specifying the index (from 0 to 15) that has been mapped to the\n+ * desired channel.\n+ */\n+\n+/**\n+ * qbman_swp_push_get() - Get the push dequeue setup.\n+ * @s: the software portal object.\n+ * @channel_idx: the channel index to query.\n+ * @enabled: returned boolean to show whether the push dequeue is enabled for\n+ * the given channel.\n+ */\n+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);\n+\n+/**\n+ * qbman_swp_push_set() - Enable or disable push dequeue.\n+ * @s: the software portal object.\n+ * @channel_idx: the channel index..\n+ * @enable: enable or disable push dequeue.\n+ *\n+ * The user of a portal can enable and disable push-mode dequeuing of up to 16\n+ * channels independently. It does not specify this toggling by channel IDs, but\n+ * rather by specifying the index (from 0 to 15) that has been mapped to the\n+ * desired channel.\n+ */\n+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);\n+\n+/* ------------------- */\n+/* Pull-mode dequeuing */\n+/* ------------------- */\n+\n+/**\n+ * struct qbman_pull_desc - the structure for pull dequeue descriptor\n+ * @dont_manipulate_directly: the 6 32bit data to represent the whole\n+ * possible settings for pull dequeue descriptor.\n+ */\n+struct qbman_pull_desc {\n+\tuint32_t dont_manipulate_directly[6];\n+};\n+\n+enum qbman_pull_type_e {\n+\t/* dequeue with priority precedence, respect intra-class scheduling */\n+\tqbman_pull_type_prio = 1,\n+\t/* dequeue with active FQ precedence, respect ICS */\n+\tqbman_pull_type_active,\n+\t/* dequeue with active FQ precedence, no ICS */\n+\tqbman_pull_type_active_noics\n+};\n+\n+/**\n+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to\n+ * default/starting state.\n+ * @d: the pull dequeue descriptor to be cleared.\n+ */\n+void qbman_pull_desc_clear(struct qbman_pull_desc *d);\n+\n+/**\n+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage\n+ * @d: the pull dequeue descriptor to be set.\n+ * @storage: the pointer of the memory to store the dequeue result.\n+ * @storage_phys: the physical address of the storage memory.\n+ * @stash: to indicate whether write allocate is enabled.\n+ *\n+ * If not called, or if called with 'storage' as NULL, the result pull dequeues\n+ * will produce results to DQRR. If 'storage' is non-NULL, then results are\n+ * produced to the given memory location (using the physical/DMA address which\n+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not\n+ * those writes to main-memory express a cache-warming attribute.\n+ */\n+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,\n+\t\t\t\t struct qbman_result *storage,\n+\t\t\t\t dma_addr_t storage_phys,\n+\t\t\t\t int stash);\n+/**\n+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.\n+ * @d: the pull dequeue descriptor to be set.\n+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive.\n+ */\n+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,\n+\t\t\t\t   uint8_t numframes);\n+/**\n+ * qbman_pull_desc_set_token() - Set dequeue token for pull command\n+ * @d: the dequeue descriptor\n+ * @token: the token to be set\n+ *\n+ * token is the value that shows up in the dequeue response that can be used to\n+ * detect when the results have been published. 
The easiest technique is to zero\n+ * result \"storage\" before issuing a dequeue, and use any non-zero 'token' value\n+ */\n+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);\n+\n+/* Exactly one of the following descriptor \"actions\" should be set. (Calling any\n+ * one of these will replace the effect of any prior call to one of these.)\n+ * - pull dequeue from the given frame queue (FQ)\n+ * - pull dequeue from any FQ in the given work queue (WQ)\n+ * - pull dequeue from any FQ in any WQ in the given channel\n+ */\n+/**\n+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.\n+ * @fqid: the frame queue index of the given FQ.\n+ */\n+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);\n+\n+/**\n+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.\n+ * @wqid: composed of channel id and wqid within the channel.\n+ * @dct: the dequeue command type.\n+ */\n+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,\n+\t\t\t    enum qbman_pull_type_e dct);\n+\n+/* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command\n+ * dequeues.\n+ * @chid: the channel id to be dequeued.\n+ * @dct: the dequeue command type.\n+ */\n+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,\n+\t\t\t\t enum qbman_pull_type_e dct);\n+\n+/**\n+ * qbman_swp_pull() - Issue the pull dequeue command\n+ * @s: the software portal object.\n+ * @d: the software portal descriptor which has been configured with\n+ * the set of qbman_pull_desc_set_*() calls.\n+ *\n+ * Return 0 for success, and -EBUSY if the software portal is not ready\n+ * to do pull dequeue.\n+ */\n+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);\n+\n+/* -------------------------------- */\n+/* Polling DQRR for dequeue results */\n+/* -------------------------------- */\n+\n+/**\n+ * qbman_swp_dqrr_next() - Get an valid DQRR entry.\n+ * @s: the software portal object.\n+ *\n+ * Return NULL if there are no unconsumed DQRR entries. 
Return a DQRR entry\n+ * only once, so repeated calls can return a sequence of DQRR entries, without\n+ * requiring they be consumed immediately or in any particular order.\n+ */\n+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s);\n+\n+/**\n+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from\n+ * qbman_swp_dqrr_next().\n+ * @s: the software portal object.\n+ * @dq: the DQRR entry to be consumed.\n+ */\n+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);\n+\n+/**\n+ * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr\n+ * @dqrr: the given dqrr object.\n+ *\n+ * Return dqrr index.\n+ */\n+uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr);\n+\n+/**\n+ * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the\n+ * given portal\n+ * @s: the given portal.\n+ * @idx: the dqrr index.\n+ *\n+ * Return dqrr entry object.\n+ */\n+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);\n+\n+/* ------------------------------------------------- */\n+/* Polling user-provided storage for dequeue results */\n+/* ------------------------------------------------- */\n+\n+/**\n+ * qbman_result_has_new_result() - Check and get the dequeue response from the\n+ * dq storage memory set in pull dequeue command\n+ * @s: the software portal object.\n+ * @dq: the dequeue result read from the memory.\n+ *\n+ * Only used for user-provided storage of dequeue results, not DQRR. For\n+ * efficiency purposes, the driver will perform any required endianness\n+ * conversion to ensure that the user's dequeue result storage is in host-endian\n+ * format (whether or not that is the same as the little-endian format that\n+ * hardware DMA'd to the user's storage). As such, once the user has called\n+ * qbman_result_has_new_result() and been returned a valid dequeue result,\n+ * they should not call it again on the same memory location (except of course\n+ * if another dequeue command has been executed to produce a new result to that\n+ * location).\n+ *\n+ * Return 1 if a valid dequeue result was obtained, or 0 otherwise.\n+ */\n+int qbman_result_has_new_result(struct qbman_swp *s,\n+\t\t\t\tconst struct qbman_result *dq);\n+\n+/* -------------------------------------------------------- */\n+/* Parsing dequeue entries (DQRR and user-provided storage) */\n+/* -------------------------------------------------------- */\n+\n+/**\n+ * qbman_result_is_DQ() - Check whether the dequeue result is a dequeue response\n+ * @dq: the dequeue result to be checked.\n+ *\n+ * DQRR entries may contain non-dequeue results, i.e. notifications.\n+ */\n+int qbman_result_is_DQ(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_is_SCN() - Check whether the dequeue result is a notification\n+ * @dq: the dequeue result to be checked.\n+ *\n+ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are \"state change\n+ * notifications\" of one type or another. 
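As a minimal dispatch sketch\n+ * (illustrative only; handle_fd() and handle_scn() are hypothetical helpers):\n+ *\n+ *   const struct qbman_result *r = qbman_swp_dqrr_next(s);\n+ *\n+ *   if (r) {\n+ *           if (qbman_result_is_DQ(r))\n+ *                   handle_fd(qbman_result_DQ_fd(r));\n+ *           else\n+ *                   handle_scn(r);\n+ *           qbman_swp_dqrr_consume(s, r);\n+ *   }\n+ *\n+ * 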
Some APIs apply to all of them, of the\n+ * form qbman_result_SCN_***().\n+ */\n+static inline int qbman_result_is_SCN(const struct qbman_result *dq)\n+{\n+\treturn !qbman_result_is_DQ(dq);\n+}\n+\n+/* Recognise different notification types, only required if the user allows for\n+ * these to occur, and cares about them when they do.\n+ */\n+\n+/**\n+ * qbman_result_is_FQDAN() - Check for FQ Data Availability\n+ * @dq: the qbman_result object.\n+ *\n+ * Return 1 if this is FQDAN.\n+ */\n+int qbman_result_is_FQDAN(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_is_CDAN() - Check for Channel Data Availability\n+ * @dq: the qbman_result object to check.\n+ *\n+ * Return 1 if this is CDAN.\n+ */\n+int qbman_result_is_CDAN(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_is_CSCN() - Check for Congestion State Change\n+ * @dq: the qbman_result object to check.\n+ *\n+ * Return 1 if this is CSCN.\n+ */\n+int qbman_result_is_CSCN(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.\n+ * @dq: the qbman_result object to check.\n+ *\n+ * Return 1 if this is BPSCN.\n+ */\n+int qbman_result_is_BPSCN(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_is_CGCU() - Check for Congestion Group Count Update.\n+ * @dq: the qbman_result object to check.\n+ *\n+ * Return 1 if this is CGCU.\n+ */\n+int qbman_result_is_CGCU(const struct qbman_result *dq);\n+\n+/* Frame queue state change notifications; (FQDAN in theory counts too as it\n+ * leaves a FQ parked, but it is primarily a data availability notification)\n+ */\n+\n+/**\n+ * qbman_result_is_FQRN() - Check for FQ Retirement Notification.\n+ * @dq: the qbman_result object to check.\n+ *\n+ * Return 1 if this is FQRN.\n+ */\n+int qbman_result_is_FQRN(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_is_FQRNI() - Check for FQ Retirement Notification Immediate\n+ * @dq: the qbman_result object to check.\n+ *\n+ * Return 1 if this is FQRNI.\n+ */\n+int qbman_result_is_FQRNI(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_is_FQPN() - Check for FQ Park Notification\n+ * @dq: the qbman_result object to check.\n+ *\n+ * Return 1 if this is FQPN.\n+ */\n+int qbman_result_is_FQPN(const struct qbman_result *dq);\n+\n+/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)\n+ */\n+/* FQ empty */\n+#define QBMAN_DQ_STAT_FQEMPTY       0x80\n+/* FQ held active */\n+#define QBMAN_DQ_STAT_HELDACTIVE    0x40\n+/* FQ force eligible */\n+#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20\n+/* Valid frame */\n+#define QBMAN_DQ_STAT_VALIDFRAME    0x10\n+/* FQ ODP enable */\n+#define QBMAN_DQ_STAT_ODPVALID      0x04\n+/* Volatile dequeue */\n+#define QBMAN_DQ_STAT_VOLATILE      0x02\n+/* volatile dequeue command is expired */\n+#define QBMAN_DQ_STAT_EXPIRED       0x01\n+\n+/**\n+ * qbman_result_DQ_flags() - Get the STAT field of dequeue response\n+ * @dq: the dequeue result.\n+ *\n+ * Return the STAT field.\n+ */\n+uint32_t qbman_result_DQ_flags(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull\n+ * command.\n+ * @dq: the dequeue result.\n+ *\n+ * Return 1 for volatile(pull) dequeue, 0 for static dequeue.\n+ */\n+static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)\n+{\n+\treturn (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);\n+}\n+\n+/**\n+ * qbman_result_DQ_is_pull_complete() - Check whether the pull command is\n+ * completed.\n+ * @dq: the dequeue result.\n+ *\n+ * Return non-zero if the pull command has completed.\n+ 
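*\n+ * A minimal busy-wait sketch (illustrative only; assumes a prior\n+ * qbman_swp_pull() on portal 's' whose descriptor pointed at 'storage'):\n+ *\n+ *   const struct qbman_result *res = storage;\n+ *   int last;\n+ *\n+ *   while (!qbman_result_has_new_result(s, res))\n+ *           ;\n+ *   last = qbman_result_DQ_is_pull_complete(res);\n+ 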
*/\n+static inline int qbman_result_DQ_is_pull_complete(\n+\t\t\t\t\tconst struct qbman_result *dq)\n+{\n+\treturn (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);\n+}\n+\n+/**\n+ * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response\n+ * seqnum is valid only if VALIDFRAME flag is TRUE\n+ * @dq: the dequeue result.\n+ *\n+ * Return seqnum.\n+ */\n+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_DQ_odpid() - Get the odpid field in dequeue response\n+ * odpid is valid only if ODPVALID flag is TRUE.\n+ * @dq: the dequeue result.\n+ *\n+ * Return odpid.\n+ */\n+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_DQ_fqid() - Get the fqid in dequeue response\n+ * @dq: the dequeue result.\n+ *\n+ * Return fqid.\n+ */\n+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_DQ_byte_count() - Get the byte count in dequeue response\n+ * @dq: the dequeue result.\n+ *\n+ * Return the byte count remaining in the FQ.\n+ */\n+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_DQ_frame_count - Get the frame count in dequeue response\n+ * @dq: the dequeue result.\n+ *\n+ * Return the frame count remaining in the FQ.\n+ */\n+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response\n+ * @dq: the dequeue result.\n+ *\n+ * Return the frame queue context.\n+ */\n+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);\n+\n+/**\n+ * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response\n+ * @dq: the dequeue result.\n+ *\n+ * Return the frame descriptor.\n+ */\n+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);\n+\n+/* State-change notifications (FQDAN/CDAN/CSCN/...). */\n+\n+/**\n+ * qbman_result_SCN_state() - Get the state field in State-change notification\n+ * @scn: the state change notification.\n+ *\n+ * Return the state in the notification.\n+ */\n+uint8_t qbman_result_SCN_state(const struct qbman_result *scn);\n+\n+/**\n+ * qbman_result_SCN_rid() - Get the resource id from the notification\n+ * @scn: the state change notification.\n+ *\n+ * Return the resource id.\n+ */\n+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);\n+\n+/**\n+ * qbman_result_SCN_ctx() - get the context from the notification\n+ * @scn: the state change notification.\n+ *\n+ * Return the context.\n+ */\n+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);\n+\n+/**\n+ * qbman_result_SCN_state_in_mem() - Get the state in notification written\n+ * in memory\n+ * @scn: the state change notification.\n+ *\n+ * Return the state.\n+ */\n+uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn);\n+\n+/**\n+ * qbman_result_SCN_rid_in_mem() - Get the resource id in notification written\n+ * in memory.\n+ * @scn: the state change notification.\n+ *\n+ * Return the resource id.\n+ */\n+uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn);
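\n+\n+/* As a parsing illustration (sketch only; handle_congestion() is a\n+ * hypothetical helper, and 'r' a result already known to be a notification):\n+ *\n+ *   if (qbman_result_is_CSCN(r))\n+ *           handle_congestion(qbman_result_CSCN_cgid(r),\n+ *                             qbman_result_SCN_state(r));\n+ */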
\n+\n+/* Type-specific \"resource IDs\". Mainly for illustration purposes, though it\n+ * also gives the appropriate type widths.\n+ */\n+/* Get the FQID from the FQDAN */\n+#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)\n+/* Get the FQID from the FQRN */\n+#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)\n+/* Get the FQID from the FQRNI */\n+#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)\n+/* Get the FQID from the FQPN */\n+#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)\n+/* Get the channel ID from the CDAN */\n+#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))\n+/* Get the CGID from the CSCN */\n+#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))\n+\n+/**\n+ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN\n+ * @scn: the state change notification.\n+ *\n+ * Return the buffer pool id.\n+ */\n+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);\n+\n+/**\n+ * qbman_result_bpscn_has_free_bufs() - Check whether there are free\n+ * buffers in the pool from BPSCN.\n+ * @scn: the state change notification.\n+ *\n+ * Return non-zero if there are free buffers in the pool.\n+ */\n+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);\n+\n+/**\n+ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the\n+ * buffer pool is depleted.\n+ * @scn: the state change notification.\n+ *\n+ * Return the status of buffer pool depletion.\n+ */\n+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);\n+\n+/**\n+ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer\n+ * pool is surplus or not.\n+ * @scn: the state change notification.\n+ *\n+ * Return the status of buffer pool surplus.\n+ */\n+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);\n+\n+/**\n+ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message\n+ * @scn: the state change notification.\n+ *\n+ * Return the BPSCN context.\n+ */\n+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);\n+\n+/* Parsing CGCU */\n+/**\n+ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid\n+ * @scn: the state change notification.\n+ *\n+ * Return the CGCU resource id.\n+ */\n+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);\n+\n+/**\n+ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU\n+ * @scn: the state change notification.\n+ *\n+ * Return instantaneous count in the CGCU notification.\n+ */\n+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);\n+\n+\t/************/\n+\t/* Enqueues */\n+\t/************/\n+\n+/**\n+ * struct qbman_eq_desc - structure of enqueue descriptor\n+ * @dont_manipulate_directly: the 8 32-bit words representing all possible\n+ * enqueue settings in the descriptor.\n+ */\n+struct qbman_eq_desc {\n+\tuint32_t dont_manipulate_directly[8];\n+};\n+\n+/**\n+ * struct qbman_eq_response - structure of enqueue response\n+ * @dont_manipulate_directly: the 16 32-bit words representing the whole\n+ * enqueue response.\n+ */\n+struct qbman_eq_response {\n+\tuint32_t dont_manipulate_directly[16];\n+};\n+\n+/**\n+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to\n+ * default/starting state.\n+ * @d: the given enqueue descriptor.\n+ */\n+void qbman_eq_desc_clear(struct qbman_eq_desc *d);\n+\n+/* Exactly one of the following descriptor \"actions\" should be set. 
(Calling\n+ * any one of these will replace the effect of any prior call to one of these.)\n+ * - enqueue without order-restoration\n+ * - enqueue with order-restoration\n+ * - fill a hole in the order-restoration sequence, without any enqueue\n+ * - advance NESN (Next Expected Sequence Number), without any enqueue\n+ * 'respond_success' indicates whether an enqueue response should be DMA'd\n+ * after success (otherwise a response is DMA'd only after failure).\n+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to\n+ * be enqueued.\n+ */\n+\n+/**\n+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp\n+ * @d: the enqueue descriptor.\n+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with\n+ * rejections returned on a FQ.\n+ */\n+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);\n+/**\n+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor\n+ * @d: the enqueue descriptor.\n+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with\n+ * rejections returned on a FQ.\n+ * @opr_id: the order point record id.\n+ * @seqnum: the order restoration sequence number.\n+ * @incomplete: indicates whether more fragments using the same sequence number\n+ * are yet to be enqueued.\n+ */\n+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,\n+\t\t\t   uint32_t opr_id, uint32_t seqnum, int incomplete);\n+\n+/**\n+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence\n+ * without any enqueue\n+ * @d: the enqueue descriptor.\n+ * @opr_id: the order point record id.\n+ * @seqnum: the order restoration sequence number.\n+ */\n+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,\n+\t\t\t\tuint32_t seqnum);\n+\n+/**\n+ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)\n+ * without any enqueue\n+ * @d: the enqueue descriptor.\n+ * @opr_id: the order point record id.\n+ * @seqnum: the order restoration sequence number.\n+ */\n+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,\n+\t\t\t\tuint32_t seqnum);\n+/**\n+ * qbman_eq_desc_set_response() - Set the enqueue response info.\n+ * @d: the enqueue descriptor\n+ * @storage_phys: the physical address of the enqueue response in memory.\n+ * @stash: indicates whether write allocation is enabled.\n+ *\n+ * In the case where an enqueue response is DMA'd, this determines where that\n+ * response should go. (The physical/DMA address is given for hardware's\n+ * benefit, but software should interpret it as a \"struct qbman_eq_response\"\n+ * data structure.) 'stash' controls whether or not the write to main-memory\n+ * expresses a cache-warming attribute.\n+ */\n+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,\n+\t\t\t\tdma_addr_t storage_phys,\n+\t\t\t\tint stash);\n+\n+/**\n+ * qbman_eq_desc_set_token() - Set token for the enqueue command\n+ * @d: the enqueue descriptor\n+ * @token: the token to be set.\n+ *\n+ * token is the value that shows up in an enqueue response that can be used to\n+ * detect when the results have been published. The easiest technique is to zero\n+ * result \"storage\" before issuing an enqueue, and use any non-zero 'token'\n+ * value.\n+ */\n+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
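\n+\n+/* Putting the response plumbing together, a typical pattern mirrors the pull\n+ * token idiom (a sketch only; 'ed' is a cleared struct qbman_eq_desc, 'rsp' a\n+ * struct qbman_eq_response and 'rsp_phys' its DMA address):\n+ *\n+ *   memset(&rsp, 0, sizeof(rsp));\n+ *   qbman_eq_desc_set_response(&ed, rsp_phys, 0);\n+ *   qbman_eq_desc_set_token(&ed, 0x99);\n+ */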
\n+\n+/**\n+ * Exactly one of the following descriptor \"targets\" should be set. (Calling any\n+ * one of these will replace the effect of any prior call to one of these.)\n+ * - enqueue to a frame queue\n+ * - enqueue to a queuing destination\n+ * Note that none of these will have any effect if the \"action\" type has been\n+ * set to \"orp_hole\" or \"orp_nesn\".\n+ */\n+/**\n+ * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command\n+ * @d: the enqueue descriptor\n+ * @fqid: the id of the frame queue to be enqueued.\n+ */\n+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);\n+\n+/**\n+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.\n+ * @d: the enqueue descriptor\n+ * @qdid: the id of the queuing destination to be enqueued.\n+ * @qd_bin: the queuing destination bin\n+ * @qd_prio: the queuing destination priority.\n+ */\n+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,\n+\t\t\t  uint32_t qd_bin, uint32_t qd_prio);\n+\n+/**\n+ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt\n+ * @d: the enqueue descriptor\n+ * @enable: boolean to enable/disable EQDI\n+ *\n+ * Determines whether or not the portal's EQDI interrupt source should be\n+ * asserted after the enqueue command is completed.\n+ */\n+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);\n+\n+/**\n+ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.\n+ * @d: the enqueue descriptor.\n+ * @enable: enable/disable DCA mode.\n+ * @dqrr_idx: DCAP_CI, the DCAP consumer index.\n+ * @park: determines whether to park the FQ or not\n+ *\n+ * Determines whether or not a portal DQRR entry should be consumed once the\n+ * enqueue command is completed. (And if so, and the DQRR entry corresponds to a\n+ * held-active (order-preserving) FQ, whether the FQ should be parked instead of\n+ * being rescheduled.)\n+ */\n+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,\n+\t\t\t   uint32_t dqrr_idx, int park);\n+\n+/**\n+ * qbman_swp_enqueue() - Issue an enqueue command.\n+ * @s: the software portal used for enqueue.\n+ * @d: the enqueue descriptor.\n+ * @fd: the frame descriptor to be enqueued.\n+ *\n+ * Please note that 'fd' should only be NULL if the \"action\" of the\n+ * descriptor is \"orp_hole\" or \"orp_nesn\".\n+ *\n+ * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.\n+ */\n+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,\n+\t\t      const struct qbman_fd *fd);\n+\n+/* TODO:\n+ * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.\n+ * @s: the software portal.\n+ * @thresh: the threshold to trigger the EQRI interrupt.\n+ *\n+ * An EQRI interrupt can be generated when the fill-level of EQCR falls below\n+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.\n+ */\n+int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
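\n+\n+/* Pulling the enqueue APIs above together, a minimal single-frame enqueue\n+ * might look like this (an illustrative sketch, not part of this patch;\n+ * assumes 's' is an initialised portal, 'fd' a prepared frame descriptor and\n+ * 'fqid' a valid frame queue id):\n+ *\n+ *   struct qbman_eq_desc ed;\n+ *\n+ *   qbman_eq_desc_clear(&ed);\n+ *   qbman_eq_desc_set_no_orp(&ed, 0);\n+ *   qbman_eq_desc_set_fq(&ed, fqid);\n+ *   while (qbman_swp_enqueue(s, &ed, fd) == -EBUSY)\n+ *           ;   retry until EQCR space is available\n+ */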
\n+\n+\t/*******************/\n+\t/* Buffer releases */\n+\t/*******************/\n+/**\n+ * struct qbman_release_desc - The structure for buffer release descriptor\n+ * @dont_manipulate_directly: the 32-bit word representing all possible\n+ * settings of the qbman release descriptor.\n+ */\n+struct qbman_release_desc {\n+\tuint32_t dont_manipulate_directly[1];\n+};\n+\n+/**\n+ * qbman_release_desc_clear() - Clear the contents of a descriptor to\n+ * default/starting state.\n+ * @d: the qbman release descriptor.\n+ */\n+void qbman_release_desc_clear(struct qbman_release_desc *d);\n+\n+/**\n+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to\n+ * @d: the qbman release descriptor.\n+ */\n+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid);\n+\n+/**\n+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI\n+ * interrupt source should be asserted after the release command is completed.\n+ * @d: the qbman release descriptor.\n+ */\n+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);\n+\n+/**\n+ * qbman_swp_release() - Issue a buffer release command.\n+ * @s: the software portal object.\n+ * @d: the release descriptor.\n+ * @buffers: a pointer to the buffer addresses to be released.\n+ * @num_buffers: number of buffers to be released, must be less than 8.\n+ *\n+ * Return 0 for success, -EBUSY if the release command ring is not ready.\n+ */\n+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,\n+\t\t      const uint64_t *buffers, unsigned int num_buffers);\n+\n+/* TODO:\n+ * qbman_swp_release_thresh() - Set threshold for RCRI interrupt\n+ * @s: the software portal.\n+ * @thresh: the threshold.\n+ * An RCRI interrupt can be generated when the fill-level of RCR falls below\n+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.\n+ */\n+int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);\n+\n+\t/*******************/\n+\t/* Buffer acquires */\n+\t/*******************/\n+/**\n+ * qbman_swp_acquire() - Issue a buffer acquire command.\n+ * @s: the software portal object.\n+ * @bpid: the buffer pool index.\n+ * @buffers: a pointer to storage for the acquired buffer address(es).\n+ * @num_buffers: number of buffers to be acquired, must be less than 8.\n+ *\n+ * Return 0 for success, or negative error code if the acquire command\n+ * fails.\n+ */\n+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,\n+\t\t      unsigned int num_buffers);
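\n+\n+/* A round-trip sketch for pool buffers (illustrative only, following the\n+ * documented return contract; 'bufs' is a uint64_t array and 'bpid' a valid\n+ * pool id):\n+ *\n+ *   struct qbman_release_desc rd;\n+ *   uint64_t bufs[7];\n+ *\n+ *   if (qbman_swp_acquire(s, bpid, bufs, 7) == 0) {\n+ *           qbman_release_desc_clear(&rd);\n+ *           qbman_release_desc_set_bpid(&rd, bpid);\n+ *           qbman_swp_release(s, &rd, bufs, 7);\n+ *   }\n+ */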
\n+\n+\t/*****************/\n+\t/* FQ management */\n+\t/*****************/\n+/**\n+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state.\n+ * @s: the software portal object.\n+ * @fqid: the index of frame queue to be scheduled.\n+ *\n+ * There are a couple of different ways that a FQ can end up in the parked\n+ * state; this schedules it.\n+ *\n+ * Return 0 for success, or negative error code for failure.\n+ */\n+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);\n+\n+/**\n+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state.\n+ * @s: the software portal object.\n+ * @fqid: the index of frame queue to be forced.\n+ *\n+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled\n+ * and thus be available for selection by any channel-dequeuing behaviour (push\n+ * or pull). If the FQ is subsequently \"dequeued\" from the channel and is still\n+ * empty at the time this happens, the resulting dq_entry will have no FD.\n+ * (qbman_result_DQ_fd() will return NULL.)\n+ *\n+ * Return 0 for success, or negative error code for failure.\n+ */\n+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);\n+\n+/**\n+ * These functions change the FQ flow-control state between XON/XOFF. (The\n+ * default is XON.) This setting doesn't affect enqueues to the FQ, just\n+ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when\n+ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is\n+ * changed to XOFF after it had already become truly-scheduled to a channel, and\n+ * a pull dequeue of that channel occurs that selects that FQ for dequeuing,\n+ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will\n+ * return NULL.)\n+ */\n+/**\n+ * qbman_swp_fq_xon() - XON the frame queue.\n+ * @s: the software portal object.\n+ * @fqid: the index of frame queue.\n+ *\n+ * Return 0 for success, or negative error code for failure.\n+ */\n+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);\n+/**\n+ * qbman_swp_fq_xoff() - XOFF the frame queue.\n+ * @s: the software portal object.\n+ * @fqid: the index of frame queue.\n+ *\n+ * Return 0 for success, or negative error code for failure.\n+ */\n+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);\n+\n+\t/**********************/\n+\t/* Channel management */\n+\t/**********************/\n+\n+/**\n+ * If the user has been allocated a channel object that is going to generate\n+ * CDANs to another channel, then these functions will be necessary.\n+ * CDAN-enabled channels only generate a single CDAN notification, after which\n+ * they need to be re-enabled before they'll generate another. (The idea is\n+ * that pull dequeuing will occur in reaction to the CDAN, followed by a\n+ * re-enable step.) 
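A typical reaction sequence might be (sketch only;\n+ * 'ctx' is an arbitrary 64-bit cookie delivered back in each CDAN):\n+ *\n+ *   qbman_swp_CDAN_set_context_enable(s, channelid, ctx);\n+ *   ...on CDAN arrival, pull-dequeue the channel, then...\n+ *   qbman_swp_CDAN_enable(s, channelid);\n+ *\n+ * 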
Each function generates a distinct command to hardware, so a\n+ * combination function is provided if the user wishes to modify the \"context\"\n+ * (which shows up in each CDAN message) each time they re-enable, as a single\n+ * command to hardware.\n+ */\n+\n+/**\n+ * qbman_swp_CDAN_set_context() - Set CDAN context\n+ * @s: the software portal object.\n+ * @channelid: the channel index.\n+ * @ctx: the context to be set in CDAN.\n+ *\n+ * Return 0 for success, or negative error code for failure.\n+ */\n+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,\n+\t\t\t       uint64_t ctx);\n+\n+/**\n+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel.\n+ * @s: the software portal object.\n+ * @channelid: the index of the channel to generate CDAN.\n+ *\n+ * Return 0 for success, or negative error code for failure.\n+ */\n+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);\n+\n+/**\n+ * qbman_swp_CDAN_disable() - disable CDAN for the channel.\n+ * @s: the software portal object.\n+ * @channelid: the index of the channel to generate CDAN.\n+ *\n+ * Return 0 for success, or negative error code for failure.\n+ */\n+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);\n+\n+/**\n+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN\n+ * @s: the software portal object.\n+ * @channelid: the index of the channel to generate CDAN.\n+ * @ctx: the context set in CDAN.\n+ *\n+ * Return 0 for success, or negative error code for failure.\n+ */\n+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,\n+\t\t\t\t      uint64_t ctx);\n+int qbman_swp_fill_ring(struct qbman_swp *s,\n+\t\t\tconst struct qbman_eq_desc *d,\n+\t\t       const struct qbman_fd *fd,\n+\t\t       uint8_t burst_index);\n+int qbman_swp_flush_ring(struct qbman_swp *s);\n+void qbman_sync(void);\n+int qbman_swp_send_multiple(struct qbman_swp *s,\n+\t\t\t    const struct qbman_eq_desc *d,\n+\t\t\t    const struct qbman_fd *fd,\n+\t\t\t    int frames_to_send);\n+\n+int qbman_check_command_complete(struct qbman_swp *s,\n+\t\t\t\t const struct qbman_result *dq);\n+\n+int qbman_get_version(void);\n+#endif /* !_FSL_QBMAN_PORTAL_H */\ndiff --git a/drivers/common/dpaa2/qbman/qbman_portal.c b/drivers/common/dpaa2/qbman/qbman_portal.c\nnew file mode 100644\nindex 0000000..224f479\n--- /dev/null\n+++ b/drivers/common/dpaa2/qbman/qbman_portal.c\n@@ -0,0 +1,1492 @@\n+/* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in the\n+ *       documentation and/or other materials provided with the distribution.\n+ *     * Neither the name of Freescale Semiconductor nor the\n+ *       names of its contributors may be used to endorse or promote products\n+ *       derived from this software without specific prior written permission.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY\n+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY\n+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include \"qbman_portal.h\"\n+\n+/* QBMan portal management command codes */\n+#define QBMAN_MC_ACQUIRE       0x30\n+#define QBMAN_WQCHAN_CONFIGURE 0x46\n+\n+/* CINH register offsets */\n+#define QBMAN_CINH_SWP_EQCR_PI 0x800\n+#define QBMAN_CINH_SWP_EQCR_CI 0x840\n+#define QBMAN_CINH_SWP_EQAR    0x8c0\n+#define QBMAN_CINH_SWP_DQPI    0xa00\n+#define QBMAN_CINH_SWP_DCAP    0xac0\n+#define QBMAN_CINH_SWP_SDQCR   0xb00\n+#define QBMAN_CINH_SWP_RAR     0xcc0\n+#define QBMAN_CINH_SWP_ISR     0xe00\n+#define QBMAN_CINH_SWP_IER     0xe40\n+#define QBMAN_CINH_SWP_ISDR    0xe80\n+#define QBMAN_CINH_SWP_IIR     0xec0\n+\n+/* CENA register offsets */\n+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))\n+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))\n+#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((uint32_t)(n) << 6))\n+#define QBMAN_CENA_SWP_CR      0x600\n+#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((uint32_t)(vb) >> 1))\n+#define QBMAN_CENA_SWP_VDQCR   0x780\n+#define QBMAN_CENA_SWP_EQCR_CI 0x840\n+\n+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */\n+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)\n+\n+/* QBMan FQ management command codes */\n+#define QBMAN_FQ_SCHEDULE\t0x48\n+#define QBMAN_FQ_FORCE\t\t0x49\n+#define QBMAN_FQ_XON\t\t0x4d\n+#define QBMAN_FQ_XOFF\t\t0x4e\n+\n+/*******************************/\n+/* Pre-defined attribute codes */\n+/*******************************/\n+\n+struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);\n+struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);\n+\n+/*************************/\n+/* SDQCR attribute codes */\n+/*************************/\n+\n+/* we put these here because at least some of them are required by\n+ * qbman_swp_init()\n+ */\n+struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);\n+struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);\n+struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);\n+static struct qb_attr_code code_eq_dca_idx;\n+#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)\n+enum qbman_sdqcr_dct {\n+\tqbman_sdqcr_dct_null = 0,\n+\tqbman_sdqcr_dct_prio_ics,\n+\tqbman_sdqcr_dct_active_ics,\n+\tqbman_sdqcr_dct_active\n+};\n+\n+enum qbman_sdqcr_fc {\n+\tqbman_sdqcr_fc_one = 0,\n+\tqbman_sdqcr_fc_up_to_3 = 1\n+};\n+\n+struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);\n+\n+/* We need to keep track of which SWP triggered a pull command\n+ * so keep an array of portal IDs and use the token field to\n+ * be able to find the proper portal\n+ */\n+#define MAX_QBMAN_PORTALS  35\n+static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];\n+\n+uint32_t qman_version;\n+\n+/*********************************/\n+/* Portal constructor/destructor */\n+/*********************************/\n+\n+/* Software portals should always be in the power-on state when we initialise,\n+ * due to the CCSR-based portal reset functionality that MC has.\n+ *\n+ * Erk! 
Turns out that QMan versions prior to 4.1 do not correctly reset DQRR\n+ * valid-bits, so we need to support a workaround where we don't trust\n+ * valid-bits when detecting new entries until any stale ring entries have been\n+ * overwritten at least once. The idea is that we read PI for the first few\n+ * entries, then switch to valid-bit after that. The trick is to clear the\n+ * bug-work-around boolean once the PI wraps around the ring for the first time.\n+ *\n+ * Note: this still carries a slight additional cost once the decrementer hits\n+ * zero.\n+ */\n+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)\n+{\n+\tint ret;\n+\tuint32_t eqcr_pi;\n+\tstruct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);\n+\n+\tif (!p)\n+\t\treturn NULL;\n+\tp->desc = *d;\n+#ifdef QBMAN_CHECKING\n+\tp->mc.check = swp_mc_can_start;\n+#endif\n+\tp->mc.valid_bit = QB_VALID_BIT;\n+\tp->sdq = 0;\n+\tqb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);\n+\tqb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);\n+\tqb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);\n+\tatomic_set(&p->vdq.busy, 1);\n+\tp->vdq.valid_bit = QB_VALID_BIT;\n+\tp->dqrr.next_idx = 0;\n+\tp->dqrr.valid_bit = QB_VALID_BIT;\n+\tqman_version = p->desc.qman_version;\n+\tif ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {\n+\t\tp->dqrr.dqrr_size = 4;\n+\t\tp->dqrr.reset_bug = 1;\n+\t\t/* Set size of DQRR to 4, encoded in 2 bits */\n+\t\tcode_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 2);\n+\t} else {\n+\t\tp->dqrr.dqrr_size = 8;\n+\t\tp->dqrr.reset_bug = 0;\n+\t\t/* Set size of DQRR to 8, encoded in 3 bits */\n+\t\tcode_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 3);\n+\t}\n+\n+\tret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);\n+\tif (ret) {\n+\t\tkfree(p);\n+\t\tpr_err(\"qbman_swp_sys_init() failed %d\\n\", ret);\n+\t\treturn NULL;\n+\t}\n+\t/* SDQCR needs to be initialized to 0 when no channels are\n+\t * being dequeued from or else the QMan HW will indicate an\n+\t * error.  
The values that were calculated above will be\n+\t * applied when dequeues from a specific channel are enabled.\n+\t */\n+\tqbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);\n+\teqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);\n+\tp->eqcr.pi = eqcr_pi & 0xF;\n+\tp->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;\n+\tp->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;\n+\tp->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,\n+\t\t\t\t\t\tp->eqcr.ci, p->eqcr.pi);\n+\n+\tportal_idx_map[p->desc.idx] = p;\n+\treturn p;\n+}\n+\n+void qbman_swp_finish(struct qbman_swp *p)\n+{\n+#ifdef QBMAN_CHECKING\n+\tQBMAN_BUG_ON(p->mc.check != swp_mc_can_start);\n+#endif\n+\tqbman_swp_sys_finish(&p->sys);\n+\tportal_idx_map[p->desc.idx] = NULL;\n+\tkfree(p);\n+}\n+\n+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)\n+{\n+\treturn &p->desc;\n+}\n+\n+/**************/\n+/* Interrupts */\n+/**************/\n+\n+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)\n+{\n+\treturn qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);\n+}\n+\n+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)\n+{\n+\tqbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);\n+}\n+\n+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)\n+{\n+\treturn qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);\n+}\n+\n+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)\n+{\n+\tqbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);\n+}\n+\n+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)\n+{\n+\treturn qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);\n+}\n+\n+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)\n+{\n+\tqbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);\n+}\n+\n+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)\n+{\n+\treturn qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);\n+}\n+\n+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)\n+{\n+\tqbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);\n+}\n+\n+/***********************/\n+/* Management commands */\n+/***********************/\n+\n+/*\n+ * Internal code common to all types of management commands.\n+ */\n+\n+void *qbman_swp_mc_start(struct qbman_swp *p)\n+{\n+\tvoid *ret;\n+#ifdef QBMAN_CHECKING\n+\tQBMAN_BUG_ON(p->mc.check != swp_mc_can_start);\n+#endif\n+\tret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);\n+#ifdef QBMAN_CHECKING\n+\tif (!ret)\n+\t\tp->mc.check = swp_mc_can_submit;\n+#endif\n+\treturn ret;\n+}\n+\n+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)\n+{\n+\tuint32_t *v = cmd;\n+#ifdef QBMAN_CHECKING\n+\tQBMAN_BUG_ON(p->mc.check != swp_mc_can_submit);\n+#endif\n+\t/* TBD: \"|=\" is going to hurt performance. Need to move as many fields\n+\t * out of word zero, and for those that remain, the \"OR\" needs to occur\n+\t * at the caller side. 
This debug check helps to catch cases where the\n+\t * caller wants to OR but has forgotten to do so.\n+\t */\n+\tQBMAN_BUG_ON((*v & cmd_verb) != *v);\n+\t*v = cmd_verb | p->mc.valid_bit;\n+\tqbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);\n+#ifdef QBMAN_CHECKING\n+\tp->mc.check = swp_mc_can_poll;\n+#endif\n+}\n+\n+void *qbman_swp_mc_result(struct qbman_swp *p)\n+{\n+\tuint32_t *ret, verb;\n+#ifdef QBMAN_CHECKING\n+\tQBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);\n+#endif\n+\tqbman_cena_invalidate_prefetch(&p->sys,\n+\t\t\t\t       QBMAN_CENA_SWP_RR(p->mc.valid_bit));\n+\tret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));\n+\t/* Remove the valid-bit - command completed iff the rest is non-zero */\n+\tverb = ret[0] & ~QB_VALID_BIT;\n+\tif (!verb)\n+\t\treturn NULL;\n+#ifdef QBMAN_CHECKING\n+\tp->mc.check = swp_mc_can_start;\n+#endif\n+\tp->mc.valid_bit ^= QB_VALID_BIT;\n+\treturn ret;\n+}\n+\n+/***********/\n+/* Enqueue */\n+/***********/\n+\n+/* These should be const, eventually */\n+static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);\n+static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);\n+static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);\n+static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);\n+/* Can't set code_eq_dca_idx width. Need qman version. Read at runtime */\n+static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);\n+static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);\n+static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);\n+static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);\n+static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);\n+static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);\n+/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */\n+static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);\n+static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);\n+static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);\n+static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);\n+static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);\n+static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);\n+\n+enum qbman_eq_cmd_e {\n+\t/* No enqueue, primarily for plugging ORP gaps for dropped frames */\n+\tqbman_eq_cmd_empty,\n+\t/* DMA an enqueue response once complete */\n+\tqbman_eq_cmd_respond,\n+\t/* DMA an enqueue response only if the enqueue fails */\n+\tqbman_eq_cmd_respond_reject\n+};\n+\n+void qbman_eq_desc_clear(struct qbman_eq_desc *d)\n+{\n+\tmemset(d, 0, sizeof(*d));\n+}\n+\n+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_eq_orp_en, cl, 0);\n+\tqb_attr_code_encode(&code_eq_cmd, cl,\n+\t\t\t    respond_success ? qbman_eq_cmd_respond :\n+\t\t\t\t\t      qbman_eq_cmd_respond_reject);\n+}\n+\n+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,\n+\t\t\t   uint32_t opr_id, uint32_t seqnum, int incomplete)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_eq_orp_en, cl, 1);\n+\tqb_attr_code_encode(&code_eq_cmd, cl,\n+\t\t\t    respond_success ? 
qbman_eq_cmd_respond :\n+\t\t\t\t\t      qbman_eq_cmd_respond_reject);\n+\tqb_attr_code_encode(&code_eq_opr_id, cl, opr_id);\n+\tqb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);\n+\tqb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);\n+}\n+\n+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,\n+\t\t\t\tuint32_t seqnum)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_eq_orp_en, cl, 1);\n+\tqb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);\n+\tqb_attr_code_encode(&code_eq_opr_id, cl, opr_id);\n+\tqb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);\n+\tqb_attr_code_encode(&code_eq_orp_nlis, cl, 0);\n+\tqb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);\n+}\n+\n+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,\n+\t\t\t\tuint32_t seqnum)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_eq_orp_en, cl, 1);\n+\tqb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);\n+\tqb_attr_code_encode(&code_eq_opr_id, cl, opr_id);\n+\tqb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);\n+\tqb_attr_code_encode(&code_eq_orp_nlis, cl, 0);\n+\tqb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);\n+}\n+\n+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,\n+\t\t\t\tdma_addr_t storage_phys,\n+\t\t\t\tint stash)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);\n+\tqb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);\n+}\n+\n+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);\n+}\n+\n+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_eq_qd_en, cl, 0);\n+\tqb_attr_code_encode(&code_eq_tgt_id, cl, fqid);\n+}\n+\n+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,\n+\t\t\t  uint32_t qd_bin, uint32_t qd_prio)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_eq_qd_en, cl, 1);\n+\tqb_attr_code_encode(&code_eq_tgt_id, cl, qdid);\n+\tqb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);\n+\tqb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);\n+}\n+\n+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_eq_eqdi, cl, !!enable);\n+}\n+\n+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,\n+\t\t\t   uint32_t dqrr_idx, int park)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_eq_dca_en, cl, !!enable);\n+\tif (enable) {\n+\t\tqb_attr_code_encode(&code_eq_dca_pk, cl, !!park);\n+\t\tqb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);\n+\t}\n+}\n+\n+#define EQAR_IDX(eqar)     ((eqar) & 0x7)\n+#define EQAR_VB(eqar)      ((eqar) & 0x80)\n+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)\n+static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,\n+\t\t\t\t\tconst struct qbman_eq_desc *d,\n+\t\t\t\t const struct qbman_fd *fd)\n+{\n+\tuint32_t *p;\n+\tconst uint32_t *cl = qb_cl(d);\n+\tuint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);\n+\n+\tpr_debug(\"EQAR=%08x\\n\", eqar);\n+\tif (!EQAR_SUCCESS(eqar))\n+\t\treturn -EBUSY;\n+\tp = qbman_cena_write_start_wo_shadow(&s->sys,\n+\t\t\tQBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));\n+\tword_copy(&p[1], &cl[1], 7);\n+\tword_copy(&p[8], fd, sizeof(*fd) >> 2);\n+\t/* Set the verb byte, have to substitute in the valid-bit */\n+\tlwsync();\n+\tp[0] = cl[0] | 
EQAR_VB(eqar);\n+\tqbman_cena_write_complete_wo_shadow(&s->sys,\n+\t\t\tQBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));\n+\treturn 0;\n+}\n+\n+static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,\n+\t\t\t\t       const struct qbman_eq_desc *d,\n+\t\t\t\tconst struct qbman_fd *fd)\n+{\n+\tuint32_t *p;\n+\tconst uint32_t *cl = qb_cl(d);\n+\tuint32_t eqcr_ci;\n+\tuint8_t diff;\n+\n+\tif (!s->eqcr.available) {\n+\t\teqcr_ci = s->eqcr.ci;\n+\t\ts->eqcr.ci = qbman_cena_read_reg(&s->sys,\n+\t\t\t\tQBMAN_CENA_SWP_EQCR_CI) & 0xF;\n+\t\tdiff = qm_cyc_diff(QBMAN_EQCR_SIZE,\n+\t\t\t\t   eqcr_ci, s->eqcr.ci);\n+\t\ts->eqcr.available += diff;\n+\t\tif (!diff)\n+\t\t\treturn -EBUSY;\n+\t}\n+\n+\tp = qbman_cena_write_start_wo_shadow(&s->sys,\n+\t\tQBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));\n+\tword_copy(&p[1], &cl[1], 7);\n+\tword_copy(&p[8], fd, sizeof(*fd) >> 2);\n+\tlwsync();\n+\t/* Set the verb byte, have to substitute in the valid-bit */\n+\tp[0] = cl[0] | s->eqcr.pi_vb;\n+\tqbman_cena_write_complete_wo_shadow(&s->sys,\n+\t\tQBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));\n+\ts->eqcr.pi++;\n+\ts->eqcr.pi &= 0xF;\n+\ts->eqcr.available--;\n+\tif (!(s->eqcr.pi & 7))\n+\t\ts->eqcr.pi_vb ^= QB_VALID_BIT;\n+\treturn 0;\n+}\n+\n+int qbman_swp_fill_ring(struct qbman_swp *s,\n+\t\t\tconst struct qbman_eq_desc *d,\n+\t\t\tconst struct qbman_fd *fd,\n+\t\t\t__attribute__((unused)) uint8_t burst_index)\n+{\n+\tuint32_t *p;\n+\tconst uint32_t *cl = qb_cl(d);\n+\tuint32_t eqcr_ci;\n+\tuint8_t diff;\n+\n+\tif (!s->eqcr.available) {\n+\t\teqcr_ci = s->eqcr.ci;\n+\t\ts->eqcr.ci = qbman_cena_read_reg(&s->sys,\n+\t\t\t\tQBMAN_CENA_SWP_EQCR_CI) & 0xF;\n+\t\tdiff = qm_cyc_diff(QBMAN_EQCR_SIZE,\n+\t\t\t\t   eqcr_ci, s->eqcr.ci);\n+\t\ts->eqcr.available += diff;\n+\t\tif (!diff)\n+\t\t\treturn -EBUSY;\n+\t}\n+\tp = qbman_cena_write_start_wo_shadow(&s->sys,\n+\t\tQBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7));\n+\t/* word_copy(&p[1], &cl[1], 7); */\n+\tmemcpy(&p[1], &cl[1], 7 * 4);\n+\t/* word_copy(&p[8], fd, sizeof(*fd) >> 2); */\n+\tmemcpy(&p[8], fd, sizeof(struct qbman_fd));\n+\n+\t/* lwsync(); */\n+\tp[0] = cl[0] | s->eqcr.pi_vb;\n+\n+\ts->eqcr.pi++;\n+\ts->eqcr.pi &= 0xF;\n+\ts->eqcr.available--;\n+\tif (!(s->eqcr.pi & 7))\n+\t\ts->eqcr.pi_vb ^= QB_VALID_BIT;\n+\n+\treturn 0;\n+}\n+\n+int qbman_swp_flush_ring(struct qbman_swp *s)\n+{\n+\tvoid *ptr = s->sys.addr_cena;\n+\n+\tdcbf((uint64_t)ptr);\n+\tdcbf((uint64_t)ptr + 0x40);\n+\tdcbf((uint64_t)ptr + 0x80);\n+\tdcbf((uint64_t)ptr + 0xc0);\n+\tdcbf((uint64_t)ptr + 0x100);\n+\tdcbf((uint64_t)ptr + 0x140);\n+\tdcbf((uint64_t)ptr + 0x180);\n+\tdcbf((uint64_t)ptr + 0x1c0);\n+\n+\treturn 0;\n+}\n+\n+void qbman_sync(void)\n+{\n+\tlwsync();\n+}\n+\n+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,\n+\t\t      const struct qbman_fd *fd)\n+{\n+\tif (s->sys.eqcr_mode == qman_eqcr_vb_array)\n+\t\treturn qbman_swp_enqueue_array_mode(s, d, fd);\n+\telse    /* Use ring mode by default */\n+\t\treturn qbman_swp_enqueue_ring_mode(s, d, fd);\n+}\n+\n+/*************************/\n+/* Static (push) dequeue */\n+/*************************/\n+\n+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)\n+{\n+\tstruct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);\n+\n+\tQBMAN_BUG_ON(channel_idx > 15);\n+\t*enabled = (int)qb_attr_code_decode(&code, &s->sdq);\n+}\n+\n+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)\n+{\n+\tuint16_t dqsrc;\n+\tstruct qb_attr_code code = 
CODE_SDQCR_DQSRC(channel_idx);\n+\n+\tQBMAN_BUG_ON(channel_idx > 15);\n+\tqb_attr_code_encode(&code, &s->sdq, !!enable);\n+\t/* Read back the complete src map. If no channels are enabled\n+\t * the SDQCR must be 0, or else QMan will assert errors\n+\t */\n+\tdqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);\n+\tif (dqsrc != 0)\n+\t\tqbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);\n+\telse\n+\t\tqbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);\n+}\n+\n+/***************************/\n+/* Volatile (pull) dequeue */\n+/***************************/\n+\n+/* These should be const, eventually */\n+static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);\n+static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);\n+static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);\n+static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);\n+static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);\n+static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);\n+static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);\n+static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);\n+\n+enum qb_pull_dt_e {\n+\tqb_pull_dt_channel,\n+\tqb_pull_dt_workqueue,\n+\tqb_pull_dt_framequeue\n+};\n+\n+void qbman_pull_desc_clear(struct qbman_pull_desc *d)\n+{\n+\tmemset(d, 0, sizeof(*d));\n+}\n+\n+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,\n+\t\t\t\t struct qbman_result *storage,\n+\t\t\t\t dma_addr_t storage_phys,\n+\t\t\t\t int stash)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\t/* Squiggle the pointer 'storage' into the extra 2 words of the\n+\t * descriptor (which aren't copied to the hw command)\n+\t */\n+\t*(void **)&cl[4] = storage;\n+\tif (!storage) {\n+\t\tqb_attr_code_encode(&code_pull_rls, cl, 0);\n+\t\treturn;\n+\t}\n+\tqb_attr_code_encode(&code_pull_rls, cl, 1);\n+\tqb_attr_code_encode(&code_pull_stash, cl, !!stash);\n+\tqb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);\n+}\n+\n+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tQBMAN_BUG_ON(!numframes || (numframes > 16));\n+\tqb_attr_code_encode(&code_pull_numframes, cl,\n+\t\t\t    (uint32_t)(numframes - 1));\n+}\n+\n+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_pull_token, cl, token);\n+}\n+\n+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_pull_dct, cl, 1);\n+\tqb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);\n+\tqb_attr_code_encode(&code_pull_dqsource, cl, fqid);\n+}\n+\n+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,\n+\t\t\t    enum qbman_pull_type_e dct)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_pull_dct, cl, dct);\n+\tqb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);\n+\tqb_attr_code_encode(&code_pull_dqsource, cl, wqid);\n+}\n+\n+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,\n+\t\t\t\t enum qbman_pull_type_e dct)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_pull_dct, cl, dct);\n+\tqb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);\n+\tqb_attr_code_encode(&code_pull_dqsource, cl, chid);\n+}\n+\n+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)\n+{\n+\tuint32_t *p;\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tif (!atomic_dec_and_test(&s->vdq.busy)) 
{\n+\t\tatomic_inc(&s->vdq.busy);\n+\t\treturn -EBUSY;\n+\t}\n+\ts->vdq.storage = *(void **)&cl[4];\n+\t/* We use portal index +1 as token so that 0 still indicates\n+\t * that the result isn't valid yet.\n+\t */\n+\tqb_attr_code_encode(&code_pull_token, cl, s->desc.idx + 1);\n+\tp = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);\n+\tword_copy(&p[1], &cl[1], 3);\n+\t/* Set the verb byte, have to substitute in the valid-bit */\n+\tlwsync();\n+\tp[0] = cl[0] | s->vdq.valid_bit;\n+\ts->vdq.valid_bit ^= QB_VALID_BIT;\n+\tqbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);\n+\treturn 0;\n+}\n+\n+/****************/\n+/* Polling DQRR */\n+/****************/\n+\n+static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);\n+static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);\n+static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);\n+static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);\n+static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);\n+/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */\n+static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);\n+static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);\n+static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);\n+static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);\n+\n+#define QBMAN_RESULT_DQ        0x60\n+#define QBMAN_RESULT_FQRN      0x21\n+#define QBMAN_RESULT_FQRNI     0x22\n+#define QBMAN_RESULT_FQPN      0x24\n+#define QBMAN_RESULT_FQDAN     0x25\n+#define QBMAN_RESULT_CDAN      0x26\n+#define QBMAN_RESULT_CSCN_MEM  0x27\n+#define QBMAN_RESULT_CGCU      0x28\n+#define QBMAN_RESULT_BPSCN     0x29\n+#define QBMAN_RESULT_CSCN_WQ   0x2a\n+\n+static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);\n+\n+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry\n+ * only once, so repeated calls can return a sequence of DQRR entries, without\n+ * requiring they be consumed immediately or in any particular order.\n+ */\n+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)\n+{\n+\tuint32_t verb;\n+\tuint32_t response_verb;\n+\tuint32_t flags;\n+\tconst struct qbman_result *dq;\n+\tconst uint32_t *p;\n+\n+\t/* Before using valid-bit to detect if something is there, we have to\n+\t * handle the case of the DQRR reset bug...\n+\t */\n+\tif (unlikely(s->dqrr.reset_bug)) {\n+\t\t/* We pick up new entries by cache-inhibited producer index,\n+\t\t * which means that a non-coherent mapping would require us to\n+\t\t * invalidate and read *only* once that PI has indicated that\n+\t\t * there's an entry here. The first trip around the DQRR ring\n+\t\t * will be much less efficient than all subsequent trips around\n+\t\t * it...\n+\t\t */\n+\t\tuint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);\n+\t\tuint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);\n+\t\t/* there are new entries iff pi != next_idx */\n+\t\tif (pi == s->dqrr.next_idx)\n+\t\t\treturn NULL;\n+\t\t/* if next_idx is/was the last ring index, and 'pi' is\n+\t\t * different, we can disable the workaround as all the ring\n+\t\t * entries have now been DMA'd to so valid-bit checking is\n+\t\t * repaired. 
Note: this logic needs to be based on next_idx\n+\t\t * (which increments one at a time), rather than on pi (which\n+\t\t * can burst and wrap-around between our snapshots of it).\n+\t\t */\n+\t\tQBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);\n+\t\tif (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {\n+\t\t\tpr_debug(\"DEBUG: next_idx=%d, pi=%d, clear reset bug\\n\",\n+\t\t\t\t s->dqrr.next_idx, pi);\n+\t\t\ts->dqrr.reset_bug = 0;\n+\t\t}\n+\t\tqbman_cena_invalidate_prefetch(&s->sys,\n+\t\t\t\tQBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));\n+\t}\n+\tdq = qbman_cena_read_wo_shadow(&s->sys,\n+\t\t\t\t       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));\n+\tp = qb_cl(dq);\n+\tverb = qb_attr_code_decode(&code_dqrr_verb, p);\n+\t/* If the valid-bit isn't of the expected polarity, nothing there. Note,\n+\t * in the DQRR reset bug workaround, we shouldn't need to skip these\n+\t * check, because we've already determined that a new entry is available\n+\t * and we've invalidated the cacheline before reading it, so the\n+\t * valid-bit behaviour is repaired and should tell us what we already\n+\t * knew from reading PI.\n+\t */\n+\tif ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)\n+\t\treturn NULL;\n+\n+\t/* There's something there. Move \"next_idx\" attention to the next ring\n+\t * entry (and prefetch it) before returning what we found.\n+\t */\n+\ts->dqrr.next_idx++;\n+\tif (s->dqrr.next_idx == s->dqrr.dqrr_size) {\n+\t\ts->dqrr.next_idx = 0;\n+\t\ts->dqrr.valid_bit ^= QB_VALID_BIT;\n+\t}\n+\t/* If this is the final response to a volatile dequeue command\n+\t * indicate that the vdq is no longer busy.\n+\t */\n+\tflags = qbman_result_DQ_flags(dq);\n+\tresponse_verb = qb_attr_code_decode(&code_dqrr_response, &verb);\n+\tif ((response_verb == QBMAN_RESULT_DQ) &&\n+\t    (flags & QBMAN_DQ_STAT_VOLATILE) &&\n+\t    (flags & QBMAN_DQ_STAT_EXPIRED))\n+\t\t\tatomic_inc(&s->vdq.busy);\n+\n+\treturn dq;\n+}\n+\n+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */\n+void qbman_swp_dqrr_consume(struct qbman_swp *s,\n+\t\t\t    const struct qbman_result *dq)\n+{\n+\tqbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));\n+}\n+\n+/*********************************/\n+/* Polling user-provided storage */\n+/*********************************/\n+\n+int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s,\n+\t\t\t\tconst struct qbman_result *dq)\n+{\n+\t/* To avoid converting the little-endian DQ entry to host-endian prior\n+\t * to us knowing whether there is a valid entry or not (and run the\n+\t * risk of corrupting the incoming hardware LE write), we detect in\n+\t * hardware endianness rather than host. This means we need a different\n+\t * \"code\" depending on whether we are BE or LE in software, which is\n+\t * where DQRR_TOK_OFFSET comes in...\n+\t */\n+\tstatic struct qb_attr_code code_dqrr_tok_detect =\n+\t\t\t\t\tQB_CODE(0, DQRR_TOK_OFFSET, 8);\n+\t/* The user trying to poll for a result treats \"dq\" as const. It is\n+\t * however the same address that was provided to us non-const in the\n+\t * first place, for directing hardware DMA to. 
So we can cast away the\n+\t * const because it is mutable from our perspective.\n+\t */\n+\tuint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);\n+\tuint32_t token;\n+\n+\ttoken = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);\n+\tif (token == 0)\n+\t\treturn 0;\n+\t/* Entry is valid - overwrite token back to 0 so\n+\t * a) If this memory is reused tokesn will be 0\n+\t * b) If someone calls \"has_new_result()\" again on this entry it\n+\t *    will not appear to be new\n+\t */\n+\tqb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);\n+\n+\t/* Only now do we convert from hardware to host endianness. Also, as we\n+\t * are returning success, the user has promised not to call us again, so\n+\t * there's no risk of us converting the endianness twice...\n+\t */\n+\tmake_le32_n(p, 16);\n+\treturn 1;\n+}\n+\n+int qbman_check_command_complete(struct qbman_swp *s,\n+\t\t\t\t const struct qbman_result *dq)\n+{\n+\t/* To avoid converting the little-endian DQ entry to host-endian prior\n+\t * to us knowing whether there is a valid entry or not (and run the\n+\t * risk of corrupting the incoming hardware LE write), we detect in\n+\t * hardware endianness rather than host. This means we need a different\n+\t * \"code\" depending on whether we are BE or LE in software, which is\n+\t * where DQRR_TOK_OFFSET comes in...\n+\t */\n+\tstatic struct qb_attr_code code_dqrr_tok_detect =\n+\t\t\t\t\tQB_CODE(0, DQRR_TOK_OFFSET, 8);\n+\t/* The user trying to poll for a result treats \"dq\" as const. It is\n+\t * however the same address that was provided to us non-const in the\n+\t * first place, for directing hardware DMA to. So we can cast away the\n+\t * const because it is mutable from our perspective.\n+\t */\n+\tuint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);\n+\tuint32_t token;\n+\n+\ttoken = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);\n+\tif (token == 0)\n+\t\treturn 0;\n+\t/* TODO: Remove qbman_swp from parameters and make it a local\n+\t * once we've tested the reserve portal map change\n+\t */\n+\ts = portal_idx_map[token - 1];\n+\t/* When token is set it indicates that VDQ command has been fetched\n+\t * by qbman and is working on it. 
It is safe for software to issue\n+\t * another VDQ command, so we increment the busy variable.\n+\t */\n+\tif (s->vdq.storage == dq) {\n+\t\ts->vdq.storage = NULL;\n+\t\tatomic_inc(&s->vdq.busy);\n+\t}\n+\treturn 1;\n+}\n+\n+/********************************/\n+/* Categorising qbman results   */\n+/********************************/\n+\n+static struct qb_attr_code code_result_in_mem =\n+\t\t\tQB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);\n+\n+static inline int __qbman_result_is_x(const struct qbman_result *dq,\n+\t\t\t\t      uint32_t x)\n+{\n+\tconst uint32_t *p = qb_cl(dq);\n+\tuint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);\n+\n+\treturn (response_verb == x);\n+}\n+\n+static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq,\n+\t\t\t\t\t     uint32_t x)\n+{\n+\tconst uint32_t *p = qb_cl(dq);\n+\tuint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);\n+\n+\treturn (response_verb == x);\n+}\n+\n+int qbman_result_is_DQ(const struct qbman_result *dq)\n+{\n+\treturn __qbman_result_is_x(dq, QBMAN_RESULT_DQ);\n+}\n+\n+int qbman_result_is_FQDAN(const struct qbman_result *dq)\n+{\n+\treturn __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);\n+}\n+\n+int qbman_result_is_CDAN(const struct qbman_result *dq)\n+{\n+\treturn __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);\n+}\n+\n+int qbman_result_is_CSCN(const struct qbman_result *dq)\n+{\n+\treturn __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||\n+\t\t__qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);\n+}\n+\n+int qbman_result_is_BPSCN(const struct qbman_result *dq)\n+{\n+\treturn __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);\n+}\n+\n+int qbman_result_is_CGCU(const struct qbman_result *dq)\n+{\n+\treturn __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);\n+}\n+\n+int qbman_result_is_FQRN(const struct qbman_result *dq)\n+{\n+\treturn __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);\n+}\n+\n+int qbman_result_is_FQRNI(const struct qbman_result *dq)\n+{\n+\treturn __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);\n+}\n+\n+int qbman_result_is_FQPN(const struct qbman_result *dq)\n+{\n+\treturn __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);\n+}\n+\n+/*********************************/\n+/* Parsing frame dequeue results */\n+/*********************************/\n+\n+/* These APIs assume qbman_result_is_DQ() is TRUE */\n+\n+uint32_t qbman_result_DQ_flags(const struct qbman_result *dq)\n+{\n+\tconst uint32_t *p = qb_cl(dq);\n+\n+\treturn qb_attr_code_decode(&code_dqrr_stat, p);\n+}\n+\n+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)\n+{\n+\tconst uint32_t *p = qb_cl(dq);\n+\n+\treturn (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);\n+}\n+\n+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)\n+{\n+\tconst uint32_t *p = qb_cl(dq);\n+\n+\treturn (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);\n+}\n+\n+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)\n+{\n+\tconst uint32_t *p = qb_cl(dq);\n+\n+\treturn qb_attr_code_decode(&code_dqrr_fqid, p);\n+}\n+\n+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)\n+{\n+\tconst uint32_t *p = qb_cl(dq);\n+\n+\treturn qb_attr_code_decode(&code_dqrr_byte_count, p);\n+}\n+\n+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)\n+{\n+\tconst uint32_t *p = qb_cl(dq);\n+\n+\treturn qb_attr_code_decode(&code_dqrr_frame_count, p);\n+}\n+\n+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)\n+{\n+\tconst uint64_t *p = (const uint64_t *)qb_cl(dq);\n+\n+\treturn 
qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);\n+}\n+\n+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)\n+{\n+\tconst uint32_t *p = qb_cl(dq);\n+\n+\treturn (const struct qbman_fd *)&p[8];\n+}\n+\n+/**************************************/\n+/* Parsing state-change notifications */\n+/**************************************/\n+\n+static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);\n+static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);\n+static struct qb_attr_code code_scn_state_in_mem =\n+\t\t\tQB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);\n+static struct qb_attr_code code_scn_rid_in_mem =\n+\t\t\tQB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);\n+static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);\n+\n+uint8_t qbman_result_SCN_state(const struct qbman_result *scn)\n+{\n+\tconst uint32_t *p = qb_cl(scn);\n+\n+\treturn (uint8_t)qb_attr_code_decode(&code_scn_state, p);\n+}\n+\n+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)\n+{\n+\tconst uint32_t *p = qb_cl(scn);\n+\n+\treturn qb_attr_code_decode(&code_scn_rid, p);\n+}\n+\n+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)\n+{\n+\tconst uint64_t *p = (const uint64_t *)qb_cl(scn);\n+\n+\treturn qb_attr_code_decode_64(&code_scn_ctx_lo, p);\n+}\n+\n+uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn)\n+{\n+\tconst uint32_t *p = qb_cl(scn);\n+\n+\treturn (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);\n+}\n+\n+uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)\n+{\n+\tconst uint32_t *p = qb_cl(scn);\n+\tuint32_t result_rid;\n+\n+\tresult_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);\n+\treturn make_le24(result_rid);\n+}\n+\n+/*****************/\n+/* Parsing BPSCN */\n+/*****************/\n+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)\n+{\n+\treturn (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;\n+}\n+\n+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)\n+{\n+\treturn !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);\n+}\n+\n+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)\n+{\n+\treturn (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);\n+}\n+\n+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)\n+{\n+\treturn (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);\n+}\n+\n+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)\n+{\n+\tuint64_t ctx;\n+\tuint32_t ctx_hi, ctx_lo;\n+\n+\tctx = qbman_result_SCN_ctx(scn);\n+\tctx_hi = upper32(ctx);\n+\tctx_lo = lower32(ctx);\n+\treturn ((uint64_t)make_le32(ctx_hi) << 32 |\n+\t\t(uint64_t)make_le32(ctx_lo));\n+}\n+\n+/*****************/\n+/* Parsing CGCU  */\n+/*****************/\n+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)\n+{\n+\treturn (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;\n+}\n+\n+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)\n+{\n+\tuint64_t ctx;\n+\tuint32_t ctx_hi, ctx_lo;\n+\n+\tctx = qbman_result_SCN_ctx(scn);\n+\tctx_hi = upper32(ctx);\n+\tctx_lo = lower32(ctx);\n+\treturn ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |\n+\t\t(uint64_t)make_le32(ctx_lo);\n+}\n+\n+/******************/\n+/* Buffer release */\n+/******************/\n+\n+/* These should be const, eventually */\n+/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */\n+static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);\n+static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);\n+static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 
16);\n+\n+void qbman_release_desc_clear(struct qbman_release_desc *d)\n+{\n+\tuint32_t *cl;\n+\n+\tmemset(d, 0, sizeof(*d));\n+\tcl = qb_cl(d);\n+\tqb_attr_code_encode(&code_release_set_me, cl, 1);\n+}\n+\n+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_release_bpid, cl, bpid);\n+}\n+\n+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)\n+{\n+\tuint32_t *cl = qb_cl(d);\n+\n+\tqb_attr_code_encode(&code_release_rcdi, cl, !!enable);\n+}\n+\n+#define RAR_IDX(rar)     ((rar) & 0x7)\n+#define RAR_VB(rar)      ((rar) & 0x80)\n+#define RAR_SUCCESS(rar) ((rar) & 0x100)\n+\n+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,\n+\t\t      const uint64_t *buffers, unsigned int num_buffers)\n+{\n+\tuint32_t *p;\n+\tconst uint32_t *cl = qb_cl(d);\n+\tuint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);\n+\n+\tpr_debug(\"RAR=%08x\\n\", rar);\n+\tif (!RAR_SUCCESS(rar))\n+\t\treturn -EBUSY;\n+\tQBMAN_BUG_ON(!num_buffers || (num_buffers > 7));\n+\t/* Start the release command */\n+\tp = qbman_cena_write_start_wo_shadow(&s->sys,\n+\t\t\t\t\t     QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));\n+\t/* Copy the caller's buffer pointers to the command */\n+\tu64_to_le32_copy(&p[2], buffers, num_buffers);\n+\t/* Set the verb byte; we have to substitute in the valid-bit and the\n+\t * number of buffers.\n+\t */\n+\tlwsync();\n+\tp[0] = cl[0] | RAR_VB(rar) | num_buffers;\n+\tqbman_cena_write_complete_wo_shadow(&s->sys,\n+\t\t\t\t\t    QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));\n+\treturn 0;\n+}\n+\n+/*******************/\n+/* Buffer acquires */\n+/*******************/\n+\n+/* These should be const, eventually */\n+static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);\n+static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);\n+static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);\n+\n+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,\n+\t\t      unsigned int num_buffers)\n+{\n+\tuint32_t *p;\n+\tuint32_t rslt, num;\n+\n+\tQBMAN_BUG_ON(!num_buffers || (num_buffers > 7));\n+\n+\t/* Start the management command */\n+\tp = qbman_swp_mc_start(s);\n+\n+\tif (!p)\n+\t\treturn -EBUSY;\n+\n+\t/* Encode the caller-provided attributes */\n+\tqb_attr_code_encode(&code_acquire_bpid, p, bpid);\n+\tqb_attr_code_encode(&code_acquire_num, p, num_buffers);\n+\n+\t/* Complete the management command */\n+\tp = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);\n+\n+\t/* Decode the outcome */\n+\trslt = qb_attr_code_decode(&code_generic_rslt, p);\n+\tnum = qb_attr_code_decode(&code_acquire_r_num, p);\n+\tQBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) !=\n+\t\t     QBMAN_MC_ACQUIRE);\n+\n+\t/* Determine success or failure */\n+\tif (unlikely(rslt != QBMAN_MC_RSLT_OK)) {\n+\t\tpr_err(\"Acquire buffers from BPID 0x%x failed, code=0x%02x\\n\",\n+\t\t       bpid, rslt);\n+\t\treturn -EIO;\n+\t}\n+\tQBMAN_BUG_ON(num > num_buffers);\n+\t/* Copy the acquired buffers to the caller's array */\n+\tu64_from_le32_copy(buffers, &p[2], num);\n+\treturn (int)num;\n+}\n+\n+/*****************/\n+/* FQ management */\n+/*****************/\n+\n+static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);\n+\n+static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,\n+\t\t\t\t  uint8_t alt_fq_verb)\n+{\n+\tuint32_t *p;\n+\tuint32_t rslt;\n+\n+\t/* Start the management command */\n+\tp = qbman_swp_mc_start(s);\n+\tif (!p)\n+\t\treturn 
-EBUSY;\n+\n+\tqb_attr_code_encode(&code_fqalt_fqid, p, fqid);\n+\t/* Complete the management command */\n+\tp = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);\n+\n+\t/* Decode the outcome */\n+\trslt = qb_attr_code_decode(&code_generic_rslt, p);\n+\tQBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb);\n+\n+\t/* Determine success or failure */\n+\tif (unlikely(rslt != QBMAN_MC_RSLT_OK)) {\n+\t\tpr_err(\"ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\\n\",\n+\t\t       fqid, alt_fq_verb, rslt);\n+\t\treturn -EIO;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)\n+{\n+\treturn qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);\n+}\n+\n+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)\n+{\n+\treturn qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);\n+}\n+\n+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)\n+{\n+\treturn qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);\n+}\n+\n+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)\n+{\n+\treturn qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);\n+}\n+\n+/**********************/\n+/* Channel management */\n+/**********************/\n+\n+static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);\n+static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);\n+static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);\n+static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);\n+\n+/* Hide \"ICD\" for now as we don't use it, don't set it, and don't test it, so it\n+ * would be irresponsible to expose it.\n+ */\n+#define CODE_CDAN_WE_EN    0x1\n+#define CODE_CDAN_WE_CTX   0x4\n+\n+static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,\n+\t\t\t      uint8_t we_mask, uint8_t cdan_en,\n+\t\t\t      uint64_t ctx)\n+{\n+\tuint32_t *p;\n+\tuint32_t rslt;\n+\n+\t/* Start the management command */\n+\tp = qbman_swp_mc_start(s);\n+\tif (!p)\n+\t\treturn -EBUSY;\n+\n+\t/* Encode the caller-provided attributes */\n+\tqb_attr_code_encode(&code_cdan_cid, p, channelid);\n+\tqb_attr_code_encode(&code_cdan_we, p, we_mask);\n+\tqb_attr_code_encode(&code_cdan_en, p, cdan_en);\n+\tqb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);\n+\t/* Complete the management command */\n+\tp = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);\n+\n+\t/* Decode the outcome */\n+\trslt = qb_attr_code_decode(&code_generic_rslt, p);\n+\tQBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p)\n+\t\t\t\t\t!= QBMAN_WQCHAN_CONFIGURE);\n+\n+\t/* Determine success or failure */\n+\tif (unlikely(rslt != QBMAN_MC_RSLT_OK)) {\n+\t\tpr_err(\"CDAN cQID %d failed: code = 0x%02x\\n\",\n+\t\t       channelid, rslt);\n+\t\treturn -EIO;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,\n+\t\t\t       uint64_t ctx)\n+{\n+\treturn qbman_swp_CDAN_set(s, channelid,\n+\t\t\t\t  CODE_CDAN_WE_CTX,\n+\t\t\t\t  0, ctx);\n+}\n+\n+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)\n+{\n+\treturn qbman_swp_CDAN_set(s, channelid,\n+\t\t\t\t  CODE_CDAN_WE_EN,\n+\t\t\t\t  1, 0);\n+}\n+\n+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)\n+{\n+\treturn qbman_swp_CDAN_set(s, channelid,\n+\t\t\t\t  CODE_CDAN_WE_EN,\n+\t\t\t\t  0, 0);\n+}\n+\n+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,\n+\t\t\t\t      uint64_t ctx)\n+{\n+\treturn qbman_swp_CDAN_set(s, channelid,\n+\t\t\t\t  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,\n+\t\t\t\t  1, ctx);\n+}\n+\n+uint8_t 
qbman_get_dqrr_idx(struct qbman_result *dqrr)\n+{\n+\treturn QBMAN_IDX_FROM_DQRR(dqrr);\n+}\n+\n+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)\n+{\n+\tstruct qbman_result *dq;\n+\n+\tdq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));\n+\treturn dq;\n+}\n+\n+int qbman_swp_send_multiple(struct qbman_swp *s,\n+\t\t\t    const struct qbman_eq_desc *d,\n+\t\t\t    const struct qbman_fd *fd,\n+\t\t\t    int frames_to_send)\n+{\n+\tuint32_t *p;\n+\tconst uint32_t *cl = qb_cl(d);\n+\tuint32_t eqcr_ci;\n+\tuint8_t diff;\n+\tint sent = 0;\n+\tint i;\n+\tint initial_pi = s->eqcr.pi;\n+\tuint64_t start_pointer;\n+\n+\tif (!s->eqcr.available) {\n+\t\teqcr_ci = s->eqcr.ci;\n+\t\ts->eqcr.ci = qbman_cena_read_reg(&s->sys,\n+\t\t\t\t QBMAN_CENA_SWP_EQCR_CI) & 0xF;\n+\t\tdiff = qm_cyc_diff(QBMAN_EQCR_SIZE,\n+\t\t\t\t   eqcr_ci, s->eqcr.ci);\n+\t\tif (!diff)\n+\t\t\tgoto done;\n+\t\ts->eqcr.available += diff;\n+\t}\n+\n+\t/* We try to send frames_to_send frames, as long as there is space\n+\t * in the ring.\n+\t */\n+\twhile (s->eqcr.available && frames_to_send--) {\n+\t\tp = qbman_cena_write_start_wo_shadow_fast(&s->sys,\n+\t\t\t\t\tQBMAN_CENA_SWP_EQCR((initial_pi) & 7));\n+\t\t/* Write the command (except the first byte) and the FD */\n+\t\tmemcpy(&p[1], &cl[1], 7 * 4);\n+\t\tmemcpy(&p[8], &fd[sent], sizeof(struct qbman_fd));\n+\n+\t\tinitial_pi++;\n+\t\tinitial_pi &= 0xF;\n+\t\ts->eqcr.available--;\n+\t\tsent++;\n+\t}\n+\n+done:\n+\tinitial_pi = s->eqcr.pi;\n+\tlwsync();\n+\n+\t/* In order for the flushes to complete faster, we use the following\n+\t * trick: we first write the verb word of every recorded entry, then\n+\t * flush all the cachelines in a second pass.\n+\t */\n+\tfor (i = 0; i < sent; i++) {\n+\t\tp = qbman_cena_write_start_wo_shadow_fast(&s->sys,\n+\t\t\t\t\tQBMAN_CENA_SWP_EQCR((initial_pi) & 7));\n+\n+\t\tp[0] = cl[0] | s->eqcr.pi_vb;\n+\t\tinitial_pi++;\n+\t\tinitial_pi &= 0xF;\n+\n+\t\tif (!(initial_pi & 7))\n+\t\t\ts->eqcr.pi_vb ^= QB_VALID_BIT;\n+\t}\n+\n+\tinitial_pi = s->eqcr.pi;\n+\n+\t/* We need to flush all the lines without load/store operations\n+\t * between them. We assign start_pointer before the loop so that we\n+\t * do not re-read it from memory inside the loop.\n+\t */\n+\tstart_pointer = (uint64_t)s->sys.addr_cena;\n+\tfor (i = 0; i < sent; i++) {\n+\t\tp = (uint32_t *)(start_pointer\n+\t\t\t\t + QBMAN_CENA_SWP_EQCR(initial_pi & 7));\n+\t\tdcbf((uint64_t)p);\n+\t\tinitial_pi++;\n+\t\tinitial_pi &= 0xF;\n+\t}\n+\n+\t/* Update producer index for the next call */\n+\ts->eqcr.pi = initial_pi;\n+\n+\treturn sent;\n+}\n+\n+int qbman_get_version(void)\n+{\n+\treturn qman_version;\n+}
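\n+\n+/* Illustrative only: a rough caller-side sketch (not part of this patch) of\n+ * burst enqueue via qbman_swp_send_multiple(). The descriptor and the frame\n+ * array are assumed to have been prepared by the caller; the block is\n+ * compiled out.\n+ */\n+#if 0\n+static void example_burst_enqueue(struct qbman_swp *s,\n+\t\t\t\t  const struct qbman_eq_desc *d,\n+\t\t\t\t  const struct qbman_fd *fds, int n)\n+{\n+\tint sent = 0;\n+\n+\t/* The call returns how many frames fitted in EQCR; retry the rest */\n+\twhile (sent < n)\n+\t\tsent += qbman_swp_send_multiple(s, d, &fds[sent], n - sent);\n+}\n+#endif\ndiff --git a/drivers/common/dpaa2/qbman/qbman_portal.h b/drivers/common/dpaa2/qbman/qbman_portal.h\nnew file mode 100644\nindex 0000000..fe93354\n--- /dev/null\n+++ b/drivers/common/dpaa2/qbman/qbman_portal.h\n@@ -0,0 +1,274 @@\n+/* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in the\n+ *       documentation and/or other materials provided with the distribution.\n+ *     * Neither the name of Freescale Semiconductor nor the\n+ *       names of its 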
contributors may be used to endorse or promote products\n+ *       derived from this software without specific prior written permission.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY\n+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY\n+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include \"qbman_private.h\"\n+#include <fsl_qbman_portal.h>\n+\n+/* All QBMan command and result structures use this \"valid bit\" encoding */\n+#define QB_VALID_BIT ((uint32_t)0x80)\n+\n+/* Management command result codes */\n+#define QBMAN_MC_RSLT_OK      0xf0\n+\n+/* QBMan DQRR size is set at runtime in qbman_portal.c */\n+\n+#define QBMAN_EQCR_SIZE 8\n+\n+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)\n+{\n+\t/* 'first' is included, 'last' is excluded */\n+\tif (first <= last)\n+\t\treturn last - first;\n+\treturn (2 * ringsize) + last - first;\n+}\n+\n+/* --------------------- */\n+/* portal data structure */\n+/* --------------------- */\n+\n+struct qbman_swp {\n+\tstruct qbman_swp_desc desc;\n+\t/* The qbman_sys (ie. arch/OS-specific) support code can put anything it\n+\t * needs in here.\n+\t */\n+\tstruct qbman_swp_sys sys;\n+\t/* Management commands */\n+\tstruct {\n+#ifdef QBMAN_CHECKING\n+\t\tenum swp_mc_check {\n+\t\t\tswp_mc_can_start, /* call __qbman_swp_mc_start() */\n+\t\t\tswp_mc_can_submit, /* call __qbman_swp_mc_submit() */\n+\t\t\tswp_mc_can_poll, /* call __qbman_swp_mc_result() */\n+\t\t} check;\n+#endif\n+\t\tuint32_t valid_bit; /* 0x00 or 0x80 */\n+\t} mc;\n+\t/* Push dequeues */\n+\tuint32_t sdq;\n+\t/* Volatile dequeues */\n+\tstruct {\n+\t\t/* VDQCR supports a \"1 deep pipeline\", meaning that if you know\n+\t\t * the last-submitted command is already executing in the\n+\t\t * hardware (as evidenced by at least 1 valid dequeue result),\n+\t\t * you can write another dequeue command to the register, the\n+\t\t * hardware will start executing it as soon as the\n+\t\t * already-executing command terminates. (This minimises latency\n+\t\t * and stalls.) With that in mind, this \"busy\" variable refers\n+\t\t * to whether or not a command can be submitted, not whether or\n+\t\t * not a previously-submitted command is still executing. In\n+\t\t * other words, once proof is seen that the previously-submitted\n+\t\t * command is executing, \"vdq\" is no longer \"busy\".\n+\t\t */\n+\t\tatomic_t busy;\n+\t\tuint32_t valid_bit; /* 0x00 or 0x80 */\n+\t\t/* We need to determine when vdq is no longer busy. 
This depends\n+\t\t * on whether the \"busy\" (last-submitted) dequeue command is\n+\t\t * targeting DQRR or main-memory, and detection is based on the\n+\t\t * presence of the dequeue command's \"token\" showing up in\n+\t\t * dequeue entries in DQRR or main-memory (respectively).\n+\t\t */\n+\t\tstruct qbman_result *storage; /* NULL if DQRR */\n+\t} vdq;\n+\t/* DQRR */\n+\tstruct {\n+\t\tuint32_t next_idx;\n+\t\tuint32_t valid_bit;\n+\t\tuint8_t dqrr_size;\n+\t\tint reset_bug;\n+\t} dqrr;\n+\tstruct {\n+\t\tuint32_t pi;\n+\t\tuint32_t pi_vb;\n+\t\tuint32_t ci;\n+\t\tint available;\n+\t} eqcr;\n+};\n+\n+/* -------------------------- */\n+/* portal management commands */\n+/* -------------------------- */\n+\n+/* Different management commands all use this common base layer of code to issue\n+ * commands and poll for results. The first function returns a pointer to where\n+ * the caller should fill in their MC command (though they should ignore the\n+ * verb byte), the second function merges in the caller-supplied command\n+ * verb (which should not include the valid-bit) and submits the command to\n+ * hardware, and the third function checks for a completed response (returns\n+ * non-NULL only if the response is complete).\n+ */\n+void *qbman_swp_mc_start(struct qbman_swp *p);\n+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb);\n+void *qbman_swp_mc_result(struct qbman_swp *p);\n+\n+/* Wraps up submit + poll-for-result */\n+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,\n+\t\t\t\t\t  uint32_t cmd_verb)\n+{\n+\tint loopvar;\n+\n+\tqbman_swp_mc_submit(swp, cmd, cmd_verb);\n+\tDBG_POLL_START(loopvar);\n+\tdo {\n+\t\tDBG_POLL_CHECK(loopvar);\n+\t\tcmd = qbman_swp_mc_result(swp);\n+\t} while (!cmd);\n+\treturn cmd;\n+}
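\n+\n+/* Illustrative only: a minimal sketch (not part of this patch) of the\n+ * start/fill/complete pattern built on the helpers above, mirroring how\n+ * qbman_swp_acquire() and friends use them. The verb value 0x30 is a\n+ * hypothetical placeholder; the block is compiled out.\n+ */\n+#if 0\n+static int example_mc_command(struct qbman_swp *s)\n+{\n+\tuint32_t *p = qbman_swp_mc_start(s);\t/* claim the command buffer */\n+\n+\tif (!p)\n+\t\treturn -EBUSY;\n+\t/* ... fill in p[1..15] here, eg. via qb_attr_code_encode() ... */\n+\tp = qbman_swp_mc_complete(s, p, p[0] | 0x30 /* hypothetical verb */);\n+\t/* The result byte tells us whether the command succeeded */\n+\tif (qb_attr_code_decode(&code_generic_rslt, p) != QBMAN_MC_RSLT_OK)\n+\t\treturn -EIO;\n+\treturn 0;\n+}\n+#endif\n+\n+/* ------------ */\n+/* qb_attr_code */\n+/* ------------ */\n+\n+/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which\n+ * is either serving as a configuration command or a query result. The\n+ * representation is inherently little-endian, as the indexing of the words is\n+ * itself little-endian in nature and DPAA2 QBMan is little endian for anything\n+ * that crosses a word boundary too (64-bit fields are the obvious examples).\n+ */\n+struct qb_attr_code {\n+\tunsigned int word; /* which uint32_t[] array member encodes the field */\n+\tunsigned int lsoffset; /* encoding offset from ls-bit */\n+\tunsigned int width; /* encoding width. (bool must be 1.) */\n+};\n+\n+/* Some pre-defined codes */\n+extern struct qb_attr_code code_generic_verb;\n+extern struct qb_attr_code code_generic_rslt;\n+\n+/* Macros to define codes */\n+#define QB_CODE(a, b, c) { a, b, c}\n+#define QB_CODE_NULL \\\n+\tQB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1)\n+\n+/* Rotate a code \"ms\", meaning that it moves from less-significant bytes to\n+ * more-significant, from less-significant words to more-significant, etc. The\n+ * \"ls\" version does the inverse, from more-significant towards\n+ * less-significant.\n+ */\n+static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code,\n+\t\t\t\t\t  unsigned int bits)\n+{\n+\tcode->lsoffset += bits;\n+\twhile (code->lsoffset > 31) {\n+\t\tcode->word++;\n+\t\tcode->lsoffset -= 32;\n+\t}\n+}\n+\n+static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code,\n+\t\t\t\t\t  unsigned int bits)\n+{\n+\t/* Don't be fooled, this trick should work because the types are\n+\t * unsigned. 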
So the case that interests the while loop (the rotate has\n+\t * gone too far and the word count needs to compensate for it), is\n+\t * manifested when lsoffset is negative. But that equates to a really\n+\t * large unsigned value, starting with lots of \"F\"s. As such, we can\n+\t * continue adding 32 back to it until it wraps back round above zero,\n+\t * to a value of 31 or less...\n+\t */\n+\tcode->lsoffset -= bits;\n+\twhile (code->lsoffset > 31) {\n+\t\tcode->word--;\n+\t\tcode->lsoffset += 32;\n+\t}\n+}\n+\n+/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */\n+#define qb_attr_code_for_ms(code, bits, expr) \\\n+\t\tfor (; expr; qb_attr_code_rotate_ms(code, bits))\n+#define qb_attr_code_for_ls(code, bits, expr) \\\n+\t\tfor (; expr; qb_attr_code_rotate_ls(code, bits))\n+\n+/* decode a field from a cacheline */\n+static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code,\n+\t\t\t\t\t   const uint32_t *cacheline)\n+{\n+\treturn d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]);\n+}\n+\n+static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code,\n+\t\t\t\t\t      const uint64_t *cacheline)\n+{\n+\treturn cacheline[code->word / 2];\n+}\n+\n+/* encode a field to a cacheline */\n+static inline void qb_attr_code_encode(const struct qb_attr_code *code,\n+\t\t\t\t       uint32_t *cacheline, uint32_t val)\n+{\n+\tcacheline[code->word] =\n+\t\tr32_uint32_t(code->lsoffset, code->width, cacheline[code->word])\n+\t\t| e32_uint32_t(code->lsoffset, code->width, val);\n+}\n+\n+static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,\n+\t\t\t\t\t  uint64_t *cacheline, uint64_t val)\n+{\n+\tcacheline[code->word / 2] = val;\n+}\n+\n+/* Small-width signed values (two's-complement) will decode into medium-width\n+ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to\n+ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value\n+ * 249. Likewise -120 would decode as 136.) This function allows the caller to\n+ * \"re-sign\" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit\n+ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t).\n+ */\n+static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code,\n+\t\t\t\t\t      uint32_t val)\n+{\n+\tQBMAN_BUG_ON(val >= (1u << code->width));\n+\t/* code->width should never exceed the width of val. If it does then a\n+\t * different function with larger val size must be used to translate\n+\t * from unsigned to signed\n+\t */\n+\tQBMAN_BUG_ON(code->width > sizeof(val) * CHAR_BIT);\n+\t/* If the high bit was set, it was encoding a negative */\n+\tif (val >= 1u << (code->width - 1))\n+\t\treturn (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) -\n+\t\t\tval);\n+\t/* Otherwise, it was encoding a positive */\n+\treturn (int32_t)val;\n+}\n+\n+/* ---------------------- */\n+/* Descriptors/cachelines */\n+/* ---------------------- */\n+\n+/* To avoid needless dynamic allocation, the driver API often gives the caller\n+ * a \"descriptor\" type that the caller can instantiate however they like.\n+ * Ultimately though, it is just a cacheline of binary storage (or something\n+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for\n+ * holding pre-formatted pieces of hardware commands. 
The performance-critical\n+ * code can then copy these descriptors directly into hardware command\n+ * registers more efficiently than trying to construct/format commands\n+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in\n+ * order for the compiler to know its size, but the internal details are not\n+ * exposed. The following macro is used within the driver for converting *any*\n+ * descriptor pointer to a usable array pointer. The use of a macro (instead of\n+ * an inline) is necessary to work with different descriptor types and to work\n+ * correctly with const and non-const inputs (and similarly-qualified outputs).\n+ */\n+#define qb_cl(d) (&(d)->dont_manipulate_directly[0])\ndiff --git a/drivers/common/dpaa2/qbman/qbman_private.h b/drivers/common/dpaa2/qbman/qbman_private.h\nnew file mode 100644\nindex 0000000..24fea62\n--- /dev/null\n+++ b/drivers/common/dpaa2/qbman/qbman_private.h\n@@ -0,0 +1,167 @@\n+/* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in the\n+ *       documentation and/or other materials provided with the distribution.\n+ *     * Neither the name of Freescale Semiconductor nor the\n+ *       names of its contributors may be used to endorse or promote products\n+ *       derived from this software without specific prior written permission.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY\n+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY\n+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/* Perform extra checking */\n+#define QBMAN_CHECKING\n+\n+/* To maximise the amount of logic that is common between the Linux driver and\n+ * other targets (such as the embedded MC firmware), we pivot here between the\n+ * inclusion of two platform-specific headers.\n+ *\n+ * The first, qbman_sys_decl.h, includes any and all required system headers as\n+ * well as providing any definitions for the purposes of compatibility. The\n+ * second, qbman_sys.h, is where platform-specific routines go.\n+ *\n+ * The point of the split is that the platform-independent code (including this\n+ * header) may depend on platform-specific declarations, yet other\n+ * platform-specific routines may depend on platform-independent definitions.\n+ */\n+\n+#include \"qbman_sys_decl.h\"\n+\n+/* When things go wrong, it is a convenient trick to insert a few FOO()\n+ * statements in the code to trace progress. 
TODO: remove this once we are\n+ * hacking the code less actively.\n+ */\n+#define FOO() fsl_os_print(\"FOO: %s:%d\\n\", __FILE__, __LINE__)\n+\n+/* Any time there is a register interface which we poll on, this provides a\n+ * \"break after x iterations\" scheme for it. It's handy for debugging, eg.\n+ * where you don't want millions of lines of log output from a polling loop\n+ * that won't terminate, because such things tend to drown out the earlier log\n+ * output that might explain what caused the problem. (NB: put \";\" after each\n+ * macro!)\n+ * TODO: we should probably remove this once we're done sanitising the\n+ * simulator...\n+ */\n+#define DBG_POLL_START(loopvar) (loopvar = 10)\n+#define DBG_POLL_CHECK(loopvar) \\\n+do { \\\n+\tif (!(loopvar--)) \\\n+\t\tQBMAN_BUG_ON(NULL == \"DBG_POLL_CHECK\"); \\\n+} while (0)\n+\n+/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets\n+ * and widths, these macro-generated encode/decode/isolate/remove inlines can\n+ * be used.\n+ *\n+ * Eg. to \"d\"ecode a 14-bit field out of a register (into a \"uint16_t\" type),\n+ * where the field is located 3 bits \"up\" from the least-significant bit of the\n+ * register (ie. the field location within the 32-bit register corresponds to a\n+ * mask of 0x0001fff8), you would do;\n+ *                uint16_t field = d32_uint16_t(3, 14, reg_value);\n+ *\n+ * Or to \"e\"ncode a 1-bit boolean value (input type is \"int\", zero is FALSE,\n+ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the \"!!\"\n+ * operator) into a register at bit location 0x00080000 (19 bits \"in\" from the\n+ * LS bit), do;\n+ *                reg_value |= e32_int(19, 1, !!field);\n+ *\n+ * If you wish to read-modify-write a register, such that you leave the 14-bit\n+ * field as-is but have all other fields set to zero, then \"i\"solate the 14-bit\n+ * value using;\n+ *                reg_value = i32_uint16_t(3, 14, reg_value);\n+ *\n+ * Alternatively, you could \"r\"emove the 1-bit boolean field (setting it to\n+ * zero) but leaving all other fields as-is;\n+ *                reg_value = r32_int(19, 1, reg_value);\n+ *\n+ */\n+#define MAKE_MASK32(width) (width == 32 ? 
0xffffffff : \\\n+\t\t\t\t (uint32_t)((1 << width) - 1))\n+#define DECLARE_CODEC32(t) \\\n+static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \\\n+{ \\\n+\tQBMAN_BUG_ON(width > (sizeof(t) * 8)); \\\n+\treturn ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \\\n+} \\\n+static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \\\n+{ \\\n+\tQBMAN_BUG_ON(width > (sizeof(t) * 8)); \\\n+\treturn (t)((val >> lsoffset) & MAKE_MASK32(width)); \\\n+} \\\n+static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \\\n+\t\t\t\tuint32_t val) \\\n+{ \\\n+\tQBMAN_BUG_ON(width > (sizeof(t) * 8)); \\\n+\treturn e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \\\n+} \\\n+static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \\\n+\t\t\t\tuint32_t val) \\\n+{ \\\n+\tQBMAN_BUG_ON(width > (sizeof(t) * 8)); \\\n+\treturn ~(MAKE_MASK32(width) << lsoffset) & val; \\\n+}\n+DECLARE_CODEC32(uint32_t)\n+DECLARE_CODEC32(uint16_t)\n+DECLARE_CODEC32(uint8_t)\n+DECLARE_CODEC32(int)\n+\n+\t/*********************/\n+\t/* Debugging assists */\n+\t/*********************/\n+\n+static inline void __hexdump(unsigned long start, unsigned long end,\n+\t\t\t     unsigned long p, size_t sz, const unsigned char *c)\n+{\n+\twhile (start < end) {\n+\t\tunsigned int pos = 0;\n+\t\tchar buf[64];\n+\t\tint nl = 0;\n+\n+\t\tpos += sprintf(buf + pos, \"%08lx: \", start);\n+\t\tdo {\n+\t\t\tif ((start < p) || (start >= (p + sz)))\n+\t\t\t\tpos += sprintf(buf + pos, \"..\");\n+\t\t\telse\n+\t\t\t\tpos += sprintf(buf + pos, \"%02x\", *(c++));\n+\t\t\tif (!(++start & 15)) {\n+\t\t\t\tbuf[pos++] = '\\n';\n+\t\t\t\tnl = 1;\n+\t\t\t} else {\n+\t\t\t\tnl = 0;\n+\t\t\t\tif (!(start & 1))\n+\t\t\t\t\tbuf[pos++] = ' ';\n+\t\t\t\tif (!(start & 3))\n+\t\t\t\t\tbuf[pos++] = ' ';\n+\t\t\t}\n+\t\t} while (start & 15);\n+\t\tif (!nl)\n+\t\t\tbuf[pos++] = '\\n';\n+\t\tbuf[pos] = '\\0';\n+\t\tpr_info(\"%s\", buf);\n+\t}\n+}\n+\n+static inline void hexdump(const void *ptr, size_t sz)\n+{\n+\tunsigned long p = (unsigned long)ptr;\n+\tunsigned long start = p & ~(unsigned long)15;\n+\tunsigned long end = (p + sz + 15) & ~(unsigned long)15;\n+\tconst unsigned char *c = ptr;\n+\n+\t__hexdump(start, end, p, sz, c);\n+}\n+\n+#include \"qbman_sys.h\"\ndiff --git a/drivers/common/dpaa2/qbman/qbman_sys.h b/drivers/common/dpaa2/qbman/qbman_sys.h\nnew file mode 100644\nindex 0000000..3704a7f\n--- /dev/null\n+++ b/drivers/common/dpaa2/qbman/qbman_sys.h\n@@ -0,0 +1,380 @@\n+/* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in the\n+ *       documentation and/or other materials provided with the distribution.\n+ *     * Neither the name of Freescale Semiconductor nor the\n+ *       names of its contributors may be used to endorse or promote products\n+ *       derived from this software without specific prior written permission.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY\n+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
PURPOSE ARE\n+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY\n+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the\n+ * driver. They are only included via qbman_private.h, which is itself a\n+ * platform-independent file and is included by all the other driver source.\n+ *\n+ * qbman_sys_decl.h is included prior to all other declarations and logic, and\n+ * it exists to provide compatibility with any linux interfaces our\n+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file\n+ * provides linux compatibility.\n+ *\n+ * This qbman_sys.h header, on the other hand, is included *after* any common\n+ * and platform-neutral declarations and logic in qbman_private.h, and exists to\n+ * implement any platform-specific logic of the qbman driver itself. Ie. it is\n+ * *not* to provide linux compatibility.\n+ */\n+\n+/* Trace the 3 different classes of read/write access to QBMan. #undef as\n+ * required.\n+ */\n+#undef QBMAN_CCSR_TRACE\n+#undef QBMAN_CINH_TRACE\n+#undef QBMAN_CENA_TRACE\n+\n+static inline void word_copy(void *d, const void *s, unsigned int cnt)\n+{\n+\tuint32_t *dd = d;\n+\tconst uint32_t *ss = s;\n+\n+\twhile (cnt--)\n+\t\t*(dd++) = *(ss++);\n+}\n+\n+/* Currently, the CENA support code expects each 32-bit word to be written in\n+ * host order, and these are converted to hardware (little-endian) order on\n+ * command submission. 
However, 64-bit quantities must be written (and read)\n+ * as two 32-bit words with the least-significant word first, irrespective of\n+ * host endianness.\n+ */\n+static inline void u64_to_le32_copy(void *d, const uint64_t *s,\n+\t\t\t\t    unsigned int cnt)\n+{\n+\tuint32_t *dd = d;\n+\tconst uint32_t *ss = (const uint32_t *)s;\n+\n+\twhile (cnt--) {\n+\t\t/* TBD: the toolchain was choking on the use of 64-bit types up\n+\t\t * until recently so this works entirely with 32-bit variables.\n+\t\t * When 64-bit types become usable again, investigate better\n+\t\t * ways of doing this.\n+\t\t */\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\t*(dd++) = ss[1];\n+\t\t*(dd++) = ss[0];\n+\t\tss += 2;\n+#else\n+\t\t*(dd++) = *(ss++);\n+\t\t*(dd++) = *(ss++);\n+#endif\n+\t}\n+}\n+\n+static inline void u64_from_le32_copy(uint64_t *d, const void *s,\n+\t\t\t\t      unsigned int cnt)\n+{\n+\tconst uint32_t *ss = s;\n+\tuint32_t *dd = (uint32_t *)d;\n+\n+\twhile (cnt--) {\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+\t\tdd[1] = *(ss++);\n+\t\tdd[0] = *(ss++);\n+\t\tdd += 2;\n+#else\n+\t\t*(dd++) = *(ss++);\n+\t\t*(dd++) = *(ss++);\n+#endif\n+\t}\n+}\n+\n+/* Convert a host-native 32bit value into little endian */\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+static inline uint32_t make_le32(uint32_t val)\n+{\n+\treturn ((val & 0xff) << 24) | ((val & 0xff00) << 8) |\n+\t\t((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24);\n+}\n+\n+static inline uint32_t make_le24(uint32_t val)\n+{\n+\treturn (((val & 0xff) << 16) | (val & 0xff00) |\n+\t\t((val & 0xff0000) >> 16));\n+}\n+static inline void make_le32_n(uint32_t *val, unsigned int num)\n+{\n+\twhile (num--) {\n+\t\t*val = make_le32(*val);\n+\t\tval++;\n+\t}\n+}\n+#else\n+#define make_le32(val) (val)\n+#define make_le24(val) (val)\n+#define make_le32_n(val, len) do {} while (0)\n+#endif\n+\n+\t/******************/\n+\t/* Portal access  */\n+\t/******************/\n+struct qbman_swp_sys {\n+\t/* On GPP, the sys support for qbman_swp is here. The CENA region is\n+\t * not an mmap() of the real portal registers, but an allocated\n+\t * place-holder, because the actual writes/reads to/from the portal are\n+\t * marshalled from these allocated areas using QBMan's \"MC access\n+\t * registers\". CINH accesses are atomic so there's no need for a\n+\t * place-holder.\n+\t */\n+\tuint8_t *cena;\n+\tuint8_t __iomem *addr_cena;\n+\tuint8_t __iomem *addr_cinh;\n+\tuint32_t idx;\n+\tenum qbman_eqcr_mode eqcr_mode;\n+};\n+\n+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal\n+ * C is (ACCESS_CMD,12,1) - is inhibited? 
(0==CENA, 1==CINH)\n+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index\n+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)\n+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)\n+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)\n+ */\n+\n+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,\n+\t\t\t\t    uint32_t val)\n+{\n+\t__raw_writel(val, s->addr_cinh + offset);\n+#ifdef QBMAN_CINH_TRACE\n+\tpr_info(\"qbman_cinh_write(%p:%d:0x%03x) 0x%08x\\n\",\n+\t\ts->addr_cinh, s->idx, offset, val);\n+#endif\n+}\n+\n+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)\n+{\n+\tuint32_t reg = __raw_readl(s->addr_cinh + offset);\n+#ifdef QBMAN_CINH_TRACE\n+\tpr_info(\"qbman_cinh_read(%p:%d:0x%03x) 0x%08x\\n\",\n+\t\ts->addr_cinh, s->idx, offset, reg);\n+#endif\n+\treturn reg;\n+}\n+\n+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,\n+\t\t\t\t\t   uint32_t offset)\n+{\n+\tvoid *shadow = s->cena + offset;\n+\n+#ifdef QBMAN_CENA_TRACE\n+\tpr_info(\"qbman_cena_write_start(%p:%d:0x%03x) %p\\n\",\n+\t\ts->addr_cena, s->idx, offset, shadow);\n+#endif\n+\tQBMAN_BUG_ON(offset & 63);\n+\tdcbz(shadow);\n+\treturn shadow;\n+}\n+\n+static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,\n+\t\t\t\t\t\t     uint32_t offset)\n+{\n+#ifdef QBMAN_CENA_TRACE\n+\tpr_info(\"qbman_cena_write_start(%p:%d:0x%03x)\\n\",\n+\t\ts->addr_cena, s->idx, offset);\n+#endif\n+\tQBMAN_BUG_ON(offset & 63);\n+\treturn (s->addr_cena + offset);\n+}\n+\n+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,\n+\t\t\t\t\t     uint32_t offset, void *cmd)\n+{\n+\tconst uint32_t *shadow = cmd;\n+\tint loop;\n+#ifdef QBMAN_CENA_TRACE\n+\tpr_info(\"qbman_cena_write_complete(%p:%d:0x%03x) %p\\n\",\n+\t\ts->addr_cena, s->idx, offset, shadow);\n+\thexdump(cmd, 64);\n+#endif\n+\tfor (loop = 15; loop >= 1; loop--)\n+\t\t__raw_writel(shadow[loop], s->addr_cena +\n+\t\t\t\t\t offset + loop * 4);\n+\tlwsync();\n+\t__raw_writel(shadow[0], s->addr_cena + offset);\n+\tdcbf(s->addr_cena + offset);\n+}\n+\n+static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,\n+\t\t\t\t\t\t       uint32_t offset)\n+{\n+#ifdef QBMAN_CENA_TRACE\n+\tpr_info(\"qbman_cena_write_complete(%p:%d:0x%03x)\\n\",\n+\t\ts->addr_cena, s->idx, offset);\n+\thexdump(s->addr_cena + offset, 64);\n+#endif\n+\tdcbf(s->addr_cena + offset);\n+}\n+\n+static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s,\n+\t\t\t\t\t   uint32_t offset)\n+{\n+\treturn __raw_readl(s->addr_cena + offset);\n+}\n+\n+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)\n+{\n+\tuint32_t *shadow = (uint32_t *)(s->cena + offset);\n+\tunsigned int loop;\n+#ifdef QBMAN_CENA_TRACE\n+\tpr_info(\"qbman_cena_read(%p:%d:0x%03x) %p\\n\",\n+\t\ts->addr_cena, s->idx, offset, shadow);\n+#endif\n+\n+\tfor (loop = 0; loop < 16; loop++)\n+\t\tshadow[loop] = __raw_readl(s->addr_cena + offset\n+\t\t\t\t\t+ loop * 4);\n+#ifdef QBMAN_CENA_TRACE\n+\thexdump(shadow, 64);\n+#endif\n+\treturn shadow;\n+}\n+\n+static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,\n+\t\t\t\t\t      uint32_t offset)\n+{\n+#ifdef QBMAN_CENA_TRACE\n+\tpr_info(\"qbman_cena_read(%p:%d:0x%03x)\\n\",\n+\t\ts->addr_cena, s->idx, offset);\n+\thexdump(s->addr_cena + offset, 64);\n+#endif\n+\treturn s->addr_cena + offset;\n+}\n+\n+static inline void qbman_cena_invalidate(struct qbman_swp_sys *s,\n+\t\t\t\t\t 
uint32_t offset)\n+{\n+\tdccivac(s->addr_cena + offset);\n+}\n+\n+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,\n+\t\t\t\t\t\t  uint32_t offset)\n+{\n+\tdccivac(s->addr_cena + offset);\n+\tprefetch_for_load(s->addr_cena + offset);\n+}\n+\n+static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,\n+\t\t\t\t       uint32_t offset)\n+{\n+\tprefetch_for_load(s->addr_cena + offset);\n+}\n+\n+\t/******************/\n+\t/* Portal support */\n+\t/******************/\n+\n+/* The SWP_CFG portal register is special, in that it is used by the\n+ * platform-specific code rather than the platform-independent code in\n+ * qbman_portal.c. So use of it is declared locally here.\n+ */\n+#define QBMAN_CINH_SWP_CFG   0xd00\n+\n+/* For MC portal use, we always configure with\n+ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4)\n+ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x2)\n+ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3)\n+ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2)\n+ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x2)\n+ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- TRUE)\n+ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE)\n+ * SE is (SWP_CFG,3,1) - memory stashing enable (<- TRUE)\n+ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE)\n+ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- TRUE)\n+ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- TRUE)\n+ */\n+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,\n+\t\t\t\t\t uint8_t est, uint8_t rpm, uint8_t dcm,\n+\t\t\t\t\tuint8_t epm, int sd, int sp, int se,\n+\t\t\t\t\tint dp, int de, int ep)\n+{\n+\tuint32_t reg;\n+\n+\treg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) |\n+\t\te32_uint8_t(16, 3, est) |\n+\t\te32_uint8_t(12, 2, rpm) | e32_uint8_t(10, 2, dcm) |\n+\t\te32_uint8_t(8, 2, epm) | e32_int(5, 1, sd) |\n+\t\te32_int(4, 1, sp) | e32_int(3, 1, se) | e32_int(2, 1, dp) |\n+\t\te32_int(1, 1, de) | e32_int(0, 1, ep) |\te32_uint8_t(14, 1, wn);\n+\treturn reg;\n+}\n+\n+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,\n+\t\t\t\t     const struct qbman_swp_desc *d,\n+\t\t\t\t     uint8_t dqrr_size)\n+{\n+\tuint32_t reg;\n+\n+\ts->addr_cena = d->cena_bar;\n+\ts->addr_cinh = d->cinh_bar;\n+\ts->idx = (uint32_t)d->idx;\n+\ts->cena = (void *)get_zeroed_page(GFP_KERNEL);\n+\tif (!s->cena) {\n+\t\tpr_err(\"Could not allocate page for cena shadow\\n\");\n+\t\treturn -1;\n+\t}\n+\ts->eqcr_mode = d->eqcr_mode;\n+\tQBMAN_BUG_ON(d->idx < 0);\n+#ifdef QBMAN_CHECKING\n+\t/* We should never be asked to initialise for a portal that isn't in\n+\t * the power-on state. (Ie. 
don't forget to reset portals when they are\n+\t * decommissioned!)\n+\t */\n+\treg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);\n+\tQBMAN_BUG_ON(reg);\n+#endif\n+\tif (s->eqcr_mode == qman_eqcr_vb_array)\n+\t\treg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1,\n+\t\t\t\t\t1, 1);\n+\telse\n+\t\treg = qbman_set_swp_cfg(dqrr_size, 0, 2, 3, 2, 2, 1, 1, 1, 1,\n+\t\t\t\t\t1, 1);\n+\tqbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);\n+\treg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);\n+\tif (!reg) {\n+\t\tpr_err(\"The portal %d is not enabled!\\n\", s->idx);\n+\t\tfree_page((unsigned long)s->cena);\n+\t\treturn -1;\n+\t}\n+\treturn 0;\n+}\n+\n+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)\n+{\n+\tfree_page((unsigned long)s->cena);\n+}\n+\n+static inline void *\n+qbman_cena_write_start_wo_shadow_fast(struct qbman_swp_sys *s,\n+\t\t\t\t      uint32_t offset)\n+{\n+#ifdef QBMAN_CENA_TRACE\n+\tpr_info(\"qbman_cena_write_start(%p:%d:0x%03x)\\n\",\n+\t\ts->addr_cena, s->idx, offset);\n+#endif\n+\tQBMAN_BUG_ON(offset & 63);\n+\treturn (s->addr_cena + offset);\n+}\ndiff --git a/drivers/common/dpaa2/qbman/qbman_sys_decl.h b/drivers/common/dpaa2/qbman/qbman_sys_decl.h\nnew file mode 100644\nindex 0000000..c49da57\n--- /dev/null\n+++ b/drivers/common/dpaa2/qbman/qbman_sys_decl.h\n@@ -0,0 +1,70 @@\n+/* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in the\n+ *       documentation and/or other materials provided with the distribution.\n+ *     * Neither the name of Freescale Semiconductor nor the\n+ *       names of its contributors may be used to endorse or promote products\n+ *       derived from this software without specific prior written permission.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY\n+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY\n+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+#include <compat.h>\n+#include <fsl_qbman_base.h>\n+\n+/* Sanity check */\n+#if (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__) && \\\n+\t(__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__)\n+#error \"Unknown endianness!\"\n+#endif\n+\n+/* The platform-independent code shouldn't need endianness, except for\n+ * weird/fast-path cases like qbman_result_has_token(), which needs to\n+ * perform a passive and endianness-specific test on a read-only data structure\n+ * very quickly. 
It's an exception, and this symbol is used for that case.\n+ */\n+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__\n+#define DQRR_TOK_OFFSET 0\n+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24\n+#define SCN_STATE_OFFSET_IN_MEM 8\n+#define SCN_RID_OFFSET_IN_MEM 8\n+#else\n+#define DQRR_TOK_OFFSET 24\n+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0\n+#define SCN_STATE_OFFSET_IN_MEM 16\n+#define SCN_RID_OFFSET_IN_MEM 0\n+#endif\n+\n+/* Similarly-named functions */\n+#define upper32(a) upper_32_bits(a)\n+#define lower32(a) lower_32_bits(a)\n+\n+\t/****************/\n+\t/* arch assists */\n+\t/****************/\n+#define dcbz(p) { asm volatile(\"dc zva, %0\" : : \"r\" (p) : \"memory\"); }\n+#define lwsync() { asm volatile(\"dmb st\" : : : \"memory\"); }\n+#define dcbf(p) { asm volatile(\"dc cvac, %0\" : : \"r\"(p) : \"memory\"); }\n+#define dccivac(p) { asm volatile(\"dc civac, %0\" : : \"r\"(p) : \"memory\"); }\n+static inline void prefetch_for_load(void *p)\n+{\n+\tasm volatile(\"prfm pldl1keep, [%0, #64]\" : : \"r\" (p));\n+}\n+\n+static inline void prefetch_for_store(void *p)\n+{\n+\tasm volatile(\"prfm pstl1keep, [%0, #64]\" : : \"r\" (p));\n+}\ndiff --git a/drivers/common/dpaa2/qbman/rte_pmd_dpaa2_qbman_version.map b/drivers/common/dpaa2/qbman/rte_pmd_dpaa2_qbman_version.map\nnew file mode 100644\nindex 0000000..8021478\n--- /dev/null\n+++ b/drivers/common/dpaa2/qbman/rte_pmd_dpaa2_qbman_version.map\n@@ -0,0 +1,21 @@\n+DPDK_17.02 {\n+\tglobal:\n+\tqbman_check_command_complete;\n+\tqbman_eq_desc_clear;\n+\tqbman_eq_desc_set_no_orp;\n+\tqbman_eq_desc_set_response;\n+\tqbman_eq_desc_set_qd;\n+\tqbman_get_version;\n+\tqbman_pull_desc_clear;\n+\tqbman_pull_desc_set_fq;\n+\tqbman_pull_desc_set_numframes;\n+\tqbman_pull_desc_set_storage;\n+\tqbman_swp_pull;\n+\tqbman_swp_send_multiple;\n+\tqbman_result_DQ_fd;\n+\tqbman_result_DQ_flags;\n+\tqbman_result_has_new_result;\n+\n+\tlocal: *;\n+};\n",
    "prefixes": [
        "dpdk-dev",
        "PATCHv2",
        "04/34"
    ]
}