get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/35828/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 35828,
    "url": "http://patches.dpdk.org/api/patches/35828/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1520584954-130575-9-git-send-email-Ravi1.kumar@amd.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1520584954-130575-9-git-send-email-Ravi1.kumar@amd.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1520584954-130575-9-git-send-email-Ravi1.kumar@amd.com",
    "date": "2018-03-09T08:42:25",
    "name": "[dpdk-dev,v3,09/18] net/axgbe: add DMA programming and dev start and stop apis",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "343f50bb7de1e5de8e1f6fa4d32d502a6b757433",
    "submitter": {
        "id": 819,
        "url": "http://patches.dpdk.org/api/people/819/?format=api",
        "name": "Kumar, Ravi1",
        "email": "ravi1.kumar@amd.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1520584954-130575-9-git-send-email-Ravi1.kumar@amd.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/35828/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/35828/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 32C74AAD3;\n\tFri,  9 Mar 2018 09:43:18 +0100 (CET)",
            "from NAM01-BY2-obe.outbound.protection.outlook.com\n\t(mail-by2nam01on0070.outbound.protection.outlook.com [104.47.34.70])\n\tby dpdk.org (Postfix) with ESMTP id 6AD28AA97\n\tfor <dev@dpdk.org>; Fri,  9 Mar 2018 09:43:15 +0100 (CET)",
            "from wallaby-smavila.amd.com (202.56.249.162) by\n\tMWHPR12MB1517.namprd12.prod.outlook.com (2603:10b6:301:b::21) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384_P256) id 15.20.567.12;\n\tFri, 9 Mar 2018 08:43:13 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=amdcloud.onmicrosoft.com; s=selector1-amd-com;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n\tbh=Q4PsPssbzSh/3svz6NOpqWi8zP8vfhLs+VSNkIp+OY8=;\n\tb=USyjOD2+JfakMR/mfB+hlck3bracY0fP0JpQthy2JWaHqdASc8bMDjENe4sPcFQNR4jKEakWHAtZ0tVMrzkNEShexLoN71JEfHpSABqFYZY5fsOEWIamwF96B/jkDRt0wgC9o/u2Jt8/V1JfLpfVLdSq/Cmv9XXwiIPu60JqE0Y=",
        "From": "Ravi Kumar <Ravi1.kumar@amd.com>",
        "To": "dev@dpdk.org",
        "Cc": "ferruh.yigit@intel.com",
        "Date": "Fri,  9 Mar 2018 03:42:25 -0500",
        "Message-Id": "<1520584954-130575-9-git-send-email-Ravi1.kumar@amd.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1520584954-130575-1-git-send-email-Ravi1.kumar@amd.com>",
        "References": "<1515145938-97474-1-git-send-email-Ravi1.kumar@amd.com>\n\t<1520584954-130575-1-git-send-email-Ravi1.kumar@amd.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[202.56.249.162]",
        "X-ClientProxiedBy": "BM1PR0101CA0057.INDPRD01.PROD.OUTLOOK.COM\n\t(2603:1096:b00:19::19) To MWHPR12MB1517.namprd12.prod.outlook.com\n\t(2603:10b6:301:b::21)",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-HT": "Tenant",
        "X-MS-Office365-Filtering-Correlation-Id": "e9b004f8-dca2-4397-ab52-08d58599cbe5",
        "X-Microsoft-Antispam": "UriScan:; BCL:0; PCL:0;\n\tRULEID:(7020095)(4652020)(48565401081)(5600026)(4604075)(4534165)(4627221)(201703031133081)(201702281549075)(2017052603328)(7153060)(7193020);\n\tSRVR:MWHPR12MB1517; ",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; MWHPR12MB1517;\n\t3:5TFALu8oIpNvUomFjkVZX4nk8fFxrf1OS5FsQHruqR85vcygNyG4y8Mucb/Zama1oC1HcM4l+Hp1mPa+AbN+L/mYn2mAcIiYBRElvdA7ciHZF7oBus2t6vjCd+VkN3seSFVyLvUtjddFWcQen7nsZJh5YvwCbo68jYKw7sSpaUEs21IYBMucHcLD1snyo4ubeW6gkmHAonoj3NSMGSPmA8RX5X/DRxyWb1fW8a0N+YoxQYQ5NJXIDmXZCKbXfbQn;\n\t25:yMdJjhJfAqDBjCu4sMSRxqRf+DEgue8WuO/eVzwFmk7CB94evkgdRSyhmzRAJwRCtvdzBK7NpGJddCToj1LUYtofNYb1SIQx/xjOs4zyVqvGzl4AKtFopFgucgz32CeRwEAnuSq16hhNcA2TqYd0imMHfYoqsNitRvWM9TZQrjG9MiAigKP/wBLp1BobVfHhcYAttw9BZY5jAGv3pPU8V51qnuOmfvMgM7vsZZjZroe9D3oNfKfOfDYQOXoDY618sB5kgEQ36bj2MjaNHevzAZ4A8f3xawkG2KaxMSDkx1YFhbODWhslw6J6UZ7j+JCdK5WJnijRt3YWT03IDz80gQ==;\n\t31:i3M/c6O6li7E0mH7R03dJYajfbtCvkpeXT21hFA0+A7u1m44tClEWObfRkw1khEVe2A1G3XkkKPoRxqTCaJY+D9pmqw8/SIWBNmasIewCQG7Vick84t2EI978HD64pqae7cNBfPkdoF7p6LKIAEGLrgx2bpvIwKR4K97Z17n6IELaX+h/214+ym3iHg4nRnW59XlO+yBQmRKBjs0XtnobN85nepyLTcfu/acmUIOvQ4=",
            "1; MWHPR12MB1517;\n\t20:pehX5lEYB3lOtPaJ+Qd5CiaFW6YWT0p4wAuWO6aJVLfJmO/W4uSqQ+OIUK0vvHvz369N3zz9X/D3HpzFjTC+3Kv7CzfvirF7HwiPerDj/6bzRKCHJjPNPwJV/q7MthP2r+kwH1XWIM9DlJ+CSdNAsIHzAxqJheaj5Bg4SLbUEfq/CXNzxyTYczlfXhAQ7yy8cembwbSfWz/XmaM40J2HQEPdMRlakWUeZce2T43Y+S07G49BP9RH9ScwCsJk1Ckj74bzO24t8ZD6DxcXoQc5e1mn3YliRRpevtwEnhmGHl3YsjgWWcLCXOKcE3NaNCogkdFklCnJvSocZq1zswfwQolcUS4zuD3Kds+d6687Jv22TpQ3x88/WYIqv/l4SKFcMnRfu82zEQp5gehQLj0IIdlXsiKHDLeChHSRE5xBgHxHhZ48huN9lWVMe62fA5NGZ/erIQeofSWs8YNOX/5534p6PmO/zOP0u2uEoBtwjd1b/IV2VZtRgpLcNJSx5pKU;\n\t4:xDjmp8SwGzYbmg5L7xLwIwafO3fn4bOR6n0YDHx6VWsQ6aBG6waw2Jwhhkfx04/KyjhBdDMjPvQqz9SAnca+Dgf4SrYvmjV0/zmXmeBlc8HaleFzD7Zh0LHkUQ6dKJFXTSn3TOwW/m9ud2W9p7Q469pvJ94bxVJ3j7Bgq45vR7xhkubDptkqlidjgujA8huJaVrDBIQlWvE57kGtI1TW3F0agN2KS5iNFM+B6kl2B3Wj2cTs3a2UA7ZxGPzJO1oOI54rZd3CcLERJYOUE5BCYKL34vFByB3tO+A+6ybZtNNl3FCPU8MhQ8fhGDOHrX1j",
            "=?us-ascii?Q?1; MWHPR12MB1517;\n\t23:COI9BPyCg7NjlV4gimBs3iY7XPI9J0jRPaQZQQ12J?=\n\tU7DvjaAwsOzKocLr3uKeM/DeMh1ui8jrrpebDR15Q4qxykHO4R1uI93oQvVFcQatc9kuSPPl9ZXizgvLsV7/dLIiltouAK+ByN0GU2A01fmhWdFRtsMRz/HBCCZCO/fyLWc77RlltFhyvW2TyHT1NZZKyv0ae4d3LYv3jKnNocGbvoirnboSaZBmTI71f3C4gS15KiW8/Vjp3mXV6A+Z0w1AC06Ao8zuFY8+SCmizs5c0mHOuq8S6k5R1re1i+j3ec7s4IlXvS4tNtg76C6WoigFIQhVrSFk5/Hj8jCvyN0OslPIrfKlYBIgMUbHeSA5/eV4e6iMN0OwFD18zKsy/FS/frbgHk2RN0AHIPcy7ik9SI6X/1Faxhh3pcIukGHynepRyjJyfWDjOdKsLsu2ripMfA8TYuaMRWQjRDnDgXs8WsE8bw7M7sYU9vTKBHxSscWqstNAZmUqPmdJM4HALskZgcbi+hjkFa0s3nFLQf134MJ9zBGBpR4OtLkgDJFlS2jAo5HyfUx6vPOFpJKb4DG9aE4xuaMSyKr1Jfkn4MncU2QY1HxMaOFbLOZDimQx31EoZ6Pm+47pOyKNeoy2M5eL5u/t64AYmBkOp3QW3NFwX8Q+LkaLd7ZPryIsoYemQcjbL1QCSTp9O0KDiIF8jatbEh3xjL8sKnd6eWRamIO2XTi1ufDqk8vqD4ZD5K/5nVzZwZN2ywpcLHQuASK+vZm87F3eMx/tA7f64jauXxSLy9uDtTspUnpce/hswPkiq70VV6F7iqC5Mr4lKjQgmUxrdbYH5kWJK4nGamy3ExOlsIy6WWIl0qQ2svc+qGCZ0FlLxhqyQ9JZ2glEfoVE8pznlDiF1xZcqPcZ4GFfceDeqmREwDx1PmSKUZwiWLFKhm3QSW0BzLzE4zk8ojELHsIGULRqYiBnb0Vu6JfCO5mzaHfo+fnfnONzbtjEICpJFGWFW5jZzxmIaDYEHkU9lXmx+NKP7Va/t01QV0KZGvXy3vJvOz+McGNhecfaoVAVaqjNNKfnq98a4v0RYE9f3kgFxVcaQHy0Iy32V9FU94LFezkDw9yESbypJzUYCwtqIYq8j+crfQoSG6iNZnqbR4LabAHmHxDWccFeMaKfAfUvNZPRq/zCZMvSnL5NpwXNCr/5v7qlpKzu/KIDjCimz8GV8kF3+DvMbdNM9GmWG39JA==",
            "1; MWHPR12MB1517;\n\t6:SPHm7q5lK1RW8NhBUbrJN8y9tD0t1tEMuXXIVMH3/lIebmArvkhNcyWF/6W2YHvnj474AcAjgO9HLCYLNdBzBJeyxd64BrhO2Oxk71daVh7b+thC1VVWhDM7WF+p3dVcHGgYoJSDxwJ5Ssgj3VnRvIlxxdg7xxikmvTflwbLVMQTFJuZp+faljodbsEH3VHLJb648OHw0eeDO0dpaLNbL1IY09TmbVqFhNgkCEykPrVJpNOuzDb2JhkrfKAFmE7UhWUpA16u9aDbfQ+MaJq4dAeTZ1O+rXAel98t2mfyke5Ry+9H3rTfEzq0Lz8SgQg08YJpDOJZ3KuvPdbO8ggYnJPWzJimc7NB+knSeeE1CIo=;\n\t5:K0jVNx0h19Xtx56qih6iUfK/TptyoFRsFkz9amjpWhl9mz+OQ1qHMrhA5mMUdUQOwktL1lPmLVPJ5wVEwFpg16cLbkUG/aF57NXxiAP7tckONp2yzjouzdxaLP7SX1y5zPPAQgjnjom4bNMg43k6tl4NWf7YiUjZd/X+YZgsbyw=;\n\t24:NtYP4cJkJFjEg6TJlbHR3VCHWRhDepEaXp+14xgcFWVHBVDTGcsD6REw9KoufzENXtd1Xfu0/3ef4w+2hgNYJ8+brEYLZD/rG2cSnWTkfQ0=;\n\t7:/hT/PYe+vWXy2TVQUyVO8FGHzxCDmnL/yUrGbCunGpeSAcIy8aHR74Ss2XsJqER3m/8XPv9h10El+gFRKUsTlH8//i7QoBWVmpK+m5AUaEI662qIi9BdoetULwzSMzcVxlcxgEtyFykW+33ZDN1m/LM0w1MURlcXgnxlHc0LT+kSXCbWaO/msLLWk+jE2GsnYp6JOWH7I4XzcJRPiH4YjJKfVyTkx8bOAv8jyj+QmJXfAe4A315EylI7faCgS+Xl",
            "1; MWHPR12MB1517;\n\t20:wbq4Rlng/6fDtktWCHIgH6pXB6Pk5th+FCuQnj/hXozgk7eLK7+d9gT5yYqqu9t1vEVjCxP/yChVvKAih+ZMdD5UCf6Pu7W/bVbOyjZ5YsfNNSfdMMggSHJX6cGb/uH4AGRlXU9I4KiRRrH2EYqLldFH7neXBA6fVj7SmZrJrK583ffsVxt795cHpuV3COQwhiIzlecQ0xNBqqpoUJ/lmus3AoUIhejOyGRIkMv4FFvc9YOcLOsq42/54ZloQUj2"
        ],
        "X-MS-TrafficTypeDiagnostic": "MWHPR12MB1517:",
        "X-Microsoft-Antispam-PRVS": "<MWHPR12MB1517EE1CB55CEF9C39E02524AEDE0@MWHPR12MB1517.namprd12.prod.outlook.com>",
        "X-Exchange-Antispam-Report-Test": "UriScan:(767451399110);",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(8211001083)(6040522)(2401047)(8121501046)(5005006)(3002001)(10201501046)(93006095)(93001095)(3231220)(944501244)(52105095)(6055026)(6041310)(201703131423095)(201702281528075)(20161123555045)(201703061421075)(201703061406153)(20161123558120)(20161123562045)(20161123560045)(20161123564045)(6072148)(201708071742011);\n\tSRVR:MWHPR12MB1517; BCL:0; PCL:0; RULEID:; SRVR:MWHPR12MB1517; ",
        "X-Forefront-PRVS": "0606BBEB39",
        "X-Forefront-Antispam-Report": "SFV:NSPM;\n\tSFS:(10009020)(346002)(39380400002)(396003)(39860400002)(366004)(376002)(199004)(189003)(305945005)(50226002)(386003)(6346003)(26005)(36756003)(97736004)(25786009)(7736002)(50466002)(8936002)(4326008)(16586007)(86362001)(81156014)(81166006)(6666003)(2950100002)(6916009)(16526019)(186003)(5660300001)(59450400001)(316002)(8676002)(575784001)(76176011)(52116002)(51416003)(7696005)(53416004)(48376002)(478600001)(106356001)(6116002)(3846002)(68736007)(2351001)(2906002)(6486002)(66066001)(47776003)(105586002)(72206003)(53936002)(2361001);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:MWHPR12MB1517;\n\tH:wallaby-smavila.amd.com; FPR:; \n\tSPF:None; PTR:InfoNoRecords; A:1; MX:1; LANG:en; ",
        "Received-SPF": "None (protection.outlook.com: amd.com does not designate\n\tpermitted sender hosts)",
        "Authentication-Results": "spf=none (sender IP is )\n\tsmtp.mailfrom=Ravi1.Kumar@amd.com; ",
        "X-Microsoft-Antispam-Message-Info": "/C9iUlD44JZ27A6AvscOI3rQnhVzdh8uNlf6lg7qQtqO02a6b/YkHa3EJzC9BpmH34SJpxKhUqGnKICicMbcXP70Cyvk0tK0yGwXB3YjXWyXyOh2diM/mGNPRb7SqtGz9vTZ9iR8+vHYmRGf794OJH1FmPJCp0ADczScwsYklBOD9nM6VndEgvLXQlnOvy0m",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-OriginatorOrg": "amd.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "09 Mar 2018 08:43:13.0266\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "e9b004f8-dca2-4397-ab52-08d58599cbe5",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "Hosted",
        "X-MS-Exchange-CrossTenant-Id": "3dd8961f-e488-4e60-8e11-a82d994e183d",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MWHPR12MB1517",
        "Subject": "[dpdk-dev] [PATCH v3 09/18] net/axgbe: add DMA programming and dev\n\tstart and stop apis",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>\n---\n drivers/net/axgbe/axgbe_dev.c    | 844 +++++++++++++++++++++++++++++++++++++++\n drivers/net/axgbe/axgbe_ethdev.c |  96 +++++\n 2 files changed, 940 insertions(+)",
    "diff": "diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c\nindex 528241e..a69a078 100644\n--- a/drivers/net/axgbe/axgbe_dev.c\n+++ b/drivers/net/axgbe/axgbe_dev.c\n@@ -128,6 +128,13 @@\n #include \"axgbe_ethdev.h\"\n #include \"axgbe_common.h\"\n #include \"axgbe_phy.h\"\n+#include \"axgbe_rxtx.h\"\n+\n+static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)\n+{\n+\treturn pdata->eth_dev->data->mtu + ETHER_HDR_LEN +\n+\t\tETHER_CRC_LEN + VLAN_HLEN;\n+}\n \n /* query busy bit */\n static int mdio_complete(struct axgbe_port *pdata)\n@@ -334,6 +341,191 @@ static int axgbe_set_speed(struct axgbe_port *pdata, int speed)\n \treturn 0;\n }\n \n+static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)\n+{\n+\tunsigned int max_q_count, q_count;\n+\tunsigned int reg, reg_val;\n+\tunsigned int i;\n+\n+\t/* Clear MTL flow control */\n+\tfor (i = 0; i < pdata->rx_q_count; i++)\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);\n+\n+\t/* Clear MAC flow control */\n+\tmax_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;\n+\tq_count = RTE_MIN(pdata->tx_q_count,\n+\t\t\tmax_q_count);\n+\treg = MAC_Q0TFCR;\n+\tfor (i = 0; i < q_count; i++) {\n+\t\treg_val = AXGMAC_IOREAD(pdata, reg);\n+\t\tAXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);\n+\t\tAXGMAC_IOWRITE(pdata, reg, reg_val);\n+\n+\t\treg += MAC_QTFCR_INC;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)\n+{\n+\tunsigned int max_q_count, q_count;\n+\tunsigned int reg, reg_val;\n+\tunsigned int i;\n+\n+\t/* Set MTL flow control */\n+\tfor (i = 0; i < pdata->rx_q_count; i++) {\n+\t\tunsigned int ehfc = 0;\n+\n+\t\t/* Flow control thresholds are established */\n+\t\tif (pdata->rx_rfd[i])\n+\t\t\tehfc = 1;\n+\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);\n+\t}\n+\n+\t/* Set MAC flow control */\n+\tmax_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;\n+\tq_count = 
RTE_MIN(pdata->tx_q_count,\n+\t\t\tmax_q_count);\n+\treg = MAC_Q0TFCR;\n+\tfor (i = 0; i < q_count; i++) {\n+\t\treg_val = AXGMAC_IOREAD(pdata, reg);\n+\n+\t\t/* Enable transmit flow control */\n+\t\tAXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);\n+\t\t/* Set pause time */\n+\t\tAXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);\n+\n+\t\tAXGMAC_IOWRITE(pdata, reg, reg_val);\n+\n+\t\treg += MAC_QTFCR_INC;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)\n+{\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)\n+{\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)\n+{\n+\tif (pdata->tx_pause)\n+\t\taxgbe_enable_tx_flow_control(pdata);\n+\telse\n+\t\taxgbe_disable_tx_flow_control(pdata);\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)\n+{\n+\tif (pdata->rx_pause)\n+\t\taxgbe_enable_rx_flow_control(pdata);\n+\telse\n+\t\taxgbe_disable_rx_flow_control(pdata);\n+\n+\treturn 0;\n+}\n+\n+static void axgbe_config_flow_control(struct axgbe_port *pdata)\n+{\n+\taxgbe_config_tx_flow_control(pdata);\n+\taxgbe_config_rx_flow_control(pdata);\n+\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);\n+}\n+\n+static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,\n+\t\t\t\t\t       unsigned int queue,\n+\t\t\t\t\t       unsigned int q_fifo_size)\n+{\n+\tunsigned int frame_fifo_size;\n+\tunsigned int rfa, rfd;\n+\n+\tframe_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));\n+\n+\t/* This path deals with just maximum frame sizes which are\n+\t * limited to a jumbo frame of 9,000 (plus headers, etc.)\n+\t * so we can never exceed the maximum allowable RFA/RFD\n+\t * values.\n+\t */\n+\tif (q_fifo_size <= 2048) {\n+\t\t/* rx_rfd to zero to signal no flow control 
*/\n+\t\tpdata->rx_rfa[queue] = 0;\n+\t\tpdata->rx_rfd[queue] = 0;\n+\t\treturn;\n+\t}\n+\n+\tif (q_fifo_size <= 4096) {\n+\t\t/* Between 2048 and 4096 */\n+\t\tpdata->rx_rfa[queue] = 0;\t/* Full - 1024 bytes */\n+\t\tpdata->rx_rfd[queue] = 1;\t/* Full - 1536 bytes */\n+\t\treturn;\n+\t}\n+\n+\tif (q_fifo_size <= frame_fifo_size) {\n+\t\t/* Between 4096 and max-frame */\n+\t\tpdata->rx_rfa[queue] = 2;\t/* Full - 2048 bytes */\n+\t\tpdata->rx_rfd[queue] = 5;\t/* Full - 3584 bytes */\n+\t\treturn;\n+\t}\n+\n+\tif (q_fifo_size <= (frame_fifo_size * 3)) {\n+\t\t/* Between max-frame and 3 max-frames,\n+\t\t * trigger if we get just over a frame of data and\n+\t\t * resume when we have just under half a frame left.\n+\t\t */\n+\t\trfa = q_fifo_size - frame_fifo_size;\n+\t\trfd = rfa + (frame_fifo_size / 2);\n+\t} else {\n+\t\t/* Above 3 max-frames - trigger when just over\n+\t\t * 2 frames of space available\n+\t\t */\n+\t\trfa = frame_fifo_size * 2;\n+\t\trfa += AXGMAC_FLOW_CONTROL_UNIT;\n+\t\trfd = rfa + frame_fifo_size;\n+\t}\n+\n+\tpdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);\n+\tpdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);\n+}\n+\n+static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)\n+{\n+\tunsigned int q_fifo_size;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->rx_q_count; i++) {\n+\t\tq_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;\n+\n+\t\taxgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);\n+\t}\n+}\n+\n+static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->rx_q_count; i++) {\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,\n+\t\t\t\t\tpdata->rx_rfa[i]);\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,\n+\t\t\t\t\tpdata->rx_rfd[i]);\n+\t}\n+}\n+\n static int __axgbe_exit(struct axgbe_port *pdata)\n {\n \tunsigned int count = 2000;\n@@ -366,10 +558,659 @@ static int axgbe_exit(struct axgbe_port *pdata)\n \treturn 
__axgbe_exit(pdata);\n }\n \n+static int axgbe_flush_tx_queues(struct axgbe_port *pdata)\n+{\n+\tunsigned int i, count;\n+\n+\tif (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < pdata->tx_q_count; i++)\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);\n+\n+\t/* Poll Until Poll Condition */\n+\tfor (i = 0; i < pdata->tx_q_count; i++) {\n+\t\tcount = 2000;\n+\t\twhile (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,\n+\t\t\t\t\t\t\t MTL_Q_TQOMR, FTQ))\n+\t\t\trte_delay_us(500);\n+\n+\t\tif (!count)\n+\t\t\treturn -EBUSY;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void axgbe_config_dma_bus(struct axgbe_port *pdata)\n+{\n+\t/* Set enhanced addressing mode */\n+\tAXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);\n+\n+\t/* Out standing read/write requests*/\n+\tAXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);\n+\tAXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);\n+\n+\t/* Set the System Bus mode */\n+\tAXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);\n+\tAXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);\n+\tAXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);\n+}\n+\n+static void axgbe_config_dma_cache(struct axgbe_port *pdata)\n+{\n+\tunsigned int arcache, awcache, arwcache;\n+\n+\tarcache = 0;\n+\tAXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);\n+\tAXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);\n+\n+\tawcache = 0;\n+\tAXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);\n+\tAXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);\n+\tAXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);\n+\tAXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);\n+\tAXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);\n+\tAXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);\n+\tAXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);\n+\tAXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);\n+\n+\tarwcache = 0;\n+\tAXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);\n+\tAXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);\n+\tAXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 
0x3);\n+\tAXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);\n+}\n+\n+static void axgbe_config_edma_control(struct axgbe_port *pdata)\n+{\n+\tAXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);\n+\tAXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);\n+}\n+\n+static int axgbe_config_osp_mode(struct axgbe_port *pdata)\n+{\n+\t/* Force DMA to operate on second packet before closing descriptors\n+\t *  of first packet\n+\t */\n+\tstruct axgbe_tx_queue *txq;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {\n+\t\ttxq = pdata->eth_dev->data->tx_queues[i];\n+\t\tAXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,\n+\t\t\t\t\tpdata->tx_osp_mode);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_config_pblx8(struct axgbe_port *pdata)\n+{\n+\tstruct axgbe_tx_queue *txq;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {\n+\t\ttxq = pdata->eth_dev->data->tx_queues[i];\n+\t\tAXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,\n+\t\t\t\t\tpdata->pblx8);\n+\t}\n+\treturn 0;\n+}\n+\n+static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)\n+{\n+\tstruct axgbe_tx_queue *txq;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {\n+\t\ttxq = pdata->eth_dev->data->tx_queues[i];\n+\t\tAXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,\n+\t\t\t\tpdata->tx_pbl);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)\n+{\n+\tstruct axgbe_rx_queue *rxq;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {\n+\t\trxq = pdata->eth_dev->data->rx_queues[i];\n+\t\tAXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,\n+\t\t\t\tpdata->rx_pbl);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)\n+{\n+\tstruct axgbe_rx_queue *rxq;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {\n+\t\trxq = pdata->eth_dev->data->rx_queues[i];\n+\n+\t\trxq->buf_size = 
rte_pktmbuf_data_room_size(rxq->mb_pool) -\n+\t\t\tRTE_PKTMBUF_HEADROOM;\n+\t\trxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &\n+\t\t\t~(AXGBE_RX_BUF_ALIGN - 1);\n+\n+\t\tif (rxq->buf_size > pdata->rx_buf_size)\n+\t\t\tpdata->rx_buf_size = rxq->buf_size;\n+\n+\t\tAXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,\n+\t\t\t\t\trxq->buf_size);\n+\t}\n+}\n+\n+static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,\n+\t\t\t       unsigned int index, unsigned int val)\n+{\n+\tunsigned int wait;\n+\n+\tif (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))\n+\t\treturn -EBUSY;\n+\n+\tAXGMAC_IOWRITE(pdata, MAC_RSSDR, val);\n+\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);\n+\n+\twait = 1000;\n+\twhile (wait--) {\n+\t\tif (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))\n+\t\t\treturn 0;\n+\n+\t\trte_delay_us(1500);\n+\t}\n+\n+\treturn -EBUSY;\n+}\n+\n+static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)\n+{\n+\tstruct rte_eth_rss_conf *rss_conf;\n+\tunsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);\n+\tunsigned int *key;\n+\tint ret;\n+\n+\trss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;\n+\n+\tif (!rss_conf->rss_key)\n+\t\tkey = (unsigned int *)&pdata->rss_key;\n+\telse\n+\t\tkey = (unsigned int *)&rss_conf->rss_key;\n+\n+\twhile (key_regs--) {\n+\t\tret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,\n+\t\t\t\t\t  key_regs, *key++);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)\n+{\n+\tunsigned int i;\n+\tint ret;\n+\n+\tfor (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {\n+\t\tret = axgbe_write_rss_reg(pdata,\n+\t\t\t\t\t  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,\n+\t\t\t\t\t  pdata->rss_table[i]);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static 
int axgbe_enable_rss(struct axgbe_port *pdata)\n+{\n+\tint ret;\n+\n+\t/* Program the hash key */\n+\tret = axgbe_write_rss_hash_key(pdata);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Program the lookup table */\n+\tret = axgbe_write_rss_lookup_table(pdata);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Set the RSS options */\n+\tAXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);\n+\n+\t/* Enable RSS */\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);\n+\n+\treturn 0;\n+}\n+\n+static void axgbe_rss_options(struct axgbe_port *pdata)\n+{\n+\tstruct rte_eth_rss_conf *rss_conf;\n+\tuint64_t rss_hf;\n+\n+\trss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;\n+\trss_hf = rss_conf->rss_hf;\n+\n+\tif (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))\n+\t\tAXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);\n+\tif (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))\n+\t\tAXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);\n+\tif (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))\n+\t\tAXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);\n+}\n+\n+static int axgbe_config_rss(struct axgbe_port *pdata)\n+{\n+\tuint32_t i;\n+\n+\tif (pdata->rss_enable) {\n+\t\t/* Initialize RSS hash key and lookup table */\n+\t\tuint32_t *key = (uint32_t *)pdata->rss_key;\n+\n+\t\tfor (i = 0; i < sizeof(pdata->rss_key) / 4; i++)\n+\t\t\t*key++ = (uint32_t)rte_rand();\n+\t\tfor (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)\n+\t\t\tAXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,\n+\t\t\t\t\ti % pdata->eth_dev->data->nb_rx_queues);\n+\t\taxgbe_rss_options(pdata);\n+\t\tif (axgbe_enable_rss(pdata)) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Error in enabling RSS support\");\n+\t\t\treturn -1;\n+\t\t}\n+\t} else {\n+\t\tAXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)\n+{\n+\tstruct axgbe_tx_queue *txq;\n+\tunsigned int dma_ch_isr, dma_ch_ier;\n+\tunsigned int 
i;\n+\n+\tfor (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {\n+\t\ttxq = pdata->eth_dev->data->tx_queues[i];\n+\n+\t\t/* Clear all the interrupts which are set */\n+\t\tdma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);\n+\t\tAXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);\n+\n+\t\t/* Clear all interrupt enable bits */\n+\t\tdma_ch_ier = 0;\n+\n+\t\t/* Enable following interrupts\n+\t\t *   NIE  - Normal Interrupt Summary Enable\n+\t\t *   AIE  - Abnormal Interrupt Summary Enable\n+\t\t *   FBEE - Fatal Bus Error Enable\n+\t\t */\n+\t\tAXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);\n+\t\tAXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);\n+\t\tAXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);\n+\n+\t\t/* Enable following Rx interrupts\n+\t\t *   RBUE - Receive Buffer Unavailable Enable\n+\t\t *   RIE  - Receive Interrupt Enable (unless using\n+\t\t *          per channel interrupts in edge triggered\n+\t\t *          mode)\n+\t\t */\n+\t\tAXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);\n+\n+\t\tAXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);\n+\t}\n+}\n+\n+static void wrapper_tx_desc_init(struct axgbe_port *pdata)\n+{\n+\tstruct axgbe_tx_queue *txq;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {\n+\t\ttxq = pdata->eth_dev->data->tx_queues[i];\n+\t\ttxq->cur = 0;\n+\t\ttxq->dirty = 0;\n+\t\t/* Update the total number of Tx descriptors */\n+\t\tAXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);\n+\t\t/* Update the starting address of descriptor ring */\n+\t\tAXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,\n+\t\t\t\t\thigh32_value(txq->ring_phys_addr));\n+\t\tAXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,\n+\t\t\t\t\tlow32_value(txq->ring_phys_addr));\n+\t}\n+}\n+\n+static int wrapper_rx_desc_init(struct axgbe_port *pdata)\n+{\n+\tstruct axgbe_rx_queue *rxq;\n+\tstruct rte_mbuf *mbuf;\n+\tvolatile union axgbe_rx_desc *desc;\n+\tunsigned int i, j;\n+\n+\tfor (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {\n+\t\trxq = 
pdata->eth_dev->data->rx_queues[i];\n+\n+\t\t/* Initialize software ring entries */\n+\t\trxq->mbuf_alloc = 0;\n+\t\trxq->cur = 0;\n+\t\trxq->dirty = 0;\n+\t\tdesc = AXGBE_GET_DESC_PT(rxq, 0);\n+\n+\t\tfor (j = 0; j < rxq->nb_desc; j++) {\n+\t\t\tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\t\tif (mbuf == NULL) {\n+\t\t\t\tPMD_DRV_LOG(ERR, \"RX mbuf alloc failed queue_id = %u, idx = %d\",\n+\t\t\t\t\t    (unsigned int)rxq->queue_id, j);\n+\t\t\t\taxgbe_dev_rx_queue_release(rxq);\n+\t\t\t\treturn -ENOMEM;\n+\t\t\t}\n+\t\t\trxq->sw_ring[j] = mbuf;\n+\t\t\t/* Mbuf populate */\n+\t\t\tmbuf->next = NULL;\n+\t\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\t\tmbuf->nb_segs = 1;\n+\t\t\tmbuf->port = rxq->port_id;\n+\t\t\tdesc->read.baddr =\n+\t\t\t\trte_cpu_to_le_64(\n+\t\t\t\t\trte_mbuf_data_iova_default(mbuf));\n+\t\t\trte_wmb();\n+\t\t\tAXGMAC_SET_BITS_LE(desc->read.desc3,\n+\t\t\t\t\t\tRX_NORMAL_DESC3, OWN, 1);\n+\t\t\trte_wmb();\n+\t\t\trxq->mbuf_alloc++;\n+\t\t\tdesc++;\n+\t\t}\n+\t\t/* Update the total number of Rx descriptors */\n+\t\tAXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,\n+\t\t\t\t\trxq->nb_desc - 1);\n+\t\t/* Update the starting address of descriptor ring */\n+\t\tAXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,\n+\t\t\t\t\thigh32_value(rxq->ring_phys_addr));\n+\t\tAXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,\n+\t\t\t\t\tlow32_value(rxq->ring_phys_addr));\n+\t\t/* Update the Rx Descriptor Tail Pointer */\n+\t\tAXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,\n+\t\t\t\t   low32_value(rxq->ring_phys_addr +\n+\t\t\t\t   (rxq->nb_desc - 1) *\n+\t\t\t\t   sizeof(union axgbe_rx_desc)));\n+\t}\n+\treturn 0;\n+}\n+\n+static void axgbe_config_mtl_mode(struct axgbe_port *pdata)\n+{\n+\tunsigned int i;\n+\n+\t/* Set Tx to weighted round robin scheduling algorithm */\n+\tAXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);\n+\n+\t/* Set Tx traffic classes to use WRR algorithm with equal weights */\n+\tfor (i = 0; i < pdata->hw_feat.tc_cnt; i++) {\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, 
MTL_TC_ETSCR, TSA,\n+\t\t\t\tMTL_TSA_ETS);\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);\n+\t}\n+\n+\t/* Set Rx to strict priority algorithm */\n+\tAXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);\n+}\n+\n+static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->tx_q_count; i++)\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->rx_q_count; i++)\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_config_tx_threshold(struct axgbe_port *pdata,\n+\t\t\t\t     unsigned int val)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->tx_q_count; i++)\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);\n+\n+\treturn 0;\n+}\n+\n+static int axgbe_config_rx_threshold(struct axgbe_port *pdata,\n+\t\t\t\t     unsigned int val)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < pdata->rx_q_count; i++)\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);\n+\n+\treturn 0;\n+}\n+\n+/*Distrubting fifo size  */\n+static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)\n+{\n+\tunsigned int fifo_size;\n+\tunsigned int q_fifo_size;\n+\tunsigned int p_fifo, i;\n+\n+\tfifo_size = RTE_MIN(pdata->rx_max_fifo_size,\n+\t\t\t  pdata->hw_feat.rx_fifo_size);\n+\tq_fifo_size = fifo_size / pdata->rx_q_count;\n+\n+\t/* Calculate the fifo setting by dividing the queue's fifo size\n+\t * by the fifo allocation increment (with 0 representing the\n+\t * base allocation increment so decrement the result\n+\t * by 1).\n+\t */\n+\tp_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;\n+\tif (p_fifo)\n+\t\tp_fifo--;\n+\n+\tfor (i = 0; i < pdata->rx_q_count; i++)\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);\n+\tpdata->fifo = p_fifo;\n+\n+\t/*Calculate and 
config Flow control threshold*/\n+\taxgbe_calculate_flow_control_threshold(pdata);\n+\taxgbe_config_flow_control_threshold(pdata);\n+}\n+\n+static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)\n+{\n+\tunsigned int fifo_size;\n+\tunsigned int q_fifo_size;\n+\tunsigned int p_fifo, i;\n+\n+\tfifo_size = RTE_MIN(pdata->tx_max_fifo_size,\n+\t\t\t\tpdata->hw_feat.tx_fifo_size);\n+\tq_fifo_size = fifo_size / pdata->tx_q_count;\n+\n+\t/* Calculate the fifo setting by dividing the queue's fifo size\n+\t * by the fifo allocation increment (with 0 representing the\n+\t * base allocation increment so decrement the result\n+\t * by 1).\n+\t */\n+\tp_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;\n+\tif (p_fifo)\n+\t\tp_fifo--;\n+\n+\tfor (i = 0; i < pdata->tx_q_count; i++)\n+\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);\n+}\n+\n+static void axgbe_config_queue_mapping(struct axgbe_port *pdata)\n+{\n+\tunsigned int qptc, qptc_extra, queue;\n+\tunsigned int i, j, reg, reg_val;\n+\n+\t/* Map the MTL Tx Queues to Traffic Classes\n+\t *   Note: Tx Queues >= Traffic Classes\n+\t */\n+\tqptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;\n+\tqptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;\n+\n+\tfor (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {\n+\t\tfor (j = 0; j < qptc; j++)\n+\t\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,\n+\t\t\t\t\t\tQ2TCMAP, i);\n+\t\tif (i < qptc_extra)\n+\t\t\tAXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,\n+\t\t\t\t\t\tQ2TCMAP, i);\n+\t}\n+\n+\tif (pdata->rss_enable) {\n+\t\t/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */\n+\t\treg = MTL_RQDCM0R;\n+\t\treg_val = 0;\n+\t\tfor (i = 0; i < pdata->rx_q_count;) {\n+\t\t\treg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));\n+\n+\t\t\tif ((i % MTL_RQDCM_Q_PER_REG) &&\n+\t\t\t    (i != pdata->rx_q_count))\n+\t\t\t\tcontinue;\n+\n+\t\t\tAXGMAC_IOWRITE(pdata, reg, reg_val);\n+\n+\t\t\treg += MTL_RQDCM_INC;\n+\t\t\treg_val = 
0;\n+\t\t}\n+\t}\n+}\n+\n+static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)\n+{\n+\tunsigned int mtl_q_isr;\n+\tunsigned int q_count, i;\n+\n+\tq_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);\n+\tfor (i = 0; i < q_count; i++) {\n+\t\t/* Clear all the interrupts which are set */\n+\t\tmtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);\n+\t\tAXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);\n+\n+\t\t/* No MTL interrupts to be enabled */\n+\t\tAXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);\n+\t}\n+}\n+\n+static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)\n+{\n+\tunsigned int mac_addr_hi, mac_addr_lo;\n+\n+\tmac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);\n+\tmac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |\n+\t\t(addr[1] <<  8) | (addr[0] <<  0);\n+\n+\tAXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);\n+\tAXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);\n+\n+\treturn 0;\n+}\n+\n+static void axgbe_config_mac_address(struct axgbe_port *pdata)\n+{\n+\taxgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);\n+}\n+\n+static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)\n+{\n+\tunsigned int val;\n+\n+\tval = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 
1 : 0;\n+\tval = 1;\n+\n+\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);\n+}\n+\n+static void axgbe_config_mac_speed(struct axgbe_port *pdata)\n+{\n+\taxgbe_set_speed(pdata, pdata->phy_speed);\n+}\n+\n+static void axgbe_config_checksum_offload(struct axgbe_port *pdata)\n+{\n+\tif (pdata->rx_csum_enable)\n+\t\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);\n+\telse\n+\t\tAXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);\n+}\n+\n+static int axgbe_init(struct axgbe_port *pdata)\n+{\n+\tint ret;\n+\n+\t/* Flush Tx queues */\n+\tret = axgbe_flush_tx_queues(pdata);\n+\tif (ret)\n+\t\treturn ret;\n+\t/* Initialize DMA related features */\n+\taxgbe_config_dma_bus(pdata);\n+\taxgbe_config_dma_cache(pdata);\n+\taxgbe_config_edma_control(pdata);\n+\taxgbe_config_osp_mode(pdata);\n+\taxgbe_config_pblx8(pdata);\n+\taxgbe_config_tx_pbl_val(pdata);\n+\taxgbe_config_rx_pbl_val(pdata);\n+\taxgbe_config_rx_buffer_size(pdata);\n+\taxgbe_config_rss(pdata);\n+\twrapper_tx_desc_init(pdata);\n+\tret = wrapper_rx_desc_init(pdata);\n+\tif (ret)\n+\t\treturn ret;\n+\taxgbe_enable_dma_interrupts(pdata);\n+\n+\t/* Initialize MTL related features */\n+\taxgbe_config_mtl_mode(pdata);\n+\taxgbe_config_queue_mapping(pdata);\n+\taxgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);\n+\taxgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);\n+\taxgbe_config_tx_threshold(pdata, pdata->tx_threshold);\n+\taxgbe_config_rx_threshold(pdata, pdata->rx_threshold);\n+\taxgbe_config_tx_fifo_size(pdata);\n+\taxgbe_config_rx_fifo_size(pdata);\n+\n+\taxgbe_enable_mtl_interrupts(pdata);\n+\n+\t/* Initialize MAC related features */\n+\taxgbe_config_mac_address(pdata);\n+\taxgbe_config_jumbo_enable(pdata);\n+\taxgbe_config_flow_control(pdata);\n+\taxgbe_config_mac_speed(pdata);\n+\taxgbe_config_checksum_offload(pdata);\n+\n+\treturn 0;\n+}\n+\n void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)\n {\n \thw_if->exit = axgbe_exit;\n+\thw_if->config_flow_control = axgbe_config_flow_control;\n \n+\thw_if->init = axgbe_init;\n 
\n \thw_if->read_mmd_regs = axgbe_read_mmd_regs;\n \thw_if->write_mmd_regs = axgbe_write_mmd_regs;\n@@ -379,4 +1220,7 @@ void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)\n \thw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;\n \thw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;\n \thw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;\n+\t/* For FLOW ctrl */\n+\thw_if->config_tx_flow_control = axgbe_config_tx_flow_control;\n+\thw_if->config_rx_flow_control = axgbe_config_rx_flow_control;\n }\ndiff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c\nindex ee1e48d..9065a44 100644\n--- a/drivers/net/axgbe/axgbe_ethdev.c\n+++ b/drivers/net/axgbe/axgbe_ethdev.c\n@@ -132,6 +132,9 @@\n \n static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);\n static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);\n+static int  axgbe_dev_configure(struct rte_eth_dev *dev);\n+static int  axgbe_dev_start(struct rte_eth_dev *dev);\n+static void axgbe_dev_stop(struct rte_eth_dev *dev);\n static void axgbe_dev_interrupt_handler(void *param);\n static void axgbe_dev_close(struct rte_eth_dev *dev);\n static void axgbe_dev_info_get(struct rte_eth_dev *dev,\n@@ -186,6 +189,9 @@ static const struct rte_eth_desc_lim tx_desc_lim = {\n };\n \n static const struct eth_dev_ops axgbe_eth_dev_ops = {\n+\t.dev_configure        = axgbe_dev_configure,\n+\t.dev_start            = axgbe_dev_start,\n+\t.dev_stop             = axgbe_dev_stop,\n \t.dev_close            = axgbe_dev_close,\n \t.dev_infos_get        = axgbe_dev_info_get,\n \t.rx_queue_setup       = axgbe_dev_rx_queue_setup,\n@@ -194,6 +200,13 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {\n \t.tx_queue_release     = axgbe_dev_tx_queue_release,\n };\n \n+static int axgbe_phy_reset(struct axgbe_port *pdata)\n+{\n+\tpdata->phy_link = -1;\n+\tpdata->phy_speed = SPEED_UNKNOWN;\n+\treturn pdata->phy_if.phy_reset(pdata);\n+}\n+\n /*\n  * Interrupt handler triggered by NIC  for handling\n  * 
specific interrupt.\n@@ -218,6 +231,89 @@ axgbe_dev_interrupt_handler(void *param)\n \trte_intr_enable(&pdata->pci_dev->intr_handle);\n }\n \n+/*\n+ * Configure device link speed and setup link.\n+ * It returns 0 on success.\n+ */\n+static int\n+axgbe_dev_configure(struct rte_eth_dev *dev)\n+{\n+\tstruct axgbe_port *pdata =  dev->data->dev_private;\n+\t/* Checksum offload to hardware */\n+\tpdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &\n+\t\t\t\tDEV_RX_OFFLOAD_CHECKSUM;\n+\treturn 0;\n+}\n+\n+static int\n+axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)\n+{\n+\tstruct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;\n+\n+\tif (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)\n+\t\tpdata->rss_enable = 1;\n+\telse if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)\n+\t\tpdata->rss_enable = 0;\n+\telse\n+\t\treturn  -1;\n+\treturn 0;\n+}\n+\n+static int\n+axgbe_dev_start(struct rte_eth_dev *dev)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\tstruct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;\n+\tint ret;\n+\n+\t/* Multiqueue RSS */\n+\tret = axgbe_dev_rx_mq_config(dev);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(ERR, \"Unable to config RX MQ\");\n+\t\treturn ret;\n+\t}\n+\tret = axgbe_phy_reset(pdata);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(ERR, \"phy reset failed\");\n+\t\treturn ret;\n+\t}\n+\tret = pdata->hw_if.init(pdata);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(ERR, \"dev_init failed\");\n+\t\treturn ret;\n+\t}\n+\n+\t/* enable uio/vfio intr/eventfd mapping */\n+\trte_intr_enable(&pdata->pci_dev->intr_handle);\n+\n+\t/* phy start*/\n+\tpdata->phy_if.phy_start(pdata);\n+\n+\taxgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);\n+\taxgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);\n+\treturn 0;\n+}\n+\n+/* Stop device: disable rx and tx functions to allow for reconfiguring. 
*/\n+static void\n+axgbe_dev_stop(struct rte_eth_dev *dev)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\tstruct axgbe_port *pdata = dev->data->dev_private;\n+\n+\trte_intr_disable(&pdata->pci_dev->intr_handle);\n+\n+\tif (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))\n+\t\treturn;\n+\n+\taxgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);\n+\n+\tpdata->phy_if.phy_stop(pdata);\n+\tpdata->hw_if.exit(pdata);\n+\tmemset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));\n+\taxgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);\n+}\n+\n /* Clear all resources like TX/RX queues. */\n static void\n axgbe_dev_close(struct rte_eth_dev *dev)\n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "09/18"
    ]
}