get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (the supplied representation replaces the existing one).

GET /api/patches/44676/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 44676,
    "url": "http://patches.dpdk.org/api/patches/44676/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1536838528-11800-20-git-send-email-igor.russkikh@aquantia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1536838528-11800-20-git-send-email-igor.russkikh@aquantia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1536838528-11800-20-git-send-email-igor.russkikh@aquantia.com",
    "date": "2018-09-13T11:35:27",
    "name": "[v2,20/21] net/atlantic: RX side structures and implementation",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "f1dbe760cfb3965450a5f0a681a2b5b9bb8fcdbc",
    "submitter": {
        "id": 1124,
        "url": "http://patches.dpdk.org/api/people/1124/?format=api",
        "name": "Igor Russkikh",
        "email": "igor.russkikh@aquantia.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1536838528-11800-20-git-send-email-igor.russkikh@aquantia.com/mbox/",
    "series": [
        {
            "id": 1307,
            "url": "http://patches.dpdk.org/api/series/1307/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=1307",
            "date": "2018-09-13T11:35:08",
            "name": "[v2,01/21] net/atlantic: atlantic PMD driver skeleton",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/1307/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/44676/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/44676/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id A83501B06B;\n\tThu, 13 Sep 2018 13:36:25 +0200 (CEST)",
            "from NAM01-SN1-obe.outbound.protection.outlook.com\n\t(mail-sn1nam01on0074.outbound.protection.outlook.com [104.47.32.74])\n\tby dpdk.org (Postfix) with ESMTP id 2A9751AEE8\n\tfor <dev@dpdk.org>; Thu, 13 Sep 2018 13:36:16 +0200 (CEST)",
            "from ubuntubox.rdc.aquantia.com (95.79.108.179) by\n\tBLUPR0701MB1650.namprd07.prod.outlook.com (2a01:111:e400:58c6::20)\n\twith Microsoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.1143.15;\n\tThu, 13 Sep 2018 11:36:13 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=AQUANTIA1COM.onmicrosoft.com; s=selector1-aquantia-com;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n\tbh=G0LA4Lfmk/CQMZ9K1ySYhbVUfKZy5WlB2sbefDdLDm4=;\n\tb=DzEi9AMQl3k2UMofnV2liVEFIEQaZvncApEDUHQXZ9kR40ZtisfklZwryvyHlO6YFv/bucN1mjbHVn98TSmuIH0uj2jWY0MVqnFzPdxB3tp0L7nAyoNSzCEqx0dc2xKUhZHPO65+udDGrdxRIBAhI84F2BpZa772rETWHugaCdw=",
        "Authentication-Results": "spf=none (sender IP is )\n\tsmtp.mailfrom=Igor.Russkikh@aquantia.com; ",
        "From": "Igor Russkikh <igor.russkikh@aquantia.com>",
        "To": "dev@dpdk.org",
        "Cc": "pavel.belous@aquantia.com,\n\tigor.russkikh@aquantia.com",
        "Date": "Thu, 13 Sep 2018 14:35:27 +0300",
        "Message-Id": "<1536838528-11800-20-git-send-email-igor.russkikh@aquantia.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1536838528-11800-1-git-send-email-igor.russkikh@aquantia.com>",
        "References": "<1536838528-11800-1-git-send-email-igor.russkikh@aquantia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[95.79.108.179]",
        "X-ClientProxiedBy": "VI1PR0202CA0026.eurprd02.prod.outlook.com\n\t(2603:10a6:803:14::39) To BLUPR0701MB1650.namprd07.prod.outlook.com\n\t(2a01:111:e400:58c6::20)",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "f8988545-7119-454c-f97c-08d6196d1cb2",
        "X-Microsoft-Antispam": "BCL:0; PCL:0;\n\tRULEID:(7020095)(4652040)(8989137)(4534165)(4627221)(201703031133081)(201702281549075)(8990107)(5600074)(711020)(2017052603328)(7153060)(7193020);\n\tSRVR:BLUPR0701MB1650; ",
        "X-Microsoft-Exchange-Diagnostics": [
            "1; BLUPR0701MB1650;\n\t3:hYSGEDqAP4QrfxZoT2U3Qn6KD9yZnaI8n/vmzD8u837o3yP2CO3zViPVqkD7/szUFpP6EysZYiYlhLJ1jFJn0GJXFFun4/qaqOlKPRmZY/pPTghyfYYWqT8gWbzN00iS5uQqDwNtphD3etoquPNFW/bsIjjb6L4jhGghPexhVNfufV9lvwQaasra/GVWXL+uMsGEOPmsRL5JeBD5TriX99XLeRdMrLszYIsKBph2x7hMF2/QOp4lWU1Czym8Vl93;\n\t25:wo90laUHJ+o5nsPFE1CxcekNu/jcf/S+3nQ5SKuOEAv+wADnWTHbEOtl4aRkZPUimvon7c549gLX2PSAoybU/krn7oNxda3J1oM5/QX9g4Lwm6B4MFhyg4TEdt/QwoCTo5oELyNr5hjkP6GKBMeQLe2dpWS/g4z/5B2kb8PDbc16BFn29v2xZkdMrha4/jn9Gw25aBBOdhQwXfCcK0gLmnyTOsp9cflQD8il/khQC3UzuHNSbp+iPHL4y1/T85Qkc81r6Ab20ZQ9/sI+9ZfUd9g/JzpXtmlYNW7vzJDX1Nh9vZJ0wKedQ4D/s07LV12OozmOpq56BAZ9kQnhla20VdVGC58YfgnOW1xV5ZfqCMs=;\n\t31:EGNpnSzgdR9aPX0KRFFAE4D2eKKe9tw3nJRXPxEuVaML2f3oEqSEy8fJC7ippVC0nKu1SERnDkbyjNx44YZXKKy/1VOh3FVjG/S5/dKTKEs2y0N4bPDCFrWevBd9xnX3+MyliVWvsvErILeAJuzqdDtLo5FPV58rzK0KjGnnQtD3eibr0zCk5WdCgYJWu0lXjZYaP7Ac8BoIlUXxOmMmNcqqMfTgowve0Yp4n/VJbd8=",
            "1; BLUPR0701MB1650;\n\t20:mFRvnmG2McepZte5EwCCa7T493VysBNu0LKocQoJH5Y2OZnwi2vo+h+TWLjSLgk5Q/RAD5Otb303oT33dT4Atu6XCgH4AmzsGKtiyKzaSlPfu+6NvE9M2+9lXyDlCwho48zt7cmsljauVJIOxRE9m+cf52T9YEUenLtQWHm5P0qykMJzoTmEHfnQXkEg9rHO7bTZZaFTkVxQKGk/7PIkhrsdV6/jepGtJnwoUM99Agay1diC4NyNJ/CWrW+SDPnyQzAerD6PJ4Xepeqr3nCAVk+G56yJt5pORx4sLurT37BCPO+vxqpyM8hvtmdJoW3Kbr7jIQHFny5+pntaWuQ7oboovu1/FAcy2BmOPnlEkoncA5gXl5JCi4gok4xzriOobPjxDrlPHYN0eNCwnau15ZzfJ8QtFtpvysUBgnCmJwlEhnq+WOsVSrS8S9YMNxIHidtehZ92XRiQ7pJRN0+jqqa7MFF7O+Il+78mR1cCWTWtrePDALnlM3Iz2WDcwDBP;\n\t4:7H8xDc36jA0BDMQCMHZo+RXSIi1bWyJik6UpnfVQLtEUI89DwVjDjHWySe+GOf8pLsiv9/UuO+6h+DrcULIPUsf3MNLOTfqbwGfqqfxD1JPGWBzfp1oJWNpabBepEi6r7zLpzBiMy+GTHoGOtqpSKd4b52KUHhBccj3NsZsQxaeocSOJmbc3/ozXtb8X8zorlWkXl1uO4B2Xjrz1NP+d0iMn54H7Ta/mQcupY/OIMIYb/+WTI8ZyMCQzK1qh40s3G9pemG4gnHKABKgLRf4ZPQ==",
            "=?us-ascii?Q?1; BLUPR0701MB1650;\n\t23:wBFr4Cq4aaQj3FCiaVckN61JpOfNnNp/Fazazvr?=\n\t9h7SiHWA3zfWabXM7OttGjSH8kByyY28eyH31Ahd0OQ2XE50P+F86roRhj9qdWWSC6TFZ+So/FLAXhQWHhw1gFehCyOVhhBnJcISC4DB0fB+GH4KToQoBhO4g9dZwhUH2yuXxLfaNhXCG3vs3sZxGZv+99w8noXdhwh3KWQxPslWeUzpmaQxtly9aAP6/lXOebRa1AeX1eP6hQiaTuyT4HHJuKZhHE847sXyyR1KAVvykuWljoWEve8F0z9KZuDBolknAVqZojMpjCg6bqFX8/itCwN3Weo14DzN0bVL130W4aCChV5xgZbEv/KP9uSY0TJ5AFVf881tk0gnly1sQdTV9Hkx/Ka7+kAhmhqLQjg6p1PHIowgUbs5RgvTjKhnmy4q2FXfEqHu0RiS4Lsq4AniIyGTc+/irSNJU0PhAeC9243SP8A+ZlHGpYmL9Tm9qTYnFqV5VMI/NSQUp/viNxQmMSrvAju0wOm5WWTbW4x99suv7UHqBvGlRImo8Cg3o8IRmGd/CXjqmEm+x7nobm1F3f/6jHhLSs6vQPwa3TOFCulpsZy811mb5A9U4Reu7GU5OeXfZTdPURhFpINBXtSy3sSYDLidRqszZSSXzefPHB7BUVnhRoyuvee6F1STf54PX+oI/mc4sdkwTgMcRl5/LTasWFreof1+pz0Ns6NcI2IEv6jaw8yJFxh/AxnDQJQ5IjaSnU4ONEefSSiMi13FxPYF5dERfDkGkBt5D265eou+QUI43a4k4s0J9+QmfHeBk+smYvc2SQ3/kLehRsr8HlCGgj0vG3GBeKLp8EvTjXW1nDL8x3+ZJagh3msWXrJ96+/spP9y3PXHCiTzaRPbGQQBs2LEcgP3Mv2VKHOZ88oEAH0n8fjcGs0KwDHm6I3v1BhFCuO34nFpKh8ONiuxxva25p0PqWidNKh15pZsKCPmAPgqBd2+JyOFSADWCHssoyuXJG7CqcnKsaKCj8vNhCWqdhv9NEP7YpwA4cHRgWlc1xqF3Wdei4FGKQtTk2Du2gl4YmQpmQl1d6F7fJj6u/uKJuouX6MjMGSZUAsdYL7ZAtWJvsVjayTCoaYeBPw0E9Ekp8FOO6C40zv/az+NSNtAH+Nx9CftH61Qgeu2a/YLvj6SzSGNKBP7QWguNeAGOOUzFrs3qCkwySJ4i+qXHF7//9UVMKkY9VyQ9vQssuE/b7NImvtMTjx74kntEJGTkSbB4R0GtpotX9ItcVaFV",
            "1; BLUPR0701MB1650;\n\t6:LcSjLDuhdpUKvu9CcUkBs61dW9V2FKVjGm+fV+JpofcDBm3TYz5Q7K1+tjAMLfXjU21id7gt6n12ZG9lszHX+kHZVTmQaw5Wue/BdKg5fh3tL2/M9W9fu1vfB418gTvMzcQf+JsQuzjHLvBEh4HUllKIRYvtf5YI4YZxbHLBivz5DGIb9FSq/b6E0iX9tYnJsem+fO/RVE2ANk3wltO++aKn35I0ucEh3YcQqOOcYqAp5/nRUwRizoe+5Zs0pWK5j36zMtF5ro8bJ14Mxu1Spl6qYPhiLjvcD6IsnwMR/zuOKygFp4OSr8ujcq6IOvd5A1RpMBvC6ACAWKd4zfdRa4XymJLpV+I1dPAJzpccqS1CM2DZnOQOf/NauPbjlI/kSvFVegi8GOYPuWvA60R3y6BR7CzFhpcvb+hAf9rrcpL774Hvl3KORy7/5/Kl1495dA7y5uAasOURZhbNR1hCMw==;\n\t5:6GWSXZeOsMh2uplCwF5UL8Zm02ZmsOvJxfHVeNn6a8ao7JSEOrHypUP6qr470rcXMSWBElB5ZCEqyWkurCYspe1yzIaFehp7w66BCHlz4V8460wFifUbEKI6i4vumkxS4eJvBYF5y5RemNWL3oaFeWgAXgExFRO1bLuW9x06UQk=;\n\t7:rIgFRV9DiEiHYbN+Y5qx3nMMpef8LbDqRXgIF5B+lQSyhEVCSSCtBWzCbukpTEJOajHwEzzT7bFQ6sF47/FuVxdmKtiM4XDggS+QqPV9VmsGabwkhuIQcE0cfDD5oR87cxwreDdmKjTUyH5s288X5X1SIO/HgcHqfTiC599Qrl9c39M6e39DadWya8P6bv2QxMO6hPMhoBK7KkuHbmOzX3d5RADIAXrkyQ7fczZMLJEnHAQsDdiWwJF1MamIhQ87"
        ],
        "X-MS-TrafficTypeDiagnostic": "BLUPR0701MB1650:",
        "X-Microsoft-Antispam-PRVS": "<BLUPR0701MB16500585AFE521E9D4A3B966981A0@BLUPR0701MB1650.namprd07.prod.outlook.com>",
        "X-Exchange-Antispam-Report-Test": "UriScan:;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-Exchange-Antispam-Report-CFA-Test": "BCL:0; PCL:0;\n\tRULEID:(6040522)(2401047)(8121501046)(5005006)(93006095)(93001095)(3231311)(944501410)(52105095)(10201501046)(3002001)(149027)(150027)(6041310)(201703131423095)(201702281528075)(20161123555045)(201703061421075)(201703061406153)(20161123560045)(20161123564045)(20161123562045)(20161123558120)(201708071742011)(7699050);\n\tSRVR:BLUPR0701MB1650; BCL:0; PCL:0; RULEID:; SRVR:BLUPR0701MB1650; ",
        "X-Forefront-PRVS": "07943272E1",
        "X-Forefront-Antispam-Report": "SFV:NSPM;\n\tSFS:(10009020)(39840400004)(366004)(396003)(376002)(136003)(346002)(199004)(189003)(6666003)(6916009)(97736004)(8936002)(68736007)(25786009)(50226002)(81166006)(81156014)(4326008)(107886003)(6486002)(51416003)(2906002)(53936002)(5660300001)(36756003)(86362001)(316002)(52116002)(8676002)(16586007)(7736002)(305945005)(11346002)(446003)(476003)(956004)(47776003)(72206003)(66066001)(44832011)(478600001)(486006)(186003)(26005)(16526019)(14444005)(386003)(48376002)(2351001)(106356001)(2361001)(2616005)(3846002)(105586002)(50466002)(7696005)(6116002)(76176011);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:BLUPR0701MB1650;\n\tH:ubuntubox.rdc.aquantia.com; \n\tFPR:; SPF:None; LANG:en; PTR:InfoNoRecords; A:1; MX:1; ",
        "Received-SPF": "None (protection.outlook.com: aquantia.com does not designate\n\tpermitted sender hosts)",
        "X-Microsoft-Antispam-Message-Info": "wrT0fMU3JJ6BekRIfJXpTUYMuaAaZQUmD8IXdOnIcg4oF2+O2ISlX2VEVDmtpEGENNSfiqJ0f8cIMIflGzLpemIdIJVxeus8+jMlpy0Hm6gLtMeId4ZCdXb8U8VqoTrXAf5ld0YqRE3HA6QZz8fccvAnrgO/4rd7WR7qQKgsZHVh+9eo5VH0iSAaDjXk0uzUt3aPjmyf6T0Lo/XfHn5ucMPBJdFwy6i+2pi8uF6bwaI3swWp9ncWJbPnZNEfndi3WyVI5B2iFweeWqYbiPEeR/4QX05YhP/jrK9Shz6pHr8BP69upNOZp3GCG08pxlPzYddjVPOQzdTTQC/ycRr7egSr6AFOWOAPvdumsXCSoVM=",
        "SpamDiagnosticOutput": "1:99",
        "SpamDiagnosticMetadata": "NSPM",
        "X-OriginatorOrg": "aquantia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "13 Sep 2018 11:36:13.4207\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "f8988545-7119-454c-f97c-08d6196d1cb2",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "Hosted",
        "X-MS-Exchange-CrossTenant-Id": "83e2e134-991c-4ede-8ced-34d47e38e6b1",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BLUPR0701MB1650",
        "Subject": "[dpdk-dev] [PATCH v2 20/21] net/atlantic: RX side structures and\n\timplementation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>\n---\n drivers/net/atlantic/Makefile     |   6 +\n drivers/net/atlantic/atl_ethdev.c |  44 +++\n drivers/net/atlantic/atl_ethdev.h |  82 ++++\n drivers/net/atlantic/atl_rxtx.c   | 787 ++++++++++++++++++++++++++++++++++++++\n drivers/net/atlantic/meson.build  |   6 +\n 5 files changed, 925 insertions(+)\n create mode 100644 drivers/net/atlantic/atl_rxtx.c",
    "diff": "diff --git a/drivers/net/atlantic/Makefile b/drivers/net/atlantic/Makefile\nindex 8af64d082..b84c8a988 100644\n--- a/drivers/net/atlantic/Makefile\n+++ b/drivers/net/atlantic/Makefile\n@@ -31,7 +31,13 @@ VPATH += $(SRCDIR)/hw_atl\n #\n # all source are stored in SRCS-y\n #\n+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atl_rxtx.c\n SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atl_ethdev.c\n+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += atl_hw_regs.c\n+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_utils.c\n+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_llh.c\n+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_utils_fw2x.c\n+SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += hw_atl_b0.c\n SRCS-$(CONFIG_RTE_LIBRTE_ATLANTIC_PMD) += rte_pmd_atlantic.c\n \n # install this header file\ndiff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c\nindex 01700b4ad..8f81761f3 100644\n--- a/drivers/net/atlantic/atl_ethdev.c\n+++ b/drivers/net/atlantic/atl_ethdev.c\n@@ -284,6 +284,8 @@ static const struct eth_dev_ops atl_eth_dev_ops = {\n \t.xstats_get_names     = atl_dev_xstats_get_names,\n \t.stats_reset\t      = atl_dev_stats_reset,\n \t.xstats_reset\t      = atl_dev_xstats_reset,\n+\t.queue_stats_mapping_set = atl_dev_queue_stats_mapping_set,\n+\n \t.fw_version_get       = atl_fw_version_get,\n \t.dev_infos_get\t      = atl_dev_info_get,\n \t.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,\n@@ -296,6 +298,20 @@ static const struct eth_dev_ops atl_eth_dev_ops = {\n \t.vlan_tpid_set        = atl_vlan_tpid_set,\n \t.vlan_strip_queue_set = atl_vlan_strip_queue_set,\n \n+\t/* Queue Control */\n+\t.rx_queue_start\t      = atl_rx_queue_start,\n+\t.rx_queue_stop\t      = atl_rx_queue_stop,\n+\t.rx_queue_setup       = atl_rx_queue_setup,\n+\t.rx_queue_release     = atl_rx_queue_release,\n+\n+\t.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,\n+\t.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,\n+\n+\t.rx_queue_count       = 
atl_rx_queue_count,\n+\t.rx_descriptor_done   = atl_dev_rx_descriptor_done,\n+\t.rx_descriptor_status = atl_dev_rx_descriptor_status,\n+\t.tx_descriptor_status = atl_dev_tx_descriptor_status,\n+\n \t/* LEDs */\n \t.dev_led_on           = atl_dev_led_on,\n \t.dev_led_off          = atl_dev_led_off,\n@@ -314,6 +330,7 @@ static const struct eth_dev_ops atl_eth_dev_ops = {\n \t.mac_addr_remove      = atl_remove_mac_addr,\n \t.mac_addr_set\t      = atl_set_default_mac_addr,\n \t.set_mc_addr_list     = atl_dev_set_mc_addr_list,\n+\t.rxq_info_get\t      = atl_rxq_info_get,\n \t.reta_update          = atl_reta_update,\n \t.reta_query           = atl_reta_query,\n \t.rss_hash_update      = atl_rss_hash_update,\n@@ -631,6 +648,19 @@ atl_dev_start(struct rte_eth_dev *dev)\n \t\t}\n \t}\n \n+\t/* This can fail when allocating mbufs for descriptor rings */\n+\terr = atl_rx_init(dev);\n+\tif (err) {\n+\t\tPMD_INIT_LOG(ERR, \"Unable to initialize RX hardware\");\n+\t\tgoto error;\n+\t}\n+\n+\terr = atl_start_queues(dev);\n+\tif (err < 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Unable to start rxtx queues\");\n+\t\tgoto error;\n+\t}\n+\n \terr = hw->aq_fw_ops->update_link_status(hw);\n \n \tif (err)\n@@ -716,6 +746,9 @@ atl_dev_stop(struct rte_eth_dev *dev)\n \t/* reset the NIC */\n \tatl_reset_hw(hw);\n \thw->adapter_stopped = 0;\n+\n+\tatl_stop_queues(dev);\n+\n \t/* Clear stored conf */\n \tdev->data->scattered_rx = 0;\n \tdev->data->lro = 0;\n@@ -775,6 +808,8 @@ atl_dev_close(struct rte_eth_dev *dev)\n \n \tatl_dev_stop(dev);\n \thw->adapter_stopped = 1;\n+\n+\tatl_free_queues(dev);\n }\n \n static int\n@@ -879,6 +914,15 @@ atl_dev_xstats_reset(struct rte_eth_dev *dev __rte_unused)\n {\n }\n \n+static int\n+atl_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev __rte_unused,\n+\t\t\t\t\t     uint16_t queue_id __rte_unused,\n+\t\t\t\t\t     uint8_t stat_idx __rte_unused,\n+\t\t\t\t\t     uint8_t is_rx __rte_unused)\n+{\n+\t/* The mapping is hardcoded: queue 0 -> stat 0, etc 
*/\n+\treturn 0;\n+}\n \n static int\n atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)\ndiff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h\nindex 3ebef1f43..614757b7a 100644\n--- a/drivers/net/atlantic/atl_ethdev.h\n+++ b/drivers/net/atlantic/atl_ethdev.h\n@@ -13,6 +13,17 @@\n #include \"atl_types.h\"\n #include \"hw_atl/hw_atl_utils.h\"\n \n+#define ATL_RSS_OFFLOAD_ALL ( \\\n+\tETH_RSS_IPV4 | \\\n+\tETH_RSS_NONFRAG_IPV4_TCP | \\\n+\tETH_RSS_NONFRAG_IPV4_UDP | \\\n+\tETH_RSS_IPV6 | \\\n+\tETH_RSS_NONFRAG_IPV6_TCP | \\\n+\tETH_RSS_NONFRAG_IPV6_UDP | \\\n+\tETH_RSS_IPV6_EX | \\\n+\tETH_RSS_IPV6_TCP_EX | \\\n+\tETH_RSS_IPV6_UDP_EX)\n+\n #define ATL_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)\n #define ATL_FLAG_NEED_LINK_CONFIG (uint32_t)(4 << 0)\n \n@@ -46,6 +57,77 @@ struct atl_adapter {\n #define ATL_DEV_PRIVATE_TO_CFG(adapter) \\\n \t(&((struct atl_adapter *)adapter)->hw_cfg)\n \n+extern const struct rte_flow_ops atl_flow_ops;\n+\n+/*\n+ * RX/TX function prototypes\n+ */\n+void atl_rx_queue_release(void *rxq);\n+void atl_tx_queue_release(void *txq);\n+\n+int atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\t\tuint16_t nb_rx_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\tstruct rte_mempool *mb_pool);\n+\n+int atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,\n+\t\tuint16_t nb_tx_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_txconf *tx_conf);\n+\n+uint32_t atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+\n+int atl_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);\n+int atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);\n+int atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);\n+\n+int atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,\n+\t\t\t\t uint16_t queue_id);\n+int atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,\n+\t\t\t\t  uint16_t queue_id);\n+\n+int 
atl_rx_init(struct rte_eth_dev *dev);\n+int atl_tx_init(struct rte_eth_dev *dev);\n+\n+int atl_start_queues(struct rte_eth_dev *dev);\n+int atl_stop_queues(struct rte_eth_dev *dev);\n+void atl_free_queues(struct rte_eth_dev *dev);\n+\n+int atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+\n+int atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+int atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+\n+void atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo);\n+\n+void atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo);\n+\n+uint16_t atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n+uint16_t atl_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\t\t    uint16_t nb_pkts);\n+\n+uint16_t atl_recv_pkts_lro_single_alloc(void *rx_queue,\n+\t\tstruct rte_mbuf **rx_pkts, uint16_t nb_pkts);\n+uint16_t atl_recv_pkts_lro_bulk_alloc(void *rx_queue,\n+\t\tstruct rte_mbuf **rx_pkts, uint16_t nb_pkts);\n+\n+uint16_t atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n+uint16_t atl_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n+uint16_t atl_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts);\n+\n int\n atl_dev_led_control(struct rte_eth_dev *dev, int control);\n+\n+bool\n+is_atl_supported(struct rte_eth_dev *dev);\n+\n #endif /* _ATLANTIC_ETHDEV_H_ */\ndiff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c\nnew file mode 100644\nindex 000000000..77009e095\n--- /dev/null\n+++ b/drivers/net/atlantic/atl_rxtx.c\n@@ -0,0 +1,787 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Aquantia Corporation\n+ */\n+\n+#include <sys/queue.h>\n+\n+#include <stdio.h>\n+#include 
<stdlib.h>\n+#include <string.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <stdarg.h>\n+#include <inttypes.h>\n+\n+#include <rte_interrupts.h>\n+#include <rte_byteorder.h>\n+#include <rte_common.h>\n+#include <rte_log.h>\n+#include <rte_debug.h>\n+#include <rte_pci.h>\n+#include <rte_memory.h>\n+#include <rte_memcpy.h>\n+#include <rte_memzone.h>\n+#include <rte_launch.h>\n+#include <rte_eal.h>\n+#include <rte_per_lcore.h>\n+#include <rte_lcore.h>\n+#include <rte_atomic.h>\n+#include <rte_branch_prediction.h>\n+#include <rte_mempool.h>\n+#include <rte_malloc.h>\n+#include <rte_mbuf.h>\n+#include <rte_ether.h>\n+#include <rte_ethdev_driver.h>\n+#include <rte_prefetch.h>\n+#include <rte_udp.h>\n+#include <rte_tcp.h>\n+#include <rte_sctp.h>\n+#include <rte_net.h>\n+#include <rte_string_fns.h>\n+#include \"atl_ethdev.h\"\n+#include \"atl_hw_regs.h\"\n+\n+#include \"atl_logs.h\"\n+#include \"hw_atl/hw_atl_llh.h\"\n+#include \"hw_atl/hw_atl_b0.h\"\n+#include \"hw_atl/hw_atl_b0_internal.h\"\n+\n+/**\n+ * Structure associated with each descriptor of the RX ring of a RX queue.\n+ */\n+struct atl_rx_entry {\n+\tstruct rte_mbuf *mbuf;\n+};\n+\n+/**\n+ * Structure associated with each RX queue.\n+ */\n+struct atl_rx_queue {\n+\tstruct rte_mempool\t*mb_pool;\n+\tstruct hw_atl_rxd_s\t*hw_ring;\n+\tuint64_t\t\thw_ring_phys_addr;\n+\tstruct atl_rx_entry\t*sw_ring;\n+\tuint16_t\t\tnb_rx_desc;\n+\tuint16_t\t\trx_tail;\n+\tuint16_t\t\tnb_rx_hold;\n+\tuint16_t\t\trx_free_thresh;\n+\tuint16_t\t\tqueue_id;\n+\tuint16_t\t\tport_id;\n+\tuint16_t\t\tbuff_size;\n+\tbool\t\t\tl3_csum_enabled;\n+\tbool\t\t\tl4_csum_enabled;\n+};\n+\n+static inline void\n+atl_reset_rx_queue(struct atl_rx_queue *rxq)\n+{\n+\tstruct hw_atl_rxd_s *rxd = NULL;\n+\tint i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\trxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];\n+\t\trxd->buf_addr = 0;\n+\t\trxd->hdr_addr = 0;\n+\t}\n+\n+\trxq->rx_tail = 
0;\n+}\n+\n+int\n+atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\t\t   uint16_t nb_rx_desc, unsigned int socket_id,\n+\t\t   const struct rte_eth_rxconf *rx_conf,\n+\t\t   struct rte_mempool *mb_pool)\n+{\n+\tstruct atl_rx_queue *rxq;\n+\tconst struct rte_memzone *mz;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* make sure a valid number of descriptors have been requested */\n+\tif (nb_rx_desc < AQ_HW_MIN_RX_RING_SIZE ||\n+\t\t\tnb_rx_desc > AQ_HW_MAX_RX_RING_SIZE) {\n+\t\tPMD_INIT_LOG(ERR, \"Number of Rx descriptors must be \"\n+\t\t\"less than or equal to %d, \"\n+\t\t\"greater than or equal to %d\", AQ_HW_MAX_RX_RING_SIZE,\n+\t\tAQ_HW_MIN_RX_RING_SIZE);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/*\n+\t * if this queue existed already, free the associated memory. The\n+\t * queue cannot be reused in case we need to allocate memory on\n+\t * different socket than was previously used.\n+\t */\n+\tif (dev->data->rx_queues[rx_queue_id] != NULL) {\n+\t\tatl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);\n+\t\tdev->data->rx_queues[rx_queue_id] = NULL;\n+\t}\n+\n+\t/* allocate memory for the queue structure */\n+\trxq = rte_zmalloc_socket(\"atlantic Rx queue\", sizeof(*rxq),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rxq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Cannot allocate queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* setup queue */\n+\trxq->mb_pool = mb_pool;\n+\trxq->nb_rx_desc = nb_rx_desc;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->queue_id = rx_queue_id;\n+\trxq->rx_free_thresh = rx_conf->rx_free_thresh;\n+\n+\trxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &\n+\t\tDEV_RX_OFFLOAD_IPV4_CKSUM;\n+\trxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &\n+\t\t(DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);\n+\n+\t/* allocate memory for the software ring */\n+\trxq->sw_ring = rte_zmalloc_socket(\"atlantic sw rx ring\",\n+\t\t\t\tnb_rx_desc * sizeof(struct atl_rx_entry),\n+\t\t\t\tRTE_CACHE_LINE_SIZE, 
socket_id);\n+\tif (rxq->sw_ring == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Cannot allocate software ring\");\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/*\n+\t * allocate memory for the hardware descriptor ring. A memzone large\n+\t * enough to hold the maximum ring size is requested to allow for\n+\t * resizing in later calls to the queue setup function.\n+\t */\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx hw_ring\", rx_queue_id,\n+\t\t\t\t      HW_ATL_B0_MAX_RXD *\n+\t\t\t\t\tsizeof(struct hw_atl_rxd_s),\n+\t\t\t\t      128, socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Cannot allocate hardware ring\");\n+\t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\trxq->hw_ring = mz->addr;\n+\trxq->hw_ring_phys_addr = mz->iova;\n+\n+\tatl_reset_rx_queue(rxq);\n+\n+\tdev->data->rx_queues[rx_queue_id] = rxq;\n+\treturn 0;\n+}\n+\n+int\n+atl_rx_init(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);\n+\tstruct aq_rss_parameters *rss_params = &hw->aq_nic_cfg->aq_rss;\n+\tstruct atl_rx_queue *rxq;\n+\tuint64_t base_addr = 0;\n+\tint i = 0;\n+\tint err = 0;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n+\t\trxq = eth_dev->data->rx_queues[i];\n+\t\tbase_addr = rxq->hw_ring_phys_addr;\n+\n+\t\t/* Take requested pool mbuf size and adapt\n+\t\t * descriptor buffer to best fit\n+\t\t */\n+\t\tint buff_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -\n+\t\t\t\tRTE_PKTMBUF_HEADROOM;\n+\n+\t\tbuff_size = RTE_ALIGN_FLOOR(buff_size, 1024);\n+\t\tif (buff_size > HW_ATL_B0_RXD_BUF_SIZE_MAX) {\n+\t\t\tPMD_INIT_LOG(WARNING,\n+\t\t\t\t\"queue %d: mem pool buff size is too big\\n\",\n+\t\t\t\trxq->queue_id);\n+\t\t\tbuff_size = HW_ATL_B0_RXD_BUF_SIZE_MAX;\n+\t\t}\n+\t\tif (buff_size < 1024) {\n+\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t\"queue %d: mem pool buff size is too small\\n\",\n+\t\t\t\trxq->queue_id);\n+\t\t\treturn 
-EINVAL;\n+\t\t}\n+\t\trxq->buff_size = buff_size;\n+\n+\t\terr = hw_atl_b0_hw_ring_rx_init(hw, base_addr, rxq->queue_id,\n+\t\t\t\t\t\trxq->nb_rx_desc, buff_size, 0,\n+\t\t\t\t\t\trxq->port_id);\n+\t}\n+\n+\tfor (i = rss_params->indirection_table_size; i--;)\n+\t\trss_params->indirection_table[i] = i &\n+\t\t\t(eth_dev->data->nb_rx_queues - 1);\n+\thw_atl_b0_hw_rss_set(hw, rss_params);\n+\treturn err;\n+}\n+\n+static int\n+atl_alloc_rx_queue_mbufs(struct atl_rx_queue *rxq)\n+{\n+\tstruct atl_rx_entry *rx_entry = rxq->sw_ring;\n+\tstruct hw_atl_rxd_s *rxd;\n+\tuint64_t dma_addr = 0;\n+\tuint32_t i = 0;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* fill Rx ring */\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tstruct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\n+\t\tif (mbuf == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"mbuf alloca failed for rx queue %u\",\n+\t\t\t\t     (unsigned int)rxq->queue_id);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\t\trxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];\n+\t\trxd->buf_addr = dma_addr;\n+\t\trxd->hdr_addr = 0;\n+\t\trx_entry[i].mbuf = mbuf;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+atl_rx_queue_release_mbufs(struct atl_rx_queue *rxq)\n+{\n+\tint i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rxq->sw_ring != NULL) {\n+\t\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\t\tif (rxq->sw_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);\n+\t\t\t\trxq->sw_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+int\n+atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct atl_rx_queue *rxq = NULL;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rx_queue_id < dev->data->nb_rx_queues) {\n+\t\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\t\tif (atl_alloc_rx_queue_mbufs(rxq) != 0) 
{\n+\t\t\tPMD_INIT_LOG(ERR, \"Allocate mbufs for queue %d failed\",\n+\t\t\t\t     rx_queue_id);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\thw_atl_b0_hw_ring_rx_start(hw, rx_queue_id);\n+\n+\t\trte_wmb();\n+\t\thw_atl_reg_rx_dma_desc_tail_ptr_set(hw, rxq->nb_rx_desc - 1,\n+\t\t\t\t\t\t    rx_queue_id);\n+\t\tdev->data->rx_queue_state[rx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\t} else {\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct atl_rx_queue *rxq = NULL;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rx_queue_id < dev->data->nb_rx_queues) {\n+\t\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\t\thw_atl_b0_hw_ring_rx_stop(hw, rx_queue_id);\n+\n+\t\tatl_rx_queue_release_mbufs(rxq);\n+\t\tatl_reset_rx_queue(rxq);\n+\n+\t\tdev->data->rx_queue_state[rx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STOPPED;\n+\t} else {\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+void\n+atl_rx_queue_release(void *rx_queue)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rx_queue != NULL) {\n+\t\tstruct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;\n+\n+\t\tatl_rx_queue_release_mbufs(rxq);\n+\t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq);\n+\t}\n+}\n+\n+void\n+atl_free_queues(struct rte_eth_dev *dev)\n+{\n+\tunsigned int i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tatl_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tdev->data->rx_queues[i] = 0;\n+\t}\n+\tdev->data->nb_rx_queues = 0;\n+}\n+\n+int\n+atl_start_queues(struct rte_eth_dev *dev)\n+{\n+\tint i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tif (atl_rx_queue_start(dev, i) != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Start Rx queue %d failed\", i);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+atl_stop_queues(struct rte_eth_dev *dev)\n+{\n+\tint 
i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tif (atl_rx_queue_stop(dev, i) != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Stop Rx queue %d failed\", i);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+void\n+atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\t\t struct rte_eth_rxq_info *qinfo)\n+{\n+\tstruct atl_rx_queue *rxq;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\trxq = dev->data->rx_queues[queue_id];\n+\n+\tqinfo->mp = rxq->mb_pool;\n+\tqinfo->scattered_rx = dev->data->scattered_rx;\n+\tqinfo->nb_desc = rxq->nb_rx_desc;\n+}\n+\n+/* Return Rx queue avail count */\n+\n+uint32_t\n+atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct atl_rx_queue *rxq;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rx_queue_id >= dev->data->nb_rx_queues) {\n+\t\tPMD_DRV_LOG(ERR, \"Invalid RX queue id=%d\", rx_queue_id);\n+\t\treturn 0;\n+\t}\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\tif (rxq == NULL)\n+\t\treturn 0;\n+\n+\treturn rxq->nb_rx_desc - rxq->nb_rx_hold;\n+}\n+\n+int\n+atl_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)\n+{\n+\tstruct atl_rx_queue *rxq = rx_queue;\n+\tstruct hw_atl_rxd_wb_s *rxd;\n+\tuint32_t idx;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (unlikely(offset >= rxq->nb_rx_desc))\n+\t\treturn 0;\n+\n+\tidx = rxq->rx_tail + offset;\n+\n+\tif (idx >= rxq->nb_rx_desc)\n+\t\tidx -= rxq->nb_rx_desc;\n+\n+\trxd = (struct hw_atl_rxd_wb_s *)&rxq->hw_ring[idx];\n+\n+\treturn rxd->dd;\n+}\n+\n+int\n+atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)\n+{\n+\tstruct atl_rx_queue *rxq = rx_queue;\n+\tstruct hw_atl_rxd_wb_s *rxd;\n+\tuint32_t idx;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (unlikely(offset >= rxq->nb_rx_desc))\n+\t\treturn -EINVAL;\n+\n+\tif (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)\n+\t\treturn RTE_ETH_RX_DESC_UNAVAIL;\n+\n+\tidx = rxq->rx_tail + offset;\n+\n+\tif (idx >= rxq->nb_rx_desc)\n+\t\tidx -= rxq->nb_rx_desc;\n+\n+\trxd = (struct 
hw_atl_rxd_wb_s *)&rxq->hw_ring[idx];\n+\n+\tif (rxd->dd)\n+\t\treturn RTE_ETH_RX_DESC_DONE;\n+\n+\treturn RTE_ETH_RX_DESC_AVAIL;\n+}\n+\n+static int\n+atl_rx_enable_intr(struct rte_eth_dev *dev, uint16_t queue_id, bool enable)\n+{\n+\tstruct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct atl_rx_queue *rxq;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (queue_id >= dev->data->nb_rx_queues) {\n+\t\tPMD_DRV_LOG(ERR, \"Invalid RX queue id=%d\", queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trxq = dev->data->rx_queues[queue_id];\n+\n+\tif (rxq == NULL)\n+\t\treturn 0;\n+\n+\t/* Mapping interrupt vector */\n+\thw_atl_itr_irq_map_en_rx_set(hw, enable, queue_id);\n+\n+\treturn 0;\n+}\n+\n+int\n+atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t queue_id)\n+{\n+\treturn atl_rx_enable_intr(eth_dev, queue_id, true);\n+}\n+\n+int\n+atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t queue_id)\n+{\n+\treturn atl_rx_enable_intr(eth_dev, queue_id, false);\n+}\n+\n+static uint64_t\n+atl_desc_to_offload_flags(struct atl_rx_queue *rxq,\n+\t\t\t  struct hw_atl_rxd_wb_s *rxd_wb)\n+{\n+\tuint64_t mbuf_flags = 0;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* IPv4 ? */\n+\tif (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {\n+\t\t/* IPv4 csum error ? */\n+\t\tif (rxd_wb->rx_stat & BIT(1))\n+\t\t\tmbuf_flags |= PKT_RX_IP_CKSUM_BAD;\n+\t\telse\n+\t\t\tmbuf_flags |= PKT_RX_IP_CKSUM_GOOD;\n+\t} else {\n+\t\tmbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;\n+\t}\n+\n+\t/* CSUM calculated ? 
*/\n+\tif (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {\n+\t\tif (rxd_wb->rx_stat & BIT(2))\n+\t\t\tmbuf_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t\telse\n+\t\t\tmbuf_flags |= PKT_RX_L4_CKSUM_GOOD;\n+\t} else {\n+\t\tmbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;\n+\t}\n+\n+\treturn mbuf_flags;\n+}\n+\n+static uint32_t\n+atl_desc_to_pkt_type(struct hw_atl_rxd_wb_s *rxd_wb)\n+{\n+\tuint32_t type = RTE_PTYPE_UNKNOWN;\n+\tuint16_t l2_l3_type = rxd_wb->pkt_type & 0x3;\n+\tuint16_t l4_type = (rxd_wb->pkt_type & 0x1C) >> 2;\n+\n+\tswitch (l2_l3_type) {\n+\tcase 0:\n+\t\ttype = RTE_PTYPE_L3_IPV4;\n+\t\tbreak;\n+\tcase 1:\n+\t\ttype = RTE_PTYPE_L3_IPV6;\n+\t\tbreak;\n+\tcase 2:\n+\t\ttype = RTE_PTYPE_L2_ETHER;\n+\t\tbreak;\n+\tcase 3:\n+\t\ttype = RTE_PTYPE_L2_ETHER_ARP;\n+\t\tbreak;\n+\t}\n+\n+\tswitch (l4_type) {\n+\tcase 0:\n+\t\ttype |= RTE_PTYPE_L4_TCP;\n+\t\tbreak;\n+\tcase 1:\n+\t\ttype |= RTE_PTYPE_L4_UDP;\n+\t\tbreak;\n+\tcase 2:\n+\t\ttype |= RTE_PTYPE_L4_SCTP;\n+\t\tbreak;\n+\tcase 3:\n+\t\ttype |= RTE_PTYPE_L4_ICMP;\n+\t\tbreak;\n+\t}\n+\n+\tif (rxd_wb->pkt_type & BIT(5))\n+\t\ttype |= RTE_PTYPE_L2_ETHER_VLAN;\n+\n+\treturn type;\n+}\n+\n+uint16_t\n+atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];\n+\tstruct atl_adapter *adapter =\n+\t\tATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);\n+\tstruct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);\n+\tstruct atl_rx_entry *sw_ring = rxq->sw_ring;\n+\n+\tstruct rte_mbuf *new_mbuf;\n+\tstruct rte_mbuf *rx_mbuf, *rx_mbuf_prev, *rx_mbuf_first;\n+\tstruct atl_rx_entry *rx_entry;\n+\tuint16_t nb_rx = 0;\n+\tuint16_t nb_hold = 0;\n+\tstruct hw_atl_rxd_wb_s rxd_wb;\n+\tstruct hw_atl_rxd_s *rxd = NULL;\n+\tuint16_t tail = rxq->rx_tail;\n+\tuint64_t dma_addr;\n+\tuint16_t pkt_len = 0;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\tuint16_t eop_tail = tail;\n+\n+\t\trxd = (struct 
hw_atl_rxd_s *)&rxq->hw_ring[tail];\n+\t\trxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;\n+\n+\t\tif (!rxd_wb.dd) { /* RxD is not done */\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tPMD_RX_LOG(ERR, \"port_id=%u queue_id=%u tail=%u \"\n+\t\t\t   \"eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x\",\n+\t\t\t   (unsigned int)rxq->port_id,\n+\t\t\t   (unsigned int)rxq->queue_id,\n+\t\t\t   (unsigned int)tail, (unsigned int)rxd_wb.eop,\n+\t\t\t   (unsigned int)rte_le_to_cpu_16(rxd_wb.pkt_len),\n+\t\t\trxd_wb.rss_hash, rxd_wb.rss_type);\n+\n+\t\t/* RxD is not done */\n+\t\tif (!rxd_wb.eop) {\n+\t\t\twhile (true) {\n+\t\t\t\tstruct hw_atl_rxd_wb_s *eop_rxwbd;\n+\n+\t\t\t\teop_tail = (eop_tail + 1) % rxq->nb_rx_desc;\n+\t\t\t\teop_rxwbd = (struct hw_atl_rxd_wb_s *)\n+\t\t\t\t\t&rxq->hw_ring[eop_tail];\n+\t\t\t\tif (!eop_rxwbd->dd) {\n+\t\t\t\t\t/* no EOP received yet */\n+\t\t\t\t\teop_tail = tail;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t\tif (eop_rxwbd->dd && eop_rxwbd->eop)\n+\t\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\t/* No EOP in ring */\n+\t\t\tif (eop_tail == tail)\n+\t\t\t\tbreak;\n+\t\t}\n+\t\trx_mbuf_prev = NULL;\n+\t\trx_mbuf_first = NULL;\n+\n+\t\t/* Run through packet segments */\n+\t\twhile (true) {\n+\t\t\tnew_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\t\tif (new_mbuf == NULL) {\n+\t\t\t\tPMD_RX_LOG(ERR,\n+\t\t\t\t   \"RX mbuf alloc failed port_id=%u \"\n+\t\t\t\t   \"queue_id=%u\", (unsigned int)rxq->port_id,\n+\t\t\t\t   (unsigned int)rxq->queue_id);\n+\t\t\t\tdev->data->rx_mbuf_alloc_failed++;\n+\t\t\t\tadapter->sw_stats.rx_nombuf++;\n+\t\t\t\tgoto err_stop;\n+\t\t\t}\n+\n+\t\t\tnb_hold++;\n+\t\t\trx_entry = &sw_ring[tail];\n+\n+\t\t\trx_mbuf = rx_entry->mbuf;\n+\t\t\trx_entry->mbuf = new_mbuf;\n+\t\t\tdma_addr = rte_cpu_to_le_64(\n+\t\t\t\trte_mbuf_data_iova_default(new_mbuf));\n+\n+\t\t\t/* setup RX descriptor */\n+\t\t\trxd->hdr_addr = 0;\n+\t\t\trxd->buf_addr = dma_addr;\n+\n+\t\t\t/*\n+\t\t\t * Initialize the returned mbuf.\n+\t\t\t * 1) setup generic mbuf fields:\n+\t\t\t *\t  - 
number of segments,\n+\t\t\t *\t  - next segment,\n+\t\t\t *\t  - packet length,\n+\t\t\t *\t  - RX port identifier.\n+\t\t\t * 2) integrate hardware offload data, if any:\n+\t\t\t *\t<  - RSS flag & hash,\n+\t\t\t *\t  - IP checksum flag,\n+\t\t\t *\t  - VLAN TCI, if any,\n+\t\t\t *\t  - error flags.\n+\t\t\t */\n+\t\t\tpkt_len = (uint16_t)rte_le_to_cpu_16(rxd_wb.pkt_len);\n+\t\t\trx_mbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\t\trte_prefetch1((char *)rx_mbuf->buf_addr +\n+\t\t\t\trx_mbuf->data_off);\n+\t\t\trx_mbuf->nb_segs = 0;\n+\t\t\trx_mbuf->next = NULL;\n+\t\t\trx_mbuf->pkt_len = pkt_len;\n+\t\t\trx_mbuf->data_len = pkt_len;\n+\t\t\tif (rxd_wb.eop) {\n+\t\t\t\tu16 remainder_len = pkt_len % rxq->buff_size;\n+\t\t\t\tif (!remainder_len)\n+\t\t\t\t\tremainder_len = rxq->buff_size;\n+\t\t\t\trx_mbuf->data_len = remainder_len;\n+\t\t\t} else {\n+\t\t\t\trx_mbuf->data_len = pkt_len > rxq->buff_size ?\n+\t\t\t\t\t\trxq->buff_size : pkt_len;\n+\t\t\t}\n+\t\t\trx_mbuf->port = rxq->port_id;\n+\n+\t\t\trx_mbuf->hash.rss = rxd_wb.rss_hash;\n+\n+\t\t\trx_mbuf->vlan_tci = rxd_wb.vlan;\n+\n+\t\t\trx_mbuf->ol_flags =\n+\t\t\t\tatl_desc_to_offload_flags(rxq, &rxd_wb);\n+\t\t\trx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);\n+\n+\t\t\tif (!rx_mbuf_first)\n+\t\t\t\trx_mbuf_first = rx_mbuf;\n+\t\t\trx_mbuf_first->nb_segs++;\n+\n+\t\t\tif (rx_mbuf_prev)\n+\t\t\t\trx_mbuf_prev->next = rx_mbuf;\n+\t\t\trx_mbuf_prev = rx_mbuf;\n+\n+\t\t\ttail = (tail + 1) % rxq->nb_rx_desc;\n+\t\t\t/* Prefetch next mbufs */\n+\t\t\trte_prefetch0(sw_ring[tail].mbuf);\n+\t\t\tif ((tail & 0x3) == 0) {\n+\t\t\t\trte_prefetch0(&sw_ring[tail]);\n+\t\t\t\trte_prefetch0(&sw_ring[tail]);\n+\t\t\t}\n+\n+\t\t\t/* filled mbuf_first */\n+\t\t\tif (rxd_wb.eop)\n+\t\t\t\tbreak;\n+\t\t\trxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];\n+\t\t\trxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;\n+\t\t};\n+\n+\t\t/*\n+\t\t * Store the mbuf address into the next entry of the array\n+\t\t * of returned packets.\n+\t\t 
*/\n+\t\trx_pkts[nb_rx++] = rx_mbuf_first;\n+\t\tadapter->sw_stats.q_ipackets[rxq->queue_id]++;\n+\t\tadapter->sw_stats.q_ibytes[rxq->queue_id] +=\n+\t\t\trx_mbuf_first->pkt_len;\n+\n+\t\tPMD_RX_LOG(ERR, \"add mbuf segs=%d pkt_len=%d\",\n+\t\t\trx_mbuf_first->nb_segs,\n+\t\t\trx_mbuf_first->pkt_len);\n+\t}\n+\n+err_stop:\n+\n+\trxq->rx_tail = tail;\n+\n+\t/*\n+\t * If the number of free RX descriptors is greater than the RX free\n+\t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n+\t * register.\n+\t * Update the RDT with the value of the last processed RX descriptor\n+\t * minus 1, to guarantee that the RDT register is never equal to the\n+\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * hardware point of view...\n+\t */\n+\tnb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\tPMD_RX_LOG(ERR, \"port_id=%u queue_id=%u rx_tail=%u \"\n+\t\t\t\"nb_hold=%u nb_rx=%u\",\n+\t\t\t(unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,\n+\t\t\t(unsigned int)tail, (unsigned int)nb_hold,\n+\t\t\t(unsigned int)nb_rx);\n+\t\ttail = (uint16_t)((tail == 0) ?\n+\t\t\t(rxq->nb_rx_desc - 1) : (tail - 1));\n+\n+\t\thw_atl_reg_rx_dma_desc_tail_ptr_set(hw, tail, rxq->queue_id);\n+\n+\t\tnb_hold = 0;\n+\t}\n+\n+\trxq->nb_rx_hold = nb_hold;\n+\n+\treturn nb_rx;\n+}\ndiff --git a/drivers/net/atlantic/meson.build b/drivers/net/atlantic/meson.build\nindex 19fa41cd3..42821f35a 100644\n--- a/drivers/net/atlantic/meson.build\n+++ b/drivers/net/atlantic/meson.build\n@@ -4,8 +4,14 @@\n #subdir('hw_atl')\n \n sources = files(\n+\t'atl_rxtx.c',\n \t'atl_ethdev.c',\n+\t'atl_hw_regs.c',\n \t'rte_pmd_atlantic.c',\n+\t'hw_atl/hw_atl_b0.c',\n+\t'hw_atl/hw_atl_llh.c',\n+\t'hw_atl/hw_atl_utils_fw2x.c',\n+\t'hw_atl/hw_atl_utils.c',\n )\n \n deps += ['hash', 'eal']\n",
    "prefixes": [
        "v2",
        "20/21"
    ]
}