get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
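
As a quick illustration, a minimal sketch of reading this resource with Python's requests library (endpoint URL taken from the response below; read access needs no authentication):

    import requests

    # Fetch the patch shown below as JSON. The ?format=api suffix only selects
    # the browsable HTML view; API clients can request the bare URL and
    # typically receive JSON via content negotiation.
    url = "https://patches.dpdk.org/api/patches/46574/"
    resp = requests.get(url)
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])    # "[v5,10/23] net/atlantic: receive side structures ..."
    print(patch["state"])   # "changes-requested"
    print(patch["mbox"])    # mbox URL suitable for `git am`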

GET /api/patches/46574/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 46574,
    "url": "https://patches.dpdk.org/api/patches/46574/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/2d9f33ed6591ae36e9bc9136d3e9945b188e5ab9.1539249721.git.igor.russkikh@aquantia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<2d9f33ed6591ae36e9bc9136d3e9945b188e5ab9.1539249721.git.igor.russkikh@aquantia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/2d9f33ed6591ae36e9bc9136d3e9945b188e5ab9.1539249721.git.igor.russkikh@aquantia.com",
    "date": "2018-10-11T10:35:16",
    "name": "[v5,10/23] net/atlantic: receive side structures and implementation",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "25c2bcca22a2f3f70236c9ef5f37e347d49ab63f",
    "submitter": {
        "id": 1124,
        "url": "https://patches.dpdk.org/api/people/1124/?format=api",
        "name": "Igor Russkikh",
        "email": "igor.russkikh@aquantia.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/2d9f33ed6591ae36e9bc9136d3e9945b188e5ab9.1539249721.git.igor.russkikh@aquantia.com/mbox/",
    "series": [
        {
            "id": 1827,
            "url": "https://patches.dpdk.org/api/series/1827/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=1827",
            "date": "2018-10-11T10:34:48",
            "name": "net/atlantic: Aquantia aQtion 10G NIC Family DPDK PMD driver",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/1827/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/46574/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/46574/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 8B68A1B4AB;\n\tThu, 11 Oct 2018 12:35:21 +0200 (CEST)",
            "from NAM02-BL2-obe.outbound.protection.outlook.com\n\t(mail-bl2nam02on0058.outbound.protection.outlook.com [104.47.38.58])\n\tby dpdk.org (Postfix) with ESMTP id 8E73B1B4A0\n\tfor <dev@dpdk.org>; Thu, 11 Oct 2018 12:35:18 +0200 (CEST)",
            "from BY1PR0701MB1660.namprd07.prod.outlook.com (10.162.110.22) by\n\tBY1PR0701MB1206.namprd07.prod.outlook.com (10.160.105.15) with\n\tMicrosoft SMTP Server (version=TLS1_2,\n\tcipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n\t15.20.1228.24; Thu, 11 Oct 2018 10:35:16 +0000",
            "from BY1PR0701MB1660.namprd07.prod.outlook.com\n\t([fe80::346d:e756:e70e:17]) by\n\tBY1PR0701MB1660.namprd07.prod.outlook.com\n\t([fe80::346d:e756:e70e:17%3]) with mapi id 15.20.1228.020;\n\tThu, 11 Oct 2018 10:35:16 +0000"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=AQUANTIA1COM.onmicrosoft.com; s=selector1-aquantia-com;\n\th=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n\tbh=lCwxnC3svlhl6X4E0tYtYsgq3oQkvrha195wFZZ3Qb4=;\n\tb=ZxDrhp6btVZpZPnx0ySHMEKObQvLQieoz6AbXGfHWt1zLSwExqqUqoPBuQpSfWoYkNKBpcSZMebrDtkqcPwES76anYriSUM3NAZKOKwYPlo4hgKo4LQoKZ8vANRJcxq8/gaD8t01C91v6+DK0H6o6gX+tNM7owxpC1lvEs9eooY=",
        "From": "Igor Russkikh <Igor.Russkikh@aquantia.com>",
        "To": "\"dev@dpdk.org\" <dev@dpdk.org>",
        "CC": "Pavel Belous <Pavel.Belous@aquantia.com>, Igor Russkikh\n\t<Igor.Russkikh@aquantia.com>, \"ferruh.yigit@intel.com\"\n\t<ferruh.yigit@intel.com>",
        "Thread-Topic": "[PATCH v5 10/23] net/atlantic: receive side structures and\n\timplementation",
        "Thread-Index": "AQHUYU4ZSMr8dVwjdEKmI9XrtPZp1w==",
        "Date": "Thu, 11 Oct 2018 10:35:16 +0000",
        "Message-ID": "<2d9f33ed6591ae36e9bc9136d3e9945b188e5ab9.1539249721.git.igor.russkikh@aquantia.com>",
        "References": "<cover.1539249721.git.igor.russkikh@aquantia.com>",
        "In-Reply-To": "<cover.1539249721.git.igor.russkikh@aquantia.com>",
        "Accept-Language": "en-US",
        "Content-Language": "en-US",
        "X-MS-Has-Attach": "",
        "X-MS-TNEF-Correlator": "",
        "x-clientproxiedby": "VI1PR04CA0088.eurprd04.prod.outlook.com\n\t(2603:10a6:803:64::23) To BY1PR0701MB1660.namprd07.prod.outlook.com\n\t(2a01:111:e400:522a::22)",
        "authentication-results": "spf=none (sender IP is )\n\tsmtp.mailfrom=Igor.Russkikh@aquantia.com; ",
        "x-ms-exchange-messagesentrepresentingtype": "1",
        "x-originating-ip": "[95.79.108.179]",
        "x-ms-publictraffictype": "Email",
        "x-microsoft-exchange-diagnostics": "1; BY1PR0701MB1206;\n\t6:XScumJxx37pQ8xvz0uLpkaz3SZYt3kHYSkDAicsgqjFPywSv+8N0VXZ9SQpwzzvu4+02M1jhuWYVInz2LDy1eV5HMh1SHF2b6XU3qmaM5hxL5J2Fq6I2ethZ1pKtXaGpNAcQCeEIu47+/TM+WwChVUItT9Fz7juIUHhRf0oMrbKZ2bsp8v7opWDcM0mt6UJcQYy3Y9jRuRJsVxVsHTtwiV3TEk2qEqCTQaxrECPfqW/9pJz7Kpp4cLAHe8IyFYrGLCF17KOUu/7flJMAkY10h9EklxZ1hc1xhT2N7aU13JTA5LrK0O1KG1Z+thaVWEGlC6P2e6wTHiIMLXjtU7KTiRoDQsdLNNsxRoQnZ2MEzBoCSnFlN570rGINh9hr0TpE/bwbliODMEgako9dPosHamFjNjF7n0amKVQAonpiDCJLPomAEKX1k7EzPEhZZkzl3lNK9ZtV7iGXxIpzT6hAVO61Zu9FZLxNuWfHAeaWmHQ=;\n\t5:ax9uGjSyyysnpga8GFrkqpNsJsBTOmKko1IMOjJHvWmIMdO1X6g44bTn1eXQADcTQcG3KmqXwDQ4DzIWeJsffJsOx/LknaBnAzpYLd6NAYvUuk5PlQQz27SS60CFVMCsQPYly0L0VNvC8SCRYM0zbQZIzRrsTPqrEaMQF01kroA=;\n\t7:BTsZ9vK4six+6IJvzQvWUHdsUbpIT6uhd1hr6xAubcBoBeF9uz1ozptZ3s8gmQP40sYKf0HzacCa0QmsIU0zlm7yovt0YIXHIQ437R5bGDFWe5gD+78Qq9katLnwqCHoZQwN1v/HFdT1b7DzuD2VPA/sH0OtdiX5CddTePxqLNVEpGLewKw1lFlfcYch41s90O1j8xSgQE5YnALv1o/yQFrmvrvycc+q43uCoJY0wq6VsFjchqBcyfFvsuqJwwhi",
        "x-ms-office365-filtering-correlation-id": "1a386649-1878-4b26-cc7c-08d62f653b5e",
        "x-microsoft-antispam": "BCL:0; PCL:0;\n\tRULEID:(7020095)(4652040)(8989299)(4534185)(4627221)(201703031133081)(201702281549075)(8990200)(5600074)(711020)(2017052603328)(7153060)(7193020);\n\tSRVR:BY1PR0701MB1206; ",
        "x-ms-traffictypediagnostic": "BY1PR0701MB1206:",
        "x-microsoft-antispam-prvs": "<BY1PR0701MB1206E22A8D206809C013E74198E10@BY1PR0701MB1206.namprd07.prod.outlook.com>",
        "x-exchange-antispam-report-test": "UriScan:;",
        "x-ms-exchange-senderadcheck": "1",
        "x-exchange-antispam-report-cfa-test": "BCL:0; PCL:0;\n\tRULEID:(6040522)(2401047)(8121501046)(5005006)(3002001)(3231355)(944501410)(52105095)(10201501046)(93006095)(93001095)(149066)(150057)(6041310)(20161123564045)(20161123560045)(20161123562045)(201703131423095)(201702281528075)(20161123555045)(201703061421075)(201703061406153)(20161123558120)(201708071742011)(7699051);\n\tSRVR:BY1PR0701MB1206; BCL:0; PCL:0; RULEID:; SRVR:BY1PR0701MB1206; ",
        "x-forefront-prvs": "08220FA8D6",
        "x-forefront-antispam-report": "SFV:NSPM;\n\tSFS:(10009020)(1496009)(346002)(136003)(396003)(39850400004)(376002)(366004)(189003)(199004)(486006)(14444005)(256004)(2351001)(25786009)(186003)(44832011)(2501003)(26005)(6436002)(6512007)(118296001)(478600001)(5640700003)(305945005)(71190400001)(71200400001)(72206003)(4326008)(53936002)(66066001)(7736002)(8676002)(53946003)(6486002)(102836004)(36756003)(14454004)(81166006)(81156014)(1730700003)(386003)(6506007)(6116002)(3846002)(446003)(54906003)(68736007)(11346002)(476003)(99286004)(4744004)(76176011)(52116002)(8936002)(316002)(2616005)(86362001)(106356001)(5660300001)(97736004)(2900100001)(105586002)(6916009)(5250100002)(2906002)(579004);\n\tDIR:OUT; SFP:1101; SCL:1; SRVR:BY1PR0701MB1206;\n\tH:BY1PR0701MB1660.namprd07.prod.outlook.com; FPR:; SPF:None; LANG:en; \n\tPTR:InfoNoRecords; MX:1; A:1; ",
        "received-spf": "None (protection.outlook.com: aquantia.com does not designate\n\tpermitted sender hosts)",
        "x-microsoft-antispam-message-info": "V2rgB1Hzf9NIg+8AxUepjKjSLldQiOm//puX1JZI53FGxTvnv91VIiufGU+zoYi6P0juCpjFxGiz4k8krRytoLlktMjILLVa6ynpkOlvAl8ndyDoLBWXmx0Gwo+UvOEn+qm5SsTSY3hNj4rnXpn8ar1yCN+uBIdqsS9w5q9+YERq3/iNDZwBeC8IWoErdN8XnySJHValPW8NnsKbUs3e8iFWYnmuHQKkeiRDz+35awryJFs3J6UgfqO4+ldOVHSlAb5wjnQDmjLyxE9V38iTmpRtDB1uEeU/3WAAkqGyHtGnQxi5rxMM+AdxSlK5bzWnwvSdgbWwC0fdcDWXw93dlz3f4N86aCuPgpakRQA2mNc=",
        "spamdiagnosticoutput": "1:99",
        "spamdiagnosticmetadata": "NSPM",
        "Content-Type": "text/plain; charset=\"iso-8859-1\"",
        "Content-Transfer-Encoding": "quoted-printable",
        "MIME-Version": "1.0",
        "X-OriginatorOrg": "aquantia.com",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "1a386649-1878-4b26-cc7c-08d62f653b5e",
        "X-MS-Exchange-CrossTenant-originalarrivaltime": "11 Oct 2018 10:35:16.1771\n\t(UTC)",
        "X-MS-Exchange-CrossTenant-fromentityheader": "Hosted",
        "X-MS-Exchange-CrossTenant-id": "83e2e134-991c-4ede-8ced-34d47e38e6b1",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BY1PR0701MB1206",
        "Subject": "[dpdk-dev] [PATCH v5 10/23] net/atlantic: receive side structures\n\tand implementation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add implementation for RX datapath.\n\nSigned-off-by: Igor Russkikh <igor.russkikh@aquantia.com>\nSigned-off-by: Pavel Belous <pavel.belous@aquantia.com>\n---\n doc/guides/nics/atlantic.rst          |   6 +\n doc/guides/nics/features/atlantic.ini |   5 +\n drivers/net/atlantic/Makefile         |   2 +-\n drivers/net/atlantic/atl_ethdev.c     |  79 ++++-\n drivers/net/atlantic/atl_ethdev.h     |  19 +\n drivers/net/atlantic/atl_rxtx.c       | 632 +++++++++++++++++++++++++++++++++-\n 6 files changed, 733 insertions(+), 10 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/atlantic.rst b/doc/guides/nics/atlantic.rst\nindex 21e52bb26625..a003ef5110c9 100644\n--- a/doc/guides/nics/atlantic.rst\n+++ b/doc/guides/nics/atlantic.rst\n@@ -12,6 +12,10 @@ More information can be found at `Aquantia Official Website\n Supported features\n ^^^^^^^^^^^^^^^^^^\n \n+- Base L2 features\n+- Checksum offload\n+- Jumbo Frame upto 16K\n+\n Configuration Information\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n \n@@ -29,6 +33,8 @@ Statistics\n MTU setting\n ~~~~~~~~~~~\n \n+Atlantic NIC supports up to 16K jumbo frame size\n+\n Supported Chipsets and NICs\n ---------------------------\n \ndiff --git a/doc/guides/nics/features/atlantic.ini b/doc/guides/nics/features/atlantic.ini\nindex 9ffb3f61577d..65567c73d3e7 100644\n--- a/doc/guides/nics/features/atlantic.ini\n+++ b/doc/guides/nics/features/atlantic.ini\n@@ -5,6 +5,11 @@\n ;\n [Features]\n Queue start/stop     = Y\n+Jumbo frame          = Y\n+CRC offload          = Y\n+L3 checksum offload  = Y\n+L4 checksum offload  = Y\n+Packet type parsing  = Y\n FW version           = Y\n Linux UIO            = Y\n x86-32               = Y\ndiff --git a/drivers/net/atlantic/Makefile b/drivers/net/atlantic/Makefile\nindex b88da362146d..62dcdbffa69c 100644\n--- a/drivers/net/atlantic/Makefile\n+++ b/drivers/net/atlantic/Makefile\n@@ -15,7 +15,7 @@ EXPORT_MAP := rte_pmd_atlantic_version.map\n \n LIBABIVER := 1\n \n-LDLIBS += -lrte_eal\n+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring\n LDLIBS += -lrte_ethdev -lrte_net\n LDLIBS += -lrte_bus_pci\n \ndiff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c\nindex 91a7e3cb530f..dbe32295f469 100644\n--- a/drivers/net/atlantic/atl_ethdev.c\n+++ b/drivers/net/atlantic/atl_ethdev.c\n@@ -27,6 +27,7 @@ static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,\n static void atl_dev_info_get(struct rte_eth_dev *dev,\n \t\t\t       struct rte_eth_dev_info *dev_info);\n \n+static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);\n \n static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n \tstruct rte_pci_device *pci_dev);\n@@ -75,6 +76,18 @@ static struct rte_pci_driver rte_atl_pmd = {\n \t.remove = eth_atl_pci_remove,\n };\n \n+#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \\\n+\t\t\t| DEV_RX_OFFLOAD_IPV4_CKSUM \\\n+\t\t\t| DEV_RX_OFFLOAD_UDP_CKSUM \\\n+\t\t\t| DEV_RX_OFFLOAD_TCP_CKSUM \\\n+\t\t\t| DEV_RX_OFFLOAD_JUMBO_FRAME)\n+\n+static const struct rte_eth_desc_lim rx_desc_lim = {\n+\t.nb_max = ATL_MAX_RING_DESC,\n+\t.nb_min = ATL_MIN_RING_DESC,\n+\t.nb_align = ATL_RXD_ALIGN,\n+};\n+\n static const struct eth_dev_ops atl_eth_dev_ops = {\n \t.dev_configure\t      = atl_dev_configure,\n \t.dev_start\t      = atl_dev_start,\n@@ -84,6 +97,13 @@ static const struct eth_dev_ops atl_eth_dev_ops = {\n \n \t.fw_version_get       = atl_fw_version_get,\n \t.dev_infos_get\t      = atl_dev_info_get,\n+\t.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,\n+\n+\t/* Queue Control */\n+\t.rx_queue_start\t      = atl_rx_queue_start,\n+\t.rx_queue_stop\t      = atl_rx_queue_stop,\n+\t.rx_queue_setup       = atl_rx_queue_setup,\n+\t.rx_queue_release     = atl_rx_queue_release,\n };\n \n static inline int32_t\n@@ -220,7 +240,7 @@ atl_dev_start(struct rte_eth_dev *dev)\n \terr = atl_rx_init(dev);\n \tif (err) {\n \t\tPMD_INIT_LOG(ERR, \"Unable to initialize RX hardware\");\n-\t\treturn -EIO;\n+\t\tgoto error;\n \t}\n \n \tPMD_INIT_LOG(DEBUG, \"FW version: %u.%u.%u\",\n@@ -229,7 +249,17 @@ 
atl_dev_start(struct rte_eth_dev *dev)\n \t\thw->fw_ver_actual & 0xFFFF);\n \tPMD_INIT_LOG(DEBUG, \"Driver version: %s\", ATL_PMD_DRIVER_VERSION);\n \n-\treturn err;\n+\terr = atl_start_queues(dev);\n+\tif (err < 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Unable to start rxtx queues\");\n+\t\tgoto error;\n+\t}\n+\n+\treturn 0;\n+\n+error:\n+\tatl_stop_queues(dev);\n+\treturn -EIO;\n }\n \n /*\n@@ -244,6 +274,12 @@ atl_dev_stop(struct rte_eth_dev *dev)\n \t/* reset the NIC */\n \tatl_reset_hw(hw);\n \thw->adapter_stopped = 1;\n+\n+\tatl_stop_queues(dev);\n+\n+\t/* Clear stored conf */\n+\tdev->data->scattered_rx = 0;\n+\tdev->data->lro = 0;\n }\n \n /*\n@@ -255,6 +291,8 @@ atl_dev_close(struct rte_eth_dev *dev)\n \tPMD_INIT_FUNC_TRACE();\n \n \tatl_dev_stop(dev);\n+\n+\tatl_free_queues(dev);\n }\n \n static int\n@@ -298,14 +336,47 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n {\n \tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n \n-\tdev_info->max_rx_queues = 0;\n-\tdev_info->max_rx_queues = 0;\n+\tdev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;\n+\tdev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;\n \n+\tdev_info->min_rx_bufsize = 1024;\n+\tdev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;\n+\tdev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;\n \tdev_info->max_vfs = pci_dev->max_vfs;\n \n \tdev_info->max_hash_mac_addrs = 0;\n \tdev_info->max_vmdq_pools = 0;\n \tdev_info->vmdq_queue_num = 0;\n+\n+\tdev_info->rx_offload_capa = ATL_RX_OFFLOADS;\n+\n+\tdev_info->default_rxconf = (struct rte_eth_rxconf) {\n+\t\t.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,\n+\t};\n+\n+\tdev_info->rx_desc_lim = rx_desc_lim;\n+}\n+\n+static const uint32_t *\n+atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)\n+{\n+\tstatic const uint32_t ptypes[] = {\n+\t\tRTE_PTYPE_L2_ETHER,\n+\t\tRTE_PTYPE_L2_ETHER_ARP,\n+\t\tRTE_PTYPE_L2_ETHER_VLAN,\n+\t\tRTE_PTYPE_L3_IPV4,\n+\t\tRTE_PTYPE_L3_IPV6,\n+\t\tRTE_PTYPE_L4_TCP,\n+\t\tRTE_PTYPE_L4_UDP,\n+\t\tRTE_PTYPE_L4_SCTP,\n+\t\tRTE_PTYPE_L4_ICMP,\n+\t\tRTE_PTYPE_UNKNOWN\n+\t};\n+\n+\tif (dev->rx_pkt_burst == atl_recv_pkts)\n+\t\treturn ptypes;\n+\n+\treturn NULL;\n }\n \n RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);\ndiff --git a/drivers/net/atlantic/atl_ethdev.h b/drivers/net/atlantic/atl_ethdev.h\nindex 990e8e4e9978..a9a9fc8fe7ff 100644\n--- a/drivers/net/atlantic/atl_ethdev.h\n+++ b/drivers/net/atlantic/atl_ethdev.h\n@@ -13,6 +13,10 @@\n #define ATL_DEV_PRIVATE_TO_HW(adapter) \\\n \t(&((struct atl_adapter *)adapter)->hw)\n \n+#define ATL_DEV_TO_ADAPTER(dev) \\\n+\t((struct atl_adapter *)(dev)->data->dev_private)\n+\n+\n /*\n  * Structure to store private data for each driver instance (for each port).\n  */\n@@ -24,9 +28,24 @@ struct atl_adapter {\n /*\n  * RX/TX function prototypes\n  */\n+void atl_rx_queue_release(void *rxq);\n+\n+int atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\t\tuint16_t nb_rx_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\tstruct rte_mempool *mb_pool);\n+\n int atl_rx_init(struct rte_eth_dev *dev);\n int atl_tx_init(struct rte_eth_dev *dev);\n \n+int atl_start_queues(struct rte_eth_dev *dev);\n+int atl_stop_queues(struct rte_eth_dev *dev);\n+void atl_free_queues(struct rte_eth_dev *dev);\n+\n+int atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+\n+\n uint16_t atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tuint16_t nb_pkts);\n \ndiff --git 
a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c\nindex 0fbd93038075..61dbb83b4572 100644\n--- a/drivers/net/atlantic/atl_rxtx.c\n+++ b/drivers/net/atlantic/atl_rxtx.c\n@@ -2,7 +2,150 @@\n  * Copyright(c) 2018 Aquantia Corporation\n  */\n \n+#include <rte_malloc.h>\n+#include <rte_ethdev_driver.h>\n+\n #include \"atl_ethdev.h\"\n+#include \"atl_hw_regs.h\"\n+\n+#include \"atl_logs.h\"\n+#include \"hw_atl/hw_atl_llh.h\"\n+#include \"hw_atl/hw_atl_b0.h\"\n+#include \"hw_atl/hw_atl_b0_internal.h\"\n+\n+/**\n+ * Structure associated with each descriptor of the RX ring of a RX queue.\n+ */\n+struct atl_rx_entry {\n+\tstruct rte_mbuf *mbuf;\n+};\n+\n+/**\n+ * Structure associated with each RX queue.\n+ */\n+struct atl_rx_queue {\n+\tstruct rte_mempool\t*mb_pool;\n+\tstruct hw_atl_rxd_s\t*hw_ring;\n+\tuint64_t\t\thw_ring_phys_addr;\n+\tstruct atl_rx_entry\t*sw_ring;\n+\tuint16_t\t\tnb_rx_desc;\n+\tuint16_t\t\trx_tail;\n+\tuint16_t\t\tnb_rx_hold;\n+\tuint16_t\t\trx_free_thresh;\n+\tuint16_t\t\tqueue_id;\n+\tuint16_t\t\tport_id;\n+\tuint16_t\t\tbuff_size;\n+\tbool\t\t\tl3_csum_enabled;\n+\tbool\t\t\tl4_csum_enabled;\n+};\n+\n+static inline void\n+atl_reset_rx_queue(struct atl_rx_queue *rxq)\n+{\n+\tstruct hw_atl_rxd_s *rxd = NULL;\n+\tint i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\trxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];\n+\t\trxd->buf_addr = 0;\n+\t\trxd->hdr_addr = 0;\n+\t}\n+\n+\trxq->rx_tail = 0;\n+}\n+\n+int\n+atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\t\t   uint16_t nb_rx_desc, unsigned int socket_id,\n+\t\t   const struct rte_eth_rxconf *rx_conf,\n+\t\t   struct rte_mempool *mb_pool)\n+{\n+\tstruct atl_rx_queue *rxq;\n+\tconst struct rte_memzone *mz;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* make sure a valid number of descriptors have been requested */\n+\tif (nb_rx_desc < AQ_HW_MIN_RX_RING_SIZE ||\n+\t\t\tnb_rx_desc > AQ_HW_MAX_RX_RING_SIZE) {\n+\t\tPMD_INIT_LOG(ERR, \"Number of Rx descriptors must be \"\n+\t\t\"less than or equal to %d, \"\n+\t\t\"greater than or equal to %d\", AQ_HW_MAX_RX_RING_SIZE,\n+\t\tAQ_HW_MIN_RX_RING_SIZE);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/*\n+\t * if this queue existed already, free the associated memory. 
The\n+\t * queue cannot be reused in case we need to allocate memory on\n+\t * different socket than was previously used.\n+\t */\n+\tif (dev->data->rx_queues[rx_queue_id] != NULL) {\n+\t\tatl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);\n+\t\tdev->data->rx_queues[rx_queue_id] = NULL;\n+\t}\n+\n+\t/* allocate memory for the queue structure */\n+\trxq = rte_zmalloc_socket(\"atlantic Rx queue\", sizeof(*rxq),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rxq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Cannot allocate queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* setup queue */\n+\trxq->mb_pool = mb_pool;\n+\trxq->nb_rx_desc = nb_rx_desc;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->queue_id = rx_queue_id;\n+\trxq->rx_free_thresh = rx_conf->rx_free_thresh;\n+\n+\trxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &\n+\t\tDEV_RX_OFFLOAD_IPV4_CKSUM;\n+\trxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &\n+\t\t(DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);\n+\tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n+\t\tPMD_DRV_LOG(ERR, \"PMD does not support KEEP_CRC offload\");\n+\n+\t/* allocate memory for the software ring */\n+\trxq->sw_ring = rte_zmalloc_socket(\"atlantic sw rx ring\",\n+\t\t\t\tnb_rx_desc * sizeof(struct atl_rx_entry),\n+\t\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n+\tif (rxq->sw_ring == NULL) {\n+\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\"Port %d: Cannot allocate software ring for queue %d\",\n+\t\t\trxq->port_id, rxq->queue_id);\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/*\n+\t * allocate memory for the hardware descriptor ring. A memzone large\n+\t * enough to hold the maximum ring size is requested to allow for\n+\t * resizing in later calls to the queue setup function.\n+\t */\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx hw_ring\", rx_queue_id,\n+\t\t\t\t      HW_ATL_B0_MAX_RXD *\n+\t\t\t\t\tsizeof(struct hw_atl_rxd_s),\n+\t\t\t\t      128, socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\"Port %d: Cannot allocate hardware ring for queue %d\",\n+\t\t\trxq->port_id, rxq->queue_id);\n+\t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\trxq->hw_ring = mz->addr;\n+\trxq->hw_ring_phys_addr = mz->iova;\n+\n+\tatl_reset_rx_queue(rxq);\n+\n+\tdev->data->rx_queues[rx_queue_id] = rxq;\n+\treturn 0;\n+}\n \n int\n atl_tx_init(struct rte_eth_dev *eth_dev __rte_unused)\n@@ -11,11 +154,177 @@ atl_tx_init(struct rte_eth_dev *eth_dev __rte_unused)\n }\n \n int\n-atl_rx_init(struct rte_eth_dev *eth_dev __rte_unused)\n+atl_rx_init(struct rte_eth_dev *eth_dev)\n {\n+\tstruct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);\n+\tstruct atl_rx_queue *rxq;\n+\tuint64_t base_addr = 0;\n+\tint i = 0;\n+\tint err = 0;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n+\t\trxq = eth_dev->data->rx_queues[i];\n+\t\tbase_addr = rxq->hw_ring_phys_addr;\n+\n+\t\t/* Take requested pool mbuf size and adapt\n+\t\t * descriptor buffer to best fit\n+\t\t */\n+\t\tint buff_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -\n+\t\t\t\tRTE_PKTMBUF_HEADROOM;\n+\n+\t\tbuff_size = RTE_ALIGN_FLOOR(buff_size, 1024);\n+\t\tif (buff_size > HW_ATL_B0_RXD_BUF_SIZE_MAX) {\n+\t\t\tPMD_INIT_LOG(WARNING,\n+\t\t\t\t\"Port %d queue %d: mem pool buff size is too big\\n\",\n+\t\t\t\trxq->port_id, rxq->queue_id);\n+\t\t\tbuff_size = HW_ATL_B0_RXD_BUF_SIZE_MAX;\n+\t\t}\n+\t\tif (buff_size < 1024) {\n+\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t\"Port %d queue %d: mem pool buff 
size is too small\\n\",\n+\t\t\t\trxq->port_id, rxq->queue_id);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\trxq->buff_size = buff_size;\n+\n+\t\terr = hw_atl_b0_hw_ring_rx_init(hw, base_addr, rxq->queue_id,\n+\t\t\t\t\t\trxq->nb_rx_desc, buff_size, 0,\n+\t\t\t\t\t\trxq->port_id);\n+\n+\t\tif (err) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Port %d: Cannot init RX queue %d\",\n+\t\t\t\t     rxq->port_id, rxq->queue_id);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn err;\n+}\n+\n+static int\n+atl_alloc_rx_queue_mbufs(struct atl_rx_queue *rxq)\n+{\n+\tstruct atl_rx_entry *rx_entry = rxq->sw_ring;\n+\tstruct hw_atl_rxd_s *rxd;\n+\tuint64_t dma_addr = 0;\n+\tuint32_t i = 0;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* fill Rx ring */\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tstruct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\n+\t\tif (mbuf == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t\"Port %d: mbuf alloc failed for rx queue %d\",\n+\t\t\t\trxq->port_id, rxq->queue_id);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\t\trxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];\n+\t\trxd->buf_addr = dma_addr;\n+\t\trxd->hdr_addr = 0;\n+\t\trx_entry[i].mbuf = mbuf;\n+\t}\n+\n \treturn 0;\n }\n \n+static void\n+atl_rx_queue_release_mbufs(struct atl_rx_queue *rxq)\n+{\n+\tint i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rxq->sw_ring != NULL) {\n+\t\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\t\tif (rxq->sw_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);\n+\t\t\t\trxq->sw_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+int\n+atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct atl_rx_queue *rxq = NULL;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rx_queue_id < dev->data->nb_rx_queues) {\n+\t\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\t\tif (atl_alloc_rx_queue_mbufs(rxq) != 0) {\n+\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t\"Port %d: Allocate mbufs for queue %d failed\",\n+\t\t\t\trxq->port_id, rxq->queue_id);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\thw_atl_b0_hw_ring_rx_start(hw, rx_queue_id);\n+\n+\t\trte_wmb();\n+\t\thw_atl_reg_rx_dma_desc_tail_ptr_set(hw, rxq->nb_rx_desc - 1,\n+\t\t\t\t\t\t    rx_queue_id);\n+\t\tdev->data->rx_queue_state[rx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\t} else {\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct atl_rx_queue *rxq = NULL;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rx_queue_id < dev->data->nb_rx_queues) {\n+\t\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\t\thw_atl_b0_hw_ring_rx_stop(hw, rx_queue_id);\n+\n+\t\tatl_rx_queue_release_mbufs(rxq);\n+\t\tatl_reset_rx_queue(rxq);\n+\n+\t\tdev->data->rx_queue_state[rx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STOPPED;\n+\t} else {\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+void\n+atl_rx_queue_release(void *rx_queue)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rx_queue != NULL) {\n+\t\tstruct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;\n+\n+\t\tatl_rx_queue_release_mbufs(rxq);\n+\t\trte_free(rxq->sw_ring);\n+\t\trte_free(rxq);\n+\t}\n+}\n+\n uint16_t\n atl_prep_pkts(void *tx_queue __rte_unused,\n \t      struct rte_mbuf **tx_pkts __rte_unused,\n@@ -24,14 +333,327 @@ atl_prep_pkts(void *tx_queue 
__rte_unused,\n \treturn 0;\n }\n \n-uint16_t\n-atl_recv_pkts(void *rx_queue __rte_unused,\n-\t      struct rte_mbuf **rx_pkts __rte_unused,\n-\t      uint16_t nb_pkts __rte_unused)\n+void\n+atl_free_queues(struct rte_eth_dev *dev)\n+{\n+\tunsigned int i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tatl_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tdev->data->rx_queues[i] = 0;\n+\t}\n+\tdev->data->nb_rx_queues = 0;\n+}\n+\n+int\n+atl_start_queues(struct rte_eth_dev *dev)\n {\n+\tint i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tif (atl_rx_queue_start(dev, i) != 0) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t\"Port %d: Start Rx queue %d failed\",\n+\t\t\t\tdev->data->port_id, i);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n \treturn 0;\n }\n \n+int\n+atl_stop_queues(struct rte_eth_dev *dev)\n+{\n+\tint i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tif (atl_rx_queue_stop(dev, i) != 0) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t\"Port %d: Stop Rx queue %d failed\",\n+\t\t\t\tdev->data->port_id, i);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static uint64_t\n+atl_desc_to_offload_flags(struct atl_rx_queue *rxq,\n+\t\t\t  struct hw_atl_rxd_wb_s *rxd_wb)\n+{\n+\tuint64_t mbuf_flags = 0;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* IPv4 ? */\n+\tif (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {\n+\t\t/* IPv4 csum error ? */\n+\t\tif (rxd_wb->rx_stat & BIT(1))\n+\t\t\tmbuf_flags |= PKT_RX_IP_CKSUM_BAD;\n+\t\telse\n+\t\t\tmbuf_flags |= PKT_RX_IP_CKSUM_GOOD;\n+\t} else {\n+\t\tmbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;\n+\t}\n+\n+\t/* CSUM calculated ? */\n+\tif (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {\n+\t\tif (rxd_wb->rx_stat & BIT(2))\n+\t\t\tmbuf_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t\telse\n+\t\t\tmbuf_flags |= PKT_RX_L4_CKSUM_GOOD;\n+\t} else {\n+\t\tmbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;\n+\t}\n+\n+\treturn mbuf_flags;\n+}\n+\n+static uint32_t\n+atl_desc_to_pkt_type(struct hw_atl_rxd_wb_s *rxd_wb)\n+{\n+\tuint32_t type = RTE_PTYPE_UNKNOWN;\n+\tuint16_t l2_l3_type = rxd_wb->pkt_type & 0x3;\n+\tuint16_t l4_type = (rxd_wb->pkt_type & 0x1C) >> 2;\n+\n+\tswitch (l2_l3_type) {\n+\tcase 0:\n+\t\ttype = RTE_PTYPE_L3_IPV4;\n+\t\tbreak;\n+\tcase 1:\n+\t\ttype = RTE_PTYPE_L3_IPV6;\n+\t\tbreak;\n+\tcase 2:\n+\t\ttype = RTE_PTYPE_L2_ETHER;\n+\t\tbreak;\n+\tcase 3:\n+\t\ttype = RTE_PTYPE_L2_ETHER_ARP;\n+\t\tbreak;\n+\t}\n+\n+\tswitch (l4_type) {\n+\tcase 0:\n+\t\ttype |= RTE_PTYPE_L4_TCP;\n+\t\tbreak;\n+\tcase 1:\n+\t\ttype |= RTE_PTYPE_L4_UDP;\n+\t\tbreak;\n+\tcase 2:\n+\t\ttype |= RTE_PTYPE_L4_SCTP;\n+\t\tbreak;\n+\tcase 3:\n+\t\ttype |= RTE_PTYPE_L4_ICMP;\n+\t\tbreak;\n+\t}\n+\n+\tif (rxd_wb->pkt_type & BIT(5))\n+\t\ttype |= RTE_PTYPE_L2_ETHER_VLAN;\n+\n+\treturn type;\n+}\n+\n+uint16_t\n+atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;\n+\tstruct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];\n+\tstruct atl_adapter *adapter =\n+\t\tATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);\n+\tstruct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);\n+\tstruct atl_rx_entry *sw_ring = rxq->sw_ring;\n+\n+\tstruct rte_mbuf *new_mbuf;\n+\tstruct rte_mbuf *rx_mbuf, *rx_mbuf_prev, *rx_mbuf_first;\n+\tstruct atl_rx_entry *rx_entry;\n+\tuint16_t nb_rx = 0;\n+\tuint16_t nb_hold = 0;\n+\tstruct hw_atl_rxd_wb_s rxd_wb;\n+\tstruct hw_atl_rxd_s *rxd = NULL;\n+\tuint16_t tail = 
rxq->rx_tail;\n+\tuint64_t dma_addr;\n+\tuint16_t pkt_len = 0;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\tuint16_t eop_tail = tail;\n+\n+\t\trxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];\n+\t\trxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;\n+\n+\t\tif (!rxd_wb.dd) { /* RxD is not done */\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tPMD_RX_LOG(ERR, \"port_id=%u queue_id=%u tail=%u \"\n+\t\t\t   \"eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x\",\n+\t\t\t   (unsigned int)rxq->port_id,\n+\t\t\t   (unsigned int)rxq->queue_id,\n+\t\t\t   (unsigned int)tail, (unsigned int)rxd_wb.eop,\n+\t\t\t   (unsigned int)rte_le_to_cpu_16(rxd_wb.pkt_len),\n+\t\t\trxd_wb.rss_hash, rxd_wb.rss_type);\n+\n+\t\t/* RxD is not done */\n+\t\tif (!rxd_wb.eop) {\n+\t\t\twhile (true) {\n+\t\t\t\tstruct hw_atl_rxd_wb_s *eop_rxwbd;\n+\n+\t\t\t\teop_tail = (eop_tail + 1) % rxq->nb_rx_desc;\n+\t\t\t\teop_rxwbd = (struct hw_atl_rxd_wb_s *)\n+\t\t\t\t\t&rxq->hw_ring[eop_tail];\n+\t\t\t\tif (!eop_rxwbd->dd) {\n+\t\t\t\t\t/* no EOP received yet */\n+\t\t\t\t\teop_tail = tail;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t\tif (eop_rxwbd->dd && eop_rxwbd->eop)\n+\t\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\t/* No EOP in ring */\n+\t\t\tif (eop_tail == tail)\n+\t\t\t\tbreak;\n+\t\t}\n+\t\trx_mbuf_prev = NULL;\n+\t\trx_mbuf_first = NULL;\n+\n+\t\t/* Run through packet segments */\n+\t\twhile (true) {\n+\t\t\tnew_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\t\tif (new_mbuf == NULL) {\n+\t\t\t\tPMD_RX_LOG(ERR,\n+\t\t\t\t   \"RX mbuf alloc failed port_id=%u \"\n+\t\t\t\t   \"queue_id=%u\", (unsigned int)rxq->port_id,\n+\t\t\t\t   (unsigned int)rxq->queue_id);\n+\t\t\t\tdev->data->rx_mbuf_alloc_failed++;\n+\t\t\t\t\t\tgoto err_stop;\n+\t\t\t}\n+\n+\t\t\tnb_hold++;\n+\t\t\trx_entry = &sw_ring[tail];\n+\n+\t\t\trx_mbuf = rx_entry->mbuf;\n+\t\t\trx_entry->mbuf = new_mbuf;\n+\t\t\tdma_addr = rte_cpu_to_le_64(\n+\t\t\t\trte_mbuf_data_iova_default(new_mbuf));\n+\n+\t\t\t/* setup RX descriptor */\n+\t\t\trxd->hdr_addr = 0;\n+\t\t\trxd->buf_addr = dma_addr;\n+\n+\t\t\t/*\n+\t\t\t * Initialize the returned mbuf.\n+\t\t\t * 1) setup generic mbuf fields:\n+\t\t\t *\t  - number of segments,\n+\t\t\t *\t  - next segment,\n+\t\t\t *\t  - packet length,\n+\t\t\t *\t  - RX port identifier.\n+\t\t\t * 2) integrate hardware offload data, if any:\n+\t\t\t *\t<  - RSS flag & hash,\n+\t\t\t *\t  - IP checksum flag,\n+\t\t\t *\t  - VLAN TCI, if any,\n+\t\t\t *\t  - error flags.\n+\t\t\t */\n+\t\t\tpkt_len = (uint16_t)rte_le_to_cpu_16(rxd_wb.pkt_len);\n+\t\t\trx_mbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\t\trte_prefetch1((char *)rx_mbuf->buf_addr +\n+\t\t\t\trx_mbuf->data_off);\n+\t\t\trx_mbuf->nb_segs = 0;\n+\t\t\trx_mbuf->next = NULL;\n+\t\t\trx_mbuf->pkt_len = pkt_len;\n+\t\t\trx_mbuf->data_len = pkt_len;\n+\t\t\tif (rxd_wb.eop) {\n+\t\t\t\tu16 remainder_len = pkt_len % rxq->buff_size;\n+\t\t\t\tif (!remainder_len)\n+\t\t\t\t\tremainder_len = rxq->buff_size;\n+\t\t\t\trx_mbuf->data_len = remainder_len;\n+\t\t\t} else {\n+\t\t\t\trx_mbuf->data_len = pkt_len > rxq->buff_size ?\n+\t\t\t\t\t\trxq->buff_size : pkt_len;\n+\t\t\t}\n+\t\t\trx_mbuf->port = rxq->port_id;\n+\n+\t\t\trx_mbuf->hash.rss = rxd_wb.rss_hash;\n+\n+\t\t\trx_mbuf->vlan_tci = rxd_wb.vlan;\n+\n+\t\t\trx_mbuf->ol_flags =\n+\t\t\t\tatl_desc_to_offload_flags(rxq, &rxd_wb);\n+\t\t\trx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);\n+\n+\t\t\tif (!rx_mbuf_first)\n+\t\t\t\trx_mbuf_first = rx_mbuf;\n+\t\t\trx_mbuf_first->nb_segs++;\n+\n+\t\t\tif (rx_mbuf_prev)\n+\t\t\t\trx_mbuf_prev->next = rx_mbuf;\n+\t\t\trx_mbuf_prev = 
rx_mbuf;\n+\n+\t\t\ttail = (tail + 1) % rxq->nb_rx_desc;\n+\t\t\t/* Prefetch next mbufs */\n+\t\t\trte_prefetch0(sw_ring[tail].mbuf);\n+\t\t\tif ((tail & 0x3) == 0) {\n+\t\t\t\trte_prefetch0(&sw_ring[tail]);\n+\t\t\t\trte_prefetch0(&sw_ring[tail]);\n+\t\t\t}\n+\n+\t\t\t/* filled mbuf_first */\n+\t\t\tif (rxd_wb.eop)\n+\t\t\t\tbreak;\n+\t\t\trxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];\n+\t\t\trxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;\n+\t\t};\n+\n+\t\t/*\n+\t\t * Store the mbuf address into the next entry of the array\n+\t\t * of returned packets.\n+\t\t */\n+\t\trx_pkts[nb_rx++] = rx_mbuf_first;\n+\n+\t\tPMD_RX_LOG(ERR, \"add mbuf segs=%d pkt_len=%d\",\n+\t\t\trx_mbuf_first->nb_segs,\n+\t\t\trx_mbuf_first->pkt_len);\n+\t}\n+\n+err_stop:\n+\n+\trxq->rx_tail = tail;\n+\n+\t/*\n+\t * If the number of free RX descriptors is greater than the RX free\n+\t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n+\t * register.\n+\t * Update the RDT with the value of the last processed RX descriptor\n+\t * minus 1, to guarantee that the RDT register is never equal to the\n+\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * hardware point of view...\n+\t */\n+\tnb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\tPMD_RX_LOG(ERR, \"port_id=%u queue_id=%u rx_tail=%u \"\n+\t\t\t\"nb_hold=%u nb_rx=%u\",\n+\t\t\t(unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,\n+\t\t\t(unsigned int)tail, (unsigned int)nb_hold,\n+\t\t\t(unsigned int)nb_rx);\n+\t\ttail = (uint16_t)((tail == 0) ?\n+\t\t\t(rxq->nb_rx_desc - 1) : (tail - 1));\n+\n+\t\thw_atl_reg_rx_dma_desc_tail_ptr_set(hw, tail, rxq->queue_id);\n+\n+\t\tnb_hold = 0;\n+\t}\n+\n+\trxq->nb_rx_hold = nb_hold;\n+\n+\treturn nb_rx;\n+}\n+\n+\n uint16_t\n atl_xmit_pkts(void *tx_queue __rte_unused,\n \t      struct rte_mbuf **tx_pkts __rte_unused,\n",
    "prefixes": [
        "v5",
        "10/23"
    ]
}
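
For completeness, a hedged sketch of a partial update through this endpoint (the PATCH method listed in the Allow header above), assuming an API token with maintainer rights on the project; the writable fields used here (state, archived) are an assumption based on typical Patchwork deployments:

    import requests

    url = "https://patches.dpdk.org/api/patches/46574/"
    # Placeholder token value; real tokens are issued per user by the Patchwork instance.
    headers = {"Authorization": "Token 0123456789abcdef"}

    # PATCH performs a partial update: only the submitted fields are changed.
    resp = requests.patch(url, headers=headers,
                          json={"state": "accepted", "archived": True})
    resp.raise_for_status()
    print(resp.json()["state"])   # expected: "accepted" on success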