get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (a full replacement of the writable fields).

GET /api/patches/132596/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 132596,
    "url": "http://patches.dpdk.org/api/patches/132596/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20231013060653.1006410-7-chaoyong.he@corigine.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231013060653.1006410-7-chaoyong.he@corigine.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231013060653.1006410-7-chaoyong.he@corigine.com",
    "date": "2023-10-13T06:06:48",
    "name": "[v3,06/11] net/nfp: standard the comment style",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "23e4d69ca955e4122e0351f79f3136e51afae0f4",
    "submitter": {
        "id": 2554,
        "url": "http://patches.dpdk.org/api/people/2554/?format=api",
        "name": "Chaoyong He",
        "email": "chaoyong.he@corigine.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20231013060653.1006410-7-chaoyong.he@corigine.com/mbox/",
    "series": [
        {
            "id": 29831,
            "url": "http://patches.dpdk.org/api/series/29831/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29831",
            "date": "2023-10-13T06:06:42",
            "name": "Unify the PMD coding style",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/29831/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/132596/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/132596/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 0AF0142354;\n\tFri, 13 Oct 2023 08:08:25 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id DCB4740A7A;\n\tFri, 13 Oct 2023 08:07:42 +0200 (CEST)",
            "from NAM11-CO1-obe.outbound.protection.outlook.com\n (mail-co1nam11on2099.outbound.protection.outlook.com [40.107.220.99])\n by mails.dpdk.org (Postfix) with ESMTP id 46AD140A7D\n for <dev@dpdk.org>; Fri, 13 Oct 2023 08:07:41 +0200 (CEST)",
            "from SJ0PR13MB5545.namprd13.prod.outlook.com (2603:10b6:a03:424::5)\n by PH7PR13MB6296.namprd13.prod.outlook.com (2603:10b6:510:237::7)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6863.45; Fri, 13 Oct\n 2023 06:07:37 +0000",
            "from SJ0PR13MB5545.namprd13.prod.outlook.com\n ([fe80::2d5:f7d6:ecde:c93a]) by SJ0PR13MB5545.namprd13.prod.outlook.com\n ([fe80::2d5:f7d6:ecde:c93a%4]) with mapi id 15.20.6863.032; Fri, 13 Oct 2023\n 06:07:37 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=emHhfSVkDe1U9SDc0yCUrpFvw89VrHP4DXylAA1nbZJSKaFeVKhhZrZPkFYcwF5ui5QrG5AqNlJeb+Ne30eYPb3kchaxl62gO2IC4u7yULiRgyjw8/+TUl+TRdJZ05ho1AsggOYrIRLHFxq5T/crJ6XqRWa5WQLgFfxZqH3QaSDlHLT2egQHBUDRqpSfEE5Kccy8kzzypUxd7q0Pb0s0tHID/K3Lh6eSzDT4Y/lw8bu6942iJO5y+A48uUCVBVCQhc1BLQ+wYEXA5Yn9EyZ6gE/EUlmBAsamU0AyKHawqq+M0gq3FhP963GLLTf8f07MioY4dCyNZBtyYGIIWJhOJw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=LZ3nUd8r92OW5f42ettDfbqsSuO0aBqmp+HA8rgHIjo=;\n b=DX5/sB1WMi8r4NoCW5WnmNuHuSrvWQH2jJObBFtrM/wPp5ZNxQdCza/7BoAxB/raAlkfC011laoggilTRUW4+0EvkUeZ4xpaK61kGzeju7B7R71Bbg0ERXEFhoKjqjLwKhO2L1d8nFsRJ4/BNpOaTpMveInivCWBaxAuQc5Pqgt8F5GEfHlzB9uwy+vdc347TyVGr0O0c+waYFQivmagOxuxHshedjuTLoL+NuBad89KjDSZ5KRE+h0parfqPwsAbrizscnrSpeoNlwXxqwvUsXeszUVHY3AHpqvJScz73Qm6A6SkNQu4N3eqTvneE3z0nzBsHu5OHBC0mQYqA/znQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass\n smtp.mailfrom=corigine.com; dmarc=pass action=none header.from=corigine.com;\n dkim=pass header.d=corigine.com; arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=corigine.onmicrosoft.com; s=selector2-corigine-onmicrosoft-com;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=LZ3nUd8r92OW5f42ettDfbqsSuO0aBqmp+HA8rgHIjo=;\n b=ZJIkE6LqP32SjiuETWRZR1qz5dC791LhHgqzKjVW9HECxwqyyGaBXamIRB19uBi8HP7sK16/e9RqBCqabtPTUfRPCMNySPcIAyKGjkSTWm8F7mGmJmw2wwdwFTwMjT5UctxE9+cWkFVin3ucANAYgNWFzOd+f9Xn3itatrJNi5c=",
        "Authentication-Results": "dkim=none (message not signed)\n header.d=none;dmarc=none action=none header.from=corigine.com;",
        "From": "Chaoyong He <chaoyong.he@corigine.com>",
        "To": "dev@dpdk.org",
        "Cc": "oss-drivers@corigine.com, Chaoyong He <chaoyong.he@corigine.com>,\n Long Wu <long.wu@corigine.com>, Peng Zhang <peng.zhang@corigine.com>",
        "Subject": "[PATCH v3 06/11] net/nfp: standard the comment style",
        "Date": "Fri, 13 Oct 2023 14:06:48 +0800",
        "Message-Id": "<20231013060653.1006410-7-chaoyong.he@corigine.com>",
        "X-Mailer": "git-send-email 2.39.1",
        "In-Reply-To": "<20231013060653.1006410-1-chaoyong.he@corigine.com>",
        "References": "<20231012012704.483828-1-chaoyong.he@corigine.com>\n <20231013060653.1006410-1-chaoyong.he@corigine.com>",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-ClientProxiedBy": "SI2PR06CA0008.apcprd06.prod.outlook.com\n (2603:1096:4:186::23) To SJ0PR13MB5545.namprd13.prod.outlook.com\n (2603:10b6:a03:424::5)",
        "MIME-Version": "1.0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "SJ0PR13MB5545:EE_|PH7PR13MB6296:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "ba2f558d-d5ac-49cd-387d-08dbcbb2b2f7",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n E7mAtJgswEMZK6bHNCy/Kt00ULYVN0fg9vgqB7uJf+4l+/G5LuZVGQzGMEl3KoHyeBFiD9nW+ckdm6xKjx8hrpyhr77swKiDvdmbViouAc0Zj9hKF5Qdt7aYbxHE+ir2ubhxW1iNuN4cYds6YA3nQK6TOgwMUAmmnRZGWiMmdaYtqEc31AqWhdOhx9OHGT/+11aUi28J+1HSkYN6Mqy5i1lRLX1Jbz7FQKZ5BRaIrW74H5RSTqNhgUtM+g+noTBfR/AhAMPdi06OFAE9oJwdyDV5n1fEwRvLJlNLBa1iEWu5A2bR/qrIcKb/raumPaunkpJfRcIt3f018SJGNSBk24PtzaUyjxQJwrFJt0wUkFEmaQFY/I3iJZuXMXAacQhpBecRa6VsIL7Wc9jH80d/1/gFT0E3p5bNZTLzge4OIEcBziaWdRlt/ybcklWk4lMPAp144giK5YH9o4g6IglDnDHmTExIOKOygqCxW8wHIe7pcYeZBGvJ07BuzaYL/iOp16RVhCMevHpfZGILrGCA3suTg60xQuhKNynE/sh7nWBNRjcAf/1PywMmpXEmWPLHyAAvME/xF52A4ry1/cQLSKF9Cxr5hW6RSHvaI4uzXlUdVih38gfgnUskhh8ejRczZcf0e15fq17+Y6lkFDt8Af0MnfoHdI+wXzbxk7BZ2r4=",
        "X-Forefront-Antispam-Report": "CIP:255.255.255.255; CTRY:; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:SJ0PR13MB5545.namprd13.prod.outlook.com; PTR:; CAT:NONE;\n SFS:(13230031)(396003)(366004)(346002)(39830400003)(376002)(136003)(230922051799003)(186009)(1800799009)(64100799003)(451199024)(66899024)(30864003)(86362001)(36756003)(83380400001)(2906002)(38350700002)(38100700002)(44832011)(5660300002)(8676002)(8936002)(4326008)(6666004)(316002)(66556008)(54906003)(66946007)(66476007)(52116002)(6512007)(6916009)(6506007)(41300700001)(6486002)(107886003)(1076003)(26005)(478600001)(2616005)(559001)(579004);\n DIR:OUT; SFP:1102;",
        "X-MS-Exchange-AntiSpam-MessageData-ChunkCount": "1",
        "X-MS-Exchange-AntiSpam-MessageData-0": "\n o8vo4Fbe+xx25rwb8bKrA0U34gCixEEVvX6o4kfFCbvGa9lUmp3ezc0GClES/1p3OazvI7oyeQi5ns/U3LI6clamy3+Z/f4hl1GlsTMmX8QCy3ntEw1gJBskAGFL+fSiev+qt51kt7019fpgdBqnD/KL89L4jQQ/RVCxlwujNxfMIouvBPUeEJQDTP7MGfxv+dCPImQlaUTUDl07DL0pHT2bKYH1OdsZG9+TxteLoirBtGvQLkLxXgrPNyesu4H3D5F8JmlhuINyXo/B2uNQoEVwZpQj7GdEpqLTYqV/yWzUZKTUjA9vr1vMztwcZ3ICXRdqt4YEAG9GnkzVTCyd9uV30BrkbnQ203woChudqzREgdiNqTJChO7VryZVoK3uRYGEzI/kfFzW4gaafHQRtkPOSAUVX4JFCtscBrBHjR2j4zcqQzu7IDB92v1mMlWX8Adcg3tr+P1JlGeMqQ4h8iH6eE9W5V7ZicAoiWhl0jjxJO7ew2yZEUv7WXXtuuFMdlO/slwbjGwmTSdjoS91ZW7SQx5YIhfZ+OetyVcbG8OIgcTbczpUqO8YnvjXS2gzE4wzKdQJ+oxZgL0SNewRmiYngTZjoOSDVOs/hMi/jWBd2lAmB+inFJ0Qf2JKXHqbr+YWNO1t+j86dXLroRAMG76Wpk2rUwaXl5emAks06cobAe0Yz4BTFCYIOTrS2wF7fSaegmEoo3MIiGb7QFk5+uuQ7hbF9TH+3JSZy+l/xl4oDSXQBh50dCfZqgIVcRuL/s6T2Q+QKyC2ddS0Fh5nex8oDIe0eoov8DaWD2IiVF1PNtIKGdiWCQD/5bKtWmMZWdl/FMZnbT1Mhqpf6dm9wk6pJ/zlziJhARLL7P1X/yr83M3l2MXBuI0n51sKejrpar/NpyyS+udHKFfC5hCASm8Q7loaJomlfyPGq9Xbw0wBomBrD/0CF+jZ+5f/+vSeyebvh9fv1FdMaxRGFbkYKDONt6BRHn5FGMkZCxdvEcbtpCbFsaAhFQVFANVrXDopFUka/1gXWJ+la/PFohNRPDO2dna7AHLkQmNEMNYLaSd3ytc1tzBWP/BImlxsfAjd1VP3jz68q7+oRgdDCqzXMZ3oKZ4i1ujQMyFa7dKWFRY2zTLF15pW2MCNAXwpqgCtorZnggR2oYxwvbXRe2P3xO/eevU50+Yjfgts4yo292O297LHooUzqH53N5igY7SfjIjinIiReRR0tFNzQxZ1tOIuFNktJe9NUTZblo/4ZAq8Xz1PF54Hxc4vgyNyqV5ar8vOd13/UWgs46+V34NhOp4HudhmQVXFCg8OGA9jHyjv248SQrQ8ymYbiBqTnutnR/wW1owyBnkEePEfVP5AzoO5kzvsHx6QRQ6Hj8dx5urJRcmThmcKavQk1+TqEAAx3adzPI5QVH/dBwmX1mPbMEeHvbRpR3hrFLCRwKsFCj0lEybwZIScGjW07JIFBcavZinv9CJWmC9iiHW3v3Jm0wpMUH9c1DvNfzuWRtOzdnFJImqSwpOYH+7C/1NKZMpB+ANa3VYJitCk3G5sCa5s5hfDiE3U7EjMLGsfsYtfd9frnfIg34FM8n979b9BtpDzkenBpISOri+20FBl00UCFA==",
        "X-OriginatorOrg": "corigine.com",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n ba2f558d-d5ac-49cd-387d-08dbcbb2b2f7",
        "X-MS-Exchange-CrossTenant-AuthSource": "SJ0PR13MB5545.namprd13.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Internal",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "13 Oct 2023 06:07:37.4379 (UTC)",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "Hosted",
        "X-MS-Exchange-CrossTenant-Id": "fe128f2c-073b-4c20-818e-7246a585940c",
        "X-MS-Exchange-CrossTenant-MailboxType": "HOSTED",
        "X-MS-Exchange-CrossTenant-UserPrincipalName": "\n Vi4Kd0DELSVut66EhqR6pbAHF9LifLLy7GM9dnW4PEX6WuvC15mjr2Kb7khe2G8DSeLayqDKFqFks8BUEQ17de2aYo3BTMhmxPs4dht7q0U=",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PH7PR13MB6296",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Follow the DPDK coding style, use the kdoc comment style.\nAlso delete some comment which are not valid anymore and add some\ncomment to help understand logic.\n\nSigned-off-by: Chaoyong He <chaoyong.he@corigine.com>\nReviewed-by: Long Wu <long.wu@corigine.com>\nReviewed-by: Peng Zhang <peng.zhang@corigine.com>\n---\n drivers/net/nfp/flower/nfp_conntrack.c        |   4 +-\n drivers/net/nfp/flower/nfp_flower.c           |  10 +-\n drivers/net/nfp/flower/nfp_flower.h           |  28 ++--\n drivers/net/nfp/flower/nfp_flower_cmsg.c      |   2 +-\n drivers/net/nfp/flower/nfp_flower_cmsg.h      |  56 +++----\n drivers/net/nfp/flower/nfp_flower_ctrl.c      |  16 +-\n .../net/nfp/flower/nfp_flower_representor.c   |  42 +++--\n .../net/nfp/flower/nfp_flower_representor.h   |   2 +-\n drivers/net/nfp/nfd3/nfp_nfd3.h               |  33 ++--\n drivers/net/nfp/nfd3/nfp_nfd3_dp.c            |  24 ++-\n drivers/net/nfp/nfdk/nfp_nfdk.h               |  41 ++---\n drivers/net/nfp/nfdk/nfp_nfdk_dp.c            |   8 +-\n drivers/net/nfp/nfp_common.c                  | 152 ++++++++----------\n drivers/net/nfp/nfp_common.h                  |  61 +++----\n drivers/net/nfp/nfp_cpp_bridge.c              |   6 +-\n drivers/net/nfp/nfp_ctrl.h                    |  34 ++--\n drivers/net/nfp/nfp_ethdev.c                  |  40 +++--\n drivers/net/nfp/nfp_ethdev_vf.c               |  15 +-\n drivers/net/nfp/nfp_flow.c                    |  62 +++----\n drivers/net/nfp/nfp_flow.h                    |  10 +-\n drivers/net/nfp/nfp_ipsec.h                   |  12 +-\n drivers/net/nfp/nfp_rxtx.c                    | 125 ++++++--------\n drivers/net/nfp/nfp_rxtx.h                    |  18 +--\n 23 files changed, 354 insertions(+), 447 deletions(-)",
    "diff": "diff --git a/drivers/net/nfp/flower/nfp_conntrack.c b/drivers/net/nfp/flower/nfp_conntrack.c\nindex 7b84b12546..f89003be8b 100644\n--- a/drivers/net/nfp/flower/nfp_conntrack.c\n+++ b/drivers/net/nfp/flower/nfp_conntrack.c\n@@ -667,8 +667,8 @@ nfp_ct_flow_entry_get(struct nfp_ct_zone_entry *ze,\n {\n \tbool ret;\n \tuint8_t loop;\n-\tuint8_t item_cnt = 1;      /* the RTE_FLOW_ITEM_TYPE_END */\n-\tuint8_t action_cnt = 1;    /* the RTE_FLOW_ACTION_TYPE_END */\n+\tuint8_t item_cnt = 1;      /* The RTE_FLOW_ITEM_TYPE_END */\n+\tuint8_t action_cnt = 1;    /* The RTE_FLOW_ACTION_TYPE_END */\n \tstruct nfp_flow_priv *priv;\n \tstruct nfp_ct_map_entry *me;\n \tstruct nfp_ct_flow_entry *fe;\ndiff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c\nindex 7a4e671178..4453ae7b5e 100644\n--- a/drivers/net/nfp/flower/nfp_flower.c\n+++ b/drivers/net/nfp/flower/nfp_flower.c\n@@ -208,7 +208,7 @@ nfp_flower_pf_close(struct rte_eth_dev *dev)\n \t\tnfp_net_reset_rx_queue(this_rx_q);\n \t}\n \n-\t/* Cancel possible impending LSC work here before releasing the port*/\n+\t/* Cancel possible impending LSC work here before releasing the port */\n \trte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);\n \n \tnn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);\n@@ -488,7 +488,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)\n \n \t\t/*\n \t\t * Tracking mbuf size for detecting a potential mbuf overflow due to\n-\t\t * RX offset\n+\t\t * RX offset.\n \t\t */\n \t\trxq->mem_pool = mp;\n \t\trxq->mbuf_size = rxq->mem_pool->elt_size;\n@@ -535,7 +535,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)\n \n \t\t/*\n \t\t * Telling the HW about the physical address of the RX ring and number\n-\t\t * of descriptors in log2 format\n+\t\t * of descriptors in log2 format.\n \t\t */\n \t\tnn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(i), rxq->dma);\n \t\tnn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC));\n@@ -600,7 +600,7 @@ 
nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)\n \n \t\t/*\n \t\t * Telling the HW about the physical address of the TX ring and number\n-\t\t * of descriptors in log2 format\n+\t\t * of descriptors in log2 format.\n \t\t */\n \t\tnn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(i), txq->dma);\n \t\tnn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC));\n@@ -758,7 +758,7 @@ nfp_flower_enable_services(struct nfp_app_fw_flower *app_fw_flower)\n \tapp_fw_flower->ctrl_vnic_id = service_id;\n \tPMD_INIT_LOG(INFO, \"%s registered\", flower_service.name);\n \n-\t/* Map them to available service cores*/\n+\t/* Map them to available service cores */\n \tret = nfp_map_service(service_id);\n \tif (ret != 0) {\n \t\tPMD_INIT_LOG(ERR, \"Could not map %s\", flower_service.name);\ndiff --git a/drivers/net/nfp/flower/nfp_flower.h b/drivers/net/nfp/flower/nfp_flower.h\nindex 244b6daa37..0b4e38cedd 100644\n--- a/drivers/net/nfp/flower/nfp_flower.h\n+++ b/drivers/net/nfp/flower/nfp_flower.h\n@@ -53,49 +53,49 @@ struct nfp_flower_nfd_func {\n \n /* The flower application's private structure */\n struct nfp_app_fw_flower {\n-\t/* switch domain for this app */\n+\t/** Switch domain for this app */\n \tuint16_t switch_domain_id;\n \n-\t/* Number of VF representors */\n+\t/** Number of VF representors */\n \tuint8_t num_vf_reprs;\n \n-\t/* Number of phyport representors */\n+\t/** Number of phyport representors */\n \tuint8_t num_phyport_reprs;\n \n-\t/* Pointer to the PF vNIC */\n+\t/** Pointer to the PF vNIC */\n \tstruct nfp_net_hw *pf_hw;\n \n-\t/* Pointer to a mempool for the ctrlvNIC */\n+\t/** Pointer to a mempool for the Ctrl vNIC */\n \tstruct rte_mempool *ctrl_pktmbuf_pool;\n \n-\t/* Pointer to the ctrl vNIC */\n+\t/** Pointer to the ctrl vNIC */\n \tstruct nfp_net_hw *ctrl_hw;\n \n-\t/* Ctrl vNIC Rx counter */\n+\t/** Ctrl vNIC Rx counter */\n \tuint64_t ctrl_vnic_rx_count;\n \n-\t/* Ctrl vNIC Tx counter */\n+\t/** Ctrl vNIC Tx counter */\n \tuint64_t 
ctrl_vnic_tx_count;\n \n-\t/* Array of phyport representors */\n+\t/** Array of phyport representors */\n \tstruct nfp_flower_representor *phy_reprs[MAX_FLOWER_PHYPORTS];\n \n-\t/* Array of VF representors */\n+\t/** Array of VF representors */\n \tstruct nfp_flower_representor *vf_reprs[MAX_FLOWER_VFS];\n \n-\t/* PF representor */\n+\t/** PF representor */\n \tstruct nfp_flower_representor *pf_repr;\n \n-\t/* service id of ctrl vnic service */\n+\t/** Service id of Ctrl vNIC service */\n \tuint32_t ctrl_vnic_id;\n \n-\t/* Flower extra features */\n+\t/** Flower extra features */\n \tuint64_t ext_features;\n \n \tstruct nfp_flow_priv *flow_priv;\n \tstruct nfp_mtr_priv *mtr_priv;\n \n-\t/* Function pointers for different NFD version */\n+\t/** Function pointers for different NFD version */\n \tstruct nfp_flower_nfd_func nfd_func;\n };\n \ndiff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.c b/drivers/net/nfp/flower/nfp_flower_cmsg.c\nindex 5d6912b079..2ec9498d22 100644\n--- a/drivers/net/nfp/flower/nfp_flower_cmsg.c\n+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.c\n@@ -230,7 +230,7 @@ nfp_flower_cmsg_flow_add(struct nfp_app_fw_flower *app_fw_flower,\n \t\treturn -ENOMEM;\n \t}\n \n-\t/* copy the flow to mbuf */\n+\t/* Copy the flow to mbuf */\n \tnfp_flow_meta = flow->payload.meta;\n \tmsg_len = (nfp_flow_meta->key_len + nfp_flow_meta->mask_len +\n \t\t\tnfp_flow_meta->act_len) << NFP_FL_LW_SIZ;\ndiff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h\nindex 9449760145..cb019171b6 100644\n--- a/drivers/net/nfp/flower/nfp_flower_cmsg.h\n+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h\n@@ -348,7 +348,7 @@ struct nfp_flower_stats_frame {\n \trte_be64_t stats_cookie;\n };\n \n-/**\n+/*\n  * See RFC 2698 for more details.\n  * Word[0](Flag options):\n  * [15] p(pps) 1 for pps, 0 for bps\n@@ -378,40 +378,24 @@ struct nfp_cfg_head {\n \trte_be32_t profile_id;\n };\n \n-/**\n- * Struct nfp_profile_conf - profile config, offload to 
NIC\n- * @head:        config head information\n- * @bkt_tkn_p:   token bucket peak\n- * @bkt_tkn_c:   token bucket committed\n- * @pbs:         peak burst size\n- * @cbs:         committed burst size\n- * @pir:         peak information rate\n- * @cir:         committed information rate\n- */\n+/* Profile config, offload to NIC */\n struct nfp_profile_conf {\n-\tstruct nfp_cfg_head head;\n-\trte_be32_t bkt_tkn_p;\n-\trte_be32_t bkt_tkn_c;\n-\trte_be32_t pbs;\n-\trte_be32_t cbs;\n-\trte_be32_t pir;\n-\trte_be32_t cir;\n-};\n-\n-/**\n- * Struct nfp_mtr_stats_reply - meter stats, read from firmware\n- * @head:          config head information\n- * @pass_bytes:    count of passed bytes\n- * @pass_pkts:     count of passed packets\n- * @drop_bytes:    count of dropped bytes\n- * @drop_pkts:     count of dropped packets\n- */\n+\tstruct nfp_cfg_head head;    /**< Config head information */\n+\trte_be32_t bkt_tkn_p;        /**< Token bucket peak */\n+\trte_be32_t bkt_tkn_c;        /**< Token bucket committed */\n+\trte_be32_t pbs;              /**< Peak burst size */\n+\trte_be32_t cbs;              /**< Committed burst size */\n+\trte_be32_t pir;              /**< Peak information rate */\n+\trte_be32_t cir;              /**< Committed information rate */\n+};\n+\n+/* Meter stats, read from firmware */\n struct nfp_mtr_stats_reply {\n-\tstruct nfp_cfg_head head;\n-\trte_be64_t pass_bytes;\n-\trte_be64_t pass_pkts;\n-\trte_be64_t drop_bytes;\n-\trte_be64_t drop_pkts;\n+\tstruct nfp_cfg_head head;    /**< Config head information */\n+\trte_be64_t pass_bytes;       /**< Count of passed bytes */\n+\trte_be64_t pass_pkts;        /**< Count of passed packets */\n+\trte_be64_t drop_bytes;       /**< Count of dropped bytes */\n+\trte_be64_t drop_pkts;        /**< Count of dropped packets */\n };\n \n enum nfp_flower_cmsg_port_type {\n@@ -851,7 +835,7 @@ struct nfp_fl_act_set_ipv6_addr {\n };\n \n /*\n- * ipv6 tc hl fl\n+ * Ipv6 tc hl fl\n  *    3                   2              
     1\n  *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0\n  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n@@ -954,9 +938,9 @@ struct nfp_fl_act_set_tun {\n \tuint8_t    tos;\n \trte_be16_t outer_vlan_tpid;\n \trte_be16_t outer_vlan_tci;\n-\tuint8_t    tun_len;      /* Only valid for NFP_FL_TUNNEL_GENEVE */\n+\tuint8_t    tun_len;      /**< Only valid for NFP_FL_TUNNEL_GENEVE */\n \tuint8_t    reserved2;\n-\trte_be16_t tun_proto;    /* Only valid for NFP_FL_TUNNEL_GENEVE */\n+\trte_be16_t tun_proto;    /**< Only valid for NFP_FL_TUNNEL_GENEVE */\n } __rte_packed;\n \n /*\ndiff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c\nindex d1c350ae93..b4be28ccdf 100644\n--- a/drivers/net/nfp/flower/nfp_flower_ctrl.c\n+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c\n@@ -34,7 +34,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,\n \tif (unlikely(rxq == NULL)) {\n \t\t/*\n \t\t * DPDK just checks the queue is lower than max queues\n-\t\t * enabled. But the queue needs to be configured\n+\t\t * enabled. But the queue needs to be configured.\n \t\t */\n \t\tPMD_RX_LOG(ERR, \"RX Bad queue\");\n \t\treturn 0;\n@@ -60,7 +60,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,\n \n \t\t/*\n \t\t * We got a packet. Let's alloc a new mbuf for refilling the\n-\t\t * free descriptor ring as soon as possible\n+\t\t * free descriptor ring as soon as possible.\n \t\t */\n \t\tnew_mb = rte_pktmbuf_alloc(rxq->mem_pool);\n \t\tif (unlikely(new_mb == NULL)) {\n@@ -72,7 +72,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,\n \n \t\t/*\n \t\t * Grab the mbuf and refill the descriptor with the\n-\t\t * previously allocated mbuf\n+\t\t * previously allocated mbuf.\n \t\t */\n \t\tmb = rxb->mbuf;\n \t\trxb->mbuf = new_mb;\n@@ -86,7 +86,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,\n \t\t\t/*\n \t\t\t * This should not happen and the user has the\n \t\t\t * responsibility of avoiding it. 
But we have\n-\t\t\t * to give some info about the error\n+\t\t\t * to give some info about the error.\n \t\t\t */\n \t\t\tPMD_RX_LOG(ERR, \"mbuf overflow likely due to the RX offset.\");\n \t\t\trte_pktmbuf_free(mb);\n@@ -116,7 +116,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,\n \t\tnb_hold++;\n \n \t\trxq->rd_p++;\n-\t\tif (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/\n+\t\tif (unlikely(rxq->rd_p == rxq->rx_count)) /* Wrapping */\n \t\t\trxq->rd_p = 0;\n \t}\n \n@@ -163,7 +163,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower,\n \tif (unlikely(txq == NULL)) {\n \t\t/*\n \t\t * DPDK just checks the queue is lower than max queues\n-\t\t * enabled. But the queue needs to be configured\n+\t\t * enabled. But the queue needs to be configured.\n \t\t */\n \t\tPMD_TX_LOG(ERR, \"ctrl dev TX Bad queue\");\n \t\tgoto xmit_end;\n@@ -199,7 +199,7 @@ nfp_flower_ctrl_vnic_nfd3_xmit(struct nfp_app_fw_flower *app_fw_flower,\n \ttxds->offset_eop = FLOWER_PKT_DATA_OFFSET | NFD3_DESC_TX_EOP;\n \n \ttxq->wr_p++;\n-\tif (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/\n+\tif (unlikely(txq->wr_p == txq->tx_count)) /* Wrapping */\n \t\ttxq->wr_p = 0;\n \n \tcnt++;\n@@ -513,7 +513,7 @@ nfp_flower_ctrl_vnic_poll(struct nfp_app_fw_flower *app_fw_flower)\n \tctrl_hw = app_fw_flower->ctrl_hw;\n \tctrl_eth_dev = ctrl_hw->eth_dev;\n \n-\t/* ctrl vNIC only has a single Rx queue */\n+\t/* Ctrl vNIC only has a single Rx queue */\n \trxq = ctrl_eth_dev->data->rx_queues[0];\n \n \twhile (rte_service_runstate_get(app_fw_flower->ctrl_vnic_id) != 0) {\ndiff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c\nindex bf794a1d70..90f8ccba71 100644\n--- a/drivers/net/nfp/flower/nfp_flower_representor.c\n+++ b/drivers/net/nfp/flower/nfp_flower_representor.c\n@@ -10,18 +10,12 @@\n #include \"../nfp_logs.h\"\n #include \"../nfp_mtr.h\"\n \n-/*\n- * enum nfp_repr_type - type of representor\n- * 
@NFP_REPR_TYPE_PHYS_PORT:   external NIC port\n- * @NFP_REPR_TYPE_PF:          physical function\n- * @NFP_REPR_TYPE_VF:          virtual function\n- * @NFP_REPR_TYPE_MAX:         number of representor types\n- */\n+/* Type of representor */\n enum nfp_repr_type {\n-\tNFP_REPR_TYPE_PHYS_PORT,\n-\tNFP_REPR_TYPE_PF,\n-\tNFP_REPR_TYPE_VF,\n-\tNFP_REPR_TYPE_MAX,\n+\tNFP_REPR_TYPE_PHYS_PORT,    /**< External NIC port */\n+\tNFP_REPR_TYPE_PF,           /**< Physical function */\n+\tNFP_REPR_TYPE_VF,           /**< Virtual function */\n+\tNFP_REPR_TYPE_MAX,          /**< Number of representor types */\n };\n \n static int\n@@ -55,7 +49,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,\n \n \t/*\n \t * Tracking mbuf size for detecting a potential mbuf overflow due to\n-\t * RX offset\n+\t * RX offset.\n \t */\n \trxq->mem_pool = mp;\n \trxq->mbuf_size = rxq->mem_pool->elt_size;\n@@ -86,7 +80,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->dma = (uint64_t)tz->iova;\n \trxq->rxds = tz->addr;\n \n-\t/* mbuf pointers array for referencing mbufs linked to RX descriptors */\n+\t/* Mbuf pointers array for referencing mbufs linked to RX descriptors */\n \trxq->rxbufs = rte_zmalloc_socket(\"rxq->rxbufs\",\n \t\t\tsizeof(*rxq->rxbufs) * nb_desc,\n \t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n@@ -101,7 +95,7 @@ nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,\n \n \t/*\n \t * Telling the HW about the physical address of the RX ring and number\n-\t * of descriptors in log2 format\n+\t * of descriptors in log2 format.\n \t */\n \tnn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);\n \tnn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));\n@@ -159,7 +153,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->tx_count = nb_desc;\n \ttxq->tx_free_thresh = tx_free_thresh;\n \n-\t/* queue mapping based on firmware configuration */\n+\t/* Queue mapping based on firmware configuration */\n \ttxq->qidx = queue_idx;\n 
\ttxq->tx_qcidx = queue_idx * hw->stride_tx;\n \ttxq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);\n@@ -170,7 +164,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->dma = (uint64_t)tz->iova;\n \ttxq->txds = tz->addr;\n \n-\t/* mbuf pointers array for referencing mbufs linked to TX descriptors */\n+\t/* Mbuf pointers array for referencing mbufs linked to TX descriptors */\n \ttxq->txbufs = rte_zmalloc_socket(\"txq->txbufs\",\n \t\t\tsizeof(*txq->txbufs) * nb_desc,\n \t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n@@ -185,7 +179,7 @@ nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,\n \n \t/*\n \t * Telling the HW about the physical address of the TX ring and number\n-\t * of descriptors in log2 format\n+\t * of descriptors in log2 format.\n \t */\n \tnn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);\n \tnn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));\n@@ -603,7 +597,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev,\n \t/* Memory has been allocated in the eth_dev_create() function */\n \trepr = eth_dev->data->dev_private;\n \n-\t/* Copy data here from the input representor template*/\n+\t/* Copy data here from the input representor template */\n \trepr->vf_id            = init_repr_data->vf_id;\n \trepr->switch_domain_id = init_repr_data->switch_domain_id;\n \trepr->repr_type        = init_repr_data->repr_type;\n@@ -673,7 +667,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev,\n \t\treturn -ENOMEM;\n \t}\n \n-\t/* Copy data here from the input representor template*/\n+\t/* Copy data here from the input representor template */\n \trepr->vf_id            = init_repr_data->vf_id;\n \trepr->switch_domain_id = init_repr_data->switch_domain_id;\n \trepr->port_id          = init_repr_data->port_id;\n@@ -756,7 +750,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)\n \tnfp_eth_table = app_fw_flower->pf_hw->pf_dev->nfp_eth_table;\n \teth_dev = app_fw_flower->ctrl_hw->eth_dev;\n \n-\t/* Send 
a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware*/\n+\t/* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware */\n \tret = nfp_flower_cmsg_mac_repr(app_fw_flower);\n \tif (ret != 0) {\n \t\tPMD_INIT_LOG(ERR, \"Cloud not send mac repr cmsgs\");\n@@ -799,8 +793,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)\n \t\t\t\t\"%s_repr_p%d\", pci_name, i);\n \n \t\t/*\n-\t\t * Create a eth_dev for this representor\n-\t\t * This will also allocate private memory for the device\n+\t\t * Create a eth_dev for this representor.\n+\t\t * This will also allocate private memory for the device.\n \t\t */\n \t\tret = rte_eth_dev_create(eth_dev->device, flower_repr.name,\n \t\t\t\tsizeof(struct nfp_flower_representor),\n@@ -816,7 +810,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)\n \n \t/*\n \t * Now allocate eth_dev's for VF representors.\n-\t * Also send reify messages\n+\t * Also send reify messages.\n \t */\n \tfor (i = 0; i < app_fw_flower->num_vf_reprs; i++) {\n \t\tflower_repr.repr_type = NFP_REPR_TYPE_VF;\n@@ -830,7 +824,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)\n \t\tsnprintf(flower_repr.name, sizeof(flower_repr.name),\n \t\t\t\t\"%s_repr_vf%d\", pci_name, i);\n \n-\t\t/* This will also allocate private memory for the device*/\n+\t\t/* This will also allocate private memory for the device */\n \t\tret = rte_eth_dev_create(eth_dev->device, flower_repr.name,\n \t\t\t\tsizeof(struct nfp_flower_representor),\n \t\t\t\tNULL, NULL, nfp_flower_repr_init, &flower_repr);\ndiff --git a/drivers/net/nfp/flower/nfp_flower_representor.h b/drivers/net/nfp/flower/nfp_flower_representor.h\nindex 5ac5e38186..eda19cbb16 100644\n--- a/drivers/net/nfp/flower/nfp_flower_representor.h\n+++ b/drivers/net/nfp/flower/nfp_flower_representor.h\n@@ -13,7 +13,7 @@ struct nfp_flower_representor {\n \tuint16_t switch_domain_id;\n \tuint32_t repr_type;\n \tuint32_t port_id;\n-\tuint32_t nfp_idx;    /* only valid for the repr of physical 
port */\n+\tuint32_t nfp_idx;    /**< Only valid for the repr of physical port */\n \tchar name[RTE_ETH_NAME_MAX_LEN];\n \tstruct rte_ether_addr mac_addr;\n \tstruct nfp_app_fw_flower *app_fw_flower;\ndiff --git a/drivers/net/nfp/nfd3/nfp_nfd3.h b/drivers/net/nfp/nfd3/nfp_nfd3.h\nindex 7c56ca4908..0b0ca361f4 100644\n--- a/drivers/net/nfp/nfd3/nfp_nfd3.h\n+++ b/drivers/net/nfp/nfd3/nfp_nfd3.h\n@@ -17,24 +17,24 @@\n struct nfp_net_nfd3_tx_desc {\n \tunion {\n \t\tstruct {\n-\t\t\tuint8_t dma_addr_hi; /* High bits of host buf address */\n-\t\t\tuint16_t dma_len;    /* Length to DMA for this desc */\n-\t\t\t/* Offset in buf where pkt starts + highest bit is eop flag */\n+\t\t\tuint8_t dma_addr_hi; /**< High bits of host buf address */\n+\t\t\tuint16_t dma_len;    /**< Length to DMA for this desc */\n+\t\t\t/** Offset in buf where pkt starts + highest bit is eop flag */\n \t\t\tuint8_t offset_eop;\n-\t\t\tuint32_t dma_addr_lo; /* Low 32bit of host buf addr */\n+\t\t\tuint32_t dma_addr_lo; /**< Low 32bit of host buf addr */\n \n-\t\t\tuint16_t mss;         /* MSS to be used for LSO */\n-\t\t\tuint8_t lso_hdrlen;   /* LSO, where the data starts */\n-\t\t\tuint8_t flags;        /* TX Flags, see @NFD3_DESC_TX_* */\n+\t\t\tuint16_t mss;         /**< MSS to be used for LSO */\n+\t\t\tuint8_t lso_hdrlen;   /**< LSO, where the data starts */\n+\t\t\tuint8_t flags;        /**< TX Flags, see @NFD3_DESC_TX_* */\n \n \t\t\tunion {\n \t\t\t\tstruct {\n-\t\t\t\t\tuint8_t l3_offset; /* L3 header offset */\n-\t\t\t\t\tuint8_t l4_offset; /* L4 header offset */\n+\t\t\t\t\tuint8_t l3_offset; /**< L3 header offset */\n+\t\t\t\t\tuint8_t l4_offset; /**< L4 header offset */\n \t\t\t\t};\n-\t\t\t\tuint16_t vlan; /* VLAN tag to add if indicated */\n+\t\t\t\tuint16_t vlan; /**< VLAN tag to add if indicated */\n \t\t\t};\n-\t\t\tuint16_t data_len;     /* Length of frame + meta data */\n+\t\t\tuint16_t data_len;     /**< Length of frame + meta data */\n \t\t} __rte_packed;\n \t\tuint32_t 
vals[4];\n \t};\n@@ -54,13 +54,14 @@ nfp_net_nfd3_free_tx_desc(struct nfp_net_txq *txq)\n \treturn (free_desc > 8) ? (free_desc - 8) : 0;\n }\n \n-/*\n- * nfp_net_nfd3_txq_full() - Check if the TX queue free descriptors\n- * is below tx_free_threshold for firmware of nfd3\n- *\n- * @txq: TX queue to check\n+/**\n+ * Check if the TX queue free descriptors is below tx_free_threshold\n+ * for firmware with nfd3\n  *\n  * This function uses the host copy* of read/write pointers.\n+ *\n+ * @param txq\n+ *   TX queue to check\n  */\n static inline bool\n nfp_net_nfd3_txq_full(struct nfp_net_txq *txq)\ndiff --git a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c\nindex 51755f4324..4df2c5d4d2 100644\n--- a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c\n+++ b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c\n@@ -113,14 +113,12 @@ nfp_flower_nfd3_pkt_add_metadata(struct rte_mbuf *mbuf,\n }\n \n /*\n- * nfp_net_nfd3_tx_vlan() - Set vlan info in the nfd3 tx desc\n+ * Set vlan info in the nfd3 tx desc\n  *\n  * If enable NFP_NET_CFG_CTRL_TXVLAN_V2\n- *\tVlan_info is stored in the meta and\n- *\tis handled in the nfp_net_nfd3_set_meta_vlan()\n+ *   Vlan_info is stored in the meta and is handled in the @nfp_net_nfd3_set_meta_vlan()\n  * else if enable NFP_NET_CFG_CTRL_TXVLAN\n- *\tVlan_info is stored in the tx_desc and\n- *\tis handled in the nfp_net_nfd3_tx_vlan()\n+ *   Vlan_info is stored in the tx_desc and is handled in the @nfp_net_nfd3_tx_vlan()\n  */\n static inline void\n nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq,\n@@ -299,9 +297,9 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,\n \t\tnfp_net_nfd3_tx_vlan(txq, &txd, pkt);\n \n \t\t/*\n-\t\t * mbuf data_len is the data in one segment and pkt_len data\n+\t\t * Mbuf data_len is the data in one segment and pkt_len data\n \t\t * in the whole packet. 
When the packet is just one segment,\n-\t\t * then data_len = pkt_len\n+\t\t * then data_len = pkt_len.\n \t\t */\n \t\tpkt_size = pkt->pkt_len;\n \n@@ -315,7 +313,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,\n \n \t\t\t/*\n \t\t\t * Linking mbuf with descriptor for being released\n-\t\t\t * next time descriptor is used\n+\t\t\t * next time descriptor is used.\n \t\t\t */\n \t\t\t*lmbuf = pkt;\n \n@@ -330,14 +328,14 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,\n \t\t\tfree_descs--;\n \n \t\t\ttxq->wr_p++;\n-\t\t\tif (unlikely(txq->wr_p == txq->tx_count)) /* wrapping */\n+\t\t\tif (unlikely(txq->wr_p == txq->tx_count)) /* Wrapping */\n \t\t\t\ttxq->wr_p = 0;\n \n \t\t\tpkt_size -= dma_size;\n \n \t\t\t/*\n \t\t\t * Making the EOP, packets with just one segment\n-\t\t\t * the priority\n+\t\t\t * the priority.\n \t\t\t */\n \t\t\tif (likely(pkt_size == 0))\n \t\t\t\ttxds->offset_eop = NFD3_DESC_TX_EOP;\n@@ -439,7 +437,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->tx_count = nb_desc * NFD3_TX_DESC_PER_PKT;\n \ttxq->tx_free_thresh = tx_free_thresh;\n \n-\t/* queue mapping based on firmware configuration */\n+\t/* Queue mapping based on firmware configuration */\n \ttxq->qidx = queue_idx;\n \ttxq->tx_qcidx = queue_idx * hw->stride_tx;\n \ttxq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);\n@@ -449,7 +447,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->dma = tz->iova;\n \ttxq->txds = tz->addr;\n \n-\t/* mbuf pointers array for referencing mbufs linked to TX descriptors */\n+\t/* Mbuf pointers array for referencing mbufs linked to TX descriptors */\n \ttxq->txbufs = rte_zmalloc_socket(\"txq->txbufs\",\n \t\t\tsizeof(*txq->txbufs) * txq->tx_count,\n \t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n@@ -465,7 +463,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,\n \n \t/*\n \t * Telling the HW about the physical address of the TX ring and number\n-\t * of descriptors in log2 format\n+\t * of descriptors in log2 
format.\n \t */\n \tnn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);\n \tnn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));\ndiff --git a/drivers/net/nfp/nfdk/nfp_nfdk.h b/drivers/net/nfp/nfdk/nfp_nfdk.h\nindex 99675b6bd7..04bd3c7600 100644\n--- a/drivers/net/nfp/nfdk/nfp_nfdk.h\n+++ b/drivers/net/nfp/nfdk/nfp_nfdk.h\n@@ -75,7 +75,7 @@\n  * dma_addr_hi - bits [47:32] of host memory address\n  * dma_addr_lo - bits [31:0] of host memory address\n  *\n- * --> metadata descriptor\n+ * --> Metadata descriptor\n  * Bit     3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0\n  * -----\\  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0\n  * Word   +-------+-----------------------+---------------------+---+-----+\n@@ -104,27 +104,27 @@\n  */\n struct nfp_net_nfdk_tx_desc {\n \tunion {\n-\t\t/* Address descriptor */\n+\t\t/** Address descriptor */\n \t\tstruct {\n-\t\t\tuint16_t dma_addr_hi;  /* High bits of host buf address */\n-\t\t\tuint16_t dma_len_type; /* Length to DMA for this desc */\n-\t\t\tuint32_t dma_addr_lo;  /* Low 32bit of host buf addr */\n+\t\t\tuint16_t dma_addr_hi;  /**< High bits of host buf address */\n+\t\t\tuint16_t dma_len_type; /**< Length to DMA for this desc */\n+\t\t\tuint32_t dma_addr_lo;  /**< Low 32bit of host buf addr */\n \t\t};\n \n-\t\t/* TSO descriptor */\n+\t\t/** TSO descriptor */\n \t\tstruct {\n-\t\t\tuint16_t mss;          /* MSS to be used for LSO */\n-\t\t\tuint8_t lso_hdrlen;    /* LSO, TCP payload offset */\n-\t\t\tuint8_t lso_totsegs;   /* LSO, total segments */\n-\t\t\tuint8_t l3_offset;     /* L3 header offset */\n-\t\t\tuint8_t l4_offset;     /* L4 header offset */\n-\t\t\tuint16_t lso_meta_res; /* Rsvd bits in TSO metadata */\n+\t\t\tuint16_t mss;          /**< MSS to be used for LSO */\n+\t\t\tuint8_t lso_hdrlen;    /**< LSO, TCP payload offset */\n+\t\t\tuint8_t lso_totsegs;   /**< LSO, total segments */\n+\t\t\tuint8_t l3_offset;     /**< L3 header 
offset */\n+\t\t\tuint8_t l4_offset;     /**< L4 header offset */\n+\t\t\tuint16_t lso_meta_res; /**< Rsvd bits in TSO metadata */\n \t\t};\n \n-\t\t/* Metadata descriptor */\n+\t\t/** Metadata descriptor */\n \t\tstruct {\n-\t\t\tuint8_t flags;         /* TX Flags, see @NFDK_DESC_TX_* */\n-\t\t\tuint8_t reserved[7];   /* meta byte placeholder */\n+\t\t\tuint8_t flags;         /**< TX Flags, see @NFDK_DESC_TX_* */\n+\t\t\tuint8_t reserved[7];   /**< Meta byte placeholder */\n \t\t};\n \n \t\tuint32_t vals[2];\n@@ -146,13 +146,14 @@ nfp_net_nfdk_free_tx_desc(struct nfp_net_txq *txq)\n \t\t\t(free_desc - NFDK_TX_DESC_STOP_CNT) : 0;\n }\n \n-/*\n- * nfp_net_nfdk_txq_full() - Check if the TX queue free descriptors\n- * is below tx_free_threshold for firmware of nfdk\n- *\n- * @txq: TX queue to check\n+/**\n+ * Check if the TX queue free descriptors is below tx_free_threshold\n+ * for firmware of nfdk\n  *\n  * This function uses the host copy* of read/write pointers.\n+ *\n+ * @param txq\n+ *   TX queue to check\n  */\n static inline bool\n nfp_net_nfdk_txq_full(struct nfp_net_txq *txq)\ndiff --git a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c\nindex dae87ac6df..1289ba1d65 100644\n--- a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c\n+++ b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c\n@@ -478,7 +478,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,\n \n \t/*\n \t * Free memory prior to re-allocation if needed. 
This is the case after\n-\t * calling nfp_net_stop\n+\t * calling nfp_net_stop().\n \t */\n \tif (dev->data->tx_queues[queue_idx] != NULL) {\n \t\tPMD_TX_LOG(DEBUG, \"Freeing memory prior to re-allocation %d\",\n@@ -513,7 +513,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->tx_count = nb_desc * NFDK_TX_DESC_PER_SIMPLE_PKT;\n \ttxq->tx_free_thresh = tx_free_thresh;\n \n-\t/* queue mapping based on firmware configuration */\n+\t/* Queue mapping based on firmware configuration */\n \ttxq->qidx = queue_idx;\n \ttxq->tx_qcidx = queue_idx * hw->stride_tx;\n \ttxq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);\n@@ -523,7 +523,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->dma = tz->iova;\n \ttxq->ktxds = tz->addr;\n \n-\t/* mbuf pointers array for referencing mbufs linked to TX descriptors */\n+\t/* Mbuf pointers array for referencing mbufs linked to TX descriptors */\n \ttxq->txbufs = rte_zmalloc_socket(\"txq->txbufs\",\n \t\t\tsizeof(*txq->txbufs) * txq->tx_count,\n \t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n@@ -539,7 +539,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,\n \n \t/*\n \t * Telling the HW about the physical address of the TX ring and number\n-\t * of descriptors in log2 format\n+\t * of descriptors in log2 format.\n \t */\n \tnn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);\n \tnn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));\ndiff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c\nindex f48e1930dc..130f004b4d 100644\n--- a/drivers/net/nfp/nfp_common.c\n+++ b/drivers/net/nfp/nfp_common.c\n@@ -55,7 +55,7 @@ struct nfp_xstat {\n }\n \n static const struct nfp_xstat nfp_net_xstats[] = {\n-\t/**\n+\t/*\n \t * Basic xstats available on both VF and PF.\n \t * Note that in case new statistics of group NFP_XSTAT_GROUP_NET\n \t * are added to this array, they must appear before any statistics\n@@ -80,7 +80,7 @@ static const struct nfp_xstat 
nfp_net_xstats[] = {\n \tNFP_XSTAT_NET(\"bpf_app2_bytes\", APP2_BYTES),\n \tNFP_XSTAT_NET(\"bpf_app3_pkts\", APP3_FRAMES),\n \tNFP_XSTAT_NET(\"bpf_app3_bytes\", APP3_BYTES),\n-\t/**\n+\t/*\n \t * MAC xstats available only on PF. These statistics are not available for VFs as the\n \t * PF is not initialized when the VF is initialized as it is still bound to the kernel\n \t * driver. As such, the PMD cannot obtain a CPP handle and access the rtsym_table in order\n@@ -175,7 +175,7 @@ static void\n nfp_net_notify_port_speed(struct nfp_net_hw *hw,\n \t\tstruct rte_eth_link *link)\n {\n-\t/**\n+\t/*\n \t * Read the link status from NFP_NET_CFG_STS. If the link is down\n \t * then write the link speed NFP_NET_CFG_STS_LINK_RATE_UNKNOWN to\n \t * NFP_NET_CFG_STS_NSP_LINK_RATE.\n@@ -184,7 +184,7 @@ nfp_net_notify_port_speed(struct nfp_net_hw *hw,\n \t\tnn_cfg_writew(hw, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);\n \t\treturn;\n \t}\n-\t/**\n+\t/*\n \t * Link is up so write the link speed from the eth_table to\n \t * NFP_NET_CFG_STS_NSP_LINK_RATE.\n \t */\n@@ -214,7 +214,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw,\n \tnfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);\n \n \twait.tv_sec = 0;\n-\twait.tv_nsec = 1000000;\n+\twait.tv_nsec = 1000000; /* 1ms */\n \n \tPMD_DRV_LOG(DEBUG, \"Polling for update ack...\");\n \n@@ -253,7 +253,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw,\n  *\n  * @return\n  *   - (0) if OK to reconfigure the device.\n- *   - (EIO) if I/O err and fail to reconfigure the device.\n+ *   - (-EIO) if I/O err and fail to reconfigure the device.\n  */\n int\n nfp_net_reconfig(struct nfp_net_hw *hw,\n@@ -297,7 +297,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw,\n  *\n  * @return\n  *   - (0) if OK to reconfigure the device.\n- *   - (EIO) if I/O err and fail to reconfigure the device.\n+ *   - (-EIO) if I/O err and fail to reconfigure the device.\n  */\n int\n nfp_net_ext_reconfig(struct nfp_net_hw *hw,\n@@ -368,9 +368,15 @@ 
nfp_net_mbox_reconfig(struct nfp_net_hw *hw,\n }\n \n /*\n- * Configure an Ethernet device. This function must be invoked first\n- * before any other function in the Ethernet API. This function can\n- * also be re-invoked when a device is in the stopped state.\n+ * Configure an Ethernet device.\n+ *\n+ * This function must be invoked first before any other function in the Ethernet API.\n+ * This function can also be re-invoked when a device is in the stopped state.\n+ *\n+ * A DPDK app sends info about how many queues to use and how those queues\n+ * need to be configured. This is used by the DPDK core and it makes sure no\n+ * more queues than those advertised by the driver are requested.\n+ * This function is called after that internal process.\n+ */\n int\n nfp_net_configure(struct rte_eth_dev *dev)\n@@ -382,14 +388,6 @@ nfp_net_configure(struct rte_eth_dev *dev)\n \n \thw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n-\t/*\n-\t * A DPDK app sends info about how many queues to use and how\n-\t * those queues need to be configured. This is used by the\n-\t * DPDK core and it makes sure no more queues than those\n-\t * advertised by the driver are requested. 
This function is\n-\t * called after that internal process\n-\t */\n-\n \tdev_conf = &dev->data->dev_conf;\n \trxmode = &dev_conf->rxmode;\n \ttxmode = &dev_conf->txmode;\n@@ -557,12 +555,12 @@ nfp_net_set_mac_addr(struct rte_eth_dev *dev,\n \t/* Writing new MAC to the specific port BAR address */\n \tnfp_net_write_mac(hw, (uint8_t *)mac_addr);\n \n-\t/* Signal the NIC about the change */\n \tupdate = NFP_NET_CFG_UPDATE_MACADDR;\n \tctrl = hw->ctrl;\n \tif ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&\n \t\t\t(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)\n \t\tctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;\n+\t/* Signal the NIC about the change */\n \tif (nfp_net_reconfig(hw, ctrl, update) != 0) {\n \t\tPMD_DRV_LOG(ERR, \"MAC address update failed\");\n \t\treturn -EIO;\n@@ -588,7 +586,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,\n \n \tif (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {\n \t\tPMD_DRV_LOG(INFO, \"VF: enabling RX interrupt with UIO\");\n-\t\t/* UIO just supports one queue and no LSC*/\n+\t\t/* UIO just supports one queue and no LSC */\n \t\tnn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);\n \t\tif (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)\n \t\t\treturn -1;\n@@ -597,8 +595,8 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,\n \t\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n \t\t\t/*\n \t\t\t * The first msix vector is reserved for non\n-\t\t\t * efd interrupts\n-\t\t\t*/\n+\t\t\t * efd interrupts.\n+\t\t\t */\n \t\t\tnn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);\n \t\t\tif (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)\n \t\t\t\treturn -1;\n@@ -706,10 +704,6 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)\n \tnew_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;\n \tupdate = NFP_NET_CFG_UPDATE_GEN;\n \n-\t/*\n-\t * DPDK sets promiscuous mode on just after this call assuming\n-\t * it can not fail ...\n-\t */\n \tret = nfp_net_reconfig(hw, new_ctrl, update);\n \tif (ret != 0)\n \t\treturn ret;\n@@ -737,10 
+731,6 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)\n \tnew_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;\n \tupdate = NFP_NET_CFG_UPDATE_GEN;\n \n-\t/*\n-\t * DPDK sets promiscuous mode off just before this call\n-\t * assuming it can not fail ...\n-\t */\n \tret = nfp_net_reconfig(hw, new_ctrl, update);\n \tif (ret != 0)\n \t\treturn ret;\n@@ -751,7 +741,7 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)\n }\n \n /*\n- * return 0 means link status changed, -1 means not changed\n+ * Return 0 means link status changed, -1 means not changed\n  *\n  * Wait to complete is needed as it can take up to 9 seconds to get the Link\n  * status.\n@@ -793,7 +783,7 @@ nfp_net_link_update(struct rte_eth_dev *dev,\n \t\t\t\t}\n \t\t\t}\n \t\t} else {\n-\t\t\t/**\n+\t\t\t/*\n \t\t\t * Shift and mask nn_link_status so that it is effectively the value\n \t\t\t * at offset NFP_NET_CFG_STS_NSP_LINK_RATE.\n \t\t\t */\n@@ -812,7 +802,7 @@ nfp_net_link_update(struct rte_eth_dev *dev,\n \t\t\tPMD_DRV_LOG(INFO, \"NIC Link is Down\");\n \t}\n \n-\t/**\n+\t/*\n \t * Notify the port to update the speed value in the CTRL BAR from NSP.\n \t * Not applicable for VFs as the associated PF is still attached to the\n \t * kernel driver.\n@@ -833,11 +823,9 @@ nfp_net_stats_get(struct rte_eth_dev *dev,\n \n \thw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n-\t/* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */\n-\n \tmemset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));\n \n-\t/* reading per RX ring stats */\n+\t/* Reading per RX ring stats */\n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n \t\tif (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)\n \t\t\tbreak;\n@@ -855,7 +843,7 @@ nfp_net_stats_get(struct rte_eth_dev *dev,\n \t\t\t\thw->eth_stats_base.q_ibytes[i];\n \t}\n \n-\t/* reading per TX ring stats */\n+\t/* Reading per TX ring stats */\n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n \t\tif (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)\n \t\t\tbreak;\n@@ -889,7 +877,7 @@ 
nfp_net_stats_get(struct rte_eth_dev *dev,\n \n \tnfp_dev_stats.obytes -= hw->eth_stats_base.obytes;\n \n-\t/* reading general device stats */\n+\t/* Reading general device stats */\n \tnfp_dev_stats.ierrors =\n \t\t\tnn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);\n \n@@ -915,6 +903,10 @@ nfp_net_stats_get(struct rte_eth_dev *dev,\n \treturn -EINVAL;\n }\n \n+/*\n+ * hw->eth_stats_base records the per counter starting point.\n+ * Let's update it now.\n+ */\n int\n nfp_net_stats_reset(struct rte_eth_dev *dev)\n {\n@@ -923,12 +915,7 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)\n \n \thw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n-\t/*\n-\t * hw->eth_stats_base records the per counter starting point.\n-\t * Lets update it now\n-\t */\n-\n-\t/* reading per RX ring stats */\n+\t/* Reading per RX ring stats */\n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n \t\tif (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)\n \t\t\tbreak;\n@@ -940,7 +927,7 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)\n \t\t\t\tnn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);\n \t}\n \n-\t/* reading per TX ring stats */\n+\t/* Reading per TX ring stats */\n \tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n \t\tif (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)\n \t\t\tbreak;\n@@ -964,7 +951,7 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)\n \thw->eth_stats_base.obytes =\n \t\t\tnn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);\n \n-\t/* reading general device stats */\n+\t/* Reading general device stats */\n \thw->eth_stats_base.ierrors =\n \t\t\tnn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);\n \n@@ -1032,7 +1019,7 @@ nfp_net_xstats_value(const struct rte_eth_dev *dev,\n \tif (raw)\n \t\treturn value;\n \n-\t/**\n+\t/*\n \t * A baseline value of each statistic counter is recorded when stats are \"reset\".\n \t * Thus, the value returned by this function need to be decremented by this\n \t * baseline value. 
The result is the count of this statistic since the last time\n@@ -1041,12 +1028,12 @@ nfp_net_xstats_value(const struct rte_eth_dev *dev,\n \treturn value - hw->eth_xstats_base[index].value;\n }\n \n+/* NOTE: All callers ensure dev is always set. */\n int\n nfp_net_xstats_get_names(struct rte_eth_dev *dev,\n \t\tstruct rte_eth_xstat_name *xstats_names,\n \t\tunsigned int size)\n {\n-\t/* NOTE: All callers ensure dev is always set. */\n \tuint32_t id;\n \tuint32_t nfp_size;\n \tuint32_t read_size;\n@@ -1066,12 +1053,12 @@ nfp_net_xstats_get_names(struct rte_eth_dev *dev,\n \treturn read_size;\n }\n \n+/* NOTE: All callers ensure dev is always set. */\n int\n nfp_net_xstats_get(struct rte_eth_dev *dev,\n \t\tstruct rte_eth_xstat *xstats,\n \t\tunsigned int n)\n {\n-\t/* NOTE: All callers ensure dev is always set. */\n \tuint32_t id;\n \tuint32_t nfp_size;\n \tuint32_t read_size;\n@@ -1092,16 +1079,16 @@ nfp_net_xstats_get(struct rte_eth_dev *dev,\n \treturn read_size;\n }\n \n+/*\n+ * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev,\n+ * ids, xstats_names and size are valid, and non-NULL.\n+ */\n int\n nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev,\n \t\tconst uint64_t *ids,\n \t\tstruct rte_eth_xstat_name *xstats_names,\n \t\tunsigned int size)\n {\n-\t/**\n-\t * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev,\n-\t * ids, xstats_names and size are valid, and non-NULL.\n-\t */\n \tuint32_t i;\n \tuint32_t read_size;\n \n@@ -1123,16 +1110,16 @@ nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev,\n \treturn read_size;\n }\n \n+/*\n+ * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev,\n+ * ids, values and n are valid, and non-NULL.\n+ */\n int\n nfp_net_xstats_get_by_id(struct rte_eth_dev *dev,\n \t\tconst uint64_t *ids,\n \t\tuint64_t *values,\n \t\tunsigned int n)\n {\n-\t/**\n-\t * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev,\n-\t * ids, values and n are valid, and non-NULL.\n-\t */\n 
\tuint32_t i;\n \tuint32_t read_size;\n \n@@ -1167,10 +1154,7 @@ nfp_net_xstats_reset(struct rte_eth_dev *dev)\n \t\thw->eth_xstats_base[id].id = id;\n \t\thw->eth_xstats_base[id].value = nfp_net_xstats_value(dev, id, true);\n \t}\n-\t/**\n-\t * Successfully reset xstats, now call function to reset basic stats\n-\t * return value is then based on the success of that function\n-\t */\n+\t/* Successfully reset xstats, now call function to reset basic stats. */\n \treturn nfp_net_stats_reset(dev);\n }\n \n@@ -1217,7 +1201,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \tdev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;\n \tdev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;\n \tdev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;\n-\t/*\n+\t/**\n \t * The maximum rx packet length (max_rx_pktlen) is set to the\n \t * maximum supported frame size that the NFP can handle. This\n \t * includes layer 2 headers, CRC and other metadata that can\n@@ -1358,7 +1342,7 @@ nfp_net_common_init(struct rte_pci_device *pci_dev,\n \n \tnfp_net_init_metadata_format(hw);\n \n-\t/* read the Rx offset configured from firmware */\n+\t/* Read the Rx offset configured from firmware */\n \tif (hw->ver.major < 2)\n \t\thw->rx_offset = NFP_NET_RX_OFFSET;\n \telse\n@@ -1375,7 +1359,6 @@ const uint32_t *\n nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)\n {\n \tstatic const uint32_t ptypes[] = {\n-\t\t/* refers to nfp_net_set_hash() */\n \t\tRTE_PTYPE_INNER_L3_IPV4,\n \t\tRTE_PTYPE_INNER_L3_IPV6,\n \t\tRTE_PTYPE_INNER_L3_IPV6_EXT,\n@@ -1449,10 +1432,8 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)\n \t\t\tpci_dev->addr.devid, pci_dev->addr.function);\n }\n \n-/* Interrupt configuration and handling */\n-\n /*\n- * nfp_net_irq_unmask - Unmask an interrupt\n+ * Unmask an interrupt\n  *\n  * If MSI-X auto-masking is enabled clear the mask bit, otherwise\n  * clear the ICR for the entry.\n@@ -1478,16 +1459,14 @@ nfp_net_irq_unmask(struct 
rte_eth_dev *dev)\n \t}\n }\n \n-/*\n+/**\n  * Interrupt handler which shall be registered for alarm callback for delayed\n  * handling specific interrupt to wait for the stable nic state. As the NIC\n  * interrupt state is not stable for nfp after link is just down, it needs\n  * to wait 4 seconds to get the stable status.\n  *\n- * @param handle   Pointer to interrupt handle.\n- * @param param    The address of parameter (struct rte_eth_dev *)\n- *\n- * @return  void\n+ * @param param\n+ *   The address of parameter (struct rte_eth_dev *)\n  */\n void\n nfp_net_dev_interrupt_delayed_handler(void *param)\n@@ -1516,13 +1495,12 @@ nfp_net_dev_interrupt_handler(void *param)\n \n \tnfp_net_link_update(dev, 0);\n \n-\t/* likely to up */\n+\t/* Likely to up */\n \tif (link.link_status == 0) {\n-\t\t/* handle it 1 sec later, wait it being stable */\n+\t\t/* Handle it 1 sec later, wait it being stable */\n \t\ttimeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;\n-\t\t/* likely to down */\n-\t} else {\n-\t\t/* handle it 4 sec later, wait it being stable */\n+\t} else {  /* Likely to down */\n+\t\t/* Handle it 4 sec later, wait it being stable */\n \t\ttimeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;\n \t}\n \n@@ -1543,7 +1521,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev,\n \n \thw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n-\t/* mtu setting is forbidden if port is started */\n+\t/* MTU setting is forbidden if port is started */\n \tif (dev->data->dev_started) {\n \t\tPMD_DRV_LOG(ERR, \"port %d must be stopped before configuration\",\n \t\t\t\tdev->data->port_id);\n@@ -1557,7 +1535,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev,\n \t\treturn -ERANGE;\n \t}\n \n-\t/* writing to configuration space */\n+\t/* Writing to configuration space */\n \tnn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);\n \n \thw->mtu = mtu;\n@@ -1634,7 +1612,7 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,\n \n \t/*\n \t * Update Redirection Table. 
There are 128 8bit-entries which can be\n-\t * manage as 32 32bit-entries\n+\t * manage as 32 32bit-entries.\n \t */\n \tfor (i = 0; i < reta_size; i += 4) {\n \t\t/* Handling 4 RSS entries per loop */\n@@ -1653,8 +1631,8 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,\n \t\tfor (j = 0; j < 4; j++) {\n \t\t\tif ((mask & (0x1 << j)) == 0)\n \t\t\t\tcontinue;\n+\t\t\t/* Clearing the entry bits */\n \t\t\tif (mask != 0xF)\n-\t\t\t\t/* Clearing the entry bits */\n \t\t\t\treta &= ~(0xFF << (8 * j));\n \t\t\treta |= reta_conf[idx].reta[shift + j] << (8 * j);\n \t\t}\n@@ -1689,7 +1667,7 @@ nfp_net_reta_update(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n- /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */\n+/* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */\n int\n nfp_net_reta_query(struct rte_eth_dev *dev,\n \t\tstruct rte_eth_rss_reta_entry64 *reta_conf,\n@@ -1717,7 +1695,7 @@ nfp_net_reta_query(struct rte_eth_dev *dev,\n \n \t/*\n \t * Reading Redirection Table. 
There are 128 8bit-entries which can be\n-\t * manage as 32 32bit-entries\n+\t * manage as 32 32bit-entries.\n \t */\n \tfor (i = 0; i < reta_size; i += 4) {\n \t\t/* Handling 4 RSS entries per loop */\n@@ -1751,7 +1729,7 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,\n \n \thw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n-\t/* Writing the key byte a byte */\n+\t/* Writing the key byte by byte */\n \tfor (i = 0; i < rss_conf->rss_key_len; i++) {\n \t\tmemcpy(&key, &rss_conf->rss_key[i], 1);\n \t\tnn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);\n@@ -1786,7 +1764,7 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,\n \tcfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;\n \tcfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;\n \n-\t/* configuring where to apply the RSS hash */\n+\t/* Configuring where to apply the RSS hash */\n \tnn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);\n \n \t/* Writing the key size */\n@@ -1809,7 +1787,7 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,\n \n \t/* Checking if RSS is enabled */\n \tif ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {\n-\t\tif (rss_hf != 0) { /* Enable RSS? 
*/\n+\t\tif (rss_hf != 0) {\n \t\t\tPMD_DRV_LOG(ERR, \"RSS unsupported\");\n \t\t\treturn -EINVAL;\n \t\t}\n@@ -2010,7 +1988,7 @@ nfp_net_set_vxlan_port(struct nfp_net_hw *hw,\n \n /*\n  * The firmware with NFD3 can not handle DMA address requiring more\n- * than 40 bits\n+ * than 40 bits.\n  */\n int\n nfp_net_check_dma_mask(struct nfp_net_hw *hw,\ndiff --git a/drivers/net/nfp/nfp_common.h b/drivers/net/nfp/nfp_common.h\nindex 9cb889c4a6..6a36e2b04c 100644\n--- a/drivers/net/nfp/nfp_common.h\n+++ b/drivers/net/nfp/nfp_common.h\n@@ -53,7 +53,7 @@ enum nfp_app_fw_id {\n \tNFP_APP_FW_FLOWER_NIC             = 0x3,\n };\n \n-/* nfp_qcp_ptr - Read or Write Pointer of a queue */\n+/* Read or Write Pointer of a queue */\n enum nfp_qcp_ptr {\n \tNFP_QCP_READ_PTR = 0,\n \tNFP_QCP_WRITE_PTR\n@@ -72,15 +72,15 @@ struct nfp_net_tlv_caps {\n };\n \n struct nfp_pf_dev {\n-\t/* Backpointer to associated pci device */\n+\t/** Backpointer to associated pci device */\n \tstruct rte_pci_device *pci_dev;\n \n \tenum nfp_app_fw_id app_fw_id;\n \n-\t/* Pointer to the app running on the PF */\n+\t/** Pointer to the app running on the PF */\n \tvoid *app_fw_priv;\n \n-\t/* The eth table reported by firmware */\n+\t/** The eth table reported by firmware */\n \tstruct nfp_eth_table *nfp_eth_table;\n \n \tuint8_t *ctrl_bar;\n@@ -94,17 +94,17 @@ struct nfp_pf_dev {\n \tstruct nfp_hwinfo *hwinfo;\n \tstruct nfp_rtsym_table *sym_tbl;\n \n-\t/* service id of cpp bridge service */\n+\t/** Service id of cpp bridge service */\n \tuint32_t cpp_bridge_id;\n };\n \n struct nfp_app_fw_nic {\n-\t/* Backpointer to the PF device */\n+\t/** Backpointer to the PF device */\n \tstruct nfp_pf_dev *pf_dev;\n \n-\t/*\n-\t * Array of physical ports belonging to the this CoreNIC app\n-\t * This is really a list of vNIC's. 
One for each physical port\n+\t/**\n+\t * Array of physical ports belonging to this CoreNIC app.\n+\t * This is really a list of vNIC's, one for each physical port.\n \t */\n \tstruct nfp_net_hw *ports[NFP_MAX_PHYPORTS];\n \n@@ -113,13 +113,13 @@ struct nfp_app_fw_nic {\n };\n \n struct nfp_net_hw {\n-\t/* Backpointer to the PF this port belongs to */\n+\t/** Backpointer to the PF this port belongs to */\n \tstruct nfp_pf_dev *pf_dev;\n \n-\t/* Backpointer to the eth_dev of this port*/\n+\t/** Backpointer to the eth_dev of this port */\n \tstruct rte_eth_dev *eth_dev;\n \n-\t/* Info from the firmware */\n+\t/** Info from the firmware */\n \tstruct nfp_net_fw_ver ver;\n \tuint32_t cap;\n \tuint32_t max_mtu;\n@@ -130,7 +130,7 @@ struct nfp_net_hw {\n \t/** NFP ASIC params */\n \tconst struct nfp_dev_info *dev_info;\n \n-\t/* Current values for control */\n+\t/** Current values for control */\n \tuint32_t ctrl;\n \n \tuint8_t *ctrl_bar;\n@@ -156,7 +156,7 @@ struct nfp_net_hw {\n \n \tstruct rte_ether_addr mac_addr;\n \n-\t/* Records starting point for counters */\n+\t/** Records starting point for counters */\n \tstruct rte_eth_stats eth_stats_base;\n \tstruct rte_eth_xstat *eth_xstats_base;\n \n@@ -166,9 +166,9 @@ struct nfp_net_hw {\n \tuint8_t *mac_stats_bar;\n \tuint8_t *mac_stats;\n \n-\t/* Sequential physical port number, only valid for CoreNIC firmware */\n+\t/** Sequential physical port number, only valid for CoreNIC firmware */\n \tuint8_t idx;\n-\t/* Internal port number as seen from NFP */\n+\t/** Internal port number as seen from NFP */\n \tuint8_t nfp_idx;\n \n \tstruct nfp_net_tlv_caps tlv_caps;\n@@ -240,10 +240,6 @@ nn_writeq(uint64_t val,\n \tnn_writel(val, addr);\n }\n \n-/*\n- * Functions to read/write from/to Config BAR\n- * Performs any endian conversion necessary.\n- */\n static inline uint8_t\n nn_cfg_readb(struct nfp_net_hw *hw,\n \t\tuint32_t off)\n@@ -304,11 +300,15 @@ nn_cfg_writeq(struct nfp_net_hw *hw,\n \tnn_writeq(rte_cpu_to_le_64(val), 
hw->ctrl_bar + off);\n }\n \n-/*\n- * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue\n- * @q: Base address for queue structure\n- * @ptr: Add to the Read or Write pointer\n- * @val: Value to add to the queue pointer\n+/**\n+ * Add the value to the selected pointer of a queue.\n+ *\n+ * @param q\n+ *   Base address for queue structure\n+ * @param ptr\n+ *   Add to the read or write pointer\n+ * @param val\n+ *   Value to add to the queue pointer\n  */\n static inline void\n nfp_qcp_ptr_add(uint8_t *q,\n@@ -325,10 +325,13 @@ nfp_qcp_ptr_add(uint8_t *q,\n \tnn_writel(rte_cpu_to_le_32(val), q + off);\n }\n \n-/*\n- * nfp_qcp_read - Read the current Read/Write pointer value for a queue\n- * @q:  Base address for queue structure\n- * @ptr: Read or Write pointer\n+/**\n+ * Read the current read/write pointer value for a queue.\n+ *\n+ * @param q\n+ *   Base address for queue structure\n+ * @param ptr\n+ *   Read or Write pointer\n  */\n static inline uint32_t\n nfp_qcp_read(uint8_t *q,\ndiff --git a/drivers/net/nfp/nfp_cpp_bridge.c b/drivers/net/nfp/nfp_cpp_bridge.c\nindex 222cfdcbc3..8f5271cde9 100644\n--- a/drivers/net/nfp/nfp_cpp_bridge.c\n+++ b/drivers/net/nfp/nfp_cpp_bridge.c\n@@ -1,8 +1,6 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n  * Copyright (c) 2014-2021 Netronome Systems, Inc.\n  * All rights reserved.\n- *\n- * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.\n  */\n \n #include \"nfp_cpp_bridge.h\"\n@@ -48,7 +46,7 @@ nfp_map_service(uint32_t service_id)\n \n \t/*\n \t * Find a service core with the least number of services already\n-\t * registered to it\n+\t * registered to it.\n \t */\n \twhile (slcore_count--) {\n \t\tservice_count = rte_service_lcore_count_services(slcore_array[slcore_count]);\n@@ -100,7 +98,7 @@ nfp_enable_cpp_service(struct nfp_pf_dev *pf_dev)\n \tpf_dev->cpp_bridge_id = service_id;\n \tPMD_INIT_LOG(INFO, \"NFP cpp service registered\");\n \n-\t/* Map it to available service 
core*/\n+\t/* Map it to available service core */\n \tret = nfp_map_service(service_id);\n \tif (ret != 0) {\n \t\tPMD_INIT_LOG(DEBUG, \"Could not map nfp cpp service\");\ndiff --git a/drivers/net/nfp/nfp_ctrl.h b/drivers/net/nfp/nfp_ctrl.h\nindex 55073c3cea..cd0a2f92a8 100644\n--- a/drivers/net/nfp/nfp_ctrl.h\n+++ b/drivers/net/nfp/nfp_ctrl.h\n@@ -20,7 +20,7 @@\n /* Offset in Freelist buffer where packet starts on RX */\n #define NFP_NET_RX_OFFSET               32\n \n-/* working with metadata api (NFD version > 3.0) */\n+/* Working with metadata api (NFD version > 3.0) */\n #define NFP_NET_META_FIELD_SIZE         4\n #define NFP_NET_META_FIELD_MASK ((1 << NFP_NET_META_FIELD_SIZE) - 1)\n #define NFP_NET_META_HEADER_SIZE        4\n@@ -36,14 +36,14 @@\n \t\t\t\t\t\tNFP_NET_META_VLAN_TPID_MASK)\n \n /* Prepend field types */\n-#define NFP_NET_META_HASH               1 /* next field carries hash type */\n+#define NFP_NET_META_HASH               1 /* Next field carries hash type */\n #define NFP_NET_META_VLAN               4\n #define NFP_NET_META_PORTID             5\n #define NFP_NET_META_IPSEC              9\n \n #define NFP_META_PORT_ID_CTRL           ~0U\n \n-/* Hash type pre-pended when a RSS hash was computed */\n+/* Hash type prepended when a RSS hash was computed */\n #define NFP_NET_RSS_NONE                0\n #define NFP_NET_RSS_IPV4                1\n #define NFP_NET_RSS_IPV6                2\n@@ -102,7 +102,7 @@\n #define   NFP_NET_CFG_CTRL_IRQMOD         (0x1 << 18) /* Interrupt moderation */\n #define   NFP_NET_CFG_CTRL_RINGPRIO       (0x1 << 19) /* Ring priorities */\n #define   NFP_NET_CFG_CTRL_MSIXAUTO       (0x1 << 20) /* MSI-X auto-masking */\n-#define   NFP_NET_CFG_CTRL_TXRWB          (0x1 << 21) /* Write-back of TX ring*/\n+#define   NFP_NET_CFG_CTRL_TXRWB          (0x1 << 21) /* Write-back of TX ring */\n #define   NFP_NET_CFG_CTRL_L2SWITCH       (0x1 << 22) /* L2 Switch */\n #define   NFP_NET_CFG_CTRL_TXVLAN_V2      (0x1 << 23) /* Enable VLAN 
insert with metadata */\n #define   NFP_NET_CFG_CTRL_VXLAN          (0x1 << 24) /* Enable VXLAN */\n@@ -111,7 +111,7 @@\n #define   NFP_NET_CFG_CTRL_LSO2           (0x1 << 28) /* LSO/TSO (version 2) */\n #define   NFP_NET_CFG_CTRL_RSS2           (0x1 << 29) /* RSS (version 2) */\n #define   NFP_NET_CFG_CTRL_CSUM_COMPLETE  (0x1 << 30) /* Checksum complete */\n-#define   NFP_NET_CFG_CTRL_LIVE_ADDR      (0x1U << 31)/* live MAC addr change */\n+#define   NFP_NET_CFG_CTRL_LIVE_ADDR      (0x1U << 31) /* Live MAC addr change */\n #define NFP_NET_CFG_UPDATE              0x0004\n #define   NFP_NET_CFG_UPDATE_GEN          (0x1 <<  0) /* General update */\n #define   NFP_NET_CFG_UPDATE_RING         (0x1 <<  1) /* Ring config change */\n@@ -124,7 +124,7 @@\n #define   NFP_NET_CFG_UPDATE_IRQMOD       (0x1 <<  8) /* IRQ mod change */\n #define   NFP_NET_CFG_UPDATE_VXLAN        (0x1 <<  9) /* VXLAN port change */\n #define   NFP_NET_CFG_UPDATE_MACADDR      (0x1 << 11) /* MAC address change */\n-#define   NFP_NET_CFG_UPDATE_MBOX         (0x1 << 12) /**< Mailbox update */\n+#define   NFP_NET_CFG_UPDATE_MBOX         (0x1 << 12) /* Mailbox update */\n #define   NFP_NET_CFG_UPDATE_ERR          (0x1U << 31) /* A error occurred */\n #define NFP_NET_CFG_TXRS_ENABLE         0x0008\n #define NFP_NET_CFG_RXRS_ENABLE         0x0010\n@@ -205,7 +205,7 @@ struct nfp_net_fw_ver {\n  * @NFP_NET_CFG_SPARE_ADDR:  DMA address for ME code to use (e.g. 
YDS-155 fix)\n  */\n #define NFP_NET_CFG_SPARE_ADDR          0x0050\n-/**\n+/*\n  * NFP6000/NFP4000 - Prepend configuration\n  */\n #define NFP_NET_CFG_RX_OFFSET\t\t0x0050\n@@ -280,7 +280,7 @@ struct nfp_net_fw_ver {\n  * @NFP_NET_CFG_TXR_BASE:    Base offset for TX ring configuration\n  * @NFP_NET_CFG_TXR_ADDR:    Per TX ring DMA address (8B entries)\n  * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)\n- * @NFP_NET_CFG_TXR_SZ:      Per TX ring ring size (1B entries)\n+ * @NFP_NET_CFG_TXR_SZ:      Per TX ring size (1B entries)\n  * @NFP_NET_CFG_TXR_VEC:     Per TX ring MSI-X table entry (1B entries)\n  * @NFP_NET_CFG_TXR_PRIO:    Per TX ring priority (1B entries)\n  * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries)\n@@ -299,7 +299,7 @@ struct nfp_net_fw_ver {\n  * RX ring configuration (0x0800 - 0x0c00)\n  * @NFP_NET_CFG_RXR_BASE:    Base offset for RX ring configuration\n  * @NFP_NET_CFG_RXR_ADDR:    Per TX ring DMA address (8B entries)\n- * @NFP_NET_CFG_RXR_SZ:      Per TX ring ring size (1B entries)\n+ * @NFP_NET_CFG_RXR_SZ:      Per TX ring size (1B entries)\n  * @NFP_NET_CFG_RXR_VEC:     Per TX ring MSI-X table entry (1B entries)\n  * @NFP_NET_CFG_RXR_PRIO:    Per TX ring priority (1B entries)\n  * @NFP_NET_CFG_RXR_IRQ_MOD: Per TX ring interrupt moderation (4B entries)\n@@ -330,7 +330,7 @@ struct nfp_net_fw_ver {\n \n /*\n  * General device stats (0x0d00 - 0x0d90)\n- * all counters are 64bit.\n+ * All counters are 64bit.\n  */\n #define NFP_NET_CFG_STATS_BASE          0x0d00\n #define NFP_NET_CFG_STATS_RX_DISCARDS   (NFP_NET_CFG_STATS_BASE + 0x00)\n@@ -364,7 +364,7 @@ struct nfp_net_fw_ver {\n \n /*\n  * Per ring stats (0x1000 - 0x1800)\n- * options, 64bit per entry\n+ * Options, 64bit per entry\n  * @NFP_NET_CFG_TXR_STATS:   TX ring statistics (Packet and Byte count)\n  * @NFP_NET_CFG_RXR_STATS:   RX ring statistics (Packet and Byte count)\n  */\n@@ -375,9 +375,9 @@ struct nfp_net_fw_ver {\n #define 
NFP_NET_CFG_RXR_STATS(_x)       (NFP_NET_CFG_RXR_STATS_BASE + \\\n \t\t\t\t\t ((_x) * 0x10))\n \n-/**\n+/*\n  * Mac stats (0x0000 - 0x0200)\n- * all counters are 64bit.\n+ * All counters are 64bit.\n  */\n #define NFP_MAC_STATS_BASE                0x0000\n #define NFP_MAC_STATS_SIZE                0x0200\n@@ -558,9 +558,11 @@ struct nfp_net_fw_ver {\n \n int nfp_net_tlv_caps_parse(struct rte_eth_dev *dev);\n \n-/*\n- * nfp_net_cfg_ctrl_rss() - Get RSS flag based on firmware's capability\n- * @hw_cap: The firmware's capabilities\n+/**\n+ * Get RSS flag based on firmware's capability\n+ *\n+ * @param hw_cap\n+ *   The firmware's capabilities\n  */\n static inline uint32_t\n nfp_net_cfg_ctrl_rss(uint32_t hw_cap)\ndiff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c\nindex 72abc4c16e..1651ac2455 100644\n--- a/drivers/net/nfp/nfp_ethdev.c\n+++ b/drivers/net/nfp/nfp_ethdev.c\n@@ -66,7 +66,7 @@ nfp_net_start(struct rte_eth_dev *dev)\n \t/* Enabling the required queues in the device */\n \tnfp_net_enable_queues(dev);\n \n-\t/* check and configure queue intr-vector mapping */\n+\t/* Check and configure queue intr-vector mapping */\n \tif (dev->data->dev_conf.intr_conf.rxq != 0) {\n \t\tif (app_fw_nic->multiport) {\n \t\t\tPMD_INIT_LOG(ERR, \"PMD rx interrupt is not supported \"\n@@ -76,7 +76,7 @@ nfp_net_start(struct rte_eth_dev *dev)\n \t\tif (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {\n \t\t\t/*\n \t\t\t * Better not to share LSC with RX interrupts.\n-\t\t\t * Unregistering LSC interrupt handler\n+\t\t\t * Unregistering LSC interrupt handler.\n \t\t\t */\n \t\t\trte_intr_callback_unregister(pci_dev->intr_handle,\n \t\t\t\t\tnfp_net_dev_interrupt_handler, (void *)dev);\n@@ -150,7 +150,7 @@ nfp_net_start(struct rte_eth_dev *dev)\n \n \t/*\n \t * Allocating rte mbufs for configured rx queues.\n-\t * This requires queues being enabled before\n+\t * This requires queues being enabled before.\n \t */\n \tif (nfp_net_rx_freelist_setup(dev) != 0) 
{\n \t\tret = -ENOMEM;\n@@ -273,11 +273,11 @@ nfp_net_close(struct rte_eth_dev *dev)\n \t/* Clear ipsec */\n \tnfp_ipsec_uninit(dev);\n \n-\t/* Cancel possible impending LSC work here before releasing the port*/\n+\t/* Cancel possible impending LSC work here before releasing the port */\n \trte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);\n \n \t/* Only free PF resources after all physical ports have been closed */\n-\t/* Mark this port as unused and free device priv resources*/\n+\t/* Mark this port as unused and free device priv resources */\n \tnn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);\n \tapp_fw_nic->ports[hw->idx] = NULL;\n \trte_eth_dev_release_port(dev);\n@@ -300,15 +300,10 @@ nfp_net_close(struct rte_eth_dev *dev)\n \n \trte_intr_disable(pci_dev->intr_handle);\n \n-\t/* unregister callback func from eal lib */\n+\t/* Unregister callback func from eal lib */\n \trte_intr_callback_unregister(pci_dev->intr_handle,\n \t\t\tnfp_net_dev_interrupt_handler, (void *)dev);\n \n-\t/*\n-\t * The ixgbe PMD disables the pcie master on the\n-\t * device. 
The i40e does not...\n-\t */\n-\n \treturn 0;\n }\n \n@@ -497,7 +492,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)\n \n \t/*\n \t * Use PF array of physical ports to get pointer to\n-\t * this specific port\n+\t * this specific port.\n \t */\n \thw = app_fw_nic->ports[port];\n \n@@ -779,7 +774,7 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,\n \n \t/*\n \t * For coreNIC the number of vNICs exposed should be the same as the\n-\t * number of physical ports\n+\t * number of physical ports.\n \t */\n \tif (total_vnics != nfp_eth_table->count) {\n \t\tPMD_INIT_LOG(ERR, \"Total physical ports do not match number of vNICs\");\n@@ -787,7 +782,7 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,\n \t\tgoto app_cleanup;\n \t}\n \n-\t/* Populate coreNIC app properties*/\n+\t/* Populate coreNIC app properties */\n \tapp_fw_nic->total_phyports = total_vnics;\n \tapp_fw_nic->pf_dev = pf_dev;\n \tif (total_vnics > 1)\n@@ -842,8 +837,9 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,\n \n \t\teth_dev->device = &pf_dev->pci_dev->device;\n \n-\t\t/* ctrl/tx/rx BAR mappings and remaining init happens in\n-\t\t * nfp_net_init\n+\t\t/*\n+\t\t * Ctrl/tx/rx BAR mappings and remaining init happens in\n+\t\t * @nfp_net_init()\n \t\t */\n \t\tret = nfp_net_init(eth_dev);\n \t\tif (ret != 0) {\n@@ -970,7 +966,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)\n \tpf_dev->pci_dev = pci_dev;\n \tpf_dev->nfp_eth_table = nfp_eth_table;\n \n-\t/* configure access to tx/rx vNIC BARs */\n+\t/* Configure access to tx/rx vNIC BARs */\n \taddr = nfp_qcp_queue_offset(dev_info, 0);\n \tcpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);\n \n@@ -986,7 +982,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)\n \n \t/*\n \t * PF initialization has been done at this point. 
Call app specific\n-\t * init code now\n+\t * init code now.\n \t */\n \tswitch (pf_dev->app_fw_id) {\n \tcase NFP_APP_FW_CORE_NIC:\n@@ -1011,7 +1007,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev)\n \t\tgoto hwqueues_cleanup;\n \t}\n \n-\t/* register the CPP bridge service here for primary use */\n+\t/* Register the CPP bridge service here for primary use */\n \tret = nfp_enable_cpp_service(pf_dev);\n \tif (ret != 0)\n \t\tPMD_INIT_LOG(INFO, \"Enable cpp service failed.\");\n@@ -1079,7 +1075,7 @@ nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,\n /*\n  * When attaching to the NFP4000/6000 PF on a secondary process there\n  * is no need to initialise the PF again. Only minimal work is required\n- * here\n+ * here.\n  */\n static int\n nfp_pf_secondary_init(struct rte_pci_device *pci_dev)\n@@ -1119,7 +1115,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev)\n \n \t/*\n \t * We don't have access to the PF created in the primary process\n-\t * here so we have to read the number of ports from firmware\n+\t * here so we have to read the number of ports from firmware.\n \t */\n \tsym_tbl = nfp_rtsym_table_read(cpp);\n \tif (sym_tbl == NULL) {\n@@ -1216,7 +1212,7 @@ nfp_pci_uninit(struct rte_eth_dev *eth_dev)\n \t\trte_eth_dev_close(port_id);\n \t/*\n \t * Ports can be closed and freed but hotplugging is not\n-\t * currently supported\n+\t * currently supported.\n \t */\n \treturn -ENOTSUP;\n }\ndiff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c\nindex d3c3c9e953..c9e72dd953 100644\n--- a/drivers/net/nfp/nfp_ethdev_vf.c\n+++ b/drivers/net/nfp/nfp_ethdev_vf.c\n@@ -47,12 +47,12 @@ nfp_netvf_start(struct rte_eth_dev *dev)\n \t/* Enabling the required queues in the device */\n \tnfp_net_enable_queues(dev);\n \n-\t/* check and configure queue intr-vector mapping */\n+\t/* Check and configure queue intr-vector mapping */\n \tif (dev->data->dev_conf.intr_conf.rxq != 0) {\n \t\tif (rte_intr_type_get(intr_handle) == 
RTE_INTR_HANDLE_UIO) {\n \t\t\t/*\n \t\t\t * Better not to share LSC with RX interrupts.\n-\t\t\t * Unregistering LSC interrupt handler\n+\t\t\t * Unregistering LSC interrupt handler.\n \t\t\t */\n \t\t\trte_intr_callback_unregister(pci_dev->intr_handle,\n \t\t\t\t\tnfp_net_dev_interrupt_handler, (void *)dev);\n@@ -101,7 +101,7 @@ nfp_netvf_start(struct rte_eth_dev *dev)\n \n \t/*\n \t * Allocating rte mbufs for configured rx queues.\n-\t * This requires queues being enabled before\n+\t * This requires queues being enabled before.\n \t */\n \tif (nfp_net_rx_freelist_setup(dev) != 0) {\n \t\tret = -ENOMEM;\n@@ -182,18 +182,13 @@ nfp_netvf_close(struct rte_eth_dev *dev)\n \n \trte_intr_disable(pci_dev->intr_handle);\n \n-\t/* unregister callback func from eal lib */\n+\t/* Unregister callback func from eal lib */\n \trte_intr_callback_unregister(pci_dev->intr_handle,\n \t\t\tnfp_net_dev_interrupt_handler, (void *)dev);\n \n-\t/* Cancel possible impending LSC work here before releasing the port*/\n+\t/* Cancel possible impending LSC work here before releasing the port */\n \trte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);\n \n-\t/*\n-\t * The ixgbe PMD disables the pcie master on the\n-\t * device. 
The i40e does not...\n-\t */\n-\n \treturn 0;\n }\n \ndiff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c\nindex 84b48daf85..fbcdb3d19e 100644\n--- a/drivers/net/nfp/nfp_flow.c\n+++ b/drivers/net/nfp/nfp_flow.c\n@@ -108,21 +108,21 @@\n #define NVGRE_V4_LEN     (sizeof(struct rte_ether_hdr) + \\\n \t\t\t\tsizeof(struct rte_ipv4_hdr) + \\\n \t\t\t\tsizeof(struct rte_flow_item_gre) + \\\n-\t\t\t\tsizeof(rte_be32_t))    /* gre key */\n+\t\t\t\tsizeof(rte_be32_t))    /* Gre key */\n #define NVGRE_V6_LEN     (sizeof(struct rte_ether_hdr) + \\\n \t\t\t\tsizeof(struct rte_ipv6_hdr) + \\\n \t\t\t\tsizeof(struct rte_flow_item_gre) + \\\n-\t\t\t\tsizeof(rte_be32_t))    /* gre key */\n+\t\t\t\tsizeof(rte_be32_t))    /* Gre key */\n \n /* Process structure associated with a flow item */\n struct nfp_flow_item_proc {\n-\t/* Bit-mask for fields supported by this PMD. */\n+\t/** Bit-mask for fields supported by this PMD. */\n \tconst void *mask_support;\n-\t/* Bit-mask to use when @p item->mask is not provided. */\n+\t/** Bit-mask to use when @p item->mask is not provided. */\n \tconst void *mask_default;\n-\t/* Size in bytes for @p mask_support and @p mask_default. */\n+\t/** Size in bytes for @p mask_support and @p mask_default. */\n \tconst size_t mask_sz;\n-\t/* Merge a pattern item into a flow rule handle. */\n+\t/** Merge a pattern item into a flow rule handle. */\n \tint (*merge)(struct nfp_app_fw_flower *app_fw_flower,\n \t\t\tstruct rte_flow *nfp_flow,\n \t\t\tchar **mbuf_off,\n@@ -130,7 +130,7 @@ struct nfp_flow_item_proc {\n \t\t\tconst struct nfp_flow_item_proc *proc,\n \t\t\tbool is_mask,\n \t\t\tbool is_outer_layer);\n-\t/* List of possible subsequent items. */\n+\t/** List of possible subsequent items. 
*/\n \tconst enum rte_flow_item_type *const next_item;\n };\n \n@@ -308,12 +308,12 @@ nfp_check_mask_add(struct nfp_flow_priv *priv,\n \n \tmask_entry = nfp_mask_table_search(priv, mask_data, mask_len);\n \tif (mask_entry == NULL) {\n-\t\t/* mask entry does not exist, let's create one */\n+\t\t/* Mask entry does not exist, let's create one */\n \t\tret = nfp_mask_table_add(priv, mask_data, mask_len, mask_id);\n \t\tif (ret != 0)\n \t\t\treturn false;\n \t} else {\n-\t\t/* mask entry already exist */\n+\t\t/* Mask entry already exist */\n \t\tmask_entry->ref_cnt++;\n \t\t*mask_id = mask_entry->mask_id;\n \t}\n@@ -818,7 +818,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],\n \t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n \t\t\tPMD_DRV_LOG(DEBUG, \"RTE_FLOW_ITEM_TYPE_ETH detected\");\n \t\t\t/*\n-\t\t\t * eth is set with no specific params.\n+\t\t\t * Eth is set with no specific params.\n \t\t\t * NFP does not need this.\n \t\t\t */\n \t\t\tif (item->spec == NULL)\n@@ -879,7 +879,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],\n \t\t\t\tkey_ls->key_size += sizeof(struct nfp_flower_ipv4_udp_tun);\n \t\t\t\t/*\n \t\t\t\t * The outer l3 layer information is\n-\t\t\t\t * in `struct nfp_flower_ipv4_udp_tun`\n+\t\t\t\t * in `struct nfp_flower_ipv4_udp_tun`.\n \t\t\t\t */\n \t\t\t\tkey_ls->key_size -= sizeof(struct nfp_flower_ipv4);\n \t\t\t} else if (outer_ip6_flag) {\n@@ -889,7 +889,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],\n \t\t\t\tkey_ls->key_size += sizeof(struct nfp_flower_ipv6_udp_tun);\n \t\t\t\t/*\n \t\t\t\t * The outer l3 layer information is\n-\t\t\t\t * in `struct nfp_flower_ipv6_udp_tun`\n+\t\t\t\t * in `struct nfp_flower_ipv6_udp_tun`.\n \t\t\t\t */\n \t\t\t\tkey_ls->key_size -= sizeof(struct nfp_flower_ipv6);\n \t\t\t} else {\n@@ -910,7 +910,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],\n \t\t\t\tkey_ls->key_size += sizeof(struct 
nfp_flower_ipv4_udp_tun);\n \t\t\t\t/*\n \t\t\t\t * The outer l3 layer information is\n-\t\t\t\t * in `struct nfp_flower_ipv4_udp_tun`\n+\t\t\t\t * in `struct nfp_flower_ipv4_udp_tun`.\n \t\t\t\t */\n \t\t\t\tkey_ls->key_size -= sizeof(struct nfp_flower_ipv4);\n \t\t\t} else if (outer_ip6_flag) {\n@@ -918,7 +918,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],\n \t\t\t\tkey_ls->key_size += sizeof(struct nfp_flower_ipv6_udp_tun);\n \t\t\t\t/*\n \t\t\t\t * The outer l3 layer information is\n-\t\t\t\t * in `struct nfp_flower_ipv6_udp_tun`\n+\t\t\t\t * in `struct nfp_flower_ipv6_udp_tun`.\n \t\t\t\t */\n \t\t\t\tkey_ls->key_size -= sizeof(struct nfp_flower_ipv6);\n \t\t\t} else {\n@@ -939,7 +939,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],\n \t\t\t\tkey_ls->key_size += sizeof(struct nfp_flower_ipv4_gre_tun);\n \t\t\t\t/*\n \t\t\t\t * The outer l3 layer information is\n-\t\t\t\t * in `struct nfp_flower_ipv4_gre_tun`\n+\t\t\t\t * in `struct nfp_flower_ipv4_gre_tun`.\n \t\t\t\t */\n \t\t\t\tkey_ls->key_size -= sizeof(struct nfp_flower_ipv4);\n \t\t\t} else if (outer_ip6_flag) {\n@@ -947,7 +947,7 @@ nfp_flow_key_layers_calculate_items(const struct rte_flow_item items[],\n \t\t\t\tkey_ls->key_size += sizeof(struct nfp_flower_ipv6_gre_tun);\n \t\t\t\t/*\n \t\t\t\t * The outer l3 layer information is\n-\t\t\t\t * in `struct nfp_flower_ipv6_gre_tun`\n+\t\t\t\t * in `struct nfp_flower_ipv6_gre_tun`.\n \t\t\t\t */\n \t\t\t\tkey_ls->key_size -= sizeof(struct nfp_flower_ipv6);\n \t\t\t} else {\n@@ -1309,8 +1309,8 @@ nfp_flow_merge_ipv4(__rte_unused struct nfp_app_fw_flower *app_fw_flower,\n \t\t}\n \n \t\t/*\n-\t\t * reserve space for L4 info.\n-\t\t * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv4\n+\t\t * Reserve space for L4 info.\n+\t\t * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv4.\n \t\t */\n \t\tif ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0)\n 
\t\t\t*mbuf_off += sizeof(struct nfp_flower_tp_ports);\n@@ -1392,8 +1392,8 @@ nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower,\n \t\t}\n \n \t\t/*\n-\t\t * reserve space for L4 info.\n-\t\t * rte_flow has ipv4 before L4 but NFP flower fw requires L4 before ipv6\n+\t\t * Reserve space for L4 info.\n+\t\t * rte_flow has ipv6 before L4 but NFP flower fw requires L4 before ipv6.\n \t\t */\n \t\tif ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP) != 0)\n \t\t\t*mbuf_off += sizeof(struct nfp_flower_tp_ports);\n@@ -2127,7 +2127,7 @@ nfp_flow_compile_items(struct nfp_flower_representor *representor,\n \tif (nfp_flow_tcp_flag_check(items))\n \t\tnfp_flow->tcp_flag = true;\n \n-\t/* Check if this is a tunnel flow and get the inner item*/\n+\t/* Check if this is a tunnel flow and get the inner item */\n \tis_tun_flow = nfp_flow_inner_item_get(items, &loop_item);\n \tif (is_tun_flow)\n \t\tis_outer_layer = false;\n@@ -3366,9 +3366,9 @@ nfp_flow_action_raw_encap(struct nfp_app_fw_flower *app_fw_flower,\n \t\treturn -EINVAL;\n \t}\n \n-\t/* Pre_tunnel action must be the first on action list.\n-\t * If other actions already exist, they need to be\n-\t * pushed forward.\n+\t/*\n+\t * Pre_tunnel action must be the first on action list.\n+\t * If other actions already exist, they need to be pushed forward.\n \t */\n \tact_len = act_data - actions;\n \tif (act_len != 0) {\n@@ -4384,7 +4384,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)\n \t\tgoto free_mask_id;\n \t}\n \n-\t/* flow stats */\n+\t/* Flow stats */\n \trte_spinlock_init(&priv->stats_lock);\n \tstats_size = (ctx_count & NFP_FL_STAT_ID_STAT) |\n \t\t\t((ctx_split - 1) & NFP_FL_STAT_ID_MU_NUM);\n@@ -4398,7 +4398,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)\n \t\tgoto free_stats_id;\n \t}\n \n-\t/* mask table */\n+\t/* Mask table */\n \tmask_hash_params.hash_func_init_val = priv->hash_seed;\n \tpriv->mask_table = rte_hash_create(&mask_hash_params);\n \tif (priv->mask_table == NULL) 
{\n@@ -4407,7 +4407,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)\n \t\tgoto free_stats;\n \t}\n \n-\t/* flow table */\n+\t/* Flow table */\n \tflow_hash_params.hash_func_init_val = priv->hash_seed;\n \tflow_hash_params.entries = ctx_count;\n \tpriv->flow_table = rte_hash_create(&flow_hash_params);\n@@ -4417,7 +4417,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)\n \t\tgoto free_mask_table;\n \t}\n \n-\t/* pre tunnel table */\n+\t/* Pre tunnel table */\n \tpriv->pre_tun_cnt = 1;\n \tpre_tun_hash_params.hash_func_init_val = priv->hash_seed;\n \tpriv->pre_tun_table = rte_hash_create(&pre_tun_hash_params);\n@@ -4446,15 +4446,15 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev)\n \t\tgoto free_ct_zone_table;\n \t}\n \n-\t/* ipv4 off list */\n+\t/* IPv4 off list */\n \trte_spinlock_init(&priv->ipv4_off_lock);\n \tLIST_INIT(&priv->ipv4_off_list);\n \n-\t/* ipv6 off list */\n+\t/* IPv6 off list */\n \trte_spinlock_init(&priv->ipv6_off_lock);\n \tLIST_INIT(&priv->ipv6_off_list);\n \n-\t/* neighbor next list */\n+\t/* Neighbor next list */\n \tLIST_INIT(&priv->nn_list);\n \n \treturn 0;\ndiff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h\nindex ed06eca371..ab38dbe1f4 100644\n--- a/drivers/net/nfp/nfp_flow.h\n+++ b/drivers/net/nfp/nfp_flow.h\n@@ -126,19 +126,19 @@ struct nfp_ipv6_addr_entry {\n struct nfp_flow_priv {\n \tuint32_t hash_seed; /**< Hash seed for hash tables in this structure. */\n \tuint64_t flower_version; /**< Flow version, always increase. */\n-\t/* mask hash table */\n+\t/* Mask hash table */\n \tstruct nfp_fl_mask_id mask_ids; /**< Entry for mask hash table */\n \tstruct rte_hash *mask_table; /**< Hash table to store mask ids. */\n-\t/* flow hash table */\n+\t/* Flow hash table */\n \tstruct rte_hash *flow_table; /**< Hash table to store flow rules. */\n-\t/* flow stats */\n+\t/* Flow stats */\n \tuint32_t active_mem_unit; /**< The size of active mem units. */\n \tuint32_t total_mem_units; /**< The size of total mem units. 
*/\n \tuint32_t stats_ring_size; /**< The size of stats id ring. */\n \tstruct nfp_fl_stats_id stats_ids; /**< The stats id ring. */\n \tstruct nfp_fl_stats *stats; /**< Store stats of flow. */\n \trte_spinlock_t stats_lock; /** < Lock the update of 'stats' field. */\n-\t/* pre tunnel rule */\n+\t/* Pre tunnel rule */\n \tuint16_t pre_tun_cnt; /**< The size of pre tunnel rule */\n \tuint8_t pre_tun_bitmap[NFP_TUN_PRE_TUN_RULE_LIMIT]; /**< Bitmap of pre tunnel rule */\n \tstruct rte_hash *pre_tun_table; /**< Hash table to store pre tunnel rule */\n@@ -148,7 +148,7 @@ struct nfp_flow_priv {\n \t/* IPv6 off */\n \tLIST_HEAD(, nfp_ipv6_addr_entry) ipv6_off_list; /**< Store ipv6 off */\n \trte_spinlock_t ipv6_off_lock; /**< Lock the ipv6 off list */\n-\t/* neighbor next */\n+\t/* Neighbor next */\n \tLIST_HEAD(, nfp_fl_tun)nn_list; /**< Store nn entry */\n \t/* Conntrack */\n \tstruct rte_hash *ct_zone_table; /**< Hash table to store ct zone entry */\ndiff --git a/drivers/net/nfp/nfp_ipsec.h b/drivers/net/nfp/nfp_ipsec.h\nindex aaebb80fe1..d7a729398a 100644\n--- a/drivers/net/nfp/nfp_ipsec.h\n+++ b/drivers/net/nfp/nfp_ipsec.h\n@@ -82,7 +82,7 @@ struct ipsec_discard_stats {\n \tuint32_t discards_alignment;             /**< Alignment error */\n \tuint32_t discards_hard_bytelimit;        /**< Hard byte Count limit */\n \tuint32_t discards_seq_num_wrap;          /**< Sequ Number wrap */\n-\tuint32_t discards_pmtu_exceeded;         /**< PMTU Limit exceeded*/\n+\tuint32_t discards_pmtu_exceeded;         /**< PMTU Limit exceeded */\n \tuint32_t discards_arw_old_seq;           /**< Anti-Replay seq small */\n \tuint32_t discards_arw_replay;            /**< Anti-Replay seq rcvd */\n \tuint32_t discards_ctrl_word;             /**< Bad SA Control word */\n@@ -99,16 +99,16 @@ struct ipsec_discard_stats {\n \n struct ipsec_get_sa_stats {\n \tuint32_t seq_lo;                         /**< Sequence Number (low 32bits) */\n-\tuint32_t seq_high;                       /**< Sequence Number 
(high 32bits)*/\n+\tuint32_t seq_high;                       /**< Sequence Number (high 32bits) */\n \tuint32_t arw_counter_lo;                 /**< Anti-replay wndw cntr */\n \tuint32_t arw_counter_high;               /**< Anti-replay wndw cntr */\n \tuint32_t arw_bitmap_lo;                  /**< Anti-replay wndw bitmap */\n \tuint32_t arw_bitmap_high;                /**< Anti-replay wndw bitmap */\n \tuint32_t spare:1;\n-\tuint32_t soft_byte_exceeded :1;          /**< Soft lifetime byte cnt exceeded*/\n-\tuint32_t hard_byte_exceeded :1;          /**< Hard lifetime byte cnt exceeded*/\n-\tuint32_t soft_time_exceeded :1;          /**< Soft lifetime time limit exceeded*/\n-\tuint32_t hard_time_exceeded :1;          /**< Hard lifetime time limit exceeded*/\n+\tuint32_t soft_byte_exceeded :1;          /**< Soft lifetime byte cnt exceeded */\n+\tuint32_t hard_byte_exceeded :1;          /**< Hard lifetime byte cnt exceeded */\n+\tuint32_t soft_time_exceeded :1;          /**< Soft lifetime time limit exceeded */\n+\tuint32_t hard_time_exceeded :1;          /**< Hard lifetime time limit exceeded */\n \tuint32_t spare1:27;\n \tuint32_t lifetime_byte_count;\n \tuint32_t pkt_count;\ndiff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c\nindex b37a338b2f..9e08e38955 100644\n--- a/drivers/net/nfp/nfp_rxtx.c\n+++ b/drivers/net/nfp/nfp_rxtx.c\n@@ -20,43 +20,22 @@\n /* Maximum number of supported VLANs in parsed form packet metadata. */\n #define NFP_META_MAX_VLANS       2\n \n-/*\n- * struct nfp_meta_parsed - Record metadata parsed from packet\n- *\n- * Parsed NFP packet metadata are recorded in this struct. 
The content is\n- * read-only after it have been recorded during parsing by nfp_net_parse_meta().\n- *\n- * @port_id: Port id value\n- * @sa_idx: IPsec SA index\n- * @hash: RSS hash value\n- * @hash_type: RSS hash type\n- * @ipsec_type: IPsec type\n- * @vlan_layer: The layers of VLAN info which are passed from nic.\n- *              Only this number of entries of the @vlan array are valid.\n- *\n- * @vlan: Holds information parses from NFP_NET_META_VLAN. The inner most vlan\n- *        starts at position 0 and only @vlan_layer entries contain valid\n- *        information.\n- *\n- *        Currently only 2 layers of vlan are supported,\n- *        vlan[0] - vlan strip info\n- *        vlan[1] - qinq strip info\n- *\n- * @vlan.offload:  Flag indicates whether VLAN is offloaded\n- * @vlan.tpid: Vlan TPID\n- * @vlan.tci: Vlan TCI including PCP + Priority + VID\n- */\n+/* Record metadata parsed from packet */\n struct nfp_meta_parsed {\n-\tuint32_t port_id;\n-\tuint32_t sa_idx;\n-\tuint32_t hash;\n-\tuint8_t hash_type;\n-\tuint8_t ipsec_type;\n-\tuint8_t vlan_layer;\n+\tuint32_t port_id;         /**< Port id value */\n+\tuint32_t sa_idx;          /**< IPsec SA index */\n+\tuint32_t hash;            /**< RSS hash value */\n+\tuint8_t hash_type;        /**< RSS hash type */\n+\tuint8_t ipsec_type;       /**< IPsec type */\n+\tuint8_t vlan_layer;       /**< The valid number of value in @vlan[] */\n+\t/**\n+\t * Holds information parses from NFP_NET_META_VLAN.\n+\t * The inner most vlan starts at position 0\n+\t */\n \tstruct {\n-\t\tuint8_t offload;\n-\t\tuint8_t tpid;\n-\t\tuint16_t tci;\n+\t\tuint8_t offload;  /**< Flag indicates whether VLAN is offloaded */\n+\t\tuint8_t tpid;     /**< Vlan TPID */\n+\t\tuint16_t tci;     /**< Vlan TCI (PCP + Priority + VID) */\n \t} vlan[NFP_META_MAX_VLANS];\n };\n \n@@ -156,7 +135,7 @@ struct nfp_ptype_parsed {\n \tuint8_t outer_l3_ptype; /**< Packet type of outer layer 3. 
*/\n };\n \n-/* set mbuf checksum flags based on RX descriptor flags */\n+/* Set mbuf checksum flags based on RX descriptor flags */\n void\n nfp_net_rx_cksum(struct nfp_net_rxq *rxq,\n \t\tstruct nfp_net_rx_desc *rxd,\n@@ -254,7 +233,7 @@ nfp_net_rx_queue_count(void *rx_queue)\n \t * descriptors and counting all four if the first has the DD\n \t * bit on. Of course, this is not accurate but can be good for\n \t * performance. But ideally that should be done in descriptors\n-\t * chunks belonging to the same cache line\n+\t * chunks belonging to the same cache line.\n \t */\n \n \twhile (count < rxq->rx_count) {\n@@ -265,7 +244,7 @@ nfp_net_rx_queue_count(void *rx_queue)\n \t\tcount++;\n \t\tidx++;\n \n-\t\t/* Wrapping? */\n+\t\t/* Wrapping */\n \t\tif ((idx) == rxq->rx_count)\n \t\t\tidx = 0;\n \t}\n@@ -273,7 +252,7 @@ nfp_net_rx_queue_count(void *rx_queue)\n \treturn count;\n }\n \n-/* nfp_net_parse_chained_meta() - Parse the chained metadata from packet */\n+/* Parse the chained metadata from packet */\n static bool\n nfp_net_parse_chained_meta(uint8_t *meta_base,\n \t\trte_be32_t meta_header,\n@@ -320,12 +299,7 @@ nfp_net_parse_chained_meta(uint8_t *meta_base,\n \treturn true;\n }\n \n-/*\n- * nfp_net_parse_meta_hash() - Set mbuf hash data based on the metadata info\n- *\n- * The RSS hash and hash-type are prepended to the packet data.\n- * Extract and decode it and set the mbuf fields.\n- */\n+/* Set mbuf hash data based on the metadata info */\n static void\n nfp_net_parse_meta_hash(const struct nfp_meta_parsed *meta,\n \t\tstruct nfp_net_rxq *rxq,\n@@ -341,7 +315,7 @@ nfp_net_parse_meta_hash(const struct nfp_meta_parsed *meta,\n }\n \n /*\n- * nfp_net_parse_single_meta() - Parse the single metadata\n+ * Parse the single metadata\n  *\n  * The RSS hash and hash-type are prepended to the packet data.\n  * Get it from metadata area.\n@@ -355,12 +329,7 @@ nfp_net_parse_single_meta(uint8_t *meta_base,\n \tmeta->hash = rte_be_to_cpu_32(*(rte_be32_t *)(meta_base + 
4));\n }\n \n-/*\n- * nfp_net_parse_meta_vlan() - Set mbuf vlan_strip data based on metadata info\n- *\n- * The VLAN info TPID and TCI are prepended to the packet data.\n- * Extract and decode it and set the mbuf fields.\n- */\n+/* Set mbuf vlan_strip data based on metadata info */\n static void\n nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta,\n \t\tstruct nfp_net_rx_desc *rxd,\n@@ -369,19 +338,14 @@ nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta,\n {\n \tstruct nfp_net_hw *hw = rxq->hw;\n \n-\t/* Skip if hardware don't support setting vlan. */\n+\t/* Skip if firmware don't support setting vlan. */\n \tif ((hw->ctrl & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) == 0)\n \t\treturn;\n \n \t/*\n-\t * The nic support the two way to send the VLAN info,\n-\t * 1. According the metadata to send the VLAN info when NFP_NET_CFG_CTRL_RXVLAN_V2\n-\t * is set\n-\t * 2. According the descriptor to sned the VLAN info when NFP_NET_CFG_CTRL_RXVLAN\n-\t * is set\n-\t *\n-\t * If the nic doesn't send the VLAN info, it is not necessary\n-\t * to do anything.\n+\t * The firmware support two ways to send the VLAN info (with priority) :\n+\t * 1. Using the metadata when NFP_NET_CFG_CTRL_RXVLAN_V2 is set,\n+\t * 2. 
Using the descriptor when NFP_NET_CFG_CTRL_RXVLAN is set.\n \t */\n \tif ((hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0) {\n \t\tif (meta->vlan_layer > 0 && meta->vlan[0].offload != 0) {\n@@ -397,7 +361,7 @@ nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta,\n }\n \n /*\n- * nfp_net_parse_meta_qinq() - Set mbuf qinq_strip data based on metadata info\n+ * Set mbuf qinq_strip data based on metadata info\n  *\n  * The out VLAN tci are prepended to the packet data.\n  * Extract and decode it and set the mbuf fields.\n@@ -469,7 +433,7 @@ nfp_net_parse_meta_ipsec(struct nfp_meta_parsed *meta,\n \t}\n }\n \n-/* nfp_net_parse_meta() - Parse the metadata from packet */\n+/* Parse the metadata from packet */\n static void\n nfp_net_parse_meta(struct nfp_net_rx_desc *rxds,\n \t\tstruct nfp_net_rxq *rxq,\n@@ -672,7 +636,7 @@ nfp_net_parse_ptype(struct nfp_net_rx_desc *rxds,\n  * doing now have any benefit at all. Again, tests with this change have not\n  * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing\n  * so looking at the implications of this type of allocation should be studied\n- * deeply\n+ * deeply.\n  */\n \n uint16_t\n@@ -695,7 +659,7 @@ nfp_net_recv_pkts(void *rx_queue,\n \tif (unlikely(rxq == NULL)) {\n \t\t/*\n \t\t * DPDK just checks the queue is lower than max queues\n-\t\t * enabled. But the queue needs to be configured\n+\t\t * enabled. But the queue needs to be configured.\n \t\t */\n \t\tPMD_RX_LOG(ERR, \"RX Bad queue\");\n \t\treturn 0;\n@@ -722,7 +686,7 @@ nfp_net_recv_pkts(void *rx_queue,\n \n \t\t/*\n \t\t * We got a packet. 
Let's alloc a new mbuf for refilling the\n-\t\t * free descriptor ring as soon as possible\n+\t\t * free descriptor ring as soon as possible.\n \t\t */\n \t\tnew_mb = rte_pktmbuf_alloc(rxq->mem_pool);\n \t\tif (unlikely(new_mb == NULL)) {\n@@ -734,7 +698,7 @@ nfp_net_recv_pkts(void *rx_queue,\n \n \t\t/*\n \t\t * Grab the mbuf and refill the descriptor with the\n-\t\t * previously allocated mbuf\n+\t\t * previously allocated mbuf.\n \t\t */\n \t\tmb = rxb->mbuf;\n \t\trxb->mbuf = new_mb;\n@@ -751,7 +715,7 @@ nfp_net_recv_pkts(void *rx_queue,\n \t\t\t/*\n \t\t\t * This should not happen and the user has the\n \t\t\t * responsibility of avoiding it. But we have\n-\t\t\t * to give some info about the error\n+\t\t\t * to give some info about the error.\n \t\t\t */\n \t\t\tPMD_RX_LOG(ERR, \"mbuf overflow likely due to the RX offset.\");\n \t\t\trte_pktmbuf_free(mb);\n@@ -796,7 +760,7 @@ nfp_net_recv_pkts(void *rx_queue,\n \t\tnb_hold++;\n \n \t\trxq->rd_p++;\n-\t\tif (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/\n+\t\tif (unlikely(rxq->rd_p == rxq->rx_count)) /* Wrapping */\n \t\t\trxq->rd_p = 0;\n \t}\n \n@@ -810,7 +774,7 @@ nfp_net_recv_pkts(void *rx_queue,\n \n \t/*\n \t * FL descriptors needs to be written before incrementing the\n-\t * FL queue WR pointer\n+\t * FL queue WR pointer.\n \t */\n \trte_wmb();\n \tif (nb_hold > rxq->rx_free_thresh) {\n@@ -891,7 +855,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,\n \n \t/*\n \t * Free memory prior to re-allocation if needed. 
This is the case after\n-\t * calling nfp_net_stop\n+\t * calling @nfp_net_stop().\n \t */\n \tif (dev->data->rx_queues[queue_idx] != NULL) {\n \t\tnfp_net_rx_queue_release(dev, queue_idx);\n@@ -913,7 +877,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,\n \n \t/*\n \t * Tracking mbuf size for detecting a potential mbuf overflow due to\n-\t * RX offset\n+\t * RX offset.\n \t */\n \trxq->mem_pool = mp;\n \trxq->mbuf_size = rxq->mem_pool->elt_size;\n@@ -944,7 +908,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->dma = (uint64_t)tz->iova;\n \trxq->rxds = tz->addr;\n \n-\t/* mbuf pointers array for referencing mbufs linked to RX descriptors */\n+\t/* Mbuf pointers array for referencing mbufs linked to RX descriptors */\n \trxq->rxbufs = rte_zmalloc_socket(\"rxq->rxbufs\",\n \t\t\tsizeof(*rxq->rxbufs) * nb_desc, RTE_CACHE_LINE_SIZE,\n \t\t\tsocket_id);\n@@ -960,7 +924,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,\n \n \t/*\n \t * Telling the HW about the physical address of the RX ring and number\n-\t * of descriptors in log2 format\n+\t * of descriptors in log2 format.\n \t */\n \tnn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);\n \tnn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));\n@@ -968,11 +932,14 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n-/*\n- * nfp_net_tx_free_bufs - Check for descriptors with a complete\n- * status\n- * @txq: TX queue to work with\n- * Returns number of descriptors freed\n+/**\n+ * Check for descriptors with a complete status\n+ *\n+ * @param txq\n+ *   TX queue to work with\n+ *\n+ * @return\n+ *   Number of descriptors freed\n  */\n uint32_t\n nfp_net_tx_free_bufs(struct nfp_net_txq *txq)\ndiff --git a/drivers/net/nfp/nfp_rxtx.h b/drivers/net/nfp/nfp_rxtx.h\nindex 98ef6c3d93..899cc42c97 100644\n--- a/drivers/net/nfp/nfp_rxtx.h\n+++ b/drivers/net/nfp/nfp_rxtx.h\n@@ -19,21 +19,11 @@\n /* Maximum number of NFP packet metadata fields. 
*/\n #define NFP_META_MAX_FIELDS      8\n \n-/*\n- * struct nfp_net_meta_raw - Raw memory representation of packet metadata\n- *\n- * Describe the raw metadata format, useful when preparing metadata for a\n- * transmission mbuf.\n- *\n- * @header: NFD3 or NFDk field type header (see format in nfp.rst)\n- * @data: Array of each fields data member\n- * @length: Keep track of number of valid fields in @header and data. Not part\n- *          of the raw metadata.\n- */\n+/* Describe the raw metadata format. */\n struct nfp_net_meta_raw {\n-\tuint32_t header;\n-\tuint32_t data[NFP_META_MAX_FIELDS];\n-\tuint8_t length;\n+\tuint32_t header; /**< Field type header (see format in nfp.rst) */\n+\tuint32_t data[NFP_META_MAX_FIELDS]; /**< Array of each fields data member */\n+\tuint8_t length; /**< Number of valid fields in @header */\n };\n \n /* Descriptor alignment */\n",
    "prefixes": [
        "v3",
        "06/11"
    ]
}