get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch.
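
The request and response below were captured from the public DPDK instance. As a rough sketch of driving these methods programmatically, the Python snippet that follows uses the requests library; the token value, the example state, and the assumption that writes need a maintainer-level API token sent as an "Authorization: Token ..." header are illustrative, not taken from this page.

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 66384
TOKEN = "REPLACE-WITH-YOUR-API-TOKEN"  # placeholder, not a value from this page

# get: read the patch detail (no authentication needed for reads).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
detail = resp.json()
print(detail["name"], detail["state"])

# patch: partial update, e.g. changing only the state field.
# Assumes the token belongs to a maintainer/delegate of the project.
updated = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
print(updated.status_code)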

GET /api/patches/66384/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 66384,
    "url": "https://patches.dpdk.org/api/patches/66384/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1583742247-370386-6-git-send-email-alvinx.zhang@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1583742247-370386-6-git-send-email-alvinx.zhang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1583742247-370386-6-git-send-email-alvinx.zhang@intel.com",
    "date": "2020-03-09T08:23:58",
    "name": "[v1,06/15] net/igc: implement status API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "25e172c095fc0d59a437411d0f89d75678e3a91b",
    "submitter": {
        "id": 1398,
        "url": "https://patches.dpdk.org/api/people/1398/?format=api",
        "name": "Alvin Zhang",
        "email": "alvinx.zhang@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1583742247-370386-6-git-send-email-alvinx.zhang@intel.com/mbox/",
    "series": [
        {
            "id": 8831,
            "url": "https://patches.dpdk.org/api/series/8831/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=8831",
            "date": "2020-03-09T08:23:53",
            "name": "[v1,01/15] net/igc: add igc PMD",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/8831/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/66384/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/66384/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 0D007A052E;\n\tMon,  9 Mar 2020 09:29:35 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 4F6121C067;\n\tMon,  9 Mar 2020 09:29:02 +0100 (CET)",
            "from mga18.intel.com (mga18.intel.com [134.134.136.126])\n by dpdk.org (Postfix) with ESMTP id 3D90B1C037\n for <dev@dpdk.org>; Mon,  9 Mar 2020 09:29:00 +0100 (CET)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n by orsmga106.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n 09 Mar 2020 01:28:59 -0700",
            "from unknown (HELO dpdk-zhangalvin-dev.sh.intel.com)\n ([10.240.179.50])\n by orsmga002.jf.intel.com with ESMTP; 09 Mar 2020 01:28:57 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.70,518,1574150400\"; d=\"scan'208\";a=\"260350719\"",
        "From": "alvinx.zhang@intel.com",
        "To": "dev@dpdk.org",
        "Cc": "haiyue.wang@intel.com, xiaolong.ye@intel.com, qi.z.zhang@intel.com,\n beilei.xing@intel.com, Alvin Zhang <alvinx.zhang@intel.com>",
        "Date": "Mon,  9 Mar 2020 16:23:58 +0800",
        "Message-Id": "<1583742247-370386-6-git-send-email-alvinx.zhang@intel.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1583742247-370386-1-git-send-email-alvinx.zhang@intel.com>",
        "References": "<1583742247-370386-1-git-send-email-alvinx.zhang@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v1 06/15] net/igc: implement status API",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Alvin Zhang <alvinx.zhang@intel.com>\n\nImplement base status, extend status and per queue status API.\n\nBelow ops are added:\nstats_get\nxstats_get\nxstats_get_by_id\nxstats_get_names_by_id\nxstats_get_names\nstats_reset\nxstats_reset\nqueue_stats_mapping_set\n\nSigned-off-by: Alvin Zhang <alvinx.zhang@intel.com>\n---\n doc/guides/nics/features/igc.ini |   3 +\n drivers/net/igc/igc_ethdev.c     | 582 ++++++++++++++++++++++++++++++++++++++-\n drivers/net/igc/igc_ethdev.h     |  31 ++-\n 3 files changed, 614 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/igc.ini b/doc/guides/nics/features/igc.ini\nindex e49b5e7..9ba817d 100644\n--- a/doc/guides/nics/features/igc.ini\n+++ b/doc/guides/nics/features/igc.ini\n@@ -22,6 +22,9 @@ RSS hash             = Y\n CRC offload          = Y\n L3 checksum offload  = Y\n L4 checksum offload  = Y\n+Basic stats          = Y\n+Extended stats       = Y\n+Stats per queue      = Y\n Linux UIO            = Y\n Linux VFIO           = Y\n x86-64               = Y\ndiff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c\nindex 589bfb2..6f03ad1 100644\n--- a/drivers/net/igc/igc_ethdev.c\n+++ b/drivers/net/igc/igc_ethdev.c\n@@ -2,10 +2,12 @@\n  * Copyright(c) 2010-2020 Intel Corporation\n  */\n \n+#include <rte_string_fns.h>\n #include <rte_pci.h>\n #include <rte_bus_pci.h>\n #include <rte_ethdev_driver.h>\n #include <rte_ethdev_pci.h>\n+#include <rte_alarm.h>\n \n #include \"igc_logs.h\"\n #include \"igc_txrx.h\"\n@@ -41,6 +43,28 @@\n /* MSI-X other interrupt vector */\n #define IGC_MSIX_OTHER_INTR_VEC\t\t0\n \n+/* Per Queue Good Packets Received Count */\n+#define IGC_PQGPRC(idx)\t\t(0x10010 + 0x100 * (idx))\n+/* Per Queue Good Octets Received Count */\n+#define IGC_PQGORC(idx)\t\t(0x10018 + 0x100 * (idx))\n+/* Per Queue Good Octets Transmitted Count */\n+#define IGC_PQGOTC(idx)\t\t(0x10034 + 0x100 * (idx))\n+/* Per Queue Multicast Packets Received Count */\n+#define IGC_PQMPRC(idx)\t\t(0x10038 + 0x100 * (idx))\n+/* Transmit Queue Drop Packet Count */\n+#define IGC_TQDPC(idx)\t\t(0xe030 + 0x40 * (idx))\n+\n+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n+#define U32_0_IN_U64\t\t0\t/* lower bytes of u64 */\n+#define U32_1_IN_U64\t\t1\t/* higher bytes of u64 */\n+#else\n+#define U32_0_IN_U64\t\t1\n+#define U32_1_IN_U64\t\t0\n+#endif\n+\n+#define IGC_ALARM_INTERVAL\t8000000u\n+/* us, about 13.6s some per-queue registers will wrap around back to 0. 
*/\n+\n static const struct rte_eth_desc_lim rx_desc_lim = {\n \t.nb_max = IGC_MAX_RXD,\n \t.nb_min = IGC_MIN_RXD,\n@@ -64,6 +88,76 @@\n \t{ .vendor_id = 0, /* sentinel */ },\n };\n \n+/* store statistics names and its offset in stats structure */\n+struct rte_igc_xstats_name_off {\n+\tchar name[RTE_ETH_XSTATS_NAME_SIZE];\n+\tunsigned int offset;\n+};\n+\n+static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {\n+\t{\"rx_crc_errors\", offsetof(struct igc_hw_stats, crcerrs)},\n+\t{\"rx_align_errors\", offsetof(struct igc_hw_stats, algnerrc)},\n+\t{\"rx_errors\", offsetof(struct igc_hw_stats, rxerrc)},\n+\t{\"rx_missed_packets\", offsetof(struct igc_hw_stats, mpc)},\n+\t{\"tx_single_collision_packets\", offsetof(struct igc_hw_stats, scc)},\n+\t{\"tx_multiple_collision_packets\", offsetof(struct igc_hw_stats, mcc)},\n+\t{\"tx_excessive_collision_packets\", offsetof(struct igc_hw_stats,\n+\t\tecol)},\n+\t{\"tx_late_collisions\", offsetof(struct igc_hw_stats, latecol)},\n+\t{\"tx_total_collisions\", offsetof(struct igc_hw_stats, colc)},\n+\t{\"tx_deferred_packets\", offsetof(struct igc_hw_stats, dc)},\n+\t{\"tx_no_carrier_sense_packets\", offsetof(struct igc_hw_stats, tncrs)},\n+\t{\"tx_discarded_packets\", offsetof(struct igc_hw_stats, htdpmc)},\n+\t{\"rx_length_errors\", offsetof(struct igc_hw_stats, rlec)},\n+\t{\"rx_xon_packets\", offsetof(struct igc_hw_stats, xonrxc)},\n+\t{\"tx_xon_packets\", offsetof(struct igc_hw_stats, xontxc)},\n+\t{\"rx_xoff_packets\", offsetof(struct igc_hw_stats, xoffrxc)},\n+\t{\"tx_xoff_packets\", offsetof(struct igc_hw_stats, xofftxc)},\n+\t{\"rx_flow_control_unsupported_packets\", offsetof(struct igc_hw_stats,\n+\t\tfcruc)},\n+\t{\"rx_size_64_packets\", offsetof(struct igc_hw_stats, prc64)},\n+\t{\"rx_size_65_to_127_packets\", offsetof(struct igc_hw_stats, prc127)},\n+\t{\"rx_size_128_to_255_packets\", offsetof(struct igc_hw_stats, prc255)},\n+\t{\"rx_size_256_to_511_packets\", offsetof(struct igc_hw_stats, prc511)},\n+\t{\"rx_size_512_to_1023_packets\", offsetof(struct igc_hw_stats,\n+\t\tprc1023)},\n+\t{\"rx_size_1024_to_max_packets\", offsetof(struct igc_hw_stats,\n+\t\tprc1522)},\n+\t{\"rx_broadcast_packets\", offsetof(struct igc_hw_stats, bprc)},\n+\t{\"rx_multicast_packets\", offsetof(struct igc_hw_stats, mprc)},\n+\t{\"rx_undersize_errors\", offsetof(struct igc_hw_stats, ruc)},\n+\t{\"rx_fragment_errors\", offsetof(struct igc_hw_stats, rfc)},\n+\t{\"rx_oversize_errors\", offsetof(struct igc_hw_stats, roc)},\n+\t{\"rx_jabber_errors\", offsetof(struct igc_hw_stats, rjc)},\n+\t{\"rx_no_buffers\", offsetof(struct igc_hw_stats, rnbc)},\n+\t{\"rx_management_packets\", offsetof(struct igc_hw_stats, mgprc)},\n+\t{\"rx_management_dropped\", offsetof(struct igc_hw_stats, mgpdc)},\n+\t{\"tx_management_packets\", offsetof(struct igc_hw_stats, mgptc)},\n+\t{\"rx_total_packets\", offsetof(struct igc_hw_stats, tpr)},\n+\t{\"tx_total_packets\", offsetof(struct igc_hw_stats, tpt)},\n+\t{\"rx_total_bytes\", offsetof(struct igc_hw_stats, tor)},\n+\t{\"tx_total_bytes\", offsetof(struct igc_hw_stats, tot)},\n+\t{\"tx_size_64_packets\", offsetof(struct igc_hw_stats, ptc64)},\n+\t{\"tx_size_65_to_127_packets\", offsetof(struct igc_hw_stats, ptc127)},\n+\t{\"tx_size_128_to_255_packets\", offsetof(struct igc_hw_stats, ptc255)},\n+\t{\"tx_size_256_to_511_packets\", offsetof(struct igc_hw_stats, ptc511)},\n+\t{\"tx_size_512_to_1023_packets\", offsetof(struct igc_hw_stats,\n+\t\tptc1023)},\n+\t{\"tx_size_1023_to_max_packets\", offsetof(struct 
igc_hw_stats,\n+\t\tptc1522)},\n+\t{\"tx_multicast_packets\", offsetof(struct igc_hw_stats, mptc)},\n+\t{\"tx_broadcast_packets\", offsetof(struct igc_hw_stats, bptc)},\n+\t{\"tx_tso_packets\", offsetof(struct igc_hw_stats, tsctc)},\n+\t{\"rx_sent_to_host_packets\", offsetof(struct igc_hw_stats, rpthc)},\n+\t{\"tx_sent_by_host_packets\", offsetof(struct igc_hw_stats, hgptc)},\n+\t{\"interrupt_assert_count\", offsetof(struct igc_hw_stats, iac)},\n+\t{\"rx_descriptor_lower_threshold\",\n+\t\toffsetof(struct igc_hw_stats, icrxdmtc)},\n+};\n+\n+#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \\\n+\t\tsizeof(rte_igc_stats_strings[0]))\n+\n static int eth_igc_configure(struct rte_eth_dev *dev);\n static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);\n static void eth_igc_stop(struct rte_eth_dev *dev);\n@@ -92,6 +186,23 @@ static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);\n static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);\n static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);\n+static int eth_igc_stats_get(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_stats *rte_stats);\n+static int eth_igc_xstats_get(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_xstat *xstats, unsigned int n);\n+static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,\n+\t\t\t\tconst uint64_t *ids,\n+\t\t\t\tuint64_t *values, unsigned int n);\n+static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,\n+\t\t\t\tstruct rte_eth_xstat_name *xstats_names,\n+\t\t\t\tunsigned int size);\n+static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,\n+\t\tstruct rte_eth_xstat_name *xstats_names, const uint64_t *ids,\n+\t\tunsigned int limit);\n+static int eth_igc_xstats_reset(struct rte_eth_dev *dev);\n+static int\n+eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,\n+\tuint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);\n \n static const struct eth_dev_ops eth_igc_ops = {\n \t.dev_configure\t\t= eth_igc_configure,\n@@ -128,6 +239,14 @@ static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n \t.tx_done_cleanup\t= eth_igc_tx_done_cleanup,\n \t.rxq_info_get\t\t= eth_igc_rxq_info_get,\n \t.txq_info_get\t\t= eth_igc_txq_info_get,\n+\t.stats_get\t\t= eth_igc_stats_get,\n+\t.xstats_get\t\t= eth_igc_xstats_get,\n+\t.xstats_get_by_id\t= eth_igc_xstats_get_by_id,\n+\t.xstats_get_names_by_id\t= eth_igc_xstats_get_names_by_id,\n+\t.xstats_get_names\t= eth_igc_xstats_get_names,\n+\t.stats_reset\t\t= eth_igc_xstats_reset,\n+\t.xstats_reset\t\t= eth_igc_xstats_reset,\n+\t.queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,\n };\n \n /*\n@@ -393,6 +512,22 @@ static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n \teth_igc_interrupt_action(dev);\n }\n \n+static void igc_read_queue_stats_register(struct rte_eth_dev *dev);\n+\n+/*\n+ * Update the queue status every IGC_ALARM_INTERVAL time.\n+ * @param\n+ *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ */\n+static void\n+igc_update_queue_stats_handler(void *param)\n+{\n+\tstruct rte_eth_dev *dev = param;\n+\tigc_read_queue_stats_register(dev);\n+\trte_eal_alarm_set(IGC_ALARM_INTERVAL,\n+\t\t\tigc_update_queue_stats_handler, dev);\n+}\n+\n /*\n  * rx,tx enable/disable\n  */\n@@ -446,6 +581,8 @@ static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n \n \tigc_intr_other_disable(dev);\n \n+\trte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);\n+\n \t/* disable intr eventfd mapping */\n 
\trte_intr_disable(intr_handle);\n \n@@ -749,6 +886,9 @@ static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n \t/* enable uio/vfio intr/eventfd mapping */\n \trte_intr_enable(intr_handle);\n \n+\trte_eal_alarm_set(IGC_ALARM_INTERVAL,\n+\t\t\tigc_update_queue_stats_handler, dev);\n+\n \t/* resume enabled intr since hw reset */\n \tigc_intr_other_enable(dev);\n \n@@ -890,7 +1030,7 @@ static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n \tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n \tstruct igc_adapter *igc = IGC_DEV_PRIVATE(dev);\n \tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n-\tint error = 0;\n+\tint i, error = 0;\n \n \tPMD_INIT_FUNC_TRACE();\n \tdev->dev_ops = &eth_igc_ops;\n@@ -1016,6 +1156,11 @@ static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n \t/* enable support intr */\n \tigc_intr_other_enable(dev);\n \n+\t/* initiate queue status */\n+\tfor (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {\n+\t\tigc->txq_stats_map[i] = -1;\n+\t\tigc->rxq_stats_map[i] = -1;\n+\t}\n \treturn 0;\n \n err_late:\n@@ -1327,6 +1472,441 @@ static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n+/*\n+ * Read hardware registers\n+ */\n+static void\n+igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)\n+{\n+\tint pause_frames;\n+\n+\tuint64_t old_gprc  = stats->gprc;\n+\tuint64_t old_gptc  = stats->gptc;\n+\tuint64_t old_tpr   = stats->tpr;\n+\tuint64_t old_tpt   = stats->tpt;\n+\tuint64_t old_rpthc = stats->rpthc;\n+\tuint64_t old_hgptc = stats->hgptc;\n+\n+\tstats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);\n+\tstats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);\n+\tstats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);\n+\tstats->mpc += IGC_READ_REG(hw, IGC_MPC);\n+\tstats->scc += IGC_READ_REG(hw, IGC_SCC);\n+\tstats->ecol += IGC_READ_REG(hw, IGC_ECOL);\n+\n+\tstats->mcc += IGC_READ_REG(hw, IGC_MCC);\n+\tstats->latecol += IGC_READ_REG(hw, IGC_LATECOL);\n+\tstats->colc += IGC_READ_REG(hw, IGC_COLC);\n+\n+\tstats->dc += IGC_READ_REG(hw, IGC_DC);\n+\tstats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);\n+\tstats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);\n+\tstats->rlec += IGC_READ_REG(hw, IGC_RLEC);\n+\tstats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);\n+\tstats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);\n+\n+\t/*\n+\t * For watchdog management we need to know if we have been\n+\t * paused during the last interval, so capture that here.\n+\t */\n+\tpause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);\n+\tstats->xoffrxc += pause_frames;\n+\tstats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);\n+\tstats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);\n+\tstats->prc64 += IGC_READ_REG(hw, IGC_PRC64);\n+\tstats->prc127 += IGC_READ_REG(hw, IGC_PRC127);\n+\tstats->prc255 += IGC_READ_REG(hw, IGC_PRC255);\n+\tstats->prc511 += IGC_READ_REG(hw, IGC_PRC511);\n+\tstats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);\n+\tstats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);\n+\tstats->gprc += IGC_READ_REG(hw, IGC_GPRC);\n+\tstats->bprc += IGC_READ_REG(hw, IGC_BPRC);\n+\tstats->mprc += IGC_READ_REG(hw, IGC_MPRC);\n+\tstats->gptc += IGC_READ_REG(hw, IGC_GPTC);\n+\n+\t/* For the 64-bit byte counters the low dword must be read first. 
*/\n+\t/* Both registers clear on the read of the high dword */\n+\n+\t/* Workaround CRC bytes included in size, take away 4 bytes/packet */\n+\tstats->gorc += IGC_READ_REG(hw, IGC_GORCL);\n+\tstats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);\n+\tstats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;\n+\tstats->gotc += IGC_READ_REG(hw, IGC_GOTCL);\n+\tstats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);\n+\tstats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;\n+\n+\tstats->rnbc += IGC_READ_REG(hw, IGC_RNBC);\n+\tstats->ruc += IGC_READ_REG(hw, IGC_RUC);\n+\tstats->rfc += IGC_READ_REG(hw, IGC_RFC);\n+\tstats->roc += IGC_READ_REG(hw, IGC_ROC);\n+\tstats->rjc += IGC_READ_REG(hw, IGC_RJC);\n+\n+\tstats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);\n+\tstats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);\n+\tstats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);\n+\tstats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);\n+\tstats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);\n+\tstats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);\n+\tstats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);\n+\n+\tstats->tpr += IGC_READ_REG(hw, IGC_TPR);\n+\tstats->tpt += IGC_READ_REG(hw, IGC_TPT);\n+\n+\tstats->tor += IGC_READ_REG(hw, IGC_TORL);\n+\tstats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);\n+\tstats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;\n+\tstats->tot += IGC_READ_REG(hw, IGC_TOTL);\n+\tstats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);\n+\tstats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;\n+\n+\tstats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);\n+\tstats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);\n+\tstats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);\n+\tstats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);\n+\tstats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);\n+\tstats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);\n+\tstats->mptc += IGC_READ_REG(hw, IGC_MPTC);\n+\tstats->bptc += IGC_READ_REG(hw, IGC_BPTC);\n+\tstats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);\n+\n+\tstats->iac += IGC_READ_REG(hw, IGC_IAC);\n+\tstats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);\n+\tstats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);\n+\tstats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);\n+\n+\t/* Host to Card Statistics */\n+\tstats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);\n+\tstats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);\n+\tstats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;\n+\tstats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);\n+\tstats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);\n+\tstats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;\n+\tstats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);\n+}\n+\n+/*\n+ * Write 0 to all queue status registers\n+ */\n+static void\n+igc_reset_queue_stats_register(struct igc_hw *hw)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {\n+\t\tIGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_PQGORC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_RQDPC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_TQDPC(i), 0);\n+\t}\n+}\n+\n+/*\n+ * Read all hardware queue status registers\n+ */\n+static void\n+igc_read_queue_stats_register(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tstruct igc_hw_queue_stats *queue_stats =\n+\t\t\t\tIGC_DEV_PRIVATE_QUEUE_STATS(dev);\n+\tint i;\n+\n+\t/*\n+\t * This register is not cleared on read. 
Furthermore, the register wraps\n+\t * around back to 0x00000000 on the next increment when reaching a value\n+\t * of 0xFFFFFFFF and then continues normal count operation.\n+\t */\n+\tfor (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {\n+\t\tunion {\n+\t\t\tu64 ddword;\n+\t\t\tu32 dword[2];\n+\t\t} value;\n+\t\tu32 tmp;\n+\n+\t\t/*\n+\t\t * Read the register first, if the value is smaller than that\n+\t\t * previous read, that mean the register has been overflowed,\n+\t\t * then we add the high 4 bytes by 1 and replace the low 4\n+\t\t * bytes by the new value.\n+\t\t */\n+\t\ttmp = IGC_READ_REG(hw, IGC_PQGPRC(i));\n+\t\tvalue.ddword = queue_stats->pqgprc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->pqgprc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_PQGPTC(i));\n+\t\tvalue.ddword = queue_stats->pqgptc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->pqgptc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_PQGORC(i));\n+\t\tvalue.ddword = queue_stats->pqgorc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->pqgorc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_PQGOTC(i));\n+\t\tvalue.ddword = queue_stats->pqgotc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->pqgotc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_PQMPRC(i));\n+\t\tvalue.ddword = queue_stats->pqmprc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->pqmprc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_RQDPC(i));\n+\t\tvalue.ddword = queue_stats->rqdpc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->rqdpc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_TQDPC(i));\n+\t\tvalue.ddword = queue_stats->tqdpc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->tqdpc[i] = value.ddword;\n+\t}\n+}\n+\n+static int\n+eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)\n+{\n+\tstruct igc_adapter *igc = IGC_DEV_PRIVATE(dev);\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tstruct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);\n+\tstruct igc_hw_queue_stats *queue_stats =\n+\t\t\tIGC_DEV_PRIVATE_QUEUE_STATS(dev);\n+\tint i;\n+\n+\t/*\n+\t * Cancel status handler since it will read the queue status registers\n+\t */\n+\trte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);\n+\n+\t/* Read status register */\n+\tigc_read_queue_stats_register(dev);\n+\tigc_read_stats_registers(hw, stats);\n+\n+\tif (rte_stats == NULL) {\n+\t\t/* Restart queue status handler */\n+\t\trte_eal_alarm_set(IGC_ALARM_INTERVAL,\n+\t\t\t\tigc_update_queue_stats_handler, dev);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Rx Errors */\n+\trte_stats->imissed = stats->mpc;\n+\trte_stats->ierrors = stats->crcerrs +\n+\t\t\tstats->rlec + stats->ruc + stats->roc +\n+\t\t\tstats->rxerrc + stats->algnerrc;\n+\n+\t/* Tx Errors */\n+\trte_stats->oerrors = stats->ecol + stats->latecol;\n+\n+\trte_stats->ipackets = stats->gprc;\n+\trte_stats->opackets = 
stats->gptc;\n+\trte_stats->ibytes   = stats->gorc;\n+\trte_stats->obytes   = stats->gotc;\n+\n+\t/* Get per-queue statuses */\n+\tfor (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {\n+\t\t/* GET TX queue statuses */\n+\t\tint map_id = igc->txq_stats_map[i];\n+\t\tif (map_id >= 0) {\n+\t\t\trte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];\n+\t\t\trte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];\n+\t\t}\n+\t\t/* Get RX queue statuses */\n+\t\tmap_id = igc->rxq_stats_map[i];\n+\t\tif (map_id >= 0) {\n+\t\t\trte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];\n+\t\t\trte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];\n+\t\t\trte_stats->q_errors[map_id] += queue_stats->rqdpc[i];\n+\t\t}\n+\t}\n+\n+\t/* Restart queue status handler */\n+\trte_eal_alarm_set(IGC_ALARM_INTERVAL,\n+\t\t\tigc_update_queue_stats_handler, dev);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,\n+\t\t   unsigned int n)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tstruct igc_hw_stats *hw_stats =\n+\t\t\tIGC_DEV_PRIVATE_STATS(dev);\n+\tunsigned int i;\n+\n+\tigc_read_stats_registers(hw, hw_stats);\n+\n+\tif (n < IGC_NB_XSTATS)\n+\t\treturn IGC_NB_XSTATS;\n+\n+\t/* If this is a reset xstats is NULL, and we have cleared the\n+\t * registers by reading them.\n+\t */\n+\tif (!xstats)\n+\t\treturn 0;\n+\n+\t/* Extended stats */\n+\tfor (i = 0; i < IGC_NB_XSTATS; i++) {\n+\t\txstats[i].id = i;\n+\t\txstats[i].value = *(uint64_t *)(((char *)hw_stats) +\n+\t\t\trte_igc_stats_strings[i].offset);\n+\t}\n+\n+\treturn IGC_NB_XSTATS;\n+}\n+\n+static int\n+eth_igc_xstats_reset(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tstruct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);\n+\tstruct igc_hw_queue_stats *queue_stats =\n+\t\t\tIGC_DEV_PRIVATE_QUEUE_STATS(dev);\n+\n+\t/* Cancel queue status handler for avoid conflict */\n+\trte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);\n+\n+\t/* HW registers are cleared on read */\n+\tigc_reset_queue_stats_register(hw);\n+\tigc_read_stats_registers(hw, hw_stats);\n+\n+\t/* Reset software totals */\n+\tmemset(hw_stats, 0, sizeof(*hw_stats));\n+\tmemset(queue_stats, 0, sizeof(*queue_stats));\n+\n+\t/* Restart the queue status handler */\n+\trte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,\n+\t\t\tdev);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,\n+\tstruct rte_eth_xstat_name *xstats_names, unsigned int size)\n+{\n+\tunsigned int i;\n+\n+\tif (xstats_names == NULL)\n+\t\treturn IGC_NB_XSTATS;\n+\n+\tif (size < IGC_NB_XSTATS) {\n+\t\tPMD_DRV_LOG(ERR, \"not enough buffers!\");\n+\t\treturn IGC_NB_XSTATS;\n+\t}\n+\n+\tfor (i = 0; i < IGC_NB_XSTATS; i++)\n+\t\tstrlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,\n+\t\t\tsizeof(xstats_names[i].name));\n+\n+\treturn IGC_NB_XSTATS;\n+}\n+\n+static int\n+eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,\n+\t\tstruct rte_eth_xstat_name *xstats_names, const uint64_t *ids,\n+\t\tunsigned int limit)\n+{\n+\tunsigned int i;\n+\n+\tif (!ids)\n+\t\treturn eth_igc_xstats_get_names(dev, xstats_names, limit);\n+\n+\tfor (i = 0; i < limit; i++) {\n+\t\tif (ids[i] >= IGC_NB_XSTATS) {\n+\t\t\tPMD_DRV_LOG(ERR, \"id value isn't valid\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\tstrlcpy(xstats_names[i].name,\n+\t\t\trte_igc_stats_strings[i].name,\n+\t\t\tsizeof(xstats_names[i].name));\n+\t}\n+\treturn limit;\n+}\n+\n+static 
int\n+eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,\n+\t\tuint64_t *values, unsigned int n)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tstruct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);\n+\tunsigned int i;\n+\n+\tigc_read_stats_registers(hw, hw_stats);\n+\n+\tif (!ids) {\n+\t\tif (n < IGC_NB_XSTATS)\n+\t\t\treturn IGC_NB_XSTATS;\n+\n+\t\t/* If this is a reset xstats is NULL, and we have cleared the\n+\t\t * registers by reading them.\n+\t\t */\n+\t\tif (!values)\n+\t\t\treturn 0;\n+\n+\t\t/* Extended stats */\n+\t\tfor (i = 0; i < IGC_NB_XSTATS; i++)\n+\t\t\tvalues[i] = *(uint64_t *)(((char *)hw_stats) +\n+\t\t\t\t\trte_igc_stats_strings[i].offset);\n+\n+\t\treturn IGC_NB_XSTATS;\n+\n+\t} else {\n+\t\tfor (i = 0; i < n; i++) {\n+\t\t\tif (ids[i] >= IGC_NB_XSTATS) {\n+\t\t\t\tPMD_DRV_LOG(ERR, \"id value isn't valid\");\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\t\t\tvalues[i] = *(uint64_t *)(((char *)hw_stats) +\n+\t\t\t\t\trte_igc_stats_strings[ids[i]].offset);\n+\t\t}\n+\t\treturn n;\n+\t}\n+}\n+\n+static int\n+eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,\n+\t\tuint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)\n+{\n+\tstruct igc_adapter *igc = IGC_DEV_PRIVATE(dev);\n+\n+\t/* check queue id is valid */\n+\tif (queue_id >= IGC_QUEUE_PAIRS_NUM) {\n+\t\tPMD_DRV_LOG(ERR, \"queue id(%u) error, max is %u\",\n+\t\t\tqueue_id, IGC_QUEUE_PAIRS_NUM - 1);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* store the mapping status id */\n+\tif (is_rx)\n+\t\tigc->rxq_stats_map[queue_id] = stat_idx;\n+\telse\n+\t\tigc->txq_stats_map[queue_id] = stat_idx;\n+\n+\treturn 0;\n+}\n+\n static int\n eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n \tstruct rte_pci_device *pci_dev)\ndiff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h\nindex 5e7102f..20738df 100644\n--- a/drivers/net/igc/igc_ethdev.h\n+++ b/drivers/net/igc/igc_ethdev.h\n@@ -90,11 +90,34 @@ struct igc_interrupt {\n \tuint8_t  bytes[4];\n };\n \n+/* Structure to per-queue statics */\n+struct igc_hw_queue_stats {\n+\tu64\tpqgprc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per queue good packets received count */\n+\tu64\tpqgptc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per queue good packets transmitted count */\n+\tu64\tpqgorc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per queue good octets received count */\n+\tu64\tpqgotc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per queue good octets transmitted count */\n+\tu64\tpqmprc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per queue multicast packets received count */\n+\tu64\trqdpc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per receive queue drop packet count */\n+\tu64\ttqdpc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per transmit queue drop packet count */\n+};\n+\n /*\n  * Structure to store private data for each driver instance (for each port).\n  */\n struct igc_adapter {\n-\tstruct igc_hw\thw;\n+\tstruct igc_hw\t\thw;\n+\tstruct igc_hw_stats\tstats;\n+\tstruct igc_hw_queue_stats queue_stats;\n+\tint16_t txq_stats_map[IGC_QUEUE_PAIRS_NUM];\n+\tint16_t rxq_stats_map[IGC_QUEUE_PAIRS_NUM];\n+\n \tstruct igc_interrupt  intr;\n \tbool\t\tstopped;\n };\n@@ -104,6 +127,12 @@ struct igc_adapter {\n #define IGC_DEV_PRIVATE_HW(_dev) \\\n \t(&((struct igc_adapter *)(_dev)->data->dev_private)->hw)\n \n+#define IGC_DEV_PRIVATE_STATS(_dev) \\\n+\t(&((struct igc_adapter *)(_dev)->data->dev_private)->stats)\n+\n+#define IGC_DEV_PRIVATE_QUEUE_STATS(_dev) \\\n+\t(&((struct igc_adapter *)(_dev)->data->dev_private)->queue_stats)\n+\n #define IGC_DEV_PRIVATE_INTR(_dev) \\\n \t(&((struct igc_adapter 
*)(_dev)->data->dev_private)->intr)\n \n",
    "prefixes": [
        "v1",
        "06/15"
    ]
}
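
For completeness, the hyperlinked fields in the body above ("mbox", "checks", "comments") can be followed with plain GET requests. The sketch below downloads the raw mbox (suitable for git am) and prints the individual CI results behind the aggregate "check" value; the local filename and the "context"/"state" fields on check objects are assumed from the standard Patchwork schema rather than shown on this page.

import requests

detail = requests.get("https://patches.dpdk.org/api/patches/66384/").json()

# "mbox" is the raw patch in mbox form, ready to feed to git am.
mbox = requests.get(detail["mbox"])
mbox.raise_for_status()
with open("net-igc-implement-status-API.patch", "wb") as f:  # arbitrary local name
    f.write(mbox.content)

# "checks" lists the per-CI results; "check" above is their aggregate.
for check in requests.get(detail["checks"]).json():
    print(check.get("context"), check.get("state"))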