From patchwork Wed Sep 8 08:37:34 2021
X-Patchwork-Submitter: Jiawen Wu
X-Patchwork-Id: 98289
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Jiawen Wu
To: dev@dpdk.org
Cc: Jiawen Wu
Date: Wed, 8 Sep 2021 16:37:34 +0800
Message-Id: <20210908083758.312055-9-jiawenwu@trustnetic.com>
In-Reply-To: <20210908083758.312055-1-jiawenwu@trustnetic.com>
References: <20210908083758.312055-1-jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH 08/32] net/ngbe: support basic statistics

Support reading and clearing basic statistics, and configuring the
per-queue stats counter mapping.
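From an application's point of view, the new callbacks sit behind the
generic ethdev statistics API. A minimal sketch of how the paths added
by this patch would be exercised (the port id and the queue/counter
indices below are illustrative, not taken from the patch):

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void dump_and_clear_stats(uint16_t port_id)
    {
        struct rte_eth_stats stats;

        /* Map Rx/Tx queue 0 to per-queue stat counter 0; on this PMD
         * these calls are serviced by ngbe_dev_queue_stats_mapping_set().
         */
        rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0);
        rte_eth_dev_set_tx_queue_stats_mapping(port_id, 0, 0);

        /* Serviced by ngbe_dev_stats_get() / ngbe_dev_stats_reset(). */
        rte_eth_stats_get(port_id, &stats);
        printf("rx %" PRIu64 " tx %" PRIu64 " imissed %" PRIu64 "\n",
               stats.ipackets, stats.opackets, stats.imissed);
        rte_eth_stats_reset(port_id);
    }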
Signed-off-by: Jiawen Wu
---
 doc/guides/nics/features/ngbe.ini  |   2 +
 doc/guides/nics/ngbe.rst           |   1 +
 drivers/net/ngbe/base/ngbe_dummy.h |   5 +
 drivers/net/ngbe/base/ngbe_hw.c    | 101 ++++++++++
 drivers/net/ngbe/base/ngbe_hw.h    |   1 +
 drivers/net/ngbe/base/ngbe_type.h  | 134 +++++++++++++
 drivers/net/ngbe/ngbe_ethdev.c     | 300 +++++++++++++++++++++++++++++
 drivers/net/ngbe/ngbe_ethdev.h     |  19 ++
 8 files changed, 563 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini
index 4ae2d66d15..f310fb102a 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -19,6 +19,8 @@ L4 checksum offload = P
 Inner L3 checksum = P
 Inner L4 checksum = P
 Packet type parsing = Y
+Basic stats = Y
+Stats per queue = Y
 Multiprocess aware = Y
 Linux = Y
 ARMv8 = Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 9518a59443..64c07e4741 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -15,6 +15,7 @@ Features
 - Checksum offload
 - VLAN/QinQ stripping and inserting
 - TSO offload
+- Port hardware statistics
 - Jumbo frames
 - Link state information
 - Scattered and gather for TX and RX
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h b/drivers/net/ngbe/base/ngbe_dummy.h
index 8863acef0d..0def116c53 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -55,6 +55,10 @@ static inline s32 ngbe_mac_stop_hw_dummy(struct ngbe_hw *TUP0)
 {
 	return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_clear_hw_cntrs_dummy(struct ngbe_hw *TUP0)
+{
+	return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_mac_get_mac_addr_dummy(struct ngbe_hw *TUP0, u8 *TUP1)
 {
 	return NGBE_ERR_OPS_DUMMY;
@@ -178,6 +182,7 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
 	hw->mac.reset_hw = ngbe_mac_reset_hw_dummy;
 	hw->mac.start_hw = ngbe_mac_start_hw_dummy;
 	hw->mac.stop_hw = ngbe_mac_stop_hw_dummy;
+	hw->mac.clear_hw_cntrs = ngbe_mac_clear_hw_cntrs_dummy;
 	hw->mac.get_mac_addr = ngbe_mac_get_mac_addr_dummy;
 	hw->mac.enable_rx_dma = ngbe_mac_enable_rx_dma_dummy;
 	hw->mac.disable_sec_rx_path = ngbe_mac_disable_sec_rx_path_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index 6b575fc67b..f302df5d9d 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -19,6 +19,9 @@ s32 ngbe_start_hw(struct ngbe_hw *hw)
 {
 	DEBUGFUNC("ngbe_start_hw");

+	/* Clear statistics registers */
+	hw->mac.clear_hw_cntrs(hw);
+
 	/* Clear adapter stopped flag */
 	hw->adapter_stopped = false;

@@ -159,6 +162,7 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw)
 	msec_delay(50);

 	ngbe_reset_misc_em(hw);
+	hw->mac.clear_hw_cntrs(hw);

 	msec_delay(50);

@@ -175,6 +179,102 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw)
 	return status;
 }

+/**
+ * ngbe_clear_hw_cntrs - Generic clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware.
+ * Statistics counters are cleared on read.
+ **/
+s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw)
+{
+	u16 i = 0;
+
+	DEBUGFUNC("ngbe_clear_hw_cntrs");
+
+	/* QP Stats */
+	/* don't write clear queue stats */
+	for (i = 0; i < NGBE_MAX_QP; i++) {
+		hw->qp_last[i].rx_qp_packets = 0;
+		hw->qp_last[i].tx_qp_packets = 0;
+		hw->qp_last[i].rx_qp_bytes = 0;
+		hw->qp_last[i].tx_qp_bytes = 0;
+		hw->qp_last[i].rx_qp_mc_packets = 0;
+		hw->qp_last[i].tx_qp_mc_packets = 0;
+		hw->qp_last[i].rx_qp_bc_packets = 0;
+		hw->qp_last[i].tx_qp_bc_packets = 0;
+	}
+
+	/* PB Stats */
+	rd32(hw, NGBE_PBRXLNKXON);
+	rd32(hw, NGBE_PBRXLNKXOFF);
+	rd32(hw, NGBE_PBTXLNKXON);
+	rd32(hw, NGBE_PBTXLNKXOFF);
+
+	/* DMA Stats */
+	rd32(hw, NGBE_DMARXPKT);
+	rd32(hw, NGBE_DMATXPKT);
+
+	rd64(hw, NGBE_DMARXOCTL);
+	rd64(hw, NGBE_DMATXOCTL);
+
+	/* MAC Stats */
+	rd64(hw, NGBE_MACRXERRCRCL);
+	rd64(hw, NGBE_MACRXMPKTL);
+	rd64(hw, NGBE_MACTXMPKTL);
+
+	rd64(hw, NGBE_MACRXPKTL);
+	rd64(hw, NGBE_MACTXPKTL);
+	rd64(hw, NGBE_MACRXGBOCTL);
+
+	rd64(hw, NGBE_MACRXOCTL);
+	rd32(hw, NGBE_MACTXOCTL);
+
+	rd64(hw, NGBE_MACRX1TO64L);
+	rd64(hw, NGBE_MACRX65TO127L);
+	rd64(hw, NGBE_MACRX128TO255L);
+	rd64(hw, NGBE_MACRX256TO511L);
+	rd64(hw, NGBE_MACRX512TO1023L);
+	rd64(hw, NGBE_MACRX1024TOMAXL);
+	rd64(hw, NGBE_MACTX1TO64L);
+	rd64(hw, NGBE_MACTX65TO127L);
+	rd64(hw, NGBE_MACTX128TO255L);
+	rd64(hw, NGBE_MACTX256TO511L);
+	rd64(hw, NGBE_MACTX512TO1023L);
+	rd64(hw, NGBE_MACTX1024TOMAXL);
+
+	rd64(hw, NGBE_MACRXERRLENL);
+	rd32(hw, NGBE_MACRXOVERSIZE);
+	rd32(hw, NGBE_MACRXJABBER);
+
+	/* MACsec Stats */
+	rd32(hw, NGBE_LSECTX_UTPKT);
+	rd32(hw, NGBE_LSECTX_ENCPKT);
+	rd32(hw, NGBE_LSECTX_PROTPKT);
+	rd32(hw, NGBE_LSECTX_ENCOCT);
+	rd32(hw, NGBE_LSECTX_PROTOCT);
+	rd32(hw, NGBE_LSECRX_UTPKT);
+	rd32(hw, NGBE_LSECRX_BTPKT);
+	rd32(hw, NGBE_LSECRX_NOSCIPKT);
+	rd32(hw, NGBE_LSECRX_UNSCIPKT);
+	rd32(hw, NGBE_LSECRX_DECOCT);
+	rd32(hw, NGBE_LSECRX_VLDOCT);
+	rd32(hw, NGBE_LSECRX_UNCHKPKT);
+	rd32(hw, NGBE_LSECRX_DLYPKT);
+	rd32(hw, NGBE_LSECRX_LATEPKT);
+	for (i = 0; i < 2; i++) {
+		rd32(hw, NGBE_LSECRX_OKPKT(i));
+		rd32(hw, NGBE_LSECRX_INVPKT(i));
+		rd32(hw, NGBE_LSECRX_BADPKT(i));
+	}
+	for (i = 0; i < 4; i++) {
+		rd32(hw, NGBE_LSECRX_INVSAPKT(i));
+		rd32(hw, NGBE_LSECRX_BADSAPKT(i));
+	}
+
+	return 0;
+}
+
 /**
  * ngbe_get_mac_addr - Generic get MAC address
  * @hw: pointer to hardware structure
@@ -988,6 +1088,7 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
 	mac->init_hw = ngbe_init_hw;
 	mac->reset_hw = ngbe_reset_hw_em;
 	mac->start_hw = ngbe_start_hw;
+	mac->clear_hw_cntrs = ngbe_clear_hw_cntrs;
 	mac->enable_rx_dma = ngbe_enable_rx_dma;
 	mac->get_mac_addr = ngbe_get_mac_addr;
 	mac->stop_hw = ngbe_stop_hw;
diff --git a/drivers/net/ngbe/base/ngbe_hw.h b/drivers/net/ngbe/base/ngbe_hw.h
index 17a0a03c88..6a08c02bee 100644
--- a/drivers/net/ngbe/base/ngbe_hw.h
+++ b/drivers/net/ngbe/base/ngbe_hw.h
@@ -17,6 +17,7 @@ s32 ngbe_init_hw(struct ngbe_hw *hw);
 s32 ngbe_start_hw(struct ngbe_hw *hw);
 s32 ngbe_reset_hw_em(struct ngbe_hw *hw);
 s32 ngbe_stop_hw(struct ngbe_hw *hw);
+s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw);
 s32 ngbe_get_mac_addr(struct ngbe_hw *hw, u8 *mac_addr);

 void ngbe_set_lan_id_multi_port(struct ngbe_hw *hw);
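The base-code routine above relies on the statistics registers being
clear-on-read: a read whose result is discarded zeroes the counter, so
no register write is needed. The queue-pair counters are the exception;
they are not cleared this way, so only the qp_last[] software snapshots
are reset. A sketch of the accumulation idiom this implies for the
driver layer (cor_accumulate() is a hypothetical helper, not part of
the patch):

    /* A clear-on-read register returns the count accumulated since the
     * previous read and then resets itself, so long-lived totals must
     * be kept in software; a read with the result discarded (as in
     * ngbe_clear_hw_cntrs() above) simply zeroes the counter.
     */
    static inline void
    cor_accumulate(struct ngbe_hw *hw, u32 reg, u64 *sw_total)
    {
        *sw_total += rd32(hw, reg);
    }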
diff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h
index 28540e4ba0..c13f0208fd 100644
--- a/drivers/net/ngbe/base/ngbe_type.h
+++ b/drivers/net/ngbe/base/ngbe_type.h
@@ -9,6 +9,7 @@
 #define NGBE_LINK_UP_TIME 90 /* 9.0 Seconds */

 #define NGBE_FRAME_SIZE_DFT (1522) /* Default frame size, +FCS */
+#define NGBE_MAX_QP (8)

 #define NGBE_ALIGN 128 /* as intel did */
 #define NGBE_ISB_SIZE 16
@@ -77,6 +78,127 @@ struct ngbe_bus_info {
 	u8 lan_id;
 };

+/* Statistics counters collected by the MAC */
+/* PB[] RxTx */
+struct ngbe_pb_stats {
+	u64 tx_pb_xon_packets;
+	u64 rx_pb_xon_packets;
+	u64 tx_pb_xoff_packets;
+	u64 rx_pb_xoff_packets;
+	u64 rx_pb_dropped;
+	u64 rx_pb_mbuf_alloc_errors;
+	u64 tx_pb_xon2off_packets;
+};
+
+/* QP[] RxTx */
+struct ngbe_qp_stats {
+	u64 rx_qp_packets;
+	u64 tx_qp_packets;
+	u64 rx_qp_bytes;
+	u64 tx_qp_bytes;
+	u64 rx_qp_mc_packets;
+};
+
+struct ngbe_hw_stats {
+	/* MNG RxTx */
+	u64 mng_bmc2host_packets;
+	u64 mng_host2bmc_packets;
+	/* Basic RxTx */
+	u64 rx_drop_packets;
+	u64 tx_drop_packets;
+	u64 rx_dma_drop;
+	u64 tx_secdrp_packets;
+	u64 rx_packets;
+	u64 tx_packets;
+	u64 rx_bytes;
+	u64 tx_bytes;
+	u64 rx_total_bytes;
+	u64 rx_total_packets;
+	u64 tx_total_packets;
+	u64 rx_total_missed_packets;
+	u64 rx_broadcast_packets;
+	u64 tx_broadcast_packets;
+	u64 rx_multicast_packets;
+	u64 tx_multicast_packets;
+	u64 rx_management_packets;
+	u64 tx_management_packets;
+	u64 rx_management_dropped;
+
+	/* Basic Error */
+	u64 rx_crc_errors;
+	u64 rx_illegal_byte_errors;
+	u64 rx_error_bytes;
+	u64 rx_mac_short_packet_dropped;
+	u64 rx_length_errors;
+	u64 rx_undersize_errors;
+	u64 rx_fragment_errors;
+	u64 rx_oversize_errors;
+	u64 rx_jabber_errors;
+	u64 rx_l3_l4_xsum_error;
+	u64 mac_local_errors;
+	u64 mac_remote_errors;
+
+	/* MACSEC */
+	u64 tx_macsec_pkts_untagged;
+	u64 tx_macsec_pkts_encrypted;
+	u64 tx_macsec_pkts_protected;
+	u64 tx_macsec_octets_encrypted;
+	u64 tx_macsec_octets_protected;
+	u64 rx_macsec_pkts_untagged;
+	u64 rx_macsec_pkts_badtag;
+	u64 rx_macsec_pkts_nosci;
+	u64 rx_macsec_pkts_unknownsci;
+	u64 rx_macsec_octets_decrypted;
+	u64 rx_macsec_octets_validated;
+	u64 rx_macsec_sc_pkts_unchecked;
+	u64 rx_macsec_sc_pkts_delayed;
+	u64 rx_macsec_sc_pkts_late;
+	u64 rx_macsec_sa_pkts_ok;
+	u64 rx_macsec_sa_pkts_invalid;
+	u64 rx_macsec_sa_pkts_notvalid;
+	u64 rx_macsec_sa_pkts_unusedsa;
+	u64 rx_macsec_sa_pkts_notusingsa;
+
+	/* MAC RxTx */
+	u64 rx_size_64_packets;
+	u64 rx_size_65_to_127_packets;
+	u64 rx_size_128_to_255_packets;
+	u64 rx_size_256_to_511_packets;
+	u64 rx_size_512_to_1023_packets;
+	u64 rx_size_1024_to_max_packets;
+	u64 tx_size_64_packets;
+	u64 tx_size_65_to_127_packets;
+	u64 tx_size_128_to_255_packets;
+	u64 tx_size_256_to_511_packets;
+	u64 tx_size_512_to_1023_packets;
+	u64 tx_size_1024_to_max_packets;
+
+	/* Flow Control */
+	u64 tx_xon_packets;
+	u64 rx_xon_packets;
+	u64 tx_xoff_packets;
+	u64 rx_xoff_packets;
+
+	u64 rx_up_dropped;
+
+	u64 rdb_pkt_cnt;
+	u64 rdb_repli_cnt;
+	u64 rdb_drp_cnt;
+
+	/* QP[] RxTx */
+	struct {
+		u64 rx_qp_packets;
+		u64 tx_qp_packets;
+		u64 rx_qp_bytes;
+		u64 tx_qp_bytes;
+		u64 rx_qp_mc_packets;
+		u64 tx_qp_mc_packets;
+		u64 rx_qp_bc_packets;
+		u64 tx_qp_bc_packets;
+	} qp[NGBE_MAX_QP];
+
+};
+
 struct ngbe_rom_info {
 	s32 (*init_params)(struct ngbe_hw *hw);
 	s32 (*validate_checksum)(struct ngbe_hw *hw, u16 *checksum_val);
@@ -96,6 +218,7 @@ struct ngbe_mac_info {
 	s32 (*reset_hw)(struct ngbe_hw *hw);
 	s32 (*start_hw)(struct ngbe_hw *hw);
 	s32 (*stop_hw)(struct ngbe_hw *hw);
+	s32 (*clear_hw_cntrs)(struct ngbe_hw *hw);
 	s32 (*get_mac_addr)(struct ngbe_hw *hw, u8 *mac_addr);
 	s32 (*enable_rx_dma)(struct ngbe_hw *hw, u32 regval);
 	s32 (*disable_sec_rx_path)(struct ngbe_hw *hw);
@@ -195,7 +318,18 @@ struct ngbe_hw {
 	u32 q_rx_regs[8 * 4];
 	u32 q_tx_regs[8 * 4];
+	bool offset_loaded;
 	bool is_pf;
+	struct {
+		u64 rx_qp_packets;
+		u64 tx_qp_packets;
+		u64 rx_qp_bytes;
+		u64 tx_qp_bytes;
+		u64 rx_qp_mc_packets;
+		u64 tx_qp_mc_packets;
+		u64 rx_qp_bc_packets;
+		u64 tx_qp_bc_packets;
+	} qp_last[NGBE_MAX_QP];
 };

 #include "ngbe_regs.h"
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 3903eb0a2c..3d459718b1 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -17,6 +17,7 @@ static int ngbe_dev_close(struct rte_eth_dev *dev);
 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
 		int wait_to_complete);
+static int ngbe_dev_stats_reset(struct rte_eth_dev *dev);
 static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
 		uint16_t queue);
@@ -122,6 +123,56 @@ ngbe_disable_intr(struct ngbe_hw *hw)
 	ngbe_flush(hw);
 }

+static int
+ngbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+				 uint16_t queue_id,
+				 uint8_t stat_idx,
+				 uint8_t is_rx)
+{
+	struct ngbe_stat_mappings *stat_mappings =
+		NGBE_DEV_STAT_MAPPINGS(eth_dev);
+	uint32_t qsmr_mask = 0;
+	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
+	uint32_t q_map;
+	uint8_t n, offset;
+
+	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
+		return -EIO;
+
+	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
+
+	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
+	if (n >= NGBE_NB_STAT_MAPPING) {
+		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
+		return -EIO;
+	}
+	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
+
+	/* Now clear any previous stat_idx set */
+	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+	if (!is_rx)
+		stat_mappings->tqsm[n] &= ~clearing_mask;
+	else
+		stat_mappings->rqsm[n] &= ~clearing_mask;
+
+	q_map = (uint32_t)stat_idx;
+	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
+	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+	if (!is_rx)
+		stat_mappings->tqsm[n] |= qsmr_mask;
+	else
+		stat_mappings->rqsm[n] |= qsmr_mask;
+
+	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
+	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
+	return 0;
+}
+
 /*
  * Ensure that all locks are released before first NVM or PHY access
  */
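Each 32-bit RQSM/TQSM shadow word holds four 8-bit mapping fields, so a
queue's field lives in word queue_id / 4 at byte offset queue_id % 4.
A worked example for mapping queue 5 to stat index 3 (illustrative
values only):

    uint16_t queue_id = 5;
    uint8_t stat_idx = 3;

    uint8_t n = queue_id / NB_QMAP_FIELDS_PER_QSM_REG;      /* = 1 */
    uint8_t offset = queue_id % NB_QMAP_FIELDS_PER_QSM_REG; /* = 1 */

    /* The field occupies bits 15:8 of tqsm/rqsm[1]. */
    uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK
            << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);   /* 0x0000ff00 */
    uint32_t qsmr_mask = (uint32_t)stat_idx
            << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);   /* 0x00000300 */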
@@ -236,6 +287,9 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 		return -EIO;
 	}

+	/* Reset the hw statistics */
+	ngbe_dev_stats_reset(eth_dev);
+
 	/* disable interrupt */
 	ngbe_disable_intr(hw);

@@ -616,6 +670,7 @@ static int
 ngbe_dev_start(struct rte_eth_dev *dev)
 {
 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t intr_vector = 0;
@@ -780,6 +835,9 @@ ngbe_dev_start(struct rte_eth_dev *dev)
 	 */
 	ngbe_dev_link_update(dev, 0);

+	ngbe_read_stats_registers(hw, hw_stats);
+	hw->offset_loaded = 1;
+
 	return 0;

 error:
@@ -916,6 +974,245 @@ ngbe_dev_reset(struct rte_eth_dev *dev)
 	return ret;
 }

+#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)	\
+{								\
+	uint32_t current_counter = rd32(hw, reg);		\
+	if (current_counter < last_counter)			\
+		current_counter += 0x100000000LL;		\
+	if (!hw->offset_loaded)					\
+		last_counter = current_counter;			\
+	counter = current_counter - last_counter;		\
+	counter &= 0xFFFFFFFFLL;				\
+}
+
+#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+{								\
+	uint64_t current_counter_lsb = rd32(hw, reg_lsb);	\
+	uint64_t current_counter_msb = rd32(hw, reg_msb);	\
+	uint64_t current_counter = (current_counter_msb << 32) | \
+		current_counter_lsb;				\
+	if (current_counter < last_counter)			\
+		current_counter += 0x1000000000LL;		\
+	if (!hw->offset_loaded)					\
+		last_counter = current_counter;			\
+	counter = current_counter - last_counter;		\
+	counter &= 0xFFFFFFFFFLL;				\
+}
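These macros compensate for counters that are only 32 bits (packets)
or 36 bits (octets) wide in hardware: if the raw value has dropped
below the last snapshot, the counter wrapped, so one full period is
added back before the delta is taken, and the final mask keeps the
result within the counter's width. (Note that in the 32-bit variant
current_counter is declared uint32_t, so the += 0x100000000LL
correction is truncated away; in practice the closing & 0xFFFFFFFFLL
mask is what yields the modular delta.) The same logic as a plain
function with concrete numbers, as a sketch rather than code from the
patch:

    static uint64_t
    qp_delta_32bit(struct ngbe_hw *hw, uint32_t reg, uint64_t *last)
    {
        uint64_t cur = rd32(hw, reg);

        if (cur < *last)          /* e.g. last = 0xFFFFFFF0, cur = 0x10 */
            cur += 1ULL << 32;    /* cur becomes 0x100000010 */
        if (!hw->offset_loaded)   /* first read establishes the baseline */
            *last = cur;
        return (cur - *last) & 0xFFFFFFFFULL;   /* delta = 0x20 */
    }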
+
+void
+ngbe_read_stats_registers(struct ngbe_hw *hw,
+			  struct ngbe_hw_stats *hw_stats)
+{
+	unsigned int i;
+
+	/* QP Stats */
+	for (i = 0; i < hw->nb_rx_queues; i++) {
+		UPDATE_QP_COUNTER_32bit(NGBE_QPRXPKT(i),
+			hw->qp_last[i].rx_qp_packets,
+			hw_stats->qp[i].rx_qp_packets);
+		UPDATE_QP_COUNTER_36bit(NGBE_QPRXOCTL(i), NGBE_QPRXOCTH(i),
+			hw->qp_last[i].rx_qp_bytes,
+			hw_stats->qp[i].rx_qp_bytes);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPRXMPKT(i),
+			hw->qp_last[i].rx_qp_mc_packets,
+			hw_stats->qp[i].rx_qp_mc_packets);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPRXBPKT(i),
+			hw->qp_last[i].rx_qp_bc_packets,
+			hw_stats->qp[i].rx_qp_bc_packets);
+	}
+
+	for (i = 0; i < hw->nb_tx_queues; i++) {
+		UPDATE_QP_COUNTER_32bit(NGBE_QPTXPKT(i),
+			hw->qp_last[i].tx_qp_packets,
+			hw_stats->qp[i].tx_qp_packets);
+		UPDATE_QP_COUNTER_36bit(NGBE_QPTXOCTL(i), NGBE_QPTXOCTH(i),
+			hw->qp_last[i].tx_qp_bytes,
+			hw_stats->qp[i].tx_qp_bytes);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPTXMPKT(i),
+			hw->qp_last[i].tx_qp_mc_packets,
+			hw_stats->qp[i].tx_qp_mc_packets);
+		UPDATE_QP_COUNTER_32bit(NGBE_QPTXBPKT(i),
+			hw->qp_last[i].tx_qp_bc_packets,
+			hw_stats->qp[i].tx_qp_bc_packets);
+	}
+
+	/* PB Stats */
+	hw_stats->rx_up_dropped += rd32(hw, NGBE_PBRXMISS);
+	hw_stats->rdb_pkt_cnt += rd32(hw, NGBE_PBRXPKT);
+	hw_stats->rdb_repli_cnt += rd32(hw, NGBE_PBRXREP);
+	hw_stats->rdb_drp_cnt += rd32(hw, NGBE_PBRXDROP);
+	hw_stats->tx_xoff_packets += rd32(hw, NGBE_PBTXLNKXOFF);
+	hw_stats->tx_xon_packets += rd32(hw, NGBE_PBTXLNKXON);
+
+	hw_stats->rx_xon_packets += rd32(hw, NGBE_PBRXLNKXON);
+	hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF);
+
+	/* DMA Stats */
+	hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP);
+	hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP);
+	hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP);
+	hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP);
+	hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT);
+	hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT);
+	hw_stats->rx_bytes += rd64(hw, NGBE_DMARXOCTL);
+	hw_stats->tx_bytes += rd64(hw, NGBE_DMATXOCTL);
+
+	/* MAC Stats */
+	hw_stats->rx_crc_errors += rd64(hw, NGBE_MACRXERRCRCL);
+	hw_stats->rx_multicast_packets += rd64(hw, NGBE_MACRXMPKTL);
+	hw_stats->tx_multicast_packets += rd64(hw, NGBE_MACTXMPKTL);
+
+	hw_stats->rx_total_packets += rd64(hw, NGBE_MACRXPKTL);
+	hw_stats->tx_total_packets += rd64(hw, NGBE_MACTXPKTL);
+	hw_stats->rx_total_bytes += rd64(hw, NGBE_MACRXGBOCTL);
+
+	hw_stats->rx_broadcast_packets += rd64(hw, NGBE_MACRXOCTL);
+	hw_stats->tx_broadcast_packets += rd32(hw, NGBE_MACTXOCTL);
+
+	hw_stats->rx_size_64_packets += rd64(hw, NGBE_MACRX1TO64L);
+	hw_stats->rx_size_65_to_127_packets += rd64(hw, NGBE_MACRX65TO127L);
+	hw_stats->rx_size_128_to_255_packets += rd64(hw, NGBE_MACRX128TO255L);
+	hw_stats->rx_size_256_to_511_packets += rd64(hw, NGBE_MACRX256TO511L);
+	hw_stats->rx_size_512_to_1023_packets +=
+			rd64(hw, NGBE_MACRX512TO1023L);
+	hw_stats->rx_size_1024_to_max_packets +=
+			rd64(hw, NGBE_MACRX1024TOMAXL);
+	hw_stats->tx_size_64_packets += rd64(hw, NGBE_MACTX1TO64L);
+	hw_stats->tx_size_65_to_127_packets += rd64(hw, NGBE_MACTX65TO127L);
+	hw_stats->tx_size_128_to_255_packets += rd64(hw, NGBE_MACTX128TO255L);
+	hw_stats->tx_size_256_to_511_packets += rd64(hw, NGBE_MACTX256TO511L);
+	hw_stats->tx_size_512_to_1023_packets +=
+			rd64(hw, NGBE_MACTX512TO1023L);
+	hw_stats->tx_size_1024_to_max_packets +=
+			rd64(hw, NGBE_MACTX1024TOMAXL);
+
+	hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL);
+	hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE);
+	hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER);
+
+	/* MNG Stats */
+	hw_stats->mng_bmc2host_packets = rd32(hw, NGBE_MNGBMC2OS);
+	hw_stats->mng_host2bmc_packets = rd32(hw, NGBE_MNGOS2BMC);
+	hw_stats->rx_management_packets = rd32(hw, NGBE_DMARXMNG);
+	hw_stats->tx_management_packets = rd32(hw, NGBE_DMATXMNG);
+
+	/* MACsec Stats */
+	hw_stats->tx_macsec_pkts_untagged += rd32(hw, NGBE_LSECTX_UTPKT);
+	hw_stats->tx_macsec_pkts_encrypted +=
+			rd32(hw, NGBE_LSECTX_ENCPKT);
+	hw_stats->tx_macsec_pkts_protected +=
+			rd32(hw, NGBE_LSECTX_PROTPKT);
+	hw_stats->tx_macsec_octets_encrypted +=
+			rd32(hw, NGBE_LSECTX_ENCOCT);
+	hw_stats->tx_macsec_octets_protected +=
+			rd32(hw, NGBE_LSECTX_PROTOCT);
+	hw_stats->rx_macsec_pkts_untagged += rd32(hw, NGBE_LSECRX_UTPKT);
+	hw_stats->rx_macsec_pkts_badtag += rd32(hw, NGBE_LSECRX_BTPKT);
+	hw_stats->rx_macsec_pkts_nosci += rd32(hw, NGBE_LSECRX_NOSCIPKT);
+	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, NGBE_LSECRX_UNSCIPKT);
+	hw_stats->rx_macsec_octets_decrypted += rd32(hw, NGBE_LSECRX_DECOCT);
+	hw_stats->rx_macsec_octets_validated += rd32(hw, NGBE_LSECRX_VLDOCT);
+	hw_stats->rx_macsec_sc_pkts_unchecked +=
+			rd32(hw, NGBE_LSECRX_UNCHKPKT);
+	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, NGBE_LSECRX_DLYPKT);
+	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, NGBE_LSECRX_LATEPKT);
+	for (i = 0; i < 2; i++) {
+		hw_stats->rx_macsec_sa_pkts_ok +=
+			rd32(hw, NGBE_LSECRX_OKPKT(i));
+		hw_stats->rx_macsec_sa_pkts_invalid +=
+			rd32(hw, NGBE_LSECRX_INVPKT(i));
+		hw_stats->rx_macsec_sa_pkts_notvalid +=
+			rd32(hw, NGBE_LSECRX_BADPKT(i));
+	}
+	for (i = 0; i < 4; i++) {
+		hw_stats->rx_macsec_sa_pkts_unusedsa +=
+			rd32(hw, NGBE_LSECRX_INVSAPKT(i));
+		hw_stats->rx_macsec_sa_pkts_notusingsa +=
+			rd32(hw, NGBE_LSECRX_BADSAPKT(i));
+	}
+	hw_stats->rx_total_missed_packets =
+			hw_stats->rx_up_dropped;
+}
+
+static int
+ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+	struct ngbe_stat_mappings *stat_mappings =
+			NGBE_DEV_STAT_MAPPINGS(dev);
+	uint32_t i, j;
+
+	ngbe_read_stats_registers(hw, hw_stats);
+
+	if (stats == NULL)
+		return -EINVAL;
+
+	/* Fill out the rte_eth_stats statistics structure */
+	stats->ipackets = hw_stats->rx_packets;
+	stats->ibytes = hw_stats->rx_bytes;
+	stats->opackets = hw_stats->tx_packets;
+	stats->obytes = hw_stats->tx_bytes;
+
+	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
+	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
+	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
+	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
+	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
+	for (i = 0; i < NGBE_MAX_QP; i++) {
+		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
+		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
+		uint32_t q_map;
+
+		q_map = (stat_mappings->rqsm[n] >> offset)
+				& QMAP_FIELD_RESERVED_BITS_MASK;
+		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
+		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
+
+		q_map = (stat_mappings->tqsm[n] >> offset)
+				& QMAP_FIELD_RESERVED_BITS_MASK;
+		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
+		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
+	}
+
+	/* Rx Errors */
+	stats->imissed = hw_stats->rx_total_missed_packets +
+			 hw_stats->rx_dma_drop;
+	stats->ierrors = hw_stats->rx_crc_errors +
+			 hw_stats->rx_mac_short_packet_dropped +
+			 hw_stats->rx_length_errors +
+			 hw_stats->rx_undersize_errors +
+			 hw_stats->rx_oversize_errors +
+			 hw_stats->rx_illegal_byte_errors +
+			 hw_stats->rx_error_bytes +
+			 hw_stats->rx_fragment_errors;
+
+	/* Tx Errors */
+	stats->oerrors = 0;
+	return 0;
+}
+
+static int
+ngbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+
+	/* HW registers are cleared on read */
+	hw->offset_loaded = 0;
+	ngbe_dev_stats_get(dev, NULL);
+	hw->offset_loaded = 1;
+
+	/* Reset software totals */
+	memset(hw_stats, 0, sizeof(*hw_stats));
+
+	return 0;
+}
+
 static int
 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
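On the read side, ngbe_dev_stats_get() recovers each queue's counter
index from the shadow words and folds it modulo
RTE_ETHDEV_QUEUE_STAT_CNTRS, so queues whose mapping was never set
(field still zero) all land in counter 0. Continuing the earlier
example for queue 5 mapped to index 3 (illustrative values only):

    uint32_t i = 5;
    uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;            /* = 1 */
    uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8; /* = 8 */
    uint32_t q_map = (stat_mappings->rqsm[n] >> offset)
            & QMAP_FIELD_RESERVED_BITS_MASK;                /* = 3 */

    /* Queue 5's Rx packets/bytes accumulate into counter index 3. */
    stats->q_ipackets[3] += hw_stats->qp[5].rx_qp_packets;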
@@ -1462,6 +1759,9 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
 	.dev_close = ngbe_dev_close,
 	.dev_reset = ngbe_dev_reset,
 	.link_update = ngbe_dev_link_update,
+	.stats_get = ngbe_dev_stats_get,
+	.stats_reset = ngbe_dev_stats_reset,
+	.queue_stats_mapping_set = ngbe_dev_queue_stats_mapping_set,
 	.vlan_offload_set = ngbe_vlan_offload_set,
 	.rx_queue_start = ngbe_dev_rx_queue_start,
 	.rx_queue_stop = ngbe_dev_rx_queue_stop,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 8b3a1cdc3d..c0f1a50c66 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -40,6 +40,15 @@ struct ngbe_interrupt {
 	uint64_t mask_orig; /* save mask during delayed handler */
 };

+#define NGBE_NB_STAT_MAPPING 32
+#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
+#define NB_QMAP_FIELDS_PER_QSM_REG 4
+#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
+struct ngbe_stat_mappings {
+	uint32_t tqsm[NGBE_NB_STAT_MAPPING];
+	uint32_t rqsm[NGBE_NB_STAT_MAPPING];
+};
+
 struct ngbe_vfta {
 	uint32_t vfta[NGBE_VFTA_SIZE];
 };
@@ -53,7 +62,9 @@ struct ngbe_hwstrip {
  */
 struct ngbe_adapter {
 	struct ngbe_hw hw;
+	struct ngbe_hw_stats stats;
 	struct ngbe_interrupt intr;
+	struct ngbe_stat_mappings stat_mappings;
 	struct ngbe_vfta shadow_vfta;
 	struct ngbe_hwstrip hwstrip;
 	bool rx_bulk_alloc_allowed;
@@ -76,6 +87,9 @@ ngbe_dev_hw(struct rte_eth_dev *dev)
 	return hw;
 }

+#define NGBE_DEV_STATS(dev) \
+	(&((struct ngbe_adapter *)(dev)->data->dev_private)->stats)
+
 static inline struct ngbe_interrupt *
 ngbe_dev_intr(struct rte_eth_dev *dev)
 {
@@ -85,6 +99,9 @@ ngbe_dev_intr(struct rte_eth_dev *dev)
 	return intr;
 }

+#define NGBE_DEV_STAT_MAPPINGS(dev) \
+	(&((struct ngbe_adapter *)(dev)->data->dev_private)->stat_mappings)
+
 #define NGBE_DEV_VFTA(dev) \
 	(&((struct ngbe_adapter *)(dev)->data->dev_private)->shadow_vfta)

@@ -190,5 +207,7 @@ void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
 		uint16_t queue, bool on);
 void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
 		int mask);
+void ngbe_read_stats_registers(struct ngbe_hw *hw,
+		struct ngbe_hw_stats *hw_stats);

 #endif /* _NGBE_ETHDEV_H_ */