[v1,27/42] net/txgbe: add device stats get

Message ID 20200901115113.1529675-27-jiawenwu@trustnetic.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series: [v1,01/42] net/txgbe: add build and doc infrastructure

Checks

Context        Check    Description
ci/checkpatch  warning  coding style issues

Commit Message

Jiawen Wu Sept. 1, 2020, 11:50 a.m. UTC
  Add device stats get by reading hardware registers.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
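Usage note (for reviewers; not intended for the commit log): once this patch
is applied, the counters below surface through the generic ethdev stats API.
A minimal sketch, assuming port_id refers to a txgbe port that has already
been started; error handling is elided and the function name is illustrative:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
show_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	printf("rx: %" PRIu64 " pkts, %" PRIu64 " bytes, %" PRIu64
	       " missed, %" PRIu64 " errors\n",
	       stats.ipackets, stats.ibytes, stats.imissed, stats.ierrors);
	printf("tx: %" PRIu64 " pkts, %" PRIu64 " bytes\n",
	       stats.opackets, stats.obytes);

	/* Per-queue counters are folded through the rqsm/tqsm mapping in
	 * txgbe_stat_mappings; with the table zero-initialized, every
	 * queue accumulates into counter index 0. */
	printf("q0 rx: %" PRIu64 " pkts\n", stats.q_ipackets[0]);
}
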
 drivers/net/txgbe/base/txgbe_type.h | 153 ++++++++++++++++-
 drivers/net/txgbe/txgbe_ethdev.c    | 245 +++++++++++++++++++++++++++-
 drivers/net/txgbe/txgbe_ethdev.h    |  16 ++
 3 files changed, 411 insertions(+), 3 deletions(-)
  

Patch

diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 1c16257da..f9a18d581 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -10,6 +10,8 @@ 
 
 #define TXGBE_FRAME_SIZE_MAX	(9728) /* Maximum frame size, +FCS */
 #define TXGBE_FRAME_SIZE_DFT	(1518) /* Default frame size, +FCS */
+#define TXGBE_MAX_UP		8
+#define TXGBE_MAX_QP		(128)
 
 #define TXGBE_ALIGN				128 /* as intel did */
 
@@ -186,8 +188,149 @@  struct txgbe_bus_info {
 	u8 lan_id;
 	u16 instance_id;
 };
+/* Statistics counters collected by the MAC */
+/* PB[] RxTx */
+struct txgbe_pb_stats {
+	u64 tx_pb_xon_packets;
+	u64 rx_pb_xon_packets;
+	u64 tx_pb_xoff_packets;
+	u64 rx_pb_xoff_packets;
+	u64 rx_pb_dropped;
+	u64 rx_pb_mbuf_alloc_errors;
+	u64 tx_pb_xon2off_packets;
+};
+
+/* QP[] RxTx */
+struct txgbe_qp_stats {
+	u64 rx_qp_packets;
+	u64 tx_qp_packets;
+	u64 rx_qp_bytes;
+	u64 tx_qp_bytes;
+	u64 rx_qp_mc_packets;
+};
+
 struct txgbe_hw_stats {
-	u64 counter;
+	/* MNG RxTx */
+	u64 mng_bmc2host_packets;
+	u64 mng_host2bmc_packets;
+	/* Basic RxTx */
+	u64 rx_packets;
+	u64 tx_packets;
+	u64 rx_bytes;
+	u64 tx_bytes;
+	u64 rx_total_bytes;
+	u64 rx_total_packets;
+	u64 tx_total_packets;
+	u64 rx_total_missed_packets;
+	u64 rx_broadcast_packets;
+	u64 tx_broadcast_packets;
+	u64 rx_multicast_packets;
+	u64 tx_multicast_packets;
+	u64 rx_management_packets;
+	u64 tx_management_packets;
+	u64 rx_management_dropped;
+	u64 rx_drop_packets;
+
+	/* Basic Error */
+	u64 rx_crc_errors;
+	u64 rx_illegal_byte_errors;
+	u64 rx_error_bytes;
+	u64 rx_mac_short_packet_dropped;
+	u64 rx_length_errors;
+	u64 rx_undersize_errors;
+	u64 rx_fragment_errors;
+	u64 rx_oversize_errors;
+	u64 rx_jabber_errors;
+	u64 rx_l3_l4_xsum_error;
+	u64 mac_local_errors;
+	u64 mac_remote_errors;
+
+	/* Flow Director */
+	u64 flow_director_added_filters;
+	u64 flow_director_removed_filters;
+	u64 flow_director_filter_add_errors;
+	u64 flow_director_filter_remove_errors;
+	u64 flow_director_matched_filters;
+	u64 flow_director_missed_filters;
+
+	/* FCoE */
+	u64 rx_fcoe_crc_errors;
+	u64 rx_fcoe_mbuf_allocation_errors;
+	u64 rx_fcoe_dropped;
+	u64 rx_fcoe_packets;
+	u64 tx_fcoe_packets;
+	u64 rx_fcoe_bytes;
+	u64 tx_fcoe_bytes;
+	u64 rx_fcoe_no_ddp;
+	u64 rx_fcoe_no_ddp_ext_buff;
+
+	/* MACSEC */
+	u64 tx_macsec_pkts_untagged;
+	u64 tx_macsec_pkts_encrypted;
+	u64 tx_macsec_pkts_protected;
+	u64 tx_macsec_octets_encrypted;
+	u64 tx_macsec_octets_protected;
+	u64 rx_macsec_pkts_untagged;
+	u64 rx_macsec_pkts_badtag;
+	u64 rx_macsec_pkts_nosci;
+	u64 rx_macsec_pkts_unknownsci;
+	u64 rx_macsec_octets_decrypted;
+	u64 rx_macsec_octets_validated;
+	u64 rx_macsec_sc_pkts_unchecked;
+	u64 rx_macsec_sc_pkts_delayed;
+	u64 rx_macsec_sc_pkts_late;
+	u64 rx_macsec_sa_pkts_ok;
+	u64 rx_macsec_sa_pkts_invalid;
+	u64 rx_macsec_sa_pkts_notvalid;
+	u64 rx_macsec_sa_pkts_unusedsa;
+	u64 rx_macsec_sa_pkts_notusingsa;
+
+	/* MAC RxTx */
+	u64 rx_size_64_packets;
+	u64 rx_size_65_to_127_packets;
+	u64 rx_size_128_to_255_packets;
+	u64 rx_size_256_to_511_packets;
+	u64 rx_size_512_to_1023_packets;
+	u64 rx_size_1024_to_max_packets;
+	u64 tx_size_64_packets;
+	u64 tx_size_65_to_127_packets;
+	u64 tx_size_128_to_255_packets;
+	u64 tx_size_256_to_511_packets;
+	u64 tx_size_512_to_1023_packets;
+	u64 tx_size_1024_to_max_packets;
+
+	/* Flow Control */
+	u64 tx_xon_packets;
+	u64 rx_xon_packets;
+	u64 tx_xoff_packets;
+	u64 rx_xoff_packets;
+
+	/* PB[] RxTx */
+	struct {
+		u64 rx_up_packets;
+		u64 tx_up_packets;
+		u64 rx_up_bytes;
+		u64 tx_up_bytes;
+		u64 rx_up_drop_packets;
+
+		u64 tx_up_xon_packets;
+		u64 rx_up_xon_packets;
+		u64 tx_up_xoff_packets;
+		u64 rx_up_xoff_packets;
+		u64 rx_up_dropped;
+		u64 rx_up_mbuf_alloc_errors;
+		u64 tx_up_xon2off_packets;
+	} up[TXGBE_MAX_UP];
+
+	/* QP[] RxTx */
+	struct {
+		u64 rx_qp_packets;
+		u64 tx_qp_packets;
+		u64 rx_qp_bytes;
+		u64 tx_qp_bytes;
+		u64 rx_qp_mc_packets;
+	} qp[TXGBE_MAX_QP];
+
 };
 
 /* iterator type for walking multicast address lists */
@@ -472,6 +615,14 @@  struct txgbe_hw {
 
 	u32 q_rx_regs[128 * 4];
 	u32 q_tx_regs[128 * 4];
+	bool offset_loaded;
+	struct {
+		u64 rx_qp_packets;
+		u64 tx_qp_packets;
+		u64 rx_qp_bytes;
+		u64 tx_qp_bytes;
+		u64 rx_qp_mc_packets;
+	} qp_last[TXGBE_MAX_QP];
 };
 
 #include "txgbe_regs.h"
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 08b31f66e..63f811d93 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -414,6 +414,7 @@  static int
 txgbe_dev_start(struct rte_eth_dev *dev)
 {
 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t intr_vector = 0;
@@ -595,6 +596,9 @@  txgbe_dev_start(struct rte_eth_dev *dev)
 
 	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_OD_MASK);
 
+	txgbe_read_stats_registers(hw, hw_stats);
+	hw->offset_loaded = 1;
+
 	return 0;
 
 error:
@@ -731,6 +735,9 @@  txgbe_dev_close(struct rte_eth_dev *dev)
 	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
 
 	dev->dev_ops = NULL;
+	dev->rx_pkt_burst = NULL;
+	dev->tx_pkt_burst = NULL;
+
 
 	/* disable uio intr before callback unregister */
 	rte_intr_disable(intr_handle);
@@ -786,22 +793,256 @@  txgbe_dev_reset(struct rte_eth_dev *dev)
 
 	return ret;
 }
+
+#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
+	{                                                       \
+		uint32_t current_counter = rd32(hw, reg);       \
+		if (current_counter < last_counter)             \
+			current_counter += 0x100000000LL;       \
+		if (!hw->offset_loaded)                         \
+			last_counter = current_counter;         \
+		counter = current_counter - last_counter;       \
+		counter &= 0xFFFFFFFFLL;                        \
+	}
+
+#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+	{                                                                \
+		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
+		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
+		uint64_t current_counter = (current_counter_msb << 32) | \
+			current_counter_lsb;                             \
+		if (current_counter < last_counter)                      \
+			current_counter += 0x1000000000LL;               \
+		if (!hw->offset_loaded)                                  \
+			last_counter = current_counter;                  \
+		counter = current_counter - last_counter;                \
+		counter &= 0xFFFFFFFFFLL;                                \
+	}
+
+void
+txgbe_read_stats_registers(struct txgbe_hw *hw,
+			   struct txgbe_hw_stats *hw_stats)
+{
+	unsigned int i;
+
+	/* QP Stats */
+	for (i = 0; i < hw->nb_rx_queues; i++) {
+		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
+			hw->qp_last[i].rx_qp_packets,
+			hw_stats->qp[i].rx_qp_packets);
+		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
+			hw->qp_last[i].rx_qp_bytes,
+			hw_stats->qp[i].rx_qp_bytes);
+		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
+			hw->qp_last[i].rx_qp_mc_packets,
+			hw_stats->qp[i].rx_qp_mc_packets);
+	}
+
+	for (i = 0; i < hw->nb_tx_queues; i++) {
+		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
+			hw->qp_last[i].tx_qp_packets,
+			hw_stats->qp[i].tx_qp_packets);
+		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
+			hw->qp_last[i].tx_qp_bytes,
+			hw_stats->qp[i].tx_qp_bytes);
+	}
+	/* PB Stats */
+	for (i = 0; i < TXGBE_MAX_UP; i++) {
+		hw_stats->up[i].rx_up_xon_packets +=
+				rd32(hw, TXGBE_PBRXUPXON(i));
+		hw_stats->up[i].rx_up_xoff_packets +=
+				rd32(hw, TXGBE_PBRXUPXOFF(i));
+		hw_stats->up[i].tx_up_xon_packets +=
+				rd32(hw, TXGBE_PBTXUPXON(i));
+		hw_stats->up[i].tx_up_xoff_packets +=
+				rd32(hw, TXGBE_PBTXUPXOFF(i));
+		hw_stats->up[i].tx_up_xon2off_packets +=
+				rd32(hw, TXGBE_PBTXUPOFF(i));
+		hw_stats->up[i].rx_up_dropped +=
+				rd32(hw, TXGBE_PBRXMISS(i));
+	}
+	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
+	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
+	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
+	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
+
+	/* DMA Stats */
+	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
+	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
+
+	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
+	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
+	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
+
+	/* MAC Stats */
+	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
+	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
+	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
+
+	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
+	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
+	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
+
+	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
+	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
+
+	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1to64L);
+	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65to127L);
+	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128to255L);
+	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256to511L);
+	hw_stats->rx_size_512_to_1023_packets += rd64(hw, TXGBE_MACRX512to1023L);
+	hw_stats->rx_size_1024_to_max_packets += rd64(hw, TXGBE_MACRX1024toMAXL);
+	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1to64L);
+	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65to127L);
+	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128to255L);
+	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256to511L);
+	hw_stats->tx_size_512_to_1023_packets += rd64(hw, TXGBE_MACTX512to1023L);
+	hw_stats->tx_size_1024_to_max_packets += rd64(hw, TXGBE_MACTX1024toMAXL);
+
+	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
+	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
+	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
+
+	/* MNG Stats */
+	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
+	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
+	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
+	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
+
+	/* FCoE Stats */
+	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
+	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
+	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
+	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
+	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
+	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
+	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
+
+	/* Flow Director Stats */
+	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
+	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
+	hw_stats->flow_director_added_filters +=
+		TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
+	hw_stats->flow_director_removed_filters +=
+		TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
+	hw_stats->flow_director_filter_add_errors +=
+		TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
+	hw_stats->flow_director_filter_remove_errors +=
+		TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
+
+	/* MACsec Stats */
+	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
+	hw_stats->tx_macsec_pkts_encrypted +=
+			rd32(hw, TXGBE_LSECTX_ENCPKT);
+	hw_stats->tx_macsec_pkts_protected +=
+			rd32(hw, TXGBE_LSECTX_PROTPKT);
+	hw_stats->tx_macsec_octets_encrypted +=
+			rd32(hw, TXGBE_LSECTX_ENCOCT);
+	hw_stats->tx_macsec_octets_protected +=
+			rd32(hw, TXGBE_LSECTX_PROTOCT);
+	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
+	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
+	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
+	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
+	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
+	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
+	hw_stats->rx_macsec_sc_pkts_unchecked += rd32(hw, TXGBE_LSECRX_UNCHKPKT);
+	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
+	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
+	for (i = 0; i < 2; i++) {
+		hw_stats->rx_macsec_sa_pkts_ok +=
+			rd32(hw, TXGBE_LSECRX_OKPKT(i));
+		hw_stats->rx_macsec_sa_pkts_invalid +=
+			rd32(hw, TXGBE_LSECRX_INVPKT(i));
+		hw_stats->rx_macsec_sa_pkts_notvalid +=
+			rd32(hw, TXGBE_LSECRX_BADPKT(i));
+	}
+	hw_stats->rx_macsec_sa_pkts_unusedsa +=
+			rd32(hw, TXGBE_LSECRX_INVSAPKT);
+	hw_stats->rx_macsec_sa_pkts_notusingsa +=
+			rd32(hw, TXGBE_LSECRX_BADSAPKT);
+
+	hw_stats->rx_total_missed_packets = 0;
+	for (i = 0; i < TXGBE_MAX_UP; i++) {
+		hw_stats->rx_total_missed_packets +=
+			hw_stats->up[i].rx_up_dropped;
+	}
+}
+
 static int
 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	RTE_SET_USED(dev);
-	RTE_SET_USED(stats);
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+	struct txgbe_stat_mappings *stat_mappings =
+			TXGBE_DEV_STAT_MAPPINGS(dev);
+	uint32_t i, j;
 
+	txgbe_read_stats_registers(hw, hw_stats);
+
+	if (stats == NULL)
+		return -EINVAL;
+
+	/* Fill out the rte_eth_stats statistics structure */
+	stats->ipackets = hw_stats->rx_packets;
+	stats->ibytes = hw_stats->rx_bytes;
+	stats->opackets = hw_stats->tx_packets;
+	stats->obytes = hw_stats->tx_bytes;
+
+	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
+	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
+	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
+	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
+	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
+	for (i = 0; i < TXGBE_MAX_QP; i++) {
+		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
+		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
+		uint32_t q_map;
+
+		q_map = (stat_mappings->rqsm[n] >> offset)
+				& QMAP_FIELD_RESERVED_BITS_MASK;
+		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
+		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
+
+		q_map = (stat_mappings->tqsm[n] >> offset)
+				& QMAP_FIELD_RESERVED_BITS_MASK;
+		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
+		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
+	}
+
+	/* Rx Errors */
+	stats->imissed  = hw_stats->rx_total_missed_packets;
+	stats->ierrors  = hw_stats->rx_crc_errors +
+			  hw_stats->rx_mac_short_packet_dropped +
+			  hw_stats->rx_length_errors +
+			  hw_stats->rx_undersize_errors +
+			  hw_stats->rx_oversize_errors +
+			  hw_stats->rx_drop_packets +
+			  hw_stats->rx_illegal_byte_errors +
+			  hw_stats->rx_error_bytes +
+			  hw_stats->rx_fragment_errors +
+			  hw_stats->rx_fcoe_crc_errors +
+			  hw_stats->rx_fcoe_mbuf_allocation_errors;
+
+	/* Tx Errors */
+	stats->oerrors  = 0;
 	return 0;
 }
 
 static int
 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
 {
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
 
 	/* HW registers are cleared on read */
+	hw->offset_loaded = 0;
 	txgbe_dev_stats_get(dev, NULL);
+	hw->offset_loaded = 1;
 
 	/* Reset software totals */
 	memset(hw_stats, 0, sizeof(*hw_stats));
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index dceb88d2f..d896b7775 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -40,6 +40,15 @@  struct txgbe_interrupt {
 	uint32_t mask[2];
 };
 
+#define TXGBE_NB_STAT_MAPPING  32
+#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
+#define NB_QMAP_FIELDS_PER_QSM_REG 4
+#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
+struct txgbe_stat_mappings {
+	uint32_t tqsm[TXGBE_NB_STAT_MAPPING];
+	uint32_t rqsm[TXGBE_NB_STAT_MAPPING];
+};
+
 struct txgbe_vf_info {
 	uint8_t api_version;
 	uint16_t switch_domain_id;
@@ -52,6 +61,7 @@  struct txgbe_adapter {
 	struct txgbe_hw             hw;
 	struct txgbe_hw_stats       stats;
 	struct txgbe_interrupt      intr;
+	struct txgbe_stat_mappings  stat_mappings;
 	struct txgbe_vf_info        *vfdata;
 	bool rx_bulk_alloc_allowed;
 };
@@ -77,6 +87,9 @@  int txgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
 #define TXGBE_DEV_INTR(dev) \
 	(&((struct txgbe_adapter *)(dev)->data->dev_private)->intr)
 
+#define TXGBE_DEV_STAT_MAPPINGS(dev) \
+	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stat_mappings)
+
 #define TXGBE_DEV_VFDATA(dev) \
 	(&((struct txgbe_adapter *)(dev)->data->dev_private)->vfdata)
 
@@ -163,4 +176,7 @@  void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
 
 const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void txgbe_dev_setup_link_alarm_handler(void *param);
+void txgbe_read_stats_registers(struct txgbe_hw *hw,
+			   struct txgbe_hw_stats *hw_stats);
+
 #endif /* _TXGBE_ETHDEV_H_ */
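
A note on the UPDATE_QP_COUNTER_32bit/36bit macros above: the per-queue
packet counters are free-running 32-bit registers, and the octet counters
are 36-bit values split across LSB/MSB registers. The driver records a
baseline sample in hw->qp_last[] when the port starts (offset_loaded = 0)
and thereafter reports the running delta, adding 2^32 (or 2^36) once when
the current reading has wrapped below the baseline, then masking back to
the counter width. A standalone sketch of the same arithmetic; delta32()
and the sample values are illustrative, not part of the driver:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-aware delta of a free-running 32-bit counter: 'last' is the
 * baseline raw sample, 'cur' the current one. Correct as long as the
 * counter wraps at most once between the two samples. */
static uint64_t
delta32(uint64_t last, uint64_t cur)
{
	if (cur < last)			/* counter wrapped past 2^32 */
		cur += 0x100000000ULL;
	return (cur - last) & 0xFFFFFFFFULL;
}

int
main(void)
{
	/* no wrap: 4000 - 1000 = 3000 events since the baseline */
	assert(delta32(1000, 4000) == 3000);
	/* wrap: 0xFFFFFFF0 -> 0x10 counts as 0x20 events, not negative */
	assert(delta32(0xFFFFFFF0, 0x10) == 0x20);
	printf("ok\n");
	return 0;
}

The 36-bit variant is the same idea: it first assembles (msb << 32) | lsb
from the two registers and uses 2^36 as the wrap modulus.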