[2/7] net/hns3: fix MAC and queues HW statistics reversion

Message ID 20220406092240.52900-3-humin29@huawei.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series: bugfix for hns3 PMD

Checks

Context       | Check   | Description
ci/checkpatch | success | coding style OK

Commit Message

humin (Q) April 6, 2022, 9:22 a.m. UTC
  From: Huisong Li <lihuisong@huawei.com>

The MAC and queues statistics aren't 64-bit registers in hardware. If
hardware statistics are not obtained for a long time, these statistics will
be reversed. So PF and VF driver have to periodically obtain and save these
statistics. Since the periodical task and the stats API are in different
threads, we introduce a statistics lock to protect the statistics.

Fixes: 8839c5e202f3 ("net/hns3: support device stats")
Cc: stable@dpdk.org

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c    |   6 +-
 drivers/net/hns3/hns3_ethdev.h    |   6 ++
 drivers/net/hns3/hns3_ethdev_vf.c |   6 +-
 drivers/net/hns3/hns3_stats.c     | 136 +++++++++++++++++++++---------
 drivers/net/hns3/hns3_stats.h     |   1 +
 5 files changed, 110 insertions(+), 45 deletions(-)
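
For reference, below is a minimal, self-contained sketch of the scheme the commit message describes. It is not the hns3 driver code: the names (read_hw_reg32, sw_total, stats_lock, fake_hw_reg) are placeholders, and a pthread mutex stands in for the driver's rte_spinlock so the example builds on its own. The idea is the same: a 32-bit hardware counter is folded into a 64-bit software counter under a lock, from both a periodic task and the stats path, so the value is preserved even though the hardware register wraps around.

/*
 * Placeholder sketch of "accumulate a narrow HW counter into a wide
 * SW counter under a lock"; in the driver this role is played by
 * hw->tqp_stats and the new hw->stats_lock.
 */
#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t sw_total;     /* 64-bit software accumulator */
static uint32_t fake_hw_reg;  /* stands in for a clear-on-read HW counter */

/* Stand-in for reading a 32-bit counter register that clears on read. */
static uint32_t read_hw_reg32(void)
{
	uint32_t v = fake_hw_reg;

	fake_hw_reg = 0;
	return v;
}

/*
 * Called from the periodic service task and from the stats API,
 * hence the lock around the read-and-accumulate step.
 */
static uint64_t update_and_get_stats(void)
{
	uint64_t total;

	pthread_mutex_lock(&stats_lock);
	sw_total += read_hw_reg32();  /* widen before the 32-bit value wraps */
	total = sw_total;
	pthread_mutex_unlock(&stats_lock);

	return total;
}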
  

Comments

Ferruh Yigit May 3, 2022, 7:05 p.m. UTC | #1
On 4/6/2022 10:22 AM, Min Hu (Connor) wrote:
> From: Huisong Li <lihuisong@huawei.com>
> 
> The MAC and queues statistics aren't 64-bit registers in hardware. If
> hardware statistics are not obtained for a long time, these statistics will
> be reversed. 

I think using 'overflow' instead of 'reversed' is more clear.

As far as I can see from the code, these are 32-bit values; it would be
good to mention that in the commit log.

> So PF and VF driver have to periodically obtain and save these
> statistics. Since the periodical task and the stats API are in different
> threads, we introduce a statistics lock to protect the statistics.
> 
> Fixes: 8839c5e202f3 ("net/hns3: support device stats")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>

<...>

> @@ -604,16 +626,15 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
>   	struct hns3_tqp_stats *stats = &hw->tqp_stats;
>   	struct hns3_rx_queue *rxq;
>   	struct hns3_tx_queue *txq;
> -	uint64_t cnt;
>   	uint16_t i;
>   	int ret;
>   
> +	rte_spinlock_lock(&hw->stats_lock);

The lock covers most of the 'stats_get()' function, but only the Rx/Tx
queue stats need to be protected; you may consider reducing the scope of
the lock. Same for the functions below.
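
A rough, self-contained sketch of that suggestion, with placeholder names (ring_stats, read_ring_reg, NB_QUEUES) and a pthread mutex standing in for the driver's spinlock, assuming only the accumulators shared with the periodic task need protection:

#include <stdint.h>
#include <pthread.h>

#define NB_QUEUES 4

struct ring_stats {
	pthread_mutex_t lock;        /* shared with the periodic task */
	uint64_t pktnum[NB_QUEUES];  /* 64-bit accumulators */
	uint64_t bytes[NB_QUEUES];   /* touched only by this thread */
};

static uint32_t read_ring_reg(uint16_t q) { return q; }  /* HW read stub */

static void stats_get(struct ring_stats *s, uint64_t *pkts, uint64_t *bytes)
{
	uint16_t i;

	*pkts = 0;
	*bytes = 0;

	pthread_mutex_lock(&s->lock);  /* narrow critical section */
	for (i = 0; i < NB_QUEUES; i++) {
		s->pktnum[i] += read_ring_reg(i);
		*pkts += s->pktnum[i];
	}
	pthread_mutex_unlock(&s->lock);

	for (i = 0; i < NB_QUEUES; i++)  /* not shared, no lock needed */
		*bytes += s->bytes[i];
}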

<...>

> @@ -1453,6 +1464,7 @@ int
>   hns3_dev_xstats_reset(struct rte_eth_dev *dev)
>   {
>   	struct hns3_adapter *hns = dev->data->dev_private;
> +	struct hns3_hw *hw = &hns->hw;
>   	int ret;
>   
>   	/* Clear tqp stats */
> @@ -1460,20 +1472,22 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev)
>   	if (ret)
>   		return ret;
>   
> +	rte_spinlock_unlock(&hw->stats_lock);

I guess this should be 'rte_spinlock_lock()'
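
For illustration, a generic, self-contained sketch (placeholder names, pthread mutex in place of the rte_spinlock) of the pairing this comment points at: take the lock on entry and route every exit, including the error path, through a single unlock.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int clear_counters(void) { return 0; }  /* stub */

static int xstats_reset_like(void)
{
	int ret;

	pthread_mutex_lock(&lock);  /* lock, not unlock, on entry */
	ret = clear_counters();
	if (ret != 0)
		goto out;

	/* ... clear the remaining statistics ... */
out:
	pthread_mutex_unlock(&lock);
	return ret;
}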
  

Patch

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index ee2c5d5a1a..2d47088487 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4362,10 +4362,12 @@  hns3_service_handler(void *param)
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
-	if (!hns3_is_reset_pending(hns))
+	if (!hns3_is_reset_pending(hns)) {
 		hns3_update_linkstatus_and_event(hw, true);
-	else
+		hns3_update_hw_stats(hw);
+	} else {
 		hns3_warn(hw, "Cancel the query when reset is pending");
+	}
 
 	rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev);
 }
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index a00f841543..6a3dbae48e 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -503,6 +503,12 @@  struct hns3_hw {
 	uint32_t mac_stats_reg_num;
 	struct hns3_rx_missed_stats imissed_stats;
 	uint64_t oerror_stats;
+	/*
+	 * The lock is used to protect statistics update in stats APIs and
+	 * periodic task.
+	 */
+	rte_spinlock_t stats_lock;
+
 	uint32_t fw_version;
 	uint16_t pf_vf_if_version;  /* version of communication interface */
 
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 4f52f22776..589de0ab3a 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1335,10 +1335,12 @@  hns3vf_service_handler(void *param)
 	 * Before querying the link status, check whether there is a reset
 	 * pending, and if so, abandon the query.
 	 */
-	if (!hns3vf_is_reset_pending(hns))
+	if (!hns3vf_is_reset_pending(hns)) {
 		hns3vf_request_link_info(hw);
-	else
+		hns3_update_hw_stats(hw);
+	} else {
 		hns3_warn(hw, "Cancel the query when reset is pending");
+	}
 
 	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
 			  eth_dev);
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index e4a5dcf2f8..224e43be8f 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -584,6 +584,28 @@  hns3_update_oerror_stats(struct hns3_hw *hw, bool is_clear)
 	return 0;
 }
 
+static void
+hns3_rcb_rx_ring_stats_get(struct hns3_rx_queue *rxq,
+			   struct hns3_tqp_stats *stats)
+{
+	uint32_t cnt;
+
+	cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
+	stats->rcb_rx_ring_pktnum_rcd += cnt;
+	stats->rcb_rx_ring_pktnum[rxq->queue_id] += cnt;
+}
+
+static void
+hns3_rcb_tx_ring_stats_get(struct hns3_tx_queue *txq,
+			   struct hns3_tqp_stats *stats)
+{
+	uint32_t cnt;
+
+	cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
+	stats->rcb_tx_ring_pktnum_rcd += cnt;
+	stats->rcb_tx_ring_pktnum[txq->queue_id] += cnt;
+}
+
 /*
  * Query tqp tx queue statistics ,opcode id: 0x0B03.
  * Query tqp rx queue statistics ,opcode id: 0x0B13.
@@ -604,16 +626,15 @@  hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
 	struct hns3_tqp_stats *stats = &hw->tqp_stats;
 	struct hns3_rx_queue *rxq;
 	struct hns3_tx_queue *txq;
-	uint64_t cnt;
 	uint16_t i;
 	int ret;
 
+	rte_spinlock_lock(&hw->stats_lock);
 	/* Update imissed stats */
 	ret = hns3_update_imissed_stats(hw, false);
 	if (ret) {
-		hns3_err(hw, "update imissed stats failed, ret = %d",
-			 ret);
-		return ret;
+		hns3_err(hw, "update imissed stats failed, ret = %d", ret);
+		goto out;
 	}
 	rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt +
 				imissed_stats->ssu_rx_drop_cnt;
@@ -624,15 +645,9 @@  hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
 		if (rxq == NULL)
 			continue;
 
-		cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
-		/*
-		 * Read hardware and software in adjacent positions to minimize
-		 * the timing variance.
-		 */
+		hns3_rcb_rx_ring_stats_get(rxq, stats);
 		rte_stats->ierrors += rxq->err_stats.l2_errors +
 				      rxq->err_stats.pkt_len_errors;
-		stats->rcb_rx_ring_pktnum_rcd += cnt;
-		stats->rcb_rx_ring_pktnum[i] += cnt;
 		rte_stats->ibytes += rxq->basic_stats.bytes;
 	}
 
@@ -642,17 +657,14 @@  hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
 		if (txq == NULL)
 			continue;
 
-		cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
-		stats->rcb_tx_ring_pktnum_rcd += cnt;
-		stats->rcb_tx_ring_pktnum[i] += cnt;
+		hns3_rcb_tx_ring_stats_get(txq, stats);
 		rte_stats->obytes += txq->basic_stats.bytes;
 	}
 
 	ret = hns3_update_oerror_stats(hw, false);
 	if (ret) {
-		hns3_err(hw, "update oerror stats failed, ret = %d",
-			 ret);
-		return ret;
+		hns3_err(hw, "update oerror stats failed, ret = %d", ret);
+		goto out;
 	}
 	rte_stats->oerrors = hw->oerror_stats;
 
@@ -668,7 +680,10 @@  hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
 		rte_stats->oerrors;
 	rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
 
-	return 0;
+out:
+	rte_spinlock_unlock(&hw->stats_lock);
+
+	return ret;
 }
 
 int
@@ -681,6 +696,7 @@  hns3_stats_reset(struct rte_eth_dev *eth_dev)
 	uint16_t i;
 	int ret;
 
+	rte_spinlock_lock(&hw->stats_lock);
 	/*
 	 * Note: Reading hardware statistics of imissed registers will
 	 * clear them.
@@ -688,7 +704,7 @@  hns3_stats_reset(struct rte_eth_dev *eth_dev)
 	ret = hns3_update_imissed_stats(hw, true);
 	if (ret) {
 		hns3_err(hw, "clear imissed stats failed, ret = %d", ret);
-		return ret;
+		goto out;
 	}
 
 	/*
@@ -697,9 +713,8 @@  hns3_stats_reset(struct rte_eth_dev *eth_dev)
 	 */
 	ret = hns3_update_oerror_stats(hw, true);
 	if (ret) {
-		hns3_err(hw, "clear oerror stats failed, ret = %d",
-			 ret);
-		return ret;
+		hns3_err(hw, "clear oerror stats failed, ret = %d", ret);
+		goto out;
 	}
 
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -741,7 +756,10 @@  hns3_stats_reset(struct rte_eth_dev *eth_dev)
 
 	hns3_tqp_stats_clear(hw);
 
-	return 0;
+out:
+	rte_spinlock_unlock(&hw->stats_lock);
+
+	return ret;
 }
 
 static int
@@ -908,7 +926,6 @@  hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	struct hns3_rx_basic_stats *rxq_stats;
 	struct hns3_rx_queue *rxq;
 	uint16_t i, j;
-	uint32_t cnt;
 	char *val;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -916,16 +933,10 @@  hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		if (rxq == NULL)
 			continue;
 
-		cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
-		/*
-		 * Read hardware and software in adjacent positions to minimize
-		 * the time difference.
-		 */
+		hns3_rcb_rx_ring_stats_get(rxq, stats);
 		rxq_stats = &rxq->basic_stats;
 		rxq_stats->errors = rxq->err_stats.l2_errors +
 					rxq->err_stats.pkt_len_errors;
-		stats->rcb_rx_ring_pktnum_rcd += cnt;
-		stats->rcb_rx_ring_pktnum[i] += cnt;
 
 		/*
 		 * If HW statistics are reset by stats_reset, but a lot of
@@ -955,7 +966,6 @@  hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	struct hns3_tx_basic_stats *txq_stats;
 	struct hns3_tx_queue *txq;
 	uint16_t i, j;
-	uint32_t cnt;
 	char *val;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -963,9 +973,7 @@  hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		if (txq == NULL)
 			continue;
 
-		cnt = hns3_read_dev(txq, HNS3_RING_TX_PKTNUM_RECORD_REG);
-		stats->rcb_tx_ring_pktnum_rcd += cnt;
-		stats->rcb_tx_ring_pktnum[i] += cnt;
+		hns3_rcb_tx_ring_stats_get(txq, stats);
 
 		txq_stats = &txq->basic_stats;
 		txq_stats->packets = stats->rcb_tx_ring_pktnum[i];
@@ -1050,6 +1058,7 @@  hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 
 	count = 0;
 
+	rte_spinlock_lock(&hw->stats_lock);
 	hns3_tqp_basic_stats_get(dev, xstats, &count);
 
 	if (!hns->is_vf) {
@@ -1057,6 +1066,7 @@  hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		ret = hns3_query_update_mac_stats(dev);
 		if (ret < 0) {
 			hns3_err(hw, "Update Mac stats fail : %d", ret);
+			rte_spinlock_unlock(&hw->stats_lock);
 			return ret;
 		}
 
@@ -1071,8 +1081,8 @@  hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 
 	ret = hns3_update_imissed_stats(hw, false);
 	if (ret) {
-		hns3_err(hw, "update imissed stats failed, ret = %d",
-			 ret);
+		hns3_err(hw, "update imissed stats failed, ret = %d", ret);
+		rte_spinlock_unlock(&hw->stats_lock);
 		return ret;
 	}
 
@@ -1103,6 +1113,7 @@  hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 
 	hns3_tqp_dfx_stats_get(dev, xstats, &count);
 	hns3_queue_stats_get(dev, xstats, &count);
+	rte_spinlock_unlock(&hw->stats_lock);
 
 	return count;
 }
@@ -1453,6 +1464,7 @@  int
 hns3_dev_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
 	/* Clear tqp stats */
@@ -1460,20 +1472,22 @@  hns3_dev_xstats_reset(struct rte_eth_dev *dev)
 	if (ret)
 		return ret;
 
+	rte_spinlock_unlock(&hw->stats_lock);
 	hns3_tqp_dfx_stats_clear(dev);
 
 	/* Clear reset stats */
 	memset(&hns->hw.reset.stats, 0, sizeof(struct hns3_reset_stats));
 
 	if (hns->is_vf)
-		return 0;
+		goto out;
 
 	/* HW registers are cleared on read */
 	ret = hns3_mac_stats_reset(dev);
-	if (ret)
-		return ret;
 
-	return 0;
+out:
+	rte_spinlock_unlock(&hw->stats_lock);
+
+	return ret;
 }
 
 static int
@@ -1527,6 +1541,7 @@  hns3_stats_init(struct hns3_hw *hw)
 {
 	int ret;
 
+	rte_spinlock_init(&hw->stats_lock);
 	/* Hardware statistics of imissed registers cleared. */
 	ret = hns3_update_imissed_stats(hw, true);
 	if (ret) {
@@ -1542,3 +1557,42 @@  hns3_stats_uninit(struct hns3_hw *hw)
 {
 	hns3_tqp_stats_uninit(hw);
 }
+
+static void
+hns3_update_queues_stats(struct hns3_hw *hw)
+{
+	struct rte_eth_dev_data *data = hw->data;
+	struct hns3_rx_queue *rxq;
+	struct hns3_tx_queue *txq;
+	uint16_t i;
+
+	for (i = 0; i < data->nb_rx_queues; i++) {
+		rxq = data->rx_queues[i];
+		if (rxq != NULL)
+			hns3_rcb_rx_ring_stats_get(rxq, &hw->tqp_stats);
+	}
+
+	for (i = 0; i < data->nb_tx_queues; i++) {
+		txq = data->tx_queues[i];
+		if (txq != NULL)
+			hns3_rcb_tx_ring_stats_get(txq, &hw->tqp_stats);
+	}
+}
+
+/*
+ * Some hardware statistics registers are not 64-bit. If hardware statistics are
+ * not obtained for a long time, these statistics may be reversed. This function
+ * is used to update these hardware statistics in periodic task.
+ */
+void
+hns3_update_hw_stats(struct hns3_hw *hw)
+{
+	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
+
+	rte_spinlock_lock(&hw->stats_lock);
+	if (!hns->is_vf)
+		hns3_update_mac_stats(hw);
+
+	hns3_update_queues_stats(hw);
+	rte_spinlock_unlock(&hw->stats_lock);
+}
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h
index e89dc97632..b5cd6188b4 100644
--- a/drivers/net/hns3/hns3_stats.h
+++ b/drivers/net/hns3/hns3_stats.h
@@ -164,5 +164,6 @@  int hns3_stats_reset(struct rte_eth_dev *dev);
 int hns3_stats_init(struct hns3_hw *hw);
 void hns3_stats_uninit(struct hns3_hw *hw);
 int hns3_query_mac_stats_reg_num(struct hns3_hw *hw);
+void hns3_update_hw_stats(struct hns3_hw *hw);
 
 #endif /* _HNS3_STATS_H_ */