diff mbox series

[RFC,29/29] net/qdma: add stats PMD ops for PF and VF

Message ID 20220706075219.517046-30-aman.kumar@vvdntech.in (mailing list archive)
State New
Delegated to: Thomas Monjalon
Headers show
Series cover letter for net/qdma PMD | expand

Checks

Context Check Description
ci/intel-Testing success Testing PASS
ci/Intel-compilation fail Compilation issues
ci/checkpatch success coding style OK

Commit Message

Aman Kumar July 6, 2022, 7:52 a.m. UTC
Implement the PMD ops related to stats (stats get/reset and
per-queue stats mapping) for both PF and VF functions.

Signed-off-by: Aman Kumar <aman.kumar@vvdntech.in>
---
 drivers/net/qdma/qdma.h           |   3 +-
 drivers/net/qdma/qdma_devops.c    | 114 +++++++++++++++++++++++++++---
 drivers/net/qdma/qdma_rxtx.c      |   4 ++
 drivers/net/qdma/qdma_vf_ethdev.c |   1 +
 4 files changed, 111 insertions(+), 11 deletions(-)
diff mbox series

Patch

diff --git a/drivers/net/qdma/qdma.h b/drivers/net/qdma/qdma.h
index d9239f34a7..4c86d0702a 100644
--- a/drivers/net/qdma/qdma.h
+++ b/drivers/net/qdma/qdma.h
@@ -149,6 +149,7 @@  struct qdma_rx_queue {
 	uint64_t		mbuf_initializer; /* value to init mbufs */
 	struct qdma_q_pidx_reg_info	q_pidx_info;
 	struct qdma_q_cmpt_cidx_reg_info cmpt_cidx_info;
+	struct qdma_pkt_stats   stats;
 	uint16_t		port_id; /* Device port identifier. */
 	uint8_t			status:1;
 	uint8_t			err:1;
@@ -212,9 +213,7 @@  struct qdma_tx_queue {
 	uint16_t			port_id; /* Device port identifier. */
 	uint8_t				func_id; /* RX queue index. */
 	int8_t				ringszidx;
-
 	struct qdma_pkt_stats		stats;
-
 	uint64_t			ep_addr;
 	uint32_t			queue_id; /* TX queue index. */
 	uint32_t			num_queues; /* TX queue index. */
diff --git a/drivers/net/qdma/qdma_devops.c b/drivers/net/qdma/qdma_devops.c
index e6803dd86f..f0b7291e8c 100644
--- a/drivers/net/qdma/qdma_devops.c
+++ b/drivers/net/qdma/qdma_devops.c
@@ -1745,9 +1745,40 @@  int
 qdma_dev_get_regs(struct rte_eth_dev *dev,
 	      struct rte_dev_reg_info *regs)
 {
-	(void)dev;
-	(void)regs;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	uint32_t *data = regs->data;
+	uint32_t reg_length = 0;
+	int ret = 0;
+
+	ret = qdma_acc_get_num_config_regs(dev,
+			(enum qdma_ip_type)qdma_dev->ip_type,
+			&reg_length);
+	if (ret < 0 || reg_length == 0) {
+		PMD_DRV_LOG(ERR, "%s: Failed to get number of config registers\n",
+						__func__);
+		return ret;
+	}
 
+	if (data == NULL) {
+		regs->length = reg_length - 1;
+		regs->width = sizeof(uint32_t);
+		return 0;
+	}
+
+	/* Support only full register dump */
+	if (regs->length == 0 || regs->length == (reg_length - 1)) {
+		regs->version = 1;
+		ret = qdma_acc_get_config_regs(dev, qdma_dev->is_vf,
+		(enum qdma_ip_type)qdma_dev->ip_type, data);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "%s: Failed to get config registers\n",
+						__func__);
+		}
+		return ret;
+	}
+
+	PMD_DRV_LOG(ERR, "%s: Unsupported length (0x%x) requested\n",
+						__func__, regs->length);
 	return -ENOTSUP;
 }
 
@@ -1773,11 +1804,30 @@  int qdma_dev_queue_stats_mapping(struct rte_eth_dev *dev,
 				 uint8_t stat_idx,
 				 uint8_t is_rx)
 {
-	(void)dev;
-	(void)queue_id;
-	(void)stat_idx;
-	(void)is_rx;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+
+	if (is_rx && queue_id >= dev->data->nb_rx_queues) {
+		PMD_DRV_LOG(ERR, "%s: Invalid Rx qid %d\n",
+					__func__, queue_id);
+		return -EINVAL;
+	}
+
+	if (!is_rx && queue_id >= dev->data->nb_tx_queues) {
+		PMD_DRV_LOG(ERR, "%s: Invalid Tx qid %d\n",
+					__func__, queue_id);
+		return -EINVAL;
+	}
 
+	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+		PMD_DRV_LOG(ERR, "%s: Invalid stats index %d\n",
+					__func__, stat_idx);
+		return -EINVAL;
+	}
+
+	if (is_rx)
+		qdma_dev->rx_qid_statid_map[stat_idx] = queue_id;
+	else
+		qdma_dev->tx_qid_statid_map[stat_idx] = queue_id;
 	return 0;
 }
 
@@ -1795,9 +1845,42 @@  int qdma_dev_queue_stats_mapping(struct rte_eth_dev *dev,
 int qdma_dev_stats_get(struct rte_eth_dev *dev,
 			      struct rte_eth_stats *eth_stats)
 {
-	(void)dev;
-	(void)eth_stats;
+	uint32_t i;
+	int qid;
+	struct qdma_rx_queue *rxq;
+	struct qdma_tx_queue *txq;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
 
+	memset(eth_stats, 0, sizeof(struct rte_eth_stats));
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = (struct qdma_rx_queue *)dev->data->rx_queues[i];
+		eth_stats->ipackets += rxq->stats.pkts;
+		eth_stats->ibytes += rxq->stats.bytes;
+	}
+
+	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+		qid = qdma_dev->rx_qid_statid_map[i];
+		if (qid >= 0) {
+			rxq = (struct qdma_rx_queue *)dev->data->rx_queues[qid];
+			eth_stats->q_ipackets[i] = rxq->stats.pkts;
+			eth_stats->q_ibytes[i] = rxq->stats.bytes;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = (struct qdma_tx_queue *)dev->data->tx_queues[i];
+		eth_stats->opackets += txq->stats.pkts;
+		eth_stats->obytes   += txq->stats.bytes;
+	}
+
+	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+		qid = qdma_dev->tx_qid_statid_map[i];
+		if (qid >= 0) {
+			txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
+			eth_stats->q_opackets[i] = txq->stats.pkts;
+			eth_stats->q_obytes[i] = txq->stats.bytes;
+		}
+	}
 	return 0;
 }
 
@@ -1810,8 +1893,21 @@  int qdma_dev_stats_get(struct rte_eth_dev *dev,
  */
 int qdma_dev_stats_reset(struct rte_eth_dev *dev)
 {
-	(void)dev;
+	uint32_t i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct qdma_rx_queue *rxq =
+			(struct qdma_rx_queue *)dev->data->rx_queues[i];
+		rxq->stats.pkts = 0;
+		rxq->stats.bytes = 0;
+	}
 
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct qdma_tx_queue *txq =
+			(struct qdma_tx_queue *)dev->data->tx_queues[i];
+		txq->stats.pkts = 0;
+		txq->stats.bytes = 0;
+	}
 	return 0;
 }
 
diff --git a/drivers/net/qdma/qdma_rxtx.c b/drivers/net/qdma/qdma_rxtx.c
index 7652f35dd2..8a4caa465b 100644
--- a/drivers/net/qdma/qdma_rxtx.c
+++ b/drivers/net/qdma/qdma_rxtx.c
@@ -708,6 +708,8 @@  struct rte_mbuf *prepare_single_packet(struct qdma_rx_queue *rxq,
 	pkt_length = qdma_ul_get_cmpt_pkt_len(&rxq->cmpt_data[cmpt_idx]);
 
 	if (pkt_length) {
+		rxq->stats.pkts++;
+		rxq->stats.bytes += pkt_length;
 		if (likely(pkt_length <= rxq->rx_buff_size)) {
 			mb = rxq->sw_ring[id];
 			rxq->sw_ring[id++] = NULL;
@@ -870,6 +872,8 @@  static uint16_t prepare_packets(struct qdma_rx_queue *rxq,
 	while (count < nb_pkts) {
 		pkt_length = qdma_ul_get_cmpt_pkt_len(&rxq->cmpt_data[count]);
 		if (pkt_length) {
+			rxq->stats.pkts++;
+			rxq->stats.bytes += pkt_length;
 			mb = prepare_segmented_packet(rxq,
 					pkt_length, &rxq->rx_tail);
 			rx_pkts[count_pkts++] = mb;
diff --git a/drivers/net/qdma/qdma_vf_ethdev.c b/drivers/net/qdma/qdma_vf_ethdev.c
index cbae4c9716..50529340b5 100644
--- a/drivers/net/qdma/qdma_vf_ethdev.c
+++ b/drivers/net/qdma/qdma_vf_ethdev.c
@@ -796,6 +796,7 @@  static struct eth_dev_ops qdma_vf_eth_dev_ops = {
 	.rx_queue_stop        = qdma_vf_dev_rx_queue_stop,
 	.tx_queue_start       = qdma_vf_dev_tx_queue_start,
 	.tx_queue_stop        = qdma_vf_dev_tx_queue_stop,
+	.stats_get            = qdma_dev_stats_get,
 };
 
 /**