From patchwork Fri Aug 27 06:57:05 2021
From: Andrew Rybchenko
To: dev@dpdk.org
Cc: Igor Romanov, Andy Moreton, Ivan Malov
Date: Fri, 27 Aug 2021 09:57:05 +0300
Message-Id: <20210827065717.1838258-27-andrew.rybchenko@oktetlabs.ru>
In-Reply-To: <20210827065717.1838258-1-andrew.rybchenko@oktetlabs.ru>
References: <20210827065717.1838258-1-andrew.rybchenko@oktetlabs.ru>
Subject: [dpdk-dev] [PATCH 26/38] net/sfc: add simple port representor statistics

From: Igor Romanov

Gather statistics of enqueued and dequeued packets in the Rx and Tx
burst callbacks so that they can be reported by the stats_get ethdev
callback.
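The counters below rely on the union sfc_pkts_bytes type and the
sfc_pkts_bytes_add()/sfc_pkts_bytes_get() helpers that the sfc driver
keeps in drivers/net/sfc/sfc_stats.h. For readers without the tree at
hand, here is a minimal sketch of how such a helper pair can be built;
the field layout, the single-writer assumption and the 128-bit atomic
store are illustrative assumptions, not the driver's verbatim code:

#include <stdint.h>

union sfc_pkts_bytes {
	struct {
		uint64_t	pkts;
		uint64_t	bytes;
	};
	__int128		pkts_bytes;	/* both counters as one value */
};

/*
 * Sketch: the datapath (a single writer per queue) publishes both
 * counters with one 128-bit store, so a concurrent reader never
 * observes a packet count and a byte count from different bursts.
 */
static inline void
sfc_pkts_bytes_add(union sfc_pkts_bytes *st, uint64_t pkts, uint64_t bytes)
{
	union sfc_pkts_bytes result;

	result.pkts = st->pkts + pkts;
	result.bytes = st->bytes + bytes;
	__atomic_store_n(&st->pkts_bytes, result.pkts_bytes, __ATOMIC_RELAXED);
}

/* Sketch: take one consistent 128-bit snapshot of both counters. */
static inline void
sfc_pkts_bytes_get(union sfc_pkts_bytes *st, union sfc_pkts_bytes *result)
{
	result->pkts_bytes = __atomic_load_n(&st->pkts_bytes, __ATOMIC_RELAXED);
}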
Signed-off-by: Igor Romanov
Signed-off-by: Andrew Rybchenko
Reviewed-by: Andy Moreton
Reviewed-by: Ivan Malov
---
 drivers/net/sfc/sfc_repr.c | 60 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 58 insertions(+), 2 deletions(-)

diff --git a/drivers/net/sfc/sfc_repr.c b/drivers/net/sfc/sfc_repr.c
index a436b7e5e1..4fd81c3f6b 100644
--- a/drivers/net/sfc/sfc_repr.c
+++ b/drivers/net/sfc/sfc_repr.c
@@ -32,9 +32,14 @@ struct sfc_repr_shared {
 	uint16_t		switch_port_id;
 };
 
+struct sfc_repr_queue_stats {
+	union sfc_pkts_bytes		packets_bytes;
+};
+
 struct sfc_repr_rxq {
 	/* Datapath members */
 	struct rte_ring			*ring;
+	struct sfc_repr_queue_stats	stats;
 
 	/* Non-datapath members */
 	struct sfc_repr_shared		*shared;
@@ -45,6 +50,7 @@ struct sfc_repr_txq {
 	/* Datapath members */
 	struct rte_ring			*ring;
 	efx_mport_id_t			egress_mport;
+	struct sfc_repr_queue_stats	stats;
 
 	/* Non-datapath members */
 	struct sfc_repr_shared		*shared;
@@ -173,15 +179,30 @@ sfc_repr_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct sfc_repr_rxq *rxq = rx_queue;
 	void **objs = (void *)&rx_pkts[0];
+	unsigned int n_rx;
 
 	/* mbufs port is already filled correctly by representors proxy */
-	return rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);
+	n_rx = rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);
+
+	if (n_rx > 0) {
+		unsigned int n_bytes = 0;
+		unsigned int i = 0;
+
+		do {
+			n_bytes += rx_pkts[i]->pkt_len;
+		} while (++i < n_rx);
+
+		sfc_pkts_bytes_add(&rxq->stats.packets_bytes, n_rx, n_bytes);
+	}
+
+	return n_rx;
 }
 
 static uint16_t
 sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct sfc_repr_txq *txq = tx_queue;
+	unsigned int n_bytes = 0;
 	unsigned int n_tx;
 	void **objs;
 	uint16_t i;
@@ -201,6 +222,7 @@ sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		m->ol_flags |= sfc_dp_mport_override;
 		*RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset, efx_mport_id_t *) =
 				txq->egress_mport;
+		n_bytes += tx_pkts[i]->pkt_len;
 	}
 
 	objs = (void *)&tx_pkts[0];
@@ -210,14 +232,18 @@ sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	 * Remove m-port override flag from packets that were not enqueued
 	 * Setting the flag only for enqueued packets after the burst is
 	 * not possible since the ownership of enqueued packets is
-	 * transferred to representor proxy.
+	 * transferred to representor proxy. The same logic applies to
+	 * counting the enqueued packets' bytes.
 	 */
 	for (i = n_tx; i < nb_pkts; ++i) {
 		struct rte_mbuf *m = tx_pkts[i];
 
 		m->ol_flags &= ~sfc_dp_mport_override;
+		n_bytes -= m->pkt_len;
 	}
 
+	sfc_pkts_bytes_add(&txq->stats.packets_bytes, n_tx, n_bytes);
+
 	return n_tx;
 }
 
@@ -849,6 +875,35 @@ sfc_repr_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+sfc_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	union sfc_pkts_bytes queue_stats;
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct sfc_repr_rxq *rxq = dev->data->rx_queues[i];
+
+		sfc_pkts_bytes_get(&rxq->stats.packets_bytes,
+				   &queue_stats);
+
+		stats->ipackets += queue_stats.pkts;
+		stats->ibytes += queue_stats.bytes;
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct sfc_repr_txq *txq = dev->data->tx_queues[i];
+
+		sfc_pkts_bytes_get(&txq->stats.packets_bytes,
+				   &queue_stats);
+
+		stats->opackets += queue_stats.pkts;
+		stats->obytes += queue_stats.bytes;
+	}
+
+	return 0;
+}
+
 static const struct eth_dev_ops sfc_repr_dev_ops = {
 	.dev_configure			= sfc_repr_dev_configure,
 	.dev_start			= sfc_repr_dev_start,
@@ -856,6 +911,7 @@ static const struct eth_dev_ops sfc_repr_dev_ops = {
 	.dev_close			= sfc_repr_dev_close,
 	.dev_infos_get			= sfc_repr_dev_infos_get,
 	.link_update			= sfc_repr_dev_link_update,
+	.stats_get			= sfc_repr_stats_get,
 	.rx_queue_setup			= sfc_repr_rx_queue_setup,
 	.rx_queue_release		= sfc_repr_rx_queue_release,
 	.tx_queue_setup			= sfc_repr_tx_queue_setup,
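
Usage note (not part of the patch): with .stats_get wired into
sfc_repr_dev_ops, applications read these counters through the generic
ethdev API. A hypothetical example follows; rte_eth_stats_get()
zero-initializes the structure before invoking the driver callback,
which is why sfc_repr_stats_get() above only accumulates with "+=":

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Hypothetical helper: print the Rx/Tx counters of a representor port. */
static void
repr_stats_dump(uint16_t repr_port)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(repr_port, &stats) != 0)
		return;

	printf("port %" PRIu16 ": RX %" PRIu64 " pkts / %" PRIu64 " bytes, "
	       "TX %" PRIu64 " pkts / %" PRIu64 " bytes\n",
	       repr_port, stats.ipackets, stats.ibytes,
	       stats.opackets, stats.obytes);
}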