From patchwork Fri Dec 14 13:18:41 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Michal Krawczyk X-Patchwork-Id: 48873 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 6CF5D1BC0C; Fri, 14 Dec 2018 14:19:36 +0100 (CET) Received: from mail-lj1-f194.google.com (mail-lj1-f194.google.com [209.85.208.194]) by dpdk.org (Postfix) with ESMTP id C76501BBD8 for ; Fri, 14 Dec 2018 14:19:16 +0100 (CET) Received: by mail-lj1-f194.google.com with SMTP id l15-v6so4857208lja.9 for ; Fri, 14 Dec 2018 05:19:16 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=semihalf-com.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=5xFEFGHAsNLgJhKcjue8GxC66YHYFu816mwu83F5sQE=; b=U1Uh1LnYvoMCU4e2796TeQvM7IT4u8nzQYwkRUz/EQBXotjrTWCPHvmAZSwz6tvY/4 dQmsFR8lxEHn0brLtJ0KCeirxRuXeZmVcmyiP36abYHmc81WMMfpy8Vm2Li/vN+dY5oa AgSarQ+CxBnA4VliQ5e3ZqseH68KVbVUCobgUWwMiyn1KGX/8cGfRxqzhB3MmBWvnSmk 08EWqG9ub1IT+x+B0sQsYRqdVSyn4D3ruCY7tsrYyJSPBOCSlBiDPul+7xX9UsWJQ1m0 tksULHO5pO8RzEmcHq142NQTx+V5CeEbwHu4QblJMCr9qffKDwXA9/578p1SjFYiLy+n ff9g== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=5xFEFGHAsNLgJhKcjue8GxC66YHYFu816mwu83F5sQE=; b=bzcirkNl03OlRnJzuu/LqZI17oQnXs6s4RYHH7s3uhZic1hrMtTxIzkPpTP2gJ6lab ZmGRarBbKRdG1rIpnIziytOW6DzzLbQMYJaoFNkn1ofoyEp6MvYxnwtInx6iw2N9F+2r 1iRTBycixKYVy2fhb69W89joVs0LIVea5sGK0vG0UEL9Dk/M8x/uY5w/ZtU/iGU5qMjG KzORhq5jqwGznpcYWiETXPUKFKFLI6BSqNuyIWCBX73Eh8g7M9OcUx8b9FHyQhjDJfab Q30UzbJPQfQNe+y1nPaHO119QGSqQdDQs2UnRvGwPp96xOJMjEEaIh1/fZkeZ3w2nGe1 R39w== X-Gm-Message-State: AA+aEWZIrON2PBEk3J0t8iZL08imrFtxKkXmXpAF2iyjOZgPhvKUTInh 8PlW4Xx8E8EXr+20sjCqNzDBkmDx188= 
X-Google-Smtp-Source: AFSGD/VzqVYm92EkuRY4zVMnGhaV6ZAAce+9iPuTNJd6XSgOrKhW/cZe+SfUzGvxwBeQFPvMlJgAaQ== X-Received: by 2002:a2e:6594:: with SMTP id e20-v6mr1998598ljf.123.1544793556064; Fri, 14 Dec 2018 05:19:16 -0800 (PST) Received: from mkPC.semihalf.local (31-172-191-173.noc.fibertech.net.pl. [31.172.191.173]) by smtp.gmail.com with ESMTPSA id o25sm873884lfd.29.2018.12.14.05.19.14 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Fri, 14 Dec 2018 05:19:15 -0800 (PST) From: Michal Krawczyk To: dev@dpdk.org Cc: gtzalik@dpdk.org, mw@dpdk.org, matua@amazon.com, rk@semihalf.com, Michal Krawczyk , Solganik Alexander Date: Fri, 14 Dec 2018 14:18:41 +0100 Message-Id: <20181214131846.22439-16-mk@semihalf.com> X-Mailer: git-send-email 2.14.1 In-Reply-To: <20181214131846.22439-1-mk@semihalf.com> References: <20181214131846.22439-1-mk@semihalf.com> Subject: [dpdk-dev] [PATCH 15/20] net/ena: add per-queue software counters stats X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Those counters provide information regards sent/received bytes and packets per queue. 
Signed-off-by: Solganik Alexander Signed-off-by: Michal Krawczyk --- drivers/net/ena/ena_ethdev.c | 36 +++++++++++++++++++++++++- drivers/net/ena/ena_ethdev.h | 60 ++++++++++++++++++++++++-------------------- 2 files changed, 68 insertions(+), 28 deletions(-) diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index ce0ca40c4..799dcefcd 100644 --- a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -160,6 +160,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(dma_mapping_err), ENA_STAT_RX_ENTRY(bad_desc_num), ENA_STAT_RX_ENTRY(small_copy_len_pkt), + ENA_STAT_RX_ENTRY(bad_req_id), }; static const struct ena_stats ena_stats_ena_com_strings[] = { @@ -390,6 +391,7 @@ static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; rx_ring->adapter->trigger_reset = true; + ++rx_ring->rx_stats.bad_req_id; return -EFAULT; } @@ -969,6 +971,8 @@ static int ena_stats_get(struct rte_eth_dev *dev, (struct ena_adapter *)(dev->data->dev_private); struct ena_com_dev *ena_dev = &adapter->ena_dev; int rc; + int i; + int max_rings_stats; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return -ENOTSUP; @@ -996,6 +1000,27 @@ static int ena_stats_get(struct rte_eth_dev *dev, stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); + + max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, + RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (i = 0; i < max_rings_stats; ++i) { + struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; + + stats->q_ibytes[i] = rx_stats->bytes; + stats->q_ipackets[i] = rx_stats->cnt; + stats->q_errors[i] = rx_stats->bad_desc_num + + rx_stats->bad_req_id; + } + + max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, + RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (i = 0; i < max_rings_stats; ++i) 
{ + struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; + + stats->q_obytes[i] = tx_stats->bytes; + stats->q_opackets[i] = tx_stats->cnt; + } + return 0; } @@ -1408,6 +1433,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count); if (unlikely(rc < 0)) { rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); + ++rxq->rx_stats.page_alloc_fail; PMD_RX_LOG(DEBUG, "there are no enough free buffers"); return 0; } @@ -2108,8 +2134,10 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, /* pass to DPDK application head mbuf */ rx_pkts[recv_idx] = mbuf_head; recv_idx++; + rx_ring->rx_stats.bytes += mbuf_head->pkt_len; } + rx_ring->rx_stats.cnt += recv_idx; rx_ring->next_to_clean = next_to_clean; desc_in_use = desc_in_use - completed + 1; @@ -2256,6 +2284,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t push_len = 0; uint16_t delta = 0; int nb_hw_desc; + uint32_t total_length; /* Check adapter state */ if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { @@ -2270,6 +2299,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { mbuf = tx_pkts[sent_idx]; + total_length = 0; rc = ena_check_and_linearize_mbuf(tx_ring, mbuf); if (unlikely(rc)) @@ -2335,6 +2365,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, ebuf++; tx_info->num_of_bufs++; } + total_length += mbuf->data_len; while ((mbuf = mbuf->next) != NULL) { seg_len = mbuf->data_len; @@ -2347,6 +2378,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, ebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta; ebuf->len = seg_len - delta; + total_length += ebuf->len; ebuf++; tx_info->num_of_bufs++; @@ -2373,6 +2405,8 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 
tx_info->tx_descs = nb_hw_desc; next_to_use++; + tx_ring->tx_stats.cnt++; + tx_ring->tx_stats.bytes += total_length; } /* If there are ready packets to be xmitted... */ @@ -2380,7 +2414,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* ...let HW do its best :-) */ rte_wmb(); ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); - + tx_ring->tx_stats.doorbells++; tx_ring->next_to_use = next_to_use; } diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h index 713cdea97..255e1c0ec 100644 --- a/drivers/net/ena/ena_ethdev.h +++ b/drivers/net/ena/ena_ethdev.h @@ -79,6 +79,34 @@ struct ena_calc_queue_size_ctx { u16 max_rx_sgl_size; }; +struct ena_stats_tx { + u64 cnt; + u64 bytes; + u64 queue_stop; + u64 prepare_ctx_err; + u64 queue_wakeup; + u64 dma_mapping_err; + u64 linearize; + u64 linearize_failed; + u64 tx_poll; + u64 doorbells; + u64 missing_tx_comp; + u64 bad_req_id; +}; + +struct ena_stats_rx { + u64 cnt; + u64 bytes; + u64 refil_partial; + u64 bad_csum; + u64 page_alloc_fail; + u64 skb_alloc_fail; + u64 dma_mapping_err; + u64 bad_desc_num; + u64 small_copy_len_pkt; + u64 bad_req_id; +}; + struct ena_ring { u16 next_to_use; u16 next_to_clean; @@ -116,6 +144,11 @@ struct ena_ring { struct ena_adapter *adapter; uint64_t offloads; u16 sgl_size; + + union { + struct ena_stats_rx rx_stats; + struct ena_stats_tx tx_stats; + }; } __rte_cache_aligned; enum ena_adapter_state { @@ -143,33 +176,6 @@ struct ena_stats_dev { u64 admin_q_pause; }; -struct ena_stats_tx { - u64 cnt; - u64 bytes; - u64 queue_stop; - u64 prepare_ctx_err; - u64 queue_wakeup; - u64 dma_mapping_err; - u64 linearize; - u64 linearize_failed; - u64 tx_poll; - u64 doorbells; - u64 missing_tx_comp; - u64 bad_req_id; -}; - -struct ena_stats_rx { - u64 cnt; - u64 bytes; - u64 refil_partial; - u64 bad_csum; - u64 page_alloc_fail; - u64 skb_alloc_fail; - u64 dma_mapping_err; - u64 bad_desc_num; - u64 small_copy_len_pkt; -}; - /* board 
specific private data structure */ struct ena_adapter { /* OS defined structs */