From patchwork Fri May 15 18:45:37 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ajit Khaparde X-Patchwork-Id: 70341 X-Patchwork-Delegate: ajit.khaparde@broadcom.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5008EA00C3; Fri, 15 May 2020 20:46:30 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id EC4F71DB2C; Fri, 15 May 2020 20:45:57 +0200 (CEST) Received: from rnd-relay.smtp.broadcom.com (rnd-relay.smtp.broadcom.com [192.19.229.170]) by dpdk.org (Postfix) with ESMTP id 65E111DABA for ; Fri, 15 May 2020 20:45:51 +0200 (CEST) Received: from mail-irv-17.broadcom.com (mail-irv-17.lvn.broadcom.net [10.75.242.48]) by rnd-relay.smtp.broadcom.com (Postfix) with ESMTP id D7B7D30D39E; Fri, 15 May 2020 11:44:43 -0700 (PDT) DKIM-Filter: OpenDKIM Filter v2.10.3 rnd-relay.smtp.broadcom.com D7B7D30D39E DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=broadcom.com; s=dkimrelay; t=1589568283; bh=tz9+jimaWrWwuINfJ/M3uEAe93N+MsaTFjvqjV/uGyU=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=qNsClqrLDRC+nCNH1oEtcloAmxUEjpiNdehoLBD3K1R8tk5S5sCbMWhBlG2B2/Zns TzuG/CS7Cw4/f9ZOUpAr/5LifwMu3Q3BV7BuYZh2MWuErcTbq5rwcIA2aSlWuvKPJX 93WTJVivKqzH+5wvsv1qSRIbd7+Ch5EsmksjEWcA= Received: from localhost.localdomain (unknown [10.230.185.215]) by mail-irv-17.broadcom.com (Postfix) with ESMTP id 3F5B014008C; Fri, 15 May 2020 11:45:50 -0700 (PDT) From: Ajit Khaparde To: dev@dpdk.org Cc: ferruh.yigit@intel.com, Kalesh AP , Somnath Kotur Date: Fri, 15 May 2020 11:45:37 -0700 Message-Id: <20200515184542.89318-5-ajit.khaparde@broadcom.com> X-Mailer: git-send-email 2.21.1 (Apple Git-122.3) In-Reply-To: <20200515184542.89318-1-ajit.khaparde@broadcom.com> References: <20200515184542.89318-1-ajit.khaparde@broadcom.com> MIME-Version: 
1.0 Subject: [dpdk-dev] [PATCH v1 4/9] net/bnxt: fix to allocate flow stat related structs X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Consolidate flow stat related structs for performance improvement. The intention of this patch is to reduce the size of struct bnxt which had grown because of recent changes and was impacting performance. Fixes: 02a95625fe9c ("net/bnxt: add flow stats in extended stats") Signed-off-by: Ajit Khaparde Signed-off-by: Kalesh AP Reviewed-by: Somnath Kotur --- drivers/net/bnxt/bnxt.h | 19 ++-- drivers/net/bnxt/bnxt_ethdev.c | 153 +++++++++++++++++++++------------ drivers/net/bnxt/bnxt_flow.c | 14 +-- drivers/net/bnxt/bnxt_hwrm.c | 5 +- drivers/net/bnxt/bnxt_stats.c | 14 +-- 5 files changed, 127 insertions(+), 78 deletions(-) diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h index 570767253..b71435495 100644 --- a/drivers/net/bnxt/bnxt.h +++ b/drivers/net/bnxt/bnxt.h @@ -515,6 +515,16 @@ struct bnxt_mark_info { #define BNXT_FW_STATUS_SHUTDOWN 0x100000 #define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) + +struct bnxt_flow_stat_info { + uint16_t max_fc; + uint16_t flow_count; + struct bnxt_ctx_mem_buf_info rx_fc_in_tbl; + struct bnxt_ctx_mem_buf_info rx_fc_out_tbl; + struct bnxt_ctx_mem_buf_info tx_fc_in_tbl; + struct bnxt_ctx_mem_buf_info tx_fc_out_tbl; +}; + struct bnxt { void *bar0; @@ -549,6 +559,7 @@ struct bnxt { #define BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS BIT(22) #define BNXT_FLAG_FC_THREAD BIT(23) #define BNXT_FLAG_RX_VECTOR_PKT_MODE BIT(24) +#define BNXT_FLAG_FLOW_XSTATS_EN BIT(25) #define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) #define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) #define BNXT_NPAR(bp) ((bp)->flags & BNXT_FLAG_NPAR_PF) @@ -561,6 +572,7 @@ struct bnxt { #define BNXT_STINGRAY(bp) ((bp)->flags & BNXT_FLAG_STINGRAY) 
#define BNXT_HAS_NQ(bp) BNXT_CHIP_THOR(bp) #define BNXT_HAS_RING_GRPS(bp) (!BNXT_CHIP_THOR(bp)) +#define BNXT_FLOW_XSTATS_EN(bp) ((bp)->flags & BNXT_FLAG_FLOW_XSTATS_EN) uint32_t fw_cap; #define BNXT_FW_CAP_HOT_RESET BIT(0) @@ -709,12 +721,7 @@ struct bnxt { struct tf tfp; struct bnxt_ulp_context ulp_ctx; uint8_t truflow; - uint16_t max_fc; - struct bnxt_ctx_mem_buf_info rx_fc_in_tbl; - struct bnxt_ctx_mem_buf_info rx_fc_out_tbl; - struct bnxt_ctx_mem_buf_info tx_fc_in_tbl; - struct bnxt_ctx_mem_buf_info tx_fc_out_tbl; - uint16_t flow_count; + struct bnxt_flow_stat_info *flow_stat; uint8_t flow_xstat; }; diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index fa1f84d44..90fb7f635 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -197,6 +197,12 @@ static void bnxt_free_leds_info(struct bnxt *bp) bp->leds = NULL; } +static void bnxt_free_flow_stats_info(struct bnxt *bp) +{ + rte_free(bp->flow_stat); + bp->flow_stat = NULL; +} + static void bnxt_free_cos_queues(struct bnxt *bp) { rte_free(bp->rx_cos_queue); @@ -205,6 +211,8 @@ static void bnxt_free_cos_queues(struct bnxt *bp) static void bnxt_free_mem(struct bnxt *bp, bool reconfig) { + bnxt_free_flow_stats_info(bp); + bnxt_free_filter_mem(bp); bnxt_free_vnic_attributes(bp); bnxt_free_vnic_mem(bp); @@ -257,6 +265,16 @@ static int bnxt_alloc_cos_queues(struct bnxt *bp) return 0; } +static int bnxt_alloc_flow_stats_info(struct bnxt *bp) +{ + bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", + sizeof(struct bnxt_flow_stat_info), 0); + if (bp->flow_stat == NULL) + return -ENOMEM; + + return 0; +} + static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) { int rc; @@ -289,6 +307,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) if (rc) goto alloc_mem_err; + if (BNXT_FLOW_XSTATS_EN(bp)) { + rc = bnxt_alloc_flow_stats_info(bp); + if (rc) + goto alloc_mem_err; + } + return 0; alloc_mem_err: @@ -390,68 +414,72 @@ static int 
bnxt_register_fc_ctx_mem(struct bnxt *bp) { int rc = 0; - rc = bnxt_hwrm_ctx_rgtr(bp, bp->rx_fc_in_tbl.dma, - &bp->rx_fc_in_tbl.ctx_id); + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, + &bp->flow_stat->rx_fc_in_tbl.ctx_id); if (rc) return rc; PMD_DRV_LOG(DEBUG, "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" " rx_fc_in_tbl.ctx_id = %d\n", - bp->rx_fc_in_tbl.va, - (void *)((uintptr_t)bp->rx_fc_in_tbl.dma), - bp->rx_fc_in_tbl.ctx_id); + bp->flow_stat->rx_fc_in_tbl.va, + (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), + bp->flow_stat->rx_fc_in_tbl.ctx_id); - rc = bnxt_hwrm_ctx_rgtr(bp, bp->rx_fc_out_tbl.dma, - &bp->rx_fc_out_tbl.ctx_id); + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, + &bp->flow_stat->rx_fc_out_tbl.ctx_id); if (rc) return rc; PMD_DRV_LOG(DEBUG, "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" " rx_fc_out_tbl.ctx_id = %d\n", - bp->rx_fc_out_tbl.va, - (void *)((uintptr_t)bp->rx_fc_out_tbl.dma), - bp->rx_fc_out_tbl.ctx_id); + bp->flow_stat->rx_fc_out_tbl.va, + (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), + bp->flow_stat->rx_fc_out_tbl.ctx_id); - rc = bnxt_hwrm_ctx_rgtr(bp, bp->tx_fc_in_tbl.dma, - &bp->tx_fc_in_tbl.ctx_id); + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, + &bp->flow_stat->tx_fc_in_tbl.ctx_id); if (rc) return rc; PMD_DRV_LOG(DEBUG, "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" " tx_fc_in_tbl.ctx_id = %d\n", - bp->tx_fc_in_tbl.va, - (void *)((uintptr_t)bp->tx_fc_in_tbl.dma), - bp->tx_fc_in_tbl.ctx_id); + bp->flow_stat->tx_fc_in_tbl.va, + (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), + bp->flow_stat->tx_fc_in_tbl.ctx_id); - rc = bnxt_hwrm_ctx_rgtr(bp, bp->tx_fc_out_tbl.dma, - &bp->tx_fc_out_tbl.ctx_id); + rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, + &bp->flow_stat->tx_fc_out_tbl.ctx_id); if (rc) return rc; PMD_DRV_LOG(DEBUG, "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" " tx_fc_out_tbl.ctx_id = %d\n", - bp->tx_fc_out_tbl.va, - (void 
*)((uintptr_t)bp->tx_fc_out_tbl.dma), - bp->tx_fc_out_tbl.ctx_id); + bp->flow_stat->tx_fc_out_tbl.va, + (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), + bp->flow_stat->tx_fc_out_tbl.ctx_id); - memset(bp->rx_fc_out_tbl.va, 0, bp->rx_fc_out_tbl.size); + memset(bp->flow_stat->rx_fc_out_tbl.va, + 0, + bp->flow_stat->rx_fc_out_tbl.size); rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, - bp->rx_fc_out_tbl.ctx_id, - bp->max_fc, + bp->flow_stat->rx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, true); if (rc) return rc; - memset(bp->tx_fc_out_tbl.va, 0, bp->tx_fc_out_tbl.size); + memset(bp->flow_stat->tx_fc_out_tbl.va, + 0, + bp->flow_stat->tx_fc_out_tbl.size); rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, - bp->tx_fc_out_tbl.ctx_id, - bp->max_fc, + bp->flow_stat->tx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, true); return rc; @@ -482,33 +510,41 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp) uint16_t max_fc; int rc = 0; - max_fc = bp->max_fc; + max_fc = bp->flow_stat->max_fc; sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); /* 4 bytes for each counter-id */ - rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 4, &bp->rx_fc_in_tbl); + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 4, + &bp->flow_stat->rx_fc_in_tbl); if (rc) return rc; sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ - rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 16, &bp->rx_fc_out_tbl); + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 16, + &bp->flow_stat->rx_fc_out_tbl); if (rc) return rc; sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); /* 4 bytes for each counter-id */ - rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 4, &bp->tx_fc_in_tbl); + rc = 
bnxt_alloc_ctx_mem_buf(type, + max_fc * 4, + &bp->flow_stat->tx_fc_in_tbl); if (rc) return rc; sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function); /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ - rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 16, &bp->tx_fc_out_tbl); + rc = bnxt_alloc_ctx_mem_buf(type, + max_fc * 16, + &bp->flow_stat->tx_fc_out_tbl); if (rc) return rc; @@ -522,10 +558,11 @@ static int bnxt_init_ctx_mem(struct bnxt *bp) int rc = 0; if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || - !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) + !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || + !BNXT_FLOW_XSTATS_EN(bp)) return 0; - rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->max_fc); + rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); if (rc) return rc; @@ -1244,6 +1281,9 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; bp->rx_cosq_cnt = 0; + /* All filters are deleted on a port stop. 
*/ + if (BNXT_FLOW_XSTATS_EN(bp)) + bp->flow_stat->flow_count = 0; } static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) @@ -5314,8 +5354,8 @@ bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, return -EINVAL; } - bp->flow_xstat = flow_xstat; - if (bp->flow_xstat) + bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; + if (BNXT_FLOW_XSTATS_EN(bp)) PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); return 0; @@ -5457,46 +5497,47 @@ static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) { bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, - bp->rx_fc_out_tbl.ctx_id, - bp->max_fc, + bp->flow_stat->rx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, false); bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, - bp->tx_fc_out_tbl.ctx_id, - bp->max_fc, + bp->flow_stat->tx_fc_out_tbl.ctx_id, + bp->flow_stat->max_fc, false); - if (bp->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) - bnxt_hwrm_ctx_unrgtr(bp, bp->rx_fc_in_tbl.ctx_id); - bp->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); + bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; - if (bp->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) - bnxt_hwrm_ctx_unrgtr(bp, bp->rx_fc_out_tbl.ctx_id); - bp->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); + bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; - if (bp->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) - bnxt_hwrm_ctx_unrgtr(bp, bp->tx_fc_in_tbl.ctx_id); - bp->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); + bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; - if (bp->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) - bnxt_hwrm_ctx_unrgtr(bp, 
bp->tx_fc_out_tbl.ctx_id); - bp->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; + if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) + bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); + bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; } static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) { bnxt_unregister_fc_ctx_mem(bp); - bnxt_free_ctx_mem_buf(&bp->rx_fc_in_tbl); - bnxt_free_ctx_mem_buf(&bp->rx_fc_out_tbl); - bnxt_free_ctx_mem_buf(&bp->tx_fc_in_tbl); - bnxt_free_ctx_mem_buf(&bp->tx_fc_out_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); + bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); } static void bnxt_uninit_ctx_mem(struct bnxt *bp) { - bnxt_uninit_fc_ctx_mem(bp); + if (BNXT_FLOW_XSTATS_EN(bp)) + bnxt_uninit_fc_ctx_mem(bp); } static void diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c index 44734272f..84a21dba9 100644 --- a/drivers/net/bnxt/bnxt_flow.c +++ b/drivers/net/bnxt/bnxt_flow.c @@ -1633,7 +1633,7 @@ static void bnxt_setup_flow_counter(struct bnxt *bp) { if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && - !(bp->flags & BNXT_FLAG_FC_THREAD)) { + !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) { rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER, bnxt_flow_cnt_alarm_cb, (void *)bp); @@ -1646,13 +1646,13 @@ void bnxt_flow_cnt_alarm_cb(void *arg) int rc = 0; struct bnxt *bp = arg; - if (!bp->rx_fc_out_tbl.va) { - PMD_DRV_LOG(ERR, "bp->rx_fc_out_tbl.va is NULL?\n"); + if (!bp->flow_stat->rx_fc_out_tbl.va) { + PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n"); bnxt_cancel_fc_thread(bp); return; } - if (!bp->flow_count) { + if (!bp->flow_stat->flow_count) { bnxt_cancel_fc_thread(bp); return; } @@ -1830,7 +1830,8 @@ bnxt_flow_create(struct rte_eth_dev *dev, bp->mark_table[flow_id].valid = true; bp->mark_table[flow_id].mark_id = filter->mark; } - 
bp->flow_count++; + if (BNXT_FLOW_XSTATS_EN(bp)) + bp->flow_stat->flow_count++; bnxt_release_flow_lock(bp); bnxt_setup_flow_counter(bp); return flow; @@ -1952,7 +1953,8 @@ _bnxt_flow_destroy(struct bnxt *bp, bnxt_free_filter(bp, filter); STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); rte_free(flow); - bp->flow_count--; + if (BNXT_FLOW_XSTATS_EN(bp)) + bp->flow_stat->flow_count--; /* If this was the last flow associated with this vnic, * switch the queue back to RSS pool. diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index 148000934..4022fafd1 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -5274,7 +5274,6 @@ int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc) *max_fc = rte_le_to_cpu_16(resp->max_rx_fc); HWRM_UNLOCK(); - PMD_DRV_LOG(DEBUG, "max_fc = %d\n", *max_fc); return 0; } @@ -5387,10 +5386,10 @@ int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp, } if (dir == BNXT_DIR_RX) { - flow_ctx_id = bp->rx_fc_in_tbl.ctx_id; + flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id; flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX; } else if (dir == BNXT_DIR_TX) { - flow_ctx_id = bp->tx_fc_in_tbl.ctx_id; + flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id; flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX; } diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c index 1d3be16f8..cfe193284 100644 --- a/drivers/net/bnxt/bnxt_stats.c +++ b/drivers/net/bnxt/bnxt_stats.c @@ -669,7 +669,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT && - bp->flow_xstat) { + BNXT_FLOW_XSTATS_EN(bp)) { int j; i = 0; @@ -713,7 +713,7 @@ int bnxt_flow_stats_cnt(struct bnxt *bp) { if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT && - bp->flow_xstat) { + BNXT_FLOW_XSTATS_EN(bp)) { struct bnxt_xstats_name_off flow_bytes[bp->max_l2_ctx]; struct bnxt_xstats_name_off 
flow_pkts[bp->max_l2_ctx]; @@ -783,7 +783,7 @@ int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev, if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS && bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT && - bp->flow_xstat) { + BNXT_FLOW_XSTATS_EN(bp)) { for (i = 0; i < bp->max_l2_ctx; i++) { char buf[RTE_ETH_XSTATS_NAME_SIZE]; @@ -936,8 +936,8 @@ static int bnxt_update_fc_tbl(struct bnxt *bp, uint16_t ctr, uint32_t out_rx_tbl_cnt = 0; int i, rc = 0; - in_rx_tbl = (uint32_t *)bp->rx_fc_in_tbl.va; - out_rx_tbl = (uint64_t *)bp->rx_fc_out_tbl.va; + in_rx_tbl = (uint32_t *)bp->flow_stat->rx_fc_in_tbl.va; + out_rx_tbl = (uint64_t *)bp->flow_stat->rx_fc_out_tbl.va; for (i = 0; i < in_flow_cnt; i++) { if (!en_tbl[i]) @@ -979,7 +979,7 @@ int bnxt_flow_stats_req(struct bnxt *bp) struct rte_flow *flow; uint16_t in_flow_tbl_cnt = 0; struct bnxt_vnic_info *vnic = NULL; - struct bnxt_filter_info *valid_en_tbl[bp->max_fc]; + struct bnxt_filter_info *valid_en_tbl[bp->flow_stat->max_fc]; uint16_t counter_type = CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC; bnxt_acquire_flow_lock(bp); @@ -996,7 +996,7 @@ int bnxt_flow_stats_req(struct bnxt *bp) continue; valid_en_tbl[in_flow_tbl_cnt++] = flow->filter; - if (in_flow_tbl_cnt >= bp->max_fc) { + if (in_flow_tbl_cnt >= bp->flow_stat->max_fc) { rc = bnxt_update_fc_tbl(bp, counter_type, valid_en_tbl, in_flow_tbl_cnt);