From patchwork Fri May 15 18:45:39 2020
X-Patchwork-Id: 70343
From: Ajit Khaparde
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, stable@dpdk.org, Kalesh AP, Somnath Kotur
Date: Fri, 15 May 2020 11:45:39 -0700
Message-Id: <20200515184542.89318-7-ajit.khaparde@broadcom.com>
In-Reply-To: <20200515184542.89318-1-ajit.khaparde@broadcom.com>
References: <20200515184542.89318-1-ajit.khaparde@broadcom.com>
Subject: [dpdk-dev] [PATCH v1 6/9] net/bnxt: fix to alloc PF info structure

Allocate the PF info structure dynamically instead of embedding it in
the bnxt device structure. The structure is now allocated during device
init and freed during close, and all users reach it through the bp->pf
pointer. This reduces the size of the bnxt structure.

Fixes: 804e746c7b73 ("net/bnxt: add hardware resource manager init code")
Cc: stable@dpdk.org

Signed-off-by: Ajit Khaparde
Signed-off-by: Kalesh AP
Reviewed-by: Somnath Kotur
---
 drivers/net/bnxt/bnxt.h         |   9 +-
 drivers/net/bnxt/bnxt_cpr.c     |  20 ++--
 drivers/net/bnxt/bnxt_ethdev.c  |  31 +++++--
 drivers/net/bnxt/bnxt_filter.c  |  12 +--
 drivers/net/bnxt/bnxt_hwrm.c    | 145 ++++++++++++++++----------
 drivers/net/bnxt/rte_pmd_bnxt.c |  98 ++++++++++-----------
 6 files changed, 169 insertions(+), 146 deletions(-)
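The core of the change, distilled: struct bnxt used to embed the PF info
by value, and now holds a pointer that is zero-allocated at init and
freed at close. A minimal sketch of that pattern follows; the driver
structures are trimmed to stand-ins here, and only the rte_zmalloc()/
rte_free() calls and the function names are taken from the diff below.

	#include <errno.h>
	#include <stdint.h>
	#include <rte_malloc.h>

	struct bnxt_pf_info {
		uint16_t max_vfs;	/* trimmed: the real struct has many fields */
	};

	struct bnxt {
		struct bnxt_pf_info *pf;	/* was: struct bnxt_pf_info pf; */
	};

	/* Mirrors bnxt_alloc_pf_info(): zeroed allocation, checked for failure. */
	static int bnxt_alloc_pf_info(struct bnxt *bp)
	{
		bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
		if (bp->pf == NULL)
			return -ENOMEM;
		return 0;
	}

	/* Mirrors bnxt_free_pf_info(); the NULL reset is an extra precaution
	 * in this sketch, not something the patch itself does. */
	static void bnxt_free_pf_info(struct bnxt *bp)
	{
		rte_free(bp->pf);
		bp->pf = NULL;
	}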
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index b66f6ba25..7410db5e4 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -219,11 +219,12 @@ struct bnxt_child_vf_info {
 
 struct bnxt_pf_info {
 #define BNXT_FIRST_PF_FID	1
-#define BNXT_MAX_VFS(bp)	(bp->pf.max_vfs)
-#define BNXT_TOTAL_VFS(bp)	((bp)->pf.total_vfs)
+#define BNXT_MAX_VFS(bp)	((bp)->pf->max_vfs)
+#define BNXT_TOTAL_VFS(bp)	((bp)->pf->total_vfs)
 #define BNXT_FIRST_VF_FID	128
 #define BNXT_PF_RINGS_USED(bp)	bnxt_get_num_queues(bp)
-#define BNXT_PF_RINGS_AVAIL(bp)	(bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
+#define BNXT_PF_RINGS_AVAIL(bp)	((bp)->pf->max_cp_rings - \
+				 BNXT_PF_RINGS_USED(bp))
 	uint16_t		port_id;
 	uint16_t		first_vf_id;
 	uint16_t		active_vfs;
@@ -689,7 +690,7 @@ struct bnxt {
 #define BNXT_OUTER_TPID_BD_MASK	0xffff0000
 #define BNXT_OUTER_TPID_BD_SHFT	16
 	uint32_t		outer_tpid_bd;
-	struct bnxt_pf_info	pf;
+	struct bnxt_pf_info	*pf;
 	uint8_t			vxlan_port_cnt;
 	uint8_t			geneve_port_cnt;
 	uint16_t		vxlan_port;
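Beyond the '.' to '->' conversion, note that the rewritten macros also
parenthesize the argument: ((bp)->pf->max_vfs) instead of
(bp->pf.max_vfs). A small standalone illustration of why that matters
for function-like macros; the structs here are invented for the example.

	struct pf_info { int max_vfs; };
	struct dev { struct pf_info *pf; };

	#define MAX_VFS_BAD(bp)	(bp->pf->max_vfs)	/* breaks on non-trivial args */
	#define MAX_VFS(bp)	((bp)->pf->max_vfs)	/* as the patch writes it */

	static int demo(struct dev *a, struct dev *b, int use_b)
	{
		/* MAX_VFS_BAD(use_b ? b : a) would expand to
		 * (use_b ? b : a->pf->max_vfs) and fail to compile;
		 * the parenthesized form expands as intended. */
		return MAX_VFS(use_b ? b : a);
	}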
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 21565b1e2..40e5350f6 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -160,14 +160,14 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 	uint16_t req_len;
 	int rc;
 
-	if (bp->pf.active_vfs <= 0) {
+	if (bp->pf->active_vfs <= 0) {
 		PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
 		return;
 	}
 
 	/* Qualify the fwd request */
 	fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
-	vf_id = fw_vf_id - bp->pf.first_vf_id;
+	vf_id = fw_vf_id - bp->pf->first_vf_id;
 
 	req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
 		   HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
@@ -176,15 +176,15 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 		req_len = sizeof(fwreq->encap_request);
 
 	/* Locate VF's forwarded command */
-	fwd_cmd = (struct input *)bp->pf.vf_info[vf_id].req_buf;
+	fwd_cmd = (struct input *)bp->pf->vf_info[vf_id].req_buf;
 
-	if (fw_vf_id < bp->pf.first_vf_id ||
-	    fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
+	if (fw_vf_id < bp->pf->first_vf_id ||
+	    fw_vf_id >= bp->pf->first_vf_id + bp->pf->active_vfs) {
 		PMD_DRV_LOG(ERR,
 		"FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
-		fw_vf_id, bp->pf.first_vf_id,
-		(bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
-		bp->pf.first_vf_id, bp->pf.active_vfs);
+		fw_vf_id, bp->pf->first_vf_id,
+		(bp->pf->first_vf_id) + bp->pf->active_vfs - 1,
+		bp->pf->first_vf_id, bp->pf->active_vfs);
 		goto reject;
 	}
 
@@ -219,7 +219,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 	if (rc) {
 		PMD_DRV_LOG(ERR,
 			"Failed to send FWD req VF 0x%x, type 0x%x.\n",
-			fw_vf_id - bp->pf.first_vf_id,
+			fw_vf_id - bp->pf->first_vf_id,
 			rte_le_to_cpu_16(fwd_cmd->req_type));
 	}
 	return;
@@ -230,7 +230,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 	if (rc) {
 		PMD_DRV_LOG(ERR,
 			"Failed to send REJECT req VF 0x%x, type 0x%x.\n",
-			fw_vf_id - bp->pf.first_vf_id,
+			fw_vf_id - bp->pf->first_vf_id,
 			rte_le_to_cpu_16(fwd_cmd->req_type));
 	}
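bnxt_handle_fwd_req() above translates the firmware's function id into a
zero-based index into vf_info[] and rejects ids outside
[first_vf_id, first_vf_id + active_vfs). The same check as a tiny
standalone helper; the names are invented for the sketch.

	#include <stdbool.h>
	#include <stdint.h>

	static bool fw_vf_id_to_index(uint16_t fw_vf_id, uint16_t first_vf_id,
				      uint16_t active_vfs, uint16_t *vf_idx)
	{
		if (fw_vf_id < first_vf_id ||
		    fw_vf_id >= first_vf_id + active_vfs)
			return false;	/* out of range: reject the request */
		*vf_idx = fw_vf_id - first_vf_id;
		return true;
	}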
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 7367c14d4..b64745003 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -191,6 +191,11 @@ static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
 	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
 }
 
+static void bnxt_free_pf_info(struct bnxt *bp)
+{
+	rte_free(bp->pf);
+}
+
 static void bnxt_free_link_info(struct bnxt *bp)
 {
 	rte_free(bp->link_info);
@@ -238,6 +243,15 @@ static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
 	bp->grp_info = NULL;
 }
 
+static int bnxt_alloc_pf_info(struct bnxt *bp)
+{
+	bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
+	if (bp->pf == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int bnxt_alloc_link_info(struct bnxt *bp)
 {
 	bp->link_info =
@@ -1319,6 +1333,7 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
 	bnxt_free_leds_info(bp);
 	bnxt_free_cos_queues(bp);
 	bnxt_free_link_info(bp);
+	bnxt_free_pf_info(bp);
 
 	eth_dev->dev_ops = NULL;
 	eth_dev->rx_pkt_burst = NULL;
@@ -1329,8 +1344,8 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
 	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
 	bp->rx_mem_zone = NULL;
 
-	rte_free(bp->pf.vf_info);
-	bp->pf.vf_info = NULL;
+	rte_free(bp->pf->vf_info);
+	bp->pf->vf_info = NULL;
 
 	rte_free(bp->grp_info);
 	bp->grp_info = NULL;
@@ -3751,7 +3766,7 @@ static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
 {
 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
-	struct bnxt_pf_info *pf = &bp->pf;
+	struct bnxt_pf_info *pf = bp->pf;
 	uint16_t port_id;
 	uint32_t fifo;
 
@@ -5013,7 +5028,7 @@ static void bnxt_config_vf_req_fwd(struct bnxt *bp)
 #define ALLOW_FUNC(x)	\
 	{ \
 		uint32_t arg = (x); \
-		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
+		bp->pf->vf_req_fwd[((arg) >> 5)] &= \
 		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
 	}
 
@@ -5021,11 +5036,11 @@ static void bnxt_config_vf_req_fwd(struct bnxt *bp)
 	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
 	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
 	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
-		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
+		memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
 	} else {
 		PMD_DRV_LOG(WARNING,
 			    "Firmware too old for VF mailbox functionality\n");
-		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
+		memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
 	}
 
 	/*
@@ -5457,6 +5472,10 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 		return rc;
 	}
 
+	rc = bnxt_alloc_pf_info(bp);
+	if (rc)
+		goto error_free;
+
 	rc = bnxt_alloc_link_info(bp);
 	if (rc)
 		goto error_free;
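ALLOW_FUNC() in the hunk above treats bp->pf->vf_req_fwd as a bitmap of
HWRM command ids: word index x >> 5, bit x & 0x1f. A standalone
demonstration of that arithmetic; the endianness handling via
rte_cpu_to_le_32() is left out, and the command id is arbitrary.

	#include <stdint.h>
	#include <stdio.h>

	static void bitmap_clear(uint32_t *map, uint32_t x)
	{
		map[x >> 5] &= ~(UINT32_C(1) << (x & 0x1f));	/* same math as ALLOW_FUNC */
	}

	int main(void)
	{
		uint32_t vf_req_fwd[8];
		unsigned int i;

		for (i = 0; i < 8; i++)	/* all bits set, as the memset(0xff) branch does */
			vf_req_fwd[i] = 0xffffffff;

		bitmap_clear(vf_req_fwd, 0x95);	/* word 4, bit 0x15 */
		printf("vf_req_fwd[4] = 0x%08x\n", vf_req_fwd[4]);	/* 0xffdfffff */
		return 0;
	}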
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index a1463a0e2..d822ff607 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -55,7 +55,7 @@ struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
 	}
 
 	filter->fw_l2_filter_id = UINT64_MAX;
-	STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
+	STAILQ_INSERT_TAIL(&bp->pf->vf_info[vf].filter, filter, next);
 
 	return filter;
 }
@@ -95,8 +95,8 @@ void bnxt_free_all_filters(struct bnxt *bp)
 		STAILQ_INIT(&vnic->filter);
 	}
 
-	for (i = 0; i < bp->pf.max_vfs; i++) {
-		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
+	for (i = 0; i < bp->pf->max_vfs; i++) {
+		STAILQ_FOREACH(filter, &bp->pf->vf_info[i].filter, next) {
 			bnxt_hwrm_clear_l2_filter(bp, filter);
 		}
 	}
@@ -144,10 +144,10 @@ void bnxt_free_filter_mem(struct bnxt *bp)
 	rte_free(bp->filter_info);
 	bp->filter_info = NULL;
 
-	for (i = 0; i < bp->pf.max_vfs; i++) {
-		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
+	for (i = 0; i < bp->pf->max_vfs; i++) {
+		STAILQ_FOREACH(filter, &bp->pf->vf_info[i].filter, next) {
 			rte_free(filter);
-			STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
+			STAILQ_REMOVE(&bp->pf->vf_info[i].filter, filter,
 				      bnxt_filter_info, next);
 		}
 	}
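The per-VF filters above live on sys/queue.h STAILQ lists hanging off
vf_info[i].filter. For readers unfamiliar with the macros, a minimal
self-contained walk-and-empty loop in the same style; the types here
are invented for the sketch.

	#include <stdlib.h>
	#include <sys/queue.h>

	struct filt {
		int id;
		STAILQ_ENTRY(filt) next;
	};
	STAILQ_HEAD(filt_head, filt);

	static void drain(struct filt_head *head)
	{
		struct filt *f;

		/* Pop from the head so no element is touched after it is freed. */
		while ((f = STAILQ_FIRST(head)) != NULL) {
			STAILQ_REMOVE_HEAD(head, next);
			free(f);
		}
	}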
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 2e93a847f..a41d77669 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -626,7 +626,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 
 	HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
 
-	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+	req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -690,41 +690,42 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
 	flags = rte_le_to_cpu_32(resp->flags);
 	if (BNXT_PF(bp)) {
-		bp->pf.port_id = resp->port_id;
-		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
-		bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
+		bp->pf->port_id = resp->port_id;
+		bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+		bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
 		new_max_vfs = bp->pdev->max_vfs;
-		if (new_max_vfs != bp->pf.max_vfs) {
-			if (bp->pf.vf_info)
-				rte_free(bp->pf.vf_info);
-			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
-			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
-			bp->pf.max_vfs = new_max_vfs;
+		if (new_max_vfs != bp->pf->max_vfs) {
+			if (bp->pf->vf_info)
+				rte_free(bp->pf->vf_info);
+			bp->pf->vf_info = rte_malloc("bnxt_vf_info",
+			    sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
+			bp->pf->max_vfs = new_max_vfs;
 			for (i = 0; i < new_max_vfs; i++) {
-				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
-				bp->pf.vf_info[i].vlan_table =
+				bp->pf->vf_info[i].fid =
+					bp->pf->first_vf_id + i;
+				bp->pf->vf_info[i].vlan_table =
 					rte_zmalloc("VF VLAN table",
 						    getpagesize(),
 						    getpagesize());
-				if (bp->pf.vf_info[i].vlan_table == NULL)
+				if (bp->pf->vf_info[i].vlan_table == NULL)
 					PMD_DRV_LOG(ERR,
 					"Fail to alloc VLAN table for VF %d\n",
 					i);
 				else
 					rte_mem_lock_page(
-					      bp->pf.vf_info[i].vlan_table);
-				bp->pf.vf_info[i].vlan_as_table =
+					      bp->pf->vf_info[i].vlan_table);
+				bp->pf->vf_info[i].vlan_as_table =
 					rte_zmalloc("VF VLAN AS table",
 						    getpagesize(),
 						    getpagesize());
-				if (bp->pf.vf_info[i].vlan_as_table == NULL)
+				if (bp->pf->vf_info[i].vlan_as_table == NULL)
 					PMD_DRV_LOG(ERR,
 					"Alloc VLAN AS table for VF %d fail\n",
 					i);
 				else
 					rte_mem_lock_page(
-					      bp->pf.vf_info[i].vlan_as_table);
-				STAILQ_INIT(&bp->pf.vf_info[i].filter);
+					      bp->pf->vf_info[i].vlan_as_table);
+				STAILQ_INIT(&bp->pf->vf_info[i].filter);
 			}
 		}
 	}
@@ -742,7 +743,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		bp->max_l2_ctx += bp->max_rx_em_flows;
 	/* TODO: For now, do not support VMDq/RFS on VFs. */
 	if (BNXT_PF(bp)) {
-		if (bp->pf.max_vfs)
+		if (bp->pf->max_vfs)
 			bp->max_vnics = 1;
 		else
 			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
@@ -753,7 +754,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		    bp->max_l2_ctx, bp->max_vnics);
 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
 	if (BNXT_PF(bp)) {
-		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+		bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
 		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
 			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
 			PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
@@ -881,9 +882,9 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 	if (BNXT_PF(bp)) {
 		req.enables |= rte_cpu_to_le_32(
 			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
-		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
+		memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
 		       RTE_MIN(sizeof(req.vf_req_fwd),
-			       sizeof(bp->pf.vf_req_fwd)));
+			       sizeof(bp->pf->vf_req_fwd)));
 
 		/*
 		 * PF can sniff HWRM API issued by VF. This can be set up by
@@ -2234,11 +2235,11 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc;
 
-	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
 	req.enables = rte_cpu_to_le_32(
 			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
 	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
-	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 
 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
@@ -2246,7 +2247,7 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
-	bp->pf.vf_info[vf].random_mac = false;
+	bp->pf->vf_info[vf].random_mac = false;
 
 	return rc;
 }
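__bnxt_hwrm_func_qcaps() above gives each VF a page-sized, page-aligned
VLAN table and then pins it with rte_mem_lock_page(), since the device
will access it. The allocation idiom in isolation, as a sketch with
error handling reduced to a NULL return:

	#include <unistd.h>
	#include <rte_malloc.h>
	#include <rte_memory.h>

	/* One page-sized, page-aligned, page-locked table, as used for
	 * vlan_table / vlan_as_table above. */
	static void *alloc_locked_page_table(const char *tag)
	{
		void *tbl = rte_zmalloc(tag, getpagesize(), getpagesize());

		if (tbl != NULL)
			rte_mem_lock_page(tbl);	/* keep it resident for device access */
		return tbl;
	}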
@@ -3164,7 +3165,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 		req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
 	}
 
-	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
 	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
 	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
 	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
@@ -3233,7 +3234,7 @@ static void add_random_mac_if_needed(struct bnxt *bp,
 		cfg_req->enables |=
 		rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
 		rte_eth_random_addr(cfg_req->dflt_mac_addr);
-		bp->pf.vf_info[vf].random_mac = true;
+		bp->pf->vf_info[vf].random_mac = true;
 	} else {
 		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
 			RTE_ETHER_ADDR_LEN);
@@ -3250,7 +3251,7 @@ static int reserve_resources_from_vf(struct bnxt *bp,
 
 	/* Get the actual allocated values now */
 	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
-	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	if (rc) {
@@ -3288,7 +3289,7 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
 
 	/* Check for zero MAC address */
 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
-	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 	rc = rte_le_to_cpu_16(resp->vlan);
@@ -3312,7 +3313,7 @@ static int update_pf_resource_max(struct bnxt *bp)
 
 	/* Only TX ring value reflects actual allocation? TODO */
 	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
-	bp->pf.evb_mode = resp->evb_mode;
+	bp->pf->evb_mode = resp->evb_mode;
 
 	HWRM_UNLOCK();
 
@@ -3332,10 +3333,10 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 	if (rc)
 		return rc;
 
-	bp->pf.func_cfg_flags &=
+	bp->pf->func_cfg_flags &=
 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
-	bp->pf.func_cfg_flags |=
+	bp->pf->func_cfg_flags |=
 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
 	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
 	rc = __bnxt_hwrm_func_qcaps(bp);
@@ -3361,7 +3362,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	if (rc)
 		return rc;
 
-	bp->pf.active_vfs = num_vfs;
+	bp->pf->active_vfs = num_vfs;
 
 	/*
 	 * First, configure the PF to only use one TX ring.  This ensures that
@@ -3373,10 +3374,10 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	 *
 	 * This has been fixed with firmware versions above 20.6.54
 	 */
-	bp->pf.func_cfg_flags &=
+	bp->pf->func_cfg_flags &=
 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
-	bp->pf.func_cfg_flags |=
+	bp->pf->func_cfg_flags |=
 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
 	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
 	if (rc)
@@ -3386,16 +3387,16 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	 * Now, create and register a buffer to hold forwarded VF requests
 	 */
 	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
-	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
+	bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
 		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
-	if (bp->pf.vf_req_buf == NULL) {
+	if (bp->pf->vf_req_buf == NULL) {
 		rc = -ENOMEM;
 		goto error_free;
 	}
 	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
-		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
+		rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
 	for (i = 0; i < num_vfs; i++)
-		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
+		bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
 			(i * HWRM_MAX_REQ_LEN);
 
 	rc = bnxt_hwrm_func_buf_rgtr(bp);
@@ -3404,13 +3405,13 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 
 	populate_vf_func_cfg_req(bp, &req, num_vfs);
 
-	bp->pf.active_vfs = 0;
+	bp->pf->active_vfs = 0;
 	for (i = 0; i < num_vfs; i++) {
 		add_random_mac_if_needed(bp, &req, i);
 
 		HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
-		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
-		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
+		req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
+		req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
 		rc = bnxt_hwrm_send_message(bp,
 					    &req,
 					    sizeof(req),
@@ -3433,8 +3434,8 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 		HWRM_UNLOCK();
 
 		reserve_resources_from_vf(bp, &req, i);
-		bp->pf.active_vfs++;
-		bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
+		bp->pf->active_vfs++;
+		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
 	}
 
 	/*
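bnxt_hwrm_allocate_vfs() above carves one contiguous buffer into per-VF
request slots of HWRM_MAX_REQ_LEN bytes each; vf_info[i].req_buf is just
a pointer into that block. The slicing arithmetic on its own, with the
slot size assumed rather than taken from the driver headers:

	#include <stddef.h>

	#define MAX_REQ_LEN	128	/* stands in for HWRM_MAX_REQ_LEN */

	static void slice_req_buf(char *base, char **slots, int num_vfs)
	{
		int i;

		/* VF i owns bytes [i * MAX_REQ_LEN, (i + 1) * MAX_REQ_LEN). */
		for (i = 0; i < num_vfs; i++)
			slots[i] = base + (size_t)i * MAX_REQ_LEN;
	}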
@@ -3468,7 +3469,7 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
 
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
-	req.evb_mode = bp->pf.evb_mode;
+	req.evb_mode = bp->pf->evb_mode;
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
@@ -3536,7 +3537,7 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
 
 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
-	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 	req.flags = rte_cpu_to_le_32(flags);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
 				    BNXT_USE_CHIMP_MB);
@@ -3568,10 +3569,10 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 
 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
 	req.req_buf_page_size = rte_cpu_to_le_16(
-			 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
+			 page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
 	req.req_buf_page_addr0 =
-		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
+		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
 	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map buffer address to physical memory\n");
@@ -3614,7 +3615,7 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	req.fid = rte_cpu_to_le_16(0xffff);
-	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
+	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
 	req.enables = rte_cpu_to_le_32(
 			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
 	req.async_event_cr = rte_cpu_to_le_16(
@@ -3658,12 +3659,12 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
 	if (is_vf) {
-		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
-		fid = bp->pf.vf_info[vf].fid;
-		func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
+		dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
+		fid = bp->pf->vf_info[vf].fid;
+		func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
 	} else {
 		fid = rte_cpu_to_le_16(0xffff);
-		func_cfg_flags = bp->pf.func_cfg_flags;
+		func_cfg_flags = bp->pf->func_cfg_flags;
 		dflt_vlan = bp->vlan;
 	}
 
@@ -3689,9 +3690,9 @@ int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
 
 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
-	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(enables);
-	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
 	req.max_bw = rte_cpu_to_le_32(max_bw);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -3709,10 +3710,10 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
 
 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
-	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
-	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+	req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
+	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
-	req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
+	req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -3766,7 +3767,7 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
 
 	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
 
-	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
@@ -3843,7 +3844,7 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp)
 {
 	struct hwrm_port_qstats_input req = {0};
 	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
-	struct bnxt_pf_info *pf = &bp->pf;
+	struct bnxt_pf_info *pf = bp->pf;
 	int rc;
 
 	HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
@@ -3863,7 +3864,7 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
 {
 	struct hwrm_port_clr_stats_input req = {0};
 	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
-	struct bnxt_pf_info *pf = &bp->pf;
+	struct bnxt_pf_info *pf = bp->pf;
 	int rc;
 
 	/* Not allowed on NS2 device, NPAR, MultiHost, VF */
@@ -3892,7 +3893,7 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
 		return 0;
 
 	HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
-	req.port_id = bp->pf.port_id;
+	req.port_id = bp->pf->port_id;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
@@ -3939,7 +3940,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
 		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
 		duration = rte_cpu_to_le_16(500);
 	}
-	req.port_id = bp->pf.port_id;
+	req.port_id = bp->pf->port_id;
 	req.num_leds = bp->leds->num_leds;
 	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
 	for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
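bnxt_hwrm_func_buf_rgtr() above hands the firmware the IO address of the
request buffer via rte_malloc_virt2iova() and bails out on RTE_BAD_IOVA
before using it. The lookup-and-validate step on its own, as a sketch
with the surrounding HWRM request omitted and the function name invented:

	#include <errno.h>
	#include <stdint.h>
	#include <rte_byteorder.h>
	#include <rte_malloc.h>
	#include <rte_memory.h>

	static int buf_dma_addr(const void *vf_req_buf, uint64_t *dma_addr)
	{
		rte_iova_t iova = rte_malloc_virt2iova(vf_req_buf);

		if (iova == RTE_BAD_IOVA)
			return -ENOMEM;	/* buffer not mappable for the device */
		*dma_addr = rte_cpu_to_le_64(iova);	/* HWRM fields are little-endian */
		return 0;
	}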
@@ -4151,8 +4152,8 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
 
 	/* First query all VNIC ids */
 	HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
 
-	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
-	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
+	req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
+	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
 	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
 
 	if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
@@ -4187,7 +4188,7 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
 	size_t sz;
 
 	/* First query all VNIC ids */
-	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+	vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
 	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
 			RTE_CACHE_LINE_SIZE);
 	if (vnic_ids == NULL)
@@ -4206,7 +4207,7 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
 	for (i = 0; i < num_vnic_ids; i++) {
 		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
 		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
-		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
+		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
 		if (rc)
 			break;
 		if (vnic.mru <= 4)	/* Indicates unallocated */
@@ -4233,7 +4234,7 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
 
 	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
-	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 	req.enables |= rte_cpu_to_le_32(
 			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
 	req.vlan_antispoof_mode = on ?
@@ -4256,7 +4257,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
 	size_t sz;
 	int rc;
 
-	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
+	vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
 	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
 			RTE_CACHE_LINE_SIZE);
 	if (vnic_ids == NULL)
@@ -4279,7 +4280,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
 		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
 		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
 		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
-					bp->pf.first_vf_id + vf);
+					bp->pf->first_vf_id + vf);
 		if (rc)
 			goto exit;
 		if (vnic.func_default) {
@@ -4885,7 +4886,7 @@ int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
 {
 	struct hwrm_port_qstats_ext_input req = {0};
 	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
-	struct bnxt_pf_info *pf = &bp->pf;
+	struct bnxt_pf_info *pf = bp->pf;
 	int rc;
 
 	if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
@@ -5204,7 +5205,7 @@ int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
 	}
 
 	req.flags = rte_cpu_to_le_32(flags);
-	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+	req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index 2df486592..eafc1d386 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -64,9 +64,9 @@ int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
 	}
 
 	if (on)
-		bp->pf.evb_mode = BNXT_EVB_MODE_VEB;
+		bp->pf->evb_mode = BNXT_EVB_MODE_VEB;
 	else
-		bp->pf.evb_mode = BNXT_EVB_MODE_VEPA;
+		bp->pf->evb_mode = BNXT_EVB_MODE_VEPA;
 
 	rc = bnxt_hwrm_pf_evb_mode(bp);
 
@@ -118,7 +118,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
 	}
 
 	/* Stall all active VFs */
-	for (i = 0; i < bp->pf.active_vfs; i++) {
+	for (i = 0; i < bp->pf->active_vfs; i++) {
 		rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, i,
 				rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
 				bnxt_hwrm_vnic_cfg);
@@ -197,10 +197,10 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
 	}
 	bp = eth_dev->data->dev_private;
 
-	if (!bp->pf.active_vfs)
+	if (!bp->pf->active_vfs)
 		return -EINVAL;
 
-	if (vf >= bp->pf.max_vfs)
+	if (vf >= bp->pf->max_vfs)
 		return -EINVAL;
 
 	/* Add up the per queue BW and configure MAX BW of the VF */
@@ -216,14 +216,14 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
 	}
 
 	/* Requested BW already configured */
-	if (tot_rate == bp->pf.vf_info[vf].max_tx_rate)
+	if (tot_rate == bp->pf->vf_info[vf].max_tx_rate)
 		return 0;
 
 	rc = bnxt_hwrm_func_bw_cfg(bp, vf, tot_rate,
 				HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW);
 
 	if (!rc)
-		bp->pf.vf_info[vf].max_tx_rate = tot_rate;
+		bp->pf->vf_info[vf].max_tx_rate = tot_rate;
 
 	return rc;
 }
@@ -265,10 +265,10 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
 		return -EINVAL;
 
 	/* Prev setting same as new setting. */
-	if (on == bp->pf.vf_info[vf].mac_spoof_en)
+	if (on == bp->pf->vf_info[vf].mac_spoof_en)
 		return 0;
 
-	func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+	func_flags = bp->pf->vf_info[vf].func_cfg_flags;
 	func_flags &=
 		~(HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE |
 		  HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE);
@@ -281,8 +281,8 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
 
 	rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
 	if (!rc) {
-		bp->pf.vf_info[vf].mac_spoof_en = on;
-		bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+		bp->pf->vf_info[vf].mac_spoof_en = on;
+		bp->pf->vf_info[vf].func_cfg_flags = func_flags;
 	}
 
 	return rc;
@@ -325,12 +325,12 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
 
 	rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on);
 	if (!rc) {
-		bp->pf.vf_info[vf].vlan_spoof_en = on;
+		bp->pf->vf_info[vf].vlan_spoof_en = on;
 		if (on) {
 			if (bnxt_hwrm_cfa_vlan_antispoof_cfg(bp,
-				bp->pf.first_vf_id + vf,
-				bp->pf.vf_info[vf].vlan_count,
-				bp->pf.vf_info[vf].vlan_as_table))
+				bp->pf->first_vf_id + vf,
+				bp->pf->vf_info[vf].vlan_count,
+				bp->pf->vf_info[vf].vlan_as_table))
 				rc = -1;
 		}
 	} else {
@@ -415,7 +415,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
 	}
 	bp = dev->data->dev_private;
 
-	if (!bp->pf.vf_info)
+	if (!bp->pf->vf_info)
 		return -EINVAL;
 
 	if (vf >= bp->pdev->max_vfs)
@@ -436,13 +436,13 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
 		flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
 
 	if (on)
-		bp->pf.vf_info[vf].l2_rx_mask |= flag;
+		bp->pf->vf_info[vf].l2_rx_mask |= flag;
 	else
-		bp->pf.vf_info[vf].l2_rx_mask &= ~flag;
+		bp->pf->vf_info[vf].l2_rx_mask &= ~flag;
 
 	rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 					vf_vnic_set_rxmask_cb,
-					&bp->pf.vf_info[vf].l2_rx_mask,
+					&bp->pf->vf_info[vf].l2_rx_mask,
 					bnxt_set_rx_mask_no_vlan);
 	if (rc)
 		PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
@@ -475,10 +475,10 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
 		memset(&vnic, 0, sizeof(vnic));
 		vnic.fw_vnic_id = dflt_vnic;
 		if (bnxt_hwrm_vnic_qcfg(bp, &vnic,
-					bp->pf.first_vf_id + vf) == 0) {
+					bp->pf->first_vf_id + vf) == 0) {
 			if (bnxt_hwrm_cfa_l2_set_rx_mask(bp, &vnic,
-					bp->pf.vf_info[vf].vlan_count,
-					bp->pf.vf_info[vf].vlan_table))
+					bp->pf->vf_info[vf].vlan_count,
+					bp->pf->vf_info[vf].vlan_table))
 				rc = -1;
 		} else {
 			rc = -1;
@@ -506,19 +506,19 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
 		return -ENOTSUP;
 
 	bp = dev->data->dev_private;
-	if (!bp->pf.vf_info)
+	if (!bp->pf->vf_info)
 		return -EINVAL;
 
 	for (i = 0; vf_mask; i++, vf_mask >>= 1) {
-		cnt = bp->pf.vf_info[i].vlan_count;
+		cnt = bp->pf->vf_info[i].vlan_count;
 		if ((vf_mask & 1) == 0)
 			continue;
 
-		if (bp->pf.vf_info[i].vlan_table == NULL) {
+		if (bp->pf->vf_info[i].vlan_table == NULL) {
 			rc = -1;
 			continue;
 		}
-		if (bp->pf.vf_info[i].vlan_as_table == NULL) {
+		if (bp->pf->vf_info[i].vlan_as_table == NULL) {
 			rc = -1;
 			continue;
 		}
@@ -526,7 +526,8 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
 			/* First, search for a duplicate... */
 			for (j = 0; j < cnt; j++) {
 				if (rte_be_to_cpu_16(
-				   bp->pf.vf_info[i].vlan_table[j].vid) == vlan)
+				    bp->pf->vf_info[i].vlan_table[j].vid) ==
+				    vlan)
 					break;
 			}
 			if (j == cnt) {
@@ -543,17 +544,17 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
 				}
 
 				/* cnt is one less than vlan_count */
-				cnt = bp->pf.vf_info[i].vlan_count++;
+				cnt = bp->pf->vf_info[i].vlan_count++;
 				/*
 				 * And finally, add to the
 				 * end of the table
 				 */
-				vase = &bp->pf.vf_info[i].vlan_as_table[cnt];
+				vase = &bp->pf->vf_info[i].vlan_as_table[cnt];
 				// TODO: Hardcoded TPID
 				vase->tpid = rte_cpu_to_be_16(0x8100);
 				vase->vid = rte_cpu_to_be_16(vlan);
 				vase->mask = rte_cpu_to_be_16(0xfff);
-				ve = &bp->pf.vf_info[i].vlan_table[cnt];
+				ve = &bp->pf->vf_info[i].vlan_table[cnt];
 				/* TODO: Hardcoded TPID */
 				ve->tpid = rte_cpu_to_be_16(0x8100);
 				ve->vid = rte_cpu_to_be_16(vlan);
@@ -561,18 +562,19 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
 		} else {
 			for (j = 0; j < cnt; j++) {
 				if (rte_be_to_cpu_16(
-				   bp->pf.vf_info[i].vlan_table[j].vid) != vlan)
+				    bp->pf->vf_info[i].vlan_table[j].vid) !=
+				    vlan)
 					continue;
-				memmove(&bp->pf.vf_info[i].vlan_table[j],
-					&bp->pf.vf_info[i].vlan_table[j + 1],
+				memmove(&bp->pf->vf_info[i].vlan_table[j],
+					&bp->pf->vf_info[i].vlan_table[j + 1],
 					getpagesize() - ((j + 1) *
 					sizeof(struct bnxt_vlan_table_entry)));
-				memmove(&bp->pf.vf_info[i].vlan_as_table[j],
-					&bp->pf.vf_info[i].vlan_as_table[j + 1],
+				memmove(&bp->pf->vf_info[i].vlan_as_table[j],
+					&bp->pf->vf_info[i].vlan_as_table[j + 1],
 					getpagesize() - ((j + 1) * sizeof(struct
 					bnxt_vlan_antispoof_table_entry)));
 				j--;
-				cnt = --bp->pf.vf_info[i].vlan_count;
+				cnt = --bp->pf->vf_info[i].vlan_count;
 			}
 		}
 		bnxt_set_vf_table(bp, i);
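The delete path above keeps vlan_table packed: when entry j matches,
everything after it is shifted down one slot with memmove() and
vlan_count is decremented (j is decremented too, so the shifted-in entry
is re-examined on the next pass). The compaction idiom in a standalone
form, with plain ints instead of VLAN entries:

	#include <string.h>

	/* Remove the element at index j from a packed array of n ints,
	 * preserving order, and return the new count. */
	static int remove_at(int *tbl, int n, int j)
	{
		memmove(&tbl[j], &tbl[j + 1],
			(size_t)(n - 1 - j) * sizeof(tbl[0]));
		return n - 1;
	}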
@@ -614,7 +616,7 @@ int rte_pmd_bnxt_get_vf_stats(uint16_t port,
 		return -ENOTSUP;
 	}
 
-	return bnxt_hwrm_func_qstats(bp, bp->pf.first_vf_id + vf_id, stats,
+	return bnxt_hwrm_func_qstats(bp, bp->pf->first_vf_id + vf_id, stats,
 				     NULL);
 }
 
@@ -650,7 +652,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
 		return -ENOTSUP;
 	}
 
-	return bnxt_hwrm_func_clr_stats(bp, bp->pf.first_vf_id + vf_id);
+	return bnxt_hwrm_func_clr_stats(bp, bp->pf->first_vf_id + vf_id);
 }
 
 int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
@@ -719,7 +721,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
 		return -ENOTSUP;
 	}
 
-	return bnxt_hwrm_func_qstats_tx_drop(bp, bp->pf.first_vf_id + vf_id,
+	return bnxt_hwrm_func_qstats_tx_drop(bp, bp->pf->first_vf_id + vf_id,
 					     count);
 }
 
@@ -759,7 +761,7 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct rte_ether_addr *addr,
 	}
 
 	/* If the VF currently uses a random MAC, update default to this one */
-	if (bp->pf.vf_info[vf_id].random_mac) {
+	if (bp->pf->vf_info[vf_id].random_mac) {
 		if (rte_pmd_bnxt_get_vf_rx_status(port, vf_id) <= 0)
 			bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr);
 	}
@@ -771,11 +773,11 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct rte_ether_addr *addr,
 
 	memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
 	vnic.fw_vnic_id = rte_le_to_cpu_16(rc);
-	rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf_id);
+	rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf_id);
 	if (rc < 0)
 		goto exit;
 
-	STAILQ_FOREACH(filter, &bp->pf.vf_info[vf_id].filter, next) {
+	STAILQ_FOREACH(filter, &bp->pf->vf_info[vf_id].filter, next) {
 		if (filter->flags ==
 		    HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX &&
 		    filter->enables ==
@@ -841,9 +843,9 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
 		return -ENOTSUP;
 	}
 
-	bp->pf.vf_info[vf].dflt_vlan = vlan_id;
+	bp->pf->vf_info[vf].dflt_vlan = vlan_id;
 	if (bnxt_hwrm_func_qcfg_current_vf_vlan(bp, vf) ==
-	    bp->pf.vf_info[vf].dflt_vlan)
+	    bp->pf->vf_info[vf].dflt_vlan)
 		return 0;
 
 	rc = bnxt_hwrm_set_vf_vlan(bp, vf);
@@ -886,10 +888,10 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
 		return -EINVAL;
 
 	/* Prev setting same as new setting. */
-	if (on == bp->pf.vf_info[vf].persist_stats)
+	if (on == bp->pf->vf_info[vf].persist_stats)
 		return 0;
 
-	func_flags = bp->pf.vf_info[vf].func_cfg_flags;
+	func_flags = bp->pf->vf_info[vf].func_cfg_flags;
 
 	if (on)
 		func_flags |=
@@ -900,8 +902,8 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
 
 	rc = bnxt_hwrm_func_cfg_vf_set_flags(bp, vf, func_flags);
 	if (!rc) {
-		bp->pf.vf_info[vf].persist_stats = on;
-		bp->pf.vf_info[vf].func_cfg_flags = func_flags;
+		bp->pf->vf_info[vf].persist_stats = on;
+		bp->pf->vf_info[vf].func_cfg_flags = func_flags;
 	}
 
 	return rc;