From patchwork Tue Mar 28 03:48:53 2017
X-Patchwork-Submitter: Ajit Khaparde
X-Patchwork-Id: 22477
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Ajit Khaparde
To: dev@dpdk.org
Cc: Stephen Hurd
Date: Mon, 27 Mar 2017 22:48:53 -0500
Message-Id: <20170328034903.41482-19-ajit.khaparde@broadcom.com>
X-Mailer: git-send-email 2.10.1 (Apple Git-78)
In-Reply-To: <20170328034903.41482-1-ajit.khaparde@broadcom.com>
References: <20170328034903.41482-1-ajit.khaparde@broadcom.com>
Subject: [dpdk-dev] [PATCH 18/28] bnxt: support lack of huge pages

rte_malloc_virt2phy() does not return a physical address when huge
pages are not in use.  Further, rte_memzone->phys_addr is not a
physical address in that case.  Use rte_mem_virt2phy() and manually
lock pages to support operation without huge pages.  Also check the
return value of rte_mem_virt2phy(): if it does not return a valid
address, log an error and return -ENOMEM.

Signed-off-by: Stephen Hurd
Signed-off-by: Ajit Khaparde
---
 drivers/net/bnxt/bnxt_hwrm.c | 64 +++++++++++++++++++++++++++++++-------------
 drivers/net/bnxt/bnxt_ring.c | 22 ++++++++++++---
 drivers/net/bnxt/bnxt_vnic.c | 16 ++++++++++-
 3 files changed, 78 insertions(+), 24 deletions(-)
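For context, the pattern this patch applies at each allocation site is:
allocate, pin the backing page with rte_mem_lock_page() so it keeps its
physical mapping when running without huge pages, then translate with
rte_mem_virt2phy() and validate the result.  A minimal sketch follows;
the helper name, allocation tag and log text are illustrative only, not
taken from the driver:

	#include <rte_malloc.h>
	#include <rte_memory.h>
	#include <rte_log.h>

	/* Hypothetical helper: allocate a DMA-able buffer and return
	 * both its virtual and physical addresses.
	 */
	static void *example_alloc_dma(size_t len, phys_addr_t *pa)
	{
		void *va = rte_malloc("example_dma", len, 0);

		if (va == NULL)
			return NULL;
		/* Pin the page; without huge pages it is not otherwise
		 * guaranteed to keep its physical mapping.
		 */
		rte_mem_lock_page(va);
		*pa = rte_mem_virt2phy(va);
		if (*pa == 0) {	/* same validity check the patch uses */
			RTE_LOG(ERR, PMD,
				"cannot map address to physical memory\n");
			rte_free(va);
			return NULL;
		}
		return va;
	}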
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index a840683..18ac2ad 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -477,8 +477,15 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 			rc = -ENOMEM;
 			goto error;
 		}
+		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
 		bp->hwrm_cmd_resp_dma_addr =
-			rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+			rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+		if (bp->hwrm_cmd_resp_dma_addr == 0) {
+			RTE_LOG(ERR, PMD,
+				"Unable to map response buffer to physical memory.\n");
+			rc = -ENOMEM;
+			goto error;
+		}
 		bp->max_resp_len = max_resp_len;
 	}
 
@@ -1065,6 +1072,11 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
 	req.req_buf_page_addr[0] =
 		rte_cpu_to_le_64(rte_malloc_virt2phy(bp->pf.vf_req_buf));
+	if (req.req_buf_page_addr[0] == 0) {
+		RTE_LOG(ERR, PMD,
+			"unable to map buffer address to physical memory\n");
+		return -ENOMEM;
+	}
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -1334,6 +1346,7 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
 void bnxt_free_hwrm_resources(struct bnxt *bp)
 {
 	/* Release memzone */
+	/* TODO: unlock page? */
 	rte_free(bp->hwrm_cmd_resp_addr);
 	bp->hwrm_cmd_resp_addr = NULL;
 	bp->hwrm_cmd_resp_dma_addr = 0;
@@ -1349,10 +1362,17 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
 	bp->max_req_len = HWRM_MAX_REQ_LEN;
 	bp->max_resp_len = HWRM_MAX_RESP_LEN;
 	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
 	if (bp->hwrm_cmd_resp_addr == NULL)
 		return -ENOMEM;
+	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
+
 	bp->hwrm_cmd_resp_dma_addr =
-		rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+		rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+	if (bp->hwrm_cmd_resp_dma_addr == 0) {
+		RTE_LOG(ERR, PMD,
+			"unable to map response address to physical memory\n");
+		return -ENOMEM;
+	}
 	rte_spinlock_init(&bp->hwrm_lock);
 
 	return 0;
@@ -1935,7 +1955,9 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 	struct hwrm_func_cfg_input req = {0};
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int i;
+	unsigned int ui;
 	int rc = 0;
+	size_t req_buf_sz;
 
 	if (!BNXT_PF(bp)) {
 		RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
@@ -1967,14 +1989,18 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 
 	/*
 	 * Now, create and register a buffer to hold forwarded VF requests
 	 */
-	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd",
-				       num_vfs * HWRM_MAX_REQ_LEN,
+	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
+	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
 				       page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
 	if (bp->pf.vf_req_buf == NULL) {
 		rc = -ENOMEM;
 		goto error_free;
 	}
+
+	for (ui = 0; ui < req_buf_sz; ui += getpagesize())
+		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + ui);
+
 	for (i = 0; i < num_vfs; i++)
 		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
 					    (i * HWRM_MAX_REQ_LEN);
@@ -2130,21 +2156,6 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf, uint16_t vlan)
 	return rc;
 }
 
-int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf)
-{
-	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_func_cfg_input req = {0};
-	int rc;
-
-	HWRM_PREP(req, FUNC_CFG, -1, resp);
-	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
-	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
-	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-	HWRM_CHECK_RESULT;
-
-	return rc;
-}
-
 int bnxt_hwrm_func_vf_vnic_cfg_do(struct bnxt *bp, uint16_t vf,
 			      void (*vnic_cb)(struct bnxt_vnic_info *, void *),
 			      void *cbdata)
@@ -2201,3 +2212,18 @@ int bnxt_hwrm_func_vf_vnic_cfg_do(struct bnxt *bp, uint16_t vf,
 
 	return rc;
 }
+
+int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf)
+{
+	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_cfg_input req = {0};
+	int rc;
+
+	HWRM_PREP(req, FUNC_CFG, -1, resp);
+	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
+	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+	HWRM_CHECK_RESULT;
+
+	return rc;
+}
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 389bef2..cace73b 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -96,6 +96,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 	struct rte_pci_device *pdev = bp->pdev;
 	const struct rte_memzone *mz = NULL;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
+	phys_addr_t mz_phys_addr;
 
 	int stats_len = (tx_ring_info || rx_ring_info) ?
 			RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;
@@ -144,13 +145,26 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 			return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
+	mz_phys_addr = mz->phys_addr;
+	if ((phys_addr_t)mz->addr == mz_phys_addr) {
+		RTE_LOG(WARNING, PMD,
+			"Memzone physical address same as virtual.\n");
+		RTE_LOG(WARNING, PMD,
+			"Using rte_mem_virt2phy()\n");
+		mz_phys_addr = rte_mem_virt2phy(mz->addr);
+		if (mz_phys_addr == 0) {
+			RTE_LOG(ERR, PMD,
+				"unable to map ring address to physical memory\n");
+			return -ENOMEM;
+		}
+	}
 
 	if (tx_ring_info) {
 		tx_ring = tx_ring_info->tx_ring_struct;
 		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
 		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
-		tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
+		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
 		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
 		tx_ring->mem_zone = (const void *)mz;
 
@@ -170,7 +184,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
 		rx_ring_info->rx_desc_ring =
 			(struct rx_prod_pkt_bd *)rx_ring->bd;
-		rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
+		rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
 		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
 		rx_ring->mem_zone = (const void *)mz;
 
@@ -185,7 +199,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 	}
 
 	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
-	cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
+	cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
 	cp_ring_info->cp_desc_ring = cp_ring->bd;
 	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
 	cp_ring->mem_zone = (const void *)mz;
@@ -196,7 +210,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 	*cp_ring->vmem = ((char *)mz->addr + stats_len);
 	if (stats_len) {
 		cp_ring_info->hw_stats = mz->addr;
-		cp_ring_info->hw_stats_map = mz->phys_addr;
+		cp_ring_info->hw_stats_map = mz_phys_addr;
 	}
 	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
 	return 0;
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 139e4bf..dfbaedb 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -172,6 +172,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
 				HW_HASH_KEY_SIZE);
 	uint16_t max_vnics;
 	int i;
+	phys_addr_t mz_phys_addr;
 
 	max_vnics = bp->max_vnics;
 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
@@ -188,6 +189,19 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
 		if (!mz)
 			return -ENOMEM;
 	}
+	mz_phys_addr = mz->phys_addr;
+	if ((phys_addr_t)mz->addr == mz_phys_addr) {
+		RTE_LOG(WARNING, PMD,
+			"Memzone physical address same as virtual.\n");
+		RTE_LOG(WARNING, PMD,
+			"Using rte_mem_virt2phy()\n");
+		mz_phys_addr = rte_mem_virt2phy(mz->addr);
+		if (mz_phys_addr == 0) {
+			RTE_LOG(ERR, PMD,
+				"unable to map vnic address to physical memory\n");
+			return -ENOMEM;
+		}
+	}
 
 	for (i = 0; i < max_vnics; i++) {
 		vnic = &bp->vnic_info[i];
@@ -197,7 +211,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
 			(void *)((char *)mz->addr + (entry_length * i));
 
 		memset(vnic->rss_table, -1, entry_length);
-		vnic->rss_table_dma_addr = mz->phys_addr + (entry_length * i);
+		vnic->rss_table_dma_addr = mz_phys_addr + (entry_length * i);
 		vnic->rss_hash_key = (void *)((char *)vnic->rss_table +
 				HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table));
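
The memzone handling added above in bnxt_alloc_rings() and
bnxt_alloc_vnic_attributes() follows one shape, open-coded at each
site: when the memzone's phys_addr equals its virtual address (the
no-huge-page case), resolve the real physical address with
rte_mem_virt2phy().  A condensed sketch, using a hypothetical helper
name that does not appear in the driver:

	#include <errno.h>
	#include <rte_memzone.h>
	#include <rte_memory.h>

	/* Hypothetical helper, not in the driver: resolve a memzone's
	 * physical address when huge pages may be absent.
	 */
	static int example_mz_to_phys(const struct rte_memzone *mz,
				      phys_addr_t *pa)
	{
		*pa = mz->phys_addr;
		/* Without huge pages, phys_addr holds the virtual address. */
		if ((phys_addr_t)mz->addr == *pa) {
			*pa = rte_mem_virt2phy(mz->addr);
			if (*pa == 0)	/* translation failed; caller logs */
				return -ENOMEM;
		}
		return 0;
	}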