From patchwork Tue Nov 24 10:26:43 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 84501
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: rasland@nvidia.com, matan@nvidia.com, orika@nvidia.com, thomas@monjalon.net, stable@dpdk.org
Date: Tue, 24 Nov 2020 10:26:43 +0000
Message-Id: <1606213603-32536-1-git-send-email-viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH] net/mlx5: fix Verbs memory allocation callback

The rdma-core library uses callbacks to allocate and free memory
from DPDK. The memory allocation callback used a complicated and
incorrect way to get the NUMA socket ID from the context: the context
itself was wrong, which could yield a wrong socket ID and cause memory
to be allocated on the wrong NUMA node. The callbacks are now assigned
once, as the InfiniBand device context is created, allowing early
access to shared DPDK memory for all Verbs internal objects that need
it.
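For illustration, the new scheme boils down to the sketch below. It is
not the driver code itself: "struct dev_ctx", "register_allocators()"
and the fixed page-size alignment are hypothetical stand-ins for
mlx5_dev_ctx_shared, the registration done in mlx5_os.c and
rte_mem_page_size(); only the rdma-core hook (mlx5dv_set_context_attr()
with MLX5DV_CTX_ATTR_BUF_ALLOCATORS and struct mlx5dv_ctx_allocators)
is the API actually used by the patch.

/*
 * Minimal sketch: hook PMD allocation callbacks into rdma-core so that
 * Verbs internal objects are served from DPDK memory on the device's
 * NUMA node.  "struct dev_ctx" is a hypothetical stand-in for the
 * driver's shared device context.
 */
#include <stddef.h>
#include <infiniband/mlx5dv.h>	/* mlx5dv_set_context_attr() */
#include <rte_malloc.h>		/* rte_malloc_socket(), rte_free() */

struct dev_ctx {
	struct ibv_context *ibv_ctx;	/* Verbs device context. */
	int numa_node;			/* NUMA node of the device. */
};

/* Allocation hook: the cookie is the shared context, not a queue object. */
static void *
alloc_verbs_buf(size_t size, void *data)
{
	struct dev_ctx *ctx = data;

	/* 4096 stands in for the page size the driver actually queries. */
	return rte_malloc_socket(NULL, size, 4096, ctx->numa_node);
}

/* Free hook: release the buffer back to the DPDK heap. */
static void
free_verbs_buf(void *ptr, void *data)
{
	(void)data;
	rte_free(ptr);
}

/* Register the hooks once, right after the device context is created. */
static int
register_allocators(struct dev_ctx *ctx)
{
	struct mlx5dv_ctx_allocators allocators = {
		.alloc = alloc_verbs_buf,
		.free = free_verbs_buf,
		.data = ctx,	/* Passed back to both callbacks. */
	};

	return mlx5dv_set_context_attr(ctx->ibv_ctx,
				       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
				       &allocators);
}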
Fixes: 36dabcea78f0 ("net/mlx5: use anonymous Direct Verbs allocator argument")
Fixes: 2eb4d0107acc ("net/mlx5: refactor PCI probing on Linux")
Fixes: 17e19bc4dde7 ("net/mlx5: add IB shared context alloc/free functions")
Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c    | 27 +++++++++++++--------------
 drivers/net/mlx5/linux/mlx5_verbs.c |  8 --------
 drivers/net/mlx5/mlx5.h             | 19 -------------------
 3 files changed, 13 insertions(+), 41 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 4c863db..9062191 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -168,9 +168,8 @@
 static void *
 mlx5_alloc_verbs_buf(size_t size, void *data)
 {
-	struct mlx5_priv *priv = data;
+	struct mlx5_dev_ctx_shared *sh = data;
 	void *ret;
-	unsigned int socket = SOCKET_ID_ANY;
 	size_t alignment = rte_mem_page_size();
 	if (alignment == (size_t)-1) {
 		DRV_LOG(ERR, "Failed to get mem page size");
@@ -178,18 +177,8 @@
 		return NULL;
 	}
-	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
-		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
-
-		socket = ctrl->socket;
-	} else if (priv->verbs_alloc_ctx.type ==
-		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
-		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
-
-		socket = ctrl->socket;
-	}
 	MLX5_ASSERT(data != NULL);
-	ret = mlx5_malloc(0, size, alignment, socket);
+	ret = mlx5_malloc(0, size, alignment, sh->numa_node);
 	if (!ret && size)
 		rte_errno = ENOMEM;
 	return ret;
@@ -1459,7 +1448,7 @@
 			(void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
 				.alloc = &mlx5_alloc_verbs_buf,
 				.free = &mlx5_free_verbs_buf,
-				.data = priv,
+				.data = sh,
 			}));
 	/* Bring Ethernet device up. */
 	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
@@ -2324,6 +2313,16 @@
 		DRV_LOG(DEBUG, "DevX is NOT supported");
 		err = 0;
 	}
+	if (!err && sh->ctx) {
+		/* Hint libmlx5 to use PMD allocator for data plane resources */
+		mlx5_glue->dv_set_context_attr(sh->ctx,
+			MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+			(void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
+				.alloc = &mlx5_alloc_verbs_buf,
+				.free = &mlx5_free_verbs_buf,
+				.data = sh,
+			}));
+	}
 	return err;
 }
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 540ce32..9161fa3 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -366,8 +366,6 @@
 	MLX5_ASSERT(rxq_data);
 	MLX5_ASSERT(tmpl);
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
-	priv->verbs_alloc_ctx.obj = rxq_ctrl;
 	tmpl->rxq_ctrl = rxq_ctrl;
 	if (rxq_ctrl->irq) {
 		tmpl->ibv_channel =
@@ -438,7 +436,6 @@
 	rxq_data->cq_arm_sn = 0;
 	mlx5_rxq_initialize(rxq_data);
 	rxq_data->cq_ci = 0;
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
 	rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
 	return 0;
@@ -451,7 +448,6 @@
 	if (tmpl->ibv_channel)
 		claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
 	rte_errno = ret; /* Restore rte_errno. */
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 	return -rte_errno;
 }
@@ -932,8 +928,6 @@
 	MLX5_ASSERT(txq_data);
 	MLX5_ASSERT(txq_obj);
 	txq_obj->txq_ctrl = txq_ctrl;
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
-	priv->verbs_alloc_ctx.obj = txq_ctrl;
 	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
 		DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION "
 			"must never be set.", dev->data->port_id);
@@ -1039,7 +1033,6 @@
 	}
 	txq_uar_init(txq_ctrl);
 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 	return 0;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
@@ -1047,7 +1040,6 @@
 		claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
 	if (txq_obj->qp)
 		claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
-	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 041240e..121d726 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -258,30 +258,12 @@ struct mlx5_dev_config {
 };
-/**
- * Type of object being allocated.
- */
-enum mlx5_verbs_alloc_type {
-	MLX5_VERBS_ALLOC_TYPE_NONE,
-	MLX5_VERBS_ALLOC_TYPE_TX_QUEUE,
-	MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
-};
-
 /* Structure for VF VLAN workaround. */
 struct mlx5_vf_vlan {
 	uint32_t tag:12;
 	uint32_t created:1;
 };
-/**
- * Verbs allocator needs a context to know in the callback which kind of
- * resources it is allocating.
- */
-struct mlx5_verbs_alloc_ctx {
-	enum mlx5_verbs_alloc_type type; /* Kind of object being allocated. */
-	const void *obj; /* Pointer to the DPDK object. */
-};
-
 /* Flow drop context necessary due to Verbs API. */
 struct mlx5_drop {
 	struct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */
@@ -989,7 +971,6 @@ struct mlx5_priv {
 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
 	struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
 	struct mlx5_dev_config config; /* Device configuration. */
-	struct mlx5_verbs_alloc_ctx verbs_alloc_ctx; /* Context for Verbs allocator. */
 	int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
 	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */