From patchwork Thu Oct 15 06:38:10 2020
X-Patchwork-Submitter: Matan Azrad
X-Patchwork-Id: 80834
X-Patchwork-Delegate: rasland@nvidia.com
From: Matan Azrad
To: dev@dpdk.org
Cc: Viacheslav Ovsiienko
Date: Thu, 15 Oct 2020 06:38:10 +0000
Message-Id: <1602743893-345348-1-git-send-email-matan@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
Subject: [dpdk-dev] [PATCH 1/4] net/mlx5: fix Rx queue release

The HW objects of the Rx queue are created/destroyed in the device
start/stop stage, while the ethdev configurations for the Rx queue
start from the rx_queue_setup stage. The PMD should save all the last
configurations it got from the ethdev and apply them to the device in
the dev_start operation.

Wrongly, the code recently added to mitigate the reference counters
did not take the above rule into account and tied the configurations
and HW objects together, so that they were created and destroyed
together. This caused a memory leak and other memory issues.

Make sure the HW object is released in the stop operation when there
is no reference to it, while the configurations stay saved.

Fixes: 24e4b650badc ("net/mlx5: mitigate Rx queue reference counters")

Signed-off-by: Matan Azrad
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5_rxq.c  | 23 +++++++++++++----------
 drivers/net/mlx5/mlx5_rxtx.h |  2 +-
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f1d8373..e1783ba 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -447,7 +447,8 @@
 		return -rte_errno;
 	}
 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-	return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+	return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
+
 }
 
 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1541,7 +1542,7 @@ struct mlx5_rxq_ctrl *
 	tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
 #endif
 	tmpl->rxq.idx = idx;
-	rte_atomic32_inc(&tmpl->refcnt);
+	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
 error:
@@ -1588,7 +1589,7 @@ struct mlx5_rxq_ctrl *
 	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
 	tmpl->hairpin_conf = *hairpin_conf;
 	tmpl->rxq.idx = idx;
-	rte_atomic32_inc(&tmpl->refcnt);
+	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
 }
@@ -1613,7 +1614,7 @@ struct mlx5_rxq_ctrl *
 	if (rxq_data) {
 		rxq_ctrl = container_of(rxq_data,
 					struct mlx5_rxq_ctrl, rxq);
-		rte_atomic32_inc(&rxq_ctrl->refcnt);
+		__atomic_add_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
 	}
 	return rxq_ctrl;
 }
@@ -1638,7 +1639,7 @@ struct mlx5_rxq_ctrl *
 	if (!(*priv->rxqs)[idx])
 		return 0;
 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-	if (!rte_atomic32_dec_and_test(&rxq_ctrl->refcnt))
+	if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
 		return 1;
 	if (rxq_ctrl->obj) {
 		priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
@@ -1646,13 +1647,15 @@ struct mlx5_rxq_ctrl *
 		mlx5_free(rxq_ctrl->obj);
 		rxq_ctrl->obj = NULL;
 	}
-	if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-		mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+	if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
 		rxq_free_elts(rxq_ctrl);
+	if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+		LIST_REMOVE(rxq_ctrl, next);
+		mlx5_free(rxq_ctrl);
+		(*priv->rxqs)[idx] = NULL;
 	}
-	LIST_REMOVE(rxq_ctrl, next);
-	mlx5_free(rxq_ctrl);
-	(*priv->rxqs)[idx] = NULL;
 	return 0;
 }

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 674296e..c3734e3 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -165,7 +165,7 @@ enum mlx5_rxq_type {
 struct mlx5_rxq_ctrl {
 	struct mlx5_rxq_data rxq; /* Data path structure. */
 	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
 	struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
 	enum mlx5_rxq_type type; /* Rxq type. */
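Note: the release pattern introduced above can be reduced to a
standalone sketch. The names (queue_cfg, hw_obj) and the libc
allocator stand in for the driver's actual types and mlx5_free; the
point is that the HW object is dropped as soon as only the setup
reference is left, while the configuration holder is freed only when
the reference count reaches zero.

#include <stdint.h>
#include <stdlib.h>

struct queue_cfg {
	uint32_t refcnt; /* managed with GCC __atomic builtins */
	void *hw_obj;    /* created in dev_start, freed in dev_stop */
};

/* Returns 1 while other users still reference the queue. */
static int
queue_release(struct queue_cfg *q)
{
	if (__atomic_sub_fetch(&q->refcnt, 1, __ATOMIC_RELAXED) > 1)
		return 1;
	/* Only the setup reference (or none) is left: drop the HW object. */
	free(q->hw_obj);
	q->hw_obj = NULL;
	/* Free the saved configuration only when no reference remains. */
	if (!__atomic_load_n(&q->refcnt, __ATOMIC_RELAXED))
		free(q);
	return 0;
}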
From patchwork Thu Oct 15 06:38:11 2020
X-Patchwork-Submitter: Matan Azrad
X-Patchwork-Id: 80835
X-Patchwork-Delegate: rasland@nvidia.com
From: Matan Azrad
To: dev@dpdk.org
Cc: Viacheslav Ovsiienko
Date: Thu, 15 Oct 2020 06:38:11 +0000
Message-Id: <1602743893-345348-2-git-send-email-matan@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1602743893-345348-1-git-send-email-matan@nvidia.com>
References: <1602743893-345348-1-git-send-email-matan@nvidia.com>
Subject: [dpdk-dev] [PATCH 2/4] net/mlx5: fix Tx queue release

The HW objects of the Tx queue are created/destroyed in the device
start/stop stage, while the ethdev configurations for the Tx queue
start from the tx_queue_setup stage. The PMD should save all the last
configurations it got from the ethdev and apply them to the device in
the dev_start operation.

Wrongly, the code recently added to mitigate the reference counters
did not take the above rule into account and tied the configurations
and HW objects together, so that they were created and destroyed
together. This caused a memory leak and other memory issues.

Make sure the HW object is released in the stop operation when there
is no reference to it, while the configurations stay saved.

Fixes: 17a57183c0eb ("net/mlx5: mitigate Tx queue reference counters")

Signed-off-by: Matan Azrad
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5_rxtx.h |  2 +-
 drivers/net/mlx5/mlx5_txq.c  | 21 ++++++++++++---------
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index c3734e3..b243b6f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -269,7 +269,7 @@ enum mlx5_txq_type {
 /* TX queue control descriptor. */
 struct mlx5_txq_ctrl {
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
+	uint32_t refcnt; /* Reference counter. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	enum mlx5_txq_type type; /* The txq ctrl type. */
 	unsigned int max_inline_data; /* Max inline data. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index af84f5f..9c2dd2a 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1121,7 +1121,7 @@ struct mlx5_txq_ctrl *
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	rte_atomic32_inc(&tmpl->refcnt);
+	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
@@ -1165,7 +1165,7 @@ struct mlx5_txq_ctrl *
 	tmpl->txq.idx = idx;
 	tmpl->hairpin_conf = *hairpin_conf;
 	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
-	rte_atomic32_inc(&tmpl->refcnt);
+	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 }
@@ -1190,7 +1190,7 @@ struct mlx5_txq_ctrl *
 
 	if (txq_data) {
 		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-		rte_atomic32_inc(&ctrl->refcnt);
+		__atomic_add_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
 	}
 	return ctrl;
 }
@@ -1215,7 +1215,7 @@ struct mlx5_txq_ctrl *
 	if (!(*priv->txqs)[idx])
 		return 0;
 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	if (!rte_atomic32_dec_and_test(&txq_ctrl->refcnt))
+	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
 		return 1;
 	if (txq_ctrl->obj) {
 		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1229,12 +1229,15 @@ struct mlx5_txq_ctrl *
 			txq_ctrl->txq.fcqs = NULL;
 		}
 		txq_free_elts(txq_ctrl);
-		mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
 	}
-	LIST_REMOVE(txq_ctrl, next);
-	mlx5_free(txq_ctrl);
-	(*priv->txqs)[idx] = NULL;
 	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
+			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
+		LIST_REMOVE(txq_ctrl, next);
+		mlx5_free(txq_ctrl);
+		(*priv->txqs)[idx] = NULL;
+	}
 	return 0;
 }
@@ -1258,7 +1261,7 @@ struct mlx5_txq_ctrl *
 	if (!(*priv->txqs)[idx])
 		return -1;
 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	return (rte_atomic32_read(&txq->refcnt) == 1);
+	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
 }
 
 /**
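Note: the release test deliberately changes meaning here, not just
notation. A standalone illustration (names are illustrative; the old
variant models what !rte_atomic32_dec_and_test() evaluated to): with
the old "!= 0" test the control structure survived any nonzero count,
while with the new "> 1" test the caller falls through while the
single setup reference is still held, so the HW object can be dropped
in dev_stop without losing the saved configuration.

#include <stdint.h>
#include <stdio.h>

/* Old semantics: keep everything until the count hits zero. */
static int
release_old(uint32_t *refcnt)
{
	return --(*refcnt) != 0;
}

/* New semantics: fall through once only the setup reference is left. */
static int
release_new(uint32_t *refcnt)
{
	return __atomic_sub_fetch(refcnt, 1, __ATOMIC_RELAXED) > 1;
}

int
main(void)
{
	uint32_t a = 2, b = 2;

	/* Prints "old: 1, new: 0": the old test releases nothing, the
	 * new one lets the HW object go while keeping the config. */
	printf("old: %d, new: %d\n", release_old(&a), release_new(&b));
	return 0;
}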
From patchwork Thu Oct 15 06:38:12 2020
X-Patchwork-Submitter: Matan Azrad
X-Patchwork-Id: 80837
X-Patchwork-Delegate: rasland@nvidia.com
From: Matan Azrad
To: dev@dpdk.org
Cc: Viacheslav Ovsiienko, stable@dpdk.org
Date: Thu, 15 Oct 2020 06:38:12 +0000
Message-Id: <1602743893-345348-3-git-send-email-matan@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1602743893-345348-1-git-send-email-matan@nvidia.com>
References: <1602743893-345348-1-git-send-email-matan@nvidia.com>
Subject: [dpdk-dev] [PATCH 3/4] net/mlx5: fix event queue number query

When an Rx/Tx queue is created by DevX, its CQ configuration should
include the EQ number of the interrupts. The EQ is managed by the
kernel, and there is a glue API to query the EQ number from the
kernel. The EQ query API takes a vector number that specifies the
kernel vector used for interrupt handling.

The vector number was wrongly derived from the configured CPU instead
of the device attributes of the supported vectors. The CPU was
detected by the rte_lcore_to_cpu_id API without any check; in a
non-EAL thread context the value was 0xFFFFFFFF, which caused the EQ
number query API to fail.

Use vector 0 for every EQ number query; it must be supported by the
kernel.

Fixes: 08d1838f645a ("net/mlx5: implement CQ for Rx using DevX API")
Fixes: d133f4cdb706 ("net/mlx5: create clock queue for packet pacing")
Cc: stable@dpdk.org

Signed-off-by: Matan Azrad
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 44822ad..f1e3579 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -947,10 +947,8 @@ struct mlx5_dev_ctx_shared *
 		goto error;
 	}
 	if (sh->devx) {
-		uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
-
 		/* Query the EQN for this core. */
-		err = mlx5_glue->devx_query_eqn(sh->ctx, lcore, &sh->eqn);
+		err = mlx5_glue->devx_query_eqn(sh->ctx, 0, &sh->eqn);
 		if (err) {
 			rte_errno = errno;
 			DRV_LOG(ERR, "Failed to query event queue number %d.",
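Note: the glue call here wraps rdma-core's mlx5dv_devx_query_eqn(). A
minimal sketch of the fixed call pattern, assuming an already-opened
ibv_context (the helper name query_eqn is illustrative, not a driver
function):

#include <stdint.h>
#include <infiniband/mlx5dv.h>

/*
 * Vector 0 always exists in the kernel, whereas an unchecked
 * rte_lcore_to_cpu_id(-1) could return 0xFFFFFFFF in a non-EAL
 * thread and make the query fail.
 */
static int
query_eqn(struct ibv_context *ctx, uint32_t *eqn)
{
	return mlx5dv_devx_query_eqn(ctx, 0 /* vector */, eqn);
}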
From patchwork Thu Oct 15 06:38:13 2020
X-Patchwork-Submitter: Matan Azrad
X-Patchwork-Id: 80836
X-Patchwork-Delegate: rasland@nvidia.com
From: Matan Azrad
To: dev@dpdk.org
Cc: Viacheslav Ovsiienko
Date: Thu, 15 Oct 2020 06:38:13 +0000
Message-Id: <1602743893-345348-4-git-send-email-matan@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1602743893-345348-1-git-send-email-matan@nvidia.com>
References: <1602743893-345348-1-git-send-email-matan@nvidia.com>
Subject: [dpdk-dev] [PATCH 4/4] net/mlx5/linux: fix Tx queue operations decision

One of the conditions for creating a Tx queue object by DevX is that
the DPDK mlx5 driver is not going to be the E-Switch manager of the
device. The issue is with the default FDB flows managed by the kernel
driver: the kernel does not create them when the Tx queues are
created by DevX.

The decision is therefore to create the Tx queues by Verbs when
E-Switch is enabled, but the current behavior wrongly uses the
opposite condition and creates them by DevX.

Create the Tx queues by Verbs when E-Switch is enabled.

Fixes: 86d259cec852 ("net/mlx5: separate Tx queue object creations")

Signed-off-by: Matan Azrad
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/linux/mlx5_os.c | 53 +++++++++++++---------------------------
 1 file changed, 17 insertions(+), 36 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 09d0944..d177b4f 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -526,26 +526,16 @@
 mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
 	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
 	struct mlx5_txq_ctrl *txq_ctrl =
 			container_of(txq_data, struct mlx5_txq_ctrl, txq);
 
-	/*
-	 * When DevX is supported and DV flow is enable, and dest tir is enable,
-	 * hairpin functions use DevX API.
-	 * When, in addition, DV E-Switch is enable and DevX uar offset is
-	 * supported, all Tx functions also use DevX API.
-	 * Otherwise, all Tx functions use Verbs API.
-	 */
-	if (config->devx && config->dv_flow_en && config->dest_tir) {
-		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
-			return mlx5_txq_devx_obj_new(dev, idx);
+	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
+		return mlx5_txq_devx_obj_new(dev, idx);
 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
-		if (config->dv_esw_en)
-			return mlx5_txq_devx_obj_new(dev, idx);
+	if (!priv->config.dv_esw_en)
+		return mlx5_txq_devx_obj_new(dev, idx);
 #endif
-	}
 	return mlx5_txq_ibv_obj_new(dev, idx);
 }

@@ -558,20 +548,16 @@
 static void
 mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
 {
-	struct mlx5_dev_config *config = &txq_obj->txq_ctrl->priv->config;
-
-	if (config->devx && config->dv_flow_en && config->dest_tir) {
+	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
+		mlx5_txq_devx_obj_release(txq_obj);
+		return;
+	}
 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
-		if (config->dv_esw_en) {
-			mlx5_txq_devx_obj_release(txq_obj);
-			return;
-		}
-#endif
-		if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
-			mlx5_txq_devx_obj_release(txq_obj);
-			return;
-		}
+	if (!txq_obj->txq_ctrl->priv->config.dv_esw_en) {
+		mlx5_txq_devx_obj_release(txq_obj);
+		return;
 	}
+#endif
 	mlx5_txq_ibv_obj_release(txq_obj);
 }

@@ -1377,12 +1363,6 @@
 			goto error;
 		}
 	}
-	/*
-	 * Initialize the dev_ops structure with DevX/Verbs function pointers.
-	 * When DevX is supported and both DV flow and dest tir are enabled, all
-	 * Rx functions use DevX API (except for drop that has not yet been
-	 * implemented in DevX).
-	 */
 	if (config->devx && config->dv_flow_en && config->dest_tir) {
 		priv->obj_ops = devx_obj_ops;
 		priv->obj_ops.drop_action_create =
@@ -1392,16 +1372,17 @@
 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
 		priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify;
 #else
-		if (!config->dv_esw_en)
+		if (config->dv_esw_en)
 			priv->obj_ops.txq_obj_modify =
 						ibv_obj_ops.txq_obj_modify;
 #endif
+		/* Use specific wrappers for Tx object. */
+		priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
+		priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
+
 	} else {
 		priv->obj_ops = ibv_obj_ops;
 	}
-	/* The Tx objects are managed by a specific linux wrapper functions. */
-	priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
-	priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
 	/* Supported Verbs flow priority number detection. */
 	err = mlx5_flow_discover_priorities(eth_dev);
 	if (err < 0) {
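Note: the corrected decision can be condensed into a standalone helper
(an illustrative sketch, not a driver function; the parameters mirror
the txq type, the HAVE_MLX5DV_DEVX_UAR_OFFSET build flag, and the
dv_esw_en config bit):

#include <stdbool.h>

/*
 * DevX Tx queue objects are safe only when the driver will not act as
 * E-Switch manager, because the kernel creates the default FDB flows
 * only for Tx queues it manages itself; hairpin queues are DevX-only.
 */
static bool
use_devx_for_txq(bool is_hairpin, bool uar_offset_supported, bool dv_esw_en)
{
	if (is_hairpin)
		return true;
	return uar_offset_supported && !dv_esw_en;
}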