From patchwork Thu Jan 9 17:16:04 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 64381
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, orika@mellanox.com
Date: Thu, 9 Jan 2020 17:16:04 +0000
Message-Id: <1578590167-10167-2-git-send-email-viacheslavo@mellanox.com>
In-Reply-To: <1578590167-10167-1-git-send-email-viacheslavo@mellanox.com>
References: <1578500161-20156-1-git-send-email-viacheslavo@mellanox.com>
 <1578590167-10167-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH v3 1/4] net/mlx5: move Tx complete request routine

The completion request flag is now set once per Tx burst call and the
code of the appropriate routine is moved to the end of the sending
loop. This is a preparation step to stop using the WQE reserved field
for storing the index of elts to free.

Signed-off-by: Viacheslav Ovsiienko
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5_rxtx.c | 26 ++++----------------------
 1 file changed, 4 insertions(+), 22 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 25a2952..ee6d5fc 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -2145,9 +2145,6 @@ enum mlx5_txcmp_code {
  *   Pointer to TX queue structure.
  * @param loc
  *   Pointer to burst routine local context.
- * @param multi,
- *   Routine is called from multi-segment sending loop,
- *   do not correct the elts_head according to the pkts_copy.
  * @param olx
  *   Configured Tx offloads mask. It is fully defined at
  *   compile time and may be used for optimization.
@@ -2155,13 +2152,12 @@ enum mlx5_txcmp_code {
 static __rte_always_inline void
 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
                            struct mlx5_txq_local *restrict loc,
-                           bool multi,
                            unsigned int olx)
 {
         uint16_t head = txq->elts_head;
         unsigned int part;
 
-        part = (MLX5_TXOFF_CONFIG(INLINE) || multi) ?
+        part = MLX5_TXOFF_CONFIG(INLINE) ?
                0 : loc->pkts_sent - loc->pkts_copy;
         head += part;
         if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
@@ -3120,8 +3116,6 @@ enum mlx5_txcmp_code {
         wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
         txq->wqe_ci += (ds + 3) / 4;
         loc->wqe_free -= (ds + 3) / 4;
-        /* Request CQE generation if limits are reached. */
-        mlx5_tx_request_completion(txq, loc, true, olx);
         return MLX5_TXCMP_CODE_MULTI;
 }
 
@@ -3230,8 +3224,6 @@ enum mlx5_txcmp_code {
         } while (true);
         txq->wqe_ci += (ds + 3) / 4;
         loc->wqe_free -= (ds + 3) / 4;
-        /* Request CQE generation if limits are reached. */
-        mlx5_tx_request_completion(txq, loc, true, olx);
         return MLX5_TXCMP_CODE_MULTI;
 }
 
@@ -3388,8 +3380,6 @@ enum mlx5_txcmp_code {
         wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
         txq->wqe_ci += (ds + 3) / 4;
         loc->wqe_free -= (ds + 3) / 4;
-        /* Request CQE generation if limits are reached. */
-        mlx5_tx_request_completion(txq, loc, true, olx);
         return MLX5_TXCMP_CODE_MULTI;
 }
 
@@ -3599,8 +3589,6 @@ enum mlx5_txcmp_code {
         --loc->elts_free;
         ++loc->pkts_sent;
         --pkts_n;
-        /* Request CQE generation if limits are reached. */
-        mlx5_tx_request_completion(txq, loc, false, olx);
         if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
                 return MLX5_TXCMP_CODE_EXIT;
         loc->mbuf = *pkts++;
@@ -3750,7 +3738,7 @@ enum mlx5_txcmp_code {
                   struct mlx5_txq_local *restrict loc,
                   unsigned int ds,
                   unsigned int slen,
-                  unsigned int olx)
+                  unsigned int olx __rte_unused)
 {
         assert(!MLX5_TXOFF_CONFIG(INLINE));
 #ifdef MLX5_PMD_SOFT_COUNTERS
@@ -3765,8 +3753,6 @@ enum mlx5_txcmp_code {
         loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
         txq->wqe_ci += (ds + 3) / 4;
         loc->wqe_free -= (ds + 3) / 4;
-        /* Request CQE generation if limits are reached. */
-        mlx5_tx_request_completion(txq, loc, false, olx);
 }
 
 /*
@@ -3809,8 +3795,6 @@ enum mlx5_txcmp_code {
         loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
         txq->wqe_ci += (len + 3) / 4;
         loc->wqe_free -= (len + 3) / 4;
-        /* Request CQE generation if limits are reached. */
-        mlx5_tx_request_completion(txq, loc, false, olx);
 }
 
 /**
@@ -4011,8 +3995,6 @@ enum mlx5_txcmp_code {
         txq->wqe_ci += (2 + part + 3) / 4;
         loc->wqe_free -= (2 + part + 3) / 4;
         pkts_n -= part;
-        /* Request CQE generation if limits are reached. */
-        mlx5_tx_request_completion(txq, loc, false, olx);
         if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
                 return MLX5_TXCMP_CODE_EXIT;
         loc->mbuf = *pkts++;
@@ -4496,8 +4478,6 @@ enum mlx5_txcmp_code {
         }
         ++loc->pkts_sent;
         --pkts_n;
-        /* Request CQE generation if limits are reached. */
-        mlx5_tx_request_completion(txq, loc, false, olx);
         if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
                 return MLX5_TXCMP_CODE_EXIT;
         loc->mbuf = *pkts++;
@@ -4776,6 +4756,8 @@ enum mlx5_txcmp_code {
         /* Take a shortcut if nothing is sent. */
         if (unlikely(loc.pkts_sent == loc.pkts_loop))
                 goto burst_exit;
+        /* Request CQE generation if limits are reached. */
+        mlx5_tx_request_completion(txq, &loc, olx);
         /*
          * Ring QP doorbell immediately after WQE building completion
          * to improve latencies. The pure software related data treatment
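The gain here is branch elimination on the per-packet paths: the
completion-request check runs once per burst instead of once per sent
packet. Reduced to a minimal standalone sketch - the structure,
threshold, and function names below are illustrative stand-ins, not
the actual mlx5 definitions:

#include <stdint.h>
#include <stdbool.h>

#define COMP_THRESH 32 /* stands in for MLX5_TX_COMP_THRESH */

struct txq_sketch {
        uint16_t elts_head; /* producer index of sent buffers */
        uint16_t elts_comp; /* index at the last completion request */
        bool comp_requested;
};

/*
 * Previously invoked from every sending sub-loop; after the patch it
 * runs exactly once per tx_burst call, just before the doorbell.
 */
static inline void
request_completion_once(struct txq_sketch *txq)
{
        if ((uint16_t)(txq->elts_head - txq->elts_comp) >= COMP_THRESH) {
                /* The driver sets MLX5_COMP_ALWAYS in the last WQE here. */
                txq->comp_requested = true;
                txq->elts_comp = txq->elts_head;
        }
}

void
tx_burst_sketch(struct txq_sketch *txq, unsigned int pkts_n)
{
        while (pkts_n--)
                txq->elts_head++; /* build WQE, store mbuf in elts, ... */
        request_completion_once(txq); /* one check per burst */
        /* ring the doorbell here */
}

Because elts_head only grows while the burst is being built, the single
check after the loop observes the burst's final state; the trade-off is
mid-burst completion granularity for fewer branches in the hot loop.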
From patchwork Thu Jan 9 17:16:05 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 64380
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, orika@mellanox.com
Date: Thu, 9 Jan 2020 17:16:05 +0000
Message-Id: <1578590167-10167-3-git-send-email-viacheslavo@mellanox.com>
In-Reply-To: <1578590167-10167-1-git-send-email-viacheslavo@mellanox.com>
References: <1578500161-20156-1-git-send-email-viacheslavo@mellanox.com>
 <1578590167-10167-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH v3 2/4] net/mlx5: update Tx error handling routine

This is a preparation step: the index of elts to free on completion is
going to be stored in a dedicated free-on-completion queue. This patch
updates the elts freeing routine and the Tx error handling routine to
be in sync with the coming new queue.

Signed-off-by: Viacheslav Ovsiienko
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5_rxtx.c | 98 +++++++++++++++++++++++---------------------
 drivers/net/mlx5/mlx5_rxtx.h |  4 +-
 drivers/net/mlx5/mlx5_txq.c  |  2 +-
 3 files changed, 54 insertions(+), 50 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index ee6d5fc..b7b40ac 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -654,10 +654,10 @@ enum mlx5_txcmp_code {
  *   Pointer to the error CQE.
  *
  * @return
- *   Negative value if queue recovery failed,
- *   the last Tx buffer element to free otherwise.
+ *   Negative value if queue recovery failed, otherwise
+ *   the error completion entry is handled successfully.
  */
-int
+static int
 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
                          volatile struct mlx5_err_cqe *err_cqe)
 {
@@ -701,18 +701,14 @@ enum mlx5_txcmp_code {
                  */
                 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
                                        new_wqe_pi) & wqe_m;
-                if (tx_recover_qp(txq_ctrl) == 0) {
-                        txq->cq_ci++;
-                        /* Release all the remaining buffers. */
-                        return txq->elts_head;
+                if (tx_recover_qp(txq_ctrl)) {
+                        /* Recovering failed - retry later on the same WQE. */
+                        return -1;
                 }
-                /* Recovering failed - try again later on the same WQE. */
-                return -1;
-        } else {
-                txq->cq_ci++;
+                /* Release all the remaining buffers. */
+                txq_free_elts(txq_ctrl);
         }
-        /* Do not release buffers. */
-        return txq->elts_tail;
+        return 0;
 }
 
 /**
@@ -2034,8 +2030,6 @@ enum mlx5_txcmp_code {
  *   Pointer to TX queue structure.
  * @param valid CQE pointer
  *   if not NULL update txq->wqe_pi and flush the buffers
- * @param itail
- *   if not negative - flush the buffers till this index.
  * @param olx
  *   Configured Tx offloads mask. It is fully defined at
  *   compile time and may be used for optimization.
@@ -2043,25 +2037,18 @@ enum mlx5_txcmp_code {
 static __rte_always_inline void
 mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
                    volatile struct mlx5_cqe *last_cqe,
-                   int itail,
                    unsigned int olx __rte_unused)
 {
-        uint16_t tail;
-
         if (likely(last_cqe != NULL)) {
+                uint16_t tail;
+
                 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
                 tail = ((volatile struct mlx5_wqe_cseg *)
                         (txq->wqes + (txq->wqe_pi & txq->wqe_m)))->misc;
-        } else if (itail >= 0) {
-                tail = (uint16_t)itail;
-        } else {
-                return;
-        }
-        rte_compiler_barrier();
-        *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
-        if (likely(tail != txq->elts_tail)) {
-                mlx5_tx_free_elts(txq, tail, olx);
-                assert(tail == txq->elts_tail);
+                if (likely(tail != txq->elts_tail)) {
+                        mlx5_tx_free_elts(txq, tail, olx);
+                        assert(tail == txq->elts_tail);
+                }
         }
 }
 
@@ -2085,6 +2072,7 @@ enum mlx5_txcmp_code {
 {
         unsigned int count = MLX5_TX_COMP_MAX_CQE;
         volatile struct mlx5_cqe *last_cqe = NULL;
+        uint16_t ci = txq->cq_ci;
         int ret;
 
         static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
@@ -2092,8 +2080,8 @@ enum mlx5_txcmp_code {
         do {
                 volatile struct mlx5_cqe *cqe;
 
-                cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
-                ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
+                cqe = &txq->cqes[ci & txq->cqe_m];
+                ret = check_cqe(cqe, txq->cqe_s, ci);
                 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
                         if (likely(ret != MLX5_CQE_STATUS_ERR)) {
                                 /* No new CQEs in completion queue. */
@@ -2109,31 +2097,49 @@ enum mlx5_txcmp_code {
                         rte_wmb();
                         ret = mlx5_tx_error_cqe_handle
                                 (txq, (volatile struct mlx5_err_cqe *)cqe);
+                        if (unlikely(ret < 0)) {
+                                /*
+                                 * Some error occurred on queue error
+                                 * handling, we do not advance the index
+                                 * here, allowing to retry on next call.
+                                 */
+                                return;
+                        }
                         /*
-                         * Flush buffers, update consuming index
-                         * if recovery succeeded. Otherwise
-                         * just try to recover later.
+                         * We are going to fetch all entries with
+                         * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
                          */
-                        last_cqe = NULL;
-                        break;
+                        ++ci;
+                        continue;
                 }
                 /* Normal transmit completion. */
-                ++txq->cq_ci;
+                ++ci;
                 last_cqe = cqe;
 #ifndef NDEBUG
                 if (txq->cq_pi)
                         --txq->cq_pi;
 #endif
-                /*
-                 * We have to restrict the amount of processed CQEs
-                 * in one tx_burst routine call. The CQ may be large
-                 * and many CQEs may be updated by the NIC in one
-                 * transaction. Buffers freeing is time consuming,
-                 * multiple iterations may introduce significant
-                 * latency.
-                 */
-        } while (--count);
-        mlx5_tx_comp_flush(txq, last_cqe, ret, olx);
+                /*
+                 * We have to restrict the amount of processed CQEs
+                 * in one tx_burst routine call. The CQ may be large
+                 * and many CQEs may be updated by the NIC in one
+                 * transaction. Buffers freeing is time consuming,
+                 * multiple iterations may introduce significant
+                 * latency.
+                 */
+                if (--count == 0)
+                        break;
+        } while (true);
+        if (likely(ci != txq->cq_ci)) {
+                /*
+                 * Update completion queue consuming index
+                 * and ring doorbell to notify hardware.
+                 */
+                rte_compiler_barrier();
+                txq->cq_ci = ci;
+                *txq->cq_db = rte_cpu_to_be_32(ci);
+                mlx5_tx_comp_flush(txq, last_cqe, olx);
+        }
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index e927343..8a2185a 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -440,6 +440,7 @@ struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
 int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_txq_verify(struct rte_eth_dev *dev);
 void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
+void txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl);
 uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
 
 /* mlx5_rxtx.c */
@@ -451,9 +452,6 @@ struct mlx5_txq_ctrl *mlx5_txq_hairpin_new
 void mlx5_set_ptype_table(void);
 void mlx5_set_cksum_table(void);
 void mlx5_set_swp_types_table(void);
-__rte_noinline int mlx5_tx_error_cqe_handle
-                        (struct mlx5_txq_data *restrict txq,
-                         volatile struct mlx5_err_cqe *err_cqe);
 uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
 void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
 __rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 1c4f7e7..abe0947 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -62,7 +62,7 @@
  * @param txq_ctrl
  *   Pointer to TX queue structure.
  */
-static void
+void
 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 {
         const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
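Structurally, the reworked handler iterates with a local copy of the
consumer index and publishes it - together with the single doorbell
write - only after the loop, so a failed recovery can simply return and
retry the same CQE on the next call. A compressed sketch of that
control flow, assuming invented stubs check_entry() and
handle_error_entry() in place of check_cqe() and
mlx5_tx_error_cqe_handle():

#include <stdint.h>

#define MAX_CQE_PER_CALL 16 /* stands in for MLX5_TX_COMP_MAX_CQE */

enum cqe_status { STATUS_HW_OWN, STATUS_SW_OWN, STATUS_ERR };

struct cq_sketch {
        uint16_t cq_ci;        /* published consumer index */
        uint16_t cqe_m;        /* index mask, size is a power of two */
        volatile uint32_t *db; /* doorbell record */
};

/* Trivial stubs so the sketch compiles; the driver's logic differs. */
static enum cqe_status
check_entry(const struct cq_sketch *cq, uint16_t ci)
{
        (void)cq; (void)ci;
        return STATUS_HW_OWN; /* stub: queue always empty */
}

static int
handle_error_entry(const struct cq_sketch *cq, uint16_t ci)
{
        (void)cq; (void)ci;
        return 0; /* stub: recovery always succeeds */
}

void
handle_completions(struct cq_sketch *cq)
{
        unsigned int count = MAX_CQE_PER_CALL;
        uint16_t ci = cq->cq_ci; /* iterate on a local copy */

        do {
                enum cqe_status ret = check_entry(cq, ci);

                if (ret == STATUS_HW_OWN)
                        break; /* no new entries to consume */
                if (ret == STATUS_ERR && handle_error_entry(cq, ci) < 0)
                        return; /* recovery failed: cq_ci is untouched,
                                 * the same entry is retried next call */
                ++ci; /* consume a good or flushed entry */
        } while (--count);
        if (ci != cq->cq_ci) {
                cq->cq_ci = ci; /* publish the consumer index once */
                *cq->db = ci;   /* and ring the doorbell once per call */
        }
}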
From patchwork Thu Jan 9 17:16:06 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 64382
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, orika@mellanox.com
Date: Thu, 9 Jan 2020 17:16:06 +0000
Message-Id: <1578590167-10167-4-git-send-email-viacheslavo@mellanox.com>
In-Reply-To: <1578590167-10167-1-git-send-email-viacheslavo@mellanox.com>
References: <1578500161-20156-1-git-send-email-viacheslavo@mellanox.com>
 <1578590167-10167-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH v3 3/4] net/mlx5: add free on completion queue

A new software-managed entity is introduced in the Tx datapath - the
free-on-completion queue. This queue keeps track of how many buffers
stored in the elts array must be freed on send completion. Each element
of the queue contains the transmitting descriptor (WQE) index, to stay
in sync with completion entries (debug builds only), and the index into
the elts array for freeing buffers.

Signed-off-by: Viacheslav Ovsiienko
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5_rxtx.h |  5 +++++
 drivers/net/mlx5/mlx5_txq.c  | 15 +++++++++++++++
 2 files changed, 20 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 8a2185a..7d1b2fa 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -297,6 +297,11 @@ struct mlx5_txq_data {
         struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
         struct mlx5_wqe *wqes; /* Work queue. */
         struct mlx5_wqe *wqes_end; /* Work queue array limit. */
+#ifdef NDEBUG
+        uint16_t *fcqs; /* Free completion queue. */
+#else
+        uint32_t *fcqs; /* Free completion queue (debug extended). */
+#endif
         volatile struct mlx5_cqe *cqes; /* Completion queue. */
         volatile uint32_t *qp_db; /* Work queue doorbell. */
         volatile uint32_t *cq_db; /* Completion queue doorbell. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index abe0947..aee0970 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -724,6 +724,17 @@ struct mlx5_txq_obj *
         txq_data->wqe_pi = 0;
         txq_data->wqe_comp = 0;
         txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
+        txq_data->fcqs = rte_calloc_socket(__func__,
+                                           txq_data->cqe_s,
+                                           sizeof(*txq_data->fcqs),
+                                           RTE_CACHE_LINE_SIZE,
+                                           txq_ctrl->socket);
+        if (!txq_data->fcqs) {
+                DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
+                        dev->data->port_id, idx);
+                rte_errno = ENOMEM;
+                goto error;
+        }
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
         /*
          * If using DevX need to query and store TIS transport domain value.
@@ -772,6 +783,8 @@ struct mlx5_txq_obj *
                 claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
         if (tmpl.qp)
                 claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
+        if (txq_data && txq_data->fcqs)
+                rte_free(txq_data->fcqs);
         if (txq_obj)
                 rte_free(txq_obj);
         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -826,6 +839,8 @@ struct mlx5_txq_obj *
         } else {
                 claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
                 claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
+                if (txq_obj->txq_ctrl->txq.fcqs)
+                        rte_free(txq_obj->txq_ctrl->txq.fcqs);
         }
         LIST_REMOVE(txq_obj, next);
         rte_free(txq_obj);
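The sizing rule is the essential part: at most one completion request
can be outstanding per CQE, so the array gets cqe_s entries and can
safely be indexed with the same cqe_m mask as the CQ itself. A hedged
sketch of equivalent setup and teardown code follows - the
rte_calloc_socket()/rte_free() calls are the real DPDK APIs, while
struct fcq_sketch is invented for illustration:

#include <errno.h>
#include <stdint.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_memory.h>

struct fcq_sketch {
        uint16_t cqe_s; /* number of CQEs, a power of two */
        uint16_t cqe_m; /* cqe_s - 1, the index mask */
        uint16_t *fcqs; /* one elts index per potential completion */
};

int
fcq_setup(struct fcq_sketch *q, uint16_t cqe_s, int socket)
{
        q->cqe_s = cqe_s;
        q->cqe_m = cqe_s - 1;
        /* Cache-line aligned, NUMA-local, zeroed allocation, as in the
         * Tx queue object creation above. */
        q->fcqs = rte_calloc_socket(__func__, cqe_s, sizeof(*q->fcqs),
                                    RTE_CACHE_LINE_SIZE, socket);
        if (q->fcqs == NULL) {
                rte_errno = ENOMEM;
                return -rte_errno;
        }
        return 0;
}

void
fcq_teardown(struct fcq_sketch *q)
{
        rte_free(q->fcqs); /* rte_free(NULL) is a no-op */
        q->fcqs = NULL;
}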
From patchwork Thu Jan 9 17:16:07 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 64383
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, orika@mellanox.com
Date: Thu, 9 Jan 2020 17:16:07 +0000
Message-Id: <1578590167-10167-5-git-send-email-viacheslavo@mellanox.com>
In-Reply-To: <1578590167-10167-1-git-send-email-viacheslavo@mellanox.com>
References: <1578500161-20156-1-git-send-email-viacheslavo@mellanox.com>
 <1578590167-10167-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH v3 4/4] net/mlx5: engage free on completion queue

The free-on-completion queue keeps indices into the elts array; all
mbufs stored below such an index should be freed on arrival of the
normal send completion. In debug builds it also contains the index of
the completed transmitting descriptor (WQE) to check queue
synchronization.

Signed-off-by: Viacheslav Ovsiienko
Acked-by: Matan Azrad
---
 drivers/net/mlx5/mlx5_rxtx.c | 33 +++++++++++++++++----------------
 drivers/net/mlx5/mlx5_rxtx.h |  4 +---
 drivers/net/mlx5/mlx5_txq.c  |  2 --
 3 files changed, 18 insertions(+), 21 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index b7b40ac..b11c5eb 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -2043,8 +2043,7 @@ enum mlx5_txcmp_code {
                 uint16_t tail;
 
                 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
-                tail = ((volatile struct mlx5_wqe_cseg *)
-                        (txq->wqes + (txq->wqe_pi & txq->wqe_m)))->misc;
+                tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
                 if (likely(tail != txq->elts_tail)) {
                         mlx5_tx_free_elts(txq, tail, olx);
                         assert(tail == txq->elts_tail);
@@ -2095,6 +2094,7 @@ enum mlx5_txcmp_code {
                          * here, before we might perform SQ reset.
                          */
                         rte_wmb();
+                        txq->cq_ci = ci;
                         ret = mlx5_tx_error_cqe_handle
                                 (txq, (volatile struct mlx5_err_cqe *)cqe);
                         if (unlikely(ret < 0)) {
@@ -2108,17 +2108,18 @@ enum mlx5_txcmp_code {
                         /*
                          * We are going to fetch all entries with
                          * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
+                         * The send queue is supposed to be empty.
                          */
                         ++ci;
+                        txq->cq_pi = ci;
+                        last_cqe = NULL;
                         continue;
                 }
                 /* Normal transmit completion. */
+                assert(ci != txq->cq_pi);
+                assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter);
                 ++ci;
                 last_cqe = cqe;
-#ifndef NDEBUG
-                if (txq->cq_pi)
-                        --txq->cq_pi;
-#endif
                 /*
                  * We have to restrict the amount of processed CQEs
                  * in one tx_burst routine call. The CQ may be large
@@ -2127,7 +2128,7 @@ enum mlx5_txcmp_code {
                  * multiple iterations may introduce significant
                  * latency.
                  */
-                if (--count == 0)
+                if (likely(--count == 0))
                         break;
         } while (true);
         if (likely(ci != txq->cq_ci)) {
@@ -2177,15 +2178,15 @@ enum mlx5_txcmp_code {
                 /* Request unconditional completion on last WQE. */
                 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
                                             MLX5_COMP_MODE_OFFSET);
-                /* Save elts_head in unused "immediate" field of WQE. */
-                last->cseg.misc = head;
-                /*
-                 * A CQE slot must always be available. Count the
-                 * issued CEQ "always" request instead of production
-                 * index due to here can be CQE with errors and
-                 * difference with ci may become inconsistent.
-                 */
-                assert(txq->cqe_s > ++txq->cq_pi);
+                /* Save elts_head in dedicated free on completion queue. */
+#ifdef NDEBUG
+                txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
+#else
+                txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
+                                (last->cseg.opcode >> 8) << 16;
+#endif
+                /* A CQE slot must always be available. */
+                assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
         }
 }
 
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 7d1b2fa..e362b4a 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -273,9 +273,7 @@ struct mlx5_txq_data {
         uint16_t wqe_thres; /* WQE threshold to request completion in CQ. */
         /* WQ related fields. */
         uint16_t cq_ci; /* Consumer index for completion queue. */
-#ifndef NDEBUG
-        uint16_t cq_pi; /* Counter of issued CQE "always" requests. */
-#endif
+        uint16_t cq_pi; /* Production index for completion queue. */
         uint16_t cqe_s; /* Number of CQ elements. */
         uint16_t cqe_m; /* Mask for CQ indices. */
         /* CQ related fields. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index aee0970..c750082 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -717,9 +717,7 @@ struct mlx5_txq_obj *
         txq_data->cq_db = cq_info.dbrec;
         txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
         txq_data->cq_ci = 0;
-#ifndef NDEBUG
         txq_data->cq_pi = 0;
-#endif
         txq_data->wqe_ci = 0;
         txq_data->wqe_pi = 0;
         txq_data->wqe_comp = 0;
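Taken together, the series turns buffer freeing into a
single-producer/single-consumer ring walk: the burst side pushes
elts_head into fcqs[] whenever it requests a completion, and the
completion side pops the stored index to learn how far elts can be
freed. A self-contained illustration of the index math, including the
debug-build trick of packing the expected WQE counter into the upper
16 bits (the names and sizes here are illustrative; release builds of
the driver narrow the slots to uint16_t):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CQE_S 8           /* ring size, a power of two */
#define CQE_M (CQE_S - 1) /* index mask, wraparound comes for free */

static uint32_t fcqs[CQE_S];
static uint16_t cq_pi; /* producer: completion requests issued */
static uint16_t cq_ci; /* consumer: completions processed */

/* Producer side: a WQE with a completion request has been posted. */
static void
fcq_push(uint16_t elts_head, uint16_t wqe_counter)
{
        fcqs[cq_pi++ & CQE_M] = (uint32_t)elts_head |
                                ((uint32_t)wqe_counter << 16);
        /* A slot must be available: at most CQE_S requests in flight. */
        assert((uint16_t)(cq_pi - cq_ci) <= CQE_S);
}

/* Consumer side: the matching CQE has arrived. */
static uint16_t
fcq_pop(uint16_t wqe_counter)
{
        uint32_t v = fcqs[cq_ci & CQE_M];

        /* Debug cross-check between the CQE and the stored WQE index. */
        assert((v >> 16) == wqe_counter);
        ++cq_ci;
        return (uint16_t)v; /* elts index up to which buffers are freed */
}

int
main(void)
{
        fcq_push(100, 7);
        printf("free elts up to %u\n", fcq_pop(7)); /* prints 100 */
        return 0;
}

The unsigned 16-bit wraparound in (cq_pi - cq_ci) is what lets both
indices grow monotonically while the mask keeps array accesses in
bounds, mirroring how cqe_m is used throughout the driver.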