From patchwork Tue Oct 27 19:03:19 2020
X-Patchwork-Submitter: Slava Ovsiienko <viacheslavo@nvidia.com>
X-Patchwork-Id: 82362
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: rasland@nvidia.com
Date: Tue, 27 Oct 2020 19:03:19 +0000
Message-Id: <1603825399-29715-1-git-send-email-viacheslavo@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
Subject: [dpdk-dev] [PATCH] net/mlx5: use C11 atomics in packet scheduling

The rte_atomic API is deprecated and needs to be replaced with
C11 atomic builtins. Use relaxed ordering and explicit memory
barriers for Clock Queue and timestamp synchronization.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h      |  14 +++---
 drivers/net/mlx5/mlx5_rxtx.h |  11 +++--
 drivers/net/mlx5/mlx5_txpp.c | 107 +++++++++++++++++++++++++------------------
 3 files changed, 76 insertions(+), 56 deletions(-)
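The whole patch follows one mechanical conversion: every
rte_atomic32_t/rte_atomic64_t field becomes a plain integer accessed
through GCC __atomic builtins. A minimal standalone sketch of the
pattern (hypothetical names, not part of the diff); relaxed ordering
suffices for the statistics counters because each one is an independent
monotonic value that is never used to order other memory accesses:

#include <stdint.h>

struct txpp_counters {
	uint64_t err_miss_int; /* was rte_atomic32_t */
};

static inline void
count_missed_interrupt(struct txpp_counters *c)
{
	/* was: rte_atomic32_inc(&c->err_miss_int); */
	__atomic_fetch_add(&c->err_miss_int, 1, __ATOMIC_RELAXED);
}

static inline uint64_t
read_missed_interrupts(struct txpp_counters *c)
{
	/* was: rte_atomic32_read(&c->err_miss_int); */
	return __atomic_load_n(&c->err_miss_int, __ATOMIC_RELAXED);
}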
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 1bd8cd4..bd5b9b4 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -563,8 +563,8 @@ struct mlx5_txpp_wq {
 
 /* Tx packet pacing internal timestamp. */
 struct mlx5_txpp_ts {
-	rte_atomic64_t ci_ts;
-	rte_atomic64_t ts;
+	uint64_t ci_ts;
+	uint64_t ts;
 };
 
 /* Tx packet pacing structure. */
@@ -587,11 +587,11 @@ struct mlx5_dev_txpp {
 	struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
 	uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
 	/* Statistics counters. */
-	rte_atomic32_t err_miss_int; /* Missed service interrupt. */
-	rte_atomic32_t err_rearm_queue; /* Rearm Queue errors. */
-	rte_atomic32_t err_clock_queue; /* Clock Queue errors. */
-	rte_atomic32_t err_ts_past; /* Timestamp in the past. */
-	rte_atomic32_t err_ts_future; /* Timestamp in the distant future. */
+	uint64_t err_miss_int; /* Missed service interrupt. */
+	uint64_t err_rearm_queue; /* Rearm Queue errors. */
+	uint64_t err_clock_queue; /* Clock Queue errors. */
+	uint64_t err_ts_past; /* Timestamp in the past. */
+	uint64_t err_ts_future; /* Timestamp in the distant future. */
 };
 
 /* Supported flex parser profile ID. */
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 1b5fba4..84eaef7 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -14,7 +14,6 @@
 #include
 #include
 #include
-#include <rte_atomic.h>
 #include
 #include
 #include
@@ -677,8 +676,8 @@ int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,
		 * the service thread, data should be re-read.
		 */
		rte_compiler_barrier();
-		ci = rte_atomic64_read(&sh->txpp.ts.ci_ts);
-		ts = rte_atomic64_read(&sh->txpp.ts.ts);
+		ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
+		ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
		rte_compiler_barrier();
		if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
			break;
@@ -688,7 +687,8 @@ int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,
	mts -= ts;
	if (unlikely(mts >= UINT64_MAX / 2)) {
		/* We have a negative integer, mts is in the past. */
-		rte_atomic32_inc(&sh->txpp.err_ts_past);
+		__atomic_fetch_add(&sh->txpp.err_ts_past,
+				   1, __ATOMIC_RELAXED);
		return -1;
	}
	tick = sh->txpp.tick;
@@ -697,7 +697,8 @@ int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,
	mts = (mts + tick - 1) / tick;
	if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
		/* mts is too far in the future. */
-		rte_atomic32_inc(&sh->txpp.err_ts_future);
+		__atomic_fetch_add(&sh->txpp.err_ts_future,
+				   1, __ATOMIC_RELAXED);
		return -1;
	}
	mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
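The mlx5_rxtx.h hunks above keep the seqlock-like scheme: the service
thread publishes ci_ts and ts as two plain 64-bit stores, and readers
pair relaxed loads with compiler barriers, retrying while the bits the
two words share disagree. A standalone sketch of the reader side
(hypothetical names and index width, not part of the diff):

#include <stdint.h>

#define CQ_INDEX_WIDTH 8 /* hypothetical stand-in for MLX5_CQ_INDEX_WIDTH */
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

struct txpp_ts {
	uint64_t ci_ts; /* completion index in the upper bits */
	uint64_t ts;    /* raw timestamp */
};

static inline uint64_t
read_ts_consistent(struct txpp_ts *p)
{
	uint64_t ci, ts;

	do {
		compiler_barrier();
		ci = __atomic_load_n(&p->ci_ts, __ATOMIC_RELAXED);
		ts = __atomic_load_n(&p->ts, __ATOMIC_RELAXED);
		compiler_barrier();
		/*
		 * The two words embed overlapping bits of the same
		 * timestamp; a mismatch means the loads straddled an
		 * update by the service thread, so retry.
		 */
	} while (((ts ^ ci) << (64 - CQ_INDEX_WIDTH)) != 0);
	return ts;
}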
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 37355fa..6fb51ca 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -650,29 +650,34 @@
	 * updated by hardware with specified rate. We have to
	 * read timestamp and WQE completion index atomically.
	 */
-#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_ARM64)
+#if defined(RTE_ARCH_X86_64)
	rte_int128_t src;
 
	memset(&src, 0, sizeof(src));
	*ts = src;
	/* if (*from == *ts) *from = *src else *ts = *from; */
-	rte_atomic128_cmp_exchange(from, ts, &src, 0,
-				   __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+	__atomic_compare_exchange_n((__uint128_t *)from,
+				    (__uint128_t *)ts,
+				    *(__uint128_t *)&src,
+				    0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
 #else
-	rte_atomic64_t *cqe = (rte_atomic64_t *)from;
+	uint64_t *cqe = (uint64_t *)from;
 
-	/* Power architecture does not support 16B compare-and-swap. */
+	/*
+	 * Power architecture does not support 16B compare-and-swap.
+	 * ARM implements it in software, so the loop below applies
+	 * there as well.
+	 */
	for (;;) {
		int64_t tm, op;
		int64_t *ps;
 
		rte_compiler_barrier();
-		tm = rte_atomic64_read(cqe + 0);
-		op = rte_atomic64_read(cqe + 1);
+		tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
+		op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
		rte_compiler_barrier();
-		if (tm != rte_atomic64_read(cqe + 0))
+		if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
			continue;
-		if (op != rte_atomic64_read(cqe + 1))
+		if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
			continue;
		ps = (int64_t *)ts;
		ps[0] = tm;
@@ -690,8 +695,8 @@
	ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
	ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
	rte_compiler_barrier();
-	rte_atomic64_set(&sh->txpp.ts.ts, ts);
-	rte_atomic64_set(&sh->txpp.ts.ci_ts, ci);
+	__atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
+	__atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
	rte_wmb();
 }
 
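On x86_64 the 16-byte snapshot above comes from a compare-and-swap with
both the expected and the desired value set to zero: if the CQE happens
to be all-zero the CAS succeeds and rewrites zero (a no-op), otherwise
it fails and deposits the current 16-byte contents into the
expected-value buffer. Either way the caller ends up with an atomic
copy and the CQE is never given a new value. A standalone sketch
(assumes GCC on x86_64 with cmpxchg16b support, -mcx16; not part of
the diff):

static inline void
atomic_read_16b(__uint128_t *src, __uint128_t *dst)
{
	__uint128_t zero = 0;

	*dst = 0;
	/* if (*src == *dst) *src = zero (no-op); else *dst = *src; */
	__atomic_compare_exchange_n(src, dst, zero, 0,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}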
@@ -713,7 +718,8 @@
	mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
	if (to.cts.op_own >> 4) {
		DRV_LOG(DEBUG, "Clock Queue error sync lost.");
-		rte_atomic32_inc(&sh->txpp.err_clock_queue);
+		__atomic_fetch_add(&sh->txpp.err_clock_queue,
+				   1, __ATOMIC_RELAXED);
		sh->txpp.sync_lost = 1;
		return;
	}
@@ -758,7 +764,10 @@
	if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
		return;
	MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
-	sh->txpp.tsa[sh->txpp.ts_p] = sh->txpp.ts;
+	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+			 sh->txpp.ts.ts, __ATOMIC_RELAXED);
+	__atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+			 sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
	if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
		sh->txpp.ts_p = 0;
	if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -799,7 +808,8 @@
	/* Check whether we have missed interrupts. */
	if (cq_ci - wq->cq_ci != 1) {
		DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
-		rte_atomic32_inc(&sh->txpp.err_miss_int);
+		__atomic_fetch_add(&sh->txpp.err_miss_int,
+				   1, __ATOMIC_RELAXED);
		/* Check sync lost on wqe index. */
		if (cq_ci - wq->cq_ci >=
			(((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -814,7 +824,8 @@
	/* Fire new requests to Rearm Queue. */
	if (error) {
		DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
-		rte_atomic32_inc(&sh->txpp.err_rearm_queue);
+		__atomic_fetch_add(&sh->txpp.err_rearm_queue,
+				   1, __ATOMIC_RELAXED);
		sh->txpp.sync_lost = 1;
	}
 }
@@ -877,11 +888,11 @@
	int ret;
	int fd;
 
-	rte_atomic32_set(&sh->txpp.err_miss_int, 0);
-	rte_atomic32_set(&sh->txpp.err_rearm_queue, 0);
-	rte_atomic32_set(&sh->txpp.err_clock_queue, 0);
-	rte_atomic32_set(&sh->txpp.err_ts_past, 0);
-	rte_atomic32_set(&sh->txpp.err_ts_future, 0);
+	sh->txpp.err_miss_int = 0;
+	sh->txpp.err_rearm_queue = 0;
+	sh->txpp.err_clock_queue = 0;
+	sh->txpp.err_ts_past = 0;
+	sh->txpp.err_ts_future = 0;
	/* Attach interrupt handler to process Rearm Queue completions. */
	fd = mlx5_os_get_devx_channel_fd(sh->txpp.echan);
	ret = mlx5_os_set_nonblock_channel_fd(fd);
@@ -1116,7 +1127,8 @@
	mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
	if (to.cts.op_own >> 4) {
		DRV_LOG(DEBUG, "Clock Queue error sync lost.");
-		rte_atomic32_inc(&sh->txpp.err_clock_queue);
+		__atomic_fetch_add(&sh->txpp.err_clock_queue,
+				   1, __ATOMIC_RELAXED);
		sh->txpp.sync_lost = 1;
		return -EIO;
	}
@@ -1147,11 +1159,11 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
 
-	rte_atomic32_set(&sh->txpp.err_miss_int, 0);
-	rte_atomic32_set(&sh->txpp.err_rearm_queue, 0);
-	rte_atomic32_set(&sh->txpp.err_clock_queue, 0);
-	rte_atomic32_set(&sh->txpp.err_ts_past, 0);
-	rte_atomic32_set(&sh->txpp.err_ts_future, 0);
+	__atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
	return 0;
 }
@@ -1196,19 +1208,21 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
		 struct mlx5_txpp_ts *tsa, uint16_t idx)
 {
	do {
-		int64_t ts, ci;
+		uint64_t ts, ci;
 
-		ts = rte_atomic64_read(&txpp->tsa[idx].ts);
-		ci = rte_atomic64_read(&txpp->tsa[idx].ci_ts);
+		ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
+		ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
		rte_compiler_barrier();
		if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
			continue;
-		if (rte_atomic64_read(&txpp->tsa[idx].ts) != ts)
+		if (__atomic_load_n(&txpp->tsa[idx].ts,
+				    __ATOMIC_RELAXED) != ts)
			continue;
-		if (rte_atomic64_read(&txpp->tsa[idx].ci_ts) != ci)
+		if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
+				    __ATOMIC_RELAXED) != ci)
			continue;
-		rte_atomic64_set(&tsa->ts, ts);
-		rte_atomic64_set(&tsa->ci_ts, ci);
+		tsa->ts = ts;
+		tsa->ci_ts = ci;
		return;
	} while (true);
 }
@@ -1244,9 +1258,9 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
		rte_compiler_barrier();
	} while (ts_p != txpp->ts_p);
	/* We have two neighbor reports, calculate the jitter. */
-	dts = rte_atomic64_read(&tsa1.ts) - rte_atomic64_read(&tsa0.ts);
-	dci = (rte_atomic64_read(&tsa1.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH)) -
-	      (rte_atomic64_read(&tsa0.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH));
+	dts = tsa1.ts - tsa0.ts;
+	dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
+	      (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
	if (dci < 0)
		dci += 1 << MLX5_CQ_INDEX_WIDTH;
	dci *= txpp->tick;
@@ -1284,9 +1298,9 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
		rte_compiler_barrier();
	} while (ts_p != txpp->ts_p);
	/* We have two neighbor reports, calculate the wander. */
-	dts = rte_atomic64_read(&tsa1.ts) - rte_atomic64_read(&tsa0.ts);
-	dci = (rte_atomic64_read(&tsa1.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH)) -
-	      (rte_atomic64_read(&tsa0.ci_ts) >> (64 - MLX5_CQ_INDEX_WIDTH));
+	dts = tsa1.ts - tsa0.ts;
+	dci = (tsa1.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH)) -
+	      (tsa0.ci_ts >> (64 - MLX5_CQ_INDEX_WIDTH));
	dci += 1 << MLX5_CQ_INDEX_WIDTH;
	dci *= txpp->tick;
	return (dts > dci) ? dts - dci : dci - dts;
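The jitter and wander statistics above compare two Clock Queue reports:
the wall-clock delta dts should ideally equal the completion-index
delta multiplied by the queue tick (dci), and the reported value is
their absolute difference. A standalone sketch of that arithmetic,
including the wrap-around correction for the index counter
(hypothetical names and width, not part of the diff):

#include <stdint.h>

#define CQ_INDEX_WIDTH 8 /* hypothetical stand-in for MLX5_CQ_INDEX_WIDTH */

static inline uint64_t
pacing_jitter(uint64_t ts0, uint64_t ci0, uint64_t ts1, uint64_t ci1,
	      uint64_t tick)
{
	int64_t dts = (int64_t)(ts1 - ts0);
	int64_t dci = (int64_t)ci1 - (int64_t)ci0;

	/* The completion index is CQ_INDEX_WIDTH bits wide and may wrap. */
	if (dci < 0)
		dci += 1 << CQ_INDEX_WIDTH;
	dci *= (int64_t)tick;
	return (uint64_t)((dts > dci) ? dts - dci : dci - dts);
}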
@@ -1325,15 +1339,20 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
	for (i = 0; i < n_txpp; ++i)
		stats[n_used + i].id = n_used + i;
	stats[n_used + 0].value =
-		rte_atomic32_read(&sh->txpp.err_miss_int);
+		__atomic_load_n(&sh->txpp.err_miss_int,
+				__ATOMIC_RELAXED);
	stats[n_used + 1].value =
-		rte_atomic32_read(&sh->txpp.err_rearm_queue);
+		__atomic_load_n(&sh->txpp.err_rearm_queue,
+				__ATOMIC_RELAXED);
	stats[n_used + 2].value =
-		rte_atomic32_read(&sh->txpp.err_clock_queue);
+		__atomic_load_n(&sh->txpp.err_clock_queue,
+				__ATOMIC_RELAXED);
	stats[n_used + 3].value =
-		rte_atomic32_read(&sh->txpp.err_ts_past);
+		__atomic_load_n(&sh->txpp.err_ts_past,
+				__ATOMIC_RELAXED);
	stats[n_used + 4].value =
-		rte_atomic32_read(&sh->txpp.err_ts_future);
+		__atomic_load_n(&sh->txpp.err_ts_future,
+				__ATOMIC_RELAXED);
	stats[n_used + 5].value = mlx5_txpp_xstats_jitter(&sh->txpp);
	stats[n_used + 6].value = mlx5_txpp_xstats_wander(&sh->txpp);
	stats[n_used + 7].value = sh->txpp.sync_lost;