get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update).
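
The resource shown below is plain JSON over HTTP, so it can be retrieved with any client. A minimal sketch in Python (standard library only; it assumes the endpoint returns JSON to non-browser clients, as Patchwork's REST API normally does, and uses the patch URL from the response below):

import json
import urllib.request

# Fetch the patch shown below from the Patchwork REST API.
url = "http://patches.dpdk.org/api/patches/74054/"
with urllib.request.urlopen(url) as resp:
    patch = json.load(resp)

# A few of the fields present in the response body:
print(patch["name"])   # patch subject
print(patch["state"])  # e.g. "superseded"
print(patch["mbox"])   # raw mbox URL, suitable for git-am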

GET /api/patches/74054/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 74054,
    "url": "http://patches.dpdk.org/api/patches/74054/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1594794114-16313-10-git-send-email-viacheslavo@mellanox.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1594794114-16313-10-git-send-email-viacheslavo@mellanox.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1594794114-16313-10-git-send-email-viacheslavo@mellanox.com",
    "date": "2020-07-15T06:21:46",
    "name": "[v2,09/17] net/mlx5: introduce clock queue service routine",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "acda811dd0ca1988e46de1060b3590392402cc28",
    "submitter": {
        "id": 1102,
        "url": "http://patches.dpdk.org/api/people/1102/?format=api",
        "name": "Slava Ovsiienko",
        "email": "viacheslavo@mellanox.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1594794114-16313-10-git-send-email-viacheslavo@mellanox.com/mbox/",
    "series": [
        {
            "id": 11032,
            "url": "http://patches.dpdk.org/api/series/11032/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11032",
            "date": "2020-07-15T06:21:37",
            "name": "net/mlx5: introduce accurate packet Tx scheduling",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/11032/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/74054/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/74054/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 7A82FA0540;\n\tWed, 15 Jul 2020 08:24:07 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1E81B1C1AF;\n\tWed, 15 Jul 2020 08:22:28 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id 8D4C81C121\n for <dev@dpdk.org>; Wed, 15 Jul 2020 08:22:17 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n viacheslavo@mellanox.com) with SMTP; 15 Jul 2020 09:22:14 +0300",
            "from pegasus12.mtr.labs.mlnx (pegasus12.mtr.labs.mlnx\n [10.210.17.40])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 06F6MDku007047;\n Wed, 15 Jul 2020 09:22:13 +0300",
            "from pegasus12.mtr.labs.mlnx (localhost [127.0.0.1])\n by pegasus12.mtr.labs.mlnx (8.14.7/8.14.7) with ESMTP id 06F6MDnm016446;\n Wed, 15 Jul 2020 06:22:13 GMT",
            "(from viacheslavo@localhost)\n by pegasus12.mtr.labs.mlnx (8.14.7/8.14.7/Submit) id 06F6MDsj016444;\n Wed, 15 Jul 2020 06:22:13 GMT"
        ],
        "X-Authentication-Warning": "pegasus12.mtr.labs.mlnx: viacheslavo set sender to\n viacheslavo@mellanox.com using -f",
        "From": "Viacheslav Ovsiienko <viacheslavo@mellanox.com>",
        "To": "dev@dpdk.org",
        "Cc": "matan@mellanox.com, rasland@mellanox.com, olivier.matz@6wind.com,\n thomas@monjalon.net, ferruh.yigit@intel.com",
        "Date": "Wed, 15 Jul 2020 06:21:46 +0000",
        "Message-Id": "<1594794114-16313-10-git-send-email-viacheslavo@mellanox.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1594794114-16313-1-git-send-email-viacheslavo@mellanox.com>",
        "References": "<1591771085-24959-1-git-send-email-viacheslavo@mellanox.com>\n <1594794114-16313-1-git-send-email-viacheslavo@mellanox.com>",
        "Subject": "[dpdk-dev] [PATCH v2 09/17] net/mlx5: introduce clock queue service\n\troutine",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Service routine is invoked periodically on Rearm Queue\ncompletion interrupts, typically once per some milliseconds\n(1-16) to track clock jitter and wander in robust fashion.\nIt performs the following:\n\n- fetches the completed CQEs for Rearm Queue\n- restarts Rearm Queue on errors\n- pushes new requests to Rearm Queue to make it\n  continuously running and pushing cross-channel requests\n  to Clock Queue\n- reads and caches the Clock Queue CQE to be used in datapath\n- gathers statistics to estimate clock jitter and wander\n- gathers Clock Queue errors statistics\n\nSigned-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>\nAcked-by: Matan Azrad <matan@mellanox.com>\n---\n drivers/net/mlx5/mlx5.h      |  15 ++\n drivers/net/mlx5/mlx5_defs.h |   1 +\n drivers/net/mlx5/mlx5_rxtx.h |  20 +++\n drivers/net/mlx5/mlx5_txpp.c | 336 +++++++++++++++++++++++++++++++++++++++++++\n 4 files changed, 372 insertions(+)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex e8a7b10..e599bbb 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -555,6 +555,12 @@ struct mlx5_txpp_wq {\n \tvolatile uint32_t *sq_dbrec;\n };\n \n+/* Tx packet pacing internal timestamp. */\n+struct mlx5_txpp_ts {\n+\trte_atomic64_t ci_ts;\n+\trte_atomic64_t ts;\n+};\n+\n /* Tx packet pacing structure. */\n struct mlx5_dev_txpp {\n \tpthread_mutex_t mutex; /* Pacing create/destroy mutex. */\n@@ -570,6 +576,15 @@ struct mlx5_dev_txpp {\n \tstruct mlx5_txpp_wq rearm_queue; /* Clock Queue. */\n \tstruct mlx5dv_pp *pp; /* Packet pacing context. */\n \tuint16_t pp_id; /* Packet pacing context index. */\n+\tuint16_t ts_n; /* Number of captured timestamps. */\n+\tuint16_t ts_p; /* Pointer to statisticks timestamp. */\n+\tstruct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */\n+\tstruct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */\n+\tuint32_t sync_lost:1; /* ci/timestamp synchronization lost. */\n+\t/* Statistics counters. */\n+\trte_atomic32_t err_miss_int; /* Missed service interrupt. */\n+\trte_atomic32_t err_rearm_queue; /* Rearm Queue errors. */\n+\trte_atomic32_t err_clock_queue; /* Clock Queue errors. */\n };\n \n /*\ndiff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h\nindex a8626a4..7ed3e88 100644\n--- a/drivers/net/mlx5/mlx5_defs.h\n+++ b/drivers/net/mlx5/mlx5_defs.h\n@@ -172,6 +172,7 @@\n #define MLX5_TXDB_HEURISTIC 2\n \n /* Tx accurate scheduling on timestamps parameters. */\n+#define MLX5_TXPP_WAIT_INIT_TS 1000ul /* How long to wait timestamp. */\n #define MLX5_TXPP_CLKQ_SIZE 1\n #define MLX5_TXPP_REARM\t((1UL << MLX5_WQ_INDEX_WIDTH) / 4)\n #define MLX5_TXPP_REARM_SQ_SIZE (((1UL << MLX5_CQ_INDEX_WIDTH) / \\\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex 1b797da..8a8d2b5 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -30,6 +30,7 @@\n #include <rte_io.h>\n #include <rte_bus_pci.h>\n #include <rte_malloc.h>\n+#include <rte_cycles.h>\n \n #include <mlx5_glue.h>\n #include <mlx5_prm.h>\n@@ -695,4 +696,23 @@ int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,\n \tmlx5_tx_dbrec_cond_wmb(txq, wqe, 1);\n }\n \n+/**\n+ * Convert timestamp from HW format to linear counter\n+ * from Packet Pacing Clock Queue CQE timestamp format.\n+ *\n+ * @param sh\n+ *   Pointer to the device shared context. 
Might be needed\n+ *   to convert according current device configuration.\n+ * @param ts\n+ *   Timestamp from CQE to convert.\n+ * @return\n+ *   UTC in nanoseconds\n+ */\n+static __rte_always_inline uint64_t\n+mlx5_txpp_convert_rx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t ts)\n+{\n+\tRTE_SET_USED(sh);\n+\treturn (ts & UINT32_MAX) + (ts >> 32) * NS_PER_S;\n+}\n+\n #endif /* RTE_PMD_MLX5_RXTX_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c\nindex a0ee872..17d64ff 100644\n--- a/drivers/net/mlx5/mlx5_txpp.c\n+++ b/drivers/net/mlx5/mlx5_txpp.c\n@@ -1,6 +1,9 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n  * Copyright 2020 Mellanox Technologies, Ltd\n  */\n+#include <fcntl.h>\n+#include <stdint.h>\n+\n #include <rte_ether.h>\n #include <rte_ethdev_driver.h>\n #include <rte_interrupts.h>\n@@ -144,6 +147,33 @@\n \tstruct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;\n \n \tmlx5_txpp_destroy_send_queue(wq);\n+\tif (sh->txpp.tsa) {\n+\t\trte_free(sh->txpp.tsa);\n+\t\tsh->txpp.tsa = NULL;\n+\t}\n+}\n+\n+static void\n+mlx5_txpp_doorbell_rearm_queue(struct mlx5_dev_ctx_shared *sh, uint16_t ci)\n+{\n+\tstruct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;\n+\tunion {\n+\t\tuint32_t w32[2];\n+\t\tuint64_t w64;\n+\t} cs;\n+\n+\twq->sq_ci = ci + 1;\n+\tcs.w32[0] = rte_cpu_to_be_32(rte_be_to_cpu_32\n+\t\t   (wq->wqes[ci & (wq->sq_size - 1)].ctrl[0]) | (ci - 1) << 8);\n+\tcs.w32[1] = wq->wqes[ci & (wq->sq_size - 1)].ctrl[1];\n+\t/* Update SQ doorbell record with new SQ ci. */\n+\trte_compiler_barrier();\n+\t*wq->sq_dbrec = rte_cpu_to_be_32(wq->sq_ci);\n+\t/* Make sure the doorbell record is updated. */\n+\trte_wmb();\n+\t/* Write to doorbel register to start processing. */\n+\t__mlx5_uar_write64_relaxed(cs.w64, sh->tx_uar->reg_addr, NULL);\n+\trte_wmb();\n }\n \n static void\n@@ -433,6 +463,16 @@\n \tuint32_t umem_size, umem_dbrec;\n \tint ret;\n \n+\tsh->txpp.tsa = rte_zmalloc_socket(__func__,\n+\t\t\t\t\t   MLX5_TXPP_REARM_SQ_SIZE *\n+\t\t\t\t\t   sizeof(struct mlx5_txpp_ts),\n+\t\t\t\t\t   0, sh->numa_node);\n+\tif (!sh->txpp.tsa) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate memory for CQ stats.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\tsh->txpp.ts_p = 0;\n+\tsh->txpp.ts_n = 0;\n \t/* Allocate memory buffer for CQEs and doorbell record. */\n \tumem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;\n \tumem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);\n@@ -562,6 +602,297 @@\n \treturn ret;\n }\n \n+/* Enable notification from the Rearm Queue CQ. */\n+static inline void\n+mlx5_txpp_cq_arm(struct mlx5_dev_ctx_shared *sh)\n+{\n+\tstruct mlx5_txpp_wq *aq = &sh->txpp.rearm_queue;\n+\tuint32_t arm_sn = aq->arm_sn << MLX5_CQ_SQN_OFFSET;\n+\tuint32_t db_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | aq->cq_ci;\n+\tuint64_t db_be = rte_cpu_to_be_64(((uint64_t)db_hi << 32) | aq->cq->id);\n+\tuint32_t *addr = RTE_PTR_ADD(sh->tx_uar->base_addr, MLX5_CQ_DOORBELL);\n+\n+\trte_compiler_barrier();\n+\taq->cq_dbrec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(db_hi);\n+\trte_wmb();\n+#ifdef RTE_ARCH_64\n+\t*(uint64_t *)addr = db_be;\n+#else\n+\t*(uint32_t *)addr = db_be;\n+\trte_io_wmb();\n+\t*((uint32_t *)addr + 1) = db_be >> 32;\n+#endif\n+\taq->arm_sn++;\n+}\n+\n+static inline void\n+mlx5_atomic_read_cqe(rte_int128_t *from, rte_int128_t *ts)\n+{\n+\t/*\n+\t * The only CQE of Clock Queue is being continuously\n+\t * update by hardware with soecified rate. 
We have to\n+\t * read timestump and WQE completion index atomically.\n+\t */\n+#ifdef RTE_ARCH_PPC_64\n+\trte_atomic64_t *cqe = (rte_atomic64_t *)from;\n+\n+\t/* Power architecture does not support 16B compare-and-swap. */\n+\tfor (;;) {\n+\t\tint64_t tm, op;\n+\t\tint64_t *ps;\n+\n+\t\trte_compiler_barrier();\n+\t\ttm = rte_atomic64_read(cqe + 0);\n+\t\top = rte_atomic64_read(cqe + 1);\n+\t\trte_compiler_barrier();\n+\t\tif (tm != rte_atomic64_read(cqe + 0))\n+\t\t\tcontinue;\n+\t\tif (op != rte_atomic64_read(cqe + 1))\n+\t\t\tcontinue;\n+\t\tps = (int64_t *)ts;\n+\t\tps[0] = tm;\n+\t\tps[1] = op;\n+\t\treturn;\n+\t}\n+#else\n+\trte_int128_t src;\n+\n+\tmemset(&src, 0, sizeof(src));\n+\t*ts = src;\n+\t/* if (*from == *ts) *from = *src else *ts = *from; */\n+\trte_atomic128_cmp_exchange(from, ts, &src, 0,\n+\t\t\t\t   __ATOMIC_RELAXED, __ATOMIC_RELAXED);\n+#endif\n+}\n+\n+/* Stores timestamp in the cache structure to share data with datapath. */\n+static inline void\n+mlx5_txpp_cache_timestamp(struct mlx5_dev_ctx_shared *sh,\n+\t\t\t   uint64_t ts, uint64_t ci)\n+{\n+\tci = ci << (64 - MLX5_CQ_INDEX_WIDTH);\n+\tci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;\n+\trte_compiler_barrier();\n+\trte_atomic64_set(&sh->txpp.ts.ts, ts);\n+\trte_atomic64_set(&sh->txpp.ts.ci_ts, ci);\n+\trte_wmb();\n+}\n+\n+/* Reads timestamp from Clock Queue CQE and stores in the cache. */\n+static inline void\n+mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)\n+{\n+\tstruct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;\n+\tstruct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;\n+\tunion {\n+\t\trte_int128_t u128;\n+\t\tstruct mlx5_cqe_ts cts;\n+\t} to;\n+\tuint64_t ts;\n+\tuint16_t ci;\n+\n+\tstatic_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),\n+\t\t      \"Wrong timestamp CQE part size\");\n+\tmlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);\n+\tif (to.cts.op_own >> 4) {\n+\t\tDRV_LOG(DEBUG, \"Clock Queue error sync lost.\");\n+\t\trte_atomic32_inc(&sh->txpp.err_clock_queue);\n+\t\tsh->txpp.sync_lost = 1;\n+\t\treturn;\n+\t}\n+\tci = rte_be_to_cpu_16(to.cts.wqe_counter);\n+\tts = rte_be_to_cpu_64(to.cts.timestamp);\n+\tts = mlx5_txpp_convert_rx_ts(sh, ts);\n+\twq->cq_ci += (ci - wq->sq_ci) & UINT16_MAX;\n+\twq->sq_ci = ci;\n+\tmlx5_txpp_cache_timestamp(sh, ts, wq->cq_ci);\n+}\n+\n+/* Gather statistics for timestamp from Clock Queue CQE. */\n+static inline void\n+mlx5_txpp_gather_timestamp(struct mlx5_dev_ctx_shared *sh)\n+{\n+\t/* Check whether we have a valid timestamp. */\n+\tif (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)\n+\t\treturn;\n+\tMLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);\n+\tsh->txpp.tsa[sh->txpp.ts_p] = sh->txpp.ts;\n+\tif (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)\n+\t\tsh->txpp.ts_p = 0;\n+\tif (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)\n+\t\t++sh->txpp.ts_n;\n+}\n+\n+/* Waits for the first completion on Clock Queue to init timestamp. */\n+static inline void\n+mlx5_txpp_init_timestamp(struct mlx5_dev_ctx_shared *sh)\n+{\n+\tstruct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;\n+\tuint32_t wait;\n+\n+\tsh->txpp.ts_p = 0;\n+\tsh->txpp.ts_n = 0;\n+\tfor (wait = 0; wait < MLX5_TXPP_WAIT_INIT_TS; wait++) {\n+\t\tstruct timespec onems;\n+\n+\t\tmlx5_txpp_update_timestamp(sh);\n+\t\tif (wq->sq_ci)\n+\t\t\treturn;\n+\t\t/* Wait one millisecond and try again. 
*/\n+\t\tonems.tv_sec = 0;\n+\t\tonems.tv_nsec = NS_PER_S / MS_PER_S;\n+\t\tnanosleep(&onems, 0);\n+\t}\n+\tDRV_LOG(ERR, \"Unable to initialize timestamp.\");\n+\tsh->txpp.sync_lost = 1;\n+}\n+\n+/* Handles Rearm Queue completions in periodic service. */\n+static __rte_always_inline void\n+mlx5_txpp_handle_rearm_queue(struct mlx5_dev_ctx_shared *sh)\n+{\n+\tstruct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;\n+\tuint32_t cq_ci = wq->cq_ci;\n+\tbool error = false;\n+\tint ret;\n+\n+\tdo {\n+\t\tvolatile struct mlx5_cqe *cqe;\n+\n+\t\tcqe = &wq->cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)];\n+\t\tret = check_cqe(cqe, MLX5_TXPP_REARM_CQ_SIZE, cq_ci);\n+\t\tswitch (ret) {\n+\t\tcase MLX5_CQE_STATUS_ERR:\n+\t\t\terror = true;\n+\t\t\t++cq_ci;\n+\t\t\tbreak;\n+\t\tcase MLX5_CQE_STATUS_SW_OWN:\n+\t\t\twq->sq_ci += 2;\n+\t\t\t++cq_ci;\n+\t\t\tbreak;\n+\t\tcase MLX5_CQE_STATUS_HW_OWN:\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tMLX5_ASSERT(false);\n+\t\t\tbreak;\n+\t\t}\n+\t} while (ret != MLX5_CQE_STATUS_HW_OWN);\n+\tif (likely(cq_ci != wq->cq_ci)) {\n+\t\t/* Check whether we have missed interrupts. */\n+\t\tif (cq_ci - wq->cq_ci != 1) {\n+\t\t\tDRV_LOG(DEBUG, \"Rearm Queue missed interrupt.\");\n+\t\t\trte_atomic32_inc(&sh->txpp.err_miss_int);\n+\t\t\t/* Check sync lost on wqe index. */\n+\t\t\tif (cq_ci - wq->cq_ci >=\n+\t\t\t\t(((1UL << MLX5_WQ_INDEX_WIDTH) /\n+\t\t\t\t  MLX5_TXPP_REARM) - 1))\n+\t\t\t\terror = 1;\n+\t\t}\n+\t\t/* Update doorbell record to notify hardware. */\n+\t\trte_compiler_barrier();\n+\t\t*wq->cq_dbrec = rte_cpu_to_be_32(cq_ci);\n+\t\trte_wmb();\n+\t\twq->cq_ci = cq_ci;\n+\t\t/* Fire new requests to Rearm Queue. */\n+\t\tif (error) {\n+\t\t\tDRV_LOG(DEBUG, \"Rearm Queue error sync lost.\");\n+\t\t\trte_atomic32_inc(&sh->txpp.err_rearm_queue);\n+\t\t\tsh->txpp.sync_lost = 1;\n+\t\t}\n+\t}\n+}\n+\n+/* Handles Clock Queue completions in periodic service. */\n+static __rte_always_inline void\n+mlx5_txpp_handle_clock_queue(struct mlx5_dev_ctx_shared *sh)\n+{\n+\tmlx5_txpp_update_timestamp(sh);\n+\tmlx5_txpp_gather_timestamp(sh);\n+}\n+\n+/* Invoked periodically on Rearm Queue completions. */\n+static void\n+mlx5_txpp_interrupt_handler(void *cb_arg)\n+{\n+#ifndef HAVE_IBV_DEVX_EVENT\n+\tRTE_SET_USED(cb_arg);\n+\treturn;\n+#else\n+\tstruct mlx5_dev_ctx_shared *sh = cb_arg;\n+\tunion {\n+\t\tstruct mlx5dv_devx_async_event_hdr event_resp;\n+\t\tuint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];\n+\t} out;\n+\n+\tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);\n+\t/* Process events in the loop. Only rearm completions are expected. */\n+\twhile (mlx5_glue->devx_get_event\n+\t\t\t\t(sh->txpp.echan,\n+\t\t\t\t &out.event_resp,\n+\t\t\t\t sizeof(out.buf)) >=\n+\t\t\t\t (ssize_t)sizeof(out.event_resp.cookie)) {\n+\t\tmlx5_txpp_handle_rearm_queue(sh);\n+\t\tmlx5_txpp_handle_clock_queue(sh);\n+\t\tmlx5_txpp_cq_arm(sh);\n+\t\tmlx5_txpp_doorbell_rearm_queue\n+\t\t\t\t\t(sh, sh->txpp.rearm_queue.sq_ci - 1);\n+\t}\n+#endif /* HAVE_IBV_DEVX_ASYNC */\n+}\n+\n+static void\n+mlx5_txpp_stop_service(struct mlx5_dev_ctx_shared *sh)\n+{\n+\tif (!sh->txpp.intr_handle.fd)\n+\t\treturn;\n+\tmlx5_intr_callback_unregister(&sh->txpp.intr_handle,\n+\t\t\t\t      mlx5_txpp_interrupt_handler, sh);\n+\tsh->txpp.intr_handle.fd = 0;\n+}\n+\n+/* Attach interrupt handler and fires first request to Rearm Queue. 
*/\n+static int\n+mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)\n+{\n+\tuint16_t event_nums[1] = {0};\n+\tint flags;\n+\tint ret;\n+\n+\t/* Attach interrupt handler to process Rearm Queue completions. */\n+\tflags = fcntl(sh->txpp.echan->fd, F_GETFL);\n+\tret = fcntl(sh->txpp.echan->fd, F_SETFL, flags | O_NONBLOCK);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to change event channel FD.\");\n+\t\trte_errno = errno;\n+\t\treturn -rte_errno;\n+\t}\n+\tmemset(&sh->txpp.intr_handle, 0, sizeof(sh->txpp.intr_handle));\n+\tsh->txpp.intr_handle.fd = sh->txpp.echan->fd;\n+\tsh->txpp.intr_handle.type = RTE_INTR_HANDLE_EXT;\n+\tif (rte_intr_callback_register(&sh->txpp.intr_handle,\n+\t\t\t\t       mlx5_txpp_interrupt_handler, sh)) {\n+\t\tsh->txpp.intr_handle.fd = 0;\n+\t\tDRV_LOG(ERR, \"Failed to register CQE interrupt %d.\", rte_errno);\n+\t\treturn -rte_errno;\n+\t}\n+\t/* Subscribe CQ event to the event channel controlled by the driver. */\n+\tret = mlx5_glue->devx_subscribe_devx_event(sh->txpp.echan,\n+\t\t\t\t\t\t   sh->txpp.rearm_queue.cq->obj,\n+\t\t\t\t\t\t   sizeof(event_nums),\n+\t\t\t\t\t\t   event_nums, 0);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to subscribe CQE event.\");\n+\t\trte_errno = errno;\n+\t\treturn -errno;\n+\t}\n+\t/* Enable interrupts in the CQ. */\n+\tmlx5_txpp_cq_arm(sh);\n+\t/* Fire the first request on Rearm Queue. */\n+\tmlx5_txpp_doorbell_rearm_queue(sh, sh->txpp.rearm_queue.sq_size - 1);\n+\tmlx5_txpp_init_timestamp(sh);\n+\treturn 0;\n+}\n+\n /*\n  * The routine initializes the packet pacing infrastructure:\n  * - allocates PP context\n@@ -595,8 +926,12 @@\n \tret = mlx5_txpp_create_rearm_queue(sh);\n \tif (ret)\n \t\tgoto exit;\n+\tret = mlx5_txpp_start_service(sh);\n+\tif (ret)\n+\t\tgoto exit;\n exit:\n \tif (ret) {\n+\t\tmlx5_txpp_stop_service(sh);\n \t\tmlx5_txpp_destroy_rearm_queue(sh);\n \t\tmlx5_txpp_destroy_clock_queue(sh);\n \t\tmlx5_txpp_free_pp_index(sh);\n@@ -618,6 +953,7 @@\n static void\n mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)\n {\n+\tmlx5_txpp_stop_service(sh);\n \tmlx5_txpp_destroy_rearm_queue(sh);\n \tmlx5_txpp_destroy_clock_queue(sh);\n \tmlx5_txpp_free_pp_index(sh);\n",
    "prefixes": [
        "v2",
        "09/17"
    ]
}
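
For the patch and put operations listed at the top, the server expects an authenticated request. Below is a hedged sketch of a partial update with HTTP PATCH, assuming token authentication is enabled on this Patchwork instance; the token and the target state are placeholders, and which fields are writable depends on the account's permissions:

import json
import urllib.request

# Partially update the patch via HTTP PATCH.
# "REPLACE_WITH_API_TOKEN" and the target state are placeholders, not real values.
url = "http://patches.dpdk.org/api/patches/74054/"
req = urllib.request.Request(
    url,
    data=json.dumps({"state": "accepted"}).encode(),
    method="PATCH",
    headers={
        "Content-Type": "application/json",
        "Authorization": "Token REPLACE_WITH_API_TOKEN",
    },
)
with urllib.request.urlopen(req) as resp:
    updated = json.load(resp)
print(updated["state"])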