From patchwork Fri Jul 10 09:48:40 2020
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 73719
X-Patchwork-Delegate: rasland@nvidia.com
Return-Path:
X-Original-To: patchwork@inbox.dpdk.org
Delivered-To: patchwork@inbox.dpdk.org
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id DF0B5A052A;
	Fri, 10 Jul 2020 11:49:48 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id 43ECA1DDFE;
	Fri, 10 Jul 2020 11:49:09 +0200 (CEST)
Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])
	by dpdk.org (Postfix) with ESMTP id A85D31DD86
	for ; Fri, 10 Jul 2020 11:49:00 +0200 (CEST)
Received: from Internal Mail-Server by MTLPINE1 (envelope-from
	viacheslavo@mellanox.com) with SMTP; 10 Jul 2020 12:48:59 +0300
Received: from pegasus12.mtr.labs.mlnx (pegasus12.mtr.labs.mlnx
	[10.210.17.40]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id
	06A9mx7c012270; Fri, 10 Jul 2020 12:48:59 +0300
Received: from pegasus12.mtr.labs.mlnx (localhost [127.0.0.1])
	by pegasus12.mtr.labs.mlnx (8.14.7/8.14.7) with ESMTP id 06A9mxvv024755;
	Fri, 10 Jul 2020 09:48:59 GMT
Received: (from viacheslavo@localhost)
	by pegasus12.mtr.labs.mlnx (8.14.7/8.14.7/Submit) id 06A9mwJt024754;
	Fri, 10 Jul 2020 09:48:58 GMT
X-Authentication-Warning: pegasus12.mtr.labs.mlnx: viacheslavo set sender to
	viacheslavo@mellanox.com using -f
From: Viacheslav Ovsiienko
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, thomas@monjalon.net,
	ferruh.yigit@intel.com
Date: Fri, 10 Jul 2020 09:48:40 +0000
Message-Id: <1594374530-24659-6-git-send-email-viacheslavo@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1594374530-24659-1-git-send-email-viacheslavo@mellanox.com>
References: <1594374530-24659-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH v1 06/16] net/mlx5: create rearm queue for packet
	pacing
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

The dedicated Rearm Queue is needed to fire the work requests to
the Clock Queue in realtime. The Clock Queue should never stop,
otherwise the clock synchronization might be broken and packet
send scheduling would fail. The Rearm Queue uses cross-channel
SEND_EN/WAIT operations to provide the requests to the Clock
Queue in a robust way.

Signed-off-by: Viacheslav Ovsiienko
---

A small standalone sketch of the SEND_EN/WAIT index arithmetic is
appended after the diff for illustration.

 drivers/net/mlx5/mlx5.h      |   1 +
 drivers/net/mlx5/mlx5_defs.h |   5 +-
 drivers/net/mlx5/mlx5_txpp.c | 203 ++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 205 insertions(+), 4 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index be28d80..a1956cc 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -567,6 +567,7 @@ struct mlx5_dev_txpp {
 	struct rte_intr_handle intr_handle; /* Periodic interrupt. */
 	struct mlx5dv_devx_event_channel *echan; /* Event Channel. */
 	struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
+	struct mlx5_txpp_wq rearm_queue; /* Rearm Queue. */
 };
 
 /*
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index fff11af..35f02cb 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -173,11 +173,14 @@
 
 /* Tx accurate scheduling on timestamps parameters.
  */
 #define MLX5_TXPP_CLKQ_SIZE 1
+#define MLX5_TXPP_REARM ((1UL << MLX5_WQ_INDEX_WIDTH) / 4)
+#define MLX5_TXPP_REARM_SQ_SIZE (((1UL << MLX5_CQ_INDEX_WIDTH) / \
+				MLX5_TXPP_REARM) * 2)
+#define MLX5_TXPP_REARM_CQ_SIZE (MLX5_TXPP_REARM_SQ_SIZE / 2)
 /* The minimal size test packet to put into one WQE, padded by HW. */
 #define MLX5_TXPP_TEST_PKT_SIZE (sizeof(struct rte_ether_hdr) + \
				sizeof(struct rte_ipv4_hdr))
-
 /* Size of the simple hash table for metadata register table. */
 #define MLX5_FLOW_MREG_HTABLE_SZ 4096
 #define MLX5_FLOW_MREG_HNAME "MARK_COPY_TABLE"
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 7f8a6c4..34ac493 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -9,6 +9,7 @@
 
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
+#include "mlx5_common_os.h"
 
 /* Destroy Event Queue Notification Channel. */
 static void
@@ -48,10 +49,8 @@
 }
 
 static void
-mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
 {
-	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-
 	if (wq->sq)
 		claim_zero(mlx5_devx_cmd_destroy(wq->sq));
 	if (wq->sq_umem)
@@ -68,6 +67,199 @@
 }
 
 static void
+mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
+
+	mlx5_txpp_destroy_send_queue(wq);
+}
+
+static void
+mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
+
+	mlx5_txpp_destroy_send_queue(wq);
+}
+
+static void
+mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
+	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
+	uint32_t i;
+
+	for (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {
+		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
+		++cqe;
+	}
+}
+
+static void
+mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
+	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
+	uint32_t i;
+
+	for (i = 0; i < wq->sq_size; i += 2) {
+		struct mlx5_wqe_cseg *cs;
+		struct mlx5_wqe_qseg *qs;
+		uint32_t index;
+
+		/* Build SEND_EN request with slave WQE index. */
+		cs = &wqe[i + 0].cseg;
+		cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
+		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
+		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+				     MLX5_COMP_MODE_OFFSET);
+		cs->misc = RTE_BE32(0);
+		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
+		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
+			((1 << MLX5_WQ_INDEX_WIDTH) - 1);
+		qs->max_index = rte_cpu_to_be_32(index);
+		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
+		/* Build WAIT request with slave CQE index. */
+		cs = &wqe[i + 1].cseg;
+		cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
+		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
+		cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
+				     MLX5_COMP_MODE_OFFSET);
+		cs->misc = RTE_BE32(0);
+		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
+		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
+			((1 << MLX5_CQ_INDEX_WIDTH) - 1);
+		qs->max_index = rte_cpu_to_be_32(index);
+		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);
+	}
+}
+
+/* Creates the Rearm Queue to fire the requests to Clock Queue in realtime.
+ */
+static int
+mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
+	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
+	struct mlx5_devx_cq_attr cq_attr = { 0 };
+	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
+	size_t page_size = sysconf(_SC_PAGESIZE);
+	uint32_t umem_size, umem_dbrec;
+	int ret;
+
+	/* Allocate memory buffer for CQEs and doorbell record. */
+	umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
+	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
+	umem_size += MLX5_DBR_SIZE;
+	wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
+					page_size, sh->numa_node);
+	if (!wq->cq_buf) {
+		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
+		return -ENOMEM;
+	}
+	/* Register allocated buffer in user space with DevX. */
+	wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
+					       (void *)(uintptr_t)wq->cq_buf,
+					       umem_size,
+					       IBV_ACCESS_LOCAL_WRITE);
+	if (!wq->cq_umem) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
+		goto error;
+	}
+	/* Create completion queue object for Rearm Queue. */
+	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
+			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
+	cq_attr.uar_page_id = sh->tx_uar->page_id;
+	cq_attr.eqn = sh->txpp.eqn;
+	cq_attr.q_umem_valid = 1;
+	cq_attr.q_umem_offset = 0;
+	cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
+	cq_attr.db_umem_valid = 1;
+	cq_attr.db_umem_offset = umem_dbrec;
+	cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
+	cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);
+	cq_attr.log_page_size = rte_log2_u32(page_size);
+	wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
+	if (!wq->cq) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
+		goto error;
+	}
+	wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
+	wq->cq_ci = 0;
+	wq->arm_sn = 0;
+	/* Mark all CQEs initially as invalid. */
+	mlx5_txpp_fill_cqe_rearm_queue(sh);
+	/*
+	 * Allocate memory buffer for Send Queue WQEs.
+	 * There should be no WQE leftovers in the cyclic queue.
+	 */
+	wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
+	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
+	umem_size = MLX5_WQE_SIZE * wq->sq_size;
+	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
+	umem_size += MLX5_DBR_SIZE;
+	wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
+					page_size, sh->numa_node);
+	if (!wq->sq_buf) {
+		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	/* Register allocated buffer in user space with DevX. */
+	wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
+					       (void *)(uintptr_t)wq->sq_buf,
+					       umem_size,
+					       IBV_ACCESS_LOCAL_WRITE);
+	if (!wq->sq_umem) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
+		goto error;
+	}
+	/* Create send queue object for Rearm Queue.
+ */
+	sq_attr.state = MLX5_SQC_STATE_RST;
+	sq_attr.tis_lst_sz = 1;
+	sq_attr.tis_num = sh->tis->id;
+	sq_attr.cqn = wq->cq->id;
+	sq_attr.cd_master = 1;
+	sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
+	sq_attr.wq_attr.pd = sh->pdn;
+	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
+	sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
+	sq_attr.wq_attr.dbr_umem_valid = 1;
+	sq_attr.wq_attr.dbr_addr = umem_dbrec;
+	sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
+	sq_attr.wq_attr.wq_umem_valid = 1;
+	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
+	sq_attr.wq_attr.wq_umem_offset = 0;
+	wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
+	if (!wq->sq) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
+		goto error;
+	}
+	wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
+				   MLX5_SND_DBR * sizeof(uint32_t));
+	/* Build the WQEs in the Send Queue before going to Ready state. */
+	mlx5_txpp_fill_wqe_rearm_queue(sh);
+	/* Change queue state to ready. */
+	msq_attr.sq_state = MLX5_SQC_STATE_RST;
+	msq_attr.state = MLX5_SQC_STATE_RDY;
+	ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
+		goto error;
+	}
+	return 0;
+error:
+	ret = -rte_errno;
+	mlx5_txpp_destroy_rearm_queue(sh);
+	rte_errno = -ret;
+	return ret;
+}
+
+static void
 mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
 {
 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
@@ -331,8 +523,12 @@
 	ret = mlx5_txpp_create_clock_queue(sh);
 	if (ret)
 		goto exit;
+	ret = mlx5_txpp_create_rearm_queue(sh);
+	if (ret)
+		goto exit;
 exit:
 	if (ret) {
+		mlx5_txpp_destroy_rearm_queue(sh);
 		mlx5_txpp_destroy_clock_queue(sh);
 		mlx5_txpp_destroy_eqn(sh);
 		sh->txpp.tick = 0;
@@ -352,6 +548,7 @@
 static void
 mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
 {
+	mlx5_txpp_destroy_rearm_queue(sh);
 	mlx5_txpp_destroy_clock_queue(sh);
 	mlx5_txpp_destroy_eqn(sh);
 	sh->txpp.tick = 0;
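
For illustration only, not part of the patch: a minimal standalone C
sketch of the index arithmetic that mlx5_txpp_fill_wqe_rearm_queue()
programs into the SEND_EN and WAIT queue segments. The WQ_INDEX_WIDTH
and CQ_INDEX_WIDTH values below are assumed placeholder widths standing
in for the driver's MLX5_WQ_INDEX_WIDTH/MLX5_CQ_INDEX_WIDTH, and the
TXPP_REARM* macros just mirror the MLX5_TXPP_REARM* definitions added
to mlx5_defs.h above.

/* Standalone sketch: prints the slave WQE/CQE indices for each
 * SEND_EN/WAIT pair of the Rearm Queue. Build with any C compiler.
 */
#include <stdio.h>
#include <stdint.h>

#define WQ_INDEX_WIDTH 16 /* assumed width of the SQ WQE index */
#define CQ_INDEX_WIDTH 24 /* assumed width of the CQE index */
#define TXPP_REARM ((1UL << WQ_INDEX_WIDTH) / 4)
#define TXPP_REARM_SQ_SIZE (((1UL << CQ_INDEX_WIDTH) / TXPP_REARM) * 2)
#define TXPP_REARM_CQ_SIZE (TXPP_REARM_SQ_SIZE / 2)

int
main(void)
{
	uint32_t i;

	printf("Rearm SQ: %lu WQEs, Rearm CQ: %lu CQEs\n",
	       TXPP_REARM_SQ_SIZE, TXPP_REARM_CQ_SIZE);
	/* Show the first few WQE pairs; each pair is one SEND_EN plus
	 * one WAIT, as built by mlx5_txpp_fill_wqe_rearm_queue().
	 */
	for (i = 0; i < 8; i += 2) {
		/* SEND_EN enables Clock Queue WQEs up to this index. */
		uint32_t wqe_idx = (i * TXPP_REARM / 2 + TXPP_REARM) &
				   ((1 << WQ_INDEX_WIDTH) - 1);
		/* WAIT blocks until the Clock Queue CQE with this index. */
		uint32_t cqe_idx = (i * TXPP_REARM / 2 + TXPP_REARM / 2) &
				   ((1 << CQ_INDEX_WIDTH) - 1);

		printf("pair %u: SEND_EN up to Clock WQE %u, WAIT on Clock CQE %u\n",
		       i / 2, wqe_idx, cqe_idx);
	}
	return 0;
}

As described in the commit message, each pair first enables a further
portion of the Clock Queue (SEND_EN with the slave WQE index) and then
waits for the Clock Queue completion with the slave CQE index, so the
Clock Queue keeps being fed by hardware without CPU involvement.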