From patchwork Sun Jul 21 14:24:55 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Slava Ovsiienko <viacheslavo@mellanox.com>
X-Patchwork-Id: 56806
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
To: dev@dpdk.org
Cc: yskoh@mellanox.com
Date: Sun, 21 Jul 2019 14:24:55 +0000
Message-Id: <1563719100-368-4-git-send-email-viacheslavo@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1563719100-368-1-git-send-email-viacheslavo@mellanox.com>
References: <1563346400-1762-1-git-send-email-viacheslavo@mellanox.com>
 <1563719100-368-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH v4 3/8] net/mlx5: update Tx datapath definitions

This patch updates the Tx datapath definitions, mostly hardware
related. The Tx descriptor structures are redefined with the required
fields, and the size definitions are renamed to reflect their meaning
more appropriately.

This is a preparation step before introducing the new Tx datapath
implementation.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
---
 drivers/net/mlx5/mlx5_defs.h |   2 +-
 drivers/net/mlx5/mlx5_prm.h  | 164 +++++++++++++++++++++++++++++++++++++++----
 2 files changed, 152 insertions(+), 14 deletions(-)
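A quick way to see what the new naming buys is to check the size
relationships at compile time. The sketch below is not part of the
patch: it mirrors the segment structures introduced further down,
substitutes plain GCC attributes for DPDK's __rte_packed and
__rte_aligned, reduces the Ethernet Segment to its word-level view for
brevity, and asserts that every segment is one 16-byte WSEG and that
the title WQE fills a whole 64-byte WQEBB.

/*
 * Standalone sketch (not part of the patch) verifying the segment
 * size relationships the patch relies on.
 */
#include <stdint.h>

#define MLX5_WSEG_SIZE 16u
#define MLX5_DSEG_MIN_INLINE_SIZE 12u
#define MLX5_ESEG_EXTRA_DATA_SIZE 32u

/* Control Segment, one WSEG. */
struct mlx5_wqe_cseg {
	uint32_t opcode;
	uint32_t sq_ds;
	uint32_t flags;
	uint32_t misc;
} __attribute__((packed, aligned(MLX5_WSEG_SIZE)));

/* Data Segment: pointer descriptor or minimal inline descriptor. */
struct mlx5_wqe_dseg {
	uint32_t bcount;
	union {
		uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
		struct {
			uint32_t lkey; /* Memory region key. */
			uint64_t pbuf; /* Pointer to data buffer. */
		} __attribute__((packed));
	};
} __attribute__((packed));

/* Ethernet Segment, word-level view only (simplified here). */
struct mlx5_wqe_eseg {
	uint32_t offsets;
	uint32_t flags;
	uint32_t flow_metadata;
	uint32_t inline_hdr;
} __attribute__((packed));

/* The title WQEBB: Control + Ethernet + two Data Segments. */
struct mlx5_wqe {
	struct mlx5_wqe_cseg cseg;
	struct mlx5_wqe_eseg eseg;
	union {
		struct mlx5_wqe_dseg dseg[2];
		uint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];
	};
} __attribute__((packed));

/* Each segment is exactly one WSEG; the title WQE is one WQEBB. */
_Static_assert(sizeof(struct mlx5_wqe_cseg) == MLX5_WSEG_SIZE, "cseg");
_Static_assert(sizeof(struct mlx5_wqe_dseg) == MLX5_WSEG_SIZE, "dseg");
_Static_assert(sizeof(struct mlx5_wqe_eseg) == MLX5_WSEG_SIZE, "eseg");
_Static_assert(sizeof(struct mlx5_wqe) == 4 * MLX5_WSEG_SIZE, "wqe");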
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 6861304..873a595 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -58,7 +58,7 @@
 #define MLX5_MAX_XSTATS 32
 
 /* Maximum Packet headers size (L2+L3+L4) for TSO. */
-#define MLX5_MAX_TSO_HEADER 192
+#define MLX5_MAX_TSO_HEADER (128u + 34u)
 
 /* Threshold of buffer replenishment for vectorized Rx. */
 #define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index dfd9317..97abdb2 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -39,14 +39,85 @@
 /* Invalidate a CQE. */
 #define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
 
-/* WQE DWORD size */
-#define MLX5_WQE_DWORD_SIZE 16
-
-/* WQE size */
-#define MLX5_WQE_SIZE (4 * MLX5_WQE_DWORD_SIZE)
+/* WQE Segment sizes in bytes. */
+#define MLX5_WSEG_SIZE 16u
+#define MLX5_WQE_CSEG_SIZE sizeof(struct mlx5_wqe_cseg)
+#define MLX5_WQE_DSEG_SIZE sizeof(struct mlx5_wqe_dseg)
+#define MLX5_WQE_ESEG_SIZE sizeof(struct mlx5_wqe_eseg)
+
+/* WQE/WQEBB size in bytes. */
+#define MLX5_WQE_SIZE sizeof(struct mlx5_wqe)
+
+/*
+ * Max size of a WQE session.
+ * Absolute maximum size is 63 (MLX5_DSEG_MAX) segments,
+ * the WQE size field in Control Segment is 6 bits wide.
+ */
+#define MLX5_WQE_SIZE_MAX (60 * MLX5_WSEG_SIZE)
+
+/*
+ * Default minimum number of Tx queues for inlining packets.
+ * If there are fewer queues than specified we assume there
+ * are not enough CPU resources (cycles) to perform inlining,
+ * the PCIe throughput is not supposed to be the bottleneck
+ * and inlining is disabled.
+ */
+#define MLX5_INLINE_MAX_TXQS 8u
+#define MLX5_INLINE_MAX_TXQS_BLUEFIELD 16u
+
+/*
+ * Default packet length threshold to be inlined with
+ * enhanced MPW. If the packet length exceeds the threshold
+ * the data are not inlined. Should be aligned to the WQEBB
+ * boundary, accounting for the title Control and Ethernet
+ * segments.
+ */
+#define MLX5_EMPW_DEF_INLINE_LEN (3U * MLX5_WQE_SIZE + \
+				  MLX5_DSEG_MIN_INLINE_SIZE - \
+				  MLX5_WQE_DSEG_SIZE)
+/*
+ * Maximal inline data length sent with enhanced MPW.
+ * Is based on the maximal WQE size.
+ */
+#define MLX5_EMPW_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
+				  MLX5_WQE_CSEG_SIZE - \
+				  MLX5_WQE_ESEG_SIZE - \
+				  MLX5_WQE_DSEG_SIZE + \
+				  MLX5_DSEG_MIN_INLINE_SIZE)
+/*
+ * Minimal amount of packets to be sent with EMPW.
+ * This limits the minimal required size of sent EMPW.
+ * If there are not enough resources to build the minimal
+ * EMPW the sending loop exits.
+ */
+#define MLX5_EMPW_MIN_PACKETS (2 + 3 * 4)
+#define MLX5_EMPW_MAX_PACKETS ((MLX5_WQE_SIZE_MAX - \
+				MLX5_WQE_CSEG_SIZE - \
+				MLX5_WQE_ESEG_SIZE) / \
+				MLX5_WSEG_SIZE)
+/*
+ * Default packet length threshold to be inlined with
+ * ordinary SEND. Inlining saves the MR key search
+ * and the extra PCIe data fetch transaction, but
+ * consumes CPU cycles.
+ */
+#define MLX5_SEND_DEF_INLINE_LEN (5U * MLX5_WQE_SIZE + \
+				  MLX5_ESEG_MIN_INLINE_SIZE - \
+				  MLX5_WQE_CSEG_SIZE - \
+				  MLX5_WQE_ESEG_SIZE - \
+				  MLX5_WQE_DSEG_SIZE)
+/*
+ * Maximal inline data length sent with ordinary SEND.
+ * Is based on the maximal WQE size.
+ */
+#define MLX5_SEND_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
+				  MLX5_WQE_CSEG_SIZE - \
+				  MLX5_WQE_ESEG_SIZE - \
+				  MLX5_WQE_DSEG_SIZE + \
+				  MLX5_ESEG_MIN_INLINE_SIZE)
 
-#define MLX5_OPC_MOD_ENHANCED_MPSW 0
-#define MLX5_OPCODE_ENHANCED_MPSW 0x29
+/* Missing in mlx5dv.h, should be defined here. */
+#define MLX5_OPCODE_ENHANCED_MPSW 0x29u
 
 /* CQE value to inform that VLAN is stripped. */
 #define MLX5_CQE_VLAN_STRIPPED (1u << 0)
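For reference, plugging the concrete sizes into the macros just added
shows what the thresholds evaluate to. This scratch program is not
part of the patch; MLX5_WQE_SIZE is hardcoded to 64 bytes here on the
assumption that sizeof(struct mlx5_wqe), defined later in this patch,
is one WQEBB, and the three segment sizes are each one 16-byte WSEG.

/* Scratch program (not part of the patch) evaluating the thresholds. */
#include <stdio.h>

#define MLX5_WSEG_SIZE            16u
#define MLX5_WQE_SIZE             64u /* sizeof(struct mlx5_wqe) */
#define MLX5_WQE_CSEG_SIZE        16u
#define MLX5_WQE_DSEG_SIZE        16u
#define MLX5_WQE_ESEG_SIZE        16u
#define MLX5_WQE_SIZE_MAX         (60 * MLX5_WSEG_SIZE) /* 960 bytes */
#define MLX5_DSEG_MIN_INLINE_SIZE 12u
#define MLX5_ESEG_MIN_INLINE_SIZE 18u

int main(void)
{
	/* Default EMPW inline threshold: 3*64 + 12 - 16 = 188 bytes. */
	printf("EMPW default inline: %u\n",
	       3U * MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE -
	       MLX5_WQE_DSEG_SIZE);
	/* Maximal EMPW inline length: 960 - 48 + 12 = 924 bytes. */
	printf("EMPW max inline:     %u\n",
	       MLX5_WQE_SIZE_MAX - MLX5_WQE_CSEG_SIZE -
	       MLX5_WQE_ESEG_SIZE - MLX5_WQE_DSEG_SIZE +
	       MLX5_DSEG_MIN_INLINE_SIZE);
	/* Maximal packets per EMPW: (960 - 32) / 16 = 58. */
	printf("EMPW max packets:    %u\n",
	       (MLX5_WQE_SIZE_MAX - MLX5_WQE_CSEG_SIZE -
		MLX5_WQE_ESEG_SIZE) / MLX5_WSEG_SIZE);
	/* Default SEND inline threshold: 5*64 + 18 - 48 = 290 bytes. */
	printf("SEND default inline: %u\n",
	       5U * MLX5_WQE_SIZE + MLX5_ESEG_MIN_INLINE_SIZE -
	       MLX5_WQE_CSEG_SIZE - MLX5_WQE_ESEG_SIZE -
	       MLX5_WQE_DSEG_SIZE);
	return 0;
}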
@@ -114,6 +185,12 @@
 /* Inner L3 type is IPV6. */
 #define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
 
+/* VLAN insertion flag. */
+#define MLX5_ETH_WQE_VLAN_INSERT (1u << 31)
+
+/* Data inline segment flag. */
+#define MLX5_ETH_WQE_DATA_INLINE (1u << 31)
+
 /* Is flow mark valid. */
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 #define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
@@ -130,12 +207,21 @@
 /* Default mark value used when none is provided. */
 #define MLX5_FLOW_MARK_DEFAULT 0xffffff
 
-/* Maximum number of DS in WQE. */
+/* Maximum number of DS in WQE. Limited by the 6-bit field. */
 #define MLX5_DSEG_MAX 63
 
 /* The completion mode offset in the WQE control segment line 2. */
 #define MLX5_COMP_MODE_OFFSET 2
 
+/* Amount of data bytes in the minimal inline data segment. */
+#define MLX5_DSEG_MIN_INLINE_SIZE 12u
+
+/* Amount of data bytes in the minimal inline eth segment. */
+#define MLX5_ESEG_MIN_INLINE_SIZE 18u
+
+/* Amount of data bytes after the eth data segment. */
+#define MLX5_ESEG_EXTRA_DATA_SIZE 32u
+
 /* Completion mode. */
 enum mlx5_completion_mode {
 	MLX5_COMP_ONLY_ERR = 0x0,
@@ -144,11 +230,6 @@ enum mlx5_completion_mode {
 	MLX5_COMP_CQE_AND_EQE = 0x3,
 };
 
-/* Small common part of the WQE. */
-struct mlx5_wqe {
-	uint32_t ctrl[4];
-};
-
 /* MPW mode. */
 enum mlx5_mpw_mode {
 	MLX5_MPW_DISABLED,
@@ -156,6 +237,63 @@ enum mlx5_mpw_mode {
 	MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */
 };
 
+/* WQE Control segment. */
+struct mlx5_wqe_cseg {
+	uint32_t opcode;
+	uint32_t sq_ds;
+	uint32_t flags;
+	uint32_t misc;
+} __rte_packed __rte_aligned(MLX5_WSEG_SIZE);
+
+/* Header of the data segment. Minimal size Data Segment. */
+struct mlx5_wqe_dseg {
+	uint32_t bcount;
+	union {
+		uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
+		struct {
+			uint32_t lkey;
+			uint64_t pbuf;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+/* Subset of the WQE Ethernet Segment. */
+struct mlx5_wqe_eseg {
+	union {
+		struct {
+			uint32_t swp_offs;
+			uint8_t cs_flags;
+			uint8_t swp_flags;
+			uint16_t mss;
+			uint32_t metadata;
+			uint16_t inline_hdr_sz;
+			union {
+				uint16_t inline_data;
+				uint16_t vlan_tag;
+			};
+		} __rte_packed;
+		struct {
+			uint32_t offsets;
+			uint32_t flags;
+			uint32_t flow_metadata;
+			uint32_t inline_hdr;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+/* The title WQEBB, header of the WQE. */
+struct mlx5_wqe {
+	union {
+		struct mlx5_wqe_cseg cseg;
+		uint32_t ctrl[4];
+	};
+	struct mlx5_wqe_eseg eseg;
+	union {
+		struct mlx5_wqe_dseg dseg[2];
+		uint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];
+	};
+} __rte_packed;
+
 /* WQE for Multi-Packet RQ. */
 struct mlx5_wqe_mprq {
 	struct mlx5_wqe_srq_next_seg next_seg;
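To illustrate the new Data Segment union, the minimal sketch below
(again not part of the patch) fills the same 16-byte segment two ways:
as a pointer descriptor (lkey plus buffer address) or as an inline
descriptor carrying up to MLX5_DSEG_MIN_INLINE_SIZE bytes, with bit 31
of the byte count marking the inline case per the
MLX5_ETH_WQE_DATA_INLINE flag defined above. The dseg_set_pointer()
and dseg_set_inline() helpers are hypothetical, and the be32()/be64()
byte-swappers assume a little-endian host, standing in for DPDK's
rte_cpu_to_be_32()/rte_cpu_to_be_64().

/* Minimal usage sketch (not part of the patch). */
#include <stdint.h>
#include <string.h>

#define MLX5_DSEG_MIN_INLINE_SIZE 12u
#define MLX5_ETH_WQE_DATA_INLINE (1u << 31)

struct mlx5_wqe_dseg {
	uint32_t bcount;
	union {
		uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
		struct {
			uint32_t lkey;
			uint64_t pbuf;
		} __attribute__((packed));
	};
} __attribute__((packed));

/* Little-endian stand-ins for rte_cpu_to_be_32()/rte_cpu_to_be_64(). */
static inline uint32_t be32(uint32_t v) { return __builtin_bswap32(v); }
static inline uint64_t be64(uint64_t v) { return __builtin_bswap64(v); }

/* Point the segment at an external data buffer. */
static void
dseg_set_pointer(struct mlx5_wqe_dseg *ds, uint32_t lkey,
		 const void *buf, uint32_t len)
{
	ds->bcount = be32(len);
	ds->lkey = be32(lkey);
	ds->pbuf = be64((uintptr_t)buf);
}

/* Embed the first bytes of the packet directly in the segment. */
static void
dseg_set_inline(struct mlx5_wqe_dseg *ds, const void *buf, uint32_t len)
{
	ds->bcount = be32(len | MLX5_ETH_WQE_DATA_INLINE);
	memcpy(ds->inline_data, buf,
	       len < MLX5_DSEG_MIN_INLINE_SIZE ?
	       len : MLX5_DSEG_MIN_INLINE_SIZE);
}

int main(void)
{
	static const uint8_t pkt[64];
	struct mlx5_wqe_dseg ds;

	dseg_set_pointer(&ds, 0x1234, pkt, sizeof(pkt)); /* external buffer */
	dseg_set_inline(&ds, pkt, 12);                   /* inline data */
	return 0;
}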