From patchwork Mon Oct 26 11:55:00 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 82190
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, matan@nvidia.com, akozyrev@nvidia.com,
 rasland@nvidia.com, orika@nvidia.com
Date: Mon, 26 Oct 2020 11:55:00 +0000
Message-Id: <1603713305-30991-2-git-send-email-viacheslavo@nvidia.com>
In-Reply-To: <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
References: <1603381371-5360-2-git-send-email-viacheslavo@nvidia.com>
 <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 1/6] net/mlx5: add extended Rx queue setup
 routine

Add a routine to set up an Rx queue with an extended receiving buffer
description. It allows the application to specify the desired segment
lengths, the data position offsets in the buffer, and a dedicated
memory pool for each segment.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.h      |  3 +++
 drivers/net/mlx5/mlx5_rxq.c  | 39 ++++++++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h | 13 ++++++++++++-
 3 files changed, 49 insertions(+), 6 deletions(-)
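Before the diff, it helps to see how this path is driven from the
application side. The following is a minimal sketch against the DPDK
20.11 ethdev buffer split API (union rte_eth_rxseg / struct
rte_eth_rxseg_split and RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT); the pool
names and the 128-byte split point are illustrative assumptions, not
part of the patch:

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: receive the first 128 bytes of every packet into mbufs from
 * hdr_pool and the remainder into mbufs from pay_pool. Error handling
 * is trimmed for brevity; hdr_pool/pay_pool are assumed to be created
 * elsewhere with rte_pktmbuf_pool_create().
 */
static int
setup_split_rxq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
		struct rte_mempool *hdr_pool, struct rte_mempool *pay_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	union rte_eth_rxseg segs[2];

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return -1;
	memset(segs, 0, sizeof(segs));
	segs[0].split.mp = hdr_pool;	/* header part, 128 bytes */
	segs[0].split.length = 128;
	segs[1].split.mp = pay_pool;	/* payload part, zero length means */
	segs[1].split.length = 0;	/* "whole data room of the mbuf"   */
	rxconf = dev_info.default_rxconf;
	rxconf.rx_seg = segs;
	rxconf.rx_nseg = 2;
	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
			   DEV_RX_OFFLOAD_SCATTER;
	/* The mp argument must be NULL when rx_seg/rx_nseg are used;
	 * this is the branch the routine below handles.
	 */
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, NULL);
}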
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index bb954c4..258be03 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -164,6 +164,9 @@ struct mlx5_stats_ctrl {
 /* Maximal size of aggregated LRO packet. */
 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
 
+/* Maximal number of segments to split. */
+#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
+
 /* LRO configurations structure. */
 struct mlx5_lro_config {
 	uint32_t supported:1; /* Whether LRO is supported. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 0176ece..72d76c1 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -744,12 +744,40 @@
 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct rte_eth_rxseg_split *rx_seg =
+		(struct rte_eth_rxseg_split *)conf->rx_seg;
+	struct rte_eth_rxseg_split rx_single = {.mp = mp};
+	uint16_t n_seg = conf->rx_nseg;
 	int res;
 
+	if (mp) {
+		/*
+		 * The parameters should be checked on rte_eth_dev layer.
+		 * If mp is specified it means the compatible configuration
+		 * without buffer split feature tuning.
+		 */
+		rx_seg = &rx_single;
+		n_seg = 1;
+	}
+	if (n_seg > 1) {
+		uint64_t offloads = conf->offloads |
+				    dev->data->dev_conf.rxmode.offloads;
+
+		/* The offloads should be checked on rte_eth_dev layer. */
+		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+			DRV_LOG(ERR, "port %u queue index %u split "
+				     "offload not configured",
+				     dev->data->port_id, idx);
+			rte_errno = ENOSPC;
+			return -rte_errno;
+		}
+		MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
+	}
 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
-	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
+	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
 	if (!rxq_ctrl) {
 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
 			dev->data->port_id, idx);
@@ -1342,11 +1370,11 @@ struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_rxconf *conf,
-	     struct rte_mempool *mp)
+	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
-	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+	unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = conf->offloads |
 			    dev->data->dev_conf.rxmode.offloads;
@@ -1358,7 +1386,8 @@ struct mlx5_rxq_ctrl *
 		RTE_PKTMBUF_HEADROOM;
 	unsigned int max_lro_size = 0;
 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
-	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
+	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
+			    !rx_seg[0].offset && !rx_seg[0].length;
 	unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
 		config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
 	unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
@@ -1544,7 +1573,7 @@ struct mlx5_rxq_ctrl *
 		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
-	tmpl->rxq.mp = mp;
+	tmpl->rxq.mp = rx_seg[0].mp;
 	tmpl->rxq.elts_n = log2above(desc);
 	tmpl->rxq.rq_repl_thresh =
 		MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 1b35a26..f204f7e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -101,6 +101,13 @@ enum mlx5_rqx_code {
 	MLX5_RXQ_CODE_DROPPED,
 };
 
+struct mlx5_eth_rxseg {
+	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
+	uint16_t length; /**< Segment data length, configures split point. */
+	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
+	uint32_t reserved; /**< Reserved field. */
+};
+
 /* RX queue descriptor. */
 struct mlx5_rxq_data {
 	unsigned int csum:1; /* Enable checksum offloading. */
@@ -158,6 +165,9 @@ struct mlx5_rxq_data {
 	uint32_t tunnel; /* Tunnel information. */
 	uint64_t flow_meta_mask;
 	int32_t flow_meta_offset;
+	uint32_t rxseg_n; /* Number of split segment descriptions. */
+	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
+	/* Buffer split segment descriptions - sizes, offsets, pools. */
 } __rte_cache_aligned;
 
 enum mlx5_rxq_type {
@@ -321,7 +331,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_rxconf *conf,
-				   struct rte_mempool *mp);
+				   const struct rte_eth_rxseg_split *rx_seg,
+				   uint16_t n_seg);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
From patchwork Mon Oct 26 11:55:01 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 82189
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, matan@nvidia.com, akozyrev@nvidia.com,
 rasland@nvidia.com, orika@nvidia.com
Date: Mon, 26 Oct 2020 11:55:01 +0000
Message-Id: <1603713305-30991-3-git-send-email-viacheslavo@nvidia.com>
In-Reply-To: <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
References: <1603381371-5360-2-git-send-email-viacheslavo@nvidia.com>
 <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 2/6] net/mlx5: configure Rx queue to support
 split

The scatter-gather elements must be configured accordingly to support
the buffer split feature. The application provides the desired settings
for the segments at the beginning of the packets, and the PMD pads the
buffer chain (if needed) with the attributes of the last specified
segment to accommodate a packet of maximal length.

Some limitations are implied. The MPRQ feature must be disengaged if
split is requested, because MPRQ neither supports pushing data to
dedicated pools nor follows flexible buffer sizes. The vectorized
rx_burst routines do not support scattering (they are extremely
simplified and work over a single segment only) and cannot handle
split either.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c | 85 +++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 79 insertions(+), 6 deletions(-)
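To make the padding rule concrete: with user segments of 128 and 2048
bytes and max_rx_pkt_len = 9000, six buffers (128 + 5 * 2048 = 10368
bytes) cover the maximal packet, and the count is then rounded up to
eight because the driver requires a power-of-two number of SGEs. A
simplified standalone model of the expansion loop in the diff below
(illustrative only, not driver code):

#include <stdint.h>
#include <rte_common.h>

/* Walk the user-described segments, then keep repeating the last one
 * until the maximal packet length is covered and the SGE count is a
 * power of two -- the same policy mlx5_rxq_new() applies.
 */
static unsigned int
count_rx_sges(const uint16_t *seg_len, unsigned int n_seg,
	      uint32_t max_rx_pkt_len)
{
	uint32_t tail_len = max_rx_pkt_len;
	unsigned int n = 0;

	do {
		/* Beyond the described segments reuse the last length. */
		uint32_t len = seg_len[n < n_seg ? n : n_seg - 1];

		tail_len -= RTE_MIN(tail_len, len);
		n++;
	} while (tail_len || !rte_is_power_of_2(n));
	return n; /* {128, 2048} with max 9000 -> 8 */
}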
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 72d76c1..7695d62 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1402,9 +1402,82 @@ struct mlx5_rxq_ctrl *
 	 * the vector Rx will not be used.
 	 */
 	uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+	const struct rte_eth_rxseg_split *qs_seg = rx_seg;
+	unsigned int tail_len;
 
-	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
-						    DEV_RX_OFFLOAD_SCATTER)) {
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
+	if (!tmpl) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+	/*
+	 * Build the array of actual buffer offsets and lengths.
+	 * Pad with the buffers from the last memory pool if
+	 * needed to handle max size packets, replace zero length
+	 * with the buffer length from the pool.
+	 */
+	tail_len = max_rx_pkt_len;
+	do {
+		struct mlx5_eth_rxseg *hw_seg =
+			&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
+		uint32_t buf_len, offset, seg_len;
+
+		/*
+		 * For the buffers beyond descriptions offset is zero,
+		 * the first buffer contains head room.
+		 */
+		buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
+		offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
+			 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
+		/*
+		 * For the buffers beyond descriptions the length is
+		 * the pool buffer length, zero lengths are replaced
+		 * with the pool buffer length as well.
+		 */
+		seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
+			  qs_seg->length ?
+			  qs_seg->length :
+			  (buf_len - offset);
+		/* Check is done in long int, no overflows. */
+		if (buf_len < seg_len + offset) {
+			DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
+				     "%u/%u can't be satisfied",
+				     dev->data->port_id, idx,
+				     qs_seg->length, qs_seg->offset);
+			rte_errno = EINVAL;
+			goto error;
+		}
+		if (seg_len > tail_len)
+			seg_len = buf_len - offset;
+		if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
+			DRV_LOG(ERR,
+				"port %u too many SGEs (%u) needed to handle"
+				" requested maximum packet size %u, the maximum"
+				" supported are %u", dev->data->port_id,
+				tmpl->rxq.rxseg_n, max_rx_pkt_len,
+				MLX5_MAX_RXQ_NSEG);
+			rte_errno = ENOTSUP;
+			goto error;
+		}
+		/* Build the actual scattering element in the queue object. */
+		hw_seg->mp = qs_seg->mp;
+		MLX5_ASSERT(offset <= UINT16_MAX);
+		MLX5_ASSERT(seg_len <= UINT16_MAX);
+		hw_seg->offset = (uint16_t)offset;
+		hw_seg->length = (uint16_t)seg_len;
+		/*
+		 * Advance the segment descriptor, the padding is based
+		 * on the attributes of the last descriptor.
+		 */
+		if (tmpl->rxq.rxseg_n < n_seg)
+			qs_seg++;
+		tail_len -= RTE_MIN(tail_len, seg_len);
+	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
+	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
+		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
@@ -1438,7 +1511,7 @@ struct mlx5_rxq_ctrl *
 	 *  - The number of descs is more than the number of strides.
 	 *  - max_rx_pkt_len plus overhead is less than the max size
 	 *    of a stride or mprq_stride_size is specified by a user.
-	 *    Need to nake sure that there are enough stides to encap
+	 *    Need to make sure that there are enough strides to encap
 	 *    the maximum packet size in case mprq_stride_size is set.
 	 *  - Otherwise, enable Rx scatter if necessary.
 	 */
@@ -1468,11 +1541,11 @@ struct mlx5_rxq_ctrl *
 			" strd_num_n = %u, strd_sz_n = %u",
 			dev->data->port_id, idx,
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-	} else if (max_rx_pkt_len <= first_mb_free_size) {
+	} else if (tmpl->rxq.rxseg_n == 1) {
+		MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
-		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1487,7 +1560,7 @@ struct mlx5_rxq_ctrl *
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
 		 */
-		sges_n = log2above((size / mb_len) + !!(size % mb_len));
+		sges_n = tmpl->rxq.rxseg_n;
 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"

From patchwork Mon Oct 26 11:55:02 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 82191
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, matan@nvidia.com, akozyrev@nvidia.com,
 rasland@nvidia.com, orika@nvidia.com
Date: Mon, 26 Oct 2020 11:55:02 +0000
Message-Id: <1603713305-30991-4-git-send-email-viacheslavo@nvidia.com>
In-Reply-To: <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
References: <1603381371-5360-2-git-send-email-viacheslavo@nvidia.com>
 <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 3/6] net/mlx5: register multiple pool for Rx
 queue

The split feature for receiving packets was added to the mlx5 PMD.
Now an Rx queue can receive data into buffers belonging to different
pools, and the memory of all the involved pools must be registered
for DMA operations to allow the hardware to store the data.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_mr.c      |  3 +++
 drivers/net/mlx5/mlx5_trigger.c | 20 ++++++++++++--------
 2 files changed, 15 insertions(+), 8 deletions(-)
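The per-pool registration boils down to walking every memory chunk of
a mempool, which is what mlx5_mr_update_mp() in the diff below does
through rte_mempool_mem_iter(). A small diagnostic sketch of that
traversal, using the public librte_mempool callback signature (the
function names here are illustrative, not driver code):

#include <stdio.h>
#include <rte_common.h>
#include <rte_mempool.h>

/* Print the memory ranges of a mempool that would need to be covered
 * by DMA memory regions; mirrors the traversal done during MR update.
 */
static void
chunk_cb(struct rte_mempool *mp, void *opaque,
	 struct rte_mempool_memhdr *memhdr, unsigned int mem_idx)
{
	RTE_SET_USED(opaque);
	printf("mempool %s chunk %u: addr %p len %zu\n",
	       mp->name, mem_idx, memhdr->addr, memhdr->len);
}

static void
dump_dma_ranges(struct rte_mempool *mp)
{
	rte_mempool_mem_iter(mp, chunk_cb, NULL);
}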
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index dbcf0aa..c308ecc 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -536,6 +536,9 @@ struct mr_update_mp_data {
 		.ret = 0,
 	};
 
+	DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
+		       "having %u chunks.", dev->data->port_id,
+		       mp->name, mp->nb_mem_chunks);
 	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
 	if (data.ret < 0 && rte_errno == ENXIO) {
 		/* Mempool may have externally allocated memory. */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 7735f02..19f2d66 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -145,18 +145,22 @@
 			dev->data->port_id, priv->sh->device_attr.max_sge);
 	for (i = 0; i != priv->rxqs_n; ++i) {
 		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
-		struct rte_mempool *mp;
 
 		if (!rxq_ctrl)
 			continue;
 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-			/* Pre-register Rx mempool. */
-			mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-			     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
-			DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
-				" having %u chunks.", dev->data->port_id,
-				rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
-			mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+			/* Pre-register Rx mempools. */
+			if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+						  rxq_ctrl->rxq.mprq_mp);
+			} else {
+				uint32_t s;
+
+				for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+					mlx5_mr_update_mp
+						(dev, &rxq_ctrl->rxq.mr_ctrl,
+						 rxq_ctrl->rxq.rxseg[s].mp);
+			}
 			ret = rxq_alloc_elts(rxq_ctrl);
 			if (ret)
 				goto error;

From patchwork Mon Oct 26 11:55:03 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 82192
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, matan@nvidia.com, akozyrev@nvidia.com,
 rasland@nvidia.com, orika@nvidia.com
Date: Mon, 26 Oct 2020 11:55:03 +0000
Message-Id: <1603713305-30991-5-git-send-email-viacheslavo@nvidia.com>
In-Reply-To: <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
References: <1603381371-5360-2-git-send-email-viacheslavo@nvidia.com>
 <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 4/6] net/mlx5: update Rx datapath to support
 split
Only the regular rx_burst routine is updated to support split, because
the vectorized ones do not support scatter and MPRQ does not support
split at all.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c  | 11 +++++------
 drivers/net/mlx5/mlx5_rxtx.c |  3 ++-
 2 files changed, 7 insertions(+), 7 deletions(-)
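The invariant behind the hunks below: element i of the queue's elts[]
ring holds segment (i % sges_n) of its packet, so the replenished mbuf
must come from that segment's pool and carry that segment's offset and
length. A standalone sketch of the rule using public mbuf fields (the
mlx5 code itself uses internal macros such as SET_DATA_OFF; struct
rxseg here is illustrative):

#include <rte_mbuf.h>

struct rxseg {
	struct rte_mempool *mp; /* pool for this segment slot */
	uint16_t offset;        /* data offset inside the mbuf */
	uint16_t length;        /* fixed data length of the slot */
};

/* Allocate the mbuf for ring element i according to its segment slot. */
static struct rte_mbuf *
alloc_split_elt(const struct rxseg *rxseg, unsigned int sges_n,
		unsigned int i)
{
	const struct rxseg *seg = &rxseg[i % sges_n];
	struct rte_mbuf *buf = rte_pktmbuf_alloc(seg->mp);

	if (buf == NULL)
		return NULL;
	buf->data_off = seg->offset; /* not just headroom on segment 0 */
	buf->data_len = seg->length;
	buf->pkt_len = seg->length;
	return buf;
}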
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 7695d62..f9aed38 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -212,9 +212,10 @@
 
 	/* Iterate on segments. */
 	for (i = 0; (i != elts_n); ++i) {
+		struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
 		struct rte_mbuf *buf;
 
-		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
+		buf = rte_pktmbuf_alloc(seg->mp);
 		if (buf == NULL) {
 			DRV_LOG(ERR, "port %u empty mbuf pool",
 				PORT_ID(rxq_ctrl->priv));
@@ -227,12 +228,10 @@
 		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
 		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
 		MLX5_ASSERT(!buf->next);
-		/* Only the first segment keeps headroom. */
-		if (i % sges_n)
-			SET_DATA_OFF(buf, 0);
+		SET_DATA_OFF(buf, seg->offset);
 		PORT(buf) = rxq_ctrl->rxq.port_id;
-		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
-		PKT_LEN(buf) = DATA_LEN(buf);
+		DATA_LEN(buf) = seg->length;
+		PKT_LEN(buf) = seg->length;
 		NB_SEGS(buf) = 1;
 		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index dbb427b..2ffacf8 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1356,7 +1356,8 @@ enum mlx5_txcmp_code {
 			rte_prefetch0(seg);
 			rte_prefetch0(cqe);
 			rte_prefetch0(wqe);
-			rep = rte_mbuf_raw_alloc(rxq->mp);
+			/* Allocate the buf from the same pool. */
+			rep = rte_mbuf_raw_alloc(seg->pool);
 			if (unlikely(rep == NULL)) {
 				++rxq->stats.rx_nombuf;
 				if (!pkt) {

From patchwork Mon Oct 26 11:55:04 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 82194
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, matan@nvidia.com, akozyrev@nvidia.com,
 rasland@nvidia.com, orika@nvidia.com
Date: Mon, 26 Oct 2020 11:55:04 +0000
Message-Id: <1603713305-30991-6-git-send-email-viacheslavo@nvidia.com>
In-Reply-To: <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
References: <1603381371-5360-2-git-send-email-viacheslavo@nvidia.com>
 <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 5/6] net/mlx5: report Rx segmentation
 capabilities

Add rte_eth_dev_info->rx_seg_capa parameters:
  - receiving to multiple pools is supported
  - buffer offsets are supported
  - no offset alignment requirement
  - reports the maximal number of segments
  - reports the buffer split offload flag

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_ethdev.c | 4 ++++
 drivers/net/mlx5/mlx5_rxq.c    | 1 +
 2 files changed, 5 insertions(+)
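On the application side these capabilities are read back via
rte_eth_dev_info_get(); a minimal validation sketch, assuming the
20.11 rte_eth_rxseg_capa layout (the field names match the diff
below):

#include <rte_ethdev.h>

/* Return non-zero when the port can split Rx packets into n_seg
 * segments drawn from multiple mempools.
 */
static int
rx_split_supported(uint16_t port_id, uint16_t n_seg)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return 0;
	if (!(info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT))
		return 0;
	return info.rx_seg_capa.multi_pools &&
	       n_seg <= info.rx_seg_capa.max_nseg;
}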
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index c70cd30..fc04fc8 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -306,6 +306,10 @@
 	info->max_tx_queues = max;
 	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
 	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
+	info->rx_seg_capa.max_nseg = MLX5_MAX_RXQ_NSEG;
+	info->rx_seg_capa.multi_pools = 1;
+	info->rx_seg_capa.offset_allowed = 1;
+	info->rx_seg_capa.offset_align_log2 = 0;
 	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
 				 info->rx_queue_offload_capa);
 	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f9aed38..1cc477a 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -402,6 +402,7 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
 			     DEV_RX_OFFLOAD_TIMESTAMP |
 			     DEV_RX_OFFLOAD_JUMBO_FRAME |
 			     DEV_RX_OFFLOAD_RSS_HASH);

From patchwork Mon Oct 26 11:55:05 2020
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 82193
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, matan@nvidia.com, akozyrev@nvidia.com,
 rasland@nvidia.com, orika@nvidia.com
Date: Mon, 26 Oct 2020 11:55:05 +0000
Message-Id: <1603713305-30991-7-git-send-email-viacheslavo@nvidia.com>
In-Reply-To: <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
References: <1603381371-5360-2-git-send-email-viacheslavo@nvidia.com>
 <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 6/6] doc: add buffer split feature limitation
 to mlx5 guide

The buffer split feature is mentioned in the mlx5 PMD documentation;
the limitation description is added as well.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 doc/guides/nics/mlx5.rst | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 66524f1..8dc7c62 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -59,7 +59,8 @@ Features
 
 - Multi arch support: x86_64, POWER8, ARMv8, i686.
 - Multiple TX and RX queues.
-- Support for scattered TX and RX frames.
+- Support for scattered TX frames.
+- Advanced support for scattered Rx frames with tunable buffer attributes.
 - IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues.
 - RSS using different combinations of fields: L3 only, L4 only or both,
   and source only, destination only or both.
@@ -187,6 +188,9 @@ Limitations
   the device. In case of ungraceful program termination, some entries may
   remain present and should be removed manually by other means.
 
+- Buffer split offload is supported with regular Rx burst routine only,
+  no MPRQ feature or vectorized code can be engaged.
+
 - When Multi-Packet Rx queue is configured (``mprq_en``), a Rx packet can be
   externally attached to a user-provided mbuf with having EXT_ATTACHED_MBUF
   in ol_flags. As the mempool for the external buffer is managed by PMD, all the