From patchwork Fri Oct 7 17:29:18 2022
X-Patchwork-Submitter: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
X-Patchwork-Id: 117576
X-Patchwork-Delegate: thomas@monjalon.net
From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
To: Thomas Monjalon <thomas@monjalon.net>, Ferruh Yigit
Cc: dev@dpdk.org
Subject: [PATCH v8 1/4] ethdev: factor out helper function to check Rx mempool
Date: Fri, 7 Oct 2022 20:29:18 +0300
Message-Id: <20221007172921.3325250-2-andrew.rybchenko@oktetlabs.ru>
In-Reply-To: <20221007172921.3325250-1-andrew.rybchenko@oktetlabs.ru>
References: <20221006170126.1322852-1-hpothula@marvell.com>
 <20221007172921.3325250-1-andrew.rybchenko@oktetlabs.ru>

Avoid duplication of the Rx mempool checks by factoring them out into a
helper function.

Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
---
 lib/ethdev/rte_ethdev.c | 82 +++++++++++++++++++++--------------------
 1 file changed, 42 insertions(+), 40 deletions(-)

diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 01fac713a2..b3dba291e7 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -1648,6 +1648,36 @@ rte_eth_dev_is_removed(uint16_t port_id)
 	return ret;
 }
 
+static int
+rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
+			 uint16_t min_length)
+{
+	uint16_t data_room_size;
+
+	/*
+	 * Check the size of the mbuf data buffer, this value
+	 * must be provided in the private data of the memory pool.
+	 * First check that the memory pool(s) has a valid private data.
+	 */
+	if (mp->private_data_size <
+			sizeof(struct rte_pktmbuf_pool_private)) {
+		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
+			       mp->name, mp->private_data_size,
+			       (unsigned int)
+			       sizeof(struct rte_pktmbuf_pool_private));
+		return -ENOSPC;
+	}
+	data_room_size = rte_pktmbuf_data_room_size(mp);
+	if (data_room_size < offset + min_length) {
+		RTE_ETHDEV_LOG(ERR,
+			       "%s mbuf_data_room_size %u < %u (%u + %u)\n",
+			       mp->name, data_room_size,
+			       offset + min_length, offset, min_length);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int
 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
 			     uint16_t n_seg, uint32_t *mbp_buf_size,
@@ -1657,6 +1687,7 @@ rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
 	struct rte_mempool *mp_first;
 	uint32_t offset_mask;
 	uint16_t seg_idx;
+	int ret;
 
 	if (n_seg > seg_capa->max_nseg) {
 		RTE_ETHDEV_LOG(ERR,
@@ -1696,25 +1727,14 @@ rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
 				return -EINVAL;
 			}
 		}
-		if (mpl->private_data_size <
-				sizeof(struct rte_pktmbuf_pool_private)) {
-			RTE_ETHDEV_LOG(ERR,
-				       "%s private_data_size %u < %u\n",
-				       mpl->name, mpl->private_data_size,
-				       (unsigned int)sizeof
-				       (struct rte_pktmbuf_pool_private));
-			return -ENOSPC;
-		}
+
 		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
 		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
 		length = length != 0 ? length : *mbp_buf_size;
-		if (*mbp_buf_size < length + offset) {
-			RTE_ETHDEV_LOG(ERR,
-				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
-				       mpl->name, *mbp_buf_size,
-				       length + offset, length, offset);
-			return -EINVAL;
-		}
+
+		ret = rte_eth_check_rx_mempool(mpl, offset, length);
+		if (ret != 0)
+			return ret;
 	}
 	return 0;
 }
@@ -1753,31 +1773,13 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 				       "Ambiguous segment configuration\n");
 			return -EINVAL;
 		}
-		/*
-		 * Check the size of the mbuf data buffer, this value
-		 * must be provided in the private data of the memory pool.
-		 * First check that the memory pool(s) has a valid private data.
-		 */
-		if (mp->private_data_size <
-				sizeof(struct rte_pktmbuf_pool_private)) {
-			RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
-				mp->name, mp->private_data_size,
-				(unsigned int)
-				sizeof(struct rte_pktmbuf_pool_private));
-			return -ENOSPC;
-		}
+
+		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
+					       dev_info.min_rx_bufsize);
+		if (ret != 0)
+			return ret;
+
 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
-		if (mbp_buf_size < dev_info.min_rx_bufsize +
-				   RTE_PKTMBUF_HEADROOM) {
-			RTE_ETHDEV_LOG(ERR,
-				       "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
-				       mp->name, mbp_buf_size,
-				       RTE_PKTMBUF_HEADROOM +
-				       dev_info.min_rx_bufsize,
-				       RTE_PKTMBUF_HEADROOM,
-				       dev_info.min_rx_bufsize);
-			return -EINVAL;
-		}
 	} else {
 		const struct rte_eth_rxseg_split *rx_seg;
 		uint16_t n_seg;
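
For reference, the factored-out helper enforces data_room_size >= offset +
min_length on top of the pktmbuf private-data check. Below is a minimal
sketch, not part of the patch, of creating an Rx mempool that satisfies both
checks in the single-pool case; the pool name, mbuf count and cache size are
illustrative, and rte_pktmbuf_pool_create() already reserves the pktmbuf
private data that the first check looks for:

	static struct rte_mempool *
	create_rx_pool(uint16_t port_id, unsigned int n_mbufs, int socket_id)
	{
		struct rte_eth_dev_info dev_info;

		if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
			return NULL;

		/* Data room must cover RTE_PKTMBUF_HEADROOM (the offset the
		 * single-pool path passes) plus the device minimum Rx buffer
		 * size (the min_length argument of the helper).
		 */
		return rte_pktmbuf_pool_create("rx_pool", n_mbufs, 256, 0,
					       RTE_PKTMBUF_HEADROOM +
					       dev_info.min_rx_bufsize,
					       socket_id);
	}
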
From patchwork Fri Oct 7 17:29:19 2022
X-Patchwork-Submitter: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
X-Patchwork-Id: 117577
X-Patchwork-Delegate: thomas@monjalon.net
From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
To: Thomas Monjalon <thomas@monjalon.net>, Ferruh Yigit
Cc: dev@dpdk.org, Hanumanth Pothula <hpothula@marvell.com>
Subject: [PATCH v8 2/4] ethdev: support multiple mbuf pools per Rx queue
Date: Fri, 7 Oct 2022 20:29:19 +0300
Message-Id: <20221007172921.3325250-3-andrew.rybchenko@oktetlabs.ru>
In-Reply-To: <20221007172921.3325250-1-andrew.rybchenko@oktetlabs.ru>
References: <20221006170126.1322852-1-hpothula@marvell.com>
 <20221007172921.3325250-1-andrew.rybchenko@oktetlabs.ru>

From: Hanumanth Pothula <hpothula@marvell.com>

Some hardware can choose a memory pool based on the packet's size.
This is often useful for saving memory: the application can create
different pools to steer specific packet sizes, enabling more
efficient usage of memory.

For example, say the HW supports three pools:
 - pool-1: buffer size up to 2K
 - pool-2: buffer size > 2K and < 4K
 - pool-3: buffer size > 4K

Here, pool-1 can accommodate packets with sizes < 2K, pool-2 packets
with sizes between 2K and 4K, and pool-3 packets with sizes > 4K.

With the multiple mempool capability enabled in SW, an application may
create three pools of different sizes and pass them to the PMD,
allowing the PMD to program the HW based on the packet lengths: packets
shorter than 2K are received on pool-1, packets with lengths between
2K and 4K on pool-2, and packets longer than 4K on pool-3.
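
To make the new API concrete, here is a minimal application-side sketch
(not part of the patch; pool names, sizes and descriptor counts are
illustrative, and error handling is omitted):

	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rx_conf = { 0 };
	struct rte_mempool *pools[3];

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.max_rx_mempools < 3)
		return -ENOTSUP; /* driver takes fewer pools per Rx queue */

	/* Data room of each pool covers headroom plus its size class */
	pools[0] = rte_pktmbuf_pool_create("pool_2k", 8192, 256, 0,
					   RTE_PKTMBUF_HEADROOM + 2048,
					   socket_id);
	pools[1] = rte_pktmbuf_pool_create("pool_4k", 4096, 256, 0,
					   RTE_PKTMBUF_HEADROOM + 4096,
					   socket_id);
	pools[2] = rte_pktmbuf_pool_create("pool_9k", 2048, 256, 0,
					   RTE_PKTMBUF_HEADROOM + 9216,
					   socket_id);

	rx_conf.rx_mempools = pools;
	rx_conf.rx_nmempool = 3;
	/* The mp argument must be NULL: the setup code below enforces
	 * exactly one source of Rx buffers per queue.
	 */
	rte_eth_rx_queue_setup(port_id, 0, 1024, socket_id, &rx_conf, NULL);
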
Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
---
 doc/guides/rel_notes/release_22_11.rst |  6 ++
 lib/ethdev/rte_ethdev.c                | 81 ++++++++++++++++++++++----
 lib/ethdev/rte_ethdev.h                | 29 +++++++++
 3 files changed, 105 insertions(+), 11 deletions(-)

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index e165c45367..df32a6a5e7 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -92,6 +92,12 @@ New Features
   ``rte_eth_cman_config_set()``, ``rte_eth_cman_info_get()``
   to support congestion management.
 
+* **Added support for multiple mbuf pools per ethdev Rx queue.**
+
+  The capability allows the application to provide many mempools of
+  different size and the PMD and/or NIC to choose a memory pool based
+  on the packet's length and/or Rx buffer availability.
+
 * **Updated Intel iavf driver.**
 
   * Added flow subscription support.
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index b3dba291e7..979b02356e 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -1739,6 +1739,41 @@ rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
 	return 0;
 }
 
+static int
+rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
+				uint16_t n_mempools, uint32_t *min_buf_size,
+				const struct rte_eth_dev_info *dev_info)
+{
+	uint16_t pool_idx;
+	int ret;
+
+	if (n_mempools > dev_info->max_rx_mempools) {
+		RTE_ETHDEV_LOG(ERR,
+			       "Too many Rx mempools %u vs maximum %u\n",
+			       n_mempools, dev_info->max_rx_mempools);
+		return -EINVAL;
+	}
+
+	for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) {
+		struct rte_mempool *mp = rx_mempools[pool_idx];
+
+		if (mp == NULL) {
+			RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n");
+			return -EINVAL;
+		}
+
+		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
+					       dev_info->min_rx_bufsize);
+		if (ret != 0)
+			return ret;
+
+		*min_buf_size = RTE_MIN(*min_buf_size,
+					rte_pktmbuf_data_room_size(mp));
+	}
+
+	return 0;
+}
+
 int
 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		       uint16_t nb_rx_desc, unsigned int socket_id,
@@ -1746,7 +1781,8 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		       struct rte_mempool *mp)
 {
 	int ret;
-	uint32_t mbp_buf_size;
+	uint64_t rx_offloads;
+	uint32_t mbp_buf_size = UINT32_MAX;
 	struct rte_eth_dev *dev;
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_rxconf local_conf;
@@ -1766,35 +1802,42 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	if (ret != 0)
 		return ret;
 
+	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	if (rx_conf != NULL)
+		rx_offloads |= rx_conf->offloads;
+
+	/* Ensure that we have one and only one source of Rx buffers */
+	if ((mp != NULL) +
+	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
+	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
+		RTE_ETHDEV_LOG(ERR,
+			       "Ambiguous Rx mempools configuration\n");
+		return -EINVAL;
+	}
+
 	if (mp != NULL) {
 		/* Single pool configuration check. */
-		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
-			RTE_ETHDEV_LOG(ERR,
-				       "Ambiguous segment configuration\n");
-			return -EINVAL;
-		}
-
 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
 					       dev_info.min_rx_bufsize);
 		if (ret != 0)
 			return ret;
 
 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
-	} else {
+	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
 		const struct rte_eth_rxseg_split *rx_seg;
 		uint16_t n_seg;
 
 		/* Extended multi-segment configuration check. */
-		if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
+		if (rx_conf->rx_seg == NULL) {
 			RTE_ETHDEV_LOG(ERR,
-				       "Memory pool is null and no extended configuration provided\n");
+				       "Memory pool is null and no multi-segment configuration provided\n");
 			return -EINVAL;
 		}
 
 		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
 		n_seg = rx_conf->rx_nseg;
 
-		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+		if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
 			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
 							   &mbp_buf_size,
 							   &dev_info);
@@ -1804,6 +1847,22 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
 			return -EINVAL;
 		}
+	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
+		/* Extended multi-pool configuration check. */
+		if (rx_conf->rx_mempools == NULL) {
+			RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n");
+			return -EINVAL;
+		}
+
+		ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools,
+						      rx_conf->rx_nmempool,
+						      &mbp_buf_size,
+						      &dev_info);
+		if (ret != 0)
+			return ret;
+	} else {
+		RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n");
+		return -EINVAL;
 	}
 
 	/* Use default specified by driver, if nb_rx_desc is zero */
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 2530eda7c4..d1e44ffa5f 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -1067,6 +1067,28 @@ struct rte_eth_rxconf {
 	 */
 	union rte_eth_rxseg *rx_seg;
 
+	/**
+	 * Array of mempools to allocate Rx buffers from.
+	 *
+	 * This provides support for multiple mbuf pools per Rx queue.
+	 * The capability is reported in device info via positive
+	 * max_rx_mempools.
+	 *
+	 * It could be useful for more efficient usage of memory when an
+	 * application creates different mempools to steer the specific
+	 * size of the packet.
+	 *
+	 * If many mempools are specified, packets received using Rx
+	 * burst may belong to any provided mempool. From ethdev user point
+	 * of view it is undefined how PMD/NIC chooses mempool for a packet.
+	 *
+	 * If Rx scatter is enabled, a packet may be delivered using a chain
+	 * of mbufs obtained from single mempool or multiple mempools based
+	 * on the NIC implementation.
+	 */
+	struct rte_mempool **rx_mempools;
+	uint16_t rx_nmempool; /**< Number of Rx mempools */
+
 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
 	void *reserved_ptrs[2];   /**< Reserved for future fields */
 };
@@ -1614,6 +1636,13 @@ struct rte_eth_dev_info {
 	/** Configured number of Rx/Tx queues */
 	uint16_t nb_rx_queues; /**< Number of Rx queues. */
 	uint16_t nb_tx_queues; /**< Number of Tx queues. */
+	/**
+	 * Maximum number of Rx mempools supported per Rx queue.
+	 *
+	 * Value greater than 0 means that the driver supports Rx queue
+	 * mempools specification via rx_conf->rx_mempools.
+	 */
+	uint16_t max_rx_mempools;
+
 	/** Rx parameter recommendations */
 	struct rte_eth_dev_portconf default_rxportconf;
 	/** Tx parameter recommendations */

From patchwork Fri Oct 7 17:29:20 2022
X-Patchwork-Submitter: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
X-Patchwork-Id: 117578
X-Patchwork-Delegate: thomas@monjalon.net
From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Cc: dev@dpdk.org, Hanumanth Pothula <hpothula@marvell.com>
Subject: [PATCH v8 3/4] net/cnxk: support multiple mbuf pools per Rx queue
Date: Fri, 7 Oct 2022 20:29:20 +0300
Message-Id: <20221007172921.3325250-4-andrew.rybchenko@oktetlabs.ru>
In-Reply-To: <20221007172921.3325250-1-andrew.rybchenko@oktetlabs.ru>
References: <20221006170126.1322852-1-hpothula@marvell.com>
 <20221007172921.3325250-1-andrew.rybchenko@oktetlabs.ru>

From: Hanumanth Pothula <hpothula@marvell.com>

Presently, the HW is programmed to receive packets only from the LPB
pool, so all packets land in the LPB pool. However, CNXK HW supports
two pools:
 - SPB -> packets with smaller size (less than 4K)
 - LPB -> packets with bigger size (greater than 4K)

This patch enables the multiple mempool capability: the pool is
selected based on the packet's length, i.e. the PMD programs the HW to
receive packets into both the SPB and LPB pools depending on packet
length. The application provides two pools with different buffer
(packet) lengths, which lets the driver configure both pools
accordingly; the driver advertises the capability via max_rx_mempools.
This is often useful for saving memory: the application can create a
separate pool to steer a specific packet size, enabling effective use
of memory.
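
As an illustration (not part of the patch; pool sizes, mbuf counts and
names are made up), an application would hand this driver exactly two
pools; cnxk_nix_process_rx_conf() classifies them by elt_size, so the
array order does not matter, and both pools must use the platform
(cnxk_npa) mempool ops:

	struct rte_eth_rxconf rx_conf = { 0 };
	struct rte_mempool *pools[2];

	/* Small buffers: becomes the SPB pool (smaller elt_size) */
	pools[0] = rte_pktmbuf_pool_create("spb", 16384, 256, 0, 2048,
					   socket_id);
	/* Large buffers: becomes the LPB pool (bigger elt_size) */
	pools[1] = rte_pktmbuf_pool_create("lpb", 2048, 256, 0, 9216,
					   socket_id);

	rx_conf.rx_mempools = pools;
	rx_conf.rx_nmempool = 2; /* CNXK_NIX_NUM_POOLS_MAX */
	rte_eth_rx_queue_setup(port_id, 0, 4096, socket_id, &rx_conf, NULL);
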
Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     | 84 ++++++++++++++++++++++++++----
 drivers/net/cnxk/cnxk_ethdev.h     |  2 +
 drivers/net/cnxk/cnxk_ethdev_ops.c |  3 ++
 3 files changed, 80 insertions(+), 9 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 2cb48ba152..bb27cc87fd 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -541,6 +541,58 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 	plt_free(txq_sp);
 }
 
+static int
+cnxk_nix_process_rx_conf(const struct rte_eth_rxconf *rx_conf,
+			 struct rte_mempool **lpb_pool,
+			 struct rte_mempool **spb_pool)
+{
+	struct rte_mempool *pool0;
+	struct rte_mempool *pool1;
+	struct rte_mempool **mp = rx_conf->rx_mempools;
+	const char *platform_ops;
+	struct rte_mempool_ops *ops;
+
+	if (*lpb_pool ||
+	    rx_conf->rx_nmempool != CNXK_NIX_NUM_POOLS_MAX) {
+		plt_err("invalid arguments");
+		return -EINVAL;
+	}
+
+	if (mp == NULL || mp[0] == NULL || mp[1] == NULL) {
+		plt_err("invalid memory pools\n");
+		return -EINVAL;
+	}
+
+	pool0 = mp[0];
+	pool1 = mp[1];
+
+	if (pool0->elt_size > pool1->elt_size) {
+		*lpb_pool = pool0;
+		*spb_pool = pool1;
+
+	} else {
+		*lpb_pool = pool1;
+		*spb_pool = pool0;
+	}
+
+	if ((*spb_pool)->pool_id == 0) {
+		plt_err("Invalid pool_id");
+		return -EINVAL;
+	}
+
+	platform_ops = rte_mbuf_platform_mempool_ops();
+	ops = rte_mempool_get_ops((*spb_pool)->ops_index);
+	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
+		plt_err("mempool ops should be of cnxk_npa type");
+		return -EINVAL;
+	}
+
+	plt_info("spb_pool:%s lpb_pool:%s lpb_len:%u spb_len:%u\n",
+		 (*spb_pool)->name, (*lpb_pool)->name,
+		 (*lpb_pool)->elt_size, (*spb_pool)->elt_size);
+
+	return 0;
+}
+
 int
 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			uint32_t nb_desc, uint16_t fp_rx_q_sz,
@@ -557,6 +609,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	uint16_t first_skip;
 	int rc = -EINVAL;
 	size_t rxq_sz;
+	struct rte_mempool *lpb_pool = mp;
+	struct rte_mempool *spb_pool = NULL;
 
 	/* Sanity checks */
 	if (rx_conf->rx_deferred_start == 1) {
@@ -564,15 +618,21 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		goto fail;
 	}
 
+	if (rx_conf->rx_nmempool > 0) {
+		rc = cnxk_nix_process_rx_conf(rx_conf, &lpb_pool, &spb_pool);
+		if (rc)
+			goto fail;
+	}
+
 	platform_ops = rte_mbuf_platform_mempool_ops();
 	/* This driver needs cnxk_npa mempool ops to work */
-	ops = rte_mempool_get_ops(mp->ops_index);
+	ops = rte_mempool_get_ops(lpb_pool->ops_index);
 	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
 		plt_err("mempool ops should be of cnxk_npa type");
 		goto fail;
 	}
 
-	if (mp->pool_id == 0) {
+	if (lpb_pool->pool_id == 0) {
 		plt_err("Invalid pool_id");
 		goto fail;
 	}
@@ -589,13 +649,13 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* Its a no-op when inline device is not used */
 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY ||
 	    dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
-		roc_nix_inl_dev_xaq_realloc(mp->pool_id);
+		roc_nix_inl_dev_xaq_realloc(lpb_pool->pool_id);
 
 	/* Increase CQ size to Aura size to avoid CQ overflow and
 	 * then CPT buffer leak.
 	 */
 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
-		nb_desc = nix_inl_cq_sz_clamp_up(nix, mp, nb_desc);
+		nb_desc = nix_inl_cq_sz_clamp_up(nix, lpb_pool, nb_desc);
 
 	/* Setup ROC CQ */
 	cq = &dev->cqs[qid];
@@ -611,17 +671,17 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rq = &dev->rqs[qid];
 	rq->qid = qid;
 	rq->cqid = cq->qid;
-	rq->aura_handle = mp->pool_id;
+	rq->aura_handle = lpb_pool->pool_id;
 	rq->flow_tag_width = 32;
 	rq->sso_ena = false;
 
 	/* Calculate first mbuf skip */
 	first_skip = (sizeof(struct rte_mbuf));
 	first_skip += RTE_PKTMBUF_HEADROOM;
-	first_skip += rte_pktmbuf_priv_size(mp);
+	first_skip += rte_pktmbuf_priv_size(lpb_pool);
 	rq->first_skip = first_skip;
 	rq->later_skip = sizeof(struct rte_mbuf);
-	rq->lpb_size = mp->elt_size;
+	rq->lpb_size = lpb_pool->elt_size;
 	if (roc_errata_nix_no_meta_aura())
 		rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
 
@@ -629,6 +689,12 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	if (roc_nix_inl_inb_is_enabled(nix))
 		rq->ipsech_ena = true;
 
+	if (spb_pool) {
+		rq->spb_ena = 1;
+		rq->spb_aura_handle = spb_pool->pool_id;
+		rq->spb_size = spb_pool->elt_size;
+	}
+
 	rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
 	if (rc) {
 		plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
@@ -651,7 +717,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* Queue config should reflect global offloads */
 	rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
 	rxq_sp->qconf.nb_desc = nb_desc;
-	rxq_sp->qconf.mp = mp;
+	rxq_sp->qconf.mp = lpb_pool;
 	rxq_sp->tc = 0;
 	rxq_sp->tx_pause = (dev->fc_cfg.mode == RTE_ETH_FC_FULL ||
 			    dev->fc_cfg.mode == RTE_ETH_FC_TX_PAUSE);
@@ -670,7 +736,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		goto free_mem;
 	}
 
-	plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
+	plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, lpb_pool->name, nb_desc,
 		    cq->nb_desc);
 
 	/* Store start of fast path area */
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 5204c46244..d282f79a9a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -44,6 +44,8 @@
 #define CNXK_NIX_RX_DEFAULT_RING_SZ 4096
 /* Max supported SQB count */
 #define CNXK_NIX_TX_MAX_SQB 512
+/* LPB & SPB */
+#define CNXK_NIX_NUM_POOLS_MAX 2
 
 /* If PTP is enabled additional SEND MEM DESC is required which
  * takes 2 words, hence max 7 iova address are possible
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 30d169f799..8f7287161b 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -69,6 +69,9 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
 			    RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
+	devinfo->max_rx_mempools = CNXK_NIX_NUM_POOLS_MAX;
+
 	return 0;
 }

From patchwork Fri Oct 7 17:29:21 2022
X-Patchwork-Submitter: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
X-Patchwork-Id: 117579
X-Patchwork-Delegate: andrew.rybchenko@oktetlabs.ru
From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
To: Aman Singh, Yuying Zhang
Cc: dev@dpdk.org, Hanumanth Pothula <hpothula@marvell.com>
Subject: [PATCH v8 4/4] app/testpmd: support multiple mbuf pools per Rx queue
Date: Fri, 7 Oct 2022 20:29:21 +0300
Message-Id: <20221007172921.3325250-5-andrew.rybchenko@oktetlabs.ru>
In-Reply-To: <20221007172921.3325250-1-andrew.rybchenko@oktetlabs.ru>
References: <20221006170126.1322852-1-hpothula@marvell.com>
 <20221007172921.3325250-1-andrew.rybchenko@oktetlabs.ru>

From: Hanumanth Pothula <hpothula@marvell.com>

Some hardware can choose a memory pool based on the packet's size.
The multiple mempool capability allows the PMD/NIC to pick a memory
pool based on the packet's length.

When multiple mempool support is enabled, populate the mempool array
passed via rx_conf->rx_mempools and also print the name of the pool
from which each packet was received.
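
A possible way to exercise this path (hypothetical invocation: it assumes,
as in the buffer-split case, that a multi-value --mbuf-size creates one
mempool per size and --rxpkts drives the per-queue count):

	dpdk-testpmd -a 0002:02:00.0 -- --mbuf-size=2048,4096 --rxpkts=2048,4096 -i

With RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT not enabled, the per-size pools are
passed to the driver via rx_conf->rx_mempools instead of rx_conf->rx_seg.
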
Signed-off-by: Hanumanth Pothula <hpothula@marvell.com>
---
 app/test-pmd/testpmd.c | 34 ++++++++++++++++++++++++----------
 app/test-pmd/testpmd.h |  3 +++
 app/test-pmd/util.c    |  4 ++--
 3 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index de6ad00138..2ce9953c76 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2624,6 +2624,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
 {
 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
+	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
 	unsigned int i, mp_n;
 	int ret;
 
@@ -2645,16 +2646,29 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		 */
 		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
 		mpx = mbuf_pool_find(socket_id, mp_n);
-		/* Handle zero as mbuf data buffer size. */
-		rx_seg->length = rx_pkt_seg_lengths[i] ?
-				 rx_pkt_seg_lengths[i] :
-				 mbuf_data_size[mp_n];
-		rx_seg->offset = i < rx_pkt_nb_offs ?
-				 rx_pkt_seg_offsets[i] : 0;
-		rx_seg->mp = mpx ? mpx : mp;
-	}
-	rx_conf->rx_nseg = rx_pkt_nb_segs;
-	rx_conf->rx_seg = rx_useg;
+		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+			/**
+			 * On segment length zero, update length as
+			 * buffer size minus headroom size to make sure
+			 * enough space is accommodated for the header.
+			 */
+			rx_seg->length = rx_pkt_seg_lengths[i] ?
+					 rx_pkt_seg_lengths[i] :
+					 mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM;
+			rx_seg->offset = i < rx_pkt_nb_offs ?
+					 rx_pkt_seg_offsets[i] : 0;
+			rx_seg->mp = mpx ? mpx : mp;
+		} else {
+			rx_mempool[i] = mpx ? mpx : mp;
+		}
+	}
+	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+		rx_conf->rx_nseg = rx_pkt_nb_segs;
+		rx_conf->rx_seg = rx_useg;
+	} else {
+		rx_conf->rx_mempools = rx_mempool;
+		rx_conf->rx_nmempool = rx_pkt_nb_segs;
+	}
 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
 				     socket_id, rx_conf, NULL);
 	rx_conf->rx_seg = NULL;
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index cbbc7cc350..2f50a10d1f 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -80,6 +80,9 @@ extern uint8_t cl_quit;
 
 #define MIN_TOTAL_NUM_MBUFS 1024
 
+/* Maximum number of pools supported per Rx queue */
+#define MAX_MEMPOOL 8
+
 typedef uint8_t lcoreid_t;
 typedef uint16_t portid_t;
 typedef uint16_t queueid_t;
diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c
index fd98e8b51d..f9df5f69ef 100644
--- a/app/test-pmd/util.c
+++ b/app/test-pmd/util.c
@@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 		print_ether_addr(" - dst=", &eth_hdr->dst_addr,
 				 print_buf, buf_size, &cur_len);
 		MKDUMPSTR(print_buf, buf_size, cur_len,
-			  " - type=0x%04x - length=%u - nb_segs=%d",
-			  eth_type, (unsigned int) mb->pkt_len,
+			  " - pool=%s - type=0x%04x - length=%u - nb_segs=%d",
+			  mb->pool->name, eth_type, (unsigned int) mb->pkt_len,
 			  (int)mb->nb_segs);
 		ol_flags = mb->ol_flags;
 		if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
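
With the util.c change above, verbose Rx output names the pool each mbuf
came from; given the new format string, a dumped line would contain, for
example (illustrative values):

	 - pool=mb_pool_0 - type=0x0800 - length=1500 - nb_segs=1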