From patchwork Tue Sep 18 13:31:46 2018
X-Patchwork-Submitter: Hemant Agrawal <hemant.agrawal@nxp.com>
X-Patchwork-Id: 44845
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Hemant Agrawal <hemant.agrawal@nxp.com>
To: dev@dpdk.org, thomas@monjalon.net, ferruh.yigit@intel.com
Date: Tue, 18 Sep 2018 19:01:46 +0530
Message-Id: <1537277516-8876-4-git-send-email-hemant.agrawal@nxp.com>
In-Reply-To: <1537277516-8876-1-git-send-email-hemant.agrawal@nxp.com>
References: <1535539660-20228-2-git-send-email-hemant.agrawal@nxp.com>
 <1537277516-8876-1-git-send-email-hemant.agrawal@nxp.com>
Subject: [dpdk-dev] [PATCH v2 03/13] net/dpaa: implement scatter offload
 support

This patch implements scatter-gather (SG) support on Rx, which can be
enabled or disabled per port through the DEV_RX_OFFLOAD_SCATTER flag in
the Rx offload configuration.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/net/dpaa/dpaa_ethdev.c | 62 +++++++++++++++++++++++++++++++++++++++---
 drivers/net/dpaa/dpaa_ethdev.h |  3 +-
 drivers/net/dpaa/dpaa_rxtx.c   |  8 +++---
 drivers/net/dpaa/dpaa_rxtx.h   |  2 --
 4 files changed, 64 insertions(+), 11 deletions(-)
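
[Usage sketch, not part of the patch: how an application would request the
scatter offload wired up in the diff below. The helper name, the single
queue, the 128-descriptor ring and the 9000-byte max Rx frame length are
illustrative assumptions; the offload API is the DEV_RX_OFFLOAD_* one used
by this series.]

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
enable_scatter_rx(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = { 0 };
	int ret;

	/* Allow frames larger than one mbuf; they are then delivered
	 * as multi-segment mbufs chained through m->next.
	 */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_SCATTER |
			       DEV_RX_OFFLOAD_JUMBO_FRAME;
	conf.rxmode.max_rx_pkt_len = 9000;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	/* dpaa_eth_rx_queue_setup() below re-checks max_rx_pkt_len
	 * against the mempool buffer size times DPAA_SGT_MAX_ENTRIES.
	 */
	return rte_eth_rx_queue_setup(port_id, 0, 128,
				      rte_eth_dev_socket_id(port_id),
				      NULL, mp);
}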
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index db166f5..9ea8510 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -47,15 +47,15 @@
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-		DEV_RX_OFFLOAD_JUMBO_FRAME;
+		DEV_RX_OFFLOAD_JUMBO_FRAME |
+		DEV_RX_OFFLOAD_SCATTER;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
 		DEV_RX_OFFLOAD_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM |
 		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_SCATTER;
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup;
@@ -147,11 +147,30 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
 				+ VLAN_TAG_SIZE;
+	uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
 
 	PMD_INIT_FUNC_TRACE();
 
 	if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
 		return -EINVAL;
+	/*
+	 * Refuse mtu that requires the support of scattered packets
+	 * when this feature has not been enabled before.
+	 */
+	if (dev->data->min_rx_buf_size &&
+	    !dev->data->scattered_rx && frame_size > buffsz) {
+		DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
+		return -EINVAL;
+	}
+
+	/* check <seg size> * <max_seg> >= max_frame */
+	if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
+	    (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
+		DPAA_PMD_ERR("Too big to fit for Max SG list %d",
+			     buffsz * DPAA_SGT_MAX_ENTRIES);
+		return -EINVAL;
+	}
+
 	if (frame_size > ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.offloads &=
 						DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -208,6 +227,13 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 			DPAA_MAX_RX_PKT_LEN);
 		}
 	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+		DPAA_PMD_DEBUG("enabling scatter mode");
+		fman_if_set_sg(dpaa_intf->fif, 1);
+		dev->data->scattered_rx = 1;
+	}
+
 	return 0;
 }
@@ -305,7 +331,6 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
 	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
-	dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
 	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
 	dev_info->max_hash_mac_addrs = 0;
@@ -519,6 +544,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	struct qm_mcc_initfq opts = {0};
 	u32 flags = 0;
 	int ret;
+	u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -532,6 +558,28 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
 			queue_idx, rxq->fqid);
 
+	/* Max packet can fit in single buffer */
+	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
+		;
+	} else if (dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_SCATTER) {
+		if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+			buffsz * DPAA_SGT_MAX_ENTRIES) {
+			DPAA_PMD_ERR("max RxPkt size %d too big to fit "
+				"MaxSGlist %d",
+				dev->data->dev_conf.rxmode.max_rx_pkt_len,
+				buffsz * DPAA_SGT_MAX_ENTRIES);
+			rte_errno = EOVERFLOW;
+			return -rte_errno;
+		}
+	} else {
+		DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
+			" larger than a single mbuf (%u) and scattered"
+			" mode has not been requested",
+			dev->data->dev_conf.rxmode.max_rx_pkt_len,
+			buffsz - RTE_PKTMBUF_HEADROOM);
+	}
+
 	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
 		struct fman_if_ic_params icp;
 		uint32_t fd_offset;
@@ -562,6 +610,9 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			dpaa_intf->name, fd_offset,
 			fman_if_get_fdoff(dpaa_intf->fif));
 	}
+	DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
+		       fman_if_get_sg_enable(dpaa_intf->fif),
+		       dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
 	/* checking if push mode only, no error check for now */
 	if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
 		dpaa_push_queue_idx++;
@@ -1312,6 +1363,9 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 	fman_if_reset_mcast_filter_table(fman_intf);
 	/* Reset interface statistics */
 	fman_if_stats_reset(fman_intf);
+	/* Disable SG by default */
+	fman_if_set_sg(fman_intf, 0);
+	fman_if_set_maxfrm(fman_intf, ETHER_MAX_LEN + VLAN_TAG_SIZE);
 
 	return 0;
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 2c38c34..2fc7231 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -39,9 +39,10 @@
 /* Alignment to use for cpu-local structs to avoid coherency problems.
  */
 #define MAX_CACHELINE 64
 
-#define DPAA_MIN_RX_BUF_SIZE 512
 #define DPAA_MAX_RX_PKT_LEN  10240
 
+#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
+
 /* RX queue tail drop threshold (CGR Based) in frame count */
 #define CGR_RX_PERFQ_THRESH 256
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 168b77e..3a3a048 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -306,8 +306,6 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 	int i = 0;
 	uint8_t fd_offset = fd->offset;
 
-	DPAA_DP_LOG(DEBUG, "Received an SG frame");
-
 	vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
 	if (!vaddr) {
 		DPAA_PMD_ERR("unable to convert physical address");
@@ -349,6 +347,8 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 		}
 		prev_seg = cur_seg;
 	}
+	DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d",
+		    first_seg->pkt_len, first_seg->nb_segs);
 
 	dpaa_eth_packet_info(first_seg, vaddr);
 	rte_pktmbuf_free_seg(temp);
@@ -367,8 +367,6 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 	uint16_t offset;
 	uint32_t length;
 
-	DPAA_DP_LOG(DEBUG, " FD--->MBUF");
-
 	if (unlikely(format == qm_fd_sg))
 		return dpaa_eth_sg_to_mbuf(fd, ifid);
 
@@ -379,6 +377,8 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
 	offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
 	length = fd->opaque & DPAA_FD_LENGTH_MASK;
 
+	DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length);
+
 	/* Ignoring case when format != qm_fd_contig */
 	dpaa_display_frame(fd);
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
index d3e6351..6de70a7 100644
--- a/drivers/net/dpaa/dpaa_rxtx.h
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -32,8 +32,6 @@
 /* L4 Type field: TCP */
 #define DPAA_L4_PARSE_RESULT_TCP	0x20
 
-#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
-
 #define DPAA_MAX_DEQUEUE_NUM_FRAMES    63
 
 /**
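
[Worked example of the new size checks; the pool sizes below are assumptions
for illustration, not values from the patch. With a pool created with
RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes), buffsz = 2176 - RTE_PKTMBUF_HEADROOM
(128) = 2048, so a scattered frame may span up to buffsz *
DPAA_SGT_MAX_ENTRIES = 2048 * 16 = 32768 bytes, and the DPAA_MAX_RX_PKT_LEN
cap of 10240 bytes binds first. With a small pool, e.g. a 640-byte data
room, buffsz = 512 and the SG bound of 512 * 16 = 8192 bytes becomes the
limiting one: dpaa_mtu_set() and dpaa_eth_rx_queue_setup() above reject
anything larger.]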