From patchwork Fri Apr 30 04:34:23 2021
X-Patchwork-Submitter: Apeksha Gupta
X-Patchwork-Id: 92456
X-Patchwork-Delegate: andrew.rybchenko@oktetlabs.ru
From: Apeksha Gupta
To: ferruh.yigit@intel.com
Cc: dev@dpdk.org, hemant.agrawal@nxp.com, sachin.saxena@nxp.com, Apeksha Gupta
Date: Fri, 30 Apr 2021 10:04:23 +0530
Message-Id: <20210430043424.19752-4-apeksha.gupta@nxp.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20210430043424.19752-1-apeksha.gupta@nxp.com>
References: <20210430043424.19752-1-apeksha.gupta@nxp.com>
Subject: [dpdk-dev] [PATCH 3/4] drivers/net/enetfec: queue configuration

This patch adds the Rx/Tx queue configuration and setup operations.
On packet Rx, the respective BD ring status bit is set, which is then
used for packet processing.

Signed-off-by: Sachin Saxena
Signed-off-by: Apeksha Gupta
---
 drivers/net/enetfec/enet_ethdev.c | 223 ++++++++++++++++++++++++++++++
 1 file changed, 223 insertions(+)
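For reviewers, the callbacks added here are reached through the generic
ethdev API. Below is a minimal, illustrative sketch of that path; the
function name, port id, single queue pair, 256 descriptors and the mempool
parameter are assumed values for the example, not taken from this patch.

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Illustrative only: configure one Rx and one Tx queue on a port backed
 * by this PMD, then start it.
 */
static int
example_port_init(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf = { 0 };	/* defaults are enough here */
	int ret;

	/* dispatched to enetfec_eth_configure() via .dev_configure */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	/* dispatched to enetfec_rx_queue_setup(); the driver caps
	 * nb_rx_desc at MAX_RX_BD_RING_SIZE and pre-fills each BD with
	 * an mbuf from mb_pool.
	 */
	ret = rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;

	/* dispatched to enetfec_tx_queue_setup() via .tx_queue_setup */
	ret = rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL);
	if (ret < 0)
		return ret;

	/* dispatched to enetfec_eth_open() via .dev_start */
	return rte_eth_dev_start(port_id);
}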
diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index 5f4f2cf9e..b4816179a 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -48,6 +48,19 @@ int enetfec_logtype_pmd;
 
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_CHECKSUM;
+
+static uint64_t dev_tx_offloads_sup =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
+
 /*
  * This function is called to start or restart the FEC during a link
  * change, transmit timeout or to reconfigure the FEC. The network
@@ -176,8 +189,218 @@ enetfec_eth_open(struct rte_eth_dev *dev)
 	return 0;
 }
+
+static int
+enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)
+{
+	ENET_PMD_INFO("%s: returning zero ", __func__);
+	return 0;
+}
+
+static int
+enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
+		 struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_queues = ENET_MAX_Q;
+	dev_info->max_tx_queues = ENET_MAX_Q;
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup;
+
+	return 0;
+}
+
+static const unsigned short offset_des_active_rxq[] = {
+	ENET_RDAR_0, ENET_RDAR_1, ENET_RDAR_2
+};
+
+static const unsigned short offset_des_active_txq[] = {
+	ENET_TDAR_0, ENET_TDAR_1, ENET_TDAR_2
+};
+
+static int
+enetfec_tx_queue_setup(struct rte_eth_dev *dev,
+			uint16_t queue_idx,
+			uint16_t nb_desc,
+			__rte_unused unsigned int socket_id,
+			__rte_unused const struct rte_eth_txconf *tx_conf)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+	struct bufdesc *bdp, *bd_base;
+	struct enetfec_priv_tx_q *txq;
+	unsigned int size;
+	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+			sizeof(struct bufdesc);
+	unsigned int dsize_log2 = fls64(dsize);
+
+	/* allocate transmit queue */
+	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
+	if (!txq) {
+		ENET_PMD_ERR("transmit queue allocation failed");
+		return -ENOMEM;
+	}
+
+	if (nb_desc > MAX_TX_BD_RING_SIZE) {
+		nb_desc = MAX_TX_BD_RING_SIZE;
+		ENET_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE\n");
+	}
+	txq->bd.ring_size = nb_desc;
+	fep->total_tx_ring_size += txq->bd.ring_size;
+	fep->tx_queues[queue_idx] = txq;
+
+	rte_write32(fep->bd_addr_p_t[queue_idx],
+		    fep->hw_baseaddr_v + ENET_TD_START(queue_idx));
+
+	/* Set transmit descriptor base. */
+	txq = fep->tx_queues[queue_idx];
+	txq->fep = fep;
+	size = dsize * txq->bd.ring_size;
+	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
+	txq->bd.que_id = queue_idx;
+	txq->bd.base = bd_base;
+	txq->bd.cur = bd_base;
+	txq->bd.d_size = dsize;
+	txq->bd.d_size_log2 = dsize_log2;
+	txq->bd.active_reg_desc =
+		fep->hw_baseaddr_v + offset_des_active_txq[queue_idx];
+	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
+	txq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
+	bdp = txq->bd.base;
+	bdp = txq->bd.cur;
+
+	for (i = 0; i < txq->bd.ring_size; i++) {
+		/* Initialize the BD for every fragment in the page. */
+		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
+		if (txq->tx_mbuf[i]) {
+			rte_pktmbuf_free(txq->tx_mbuf[i]);
+			txq->tx_mbuf[i] = NULL;
+		}
+		rte_write32(rte_cpu_to_le_32(0), &bdp->bd_bufaddr);
+		bdp = enet_get_nextdesc(bdp, &txq->bd);
+	}
+
+	/* Set the last buffer to wrap */
+	bdp = enet_get_prevdesc(bdp, &txq->bd);
+	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
+		    rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
+	txq->dirty_tx = bdp;
+	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
+	return 0;
+}
+
+static int
+enetfec_rx_queue_setup(struct rte_eth_dev *dev,
+			uint16_t queue_idx,
+			uint16_t nb_rx_desc,
+			__rte_unused unsigned int socket_id,
+			__rte_unused const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mb_pool)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+	struct bufdesc *bd_base;
+	struct bufdesc *bdp;
+	struct enetfec_priv_rx_q *rxq;
+	unsigned int size;
+	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+			sizeof(struct bufdesc);
+	unsigned int dsize_log2 = fls64(dsize);
+
+	/* allocate receive queue */
+	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
+	if (!rxq) {
+		ENET_PMD_ERR("receive queue allocation failed");
+		return -ENOMEM;
+	}
+
+	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
+		nb_rx_desc = MAX_RX_BD_RING_SIZE;
+		ENET_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE\n");
+	}
+
+	rxq->bd.ring_size = nb_rx_desc;
+	fep->total_rx_ring_size += rxq->bd.ring_size;
+	fep->rx_queues[queue_idx] = rxq;
+
+	rte_write32(fep->bd_addr_p_r[queue_idx],
+		    fep->hw_baseaddr_v + ENET_RD_START(queue_idx));
+	rte_write32(PKT_MAX_BUF_SIZE,
+		    fep->hw_baseaddr_v + ENET_MRB_SIZE(queue_idx));
+
+	/* Set receive descriptor base. */
+	rxq = fep->rx_queues[queue_idx];
+	rxq->pool = mb_pool;
+	size = dsize * rxq->bd.ring_size;
+	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
+	rxq->bd.que_id = queue_idx;
+	rxq->bd.base = bd_base;
+	rxq->bd.cur = bd_base;
+	rxq->bd.d_size = dsize;
+	rxq->bd.d_size_log2 = dsize_log2;
+	rxq->bd.active_reg_desc =
+		fep->hw_baseaddr_v + offset_des_active_rxq[queue_idx];
+	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
+	rxq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
+
+	rxq->fep = fep;
+	bdp = rxq->bd.base;
+	rxq->bd.cur = bdp;
+
+	for (i = 0; i < nb_rx_desc; i++) {
+		/* Initialize Rx buffers from pktmbuf pool */
+		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
+		if (mbuf == NULL) {
+			ENET_PMD_ERR("mbuf failed\n");
+			goto err_alloc;
+		}
+
+		/* Get the virtual address & physical address */
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			    &bdp->bd_bufaddr);
+
+		rxq->rx_mbuf[i] = mbuf;
+		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);
+
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+	}
+
+	/* Initialize the receive buffer descriptors. */
+	bdp = rxq->bd.cur;
+	for (i = 0; i < rxq->bd.ring_size; i++) {
+		/* Initialize the BD for every fragment in the page. */
+		if (rte_read32(&bdp->bd_bufaddr))
+			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
+				    &bdp->bd_sc);
+		else
+			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
+
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+	}
+
+	/* Set the last buffer to wrap */
+	bdp = enet_get_prevdesc(bdp, &rxq->bd);
+	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
+		    rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
+	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
+	rte_write32(0x0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
+	return 0;
+
+err_alloc:
+	for (i = 0; i < nb_rx_desc; i++) {
+		rte_pktmbuf_free(rxq->rx_mbuf[i]);
+		rxq->rx_mbuf[i] = NULL;
+	}
+	rte_free(rxq);
+	return -1;
+}
+
 static const struct eth_dev_ops ops = {
 	.dev_start = enetfec_eth_open,
+	.dev_configure = enetfec_eth_configure,
+	.dev_infos_get = enetfec_eth_info,
+	.rx_queue_setup = enetfec_rx_queue_setup,
+	.tx_queue_setup = enetfec_tx_queue_setup,
 };
 
 static int
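A note on the buffer-descriptor bookkeeping both setup routines rely on:
bd.base and bd.last bound the ring in the per-queue DMA area, bd.d_size is
the per-descriptor stride (bufdesc vs. bufdesc_ex), and the last descriptor
gets the TX_BD_WRAP/RX_BD_WRAP bit so the hardware returns to the base.
enet_get_nextdesc() and enet_get_prevdesc() are provided elsewhere in this
series; the sketch below shows one plausible wrap-around walk consistent
with that layout (type and function names are illustrative, not the
driver's own).

#include <stdint.h>

/* Illustrative ring bookkeeping mirroring the bd.* fields set up above. */
struct bd_ring_sketch {
	uint8_t *base;		/* first descriptor in the ring */
	uint8_t *last;		/* last descriptor; carries the WRAP bit */
	unsigned int d_size;	/* descriptor stride in bytes */
};

/* Advance one descriptor, wrapping from the last entry back to the base. */
static inline uint8_t *
bd_ring_next(uint8_t *bdp, const struct bd_ring_sketch *bd)
{
	return (bdp >= bd->last) ? bd->base : bdp + bd->d_size;
}

/* Step back one descriptor, wrapping from the base to the last entry. */
static inline uint8_t *
bd_ring_prev(uint8_t *bdp, const struct bd_ring_sketch *bd)
{
	return (bdp <= bd->base) ? bd->last : bdp - bd->d_size;
}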