From patchwork Thu Sep 2 17:59:53 2021
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Apeksha Gupta <apeksha.gupta@nxp.com>
X-Patchwork-Id: 97839
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Apeksha Gupta <apeksha.gupta@nxp.com>
To: andrew.rybchenko@oktetlabs.ru, ferruh.yigit@intel.com
Cc: dev@dpdk.org, hemant.agrawal@nxp.com, sachin.saxena@nxp.com,
 Apeksha Gupta <apeksha.gupta@nxp.com>
Date: Thu, 2 Sep 2021 23:29:53 +0530
Message-Id: <20210902175955.9202-4-apeksha.gupta@nxp.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20210902175955.9202-1-apeksha.gupta@nxp.com>
References: <20210902175955.9202-1-apeksha.gupta@nxp.com>
Subject: [dpdk-dev] [PATCH v2 3/5] net/enetfec: support queue configuration

This patch adds the Rx/Tx queue configuration setup operations.
On packet reception, the corresponding BD ring status bit is set; that
bit is then used during packet processing.

Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
 drivers/net/enetfec/enet_ethdev.c | 230 +++++++++++++++++++++++++++++-
 1 file changed, 229 insertions(+), 1 deletion(-)

diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index 673361e3f8..b8bc4a5f8b 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -46,6 +46,19 @@ int enetfec_logtype_pmd;
 
 uint32_t e_cntl;
 
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_CHECKSUM;
+
+static uint64_t dev_tx_offloads_sup =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
+
 /*
  * This function is called to start or restart the ENETFEC during a link
  * change, transmit timeout, or to reconfigure the ENETFEC. The network
@@ -214,10 +227,225 @@ enetfec_eth_stop(__rte_unused struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_queues = ENETFEC_MAX_Q;
+	dev_info->max_tx_queues = ENETFEC_MAX_Q;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup;
+	return 0;
+}
+
+static const unsigned short offset_des_active_rxq[] = {
+	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
+};
+
+static const unsigned short offset_des_active_txq[] = {
+	ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
+};
+
+static int
+enetfec_tx_queue_setup(struct rte_eth_dev *dev,
+			uint16_t queue_idx,
+			uint16_t nb_desc,
+			unsigned int socket_id __rte_unused,
+			const struct rte_eth_txconf *tx_conf)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+	struct bufdesc *bdp, *bd_base;
+	struct enetfec_priv_tx_q *txq;
+	unsigned int size;
+	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+			sizeof(struct bufdesc);
+	unsigned int dsize_log2 = fls64(dsize);
+
+	/* Tx deferred start is not supported */
+	if (tx_conf->tx_deferred_start) {
+		ENETFEC_PMD_ERR("%p:Tx deferred start not supported",
+			(void *)dev);
+		return -EINVAL;
+	}
+
+	/* allocate transmit queue */
+	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
+	if (txq == NULL) {
+		ENETFEC_PMD_ERR("transmit queue allocation failed");
+		return -ENOMEM;
+	}
+
+	if (nb_desc > MAX_TX_BD_RING_SIZE) {
+		nb_desc = MAX_TX_BD_RING_SIZE;
+		ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE\n");
+	}
+	txq->bd.ring_size = nb_desc;
+	fep->total_tx_ring_size += txq->bd.ring_size;
+	fep->tx_queues[queue_idx] = txq;
+
+	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
+		fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));
+
+	/* Set transmit descriptor base. */
+	txq = fep->tx_queues[queue_idx];
+	txq->fep = fep;
+	size = dsize * txq->bd.ring_size;
+	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
+	txq->bd.que_id = queue_idx;
+	txq->bd.base = bd_base;
+	txq->bd.cur = bd_base;
+	txq->bd.d_size = dsize;
+	txq->bd.d_size_log2 = dsize_log2;
+	txq->bd.active_reg_desc =
+		fep->hw_baseaddr_v + offset_des_active_txq[queue_idx];
+	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
+	txq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
+	bdp = txq->bd.base;
+	bdp = txq->bd.cur;
+
+	for (i = 0; i < txq->bd.ring_size; i++) {
+		/* Initialize the BD for every fragment in the page. */
+		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
+		if (txq->tx_mbuf[i] != NULL) {
+			rte_pktmbuf_free(txq->tx_mbuf[i]);
+			txq->tx_mbuf[i] = NULL;
+		}
+		rte_write32(0, &bdp->bd_bufaddr);
+		bdp = enet_get_nextdesc(bdp, &txq->bd);
+	}
+
+	/* Set the last buffer to wrap */
+	bdp = enet_get_prevdesc(bdp, &txq->bd);
+	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
+		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
+	txq->dirty_tx = bdp;
+	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
+	return 0;
+}
+
+static int
+enetfec_rx_queue_setup(struct rte_eth_dev *dev,
+			uint16_t queue_idx,
+			uint16_t nb_rx_desc,
+			unsigned int socket_id __rte_unused,
+			const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mb_pool)
+{
+	struct enetfec_private *fep = dev->data->dev_private;
+	unsigned int i;
+	struct bufdesc *bd_base;
+	struct bufdesc *bdp;
+	struct enetfec_priv_rx_q *rxq;
+	unsigned int size;
+	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+			sizeof(struct bufdesc);
+	unsigned int dsize_log2 = fls64(dsize);
+
+	/* Rx deferred start is not supported */
+	if (rx_conf->rx_deferred_start) {
+		ENETFEC_PMD_ERR("%p:Rx deferred start not supported",
+			(void *)dev);
+		return -EINVAL;
+	}
+
+	/* allocate receive queue */
+	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
+	if (rxq == NULL) {
+		ENETFEC_PMD_ERR("receive queue allocation failed");
+		return -ENOMEM;
+	}
+
+	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
+		nb_rx_desc = MAX_RX_BD_RING_SIZE;
+		ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE\n");
+	}
+
+	rxq->bd.ring_size = nb_rx_desc;
+	fep->total_rx_ring_size += rxq->bd.ring_size;
+	fep->rx_queues[queue_idx] = rxq;
+
+	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
+		fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
+	rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
+		fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));
+
+	/* Set receive descriptor base. */
+	rxq = fep->rx_queues[queue_idx];
+	rxq->pool = mb_pool;
+	size = dsize * rxq->bd.ring_size;
+	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
+	rxq->bd.que_id = queue_idx;
+	rxq->bd.base = bd_base;
+	rxq->bd.cur = bd_base;
+	rxq->bd.d_size = dsize;
+	rxq->bd.d_size_log2 = dsize_log2;
+	rxq->bd.active_reg_desc =
+		fep->hw_baseaddr_v + offset_des_active_rxq[queue_idx];
+	bd_base = (struct bufdesc *)(((void *)bd_base) + size);
+	rxq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);
+
+	rxq->fep = fep;
+	bdp = rxq->bd.base;
+	rxq->bd.cur = bdp;
+
+	for (i = 0; i < nb_rx_desc; i++) {
+		/* Initialize Rx buffers from pktmbuf pool */
+		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
+		if (mbuf == NULL) {
+			ENETFEC_PMD_ERR("mbuf failed\n");
+			goto err_alloc;
+		}
+
+		/* Get the virtual address & physical address */
+		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+			&bdp->bd_bufaddr);
+
+		rxq->rx_mbuf[i] = mbuf;
+		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);
+
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+	}
+
+	/* Initialize the receive buffer descriptors. */
+	bdp = rxq->bd.cur;
+	for (i = 0; i < rxq->bd.ring_size; i++) {
+		/* Initialize the BD for every fragment in the page. */
+		if (rte_read32(&bdp->bd_bufaddr) > 0)
+			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
+				&bdp->bd_sc);
+		else
+			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
+
+		bdp = enet_get_nextdesc(bdp, &rxq->bd);
+	}
+
+	/* Set the last buffer to wrap */
+	bdp = enet_get_prevdesc(bdp, &rxq->bd);
+	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
+		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
+	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
+	rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
+	return 0;
+
+err_alloc:
+	for (i = 0; i < nb_rx_desc; i++) {
+		if (rxq->rx_mbuf[i] != NULL) {
+			rte_pktmbuf_free(rxq->rx_mbuf[i]);
+			rxq->rx_mbuf[i] = NULL;
+		}
+	}
+	rte_free(rxq);
+	return errno;
+}
+
 static const struct eth_dev_ops enetfec_ops = {
 	.dev_configure = enetfec_eth_configure,
 	.dev_start = enetfec_eth_start,
-	.dev_stop = enetfec_eth_stop
+	.dev_stop = enetfec_eth_stop,
+	.dev_infos_get = enetfec_eth_info,
+	.rx_queue_setup = enetfec_rx_queue_setup,
+	.tx_queue_setup = enetfec_tx_queue_setup
 };

 static int
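
For reference, the sketch below shows how an application is expected to drive
the queue setup operations added by this patch through the generic ethdev API.
It is not part of the patch; the port id, ring sizes, and mempool parameters
are illustrative assumptions, and the helper name setup_enetfec_port() is
hypothetical.

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

static int
setup_enetfec_port(uint16_t port_id)
{
	struct rte_eth_conf port_conf = { 0 };
	struct rte_mempool *mb_pool;
	int ret;

	/* One Rx and one Tx queue; ENETFEC_MAX_Q bounds the maximum. */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	/* mbuf pool backing the Rx ring; the sizing here is an assumption. */
	mb_pool = rte_pktmbuf_pool_create("enetfec_rx_pool", 512, 64, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mb_pool == NULL)
		return -ENOMEM;

	/*
	 * nb_desc above MAX_RX_BD_RING_SIZE/MAX_TX_BD_RING_SIZE is clamped
	 * by the driver; deferred start must stay disabled (NULL conf keeps
	 * the defaults), otherwise the setup ops return -EINVAL.
	 */
	ret = rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(),
			NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}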