From patchwork Wed Jul 6 07:52:04 2022
X-Patchwork-Submitter: Aman Kumar <aman.kumar@vvdntech.in>
X-Patchwork-Id: 113732
X-Patchwork-Delegate: thomas@monjalon.net
From: Aman Kumar <aman.kumar@vvdntech.in>
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, david.marchand@redhat.com,
 aman.kumar@vvdntech.in
Subject: [RFC PATCH 14/29] net/qdma: add routine for Tx queue initialization
Date: Wed, 6 Jul 2022 13:22:04 +0530
Message-Id: <20220706075219.517046-15-aman.kumar@vvdntech.in>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220706075219.517046-1-aman.kumar@vvdntech.in>
References: <20220706075219.517046-1-aman.kumar@vvdntech.in>

Define routines to handle Tx queue related ops. This patch adds
support for the rte_eth_dev_tx_queue* APIs in this PMD.

Signed-off-by: Aman Kumar <aman.kumar@vvdntech.in>
---
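[Note for reviewers, not part of the patch] A minimal sketch of how an
application is expected to exercise the Tx queue path added here, through
the generic ethdev calls this PMD now backs (rte_eth_tx_queue_setup() plus
rte_eth_dev_tx_queue_start()/stop()). The queue id, descriptor count and
the deferred-start choice are illustrative assumptions, not values taken
from this patch; the port is assumed to be configured already via
rte_eth_dev_configure().

#include <rte_ethdev.h>

static int example_txq_bringup(uint16_t port_id, uint16_t txq_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Start from the PMD defaults; ask for deferred start so that
	 * rte_eth_dev_start() does not implicitly start this queue.
	 */
	txconf = dev_info.default_txconf;
	txconf.tx_deferred_start = 1;

	/* Served by qdma_dev_tx_queue_setup() in this PMD. */
	ret = rte_eth_tx_queue_setup(port_id, txq_id, 1024,
				     rte_eth_dev_socket_id(port_id),
				     &txconf);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_start(port_id);
	if (ret != 0)
		return ret;

	/* Deferred queues are started and stopped explicitly; these map
	 * to qdma_dev_tx_queue_start()/qdma_dev_tx_queue_stop() below.
	 */
	ret = rte_eth_dev_tx_queue_start(port_id, txq_id);
	if (ret != 0)
		return ret;

	return rte_eth_dev_tx_queue_stop(port_id, txq_id);
}
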
 drivers/net/qdma/qdma.h        |   8 +
 drivers/net/qdma/qdma_common.c |  74 +++++++++
 drivers/net/qdma/qdma_devops.c | 270 +++++++++++++++++++++++++++++++--
 3 files changed, 343 insertions(+), 9 deletions(-)

diff --git a/drivers/net/qdma/qdma.h b/drivers/net/qdma/qdma.h
index 5992473b33..8515ebe60e 100644
--- a/drivers/net/qdma/qdma.h
+++ b/drivers/net/qdma/qdma.h
@@ -42,6 +42,7 @@
 #define MIN_RX_PIDX_UPDATE_THRESHOLD (1)
 #define MIN_TX_PIDX_UPDATE_THRESHOLD (1)
 #define DEFAULT_MM_CMPT_CNT_THRESHOLD (2)
+#define QDMA_TXQ_PIDX_UPDATE_INTERVAL (1000) /* 100 uSec */
 
 #define WB_TIMEOUT (100000)
 #define RESET_TIMEOUT (60000)
@@ -198,6 +199,7 @@ struct qdma_tx_queue {
 	uint16_t tx_desc_pend;
 	uint16_t nb_tx_desc; /* No of TX descriptors. */
 	rte_spinlock_t pidx_update_lock;
+	struct qdma_q_pidx_reg_info q_pidx_info;
 	uint64_t offloads; /* Tx offloads */
 
 	uint8_t st_mode:1;/* dma-mode: MM or ST */
@@ -297,17 +299,23 @@ struct qdma_pci_dev {
 };
 
 void qdma_dev_ops_init(struct rte_eth_dev *dev);
+void qdma_txq_pidx_update(void *arg);
 int qdma_pf_csr_read(struct rte_eth_dev *dev);
 uint8_t qmda_get_desc_sz_idx(enum rte_pmd_qdma_bypass_desc_len);
 
 int qdma_init_rx_queue(struct qdma_rx_queue *rxq);
+void qdma_reset_tx_queue(struct qdma_tx_queue *txq);
 void qdma_reset_rx_queue(struct qdma_rx_queue *rxq);
 
 void qdma_clr_rx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
			     uint32_t mode);
 void qdma_inv_rx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
			     uint32_t mode);
+void qdma_clr_tx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
+			     uint32_t mode);
+void qdma_inv_tx_queue_ctxts(struct rte_eth_dev *dev, uint32_t qid,
+			     uint32_t mode);
 
 int qdma_identify_bars(struct rte_eth_dev *dev);
 int qdma_get_hw_version(struct rte_eth_dev *dev);
diff --git a/drivers/net/qdma/qdma_common.c b/drivers/net/qdma/qdma_common.c
index d39e642008..2650438e47 100644
--- a/drivers/net/qdma/qdma_common.c
+++ b/drivers/net/qdma/qdma_common.c
@@ -160,6 +160,80 @@ int qdma_init_rx_queue(struct qdma_rx_queue *rxq)
 	return -ENOMEM;
 }
 
+/*
+ * Tx queue reset
+ */
+void qdma_reset_tx_queue(struct qdma_tx_queue *txq)
+{
+	uint32_t i;
+	uint32_t sz;
+
+	txq->tx_fl_tail = 0;
+	if (txq->st_mode) { /* ST-mode */
+		sz = sizeof(struct qdma_ul_st_h2c_desc);
+		/* Zero out HW ring memory */
+		for (i = 0; i < (sz * (txq->nb_tx_desc)); i++)
+			((volatile char *)txq->tx_ring)[i] = 0;
+	} else {
+		sz = sizeof(struct qdma_ul_mm_desc);
+		/* Zero out HW ring memory */
+		for (i = 0; i < (sz * (txq->nb_tx_desc)); i++)
+			((volatile char *)txq->tx_ring)[i] = 0;
+	}
+
+	/* Initialize SW ring entries */
+	for (i = 0; i < txq->nb_tx_desc; i++)
+		txq->sw_ring[i] = NULL;
+}
+
+void qdma_inv_tx_queue_ctxts(struct rte_eth_dev *dev,
+			     uint32_t qid, uint32_t mode)
+{
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_descq_sw_ctxt q_sw_ctxt;
+	struct qdma_descq_hw_ctxt q_hw_ctxt;
+	struct qdma_descq_credit_ctxt q_credit_ctxt;
+	struct qdma_hw_access *hw_access = qdma_dev->hw_access;
+
+	hw_access->qdma_sw_ctx_conf(dev, 0, qid, &q_sw_ctxt,
+				    QDMA_HW_ACCESS_INVALIDATE);
+	hw_access->qdma_hw_ctx_conf(dev, 0, qid, &q_hw_ctxt,
+				    QDMA_HW_ACCESS_INVALIDATE);
+
+	if (mode) { /* ST-mode */
+		hw_access->qdma_credit_ctx_conf(dev, 0, qid,
+				&q_credit_ctxt, QDMA_HW_ACCESS_INVALIDATE);
+	}
+}
+
+/**
+ * Clear Tx queue contexts
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   Nothing.
+ */
+void qdma_clr_tx_queue_ctxts(struct rte_eth_dev *dev,
+			     uint32_t qid, uint32_t mode)
+{
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_descq_sw_ctxt q_sw_ctxt;
+	struct qdma_descq_credit_ctxt q_credit_ctxt;
+	struct qdma_descq_hw_ctxt q_hw_ctxt;
+	struct qdma_hw_access *hw_access = qdma_dev->hw_access;
+
+	hw_access->qdma_sw_ctx_conf(dev, 0, qid, &q_sw_ctxt,
+				    QDMA_HW_ACCESS_CLEAR);
+	hw_access->qdma_hw_ctx_conf(dev, 0, qid, &q_hw_ctxt,
+				    QDMA_HW_ACCESS_CLEAR);
+	if (mode) { /* ST-mode */
+		hw_access->qdma_credit_ctx_conf(dev, 0, qid,
+				&q_credit_ctxt, QDMA_HW_ACCESS_CLEAR);
+	}
+}
+
 /* Utility function to find index of an element in an array */
 int index_of_array(uint32_t *arr, uint32_t n, uint32_t element)
 {
diff --git a/drivers/net/qdma/qdma_devops.c b/drivers/net/qdma/qdma_devops.c
index fefbbda012..e411c0f1be 100644
--- a/drivers/net/qdma/qdma_devops.c
+++ b/drivers/net/qdma/qdma_devops.c
@@ -573,13 +573,196 @@ int qdma_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			    uint16_t nb_tx_desc, unsigned int socket_id,
			    const struct rte_eth_txconf *tx_conf)
 {
-	(void)dev;
-	(void)tx_queue_id;
-	(void)nb_tx_desc;
-	(void)socket_id;
-	(void)tx_conf;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_tx_queue *txq = NULL;
+	struct qdma_ul_mm_desc *tx_ring_mm;
+	struct qdma_ul_st_h2c_desc *tx_ring_st;
+	uint32_t sz;
+	uint8_t *tx_ring_bypass;
+	int err = 0;
+
+	PMD_DRV_LOG(INFO, "Configuring Tx queue id:%d with %d desc\n",
+		    tx_queue_id, nb_tx_desc);
+
+	if (!qdma_dev->is_vf) {
+		err = qdma_dev_increment_active_queue
+				(qdma_dev->dma_device_index,
+				 qdma_dev->func_id,
+				 QDMA_DEV_Q_TYPE_H2C);
+		if (err != QDMA_SUCCESS)
+			return -EINVAL;
+	}
+	if (!qdma_dev->init_q_range) {
+		if (!qdma_dev->is_vf) {
+			err = qdma_pf_csr_read(dev);
+			if (err < 0) {
+				PMD_DRV_LOG(ERR, "CSR read failed\n");
+				goto tx_setup_err;
+			}
+		}
+		qdma_dev->init_q_range = 1;
+	}
+
+	/* Allocate Tx queue data structure */
+	txq = rte_zmalloc_socket("QDMA_TxQ", sizeof(struct qdma_tx_queue),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL) {
+		PMD_DRV_LOG(ERR, "Memory allocation failed for "
+				 "Tx queue SW structure\n");
+		err = -ENOMEM;
+		goto tx_setup_err;
+	}
+
+	txq->st_mode = qdma_dev->q_info[tx_queue_id].queue_mode;
+	txq->en_bypass = (qdma_dev->q_info[tx_queue_id].tx_bypass_mode) ?
+				1 : 0;
+	txq->bypass_desc_sz = qdma_dev->q_info[tx_queue_id].tx_bypass_desc_sz;
+
+	txq->nb_tx_desc = (nb_tx_desc + 1);
+	txq->queue_id = tx_queue_id;
+	txq->dev = dev;
+	txq->port_id = dev->data->port_id;
+	txq->func_id = qdma_dev->func_id;
+	txq->num_queues = dev->data->nb_tx_queues;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+	txq->ringszidx = index_of_array(qdma_dev->g_ring_sz,
+					QDMA_NUM_RING_SIZES, txq->nb_tx_desc);
+	if (txq->ringszidx < 0) {
+		PMD_DRV_LOG(ERR, "Expected Ring size %d not found\n",
+			    txq->nb_tx_desc);
+		err = -EINVAL;
+		goto tx_setup_err;
+	}
+
+	if (qdma_dev->ip_type == EQDMA_SOFT_IP &&
+	    qdma_dev->vivado_rel >= QDMA_VIVADO_2020_2) {
+		if (qdma_dev->dev_cap.desc_eng_mode ==
+				QDMA_DESC_ENG_BYPASS_ONLY) {
+			PMD_DRV_LOG(ERR,
+				    "Bypass only mode design "
+				    "is not supported\n");
+			return -ENOTSUP;
+		}
+
+		if (txq->en_bypass &&
+		    qdma_dev->dev_cap.desc_eng_mode ==
+				QDMA_DESC_ENG_INTERNAL_ONLY) {
+			PMD_DRV_LOG(ERR,
+				    "Tx qid %d config in bypass "
+				    "mode not supported on "
+				    "internal only mode design\n",
+				    tx_queue_id);
+			return -ENOTSUP;
+		}
+	}
+
+	/* Allocate memory for TX descriptor ring */
+	if (txq->st_mode) {
+		if (!qdma_dev->dev_cap.st_en) {
+			PMD_DRV_LOG(ERR, "Streaming mode not enabled "
+					 "in the hardware\n");
+			err = -EINVAL;
+			goto tx_setup_err;
+		}
+
+		if (txq->en_bypass &&
+		    txq->bypass_desc_sz != 0)
+			sz = (txq->nb_tx_desc) * (txq->bypass_desc_sz);
+		else
+			sz = (txq->nb_tx_desc) *
+					sizeof(struct qdma_ul_st_h2c_desc);
+		txq->tx_mz = qdma_zone_reserve(dev, "TxHwRn", tx_queue_id, sz,
+					       socket_id);
+		if (!txq->tx_mz) {
+			PMD_DRV_LOG(ERR, "Couldn't reserve memory for "
+					 "ST H2C ring of size %d\n", sz);
+			err = -ENOMEM;
+			goto tx_setup_err;
+		}
+
+		txq->tx_ring = txq->tx_mz->addr;
+		tx_ring_st = (struct qdma_ul_st_h2c_desc *)txq->tx_ring;
+
+		tx_ring_bypass = (uint8_t *)txq->tx_ring;
+		/* Write-back status structure */
+		if (txq->en_bypass &&
+		    txq->bypass_desc_sz != 0)
+			txq->wb_status = (struct wb_status *)&
+					 tx_ring_bypass[(txq->nb_tx_desc - 1) *
+							(txq->bypass_desc_sz)];
+		else
+			txq->wb_status = (struct wb_status *)&
+					 tx_ring_st[txq->nb_tx_desc - 1];
+	} else {
+		if (!qdma_dev->dev_cap.mm_en) {
+			PMD_DRV_LOG(ERR, "Memory mapped mode not "
+					 "enabled in the hardware\n");
+			err = -EINVAL;
+			goto tx_setup_err;
+		}
+
+		if (txq->en_bypass &&
+		    txq->bypass_desc_sz != 0)
+			sz = (txq->nb_tx_desc) * (txq->bypass_desc_sz);
+		else
+			sz = (txq->nb_tx_desc) * sizeof(struct qdma_ul_mm_desc);
+		txq->tx_mz = qdma_zone_reserve(dev, "TxHwRn", tx_queue_id,
+					       sz, socket_id);
+		if (!txq->tx_mz) {
+			PMD_DRV_LOG(ERR, "Couldn't reserve memory for "
+					 "MM H2C ring of size %d\n", sz);
+			err = -ENOMEM;
+			goto tx_setup_err;
+		}
+
+		txq->tx_ring = txq->tx_mz->addr;
+		tx_ring_mm = (struct qdma_ul_mm_desc *)txq->tx_ring;
+
+		/* Write-back status structure */
+
+		tx_ring_bypass = (uint8_t *)txq->tx_ring;
+		if (txq->en_bypass &&
+		    txq->bypass_desc_sz != 0)
+			txq->wb_status = (struct wb_status *)&
+					 tx_ring_bypass[(txq->nb_tx_desc - 1) *
+							(txq->bypass_desc_sz)];
+		else
+			txq->wb_status = (struct wb_status *)&
+					 tx_ring_mm[txq->nb_tx_desc - 1];
+	}
+
+	PMD_DRV_LOG(INFO, "Tx ring phys addr: 0x%lX, Tx Ring virt addr: 0x%lX",
+		    (uint64_t)txq->tx_mz->iova, (uint64_t)txq->tx_ring);
+
+	/* Allocate memory for TX software ring */
+	sz = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
+	txq->sw_ring = rte_zmalloc_socket("TxSwRn", sz,
+					  RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->sw_ring == NULL) {
+		PMD_DRV_LOG(ERR, "Memory allocation failed for "
+				 "Tx queue SW ring\n");
+		err = -ENOMEM;
+		goto tx_setup_err;
+	}
+
+	rte_spinlock_init(&txq->pidx_update_lock);
+	dev->data->tx_queues[tx_queue_id] = txq;
 
 	return 0;
+
+tx_setup_err:
+	PMD_DRV_LOG(ERR, " Tx queue setup failed");
+	if (!qdma_dev->is_vf)
+		qdma_dev_decrement_active_queue(qdma_dev->dma_device_index,
+						qdma_dev->func_id,
+						QDMA_DEV_Q_TYPE_H2C);
+	if (txq) {
+		if (txq->tx_mz)
+			rte_memzone_free(txq->tx_mz);
+		if (txq->sw_ring)
+			rte_free(txq->sw_ring);
+		rte_free(txq);
+	}
+	return err;
 }
 
 void qdma_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_id)
@@ -983,9 +1166,54 @@ int qdma_dev_configure(struct rte_eth_dev *dev)
 
 int qdma_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qid)
 {
-	(void)dev;
-	(void)qid;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_tx_queue *txq;
+	uint32_t queue_base = qdma_dev->queue_base;
+	int err, bypass_desc_sz_idx;
+	struct qdma_descq_sw_ctxt q_sw_ctxt;
+	struct qdma_hw_access *hw_access = qdma_dev->hw_access;
+
+	txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
+	memset(&q_sw_ctxt, 0, sizeof(struct qdma_descq_sw_ctxt));
+
+	bypass_desc_sz_idx = qmda_get_desc_sz_idx(txq->bypass_desc_sz);
+
+	qdma_reset_tx_queue(txq);
+	qdma_clr_tx_queue_ctxts(dev, (qid + queue_base), txq->st_mode);
+
+	if (txq->st_mode) {
+		q_sw_ctxt.desc_sz = SW_DESC_CNTXT_H2C_STREAM_DMA;
+	} else {
+		q_sw_ctxt.desc_sz = SW_DESC_CNTXT_MEMORY_MAP_DMA;
+		q_sw_ctxt.is_mm = 1;
+	}
+	q_sw_ctxt.wbi_chk = 1;
+	q_sw_ctxt.wbi_intvl_en = 1;
+	q_sw_ctxt.fnc_id = txq->func_id;
+	q_sw_ctxt.qen = 1;
+	q_sw_ctxt.rngsz_idx = txq->ringszidx;
+	q_sw_ctxt.bypass = txq->en_bypass;
+	q_sw_ctxt.wbk_en = 1;
+	q_sw_ctxt.ring_bs_addr = (uint64_t)txq->tx_mz->iova;
+
+	if (txq->en_bypass &&
+	    txq->bypass_desc_sz != 0)
+		q_sw_ctxt.desc_sz = bypass_desc_sz_idx;
+
+	/* Set SW Context */
+	err = hw_access->qdma_sw_ctx_conf(dev, 0,
+					  (qid + queue_base), &q_sw_ctxt,
+					  QDMA_HW_ACCESS_WRITE);
+	if (err < 0)
+		return qdma_dev->hw_access->qdma_get_error_code(err);
+
+	txq->q_pidx_info.pidx = 0;
+	hw_access->qdma_queue_pidx_update(dev, qdma_dev->is_vf,
+					  qid, 0, &txq->q_pidx_info);
+
+	dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+	txq->status = RTE_ETH_QUEUE_STATE_STARTED;
 
 	return 0;
 }
@@ -1185,8 +1413,32 @@ int qdma_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
 
 int qdma_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
 {
-	(void)dev;
-	(void)qid;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	uint32_t queue_base = qdma_dev->queue_base;
+	struct qdma_tx_queue *txq;
+	int cnt = 0;
+	uint16_t count;
+
+	txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
+
+	txq->status = RTE_ETH_QUEUE_STATE_STOPPED;
+	/* Wait for TXQ to send out all packets. */
+	while (txq->wb_status->cidx != txq->q_pidx_info.pidx) {
+		usleep(10);
+		if (cnt++ > 10000)
+			break;
+	}
+
+	qdma_inv_tx_queue_ctxts(dev, (qid + queue_base), txq->st_mode);
+
+	/* Relinquish pending mbufs */
+	for (count = 0; count < txq->nb_tx_desc - 1; count++) {
+		rte_pktmbuf_free(txq->sw_ring[count]);
+		txq->sw_ring[count] = NULL;
+	}
+	qdma_reset_tx_queue(txq);
+
+	dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
 
 	return 0;
 }
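
[Note for reviewers, not part of the patch] One subtle point in
qdma_dev_tx_queue_setup() is that the ring is sized for nb_tx_desc + 1
entries and the write-back status block is overlaid on the last slot
(tx_ring[nb_tx_desc - 1], or the equivalent byte offset when a bypass
descriptor size is in use). A small standalone illustration of that layout
arithmetic; the struct name and sizes below are hypothetical stand-ins,
not driver symbols.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a fixed-size H2C descriptor. */
struct ex_h2c_desc { uint8_t raw[32]; };

int main(void)
{
	uint16_t nb_requested = 1024;            /* what the app passed in */
	uint16_t nb_tx_desc = nb_requested + 1;  /* one extra slot is kept */
	uint16_t bypass_desc_sz = 64;            /* only when bypass is on */

	/* Fixed-size descriptors: status sits in the last ring slot,
	 * i.e. wb_status = &tx_ring[nb_tx_desc - 1] in the setup code.
	 */
	size_t wb_off_fixed = (size_t)(nb_tx_desc - 1) *
			      sizeof(struct ex_h2c_desc);

	/* Bypass descriptors: same idea, scaled by bypass_desc_sz. */
	size_t wb_off_bypass = (size_t)(nb_tx_desc - 1) * bypass_desc_sz;

	printf("write-back status offset: %zu (fixed), %zu (bypass)\n",
	       wb_off_fixed, wb_off_bypass);
	return 0;
}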