From patchwork Wed Jul 6 07:52:15 2022
X-Patchwork-Submitter: Aman Kumar
X-Patchwork-Id: 113743
X-Patchwork-Delegate: thomas@monjalon.net
From: Aman Kumar
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, david.marchand@redhat.com, aman.kumar@vvdntech.in
Subject: [RFC PATCH 25/29] net/qdma: add basic PMD ops for VF
Date: Wed, 6 Jul 2022 13:22:15 +0530
Message-Id: <20220706075219.517046-26-aman.kumar@vvdntech.in>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220706075219.517046-1-aman.kumar@vvdntech.in>
References: <20220706075219.517046-1-aman.kumar@vvdntech.in>
List-Id: DPDK patches and discussions

This patch adds dev_configure, dev_infos_get, dev_close, dev_reset, link_update, and queue start/stop ops for VF devices.
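For reference, below is a minimal sketch (not part of this patch) of how an application is expected to reach these ops through the generic ethdev API: rte_eth_dev_configure() dispatches to qdma_vf_dev_configure(), and the per-queue start/stop calls dispatch to the qdma_vf_dev_*_queue_start/stop ops added here. The port id, queue count, ring size and mempool are placeholders, and the sketch assumes the queue setup and device start support from the rest of this series is applied.

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int vf_port_bringup(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf = { 0 };
	int ret;

	/* .dev_configure: negotiates queue base/count with the PF over
	 * the mailbox and programs the FMAP (qdma_vf_set_qrange()).
	 */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
			rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	/* .rx_queue_stop/.rx_queue_start: drain the queue, invalidate and
	 * re-program the descriptor queue contexts through the PF mailbox.
	 */
	ret = rte_eth_dev_rx_queue_stop(port_id, 0);
	if (ret == 0)
		ret = rte_eth_dev_rx_queue_start(port_id, 0);

	return ret;
}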
Signed-off-by: Aman Kumar --- drivers/net/qdma/qdma_vf_ethdev.c | 641 ++++++++++++++++++++++++++++++ 1 file changed, 641 insertions(+) diff --git a/drivers/net/qdma/qdma_vf_ethdev.c b/drivers/net/qdma/qdma_vf_ethdev.c index 28d34560c1..5a54c00893 100644 --- a/drivers/net/qdma/qdma_vf_ethdev.c +++ b/drivers/net/qdma/qdma_vf_ethdev.c @@ -94,11 +94,652 @@ static int qdma_ethdev_offline(struct rte_eth_dev *dev) return rv; } +static int qdma_vf_set_qrange(struct rte_eth_dev *dev) +{ + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + struct qdma_mbox_msg *m; + int rv = 0; + + + m = qdma_mbox_msg_alloc(); + if (!m) + return -ENOMEM; + + qdma_mbox_compose_vf_fmap_prog(qdma_dev->func_id, + (uint16_t)qdma_dev->qsets_en, + (int)qdma_dev->queue_base, + m->raw_data); + rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT); + if (rv < 0) { + if (rv != -ENODEV) + PMD_DRV_LOG(ERR, "%x set q range (fmap) failed %d.\n", + qdma_dev->func_id, rv); + goto err_out; + } + + rv = qdma_mbox_vf_response_status(m->raw_data); + +err_out: + qdma_mbox_msg_free(m); + return rv; +} + +static int qdma_set_qmax(struct rte_eth_dev *dev, int *qmax, int *qbase) +{ + struct qdma_mbox_msg *m; + int rv = 0; + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + + m = qdma_mbox_msg_alloc(); + if (!m) + return -ENOMEM; + + qdma_mbox_compose_vf_qreq(qdma_dev->func_id, (uint16_t)*qmax & 0xFFFF, + *qbase, m->raw_data); + rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT); + if (rv < 0) { + PMD_DRV_LOG(ERR, "%x set q max failed %d.\n", + qdma_dev->func_id, rv); + goto err_out; + } + + rv = qdma_mbox_vf_qinfo_get(m->raw_data, qbase, (uint16_t *)qmax); +err_out: + qdma_mbox_msg_free(m); + return rv; +} + +static int qdma_rxq_context_setup(struct rte_eth_dev *dev, uint16_t qid) +{ + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + uint32_t qid_hw; + struct qdma_mbox_msg *m = qdma_mbox_msg_alloc(); + struct mbox_descq_conf descq_conf; + int rv, bypass_desc_sz_idx; + struct qdma_rx_queue *rxq; + uint8_t cmpt_desc_fmt; + enum mbox_cmpt_ctxt_type cmpt_ctxt_type = QDMA_MBOX_CMPT_CTXT_NONE; + + if (!m) + return -ENOMEM; + memset(&descq_conf, 0, sizeof(struct mbox_descq_conf)); + rxq = (struct qdma_rx_queue *)dev->data->rx_queues[qid]; + qid_hw = qdma_dev->queue_base + rxq->queue_id; + + switch (rxq->cmpt_desc_len) { + case RTE_PMD_QDMA_CMPT_DESC_LEN_8B: + cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_8B; + break; + case RTE_PMD_QDMA_CMPT_DESC_LEN_16B: + cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_16B; + break; + case RTE_PMD_QDMA_CMPT_DESC_LEN_32B: + cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_32B; + break; + case RTE_PMD_QDMA_CMPT_DESC_LEN_64B: + if (!qdma_dev->dev_cap.cmpt_desc_64b) { + PMD_DRV_LOG(ERR, "PF-%d(DEVFN) 64B is not supported in this " + "mode:\n", qdma_dev->func_id); + return -1; + } + cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_64B; + break; + default: + cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_8B; + break; + } + descq_conf.ring_bs_addr = rxq->rx_mz->iova; + descq_conf.en_bypass = rxq->en_bypass; + descq_conf.irq_arm = 0; + descq_conf.at = 0; + descq_conf.wbk_en = 1; + descq_conf.irq_en = 0; + + bypass_desc_sz_idx = qmda_get_desc_sz_idx(rxq->bypass_desc_sz); + + if (!rxq->st_mode) {/* mm c2h */ + descq_conf.desc_sz = SW_DESC_CNTXT_MEMORY_MAP_DMA; + descq_conf.wbi_intvl_en = 1; + descq_conf.wbi_chk = 1; + } else {/* st c2h */ + descq_conf.desc_sz = SW_DESC_CNTXT_C2H_STREAM_DMA; + descq_conf.forced_en = 1; + descq_conf.cmpt_ring_bs_addr = rxq->rx_cmpt_mz->iova; + descq_conf.cmpt_desc_sz = cmpt_desc_fmt; + descq_conf.triggermode = 
rxq->triggermode; + + descq_conf.cmpt_color = CMPT_DEFAULT_COLOR_BIT; + descq_conf.cmpt_full_upd = 0; + descq_conf.cnt_thres = + qdma_dev->g_c2h_cnt_th[rxq->threshidx]; + descq_conf.timer_thres = + qdma_dev->g_c2h_timer_cnt[rxq->timeridx]; + descq_conf.cmpt_ringsz = + qdma_dev->g_ring_sz[rxq->cmpt_ringszidx] - 1; + descq_conf.bufsz = qdma_dev->g_c2h_buf_sz[rxq->buffszidx]; + descq_conf.cmpt_int_en = 0; + descq_conf.cmpl_stat_en = rxq->st_mode; + descq_conf.pfch_en = rxq->en_prefetch; + descq_conf.en_bypass_prefetch = rxq->en_bypass_prefetch; + if (qdma_dev->dev_cap.cmpt_ovf_chk_dis) + descq_conf.dis_overflow_check = rxq->dis_overflow_check; + + cmpt_ctxt_type = QDMA_MBOX_CMPT_WITH_ST; + } + + if (rxq->en_bypass && rxq->bypass_desc_sz != 0) + descq_conf.desc_sz = bypass_desc_sz_idx; + + descq_conf.func_id = rxq->func_id; + descq_conf.ringsz = qdma_dev->g_ring_sz[rxq->ringszidx] - 1; + + qdma_mbox_compose_vf_qctxt_write(rxq->func_id, qid_hw, rxq->st_mode, 1, + cmpt_ctxt_type, + &descq_conf, m->raw_data); + + rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT); + if (rv < 0) { + PMD_DRV_LOG(ERR, "%x, qid_hw 0x%x, mbox failed %d.\n", + qdma_dev->func_id, qid_hw, rv); + goto err_out; + } + + rv = qdma_mbox_vf_response_status(m->raw_data); + +err_out: + qdma_mbox_msg_free(m); + return rv; +} + +static int qdma_txq_context_setup(struct rte_eth_dev *dev, uint16_t qid) +{ + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + struct qdma_mbox_msg *m = qdma_mbox_msg_alloc(); + struct mbox_descq_conf descq_conf; + int rv, bypass_desc_sz_idx; + struct qdma_tx_queue *txq; + uint32_t qid_hw; + + if (!m) + return -ENOMEM; + memset(&descq_conf, 0, sizeof(struct mbox_descq_conf)); + txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid]; + qid_hw = qdma_dev->queue_base + txq->queue_id; + descq_conf.ring_bs_addr = txq->tx_mz->iova; + descq_conf.en_bypass = txq->en_bypass; + descq_conf.wbi_intvl_en = 1; + descq_conf.wbi_chk = 1; + descq_conf.wbk_en = 1; + + bypass_desc_sz_idx = qmda_get_desc_sz_idx(txq->bypass_desc_sz); + + if (!txq->st_mode) /* mm h2c */ + descq_conf.desc_sz = SW_DESC_CNTXT_MEMORY_MAP_DMA; + else /* st h2c */ + descq_conf.desc_sz = SW_DESC_CNTXT_H2C_STREAM_DMA; + descq_conf.func_id = txq->func_id; + descq_conf.ringsz = qdma_dev->g_ring_sz[txq->ringszidx] - 1; + + if (txq->en_bypass && txq->bypass_desc_sz != 0) + descq_conf.desc_sz = bypass_desc_sz_idx; + + qdma_mbox_compose_vf_qctxt_write(txq->func_id, qid_hw, txq->st_mode, 0, + QDMA_MBOX_CMPT_CTXT_NONE, + &descq_conf, m->raw_data); + + rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT); + if (rv < 0) { + PMD_DRV_LOG(ERR, "%x, qid_hw 0x%x, mbox failed %d.\n", + qdma_dev->func_id, qid_hw, rv); + goto err_out; + } + + rv = qdma_mbox_vf_response_status(m->raw_data); + +err_out: + qdma_mbox_msg_free(m); + return rv; +} + +static int qdma_queue_context_invalidate(struct rte_eth_dev *dev, uint32_t qid, + bool st, bool c2h) +{ + struct qdma_mbox_msg *m = qdma_mbox_msg_alloc(); + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + uint32_t qid_hw; + int rv; + enum mbox_cmpt_ctxt_type cmpt_ctxt_type = QDMA_MBOX_CMPT_CTXT_NONE; + + if (!m) + return -ENOMEM; + + if (st && c2h) + cmpt_ctxt_type = QDMA_MBOX_CMPT_WITH_ST; + qid_hw = qdma_dev->queue_base + qid; + qdma_mbox_compose_vf_qctxt_invalidate(qdma_dev->func_id, qid_hw, + st, c2h, cmpt_ctxt_type, + m->raw_data); + rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT); + if (rv < 0) { + if (rv != -ENODEV) + PMD_DRV_LOG(INFO, "%x, qid_hw 0x%x mbox failed %d.\n", + qdma_dev->func_id, 
qid_hw, rv); + goto err_out; + } + + rv = qdma_mbox_vf_response_status(m->raw_data); + +err_out: + qdma_mbox_msg_free(m); + return rv; +} + +static int qdma_vf_dev_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) +{ + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; + dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; + dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_25G; + + PMD_DRV_LOG(INFO, "Link update done\n"); + + return 0; +} + +static int qdma_vf_dev_infos_get(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + + dev_info->max_rx_queues = qdma_dev->dev_cap.num_qs; + dev_info->max_tx_queues = qdma_dev->dev_cap.num_qs; + + dev_info->min_rx_bufsize = QDMA_MIN_RXBUFF_SIZE; + dev_info->max_rx_pktlen = DMA_BRAM_SIZE; + dev_info->max_mac_addrs = 1; + + return 0; +} + +int qdma_vf_dev_close(struct rte_eth_dev *dev) +{ + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + struct qdma_tx_queue *txq; + struct qdma_rx_queue *rxq; + struct qdma_cmpt_queue *cmptq; + uint32_t qid; + + PMD_DRV_LOG(INFO, "Closing all queues\n"); + + /* iterate over rx queues */ + for (qid = 0; qid < dev->data->nb_rx_queues; ++qid) { + rxq = dev->data->rx_queues[qid]; + if (rxq != NULL) { + PMD_DRV_LOG(INFO, "VF-%d(DEVFN) Remove C2H queue: %d", + qdma_dev->func_id, qid); + + qdma_dev_notify_qdel(rxq->dev, rxq->queue_id + + qdma_dev->queue_base, + QDMA_DEV_Q_TYPE_C2H); + + if (rxq->st_mode) + qdma_dev_notify_qdel(rxq->dev, rxq->queue_id + + qdma_dev->queue_base, + QDMA_DEV_Q_TYPE_CMPT); + + if (rxq->sw_ring) + rte_free(rxq->sw_ring); + + if (rxq->st_mode) { /* if ST-mode */ + if (rxq->rx_cmpt_mz) + rte_memzone_free(rxq->rx_cmpt_mz); + } + + if (rxq->rx_mz) + rte_memzone_free(rxq->rx_mz); + rte_free(rxq); + PMD_DRV_LOG(INFO, "VF-%d(DEVFN) C2H queue %d removed", + qdma_dev->func_id, qid); + } + } + + /* iterate over tx queues */ + for (qid = 0; qid < dev->data->nb_tx_queues; ++qid) { + txq = dev->data->tx_queues[qid]; + if (txq != NULL) { + PMD_DRV_LOG(INFO, "VF-%d(DEVFN) Remove H2C queue: %d", + qdma_dev->func_id, qid); + + qdma_dev_notify_qdel(txq->dev, txq->queue_id + + qdma_dev->queue_base, + QDMA_DEV_Q_TYPE_H2C); + if (txq->sw_ring) + rte_free(txq->sw_ring); + if (txq->tx_mz) + rte_memzone_free(txq->tx_mz); + rte_free(txq); + PMD_DRV_LOG(INFO, "VF-%d(DEVFN) H2C queue %d removed", + qdma_dev->func_id, qid); + } + } + if (qdma_dev->dev_cap.mm_cmpt_en) { + /* iterate over cmpt queues */ + for (qid = 0; qid < qdma_dev->qsets_en; ++qid) { + cmptq = qdma_dev->cmpt_queues[qid]; + if (cmptq != NULL) { + PMD_DRV_LOG(INFO, "VF-%d(DEVFN) Remove CMPT queue: %d", + qdma_dev->func_id, qid); + qdma_dev_notify_qdel(cmptq->dev, + cmptq->queue_id + + qdma_dev->queue_base, + QDMA_DEV_Q_TYPE_CMPT); + if (cmptq->cmpt_mz) + rte_memzone_free(cmptq->cmpt_mz); + rte_free(cmptq); + PMD_DRV_LOG(INFO, "VF-%d(DEVFN) CMPT queue %d removed", + qdma_dev->func_id, qid); + } + } + + if (qdma_dev->cmpt_queues != NULL) { + rte_free(qdma_dev->cmpt_queues); + qdma_dev->cmpt_queues = NULL; + } + } + + qdma_dev->qsets_en = 0; + qdma_set_qmax(dev, (int *)&qdma_dev->qsets_en, + (int *)&qdma_dev->queue_base); + qdma_dev->init_q_range = 0; + rte_free(qdma_dev->q_info); + qdma_dev->q_info = NULL; + qdma_dev->dev_configured = 0; + + return 0; +} + +static int qdma_vf_dev_reset(struct rte_eth_dev *dev) +{ + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + uint32_t i = 0; + int ret; + + PMD_DRV_LOG(INFO, "%s: Reset 
VF-%d(DEVFN)", + __func__, qdma_dev->func_id); + + ret = eth_qdma_vf_dev_uninit(dev); + if (ret) + return ret; + + if (qdma_dev->reset_state == RESET_STATE_IDLE) { + ret = eth_qdma_vf_dev_init(dev); + } else { + /* VFs do not stop mbox and start waiting for a + * "PF_RESET_DONE" mailbox message from PF + * for a maximum of 60 secs + */ + PMD_DRV_LOG(INFO, + "%s: Waiting for reset done message from PF", + __func__); + while (i < RESET_TIMEOUT) { + if (qdma_dev->reset_state == + RESET_STATE_RECV_PF_RESET_DONE) { + qdma_mbox_uninit(dev); + + ret = eth_qdma_vf_dev_init(dev); + return ret; + } + + rte_delay_ms(1); + i++; + } + } + + if (i >= RESET_TIMEOUT) { + PMD_DRV_LOG(ERR, "%s: Reset failed for VF-%d(DEVFN)\n", + __func__, qdma_dev->func_id); + return -ETIMEDOUT; + } + + return ret; +} + +static int qdma_vf_dev_configure(struct rte_eth_dev *dev) +{ + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + int32_t ret = 0, queue_base = -1; + uint32_t qid = 0; + + /* FMAP configuration */ + qdma_dev->qsets_en = RTE_MAX(dev->data->nb_rx_queues, + dev->data->nb_tx_queues); + + if (qdma_dev->qsets_en > qdma_dev->dev_cap.num_qs) { + PMD_DRV_LOG(INFO, "VF-%d(DEVFN) Error: Number of Queues to be " + "configured are greater than the queues " + "supported by the hardware\n", + qdma_dev->func_id); + qdma_dev->qsets_en = 0; + return -1; + } + + /* Request queue base from the resource manager */ + ret = qdma_set_qmax(dev, (int *)&qdma_dev->qsets_en, + (int *)&queue_base); + if (ret != QDMA_SUCCESS) { + PMD_DRV_LOG(ERR, "VF-%d(DEVFN) queue allocation failed: %d\n", + qdma_dev->func_id, ret); + return -1; + } + qdma_dev->queue_base = queue_base; + + qdma_dev->q_info = rte_zmalloc("qinfo", sizeof(struct queue_info) * + qdma_dev->qsets_en, 0); + if (qdma_dev->q_info == NULL) { + PMD_DRV_LOG(INFO, "VF-%d fail to allocate queue info memory\n", + qdma_dev->func_id); + return (-ENOMEM); + } + + /* Reserve memory for cmptq ring pointers + * Max completion queues can be maximum of rx and tx queues. + */ + qdma_dev->cmpt_queues = rte_zmalloc("cmpt_queues", + sizeof(qdma_dev->cmpt_queues[0]) * + qdma_dev->qsets_en, + RTE_CACHE_LINE_SIZE); + if (qdma_dev->cmpt_queues == NULL) { + PMD_DRV_LOG(ERR, "VF-%d(DEVFN) cmpt ring pointers memory " + "allocation failed:\n", qdma_dev->func_id); + rte_free(qdma_dev->q_info); + qdma_dev->q_info = NULL; + return -(ENOMEM); + } + + /* Initialize queue_modes to all 1's ( i.e. 
Streaming) */ + for (qid = 0 ; qid < qdma_dev->qsets_en; qid++) + qdma_dev->q_info[qid].queue_mode = RTE_PMD_QDMA_STREAMING_MODE; + + for (qid = 0 ; qid < dev->data->nb_rx_queues; qid++) { + qdma_dev->q_info[qid].cmpt_desc_sz = qdma_dev->cmpt_desc_len; + qdma_dev->q_info[qid].rx_bypass_mode = + qdma_dev->c2h_bypass_mode; + qdma_dev->q_info[qid].trigger_mode = qdma_dev->trigger_mode; + qdma_dev->q_info[qid].timer_count = + qdma_dev->timer_count; + } + + for (qid = 0 ; qid < dev->data->nb_tx_queues; qid++) + qdma_dev->q_info[qid].tx_bypass_mode = + qdma_dev->h2c_bypass_mode; + + ret = qdma_vf_set_qrange(dev); + if (ret < 0) { + PMD_DRV_LOG(ERR, "FMAP programming failed\n"); + rte_free(qdma_dev->q_info); + qdma_dev->q_info = NULL; + rte_free(qdma_dev->cmpt_queues); + qdma_dev->cmpt_queues = NULL; + return ret; + } + + qdma_dev->dev_configured = 1; + + return ret; +} + +int qdma_vf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qid) +{ + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + struct qdma_tx_queue *txq; + + txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid]; + qdma_reset_tx_queue(txq); + + if (qdma_txq_context_setup(dev, qid) < 0) + return -1; + + txq->q_pidx_info.pidx = 0; + qdma_dev->hw_access->qdma_queue_pidx_update(dev, qdma_dev->is_vf, + qid, 0, &txq->q_pidx_info); + + dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED; + txq->status = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +int qdma_vf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qid) +{ + struct qdma_pci_dev *qdma_dev = dev->data->dev_private; + struct qdma_rx_queue *rxq; + int err; + + rxq = (struct qdma_rx_queue *)dev->data->rx_queues[qid]; + qdma_reset_rx_queue(rxq); + + err = qdma_init_rx_queue(rxq); + if (err != 0) + return err; + if (qdma_rxq_context_setup(dev, qid) < 0) { + PMD_DRV_LOG(ERR, "context_setup for qid - %u failed", qid); + + return -1; + } + + if (rxq->st_mode) { + rxq->cmpt_cidx_info.counter_idx = rxq->threshidx; + rxq->cmpt_cidx_info.timer_idx = rxq->timeridx; + rxq->cmpt_cidx_info.trig_mode = rxq->triggermode; + rxq->cmpt_cidx_info.wrb_en = 1; + qdma_dev->hw_access->qdma_queue_cmpt_cidx_update(dev, 1, + qid, &rxq->cmpt_cidx_info); + + rxq->q_pidx_info.pidx = (rxq->nb_rx_desc - 2); + qdma_dev->hw_access->qdma_queue_pidx_update(dev, 1, + qid, 1, &rxq->q_pidx_info); + } + + dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED; + rxq->status = RTE_ETH_QUEUE_STATE_STARTED; + return 0; +} + +int qdma_vf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qid) +{ + struct qdma_rx_queue *rxq; + int i = 0, cnt = 0; + + rxq = (struct qdma_rx_queue *)dev->data->rx_queues[qid]; + + rxq->status = RTE_ETH_QUEUE_STATE_STOPPED; + + /* Wait for queue to recv all packets. 
*/ + if (rxq->st_mode) { /* ST-mode */ + while (rxq->wb_status->pidx != rxq->cmpt_cidx_info.wrb_cidx) { + usleep(10); + if (cnt++ > 10000) + break; + } + } else { /* MM mode */ + while (rxq->wb_status->cidx != rxq->q_pidx_info.pidx) { + usleep(10); + if (cnt++ > 10000) + break; + } + } + + qdma_queue_context_invalidate(dev, qid, rxq->st_mode, 1); + + if (rxq->st_mode) { /* ST-mode */ +#ifdef DUMP_MEMPOOL_USAGE_STATS + PMD_DRV_LOG(INFO, "%s(): %d: queue id = %d, mbuf_avail_count = " + "%d, mbuf_in_use_count = %d", + __func__, __LINE__, rxq->queue_id, + rte_mempool_avail_count(rxq->mb_pool), + rte_mempool_in_use_count(rxq->mb_pool)); +#endif /* DUMP_MEMPOOL_USAGE_STATS */ + + for (i = 0; i < rxq->nb_rx_desc - 1; i++) { + rte_pktmbuf_free(rxq->sw_ring[i]); + rxq->sw_ring[i] = NULL; + } +#ifdef DUMP_MEMPOOL_USAGE_STATS + PMD_DRV_LOG(INFO, "%s(): %d: queue id = %d, mbuf_avail_count = " + "%d, mbuf_in_use_count = %d", + __func__, __LINE__, rxq->queue_id, + rte_mempool_avail_count(rxq->mb_pool), + rte_mempool_in_use_count(rxq->mb_pool)); +#endif /* DUMP_MEMPOOL_USAGE_STATS */ + } + + qdma_reset_rx_queue(rxq); + dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + + +int qdma_vf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qid) +{ + struct qdma_tx_queue *txq; + int i = 0, cnt = 0; + + txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid]; + + txq->status = RTE_ETH_QUEUE_STATE_STOPPED; + /* Wait for TXQ to send out all packets. */ + while (txq->wb_status->cidx != txq->q_pidx_info.pidx) { + usleep(10); + if (cnt++ > 10000) + break; + } + + qdma_queue_context_invalidate(dev, qid, txq->st_mode, 0); + + /* Free mbufs if any pending in the ring */ + for (i = 0; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free(txq->sw_ring[i]); + txq->sw_ring[i] = NULL; + } + qdma_reset_tx_queue(txq); + dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + static struct eth_dev_ops qdma_vf_eth_dev_ops = { + .dev_configure = qdma_vf_dev_configure, + .dev_infos_get = qdma_vf_dev_infos_get, + .dev_close = qdma_vf_dev_close, + .dev_reset = qdma_vf_dev_reset, + .link_update = qdma_vf_dev_link_update, .rx_queue_setup = qdma_dev_rx_queue_setup, .tx_queue_setup = qdma_dev_tx_queue_setup, .rx_queue_release = qdma_dev_rx_queue_release, .tx_queue_release = qdma_dev_tx_queue_release, + .rx_queue_start = qdma_vf_dev_rx_queue_start, + .rx_queue_stop = qdma_vf_dev_rx_queue_stop, + .tx_queue_start = qdma_vf_dev_tx_queue_start, + .tx_queue_stop = qdma_vf_dev_tx_queue_stop, }; /**