From patchwork Thu Mar 2 10:35:12 2023
X-Patchwork-Submitter: "Liu, Mingxia" <mingxia.liu@intel.com>
X-Patchwork-Id: 124644
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Mingxia Liu <mingxia.liu@intel.com>
To: dev@dpdk.org, beilei.xing@intel.com, yuying.zhang@intel.com
Cc: Mingxia Liu <mingxia.liu@intel.com>
Subject: [PATCH v8 06/21] net/cpfl: support queue stop
Date: Thu, 2 Mar 2023 10:35:12 +0000
Message-Id: <20230302103527.931071-7-mingxia.liu@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20230302103527.931071-1-mingxia.liu@intel.com>
References: <20230216003010.3439881-1-mingxia.liu@intel.com>
 <20230302103527.931071-1-mingxia.liu@intel.com>

Add support for these device ops:
 - rx_queue_stop
 - tx_queue_stop

Stopping a queue switches it off via virtchnl, releases its mbufs, and
resets the queue so that it can later be restarted without a full port
reconfiguration.

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
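Note (not part of the commit message): the new ops are reached through
the generic ethdev queue-control API. Below is a minimal
application-side sketch; the helper name, port id, and queue id are
placeholders, and error handling is reduced to early returns.

	#include <rte_ethdev.h>

	/* Quiesce and restart one Rx/Tx queue pair on a running port.
	 * rte_eth_dev_rx_queue_stop()/rte_eth_dev_tx_queue_stop()
	 * dispatch to the PMD's rx_queue_stop/tx_queue_stop ops added
	 * by this patch.
	 */
	static int
	restart_queue_pair(uint16_t port_id, uint16_t queue_id)
	{
		int ret;

		ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
		if (ret != 0)
			return ret;
		ret = rte_eth_dev_tx_queue_stop(port_id, queue_id);
		if (ret != 0)
			return ret;

		/* ... drain or reconfigure while the queues are stopped ... */

		ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
		if (ret != 0)
			return ret;
		return rte_eth_dev_tx_queue_start(port_id, queue_id);
	}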
 drivers/net/cpfl/cpfl_ethdev.c | 10 +++-
 drivers/net/cpfl/cpfl_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h   |  3 ++
 3 files changed, 110 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 3248d22d2f..00672142e3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -229,12 +229,16 @@ cpfl_dev_start(struct rte_eth_dev *dev)
 	ret = idpf_vc_vport_ena_dis(vport, true);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to enable vport");
-		return ret;
+		goto err_vport;
 	}
 
 	vport->stopped = 0;
 
 	return 0;
+
+err_vport:
+	cpfl_stop_queues(dev);
+	return ret;
 }
 
 static int
@@ -247,6 +251,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 
 	idpf_vc_vport_ena_dis(vport, false);
 
+	cpfl_stop_queues(dev);
+
 	vport->stopped = 1;
 
 	return 0;
@@ -281,6 +287,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.link_update			= cpfl_dev_link_update,
 	.rx_queue_start			= cpfl_rx_queue_start,
 	.tx_queue_start			= cpfl_tx_queue_start,
+	.rx_queue_stop			= cpfl_rx_queue_stop,
+	.tx_queue_stop			= cpfl_tx_queue_stop,
 	.dev_supported_ptypes_get	= cpfl_dev_supported_ptypes_get,
 };
 
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index c13166b63c..08db01412e 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -49,6 +49,14 @@ cpfl_tx_offload_convert(uint64_t offload)
 	return ol;
 }
 
+static const struct idpf_rxq_ops def_rxq_ops = {
+	.release_mbufs = idpf_qc_rxq_mbufs_release,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+	.release_mbufs = idpf_qc_txq_mbufs_release,
+};
+
 static const struct rte_memzone *
 cpfl_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx,
 		      uint16_t len, uint16_t queue_type,
@@ -177,6 +185,7 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
 	idpf_qc_split_rx_bufq_reset(bufq);
 	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
 			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+	bufq->ops = &def_rxq_ops;
 	bufq->q_set = true;
 
 	if (bufq_id == IDPF_RX_SPLIT_BUFQ1_ID) {
@@ -287,6 +296,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		idpf_qc_single_rx_queue_reset(rxq);
 		rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
 				queue_idx * vport->chunks_info.rx_qtail_spacing);
+		rxq->ops = &def_rxq_ops;
 	} else {
 		idpf_qc_split_rx_descq_reset(rxq);
 
@@ -461,6 +471,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
 			queue_idx * vport->chunks_info.tx_qtail_spacing);
+	txq->ops = &def_txq_ops;
 	txq->q_set = true;
 
 	dev->data->tx_queues[queue_idx] = txq;
@@ -612,3 +623,90 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	return err;
 }
+
+int
+cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_rx_queue *rxq;
+	int err;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+			    rx_queue_id);
+		return err;
+	}
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		rxq->ops->release_mbufs(rxq);
+		idpf_qc_single_rx_queue_reset(rxq);
+	} else {
+		rxq->bufq1->ops->release_mbufs(rxq->bufq1);
+		rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+		idpf_qc_split_rx_queue_reset(rxq);
+	}
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+int
+cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_tx_queue *txq;
+	int err;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+			    tx_queue_id);
+		return err;
+	}
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	txq->ops->release_mbufs(txq);
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		idpf_qc_single_tx_queue_reset(txq);
+	} else {
+		idpf_qc_split_tx_descq_reset(txq);
+		idpf_qc_split_tx_complq_reset(txq->complq);
+	}
+
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+void
+cpfl_stop_queues(struct rte_eth_dev *dev)
+{
+	struct idpf_rx_queue *rxq;
+	struct idpf_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+		if (cpfl_rx_queue_stop(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Failed to stop Rx queue %d", i);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+		if (cpfl_tx_queue_stop(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Failed to stop Tx queue %d", i);
+	}
+}
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 716b2fefa4..e9b810deaa 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -32,4 +32,7 @@ int cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void cpfl_stop_queues(struct rte_eth_dev *dev);
+int cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 #endif /* _CPFL_RXTX_H_ */
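
For reference (not part of the patch), per-queue stop/start can also be
exercised interactively from testpmd once a cpfl port is up; the port
and queue ids below are placeholders, and the command syntax is the
usual testpmd per-queue control:

	testpmd> port 0 rxq 0 stop
	testpmd> port 0 txq 0 stop
	testpmd> port 0 rxq 0 start
	testpmd> port 0 txq 0 start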