[v7,06/21] net/cpfl: support queue stop
Checks
Commit Message
Add support for these device ops:
- rx_queue_stop
- tx_queue_stop
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 10 +++-
drivers/net/cpfl/cpfl_rxtx.c | 87 ++++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 3 ++
3 files changed, 99 insertions(+), 1 deletion(-)
Comments
On 2/16/2023 12:29 AM, Mingxia Liu wrote:
> Add support for these device ops:
> - rx_queue_stop
> - tx_queue_stop
>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
<...>
> +int
> +cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
> + struct idpf_rx_queue *rxq;
> + int err;
> +
> + if (rx_queue_id >= dev->data->nb_rx_queues)
> + return -EINVAL;
> +
> + err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
> + rx_queue_id);
> + return err;
> + }
> +
> + rxq = dev->data->rx_queues[rx_queue_id];
> + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> + rxq->ops->release_mbufs(rxq);
> + idpf_qc_single_rx_queue_reset(rxq);
> + } else {
> + rxq->bufq1->ops->release_mbufs(rxq->bufq1);
> + rxq->bufq2->ops->release_mbufs(rxq->bufq2);
In this patch, the queue ops (bufq1->ops) are not set yet; they are set in the
next patch. Swapping the order of this patch with the next one may help.
@@ -232,12 +232,16 @@ cpfl_dev_start(struct rte_eth_dev *dev)
ret = idpf_vc_vport_ena_dis(vport, true);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to enable vport");
- return ret;
+ goto err_vport;
}
vport->stopped = 0;
return 0;
+
+err_vport:
+ cpfl_stop_queues(dev);
+ return ret;
}
static int
@@ -250,6 +254,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
idpf_vc_vport_ena_dis(vport, false);
+ cpfl_stop_queues(dev);
+
vport->stopped = 1;
return 0;
@@ -615,6 +621,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.link_update = cpfl_dev_link_update,
.rx_queue_start = cpfl_rx_queue_start,
.tx_queue_start = cpfl_tx_queue_start,
+ .rx_queue_stop = cpfl_rx_queue_stop,
+ .tx_queue_stop = cpfl_tx_queue_stop,
.dev_supported_ptypes_get = cpfl_dev_supported_ptypes_get,
};
@@ -612,3 +612,90 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
+
+int
+cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+ struct idpf_rx_queue *rxq;
+ int err;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ rxq->ops->release_mbufs(rxq);
+ idpf_qc_single_rx_queue_reset(rxq);
+ } else {
+ rxq->bufq1->ops->release_mbufs(rxq->bufq1);
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ int err;
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
+ }
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ txq->ops->release_mbufs(txq);
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ idpf_qc_single_tx_queue_reset(txq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+void
+cpfl_stop_queues(struct rte_eth_dev *dev)
+{
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+ int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ continue;
+
+ if (cpfl_rx_queue_stop(dev, i) != 0)
+ PMD_DRV_LOG(WARNING, "Fail to stop Rx queue %d", i);
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+
+ if (cpfl_tx_queue_stop(dev, i) != 0)
+ PMD_DRV_LOG(WARNING, "Fail to stop Tx queue %d", i);
+ }
+}
@@ -32,4 +32,7 @@ int cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void cpfl_stop_queues(struct rte_eth_dev *dev);
+int cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
#endif /* _CPFL_RXTX_H_ */