[v1,09/12] net/ice: add queue start and stop for DCF

Message ID 20200605201737.33766-10-ting.xu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: xiaolong ye
Series: enable DCF datapath configuration

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Xu, Ting June 5, 2020, 8:17 p.m. UTC
  From: Qi Zhang <qi.z.zhang@intel.com>

Add queue start and stop in DCF. Support queue enable and disable
through virtual channel. Add support for Rx queue mbufs allocation
and queue reset.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/ice/ice_dcf.c        |  57 ++++++
 drivers/net/ice/ice_dcf.h        |   3 +-
 drivers/net/ice/ice_dcf_ethdev.c | 309 +++++++++++++++++++++++++++++++
 3 files changed, 368 insertions(+), 1 deletion(-)
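For context, the per-queue start/stop added here is exercised from an application
through the standard ethdev per-queue calls. A minimal usage sketch (hypothetical
snippet, assuming the port is already configured and started and queue 0 was set
up with deferred_start) could look like:

#include <rte_ethdev.h>

/* Start a deferred-start Rx/Tx queue pair on a port, run traffic,
 * then stop it again. Error handling is kept minimal on purpose.
 */
static int
toggle_queue0(uint16_t port_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_start(port_id, 0);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_tx_queue_start(port_id, 0);
	if (ret != 0) {
		rte_eth_dev_rx_queue_stop(port_id, 0);
		return ret;
	}

	/* ... rx/tx burst loop runs here ... */

	rte_eth_dev_tx_queue_stop(port_id, 0);
	rte_eth_dev_rx_queue_stop(port_id, 0);
	return 0;
}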
  

Comments

Xiaolong Ye June 7, 2020, 12:28 p.m. UTC | #1
On 06/05, Ting Xu wrote:
>From: Qi Zhang <qi.z.zhang@intel.com>
>
>Add queue start and stop in DCF. Support queue enable and disable
>through virtual channel. Add support for Rx queue mbufs allocation
>and queue reset.

There is an i40e patch [1] from Qiming that corrects the queue behavior: when
one queue fails to start, the start of all following queues should be skipped
and the previously started queues should be cleaned up; for the queue stop case,
one queue's stop failure shouldn't skip the stop of the following queues. I
think this applies to this patch as well, adding Qiming for more comments.


[1] https://patches.dpdk.org/patch/70362/
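To illustrate the stop-path part of that behavior, a rough sketch (hypothetical
helper, not code from this series; it reuses the per-queue stop callbacks added
by this patch) could look like:

/* A single queue-stop failure is remembered and reported at the end,
 * but it does not skip stopping the remaining queues.
 */
static int
dcf_stop_all_queues(struct rte_eth_dev *dev)
{
	int ret = 0;
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (ice_dcf_tx_queue_stop(dev, i) != 0)
			ret = -1;	/* remember the failure, keep going */
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (ice_dcf_rx_queue_stop(dev, i) != 0)
			ret = -1;
	}

	return ret;
}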

>
>Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
>---
> drivers/net/ice/ice_dcf.c        |  57 ++++++
> drivers/net/ice/ice_dcf.h        |   3 +-
> drivers/net/ice/ice_dcf_ethdev.c | 309 +++++++++++++++++++++++++++++++
> 3 files changed, 368 insertions(+), 1 deletion(-)
>
>diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
>index d864ae894..56b8c0d25 100644
>--- a/drivers/net/ice/ice_dcf.c
>+++ b/drivers/net/ice/ice_dcf.c
>@@ -940,3 +940,60 @@ ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
> 	rte_free(map_info);
> 	return err;
> }
>+
>+int
>+ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
>+{
>+	struct virtchnl_queue_select queue_select;
>+	struct dcf_virtchnl_cmd args;
>+	int err;
>+
>+	memset(&queue_select, 0, sizeof(queue_select));
>+	queue_select.vsi_id = hw->vsi_res->vsi_id;
>+	if (rx)
>+		queue_select.rx_queues |= 1 << qid;
>+	else
>+		queue_select.tx_queues |= 1 << qid;
>+
>+	memset(&args, 0, sizeof(args));
>+	if (on)
>+		args.v_op = VIRTCHNL_OP_ENABLE_QUEUES;
>+	else
>+		args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
>+
>+	args.req_msg = (u8 *)&queue_select;
>+	args.req_msglen = sizeof(queue_select);
>+
>+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
>+	if (err)
>+		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
>+			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
>+	return err;
>+}
>+
>+int
>+ice_dcf_disable_queues(struct ice_dcf_hw *hw)
>+{
>+	struct virtchnl_queue_select queue_select;
>+	struct dcf_virtchnl_cmd args;
>+	int err;
>+
>+	memset(&queue_select, 0, sizeof(queue_select));
>+	queue_select.vsi_id = hw->vsi_res->vsi_id;
>+
>+	queue_select.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;
>+	queue_select.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;
>+
>+	memset(&args, 0, sizeof(args));
>+	args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
>+	args.req_msg = (u8 *)&queue_select;
>+	args.req_msglen = sizeof(queue_select);
>+
>+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
>+	if (err) {
>+		PMD_DRV_LOG(ERR,
>+			    "Failed to execute command of OP_DISABLE_QUEUES");
>+		return err;
>+	}
>+	return 0;

Better to align with ice_dcf_switch_queue above; one 'return err' at the end
is enough.

Thanks,
Xiaolong
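For reference, a minimal sketch of ice_dcf_disable_queues() with the suggested
cleanup applied (same error-handling shape as ice_dcf_switch_queue(), a single
return at the end; hypothetical, not the code merged in this series):

int
ice_dcf_disable_queues(struct ice_dcf_hw *hw)
{
	struct virtchnl_queue_select queue_select;
	struct dcf_virtchnl_cmd args;
	int err;

	memset(&queue_select, 0, sizeof(queue_select));
	queue_select.vsi_id = hw->vsi_res->vsi_id;
	/* select every configured Rx/Tx queue in one shot */
	queue_select.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;
	queue_select.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
	args.req_msg = (u8 *)&queue_select;
	args.req_msglen = sizeof(queue_select);

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR,
			    "Failed to execute command of OP_DISABLE_QUEUES");

	return err;
}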
  
Qiming Yang June 8, 2020, 7:35 a.m. UTC | #2
> -----Original Message-----
> From: Xu, Ting <ting.xu@intel.com>
> Sent: Saturday, June 6, 2020 04:18
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Mcnamara, John <john.mcnamara@intel.com>;
> Kovacevic, Marko <marko.kovacevic@intel.com>
> Subject: [PATCH v1 09/12] net/ice: add queue start and stop for DCF
> 
> From: Qi Zhang <qi.z.zhang@intel.com>
> 
> Add queue start and stop in DCF. Support queue enable and disable through
> virtual channel. Add support for Rx queue mbufs allocation and queue reset.
> 
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
>  drivers/net/ice/ice_dcf.c        |  57 ++++++
>  drivers/net/ice/ice_dcf.h        |   3 +-
>  drivers/net/ice/ice_dcf_ethdev.c | 309
> +++++++++++++++++++++++++++++++
>  3 files changed, 368 insertions(+), 1 deletion(-)
> 

Snip...

> +}
> diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index
> 9470d1df7..68e1661c0 100644
> --- a/drivers/net/ice/ice_dcf.h
> +++ b/drivers/net/ice/ice_dcf.h
> @@ -70,5 +70,6 @@ void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev,
> struct ice_dcf_hw *hw);  int ice_dcf_init_rss(struct ice_dcf_hw *hw);  int
> ice_dcf_configure_queues(struct ice_dcf_hw *hw);  int
> ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
> -
> +int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx,
> +bool on); int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
>  #endif /* _ICE_DCF_H_ */
> diff --git a/drivers/net/ice/ice_dcf_ethdev.c
> b/drivers/net/ice/ice_dcf_ethdev.c
> index 9605fb8ed..59113fc4b 100644
> --- a/drivers/net/ice/ice_dcf_ethdev.c
> +++ b/drivers/net/ice/ice_dcf_ethdev.c
> @@ -226,6 +226,259 @@ static int ice_dcf_config_rx_queues_irqs(struct
> rte_eth_dev *dev,
>  	return 0;
>  }
> 
> +static int
> +ice_dcf_start_queues(struct rte_eth_dev *dev) {
> +	struct ice_rx_queue *rxq;
> +	struct ice_tx_queue *txq;
> +	int i;
> +
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		txq = dev->data->tx_queues[i];
> +		if (txq->tx_deferred_start)
> +			continue;
> +		if (ice_dcf_tx_queue_start(dev, i) != 0) {
> +			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
> +			return -1;

If queue start fails, the queues that were already started should be stopped.

> +		}
> +	}
> +
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		rxq = dev->data->rx_queues[i];
> +		if (rxq->rx_deferred_start)
> +			continue;
> +		if (ice_dcf_rx_queue_start(dev, i) != 0) {
> +			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
> +			return -1;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
>  static int
>  ice_dcf_dev_start(struct rte_eth_dev *dev)  { @@ -266,20 +519,72 @@
> ice_dcf_dev_start(struct rte_eth_dev *dev)
>  		return ret;
>  	}
> 
> +	if (dev->data->dev_conf.intr_conf.rxq != 0) {
> +		rte_intr_disable(intr_handle);
> +		rte_intr_enable(intr_handle);
> +	}
> +
> +	ret = ice_dcf_start_queues(dev);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "Failed to enable queues");
> +		return ret;
> +	}
> +
>  	dev->data->dev_link.link_status = ETH_LINK_UP;
> 
>  	return 0;
>  }
> 
> +static void
> +ice_dcf_stop_queues(struct rte_eth_dev *dev) {
> +	struct ice_dcf_adapter *ad = dev->data->dev_private;
> +	struct ice_dcf_hw *hw = &ad->real_hw;
> +	struct ice_rx_queue *rxq;
> +	struct ice_tx_queue *txq;
> +	int ret, i;
> +
> +	/* Stop All queues */
> +	ret = ice_dcf_disable_queues(hw);
> +	if (ret)
> +		PMD_DRV_LOG(WARNING, "Fail to stop queues");
> +
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		txq = dev->data->tx_queues[i];
> +		if (!txq)
> +			continue;
> +		txq->tx_rel_mbufs(txq);
> +		reset_tx_queue(txq);
> +		dev->data->tx_queue_state[i] =
> RTE_ETH_QUEUE_STATE_STOPPED;
> +	}
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		rxq = dev->data->rx_queues[i];
> +		if (!rxq)
> +			continue;
> +		rxq->rx_rel_mbufs(rxq);
> +		reset_rx_queue(rxq);
> +		dev->data->rx_queue_state[i] =
> RTE_ETH_QUEUE_STATE_STOPPED;
> +	}
> +}
> +
>  static void
>  ice_dcf_dev_stop(struct rte_eth_dev *dev)  {
>  	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
> +	struct rte_intr_handle *intr_handle = dev->intr_handle;
>  	struct ice_adapter *ad = &dcf_ad->parent;
> 
>  	if (ad->pf.adapter_stopped == 1)
>  		return;
> 
> +	ice_dcf_stop_queues(dev);
> +
> +	rte_intr_efd_disable(intr_handle);
> +	if (intr_handle->intr_vec) {
> +		rte_free(intr_handle->intr_vec);
> +		intr_handle->intr_vec = NULL;
> +	}
> +
>  	dev->data->dev_link.link_status = ETH_LINK_DOWN;
>  	ad->pf.adapter_stopped = 1;
>  }
> @@ -476,6 +781,10 @@ static const struct eth_dev_ops
> ice_dcf_eth_dev_ops = {
>  	.tx_queue_setup          = ice_tx_queue_setup,
>  	.rx_queue_release        = ice_rx_queue_release,
>  	.tx_queue_release        = ice_tx_queue_release,
> +	.rx_queue_start          = ice_dcf_rx_queue_start,
> +	.tx_queue_start          = ice_dcf_tx_queue_start,
> +	.rx_queue_stop           = ice_dcf_rx_queue_stop,
> +	.tx_queue_stop           = ice_dcf_tx_queue_stop,
>  	.link_update             = ice_dcf_link_update,
>  	.stats_get               = ice_dcf_stats_get,
>  	.stats_reset             = ice_dcf_stats_reset,
> --
> 2.17.1
  
Xu, Ting June 9, 2020, 7:35 a.m. UTC | #3
Hi, Qiming,

> -----Original Message-----
> From: Yang, Qiming <qiming.yang@intel.com>
> Sent: Monday, June 8, 2020 3:36 PM
> To: Xu, Ting <ting.xu@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Mcnamara, John
> <john.mcnamara@intel.com>; Kovacevic, Marko <marko.kovacevic@intel.com>
> Subject: RE: [PATCH v1 09/12] net/ice: add queue start and stop for DCF
> 
> 
> 
> > -----Original Message-----
> > From: Xu, Ting <ting.xu@intel.com>
> > Sent: Saturday, June 6, 2020 04:18
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming
> > <qiming.yang@intel.com>; Mcnamara, John <john.mcnamara@intel.com>;
> > Kovacevic, Marko <marko.kovacevic@intel.com>
> > Subject: [PATCH v1 09/12] net/ice: add queue start and stop for DCF
> >
> > From: Qi Zhang <qi.z.zhang@intel.com>
> >
> > Add queue start and stop in DCF. Support queue enable and disable
> > through virtual channel. Add support for Rx queue mbufs allocation and
> queue reset.
> >
> > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> > ---
> >  drivers/net/ice/ice_dcf.c        |  57 ++++++
> >  drivers/net/ice/ice_dcf.h        |   3 +-
> >  drivers/net/ice/ice_dcf_ethdev.c | 309
> > +++++++++++++++++++++++++++++++
> >  3 files changed, 368 insertions(+), 1 deletion(-)
> >
> 
> Snip...
> 
> > +}
> > diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
> > index
> > 9470d1df7..68e1661c0 100644
> > --- a/drivers/net/ice/ice_dcf.h
> > +++ b/drivers/net/ice/ice_dcf.h
> > @@ -70,5 +70,6 @@ void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev,
> > struct ice_dcf_hw *hw);  int ice_dcf_init_rss(struct ice_dcf_hw *hw);
> > int ice_dcf_configure_queues(struct ice_dcf_hw *hw);  int
> > ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
> > -
> > +int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool
> > +rx, bool on); int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
> >  #endif /* _ICE_DCF_H_ */
> > diff --git a/drivers/net/ice/ice_dcf_ethdev.c
> > b/drivers/net/ice/ice_dcf_ethdev.c
> > index 9605fb8ed..59113fc4b 100644
> > --- a/drivers/net/ice/ice_dcf_ethdev.c
> > +++ b/drivers/net/ice/ice_dcf_ethdev.c
> > @@ -226,6 +226,259 @@ static int ice_dcf_config_rx_queues_irqs(struct
> > rte_eth_dev *dev,
> >  return 0;
> >  }
> >
> 
> > +static int
> > +ice_dcf_start_queues(struct rte_eth_dev *dev)
> > +{
> > +	struct ice_rx_queue *rxq;
> > +	struct ice_tx_queue *txq;
> > +	int i;
> > +
> > +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > +		txq = dev->data->tx_queues[i];
> > +		if (txq->tx_deferred_start)
> > +			continue;
> > +		if (ice_dcf_tx_queue_start(dev, i) != 0) {
> > +			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
> > +			return -1;
> 
> If queue start fails, the queues that were already started should be stopped.
> 

This operation can only be seen in the ice and i40e PF drivers. iavf, and even the earlier i40evf, does not stop the already started queues on failure.
I am not sure whether this operation is suitable for DCF, or whether we should not follow the current iavf behavior, since iavf actually needs this modification to stop the started queues as well?

  
Qiming Yang June 10, 2020, 5:03 a.m. UTC | #4
> -----Original Message-----
> From: Xu, Ting <ting.xu@intel.com>
> Sent: Tuesday, June 9, 2020 15:35
> To: Yang, Qiming <qiming.yang@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Mcnamara, John
> <john.mcnamara@intel.com>; Kovacevic, Marko
> <marko.kovacevic@intel.com>; Ye, Xiaolong <xiaolong.ye@intel.com>
> Subject: RE: [PATCH v1 09/12] net/ice: add queue start and stop for DCF
> 
> Hi, Qiming,
> 
> > -----Original Message-----
> > From: Yang, Qiming <qiming.yang@intel.com>
> > Sent: Monday, June 8, 2020 3:36 PM
> > To: Xu, Ting <ting.xu@intel.com>; dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Mcnamara, John
> > <john.mcnamara@intel.com>; Kovacevic, Marko
> > <marko.kovacevic@intel.com>
> > Subject: RE: [PATCH v1 09/12] net/ice: add queue start and stop for
> > DCF
> >
> >
> >
> > > -----Original Message-----
> > > From: Xu, Ting <ting.xu@intel.com>
> > > Sent: Saturday, June 6, 2020 04:18
> > > To: dev@dpdk.org
> > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming
> > > <qiming.yang@intel.com>; Mcnamara, John
> <john.mcnamara@intel.com>;
> > > Kovacevic, Marko <marko.kovacevic@intel.com>
> > > Subject: [PATCH v1 09/12] net/ice: add queue start and stop for DCF
> > >
> > > From: Qi Zhang <qi.z.zhang@intel.com>
> > >
> > > Add queue start and stop in DCF. Support queue enable and disable
> > > through virtual channel. Add support for Rx queue mbufs allocation
> > > and
> > queue reset.
> > >
> > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> > > ---
> > >  drivers/net/ice/ice_dcf.c        |  57 ++++++
> > >  drivers/net/ice/ice_dcf.h        |   3 +-
> > >  drivers/net/ice/ice_dcf_ethdev.c | 309
> > > +++++++++++++++++++++++++++++++
> > >  3 files changed, 368 insertions(+), 1 deletion(-)
> > >
> >
> > Snip...
> >
> >
> > If queue start fails, the queues that were already started should be stopped.
> >
> 
> This operation can only be seen in the ice and i40e PF drivers. iavf, and even
> the earlier i40evf, does not stop the already started queues on failure.
> I am not sure whether this operation is suitable for DCF, or whether we should
> not follow the current iavf behavior, since iavf actually needs this
> modification to stop the started queues as well?
> 

I think that's the correct behavior. We'd better fix the gap if iavf and i40evf do not behave that way.
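Following up on that agreement, a rough sketch of how ice_dcf_start_queues()
could roll back on failure (hypothetical revision of the function in this patch,
reusing the per-queue stop callbacks it adds; not the code merged here):

static int
ice_dcf_start_queues(struct rte_eth_dev *dev)
{
	struct ice_rx_queue *rxq;
	struct ice_tx_queue *txq;
	int nb_rxq = 0;
	int nb_txq = 0;
	int i;

	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
		txq = dev->data->tx_queues[nb_txq];
		if (txq->tx_deferred_start)
			continue;
		if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
		rxq = dev->data->rx_queues[nb_rxq];
		if (rxq->rx_deferred_start)
			continue;
		if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {
			PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	return 0;

	/* stop the queues that were already started before the failure */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_dcf_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_dcf_tx_queue_stop(dev, i);

	return -1;
}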

  

Patch

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index d864ae894..56b8c0d25 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -940,3 +940,60 @@  ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
 	rte_free(map_info);
 	return err;
 }
+
+int
+ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
+{
+	struct virtchnl_queue_select queue_select;
+	struct dcf_virtchnl_cmd args;
+	int err;
+
+	memset(&queue_select, 0, sizeof(queue_select));
+	queue_select.vsi_id = hw->vsi_res->vsi_id;
+	if (rx)
+		queue_select.rx_queues |= 1 << qid;
+	else
+		queue_select.tx_queues |= 1 << qid;
+
+	memset(&args, 0, sizeof(args));
+	if (on)
+		args.v_op = VIRTCHNL_OP_ENABLE_QUEUES;
+	else
+		args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
+
+	args.req_msg = (u8 *)&queue_select;
+	args.req_msglen = sizeof(queue_select);
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
+	return err;
+}
+
+int
+ice_dcf_disable_queues(struct ice_dcf_hw *hw)
+{
+	struct virtchnl_queue_select queue_select;
+	struct dcf_virtchnl_cmd args;
+	int err;
+
+	memset(&queue_select, 0, sizeof(queue_select));
+	queue_select.vsi_id = hw->vsi_res->vsi_id;
+
+	queue_select.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;
+	queue_select.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_DISABLE_QUEUES;
+	args.req_msg = (u8 *)&queue_select;
+	args.req_msglen = sizeof(queue_select);
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_DISABLE_QUEUES");
+		return err;
+	}
+	return 0;
+}
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 9470d1df7..68e1661c0 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -70,5 +70,6 @@  void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
 int ice_dcf_init_rss(struct ice_dcf_hw *hw);
 int ice_dcf_configure_queues(struct ice_dcf_hw *hw);
 int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
-
+int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on);
+int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 9605fb8ed..59113fc4b 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -226,6 +226,259 @@  static int ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+alloc_rxq_mbufs(struct ice_rx_queue *rxq)
+{
+	volatile union ice_32b_rx_flex_desc *rxd;
+	struct rte_mbuf *mbuf = NULL;
+	uint64_t dma_addr;
+	uint16_t i;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		mbuf = rte_mbuf_raw_alloc(rxq->mp);
+		if (unlikely(!mbuf)) {
+			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+			return -ENOMEM;
+		}
+
+		rte_mbuf_refcnt_set(mbuf, 1);
+		mbuf->next = NULL;
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+		rxd = &rxq->rx_ring[i];
+		rxd->read.pkt_addr = dma_addr;
+		rxd->read.hdr_addr = 0;
+		rxd->read.rsvd1 = 0;
+		rxd->read.rsvd2 = 0;
+
+		rxq->sw_ring[i].mbuf = (void *)mbuf;
+	}
+
+	return 0;
+}
+
+static int
+ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct iavf_hw *hw = &ad->real_hw.avf;
+	struct ice_rx_queue *rxq;
+	int err = 0;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	err = alloc_rxq_mbufs(rxq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+		return err;
+	}
+
+	rte_wmb();
+
+	/* Init the RX tail register. */
+	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+	IAVF_WRITE_FLUSH(hw);
+
+	/* Ready to switch the queue on */
+	err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+			    rx_queue_id);
+	else
+		dev->data->rx_queue_state[rx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+
+	return err;
+}
+
+static inline void
+reset_rx_queue(struct ice_rx_queue *rxq)
+{
+	uint16_t len;
+	uint32_t i;
+
+	if (!rxq)
+		return;
+
+	len = rxq->nb_rx_desc + ICE_RX_MAX_BURST;
+
+	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
+		((volatile char *)rxq->rx_ring)[i] = 0;
+
+	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+	for (i = 0; i < ICE_RX_MAX_BURST; i++)
+		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+
+	/* for rx bulk */
+	rxq->rx_nb_avail = 0;
+	rxq->rx_next_avail = 0;
+	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+	rxq->rx_tail = 0;
+	rxq->nb_rx_hold = 0;
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+}
+
+static inline void
+reset_tx_queue(struct ice_tx_queue *txq)
+{
+	struct ice_tx_entry *txe;
+	uint32_t i, size;
+	uint16_t prev;
+
+	if (!txq) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+		return;
+	}
+
+	txe = txq->sw_ring;
+	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
+	for (i = 0; i < size; i++)
+		((volatile char *)txq->tx_ring)[i] = 0;
+
+	prev = (uint16_t)(txq->nb_tx_desc - 1);
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txq->tx_ring[i].cmd_type_offset_bsz =
+			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
+		txe[i].mbuf =  NULL;
+		txe[i].last_id = i;
+		txe[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->tx_tail = 0;
+	txq->nb_tx_used = 0;
+
+	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+	txq->nb_tx_free = txq->nb_tx_desc - 1;
+
+	txq->tx_next_dd = txq->tx_rs_thresh - 1;
+	txq->tx_next_rs = txq->tx_rs_thresh - 1;
+}
+
+static int
+ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &ad->real_hw;
+	struct ice_rx_queue *rxq;
+	int err;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	err = ice_dcf_switch_queue(hw, rx_queue_id, true, false);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+			    rx_queue_id);
+		return err;
+	}
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	rxq->rx_rel_mbufs(rxq);
+	reset_rx_queue(rxq);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+static int
+ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct iavf_hw *hw = &ad->real_hw.avf;
+	struct ice_tx_queue *txq;
+	int err = 0;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Init the TX tail register. */
+	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);
+	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
+	IAVF_WRITE_FLUSH(hw);
+
+	/* Ready to switch the queue on */
+	err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);
+
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+			    tx_queue_id);
+	else
+		dev->data->tx_queue_state[tx_queue_id] =
+			RTE_ETH_QUEUE_STATE_STARTED;
+
+	return err;
+}
+
+static int
+ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &ad->real_hw;
+	struct ice_tx_queue *txq;
+	int err;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+			    tx_queue_id);
+		return err;
+	}
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	txq->tx_rel_mbufs(txq);
+	reset_tx_queue(txq);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+static int
+ice_dcf_start_queues(struct rte_eth_dev *dev)
+{
+	struct ice_rx_queue *rxq;
+	struct ice_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (txq->tx_deferred_start)
+			continue;
+		if (ice_dcf_tx_queue_start(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+			return -1;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (rxq->rx_deferred_start)
+			continue;
+		if (ice_dcf_rx_queue_start(dev, i) != 0) {
+			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
 static int
 ice_dcf_dev_start(struct rte_eth_dev *dev)
 {
@@ -266,20 +519,72 @@  ice_dcf_dev_start(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	if (dev->data->dev_conf.intr_conf.rxq != 0) {
+		rte_intr_disable(intr_handle);
+		rte_intr_enable(intr_handle);
+	}
+
+	ret = ice_dcf_start_queues(dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to enable queues");
+		return ret;
+	}
+
 	dev->data->dev_link.link_status = ETH_LINK_UP;
 
 	return 0;
 }
 
+static void
+ice_dcf_stop_queues(struct rte_eth_dev *dev)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &ad->real_hw;
+	struct ice_rx_queue *rxq;
+	struct ice_tx_queue *txq;
+	int ret, i;
+
+	/* Stop All queues */
+	ret = ice_dcf_disable_queues(hw);
+	if (ret)
+		PMD_DRV_LOG(WARNING, "Fail to stop queues");
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+		txq->tx_rel_mbufs(txq);
+		reset_tx_queue(txq);
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (!rxq)
+			continue;
+		rxq->rx_rel_mbufs(rxq);
+		reset_rx_queue(rxq);
+		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+	}
+}
+
 static void
 ice_dcf_dev_stop(struct rte_eth_dev *dev)
 {
 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+	struct rte_intr_handle *intr_handle = dev->intr_handle;
 	struct ice_adapter *ad = &dcf_ad->parent;
 
 	if (ad->pf.adapter_stopped == 1)
 		return;
 
+	ice_dcf_stop_queues(dev);
+
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+
 	dev->data->dev_link.link_status = ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
 }
@@ -476,6 +781,10 @@  static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
 	.tx_queue_setup          = ice_tx_queue_setup,
 	.rx_queue_release        = ice_rx_queue_release,
 	.tx_queue_release        = ice_tx_queue_release,
+	.rx_queue_start          = ice_dcf_rx_queue_start,
+	.tx_queue_start          = ice_dcf_tx_queue_start,
+	.rx_queue_stop           = ice_dcf_rx_queue_stop,
+	.tx_queue_stop           = ice_dcf_tx_queue_stop,
 	.link_update             = ice_dcf_link_update,
 	.stats_get               = ice_dcf_stats_get,
 	.stats_reset             = ice_dcf_stats_reset,