[v14,05/18] net/idpf: add support for device start and stop

Message ID 20221027074729.1494529-6-junfeng.guo@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Andrew Rybchenko
Series: add support for idpf PMD in DPDK

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Junfeng Guo Oct. 27, 2022, 7:47 a.m. UTC
  Add dev ops dev_start, dev_stop and link_update.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 55 ++++++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.c   | 20 +++++++++++++
 2 files changed, 75 insertions(+)
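
For context on how these new ops are reached, here is a minimal application-side sketch using the generic ethdev API; it is illustrative only (the port ID is arbitrary, and the port is assumed to be configured with its queues already set up), not part of the patch:

 #include <rte_ethdev.h>

 /* Illustrative only: exercising the new ops via the ethdev layer.
  * rte_eth_dev_start() ends up in idpf_dev_start(),
  * rte_eth_link_get_nowait() in idpf_dev_link_update(),
  * rte_eth_dev_stop() in idpf_dev_stop().
  */
 static int
 run_port(uint16_t port_id)
 {
 	struct rte_eth_link link;
 	int ret;

 	ret = rte_eth_dev_start(port_id);
 	if (ret < 0)
 		return ret;

 	ret = rte_eth_link_get_nowait(port_id, &link);
 	if (ret < 0)
 		return ret;

 	/* ... forward traffic ... */

 	return rte_eth_dev_stop(port_id);
 }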
  

Comments

Andrew Rybchenko Oct. 28, 2022, 3:45 p.m. UTC | #1
On 10/27/22 10:47, Junfeng Guo wrote:
> Add dev ops dev_start, dev_stop and link_update.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>

[snip]

> @@ -284,6 +305,40 @@ idpf_dev_configure(struct rte_eth_dev *dev)
>   	return 0;
>   }
>   
> +static int
> +idpf_dev_start(struct rte_eth_dev *dev)
> +{
> +	struct idpf_vport *vport = dev->data->dev_private;
> +
> +	if (dev->data->mtu > vport->max_mtu) {
> +		PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
> +		return -1;

Negative errno must be returned.

> +	}
> +
> +	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
> +
> +	/* TODO: start queues */
> +
> +	if (idpf_vc_ena_dis_vport(vport, true) != 0) {
> +		PMD_DRV_LOG(ERR, "Failed to enable vport");
> +		return -1;

same here

> +	}
> +
> +	return 0;
> +}

[snip]
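
As a minimal sketch of what the two review comments above ask for (not the actual revised code), idpf_dev_start() could return negative errno values instead of -1; whether idpf_vc_ena_dis_vport() itself already returns a negative errno is an assumption here:

 static int
 idpf_dev_start(struct rte_eth_dev *dev)
 {
 	struct idpf_vport *vport = dev->data->dev_private;
 	int ret;

 	if (dev->data->mtu > vport->max_mtu) {
 		PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
 		return -EINVAL; /* negative errno instead of -1 */
 	}

 	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;

 	/* TODO: start queues */

 	ret = idpf_vc_ena_dis_vport(vport, true);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to enable vport");
 		/* Propagate the error, assuming the virtchnl helper
 		 * returns a negative errno; otherwise map it, e.g. to -EIO.
 		 */
 		return ret;
 	}

 	return 0;
 }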
  

Patch

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 0585153f69..3430d00e92 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -30,17 +30,38 @@  static const char * const idpf_valid_args[] = {
 };
 
 static int idpf_dev_configure(struct rte_eth_dev *dev);
+static int idpf_dev_start(struct rte_eth_dev *dev);
+static int idpf_dev_stop(struct rte_eth_dev *dev);
 static int idpf_dev_close(struct rte_eth_dev *dev);
 static int idpf_dev_info_get(struct rte_eth_dev *dev,
 			     struct rte_eth_dev_info *dev_info);
 static void idpf_adapter_rel(struct idpf_adapter *adapter);
 
+static int
+idpf_dev_link_update(struct rte_eth_dev *dev,
+		     __rte_unused int wait_to_complete)
+{
+	struct rte_eth_link new_link;
+
+	memset(&new_link, 0, sizeof(new_link));
+
+	new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+				  RTE_ETH_LINK_SPEED_FIXED);
+
+	return rte_eth_linkstatus_set(dev, &new_link);
+}
+
 static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.dev_configure			= idpf_dev_configure,
+	.dev_start			= idpf_dev_start,
+	.dev_stop			= idpf_dev_stop,
 	.dev_close			= idpf_dev_close,
 	.rx_queue_setup			= idpf_rx_queue_setup,
 	.tx_queue_setup			= idpf_tx_queue_setup,
 	.dev_infos_get			= idpf_dev_info_get,
+	.link_update			= idpf_dev_link_update,
 };
 
 static int
@@ -284,6 +305,40 @@  idpf_dev_configure(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+idpf_dev_start(struct rte_eth_dev *dev)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+
+	if (dev->data->mtu > vport->max_mtu) {
+		PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
+		return -1;
+	}
+
+	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
+
+	/* TODO: start queues */
+
+	if (idpf_vc_ena_dis_vport(vport, true) != 0) {
+		PMD_DRV_LOG(ERR, "Failed to enable vport");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+idpf_dev_stop(struct rte_eth_dev *dev)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+
+	idpf_vc_ena_dis_vport(vport, false);
+
+	/* TODO: stop queues */
+
+	return 0;
+}
+
 static int
 idpf_dev_close(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 25dd5d85d5..3528d2f2c7 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -334,6 +334,11 @@  idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
 		return -EINVAL;
 
+	if (rx_conf->rx_deferred_start) {
+		PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+		return -EINVAL;
+	}
+
 	/* Setup Rx description queue */
 	rxq = rte_zmalloc_socket("idpf rxq",
 				 sizeof(struct idpf_rx_queue),
@@ -465,6 +470,11 @@  idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
 		return -EINVAL;
 
+	if (rx_conf->rx_deferred_start) {
+		PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+		return -EINVAL;
+	}
+
 	/* Setup Rx description queue */
 	rxq = rte_zmalloc_socket("idpf rxq",
 				 sizeof(struct idpf_rx_queue),
@@ -569,6 +579,11 @@  idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
 		return -EINVAL;
 
+	if (tx_conf->tx_deferred_start) {
+		PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+		return -EINVAL;
+	}
+
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("idpf split txq",
 				 sizeof(struct idpf_tx_queue),
@@ -691,6 +706,11 @@  idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
 		return -EINVAL;
 
+	if (tx_conf->tx_deferred_start) {
+		PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+		return -EINVAL;
+	}
+
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("idpf txq",
 				 sizeof(struct idpf_tx_queue),