diff mbox series

[1/2] net/qede: fix assignment of Rx/Tx handlers

Message ID 20200505030943.1091-1-rmody@marvell.com (mailing list archive)
State Changes Requested, archived
Delegated to: Jerin Jacob
Headers show
Series [1/2] net/qede: fix assignment of Rx/Tx handlers | expand

Checks

Context Check Description
ci/iol-testing success Testing PASS
ci/Intel-compilation fail Compilation issues
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-nxp-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/checkpatch success coding style OK

Commit Message

Rasesh Mody May 5, 2020, 3:09 a.m. UTC
Fix to assign dummy Rx/Tx handlers in dev_stop.
For MTU set, assignment of the appropriate Rx/Tx handlers will be
handled by dev_start/dev_stop.

Fixes: 81f8804992c9 ("net/qede: enhance Rx CPU utilization")
Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G mode")
Cc: stable@dpdk.org

Signed-off-by: Rasesh Mody <rmody@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
---
 drivers/net/qede/qede_ethdev.c | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)

Comments

Ferruh Yigit May 5, 2020, 9:01 a.m. UTC | #1
On 5/5/2020 4:09 AM, Rasesh Mody wrote:
> Fix to assign dummy Rx/Tx handlers in dev_stop.
> For MTU set, assignment of the appropriate Rx/Tx handlers will be
> handled by dev_start/dev_stop.
> 
> Fixes: 81f8804992c9 ("net/qede: enhance Rx CPU utilization")
> Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G mode")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Rasesh Mody <rmody@marvell.com>
> Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
> ---
>  drivers/net/qede/qede_ethdev.c | 33 ++++++++++++++++-----------------
>  1 file changed, 16 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
> index e71fa1e6a..726daa3e3 100644
> --- a/drivers/net/qede/qede_ethdev.c
> +++ b/drivers/net/qede/qede_ethdev.c
> @@ -320,13 +320,19 @@ qede_interrupt_handler(void *param)
>  }
>  
>  static void
> -qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
> +qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
>  {
>  	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
>  	struct qede_dev *qdev = dev->data->dev_private;
>  	struct ecore_dev *edev = &qdev->edev;
>  	bool use_tx_offload = false;
>  
> +	if (is_dummy) {
> +		dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
> +		dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
> +		return;
> +	}
> +
>  	if (ECORE_IS_CMT(edev)) {
>  		dev->rx_pkt_burst = qede_recv_pkts_cmt;
>  		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
> @@ -1150,7 +1156,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
>  	/* Start/resume traffic */
>  	qede_fastpath_start(edev);
>  
> -	qede_assign_rxtx_handlers(eth_dev);
> +	/* Assign I/O handlers */
> +	qede_assign_rxtx_handlers(eth_dev, false);
> +
>  	DP_INFO(edev, "Device started\n");
>  
>  	return 0;
> @@ -1166,6 +1174,11 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
>  
>  	PMD_INIT_FUNC_TRACE(edev);
>  
> +	/* Replace I/O functions with dummy ones. It cannot
> +	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
> +	 */
> +	qede_assign_rxtx_handlers(eth_dev, true);
> +

Why do you need to assign dummy handlers on stop()? What happens if you keep them as
they are, as many PMDs do?

>  	/* Disable vport */
>  	if (qede_activate_vport(eth_dev, false))
>  		return;
> @@ -2316,11 +2329,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
>  			dev->data->min_rx_buf_size);
>  		return -EINVAL;
>  	}
> -	/* Temporarily replace I/O functions with dummy ones. It cannot
> -	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
> -	 */
> -	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
> -	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
>  	if (dev->data->dev_started) {
>  		dev->data->dev_started = 0;
>  		qede_dev_stop(dev);
> @@ -2359,15 +2367,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
>  	/* update max frame size */
>  	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
>  
> -	/* Reassign back */
> -	qede_assign_rxtx_handlers(dev);
> -	if (ECORE_IS_CMT(edev)) {
> -		dev->rx_pkt_burst = qede_recv_pkts_cmt;
> -		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
> -	} else {
> -		dev->rx_pkt_burst = qede_recv_pkts;
> -		dev->tx_pkt_burst = qede_xmit_pkts;
> -	}
>  	return 0;
>  }
>  
> @@ -2570,7 +2569,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
>  	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
>  		QEDE_PMD_DRV_VER_STR_SIZE);
>  
> -	qede_assign_rxtx_handlers(eth_dev);
> +	qede_assign_rxtx_handlers(eth_dev, true);
>  	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
>  
>  	/* For CMT mode device do periodic polling for slowpath events.
>
Rasesh Mody May 6, 2020, 2:34 a.m. UTC | #2
Hi Ferruh,

>From: Ferruh Yigit <ferruh.yigit@intel.com>
>Sent: Tuesday, May 05, 2020 2:01 AM
>
>On 5/5/2020 4:09 AM, Rasesh Mody wrote:
>> Fix to assign dummy Rx/Tx handlers in dev_stop.
>> For MTU set, assignment of the appropriate Rx/Tx handlers will be
>> handled by dev_start/dev_stop.
>>
>> Fixes: 81f8804992c9 ("net/qede: enhance Rx CPU utilization")
>> Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G
>> mode")
>> Cc: stable@dpdk.org
>>
>> Signed-off-by: Rasesh Mody <rmody@marvell.com>
>> Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
>> ---
>>  drivers/net/qede/qede_ethdev.c | 33 ++++++++++++++++-----------------
>>  1 file changed, 16 insertions(+), 17 deletions(-)
>>
>> diff --git a/drivers/net/qede/qede_ethdev.c
>> b/drivers/net/qede/qede_ethdev.c index e71fa1e6a..726daa3e3 100644
>> --- a/drivers/net/qede/qede_ethdev.c
>> +++ b/drivers/net/qede/qede_ethdev.c
>> @@ -320,13 +320,19 @@ qede_interrupt_handler(void *param)  }
>>
>>  static void
>> -qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
>> +qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
>>  {
>>  	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
>>  	struct qede_dev *qdev = dev->data->dev_private;
>>  	struct ecore_dev *edev = &qdev->edev;
>>  	bool use_tx_offload = false;
>>
>> +	if (is_dummy) {
>> +		dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
>> +		dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
>> +		return;
>> +	}
>> +
>>  	if (ECORE_IS_CMT(edev)) {
>>  		dev->rx_pkt_burst = qede_recv_pkts_cmt;
>>  		dev->tx_pkt_burst = qede_xmit_pkts_cmt; @@ -1150,7
>+1156,9 @@
>> static int qede_dev_start(struct rte_eth_dev *eth_dev)
>>  	/* Start/resume traffic */
>>  	qede_fastpath_start(edev);
>>
>> -	qede_assign_rxtx_handlers(eth_dev);
>> +	/* Assign I/O handlers */
>> +	qede_assign_rxtx_handlers(eth_dev, false);
>> +
>>  	DP_INFO(edev, "Device started\n");
>>
>>  	return 0;
>> @@ -1166,6 +1174,11 @@ static void qede_dev_stop(struct rte_eth_dev
>> *eth_dev)
>>
>>  	PMD_INIT_FUNC_TRACE(edev);
>>
>> +	/* Replace I/O functions with dummy ones. It cannot
>> +	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
>> +	 */
>> +	qede_assign_rxtx_handlers(eth_dev, true);
>> +
>
>Why do you need to assign dummy handlers on stop()? What happens if you keep
>them as they are, as many PMDs do?

It helps in preventing a crash when the queues are still in use.

Thanks!
-Rasesh

>
>>  	/* Disable vport */
>>  	if (qede_activate_vport(eth_dev, false))
>>  		return;
>> @@ -2316,11 +2329,6 @@ static int qede_set_mtu(struct rte_eth_dev
>*dev, uint16_t mtu)
>>  			dev->data->min_rx_buf_size);
>>  		return -EINVAL;
>>  	}
>> -	/* Temporarily replace I/O functions with dummy ones. It cannot
>> -	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
>> -	 */
>> -	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
>> -	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
>>  	if (dev->data->dev_started) {
>>  		dev->data->dev_started = 0;
>>  		qede_dev_stop(dev);
>> @@ -2359,15 +2367,6 @@ static int qede_set_mtu(struct rte_eth_dev
>*dev, uint16_t mtu)
>>  	/* update max frame size */
>>  	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
>>
>> -	/* Reassign back */
>> -	qede_assign_rxtx_handlers(dev);
>> -	if (ECORE_IS_CMT(edev)) {
>> -		dev->rx_pkt_burst = qede_recv_pkts_cmt;
>> -		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
>> -	} else {
>> -		dev->rx_pkt_burst = qede_recv_pkts;
>> -		dev->tx_pkt_burst = qede_xmit_pkts;
>> -	}
>>  	return 0;
>>  }
>>
>> @@ -2570,7 +2569,7 @@ static int qede_common_dev_init(struct
>rte_eth_dev *eth_dev, bool is_vf)
>>  	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
>>  		QEDE_PMD_DRV_VER_STR_SIZE);
>>
>> -	qede_assign_rxtx_handlers(eth_dev);
>> +	qede_assign_rxtx_handlers(eth_dev, true);
>>  	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
>>
>>  	/* For CMT mode device do periodic polling for slowpath events.
>>
diff mbox series

Patch

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index e71fa1e6a..726daa3e3 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -320,13 +320,19 @@  qede_interrupt_handler(void *param)
 }
 
 static void
-qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
+qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
 {
 	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 	struct qede_dev *qdev = dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 	bool use_tx_offload = false;
 
+	if (is_dummy) {
+		dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+		dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+		return;
+	}
+
 	if (ECORE_IS_CMT(edev)) {
 		dev->rx_pkt_burst = qede_recv_pkts_cmt;
 		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
@@ -1150,7 +1156,9 @@  static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	/* Start/resume traffic */
 	qede_fastpath_start(edev);
 
-	qede_assign_rxtx_handlers(eth_dev);
+	/* Assign I/O handlers */
+	qede_assign_rxtx_handlers(eth_dev, false);
+
 	DP_INFO(edev, "Device started\n");
 
 	return 0;
@@ -1166,6 +1174,11 @@  static void qede_dev_stop(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
+	/* Replace I/O functions with dummy ones. It cannot
+	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+	 */
+	qede_assign_rxtx_handlers(eth_dev, true);
+
 	/* Disable vport */
 	if (qede_activate_vport(eth_dev, false))
 		return;
@@ -2316,11 +2329,6 @@  static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 			dev->data->min_rx_buf_size);
 		return -EINVAL;
 	}
-	/* Temporarily replace I/O functions with dummy ones. It cannot
-	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
-	 */
-	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
-	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
 	if (dev->data->dev_started) {
 		dev->data->dev_started = 0;
 		qede_dev_stop(dev);
@@ -2359,15 +2367,6 @@  static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* update max frame size */
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
 
-	/* Reassign back */
-	qede_assign_rxtx_handlers(dev);
-	if (ECORE_IS_CMT(edev)) {
-		dev->rx_pkt_burst = qede_recv_pkts_cmt;
-		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
-	} else {
-		dev->rx_pkt_burst = qede_recv_pkts;
-		dev->tx_pkt_burst = qede_xmit_pkts;
-	}
 	return 0;
 }
 
@@ -2570,7 +2569,7 @@  static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
 		QEDE_PMD_DRV_VER_STR_SIZE);
 
-	qede_assign_rxtx_handlers(eth_dev);
+	qede_assign_rxtx_handlers(eth_dev, true);
 	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
 
 	/* For CMT mode device do periodic polling for slowpath events.