[v1,2/2] net/axgbe: move offloads to Rx/Tx queue setup

Message ID 20221221025202.31733-2-jesna.k.e@amd.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Headers
Series [v1,1/2] net/axgbe: add multi-process support |

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/intel-Testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/github-robot: build fail github build: failed
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing fail Testing issues
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-aarch64-compile-testing fail Testing issues
ci/iol-testing fail Testing issues
ci/iol-x86_64-unit-testing fail Testing issues

Commit Message

Jesna K E Dec. 21, 2022, 2:52 a.m. UTC
  For multi-process DPDK applications, retrieving the offload parameters
for received packets directly from struct eth_dev
caused a segmentation fault, since rxmode.offloads from eth_dev was
null during recv_pkts. So the offloads are now retrieved in rx/tx_queue_setup().

Signed-off-by: Jesna K E <jesna.k.e@amd.com>
---
 drivers/net/axgbe/axgbe_rxtx.c | 10 ++++------
 drivers/net/axgbe/axgbe_rxtx.h |  4 ++--
 2 files changed, 6 insertions(+), 8 deletions(-)
  

Comments

Ferruh Yigit Dec. 21, 2022, 4:27 p.m. UTC | #1
On 12/21/2022 2:52 AM, Jesna K E wrote:
> For Multiprocess dpdk applications retrieving the offload parameter
> for receive packet directly from struct eth_dev
> giving segmentation fault since rxmode.offloads from eth_dev was
> null duirng recv_pkts. So retrieved offload from rx/tx_queue_setup()
> 

I don't see any issue with storing the offload values in the queue_setup functions,
and it can be more performant to access them from the queue struct,

but I am not clear why it causes a segfault in the data path: by the time the
burst functions start to run, the device should already be configured. Finding
the root cause helps us figure out if anything is missing, hence:

Can you please give more details on why/how the seg fault happens?

> Signed-off-by: Jesna K E <jesna.k.e@amd.com>
> ---
>  drivers/net/axgbe/axgbe_rxtx.c | 10 ++++------
>  drivers/net/axgbe/axgbe_rxtx.h |  4 ++--
>  2 files changed, 6 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
> index 9b283bd9d0..5dc4fe7369 100644
> --- a/drivers/net/axgbe/axgbe_rxtx.c
> +++ b/drivers/net/axgbe/axgbe_rxtx.c
> @@ -86,6 +86,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
>  	if (rxq->free_thresh >  rxq->nb_desc)
>  		rxq->free_thresh = rxq->nb_desc >> 3;
>  
> +	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
>  	/* Allocate RX ring hardware descriptors */
>  	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
>  	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
> @@ -211,7 +212,6 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  	unsigned int err, etlt;
>  	uint32_t error_status;
>  	uint16_t idx, pidx, pkt_len;
> -	uint64_t offloads;
>  
>  	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
>  	while (nb_rx < nb_pkts) {
> @@ -278,14 +278,13 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
>  		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
>  				RX_NORMAL_DESC3, ETLT);
> -		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
>  		if (!err || !etlt) {
>  			if (etlt == RX_CVLAN_TAG_PRESENT) {
>  				mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
>  				mbuf->vlan_tci =
>  					AXGMAC_GET_BITS_LE(desc->write.desc0,
>  							RX_NORMAL_DESC0, OVT);
> -				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
> +				if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
>  					mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
>  				else
>  					mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
> @@ -345,7 +344,6 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>  	unsigned int err = 0, etlt;
>  	uint32_t error_status = 0;
>  	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
> -	uint64_t offloads;
>  	bool eop = 0;
>  
>  	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
> @@ -441,14 +439,13 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
>  				rte_le_to_cpu_32(desc->write.desc1);
>  		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
>  				RX_NORMAL_DESC3, ETLT);
> -		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
>  		if (!err || !etlt) {
>  			if (etlt == RX_CVLAN_TAG_PRESENT) {
>  				first_seg->ol_flags |= RTE_MBUF_F_RX_VLAN;
>  				first_seg->vlan_tci =
>  					AXGMAC_GET_BITS_LE(desc->write.desc0,
>  							RX_NORMAL_DESC0, OVT);
> -				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
> +				if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
>  					first_seg->ol_flags |=
>  						RTE_MBUF_F_RX_VLAN_STRIPPED;
>  				else
> @@ -606,6 +603,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
>  	txq->desc = tz->addr;
>  	txq->queue_id = queue_idx;
>  	txq->port_id = dev->data->port_id;
> +	txq->offloads = offloads;
>  	txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
>  		(DMA_CH_INC * txq->queue_id));
>  	txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
> diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
> index d5660f5c4b..56e7c7aadd 100644
> --- a/drivers/net/axgbe/axgbe_rxtx.h
> +++ b/drivers/net/axgbe/axgbe_rxtx.h
> @@ -100,7 +100,7 @@ struct axgbe_rx_queue {
>  	uint64_t rx_mbuf_alloc_failed;
>  	/* Number of mbufs allocated from pool*/
>  	uint64_t mbuf_alloc;
> -
> +	uint64_t offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_**/
>  } __rte_cache_aligned;
>  
>  /*Tx descriptor format */
> @@ -149,7 +149,7 @@ struct axgbe_tx_queue {
>  	uint64_t pkts;
>  	uint64_t bytes;
>  	uint64_t errors;
> -
> +	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
>  } __rte_cache_aligned;
>  
>  /*Queue related APIs */
  

Patch

diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 9b283bd9d0..5dc4fe7369 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -86,6 +86,7 @@  int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (rxq->free_thresh >  rxq->nb_desc)
 		rxq->free_thresh = rxq->nb_desc >> 3;
 
+	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 	/* Allocate RX ring hardware descriptors */
 	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
 	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
@@ -211,7 +212,6 @@  axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	unsigned int err, etlt;
 	uint32_t error_status;
 	uint16_t idx, pidx, pkt_len;
-	uint64_t offloads;
 
 	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
 	while (nb_rx < nb_pkts) {
@@ -278,14 +278,13 @@  axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
 		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
 				RX_NORMAL_DESC3, ETLT);
-		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
 		if (!err || !etlt) {
 			if (etlt == RX_CVLAN_TAG_PRESENT) {
 				mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
 				mbuf->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+				if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
 				else
 					mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
@@ -345,7 +344,6 @@  uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 	unsigned int err = 0, etlt;
 	uint32_t error_status = 0;
 	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
-	uint64_t offloads;
 	bool eop = 0;
 
 	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
@@ -441,14 +439,13 @@  uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 				rte_le_to_cpu_32(desc->write.desc1);
 		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
 				RX_NORMAL_DESC3, ETLT);
-		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
 		if (!err || !etlt) {
 			if (etlt == RX_CVLAN_TAG_PRESENT) {
 				first_seg->ol_flags |= RTE_MBUF_F_RX_VLAN;
 				first_seg->vlan_tci =
 					AXGMAC_GET_BITS_LE(desc->write.desc0,
 							RX_NORMAL_DESC0, OVT);
-				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+				if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
 					first_seg->ol_flags |=
 						RTE_MBUF_F_RX_VLAN_STRIPPED;
 				else
@@ -606,6 +603,7 @@  int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	txq->desc = tz->addr;
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
+	txq->offloads = offloads;
 	txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
 		(DMA_CH_INC * txq->queue_id));
 	txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
index d5660f5c4b..56e7c7aadd 100644
--- a/drivers/net/axgbe/axgbe_rxtx.h
+++ b/drivers/net/axgbe/axgbe_rxtx.h
@@ -100,7 +100,7 @@  struct axgbe_rx_queue {
 	uint64_t rx_mbuf_alloc_failed;
 	/* Number of mbufs allocated from pool*/
 	uint64_t mbuf_alloc;
-
+	uint64_t offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_**/
 } __rte_cache_aligned;
 
 /*Tx descriptor format */
@@ -149,7 +149,7 @@  struct axgbe_tx_queue {
 	uint64_t pkts;
 	uint64_t bytes;
 	uint64_t errors;
-
+	uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
 } __rte_cache_aligned;
 
 /*Queue related APIs */