[dpdk-dev,v2,1/3] ixgbe: Use the rte_le_to_cpu_xx()/rte_cpu_to_le_xx() when reading/setting HW ring descriptor fields

Message ID 1425918532-8601-2-git-send-email-vladz@cloudius-systems.com (mailing list archive)
State Superseded, archived

Commit Message

Vladislav Zolotarov March 9, 2015, 4:28 p.m. UTC
  Fixed the above in ixgbe_rx_alloc_bufs() and in ixgbe_recv_scattered_pkts().

Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
---
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
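
For readers skimming the diff below, the core pattern the patch applies is small: descriptor fields are little-endian on the wire, so values are converted with rte_cpu_to_le_64() before being written and with rte_le_to_cpu_32()/rte_le_to_cpu_16() after being read back. The following is a minimal sketch of that pattern, not the driver code itself; the struct is a simplified stand-in for the real union ixgbe_adv_rx_desc, and refill_descriptor()/read_rss_hash() are hypothetical helper names used only for illustration.

```c
#include <stdint.h>
#include <rte_byteorder.h>

/* Simplified stand-in for the "read" format of the ixgbe advanced RX
 * descriptor; the real layout is union ixgbe_adv_rx_desc in the base code. */
struct rx_desc_read {
	uint64_t pkt_addr;	/* little-endian as seen by the NIC */
	uint64_t hdr_addr;	/* little-endian as seen by the NIC */
};

static void
refill_descriptor(struct rx_desc_read *rxd, uint64_t buf_dma_addr)
{
	/* HW expects little-endian: convert once in CPU order, then store
	 * the already-converted value into both descriptor fields. */
	uint64_t dma_addr = rte_cpu_to_le_64(buf_dma_addr);

	rxd->hdr_addr = dma_addr;
	rxd->pkt_addr = dma_addr;
}

static uint32_t
read_rss_hash(uint32_t rss_le)
{
	/* Write-back fields filled in by HW are little-endian: convert to
	 * CPU order before handing the value to the rest of the stack. */
	return rte_le_to_cpu_32(rss_le);
}
```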
  

Comments

Ananyev, Konstantin March 10, 2015, 9:23 a.m. UTC | #1
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Vlad Zolotarov
> Sent: Monday, March 09, 2015 4:29 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 1/3] ixgbe: Use the rte_le_to_cpu_xx()/rte_cpu_to_le_xx() when reading/setting HW ring descriptor
> fields
> 
> Fixed the above in ixgbe_rx_alloc_bufs() and in ixgbe_recv_scattered_pkts().
> 

Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

> Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
> ---
>  lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 13 +++++++------
>  1 file changed, 7 insertions(+), 6 deletions(-)
> 
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> index 9ecf3e5..b033e04 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> @@ -1028,7 +1028,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
>  	struct igb_rx_entry *rxep;
>  	struct rte_mbuf *mb;
>  	uint16_t alloc_idx;
> -	uint64_t dma_addr;
> +	__le64 dma_addr;
>  	int diag, i;
> 
>  	/* allocate buffers in bulk directly into the S/W ring */
> @@ -1051,7 +1051,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
>  		mb->port = rxq->port_id;
> 
>  		/* populate the descriptors */
> -		dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
> +		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
>  		rxdp[i].read.hdr_addr = dma_addr;
>  		rxdp[i].read.pkt_addr = dma_addr;
>  	}
> @@ -1559,13 +1559,14 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  		first_seg->ol_flags = pkt_flags;
> 
>  		if (likely(pkt_flags & PKT_RX_RSS_HASH))
> -			first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
> +			first_seg->hash.rss =
> +				    rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
>  		else if (pkt_flags & PKT_RX_FDIR) {
>  			first_seg->hash.fdir.hash =
> -				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
> -					   & IXGBE_ATR_HASH_MASK);
> +			    rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.csum)
> +					   & IXGBE_ATR_HASH_MASK;
>  			first_seg->hash.fdir.id =
> -				rxd.wb.lower.hi_dword.csum_ip.ip_id;
> +			  rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.ip_id);
>  		}
> 
>  		/* Prefetch data of first segment, if configured to do so. */
> --
> 2.1.0
  

Patch

diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 9ecf3e5..b033e04 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1028,7 +1028,7 @@  ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 	struct igb_rx_entry *rxep;
 	struct rte_mbuf *mb;
 	uint16_t alloc_idx;
-	uint64_t dma_addr;
+	__le64 dma_addr;
 	int diag, i;
 
 	/* allocate buffers in bulk directly into the S/W ring */
@@ -1051,7 +1051,7 @@  ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 		mb->port = rxq->port_id;
 
 		/* populate the descriptors */
-		dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
 		rxdp[i].read.hdr_addr = dma_addr;
 		rxdp[i].read.pkt_addr = dma_addr;
 	}
@@ -1559,13 +1559,14 @@  ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = pkt_flags;
 
 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
-			first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+			first_seg->hash.rss =
+				    rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
 		else if (pkt_flags & PKT_RX_FDIR) {
 			first_seg->hash.fdir.hash =
-				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-					   & IXGBE_ATR_HASH_MASK);
+			    rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.csum)
+					   & IXGBE_ATR_HASH_MASK;
 			first_seg->hash.fdir.id =
-				rxd.wb.lower.hi_dword.csum_ip.ip_id;
+			  rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.ip_id);
 		}
 
 		/* Prefetch data of first segment, if configured to do so. */
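
Worth noting on cost: on little-endian targets the rte_cpu_to_le_xx()/rte_le_to_cpu_xx() macros expand to plain assignments, so the conversions added in this patch are free there; only big-endian builds perform an actual byte swap, which is precisely what makes the descriptor reads and writes correct on those machines.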