[v4,2/3] net/af_xdp: use correct fill queue addresses

Message ID 20200213084914.54755-3-ciara.loftus@intel.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series AF_XDP PMD Fixes

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     apply issues

Commit Message

Loftus, Ciara Feb. 13, 2020, 8:49 a.m. UTC
The fill queue addresses should start at the beginning of the mempool
object instead of the beginning of the mbuf. This is because the umem
frame headroom includes the mempool object header size. Starting at this
point ensures AF_XDP doesn't write past the available room in the frame
for larger packets whose size approaches that of the mbuf.
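
For illustration, a minimal sketch (not part of the patch; the helper
name is hypothetical) of the frame layout and the address arithmetic
this change implies, reusing the xsk_umem_info fields exactly as they
appear in the diff below:

    /*
     * One umem frame is backed by one mempool object:
     *
     *   |<- header_size ->|<- struct rte_mbuf + priv ->|<- headroom/data ->|
     *   object start       mbuf address                 packet data
     *
     * The fill queue address must index the object start, so the mbuf
     * address is pushed back by the mempool object header size.
     */
    static inline uint64_t
    mbuf_to_umem_addr(const struct rte_mbuf *mbuf,
                      const struct xsk_umem_info *umem)
    {
            return (uint64_t)mbuf - (uint64_t)umem->buffer -
                            umem->mb_pool->header_size;
    }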

Fixes: d8a210774e1d ("net/af_xdp: support unaligned umem chunks")
Cc: stable@dpdk.org

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Reviewed-by: Xiaolong Ye <xiaolong.ye@intel.com>
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)
  

Patch

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 111ab000cc..a0edfc3cd3 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -171,7 +171,8 @@ reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
 		uint64_t addr;
 
 		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
-		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer;
+		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
+				umem->mb_pool->header_size;
 		*fq_addr = addr;
 	}
 
@@ -270,8 +271,11 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		addr = xsk_umem__extract_addr(addr);
 
 		bufs[i] = (struct rte_mbuf *)
-				xsk_umem__get_data(umem->buffer, addr);
-		bufs[i]->data_off = offset - sizeof(struct rte_mbuf);
+				xsk_umem__get_data(umem->buffer, addr +
+					umem->mb_pool->header_size);
+		bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
+			rte_pktmbuf_priv_size(umem->mb_pool) -
+			umem->mb_pool->header_size;
 
 		rte_pktmbuf_pkt_len(bufs[i]) = len;
 		rte_pktmbuf_data_len(bufs[i]) = len;
@@ -384,7 +388,8 @@ pull_umem_cq(struct xsk_umem_info *umem, int size)
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
 		addr = xsk_umem__extract_addr(addr);
 		rte_pktmbuf_free((struct rte_mbuf *)
-					xsk_umem__get_data(umem->buffer, addr));
+					xsk_umem__get_data(umem->buffer,
+					addr + umem->mb_pool->header_size));
 #else
 		rte_ring_enqueue(umem->buf_ring, (void *)addr);
 #endif
@@ -442,9 +447,11 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			}
 			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
 			desc->len = mbuf->pkt_len;
-			addr = (uint64_t)mbuf - (uint64_t)umem->buffer;
+			addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
+					umem->mb_pool->header_size;
 			offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
-					(uint64_t)mbuf;
+					(uint64_t)mbuf +
+					umem->mb_pool->header_size;
 			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
 			desc->addr = addr | offset;
 			count++;
@@ -465,9 +472,11 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
 			desc->len = mbuf->pkt_len;
 
-			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer;
+			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
+					umem->mb_pool->header_size;
 			offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
-					(uint64_t)local_mbuf;
+					(uint64_t)local_mbuf +
+					umem->mb_pool->header_size;
 			pkt = xsk_umem__get_data(umem->buffer, addr + offset);
 			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
 			desc->addr = addr | offset;
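
For completeness, the inverse mapping performed by the Rx and completion
queue hunks above can be read as the following sketch (the helper is
hypothetical and not part of the patch):

    /*
     * A umem address handed back by the kernel indexes the mempool
     * object start; the embedded struct rte_mbuf sits header_size
     * bytes further into the frame.
     */
    static inline struct rte_mbuf *
    umem_addr_to_mbuf(struct xsk_umem_info *umem, uint64_t addr)
    {
            return (struct rte_mbuf *)xsk_umem__get_data(umem->buffer,
                            addr + umem->mb_pool->header_size);
    }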