[16/35] net/ionic: precalculate segment lengths on receive side

Message ID: 20221007174336.54354-17-andrew.boyer@amd.com
State: Superseded, archived
Delegated to: Ferruh Yigit
Series: net/ionic: updates for 22.11 release

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Boyer, Andrew Oct. 7, 2022, 5:43 p.m. UTC
  The first (header) segment includes the standard headroom.
Subsequent segments do not.

Store the fragment count in the queue structure and the segment
sizes in the Rx queue structure.

Precalculating these values improves performance by reducing
the amount of work done per packet in the hot path.

Signed-off-by: Andrew Boyer <andrew.boyer@amd.com>
---
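Illustrative note (not part of the patch): a minimal standalone C sketch of
the segment arithmetic used above. The 2176-byte mbuf data room, the 128-byte
headroom and the 9000-byte frame are assumed example values; the driver reads
the real ones from rte_pktmbuf_data_room_size(), RTE_PKTMBUF_HEADROOM and the
configured MTU.

/*
 * Illustrative sketch only. Only the first (header) segment leaves the
 * standard headroom; every later segment uses the full mbuf data room.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_HEADROOM   128u   /* stands in for RTE_PKTMBUF_HEADROOM */
#define EXAMPLE_DATA_ROOM  2176u  /* stands in for rte_pktmbuf_data_room_size() */

int main(void)
{
	uint16_t seg_size = EXAMPLE_DATA_ROOM;
	/* The first (header) segment must leave the standard headroom */
	uint16_t hdr_seg_size = seg_size - EXAMPLE_HEADROOM;
	uint16_t frame_size = 9000;	/* example frame size, CRC removed */

	/* One header segment plus enough full segments for the rest */
	uint16_t num_segs = 1 +
		(frame_size + EXAMPLE_HEADROOM - 1) / seg_size;

	printf("hdr_seg_size %u seg_size %u num_segs %u capacity %u\n",
		(unsigned int)hdr_seg_size, (unsigned int)seg_size,
		(unsigned int)num_segs,
		(unsigned int)(num_segs * seg_size - EXAMPLE_HEADROOM));
	return 0;
}

With these example values the queue precomputes hdr_seg_size 2048, seg_size
2176 and num_segs 5 (capacity 10752 bytes), so the fill and clean routines no
longer have to recompute the sizes for every packet.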
 drivers/net/ionic/ionic_dev.h  |  1 +
 drivers/net/ionic/ionic_lif.c  | 36 ++++++++++++++++++++++++--
 drivers/net/ionic/ionic_lif.h  |  4 ++-
 drivers/net/ionic/ionic_rxtx.c | 46 ++++++++++++++--------------------
 4 files changed, 57 insertions(+), 30 deletions(-)
  

Patch

diff --git a/drivers/net/ionic/ionic_dev.h b/drivers/net/ionic/ionic_dev.h
index f72c05342c..55a9485bff 100644
--- a/drivers/net/ionic/ionic_dev.h
+++ b/drivers/net/ionic/ionic_dev.h
@@ -137,6 +137,7 @@  struct ionic_dev {
 
 struct ionic_queue {
 	uint16_t num_descs;
+	uint16_t num_segs;
 	uint16_t head_idx;
 	uint16_t tail_idx;
 	uint16_t size_mask;
diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index 578e36e60c..cc64aedaa1 100644
--- a/drivers/net/ionic/ionic_lif.c
+++ b/drivers/net/ionic/ionic_lif.c
@@ -566,6 +566,7 @@  ionic_qcq_alloc(struct ionic_lif *lif,
 		const char *type_name,
 		uint16_t flags,
 		uint16_t num_descs,
+		uint16_t num_segs,
 		uint16_t desc_size,
 		uint16_t cq_desc_size,
 		uint16_t sg_desc_size,
@@ -616,6 +617,7 @@  ionic_qcq_alloc(struct ionic_lif *lif,
 		goto err_out_free_qcq;
 	}
 
+	new->q.num_segs = num_segs;
 	new->q.type = type;
 
 	err = ionic_q_init(&new->q, index, num_descs);
@@ -698,14 +700,38 @@  ionic_qcq_free(struct ionic_qcq *qcq)
 
 int
 ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
-		uint16_t nrxq_descs, struct ionic_rx_qcq **rxq_out)
+		uint16_t nrxq_descs, struct rte_mempool *mb_pool,
+		struct ionic_rx_qcq **rxq_out)
 {
 	struct ionic_rx_qcq *rxq;
-	uint16_t flags;
+	uint16_t flags, seg_size, hdr_seg_size, max_segs, max_segs_fw;
+	uint32_t max_mtu;
 	int err;
 
 	flags = IONIC_QCQ_F_SG;
 
+	seg_size = rte_pktmbuf_data_room_size(mb_pool);
+
+	/* The first mbuf needs to leave headroom */
+	hdr_seg_size = seg_size - RTE_PKTMBUF_HEADROOM;
+
+	max_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);
+
+	max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
+
+	/*
+	 * Calculate how many fragment pointers might be stored in queue.
+	 */
+	max_segs = 1 + (max_mtu + RTE_PKTMBUF_HEADROOM - 1) / seg_size;
+
+	IONIC_PRINT(DEBUG, "rxq %u frame_size %u seg_size %u max_segs %u",
+		index, lif->frame_size, seg_size, max_segs);
+	if (max_segs > max_segs_fw) {
+		IONIC_PRINT(ERR, "Rx mbuf size insufficient (%d > %d avail)",
+			max_segs, max_segs_fw);
+		return -EINVAL;
+	}
+
 	err = ionic_qcq_alloc(lif,
 		IONIC_QTYPE_RXQ,
 		sizeof(struct ionic_rx_qcq),
@@ -714,6 +740,7 @@  ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
 		"rx",
 		flags,
 		nrxq_descs,
+		max_segs,
 		sizeof(struct ionic_rxq_desc),
 		sizeof(struct ionic_rxq_comp),
 		sizeof(struct ionic_rxq_sg_desc),
@@ -722,6 +749,8 @@  ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
 		return err;
 
 	rxq->flags = flags;
+	rxq->seg_size = seg_size;
+	rxq->hdr_seg_size = hdr_seg_size;
 
 	lif->rxqcqs[index] = rxq;
 	*rxq_out = rxq;
@@ -749,6 +778,7 @@  ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
 		"tx",
 		flags,
 		ntxq_descs,
+		1,
 		sizeof(struct ionic_txq_desc),
 		sizeof(struct ionic_txq_comp),
 		sizeof(struct ionic_txq_sg_desc_v1),
@@ -779,6 +809,7 @@  ionic_admin_qcq_alloc(struct ionic_lif *lif)
 		"admin",
 		flags,
 		IONIC_ADMINQ_LENGTH,
+		1,
 		sizeof(struct ionic_admin_cmd),
 		sizeof(struct ionic_admin_comp),
 		0,
@@ -805,6 +836,7 @@  ionic_notify_qcq_alloc(struct ionic_lif *lif)
 		"notify",
 		flags,
 		IONIC_NOTIFYQ_LENGTH,
+		1,
 		sizeof(struct ionic_notifyq_cmd),
 		sizeof(union ionic_notifyq_comp),
 		0,
diff --git a/drivers/net/ionic/ionic_lif.h b/drivers/net/ionic/ionic_lif.h
index b3f0ce720b..237fd0a2ef 100644
--- a/drivers/net/ionic/ionic_lif.h
+++ b/drivers/net/ionic/ionic_lif.h
@@ -81,6 +81,8 @@  struct ionic_rx_qcq {
 	/* cacheline2 */
 	struct rte_mempool *mb_pool;
 	uint16_t frame_size;	/* Based on configured MTU */
+	uint16_t hdr_seg_size;	/* Length of first segment of RX chain */
+	uint16_t seg_size;	/* Length of all subsequent segments */
 	uint16_t flags;
 
 	/* cacheline3 (inside stats) */
@@ -199,7 +201,7 @@  int ionic_dev_allmulticast_enable(struct rte_eth_dev *dev);
 int ionic_dev_allmulticast_disable(struct rte_eth_dev *dev);
 
 int ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id,
-	uint32_t index, uint16_t nrxq_descs,
+	uint32_t index, uint16_t nrxq_descs, struct rte_mempool *mp,
 	struct ionic_rx_qcq **qcq_out);
 int ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id,
 	uint32_t index, uint16_t ntxq_descs,
diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index 17ffaf6aac..0f251eca13 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -732,7 +732,7 @@  ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	eth_dev->data->rx_queue_state[rx_queue_id] =
 		RTE_ETH_QUEUE_STATE_STOPPED;
 
-	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
+	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
 			&rxq);
 	if (err) {
 		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
@@ -773,9 +773,6 @@  ionic_rx_clean(struct ionic_rx_qcq *rxq,
 	uint64_t pkt_flags = 0;
 	uint32_t pkt_type;
 	struct ionic_rx_stats *stats = &rxq->stats;
-	uint32_t buf_size = (uint16_t)
-		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
-		RTE_PKTMBUF_HEADROOM);
 	uint32_t left;
 	void **info;
 
@@ -809,14 +806,12 @@  ionic_rx_clean(struct ionic_rx_qcq *rxq,
 	rxm->pkt_len = cq_desc->len;
 	rxm->port = rxq->qcq.lif->port_id;
 
-	left = cq_desc->len;
-
-	rxm->data_len = RTE_MIN(buf_size, left);
-	left -= rxm->data_len;
+	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc->len);
+	left = cq_desc->len - rxm->data_len;
 
 	rxm_seg = rxm->next;
 	while (rxm_seg && left) {
-		rxm_seg->data_len = RTE_MIN(buf_size, left);
+		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
 		left -= rxm_seg->data_len;
 
 		rxm_seg = rxm_seg->next;
@@ -926,10 +921,7 @@  ionic_rx_fill(struct ionic_rx_qcq *rxq)
 	struct ionic_rxq_sg_elem *elem;
 	void **info;
 	rte_iova_t dma_addr;
-	uint32_t i, j, nsegs, buf_size, size;
-
-	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
-		RTE_PKTMBUF_HEADROOM);
+	uint32_t i, j;
 
 	/* Initialize software ring entries */
 	for (i = ionic_q_space_avail(q); i; i--) {
@@ -943,21 +935,18 @@  ionic_rx_fill(struct ionic_rx_qcq *rxq)
 
 		info = IONIC_INFO_PTR(q, q->head_idx);
 
-		nsegs = (rxq->frame_size + buf_size - 1) / buf_size;
-
 		desc = &desc_base[q->head_idx];
 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
 		desc->addr = dma_addr;
-		desc->len = buf_size;
-		size = buf_size;
-		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
+		desc->len = rxq->hdr_seg_size;
+		desc->opcode = (q->num_segs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
 			IONIC_RXQ_DESC_OPCODE_SIMPLE;
 		rxm->next = NULL;
 
 		prev_rxm_seg = rxm;
 		sg_desc = &sg_desc_base[q->head_idx];
 		elem = sg_desc->elems;
-		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
+		for (j = 0; j < q->num_segs - 1u; j++) {
 			struct rte_mbuf *rxm_seg;
 			rte_iova_t data_iova;
 
@@ -967,21 +956,18 @@  ionic_rx_fill(struct ionic_rx_qcq *rxq)
 				return -ENOMEM;
 			}
 
+			rxm_seg->data_off = 0;
 			data_iova = rte_mbuf_data_iova(rxm_seg);
 			dma_addr = rte_cpu_to_le_64(data_iova);
 			elem->addr = dma_addr;
-			elem->len = buf_size;
-			size += buf_size;
+			elem->len = rxq->seg_size;
 			elem++;
+
 			rxm_seg->next = NULL;
 			prev_rxm_seg->next = rxm_seg;
 			prev_rxm_seg = rxm_seg;
 		}
 
-		if (size < rxq->frame_size)
-			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
-				size, rxq->frame_size);
-
 		info[0] = rxm;
 
 		q->head_idx = Q_NEXT_TO_POST(q, 1);
@@ -1000,6 +986,7 @@  ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
 	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
 	struct ionic_rx_qcq *rxq;
+	struct ionic_queue *q;
 	int err;
 
 	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
@@ -1009,11 +996,16 @@  ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 	}
 
 	rxq = eth_dev->data->rx_queues[rx_queue_id];
+	q = &rxq->qcq.q;
 
 	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;
 
-	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u",
-		rx_queue_id, rxq->qcq.q.num_descs, rxq->frame_size);
+	/* Recalculate segment count based on MTU */
+	q->num_segs = 1 +
+		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;
+
+	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
+		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);
 
 	err = ionic_lif_rxq_init(rxq);
 	if (err)