[v8,03/21] net/cpfl: add Rx queue setup

Message ID 20230302103527.931071-4-mingxia.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series: add support for cpfl PMD in DPDK

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Liu, Mingxia March 2, 2023, 10:35 a.m. UTC
  Add support for rx_queue_setup ops.

Two queue models are supported for the Rx queue: the single queue
model and the split queue model.

In the single queue model, the same descriptor Rx queue is used both
by SW to post buffer descriptors to HW and by HW to post completed
descriptors to SW.

In the split queue model, "Rx buffer queues" are used to pass buffer
descriptors from SW to HW, while Rx queues are used only to pass
descriptor completions from HW to SW.
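
For reference, below is a minimal application-side sketch of how this op is
exercised: the ethdev API rte_eth_rx_queue_setup() dispatches to the PMD's
rx_queue_setup callback, i.e. the cpfl_rx_queue_setup() added by this patch.
The port id, queue id, descriptor count and mempool sizing are illustrative
assumptions, not values taken from the patch.

/*
 * Illustrative only: port 0, queue 0 and the pool sizing below are
 * assumptions for the example, not values mandated by the driver.
 */
#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static int
setup_one_rx_queue(uint16_t port_id, struct rte_mempool **mp_out)
{
	struct rte_eth_rxconf rxconf = {
		/* 0 would make the PMD fall back to CPFL_DEFAULT_RX_FREE_THRESH (32). */
		.rx_free_thresh = 32,
	};
	int socket_id = rte_eth_dev_socket_id(port_id);
	struct rte_mempool *mp;

	/* One mbuf pool backs the Rx queue (and both buffer queues in split queue model). */
	mp = rte_pktmbuf_pool_create("rx_pool", 4096, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
	if (mp == NULL)
		return -ENOMEM;

	*mp_out = mp;
	/* Dispatches to the rx_queue_setup ops, cpfl_rx_queue_setup() for this PMD. */
	return rte_eth_rx_queue_setup(port_id, 0, 1024, socket_id,
				      &rxconf, mp);
}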

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  11 ++
 drivers/net/cpfl/cpfl_rxtx.c   | 232 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h   |   6 +
 3 files changed, 249 insertions(+)
  

Patch

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b40f373fb9..99fd86d6d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -99,12 +99,22 @@  cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,
 	};
 
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,
+	};
+
 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = CPFL_MAX_RING_DESC,
 		.nb_min = CPFL_MIN_RING_DESC,
 		.nb_align = CPFL_ALIGN_RING_DESC,
 	};
 
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = CPFL_MAX_RING_DESC,
+		.nb_min = CPFL_MIN_RING_DESC,
+		.nb_align = CPFL_ALIGN_RING_DESC,
+	};
+
 	return 0;
 }
 
@@ -191,6 +201,7 @@  cpfl_dev_close(struct rte_eth_dev *dev)
 static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.dev_configure			= cpfl_dev_configure,
 	.dev_close			= cpfl_dev_close,
+	.rx_queue_setup			= cpfl_rx_queue_setup,
 	.tx_queue_setup			= cpfl_tx_queue_setup,
 	.dev_infos_get			= cpfl_dev_info_get,
 	.link_update			= cpfl_dev_link_update,
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 737d069ec2..930d725a4a 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -9,6 +9,25 @@ 
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
 
+static uint64_t
+cpfl_rx_offload_convert(uint64_t offload)
+{
+	uint64_t ol = 0;
+
+	if ((offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_IPV4_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_UDP_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_TCP_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+		ol |= IDPF_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	if ((offload & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)
+		ol |= IDPF_RX_OFFLOAD_TIMESTAMP;
+
+	return ol;
+}
+
 static uint64_t
 cpfl_tx_offload_convert(uint64_t offload)
 {
@@ -94,6 +113,219 @@  cpfl_dma_zone_release(const struct rte_memzone *mz)
 	rte_memzone_free(mz);
 }
 
+static int
+cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
+			 uint16_t queue_idx, uint16_t rx_free_thresh,
+			 uint16_t nb_desc, unsigned int socket_id,
+			 struct rte_mempool *mp, uint8_t bufq_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *base = vport->adapter;
+	struct idpf_hw *hw = &base->hw;
+	const struct rte_memzone *mz;
+	struct idpf_rx_queue *bufq;
+	uint16_t len;
+	int ret;
+
+	bufq = rte_zmalloc_socket("cpfl bufq",
+				   sizeof(struct idpf_rx_queue),
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (bufq == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue.");
+		ret = -ENOMEM;
+		goto err_bufq1_alloc;
+	}
+
+	bufq->mp = mp;
+	bufq->nb_rx_desc = nb_desc;
+	bufq->rx_free_thresh = rx_free_thresh;
+	bufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;
+	bufq->port_id = dev->data->port_id;
+	bufq->rx_hdr_len = 0;
+	bufq->adapter = base;
+
+	len = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;
+	bufq->rx_buf_len = len;
+
+	/* Allocate a little more to support bulk allocate. */
+	len = nb_desc + IDPF_RX_MAX_BURST;
+
+	mz = cpfl_dma_zone_reserve(dev, queue_idx, len,
+				   VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
+				   socket_id, true);
+	if (mz == NULL) {
+		ret = -ENOMEM;
+		goto err_mz_reserve;
+	}
+
+	bufq->rx_ring_phys_addr = mz->iova;
+	bufq->rx_ring = mz->addr;
+	bufq->mz = mz;
+
+	bufq->sw_ring =
+		rte_zmalloc_socket("cpfl rx bufq sw ring",
+				   sizeof(struct rte_mbuf *) * len,
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (bufq->sw_ring == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+		ret = -ENOMEM;
+		goto err_sw_ring_alloc;
+	}
+
+	idpf_qc_split_rx_bufq_reset(bufq);
+	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
+			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+	bufq->q_set = true;
+
+	if (bufq_id == IDPF_RX_SPLIT_BUFQ1_ID) {
+		rxq->bufq1 = bufq;
+	} else if (bufq_id == IDPF_RX_SPLIT_BUFQ2_ID) {
+		rxq->bufq2 = bufq;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid buffer queue index.");
+		ret = -EINVAL;
+		goto err_bufq_id;
+	}
+
+	return 0;
+
+err_bufq_id:
+	rte_free(bufq->sw_ring);
+err_sw_ring_alloc:
+	cpfl_dma_zone_release(mz);
+err_mz_reserve:
+	rte_free(bufq);
+err_bufq1_alloc:
+	return ret;
+}
+
+static void
+cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
+{
+	rte_free(bufq->sw_ring);
+	cpfl_dma_zone_release(bufq->mz);
+	rte_free(bufq);
+}
+
+int
+cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		    uint16_t nb_desc, unsigned int socket_id,
+		    const struct rte_eth_rxconf *rx_conf,
+		    struct rte_mempool *mp)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *base = vport->adapter;
+	struct idpf_hw *hw = &base->hw;
+	const struct rte_memzone *mz;
+	struct idpf_rx_queue *rxq;
+	uint16_t rx_free_thresh;
+	uint64_t offloads;
+	bool is_splitq;
+	uint16_t len;
+	int ret;
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	/* Check free threshold */
+	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+		CPFL_DEFAULT_RX_FREE_THRESH :
+		rx_conf->rx_free_thresh;
+	if (idpf_qc_rx_thresh_check(nb_desc, rx_free_thresh) != 0)
+		return -EINVAL;
+
+	/* Setup Rx queue */
+	rxq = rte_zmalloc_socket("cpfl rxq",
+				 sizeof(struct idpf_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (rxq == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+		ret = -ENOMEM;
+		goto err_rxq_alloc;
+	}
+
+	is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
+
+	rxq->mp = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_free_thresh;
+	rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+	rxq->port_id = dev->data->port_id;
+	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->rx_hdr_len = 0;
+	rxq->adapter = base;
+	rxq->offloads = cpfl_rx_offload_convert(offloads);
+
+	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+	rxq->rx_buf_len = len;
+
+	/* Allocate a little more to support bulk allocate. */
+	len = nb_desc + IDPF_RX_MAX_BURST;
+	mz = cpfl_dma_zone_reserve(dev, queue_idx, len, VIRTCHNL2_QUEUE_TYPE_RX,
+				   socket_id, is_splitq);
+	if (mz == NULL) {
+		ret = -ENOMEM;
+		goto err_mz_reserve;
+	}
+	rxq->rx_ring_phys_addr = mz->iova;
+	rxq->rx_ring = mz->addr;
+	rxq->mz = mz;
+
+	if (!is_splitq) {
+		rxq->sw_ring = rte_zmalloc_socket("cpfl rxq sw ring",
+						  sizeof(struct rte_mbuf *) * len,
+						  RTE_CACHE_LINE_SIZE,
+						  socket_id);
+		if (rxq->sw_ring == NULL) {
+			PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+			ret = -ENOMEM;
+			goto err_sw_ring_alloc;
+		}
+
+		idpf_qc_single_rx_queue_reset(rxq);
+		rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
+				queue_idx * vport->chunks_info.rx_qtail_spacing);
+	} else {
+		idpf_qc_split_rx_descq_reset(rxq);
+
+		/* Setup Rx buffer queues */
+		ret = cpfl_rx_split_bufq_setup(dev, rxq, 2 * queue_idx,
+					       rx_free_thresh, nb_desc,
+					       socket_id, mp, 1);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to setup buffer queue 1");
+			ret = -EINVAL;
+			goto err_bufq1_setup;
+		}
+
+		ret = cpfl_rx_split_bufq_setup(dev, rxq, 2 * queue_idx + 1,
+					       rx_free_thresh, nb_desc,
+					       socket_id, mp, 2);
+		if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to setup buffer queue 2");
+			ret = -EINVAL;
+			goto err_bufq2_setup;
+		}
+	}
+
+	rxq->q_set = true;
+	dev->data->rx_queues[queue_idx] = rxq;
+
+	return 0;
+
+err_bufq2_setup:
+	cpfl_rx_split_bufq_release(rxq->bufq1);
+err_bufq1_setup:
+err_sw_ring_alloc:
+	cpfl_dma_zone_release(mz);
+err_mz_reserve:
+	rte_free(rxq);
+err_rxq_alloc:
+	return ret;
+}
+
 static int
 cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
 		     uint16_t queue_idx, uint16_t nb_desc,
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 232630c5e9..e0221abfa3 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -16,10 +16,16 @@ 
 /* Base address of the HW descriptor ring should be 128B aligned. */
 #define CPFL_RING_BASE_ALIGN	128
 
+#define CPFL_DEFAULT_RX_FREE_THRESH	32
+
 #define CPFL_DEFAULT_TX_RS_THRESH	32
 #define CPFL_DEFAULT_TX_FREE_THRESH	32
 
 int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);
+int cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			uint16_t nb_desc, unsigned int socket_id,
+			const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mp);
 #endif /* _CPFL_RXTX_H_ */