[v1,06/12] net/ice: add Rx queue init in DCF
diff mbox series

Message ID 20200605201737.33766-7-ting.xu@intel.com
State Superseded
Delegated to: xiaolong ye
Headers show
Series
  • enable DCF datapath configuration
Related show

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Ting Xu June 5, 2020, 8:17 p.m. UTC
From: Qi Zhang <qi.z.zhang@intel.com>

Enable Rx queue initialization during device start in DCF.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/ice/ice_dcf.h        |  1 +
 drivers/net/ice/ice_dcf_ethdev.c | 83 ++++++++++++++++++++++++++++++++
 2 files changed, 84 insertions(+)

Patch
diff mbox series

diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 152266e3c..dcb2a0283 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -53,6 +53,7 @@  struct ice_dcf_hw {
 	uint8_t *rss_lut;
 	uint8_t *rss_key;
 	uint64_t supported_rxdid;
+	uint16_t num_queue_pairs;
 };
 
 int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 62ef71ddb..1f7474dc3 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -42,14 +42,97 @@  ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
 	return 0;
 }
 
+static int
+ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
+{
+	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+	struct rte_eth_dev_data *dev_data = dev->data;
+	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
+	uint16_t buf_size, max_pkt_len, len;
+
+	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+
+	/* Calculate the maximum packet length allowed */
+	len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
+	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+	/* Check if the jumbo frame and maximum packet length are set
+	 * correctly.
+	 */
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
+		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "maximum packet length must be "
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is enabled",
+				    (uint32_t)RTE_ETHER_MAX_LEN,
+				    (uint32_t)ICE_FRAME_SIZE_MAX);
+			return -EINVAL;
+		}
+	} else {
+		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
+		    max_pkt_len > RTE_ETHER_MAX_LEN) {
+			PMD_DRV_LOG(ERR, "maximum packet length must be "
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is disabled",
+				    (uint32_t)RTE_ETHER_MIN_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN);
+			return -EINVAL;
+		}
+	}
+
+	rxq->max_pkt_len = max_pkt_len;
+	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
+		dev_data->scattered_rx = 1;
+	}
+	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
+	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+	IAVF_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+static int
+ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
+{
+	struct ice_rx_queue **rxq =
+		(struct ice_rx_queue **)dev->data->rx_queues;
+	int i, ret;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (!rxq[i] || !rxq[i]->q_set)
+			continue;
+		ret = ice_dcf_init_rxq(dev, rxq[i]);
+		if (ret)
+			return ret;
+	}
+
+	ice_set_rx_function(dev);
+	ice_set_tx_function(dev);
+
+	return 0;
+}
+
 static int
 ice_dcf_dev_start(struct rte_eth_dev *dev)
 {
 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
 	struct ice_adapter *ad = &dcf_ad->parent;
+	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
+	int ret;
 
 	ad->pf.adapter_stopped = 0;
 
+	hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+				      dev->data->nb_tx_queues);
+
+	ret = ice_dcf_init_rx_queues(dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to init queues");
+		return ret;
+	}
+
 	dev->data->dev_link.link_status = ETH_LINK_UP;
 
 	return 0;