[v3,8/9] net/gve: add support to get dev info and configure dev

Message ID 20220923093829.3019525-9-junfeng.guo@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series: introduce GVE PMD

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Junfeng Guo Sept. 23, 2022, 9:38 a.m. UTC
Add dev_ops dev_infos_get to report device capabilities and limits.
Complete dev_configure with Rx offloads configuration.

Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/nics/features/gve.ini |  1 +
 drivers/net/gve/gve_ethdev.c     | 68 +++++++++++++++++++++++++++++++-
 2 files changed, 68 insertions(+), 1 deletion(-)

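For context (not part of the patch): a minimal application-side sketch of how
these new hooks are exercised. The function name, port id and queue counts are
assumptions for illustration, not taken from this series.

#include <rte_ethdev.h>

/* Query what gve_dev_info_get() reports, then configure the port so that
 * gve_dev_configure() takes both new branches: an RSS mq_mode (which makes
 * the PMD turn on RTE_ETH_RX_OFFLOAD_RSS_HASH itself) and TCP LRO when the
 * device advertises it (DQO_RDA queue format only).
 */
static int
configure_gve_port(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;

	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

	return rte_eth_dev_configure(port_id, dev_info.max_rx_queues,
				     dev_info.max_tx_queues, &port_conf);
}
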
Patch

diff --git a/doc/guides/nics/features/gve.ini b/doc/guides/nics/features/gve.ini
index 38dc7024d6..cdc46b08a3 100644
--- a/doc/guides/nics/features/gve.ini
+++ b/doc/guides/nics/features/gve.ini
@@ -8,6 +8,7 @@  Speed capabilities   = Y
 Link status          = Y
 MTU update           = Y
 TSO                  = Y
+RSS hash             = Y
 L4 checksum offload  = Y
 Linux                = Y
 x86-32               = Y
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index dcf79ddb23..e3195376c4 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -93,6 +93,16 @@  gve_free_qpls(struct gve_priv *priv)
 static int
-gve_dev_configure(__rte_unused struct rte_eth_dev *dev)
+gve_dev_configure(struct rte_eth_dev *dev)
 {
+	struct gve_priv *priv = dev->data->dev_private;
+
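+	/* Force the RSS hash offload on when an RSS mq_mode is requested */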
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
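+	/* Map the TCP LRO offload request to the RSC enable flag */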
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
+		priv->enable_rsc = 1;
+
 	return 0;
 }
 
@@ -266,6 +276,61 @@  gve_dev_close(struct rte_eth_dev *dev)
 	return err;
 }
 
+static int
+gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+	struct gve_priv *priv = dev->data->dev_private;
+
+	dev_info->device = dev->device;
+	dev_info->max_mac_addrs = 1;
+	dev_info->max_rx_queues = priv->max_nb_rxq;
+	dev_info->max_tx_queues = priv->max_nb_txq;
+	dev_info->min_rx_bufsize = GVE_MIN_BUF_SIZE;
+	dev_info->max_rx_pktlen = GVE_MAX_RX_PKTLEN;
+	dev_info->max_mtu = RTE_ETHER_MTU;
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+
+	dev_info->rx_offload_capa = 0;
+	dev_info->tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+		RTE_ETH_TX_OFFLOAD_TCP_TSO;
+
+	if (priv->queue_format == GVE_DQO_RDA_FORMAT)
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH,
+		.offloads = 0,
+	};
+
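+	/* Ring sizes are fixed by the device: report equal min/max limits */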
+	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = priv->rx_desc_cnt,
+		.nb_min = priv->rx_desc_cnt,
+		.nb_align = 1,
+	};
+
+	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = priv->tx_desc_cnt,
+		.nb_min = priv->tx_desc_cnt,
+		.nb_align = 1,
+	};
+
+	return 0;
+}
+
 static int
 gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -299,6 +364,7 @@  static const struct eth_dev_ops gve_eth_dev_ops = {
 	.dev_start            = gve_dev_start,
 	.dev_stop             = gve_dev_stop,
 	.dev_close            = gve_dev_close,
+	.dev_infos_get        = gve_dev_info_get,
 	.rx_queue_setup       = gve_rx_queue_setup,
 	.tx_queue_setup       = gve_tx_queue_setup,
 	.link_update          = gve_link_update,
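
Since gve_dev_info_get() reports nb_min == nb_max for both descriptor limits,
applications must create rings with exactly the size the device advertises. A
hedged sketch of queue setup (the mbuf pool "mb_pool" and helper name are
assumptions; errors are simply propagated):

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_gve_rx_queue(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* nb_min == nb_max, so this is the only accepted descriptor count */
	return rte_eth_rx_queue_setup(port_id, 0, dev_info.rx_desc_lim.nb_max,
				      rte_eth_dev_socket_id(port_id),
				      &dev_info.default_rxconf, mb_pool);
}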