[v1,30/42] net/txgbe: add device info get

Message ID 20200901115113.1529675-30-jiawenwu@trustnetic.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series: [v1,01/42] net/txgbe: add build and doc infrastructure

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Jiawen Wu Sept. 1, 2020, 11:51 a.m. UTC
  Add device information get operation.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/txgbe/txgbe_ethdev.c | 78 +++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_ethdev.h | 25 ++++++++
 drivers/net/txgbe/txgbe_rxtx.c   | 99 ++++++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_rxtx.h   |  4 ++
 4 files changed, 206 insertions(+)
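For context, a minimal application-side sketch (not part of the patch) of how the new
dev_infos_get callback is reached through the public ethdev API; the port id passed in
is an assumption for illustration:

  #include <inttypes.h>
  #include <stdio.h>
  #include <rte_ethdev.h>

  static void
  show_port_caps(uint16_t port_id)
  {
  	struct rte_eth_dev_info dev_info;

  	/* Dispatches to the PMD's dev_infos_get callback, here txgbe_dev_info_get(). */
  	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
  		return;

  	printf("max_rx_queues=%u rx_offload_capa=0x%" PRIx64 "\n",
  	       dev_info.max_rx_queues, dev_info.rx_offload_capa);
  }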
  

Comments

Ferruh Yigit Sept. 9, 2020, 5:54 p.m. UTC | #1
On 9/1/2020 12:51 PM, Jiawen Wu wrote:
> Add device information get operation.
> 
> Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>

<...>

> +uint64_t
> +txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
> +{
> +	uint64_t offloads = 0;
> +
> +	offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
>

Instead of initializing to zero and OR-ing in the value, it can just be assigned directly.
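
For illustration, the suggested simplification would look roughly like this (a sketch
against the quoted hunk, not the posted code):

  uint64_t
  txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
  {
  	/* Assign directly instead of zero-initializing and OR-ing. */
  	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_STRIP;

  	return offloads;
  }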
  

Patch

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index c43d5b56f..682519726 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -62,6 +62,20 @@  static const struct rte_pci_id pci_id_txgbe_map[] = {
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
+static const struct rte_eth_desc_lim rx_desc_lim = {
+	.nb_max = TXGBE_RING_DESC_MAX,
+	.nb_min = TXGBE_RING_DESC_MIN,
+	.nb_align = TXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = TXGBE_RING_DESC_MAX,
+	.nb_min = TXGBE_RING_DESC_MIN,
+	.nb_align = TXGBE_TXD_ALIGN,
+	.nb_seg_max = TXGBE_TX_MAX_SEG,
+	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
+};
+
 static const struct eth_dev_ops txgbe_eth_dev_ops;
 
 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
@@ -1479,6 +1493,69 @@  txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+	dev_info->min_rx_bufsize = 1024;
+	dev_info->max_rx_pktlen = 15872;
+	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
+	dev_info->max_vfs = pci_dev->max_vfs;
+	dev_info->max_vmdq_pools = ETH_64_POOLS;
+	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
+	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
+				     dev_info->rx_queue_offload_capa);
+	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
+	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
+
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_thresh = {
+			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
+			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
+			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
+		},
+		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
+		.rx_drop_en = 0,
+		.offloads = 0,
+	};
+
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_thresh = {
+			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
+			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
+			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
+		},
+		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
+		.offloads = 0,
+	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
+
+	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
+
+	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+
+	/* Driver-preferred Rx/Tx parameters */
+	dev_info->default_rxportconf.burst_size = 32;
+	dev_info->default_txportconf.burst_size = 32;
+	dev_info->default_rxportconf.nb_queues = 1;
+	dev_info->default_txportconf.nb_queues = 1;
+	dev_info->default_rxportconf.ring_size = 256;
+	dev_info->default_txportconf.ring_size = 256;
+
+	return 0;
+}
+
 const uint32_t *
 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
@@ -2010,6 +2087,7 @@  static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.xstats_get_names           = txgbe_dev_xstats_get_names,
 	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
 	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
+	.dev_infos_get              = txgbe_dev_info_get,
 	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
 	.rx_queue_start	            = txgbe_dev_rx_queue_start,
 	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index ffff4ee11..61f4aa772 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -25,9 +25,21 @@ 
  * FreeBSD driver.
  */
 #define TXGBE_VLAN_TAG_SIZE 4
+#define TXGBE_HKEY_MAX_INDEX 10
 
 #define TXGBE_QUEUE_ITR_INTERVAL_DEFAULT	500 /* 500us */
 
+#define TXGBE_RSS_OFFLOAD_ALL ( \
+	ETH_RSS_IPV4 | \
+	ETH_RSS_NONFRAG_IPV4_TCP | \
+	ETH_RSS_NONFRAG_IPV4_UDP | \
+	ETH_RSS_IPV6 | \
+	ETH_RSS_NONFRAG_IPV6_TCP | \
+	ETH_RSS_NONFRAG_IPV6_UDP | \
+	ETH_RSS_IPV6_EX | \
+	ETH_RSS_IPV6_TCP_EX | \
+	ETH_RSS_IPV6_UDP_EX)
+
 #define TXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define TXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
 
@@ -174,6 +186,19 @@  void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
 #define TXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define TXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
 
+/*
+ *  Default values for RX/TX configuration
+ */
+#define TXGBE_DEFAULT_RX_FREE_THRESH  32
+#define TXGBE_DEFAULT_RX_PTHRESH      8
+#define TXGBE_DEFAULT_RX_HTHRESH      8
+#define TXGBE_DEFAULT_RX_WTHRESH      0
+
+#define TXGBE_DEFAULT_TX_FREE_THRESH  32
+#define TXGBE_DEFAULT_TX_PTHRESH      32
+#define TXGBE_DEFAULT_TX_HTHRESH      0
+#define TXGBE_DEFAULT_TX_WTHRESH      0
+
 /* store statistics names and its offset in stats structure */
 struct rte_txgbe_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index ef3d63b01..f50bc82ce 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -77,6 +77,19 @@  static const u64 TXGBE_TX_OFFLOAD_MASK = (
 #define rte_txgbe_prefetch(p)   do {} while (0)
 #endif
 
+static int
+txgbe_is_vf(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	switch (hw->mac.type) {
+	case txgbe_mac_raptor_vf:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
 /*********************************************************************
  *
  *  TX functions
@@ -1943,6 +1956,45 @@  txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
 	}
 }
 
+uint64_t
+txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+	RTE_SET_USED(dev);
+
+	return 0;
+}
+
+uint64_t
+txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM  |
+		DEV_TX_OFFLOAD_UDP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_CKSUM   |
+		DEV_TX_OFFLOAD_SCTP_CKSUM  |
+		DEV_TX_OFFLOAD_TCP_TSO     |
+		DEV_TX_OFFLOAD_UDP_TSO	   |
+		DEV_TX_OFFLOAD_UDP_TNL_TSO	|
+		DEV_TX_OFFLOAD_IP_TNL_TSO	|
+		DEV_TX_OFFLOAD_VXLAN_TNL_TSO	|
+		DEV_TX_OFFLOAD_GRE_TNL_TSO	|
+		DEV_TX_OFFLOAD_IPIP_TNL_TSO	|
+		DEV_TX_OFFLOAD_GENEVE_TNL_TSO	|
+		DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	if (!txgbe_is_vf(dev))
+		tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+
+	tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+	tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+	return tx_offload_capa;
+}
+
 int __rte_cold
 txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2235,6 +2287,53 @@  txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
 
 }
 
+uint64_t
+txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
+{
+	uint64_t offloads = 0;
+
+	offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+	return offloads;
+}
+
+uint64_t
+txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t offloads;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
+
+	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
+		   DEV_RX_OFFLOAD_UDP_CKSUM   |
+		   DEV_RX_OFFLOAD_TCP_CKSUM   |
+		   DEV_RX_OFFLOAD_KEEP_CRC    |
+		   DEV_RX_OFFLOAD_JUMBO_FRAME |
+		   DEV_RX_OFFLOAD_VLAN_FILTER |
+		   DEV_RX_OFFLOAD_RSS_HASH |
+		   DEV_RX_OFFLOAD_SCATTER;
+
+	if (!txgbe_is_vf(dev))
+		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+			     DEV_RX_OFFLOAD_QINQ_STRIP |
+			     DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+	/*
+	 * RSC is only supported by PF devices in a non-SR-IOV
+	 * mode.
+	 */
+	if ((hw->mac.type == txgbe_mac_raptor) &&
+	    !sriov->active)
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+	if (hw->mac.type == txgbe_mac_raptor)
+		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+	offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+	return offloads;
+}
+
 int __rte_cold
 txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 296e34475..958ca2e97 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -403,5 +403,9 @@  void txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq);
 
 void txgbe_set_rx_function(struct rte_eth_dev *dev);
 
+uint64_t txgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
+uint64_t txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t txgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
 
 #endif /* _TXGBE_RXTX_H_ */