[06/13] net/idpf: add support to get packet type

Message ID 20220803113104.1184059-7-junfeng.guo@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Andrew Rybchenko
Series: add support for idpf PMD in DPDK

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Junfeng Guo Aug. 3, 2022, 11:30 a.m. UTC
  Add the dev_supported_ptypes_get dev op to report the packet types the
  driver can recognize, and set up the default packet type table during
  adapter initialization (a usage sketch follows the diffstat below).

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c |  3 ++
 drivers/net/idpf/idpf_rxtx.c   | 54 ++++++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.h   |  3 ++
 3 files changed, 60 insertions(+)
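
For context, dev_supported_ptypes_get is the driver hook behind the generic
rte_eth_dev_get_supported_ptypes() ethdev API. Below is a minimal,
application-side sketch of consuming it; the helper name, the port_id
handling and the 16-entry buffer are illustrative and not part of this patch.

#include <stdbool.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf_ptype.h>

/* Illustrative helper: ask a port (via the driver's
 * dev_supported_ptypes_get callback) whether it reports L4 TCP recognition.
 */
static bool
port_reports_tcp_ptype(uint16_t port_id)
{
	uint32_t ptypes[16];
	int num, i;

	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
						ptypes, RTE_DIM(ptypes));
	if (num < 0)
		return false; /* invalid port or query failed */

	for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++) {
		if (ptypes[i] == RTE_PTYPE_L4_TCP)
			return true;
	}
	return false;
}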
  

Patch

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index ab991f9181..cc70e2e03a 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -32,6 +32,7 @@  static int idpf_dev_info_get(struct rte_eth_dev *dev,
 			     struct rte_eth_dev_info *dev_info);
 
 static const struct eth_dev_ops idpf_eth_dev_ops = {
+	.dev_supported_ptypes_get	= idpf_dev_supported_ptypes_get,
 	.dev_configure			= idpf_dev_configure,
 	.dev_start			= idpf_dev_start,
 	.dev_stop			= idpf_dev_stop,
@@ -488,6 +489,8 @@  idpf_adapter_init(struct rte_eth_dev *dev)
 	if (adapter->initialized)
 		return 0;
 
+	idpf_set_default_ptype_table(dev);
+
 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
 	hw->hw_addr_len = pci_dev->mem_resource[0].len;
 	hw->back = adapter;
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 7d5428b750..eae762ced3 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -8,6 +8,60 @@ 
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
 
+const uint32_t *
+idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_FRAG,
+		RTE_PTYPE_L4_NONFRAG,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_SCTP,
+		RTE_PTYPE_L4_ICMP,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	return ptypes;
+}
+
+static inline uint32_t
+idpf_get_default_pkt_type(uint16_t ptype)
+{
+	static const uint32_t type_table[IDPF_MAX_PKT_TYPE]
+		__rte_cache_aligned = {
+		[1] = RTE_PTYPE_L2_ETHER,
+		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
+		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP,
+		[88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+		[89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
+		[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+		[92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+		[93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
+		[94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP,
+	};
+
+	return type_table[ptype];
+}
+
+void __rte_cold
+idpf_set_default_ptype_table(struct rte_eth_dev *dev)
+{
+	/* dev_private is the idpf vport, which points back to the adapter. */
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	int i;
+
+	for (i = 0; i < IDPF_MAX_PKT_TYPE; i++)
+		adapter->ptype_tbl[i] = idpf_get_default_pkt_type(i);
+}
+
 static inline int
 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
 {
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index 3997082b21..5e57995f8c 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -178,5 +178,8 @@  void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 void idpf_stop_queues(struct rte_eth_dev *dev);
 
+void idpf_set_default_ptype_table(struct rte_eth_dev *dev);
+const uint32_t *idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
 #endif /* _IDPF_RXTX_H_ */
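
As a note on how the table added in idpf_rxtx.c is meant to be consumed: in
the Rx path, the ptype id reported by the hardware indexes adapter->ptype_tbl
to fill mbuf->packet_type. The sketch below shows that lookup; the function
name and the hw_ptype_id parameter are stand-ins, since this patch does not
touch the Rx burst routines.

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>
#include "idpf_rxtx.h"	/* for IDPF_MAX_PKT_TYPE */

/* Sketch: translate a hardware ptype id into an mbuf packet_type using the
 * per-adapter table filled by idpf_set_default_ptype_table(); ids outside
 * the table map to RTE_PTYPE_UNKNOWN.
 */
static inline void
idpf_rx_fill_ptype(struct rte_mbuf *mb, uint16_t hw_ptype_id,
		   const uint32_t *ptype_tbl)
{
	mb->packet_type = (hw_ptype_id < IDPF_MAX_PKT_TYPE) ?
			  ptype_tbl[hw_ptype_id] : RTE_PTYPE_UNKNOWN;
}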