[v8,07/14] net/idpf: add support for packet type get

Message ID 20221020062951.645121-8-junfeng.guo@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Andrew Rybchenko
Headers
Series add support for idpf PMD in DPDK |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Junfeng Guo Oct. 20, 2022, 6:29 a.m. UTC
  Add dev ops dev_supported_ptypes_get.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/nics/features/idpf.ini |   1 +
 drivers/net/idpf/idpf_ethdev.c    |   7 +
 drivers/net/idpf/idpf_ethdev.h    |   2 +
 drivers/net/idpf/idpf_rxtx.c      |  19 +++
 drivers/net/idpf/idpf_rxtx.h      |   6 +
 drivers/net/idpf/idpf_vchnl.c     | 235 ++++++++++++++++++++++++++++++
 6 files changed, 270 insertions(+)
  

Patch

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index 597beec5d9..79ffafc0d4 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -10,6 +10,7 @@ 
 Queue start/stop     = Y
 Runtime Rx queue setup = Y
 Runtime Tx queue setup = Y
+Packet type parsing  = Y
 Multiprocess aware   = Y
 FreeBSD              = Y
 Linux                = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index d1b6797d4a..20f0f39640 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -55,6 +55,7 @@  idpf_dev_link_update(struct rte_eth_dev *dev,
 }
 
 static const struct eth_dev_ops idpf_eth_dev_ops = {
+	.dev_supported_ptypes_get	= idpf_dev_supported_ptypes_get,
 	.dev_configure			= idpf_dev_configure,
 	.dev_start			= idpf_dev_start,
 	.dev_stop			= idpf_dev_stop,
@@ -652,6 +653,12 @@  idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 		goto err_api;
 	}
 
+	ret = idpf_get_pkt_type(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to set ptype table");
+		goto err_api;
+	}
+
 	adapter->caps = rte_zmalloc("idpf_caps",
 				sizeof(struct virtchnl2_get_capabilities), 0);
 	if (!adapter->caps) {
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index b625868ceb..863d163330 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -210,6 +210,7 @@  int idpf_dev_link_update(struct rte_eth_dev *dev,
 			 __rte_unused int wait_to_complete);
 void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int idpf_vc_check_api_version(struct idpf_adapter *adapter);
+int idpf_get_pkt_type(struct idpf_adapter *adapter);
 int idpf_vc_get_caps(struct idpf_adapter *adapter);
 int idpf_vc_create_vport(struct rte_eth_dev *dev);
 int idpf_vc_destroy_vport(struct idpf_vport *vport);
@@ -221,6 +222,7 @@  int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
 		      bool rx, bool on);
 int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
+int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
 int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
 		      uint16_t buf_len, uint8_t *buf);
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 95193713c4..e92ea09c3b 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -8,6 +8,25 @@ 
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
 
+const uint32_t *
+idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_FRAG,
+		RTE_PTYPE_L4_NONFRAG,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_SCTP,
+		RTE_PTYPE_L4_ICMP,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	return ptypes;
+}
+
 static inline int
 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
 {
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index f0427b96c5..cd1dda4688 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -47,6 +47,10 @@ 
 #define IDPF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
 
+#define IDPF_GET_PTYPE_SIZE(p) \
+	(sizeof(struct virtchnl2_ptype) + \
+	(((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
+
 struct idpf_rx_queue {
 	struct idpf_adapter *adapter;	/* the adapter this queue belongs to */
 	struct rte_mempool *mp;		/* mbuf pool to populate Rx ring */
@@ -189,4 +193,6 @@  void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 void idpf_stop_queues(struct rte_eth_dev *dev);
 
+const uint32_t *idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
 #endif /* _IDPF_RXTX_H_ */
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 6b6881872b..7389128712 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -303,6 +303,215 @@  idpf_vc_check_api_version(struct idpf_adapter *adapter)
 	return err;
 }
 
+int __rte_cold
+idpf_get_pkt_type(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_ptype_info *ptype_info;
+	uint16_t ptype_recvd = 0, ptype_offset, i, j;
+	int ret;
+
+	ret = idpf_vc_query_ptype_info(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to query packet type information");
+		return ret;
+	}
+
+	ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (!ptype_info)
+		return -ENOMEM;
+
+	while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
+		ret = idpf_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+					IDPF_DFLT_MBX_BUF_SIZE, (u8 *)ptype_info);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to get packet type information");
+			goto free_ptype_info;
+		}
+
+		ptype_recvd += ptype_info->num_ptypes;
+		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
+						sizeof(struct virtchnl2_ptype);
+
+		for (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) {
+			bool is_inner = false, is_ip = false;
+			struct virtchnl2_ptype *ptype;
+			uint32_t proto_hdr = 0;
+
+			ptype = (struct virtchnl2_ptype *)
+					((u8 *)ptype_info + ptype_offset);
+			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
+			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
+				ret = -EINVAL;
+				goto free_ptype_info;
+			}
+
+			if (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF)
+				goto free_ptype_info;
+
+			for (j = 0; j < ptype->proto_id_count; j++) {
+				switch (rte_cpu_to_le_16(ptype->proto_id[j])) {
+				case VIRTCHNL2_PROTO_HDR_GRE:
+				case VIRTCHNL2_PROTO_HDR_VXLAN:
+					proto_hdr &= ~RTE_PTYPE_L4_MASK;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
+					is_inner = true;
+					break;
+				case VIRTCHNL2_PROTO_HDR_MAC:
+					if (is_inner) {
+						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
+					} else {
+						proto_hdr &= ~RTE_PTYPE_L2_MASK;
+						proto_hdr |= RTE_PTYPE_L2_ETHER;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_VLAN:
+					if (is_inner) {
+						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_PTP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
+					break;
+				case VIRTCHNL2_PROTO_HDR_LLDP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ARP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_PPPOE:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV4:
+					if (!is_ip) {
+						proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+						is_ip = true;
+					} else {
+						proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+							     RTE_PTYPE_TUNNEL_IP;
+						is_inner = true;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV6:
+					if (!is_ip) {
+						proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+						is_ip = true;
+					} else {
+						proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+							     RTE_PTYPE_TUNNEL_IP;
+						is_inner = true;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
+					else
+						proto_hdr |= RTE_PTYPE_L4_FRAG;
+					break;
+				case VIRTCHNL2_PROTO_HDR_UDP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_UDP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_TCP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_TCP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_SCTP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_SCTP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ICMP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_ICMP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ICMPV6:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_ICMP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_L2TPV2:
+				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+				case VIRTCHNL2_PROTO_HDR_L2TPV3:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_NVGRE:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
+					break;
+				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
+					break;
+				case VIRTCHNL2_PROTO_HDR_GTPU:
+				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
+					break;
+				case VIRTCHNL2_PROTO_HDR_PAY:
+				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+				case VIRTCHNL2_PROTO_HDR_POST_MAC:
+				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+				case VIRTCHNL2_PROTO_HDR_SVLAN:
+				case VIRTCHNL2_PROTO_HDR_CVLAN:
+				case VIRTCHNL2_PROTO_HDR_MPLS:
+				case VIRTCHNL2_PROTO_HDR_MMPLS:
+				case VIRTCHNL2_PROTO_HDR_CTRL:
+				case VIRTCHNL2_PROTO_HDR_ECP:
+				case VIRTCHNL2_PROTO_HDR_EAPOL:
+				case VIRTCHNL2_PROTO_HDR_PPPOD:
+				case VIRTCHNL2_PROTO_HDR_IGMP:
+				case VIRTCHNL2_PROTO_HDR_AH:
+				case VIRTCHNL2_PROTO_HDR_ESP:
+				case VIRTCHNL2_PROTO_HDR_IKE:
+				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+				case VIRTCHNL2_PROTO_HDR_GTP:
+				case VIRTCHNL2_PROTO_HDR_GTP_EH:
+				case VIRTCHNL2_PROTO_HDR_GTPCV2:
+				case VIRTCHNL2_PROTO_HDR_ECPRI:
+				case VIRTCHNL2_PROTO_HDR_VRRP:
+				case VIRTCHNL2_PROTO_HDR_OSPF:
+				case VIRTCHNL2_PROTO_HDR_TUN:
+				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+				case VIRTCHNL2_PROTO_HDR_GENEVE:
+				case VIRTCHNL2_PROTO_HDR_NSH:
+				case VIRTCHNL2_PROTO_HDR_QUIC:
+				case VIRTCHNL2_PROTO_HDR_PFCP:
+				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+				case VIRTCHNL2_PROTO_HDR_RTP:
+				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+				default:
+					continue;
+				}
+				adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
+			}
+		}
+	}
+
+free_ptype_info:
+	rte_free(ptype_info);
+	_clear_cmd(adapter);
+	return ret;
+}
+
 int
 idpf_vc_get_caps(struct idpf_adapter *adapter)
 {
@@ -1005,3 +1214,29 @@  idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
 
 	return err;
 }
+
+int
+idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_ptype_info *ptype_info;
+	struct idpf_cmd_info args;
+	int len, err;
+
+	len = sizeof(struct virtchnl2_get_ptype_info);
+	ptype_info = rte_zmalloc("ptype_info", len, 0);
+	if (!ptype_info)
+		return -ENOMEM;
+
+	ptype_info->start_ptype_id = 0;
+	ptype_info->num_ptypes = IDPF_MAX_PKT_TYPE;
+	args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
+	args.in_args = (u8 *)ptype_info;
+	args.in_args_size = len;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
+
+	rte_free(ptype_info);
+	return err;
+}