diff mbox series

[v2,06/14] net/idpf: add support for packet type get

Message ID 20220905105828.3190335-7-junfeng.guo@intel.com (mailing list archive)
State Changes Requested, archived
Delegated to: Andrew Rybchenko
Headers show
Series add support for idpf PMD in DPDK | expand

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Guo, Junfeng Sept. 5, 2022, 10:58 a.m. UTC
Add dev ops dev_supported_ptypes_get.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c |   7 +
 drivers/net/idpf/idpf_ethdev.h |   2 +
 drivers/net/idpf/idpf_rxtx.c   |  19 +++
 drivers/net/idpf/idpf_rxtx.h   |   6 +
 drivers/net/idpf/idpf_vchnl.c  | 235 +++++++++++++++++++++++++++++++++
 5 files changed, 269 insertions(+)

Comments

Andrew Rybchenko Oct. 3, 2022, 1:58 p.m. UTC | #1
On 9/5/22 13:58, Junfeng Guo wrote:
> Add dev ops dev_supported_ptypes_get.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>

[snip]

> diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
> index fe044a80c9..1c5c4688cc 100644
> --- a/drivers/net/idpf/idpf_rxtx.c
> +++ b/drivers/net/idpf/idpf_rxtx.c
> @@ -8,6 +8,25 @@
>   #include "idpf_ethdev.h"
>   #include "idpf_rxtx.h"
>   
> +const uint32_t *
> +idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
> +{
> +	static const uint32_t ptypes[] = {
> +		RTE_PTYPE_L2_ETHER,
> +		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
> +		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
> +		RTE_PTYPE_L4_FRAG,
> +		RTE_PTYPE_L4_NONFRAG,
> +		RTE_PTYPE_L4_UDP,
> +		RTE_PTYPE_L4_TCP,
> +		RTE_PTYPE_L4_SCTP,
> +		RTE_PTYPE_L4_ICMP,
> +		RTE_PTYPE_UNKNOWN
> +	};

How am I supposed to verify that these packet types are really
recognized by the driver? The patch should include the part
of Rx burst callback which does recognition and fill in
corresponding data in mbuf.

> +
> +	return ptypes;
> +}
> +
>   static inline int
>   check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
>   {
Guo, Junfeng Oct. 14, 2022, 9:18 a.m. UTC | #2
> -----Original Message-----
> From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> Sent: Monday, October 3, 2022 21:59
> To: Guo, Junfeng <junfeng.guo@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing,
> Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>; Wu,
> Wenjun1 <wenjun1.wu@intel.com>
> Subject: Re: [PATCH v2 06/14] net/idpf: add support for packet type get
> 
> On 9/5/22 13:58, Junfeng Guo wrote:
> > Add dev ops dev_supported_ptypes_get.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
> > Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> 
> [snip]
> 
> > diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
> > index fe044a80c9..1c5c4688cc 100644
> > --- a/drivers/net/idpf/idpf_rxtx.c
> > +++ b/drivers/net/idpf/idpf_rxtx.c
> > @@ -8,6 +8,25 @@
> >   #include "idpf_ethdev.h"
> >   #include "idpf_rxtx.h"
> >
> > +const uint32_t *
> > +idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev
> __rte_unused)
> > +{
> > +	static const uint32_t ptypes[] = {
> > +		RTE_PTYPE_L2_ETHER,
> > +		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
> > +		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
> > +		RTE_PTYPE_L4_FRAG,
> > +		RTE_PTYPE_L4_NONFRAG,
> > +		RTE_PTYPE_L4_UDP,
> > +		RTE_PTYPE_L4_TCP,
> > +		RTE_PTYPE_L4_SCTP,
> > +		RTE_PTYPE_L4_ICMP,
> > +		RTE_PTYPE_UNKNOWN
> > +	};
> 
> How am I supposed to verify that these packet types are really
> recognized by the driver? The patch should include the part
> of Rx burst callback which does recognition and fill in
> corresponding data in mbuf.

You can verify the PTYPE by sending and receiving specific packets.
After enabling verbose output (e.g. "set verbose 1" in testpmd), the
corresponding PTYPE will show up in the testpmd log. You can then
check whether the PTYPE matches exactly what you just sent.

The ptype_tbl will be filled when the function idpf_get_pkt_type is called,
and the corresponding mbuf fields will then be filled in the Rx burst phase.
This part of the patch will also be improved to make it clearer. Thanks!

> 
> > +
> > +	return ptypes;
> > +}
> > +
> >   static inline int
> >   check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
> >   {
diff mbox series

Patch

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 8852b7ce22..0ed2c99dd2 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -32,6 +32,7 @@  static int idpf_dev_info_get(struct rte_eth_dev *dev,
 			     struct rte_eth_dev_info *dev_info);
 
 static const struct eth_dev_ops idpf_eth_dev_ops = {
+	.dev_supported_ptypes_get	= idpf_dev_supported_ptypes_get,
 	.dev_configure			= idpf_dev_configure,
 	.dev_start			= idpf_dev_start,
 	.dev_stop			= idpf_dev_stop,
@@ -647,6 +648,12 @@  idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 		goto err_api;
 	}
 
+	ret = idpf_get_pkt_type(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to set ptype table");
+		goto err_api;
+	}
+
 	adapter->caps = rte_zmalloc("idpf_caps",
 			       sizeof(struct virtchnl2_get_capabilities), 0);
 	if (!adapter->caps) {
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 4a98e7739b..1833c6e57d 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -219,6 +219,7 @@  _atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops)
 struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev);
 void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int idpf_check_api_version(struct idpf_adapter *adapter);
+int idpf_get_pkt_type(struct idpf_adapter *adapter);
 int idpf_get_caps(struct idpf_adapter *adapter);
 int idpf_create_vport(struct rte_eth_dev *dev);
 int idpf_destroy_vport(struct idpf_vport *vport);
@@ -230,6 +231,7 @@  int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
 		      bool rx, bool on);
 int idpf_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_ena_dis_vport(struct idpf_vport *vport, bool enable);
+int idpf_query_ptype_info(struct idpf_adapter *adapter);
 int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
 		      uint16_t buf_len, uint8_t *buf);
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index fe044a80c9..1c5c4688cc 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -8,6 +8,25 @@ 
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
 
+const uint32_t *
+idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_FRAG,
+		RTE_PTYPE_L4_NONFRAG,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_SCTP,
+		RTE_PTYPE_L4_ICMP,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	return ptypes;
+}
+
 static inline int
 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
 {
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index d930bb75ff..f2947a8492 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -46,6 +46,10 @@ 
 #define IDPF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
 
+#define IDPF_GET_PTYPE_SIZE(p) \
+	(sizeof(struct virtchnl2_ptype) + \
+	(((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
+
 struct idpf_rx_queue {
 	struct idpf_adapter *adapter;	/* the adapter this queue belongs to */
 	struct rte_mempool *mp;		/* mbuf pool to populate Rx ring */
@@ -186,5 +190,7 @@  void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void idpf_stop_queues(struct rte_eth_dev *dev);
 
 
+const uint32_t *idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
 #endif /* _IDPF_RXTX_H_ */
 
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index cbc2a5ba12..00855f153e 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -303,6 +303,215 @@  idpf_check_api_version(struct idpf_adapter *adapter)
 	return err;
 }
 
+int __rte_cold
+idpf_get_pkt_type(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_ptype_info *ptype_info;
+	uint16_t ptype_recvd = 0, ptype_offset, i, j;
+	int ret;
+
+	ret = idpf_query_ptype_info(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to query packet type information");
+		return ret;
+	}
+
+	ptype_info = rte_zmalloc("ptype_info", IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (!ptype_info)
+		return -ENOMEM;
+
+	while (ptype_recvd < IDPF_MAX_PKT_TYPE) {
+		ret = idpf_read_one_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+					IDPF_DFLT_MBX_BUF_SIZE, (u8 *)ptype_info);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to get packet type information");
+			goto free_ptype_info;
+		}
+
+		ptype_recvd += ptype_info->num_ptypes;
+		ptype_offset = sizeof(struct virtchnl2_get_ptype_info) -
+						sizeof(struct virtchnl2_ptype);
+
+		for (i = 0; i < rte_cpu_to_le_16(ptype_info->num_ptypes); i++) {
+			bool is_inner = false, is_ip = false;
+			struct virtchnl2_ptype *ptype;
+			uint32_t proto_hdr = 0;
+
+			ptype = (struct virtchnl2_ptype *)
+					((u8 *)ptype_info + ptype_offset);
+			ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
+			if (ptype_offset > IDPF_DFLT_MBX_BUF_SIZE) {
+				ret = -EINVAL;
+				goto free_ptype_info;
+			}
+
+			if (rte_cpu_to_le_16(ptype->ptype_id_10) == 0xFFFF)
+				goto free_ptype_info;
+
+			for (j = 0; j < ptype->proto_id_count; j++) {
+				switch (rte_cpu_to_le_16(ptype->proto_id[j])) {
+				case VIRTCHNL2_PROTO_HDR_GRE:
+				case VIRTCHNL2_PROTO_HDR_VXLAN:
+					proto_hdr &= ~RTE_PTYPE_L4_MASK;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GRENAT;
+					is_inner = true;
+					break;
+				case VIRTCHNL2_PROTO_HDR_MAC:
+					if (is_inner) {
+						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER;
+					} else {
+						proto_hdr &= ~RTE_PTYPE_L2_MASK;
+						proto_hdr |= RTE_PTYPE_L2_ETHER;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_VLAN:
+					if (is_inner) {
+						proto_hdr &= ~RTE_PTYPE_INNER_L2_MASK;
+						proto_hdr |= RTE_PTYPE_INNER_L2_ETHER_VLAN;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_PTP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_TIMESYNC;
+					break;
+				case VIRTCHNL2_PROTO_HDR_LLDP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_LLDP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ARP:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_ARP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_PPPOE:
+					proto_hdr &= ~RTE_PTYPE_L2_MASK;
+					proto_hdr |= RTE_PTYPE_L2_ETHER_PPPOE;
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV4:
+					if (!is_ip) {
+						proto_hdr |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+						is_ip = true;
+					} else {
+						proto_hdr |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+							     RTE_PTYPE_TUNNEL_IP;
+						is_inner = true;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV6:
+					if (!is_ip) {
+						proto_hdr |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+						is_ip = true;
+					} else {
+						proto_hdr |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+							     RTE_PTYPE_TUNNEL_IP;
+						is_inner = true;
+					}
+					break;
+				case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+				case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_FRAG;
+					else
+						proto_hdr |= RTE_PTYPE_L4_FRAG;
+					break;
+				case VIRTCHNL2_PROTO_HDR_UDP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_UDP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_UDP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_TCP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_TCP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_TCP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_SCTP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_SCTP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_SCTP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ICMP:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_ICMP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_ICMPV6:
+					if (is_inner)
+						proto_hdr |= RTE_PTYPE_INNER_L4_ICMP;
+					else
+						proto_hdr |= RTE_PTYPE_L4_ICMP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_L2TPV2:
+				case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+				case VIRTCHNL2_PROTO_HDR_L2TPV3:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_L2TP;
+					break;
+				case VIRTCHNL2_PROTO_HDR_NVGRE:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_NVGRE;
+					break;
+				case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GTPC;
+					break;
+				case VIRTCHNL2_PROTO_HDR_GTPU:
+				case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+				case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+					is_inner = true;
+					proto_hdr |= RTE_PTYPE_TUNNEL_GTPU;
+					break;
+				case VIRTCHNL2_PROTO_HDR_PAY:
+				case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+				case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+				case VIRTCHNL2_PROTO_HDR_POST_MAC:
+				case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+				case VIRTCHNL2_PROTO_HDR_SVLAN:
+				case VIRTCHNL2_PROTO_HDR_CVLAN:
+				case VIRTCHNL2_PROTO_HDR_MPLS:
+				case VIRTCHNL2_PROTO_HDR_MMPLS:
+				case VIRTCHNL2_PROTO_HDR_CTRL:
+				case VIRTCHNL2_PROTO_HDR_ECP:
+				case VIRTCHNL2_PROTO_HDR_EAPOL:
+				case VIRTCHNL2_PROTO_HDR_PPPOD:
+				case VIRTCHNL2_PROTO_HDR_IGMP:
+				case VIRTCHNL2_PROTO_HDR_AH:
+				case VIRTCHNL2_PROTO_HDR_ESP:
+				case VIRTCHNL2_PROTO_HDR_IKE:
+				case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+				case VIRTCHNL2_PROTO_HDR_GTP:
+				case VIRTCHNL2_PROTO_HDR_GTP_EH:
+				case VIRTCHNL2_PROTO_HDR_GTPCV2:
+				case VIRTCHNL2_PROTO_HDR_ECPRI:
+				case VIRTCHNL2_PROTO_HDR_VRRP:
+				case VIRTCHNL2_PROTO_HDR_OSPF:
+				case VIRTCHNL2_PROTO_HDR_TUN:
+				case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+				case VIRTCHNL2_PROTO_HDR_GENEVE:
+				case VIRTCHNL2_PROTO_HDR_NSH:
+				case VIRTCHNL2_PROTO_HDR_QUIC:
+				case VIRTCHNL2_PROTO_HDR_PFCP:
+				case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+				case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+				case VIRTCHNL2_PROTO_HDR_RTP:
+				case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+				default:
+					continue;
+				}
+				adapter->ptype_tbl[ptype->ptype_id_10] = proto_hdr;
+			}
+		}
+	}
+
+free_ptype_info:
+	rte_free(ptype_info);
+	_clear_cmd(adapter);
+	return ret;
+}
+
 int
 idpf_get_caps(struct idpf_adapter *adapter)
 {
@@ -1002,3 +1211,29 @@  idpf_ena_dis_vport(struct idpf_vport *vport, bool enable)
 
 	return err;
 }
+int
+idpf_query_ptype_info(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_ptype_info *ptype_info;
+	struct idpf_cmd_info args;
+	int len, err;
+
+	len = sizeof(struct virtchnl2_get_ptype_info);
+	ptype_info = rte_zmalloc("ptype_info", len, 0);
+	if (!ptype_info)
+		return -ENOMEM;
+
+	ptype_info->start_ptype_id = 0;
+	ptype_info->num_ptypes = IDPF_MAX_PKT_TYPE;
+	args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
+	args.in_args = (u8 *)ptype_info;
+	args.in_args_size = len;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");
+
+	rte_free(ptype_info);
+	return err;
+}
+