[16/22] net/hns3: add start stop configure promiscuous ops

Message ID: 1566568031-45991-17-git-send-email-xavier.huwei@huawei.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Ferruh Yigit
Series: add hns3 ethernet PMD driver

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     Compilation issues

Commit Message

Wei Hu (Xavier) Aug. 23, 2019, 1:47 p.m. UTC
  This patch adds the dev_start, dev_stop, dev_configure, promiscuous_enable,
promiscuous_disable, allmulticast_enable, allmulticast_disable and
dev_infos_get related functions.

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c    | 406 ++++++++++++++++++++++++++++++++++++++
 drivers/net/hns3/hns3_ethdev_vf.c |   1 +
 2 files changed, 407 insertions(+)
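
For context, the callbacks wired into hns3_eth_dev_ops by this patch are reached
through the generic ethdev API. The sketch below is only illustrative of the
application-side calls that land in these new ops: the port id, the 1/1 queue
counts and the zero-initialized rte_eth_conf are assumptions for the example,
and rx/tx queue setup is omitted for brevity.

#include <rte_ethdev.h>

/* Bring up one port; each step maps to a dev_ops callback added by this patch. */
static int
bring_up_port(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };	/* default config, no DCB/RSS flags set */
	int ret;

	/* -> hns3_dev_configure() */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() would go here. */

	/* -> hns3_dev_start() */
	ret = rte_eth_dev_start(port_id);
	if (ret != 0)
		return ret;

	/* -> hns3_dev_promiscuous_enable() and hns3_dev_allmulticast_enable() */
	rte_eth_promiscuous_enable(port_id);
	rte_eth_allmulticast_enable(port_id);

	/* Teardown later goes through rte_eth_dev_stop() -> hns3_dev_stop(). */
	return 0;
}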
  

Comments

Ferruh Yigit Aug. 30, 2019, 3:14 p.m. UTC | #1
On 8/23/2019 2:47 PM, Wei Hu (Xavier) wrote:
> This patch adds the dev_start, dev_stop, dev_configure, promiscuous_enable,
> promiscuous_disable, allmulticast_enable, allmulticast_disable and
> dev_infos_get related functions.
> 
> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
> Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
> Signed-off-by: Hao Chen <chenhao164@huawei.com>
> Signed-off-by: Huisong Li <lihuisong@huawei.com>

<...>

> @@ -3626,6 +4031,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
>  	.vlan_offload_set       = hns3_vlan_offload_set,
>  	.vlan_pvid_set          = hns3_vlan_pvid_set,
>  	.get_dcb_info           = hns3_get_dcb_info,
> +	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,

'hns3_dev_supported_ptypes_get' has been defined in a previous patch; what do you
think about defining and using it in the same patch?
  
Wei Hu (Xavier) Sept. 6, 2019, 11:51 a.m. UTC | #2
Hi, Ferruh Yigit


On 2019/8/30 23:14, Ferruh Yigit wrote:
> On 8/23/2019 2:47 PM, Wei Hu (Xavier) wrote:
>> This patch adds the dev_start, dev_stop, dev_configure, promiscuous_enable,
>> promiscuous_disable, allmulticast_enable, allmulticast_disable and
>> dev_infos_get related functions.
>>
>> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
>> Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
>> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
>> Signed-off-by: Hao Chen <chenhao164@huawei.com>
>> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> <...>
>
>> @@ -3626,6 +4031,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
>>  	.vlan_offload_set       = hns3_vlan_offload_set,
>>  	.vlan_pvid_set          = hns3_vlan_pvid_set,
>>  	.get_dcb_info           = hns3_get_dcb_info,
>> +	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
> 'hns3_dev_supported_ptypes_get' has been defined in a previous patch; what do you
> think about defining and using it in the same patch?
    We will fix it in patch V2.
    Thanks for your suggestion.

    Regards
Xavier
>
  

Patch

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 73b34d2..4a57474 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -42,6 +42,7 @@ 
 #define HNS3_DEFAULT_PORT_CONF_BURST_SIZE	32
 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM	1
 
+#define HNS3_SERVICE_INTERVAL		1000000 /* us */
 #define HNS3_PORT_BASE_VLAN_DISABLE	0
 #define HNS3_PORT_BASE_VLAN_ENABLE	1
 #define HNS3_INVLID_PVID		0xFFFF
@@ -134,6 +135,41 @@  hns3_add_dev_vlan_table(struct hns3_adapter *hns, uint16_t vlan_id,
 }
 
 static int
+hns3_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
+{
+	struct hns3_pf *pf = &hns->pf;
+	bool writen_to_tbl = false;
+	int ret = 0;
+
+	/*
+	 * When the vlan filter is enabled, the hardware regards vlan id 0 as
+	 * the entry for normal packets; deleting vlan id 0 is not allowed.
+	 */
+	if (on == 0 && vlan_id == 0)
+		return 0;
+
+	/*
+	 * When port based vlan is enabled, it is used as the vlan filter
+	 * condition, so the vlan filter table is not updated when the user
+	 * adds or removes a vlan; only the vlan list is updated. The vlan
+	 * ids in the list are not written to the vlan filter table until
+	 * port based vlan is disabled.
+	 */
+	if (pf->port_base_vlan_cfg.state == HNS3_PORT_BASE_VLAN_DISABLE) {
+		ret = hns3_set_port_vlan_filter(hns, vlan_id, on);
+		writen_to_tbl = true;
+	}
+
+	if (ret == 0 && vlan_id) {
+		if (on)
+			hns3_add_dev_vlan_table(hns, vlan_id, writen_to_tbl);
+		else
+			hns3_rm_dev_vlan_table(hns, vlan_id);
+	}
+	return ret;
+}
+
+static int
 hns3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
@@ -1632,6 +1668,178 @@  hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
 }
 
 static int
+hns3_check_mq_mode(struct rte_eth_dev *dev)
+{
+	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
+	struct rte_eth_dcb_tx_conf *dcb_tx_conf;
+	uint8_t num_tc;
+	int max_tc = 0;
+	int i;
+
+	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+	dcb_tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+
+	if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+		hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB_RSS is not supported. "
+			 "rx_mq_mode = %d", rx_mq_mode);
+		return -EINVAL;
+	}
+
+	if (rx_mq_mode == ETH_MQ_RX_VMDQ_DCB ||
+	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		hns3_err(hw, "ETH_MQ_RX_VMDQ_DCB and ETH_MQ_TX_VMDQ_DCB "
+			 "is not supported. rx_mq_mode = %d, tx_mq_mode = %d",
+			 rx_mq_mode, tx_mq_mode);
+		return -EINVAL;
+	}
+
+	if (rx_mq_mode == ETH_MQ_RX_DCB_RSS) {
+		if (dcb_rx_conf->nb_tcs > pf->tc_max) {
+			hns3_err(hw, "nb_tcs(%u) > max_tc(%u) driver supported.",
+				 dcb_rx_conf->nb_tcs, pf->tc_max);
+			return -EINVAL;
+		}
+
+		if (!(dcb_rx_conf->nb_tcs == HNS3_4_TCS ||
+		      dcb_rx_conf->nb_tcs == HNS3_8_TCS)) {
+			hns3_err(hw, "on ETH_MQ_RX_DCB_RSS mode, "
+				 "nb_tcs(%d) != %d or %d in rx direction.",
+				 dcb_rx_conf->nb_tcs, HNS3_4_TCS, HNS3_8_TCS);
+			return -EINVAL;
+		}
+
+		if (dcb_rx_conf->nb_tcs != dcb_tx_conf->nb_tcs) {
+			hns3_err(hw, "num_tcs(%d) of tx is not equal to rx(%d)",
+				 dcb_tx_conf->nb_tcs, dcb_rx_conf->nb_tcs);
+			return -EINVAL;
+		}
+
+		for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
+			if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
+				hns3_err(hw, "dcb_tc[%d] = %d in rx direction, "
+					 "is not equal to one in tx direction.",
+					 i, dcb_rx_conf->dcb_tc[i]);
+				return -EINVAL;
+			}
+			if (dcb_rx_conf->dcb_tc[i] > max_tc)
+				max_tc = dcb_rx_conf->dcb_tc[i];
+		}
+
+		num_tc = max_tc + 1;
+		if (num_tc > dcb_rx_conf->nb_tcs) {
+			hns3_err(hw, "max num_tc(%u) mapped > nb_tcs(%u)",
+				 num_tc, dcb_rx_conf->nb_tcs);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+hns3_check_dcb_cfg(struct rte_eth_dev *dev)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (!hns3_dev_dcb_supported(hw)) {
+		hns3_err(hw, "this port does not support dcb configurations.");
+		return -EOPNOTSUPP;
+	}
+
+	if (hw->current_fc_status == HNS3_FC_STATUS_MAC_PAUSE) {
+		hns3_err(hw, "MAC pause enabled, cannot config dcb info.");
+		return -EOPNOTSUPP;
+	}
+
+	/* Check multiple queue mode */
+	return hns3_check_mq_mode(dev);
+}
+
+static int
+hns3_dev_configure(struct rte_eth_dev *dev)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_rss_conf *rss_cfg = &hw->rss_info;
+	struct rte_eth_conf *conf = &dev->data->dev_conf;
+	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
+	uint16_t nb_rx_q = dev->data->nb_rx_queues;
+	uint16_t nb_tx_q = dev->data->nb_tx_queues;
+	struct rte_eth_rss_conf rss_conf;
+	uint16_t mtu;
+	int ret;
+
+	/*
+	 * The hip08 hardware does not support configurations in which the
+	 * number of rx queues differs from the number of tx queues.
+	 */
+	if (nb_rx_q != nb_tx_q) {
+		hns3_err(hw,
+			 "nb_rx_queues(%u) not equal with nb_tx_queues(%u)! "
+			 "Hardware does not support this configuration!",
+			 nb_rx_q, nb_tx_q);
+		return -EINVAL;
+	}
+
+	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+		hns3_err(hw, "setting link speed/duplex not supported");
+		return -EINVAL;
+	}
+
+	hw->adapter_state = HNS3_NIC_CONFIGURING;
+	if ((uint32_t)mq_mode & ETH_MQ_RX_DCB_FLAG) {
+		ret = hns3_check_dcb_cfg(dev);
+		if (ret)
+			goto cfg_err;
+	}
+
+	/* When RSS is not configured, packets are redirected to queue 0 */
+	if ((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG) {
+		rss_conf = conf->rx_adv_conf.rss_conf;
+		if (rss_conf.rss_key == NULL) {
+			rss_conf.rss_key = rss_cfg->key;
+			rss_conf.rss_key_len = HNS3_RSS_KEY_SIZE;
+		}
+
+		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
+		if (ret)
+			goto cfg_err;
+	}
+
+	/*
+	 * If jumbo frames are enabled, MTU needs to be refreshed
+	 * according to the maximum RX packet length.
+	 */
+	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		/*
+		 * The validity of max_rx_pkt_len is guaranteed by the dpdk
+		 * framework. Its maximum value is HNS3_MAX_FRAME_LEN, so it
+		 * can safely be assigned to a "uint16_t" variable.
+		 */
+		mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len);
+		ret = hns3_dev_mtu_set(dev, mtu);
+		if (ret)
+			goto cfg_err;
+		dev->data->mtu = mtu;
+	}
+
+	ret = hns3_dev_configure_vlan(dev);
+	if (ret)
+		goto cfg_err;
+
+	hw->adapter_state = HNS3_NIC_CONFIGURED;
+
+	return 0;
+
+cfg_err:
+	hw->adapter_state = HNS3_NIC_INITIALIZED;
+	return ret;
+}
+
+static int
 hns3_set_mac_mtu(struct hns3_hw *hw, uint16_t new_mps)
 {
 	struct hns3_config_max_frm_size_cmd *req;
@@ -3080,6 +3288,71 @@  hns3_set_promisc_mode(struct hns3_hw *hw, bool en_uc_pmc, bool en_mc_pmc)
 	return 0;
 }
 
+static void
+hns3_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_set_promisc_mode(hw, true, en_mc_pmc);
+	if (ret)
+		hns3_err(hw, "Failed to enable promiscuous mode: %d", ret);
+	rte_spinlock_unlock(&hw->lock);
+}
+
+static void
+hns3_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	bool en_mc_pmc = (dev->data->all_multicast == 1) ? true : false;
+	int ret;
+
+	/* If now in all_multicast mode, must remain in all_multicast mode. */
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_set_promisc_mode(hw, false, en_mc_pmc);
+	if (ret)
+		hns3_err(hw, "Failed to disable promiscuous mode: %d", ret);
+	rte_spinlock_unlock(&hw->lock);
+}
+
+static void
+hns3_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_set_promisc_mode(hw, en_uc_pmc, true);
+	if (ret)
+		hns3_err(hw, "Failed to enable allmulticast mode: %d", ret);
+	rte_spinlock_unlock(&hw->lock);
+}
+
+static void
+hns3_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	bool en_uc_pmc = (dev->data->promiscuous == 1) ? true : false;
+	int ret;
+
+	/* If now in promiscuous mode, must remain in all_multicast mode. */
+	if (dev->data->promiscuous == 1)
+		return;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_set_promisc_mode(hw, en_uc_pmc, false);
+	if (ret)
+		hns3_err(hw, "Failed to disable allmulticast mode: %d", ret);
+	rte_spinlock_unlock(&hw->lock);
+}
+
 static int
 hns3_get_sfp_speed(struct hns3_hw *hw, uint32_t *speed)
 {
@@ -3345,6 +3618,18 @@  hns3_init_pf(struct rte_eth_dev *eth_dev)
 		goto err_cmd_init;
 	}
 
+	ret = rte_intr_callback_register(&pci_dev->intr_handle,
+					 hns3_interrupt_handler,
+					 eth_dev);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
+		goto err_intr_callback_register;
+	}
+
+	/* Enable interrupt */
+	rte_intr_enable(&pci_dev->intr_handle);
+	hns3_pf_enable_irq0(hw);
+
 	/* Get configuration */
 	ret = hns3_get_configuration(hw);
 	if (ret) {
@@ -3373,6 +3658,12 @@  hns3_init_pf(struct rte_eth_dev *eth_dev)
 	hns3_uninit_umv_space(hw);
 
 err_get_config:
+	hns3_pf_disable_irq0(hw);
+	rte_intr_disable(&pci_dev->intr_handle);
+	hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
+			     eth_dev);
+
+err_intr_callback_register:
 	hns3_cmd_uninit(hw);
 
 err_cmd_init:
@@ -3397,19 +3688,126 @@  hns3_uninit_pf(struct rte_eth_dev *eth_dev)
 	hns3_rss_uninit(hns);
 	hns3_fdir_filter_uninit(hns);
 	hns3_uninit_umv_space(hw);
+	hns3_pf_disable_irq0(hw);
+	rte_intr_disable(&pci_dev->intr_handle);
+	hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
+			     eth_dev);
 	hns3_cmd_uninit(hw);
 	hns3_cmd_destroy_queue(hw);
 	hw->io_base = NULL;
 }
 
+static int
+hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
+{
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	ret = hns3_dcb_cfg_update(hns);
+	if (ret)
+		return ret;
+
+	/* Enable queues */
+	ret = hns3_start_queues(hns, reset_queue);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start queues: %d", ret);
+		return ret;
+	}
+
+	/* Enable MAC */
+	ret = hns3_cfg_mac_mode(hw, true);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to enable MAC: %d", ret);
+		goto err_config_mac_mode;
+	}
+	return 0;
+
+err_config_mac_mode:
+	hns3_stop_queues(hns, true);
+	return ret;
+}
+
+static int
+hns3_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+	rte_spinlock_lock(&hw->lock);
+	hw->adapter_state = HNS3_NIC_STARTING;
+
+	ret = hns3_do_start(hns, true);
+	if (ret) {
+		hw->adapter_state = HNS3_NIC_CONFIGURED;
+		rte_spinlock_unlock(&hw->lock);
+		return ret;
+	}
+
+	hw->adapter_state = HNS3_NIC_STARTED;
+	rte_spinlock_unlock(&hw->lock);
+	hns3_set_rxtx_function(eth_dev);
+	hns3_mp_req_start_rxtx(eth_dev);
+
+	hns3_info(hw, "hns3 dev start successful!");
+	return 0;
+}
+
+static int
+hns3_do_stop(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	bool reset_queue;
+	int ret;
+
+	ret = hns3_cfg_mac_mode(hw, false);
+	if (ret)
+		return ret;
+	hw->mac.link_status = ETH_LINK_DOWN;
+
+	hns3_configure_all_mac_addr(hns, true);
+	reset_queue = true;
+	hw->mac.default_addr_setted = false;
+	return hns3_stop_queues(hns, reset_queue);
+}
+
+static void
+hns3_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	hw->adapter_state = HNS3_NIC_STOPPING;
+	hns3_set_rxtx_function(eth_dev);
+	rte_wmb();
+
+	rte_spinlock_lock(&hw->lock);
+	hns3_do_stop(hns);
+	hns3_dev_release_mbufs(hns);
+	hw->adapter_state = HNS3_NIC_CONFIGURED;
+	rte_spinlock_unlock(&hw->lock);
+}
+
 static void
 hns3_dev_close(struct rte_eth_dev *eth_dev)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
+	if (hw->adapter_state == HNS3_NIC_STARTED)
+		hns3_dev_stop(eth_dev);
+
 	hw->adapter_state = HNS3_NIC_CLOSING;
+	rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
+
+	hns3_configure_all_mc_mac_addr(hns, true);
+	hns3_remove_all_vlan_table(hns);
+	hns3_vlan_txvlan_cfg(hns, HNS3_PORT_BASE_VLAN_DISABLE, 0);
 	hns3_uninit_pf(eth_dev);
+	hns3_free_all_queues(eth_dev);
 	hw->adapter_state = HNS3_NIC_CLOSED;
 }
 
@@ -3600,7 +3998,13 @@  hns3_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info)
 }
 
 static const struct eth_dev_ops hns3_eth_dev_ops = {
+	.dev_start          = hns3_dev_start,
+	.dev_stop           = hns3_dev_stop,
 	.dev_close          = hns3_dev_close,
+	.promiscuous_enable = hns3_dev_promiscuous_enable,
+	.promiscuous_disable = hns3_dev_promiscuous_disable,
+	.allmulticast_enable  = hns3_dev_allmulticast_enable,
+	.allmulticast_disable = hns3_dev_allmulticast_disable,
 	.mtu_set            = hns3_dev_mtu_set,
 	.dev_infos_get          = hns3_dev_infos_get,
 	.fw_version_get         = hns3_fw_version_get,
@@ -3608,6 +4012,7 @@  static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.tx_queue_setup         = hns3_tx_queue_setup,
 	.rx_queue_release       = hns3_dev_rx_queue_release,
 	.tx_queue_release       = hns3_dev_tx_queue_release,
+	.dev_configure          = hns3_dev_configure,
 	.flow_ctrl_get          = hns3_flow_ctrl_get,
 	.flow_ctrl_set          = hns3_flow_ctrl_set,
 	.priority_flow_ctrl_set = hns3_priority_flow_ctrl_set,
@@ -3626,6 +4031,7 @@  static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.vlan_offload_set       = hns3_vlan_offload_set,
 	.vlan_pvid_set          = hns3_vlan_pvid_set,
 	.get_dcb_info           = hns3_get_dcb_info,
+	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
 };
 
 static int
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index abab2b4..7e73845 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1161,6 +1161,7 @@  static const struct eth_dev_ops hns3vf_eth_dev_ops = {
 	.filter_ctrl        = hns3_dev_filter_ctrl,
 	.vlan_filter_set    = hns3vf_vlan_filter_set,
 	.vlan_offload_set   = hns3vf_vlan_offload_set,
+	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
 };
 
 static int