[09/22] net/hns3: add support for flow director of hns3 PMD driver

Message ID 1566568031-45991-10-git-send-email-xavier.huwei@huawei.com (mailing list archive)
State Changes Requested, archived
Delegated to: Ferruh Yigit
Series: add hns3 ethernet PMD driver

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail Compilation issues

Commit Message

Wei Hu (Xavier) Aug. 23, 2019, 1:46 p.m. UTC
  This patch adds flow director support to the hns3 PMD driver. The
flow director feature is only supported in the hns3 PF driver. It
supports creating, deleting, and flushing rules for network L2/L3/L4
and tunnel packets, as well as querying rule hit statistics.

Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
---
 drivers/net/hns3/hns3_cmd.c    |    1 +
 drivers/net/hns3/hns3_ethdev.c |   33 +
 drivers/net/hns3/hns3_ethdev.h |    3 +
 drivers/net/hns3/hns3_fdir.c   | 1062 +++++++++++++++++++++++++++++
 drivers/net/hns3/hns3_fdir.h   |  203 ++++++
 drivers/net/hns3/hns3_flow.c   | 1450 ++++++++++++++++++++++++++++++++++++++++
 6 files changed, 2752 insertions(+)
 create mode 100644 drivers/net/hns3/hns3_fdir.c
 create mode 100644 drivers/net/hns3/hns3_fdir.h
 create mode 100644 drivers/net/hns3/hns3_flow.c
  

Comments

Ferruh Yigit Aug. 30, 2019, 3:06 p.m. UTC | #1
On 8/23/2019 2:46 PM, Wei Hu (Xavier) wrote:
> This patch adds flow director support to the hns3 PMD driver. The
> flow director feature is only supported in the hns3 PF driver. It
> supports creating, deleting, and flushing rules for network L2/L3/L4
> and tunnel packets, as well as querying rule hit statistics.

This patch also adds rte_flow support; can you please add this to the commit log?
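
For reference, that means an application can program an FDIR rule through the
generic rte_flow API without touching driver internals. A minimal sketch, where
the port id, queue index, and addresses are illustrative values rather than
anything taken from this patch:

	#include <rte_byteorder.h>
	#include <rte_flow.h>
	#include <rte_ip.h>

	/* Steer TCP/IPv4 packets with a given destination IP to Rx queue 3. */
	static struct rte_flow *
	fdir_rule_sketch(uint16_t port_id, struct rte_flow_error *err)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item_ipv4 ip_spec = {
			.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
		};
		struct rte_flow_item_ipv4 ip_mask = {
			.hdr.dst_addr = RTE_BE32(UINT32_MAX),
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
			  .spec = &ip_spec, .mask = &ip_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_TCP },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_queue queue = { .index = 3 };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		/* Validate first so the PMD can reject unsupported fields. */
		if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
			return NULL;
		return rte_flow_create(port_id, &attr, pattern, actions, err);
	}

The same path can also be exercised from testpmd with its 'flow create' commands.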

> 
> Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
> Signed-off-by: Hao Chen <chenhao164@huawei.com>
> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
> Signed-off-by: Huisong Li <lihuisong@huawei.com>

<...>

> @@ -2726,6 +2744,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
>  	.mac_addr_set           = hns3_set_default_mac_addr,
>  	.set_mc_addr_list       = hns3_set_mc_mac_addr_list,
>  	.link_update            = hns3_dev_link_update,
> +	.filter_ctrl            = hns3_dev_filter_ctrl,

'hns3_dev_filter_ctrl()' does not exist until this patch.

This is the problem with not enabling the driver yet: it is very hard to see
these kinds of issues. When the Makefile/meson patch is moved to the beginning
of the series and the driver starts to build, these issues will become visible.

>  };
>  
>  static int
> @@ -2739,6 +2758,16 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
>  	int ret;
>  
>  	PMD_INIT_FUNC_TRACE();
> +	eth_dev->process_private = (struct hns3_process_private *)
> +	    rte_zmalloc_socket("hns3_filter_list",
> +			       sizeof(struct hns3_process_private),
> +			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
> +	if (eth_dev->process_private == NULL) {
> +		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
> +		return -ENOMEM;
> +	}
> +	/* initialize flow filter lists */
> +	hns3_filterlist_init(eth_dev);

Can you please free 'process_private' in the close dev_ops?
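
For illustration, the requested cleanup could be a small helper invoked from the
.dev_close path; this is a sketch of the idea only (the helper name is made up
here, not the actual v2 code):

	/* Release the per-process private data allocated in dev_init. */
	static void
	hns3_free_process_private(struct rte_eth_dev *eth_dev)
	{
		rte_free(eth_dev->process_private); /* rte_free(NULL) is a no-op */
		eth_dev->process_private = NULL;
	}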
  
Wei Hu (Xavier) Sept. 6, 2019, 8:23 a.m. UTC | #2
Hi, Ferruh Yigit


On 2019/8/30 23:06, Ferruh Yigit wrote:
> On 8/23/2019 2:46 PM, Wei Hu (Xavier) wrote:
>> This patch adds flow director support to the hns3 PMD driver. The
>> flow director feature is only supported in the hns3 PF driver. It
>> supports creating, deleting, and flushing rules for network L2/L3/L4
>> and tunnel packets, as well as querying rule hit statistics.
> This patch also adds rte_flow support; can you please add this to the commit log?
OK, I will update it and send patch V2.
Thanks.
>> Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
>> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
>> Signed-off-by: Hao Chen <chenhao164@huawei.com>
>> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
>> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> <...>
>
>> @@ -2726,6 +2744,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
>>  	.mac_addr_set           = hns3_set_default_mac_addr,
>>  	.set_mc_addr_list       = hns3_set_mc_mac_addr_list,
>>  	.link_update            = hns3_dev_link_update,
>> +	.filter_ctrl            = hns3_dev_filter_ctrl,
> 'hns3_dev_filter_ctrl()' does not exist until this patch.
>
> This is the problem with not enabling the driver yet: it is very hard to see
> these kinds of issues. When the Makefile/meson patch is moved to the beginning
> of the series and the driver starts to build, these issues will become visible.
I will fix it in patch V2.
>>  };
>>  
>>  static int
>> @@ -2739,6 +2758,16 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
>>  	int ret;
>>  
>>  	PMD_INIT_FUNC_TRACE();
>> +	eth_dev->process_private = (struct hns3_process_private *)
>> +	    rte_zmalloc_socket("hns3_filter_list",
>> +			       sizeof(struct hns3_process_private),
>> +			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
>> +	if (eth_dev->process_private == NULL) {
>> +		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
>> +		return -ENOMEM;
>> +	}
>> +	/* initialize flow filter lists */
>> +	hns3_filterlist_init(eth_dev);
> Can you please free 'process_private' in the close dev_ops?
    We will update it and send patch V2.

    Regards
Xavier
  
Wei Hu (Xavier) Sept. 6, 2019, 11:08 a.m. UTC | #3
Hi, Ferruh Yigit


On 2019/8/30 23:06, Ferruh Yigit wrote:
> On 8/23/2019 2:46 PM, Wei Hu (Xavier) wrote:
>> This patch adds flow director support to the hns3 PMD driver. The
>> flow director feature is only supported in the hns3 PF driver. It
>> supports creating, deleting, and flushing rules for network L2/L3/L4
>> and tunnel packets, as well as querying rule hit statistics.
> This patch also adds rte_flow support; can you please add this to the commit log?
We will update it in patch V2.
>> Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
>> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
>> Signed-off-by: Hao Chen <chenhao164@huawei.com>
>> Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
>> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> <...>
>
>> @@ -2726,6 +2744,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
>>  	.mac_addr_set           = hns3_set_default_mac_addr,
>>  	.set_mc_addr_list       = hns3_set_mc_mac_addr_list,
>>  	.link_update            = hns3_dev_link_update,
>> +	.filter_ctrl            = hns3_dev_filter_ctrl,
> 'hns3_dev_filter_ctrl()' does not exist until this patch.
>
> This is the problem with not enabling the driver yet: it is very hard to see
> these kinds of issues. When the Makefile/meson patch is moved to the beginning
> of the series and the driver starts to build, these issues will become visible.
I will fix it in patch V2.
>>  };
>>  
>>  static int
>> @@ -2739,6 +2758,16 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
>>  	int ret;
>>  
>>  	PMD_INIT_FUNC_TRACE();
>> +	eth_dev->process_private = (struct hns3_process_private *)
>> +	    rte_zmalloc_socket("hns3_filter_list",
>> +			       sizeof(struct hns3_process_private),
>> +			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
>> +	if (eth_dev->process_private == NULL) {
>> +		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
>> +		return -ENOMEM;
>> +	}
>> +	/* initialize flow filter lists */
>> +	hns3_filterlist_init(eth_dev);
> Can you please free 'process_private' in the close dev_ops?
    We will fix it in patch V2.
    Thanks for your suggestion.

    Regards
Xavier
>
  

Patch

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index f272374..4fc282c 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -27,6 +27,7 @@ 
 #include <rte_io.h>
 
 #include "hns3_cmd.h"
+#include "hns3_fdir.h"
 #include "hns3_ethdev.h"
 #include "hns3_regs.h"
 #include "hns3_logs.h"
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index a162d7f..c1f9bcb 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -30,6 +30,7 @@ 
 #include <rte_pci.h>
 
 #include "hns3_cmd.h"
+#include "hns3_fdir.h"
 #include "hns3_ethdev.h"
 #include "hns3_logs.h"
 #include "hns3_regs.h"
@@ -2614,6 +2615,12 @@  hns3_init_hardware(struct hns3_adapter *hns)
 		goto err_mac_init;
 	}
 
+	ret = hns3_init_fd_config(hns);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow director: %d", ret);
+		goto err_mac_init;
+	}
+
 	ret = hns3_config_tso(hw, HNS3_TSO_MSS_MIN, HNS3_TSO_MSS_MAX);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to config tso: %d", ret);
@@ -2675,8 +2682,18 @@  hns3_init_pf(struct rte_eth_dev *eth_dev)
 		goto err_get_config;
 	}
 
+	/* Initialize flow director filter list & hash */
+	ret = hns3_fdir_filter_init(hns);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to alloc hashmap for fdir: %d", ret);
+		goto err_hw_init;
+	}
+
 	return 0;
 
+err_hw_init:
+	hns3_uninit_umv_space(hw);
+
 err_get_config:
 	hns3_cmd_uninit(hw);
 
@@ -2699,6 +2716,7 @@  hns3_uninit_pf(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	hns3_fdir_filter_uninit(hns);
 	hns3_uninit_umv_space(hw);
 	hns3_cmd_uninit(hw);
 	hns3_cmd_destroy_queue(hw);
@@ -2726,6 +2744,7 @@  static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.mac_addr_set           = hns3_set_default_mac_addr,
 	.set_mc_addr_list       = hns3_set_mc_mac_addr_list,
 	.link_update            = hns3_dev_link_update,
+	.filter_ctrl            = hns3_dev_filter_ctrl,
 };
 
 static int
@@ -2739,6 +2758,16 @@  hns3_dev_init(struct rte_eth_dev *eth_dev)
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
+	eth_dev->process_private = (struct hns3_process_private *)
+	    rte_zmalloc_socket("hns3_filter_list",
+			       sizeof(struct hns3_process_private),
+			       RTE_CACHE_LINE_SIZE, eth_dev->device->numa_node);
+	if (eth_dev->process_private == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to alloc memory for process private");
+		return -ENOMEM;
+	}
+	/* initialize flow filter lists */
+	hns3_filterlist_init(eth_dev);
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
@@ -2788,6 +2817,8 @@  hns3_dev_init(struct rte_eth_dev *eth_dev)
 
 err_init_pf:
 	eth_dev->dev_ops = NULL;
+	rte_free(eth_dev->process_private);
+	eth_dev->process_private = NULL;
 	return ret;
 }
 
@@ -2808,6 +2839,8 @@  hns3_dev_uninit(struct rte_eth_dev *eth_dev)
 	if (hw->adapter_state < HNS3_NIC_CLOSING)
 		hns3_dev_close(eth_dev);
 
+	rte_free(eth_dev->process_private);
+	eth_dev->process_private = NULL;
 	hw->adapter_state = HNS3_NIC_REMOVED;
 	return 0;
 }
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index d5f62fe..c46211a 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -463,6 +463,9 @@  struct hns3_pf {
 	struct hns3_vtag_cfg vtag_config;
 	struct hns3_port_base_vlan_config port_base_vlan_cfg;
 	LIST_HEAD(vlan_tbl, hns3_user_vlan_table) vlan_list;
+
+	struct hns3_fdir_info fdir; /* flow director info */
+	LIST_HEAD(counters, hns3_flow_counter) flow_counters;
 };
 
 struct hns3_vf {
diff --git a/drivers/net/hns3/hns3_fdir.c b/drivers/net/hns3/hns3_fdir.c
new file mode 100644
index 0000000..aa7d968
--- /dev/null
+++ b/drivers/net/hns3/hns3_fdir.c
@@ -0,0 +1,1062 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2019 Hisilicon Limited.
+ */
+
+#include <stdbool.h>
+#include <sys/queue.h>
+#include <rte_ethdev_driver.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
+#include <rte_io.h>
+#include <rte_malloc.h>
+
+#include "hns3_cmd.h"
+#include "hns3_fdir.h"
+#include "hns3_ethdev.h"
+#include "hns3_logs.h"
+
+#define HNS3_VLAN_TAG_TYPE_NONE		0
+#define HNS3_VLAN_TAG_TYPE_TAG2		1
+#define HNS3_VLAN_TAG_TYPE_TAG1		2
+#define HNS3_VLAN_TAG_TYPE_TAG1_2	3
+
+#define HNS3_PF_ID_S			0
+#define HNS3_PF_ID_M			GENMASK(2, 0)
+#define HNS3_VF_ID_S			3
+#define HNS3_VF_ID_M			GENMASK(10, 3)
+#define HNS3_PORT_TYPE_B		11
+#define HNS3_NETWORK_PORT_ID_S		0
+#define HNS3_NETWORK_PORT_ID_M		GENMASK(3, 0)
+
+#define HNS3_FD_EPORT_SW_EN_B		0
+
+#define HNS3_FD_AD_DATA_S		32
+#define HNS3_FD_AD_DROP_B		0
+#define HNS3_FD_AD_DIRECT_QID_B	1
+#define HNS3_FD_AD_QID_S		2
+#define HNS3_FD_AD_QID_M		GENMASK(12, 2)
+#define HNS3_FD_AD_USE_COUNTER_B	12
+#define HNS3_FD_AD_COUNTER_NUM_S	13
+#define HNS3_FD_AD_COUNTER_NUM_M	GENMASK(20, 13)
+#define HNS3_FD_AD_NXT_STEP_B		20
+#define HNS3_FD_AD_NXT_KEY_S		21
+#define HNS3_FD_AD_NXT_KEY_M		GENMASK(26, 21)
+#define HNS3_FD_AD_WR_RULE_ID_B	0
+#define HNS3_FD_AD_RULE_ID_S		1
+#define HNS3_FD_AD_RULE_ID_M		GENMASK(13, 1)
+
+enum HNS3_PORT_TYPE {
+	HOST_PORT,
+	NETWORK_PORT
+};
+
+enum HNS3_FD_MODE {
+	HNS3_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
+	HNS3_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
+	HNS3_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
+	HNS3_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
+};
+
+enum HNS3_FD_KEY_TYPE {
+	HNS3_FD_KEY_BASE_ON_PTYPE,
+	HNS3_FD_KEY_BASE_ON_TUPLE,
+};
+
+enum HNS3_FD_META_DATA {
+	PACKET_TYPE_ID,
+	IP_FRAGEMENT,
+	ROCE_TYPE,
+	NEXT_KEY,
+	VLAN_NUMBER,
+	SRC_VPORT,
+	DST_VPORT,
+	TUNNEL_PACKET,
+	MAX_META_DATA,
+};
+
+struct key_info {
+	uint8_t key_type;
+	uint8_t key_length;
+};
+
+static const struct key_info meta_data_key_info[] = {
+	{PACKET_TYPE_ID, 6},
+	{IP_FRAGEMENT, 1},
+	{ROCE_TYPE, 1},
+	{NEXT_KEY, 5},
+	{VLAN_NUMBER, 2},
+	{SRC_VPORT, 12},
+	{DST_VPORT, 12},
+	{TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+	{OUTER_DST_MAC, 48},
+	{OUTER_SRC_MAC, 48},
+	{OUTER_VLAN_TAG_FST, 16},
+	{OUTER_VLAN_TAG_SEC, 16},
+	{OUTER_ETH_TYPE, 16},
+	{OUTER_L2_RSV, 16},
+	{OUTER_IP_TOS, 8},
+	{OUTER_IP_PROTO, 8},
+	{OUTER_SRC_IP, 32},
+	{OUTER_DST_IP, 32},
+	{OUTER_L3_RSV, 16},
+	{OUTER_SRC_PORT, 16},
+	{OUTER_DST_PORT, 16},
+	{OUTER_L4_RSV, 32},
+	{OUTER_TUN_VNI, 24},
+	{OUTER_TUN_FLOW_ID, 8},
+	{INNER_DST_MAC, 48},
+	{INNER_SRC_MAC, 48},
+	{INNER_VLAN_TAG1, 16},
+	{INNER_VLAN_TAG2, 16},
+	{INNER_ETH_TYPE, 16},
+	{INNER_L2_RSV, 16},
+	{INNER_IP_TOS, 8},
+	{INNER_IP_PROTO, 8},
+	{INNER_SRC_IP, 32},
+	{INNER_DST_IP, 32},
+	{INNER_L3_RSV, 16},
+	{INNER_SRC_PORT, 16},
+	{INNER_DST_PORT, 16},
+	{INNER_SCTP_TAG, 32},
+};
+
+#define HNS3_BITS_PER_BYTE	8
+#define MAX_KEY_LENGTH		400
+#define MAX_200B_KEY_LENGTH	200
+#define MAX_META_DATA_LENGTH	16
+#define MAX_KEY_DWORDS	DIV_ROUND_UP(MAX_KEY_LENGTH / HNS3_BITS_PER_BYTE, 4)
+#define MAX_KEY_BYTES	(MAX_KEY_DWORDS * 4)
+
+enum HNS3_FD_PACKET_TYPE {
+	NIC_PACKET,
+	ROCE_PACKET,
+};
+
+/* For each bit of TCAM entry, it uses a pair of 'x' and
+ * 'y' to indicate which value to match, like below:
+ * ----------------------------------
+ * | bit x | bit y |  search value  |
+ * ----------------------------------
+ * |   0   |   0   |   always hit   |
+ * ----------------------------------
+ * |   1   |   0   |   match '0'    |
+ * ----------------------------------
+ * |   0   |   1   |   match '1'    |
+ * ----------------------------------
+ * |   1   |   1   |   invalid      |
+ * ----------------------------------
+ * Then for input key(k) and mask(v), we can calculate the value by
+ * the formulae:
+ *	x = (~k) & v
+ *	y = k & v
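+ *
+ * For example, with key k = 0b1010 and mask v = 0b1100 (the low two bits
+ * are don't-care): x = (~k) & v = 0b0100 and y = k & v = 0b1000, so bit 3
+ * must match '1', bit 2 must match '0', and bits 1-0 always hit.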
+ */
+#define calc_x(x, k, v) ((x) = (~(k) & (v)))
+#define calc_y(y, k, v) ((y) = ((k) & (v)))
+
+struct hns3_fd_tcam_config_1_cmd {
+	uint8_t stage;
+	uint8_t xy_sel;
+	uint8_t port_info;
+	uint8_t rsv1[1];
+	rte_le32_t index;
+	uint8_t entry_vld;
+	uint8_t rsv2[7];
+	uint8_t tcam_data[8];
+};
+
+struct hns3_fd_tcam_config_2_cmd {
+	uint8_t tcam_data[24];
+};
+
+struct hns3_fd_tcam_config_3_cmd {
+	uint8_t tcam_data[20];
+	uint8_t rsv[4];
+};
+
+struct hns3_get_fd_mode_cmd {
+	uint8_t mode;
+	uint8_t enable;
+	uint8_t rsv[22];
+};
+
+struct hns3_get_fd_allocation_cmd {
+	rte_le32_t stage1_entry_num;
+	rte_le32_t stage2_entry_num;
+	rte_le16_t stage1_counter_num;
+	rte_le16_t stage2_counter_num;
+	uint8_t rsv[12];
+};
+
+struct hns3_set_fd_key_config_cmd {
+	uint8_t stage;
+	uint8_t key_select;
+	uint8_t inner_sipv6_word_en;
+	uint8_t inner_dipv6_word_en;
+	uint8_t outer_sipv6_word_en;
+	uint8_t outer_dipv6_word_en;
+	uint8_t rsv1[2];
+	rte_le32_t tuple_mask;
+	rte_le32_t meta_data_mask;
+	uint8_t rsv2[8];
+};
+
+struct hns3_fd_ad_config_cmd {
+	uint8_t stage;
+	uint8_t rsv1[3];
+	rte_le32_t index;
+	rte_le64_t ad_data;
+	uint8_t rsv2[8];
+};
+
+struct hns3_fd_get_cnt_cmd {
+	uint8_t stage;
+	uint8_t rsv1[3];
+	rte_le16_t index;
+	uint8_t rsv2[2];
+	rte_le64_t value;
+	uint8_t rsv3[8];
+};
+
+static int hns3_get_fd_mode(struct hns3_hw *hw, uint8_t *fd_mode)
+{
+	struct hns3_get_fd_mode_cmd *req;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FD_MODE_CTRL, true);
+
+	req = (struct hns3_get_fd_mode_cmd *)desc.data;
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "Get fd mode fail, ret=%d", ret);
+		return ret;
+	}
+
+	*fd_mode = req->mode;
+
+	return ret;
+}
+
+static int hns3_get_fd_allocation(struct hns3_hw *hw,
+				  uint32_t *stage1_entry_num,
+				  uint32_t *stage2_entry_num,
+				  uint16_t *stage1_counter_num,
+				  uint16_t *stage2_counter_num)
+{
+	struct hns3_get_fd_allocation_cmd *req;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FD_GET_ALLOCATION, true);
+
+	req = (struct hns3_get_fd_allocation_cmd *)desc.data;
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "Query fd allocation fail, ret=%d", ret);
+		return ret;
+	}
+
+	*stage1_entry_num = rte_le_to_cpu_32(req->stage1_entry_num);
+	*stage2_entry_num = rte_le_to_cpu_32(req->stage2_entry_num);
+	*stage1_counter_num = rte_le_to_cpu_16(req->stage1_counter_num);
+	*stage2_counter_num = rte_le_to_cpu_16(req->stage2_counter_num);
+
+	return ret;
+}
+
+static int hns3_set_fd_key_config(struct hns3_adapter *hns)
+{
+	struct hns3_set_fd_key_config_cmd *req;
+	struct hns3_fd_key_cfg *key_cfg;
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FD_KEY_CONFIG, false);
+
+	req = (struct hns3_set_fd_key_config_cmd *)desc.data;
+	key_cfg = &pf->fdir.fd_cfg.key_cfg[HNS3_FD_STAGE_1];
+	req->stage = HNS3_FD_STAGE_1;
+	req->key_select = key_cfg->key_sel;
+	req->inner_sipv6_word_en = key_cfg->inner_sipv6_word_en;
+	req->inner_dipv6_word_en = key_cfg->inner_dipv6_word_en;
+	req->outer_sipv6_word_en = key_cfg->outer_sipv6_word_en;
+	req->outer_dipv6_word_en = key_cfg->outer_dipv6_word_en;
+	req->tuple_mask = rte_cpu_to_le_32(~key_cfg->tuple_active);
+	req->meta_data_mask = rte_cpu_to_le_32(~key_cfg->meta_data_active);
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret)
+		hns3_err(hw, "Set fd key fail, ret=%d", ret);
+
+	return ret;
+}
+
+int hns3_init_fd_config(struct hns3_adapter *hns)
+{
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_fd_key_cfg *key_cfg;
+	int ret;
+
+	ret = hns3_get_fd_mode(hw, &pf->fdir.fd_cfg.fd_mode);
+	if (ret)
+		return ret;
+
+	switch (pf->fdir.fd_cfg.fd_mode) {
+	case HNS3_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
+		pf->fdir.fd_cfg.max_key_length = MAX_KEY_LENGTH;
+		break;
+	case HNS3_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
+		pf->fdir.fd_cfg.max_key_length = MAX_200B_KEY_LENGTH;
+		hns3_warn(hw, "Unsupported tunnel filter in 4K*200Bit");
+		break;
+	default:
+		hns3_err(hw, "Unsupported flow director mode %d",
+			    pf->fdir.fd_cfg.fd_mode);
+		return -EOPNOTSUPP;
+	}
+
+	key_cfg = &pf->fdir.fd_cfg.key_cfg[HNS3_FD_STAGE_1];
+	key_cfg->key_sel = HNS3_FD_KEY_BASE_ON_TUPLE;
+	key_cfg->inner_sipv6_word_en = IPV6_ADDR_WORD_MASK;
+	key_cfg->inner_dipv6_word_en = IPV6_ADDR_WORD_MASK;
+	key_cfg->outer_sipv6_word_en = 0;
+	key_cfg->outer_dipv6_word_en = 0;
+
+	key_cfg->tuple_active = BIT(INNER_VLAN_TAG1) | BIT(INNER_ETH_TYPE) |
+	    BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
+	    BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+	    BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+	/* If use max 400bit key, we can support tuples for ether type */
+	if (pf->fdir.fd_cfg.max_key_length == MAX_KEY_LENGTH) {
+		key_cfg->tuple_active |=
+		    BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC) |
+		    BIT(OUTER_SRC_PORT) | BIT(INNER_SCTP_TAG) |
+		    BIT(OUTER_DST_PORT) | BIT(INNER_VLAN_TAG2) |
+		    BIT(OUTER_TUN_VNI) | BIT(OUTER_TUN_FLOW_ID) |
+		    BIT(OUTER_ETH_TYPE) | BIT(OUTER_IP_PROTO);
+	}
+
+	/* roce_type is used to filter roce frames
+	 * dst_vport is used to specify the rule
+	 */
+	key_cfg->meta_data_active = BIT(DST_VPORT) | BIT(TUNNEL_PACKET) |
+	    BIT(VLAN_NUMBER);
+
+	ret = hns3_get_fd_allocation(hw,
+				     &pf->fdir.fd_cfg.rule_num[HNS3_FD_STAGE_1],
+				     &pf->fdir.fd_cfg.rule_num[HNS3_FD_STAGE_2],
+				     &pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1],
+				     &pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_2]);
+	if (ret)
+		return ret;
+
+	return hns3_set_fd_key_config(hns);
+}
+
+static int hns3_fd_tcam_config(struct hns3_hw *hw, bool sel_x, int loc,
+			       uint8_t *key, bool is_add)
+{
+#define	FD_TCAM_CMD_NUM 3
+	struct hns3_fd_tcam_config_1_cmd *req1;
+	struct hns3_fd_tcam_config_2_cmd *req2;
+	struct hns3_fd_tcam_config_3_cmd *req3;
+	struct hns3_cmd_desc desc[FD_TCAM_CMD_NUM];
+	int len;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_FD_TCAM_OP, false);
+	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+	hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_FD_TCAM_OP, false);
+	desc[1].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+	hns3_cmd_setup_basic_desc(&desc[2], HNS3_OPC_FD_TCAM_OP, false);
+
+	req1 = (struct hns3_fd_tcam_config_1_cmd *)desc[0].data;
+	req2 = (struct hns3_fd_tcam_config_2_cmd *)desc[1].data;
+	req3 = (struct hns3_fd_tcam_config_3_cmd *)desc[2].data;
+
+	req1->stage = HNS3_FD_STAGE_1;
+	req1->xy_sel = sel_x ? 1 : 0;
+	hns3_set_bit(req1->port_info, HNS3_FD_EPORT_SW_EN_B, 0);
+	req1->index = rte_cpu_to_le_32(loc);
+	req1->entry_vld = sel_x ? is_add : 0;
+
+	if (key) {
+		len = sizeof(req1->tcam_data);
+		memcpy(req1->tcam_data, key, len);
+		key += len;
+
+		len = sizeof(req2->tcam_data);
+		memcpy(req2->tcam_data, key, len);
+		key += len;
+
+		len = sizeof(req3->tcam_data);
+		memcpy(req3->tcam_data, key, len);
+	}
+
+	ret = hns3_cmd_send(hw, desc, FD_TCAM_CMD_NUM);
+	if (ret)
+		hns3_err(hw, "Config tcam key fail, ret=%d loc=%d add=%d",
+			    ret, loc, is_add);
+	return ret;
+}
+
+static int hns3_fd_ad_config(struct hns3_hw *hw, int loc,
+			     struct hns3_fd_ad_data *action)
+{
+	struct hns3_fd_ad_config_cmd *req;
+	struct hns3_cmd_desc desc;
+	uint64_t ad_data = 0;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FD_AD_OP, false);
+
+	req = (struct hns3_fd_ad_config_cmd *)desc.data;
+	req->index = rte_cpu_to_le_32(loc);
+	req->stage = HNS3_FD_STAGE_1;
+
+	hns3_set_bit(ad_data, HNS3_FD_AD_WR_RULE_ID_B,
+		     action->write_rule_id_to_bd);
+	hns3_set_field(ad_data, HNS3_FD_AD_RULE_ID_M, HNS3_FD_AD_RULE_ID_S,
+		       action->rule_id);
+	ad_data <<= HNS3_FD_AD_DATA_S;
+	hns3_set_bit(ad_data, HNS3_FD_AD_DROP_B, action->drop_packet);
+	hns3_set_bit(ad_data, HNS3_FD_AD_DIRECT_QID_B,
+		     action->forward_to_direct_queue);
+	hns3_set_field(ad_data, HNS3_FD_AD_QID_M, HNS3_FD_AD_QID_S,
+		       action->queue_id);
+	hns3_set_bit(ad_data, HNS3_FD_AD_USE_COUNTER_B, action->use_counter);
+	hns3_set_field(ad_data, HNS3_FD_AD_COUNTER_NUM_M,
+		       HNS3_FD_AD_COUNTER_NUM_S, action->counter_id);
+	hns3_set_bit(ad_data, HNS3_FD_AD_NXT_STEP_B, action->use_next_stage);
+	hns3_set_field(ad_data, HNS3_FD_AD_NXT_KEY_M, HNS3_FD_AD_NXT_KEY_S,
+		       action->counter_id);
+
+	req->ad_data = rte_cpu_to_le_64(ad_data);
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret)
+		hns3_err(hw, "Config fd ad fail, ret=%d loc=%d", ret, loc);
+
+	return ret;
+}
+
+static inline void hns3_fd_convert_mac(uint8_t *key, uint8_t *mask,
+				       uint8_t *mac_x, uint8_t *mac_y)
+{
+	uint8_t tmp;
+	int i;
+
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		tmp = RTE_ETHER_ADDR_LEN - 1 - i;
+		calc_x(mac_x[tmp], key[i], mask[i]);
+		calc_y(mac_y[tmp], key[i], mask[i]);
+	}
+}
+
+static void hns3_fd_convert_int16(uint32_t tuple, struct hns3_fdir_rule *rule,
+				  uint8_t *val_x, uint8_t *val_y)
+{
+	uint16_t tmp_x_s;
+	uint16_t tmp_y_s;
+	uint16_t mask;
+	uint16_t key;
+
+	switch (tuple) {
+	case OUTER_SRC_PORT:
+		key = rule->key_conf.spec.outer_src_port;
+		mask = rule->key_conf.mask.outer_src_port;
+		break;
+	case OUTER_DST_PORT:
+		key = rule->key_conf.spec.tunnel_type;
+		mask = rule->key_conf.mask.tunnel_type;
+		break;
+	case OUTER_ETH_TYPE:
+		key = rule->key_conf.spec.outer_ether_type;
+		mask = rule->key_conf.mask.outer_ether_type;
+		break;
+	case INNER_SRC_PORT:
+		key = rule->key_conf.spec.src_port;
+		mask = rule->key_conf.mask.src_port;
+		break;
+	case INNER_DST_PORT:
+		key = rule->key_conf.spec.dst_port;
+		mask = rule->key_conf.mask.dst_port;
+		break;
+	case INNER_VLAN_TAG1:
+		key = rule->key_conf.spec.vlan_tag1;
+		mask = rule->key_conf.mask.vlan_tag1;
+		break;
+	case INNER_VLAN_TAG2:
+		key = rule->key_conf.spec.vlan_tag2;
+		mask = rule->key_conf.mask.vlan_tag2;
+		break;
+	default:
+		/*  INNER_ETH_TYPE: */
+		key = rule->key_conf.spec.ether_type;
+		mask = rule->key_conf.mask.ether_type;
+		break;
+	}
+	calc_x(tmp_x_s, key, mask);
+	calc_y(tmp_y_s, key, mask);
+	val_x[0] = rte_cpu_to_le_16(tmp_x_s) & 0xFF;
+	val_x[1] = rte_cpu_to_le_16(tmp_x_s) >> HNS3_BITS_PER_BYTE;
+	val_y[0] = rte_cpu_to_le_16(tmp_y_s) & 0xFF;
+	val_y[1] = rte_cpu_to_le_16(tmp_y_s) >> HNS3_BITS_PER_BYTE;
+}
+
+static inline void hns3_fd_convert_int32(uint32_t key, uint32_t mask,
+					 uint8_t *val_x, uint8_t *val_y)
+{
+	uint32_t tmp_x_l;
+	uint32_t tmp_y_l;
+
+	calc_x(tmp_x_l, key, mask);
+	calc_y(tmp_y_l, key, mask);
+	memcpy(val_x, &tmp_x_l, sizeof(tmp_x_l));
+	memcpy(val_y, &tmp_y_l, sizeof(tmp_y_l));
+}
+
+static bool hns3_fd_convert_tuple(uint32_t tuple, uint8_t *key_x,
+				  uint8_t *key_y, struct hns3_fdir_rule *rule)
+{
+	struct hns3_fdir_key_conf *key_conf;
+	int tmp;
+	int i;
+
+	if ((rule->input_set & BIT(tuple)) == 0)
+		return true;
+
+	key_conf = &rule->key_conf;
+	switch (tuple) {
+	case INNER_DST_MAC:
+		hns3_fd_convert_mac(key_conf->spec.dst_mac,
+				    key_conf->mask.dst_mac, key_x, key_y);
+		break;
+	case INNER_SRC_MAC:
+		hns3_fd_convert_mac(key_conf->spec.src_mac,
+				    key_conf->mask.src_mac, key_x, key_y);
+		break;
+	case OUTER_SRC_PORT:
+	case OUTER_DST_PORT:
+	case OUTER_ETH_TYPE:
+	case INNER_SRC_PORT:
+	case INNER_DST_PORT:
+	case INNER_VLAN_TAG1:
+	case INNER_VLAN_TAG2:
+	case INNER_ETH_TYPE:
+		hns3_fd_convert_int16(tuple, rule, key_x, key_y);
+		break;
+	case INNER_SRC_IP:
+		hns3_fd_convert_int32(key_conf->spec.src_ip[IP_ADDR_KEY_ID],
+				      key_conf->mask.src_ip[IP_ADDR_KEY_ID],
+				      key_x, key_y);
+		break;
+	case INNER_DST_IP:
+		hns3_fd_convert_int32(key_conf->spec.dst_ip[IP_ADDR_KEY_ID],
+				      key_conf->mask.dst_ip[IP_ADDR_KEY_ID],
+				      key_x, key_y);
+		break;
+	case INNER_SCTP_TAG:
+		hns3_fd_convert_int32(key_conf->spec.sctp_tag,
+				      key_conf->mask.sctp_tag, key_x, key_y);
+		break;
+	case OUTER_TUN_VNI:
+		for (i = 0; i < VNI_OR_TNI_LEN; i++) {
+			tmp = VNI_OR_TNI_LEN - 1 - i;
+			calc_x(key_x[tmp],
+			       key_conf->spec.outer_tun_vni[i],
+			       key_conf->mask.outer_tun_vni[i]);
+			calc_y(key_y[tmp],
+			       key_conf->spec.outer_tun_vni[i],
+			       key_conf->mask.outer_tun_vni[i]);
+		}
+		break;
+	case OUTER_TUN_FLOW_ID:
+		calc_x(*key_x, key_conf->spec.outer_tun_flow_id,
+		       key_conf->mask.outer_tun_flow_id);
+		calc_y(*key_y, key_conf->spec.outer_tun_flow_id,
+		       key_conf->mask.outer_tun_flow_id);
+		break;
+	case INNER_IP_TOS:
+		calc_x(*key_x, key_conf->spec.ip_tos, key_conf->mask.ip_tos);
+		calc_y(*key_y, key_conf->spec.ip_tos, key_conf->mask.ip_tos);
+		break;
+	case OUTER_IP_PROTO:
+		calc_x(*key_x, key_conf->spec.outer_proto,
+		       key_conf->mask.outer_proto);
+		calc_y(*key_y, key_conf->spec.outer_proto,
+		       key_conf->mask.outer_proto);
+		break;
+	case INNER_IP_PROTO:
+		calc_x(*key_x, key_conf->spec.ip_proto,
+		       key_conf->mask.ip_proto);
+		calc_y(*key_y, key_conf->spec.ip_proto,
+		       key_conf->mask.ip_proto);
+		break;
+	}
+	return true;
+}
+
+static uint32_t hns3_get_port_number(uint8_t pf_id, uint8_t vf_id)
+{
+	uint32_t port_number = 0;
+
+	hns3_set_field(port_number, HNS3_PF_ID_M, HNS3_PF_ID_S, pf_id);
+	hns3_set_field(port_number, HNS3_VF_ID_M, HNS3_VF_ID_S, vf_id);
+	hns3_set_bit(port_number, HNS3_PORT_TYPE_B, HOST_PORT);
+
+	return port_number;
+}
+
+static void hns3_fd_convert_meta_data(struct hns3_fd_key_cfg *cfg,
+				      uint8_t vf_id,
+				      struct hns3_fdir_rule *rule,
+				      uint8_t *key_x, uint8_t *key_y)
+{
+	uint16_t meta_data = 0;
+	uint16_t port_number;
+	uint8_t cur_pos = 0;
+	uint8_t tuple_size;
+	uint8_t shift_bits;
+	uint32_t tmp_x;
+	uint32_t tmp_y;
+	uint8_t i;
+
+	for (i = 0; i < MAX_META_DATA; i++) {
+		if ((cfg->meta_data_active & BIT(i)) == 0)
+			continue;
+
+		tuple_size = meta_data_key_info[i].key_length;
+		if (i == TUNNEL_PACKET) {
+			hns3_set_bit(meta_data, cur_pos,
+				     rule->key_conf.spec.tunnel_type ? 1 : 0);
+			cur_pos += tuple_size;
+		} else if (i == VLAN_NUMBER) {
+			uint8_t vlan_tag;
+			uint8_t vlan_num;
+			if (rule->key_conf.spec.tunnel_type == 0)
+				vlan_num = rule->key_conf.vlan_num;
+			else
+				vlan_num = rule->key_conf.outer_vlan_num;
+			if (vlan_num == 1)
+				vlan_tag = HNS3_VLAN_TAG_TYPE_TAG1;
+			else if (vlan_num == VLAN_TAG_NUM_MAX)
+				vlan_tag = HNS3_VLAN_TAG_TYPE_TAG1_2;
+			else
+				vlan_tag = HNS3_VLAN_TAG_TYPE_NONE;
+			hns3_set_field(meta_data,
+				       GENMASK(cur_pos + tuple_size,
+					       cur_pos), cur_pos, vlan_tag);
+			cur_pos += tuple_size;
+		} else if (i == DST_VPORT) {
+			port_number = hns3_get_port_number(0, vf_id);
+			hns3_set_field(meta_data,
+				       GENMASK(cur_pos + tuple_size, cur_pos),
+				       cur_pos, port_number);
+			cur_pos += tuple_size;
+		}
+	}
+
+	calc_x(tmp_x, meta_data, 0xFFFF);
+	calc_y(tmp_y, meta_data, 0xFFFF);
+	shift_bits = sizeof(meta_data) * HNS3_BITS_PER_BYTE - cur_pos;
+
+	tmp_x = rte_cpu_to_le_32(tmp_x << shift_bits);
+	tmp_y = rte_cpu_to_le_32(tmp_y << shift_bits);
+	key_x[0] = tmp_x & 0xFF;
+	key_x[1] = (tmp_x >> HNS3_BITS_PER_BYTE) & 0xFF;
+	key_y[0] = tmp_y & 0xFF;
+	key_y[1] = (tmp_y >> HNS3_BITS_PER_BYTE) & 0xFF;
+}
+
+/* A complete key is the combination of the meta data key and the tuple key.
+ * The meta data key is stored in the MSB region, the tuple key in the LSB
+ * region, and unused bits are filled with 0.
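+ *
+ * For example, with the 400-bit key used by this driver the key buffer is
+ * 50 bytes: the 16-bit meta data occupies bytes 48-49 at the MSB end and
+ * the tuple key grows upward from byte 0.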
+ */
+static int hns3_config_key(struct hns3_adapter *hns,
+			   struct hns3_fdir_rule *rule)
+{
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_fd_key_cfg *key_cfg;
+	uint8_t *cur_key_x;
+	uint8_t *cur_key_y;
+	uint8_t key_x[MAX_KEY_BYTES] __attribute__((aligned(4)));
+	uint8_t key_y[MAX_KEY_BYTES] __attribute__((aligned(4)));
+	uint8_t vf_id = rule->vf_id;
+	uint8_t meta_data_region;
+	uint8_t tuple_size;
+	uint8_t i;
+	int ret;
+
+	memset(key_x, 0, sizeof(key_x));
+	memset(key_y, 0, sizeof(key_y));
+	cur_key_x = key_x;
+	cur_key_y = key_y;
+
+	key_cfg = &pf->fdir.fd_cfg.key_cfg[HNS3_FD_STAGE_1];
+	for (i = 0; i < MAX_TUPLE; i++) {
+		bool tuple_valid;
+
+		tuple_size = tuple_key_info[i].key_length / HNS3_BITS_PER_BYTE;
+		if (key_cfg->tuple_active & BIT(i)) {
+			tuple_valid = hns3_fd_convert_tuple(i, cur_key_x,
+							    cur_key_y, rule);
+			if (tuple_valid) {
+				cur_key_x += tuple_size;
+				cur_key_y += tuple_size;
+			}
+		}
+	}
+
+	meta_data_region = pf->fdir.fd_cfg.max_key_length / HNS3_BITS_PER_BYTE -
+	    MAX_META_DATA_LENGTH / HNS3_BITS_PER_BYTE;
+
+	hns3_fd_convert_meta_data(key_cfg, vf_id, rule,
+				  key_x + meta_data_region,
+				  key_y + meta_data_region);
+
+	ret = hns3_fd_tcam_config(hw, false, rule->location, key_y, true);
+	if (ret) {
+		hns3_err(hw, "Config fd key_y fail, loc=%d, ret=%d",
+			    rule->location, ret);
+		return ret;
+	}
+
+	ret = hns3_fd_tcam_config(hw, true, rule->location, key_x, true);
+	if (ret)
+		hns3_err(hw, "Config fd key_x fail, loc=%d, ret=%d",
+			    rule->location, ret);
+	return ret;
+}
+
+static int hns3_config_action(struct hns3_hw *hw, struct hns3_fdir_rule *rule)
+{
+	struct hns3_fd_ad_data ad_data;
+
+	ad_data.ad_id = rule->location;
+
+	if (rule->action == HNS3_FD_ACTION_DROP_PACKET) {
+		ad_data.drop_packet = true;
+		ad_data.forward_to_direct_queue = false;
+		ad_data.queue_id = 0;
+	} else {
+		ad_data.drop_packet = false;
+		ad_data.forward_to_direct_queue = true;
+		ad_data.queue_id = rule->queue_id;
+	}
+
+	if (unlikely(rule->flags & HNS3_RULE_FLAG_COUNTER)) {
+		ad_data.use_counter = true;
+		ad_data.counter_id = rule->act_cnt.id;
+	} else {
+		ad_data.use_counter = false;
+		ad_data.counter_id = 0;
+	}
+
+	if (unlikely(rule->flags & HNS3_RULE_FLAG_FDID))
+		ad_data.rule_id = rule->fd_id;
+	else
+		ad_data.rule_id = rule->location;
+
+	ad_data.use_next_stage = false;
+	ad_data.next_input_key = 0;
+
+	ad_data.write_rule_id_to_bd = true;
+
+	return hns3_fd_ad_config(hw, ad_data.ad_id, &ad_data);
+}
+
+int hns3_fdir_filter_init(struct hns3_adapter *hns)
+{
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_fdir_info *fdir_info = &pf->fdir;
+	uint32_t rule_num = fdir_info->fd_cfg.rule_num[HNS3_FD_STAGE_1];
+	char fdir_hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters fdir_hash_params = {
+		.name = fdir_hash_name,
+		.entries = rule_num,
+		.key_len = sizeof(struct hns3_fdir_key_conf),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+	};
+
+	fdir_hash_params.socket_id = rte_socket_id();
+	TAILQ_INIT(&fdir_info->fdir_list);
+	rte_spinlock_init(&fdir_info->flows_lock);
+	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, "%s", hns->hw.data->name);
+	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+	if (fdir_info->hash_handle == NULL) {
+		PMD_INIT_LOG(ERR, "Create FDIR hash handle fail!");
+		return -EINVAL;
+	}
+	fdir_info->hash_map = rte_zmalloc("hns3 FDIR hash",
+					  rule_num *
+					  sizeof(struct hns3_fdir_rule_ele *),
+					  0);
+	if (fdir_info->hash_map == NULL) {
+		PMD_INIT_LOG(ERR, "Allocate memory for FDIR hash map fail!");
+		rte_hash_free(fdir_info->hash_handle);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void hns3_fdir_filter_uninit(struct hns3_adapter *hns)
+{
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_fdir_info *fdir_info = &pf->fdir;
+	struct hns3_fdir_rule_ele *fdir_filter;
+
+	rte_spinlock_lock(&fdir_info->flows_lock);
+	if (fdir_info->hash_map) {
+		rte_free(fdir_info->hash_map);
+		fdir_info->hash_map = NULL;
+	}
+	if (fdir_info->hash_handle) {
+		rte_hash_free(fdir_info->hash_handle);
+		fdir_info->hash_handle = NULL;
+	}
+	rte_spinlock_unlock(&fdir_info->flows_lock);
+
+	fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
+	while (fdir_filter) {
+		TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+		hns3_fd_tcam_config(&hns->hw, true,
+				    fdir_filter->fdir_conf.location, NULL,
+				    false);
+		rte_free(fdir_filter);
+		fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
+	}
+}
+
+/*
+ * Find a key in the hash table.
+ * @return
+ *   - Zero and positive values are key location.
+ *   - -EINVAL if the parameters are invalid.
+ *   - -ENOENT if the key is not found.
+ */
+static int hns3_fdir_filter_lookup(struct hns3_fdir_info *fdir_info,
+				    struct hns3_fdir_key_conf *key)
+{
+	hash_sig_t sig;
+	int ret;
+
+	rte_spinlock_lock(&fdir_info->flows_lock);
+	sig = rte_hash_crc(key, sizeof(*key), 0);
+	ret = rte_hash_lookup_with_hash(fdir_info->hash_handle, key, sig);
+	rte_spinlock_unlock(&fdir_info->flows_lock);
+
+	return ret;
+}
+
+static int hns3_insert_fdir_filter(struct hns3_hw *hw,
+				   struct hns3_fdir_info *fdir_info,
+				   struct hns3_fdir_rule_ele *fdir_filter)
+{
+	struct hns3_fdir_key_conf *key;
+	hash_sig_t sig;
+	int ret;
+
+	key = &fdir_filter->fdir_conf.key_conf;
+	rte_spinlock_lock(&fdir_info->flows_lock);
+	sig = rte_hash_crc(key, sizeof(*key), 0);
+	ret = rte_hash_add_key_with_hash(fdir_info->hash_handle, key, sig);
+	if (ret < 0) {
+		rte_spinlock_unlock(&fdir_info->flows_lock);
+		hns3_err(hw, "Hash table full? err:%d(%s)!", ret,
+			 strerror(-ret));
+		return ret;
+	}
+
+	fdir_info->hash_map[ret] = fdir_filter;
+	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
+	rte_spinlock_unlock(&fdir_info->flows_lock);
+
+	return ret;
+}
+
+static int hns3_remove_fdir_filter(struct hns3_hw *hw,
+				   struct hns3_fdir_info *fdir_info,
+				   struct hns3_fdir_key_conf *key)
+{
+	struct hns3_fdir_rule_ele *fdir_filter;
+	hash_sig_t sig;
+	int ret;
+
+	rte_spinlock_lock(&fdir_info->flows_lock);
+	sig = rte_hash_crc(key, sizeof(*key), 0);
+	ret = rte_hash_del_key_with_hash(fdir_info->hash_handle, key, sig);
+	if (ret < 0) {
+		rte_spinlock_unlock(&fdir_info->flows_lock);
+		hns3_err(hw, "Delete hash key fail ret=%d", ret);
+		return ret;
+	}
+
+	fdir_filter = fdir_info->hash_map[ret];
+	fdir_info->hash_map[ret] = NULL;
+	TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+	rte_spinlock_unlock(&fdir_info->flows_lock);
+
+	rte_free(fdir_filter);
+
+	return 0;
+}
+
+int hns3_fdir_filter_program(struct hns3_adapter *hns,
+			     struct hns3_fdir_rule *rule, bool del)
+{
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_fdir_info *fdir_info = &pf->fdir;
+	struct hns3_fdir_rule_ele *node;
+	struct hns3_hw *hw = &hns->hw;
+	int ret;
+
+	if (del) {
+		ret = hns3_fd_tcam_config(hw, true, rule->location, NULL,
+					  false);
+		if (ret)
+			hns3_err(hw, "Failed to delete fdir: %d src_ip:%x "
+				 "dst_ip:%x src_port:%d dst_port:%d",
+				 rule->location,
+				 rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID],
+				 rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID],
+				 rule->key_conf.spec.src_port,
+				 rule->key_conf.spec.dst_port);
+		else
+			hns3_remove_fdir_filter(hw, fdir_info, &rule->key_conf);
+
+		return ret;
+	}
+
+	ret = hns3_fdir_filter_lookup(fdir_info, &rule->key_conf);
+	if (ret >= 0) {
+		hns3_err(hw, "Conflict with existing fdir loc: %d", ret);
+		return -EINVAL;
+	}
+
+	node = rte_zmalloc("hns3 fdir rule", sizeof(struct hns3_fdir_rule_ele),
+			   0);
+	if (node == NULL) {
+		hns3_err(hw, "Failed to allocate fdir_rule memory");
+		return -ENOMEM;
+	}
+
+	rte_memcpy(&node->fdir_conf, rule, sizeof(struct hns3_fdir_rule));
+	ret = hns3_insert_fdir_filter(hw, fdir_info, node);
+	if (ret < 0) {
+		rte_free(node);
+		return ret;
+	}
+	rule->location = ret;
+	node->fdir_conf.location = ret;
+
+	rte_spinlock_lock(&fdir_info->flows_lock);
+	ret = hns3_config_action(hw, rule);
+	if (!ret)
+		ret = hns3_config_key(hns, rule);
+	rte_spinlock_unlock(&fdir_info->flows_lock);
+	if (ret) {
+		hns3_err(hw, "Failed to config fdir: %d src_ip:%x dst_ip:%x "
+			 "src_port:%d dst_port:%d",
+			 rule->location,
+			 rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID],
+			 rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID],
+			 rule->key_conf.spec.src_port,
+			 rule->key_conf.spec.dst_port);
+		(void)hns3_remove_fdir_filter(hw, fdir_info, &rule->key_conf);
+	}
+
+	return ret;
+}
+
+/* remove all the flow director filters */
+int hns3_clear_all_fdir_filter(struct hns3_adapter *hns)
+{
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_fdir_info *fdir_info = &pf->fdir;
+	struct hns3_fdir_rule_ele *fdir_filter;
+	struct hns3_hw *hw = &hns->hw;
+	int ret = 0;
+
+	/* flush flow director */
+	rte_spinlock_lock(&fdir_info->flows_lock);
+	rte_hash_reset(fdir_info->hash_handle);
+	rte_spinlock_unlock(&fdir_info->flows_lock);
+
+	fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
+	while (fdir_filter) {
+		TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+		ret += hns3_fd_tcam_config(hw, true,
+					   fdir_filter->fdir_conf.location,
+					   NULL, false);
+		rte_free(fdir_filter);
+		fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
+	}
+
+	if (ret) {
+		hns3_err(hw, "Fail to delete FDIR filter!");
+		ret = -EIO;
+	}
+	return ret;
+}
+
+int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
+{
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_fdir_info *fdir_info = &pf->fdir;
+	struct hns3_fdir_rule_ele *fdir_filter;
+	struct hns3_hw *hw = &hns->hw;
+	bool err = false;
+	int ret;
+
+	TAILQ_FOREACH(fdir_filter, &fdir_info->fdir_list, entries) {
+		ret = hns3_config_action(hw, &fdir_filter->fdir_conf);
+		if (!ret)
+			ret = hns3_config_key(hns, &fdir_filter->fdir_conf);
+		if (ret) {
+			err = true;
+			if (ret == -EBUSY)
+				break;
+		}
+	}
+
+	if (err) {
+		hns3_err(hw, "Fail to restore FDIR filter!");
+		return -EIO;
+	}
+	return 0;
+}
+
+int hns3_get_count(struct hns3_hw *hw, uint32_t id, uint64_t *value)
+{
+	struct hns3_fd_get_cnt_cmd *req;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FD_COUNTER_OP, true);
+
+	req = (struct hns3_fd_get_cnt_cmd *)desc.data;
+	req->stage = HNS3_FD_STAGE_1;
+	req->index = rte_cpu_to_le_32(id);
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "Read counter fail, ret=%d", ret);
+		return ret;
+	}
+
+	*value = req->value;
+
+	return ret;
+}
diff --git a/drivers/net/hns3/hns3_fdir.h b/drivers/net/hns3/hns3_fdir.h
new file mode 100644
index 0000000..4825086
--- /dev/null
+++ b/drivers/net/hns3/hns3_fdir.h
@@ -0,0 +1,203 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2019 Hisilicon Limited.
+ */
+
+#ifndef _HNS3_FDIR_H_
+#define _HNS3_FDIR_H_
+
+#include <rte_flow.h>
+
+struct hns3_fd_key_cfg {
+	uint8_t key_sel;
+	uint8_t inner_sipv6_word_en;
+	uint8_t inner_dipv6_word_en;
+	uint8_t outer_sipv6_word_en;
+	uint8_t outer_dipv6_word_en;
+	uint32_t tuple_active;
+	uint32_t meta_data_active;
+};
+
+enum HNS3_FD_STAGE {
+	HNS3_FD_STAGE_1,
+	HNS3_FD_STAGE_2,
+	HNS3_FD_STAGE_NUM,
+};
+
+enum HNS3_FD_ACTION {
+	HNS3_FD_ACTION_ACCEPT_PACKET,
+	HNS3_FD_ACTION_DROP_PACKET,
+};
+
+struct hns3_fd_cfg {
+	uint8_t fd_mode;
+	uint16_t max_key_length;
+	uint32_t rule_num[HNS3_FD_STAGE_NUM]; /* rule entry number */
+	uint16_t cnt_num[HNS3_FD_STAGE_NUM];  /* rule hit counter number */
+	struct hns3_fd_key_cfg key_cfg[HNS3_FD_STAGE_NUM];
+};
+
+/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet
+ * INNER_XXX indicates tuples in the tunneled header of a tunnel packet or
+ *           tuples of a non-tunnel packet
+ */
+enum HNS3_FD_TUPLE {
+	OUTER_DST_MAC,
+	OUTER_SRC_MAC,
+	OUTER_VLAN_TAG_FST,
+	OUTER_VLAN_TAG_SEC,
+	OUTER_ETH_TYPE,
+	OUTER_L2_RSV,
+	OUTER_IP_TOS,
+	OUTER_IP_PROTO,
+	OUTER_SRC_IP,
+	OUTER_DST_IP,
+	OUTER_L3_RSV,
+	OUTER_SRC_PORT,
+	OUTER_DST_PORT,
+	OUTER_L4_RSV,
+	OUTER_TUN_VNI,
+	OUTER_TUN_FLOW_ID,
+	INNER_DST_MAC,
+	INNER_SRC_MAC,
+	INNER_VLAN_TAG1,
+	INNER_VLAN_TAG2,
+	INNER_ETH_TYPE,
+	INNER_L2_RSV,
+	INNER_IP_TOS,
+	INNER_IP_PROTO,
+	INNER_SRC_IP,
+	INNER_DST_IP,
+	INNER_L3_RSV,
+	INNER_SRC_PORT,
+	INNER_DST_PORT,
+	INNER_SCTP_TAG,
+	MAX_TUPLE,
+};
+
+#define VLAN_TAG_NUM_MAX 2
+#define VNI_OR_TNI_LEN 3
+#define IP_ADDR_LEN    4 /* Length of IPv6 address in 32-bit words. */
+#define IP_ADDR_KEY_ID 3 /* The last 32 bits of the IP address as FDIR search key */
+#define IPV6_ADDR_WORD_MASK 3 /* The last two words of IPv6 as FDIR search key */
+
+struct hns3_fd_rule_tuples {
+	uint8_t src_mac[RTE_ETHER_ADDR_LEN];
+	uint8_t dst_mac[RTE_ETHER_ADDR_LEN];
+	uint32_t src_ip[IP_ADDR_LEN];
+	uint32_t dst_ip[IP_ADDR_LEN];
+	uint16_t src_port;
+	uint16_t dst_port;
+	uint16_t vlan_tag1;
+	uint16_t vlan_tag2;
+	uint16_t ether_type;
+	uint8_t ip_tos;
+	uint8_t ip_proto;
+	uint32_t sctp_tag;
+	uint16_t outer_src_port;
+	uint16_t tunnel_type;
+	uint16_t outer_ether_type;
+	uint8_t outer_proto;
+	uint8_t outer_tun_vni[VNI_OR_TNI_LEN];
+	uint8_t outer_tun_flow_id;
+};
+
+struct hns3_fd_ad_data {
+	uint16_t ad_id;
+	uint8_t drop_packet;
+	uint8_t forward_to_direct_queue;
+	uint16_t queue_id;
+	uint8_t use_counter;
+	uint8_t counter_id;
+	uint8_t use_next_stage;
+	uint8_t write_rule_id_to_bd;
+	uint8_t next_input_key;
+	uint16_t rule_id;
+};
+
+struct hns3_flow_counter {
+	LIST_ENTRY(hns3_flow_counter) next; /* Pointer to the next counter. */
+	uint32_t shared:1;   /* Share counter ID with other flow rules. */
+	uint32_t ref_cnt:31; /* Reference counter. */
+	uint16_t id;   /* Counter ID. */
+	uint64_t hits; /* Number of packets matched by the rule. */
+};
+
+#define HNS3_RULE_FLAG_FDID		0x1
+#define HNS3_RULE_FLAG_VF_ID		0x2
+#define HNS3_RULE_FLAG_COUNTER		0x4
+
+struct hns3_fdir_key_conf {
+	struct hns3_fd_rule_tuples spec;
+	struct hns3_fd_rule_tuples mask;
+	uint8_t vlan_num;
+	uint8_t outer_vlan_num;
+};
+
+struct hns3_fdir_rule {
+	struct hns3_fdir_key_conf key_conf;
+	uint32_t input_set;
+	uint32_t flags;
+	uint32_t fd_id; /* APP marked unique value for this rule. */
+	uint8_t action;
+	/* VF id, available when flags contain HNS3_RULE_FLAG_VF_ID. */
+	uint8_t vf_id;
+	uint16_t queue_id;
+	uint16_t location;
+	struct rte_flow_action_count act_cnt;
+};
+
+/* FDIR filter list structure */
+struct hns3_fdir_rule_ele {
+	TAILQ_ENTRY(hns3_fdir_rule_ele) entries;
+	struct hns3_fdir_rule fdir_conf;
+};
+/* rss filter list structure */
+struct hns3_rss_conf_ele {
+	TAILQ_ENTRY(hns3_rss_conf_ele) entries;
+	struct hns3_rss_conf filter_info;
+};
+/* hns3_flow memory list structure */
+struct hns3_flow_mem {
+	TAILQ_ENTRY(hns3_flow_mem) entries;
+	struct rte_flow *flow;
+};
+
+TAILQ_HEAD(hns3_fdir_rule_list, hns3_fdir_rule_ele);
+TAILQ_HEAD(hns3_rss_filter_list, hns3_rss_conf_ele);
+TAILQ_HEAD(hns3_flow_mem_list, hns3_flow_mem);
+
+struct hns3_process_private {
+	struct hns3_fdir_rule_list fdir_list;
+	struct hns3_rss_filter_list filter_rss_list;
+	struct hns3_flow_mem_list flow_list;
+};
+
+/*
+ *  A structure used to define fields of a FDIR related info.
+ */
+struct hns3_fdir_info {
+	rte_spinlock_t flows_lock;
+	struct hns3_fdir_rule_list fdir_list;
+	struct hns3_fdir_rule_ele **hash_map;
+	struct rte_hash *hash_handle;
+	struct hns3_fd_cfg fd_cfg;
+};
+
+struct rte_flow {
+	enum rte_filter_type filter_type;
+	void *rule;
+	uint32_t counter_id;
+};
+struct hns3_adapter;
+
+int hns3_init_fd_config(struct hns3_adapter *hns);
+int hns3_fdir_filter_init(struct hns3_adapter *hns);
+void hns3_fdir_filter_uninit(struct hns3_adapter *hns);
+int hns3_fdir_filter_program(struct hns3_adapter *hns,
+			     struct hns3_fdir_rule *rule, bool del);
+int hns3_clear_all_fdir_filter(struct hns3_adapter *hns);
+int hns3_get_count(struct hns3_hw *hw, uint32_t id, uint64_t *value);
+void hns3_filterlist_init(struct rte_eth_dev *dev);
+int hns3_restore_all_fdir_filter(struct hns3_adapter *hns);
+
+#endif /* _HNS3_FDIR_H_ */
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
new file mode 100644
index 0000000..98ed818
--- /dev/null
+++ b/drivers/net/hns3/hns3_flow.c
@@ -0,0 +1,1450 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2019 Hisilicon Limited.
+ */
+
+#include <stdbool.h>
+#include <sys/queue.h>
+#include <rte_flow_driver.h>
+#include <rte_io.h>
+#include <rte_malloc.h>
+
+#include "hns3_cmd.h"
+#include "hns3_fdir.h"
+#include "hns3_ethdev.h"
+#include "hns3_logs.h"
+
+/* Default RSS hash key */
+static uint8_t hns3_hash_key[] = {
+	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
+static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
+static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
+
+/* Special Filter id for non-specific packet flagging. Don't change value */
+#define HNS3_MAX_FILTER_ID	0x0FFF
+
+#define ETHER_TYPE_MASK		0xFFFF
+#define IPPROTO_MASK		0xFF
+#define TUNNEL_TYPE_MASK	0xFFFF
+
+#define HNS3_TUNNEL_TYPE_VXLAN		0x12B5
+#define HNS3_TUNNEL_TYPE_VXLAN_GPE	0x12B6
+#define HNS3_TUNNEL_TYPE_GENEVE		0x17C1
+#define HNS3_TUNNEL_TYPE_NVGRE		0x6558
+
+static enum rte_flow_item_type first_items[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_GENEVE,
+	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+	RTE_FLOW_ITEM_TYPE_MPLS
+};
+
+static enum rte_flow_item_type L2_next_items[] = {
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_IPV6
+};
+
+	/* With the max 400-bit key, tuples for ether type can also be supported */
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ICMP
+};
+
+static enum rte_flow_item_type L4_next_items[] = {
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_GENEVE,
+	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+	RTE_FLOW_ITEM_TYPE_MPLS
+};
+
+static enum rte_flow_item_type tunnel_next_items[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_VLAN
+};
+
+struct items_step_mngr {
+	enum rte_flow_item_type *items;
+	int count;
+};
+
+static inline void
+net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++)
+		dst[i] = rte_be_to_cpu_32(src[i]);
+}
+
+static inline const struct rte_flow_action *
+find_rss_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *next = &actions[0];
+
+	for (; next->type != RTE_FLOW_ACTION_TYPE_END; next++) {
+		if (next->type == RTE_FLOW_ACTION_TYPE_RSS)
+			return next;
+	}
+	return NULL;
+}
+
+static inline struct hns3_flow_counter *
+hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_flow_counter *cnt;
+
+	LIST_FOREACH(cnt, &pf->flow_counters, next) {
+		if (cnt->id == id)
+			return cnt;
+	}
+	return NULL;
+}
+
+static int
+hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
+		 struct rte_flow_error *error)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_flow_counter *cnt;
+
+	cnt = hns3_counter_lookup(dev, id);
+	if (cnt) {
+		if (!cnt->shared || cnt->shared != shared)
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  cnt,
+						  "Counter id is used, shared flag does not match");
+		cnt->ref_cnt++;
+		return 0;
+	}
+
+	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
+	if (cnt == NULL)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_ACTION, cnt,
+					  "Alloc mem for counter failed");
+	cnt->id = id;
+	cnt->shared = shared;
+	cnt->ref_cnt = 1;
+	cnt->hits = 0;
+	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
+	return 0;
+}
+
+static int
+hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+		   struct rte_flow_query_count *qc,
+		   struct rte_flow_error *error)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_flow_counter *cnt;
+	uint64_t value;
+	int ret;
+
+	/* FDIR is available only in PF driver */
+	if (hns->is_vf)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "Fdir is not supported in VF");
+	cnt = hns3_counter_lookup(dev, flow->counter_id);
+	if (cnt == NULL)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Can't find counter id");
+
+	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Read counter fail.");
+		return ret;
+	}
+	qc->hits_set = 1;
+	qc->hits = value;
+
+	return 0;
+}
+
+static int
+hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_flow_counter *cnt;
+
+	cnt = hns3_counter_lookup(dev, id);
+	if (cnt == NULL) {
+		hns3_err(hw, "Can't find available counter to release");
+		return -EINVAL;
+	}
+	cnt->ref_cnt--;
+	if (cnt->ref_cnt == 0) {
+		LIST_REMOVE(cnt, next);
+		rte_free(cnt);
+	}
+	return 0;
+}
+
+static void
+hns3_counter_flush(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_pf *pf = &hns->pf;
+	struct hns3_flow_counter *cnt_ptr;
+
+	cnt_ptr = LIST_FIRST(&pf->flow_counters);
+	while (cnt_ptr) {
+		LIST_REMOVE(cnt_ptr, next);
+		rte_free(cnt_ptr);
+		cnt_ptr = LIST_FIRST(&pf->flow_counters);
+	}
+}
+
+static int
+hns3_handle_action_queue(struct rte_eth_dev *dev,
+			 const struct rte_flow_action *action,
+			 struct hns3_fdir_rule *rule,
+			 struct rte_flow_error *error)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	const struct rte_flow_action_queue *queue;
+
+	queue = (const struct rte_flow_action_queue *)action->conf;
+	if (queue->index >= hw->data->nb_rx_queues)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "Invalid queue ID in PF");
+	rule->queue_id = queue->index;
+	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
+	return 0;
+}
+
+/*
+ * Parse the actions structure from the provided list of actions.
+ * The actions are validated as they are parsed.
+ *
+ * @param actions[in]
+ * @param rule[out]
+ *   NIC specific actions derived from the flow actions.
+ * @param error[out]
+ */
+static int
+hns3_handle_actions(struct rte_eth_dev *dev,
+		    const struct rte_flow_action actions[],
+		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	const struct rte_flow_action_count *act_count;
+	const struct rte_flow_action_mark *mark;
+	struct hns3_pf *pf = &hns->pf;
+	uint32_t counter_num;
+	int ret;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			ret = hns3_handle_action_queue(dev, actions, rule,
+						       error);
+			if (ret)
+				return ret;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule->action = HNS3_FD_ACTION_DROP_PACKET;
+			break;
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			mark =
+			    (const struct rte_flow_action_mark *)actions->conf;
+			if (mark->id >= HNS3_MAX_FILTER_ID)
+				return rte_flow_error_set(error, EINVAL,
+						     RTE_FLOW_ERROR_TYPE_ACTION,
+						     actions,
+						     "Invalid Mark ID");
+			rule->fd_id = mark->id;
+			rule->flags |= HNS3_RULE_FLAG_FDID;
+			break;
+		case RTE_FLOW_ACTION_TYPE_FLAG:
+			rule->fd_id = HNS3_MAX_FILTER_ID;
+			rule->flags |= HNS3_RULE_FLAG_FDID;
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			act_count =
+			    (const struct rte_flow_action_count *)actions->conf;
+			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
+			if (act_count->id >= counter_num)
+				return rte_flow_error_set(error, EINVAL,
+						     RTE_FLOW_ERROR_TYPE_ACTION,
+						     actions,
+						     "Invalid counter id");
+			rule->act_cnt = *act_count;
+			rule->flags |= HNS3_RULE_FLAG_COUNTER;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  NULL, "Unsupported action");
+		}
+	}
+
+	return 0;
+}
+
+/* Parse to get the attr and action info of flow director rule. */
+static int
+hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
+{
+	if (!attr->ingress)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+					  attr, "Ingress can't be zero");
+	if (attr->egress)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+					  attr, "Not support egress");
+	if (attr->transfer)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+					  attr, "No support for transfer");
+	if (attr->priority)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+					  attr, "Not support priority");
+	if (attr->group)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+					  attr, "Not support group");
+	return 0;
+}
+
+static int
+hns3_parse_eth(const struct rte_flow_item *item,
+		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
+{
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+
+	if (item->spec == NULL && item->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Can't configure FDIR with mask but without spec");
+
+	/* Only used to describe the protocol stack. */
+	if (item->spec == NULL && item->mask == NULL)
+		return 0;
+
+	if (item->mask) {
+		eth_mask = item->mask;
+		if (eth_mask->type) {
+			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
+			rule->key_conf.mask.ether_type =
+			    rte_be_to_cpu_16(eth_mask->type);
+		}
+		if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
+			memcpy(rule->key_conf.mask.src_mac,
+			       eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
+		}
+		if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
+			memcpy(rule->key_conf.mask.dst_mac,
+			       eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
+		}
+	}
+
+	eth_spec = item->spec;
+	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
+	memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
+	       RTE_ETHER_ADDR_LEN);
+	memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
+	       RTE_ETHER_ADDR_LEN);
+	return 0;
+}
+
+static int
+hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+
+	if (item->spec == NULL && item->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Can't configure FDIR with mask but without spec");
+
+	rule->key_conf.vlan_num++;
+	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Vlan number exceeds 2");
+
+	/* Only used to describe the protocol stack. */
+	if (item->spec == NULL && item->mask == NULL)
+		return 0;
+
+	if (item->mask) {
+		vlan_mask = item->mask;
+		if (vlan_mask->tci) {
+			if (rule->key_conf.vlan_num == 1) {
+				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
+					     1);
+				rule->key_conf.mask.vlan_tag1 =
+				    rte_be_to_cpu_16(vlan_mask->tci);
+			} else {
+				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
+					     1);
+				rule->key_conf.mask.vlan_tag2 =
+				    rte_be_to_cpu_16(vlan_mask->tci);
+			}
+		}
+	}
+
+	vlan_spec = item->spec;
+	if (rule->key_conf.vlan_num == 1)
+		rule->key_conf.spec.vlan_tag1 =
+		    rte_be_to_cpu_16(vlan_spec->tci);
+	else
+		rule->key_conf.spec.vlan_tag2 =
+		    rte_be_to_cpu_16(vlan_spec->tci);
+	return 0;
+}
+
+static int
+hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv4 *ipv4_spec;
+	const struct rte_flow_item_ipv4 *ipv4_mask;
+
+	if (item->spec == NULL && item->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Can't configure FDIR with mask but without spec");
+
+	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
+	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
+	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
+	/* Only used to describe the protocol stack. */
+	if (item->spec == NULL && item->mask == NULL)
+		return 0;
+
+	if (item->mask) {
+		ipv4_mask = item->mask;
+
+		if (ipv4_mask->hdr.total_length ||
+		    ipv4_mask->hdr.packet_id ||
+		    ipv4_mask->hdr.fragment_offset ||
+		    ipv4_mask->hdr.time_to_live ||
+		    ipv4_mask->hdr.hdr_checksum) {
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "Only support src & dst ip, tos, proto in IPV4");
+		}
+
+		if (ipv4_mask->hdr.src_addr) {
+			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
+			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
+			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
+		}
+
+		if (ipv4_mask->hdr.dst_addr) {
+			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
+			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
+			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
+		}
+
+		if (ipv4_mask->hdr.type_of_service) {
+			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
+			rule->key_conf.mask.ip_tos =
+			    ipv4_mask->hdr.type_of_service;
+		}
+
+		if (ipv4_mask->hdr.next_proto_id) {
+			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
+			rule->key_conf.mask.ip_proto =
+			    ipv4_mask->hdr.next_proto_id;
+		}
+	}
+
+	ipv4_spec = item->spec;
+	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
+	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
+	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
+	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
+	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
+	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
+	return 0;
+}
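+
+/*
+ * For example (illustrative values): an IPV4 item with
+ * spec->hdr.src_addr = 192.168.1.20 and mask->hdr.src_addr = 0xFFFFFFFF
+ * sets the INNER_SRC_IP bit in rule->input_set and stores the address in
+ * host byte order in rule->key_conf.
+ */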
+
+static int
+hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv6 *ipv6_spec;
+	const struct rte_flow_item_ipv6 *ipv6_mask;
+
+	if (item->spec == NULL && item->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Can't configure FDIR with mask but without spec");
+
+	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
+	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
+	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
+
+	/* Only used to describe the protocol stack. */
+	if (item->spec == NULL && item->mask == NULL)
+		return 0;
+
+	if (item->mask) {
+		ipv6_mask = item->mask;
+		if (ipv6_mask->hdr.vtc_flow ||
+		    ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "Only support src & dst ip, proto in IPV6");
+		}
+		net_addr_to_host(rule->key_conf.mask.src_ip,
+				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
+				 IP_ADDR_LEN);
+		net_addr_to_host(rule->key_conf.mask.dst_ip,
+				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
+				 IP_ADDR_LEN);
+		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
+		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
+			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
+		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
+			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
+		if (ipv6_mask->hdr.proto)
+			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
+	}
+
+	ipv6_spec = item->spec;
+	net_addr_to_host(rule->key_conf.spec.src_ip,
+			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
+			 IP_ADDR_LEN);
+	net_addr_to_host(rule->key_conf.spec.dst_ip,
+			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
+			 IP_ADDR_LEN);
+	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
+
+	return 0;
+}
+
+static int
+hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+	       struct rte_flow_error *error)
+{
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+
+	if (item->spec == NULL && item->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Can't configure FDIR with mask but without spec");
+
+	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
+	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
+	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
+
+	/* Only used to describe the protocol stack. */
+	if (item->spec == NULL && item->mask == NULL)
+		return 0;
+
+	if (item->mask) {
+		tcp_mask = item->mask;
+		if (tcp_mask->hdr.sent_seq ||
+		    tcp_mask->hdr.recv_ack ||
+		    tcp_mask->hdr.data_off ||
+		    tcp_mask->hdr.tcp_flags ||
+		    tcp_mask->hdr.rx_win ||
+		    tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "Only support src & dst port in TCP");
+		}
+
+		if (tcp_mask->hdr.src_port) {
+			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
+			rule->key_conf.mask.src_port =
+			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
+		}
+		if (tcp_mask->hdr.dst_port) {
+			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
+			rule->key_conf.mask.dst_port =
+			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
+		}
+	}
+
+	tcp_spec = item->spec;
+	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
+	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
+
+	return 0;
+}
+
+static int
+hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+	       struct rte_flow_error *error)
+{
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+
+	if (item->spec == NULL && item->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Can't configure FDIR with mask but without spec");
+
+	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
+	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
+	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
+	/* Only used to describe the protocol stack. */
+	if (item->spec == NULL && item->mask == NULL)
+		return 0;
+
+	if (item->mask) {
+		udp_mask = item->mask;
+		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "Only support src & dst port in UDP");
+		}
+		if (udp_mask->hdr.src_port) {
+			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
+			rule->key_conf.mask.src_port =
+			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
+		}
+		if (udp_mask->hdr.dst_port) {
+			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
+			rule->key_conf.mask.dst_port =
+			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
+		}
+	}
+
+	udp_spec = item->spec;
+	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
+	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
+
+	return 0;
+}
+
+static int
+hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+		struct rte_flow_error *error)
+{
+	const struct rte_flow_item_sctp *sctp_spec;
+	const struct rte_flow_item_sctp *sctp_mask;
+
+	if (item->spec == NULL && item->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Can't configure FDIR with mask but without spec");
+
+	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
+	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
+	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
+
+	/* Only used to describe the protocol stack. */
+	if (item->spec == NULL && item->mask == NULL)
+		return 0;
+
+	if (item->mask) {
+		sctp_mask = item->mask;
+		if (sctp_mask->hdr.cksum)
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "Only support src & dst port in SCTP");
+
+		if (sctp_mask->hdr.src_port) {
+			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
+			rule->key_conf.mask.src_port =
+			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
+		}
+		if (sctp_mask->hdr.dst_port) {
+			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
+			rule->key_conf.mask.dst_port =
+			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
+		}
+		if (sctp_mask->hdr.tag) {
+			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
+			rule->key_conf.mask.sctp_tag =
+			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
+		}
+	}
+
+	sctp_spec = item->spec;
+	rule->key_conf.spec.src_port =
+	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
+	rule->key_conf.spec.dst_port =
+	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
+	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
+
+	return 0;
+}
+
+/*
+ * Check items before the tunnel item, save the inner configs to the outer
+ * configs, and clear the inner configs.
+ * The key consists of two parts: meta_data and tuple keys.
+ * Meta data uses 15 bits, including vlan_num(2bit), dst_port(12bit) and
+ * tunnel packet(1bit).
+ * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
+ * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
+ * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
+ * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
+ * vlantag2(16bit) and sctp-tag(32bit).
+ */
+static int
+hns3_handle_tunnel(const struct rte_flow_item *item,
+		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
+{
+	/* check eth config */
+	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item, "Outer eth mac is unsupported");
+	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
+		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
+		rule->key_conf.spec.outer_ether_type =
+		    rule->key_conf.spec.ether_type;
+		rule->key_conf.mask.outer_ether_type =
+		    rule->key_conf.mask.ether_type;
+		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
+		rule->key_conf.spec.ether_type = 0;
+		rule->key_conf.mask.ether_type = 0;
+	}
+
+	/* check vlan config */
+	if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "Outer vlan tags are unsupported");
+
+	/* clear vlan_num for inner vlan select */
+	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
+	rule->key_conf.vlan_num = 0;
+
+	/* check L3 config */
+	if (rule->input_set &
+	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item, "Outer ip is unsupported");
+	if (rule->input_set & BIT(INNER_IP_PROTO)) {
+		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
+		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
+		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
+		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
+		rule->key_conf.spec.ip_proto = 0;
+		rule->key_conf.mask.ip_proto = 0;
+	}
+
+	/* check L4 config */
+	if (rule->input_set & BIT(INNER_SCTP_TAG))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Outer sctp tag is unsupported");
+
+	if (rule->input_set & BIT(INNER_SRC_PORT)) {
+		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
+		rule->key_conf.spec.outer_src_port =
+		    rule->key_conf.spec.src_port;
+		rule->key_conf.mask.outer_src_port =
+		    rule->key_conf.mask.src_port;
+		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
+		rule->key_conf.spec.src_port = 0;
+		rule->key_conf.mask.src_port = 0;
+	}
+	if (rule->input_set & BIT(INNER_DST_PORT)) {
+		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
+		rule->key_conf.spec.dst_port = 0;
+		rule->key_conf.mask.dst_port = 0;
+	}
+	return 0;
+}
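+
+/*
+ * For example (illustrative): with a pattern "eth / ipv4 / udp / vxlan /
+ * ...", the ether type, IP proto and source port collected before the vxlan
+ * item are moved here from the inner key fields to the outer ones (the
+ * inner dst port is simply cleared), leaving the inner fields free for the
+ * items that follow the tunnel item.
+ */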
+
+static int
+hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+		 struct rte_flow_error *error)
+{
+	const struct rte_flow_item_vxlan *vxlan_spec;
+	const struct rte_flow_item_vxlan *vxlan_mask;
+
+	if (item->spec == NULL && item->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Can't configure FDIR with mask but without spec");
+	else if (item->spec && (item->mask == NULL))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Tunnel packets must be configured with mask");
+
+	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
+	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
+	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
+	else
+		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
+
+	/* Only used to describe the protocol stack. */
+	if (item->spec == NULL && item->mask == NULL)
+		return 0;
+
+	vxlan_mask = item->mask;
+	vxlan_spec = item->spec;
+
+	if (vxlan_mask->flags)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Flags are not supported in VxLAN");
+
+	/* VNI must be totally masked or not. */
+	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
+	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "VNI must be totally masked or not in VxLAN");
+	if (vxlan_mask->vni[0]) {
+		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
+		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
+			   VNI_OR_TNI_LEN);
+	}
+	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
+		   VNI_OR_TNI_LEN);
+	return 0;
+}
+
+static int
+hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+		 struct rte_flow_error *error)
+{
+	const struct rte_flow_item_nvgre *nvgre_spec;
+	const struct rte_flow_item_nvgre *nvgre_mask;
+
+	if (item->spec == NULL && item->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Can't configure FDIR with mask but without spec");
+	else if (item->spec && (item->mask == NULL))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Tunnel packets must be configured with mask");
+
+	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
+	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
+	rule->key_conf.mask.outer_proto = IPPROTO_MASK;
+
+	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
+	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
+	rule->key_conf.mask.tunnel_type = ~HNS3_TUNNEL_TYPE_NVGRE;
+	/* Only used to describe the protocol stack. */
+	if (item->spec == NULL && item->mask == NULL)
+		return 0;
+
+	nvgre_mask = item->mask;
+	nvgre_spec = item->spec;
+
+	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Ver/protocol is not supported in NVGRE");
+
+	/* TNI must be totally masked or not. */
+	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
+	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "TNI must be totally masked or not in NVGRE");
+
+	if (nvgre_mask->tni[0]) {
+		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
+		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
+			   VNI_OR_TNI_LEN);
+	}
+	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
+		   VNI_OR_TNI_LEN);
+
+	if (nvgre_mask->flow_id) {
+		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
+		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
+	}
+	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
+	return 0;
+}
+
+static int
+hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+		  struct rte_flow_error *error)
+{
+	const struct rte_flow_item_geneve *geneve_spec;
+	const struct rte_flow_item_geneve *geneve_mask;
+
+	if (item->spec == NULL && item->mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Can't configure FDIR with mask but without spec");
+	else if (item->spec && (item->mask == NULL))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Tunnel packets must be configured with mask");
+
+	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
+	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
+	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
+	/* Only used to describe the protocol stack. */
+	if (item->spec == NULL && item->mask == NULL)
+		return 0;
+
+	geneve_mask = item->mask;
+	geneve_spec = item->spec;
+
+	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "Ver/protocol is not supported in GENEVE");
+	/* VNI must be totally masked or not. */
+	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
+	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "VNI must be totally masked or not in GENEVE");
+	if (geneve_mask->vni[0]) {
+		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
+		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
+			   VNI_OR_TNI_LEN);
+	}
+	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
+		   VNI_OR_TNI_LEN);
+	return 0;
+}
+
+static int
+hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+		  struct rte_flow_error *error)
+{
+	int ret;
+
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+		ret = hns3_parse_vxlan(item, rule, error);
+		break;
+	case RTE_FLOW_ITEM_TYPE_NVGRE:
+		ret = hns3_parse_nvgre(item, rule, error);
+		break;
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		ret = hns3_parse_geneve(item, rule, error);
+		break;
+	default:
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_HANDLE,
+					  NULL, "Unsupported tunnel type!");
+	}
+	if (ret)
+		return ret;
+	return hns3_handle_tunnel(item, rule, error);
+}
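+
+/*
+ * An illustrative tunnel rule (testpmd syntax; not taken from this patch,
+ * and assuming the step tables accept inner items after the tunnel item):
+ *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 100 /
+ *        eth / ipv4 src is 10.0.0.1 / end actions queue index 2 / end
+ * The vxlan item is parsed here, and hns3_handle_tunnel() then re-homes
+ * the fields collected before it into the outer key fields.
+ */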
+
+static int
+hns3_parse_normal(const struct rte_flow_item *item,
+		  struct hns3_fdir_rule *rule,
+		  struct items_step_mngr *step_mngr,
+		  struct rte_flow_error *error)
+{
+	int ret;
+
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		ret = hns3_parse_eth(item, rule, error);
+		step_mngr->items = L2_next_items;
+		step_mngr->count = ARRAY_SIZE(L2_next_items);
+		break;
+	case RTE_FLOW_ITEM_TYPE_VLAN:
+		ret = hns3_parse_vlan(item, rule, error);
+		step_mngr->items = L2_next_items;
+		step_mngr->count = ARRAY_SIZE(L2_next_items);
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		ret = hns3_parse_ipv4(item, rule, error);
+		step_mngr->items = L3_next_items;
+		step_mngr->count = ARRAY_SIZE(L3_next_items);
+		break;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		ret = hns3_parse_ipv6(item, rule, error);
+		step_mngr->items = L3_next_items;
+		step_mngr->count = ARRAY_SIZE(L3_next_items);
+		break;
+	case RTE_FLOW_ITEM_TYPE_TCP:
+		ret = hns3_parse_tcp(item, rule, error);
+		step_mngr->items = L4_next_items;
+		step_mngr->count = ARRAY_SIZE(L4_next_items);
+		break;
+	case RTE_FLOW_ITEM_TYPE_UDP:
+		ret = hns3_parse_udp(item, rule, error);
+		step_mngr->items = L4_next_items;
+		step_mngr->count = ARRAY_SIZE(L4_next_items);
+		break;
+	case RTE_FLOW_ITEM_TYPE_SCTP:
+		ret = hns3_parse_sctp(item, rule, error);
+		step_mngr->items = L4_next_items;
+		step_mngr->count = ARRAY_SIZE(L4_next_items);
+		break;
+	default:
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_HANDLE,
+					  NULL, "Unsupported normal type!");
+	}
+
+	return ret;
+}
+
+static int
+hns3_validate_item(const struct rte_flow_item *item,
+		   struct items_step_mngr step_mngr,
+		   struct rte_flow_error *error)
+{
+	int i;
+
+	if (item->last)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
+					  "Not supported last point for range");
+
+	for (i = 0; i < step_mngr.count; i++) {
+		if (item->type == step_mngr.items[i])
+			break;
+	}
+
+	if (i == step_mngr.count) {
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item, "Invalid or missing item");
+	}
+	return 0;
+}
+
+static inline bool
+is_tunnel_packet(enum rte_flow_item_type type)
+{
+	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
+	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
+	    type == RTE_FLOW_ITEM_TYPE_GENEVE ||
+	    type == RTE_FLOW_ITEM_TYPE_MPLS)
+		return true;
+	return false;
+}
+
+/*
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule, and
+ * get the flow director filter info as well.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4 or IPV6.
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item could be UDP or TCP or SCTP (optional).
+ * The next not void item must be END.
+ * MAC VLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be MAC VLAN.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP/SCTP pattern example:
+ * ITEM		Spec			Mask
+ * ETH		NULL			NULL
+ * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
+ *		dst_addr 192.167.3.50	0xFFFFFFFF
+ * UDP/TCP/SCTP	src_port	80	0xFFFF
+ *		dst_port	80	0xFFFF
+ * END
+ * MAC VLAN pattern example:
+ * ITEM		Spec			Mask
+ * ETH		dst_addr
+ *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
+ *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
+ * MAC VLAN	tci	0x2016		0xEFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+hns3_parse_fdir_filter(struct rte_eth_dev *dev,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct hns3_fdir_rule *rule,
+		       struct rte_flow_error *error)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	const struct rte_flow_item *item;
+	struct items_step_mngr step_mngr;
+	int ret;
+
+	/* FDIR is available only in PF driver */
+	if (hns->is_vf)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "Fdir not supported in VF");
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+					  "fdir_conf.mode isn't perfect");
+
+	step_mngr.items = first_items;
+	step_mngr.count = ARRAY_SIZE(first_items);
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
+			continue;
+
+		ret = hns3_validate_item(item, step_mngr, error);
+		if (ret)
+			return ret;
+
+		if (is_tunnel_packet(item->type)) {
+			ret = hns3_parse_tunnel(item, rule, error);
+			if (ret)
+				return ret;
+			step_mngr.items = tunnel_next_items;
+			step_mngr.count = ARRAY_SIZE(tunnel_next_items);
+		} else {
+			ret = hns3_parse_normal(item, rule, &step_mngr, error);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return hns3_handle_actions(dev, actions, rule, error);
+}
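+
+/*
+ * An illustrative testpmd equivalent of the UDP pattern example above
+ * (values are examples only):
+ *   flow create 0 ingress pattern eth / ipv4 src is 192.168.1.20
+ *        dst is 192.167.3.50 / udp src is 80 dst is 80 / end
+ *        actions queue index 1 / end
+ */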
+
+void
+hns3_filterlist_init(struct rte_eth_dev *dev)
+{
+	struct hns3_process_private *process_list = dev->process_private;
+
+	TAILQ_INIT(&process_list->fdir_list);
+	TAILQ_INIT(&process_list->filter_rss_list);
+	TAILQ_INIT(&process_list->flow_list);
+}
+
+static void
+hns3_filterlist_flush(struct rte_eth_dev *dev)
+{
+	struct hns3_process_private *process_list = dev->process_private;
+	struct hns3_fdir_rule_ele *fdir_rule_ptr;
+	struct hns3_rss_conf_ele *rss_filter_ptr;
+	struct hns3_flow_mem *flow_node;
+
+	fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
+	while (fdir_rule_ptr) {
+		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
+		rte_free(fdir_rule_ptr);
+		fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
+	}
+
+	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+	while (rss_filter_ptr) {
+		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
+			     entries);
+		rte_free(rss_filter_ptr);
+		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+	}
+
+	flow_node = TAILQ_FIRST(&process_list->flow_list);
+	while (flow_node) {
+		TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
+		rte_free(flow_node->flow);
+		rte_free(flow_node);
+		flow_node = TAILQ_FIRST(&process_list->flow_list);
+	}
+}
+
+static int
+hns3_flow_args_check(const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     struct rte_flow_error *error)
+{
+	if (pattern == NULL)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+					  NULL, "NULL pattern.");
+
+	if (actions == NULL)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+					  NULL, "NULL action.");
+
+	if (attr == NULL)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ATTR,
+					  NULL, "NULL attribute.");
+
+	return hns3_check_attr(attr, error);
+}
+
+/*
+ * Check if the flow rule is supported by hns3.
+ * It only checks the format. It does not guarantee that the rule can be
+ * programmed into the HW, because there may not be enough room for it.
+ */
+static int
+hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct hns3_fdir_rule fdir_rule;
+	int ret;
+
+	ret = hns3_flow_args_check(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
+	return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
+}
+
+/*
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the filter which it hits first.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct hns3_process_private *process_list = dev->process_private;
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_fdir_rule_ele *fdir_rule_ptr;
+	struct hns3_flow_mem *flow_node;
+	struct rte_flow *flow;
+	struct hns3_fdir_rule fdir_rule;
+	int ret;
+
+	ret = hns3_flow_args_check(attr, pattern, actions, error);
+	if (ret)
+		return NULL;
+
+	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
+	if (flow == NULL) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate flow memory");
+		return NULL;
+	}
+	flow_node = rte_zmalloc("hns3 flow node",
+				sizeof(struct hns3_flow_mem), 0);
+	if (flow_node == NULL) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate flow list memory");
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow_node->flow = flow;
+	TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
+
+	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
+	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
+	if (ret)
+		goto out;
+
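+	/* Create the counter first; the filter is programmed only afterwards. */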
+	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
+		ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
+				       fdir_rule.act_cnt.id, error);
+		if (ret)
+			goto out;
+
+		flow->counter_id = fdir_rule.act_cnt.id;
+	}
+	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
+	if (!ret) {
+		fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
+					    sizeof(struct hns3_fdir_rule_ele),
+					    0);
+		if (fdir_rule_ptr == NULL) {
+			hns3_err(hw, "Failed to allocate fdir_rule memory");
+			ret = -ENOMEM;
+			goto err;
+		}
+		memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
+			sizeof(struct hns3_fdir_rule));
+		TAILQ_INSERT_TAIL(&process_list->fdir_list,
+				  fdir_rule_ptr, entries);
+		flow->rule = fdir_rule_ptr;
+		flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+		return flow;
+	}
+
+	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
+		hns3_counter_release(dev, fdir_rule.act_cnt.id);
+
+err:
+	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow");
+out:
+	TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
+	rte_free(flow_node);
+	rte_free(flow);
+	return NULL;
+}
+
+/* Destroy a flow rule on hns3. */
+static int
+hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct hns3_process_private *process_list = dev->process_private;
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_fdir_rule_ele *fdir_rule_ptr;
+	struct hns3_flow_mem *flow_node;
+	struct hns3_hw *hw = &hns->hw;
+	enum rte_filter_type filter_type;
+	struct hns3_fdir_rule fdir_rule;
+	int ret;
+
+	if (flow == NULL)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE,
+					  flow, "Flow is NULL");
+	filter_type = flow->filter_type;
+	switch (filter_type) {
+	case RTE_ETH_FILTER_FDIR:
+		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
+		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
+			   sizeof(struct hns3_fdir_rule));
+
+		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
+		if (ret)
+			return rte_flow_error_set(error, EIO,
+						  RTE_FLOW_ERROR_TYPE_HANDLE,
+						  flow,
+						  "Failed to destroy FDIR rule, try again");
+		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
+			hns3_counter_release(dev, fdir_rule.act_cnt.id);
+		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
+		rte_free(fdir_rule_ptr);
+		fdir_rule_ptr = NULL;
+		break;
+	default:
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
+					  "Unsupported filter type");
+	}
+
+	TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
+		if (flow_node->flow == flow) {
+			TAILQ_REMOVE(&process_list->flow_list, flow_node,
+				     entries);
+			rte_free(flow_node);
+			flow_node = NULL;
+			break;
+		}
+	}
+	rte_free(flow);
+	flow = NULL;
+
+	return 0;
+}
+
+/* Destroy all flow rules associated with a port on hns3. */
+static int
+hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	int ret;
+
+	/* FDIR is available only in PF driver */
+	if (!hns->is_vf) {
+		ret = hns3_clear_all_fdir_filter(hns);
+		if (ret) {
+			rte_flow_error_set(error, ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL, "Failed to flush rule");
+			return ret;
+		}
+		hns3_counter_flush(dev);
+	}
+
+	hns3_filterlist_flush(dev);
+
+	return 0;
+}
+
+/* Query an existing flow rule. */
+static int
+hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+		const struct rte_flow_action *actions, void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *qc;
+	int ret;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			qc = (struct rte_flow_query_count *)data;
+			ret = hns3_counter_query(dev, flow, qc, error);
+			if (ret)
+				return ret;
+			break;
+		default:
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  actions,
+						  "Query action only supports count");
+		}
+	}
+	return 0;
+}
+
+const struct rte_flow_ops hns3_flow_ops = {
+	.validate = hns3_flow_validate,
+	.create = hns3_flow_create,
+	.destroy = hns3_flow_destroy,
+	.flush = hns3_flow_flush,
+	.query = hns3_flow_query,
+	.isolate = NULL,
+};
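+
+/*
+ * Usage sketch (application side, illustrative): these callbacks are
+ * reached through the generic rte_flow API, e.g.:
+ *   struct rte_flow_error err;
+ *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
+ *           flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
+ * rte_flow_create() dispatches to hns3_flow_create() above once the PMD
+ * returns hns3_flow_ops for RTE_ETH_FILTER_GENERIC.
+ */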