[03/12] net/ice: enable input set configuration

Message ID 20190906120058.108073-4-yahui.cao@intel.com (mailing list archive)
State Superseded, archived
Delegated to: xiaolong ye
Series: net/ice: add ice Flow Director driver

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     apply issues

Commit Message

Cao, Yahui Sept. 6, 2019, noon UTC
  From: Beilei Xing <beilei.xing@intel.com>

Configure the input set, including:
 - Parse input set.
 - Check the segment.
 - Create profile.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/ice/ice_ethdev.h      |   3 +
 drivers/net/ice/ice_fdir_filter.c | 245 ++++++++++++++++++++++++++++++
 2 files changed, 248 insertions(+)
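
For context, a caller in the flow director rule-create path is expected to configure
the input set roughly as follows (a sketch only; the actual call site is added by
later patches in this series):

	uint64_t input_set = ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST |
			     ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT;
	int ret;

	ret = ice_fdir_input_set_conf(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP, input_set);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to configure input set.");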
  

Comments

Xiaolong Ye Sept. 7, 2019, 12:32 p.m. UTC | #1
On 09/06, Yahui Cao wrote:
>From: Beilei Xing <beilei.xing@intel.com>
>
>Configure the input set, including:
> - Parse input set.
> - Check the segment.
> - Create profile.

I'd prefer more descriptive sentences in the commit log than a few bullets.

>
>Signed-off-by: Beilei Xing <beilei.xing@intel.com>
>---
> drivers/net/ice/ice_ethdev.h      |   3 +
> drivers/net/ice/ice_fdir_filter.c | 245 ++++++++++++++++++++++++++++++
> 2 files changed, 248 insertions(+)
>
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index c43242b63..ea68858d1 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -366,6 +366,9 @@ struct ice_vsi *
> ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
> int
> ice_release_vsi(struct ice_vsi *vsi);
>+int
>+ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
>+			uint64_t input_set);
> 
> static inline int
> ice_align_floor(int n)
>diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
>index 451ef92b2..0840c3b4b 100644
>--- a/drivers/net/ice/ice_fdir_filter.c
>+++ b/drivers/net/ice/ice_fdir_filter.c
>@@ -148,6 +148,251 @@ ice_fdir_teardown(struct ice_pf *pf)
> 	pf->fdir.fdir_vsi = NULL;
> }
> 
>+static void
>+ice_fdir_rm_prof(struct ice_hw *hw, enum ice_fltr_ptype ptype)
>+{
>+	struct ice_fd_hw_prof *hw_prof = hw->fdir_prof[ptype];
>+	uint64_t prof_id;
>+	uint16_t vsi_num;
>+	int tun;
>+	int i;
>+
>+	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
>+		return;
>+
>+	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
>+		if (!hw_prof->fdir_seg[tun])
>+			break;
>+		prof_id = ptype + tun * ICE_FLTR_PTYPE_MAX;
>+		for (i = 0; i < hw_prof->cnt; i++) {
>+			if (hw_prof->entry_h[i][tun]) {
>+				vsi_num = ice_get_hw_vsi_num(hw,
>+							     hw_prof->vsi_h[i]);
>+				ice_rem_prof_id_flow(hw, ICE_BLK_FD,
>+						     vsi_num, ptype);
>+				ice_flow_rem_entry(hw,
>+						   hw_prof->entry_h[i][tun]);
>+				hw_prof->entry_h[i][tun] = 0;
>+			}
>+		}
>+		ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
>+		rte_free(hw_prof->fdir_seg[tun]);
>+		hw_prof->fdir_seg[tun] = NULL;
>+	}
>+	for (i = 0; i < hw_prof->cnt; i++)
>+		hw_prof->vsi_h[i] = 0;

memset(hw_prof->vsi_h, 0, sizeof(*hw_prof->vsi_h) * hw_prof->cnt);

>+	hw_prof->cnt = 0;
>+}
>+
>+static int
>+ice_fdir_cfg_hw_tbl(struct ice_pf *pf, struct ice_vsi *vsi,
>+		    struct ice_vsi *ctrl_vsi,
>+		    struct ice_flow_seg_info *seg,
>+		    enum ice_fltr_ptype ptype,
>+		    bool is_tunnel)
>+{
>+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
>+	enum ice_flow_dir dir = ICE_FLOW_RX;
>+	struct ice_flow_seg_info *ori_seg;
>+	struct ice_fd_hw_prof *hw_prof;
>+	struct ice_flow_prof *prof;
>+	uint64_t entry_1 = 0;
>+	uint64_t entry_2 = 0;
>+	uint16_t vsi_num;
>+	int ret;
>+	uint64_t prof_id;
>+
>+	if (!hw->fdir_prof) {
>+		hw->fdir_prof = (struct ice_fd_hw_prof **)
>+			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
>+				   sizeof(*hw->fdir_prof));
>+		if (!hw->fdir_prof)
>+			return -ENOMEM;
>+	}
>+	if (!hw->fdir_prof[ptype]) {
>+		hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
>+			ice_malloc(hw, sizeof(**hw->fdir_prof));
>+		if (!hw->fdir_prof[ptype])
>+			return -ENOMEM;
>+	}

when will we free hw->fdir_prof and hw->fdir_prof[ptype]?
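
A sketch of one possible cleanup (the helper name is illustrative, not part of this
patch), which could be called from ice_fdir_teardown() or the uninit path:

	static void
	ice_fdir_prof_free(struct ice_hw *hw)
	{
		int ptype;

		if (!hw->fdir_prof)
			return;

		/* rte_free() tolerates NULL, so unallocated ptypes are fine */
		for (ptype = 0; ptype < ICE_FLTR_PTYPE_MAX; ptype++)
			rte_free(hw->fdir_prof[ptype]);

		rte_free(hw->fdir_prof);
		hw->fdir_prof = NULL;
	}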

>+
>+	hw_prof = hw->fdir_prof[ptype];
>+	ori_seg = hw_prof->fdir_seg[is_tunnel];
>+	if (ori_seg) {
>+		if (!memcmp(ori_seg, seg, sizeof(*seg)))
>+			return -EAGAIN;
>+		if (hw->fdir_fltr_cnt[ptype])
>+			return -EINVAL;
>+
>+		ice_fdir_rm_prof(hw, ptype);
>+	}
>+
>+	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
>+	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
>+				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
>+	if (ret)
>+		return ret;
>+	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
>+				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
>+				 seg, NULL, 0, &entry_1);
>+	if (ret) {
>+		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
>+			    ptype);
>+		goto err_add_prof;
>+	}
>+	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
>+				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
>+				 seg, NULL, 0, &entry_2);
>+	if (ret) {
>+		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
>+			    ptype);
>+		goto err_add_entry;
>+	}
>+
>+	hw_prof->cnt = 0;
>+	hw_prof->fdir_seg[is_tunnel] = seg;
>+	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
>+	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
>+	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
>+	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
>+
>+	return ret;
>+
>+err_add_entry:
>+	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
>+	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
>+	ice_flow_rem_entry(hw, entry_1);
>+err_add_prof:
>+	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
>+
>+	return ret;
>+}
>+
>+static void
>+ice_parse_input_set(uint64_t inset, enum ice_flow_field *field)
>+{
>+	uint32_t i, j;
>+
>+	struct ice_inset_map {
>+		uint64_t inset;
>+		enum ice_flow_field fld;
>+	};
>+	static const struct ice_inset_map ice_inset_map[] = {
>+		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
>+		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
>+		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
>+		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
>+		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
>+		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
>+		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
>+		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
>+		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
>+		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
>+	};
>+
>+	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
>+		if (inset & ice_inset_map[i].inset)
>+			field[j++] = ice_inset_map[i].fld;
>+	}
>+}
>+
>+int
>+ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
>+			uint64_t input_set)
>+{
>+	struct ice_flow_seg_info *seg, *seg_tun;
>+	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
>+	int i, ret;
>+
>+	if (!input_set)
>+		return -EINVAL;
>+
>+	seg = (struct ice_flow_seg_info *)
>+		ice_malloc(hw, sizeof(*seg));
>+	if (!seg) {
>+		PMD_DRV_LOG(ERR, "No memory can be allocated");
>+		return -ENOMEM;
>+	}
>+
>+	seg_tun = (struct ice_flow_seg_info *)
>+		ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
>+	if (!seg_tun) {
>+		PMD_DRV_LOG(ERR, "No memory can be allocated");
>+		rte_free(seg);
>+		return -ENOMEM;
>+	}
>+
>+	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
>+		field[i] = ICE_FLOW_FIELD_IDX_MAX;

memset(field, ICE_FLOW_FIELD_IDX_MAX, ICE_FLOW_FIELD_IDX_MAX) ?
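
(One caveat: memset() fills bytes, so every element only ends up equal to
ICE_FLOW_FIELD_IDX_MAX if that value's byte pattern repeats, and the size argument
would need to be in bytes, e.g.:

	memset(field, ICE_FLOW_FIELD_IDX_MAX, sizeof(field));

otherwise the existing assignment loop is the safer form.)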

>+	ice_parse_input_set(input_set, field);
>+
>+	switch (flow) {
>+	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
>+				  ICE_FLOW_SEG_HDR_IPV4);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
>+				  ICE_FLOW_SEG_HDR_IPV4);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
>+				  ICE_FLOW_SEG_HDR_IPV4);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
>+				  ICE_FLOW_SEG_HDR_IPV6);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
>+				  ICE_FLOW_SEG_HDR_IPV6);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
>+				  ICE_FLOW_SEG_HDR_IPV6);
>+		break;
>+	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
>+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
>+		break;
>+	default:
>+		PMD_DRV_LOG(ERR, "not supported filter type.");
>+		break;
>+	}
>+
>+	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
>+		ice_flow_set_fld(seg, field[i],
>+				 ICE_FLOW_FLD_OFF_INVAL,
>+				 ICE_FLOW_FLD_OFF_INVAL,
>+				 ICE_FLOW_FLD_OFF_INVAL, false);
>+	}
>+
>+	ret = ice_fdir_cfg_hw_tbl(pf, pf->main_vsi, pf->fdir.fdir_vsi,
>+				  seg, flow, 0);
>+	if (ret < 0)
>+		goto FREE_SEG;
>+
>+	rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
>+	ret = ice_fdir_cfg_hw_tbl(pf, pf->main_vsi, pf->fdir.fdir_vsi,
>+				  seg_tun, flow, 1);
>+
>+	if (!ret)
>+		return ret;
>+	else if (ret < 0)
>+		goto FREE_SEG;
>+
>+FREE_SEG:

Use lowercase to keep it consistent with others.
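
i.e.:

free_seg:
	rte_free(seg);
	rte_free(seg_tun);

with the jumps updated to "goto free_seg;".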

>+	rte_free(seg);
>+	rte_free(seg_tun);
>+
>+	if (ret == -EAGAIN)
>+		return 0;
>+	else
>+		return ret;

return (ret == -EAGAIN) ? 0 : ret;

>+}
>+
> static int
> ice_init_fdir_filter(struct ice_adapter *ad)
> {
>-- 
>2.17.1
>
  

Patch

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index c43242b63..ea68858d1 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -366,6 +366,9 @@  struct ice_vsi *
 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
 int
 ice_release_vsi(struct ice_vsi *vsi);
+int
+ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
+			uint64_t input_set);
 
 static inline int
 ice_align_floor(int n)
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index 451ef92b2..0840c3b4b 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -148,6 +148,251 @@  ice_fdir_teardown(struct ice_pf *pf)
 	pf->fdir.fdir_vsi = NULL;
 }
 
+static void
+ice_fdir_rm_prof(struct ice_hw *hw, enum ice_fltr_ptype ptype)
+{
+	struct ice_fd_hw_prof *hw_prof = hw->fdir_prof[ptype];
+	uint64_t prof_id;
+	uint16_t vsi_num;
+	int tun;
+	int i;
+
+	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
+		return;
+
+	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+		if (!hw_prof->fdir_seg[tun])
+			break;
+		prof_id = ptype + tun * ICE_FLTR_PTYPE_MAX;
+		for (i = 0; i < hw_prof->cnt; i++) {
+			if (hw_prof->entry_h[i][tun]) {
+				vsi_num = ice_get_hw_vsi_num(hw,
+							     hw_prof->vsi_h[i]);
+				ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+						     vsi_num, ptype);
+				ice_flow_rem_entry(hw,
+						   hw_prof->entry_h[i][tun]);
+				hw_prof->entry_h[i][tun] = 0;
+			}
+		}
+		ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+		rte_free(hw_prof->fdir_seg[tun]);
+		hw_prof->fdir_seg[tun] = NULL;
+	}
+	for (i = 0; i < hw_prof->cnt; i++)
+		hw_prof->vsi_h[i] = 0;
+	hw_prof->cnt = 0;
+}
+
+static int
+ice_fdir_cfg_hw_tbl(struct ice_pf *pf, struct ice_vsi *vsi,
+		    struct ice_vsi *ctrl_vsi,
+		    struct ice_flow_seg_info *seg,
+		    enum ice_fltr_ptype ptype,
+		    bool is_tunnel)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	enum ice_flow_dir dir = ICE_FLOW_RX;
+	struct ice_flow_seg_info *ori_seg;
+	struct ice_fd_hw_prof *hw_prof;
+	struct ice_flow_prof *prof;
+	uint64_t entry_1 = 0;
+	uint64_t entry_2 = 0;
+	uint16_t vsi_num;
+	int ret;
+	uint64_t prof_id;
+
+	if (!hw->fdir_prof) {
+		hw->fdir_prof = (struct ice_fd_hw_prof **)
+			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
+				   sizeof(*hw->fdir_prof));
+		if (!hw->fdir_prof)
+			return -ENOMEM;
+	}
+	if (!hw->fdir_prof[ptype]) {
+		hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
+			ice_malloc(hw, sizeof(**hw->fdir_prof));
+		if (!hw->fdir_prof[ptype])
+			return -ENOMEM;
+	}
+
+	hw_prof = hw->fdir_prof[ptype];
+	ori_seg = hw_prof->fdir_seg[is_tunnel];
+	if (ori_seg) {
+		if (!memcmp(ori_seg, seg, sizeof(*seg)))
+			return -EAGAIN;
+		if (hw->fdir_fltr_cnt[ptype])
+			return -EINVAL;
+
+		ice_fdir_rm_prof(hw, ptype);
+	}
+
+	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
+	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
+				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
+	if (ret)
+		return ret;
+	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
+				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
+				 seg, NULL, 0, &entry_1);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
+			    ptype);
+		goto err_add_prof;
+	}
+	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
+				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+				 seg, NULL, 0, &entry_2);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
+			    ptype);
+		goto err_add_entry;
+	}
+
+	hw_prof->cnt = 0;
+	hw_prof->fdir_seg[is_tunnel] = seg;
+	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
+	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
+	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
+	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
+
+	return ret;
+
+err_add_entry:
+	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+	ice_flow_rem_entry(hw, entry_1);
+err_add_prof:
+	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+
+	return ret;
+}
+
+static void
+ice_parse_input_set(uint64_t inset, enum ice_flow_field *field)
+{
+	uint32_t i, j;
+
+	struct ice_inset_map {
+		uint64_t inset;
+		enum ice_flow_field fld;
+	};
+	static const struct ice_inset_map ice_inset_map[] = {
+		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
+		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
+		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
+		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
+		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
+		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
+		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
+		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
+		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
+		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
+	};
+
+	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
+		if (inset & ice_inset_map[i].inset)
+			field[j++] = ice_inset_map[i].fld;
+	}
+}
+
+int
+ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
+			uint64_t input_set)
+{
+	struct ice_flow_seg_info *seg, *seg_tun;
+	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
+	int i, ret;
+
+	if (!input_set)
+		return -EINVAL;
+
+	seg = (struct ice_flow_seg_info *)
+		ice_malloc(hw, sizeof(*seg));
+	if (!seg) {
+		PMD_DRV_LOG(ERR, "No memory can be allocated");
+		return -ENOMEM;
+	}
+
+	seg_tun = (struct ice_flow_seg_info *)
+		ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
+	if (!seg_tun) {
+		PMD_DRV_LOG(ERR, "No memory can be allocated");
+		rte_free(seg);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
+		field[i] = ICE_FLOW_FIELD_IDX_MAX;
+	ice_parse_input_set(input_set, field);
+
+	switch (flow) {
+	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
+				  ICE_FLOW_SEG_HDR_IPV4);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
+				  ICE_FLOW_SEG_HDR_IPV4);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
+				  ICE_FLOW_SEG_HDR_IPV4);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
+				  ICE_FLOW_SEG_HDR_IPV6);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
+				  ICE_FLOW_SEG_HDR_IPV6);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
+				  ICE_FLOW_SEG_HDR_IPV6);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "not supported filter type.");
+		break;
+	}
+
+	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
+		ice_flow_set_fld(seg, field[i],
+				 ICE_FLOW_FLD_OFF_INVAL,
+				 ICE_FLOW_FLD_OFF_INVAL,
+				 ICE_FLOW_FLD_OFF_INVAL, false);
+	}
+
+	ret = ice_fdir_cfg_hw_tbl(pf, pf->main_vsi, pf->fdir.fdir_vsi,
+				  seg, flow, 0);
+	if (ret < 0)
+		goto FREE_SEG;
+
+	rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
+	ret = ice_fdir_cfg_hw_tbl(pf, pf->main_vsi, pf->fdir.fdir_vsi,
+				  seg_tun, flow, 1);
+
+	if (!ret)
+		return ret;
+	else if (ret < 0)
+		goto FREE_SEG;
+
+FREE_SEG:
+	rte_free(seg);
+	rte_free(seg_tun);
+
+	if (ret == -EAGAIN)
+		return 0;
+	else
+		return ret;
+}
+
 static int
 ice_init_fdir_filter(struct ice_adapter *ad)
 {