[v8,4/4] net/ice: enable protocol agnostic flow offloading in FDIR

Message ID 20211101083612.2380503-5-junfeng.guo@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Headers
Series enable protocol agnostic flow offloading in FDIR |

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/Intel-compilation warning apply issues

Commit Message

Junfeng Guo Nov. 1, 2021, 8:36 a.m. UTC
  Protocol agnostic flow offloading in Flow Director is enabled by this
patch based on the Parser Library, using existing rte_flow raw API.

Note that the raw flow requires:
1. byte string of raw target packet bits.
2. byte string of mask of target packet.

Here is an example:
FDIR matching ipv4 dst addr with 1.2.3.4 and redirect to queue 3:

flow create 0 ingress pattern raw \
pattern spec \
00000000000000000000000008004500001400004000401000000000000001020304 \
pattern mask \
000000000000000000000000000000000000000000000000000000000000ffffffff \
/ end actions queue index 3 / mark id 3 / end

Note that the mask of some key bits (e.g., 0x0800 to indicate the ipv4
proto) is optional in our cases. To avoid redundancy, we just omit the
mask of 0x0800 (with 0xFFFF) in the mask byte string example. The prefix
'0x' for the spec and mask byte (hex) strings is also omitted here.

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/rel_notes/release_21_11.rst |   1 +
 drivers/net/ice/ice_ethdev.h           |  14 ++
 drivers/net/ice/ice_fdir_filter.c      | 235 +++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.c     |   7 +
 drivers/net/ice/ice_generic_flow.h     |   3 +
 5 files changed, 260 insertions(+)
  

Comments

Qi Zhang Nov. 1, 2021, 11:56 p.m. UTC | #1
> -----Original Message-----
> From: Guo, Junfeng <junfeng.guo@intel.com>
> Sent: Monday, November 1, 2021 4:36 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Yigit, Ferruh <ferruh.yigit@intel.com>; Xu, Ting
> <ting.xu@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> Subject: [PATCH v8 4/4] net/ice: enable protocol agnostic flow offloading in
> FDIR
> 
> Protocol agnostic flow offloading in Flow Director is enabled by this patch
> based on the Parser Library, using existing rte_flow raw API.
> 
> Note that the raw flow requires:
> 1. byte string of raw target packet bits.
> 2. byte string of mask of target packet.
> 
> Here is an example:
> FDIR matching ipv4 dst addr with 1.2.3.4 and redirect to queue 3:
> 
> flow create 0 ingress pattern raw \
> pattern spec \
> 0000000000000000000000000800450000140000400040100000000000000
> 1020304 \ pattern mask \
> 000000000000000000000000000000000000000000000000000000000000ff
> ffffff \ / end actions queue index 3 / mark id 3 / end
> 
> Note that mask of some key bits (e.g., 0x0800 to indicate ipv4 proto) is
> optional in our cases. To avoid redundancy, we just omit the mask of 0x0800
> (with 0xFFFF) in the mask byte string example. The prefix '0x' for the spec and
> mask byte (hex) strings are also omitted here.
> 
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> ---
>  doc/guides/rel_notes/release_21_11.rst |   1 +
>  drivers/net/ice/ice_ethdev.h           |  14 ++
>  drivers/net/ice/ice_fdir_filter.c      | 235 +++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.c     |   7 +
>  drivers/net/ice/ice_generic_flow.h     |   3 +
>  5 files changed, 260 insertions(+)
> 
> diff --git a/doc/guides/rel_notes/release_21_11.rst
> b/doc/guides/rel_notes/release_21_11.rst
> index 98d50a160b..36fdee0a98 100644
> --- a/doc/guides/rel_notes/release_21_11.rst
> +++ b/doc/guides/rel_notes/release_21_11.rst
> @@ -167,6 +167,7 @@ New Features
> 
>  * **Updated Intel ice driver.**
> 
> +  * Added protocol agnostic flow offloading support in Flow Director.
>    * Added 1PPS out support by a devargs.
>    * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
>    * Added DEV_RX_OFFLOAD_TIMESTAMP support.
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index
> 0e42c4c063..bbfeb0cc23 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -318,6 +318,11 @@ struct ice_fdir_filter_conf {
>  	uint64_t input_set_o; /* used for non-tunnel or tunnel outer fields */
>  	uint64_t input_set_i; /* only for tunnel inner fields */
>  	uint32_t mark_flag;
> +
> +	struct ice_parser_profile *prof;
> +	const u8 *pkt_buf;
> +	bool parser_ena;
> +	u8 pkt_len;
>  };
> 
>  #define ICE_MAX_FDIR_FILTER_NUM		(1024 * 16)
> @@ -487,6 +492,14 @@ struct ice_devargs {
>  	uint8_t pps_out_ena;
>  };
> 
> +/**
> + * Structure to store fdir fv entry.
> + */
> +struct ice_fdir_prof_info {
> +	struct ice_parser_profile prof;
> +	u64 fdir_actived_cnt;
> +};
> +
>  /**
>   * Structure to store private data for each PF/VF instance.
>   */
> @@ -510,6 +523,7 @@ struct ice_adapter {
>  	struct rte_timecounter tx_tstamp_tc;
>  	bool ptp_ena;
>  	uint64_t time_hw;
> +	struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
>  #ifdef RTE_ARCH_X86
>  	bool rx_use_avx2;
>  	bool rx_use_avx512;
> diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
> index bd627e3aa8..888f0dea6d 100644
> --- a/drivers/net/ice/ice_fdir_filter.c
> +++ b/drivers/net/ice/ice_fdir_filter.c
> @@ -107,6 +107,7 @@
>  	ICE_INSET_NAT_T_ESP_SPI)
> 
>  static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
> +	{pattern_raw,					ICE_INSET_NONE,
> 	ICE_INSET_NONE,			ICE_INSET_NONE},
>  	{pattern_ethertype,				ICE_FDIR_INSET_ETH,
> 	ICE_INSET_NONE,			ICE_INSET_NONE},
>  	{pattern_eth_ipv4,				ICE_FDIR_INSET_ETH_IPV4,
> 	ICE_INSET_NONE,			ICE_INSET_NONE},
>  	{pattern_eth_ipv4_udp,				ICE_FDIR_INSET_ETH_IPV4_UDP,
> 	ICE_INSET_NONE,			ICE_INSET_NONE},
> @@ -1188,6 +1189,24 @@ ice_fdir_is_tunnel_profile(enum
> ice_fdir_tunnel_type tunnel_type)
>  		return 0;
>  }
> 
> +static int
> +ice_fdir_add_del_raw(struct ice_pf *pf,
> +		     struct ice_fdir_filter_conf *filter,
> +		     bool add)
> +{
> +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> +
> +	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
> +	rte_memcpy(pkt, filter->pkt_buf, filter->pkt_len);
> +
> +	struct ice_fltr_desc desc;
> +	memset(&desc, 0, sizeof(desc));
> +	filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
> +	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
> +
> +	return ice_fdir_programming(pf, &desc); }
> +
>  static int
>  ice_fdir_add_del_filter(struct ice_pf *pf,
>  			struct ice_fdir_filter_conf *filter, @@ -1303,6 +1322,72 @@
> ice_fdir_create_filter(struct ice_adapter *ad,
>  	struct ice_fdir_fltr_pattern key;
>  	bool is_tun;
>  	int ret;
> +	int i;
> +
> +	if (filter->parser_ena) {
> +		struct ice_hw *hw = ICE_PF_TO_HW(pf);
> +
> +		int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
> +		int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
> +		u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
> +		u16 main_vsi = pf->main_vsi->idx;
> +		bool fv_found = false;
> +
> +		struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
> +		if (pi->fdir_actived_cnt != 0) {
> +			for (i = 0; i < ICE_MAX_FV_WORDS; i++)
> +				if (pi->prof.fv[i].proto_id !=
> +				    filter->prof->fv[i].proto_id ||
> +				    pi->prof.fv[i].offset !=
> +				    filter->prof->fv[i].offset ||
> +				    pi->prof.fv[i].msk !=
> +				    filter->prof->fv[i].msk)
> +					break;
> +			if (i == ICE_MAX_FV_WORDS) {
> +				fv_found = true;
> +				pi->fdir_actived_cnt++;
> +			}
> +		}
> +
> +		if (!fv_found) {
> +			ret = ice_flow_set_hw_prof(hw, main_vsi, ctrl_vsi,
> +						   filter->prof, ICE_BLK_FD);
> +			if (ret)
> +				return -rte_errno;
> +		}
> +
> +		ret = ice_fdir_add_del_raw(pf, filter, true);
> +		if (ret)
> +			return -rte_errno;
> +
> +		if (!fv_found) {
> +			for (i = 0; i < filter->prof->fv_num; i++) {
> +				pi->prof.fv[i].proto_id =
> +					filter->prof->fv[i].proto_id;
> +				pi->prof.fv[i].offset =
> +					filter->prof->fv[i].offset;
> +				pi->prof.fv[i].spec = filter->prof->fv[i].spec;
> +				pi->prof.fv[i].msk = filter->prof->fv[i].msk;
> +			}
> +			pi->fdir_actived_cnt = 1;
> +		}
> +
> +		if (filter->mark_flag == 1)
> +			ice_fdir_rx_parsing_enable(ad, 1);
> +
> +		entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
> +		if (!entry)
> +			return -rte_errno;
> +
> +		rte_memcpy(entry, filter, sizeof(*filter));
> +
> +		filter->prof = NULL;
> +		filter->pkt_buf = NULL;

Should we free the filter here, as a copy of it has already been assigned to flow->rule?

Actually the filter is assigned by meta, and it is created during parse_pattern_action, and assumed to be freed in create_filter.

Or we can assign meta to flow->rule directly; then we only need to free it during destroy.

> +
> +		flow->rule = entry;
> +
> +		return 0;
> +	}
> 
>  	ice_fdir_extract_fltr_key(&key, filter);
>  	node = ice_fdir_entry_lookup(fdir_info, &key); @@ -1397,6 +1482,49
> @@ ice_fdir_destroy_filter(struct ice_adapter *ad,
> 
>  	filter = (struct ice_fdir_filter_conf *)flow->rule;
> 
> +	if (filter->parser_ena) {
> +		struct ice_hw *hw = ICE_PF_TO_HW(pf);
> +
> +		int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
> +		int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
> +		u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
> +		u16 main_vsi = pf->main_vsi->idx;
> +		u16 vsi_num;
> +
> +		ret = ice_fdir_add_del_raw(pf, filter, false);
> +		if (ret)
> +			return -rte_errno;
> +
> +		struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
> +		if (pi->fdir_actived_cnt != 0) {
> +			pi->fdir_actived_cnt--;
> +			if (!pi->fdir_actived_cnt) {
> +				vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi);
> +				ret = ice_rem_prof_id_flow(hw, ICE_BLK_FD,
> +							   vsi_num, id);
> +				if (ret)
> +					return -rte_errno;
> +
> +				vsi_num = ice_get_hw_vsi_num(hw, main_vsi);
> +				ret = ice_rem_prof_id_flow(hw, ICE_BLK_FD,
> +							   vsi_num, id);
> +				if (ret)
> +					return -rte_errno;
> +			}
> +		}
> +
> +		if (filter->mark_flag == 1)
> +			ice_fdir_rx_parsing_enable(ad, 0);
> +
> +		flow->rule = NULL;
> +		filter->prof = NULL;
> +		filter->pkt_buf = NULL;

Should we free the pkt_buf and prof before assigning them to NULL?
They are created during parse_pattern but are never freed before this point.

> +
> +		rte_free(filter);
> +
> +		return 0;
> +	}
> +
  
Junfeng Guo Nov. 2, 2021, 2:44 a.m. UTC | #2
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Tuesday, November 2, 2021 07:56
> To: Guo, Junfeng <junfeng.guo@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Yigit, Ferruh <ferruh.yigit@intel.com>; Xu, Ting
> <Ting.Xu@intel.com>
> Subject: RE: [PATCH v8 4/4] net/ice: enable protocol agnostic flow
> offloading in FDIR
> 
> 
> 
> > -----Original Message-----
> > From: Guo, Junfeng <junfeng.guo@intel.com>
> > Sent: Monday, November 1, 2021 4:36 PM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>;
> > Xing, Beilei <beilei.xing@intel.com>
> > Cc: dev@dpdk.org; Yigit, Ferruh <ferruh.yigit@intel.com>; Xu, Ting
> > <ting.xu@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> > Subject: [PATCH v8 4/4] net/ice: enable protocol agnostic flow
> offloading in
> > FDIR
> >
> > Protocol agnostic flow offloading in Flow Director is enabled by this
> patch
> > based on the Parser Library, using existing rte_flow raw API.
> >
> > Note that the raw flow requires:
> > 1. byte string of raw target packet bits.
> > 2. byte string of mask of target packet.
> >
> > Here is an example:
> > FDIR matching ipv4 dst addr with 1.2.3.4 and redirect to queue 3:
> >
> > flow create 0 ingress pattern raw \
> > pattern spec \
> >
> 000000000000000000000000080045000014000040004010000000000000
> 0
> > 1020304 \ pattern mask \
> >
> 000000000000000000000000000000000000000000000000000000000000
> ff
> > ffffff \ / end actions queue index 3 / mark id 3 / end
> >
> > Note that mask of some key bits (e.g., 0x0800 to indicate ipv4 proto) is
> > optional in our cases. To avoid redundancy, we just omit the mask of
> 0x0800
> > (with 0xFFFF) in the mask byte string example. The prefix '0x' for the
> spec and
> > mask byte (hex) strings are also omitted here.
> >
> > Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> > ---
> >  doc/guides/rel_notes/release_21_11.rst |   1 +
> >  drivers/net/ice/ice_ethdev.h           |  14 ++
> >  drivers/net/ice/ice_fdir_filter.c      | 235 +++++++++++++++++++++++++
> >  drivers/net/ice/ice_generic_flow.c     |   7 +
> >  drivers/net/ice/ice_generic_flow.h     |   3 +
> >  5 files changed, 260 insertions(+)
> >
> > diff --git a/doc/guides/rel_notes/release_21_11.rst
> > b/doc/guides/rel_notes/release_21_11.rst
> > index 98d50a160b..36fdee0a98 100644
> > --- a/doc/guides/rel_notes/release_21_11.rst
> > +++ b/doc/guides/rel_notes/release_21_11.rst
> > @@ -167,6 +167,7 @@ New Features
> >
> >  * **Updated Intel ice driver.**
> >
> > +  * Added protocol agnostic flow offloading support in Flow Director.
> >    * Added 1PPS out support by a devargs.
> >    * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS
> flow.
> >    * Added DEV_RX_OFFLOAD_TIMESTAMP support.
> > diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
> index
> > 0e42c4c063..bbfeb0cc23 100644
> > --- a/drivers/net/ice/ice_ethdev.h
> > +++ b/drivers/net/ice/ice_ethdev.h
> > @@ -318,6 +318,11 @@ struct ice_fdir_filter_conf {
> >  uint64_t input_set_o; /* used for non-tunnel or tunnel outer fields */
> >  uint64_t input_set_i; /* only for tunnel inner fields */
> >  uint32_t mark_flag;
> > +
> > +struct ice_parser_profile *prof;
> > +const u8 *pkt_buf;
> > +bool parser_ena;
> > +u8 pkt_len;
> >  };
> >
> >  #define ICE_MAX_FDIR_FILTER_NUM(1024 * 16)
> > @@ -487,6 +492,14 @@ struct ice_devargs {
> >  uint8_t pps_out_ena;
> >  };
> >
> > +/**
> > + * Structure to store fdir fv entry.
> > + */
> > +struct ice_fdir_prof_info {
> > +struct ice_parser_profile prof;
> > +u64 fdir_actived_cnt;
> > +};
> > +
> >  /**
> >   * Structure to store private data for each PF/VF instance.
> >   */
> > @@ -510,6 +523,7 @@ struct ice_adapter {
> >  struct rte_timecounter tx_tstamp_tc;
> >  bool ptp_ena;
> >  uint64_t time_hw;
> > +struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
> >  #ifdef RTE_ARCH_X86
> >  bool rx_use_avx2;
> >  bool rx_use_avx512;
> > diff --git a/drivers/net/ice/ice_fdir_filter.c
> b/drivers/net/ice/ice_fdir_filter.c
> > index bd627e3aa8..888f0dea6d 100644
> > --- a/drivers/net/ice/ice_fdir_filter.c
> > +++ b/drivers/net/ice/ice_fdir_filter.c
> > @@ -107,6 +107,7 @@
> >  ICE_INSET_NAT_T_ESP_SPI)
> >
> >  static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
> > +{pattern_raw,ICE_INSET_NONE,
> > ICE_INSET_NONE,ICE_INSET_NONE},
> >  {pattern_ethertype,ICE_FDIR_INSET_ETH,
> > ICE_INSET_NONE,ICE_INSET_NONE},
> >  {pattern_eth_ipv4,ICE_FDIR_INSET_ETH_IPV4,
> > ICE_INSET_NONE,ICE_INSET_NONE},
> >  {pattern_eth_ipv4_udp,ICE_FDIR_INSET_ETH_IPV4_UDP,
> > ICE_INSET_NONE,ICE_INSET_NONE},
> > @@ -1188,6 +1189,24 @@ ice_fdir_is_tunnel_profile(enum
> > ice_fdir_tunnel_type tunnel_type)
> >  return 0;
> >  }
> >
> > +static int
> > +ice_fdir_add_del_raw(struct ice_pf *pf,
> > +     struct ice_fdir_filter_conf *filter,
> > +     bool add)
> > +{
> > +struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +
> > +unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
> > +rte_memcpy(pkt, filter->pkt_buf, filter->pkt_len);
> > +
> > +struct ice_fltr_desc desc;
> > +memset(&desc, 0, sizeof(desc));
> > +filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
> > +ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
> > +
> > +return ice_fdir_programming(pf, &desc); }
> > +
> >  static int
> >  ice_fdir_add_del_filter(struct ice_pf *pf,
> >  struct ice_fdir_filter_conf *filter, @@ -1303,6 +1322,72 @@
> > ice_fdir_create_filter(struct ice_adapter *ad,
> >  struct ice_fdir_fltr_pattern key;
> >  bool is_tun;
> >  int ret;
> > +int i;
> > +
> > +if (filter->parser_ena) {
> > +struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +
> > +int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
> > +int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
> > +u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
> > +u16 main_vsi = pf->main_vsi->idx;
> > +bool fv_found = false;
> > +
> > +struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
> > +if (pi->fdir_actived_cnt != 0) {
> > +for (i = 0; i < ICE_MAX_FV_WORDS; i++)
> > +if (pi->prof.fv[i].proto_id !=
> > +    filter->prof->fv[i].proto_id ||
> > +    pi->prof.fv[i].offset !=
> > +    filter->prof->fv[i].offset ||
> > +    pi->prof.fv[i].msk !=
> > +    filter->prof->fv[i].msk)
> > +break;
> > +if (i == ICE_MAX_FV_WORDS) {
> > +fv_found = true;
> > +pi->fdir_actived_cnt++;
> > +}
> > +}
> > +
> > +if (!fv_found) {
> > +ret = ice_flow_set_hw_prof(hw, main_vsi, ctrl_vsi,
> > +   filter->prof, ICE_BLK_FD);
> > +if (ret)
> > +return -rte_errno;
> > +}
> > +
> > +ret = ice_fdir_add_del_raw(pf, filter, true);
> > +if (ret)
> > +return -rte_errno;
> > +
> > +if (!fv_found) {
> > +for (i = 0; i < filter->prof->fv_num; i++) {
> > +pi->prof.fv[i].proto_id =
> > +filter->prof->fv[i].proto_id;
> > +pi->prof.fv[i].offset =
> > +filter->prof->fv[i].offset;
> > +pi->prof.fv[i].spec = filter->prof->fv[i].spec;
> > +pi->prof.fv[i].msk = filter->prof->fv[i].msk;
> > +}
> > +pi->fdir_actived_cnt = 1;
> > +}
> > +
> > +if (filter->mark_flag == 1)
> > +ice_fdir_rx_parsing_enable(ad, 1);
> > +
> > +entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
> > +if (!entry)
> > +return -rte_errno;
> > +
> > +rte_memcpy(entry, filter, sizeof(*filter));
> > +
> > +filter->prof = NULL;
> > +filter->pkt_buf = NULL;
> 
> Should we free filter here? as a copy of it already be assigned to flow-
> >rule.

Here we just reset the two members of "filter" that were allocated by us in func ice_fdir_parse_pattern.
Once the copy of "filter" to "entry" is ready, these two fields of "filter" will belong to "entry".
We set them to NULL to ensure that a later free of the meta ("filter") will not have an impact on "entry".

> 
> Actually the filter is assigned by meta, and it is created during
> parse_pattern_action, and assume to be freed in create_filter.
> 
> Or we can assign meta to flow->rule directly, then we only need to free it
> during destroy.

The "filter" is assigned with a global variable in ice_fdir_parse with "*filter = &pf->fdir.conf".
So we cannot just assign meta to flow->rule directly.
The copy of "filter" will be added into the list of flow and used later for other purposes such as destroy.

> 
> > +
> > +flow->rule = entry;
> > +
> > +return 0;
> > +}
> >
> >  ice_fdir_extract_fltr_key(&key, filter);
> >  node = ice_fdir_entry_lookup(fdir_info, &key); @@ -1397,6 +1482,49
> > @@ ice_fdir_destroy_filter(struct ice_adapter *ad,
> >
> >  filter = (struct ice_fdir_filter_conf *)flow->rule;
> >
> > +if (filter->parser_ena) {
> > +struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +
> > +int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
> > +int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
> > +u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
> > +u16 main_vsi = pf->main_vsi->idx;
> > +u16 vsi_num;
> > +
> > +ret = ice_fdir_add_del_raw(pf, filter, false);
> > +if (ret)
> > +return -rte_errno;
> > +
> > +struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
> > +if (pi->fdir_actived_cnt != 0) {
> > +pi->fdir_actived_cnt--;
> > +if (!pi->fdir_actived_cnt) {
> > +vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi);
> > +ret = ice_rem_prof_id_flow(hw, ICE_BLK_FD,
> > +   vsi_num, id);
> > +if (ret)
> > +return -rte_errno;
> > +
> > +vsi_num = ice_get_hw_vsi_num(hw, main_vsi);
> > +ret = ice_rem_prof_id_flow(hw, ICE_BLK_FD,
> > +   vsi_num, id);
> > +if (ret)
> > +return -rte_errno;
> > +}
> > +}
> > +
> > +if (filter->mark_flag == 1)
> > +ice_fdir_rx_parsing_enable(ad, 0);
> > +
> > +flow->rule = NULL;
> > +filter->prof = NULL;
> > +filter->pkt_buf = NULL;
> 
> Should we free the pkt_buf and prof before assign them to NULL.
> They are created during parse_pattern but never be freed before.

Since prof and pkt_buf are allocated by us, these two fields should also be freed by us before freeing "filter".

> 
> > +
> > +rte_free(filter);
> > +
> > +return 0;
> > +}
> > +
>
  

Patch

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 98d50a160b..36fdee0a98 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -167,6 +167,7 @@  New Features
 
 * **Updated Intel ice driver.**
 
+  * Added protocol agnostic flow offloading support in Flow Director.
   * Added 1PPS out support by a devargs.
   * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
   * Added DEV_RX_OFFLOAD_TIMESTAMP support.
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 0e42c4c063..bbfeb0cc23 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -318,6 +318,11 @@  struct ice_fdir_filter_conf {
 	uint64_t input_set_o; /* used for non-tunnel or tunnel outer fields */
 	uint64_t input_set_i; /* only for tunnel inner fields */
 	uint32_t mark_flag;
+
+	struct ice_parser_profile *prof;
+	const u8 *pkt_buf;
+	bool parser_ena;
+	u8 pkt_len;
 };
 
 #define ICE_MAX_FDIR_FILTER_NUM		(1024 * 16)
@@ -487,6 +492,14 @@  struct ice_devargs {
 	uint8_t pps_out_ena;
 };
 
+/**
+ * Structure to store fdir fv entry.
+ */
+struct ice_fdir_prof_info {
+	struct ice_parser_profile prof;
+	u64 fdir_actived_cnt;
+};
+
 /**
  * Structure to store private data for each PF/VF instance.
  */
@@ -510,6 +523,7 @@  struct ice_adapter {
 	struct rte_timecounter tx_tstamp_tc;
 	bool ptp_ena;
 	uint64_t time_hw;
+	struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
 	bool rx_use_avx512;
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index bd627e3aa8..888f0dea6d 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -107,6 +107,7 @@ 
 	ICE_INSET_NAT_T_ESP_SPI)
 
 static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
+	{pattern_raw,					ICE_INSET_NONE,			ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_ethertype,				ICE_FDIR_INSET_ETH,		ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_eth_ipv4,				ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE,			ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp,				ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE,			ICE_INSET_NONE},
@@ -1188,6 +1189,24 @@  ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
 		return 0;
 }
 
+static int
+ice_fdir_add_del_raw(struct ice_pf *pf,
+		     struct ice_fdir_filter_conf *filter,
+		     bool add)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
+	rte_memcpy(pkt, filter->pkt_buf, filter->pkt_len);
+
+	struct ice_fltr_desc desc;
+	memset(&desc, 0, sizeof(desc));
+	filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
+	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
+
+	return ice_fdir_programming(pf, &desc);
+}
+
 static int
 ice_fdir_add_del_filter(struct ice_pf *pf,
 			struct ice_fdir_filter_conf *filter,
@@ -1303,6 +1322,72 @@  ice_fdir_create_filter(struct ice_adapter *ad,
 	struct ice_fdir_fltr_pattern key;
 	bool is_tun;
 	int ret;
+	int i;
+
+	if (filter->parser_ena) {
+		struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+		int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
+		int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+		u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
+		u16 main_vsi = pf->main_vsi->idx;
+		bool fv_found = false;
+
+		struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
+		if (pi->fdir_actived_cnt != 0) {
+			for (i = 0; i < ICE_MAX_FV_WORDS; i++)
+				if (pi->prof.fv[i].proto_id !=
+				    filter->prof->fv[i].proto_id ||
+				    pi->prof.fv[i].offset !=
+				    filter->prof->fv[i].offset ||
+				    pi->prof.fv[i].msk !=
+				    filter->prof->fv[i].msk)
+					break;
+			if (i == ICE_MAX_FV_WORDS) {
+				fv_found = true;
+				pi->fdir_actived_cnt++;
+			}
+		}
+
+		if (!fv_found) {
+			ret = ice_flow_set_hw_prof(hw, main_vsi, ctrl_vsi,
+						   filter->prof, ICE_BLK_FD);
+			if (ret)
+				return -rte_errno;
+		}
+
+		ret = ice_fdir_add_del_raw(pf, filter, true);
+		if (ret)
+			return -rte_errno;
+
+		if (!fv_found) {
+			for (i = 0; i < filter->prof->fv_num; i++) {
+				pi->prof.fv[i].proto_id =
+					filter->prof->fv[i].proto_id;
+				pi->prof.fv[i].offset =
+					filter->prof->fv[i].offset;
+				pi->prof.fv[i].spec = filter->prof->fv[i].spec;
+				pi->prof.fv[i].msk = filter->prof->fv[i].msk;
+			}
+			pi->fdir_actived_cnt = 1;
+		}
+
+		if (filter->mark_flag == 1)
+			ice_fdir_rx_parsing_enable(ad, 1);
+
+		entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
+		if (!entry)
+			return -rte_errno;
+
+		rte_memcpy(entry, filter, sizeof(*filter));
+
+		filter->prof = NULL;
+		filter->pkt_buf = NULL;
+
+		flow->rule = entry;
+
+		return 0;
+	}
 
 	ice_fdir_extract_fltr_key(&key, filter);
 	node = ice_fdir_entry_lookup(fdir_info, &key);
@@ -1397,6 +1482,49 @@  ice_fdir_destroy_filter(struct ice_adapter *ad,
 
 	filter = (struct ice_fdir_filter_conf *)flow->rule;
 
+	if (filter->parser_ena) {
+		struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+		int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
+		int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+		u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
+		u16 main_vsi = pf->main_vsi->idx;
+		u16 vsi_num;
+
+		ret = ice_fdir_add_del_raw(pf, filter, false);
+		if (ret)
+			return -rte_errno;
+
+		struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
+		if (pi->fdir_actived_cnt != 0) {
+			pi->fdir_actived_cnt--;
+			if (!pi->fdir_actived_cnt) {
+				vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi);
+				ret = ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+							   vsi_num, id);
+				if (ret)
+					return -rte_errno;
+
+				vsi_num = ice_get_hw_vsi_num(hw, main_vsi);
+				ret = ice_rem_prof_id_flow(hw, ICE_BLK_FD,
+							   vsi_num, id);
+				if (ret)
+					return -rte_errno;
+			}
+		}
+
+		if (filter->mark_flag == 1)
+			ice_fdir_rx_parsing_enable(ad, 0);
+
+		flow->rule = NULL;
+		filter->prof = NULL;
+		filter->pkt_buf = NULL;
+
+		rte_free(filter);
+
+		return 0;
+	}
+
 	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
 
 	if (filter->counter) {
@@ -1675,6 +1803,7 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
 	enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
 	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
+	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
@@ -1702,6 +1831,9 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 	struct ice_fdir_extra *p_ext_data;
 	struct ice_fdir_v4 *p_v4 = NULL;
 	struct ice_fdir_v6 *p_v6 = NULL;
+	struct ice_parser_result rslt;
+	struct ice_parser *psr;
+	uint8_t item_num = 0;
 
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
@@ -1713,6 +1845,7 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 		    item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
 			is_outer = false;
 		}
+		item_num++;
 	}
 
 	/* This loop parse flow pattern and distinguish Non-tunnel and tunnel
@@ -1733,6 +1866,101 @@  ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
 			    &input_set_i : &input_set_o;
 
 		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_RAW:
+			raw_spec = item->spec;
+			raw_mask = item->mask;
+
+			if (item_num != 1)
+				break;
+
+			/* convert raw spec & mask from byte string to int */
+			unsigned char *tmp_spec =
+				(uint8_t *)(uintptr_t)raw_spec->pattern;
+			unsigned char *tmp_mask =
+				(uint8_t *)(uintptr_t)raw_mask->pattern;
+			uint16_t udp_port = 0;
+			uint16_t tmp_val = 0;
+			uint8_t pkt_len = 0;
+			uint8_t tmp = 0;
+			int i, j;
+
+			pkt_len = strlen((char *)(uintptr_t)raw_spec->pattern);
+			if (strlen((char *)(uintptr_t)raw_mask->pattern) !=
+				pkt_len)
+				return -rte_errno;
+
+			for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+				tmp = tmp_spec[i];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val = tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val = tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val = tmp - '0';
+
+				tmp_val *= 16;
+				tmp = tmp_spec[i + 1];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_spec[j] = tmp_val + tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_spec[j] = tmp_val + tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_spec[j] = tmp_val + tmp - '0';
+
+				tmp = tmp_mask[i];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val = tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val = tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val = tmp - '0';
+
+				tmp_val *= 16;
+				tmp = tmp_mask[i + 1];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_mask[j] = tmp_val + tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_mask[j] = tmp_val + tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_mask[j] = tmp_val + tmp - '0';
+			}
+
+			pkt_len /= 2;
+
+			if (ice_parser_create(&ad->hw, &psr))
+				return -rte_errno;
+			if (ice_get_open_tunnel_port(&ad->hw, TNL_VXLAN,
+						     &udp_port))
+				ice_parser_vxlan_tunnel_set(psr, udp_port,
+							    true);
+			if (ice_parser_run(psr, tmp_spec, pkt_len, &rslt))
+				return -rte_errno;
+			ice_parser_destroy(psr);
+
+			if (!tmp_mask)
+				return -rte_errno;
+
+			filter->prof = (struct ice_parser_profile *)
+				ice_malloc(&ad->hw, sizeof(*filter->prof));
+			if (!filter->prof)
+				return -ENOMEM;
+
+			if (ice_parser_profile_init(&rslt, tmp_spec, tmp_mask,
+				pkt_len, ICE_BLK_FD, true, filter->prof))
+				return -rte_errno;
+
+			u8 *pkt_buf = (u8 *)ice_malloc(&ad->hw, pkt_len + 1);
+			if (!pkt_buf)
+				return -ENOMEM;
+			rte_memcpy(pkt_buf, tmp_spec, pkt_len);
+			filter->pkt_buf = pkt_buf;
+
+			filter->pkt_len = pkt_len;
+
+			filter->parser_ena = true;
+
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
 			eth_spec = item->spec;
@@ -2198,6 +2426,7 @@  ice_fdir_parse(struct ice_adapter *ad,
 	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
 	struct ice_pattern_match_item *item = NULL;
 	uint64_t input_set;
+	bool raw = false;
 	int ret;
 
 	memset(filter, 0, sizeof(*filter));
@@ -2213,7 +2442,13 @@  ice_fdir_parse(struct ice_adapter *ad,
 	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
 	if (ret)
 		goto error;
+
+	if (item->pattern_list[0] == RTE_FLOW_ITEM_TYPE_RAW)
+		raw = true;
+
 	input_set = filter->input_set_o | filter->input_set_i;
+	input_set = raw ? ~input_set : input_set;
+
 	if (!input_set || filter->input_set_o &
 	    ~(item->input_set_mask_o | ICE_INSET_ETHERTYPE) ||
 	    filter->input_set_i & ~item->input_set_mask_i) {
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 02f854666a..d3391c86c0 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -65,6 +65,12 @@  enum rte_flow_item_type pattern_empty[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+/* raw */
+enum rte_flow_item_type pattern_raw[] = {
+	RTE_FLOW_ITEM_TYPE_RAW,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 /* L2 */
 enum rte_flow_item_type pattern_ethertype[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
@@ -2081,6 +2087,7 @@  struct ice_ptype_match {
 };
 
 static struct ice_ptype_match ice_ptype_map[] = {
+	{pattern_raw,					ICE_PTYPE_IPV4_PAY},
 	{pattern_eth_ipv4,				ICE_PTYPE_IPV4_PAY},
 	{pattern_eth_ipv4_udp,				ICE_PTYPE_IPV4_UDP_PAY},
 	{pattern_eth_ipv4_tcp,				ICE_PTYPE_IPV4_TCP_PAY},
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 8845a3e156..1b030c0466 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -124,6 +124,9 @@ 
 /* empty pattern */
 extern enum rte_flow_item_type pattern_empty[];
 
+/* raw pattern */
+extern enum rte_flow_item_type pattern_raw[];
+
 /* L2 */
 extern enum rte_flow_item_type pattern_ethertype[];
 extern enum rte_flow_item_type pattern_ethertype_vlan[];