[v2,3/4] net/ice: support ACL filter in DCF

Message ID 20200929015632.109364-4-simei.su@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Headers
Series net/ice: support DCF ACL capability |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Simei Su Sept. 29, 2020, 1:56 a.m. UTC
  Add ice_acl_create_filter to create a rule and ice_acl_destroy_filter
to destroy a rule. If a flow is matched by ACL filter, filter rule
will be set to HW. Currently IPV4/IPV4_UDP/IPV4_TCP/IPV4_SCTP pattern
and drop action are supported.

Signed-off-by: Simei Su <simei.su@intel.com>
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
 doc/guides/rel_notes/release_20_11.rst |    5 +
 drivers/net/ice/ice_acl_filter.c       | 1088 ++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_ethdev.h           |   17 +
 drivers/net/ice/ice_generic_flow.c     |    2 +
 drivers/net/ice/meson.build            |    3 +-
 5 files changed, 1114 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_acl_filter.c
  

Patch

diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 96d8c14..1d3afb2 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -90,6 +90,11 @@  New Features
 
   * Added support for flexible descriptor metadata extraction.
 
+* **Updated the Intel ice driver.**
+
+  Updated the Intel ice driver with new features and improvements, including:
+
+  * Added ACL filter support for Intel DCF.
 
 Removed Items
 -------------
diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c
new file mode 100644
index 0000000..758362a
--- /dev/null
+++ b/drivers/net/ice/ice_acl_filter.c
@@ -0,0 +1,1088 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include "base/ice_type.h"
+#include "base/ice_acl.h"
+#include "ice_logs.h"
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "base/ice_flow.h"
+
+#define MAX_ACL_SLOTS_ID 2048
+
+#define ICE_ACL_INSET_ETH_IPV4 ( \
+	ICE_INSET_SMAC | ICE_INSET_DMAC | \
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
+#define ICE_ACL_INSET_ETH_IPV4_UDP ( \
+	ICE_INSET_SMAC | ICE_INSET_DMAC | \
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_TCP ( \
+	ICE_INSET_SMAC | ICE_INSET_DMAC | \
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
+#define ICE_ACL_INSET_ETH_IPV4_SCTP ( \
+	ICE_INSET_SMAC | ICE_INSET_DMAC | \
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
+
+static struct ice_flow_parser ice_acl_parser;
+
+/* Patterns handled by the ACL engine: plain IPv4 plus IPv4 with a
+ * UDP/TCP/SCTP L4 header. Each entry pairs a pattern with the input
+ * set the engine can match on (no meta input set is supported).
+ */
+static struct
+ice_pattern_match_item ice_acl_pattern[] = {
+	{pattern_eth_ipv4,      ICE_ACL_INSET_ETH_IPV4,      ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp,  ICE_ACL_INSET_ETH_IPV4_UDP,  ICE_INSET_NONE},
+	{pattern_eth_ipv4_tcp,  ICE_ACL_INSET_ETH_IPV4_TCP,  ICE_INSET_NONE},
+	{pattern_eth_ipv4_sctp, ICE_ACL_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
+};
+
+/**
+ * ice_acl_prof_alloc - allocate ACL profile storage for every flow type
+ * @hw: pointer to the hardware structure
+ *
+ * Allocates hw->acl_prof (if not already present) and one profile entry
+ * per filter ptype. Returns 0 on success; on failure releases everything
+ * allocated up to the failing ptype and returns -ENOMEM.
+ */
+static int
+ice_acl_prof_alloc(struct ice_hw *hw)
+{
+	enum ice_fltr_ptype ptype, fltr_ptype;
+
+	if (!hw->acl_prof) {
+		hw->acl_prof = (struct ice_fd_hw_prof **)
+			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
+				   sizeof(*hw->acl_prof));
+		if (!hw->acl_prof)
+			return -ENOMEM;
+	}
+
+	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+	     ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
+		if (!hw->acl_prof[ptype]) {
+			hw->acl_prof[ptype] = (struct ice_fd_hw_prof *)
+				ice_malloc(hw, sizeof(**hw->acl_prof));
+			if (!hw->acl_prof[ptype])
+				goto fail_mem;
+		}
+	}
+
+	return 0;
+
+fail_mem:
+	/* release every profile below the ptype that failed */
+	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+	     fltr_ptype < ptype; fltr_ptype++) {
+		rte_free(hw->acl_prof[fltr_ptype]);
+		hw->acl_prof[fltr_ptype] = NULL;
+	}
+
+	rte_free(hw->acl_prof);
+	hw->acl_prof = NULL;
+
+	return -ENOMEM;
+}
+
+/**
+ * ice_acl_setup - Reserve and initialize the ACL resources
+ * @pf: board private structure
+ *
+ * Creates the ACL TCAM table and one scenario on top of it. The key is
+ * wider (6 key-width units vs 3) when fewer than 4 PFs are enabled.
+ *
+ * Returns 0 on success, the base-code error otherwise.
+ */
+static int
+ice_acl_setup(struct ice_pf *pf)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	u32 pf_num = hw->dev_caps.num_funcs;
+	struct ice_acl_tbl_params params;
+	u16 scen_id;
+	int err;
+
+	memset(&params, 0, sizeof(params));
+
+	/* create for IPV4 table */
+	if (pf_num < 4)
+		params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 6;
+	else
+		params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;
+
+	params.depth = ICE_AQC_ACL_TCAM_DEPTH;
+	params.entry_act_pairs = 1;
+	params.concurr = false;
+
+	err = ice_acl_create_tbl(hw, &params);
+	if (err)
+		return err;
+
+	/* the scenario creation result is the final status */
+	return ice_acl_create_scen(hw, params.width, params.depth,
+				   &scen_id);
+}
+
+/**
+ * ice_deinit_acl - Unroll the initialization of the ACL block
+ * @pf: ptr to PF device
+ *
+ * Destroys the HW ACL table and releases the driver-side table state.
+ */
+static void ice_deinit_acl(struct ice_pf *pf)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	ice_acl_destroy_tbl(hw);
+
+	rte_free(hw->acl_tbl);
+	hw->acl_tbl = NULL;
+}
+
+/**
+ * ice_acl_erase_flow_from_hw - Remove a flow from the HW profile tables
+ * @hw: hardware structure containing the filter list
+ * @flow_type: flow type to release
+ *
+ * Removes every HW entry and the profile itself for each tunnel segment
+ * of @flow_type. No-op when no profile exists for the flow type.
+ */
+static void
+ice_acl_erase_flow_from_hw(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+	struct ice_fd_hw_prof *prof;
+	int tun;
+
+	if (!hw->acl_prof || !hw->acl_prof[flow_type])
+		return;
+
+	prof = hw->acl_prof[flow_type];
+	for (tun = 0; tun < ICE_FD_HW_SEG_TUN; tun++) {
+		uint64_t prof_id;
+		int j;
+
+		/* profile IDs are segmented per tunnel type */
+		prof_id = flow_type + tun * ICE_FLTR_PTYPE_MAX;
+		for (j = 0; j < prof->cnt; j++) {
+			uint16_t vsi_num;
+
+			if (!prof->entry_h[j][tun] && !prof->vsi_h[j])
+				continue;
+			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
+			ice_rem_prof_id_flow(hw, ICE_BLK_ACL, vsi_num, prof_id);
+			ice_flow_rem_entry(hw, ICE_BLK_ACL,
+					   prof->entry_h[j][tun]);
+			prof->entry_h[j][tun] = 0;
+		}
+		ice_flow_rem_prof(hw, ICE_BLK_ACL, prof_id);
+	}
+}
+
+/**
+ * ice_acl_rem_flow - Release the ice_flow structures for a filter type
+ * @hw: hardware structure containing the filter list
+ * @flow_type: flow type to release
+ */
+static void ice_acl_rem_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
+{
+	struct ice_fd_hw_prof *prof;
+	int tun, i;
+
+	if (!hw->acl_prof || !hw->acl_prof[flow_type])
+		return;
+
+	prof = hw->acl_prof[flow_type];
+
+	/* tear down HW entries first, then drop the SW bookkeeping */
+	ice_acl_erase_flow_from_hw(hw, flow_type);
+	for (i = 0; i < prof->cnt; i++)
+		prof->vsi_h[i] = 0;
+	/* NOTE(review): this loop is bounded by ICE_FD_HW_SEG_MAX while the
+	 * HW erase above uses ICE_FD_HW_SEG_TUN - confirm both bounds are
+	 * intentional.
+	 */
+	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+		if (!prof->fdir_seg[tun])
+			continue;
+		rte_free(prof->fdir_seg[tun]);
+		prof->fdir_seg[tun] = NULL;
+	}
+	prof->cnt = 0;
+}
+
+/**
+ * acl_prof_helper_function - populate the match fields of an ACL segment
+ * @hw: pointer to the hardware structure
+ * @seg: flow segment to fill in
+ * @is_l4: true when L4 source/destination ports must be matched as well
+ * @src_port: flow field index of the L4 source port (ignored if !is_l4)
+ * @dst_port: flow field index of the L4 destination port (ignored if !is_l4)
+ *
+ * MAC address fields are only added when fewer than 4 PFs are enabled,
+ * matching the wider ACL key configured in ice_acl_setup().
+ * Always returns 0.
+ */
+static int
+acl_prof_helper_function(struct ice_hw *hw, struct ice_flow_seg_info *seg,
+			 bool is_l4, uint16_t src_port, uint16_t dst_port)
+{
+	uint16_t val_loc, mask_loc;
+
+	if (hw->dev_caps.num_funcs < 4) {
+		/* mac source address */
+		val_loc = offsetof(struct ice_fdir_fltr,
+				   ext_data.src_mac);
+		mask_loc = offsetof(struct ice_fdir_fltr,
+				    ext_mask.src_mac);
+		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+				 val_loc, mask_loc,
+				 ICE_FLOW_FLD_OFF_INVAL, false);
+
+		/* mac destination address */
+		val_loc = offsetof(struct ice_fdir_fltr,
+				   ext_data.dst_mac);
+		mask_loc = offsetof(struct ice_fdir_fltr,
+				    ext_mask.dst_mac);
+		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+				 val_loc, mask_loc,
+				 ICE_FLOW_FLD_OFF_INVAL, false);
+	}
+
+	/* IP source address */
+	val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_ip);
+	mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_ip);
+	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_loc,
+			 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+	/* IP destination address */
+	val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_ip);
+	mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_ip);
+	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, val_loc,
+			 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+	if (is_l4) {
+		/* Layer 4 source port */
+		val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_port);
+		mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_port);
+		ice_flow_set_fld(seg, src_port, val_loc,
+				 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+
+		/* Layer 4 destination port */
+		val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_port);
+		mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_port);
+		ice_flow_set_fld(seg, dst_port, val_loc,
+				 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
+	}
+
+	return 0;
+}
+
+/**
+ * ice_acl_prof_init - Initialize ACL profile
+ * @pf: ice PF structure
+ *
+ * Creates one flow profile per supported ptype (IPv4 other/UDP/TCP/SCTP)
+ * and associates each profile with every VSI index below the main VSI.
+ *
+ * Returns 0 on success, -ENOMEM when a segment cannot be allocated.
+ */
+static int
+ice_acl_prof_init(struct ice_pf *pf)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_flow_prof *prof_1 = NULL;
+	struct ice_flow_prof *prof_2 = NULL;
+	struct ice_flow_prof *prof_3 = NULL;
+	struct ice_flow_prof *prof_4 = NULL;
+	struct ice_flow_seg_info *seg_1, *seg_2, *seg_3, *seg_4;
+	int i;
+
+	seg_1 = (struct ice_flow_seg_info *)
+		 ice_malloc(hw, sizeof(*seg_1));
+	seg_2 = (struct ice_flow_seg_info *)
+		 ice_malloc(hw, sizeof(*seg_2));
+	seg_3 = (struct ice_flow_seg_info *)
+		 ice_malloc(hw, sizeof(*seg_3));
+	seg_4 = (struct ice_flow_seg_info *)
+		 ice_malloc(hw, sizeof(*seg_4));
+	if (!seg_1 || !seg_2 || !seg_3 || !seg_4) {
+		/* rte_free(NULL) is a no-op, so freeing all four is safe */
+		rte_free(seg_1);
+		rte_free(seg_2);
+		rte_free(seg_3);
+		rte_free(seg_4);
+		return -ENOMEM;
+	}
+
+	/* NOTE(review): ice_flow_add_prof()/ice_flow_assoc_prof() results
+	 * are not checked - confirm failures are acceptable here.
+	 */
+	ICE_FLOW_SET_HDRS(seg_1, ICE_FLOW_SEG_HDR_IPV4);
+	acl_prof_helper_function(hw, seg_1, false, 0, 0);
+	ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+			  ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+			  seg_1, 1, NULL, 0, &prof_1);
+
+	ICE_FLOW_SET_HDRS(seg_2, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
+	acl_prof_helper_function(hw, seg_2, true,
+				 ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+				 ICE_FLOW_FIELD_IDX_UDP_DST_PORT);
+	ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+			  ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+			  seg_2, 1, NULL, 0, &prof_2);
+
+	ICE_FLOW_SET_HDRS(seg_3, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
+	acl_prof_helper_function(hw, seg_3, true,
+				 ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+				 ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
+	ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+			  ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+			  seg_3, 1, NULL, 0, &prof_3);
+
+	ICE_FLOW_SET_HDRS(seg_4, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
+	acl_prof_helper_function(hw, seg_4, true,
+				 ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+				 ICE_FLOW_FIELD_IDX_SCTP_DST_PORT);
+	ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+			  ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+			  seg_4, 1, NULL, 0, &prof_4);
+
+	for (i = 0; i < pf->main_vsi->idx; i++) {
+		ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_1, i);
+		ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_2, i);
+		ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_3, i);
+		ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_4, i);
+	}
+
+	return 0;
+}
+
+/**
+ * ice_acl_set_input_set - Copy the parsed filter into an ACL input entry
+ * @filter: parsed ACL filter configuration
+ * @input: HW filter entry to fill in
+ *
+ * Copies destination control, addresses, ports and MAC data/mask from the
+ * parse result into @input.
+ *
+ * NOTE(review): returns ICE_ERR_BAD_PTR (positive ICE status) for a NULL
+ * input but -EINVAL for an unknown flow type; the caller negates the
+ * value, so consider unifying the convention.
+ *
+ * Return error value or 0 on success.
+ */
+static int
+ice_acl_set_input_set(struct ice_acl_conf *filter, struct ice_fdir_fltr *input)
+{
+	if (!input)
+		return ICE_ERR_BAD_PTR;
+
+	input->q_index = filter->input.q_index;
+	input->dest_vsi = filter->input.dest_vsi;
+	input->dest_ctl = filter->input.dest_ctl;
+	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
+	input->flow_type = filter->input.flow_type;
+
+	switch (input->flow_type) {
+	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+		input->ip.v4.dst_port = filter->input.ip.v4.dst_port;
+		input->ip.v4.src_port = filter->input.ip.v4.src_port;
+		input->ip.v4.dst_ip = filter->input.ip.v4.dst_ip;
+		input->ip.v4.src_ip = filter->input.ip.v4.src_ip;
+
+		input->mask.v4.dst_port = filter->input.mask.v4.dst_port;
+		input->mask.v4.src_port = filter->input.mask.v4.src_port;
+		input->mask.v4.dst_ip = filter->input.mask.v4.dst_ip;
+		input->mask.v4.src_ip = filter->input.mask.v4.src_ip;
+
+		rte_memcpy(&input->ext_data.src_mac,
+			   &filter->input.ext_data.src_mac,
+			   RTE_ETHER_ADDR_LEN);
+		rte_memcpy(&input->ext_mask.src_mac,
+			   &filter->input.ext_mask.src_mac,
+			   RTE_ETHER_ADDR_LEN);
+
+		rte_memcpy(&input->ext_data.dst_mac,
+			   &filter->input.ext_data.dst_mac,
+			   RTE_ETHER_ADDR_LEN);
+		rte_memcpy(&input->ext_mask.dst_mac,
+			   &filter->input.ext_mask.dst_mac,
+			   RTE_ETHER_ADDR_LEN);
+
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+		/* no L4 header: copy the whole v4 data/mask in one go */
+		rte_memcpy(&input->ip.v4, &filter->input.ip.v4,
+			   sizeof(struct ice_fdir_v4));
+		rte_memcpy(&input->mask.v4, &filter->input.mask.v4,
+			   sizeof(struct ice_fdir_v4));
+
+		rte_memcpy(&input->ext_data.src_mac,
+			   &filter->input.ext_data.src_mac,
+			   RTE_ETHER_ADDR_LEN);
+		rte_memcpy(&input->ext_mask.src_mac,
+			   &filter->input.ext_mask.src_mac,
+			   RTE_ETHER_ADDR_LEN);
+
+		rte_memcpy(&input->ext_data.dst_mac,
+			   &filter->input.ext_data.dst_mac,
+			   RTE_ETHER_ADDR_LEN);
+		rte_memcpy(&input->ext_mask.dst_mac,
+			   &filter->input.ext_mask.dst_mac,
+			   RTE_ETHER_ADDR_LEN);
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Driver-private state attached to a rte_flow created by this engine. */
+struct acl_rule {
+	enum ice_fltr_ptype flow_type;
+	/* slot ids into pf->slots / pf->hw_entry_id; IPV4_OTHER rules use
+	 * all four, the L4-specific rules only entry_id[0].
+	 */
+	uint32_t entry_id[4];
+	struct ice_fdir_fltr *input;
+};
+
+/* Allocate slot_id from bitmap table.
+ * Returns the lowest free slot and marks it busy.
+ * NOTE(review): the return type is uint32_t but -rte_errno is returned
+ * when the bitmap is exhausted; callers cannot distinguish that huge
+ * positive value from a valid slot id - consider an out-parameter.
+ */
+static inline uint32_t
+ice_acl_alloc_slot_id(struct rte_bitmap *slots, struct rte_flow_error *error)
+{
+	uint32_t pos = 0;
+	uint64_t slab = 0;
+	uint32_t i = 0;
+
+	/* restart the scan from bit 0 so the lowest free slot is found */
+	__rte_bitmap_scan_init(slots);
+	if (!rte_bitmap_scan(slots, &pos, &slab)) {
+		rte_flow_error_set(error, ENOMEM,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed with init acl bitmap.");
+		return -rte_errno;
+	}
+	/* rte_bitmap_scan returns a 64-bit slab; find its first set bit */
+	i = rte_bsf64(slab);
+	pos += i;
+	rte_bitmap_clear(slots, pos);
+
+	return pos;
+}
+
+/**
+ * ice_acl_create_filter - program a parsed ACL rule into HW
+ * @ad: ice adapter
+ * @flow: rte_flow the new rule is attached to
+ * @meta: parsed filter (struct ice_acl_conf) from the parse stage
+ * @error: flow error reporting structure
+ *
+ * An IPV4_OTHER rule is expanded into one HW entry per supported ptype
+ * so it also matches packets hitting the UDP/TCP/SCTP profiles.
+ *
+ * Returns 0 on success, -rte_errno on failure (all allocations freed).
+ */
+static int
+ice_acl_create_filter(struct ice_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_acl_conf *filter = meta;
+	struct acl_rule *rule;
+	struct ice_fdir_fltr *input;
+	struct ice_flow_action acts[1];
+	int act_cnt, ret;
+	uint32_t slot_id;
+	enum ice_block blk = ICE_BLK_ACL;
+	enum ice_fltr_ptype flow_type = filter->input.flow_type;
+	uint64_t entry_id;
+	uint64_t entry_0, entry_1, entry_2, entry_3, entry_4, entry_5, entry_6;
+
+	rule = rte_zmalloc("acl_rule", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return -rte_errno;
+	}
+
+	input = rte_zmalloc("acl_entry", sizeof(*input), 0);
+	if (!input) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		goto err_free_rule;
+	}
+
+	ret = ice_acl_set_input_set(filter, input);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "failed to set input set.");
+		goto err_free_input;
+	}
+
+	/* only the drop action is supported at the moment */
+	act_cnt = 1;
+	if (filter->input.dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
+		acts[0].type = ICE_FLOW_ACT_DROP;
+		acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
+		acts[0].data.acl_act.prio = 0x3;
+		acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
+	}
+
+	input->acl_fltr = true;
+
+	/* NOTE(review): slot allocation and ice_flow_add_entry() results
+	 * are not checked - confirm error propagation is handled.
+	 */
+	switch (flow_type) {
+	/* For IPV4_OTHER type, should add entry for all types. */
+	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+		slot_id = ice_acl_alloc_slot_id(pf->slots, error);
+		entry_id = ((uint64_t)(ICE_FLTR_PTYPE_NONF_IPV4_OTHER) << 32) |
+			     slot_id;
+		ice_flow_add_entry(hw, blk,
+				   ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
+				   entry_id, pf->main_vsi->idx,
+				   ICE_FLOW_PRIO_NORMAL, input, acts,
+				   act_cnt, &entry_0);
+		rule->entry_id[0] = slot_id;
+		pf->hw_entry_id[slot_id] = entry_0;
+
+		slot_id = ice_acl_alloc_slot_id(pf->slots, error);
+		entry_id = ((uint64_t)(ICE_FLTR_PTYPE_NONF_IPV4_UDP) << 32) |
+			     slot_id;
+		ice_flow_add_entry(hw, blk,
+				   ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+				   entry_id, pf->main_vsi->idx,
+				   ICE_FLOW_PRIO_NORMAL, input, acts,
+				   act_cnt, &entry_1);
+		rule->entry_id[1] = slot_id;
+		pf->hw_entry_id[slot_id] = entry_1;
+
+		slot_id = ice_acl_alloc_slot_id(pf->slots, error);
+		entry_id = ((uint64_t)(ICE_FLTR_PTYPE_NONF_IPV4_TCP) << 32) |
+			     slot_id;
+		ice_flow_add_entry(hw, blk,
+				   ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+				   entry_id, pf->main_vsi->idx,
+				   ICE_FLOW_PRIO_NORMAL, input, acts,
+				   act_cnt, &entry_2);
+		rule->entry_id[2] = slot_id;
+		pf->hw_entry_id[slot_id] = entry_2;
+
+		slot_id = ice_acl_alloc_slot_id(pf->slots, error);
+		entry_id = ((uint64_t)(ICE_FLTR_PTYPE_NONF_IPV4_SCTP) << 32) |
+			     slot_id;
+		ice_flow_add_entry(hw, blk,
+				   ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+				   entry_id, pf->main_vsi->idx,
+				   ICE_FLOW_PRIO_NORMAL, input, acts,
+				   act_cnt, &entry_3);
+		rule->entry_id[3] = slot_id;
+		pf->hw_entry_id[slot_id] = entry_3;
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+		slot_id = ice_acl_alloc_slot_id(pf->slots, error);
+		entry_id = ((uint64_t)flow_type << 32) | slot_id;
+		ice_flow_add_entry(hw, blk,
+				   ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+				   entry_id, pf->main_vsi->idx,
+				   ICE_FLOW_PRIO_NORMAL, input, acts,
+				   act_cnt, &entry_4);
+		rule->entry_id[0] = slot_id;
+		pf->hw_entry_id[slot_id] = entry_4;
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+		slot_id = ice_acl_alloc_slot_id(pf->slots, error);
+		entry_id = ((uint64_t)flow_type << 32) | slot_id;
+		ice_flow_add_entry(hw, blk,
+				   ICE_FLTR_PTYPE_NONF_IPV4_TCP,
+				   entry_id, pf->main_vsi->idx,
+				   ICE_FLOW_PRIO_NORMAL, input, acts,
+				   act_cnt, &entry_5);
+		rule->entry_id[0] = slot_id;
+		pf->hw_entry_id[slot_id] = entry_5;
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+		slot_id = ice_acl_alloc_slot_id(pf->slots, error);
+		entry_id = ((uint64_t)flow_type << 32) | slot_id;
+		ice_flow_add_entry(hw, blk,
+				   ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
+				   entry_id, pf->main_vsi->idx,
+				   ICE_FLOW_PRIO_NORMAL, input, acts,
+				   act_cnt, &entry_6);
+		rule->entry_id[0] = slot_id;
+		pf->hw_entry_id[slot_id] = entry_6;
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "error flow type");
+		goto err_free_input;
+	}
+	rule->flow_type = flow_type;
+	rule->input = input;
+	flow->rule = (void *)rule;
+
+	return 0;
+
+err_free_input:
+	rte_free(input);
+err_free_rule:
+	rte_free(rule);
+	return -rte_errno;
+}
+
+/**
+ * ice_acl_destroy_filter - remove an ACL rule from HW and free its state
+ * @ad: ice adapter
+ * @flow: rte_flow carrying the rule
+ * @error: unused
+ *
+ * Releases the slot ids back to the bitmap, removes the HW entries and
+ * the flow profile, then frees the driver-private rule. Returns 0.
+ */
+static int
+ice_acl_destroy_filter(struct ice_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error __rte_unused)
+{
+	struct acl_rule *rule = (struct acl_rule *)flow->rule;
+	struct ice_pf *pf = &ad->pf;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	uint32_t slot_id;
+	int i;
+
+	switch (rule->flow_type) {
+	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+		/* an IPV4_OTHER rule was expanded into four HW entries */
+		for (i = 0; i < 4; i++) {
+			slot_id = rule->entry_id[i];
+			rte_bitmap_set(pf->slots, slot_id);
+			ice_flow_rem_entry(hw, ICE_BLK_ACL,
+					   pf->hw_entry_id[slot_id]);
+		}
+		ice_acl_rem_flow(hw, rule->flow_type);
+		break;
+	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+		slot_id = rule->entry_id[0];
+		rte_bitmap_set(pf->slots, slot_id);
+		ice_flow_rem_entry(hw, ICE_BLK_ACL, pf->hw_entry_id[slot_id]);
+		ice_acl_rem_flow(hw, rule->flow_type);
+		break;
+	default:
+		break;
+	}
+
+	rte_free(flow->rule);
+	flow->rule = NULL;
+	return 0;
+}
+
+/* Flow framework "free" callback: release only the driver-private rule
+ * memory (HW entries are removed by the destroy callback).
+ */
+static void
+ice_acl_filter_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+}
+
+/**
+ * ice_acl_parse_action - parse the action list of an ACL flow
+ * @ad: ice adapter (unused)
+ * @actions: rte_flow action list, terminated by RTE_FLOW_ACTION_TYPE_END
+ * @error: flow error reporting structure
+ * @filter: filter state updated with the destination control
+ *
+ * Exactly one DROP action is accepted (VOID actions are skipped).
+ * Returns 0 on success, -rte_errno otherwise.
+ */
+static int
+ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
+		     const struct rte_flow_action actions[],
+		     struct rte_flow_error *error,
+		     struct ice_acl_conf *filter)
+{
+	uint32_t nb_dest = 0;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
+			continue;
+
+		if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+			nb_dest++;
+			filter->input.dest_ctl =
+				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+			continue;
+		}
+
+		rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid action.");
+		return -rte_errno;
+	}
+
+	/* zero or multiple destination actions are both rejected */
+	if (nb_dest != 1) {
+		rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_acl_parse_pattern - parse the pattern items of an ACL flow
+ * @ad: ice adapter (unused)
+ * @pattern: rte_flow item list, terminated by RTE_FLOW_ITEM_TYPE_END
+ * @error: flow error reporting structure
+ * @filter: ACL filter state filled in with spec/mask and flow type
+ *
+ * Supports ETH/IPV4 optionally followed by UDP, TCP or SCTP. All-ones
+ * address/port masks are rejected - presumably exact matches are left
+ * to other engines and the ACL engine handles partial masks only
+ * (confirm against the design).
+ *
+ * Returns 0 on success, -rte_errno otherwise.
+ */
+static int
+ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
+		       const struct rte_flow_item pattern[],
+		       struct rte_flow_error *error,
+		       struct ice_acl_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	uint64_t input_set = ICE_INSET_NONE;
+	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
+
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst) ||
+				    rte_is_broadcast_ether_addr(&eth_mask->src)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid mac addr mask");
+					return -rte_errno;
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_spec->src) &&
+				    !rte_is_zero_ether_addr(&eth_mask->src)) {
+					input_set |= ICE_INSET_SMAC;
+					rte_memcpy(&filter->input.ext_data.src_mac,
+						   &eth_spec->src,
+						   RTE_ETHER_ADDR_LEN);
+					rte_memcpy(&filter->input.ext_mask.src_mac,
+						   &eth_mask->src,
+						   RTE_ETHER_ADDR_LEN);
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_spec->dst) &&
+				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
+					input_set |= ICE_INSET_DMAC;
+					rte_memcpy(&filter->input.ext_data.dst_mac,
+						   &eth_spec->dst,
+						   RTE_ETHER_ADDR_LEN);
+					rte_memcpy(&filter->input.ext_mask.dst_mac,
+						   &eth_mask->dst,
+						   RTE_ETHER_ADDR_LEN);
+				}
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (ipv4_spec && ipv4_mask) {
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+				    ipv4_mask->hdr.total_length ||
+				    ipv4_mask->hdr.packet_id ||
+				    ipv4_mask->hdr.fragment_offset ||
+				    ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				/* full (all-ones) masks are not for ACL */
+				if ((ipv4_mask->hdr.src_addr == UINT32_MAX) ||
+				    (ipv4_mask->hdr.dst_addr == UINT32_MAX)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if ((ipv4_spec->hdr.src_addr != 0) &&
+				    (ipv4_mask->hdr.src_addr != 0)) {
+					filter->input.ip.v4.src_ip =
+						ipv4_spec->hdr.src_addr;
+					filter->input.mask.v4.src_ip =
+						ipv4_mask->hdr.src_addr;
+
+					input_set |= ICE_INSET_IPV4_SRC;
+				}
+
+				if ((ipv4_spec->hdr.dst_addr != 0) &&
+				    (ipv4_mask->hdr.dst_addr != 0)) {
+					filter->input.ip.v4.dst_ip =
+						ipv4_spec->hdr.dst_addr;
+					filter->input.mask.v4.dst_ip =
+						ipv4_mask->hdr.dst_addr;
+
+					input_set |= ICE_INSET_IPV4_DST;
+				}
+			}
+
+			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (tcp_spec && tcp_mask) {
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+				    tcp_mask->hdr.recv_ack ||
+				    tcp_mask->hdr.data_off ||
+				    tcp_mask->hdr.tcp_flags ||
+				    tcp_mask->hdr.rx_win ||
+				    tcp_mask->hdr.cksum ||
+				    tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				/* full (all-ones) port masks are rejected */
+				if ((tcp_mask->hdr.src_port == UINT16_MAX) ||
+				    (tcp_mask->hdr.dst_port == UINT16_MAX)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (tcp_spec->hdr.src_port != 0)) {
+					input_set |= ICE_INSET_TCP_SRC_PORT;
+					filter->input.ip.v4.src_port =
+						tcp_spec->hdr.src_port;
+					filter->input.mask.v4.src_port =
+						tcp_mask->hdr.src_port;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (tcp_spec->hdr.dst_port != 0)) {
+					input_set |= ICE_INSET_TCP_DST_PORT;
+					filter->input.ip.v4.dst_port =
+						tcp_spec->hdr.dst_port;
+					filter->input.mask.v4.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (udp_spec && udp_mask) {
+				/* Check UDP mask and update input set*/
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if ((udp_mask->hdr.src_port == UINT16_MAX) ||
+				    (udp_mask->hdr.dst_port == UINT16_MAX)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (udp_spec->hdr.src_port != 0)) {
+					input_set |= ICE_INSET_UDP_SRC_PORT;
+					filter->input.ip.v4.src_port =
+						udp_spec->hdr.src_port;
+					filter->input.mask.v4.src_port =
+						udp_mask->hdr.src_port;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (udp_spec->hdr.dst_port != 0)) {
+					input_set |= ICE_INSET_UDP_DST_PORT;
+					filter->input.ip.v4.dst_port =
+						udp_spec->hdr.dst_port;
+					filter->input.mask.v4.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (sctp_spec && sctp_mask) {
+				if ((sctp_mask->hdr.src_port == UINT16_MAX) ||
+				    (sctp_mask->hdr.dst_port == UINT16_MAX)) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid SCTP mask");
+					return -rte_errno;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (sctp_spec->hdr.src_port != 0)) {
+					input_set |= ICE_INSET_SCTP_SRC_PORT;
+					filter->input.ip.v4.src_port =
+						sctp_spec->hdr.src_port;
+					filter->input.mask.v4.src_port =
+						sctp_mask->hdr.src_port;
+				}
+
+				if ((l3 == RTE_FLOW_ITEM_TYPE_IPV4) &&
+				    (sctp_spec->hdr.dst_port != 0)) {
+					input_set |= ICE_INSET_SCTP_DST_PORT;
+					filter->input.ip.v4.dst_port =
+						sctp_spec->hdr.dst_port;
+					filter->input.mask.v4.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					flow_type =
+						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	filter->input.flow_type = flow_type;
+	filter->input_set = input_set;
+
+	return 0;
+}
+
+/**
+ * ice_acl_parse - parse and validate an ACL flow (pattern + actions)
+ * @ad: ice adapter
+ * @array: supported pattern table
+ * @array_len: number of entries in @array
+ * @pattern: rte_flow pattern items
+ * @actions: rte_flow actions
+ * @meta: on success, set to the parsed filter (pf->acl.conf)
+ * @error: flow error reporting structure
+ *
+ * Returns 0 on success, -rte_errno on failure. The matched pattern item
+ * is always freed, on both the success and the error path.
+ */
+static int
+ice_acl_parse(struct ice_adapter *ad,
+	       struct ice_pattern_match_item *array,
+	       uint32_t array_len,
+	       const struct rte_flow_item pattern[],
+	       const struct rte_flow_action actions[],
+	       void **meta,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_acl_conf *filter = &pf->acl.conf;
+	struct ice_pattern_match_item *item = NULL;
+	uint64_t input_set;
+	int ret;
+
+	memset(filter, 0, sizeof(*filter));
+	item = ice_search_pattern_match_item(pattern, array, array_len, error);
+	if (!item)
+		return -rte_errno;
+
+	ret = ice_acl_parse_pattern(ad, pattern, error, filter);
+	if (ret)
+		goto error;
+	input_set = filter->input_set;
+	/* reject fields outside the matched pattern's supported input set */
+	if (!input_set || input_set & ~item->input_set_mask) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		ret = -rte_errno;
+		goto error;
+	}
+
+	ret = ice_acl_parse_action(ad, actions, error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(item);
+	return ret;
+}
+
+/**
+ * ice_acl_bitmap_init - allocate the bitmap tracking free ACL slot ids
+ * @pf: board private structure
+ *
+ * All MAX_ACL_SLOTS_ID slots start out free (bits set).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+static int
+ice_acl_bitmap_init(struct ice_pf *pf)
+{
+	uint32_t bmp_size;
+	void *mem = NULL;
+	struct rte_bitmap *slots;
+
+	bmp_size = rte_bitmap_get_memory_footprint(MAX_ACL_SLOTS_ID);
+	mem = rte_zmalloc("create_acl_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+	if (mem == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for acl bitmap.");
+		return -ENOMEM;
+	}
+
+	slots = rte_bitmap_init_with_all_set(MAX_ACL_SLOTS_ID, mem, bmp_size);
+	if (slots == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for acl bitmap.");
+		rte_free(mem);
+		return -ENOMEM;
+	}
+	pf->slots = slots;
+
+	return 0;
+}
+
+/**
+ * ice_acl_init - engine init hook: set up ACL HW resources and parser
+ * @ad: ice adapter
+ *
+ * Allocates profiles, creates the ACL table/scenario, initializes the
+ * slot bitmap and registers the ACL parser.
+ *
+ * Returns 0 on success, negative on failure.
+ * NOTE(review): resources acquired by earlier steps are not released
+ * when a later step fails - confirm unwinding is handled by the caller.
+ */
+static int
+ice_acl_init(struct ice_adapter *ad)
+{
+	int ret = 0;
+	struct ice_pf *pf = &ad->pf;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_flow_parser *parser = &ice_acl_parser;
+
+	ret = ice_acl_prof_alloc(hw);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
+			    "ACL profile.");
+		return -ENOMEM;
+	}
+
+	ret = ice_acl_setup(pf);
+	if (ret)
+		return ret;
+
+	ret = ice_acl_bitmap_init(pf);
+	if (ret)
+		return ret;
+
+	ret = ice_acl_prof_init(pf);
+	if (ret)
+		return ret;
+
+	return ice_register_parser(parser, ad);
+}
+
+/**
+ * ice_acl_prof_free - release all ACL profile storage
+ * @hw: pointer to the hardware structure
+ *
+ * Safe to call when ice_acl_prof_alloc() failed or never ran.
+ */
+static void
+ice_acl_prof_free(struct ice_hw *hw)
+{
+	enum ice_fltr_ptype ptype;
+
+	/* guard against an uninitialized profile table */
+	if (!hw->acl_prof)
+		return;
+
+	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
+	     ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
+		rte_free(hw->acl_prof[ptype]);
+		hw->acl_prof[ptype] = NULL;
+	}
+
+	rte_free(hw->acl_prof);
+	hw->acl_prof = NULL;
+}
+
+/* Engine uninit hook: unregister the parser, destroy the HW ACL table
+ * and free the profile storage.
+ */
+static void
+ice_acl_uninit(struct ice_adapter *ad)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_flow_parser *parser = &ice_acl_parser;
+
+	ice_unregister_parser(parser, ad);
+
+	ice_deinit_acl(pf);
+	ice_acl_prof_free(hw);
+}
+
+/* ACL flow engine callbacks. */
+static struct
+ice_flow_engine ice_acl_engine = {
+	.init = ice_acl_init,
+	.uninit = ice_acl_uninit,
+	.create = ice_acl_create_filter,
+	.destroy = ice_acl_destroy_filter,
+	.free = ice_acl_filter_free,
+	.type = ICE_FLOW_ENGINE_ACL,
+};
+
+/* Parser registered in the distributor stage for the ACL engine. */
+static struct
+ice_flow_parser ice_acl_parser = {
+	.engine = &ice_acl_engine,
+	.array = ice_acl_pattern,
+	.array_len = RTE_DIM(ice_acl_pattern),
+	.parse_pattern_action = ice_acl_parse,
+	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
+};
+
+/* Register the ACL engine with the generic flow framework at load time. */
+RTE_INIT(ice_acl_engine_init)
+{
+	struct ice_flow_engine *engine = &ice_acl_engine;
+	ice_register_flow_engine(engine);
+}
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 37b956e..38de4c6 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -49,6 +49,8 @@ 
 #define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
 #define ICE_MAX_PKG_FILENAME_SIZE   256
 
+#define MAX_ACL_ENTRIES	512
+
 /**
  * vlan_id is a 12 bit number.
  * The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
@@ -398,6 +400,18 @@  struct ice_hash_gtpu_ctx {
 	struct ice_hash_cfg ipv6_tcp;
 };
 
+struct ice_acl_conf {
+	struct ice_fdir_fltr input;
+	uint64_t input_set;
+};
+
+/**
+ * A structure used to define fields of ACL related info.
+ */
+struct ice_acl_info {
+	struct ice_acl_conf conf;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -421,6 +435,7 @@  struct ice_pf {
 	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
 	uint16_t fdir_qp_offset;
 	struct ice_fdir_info fdir; /* flow director info */
+	struct ice_acl_info acl; /* ACL info */
 	struct ice_hash_gtpu_ctx gtpu_hash_ctx;
 	uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
 	uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
@@ -440,6 +455,8 @@  struct ice_pf {
 	uint64_t old_rx_bytes;
 	uint64_t old_tx_bytes;
 	uint64_t supported_rxdid; /* bitmap for supported RXDID */
+	struct rte_bitmap *slots;
+	uint64_t hw_entry_id[MAX_ACL_ENTRIES];
 };
 
 #define ICE_MAX_QUEUE_NUM  2048
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 54b0316..1429cbc 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1896,6 +1896,8 @@  ice_register_parser(struct ice_flow_parser *parser,
 			TAILQ_INSERT_TAIL(list, parser_node, node);
 		else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
 			TAILQ_INSERT_HEAD(list, parser_node, node);
+		else if (parser->engine->type == ICE_FLOW_ENGINE_ACL)
+			TAILQ_INSERT_HEAD(list, parser_node, node);
 		else
 			return -EINVAL;
 	}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 99e1b77..254595a 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -10,7 +10,8 @@  sources = files(
 	'ice_switch_filter.c',
 	'ice_generic_flow.c',
 	'ice_fdir_filter.c',
-	'ice_hash.c'
+	'ice_hash.c',
+	'ice_acl_filter.c'
 	)
 
 deps += ['hash', 'net', 'common_iavf']