get:
Show a patch.

patch:
Update a patch (partial update of writable fields).

put:
Update a patch (full update).
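
The endpoint shown below can be queried with any HTTP client; read access to this Patchwork instance typically needs no authentication. A minimal sketch using Python's requests library (the client choice is an assumption; the URL and field names are taken directly from the response that follows):

# Fetch patch 58712 as JSON and read a few fields from the response below.
import requests

resp = requests.get("http://patches.dpdk.org/api/patches/58712/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])    # "[01/12] net/ice: initialize and set up flow director"
print(patch["state"])   # "superseded"
print(patch["mbox"])    # mbox URL, suitable for piping into `git am`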

GET /api/patches/58712/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 58712,
    "url": "http://patches.dpdk.org/api/patches/58712/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190906120058.108073-2-yahui.cao@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190906120058.108073-2-yahui.cao@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190906120058.108073-2-yahui.cao@intel.com",
    "date": "2019-09-06T12:00:47",
    "name": "[01/12] net/ice: initialize and set up flow director",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "4f7c00d31d9fd36e7d14ecdfd3cc21cdf4c581db",
    "submitter": {
        "id": 1176,
        "url": "http://patches.dpdk.org/api/people/1176/?format=api",
        "name": "Cao, Yahui",
        "email": "yahui.cao@intel.com"
    },
    "delegate": {
        "id": 31221,
        "url": "http://patches.dpdk.org/api/users/31221/?format=api",
        "username": "yexl",
        "first_name": "xiaolong",
        "last_name": "ye",
        "email": "xiaolong.ye@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190906120058.108073-2-yahui.cao@intel.com/mbox/",
    "series": [
        {
            "id": 6273,
            "url": "http://patches.dpdk.org/api/series/6273/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=6273",
            "date": "2019-09-06T12:00:46",
            "name": "net/ice: add ice Flow Director driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/6273/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/58712/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/58712/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 5F6D41F0E1;\n\tFri,  6 Sep 2019 06:16:18 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n\tby dpdk.org (Postfix) with ESMTP id 0324A1F0E7\n\tfor <dev@dpdk.org>; Fri,  6 Sep 2019 06:16:15 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t05 Sep 2019 21:16:15 -0700",
            "from dpdk-yahui-skylake.sh.intel.com ([10.67.119.16])\n\tby FMSMGA003.fm.intel.com with ESMTP; 05 Sep 2019 21:16:12 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.64,472,1559545200\"; d=\"scan'208\";a=\"190709691\"",
        "From": "Yahui Cao <yahui.cao@intel.com>",
        "To": "Qiming Yang <qiming.yang@intel.com>,\n\tWenzhuo Lu <wenzhuo.lu@intel.com>",
        "Cc": "dev@dpdk.org, Qi Zhang <qi.z.zhang@intel.com>,\n\tXiaolong Ye <xiaolong.ye@intel.com>,\n\tBeilei Xing <beilei.xing@intel.com>, Yahui Cao <yahui.cao@intel.com>",
        "Date": "Fri,  6 Sep 2019 20:00:47 +0800",
        "Message-Id": "<20190906120058.108073-2-yahui.cao@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190906120058.108073-1-yahui.cao@intel.com>",
        "References": "<20190906120058.108073-1-yahui.cao@intel.com>",
        "Subject": "[dpdk-dev] [dpdk-dev 01/12] net/ice: initialize and set up flow\n\tdirector",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Beilei Xing <beilei.xing@intel.com>\n\nEnable flow director, include:\n - Create control VSI\n - Queue pair allocated and set up\n - Programming packet\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\n---\n drivers/net/ice/Makefile          |   1 +\n drivers/net/ice/ice_ethdev.c      | 107 +++++--\n drivers/net/ice/ice_ethdev.h      |  19 ++\n drivers/net/ice/ice_fdir_filter.c | 139 +++++++++\n drivers/net/ice/ice_rxtx.c        | 448 ++++++++++++++++++++++++++++++\n drivers/net/ice/ice_rxtx.h        |   7 +\n drivers/net/ice/meson.build       |   3 +-\n 7 files changed, 704 insertions(+), 20 deletions(-)\n create mode 100644 drivers/net/ice/ice_fdir_filter.c",
    "diff": "diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile\nindex ae53c2646..cbbd03fcf 100644\n--- a/drivers/net/ice/Makefile\n+++ b/drivers/net/ice/Makefile\n@@ -62,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c\n endif\n \n SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c\n+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_fdir_filter.c\n ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)\n \tCC_AVX2_SUPPORT=1\n else\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex 647aca3ed..cb32f08df 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -1097,11 +1097,20 @@ ice_pf_sw_init(struct rte_eth_dev *dev)\n \t\t\t\t  hw->func_caps.common_cap.num_rxq);\n \n \tpf->lan_nb_qps = pf->lan_nb_qp_max;\n+\tif (hw->func_caps.fd_fltr_guar > 0 ||\n+\t    hw->func_caps.fd_fltr_best_effort > 0) {\n+\t\tpf->flags |= ICE_FLAG_FDIR;\n+\t\tpf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;\n+\t\tpf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;\n+\t} else {\n+\t\tpf->fdir_nb_qps = 0;\n+\t}\n+\tpf->fdir_qp_offset = 0;\n \n \treturn 0;\n }\n \n-static struct ice_vsi *\n+struct ice_vsi *\n ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n {\n \tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n@@ -1113,6 +1122,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \tstruct rte_ether_addr mac_addr;\n \tuint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };\n \tuint8_t tc_bitmap = 0x1;\n+\tuint16_t cfg;\n \n \t/* hw->num_lports = 1 in NIC mode */\n \tvsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);\n@@ -1136,14 +1146,10 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \tpf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;\n \n \tmemset(&vsi_ctx, 0, sizeof(vsi_ctx));\n-\t/* base_queue in used in queue mapping of VSI add/update command.\n-\t * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ\n-\t * cases in the first stage. 
Only Main VSI.\n-\t */\n-\tvsi->base_queue = 0;\n \tswitch (type) {\n \tcase ICE_VSI_PF:\n \t\tvsi->nb_qps = pf->lan_nb_qps;\n+\t\tvsi->base_queue = 1;\n \t\tice_vsi_config_default_rss(&vsi_ctx.info);\n \t\tvsi_ctx.alloc_from_pool = true;\n \t\tvsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;\n@@ -1157,6 +1163,18 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \t\tvsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;\n \t\tvsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |\n \t\t\t\t\t ICE_AQ_VSI_Q_OPT_RSS_TPLZ;\n+\n+\t\t/* FDIR */\n+\t\tcfg = ICE_AQ_VSI_PROP_SECURITY_VALID |\n+\t\t\tICE_AQ_VSI_PROP_FLOW_DIR_VALID;\n+\t\tvsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);\n+\t\tcfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;\n+\t\tvsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);\n+\t\tvsi_ctx.info.max_fd_fltr_dedicated =\n+\t\t\trte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);\n+\t\tvsi_ctx.info.max_fd_fltr_shared =\n+\t\t\trte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);\n+\n \t\t/* Enable VLAN/UP trip */\n \t\tret = ice_vsi_config_tc_queue_mapping(vsi,\n \t\t\t\t\t\t      &vsi_ctx.info,\n@@ -1169,6 +1187,28 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \t\t\tgoto fail_mem;\n \t\t}\n \n+\t\tbreak;\n+\tcase ICE_VSI_CTRL:\n+\t\tvsi->nb_qps = pf->fdir_nb_qps;\n+\t\tvsi->base_queue = ICE_FDIR_QUEUE_ID;\n+\t\tvsi_ctx.alloc_from_pool = true;\n+\t\tvsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;\n+\n+\t\tcfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;\n+\t\tvsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);\n+\t\tcfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;\n+\t\tvsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);\n+\t\tvsi_ctx.info.sw_id = hw->port_info->sw_id;\n+\t\tret = ice_vsi_config_tc_queue_mapping(vsi,\n+\t\t\t\t\t\t      &vsi_ctx.info,\n+\t\t\t\t\t\t      ICE_DEFAULT_TCMAP);\n+\t\tif (ret) {\n+\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t     \"tc queue mapping with vsi failed, \"\n+\t\t\t\t     \"err = %d\",\n+\t\t\t\t     ret);\n+\t\t\tgoto fail_mem;\n+\t\t}\n \t\tbreak;\n \tdefault:\n \t\t/* for other types of VSI */\n@@ -1187,10 +1227,19 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \t\t}\n \t\tvsi->msix_intr = ret;\n \t\tvsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);\n+\t} else if (type == ICE_VSI_CTRL) {\n+\t\tret = ice_res_pool_alloc(&pf->msix_pool, 1);\n+\t\tif (ret < 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"VSI %d get heap failed %d\",\n+\t\t\t\t    vsi->vsi_id, ret);\n+\t\t}\n+\t\tvsi->msix_intr = ret;\n+\t\tvsi->nb_msix = 1;\n \t} else {\n \t\tvsi->msix_intr = 0;\n \t\tvsi->nb_msix = 0;\n \t}\n+\n \tret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);\n \tif (ret != ICE_SUCCESS) {\n \t\tPMD_INIT_LOG(ERR, \"add vsi failed, err = %d\", ret);\n@@ -1202,20 +1251,22 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \tpf->vsis_allocated = vsi_ctx.vsis_allocd;\n \tpf->vsis_unallocated = vsi_ctx.vsis_unallocated;\n \n-\t/* MAC configuration */\n-\trte_memcpy(pf->dev_addr.addr_bytes,\n-\t\t   hw->port_info->mac.perm_addr,\n-\t\t   ETH_ADDR_LEN);\n+\tif (type == ICE_VSI_PF) {\n+\t\t/* MAC configuration */\n+\t\trte_memcpy(pf->dev_addr.addr_bytes,\n+\t\t\t   hw->port_info->mac.perm_addr,\n+\t\t\t   ETH_ADDR_LEN);\n \n-\trte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);\n-\tret = ice_add_mac_filter(vsi, &mac_addr);\n-\tif (ret != ICE_SUCCESS)\n-\t\tPMD_INIT_LOG(ERR, \"Failed to add dflt MAC filter\");\n+\t\trte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);\n+\t\tret = ice_add_mac_filter(vsi, &mac_addr);\n+\t\tif (ret != 
ICE_SUCCESS)\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to add dflt MAC filter\");\n \n-\trte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);\n-\tret = ice_add_mac_filter(vsi, &mac_addr);\n-\tif (ret != ICE_SUCCESS)\n-\t\tPMD_INIT_LOG(ERR, \"Failed to add MAC filter\");\n+\t\trte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);\n+\t\tret = ice_add_mac_filter(vsi, &mac_addr);\n+\t\tif (ret != ICE_SUCCESS)\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to add MAC filter\");\n+\t}\n \n \t/* At the beginning, only TC0. */\n \t/* What we need here is the maximam number of the TX queues.\n@@ -1253,7 +1304,9 @@ ice_send_driver_ver(struct ice_hw *hw)\n static int\n ice_pf_setup(struct ice_pf *pf)\n {\n+\tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n \tstruct ice_vsi *vsi;\n+\tuint16_t unused;\n \n \t/* Clear all stats counters */\n \tpf->offset_loaded = FALSE;\n@@ -1262,6 +1315,13 @@ ice_pf_setup(struct ice_pf *pf)\n \tmemset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));\n \tmemset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));\n \n+\t/* force guaranteed filter pool for PF */\n+\tice_alloc_fd_guar_item(hw, &unused,\n+\t\t\t       hw->func_caps.fd_fltr_guar);\n+\t/* force shared filter pool for PF */\n+\tice_alloc_fd_shrd_item(hw, &unused,\n+\t\t\t       hw->func_caps.fd_fltr_best_effort);\n+\n \tvsi = ice_setup_vsi(pf, ICE_VSI_PF);\n \tif (!vsi) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to add vsi for PF\");\n@@ -1698,7 +1758,7 @@ ice_dev_init(struct rte_eth_dev *dev)\n \treturn ret;\n }\n \n-static int\n+int\n ice_release_vsi(struct ice_vsi *vsi)\n {\n \tstruct ice_hw *hw;\n@@ -1780,6 +1840,9 @@ ice_dev_stop(struct rte_eth_dev *dev)\n \t/* disable all queue interrupts */\n \tice_vsi_disable_queues_intr(main_vsi);\n \n+\tif (pf->fdir.fdir_vsi)\n+\t\tice_vsi_disable_queues_intr(pf->fdir.fdir_vsi);\n+\n \t/* Clear all queues and release mbufs */\n \tice_clear_queues(dev);\n \n@@ -2117,6 +2180,12 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)\n \t/* Enable interrupts for all the queues */\n \tice_vsi_enable_queues_intr(vsi);\n \n+\t/* Enable FDIR MSIX interrupt */\n+\tif (pf->fdir.fdir_vsi) {\n+\t\tice_vsi_queues_bind_intr(pf->fdir.fdir_vsi);\n+\t\tice_vsi_enable_queues_intr(pf->fdir.fdir_vsi);\n+\t}\n+\n \trte_intr_enable(intr_handle);\n \n \treturn 0;\ndiff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h\nindex d1d07641d..c43242b63 100644\n--- a/drivers/net/ice/ice_ethdev.h\n+++ b/drivers/net/ice/ice_ethdev.h\n@@ -249,6 +249,17 @@ TAILQ_HEAD(ice_flow_list, rte_flow);\n struct ice_flow_parser;\n TAILQ_HEAD(ice_parser_list, ice_flow_parser);\n \n+/**\n+ *  A structure used to define fields of a FDIR related info.\n+ */\n+struct ice_fdir_info {\n+\tstruct ice_vsi *fdir_vsi;     /* pointer to fdir VSI structure */\n+\tstruct ice_tx_queue *txq;\n+\tstruct ice_rx_queue *rxq;\n+\tvoid *prg_pkt;                 /* memory for fdir program packet */\n+\tuint64_t dma_addr;             /* physic address of packet memory*/\n+};\n+\n struct ice_pf {\n \tstruct ice_adapter *adapter; /* The adapter this PF associate to */\n \tstruct ice_vsi *main_vsi; /* pointer to main VSI structure */\n@@ -268,6 +279,9 @@ struct ice_pf {\n \tuint16_t lan_nb_qp_max;\n \tuint16_t lan_nb_qps; /* The number of queue pairs of LAN */\n \tuint16_t base_queue; /* The base queue pairs index  in the device */\n+\tuint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */\n+\tuint16_t fdir_qp_offset;\n+\tstruct ice_fdir_info fdir; /* flow director info */\n \tstruct ice_hw_port_stats stats_offset;\n \tstruct 
ice_hw_port_stats stats;\n \t/* internal packet statistics, it should be excluded from the total */\n@@ -348,6 +362,11 @@ struct ice_vsi_vlan_pvid_info {\n #define ICE_PF_TO_ETH_DEV(pf) \\\n \t(((struct ice_pf *)pf)->adapter->eth_dev)\n \n+struct ice_vsi *\n+ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);\n+int\n+ice_release_vsi(struct ice_vsi *vsi);\n+\n static inline int\n ice_align_floor(int n)\n {\ndiff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c\nnew file mode 100644\nindex 000000000..03d143058\n--- /dev/null\n+++ b/drivers/net/ice/ice_fdir_filter.c\n@@ -0,0 +1,139 @@\n+#include <stdio.h>\n+#include <rte_flow.h>\n+#include \"base/ice_fdir.h\"\n+#include \"base/ice_flow.h\"\n+#include \"base/ice_type.h\"\n+#include \"ice_ethdev.h\"\n+#include \"ice_rxtx.h\"\n+#include \"ice_generic_flow.h\"\n+\n+static const struct rte_memzone *\n+ice_memzone_reserve(const char *name, uint32_t len, int socket_id)\n+{\n+\tconst struct rte_memzone *mz;\n+\n+\tmz = rte_memzone_lookup(name);\n+\tif (mz)\n+\t\treturn mz;\n+\n+\tmz = rte_memzone_reserve_aligned(name, len, socket_id,\n+\t\t\t\t\t RTE_MEMZONE_IOVA_CONTIG,\n+\t\t\t\t\t ICE_RING_BASE_ALIGN);\n+\treturn mz;\n+}\n+\n+#define ICE_FDIR_MZ_NAME\t\"FDIR_MEMZONE\"\n+\n+/*\n+ * ice_fdir_setup - reserve and initialize the Flow Director resources\n+ * @pf: board private structure\n+ */\n+static int\n+ice_fdir_setup(struct ice_pf *pf)\n+{\n+\tstruct rte_eth_dev *eth_dev = pf->adapter->eth_dev;\n+\tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n+\tconst struct rte_memzone *mz = NULL;\n+\tchar z_name[RTE_MEMZONE_NAMESIZE];\n+\tstruct ice_vsi *vsi;\n+\tint err = ICE_SUCCESS;\n+\n+\tif ((pf->flags & ICE_FLAG_FDIR) == 0) {\n+\t\tPMD_INIT_LOG(ERR, \"HW doesn't support FDIR\");\n+\t\treturn -ENOTSUP;\n+\t}\n+\n+\tPMD_DRV_LOG(INFO, \"FDIR HW Capabilities: fd_fltr_guar = %u,\"\n+\t\t    \" fd_fltr_best_effort = %u.\",\n+\t\t    hw->func_caps.fd_fltr_guar,\n+\t\t    hw->func_caps.fd_fltr_best_effort);\n+\n+\tif (pf->fdir.fdir_vsi) {\n+\t\tPMD_DRV_LOG(INFO, \"FDIR initialization has been done.\");\n+\t\treturn ICE_SUCCESS;\n+\t}\n+\n+\t/* make new FDIR VSI */\n+\tvsi = ice_setup_vsi(pf, ICE_VSI_CTRL);\n+\tif (!vsi) {\n+\t\tPMD_DRV_LOG(ERR, \"Couldn't create FDIR VSI.\");\n+\t\treturn -EINVAL;\n+\t}\n+\tpf->fdir.fdir_vsi = vsi;\n+\n+\t/*Fdir tx queue setup*/\n+\terr = ice_fdir_setup_tx_resources(pf);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to setup FDIR TX resources.\");\n+\t\tgoto fail_setup_tx;\n+\t}\n+\n+\t/*Fdir rx queue setup*/\n+\terr = ice_fdir_setup_rx_resources(pf);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to setup FDIR RX resources.\");\n+\t\tgoto fail_setup_rx;\n+\t}\n+\n+\terr = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to start FDIR TX queue.\");\n+\t\tgoto fail_mem;\n+\t}\n+\n+\terr = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to start FDIR RX queue.\");\n+\t\tgoto fail_mem;\n+\t}\n+\n+\t/* reserve memory for the fdir programming packet */\n+\tsnprintf(z_name, sizeof(z_name), \"ICE_%s_%d\",\n+\t\t ICE_FDIR_MZ_NAME,\n+\t\t eth_dev->data->port_id);\n+\tmz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);\n+\tif (!mz) {\n+\t\tPMD_DRV_LOG(ERR, \"Cannot init memzone for \"\n+\t\t\t    \"flow director program packet.\");\n+\t\terr = -ENOMEM;\n+\t\tgoto fail_mem;\n+\t}\n+\tpf->fdir.prg_pkt = mz->addr;\n+\tpf->fdir.dma_addr = mz->iova;\n+\n+\tPMD_DRV_LOG(INFO, \"FDIR setup 
successfully, with programming queue %u.\",\n+\t\t    vsi->base_queue);\n+\treturn ICE_SUCCESS;\n+\n+fail_mem:\n+\tice_rx_queue_release(pf->fdir.rxq);\n+\tpf->fdir.rxq = NULL;\n+fail_setup_rx:\n+\tice_tx_queue_release(pf->fdir.txq);\n+\tpf->fdir.txq = NULL;\n+fail_setup_tx:\n+\tice_release_vsi(vsi);\n+\tpf->fdir.fdir_vsi = NULL;\n+\treturn err;\n+}\n+\n+static int\n+ice_init_fdir_filter(struct ice_adapter *ad)\n+{\n+\tstruct ice_pf *pf = &ad->pf;\n+\tint ret;\n+\n+\tret = ice_fdir_setup(pf);\n+\n+\treturn ret;\n+}\n+\n+static struct ice_flow_engine ice_fdir_engine = {\n+\t.init = ice_init_fdir_filter,\n+\t.type = ICE_FLOW_ENGINE_FDIR,\n+};\n+\n+RTE_INIT(ice_fdir_init_log)\n+{\n+\tice_register_flow_engine(&ice_fdir_engine);\n+}\ndiff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c\nindex 0282b5375..bd802e350 100644\n--- a/drivers/net/ice/ice_rxtx.c\n+++ b/drivers/net/ice/ice_rxtx.c\n@@ -474,6 +474,175 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \treturn 0;\n }\n \n+static enum ice_status\n+ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)\n+{\n+\tstruct ice_vsi *vsi = rxq->vsi;\n+\tstruct ice_hw *hw = ICE_VSI_TO_HW(vsi);\n+\tstruct ice_rlan_ctx rx_ctx;\n+\tenum ice_status err;\n+\tuint32_t regval;\n+\n+\t/**\n+\t * The kernel driver uses flex descriptor. It sets the register\n+\t * to flex descriptor mode.\n+\t * DPDK uses legacy descriptor. It should set the register back\n+\t * to the default value, then uses legacy descriptor mode.\n+\t */\n+\tregval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &\n+\t\t QRXFLXP_CNTXT_RXDID_PRIO_M;\n+\tICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);\n+\n+\trxq->rx_hdr_len = 0;\n+\trxq->rx_buf_len = 1024;\n+\n+\tmemset(&rx_ctx, 0, sizeof(rx_ctx));\n+\n+\trx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;\n+\trx_ctx.qlen = rxq->nb_rx_desc;\n+\trx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;\n+\trx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;\n+\trx_ctx.dtype = 0; /* No Header Split mode */\n+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n+\trx_ctx.dsize = 1; /* 32B descriptors */\n+#endif\n+\trx_ctx.rxmax = RTE_ETHER_MAX_LEN;\n+\t/* TPH: Transaction Layer Packet (TLP) processing hints */\n+\trx_ctx.tphrdesc_ena = 1;\n+\trx_ctx.tphwdesc_ena = 1;\n+\trx_ctx.tphdata_ena = 1;\n+\trx_ctx.tphhead_ena = 1;\n+\t/* Low Receive Queue Threshold defined in 64 descriptors units.\n+\t * When the number of free descriptors goes below the lrxqthresh,\n+\t * an immediate interrupt is triggered.\n+\t */\n+\trx_ctx.lrxqthresh = 2;\n+\t/*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/\n+\trx_ctx.l2tsel = 1;\n+\trx_ctx.showiv = 0;\n+\trx_ctx.crcstrip = (rxq->crc_len == 0) ? 
1 : 0;\n+\n+\terr = ice_clear_rxq_ctx(hw, rxq->reg_idx);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to clear Lan Rx queue (%u) context\",\n+\t\t\t    rxq->queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\terr = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to write Lan Rx queue (%u) context\",\n+\t\t\t    rxq->queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);\n+\n+\t/* Init the Rx tail register*/\n+\tICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n+\n+\treturn 0;\n+}\n+\n+int\n+ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct ice_rx_queue *rxq;\n+\tint err;\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\trxq = pf->fdir.rxq;\n+\tif (!rxq || !rxq->q_set) {\n+\t\tPMD_DRV_LOG(ERR, \"FDIR RX queue %u not available or setup\",\n+\t\t\t    rx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\terr = ice_fdir_program_hw_rx_queue(rxq);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"fail to program FDIR RX queue %u\",\n+\t\t\t    rx_queue_id);\n+\t\treturn -EIO;\n+\t}\n+\n+\trte_wmb();\n+\n+\t/* Init the RX tail register. */\n+\tICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n+\n+\terr = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch FDIR RX queue %u on\",\n+\t\t\t    rx_queue_id);\n+\n+\t\tice_reset_rx_queue(rxq);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_tx_queue *txq;\n+\tint err;\n+\tstruct ice_vsi *vsi;\n+\tstruct ice_hw *hw;\n+\tstruct ice_aqc_add_tx_qgrp txq_elem;\n+\tstruct ice_tlan_ctx tx_ctx;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\ttxq = pf->fdir.txq;\n+\tif (!txq || !txq->q_set) {\n+\t\tPMD_DRV_LOG(ERR, \"FDIR TX queue %u is not available or setup\",\n+\t\t\t    tx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tvsi = txq->vsi;\n+\thw = ICE_VSI_TO_HW(vsi);\n+\n+\tmemset(&txq_elem, 0, sizeof(txq_elem));\n+\tmemset(&tx_ctx, 0, sizeof(tx_ctx));\n+\ttxq_elem.num_txqs = 1;\n+\ttxq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);\n+\n+\ttx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;\n+\ttx_ctx.qlen = txq->nb_tx_desc;\n+\ttx_ctx.pf_num = hw->pf_id;\n+\ttx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;\n+\ttx_ctx.src_vsi = vsi->vsi_id;\n+\ttx_ctx.port_num = hw->port_info->lport;\n+\ttx_ctx.tso_ena = 1; /* tso enable */\n+\ttx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */\n+\ttx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */\n+\n+\tice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,\n+\t\t    ice_tlan_ctx_info);\n+\n+\ttxq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);\n+\n+\t/* Init the Tx tail register*/\n+\tICE_PCI_REG_WRITE(txq->qtx_tail, 0);\n+\n+\t/* Fix me, we assume TC always 0 here */\n+\terr = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,\n+\t\t\t      &txq_elem, sizeof(txq_elem), NULL);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to add FDIR txq\");\n+\t\treturn -EIO;\n+\t}\n+\t/* store the schedule node id */\n+\ttxq->q_teid = txq_elem.txqs[0].q_teid;\n+\n+\treturn 0;\n+}\n+\n /* Free all mbufs for descriptors in tx queue */\n static void\n _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)\n@@ -997,6 +1166,10 @@ 
ice_rxd_status_to_pkt_flags(uint64_t qword)\n \t\t  ICE_RX_DESC_FLTSTAT_RSS_HASH) ==\n \t\t ICE_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;\n \n+\t/* Check if FDIR Match */\n+\tflags |= (qword & (1 << ICE_RX_DESC_STATUS_FLM_S) ?\n+\t\t  PKT_RX_FDIR : 0);\n+\n \treturn flags;\n }\n \n@@ -1060,6 +1233,33 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_desc *rxdp)\n \t\t   mb->vlan_tci, mb->vlan_tci_outer);\n }\n \n+#define ICE_RX_DESC_EXT_STATUS_FLEXBH_M   0x03\n+#define ICE_RX_DESC_EXT_STATUS_FLEXBH_FD_ID  0x01\n+\n+static inline uint64_t\n+ice_rxd_build_fdir(volatile union ice_rx_desc *rxdp, struct rte_mbuf *mb)\n+{\n+\tuint64_t flags = 0;\n+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n+\tuint16_t flexbh;\n+\n+\tflexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>\n+\t\tICE_RX_DESC_EXT_STATUS_FLEXBH_S) &\n+\t\tICE_RX_DESC_EXT_STATUS_FLEXBH_M;\n+\n+\tif (flexbh == ICE_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {\n+\t\tmb->hash.fdir.hi =\n+\t\t\trte_le_to_cpu_32(rxdp->wb.qword3.fd_id);\n+\t\tflags |= PKT_RX_FDIR_ID;\n+\t}\n+#else\n+\tmb->hash.fdir.hi =\n+\t\trte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);\n+\tflags |= PKT_RX_FDIR_ID;\n+#endif\n+\treturn flags;\n+}\n+\n #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n #define ICE_LOOK_AHEAD 8\n #if (ICE_LOOK_AHEAD != 8)\n@@ -1127,6 +1327,8 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)\n \t\t\t\tmb->hash.rss =\n \t\t\t\t\trte_le_to_cpu_32(\n \t\t\t\t\t\trxdp[j].wb.qword0.hi_dword.rss);\n+\t\t\tif (pkt_flags & PKT_RX_FDIR)\n+\t\t\t\tpkt_flags |= ice_rxd_build_fdir(&rxdp[j], mb);\n \t\t\tmb->packet_type = ptype_tbl[(uint8_t)(\n \t\t\t\t\t\t(qword1 &\n \t\t\t\t\t\t ICE_RXD_QW1_PTYPE_M) >>\n@@ -1448,6 +1650,8 @@ ice_recv_scattered_pkts(void *rx_queue,\n \t\tif (pkt_flags & PKT_RX_RSS_HASH)\n \t\t\tfirst_seg->hash.rss =\n \t\t\t\trte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);\n+\t\tif (pkt_flags & PKT_RX_FDIR)\n+\t\t\tpkt_flags |= ice_rxd_build_fdir(&rxd, first_seg);\n \n \t\tfirst_seg->ol_flags |= pkt_flags;\n \t\t/* Prefetch data of first segment, if configured to do so. */\n@@ -1635,6 +1839,127 @@ ice_free_queues(struct rte_eth_dev *dev)\n \tdev->data->nb_tx_queues = 0;\n }\n \n+#define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC\n+#define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC\n+\n+int\n+ice_fdir_setup_tx_resources(struct ice_pf *pf)\n+{\n+\tstruct ice_tx_queue *txq;\n+\tconst struct rte_memzone *tz = NULL;\n+\tuint32_t ring_size;\n+\tstruct rte_eth_dev *dev;\n+\n+\tif (!pf) {\n+\t\tPMD_DRV_LOG(ERR, \"PF is not available\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdev = pf->adapter->eth_dev;\n+\n+\t/* Allocate the TX queue data structure. */\n+\ttxq = rte_zmalloc_socket(\"ice fdir tx queue\",\n+\t\t\t\t sizeof(struct ice_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t SOCKET_ID_ANY);\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for \"\n+\t\t\t    \"tx queue structure.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate TX hardware ring descriptors. 
*/\n+\tring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;\n+\tring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);\n+\n+\ttz = rte_eth_dma_zone_reserve(dev, \"fdir_tx_ring\",\n+\t\t\t\t      ICE_FDIR_QUEUE_ID, ring_size,\n+\t\t\t\t      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);\n+\tif (!tz) {\n+\t\tice_tx_queue_release(txq);\n+\t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for TX.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;\n+\ttxq->queue_id = ICE_FDIR_QUEUE_ID;\n+\ttxq->reg_idx = pf->fdir.fdir_vsi->base_queue;\n+\ttxq->vsi = pf->fdir.fdir_vsi;\n+\n+\ttxq->tx_ring_dma = tz->iova;\n+\ttxq->tx_ring = (struct ice_tx_desc *)tz->addr;\n+\t/*\n+\t * don't need to allocate software ring and reset for the fdir\n+\t * program queue just set the queue has been configured.\n+\t */\n+\ttxq->q_set = TRUE;\n+\tpf->fdir.txq = txq;\n+\n+\ttxq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;\n+\n+\treturn ICE_SUCCESS;\n+}\n+\n+int\n+ice_fdir_setup_rx_resources(struct ice_pf *pf)\n+{\n+\tstruct ice_rx_queue *rxq;\n+\tconst struct rte_memzone *rz = NULL;\n+\tuint32_t ring_size;\n+\tstruct rte_eth_dev *dev;\n+\n+\tif (!pf) {\n+\t\tPMD_DRV_LOG(ERR, \"PF is not available\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdev = pf->adapter->eth_dev;\n+\n+\t/* Allocate the RX queue data structure. */\n+\trxq = rte_zmalloc_socket(\"ice fdir rx queue\",\n+\t\t\t\t sizeof(struct ice_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t SOCKET_ID_ANY);\n+\tif (!rxq) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for \"\n+\t\t\t    \"rx queue structure.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate RX hardware ring descriptors. */\n+\tring_size = sizeof(union ice_rx_desc) * ICE_FDIR_NUM_RX_DESC;\n+\tring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);\n+\n+\trz = rte_eth_dma_zone_reserve(dev, \"fdir_rx_ring\",\n+\t\t\t\t      ICE_FDIR_QUEUE_ID, ring_size,\n+\t\t\t\t      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);\n+\tif (!rz) {\n+\t\tice_rx_queue_release(rxq);\n+\t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for RX.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;\n+\trxq->queue_id = ICE_FDIR_QUEUE_ID;\n+\trxq->reg_idx = pf->fdir.fdir_vsi->base_queue;\n+\trxq->vsi = pf->fdir.fdir_vsi;\n+\n+\trxq->rx_ring_dma = rz->iova;\n+\tmemset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC * sizeof(union ice_rx_desc));\n+\trxq->rx_ring = (union ice_rx_desc *)rz->addr;\n+\n+\t/*\n+\t * Don't need to allocate software ring and reset for the fdir\n+\t * rx queue, just set the queue has been configured.\n+\t */\n+\trxq->q_set = TRUE;\n+\tpf->fdir.rxq = rxq;\n+\n+\trxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;\n+\n+\treturn ICE_SUCCESS;\n+}\n+\n uint16_t\n ice_recv_pkts(void *rx_queue,\n \t      struct rte_mbuf **rx_pkts,\n@@ -1716,6 +2041,8 @@ ice_recv_pkts(void *rx_queue,\n \t\tif (pkt_flags & PKT_RX_RSS_HASH)\n \t\t\trxm->hash.rss =\n \t\t\t\trte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);\n+\t\tif (pkt_flags & PKT_RX_FDIR)\n+\t\t\tpkt_flags |= ice_rxd_build_fdir(&rxd, rxm);\n \t\trxm->ol_flags |= pkt_flags;\n \t\t/* copy old mbuf to rx_pkts */\n \t\trx_pkts[nb_rx++] = rxm;\n@@ -3061,3 +3388,124 @@ ice_set_default_ptype_table(struct rte_eth_dev *dev)\n \tfor (i = 0; i < ICE_MAX_PKT_TYPE; i++)\n \t\tad->ptype_tbl[i] = ice_get_default_pkt_type(i);\n }\n+\n+/*\n+ * check the programming status descriptor in rx queue.\n+ * done after Programming Flow Director is programmed on\n+ * tx queue\n+ */\n+static inline int\n+ice_check_fdir_programming_status(struct ice_rx_queue 
*rxq)\n+{\n+\tvolatile union ice_rx_desc *rxdp;\n+\tuint64_t qword1;\n+\tuint32_t rx_status;\n+\tuint32_t len, id;\n+\tuint32_t error;\n+\tint ret = 0;\n+\n+\trxdp = &rxq->rx_ring[rxq->rx_tail];\n+\tqword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);\n+\trx_status = (qword1 & ICE_RXD_QW1_STATUS_M)\n+\t\t\t>> ICE_RXD_QW1_STATUS_S;\n+\n+\tif (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {\n+\t\tlen = qword1 >> ICE_RX_PROG_STATUS_DESC_LEN_S;\n+\t\tid = (qword1 & ICE_RX_PROG_STATUS_DESC_QW1_PROGID_M) >>\n+\t\t\t    ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S;\n+\n+\t\tif (len  == ICE_RX_PROG_STATUS_DESC_LEN &&\n+\t\t    id == ICE_RX_PROG_STATUS_DESC_FD_FLTR_STATUS) {\n+\t\t\terror = (qword1 &\n+\t\t\t\tICE_RX_PROG_STATUS_DESC_QW1_ERROR_M) >>\n+\t\t\t\tICE_RX_PROG_STATUS_DESC_QW1_ERROR_S;\n+\t\t\tif (error == (0x1 <<\n+\t\t\t\tICE_RX_PROG_STATUS_DESC_FD_TBL_FULL_S)) {\n+\t\t\t\tPMD_DRV_LOG(ERR, \"Failed to add FDIR filter\"\n+\t\t\t\t\t    \" (FD_ID %u): programming status\"\n+\t\t\t\t\t    \" reported.\",\n+\t\t\t\t\t    rxdp->wb.qword0.hi_dword.fd_id);\n+\t\t\t\tret = -1;\n+\t\t\t} else if (error == (0x1 <<\n+\t\t\t\tICE_RX_PROG_STATUS_DESC_NO_FD_ENTRY_S)) {\n+\t\t\t\tPMD_DRV_LOG(ERR, \"Failed to delete FDIR filter\"\n+\t\t\t\t\t    \" (FD_ID %u): programming status\"\n+\t\t\t\t\t    \" reported.\",\n+\t\t\t\t\t    rxdp->wb.qword0.hi_dword.fd_id);\n+\t\t\t\tret = -1;\n+\t\t\t} else {\n+\t\t\t\tPMD_DRV_LOG(ERR, \"invalid programming status\"\n+\t\t\t\t\t    \" reported, error = %u.\", error);\n+\t\t\t}\n+\t\t} else {\n+\t\t\tPMD_DRV_LOG(INFO, \"unknown programming status\"\n+\t\t\t\t    \" reported, len = %d, id = %u.\", len, id);\n+\t\t}\n+\t\trxdp->wb.qword1.status_error_len = 0;\n+\t\trxq->rx_tail++;\n+\t\tif (unlikely(rxq->rx_tail == rxq->nb_rx_desc))\n+\t\t\trxq->rx_tail = 0;\n+\t\tif (rxq->rx_tail == 0)\n+\t\t\tICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n+\t\telse\n+\t\t\tICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);\n+\t}\n+\n+\treturn ret;\n+}\n+\n+#define ICE_FDIR_MAX_WAIT_US 10000\n+\n+int\n+ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)\n+{\n+\tstruct ice_tx_queue *txq = pf->fdir.txq;\n+\tstruct ice_rx_queue *rxq = pf->fdir.rxq;\n+\tvolatile struct ice_fltr_desc *fdirdp;\n+\tvolatile struct ice_tx_desc *txdp;\n+\tuint32_t td_cmd;\n+\tuint16_t i;\n+\n+\tfdirdp = (volatile struct ice_fltr_desc *)\n+\t\t(&txq->tx_ring[txq->tx_tail]);\n+\tfdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;\n+\tfdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;\n+\n+\ttxdp = &txq->tx_ring[txq->tx_tail + 1];\n+\ttxdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);\n+\ttd_cmd = ICE_TX_DESC_CMD_EOP |\n+\t\tICE_TX_DESC_CMD_RS  |\n+\t\tICE_TX_DESC_CMD_DUMMY;\n+\n+\ttxdp->cmd_type_offset_bsz =\n+\t\tice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);\n+\n+\ttxq->tx_tail += 2;\n+\tif (txq->tx_tail >= txq->nb_tx_desc)\n+\t\ttxq->tx_tail = 0;\n+\t/* Update the tx tail register */\n+\trte_wmb();\n+\tICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);\n+\tfor (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {\n+\t\tif ((txdp->cmd_type_offset_bsz &\n+\t\t     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==\n+\t\t    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))\n+\t\t\tbreak;\n+\t\trte_delay_us(1);\n+\t}\n+\tif (i >= ICE_FDIR_MAX_WAIT_US) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to program FDIR filter: time out to get DD on tx queue.\");\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\tfor (; i < ICE_FDIR_MAX_WAIT_US; i++) {\n+\t\tif (ice_check_fdir_programming_status(rxq) >= 
0)\n+\t\t\treturn 0;\n+\t\trte_delay_us(1);\n+\t}\n+\n+\tPMD_DRV_LOG(ERR,\n+\t\t    \"Failed to program FDIR filter: programming status reported.\");\n+\treturn -ETIMEDOUT;\n+}\ndiff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h\nindex e9214110c..450db0244 100644\n--- a/drivers/net/ice/ice_rxtx.h\n+++ b/drivers/net/ice/ice_rxtx.h\n@@ -36,6 +36,8 @@\n #define ICE_TX_MAX_FREE_BUF_SZ      64\n #define ICE_DESCS_PER_LOOP          4\n \n+#define ICE_FDIR_PKT_LEN\t512\n+\n typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);\n typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);\n \n@@ -147,10 +149,14 @@ int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n void ice_rx_queue_release(void *rxq);\n void ice_tx_queue_release(void *txq);\n void ice_clear_queues(struct rte_eth_dev *dev);\n void ice_free_queues(struct rte_eth_dev *dev);\n+int ice_fdir_setup_tx_resources(struct ice_pf *pf);\n+int ice_fdir_setup_rx_resources(struct ice_pf *pf);\n uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t       uint16_t nb_pkts);\n uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n@@ -188,4 +194,5 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,\n \t\t\t\t\t  uint16_t nb_pkts);\n uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t\tuint16_t nb_pkts);\n+int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);\n #endif /* _ICE_RXTX_H_ */\ndiff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build\nindex 36b4b3c85..53846442a 100644\n--- a/drivers/net/ice/meson.build\n+++ b/drivers/net/ice/meson.build\n@@ -10,7 +10,8 @@ sources = files(\n \t'ice_ethdev.c',\n \t'ice_rxtx.c',\n \t'ice_switch_filter.c',\n-\t'ice_generic_flow.c'\n+\t'ice_generic_flow.c',\n+\t'ice_fdir_filter.c'\n \t)\n \n deps += ['hash']\n",
    "prefixes": [
        "01/12"
    ]
}
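
As the Allow header above indicates, the same URL also accepts PUT and PATCH. A minimal sketch of a partial update with PATCH, assuming Patchwork's token authentication and maintainer rights on the project; the token value and the new state are placeholders, and exactly which fields are writable depends on the server's permissions:

# Partially update the patch: set a new state and unarchive it.
# "<your-api-token>" and the chosen state are hypothetical placeholders.
import requests

url = "http://patches.dpdk.org/api/patches/58712/"
headers = {"Authorization": "Token <your-api-token>"}

resp = requests.patch(url, headers=headers,
                      json={"state": "accepted", "archived": False})
resp.raise_for_status()
print(resp.json()["state"])

PUT follows the same pattern but expects a complete writable representation rather than a subset of fields. To work with the change itself, the mbox URL in the response can be downloaded and applied locally with git am.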