get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/61448/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 61448,
    "url": "http://patches.dpdk.org/api/patches/61448/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20191018111602.26742-2-yahui.cao@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20191018111602.26742-2-yahui.cao@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20191018111602.26742-2-yahui.cao@intel.com",
    "date": "2019-10-18T11:15:54",
    "name": "[v7,1/9] net/ice: enable flow director engine",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "8697b1876b17c45f588ac09400ecd56e0c34fbf7",
    "submitter": {
        "id": 1176,
        "url": "http://patches.dpdk.org/api/people/1176/?format=api",
        "name": "Cao, Yahui",
        "email": "yahui.cao@intel.com"
    },
    "delegate": {
        "id": 31221,
        "url": "http://patches.dpdk.org/api/users/31221/?format=api",
        "username": "yexl",
        "first_name": "xiaolong",
        "last_name": "ye",
        "email": "xiaolong.ye@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20191018111602.26742-2-yahui.cao@intel.com/mbox/",
    "series": [
        {
            "id": 6926,
            "url": "http://patches.dpdk.org/api/series/6926/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=6926",
            "date": "2019-10-18T11:15:53",
            "name": "net/ice: add ice Flow Director driver",
            "version": 7,
            "mbox": "http://patches.dpdk.org/series/6926/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/61448/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/61448/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 70EED1E54B;\n\tFri, 18 Oct 2019 05:33:46 +0200 (CEST)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby dpdk.org (Postfix) with ESMTP id ACB361E533\n\tfor <dev@dpdk.org>; Fri, 18 Oct 2019 05:33:42 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t17 Oct 2019 20:33:42 -0700",
            "from dpdk-yahui-skylake.sh.intel.com ([10.67.119.16])\n\tby FMSMGA003.fm.intel.com with ESMTP; 17 Oct 2019 20:33:40 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.67,310,1566889200\"; d=\"scan'208\";a=\"202577911\"",
        "From": "Yahui Cao <yahui.cao@intel.com>",
        "To": "Qiming Yang <qiming.yang@intel.com>,\n\tWenzhuo Lu <wenzhuo.lu@intel.com>",
        "Cc": "dev@dpdk.org, Qi Zhang <qi.z.zhang@intel.com>,\n\tXiaolong Ye <xiaolong.ye@intel.com>,\n\tBeilei Xing <beilei.xing@intel.com>, Yahui Cao <yahui.cao@intel.com>",
        "Date": "Fri, 18 Oct 2019 19:15:54 +0800",
        "Message-Id": "<20191018111602.26742-2-yahui.cao@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20191018111602.26742-1-yahui.cao@intel.com>",
        "References": "<20191017160454.14518-1-yahui.cao@intel.com>\n\t<20191018111602.26742-1-yahui.cao@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v7 1/9] net/ice: enable flow director engine",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Beilei Xing <beilei.xing@intel.com>\n\nEnable flow director engine, including initialization and teardown.\n - Control VSI create and release.\n - Queue pair allocated, set up and release.\n - Programming packet create and release.\n - FDIR profile create and release.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nAcked-by: Qi Zhang <qi.z.zhang@intel.com>\n---\n drivers/net/ice/Makefile          |   1 +\n drivers/net/ice/ice_ethdev.c      | 107 ++++++--\n drivers/net/ice/ice_ethdev.h      |  19 ++\n drivers/net/ice/ice_fdir_filter.c | 225 +++++++++++++++++\n drivers/net/ice/ice_rxtx.c        | 403 ++++++++++++++++++++++++++++++\n drivers/net/ice/ice_rxtx.h        |   9 +\n drivers/net/ice/meson.build       |   3 +-\n 7 files changed, 747 insertions(+), 20 deletions(-)\n create mode 100644 drivers/net/ice/ice_fdir_filter.c",
    "diff": "diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile\nindex 4a279f196..f7e185288 100644\n--- a/drivers/net/ice/Makefile\n+++ b/drivers/net/ice/Makefile\n@@ -62,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c\n endif\n \n SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c\n+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_fdir_filter.c\n ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)\n \tCC_AVX2_SUPPORT=1\n else\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex b433ba844..12e5ea658 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -1413,10 +1413,20 @@ ice_pf_sw_init(struct rte_eth_dev *dev)\n \n \tice_init_proto_xtr(dev);\n \n+\tif (hw->func_caps.fd_fltr_guar > 0 ||\n+\t    hw->func_caps.fd_fltr_best_effort > 0) {\n+\t\tpf->flags |= ICE_FLAG_FDIR;\n+\t\tpf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;\n+\t\tpf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;\n+\t} else {\n+\t\tpf->fdir_nb_qps = 0;\n+\t}\n+\tpf->fdir_qp_offset = 0;\n+\n \treturn 0;\n }\n \n-static struct ice_vsi *\n+struct ice_vsi *\n ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n {\n \tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n@@ -1428,6 +1438,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \tstruct rte_ether_addr mac_addr;\n \tuint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };\n \tuint8_t tc_bitmap = 0x1;\n+\tuint16_t cfg;\n \n \t/* hw->num_lports = 1 in NIC mode */\n \tvsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);\n@@ -1451,14 +1462,10 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \tpf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;\n \n \tmemset(&vsi_ctx, 0, sizeof(vsi_ctx));\n-\t/* base_queue in used in queue mapping of VSI add/update command.\n-\t * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ\n-\t * cases in the first stage. 
Only Main VSI.\n-\t */\n-\tvsi->base_queue = 0;\n \tswitch (type) {\n \tcase ICE_VSI_PF:\n \t\tvsi->nb_qps = pf->lan_nb_qps;\n+\t\tvsi->base_queue = 1;\n \t\tice_vsi_config_default_rss(&vsi_ctx.info);\n \t\tvsi_ctx.alloc_from_pool = true;\n \t\tvsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;\n@@ -1472,6 +1479,18 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \t\tvsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;\n \t\tvsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |\n \t\t\t\t\t ICE_AQ_VSI_Q_OPT_RSS_TPLZ;\n+\n+\t\t/* FDIR */\n+\t\tcfg = ICE_AQ_VSI_PROP_SECURITY_VALID |\n+\t\t\tICE_AQ_VSI_PROP_FLOW_DIR_VALID;\n+\t\tvsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);\n+\t\tcfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;\n+\t\tvsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);\n+\t\tvsi_ctx.info.max_fd_fltr_dedicated =\n+\t\t\trte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);\n+\t\tvsi_ctx.info.max_fd_fltr_shared =\n+\t\t\trte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);\n+\n \t\t/* Enable VLAN/UP trip */\n \t\tret = ice_vsi_config_tc_queue_mapping(vsi,\n \t\t\t\t\t\t      &vsi_ctx.info,\n@@ -1484,6 +1503,28 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \t\t\tgoto fail_mem;\n \t\t}\n \n+\t\tbreak;\n+\tcase ICE_VSI_CTRL:\n+\t\tvsi->nb_qps = pf->fdir_nb_qps;\n+\t\tvsi->base_queue = ICE_FDIR_QUEUE_ID;\n+\t\tvsi_ctx.alloc_from_pool = true;\n+\t\tvsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;\n+\n+\t\tcfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;\n+\t\tvsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);\n+\t\tcfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;\n+\t\tvsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);\n+\t\tvsi_ctx.info.sw_id = hw->port_info->sw_id;\n+\t\tret = ice_vsi_config_tc_queue_mapping(vsi,\n+\t\t\t\t\t\t      &vsi_ctx.info,\n+\t\t\t\t\t\t      ICE_DEFAULT_TCMAP);\n+\t\tif (ret) {\n+\t\t\tPMD_INIT_LOG(ERR,\n+\t\t\t\t     \"tc queue mapping with vsi failed, \"\n+\t\t\t\t     \"err = %d\",\n+\t\t\t\t     ret);\n+\t\t\tgoto 
fail_mem;\n+\t\t}\n \t\tbreak;\n \tdefault:\n \t\t/* for other types of VSI */\n@@ -1502,6 +1543,14 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \t\t}\n \t\tvsi->msix_intr = ret;\n \t\tvsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);\n+\t} else if (type == ICE_VSI_CTRL) {\n+\t\tret = ice_res_pool_alloc(&pf->msix_pool, 1);\n+\t\tif (ret < 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"VSI %d get heap failed %d\",\n+\t\t\t\t    vsi->vsi_id, ret);\n+\t\t}\n+\t\tvsi->msix_intr = ret;\n+\t\tvsi->nb_msix = 1;\n \t} else {\n \t\tvsi->msix_intr = 0;\n \t\tvsi->nb_msix = 0;\n@@ -1517,20 +1566,22 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)\n \tpf->vsis_allocated = vsi_ctx.vsis_allocd;\n \tpf->vsis_unallocated = vsi_ctx.vsis_unallocated;\n \n-\t/* MAC configuration */\n-\trte_memcpy(pf->dev_addr.addr_bytes,\n-\t\t   hw->port_info->mac.perm_addr,\n-\t\t   ETH_ADDR_LEN);\n+\tif (type == ICE_VSI_PF) {\n+\t\t/* MAC configuration */\n+\t\trte_memcpy(pf->dev_addr.addr_bytes,\n+\t\t\t   hw->port_info->mac.perm_addr,\n+\t\t\t   ETH_ADDR_LEN);\n \n-\trte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);\n-\tret = ice_add_mac_filter(vsi, &mac_addr);\n-\tif (ret != ICE_SUCCESS)\n-\t\tPMD_INIT_LOG(ERR, \"Failed to add dflt MAC filter\");\n+\t\trte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);\n+\t\tret = ice_add_mac_filter(vsi, &mac_addr);\n+\t\tif (ret != ICE_SUCCESS)\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to add dflt MAC filter\");\n \n-\trte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);\n-\tret = ice_add_mac_filter(vsi, &mac_addr);\n-\tif (ret != ICE_SUCCESS)\n-\t\tPMD_INIT_LOG(ERR, \"Failed to add MAC filter\");\n+\t\trte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);\n+\t\tret = ice_add_mac_filter(vsi, &mac_addr);\n+\t\tif (ret != ICE_SUCCESS)\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to add MAC filter\");\n+\t}\n \n \t/* At the beginning, only TC0. 
*/\n \t/* What we need here is the maximam number of the TX queues.\n@@ -1568,7 +1619,9 @@ ice_send_driver_ver(struct ice_hw *hw)\n static int\n ice_pf_setup(struct ice_pf *pf)\n {\n+\tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n \tstruct ice_vsi *vsi;\n+\tuint16_t unused;\n \n \t/* Clear all stats counters */\n \tpf->offset_loaded = FALSE;\n@@ -1577,6 +1630,13 @@ ice_pf_setup(struct ice_pf *pf)\n \tmemset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));\n \tmemset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));\n \n+\t/* force guaranteed filter pool for PF */\n+\tice_alloc_fd_guar_item(hw, &unused,\n+\t\t\t       hw->func_caps.fd_fltr_guar);\n+\t/* force shared filter pool for PF */\n+\tice_alloc_fd_shrd_item(hw, &unused,\n+\t\t\t       hw->func_caps.fd_fltr_best_effort);\n+\n \tvsi = ice_setup_vsi(pf, ICE_VSI_PF);\n \tif (!vsi) {\n \t\tPMD_INIT_LOG(ERR, \"Failed to add vsi for PF\");\n@@ -2031,7 +2091,7 @@ ice_dev_init(struct rte_eth_dev *dev)\n \treturn ret;\n }\n \n-static int\n+int\n ice_release_vsi(struct ice_vsi *vsi)\n {\n \tstruct ice_hw *hw;\n@@ -2113,6 +2173,9 @@ ice_dev_stop(struct rte_eth_dev *dev)\n \t/* disable all queue interrupts */\n \tice_vsi_disable_queues_intr(main_vsi);\n \n+\tif (pf->fdir.fdir_vsi)\n+\t\tice_vsi_disable_queues_intr(pf->fdir.fdir_vsi);\n+\n \t/* Clear all queues and release mbufs */\n \tice_clear_queues(dev);\n \n@@ -2452,6 +2515,12 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev)\n \t/* Enable interrupts for all the queues */\n \tice_vsi_enable_queues_intr(vsi);\n \n+\t/* Enable FDIR MSIX interrupt */\n+\tif (pf->fdir.fdir_vsi) {\n+\t\tice_vsi_queues_bind_intr(pf->fdir.fdir_vsi);\n+\t\tice_vsi_enable_queues_intr(pf->fdir.fdir_vsi);\n+\t}\n+\n \trte_intr_enable(intr_handle);\n \n \treturn 0;\ndiff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h\nindex 99eb2e2fc..222fc081f 100644\n--- a/drivers/net/ice/ice_ethdev.h\n+++ b/drivers/net/ice/ice_ethdev.h\n@@ -247,6 +247,17 @@ 
TAILQ_HEAD(ice_flow_list, rte_flow);\n struct ice_flow_parser_node;\n TAILQ_HEAD(ice_parser_list, ice_flow_parser_node);\n \n+/**\n+ *  A structure used to define fields of a FDIR related info.\n+ */\n+struct ice_fdir_info {\n+\tstruct ice_vsi *fdir_vsi;     /* pointer to fdir VSI structure */\n+\tstruct ice_tx_queue *txq;\n+\tstruct ice_rx_queue *rxq;\n+\tvoid *prg_pkt;                 /* memory for fdir program packet */\n+\tuint64_t dma_addr;             /* physic address of packet memory*/\n+};\n+\n struct ice_pf {\n \tstruct ice_adapter *adapter; /* The adapter this PF associate to */\n \tstruct ice_vsi *main_vsi; /* pointer to main VSI structure */\n@@ -267,6 +278,9 @@ struct ice_pf {\n \tuint16_t lan_nb_qps; /* The number of queue pairs of LAN */\n \tuint16_t base_queue; /* The base queue pairs index  in the device */\n \tuint8_t *proto_xtr; /* Protocol extraction type for all queues */\n+\tuint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */\n+\tuint16_t fdir_qp_offset;\n+\tstruct ice_fdir_info fdir; /* flow director info */\n \tstruct ice_hw_port_stats stats_offset;\n \tstruct ice_hw_port_stats stats;\n \t/* internal packet statistics, it should be excluded from the total */\n@@ -353,6 +367,11 @@ struct ice_vsi_vlan_pvid_info {\n #define ICE_PF_TO_ETH_DEV(pf) \\\n \t(((struct ice_pf *)pf)->adapter->eth_dev)\n \n+struct ice_vsi *\n+ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);\n+int\n+ice_release_vsi(struct ice_vsi *vsi);\n+\n static inline int\n ice_align_floor(int n)\n {\ndiff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c\nnew file mode 100644\nindex 000000000..0fb3054f5\n--- /dev/null\n+++ b/drivers/net/ice/ice_fdir_filter.c\n@@ -0,0 +1,225 @@\n+#include <stdio.h>\n+#include <rte_flow.h>\n+#include \"base/ice_fdir.h\"\n+#include \"base/ice_flow.h\"\n+#include \"base/ice_type.h\"\n+#include \"ice_ethdev.h\"\n+#include \"ice_rxtx.h\"\n+#include \"ice_generic_flow.h\"\n+\n+static const struct 
rte_memzone *\n+ice_memzone_reserve(const char *name, uint32_t len, int socket_id)\n+{\n+\treturn rte_memzone_reserve_aligned(name, len, socket_id,\n+\t\t\t\t\t   RTE_MEMZONE_IOVA_CONTIG,\n+\t\t\t\t\t   ICE_RING_BASE_ALIGN);\n+}\n+\n+#define ICE_FDIR_MZ_NAME\t\"FDIR_MEMZONE\"\n+\n+static int\n+ice_fdir_prof_alloc(struct ice_hw *hw)\n+{\n+\tenum ice_fltr_ptype ptype, fltr_ptype;\n+\n+\tif (!hw->fdir_prof) {\n+\t\thw->fdir_prof = (struct ice_fd_hw_prof **)\n+\t\t\tice_malloc(hw, ICE_FLTR_PTYPE_MAX *\n+\t\t\t\t   sizeof(*hw->fdir_prof));\n+\t\tif (!hw->fdir_prof)\n+\t\t\treturn -ENOMEM;\n+\t}\n+\tfor (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;\n+\t     ptype < ICE_FLTR_PTYPE_MAX;\n+\t     ptype++) {\n+\t\tif (!hw->fdir_prof[ptype]) {\n+\t\t\thw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)\n+\t\t\t\tice_malloc(hw, sizeof(**hw->fdir_prof));\n+\t\t\tif (!hw->fdir_prof[ptype])\n+\t\t\t\tgoto fail_mem;\n+\t\t}\n+\t}\n+\treturn 0;\n+\n+fail_mem:\n+\tfor (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;\n+\t     fltr_ptype < ptype;\n+\t     fltr_ptype++)\n+\t\trte_free(hw->fdir_prof[fltr_ptype]);\n+\trte_free(hw->fdir_prof);\n+\treturn -ENOMEM;\n+}\n+\n+/*\n+ * ice_fdir_setup - reserve and initialize the Flow Director resources\n+ * @pf: board private structure\n+ */\n+static int\n+ice_fdir_setup(struct ice_pf *pf)\n+{\n+\tstruct rte_eth_dev *eth_dev = pf->adapter->eth_dev;\n+\tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n+\tconst struct rte_memzone *mz = NULL;\n+\tchar z_name[RTE_MEMZONE_NAMESIZE];\n+\tstruct ice_vsi *vsi;\n+\tint err = ICE_SUCCESS;\n+\n+\tif ((pf->flags & ICE_FLAG_FDIR) == 0) {\n+\t\tPMD_INIT_LOG(ERR, \"HW doesn't support FDIR\");\n+\t\treturn -ENOTSUP;\n+\t}\n+\n+\tPMD_DRV_LOG(INFO, \"FDIR HW Capabilities: fd_fltr_guar = %u,\"\n+\t\t    \" fd_fltr_best_effort = %u.\",\n+\t\t    hw->func_caps.fd_fltr_guar,\n+\t\t    hw->func_caps.fd_fltr_best_effort);\n+\n+\tif (pf->fdir.fdir_vsi) {\n+\t\tPMD_DRV_LOG(INFO, \"FDIR initialization has been done.\");\n+\t\treturn 
ICE_SUCCESS;\n+\t}\n+\n+\t/* make new FDIR VSI */\n+\tvsi = ice_setup_vsi(pf, ICE_VSI_CTRL);\n+\tif (!vsi) {\n+\t\tPMD_DRV_LOG(ERR, \"Couldn't create FDIR VSI.\");\n+\t\treturn -EINVAL;\n+\t}\n+\tpf->fdir.fdir_vsi = vsi;\n+\n+\t/*Fdir tx queue setup*/\n+\terr = ice_fdir_setup_tx_resources(pf);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to setup FDIR TX resources.\");\n+\t\tgoto fail_setup_tx;\n+\t}\n+\n+\t/*Fdir rx queue setup*/\n+\terr = ice_fdir_setup_rx_resources(pf);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to setup FDIR RX resources.\");\n+\t\tgoto fail_setup_rx;\n+\t}\n+\n+\terr = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to start FDIR TX queue.\");\n+\t\tgoto fail_mem;\n+\t}\n+\n+\terr = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to start FDIR RX queue.\");\n+\t\tgoto fail_mem;\n+\t}\n+\n+\t/* reserve memory for the fdir programming packet */\n+\tsnprintf(z_name, sizeof(z_name), \"ICE_%s_%d\",\n+\t\t ICE_FDIR_MZ_NAME,\n+\t\t eth_dev->data->port_id);\n+\tmz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);\n+\tif (!mz) {\n+\t\tPMD_DRV_LOG(ERR, \"Cannot init memzone for \"\n+\t\t\t    \"flow director program packet.\");\n+\t\terr = -ENOMEM;\n+\t\tgoto fail_mem;\n+\t}\n+\tpf->fdir.prg_pkt = mz->addr;\n+\tpf->fdir.dma_addr = mz->iova;\n+\n+\terr = ice_fdir_prof_alloc(hw);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Cannot allocate memory for \"\n+\t\t\t    \"flow director profile.\");\n+\t\terr = -ENOMEM;\n+\t\tgoto fail_mem;\n+\t}\n+\n+\tPMD_DRV_LOG(INFO, \"FDIR setup successfully, with programming queue %u.\",\n+\t\t    vsi->base_queue);\n+\treturn ICE_SUCCESS;\n+\n+fail_mem:\n+\tice_rx_queue_release(pf->fdir.rxq);\n+\tpf->fdir.rxq = NULL;\n+fail_setup_rx:\n+\tice_tx_queue_release(pf->fdir.txq);\n+\tpf->fdir.txq = NULL;\n+fail_setup_tx:\n+\tice_release_vsi(vsi);\n+\tpf->fdir.fdir_vsi = NULL;\n+\treturn 
err;\n+}\n+\n+static void\n+ice_fdir_prof_free(struct ice_hw *hw)\n+{\n+\tenum ice_fltr_ptype ptype;\n+\n+\tfor (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;\n+\t     ptype < ICE_FLTR_PTYPE_MAX;\n+\t     ptype++)\n+\t\trte_free(hw->fdir_prof[ptype]);\n+\n+\trte_free(hw->fdir_prof);\n+}\n+\n+/*\n+ * ice_fdir_teardown - release the Flow Director resources\n+ * @pf: board private structure\n+ */\n+static void\n+ice_fdir_teardown(struct ice_pf *pf)\n+{\n+\tstruct rte_eth_dev *eth_dev = pf->adapter->eth_dev;\n+\tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n+\tstruct ice_vsi *vsi;\n+\tint err;\n+\n+\tvsi = pf->fdir.fdir_vsi;\n+\tif (!vsi)\n+\t\treturn;\n+\n+\terr = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to stop TX queue.\");\n+\n+\terr = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to stop RX queue.\");\n+\n+\tice_tx_queue_release(pf->fdir.txq);\n+\tpf->fdir.txq = NULL;\n+\tice_rx_queue_release(pf->fdir.rxq);\n+\tpf->fdir.rxq = NULL;\n+\tice_release_vsi(vsi);\n+\tpf->fdir.fdir_vsi = NULL;\n+\tice_fdir_prof_free(hw);\n+}\n+\n+static int\n+ice_fdir_init(struct ice_adapter *ad)\n+{\n+\tstruct ice_pf *pf = &ad->pf;\n+\n+\treturn ice_fdir_setup(pf);\n+}\n+\n+static void\n+ice_fdir_uninit(struct ice_adapter *ad)\n+{\n+\tstruct ice_pf *pf = &ad->pf;\n+\n+\tice_fdir_teardown(pf);\n+}\n+\n+static struct ice_flow_engine ice_fdir_engine = {\n+\t.init = ice_fdir_init,\n+\t.uninit = ice_fdir_uninit,\n+\t.type = ICE_FLOW_ENGINE_FDIR,\n+};\n+\n+RTE_INIT(ice_fdir_engine_register)\n+{\n+\tice_register_flow_engine(&ice_fdir_engine);\n+}\ndiff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c\nindex c07aa4b81..d4c801fc8 100644\n--- a/drivers/net/ice/ice_rxtx.c\n+++ b/drivers/net/ice/ice_rxtx.c\n@@ -511,6 +511,179 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \treturn 0;\n }\n \n+static enum ice_status\n+ice_fdir_program_hw_rx_queue(struct 
ice_rx_queue *rxq)\n+{\n+\tstruct ice_vsi *vsi = rxq->vsi;\n+\tstruct ice_hw *hw = ICE_VSI_TO_HW(vsi);\n+\tuint32_t rxdid = ICE_RXDID_COMMS_GENERIC;\n+\tstruct ice_rlan_ctx rx_ctx;\n+\tenum ice_status err;\n+\tuint32_t regval;\n+\n+\trxq->rx_hdr_len = 0;\n+\trxq->rx_buf_len = 1024;\n+\n+\tmemset(&rx_ctx, 0, sizeof(rx_ctx));\n+\n+\trx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;\n+\trx_ctx.qlen = rxq->nb_rx_desc;\n+\trx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;\n+\trx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;\n+\trx_ctx.dtype = 0; /* No Header Split mode */\n+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n+\trx_ctx.dsize = 1; /* 32B descriptors */\n+#endif\n+\trx_ctx.rxmax = RTE_ETHER_MAX_LEN;\n+\t/* TPH: Transaction Layer Packet (TLP) processing hints */\n+\trx_ctx.tphrdesc_ena = 1;\n+\trx_ctx.tphwdesc_ena = 1;\n+\trx_ctx.tphdata_ena = 1;\n+\trx_ctx.tphhead_ena = 1;\n+\t/* Low Receive Queue Threshold defined in 64 descriptors units.\n+\t * When the number of free descriptors goes below the lrxqthresh,\n+\t * an immediate interrupt is triggered.\n+\t */\n+\trx_ctx.lrxqthresh = 2;\n+\t/*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/\n+\trx_ctx.l2tsel = 1;\n+\trx_ctx.showiv = 0;\n+\trx_ctx.crcstrip = (rxq->crc_len == 0) ? 
1 : 0;\n+\n+\t/* Enable Flexible Descriptors in the queue context which\n+\t * allows this driver to select a specific receive descriptor format\n+\t */\n+\tregval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &\n+\t\tQRXFLXP_CNTXT_RXDID_IDX_M;\n+\n+\t/* increasing context priority to pick up profile ID;\n+\t * default is 0x01; setting to 0x03 to ensure profile\n+\t * is programming if prev context is of same priority\n+\t */\n+\tregval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &\n+\t\tQRXFLXP_CNTXT_RXDID_PRIO_M;\n+\n+\tICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);\n+\n+\terr = ice_clear_rxq_ctx(hw, rxq->reg_idx);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to clear Lan Rx queue (%u) context\",\n+\t\t\t    rxq->queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\terr = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to write Lan Rx queue (%u) context\",\n+\t\t\t    rxq->queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);\n+\n+\t/* Init the Rx tail register*/\n+\tICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n+\n+\treturn 0;\n+}\n+\n+int\n+ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct ice_rx_queue *rxq;\n+\tint err;\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\trxq = pf->fdir.rxq;\n+\tif (!rxq || !rxq->q_set) {\n+\t\tPMD_DRV_LOG(ERR, \"FDIR RX queue %u not available or setup\",\n+\t\t\t    rx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\terr = ice_fdir_program_hw_rx_queue(rxq);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"fail to program FDIR RX queue %u\",\n+\t\t\t    rx_queue_id);\n+\t\treturn -EIO;\n+\t}\n+\n+\t/* Init the RX tail register. 
*/\n+\tICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n+\n+\terr = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch FDIR RX queue %u on\",\n+\t\t\t    rx_queue_id);\n+\n+\t\tice_reset_rx_queue(rxq);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_tx_queue *txq;\n+\tint err;\n+\tstruct ice_vsi *vsi;\n+\tstruct ice_hw *hw;\n+\tstruct ice_aqc_add_tx_qgrp txq_elem;\n+\tstruct ice_tlan_ctx tx_ctx;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\ttxq = pf->fdir.txq;\n+\tif (!txq || !txq->q_set) {\n+\t\tPMD_DRV_LOG(ERR, \"FDIR TX queue %u is not available or setup\",\n+\t\t\t    tx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tvsi = txq->vsi;\n+\thw = ICE_VSI_TO_HW(vsi);\n+\n+\tmemset(&txq_elem, 0, sizeof(txq_elem));\n+\tmemset(&tx_ctx, 0, sizeof(tx_ctx));\n+\ttxq_elem.num_txqs = 1;\n+\ttxq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);\n+\n+\ttx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;\n+\ttx_ctx.qlen = txq->nb_tx_desc;\n+\ttx_ctx.pf_num = hw->pf_id;\n+\ttx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;\n+\ttx_ctx.src_vsi = vsi->vsi_id;\n+\ttx_ctx.port_num = hw->port_info->lport;\n+\ttx_ctx.tso_ena = 1; /* tso enable */\n+\ttx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */\n+\ttx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */\n+\n+\tice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,\n+\t\t    ice_tlan_ctx_info);\n+\n+\ttxq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);\n+\n+\t/* Init the Tx tail register*/\n+\tICE_PCI_REG_WRITE(txq->qtx_tail, 0);\n+\n+\t/* Fix me, we assume TC always 0 here */\n+\terr = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,\n+\t\t\t      &txq_elem, sizeof(txq_elem), NULL);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to add FDIR txq\");\n+\t\treturn 
-EIO;\n+\t}\n+\t/* store the schedule node id */\n+\ttxq->q_teid = txq_elem.txqs[0].q_teid;\n+\n+\treturn 0;\n+}\n+\n /* Free all mbufs for descriptors in tx queue */\n static void\n _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)\n@@ -616,6 +789,63 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \treturn 0;\n }\n \n+int\n+ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct ice_rx_queue *rxq;\n+\tint err;\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\n+\trxq = pf->fdir.rxq;\n+\n+\terr = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch FDIR RX queue %u off\",\n+\t\t\t    rx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tice_rx_queue_release_mbufs(rxq);\n+\n+\treturn 0;\n+}\n+\n+int\n+ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct ice_tx_queue *txq;\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_vsi *vsi = pf->main_vsi;\n+\tenum ice_status status;\n+\tuint16_t q_ids[1];\n+\tuint32_t q_teids[1];\n+\tuint16_t q_handle = tx_queue_id;\n+\n+\ttxq = pf->fdir.txq;\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(ERR, \"TX queue %u is not available\",\n+\t\t\t    tx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tvsi = txq->vsi;\n+\n+\tq_ids[0] = txq->reg_idx;\n+\tq_teids[0] = txq->q_teid;\n+\n+\t/* Fix me, we assume TC always 0 here */\n+\tstatus = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,\n+\t\t\t\t q_ids, q_teids, ICE_NO_RESET, 0, NULL);\n+\tif (status != ICE_SUCCESS) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Failed to disable Lan Tx queue\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tice_tx_queue_release_mbufs(txq);\n+\n+\treturn 0;\n+}\n+\n int\n ice_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t   uint16_t queue_idx,\n@@ -1131,6 +1361,11 @@ 
ice_rxd_to_pkt_fields(struct rte_mbuf *mb,\n \t\txtr->type = ice_rxdid_to_proto_xtr_type(desc->rxdid);\n \t\txtr->magic = PROTO_XTR_MAGIC_ID;\n \t}\n+\n+\tif (desc->flow_id != 0xFFFFFFFF) {\n+\t\tmb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;\n+\t\tmb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);\n+\t}\n #endif\n }\n \n@@ -1684,6 +1919,128 @@ ice_free_queues(struct rte_eth_dev *dev)\n \tdev->data->nb_tx_queues = 0;\n }\n \n+#define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC\n+#define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC\n+\n+int\n+ice_fdir_setup_tx_resources(struct ice_pf *pf)\n+{\n+\tstruct ice_tx_queue *txq;\n+\tconst struct rte_memzone *tz = NULL;\n+\tuint32_t ring_size;\n+\tstruct rte_eth_dev *dev;\n+\n+\tif (!pf) {\n+\t\tPMD_DRV_LOG(ERR, \"PF is not available\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdev = pf->adapter->eth_dev;\n+\n+\t/* Allocate the TX queue data structure. */\n+\ttxq = rte_zmalloc_socket(\"ice fdir tx queue\",\n+\t\t\t\t sizeof(struct ice_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t SOCKET_ID_ANY);\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for \"\n+\t\t\t    \"tx queue structure.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate TX hardware ring descriptors. 
*/\n+\tring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;\n+\tring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);\n+\n+\ttz = rte_eth_dma_zone_reserve(dev, \"fdir_tx_ring\",\n+\t\t\t\t      ICE_FDIR_QUEUE_ID, ring_size,\n+\t\t\t\t      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);\n+\tif (!tz) {\n+\t\tice_tx_queue_release(txq);\n+\t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for TX.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;\n+\ttxq->queue_id = ICE_FDIR_QUEUE_ID;\n+\ttxq->reg_idx = pf->fdir.fdir_vsi->base_queue;\n+\ttxq->vsi = pf->fdir.fdir_vsi;\n+\n+\ttxq->tx_ring_dma = tz->iova;\n+\ttxq->tx_ring = (struct ice_tx_desc *)tz->addr;\n+\t/*\n+\t * don't need to allocate software ring and reset for the fdir\n+\t * program queue just set the queue has been configured.\n+\t */\n+\ttxq->q_set = TRUE;\n+\tpf->fdir.txq = txq;\n+\n+\ttxq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;\n+\n+\treturn ICE_SUCCESS;\n+}\n+\n+int\n+ice_fdir_setup_rx_resources(struct ice_pf *pf)\n+{\n+\tstruct ice_rx_queue *rxq;\n+\tconst struct rte_memzone *rz = NULL;\n+\tuint32_t ring_size;\n+\tstruct rte_eth_dev *dev;\n+\n+\tif (!pf) {\n+\t\tPMD_DRV_LOG(ERR, \"PF is not available\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdev = pf->adapter->eth_dev;\n+\n+\t/* Allocate the RX queue data structure. */\n+\trxq = rte_zmalloc_socket(\"ice fdir rx queue\",\n+\t\t\t\t sizeof(struct ice_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t SOCKET_ID_ANY);\n+\tif (!rxq) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for \"\n+\t\t\t    \"rx queue structure.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate RX hardware ring descriptors. 
*/\n+\tring_size = sizeof(union ice_rx_flex_desc) * ICE_FDIR_NUM_RX_DESC;\n+\tring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);\n+\n+\trz = rte_eth_dma_zone_reserve(dev, \"fdir_rx_ring\",\n+\t\t\t\t      ICE_FDIR_QUEUE_ID, ring_size,\n+\t\t\t\t      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);\n+\tif (!rz) {\n+\t\tice_rx_queue_release(rxq);\n+\t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for RX.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;\n+\trxq->queue_id = ICE_FDIR_QUEUE_ID;\n+\trxq->reg_idx = pf->fdir.fdir_vsi->base_queue;\n+\trxq->vsi = pf->fdir.fdir_vsi;\n+\n+\trxq->rx_ring_dma = rz->iova;\n+\tmemset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *\n+\t       sizeof(union ice_rx_flex_desc));\n+\trxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;\n+\n+\t/*\n+\t * Don't need to allocate software ring and reset for the fdir\n+\t * rx queue, just set the queue has been configured.\n+\t */\n+\trxq->q_set = TRUE;\n+\tpf->fdir.rxq = rxq;\n+\n+\trxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;\n+\n+\treturn ICE_SUCCESS;\n+}\n+\n uint16_t\n ice_recv_pkts(void *rx_queue,\n \t      struct rte_mbuf **rx_pkts,\n@@ -3180,3 +3537,49 @@ ice_set_default_ptype_table(struct rte_eth_dev *dev)\n \tfor (i = 0; i < ICE_MAX_PKT_TYPE; i++)\n \t\tad->ptype_tbl[i] = ice_get_default_pkt_type(i);\n }\n+\n+#define ICE_FDIR_MAX_WAIT_US 10000\n+\n+int\n+ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)\n+{\n+\tstruct ice_tx_queue *txq = pf->fdir.txq;\n+\tvolatile struct ice_fltr_desc *fdirdp;\n+\tvolatile struct ice_tx_desc *txdp;\n+\tuint32_t td_cmd;\n+\tuint16_t i;\n+\n+\tfdirdp = (volatile struct ice_fltr_desc *)\n+\t\t(&txq->tx_ring[txq->tx_tail]);\n+\tfdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;\n+\tfdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;\n+\n+\ttxdp = &txq->tx_ring[txq->tx_tail + 1];\n+\ttxdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);\n+\ttd_cmd = ICE_TX_DESC_CMD_EOP |\n+\t\tICE_TX_DESC_CMD_RS  
|\n+\t\tICE_TX_DESC_CMD_DUMMY;\n+\n+\ttxdp->cmd_type_offset_bsz =\n+\t\tice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);\n+\n+\ttxq->tx_tail += 2;\n+\tif (txq->tx_tail >= txq->nb_tx_desc)\n+\t\ttxq->tx_tail = 0;\n+\t/* Update the tx tail register */\n+\tICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);\n+\tfor (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {\n+\t\tif ((txdp->cmd_type_offset_bsz &\n+\t\t     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==\n+\t\t    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))\n+\t\t\tbreak;\n+\t\trte_delay_us(1);\n+\t}\n+\tif (i >= ICE_FDIR_MAX_WAIT_US) {\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to program FDIR filter: time out to get DD on tx queue.\");\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h\nindex 31c53d535..5de618976 100644\n--- a/drivers/net/ice/ice_rxtx.h\n+++ b/drivers/net/ice/ice_rxtx.h\n@@ -37,6 +37,8 @@\n #define ICE_TX_MAX_FREE_BUF_SZ      64\n #define ICE_DESCS_PER_LOOP          4\n \n+#define ICE_FDIR_PKT_LEN\t512\n+\n typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);\n typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);\n \n@@ -149,10 +151,16 @@ int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n void ice_rx_queue_release(void *rxq);\n void ice_tx_queue_release(void *txq);\n void ice_clear_queues(struct rte_eth_dev *dev);\n void ice_free_queues(struct rte_eth_dev *dev);\n+int 
ice_fdir_setup_tx_resources(struct ice_pf *pf);\n+int ice_fdir_setup_rx_resources(struct ice_pf *pf);\n uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\t       uint16_t nb_pkts);\n uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,\n@@ -194,4 +202,5 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,\n \t\t\t\t\t  uint16_t nb_pkts);\n uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,\n \t\t\t\tuint16_t nb_pkts);\n+int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);\n #endif /* _ICE_RXTX_H_ */\ndiff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build\nindex 6828170a9..908a2cab7 100644\n--- a/drivers/net/ice/meson.build\n+++ b/drivers/net/ice/meson.build\n@@ -10,7 +10,8 @@ sources = files(\n \t'ice_ethdev.c',\n \t'ice_rxtx.c',\n \t'ice_switch_filter.c',\n-\t'ice_generic_flow.c'\n+\t'ice_generic_flow.c',\n+\t'ice_fdir_filter.c'\n \t)\n \n deps += ['hash']\n",
    "prefixes": [
        "v7",
        "1/9"
    ]
}