get:
Show a patch.

patch:
Partially update a patch; only the fields included in the request body are changed.

put:
Update a patch; a full update that replaces all writable fields.

GET /api/patches/55234/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 55234,
    "url": "http://patches.dpdk.org/api/patches/55234/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190624061508.58235-2-qiming.yang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190624061508.58235-2-qiming.yang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190624061508.58235-2-qiming.yang@intel.com",
    "date": "2019-06-24T06:15:06",
    "name": "[v6,1/3] net/ice: enable switch filter",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "1c23f3155236df41bd4cf56e87b953b511fa587d",
    "submitter": {
        "id": 522,
        "url": "http://patches.dpdk.org/api/people/522/?format=api",
        "name": "Yang, Qiming",
        "email": "qiming.yang@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "http://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190624061508.58235-2-qiming.yang@intel.com/mbox/",
    "series": [
        {
            "id": 5128,
            "url": "http://patches.dpdk.org/api/series/5128/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=5128",
            "date": "2019-06-24T06:15:05",
            "name": "Enable rte_flow API in ice driver",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/5128/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/55234/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/55234/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "X-BeenThere": "dev@dpdk.org",
        "Return-Path": "<dev-bounces@dpdk.org>",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>",
        "From": "Qiming Yang <qiming.yang@intel.com>",
        "To": "dev@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "X-Amp-File-Uploaded": "False",
        "References": "<1559552722-8970-1-git-send-email-qiming.yang@intel.com>\n\t<20190624061508.58235-1-qiming.yang@intel.com>",
        "In-Reply-To": "<20190624061508.58235-1-qiming.yang@intel.com>",
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "X-Mailer": "git-send-email 2.9.5",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 5CE001BF5A;\n\tMon, 24 Jun 2019 08:17:10 +0200 (CEST)",
            "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n\tby dpdk.org (Postfix) with ESMTP id 462121BF47\n\tfor <dev@dpdk.org>; Mon, 24 Jun 2019 08:17:01 +0200 (CEST)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t23 Jun 2019 23:17:00 -0700",
            "from map1.sh.intel.com ([10.67.111.124])\n\tby fmsmga004.fm.intel.com with ESMTP; 23 Jun 2019 23:16:59 -0700"
        ],
        "X-Original-To": "patchwork@dpdk.org",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "X-Mailman-Version": "2.1.15",
        "X-ExtLoop1": "1",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "Errors-To": "dev-bounces@dpdk.org",
        "X-IronPort-AV": "E=Sophos;i=\"5.63,411,1557212400\"; d=\"scan'208\";a=\"184032126\"",
        "Subject": "[dpdk-dev] [PATCH v6 1/3] net/ice: enable switch filter",
        "Date": "Mon, 24 Jun 2019 14:15:06 +0800",
        "Message-Id": "<20190624061508.58235-2-qiming.yang@intel.com>",
        "Cc": "wei zhao <wei.zhao1@intel.com>",
        "Precedence": "list"
    },
    "content": "From: wei zhao <wei.zhao1@intel.com>\n\nThe patch enables the backend of rte_flow. It transfers\nrte_flow_xxx to device specific data structure and\nconfigures packet process engine's binary classifier\n(switch) properly.\n\nSigned-off-by: Wei Zhao <wei.zhao1@intel.com>\n---\n drivers/net/ice/Makefile            |   1 +\n drivers/net/ice/ice_ethdev.c        |  18 ++\n drivers/net/ice/ice_ethdev.h        |   7 +\n drivers/net/ice/ice_switch_filter.c | 521 ++++++++++++++++++++++++++++++++++++\n drivers/net/ice/ice_switch_filter.h |  24 ++\n drivers/net/ice/meson.build         |   3 +-\n 6 files changed, 573 insertions(+), 1 deletion(-)\n create mode 100644 drivers/net/ice/ice_switch_filter.c\n create mode 100644 drivers/net/ice/ice_switch_filter.h",
    "diff": "diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile\nindex 0e5c55e..b10d826 100644\n--- a/drivers/net/ice/Makefile\n+++ b/drivers/net/ice/Makefile\n@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)\n SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c\n endif\n \n+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c\n ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)\n \tCC_AVX2_SUPPORT=1\n else\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex 203d0a9..a94aa7e 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -1364,6 +1364,21 @@ static int ice_load_pkg(struct rte_eth_dev *dev)\n \treturn err;\n }\n \n+static void\n+ice_base_queue_get(struct ice_pf *pf)\n+{\n+\tuint32_t reg;\n+\tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n+\n+\treg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);\n+\tif (reg & PFLAN_RX_QALLOC_VALID_M) {\n+\t\tpf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;\n+\t} else {\n+\t\tPMD_INIT_LOG(WARNING, \"Failed to get Rx base queue\"\n+\t\t\t\t\t\" index\");\n+\t}\n+}\n+\n static int\n ice_dev_init(struct rte_eth_dev *dev)\n {\n@@ -1460,6 +1475,9 @@ ice_dev_init(struct rte_eth_dev *dev)\n \t/* enable uio intr after callback register */\n \trte_intr_enable(intr_handle);\n \n+\t/* get base queue pairs index  in the device */\n+\tice_base_queue_get(pf);\n+\n \treturn 0;\n \n err_pf_setup:\ndiff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h\nindex 1385afa..50b966c 100644\n--- a/drivers/net/ice/ice_ethdev.h\n+++ b/drivers/net/ice/ice_ethdev.h\n@@ -234,6 +234,12 @@ struct ice_vsi {\n \tbool offset_loaded;\n };\n \n+/* Struct to store flow created. 
*/\n+struct rte_flow {\n+\tTAILQ_ENTRY(rte_flow) node;\n+\tvoid *rule;\n+};\n+\n struct ice_pf {\n \tstruct ice_adapter *adapter; /* The adapter this PF associate to */\n \tstruct ice_vsi *main_vsi; /* pointer to main VSI structure */\n@@ -252,6 +258,7 @@ struct ice_pf {\n \tuint16_t hash_lut_size; /* The size of hash lookup table */\n \tuint16_t lan_nb_qp_max;\n \tuint16_t lan_nb_qps; /* The number of queue pairs of LAN */\n+\tuint16_t base_queue; /* The base queue pairs index  in the device */\n \tstruct ice_hw_port_stats stats_offset;\n \tstruct ice_hw_port_stats stats;\n \t/* internal packet statistics, it should be excluded from the total */\ndiff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c\nnew file mode 100644\nindex 0000000..adfc154\n--- /dev/null\n+++ b/drivers/net/ice/ice_switch_filter.c\n@@ -0,0 +1,521 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019 Intel Corporation\n+ */\n+\n+#include <sys/queue.h>\n+#include <stdio.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <string.h>\n+#include <unistd.h>\n+#include <stdarg.h>\n+\n+#include <rte_debug.h>\n+#include <rte_ether.h>\n+#include <rte_ethdev_driver.h>\n+#include <rte_log.h>\n+#include <rte_malloc.h>\n+#include <rte_eth_ctrl.h>\n+#include <rte_tailq.h>\n+#include <rte_flow_driver.h>\n+\n+#include \"ice_logs.h\"\n+#include \"base/ice_type.h\"\n+#include \"ice_switch_filter.h\"\n+\n+static int\n+ice_parse_switch_filter(const struct rte_flow_item pattern[],\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct rte_flow_error *error,\n+\t\t\tstruct ice_adv_rule_info *rule_info,\n+\t\t\tstruct ice_adv_lkup_elem **lkup_list,\n+\t\t\tuint16_t *lkups_num)\n+{\n+\tconst struct rte_flow_item *item = pattern;\n+\tenum rte_flow_item_type item_type;\n+\tconst struct rte_flow_item_eth *eth_spec, *eth_mask;\n+\tconst struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;\n+\tconst struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;\n+\tconst struct 
rte_flow_item_tcp *tcp_spec, *tcp_mask;\n+\tconst struct rte_flow_item_udp *udp_spec, *udp_mask;\n+\tconst struct rte_flow_item_sctp *sctp_spec, *sctp_mask;\n+\tconst struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;\n+\tconst struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;\n+\tstruct ice_adv_lkup_elem *list;\n+\tuint16_t j, t = 0;\n+\tuint16_t item_num = 0;\n+\tenum ice_sw_tunnel_type tun_type = ICE_NON_TUN;\n+\tuint16_t tunnel_valid = 0;\n+\n+\tfor (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {\n+\t\tif (item->type == RTE_FLOW_ITEM_TYPE_ETH ||\n+\t\t\titem->type == RTE_FLOW_ITEM_TYPE_IPV4 ||\n+\t\t\titem->type == RTE_FLOW_ITEM_TYPE_IPV6 ||\n+\t\t\titem->type == RTE_FLOW_ITEM_TYPE_UDP ||\n+\t\t\titem->type == RTE_FLOW_ITEM_TYPE_TCP ||\n+\t\t\titem->type == RTE_FLOW_ITEM_TYPE_SCTP ||\n+\t\t\titem->type == RTE_FLOW_ITEM_TYPE_VXLAN ||\n+\t\t\titem->type == RTE_FLOW_ITEM_TYPE_NVGRE)\n+\t\t\titem_num++;\n+\t\tif (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)\n+\t\t\ttun_type = ICE_SW_TUN_VXLAN;\n+\t\tif (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)\n+\t\t\ttun_type = ICE_SW_TUN_NVGRE;\n+\t}\n+\n+\tlist = rte_zmalloc(NULL, item_num * sizeof(*list), 0);\n+\tif (!list) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM, actions,\n+\t\t\t\t   \"No memory for PMD internal items\");\n+\t\tgoto out;\n+\t}\n+\t*lkup_list = list;\n+\n+\tfor (item = pattern; item->type !=\n+\t\t\tRTE_FLOW_ITEM_TYPE_END; item++) {\n+\t\titem_type = item->type;\n+\n+\t\tswitch (item_type) {\n+\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n+\t\t\teth_spec = item->spec;\n+\t\t\teth_mask = item->mask;\n+\t\t\tif (eth_spec && eth_mask) {\n+\t\t\t\tlist[t].type = (tun_type == ICE_NON_TUN) ?\n+\t\t\t\t\tICE_MAC_OFOS : ICE_MAC_IL;\n+\t\t\t\tstruct ice_ether_hdr *h;\n+\t\t\t\tstruct ice_ether_hdr *m;\n+\t\t\t\th = &list[t].h_u.eth_hdr;\n+\t\t\t\tm = &list[t].m_u.eth_hdr;\n+\t\t\t\tfor (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {\n+\t\t\t\t\tif (eth_mask->src.addr_bytes[j] 
==\n+\t\t\t\t\t\t\t\tUINT8_MAX) {\n+\t\t\t\t\t\th->src_addr[j] =\n+\t\t\t\t\t\teth_spec->src.addr_bytes[j];\n+\t\t\t\t\t\tm->src_addr[j] =\n+\t\t\t\t\t\teth_mask->src.addr_bytes[j];\n+\t\t\t\t\t}\n+\t\t\t\t\tif (eth_mask->dst.addr_bytes[j] ==\n+\t\t\t\t\t\t\t\tUINT8_MAX) {\n+\t\t\t\t\t\th->dst_addr[j] =\n+\t\t\t\t\t\teth_spec->dst.addr_bytes[j];\n+\t\t\t\t\t\tm->dst_addr[j] =\n+\t\t\t\t\t\teth_mask->dst.addr_bytes[j];\n+\t\t\t\t\t}\n+\t\t\t\t}\n+\t\t\t\tif (eth_mask->type == UINT16_MAX) {\n+\t\t\t\t\th->ethtype_id =\n+\t\t\t\t\trte_be_to_cpu_16(eth_spec->type);\n+\t\t\t\t\tm->ethtype_id = UINT16_MAX;\n+\t\t\t\t}\n+\t\t\t\tt++;\n+\t\t\t} else if (!eth_spec && !eth_mask) {\n+\t\t\t\tlist[t].type = (tun_type == ICE_NON_TUN) ?\n+\t\t\t\t\tICE_MAC_OFOS : ICE_MAC_IL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n+\t\t\tipv4_spec = item->spec;\n+\t\t\tipv4_mask = item->mask;\n+\t\t\tif (ipv4_spec && ipv4_mask) {\n+\t\t\t\tlist[t].type = (tun_type == ICE_NON_TUN) ?\n+\t\t\t\t\tICE_IPV4_OFOS : ICE_IPV4_IL;\n+\t\t\t\tif (ipv4_mask->hdr.src_addr == UINT32_MAX) {\n+\t\t\t\t\tlist[t].h_u.ipv4_hdr.src_addr =\n+\t\t\t\t\t\tipv4_spec->hdr.src_addr;\n+\t\t\t\t\tlist[t].m_u.ipv4_hdr.src_addr =\n+\t\t\t\t\t\tUINT32_MAX;\n+\t\t\t\t}\n+\t\t\t\tif (ipv4_mask->hdr.dst_addr == UINT32_MAX) {\n+\t\t\t\t\tlist[t].h_u.ipv4_hdr.dst_addr =\n+\t\t\t\t\t\tipv4_spec->hdr.dst_addr;\n+\t\t\t\t\tlist[t].m_u.ipv4_hdr.dst_addr =\n+\t\t\t\t\t\tUINT32_MAX;\n+\t\t\t\t}\n+\t\t\t\tif (ipv4_mask->hdr.time_to_live == UINT8_MAX) {\n+\t\t\t\t\tlist[t].h_u.ipv4_hdr.time_to_live =\n+\t\t\t\t\t\tipv4_spec->hdr.time_to_live;\n+\t\t\t\t\tlist[t].m_u.ipv4_hdr.time_to_live =\n+\t\t\t\t\t\tUINT8_MAX;\n+\t\t\t\t}\n+\t\t\t\tif (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {\n+\t\t\t\t\tlist[t].h_u.ipv4_hdr.protocol =\n+\t\t\t\t\t\tipv4_spec->hdr.next_proto_id;\n+\t\t\t\t\tlist[t].m_u.ipv4_hdr.protocol =\n+\t\t\t\t\t\tUINT8_MAX;\n+\t\t\t\t}\n+\t\t\t\tif (ipv4_mask->hdr.type_of_service 
==\n+\t\t\t\t\t\tUINT8_MAX) {\n+\t\t\t\t\tlist[t].h_u.ipv4_hdr.tos =\n+\t\t\t\t\t\tipv4_spec->hdr.type_of_service;\n+\t\t\t\t\tlist[t].m_u.ipv4_hdr.tos = UINT8_MAX;\n+\t\t\t\t}\n+\t\t\t\tt++;\n+\t\t\t} else if (!ipv4_spec && !ipv4_mask) {\n+\t\t\t\tlist[t].type = (tun_type == ICE_NON_TUN) ?\n+\t\t\t\t\tICE_IPV4_OFOS : ICE_IPV4_IL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase RTE_FLOW_ITEM_TYPE_IPV6:\n+\t\t\tipv6_spec = item->spec;\n+\t\t\tipv6_mask = item->mask;\n+\t\t\tif (ipv6_spec && ipv6_mask) {\n+\t\t\t\tlist[t].type = (tun_type == ICE_NON_TUN) ?\n+\t\t\t\t\tICE_IPV6_OFOS : ICE_IPV6_IL;\n+\t\t\t\tstruct ice_ipv6_hdr *f;\n+\t\t\t\tstruct ice_ipv6_hdr *s;\n+\t\t\t\tf = &list[t].h_u.ice_ipv6_ofos_hdr;\n+\t\t\t\ts = &list[t].m_u.ice_ipv6_ofos_hdr;\n+\t\t\t\tfor (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {\n+\t\t\t\t\tif (ipv6_mask->hdr.src_addr[j] ==\n+\t\t\t\t\t\t\t\tUINT8_MAX) {\n+\t\t\t\t\t\tf->src_addr[j] =\n+\t\t\t\t\t\tipv6_spec->hdr.src_addr[j];\n+\t\t\t\t\t\ts->src_addr[j] =\n+\t\t\t\t\t\tipv6_mask->hdr.src_addr[j];\n+\t\t\t\t\t}\n+\t\t\t\t\tif (ipv6_mask->hdr.dst_addr[j] ==\n+\t\t\t\t\t\t\t\tUINT8_MAX) {\n+\t\t\t\t\t\tf->dst_addr[j] =\n+\t\t\t\t\t\tipv6_spec->hdr.dst_addr[j];\n+\t\t\t\t\t\ts->dst_addr[j] =\n+\t\t\t\t\t\tipv6_mask->hdr.dst_addr[j];\n+\t\t\t\t\t}\n+\t\t\t\t}\n+\t\t\t\tif (ipv6_mask->hdr.proto == UINT8_MAX) {\n+\t\t\t\t\tf->next_hdr =\n+\t\t\t\t\t\tipv6_spec->hdr.proto;\n+\t\t\t\t\ts->next_hdr = UINT8_MAX;\n+\t\t\t\t}\n+\t\t\t\tif (ipv6_mask->hdr.hop_limits == UINT8_MAX) {\n+\t\t\t\t\tf->hop_limit =\n+\t\t\t\t\t\tipv6_spec->hdr.hop_limits;\n+\t\t\t\t\ts->hop_limit = UINT8_MAX;\n+\t\t\t\t}\n+\t\t\t\tt++;\n+\t\t\t} else if (!ipv6_spec && !ipv6_mask) {\n+\t\t\t\tlist[t].type = (tun_type == ICE_NON_TUN) ?\n+\t\t\t\t\tICE_IPV4_OFOS : ICE_IPV4_IL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase RTE_FLOW_ITEM_TYPE_UDP:\n+\t\t\tudp_spec = item->spec;\n+\t\t\tudp_mask = item->mask;\n+\t\t\tif (udp_spec && udp_mask) {\n+\t\t\t\tif (tun_type == ICE_SW_TUN_VXLAN 
&&\n+\t\t\t\t\t\ttunnel_valid == 0)\n+\t\t\t\t\tlist[t].type = ICE_UDP_OF;\n+\t\t\t\telse\n+\t\t\t\t\tlist[t].type = ICE_UDP_ILOS;\n+\t\t\t\tif (udp_mask->hdr.src_port == UINT16_MAX) {\n+\t\t\t\t\tlist[t].h_u.l4_hdr.src_port =\n+\t\t\t\t\t\tudp_spec->hdr.src_port;\n+\t\t\t\t\tlist[t].m_u.l4_hdr.src_port =\n+\t\t\t\t\t\tudp_mask->hdr.src_port;\n+\t\t\t\t}\n+\t\t\t\tif (udp_mask->hdr.dst_port == UINT16_MAX) {\n+\t\t\t\t\tlist[t].h_u.l4_hdr.dst_port =\n+\t\t\t\t\t\tudp_spec->hdr.dst_port;\n+\t\t\t\t\tlist[t].m_u.l4_hdr.dst_port =\n+\t\t\t\t\t\tudp_mask->hdr.dst_port;\n+\t\t\t\t}\n+\t\t\t\tt++;\n+\t\t\t} else if (!udp_spec && !udp_mask) {\n+\t\t\t\tlist[t].type = ICE_UDP_ILOS;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase RTE_FLOW_ITEM_TYPE_TCP:\n+\t\t\ttcp_spec = item->spec;\n+\t\t\ttcp_mask = item->mask;\n+\t\t\tif (tcp_spec && tcp_mask) {\n+\t\t\t\tlist[t].type = ICE_TCP_IL;\n+\t\t\t\tif (tcp_mask->hdr.src_port == UINT16_MAX) {\n+\t\t\t\t\tlist[t].h_u.l4_hdr.src_port =\n+\t\t\t\t\t\ttcp_spec->hdr.src_port;\n+\t\t\t\t\tlist[t].m_u.l4_hdr.src_port =\n+\t\t\t\t\t\ttcp_mask->hdr.src_port;\n+\t\t\t\t}\n+\t\t\t\tif (tcp_mask->hdr.dst_port == UINT16_MAX) {\n+\t\t\t\t\tlist[t].h_u.l4_hdr.dst_port =\n+\t\t\t\t\t\ttcp_spec->hdr.dst_port;\n+\t\t\t\t\tlist[t].m_u.l4_hdr.dst_port =\n+\t\t\t\t\t\ttcp_mask->hdr.dst_port;\n+\t\t\t\t}\n+\t\t\t\tt++;\n+\t\t\t} else if (!tcp_spec && !tcp_mask) {\n+\t\t\t\tlist[t].type = ICE_TCP_IL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase RTE_FLOW_ITEM_TYPE_SCTP:\n+\t\t\tsctp_spec = item->spec;\n+\t\t\tsctp_mask = item->mask;\n+\t\t\tif (sctp_spec && sctp_mask) {\n+\t\t\t\tlist[t].type = ICE_SCTP_IL;\n+\t\t\t\tif (sctp_mask->hdr.src_port == UINT16_MAX) {\n+\t\t\t\t\tlist[t].h_u.sctp_hdr.src_port =\n+\t\t\t\t\t\tsctp_spec->hdr.src_port;\n+\t\t\t\t\tlist[t].m_u.sctp_hdr.src_port =\n+\t\t\t\t\t\tsctp_mask->hdr.src_port;\n+\t\t\t\t}\n+\t\t\t\tif (sctp_mask->hdr.dst_port == UINT16_MAX) {\n+\t\t\t\t\tlist[t].h_u.sctp_hdr.dst_port 
=\n+\t\t\t\t\t\tsctp_spec->hdr.dst_port;\n+\t\t\t\t\tlist[t].m_u.sctp_hdr.dst_port =\n+\t\t\t\t\t\tsctp_mask->hdr.dst_port;\n+\t\t\t\t}\n+\t\t\t\tt++;\n+\t\t\t} else if (!sctp_spec && !sctp_mask) {\n+\t\t\t\tlist[t].type = ICE_SCTP_IL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase RTE_FLOW_ITEM_TYPE_VXLAN:\n+\t\t\tvxlan_spec = item->spec;\n+\t\t\tvxlan_mask = item->mask;\n+\t\t\ttunnel_valid = 1;\n+\t\t\tif (vxlan_spec && vxlan_mask) {\n+\t\t\t\tlist[t].type = ICE_VXLAN;\n+\t\t\t\tif (vxlan_mask->vni[0] == UINT8_MAX &&\n+\t\t\t\t\tvxlan_mask->vni[1] == UINT8_MAX &&\n+\t\t\t\t\tvxlan_mask->vni[2] == UINT8_MAX) {\n+\t\t\t\t\tlist[t].h_u.tnl_hdr.vni =\n+\t\t\t\t\t\t(vxlan_spec->vni[2] << 16) |\n+\t\t\t\t\t\t(vxlan_spec->vni[1] << 8) |\n+\t\t\t\t\t\tvxlan_spec->vni[0];\n+\t\t\t\t\tlist[t].m_u.tnl_hdr.vni =\n+\t\t\t\t\t\tUINT32_MAX;\n+\t\t\t\t}\n+\t\t\t\tt++;\n+\t\t\t} else if (!vxlan_spec && !vxlan_mask) {\n+\t\t\t\tlist[t].type = ICE_VXLAN;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase RTE_FLOW_ITEM_TYPE_NVGRE:\n+\t\t\tnvgre_spec = item->spec;\n+\t\t\tnvgre_mask = item->mask;\n+\t\t\ttunnel_valid = 1;\n+\t\t\tif (nvgre_spec && nvgre_mask) {\n+\t\t\t\tlist[t].type = ICE_NVGRE;\n+\t\t\t\tif (nvgre_mask->tni[0] == UINT8_MAX &&\n+\t\t\t\t\tnvgre_mask->tni[1] == UINT8_MAX &&\n+\t\t\t\t\tnvgre_mask->tni[2] == UINT8_MAX) {\n+\t\t\t\t\tlist[t].h_u.nvgre_hdr.tni_flow =\n+\t\t\t\t\t\t(nvgre_spec->tni[2] << 16) |\n+\t\t\t\t\t\t(nvgre_spec->tni[1] << 8) |\n+\t\t\t\t\t\tnvgre_spec->tni[0];\n+\t\t\t\t\tlist[t].m_u.nvgre_hdr.tni_flow =\n+\t\t\t\t\t\tUINT32_MAX;\n+\t\t\t\t}\n+\t\t\t\tt++;\n+\t\t\t} else if (!nvgre_spec && !nvgre_mask) {\n+\t\t\t\tlist[t].type = ICE_NVGRE;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase RTE_FLOW_ITEM_TYPE_VOID:\n+\t\tcase RTE_FLOW_ITEM_TYPE_END:\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM, actions,\n+\t\t\t\t   \"Invalid pattern item.\");\n+\t\t\tgoto out;\n+\t\t}\n+\t}\n+\n+\trule_info->tun_type = 
tun_type;\n+\t*lkups_num = t;\n+\n+\treturn 0;\n+out:\n+\treturn -rte_errno;\n+}\n+\n+/* By now ice switch filter action code implement only\n+ * supports QUEUE or DROP.\n+ */\n+static int\n+ice_parse_switch_action(struct ice_pf *pf,\n+\t\t\t\t const struct rte_flow_action *actions,\n+\t\t\t\t struct rte_flow_error *error,\n+\t\t\t\t struct ice_adv_rule_info *rule_info)\n+{\n+\tstruct ice_vsi *vsi = pf->main_vsi;\n+\tconst struct rte_flow_action_queue *act_q;\n+\tuint16_t base_queue;\n+\tconst struct rte_flow_action *action;\n+\tenum rte_flow_action_type action_type;\n+\n+\tbase_queue = pf->base_queue;\n+\tfor (action = actions; action->type !=\n+\t\t\tRTE_FLOW_ACTION_TYPE_END; action++) {\n+\t\taction_type = action->type;\n+\t\tswitch (action_type) {\n+\t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n+\t\t\tact_q = action->conf;\n+\t\t\trule_info->sw_act.fltr_act =\n+\t\t\t\tICE_FWD_TO_Q;\n+\t\t\trule_info->sw_act.fwd_id.q_id =\n+\t\t\t\tbase_queue + act_q->index;\n+\t\t\tif (act_q->index >=\n+\t\t\t\tpf->dev_data->nb_rx_queues) {\n+\t\t\t\trte_flow_error_set(error,\n+\t\t\t\t\tEINVAL,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\tactions, \"Invalid queue ID\"\n+\t\t\t\t\t\" for switch filter.\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase RTE_FLOW_ACTION_TYPE_DROP:\n+\t\t\trule_info->sw_act.fltr_act =\n+\t\t\t\tICE_DROP_PACKET;\n+\t\t\tbreak;\n+\n+\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\tEINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\tactions,\n+\t\t\t\t\"Invalid action type\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\trule_info->sw_act.vsi_handle = vsi->idx;\n+\trule_info->rx = 1;\n+\trule_info->sw_act.src = vsi->idx;\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_switch_rule_set(struct ice_pf *pf,\n+\t\t\tstruct ice_adv_lkup_elem *list,\n+\t\t\tuint16_t lkups_cnt,\n+\t\t\tstruct ice_adv_rule_info *rule_info,\n+\t\t\tstruct rte_flow *flow,\n+\t\t\tstruct 
rte_flow_error *error)\n+{\n+\tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n+\tint ret;\n+\tstruct ice_rule_query_data rule_added = {0};\n+\tstruct ice_rule_query_data *filter_ptr;\n+\n+\tif (lkups_cnt > ICE_MAX_CHAIN_WORDS) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n+\t\t\t\"item number too large for rule\");\n+\t\treturn -rte_errno;\n+\t}\n+\tif (!list) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n+\t\t\t\"lookup list should not be NULL\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);\n+\n+\tif (!ret) {\n+\t\tfilter_ptr = rte_zmalloc(\"ice_switch_filter\",\n+\t\t\tsizeof(struct ice_rule_query_data), 0);\n+\t\tif (!filter_ptr) {\n+\t\t\tPMD_DRV_LOG(ERR, \"failed to allocate memory\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\tflow->rule = filter_ptr;\n+\t\trte_memcpy(filter_ptr,\n+\t\t\t&rule_added,\n+\t\t\tsizeof(struct ice_rule_query_data));\n+\t}\n+\n+\treturn ret;\n+}\n+\n+int\n+ice_create_switch_filter(struct ice_pf *pf,\n+\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct rte_flow *flow,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tint ret = 0;\n+\tstruct ice_adv_rule_info rule_info = {0};\n+\tstruct ice_adv_lkup_elem *list = NULL;\n+\tuint16_t lkups_num = 0;\n+\n+\tret = ice_parse_switch_filter(pattern, actions, error,\n+\t\t\t&rule_info, &list, &lkups_num);\n+\tif (ret)\n+\t\tgoto error;\n+\n+\tret = ice_parse_switch_action(pf, actions, error, &rule_info);\n+\tif (ret)\n+\t\tgoto error;\n+\n+\tret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);\n+\tif (ret)\n+\t\tgoto error;\n+\n+\trte_free(list);\n+\treturn 0;\n+\n+error:\n+\trte_free(list);\n+\n+\treturn -rte_errno;\n+}\n+\n+int\n+ice_destroy_switch_filter(struct ice_pf *pf,\n+\t\t\tstruct rte_flow *flow,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n+\tint 
ret;\n+\tstruct ice_rule_query_data *filter_ptr;\n+\n+\tfilter_ptr = (struct ice_rule_query_data *)\n+\t\t\tflow->rule;\n+\n+\tif (!filter_ptr) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\"no such flow\"\n+\t\t\t\" create by switch filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tret = ice_rem_adv_rule_by_id(hw, filter_ptr);\n+\tif (ret) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_HANDLE, NULL,\n+\t\t\t\"fail to destroy switch filter rule\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\trte_free(filter_ptr);\n+\treturn ret;\n+}\n+\n+void\n+ice_free_switch_filter_rule(void *rule)\n+{\n+\tstruct ice_rule_query_data *filter_ptr;\n+\n+\tfilter_ptr = (struct ice_rule_query_data *)rule;\n+\n+\trte_free(filter_ptr);\n+}\ndiff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h\nnew file mode 100644\nindex 0000000..cea4799\n--- /dev/null\n+++ b/drivers/net/ice/ice_switch_filter.h\n@@ -0,0 +1,24 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019 Intel Corporation\n+ */\n+\n+#ifndef _ICE_SWITCH_FILTER_H_\n+#define _ICE_SWITCH_FILTER_H_\n+\n+#include \"base/ice_switch.h\"\n+#include \"base/ice_type.h\"\n+#include \"ice_ethdev.h\"\n+\n+int\n+ice_create_switch_filter(struct ice_pf *pf,\n+\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct rte_flow *flow,\n+\t\t\tstruct rte_flow_error *error);\n+int\n+ice_destroy_switch_filter(struct ice_pf *pf,\n+\t\t\tstruct rte_flow *flow,\n+\t\t\tstruct rte_flow_error *error);\n+void\n+ice_free_switch_filter_rule(void *rule);\n+#endif /* _ICE_SWITCH_FILTER_H_ */\ndiff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build\nindex 2bec688..8697676 100644\n--- a/drivers/net/ice/meson.build\n+++ b/drivers/net/ice/meson.build\n@@ -6,7 +6,8 @@ objs = [base_objs]\n \n sources = files(\n \t'ice_ethdev.c',\n-\t'ice_rxtx.c'\n+\t'ice_rxtx.c',\n+\t'ice_switch_filter.c'\n 
\t)\n \n deps += ['hash']\n",
    "prefixes": [
        "v6",
        "1/3"
    ]
}