get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
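For reference, the endpoint can be driven from any HTTP client. Below is a
minimal sketch using Python's "requests" library (an assumption of this
example, not part of the API page itself); GET needs no authentication,
while PUT/PATCH require a maintainer API token.

    import requests

    BASE = "https://patches.dpdk.org/api"

    # Fetch the patch shown below as JSON.
    resp = requests.get(f"{BASE}/patches/59751/", timeout=30)
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])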

GET /api/patches/59751/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 59751,
    "url": "https://patches.dpdk.org/api/patches/59751/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/ff8635c2a0ba6c03db984488c88158561b4a1c38.1569421287.git.cloud.wangxiaoyun@huawei.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<ff8635c2a0ba6c03db984488c88158561b4a1c38.1569421287.git.cloud.wangxiaoyun@huawei.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/ff8635c2a0ba6c03db984488c88158561b4a1c38.1569421287.git.cloud.wangxiaoyun@huawei.com",
    "date": "2019-09-25T14:30:36",
    "name": "[v2,08/17] net/hinic: add fdir validate flow operations",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "db27f495f558c7f720f157db2b9ae3dc643df13b",
    "submitter": {
        "id": 1446,
        "url": "https://patches.dpdk.org/api/people/1446/?format=api",
        "name": "Wangxiaoyun (Cloud)",
        "email": "cloud.wangxiaoyun@huawei.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/ff8635c2a0ba6c03db984488c88158561b4a1c38.1569421287.git.cloud.wangxiaoyun@huawei.com/mbox/",
    "series": [
        {
            "id": 6529,
            "url": "https://patches.dpdk.org/api/series/6529/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=6529",
            "date": "2019-09-25T14:26:40",
            "name": "Add advanced features for Huawei hinic pmd",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/6529/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/59751/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/59751/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 7FC7B1BEBF;\n\tWed, 25 Sep 2019 16:16:47 +0200 (CEST)",
            "from huawei.com (szxga04-in.huawei.com [45.249.212.190])\n\tby dpdk.org (Postfix) with ESMTP id 010AC1BE99\n\tfor <dev@dpdk.org>; Wed, 25 Sep 2019 16:16:42 +0200 (CEST)",
            "from DGGEMS402-HUB.china.huawei.com (unknown [172.30.72.60])\n\tby Forcepoint Email with ESMTP id E5295DE1315DABDBEB4E\n\tfor <dev@dpdk.org>; Wed, 25 Sep 2019 22:16:41 +0800 (CST)",
            "from tester.localdomain (10.175.119.39) by\n\tDGGEMS402-HUB.china.huawei.com (10.3.19.202) with Microsoft SMTP\n\tServer id 14.3.439.0; Wed, 25 Sep 2019 22:16:36 +0800"
        ],
        "From": "Xiaoyun wang <cloud.wangxiaoyun@huawei.com>",
        "To": "<ferruh.yigit@intel.com>",
        "CC": "<dev@dpdk.org>, <xuanziyang2@huawei.com>, <shahar.belkar@huawei.com>,\n\t<luoxianjun@huawei.com>, <tanya.brokhman@huawei.com>,\n\t<zhouguoyang@huawei.com>, <wulike1@huawei.com>, Xiaoyun wang\n\t<cloud.wangxiaoyun@huawei.com>",
        "Date": "Wed, 25 Sep 2019 22:30:36 +0800",
        "Message-ID": "<ff8635c2a0ba6c03db984488c88158561b4a1c38.1569421287.git.cloud.wangxiaoyun@huawei.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<cover.1569421286.git.cloud.wangxiaoyun@huawei.com>",
        "References": "<cover.1569421286.git.cloud.wangxiaoyun@huawei.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.175.119.39]",
        "X-CFilter-Loop": "Reflected",
        "Subject": "[dpdk-dev] [PATCH v2 08/17] net/hinic: add fdir validate flow\n\toperations",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch is to validate the filter rules, which includes\nntuple filter, ethertype filter and fdir filter. The packets\ntype that supported are BGP,VRRP,LACP,ARP and ICMP.\n\nSigned-off-by: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>\n---\n drivers/net/hinic/Makefile           |    1 +\n drivers/net/hinic/hinic_pmd_ethdev.c |   42 ++\n drivers/net/hinic/hinic_pmd_ethdev.h |   24 +\n drivers/net/hinic/hinic_pmd_flow.c   | 1172 ++++++++++++++++++++++++++++++++++\n drivers/net/hinic/meson.build        |    1 +\n 5 files changed, 1240 insertions(+)\n create mode 100644 drivers/net/hinic/hinic_pmd_flow.c",
    "diff": "diff --git a/drivers/net/hinic/Makefile b/drivers/net/hinic/Makefile\nindex 20a338e..b78fd8d 100644\n--- a/drivers/net/hinic/Makefile\n+++ b/drivers/net/hinic/Makefile\n@@ -60,6 +60,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_niccfg.c\n SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_nicio.c\n SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_wq.c\n SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_mbox.c\n+SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_flow.c\n \n SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_ethdev.c\n SRCS-$(CONFIG_RTE_LIBRTE_HINIC_PMD) += hinic_pmd_rx.c\ndiff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c\nindex 16dc1cd..9f7e6f5 100644\n--- a/drivers/net/hinic/hinic_pmd_ethdev.c\n+++ b/drivers/net/hinic/hinic_pmd_ethdev.c\n@@ -2283,6 +2283,46 @@ static int hinic_set_mc_addr_list(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n+/**\n+ * DPDK callback to manage filter operations\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param filter_type\n+ *   Filter type.\n+ * @param filter_op\n+ *   Operation to perform.\n+ * @param arg\n+ *   Pointer to operation-specific structure.\n+ *\n+ * @return\n+ *   0 on success, negative errno value on failure.\n+ */\n+static int hinic_dev_filter_ctrl(struct rte_eth_dev *dev,\n+\t\t     enum rte_filter_type filter_type,\n+\t\t     enum rte_filter_op filter_op,\n+\t\t     void *arg)\n+{\n+\tstruct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);\n+\tint func_id = hinic_global_func_id(nic_dev->hwdev);\n+\n+\tswitch (filter_type) {\n+\tcase RTE_ETH_FILTER_GENERIC:\n+\t\tif (filter_op != RTE_ETH_FILTER_GET)\n+\t\t\treturn -EINVAL;\n+\t\t*(const void **)arg = &hinic_flow_ops;\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_DRV_LOG(INFO, \"Filter type (%d) not supported\",\n+\t\t\tfilter_type);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tPMD_DRV_LOG(INFO, \"Set filter_ctrl succeed, func_id: 0x%x, filter_type: 0x%x,\"\n+\t\t\t\"filter_op: 0x%x.\", func_id, filter_type, filter_op);\n+\treturn 0;\n+}\n+\n static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev)\n {\n \tstruct nic_pause_config pause_config = {0};\n@@ -2734,6 +2774,7 @@ static void hinic_dev_close(struct rte_eth_dev *dev)\n \t.mac_addr_remove               = hinic_mac_addr_remove,\n \t.mac_addr_add                  = hinic_mac_addr_add,\n \t.set_mc_addr_list              = hinic_set_mc_addr_list,\n+\t.filter_ctrl                   = hinic_dev_filter_ctrl,\n };\n \n static const struct eth_dev_ops hinic_pmd_vf_ops = {\n@@ -2765,6 +2806,7 @@ static void hinic_dev_close(struct rte_eth_dev *dev)\n \t.mac_addr_remove               = hinic_mac_addr_remove,\n \t.mac_addr_add                  = hinic_mac_addr_add,\n \t.set_mc_addr_list              = hinic_set_mc_addr_list,\n+\t.filter_ctrl                   = hinic_dev_filter_ctrl,\n };\n \n static int hinic_func_init(struct rte_eth_dev *eth_dev)\ndiff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h\nindex b4f93ad..58a38d9 100644\n--- a/drivers/net/hinic/hinic_pmd_ethdev.h\n+++ b/drivers/net/hinic/hinic_pmd_ethdev.h\n@@ -38,6 +38,30 @@ enum hinic_dev_status {\n \tHINIC_DEV_INTR_EN,\n };\n \n+/* Information about the fdir mode. 
*/\n+struct hinic_hw_fdir_mask {\n+\tuint32_t src_ipv4_mask;\n+\tuint32_t dst_ipv4_mask;\n+\tuint16_t src_port_mask;\n+\tuint16_t dst_port_mask;\n+};\n+\n+/* Flow Director attribute */\n+struct hinic_atr_input {\n+\tu32 dst_ip;\n+\tu32 src_ip;\n+\tu16 src_port;\n+\tu16 dst_port;\n+};\n+\n+struct hinic_fdir_rule {\n+\tstruct hinic_hw_fdir_mask mask;\n+\tstruct hinic_atr_input hinic_fdir; /* key of fdir filter */\n+\tuint8_t queue; /* queue assigned when matched */\n+};\n+\n+extern const struct rte_flow_ops hinic_flow_ops;\n+\n /* hinic nic_device */\n struct hinic_nic_dev {\n \t/* hardware device */\ndiff --git a/drivers/net/hinic/hinic_pmd_flow.c b/drivers/net/hinic/hinic_pmd_flow.c\nnew file mode 100644\nindex 0000000..cf9f105\n--- /dev/null\n+++ b/drivers/net/hinic/hinic_pmd_flow.c\n@@ -0,0 +1,1172 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2017 Huawei Technologies Co., Ltd\n+ */\n+\n+#include <stdio.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <string.h>\n+#include <unistd.h>\n+\n+#include <rte_byteorder.h>\n+#include <rte_common.h>\n+#include <rte_ether.h>\n+#include <rte_ethdev.h>\n+#include <rte_malloc.h>\n+#include <rte_flow.h>\n+#include <rte_flow_driver.h>\n+#include \"base/hinic_compat.h\"\n+#include \"base/hinic_pmd_hwdev.h\"\n+#include \"base/hinic_pmd_hwif.h\"\n+#include \"base/hinic_pmd_wq.h\"\n+#include \"base/hinic_pmd_cmdq.h\"\n+#include \"base/hinic_pmd_niccfg.h\"\n+#include \"hinic_pmd_ethdev.h\"\n+\n+#ifndef UINT8_MAX\n+#define UINT8_MAX          (u8)(~((u8)0))\t/* 0xFF               */\n+#define UINT16_MAX         (u16)(~((u16)0))\t/* 0xFFFF             */\n+#define UINT32_MAX         (u32)(~((u32)0))\t/* 0xFFFFFFFF         */\n+#define UINT64_MAX         (u64)(~((u64)0))\t/* 0xFFFFFFFFFFFFFFFF */\n+#define ASCII_MAX          (0x7F)\n+#endif\n+\n+#define HINIC_MIN_N_TUPLE_PRIO\t\t1\n+#define HINIC_MAX_N_TUPLE_PRIO\t\t7\n+\n+/**\n+ * Endless loop will never happen with below assumption\n+ * 1. there is at least one no-void item(END)\n+ * 2. cur is before END.\n+ */\n+static inline const struct rte_flow_item *\n+next_no_void_pattern(const struct rte_flow_item pattern[],\n+\t\tconst struct rte_flow_item *cur)\n+{\n+\tconst struct rte_flow_item *next =\n+\t\tcur ? cur + 1 : &pattern[0];\n+\twhile (1) {\n+\t\tif (next->type != RTE_FLOW_ITEM_TYPE_VOID)\n+\t\t\treturn next;\n+\t\tnext++;\n+\t}\n+}\n+\n+static inline const struct rte_flow_action *\n+next_no_void_action(const struct rte_flow_action actions[],\n+\t\tconst struct rte_flow_action *cur)\n+{\n+\tconst struct rte_flow_action *next =\n+\t\tcur ? 
cur + 1 : &actions[0];\n+\twhile (1) {\n+\t\tif (next->type != RTE_FLOW_ACTION_TYPE_VOID)\n+\t\t\treturn next;\n+\t\tnext++;\n+\t}\n+}\n+\n+static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\t/* Must be input direction */\n+\tif (!attr->ingress) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n+\t\t\tattr, \"Only support ingress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (attr->egress) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n+\t\t\t\tattr, \"Not support egress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (attr->priority) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n+\t\t\t\tattr, \"Not support priority.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (attr->group) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n+\t\t\t\tattr, \"Not support group.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int hinic_check_filter_arg(const struct rte_flow_attr *attr,\n+\t\t\t\tconst struct rte_flow_item *pattern,\n+\t\t\t\tconst struct rte_flow_action *actions,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tif (!pattern) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,\n+\t\t\t\tNULL, \"NULL pattern.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (!actions) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION_NUM,\n+\t\t\t\tNULL, \"NULL action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (!attr) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,\n+\t\t\t\t   NULL, \"NULL attribute.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\t/* The first non-void item should be MAC */\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_ETH) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Not supported by ethertype filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported last point for range */\n+\tif (item->last) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\titem, \"Not supported last point for range\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Get the MAC info. 
*/\n+\tif (!item->spec || !item->mask) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ethertype filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+hinic_parse_ethertype_aciton(const struct rte_flow_action *actions,\n+\t\t\tconst struct rte_flow_action *act,\n+\t\t\tconst struct rte_flow_action_queue *act_q,\n+\t\t\tstruct rte_eth_ethertype_filter *filter,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\t/* Parse action */\n+\tact = next_no_void_action(actions, NULL);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&\n+\t\tact->type != RTE_FLOW_ACTION_TYPE_DROP) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\tact, \"Not supported action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {\n+\t\tact_q = (const struct rte_flow_action_queue *)act->conf;\n+\t\tfilter->queue = act_q->index;\n+\t} else {\n+\t\tfilter->flags |= RTE_ETHTYPE_FLAGS_DROP;\n+\t}\n+\n+\t/* Check if the next non-void item is END */\n+\tact = next_no_void_action(actions, act);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_END) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\tact, \"Not supported action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Parse the rule to see if it is a ethertype rule.\n+ * And get the ethertype filter info BTW.\n+ * pattern:\n+ * The first not void item can be ETH.\n+ * The next not void item must be END.\n+ * action:\n+ * The first not void action should be QUEUE.\n+ * The next not void action should be END.\n+ * pattern example:\n+ * ITEM\t\tSpec\t\t\tMask\n+ * ETH\t\ttype\t0x0807\t\t0xFFFF\n+ * END\n+ * other members in mask and spec should set to 0x00.\n+ * item->last should be NULL.\n+ */\n+static int\n+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,\n+\t\t\tconst struct rte_flow_item *pattern,\n+\t\t\tconst struct rte_flow_action *actions,\n+\t\t\tstruct rte_eth_ethertype_filter *filter,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item *item;\n+\tconst struct rte_flow_action *act = NULL;\n+\tconst struct rte_flow_item_eth *eth_spec;\n+\tconst struct rte_flow_item_eth *eth_mask;\n+\tconst struct rte_flow_action_queue *act_q = NULL;\n+\n+\tif (hinic_check_filter_arg(attr, pattern, actions, error))\n+\t\treturn -rte_errno;\n+\n+\titem = next_no_void_pattern(pattern, NULL);\n+\tif (hinic_check_ethertype_first_item(item, error))\n+\t\treturn -rte_errno;\n+\n+\teth_spec = (const struct rte_flow_item_eth *)item->spec;\n+\teth_mask = (const struct rte_flow_item_eth *)item->mask;\n+\n+\t/*\n+\t * Mask bits of source MAC address must be full of 0.\n+\t * Mask bits of destination MAC address must be full\n+\t * of 1 or full of 0.\n+\t */\n+\tif (!rte_is_zero_ether_addr(&eth_mask->src) ||\n+\t    (!rte_is_zero_ether_addr(&eth_mask->dst) &&\n+\t     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Invalid ether address mask\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Invalid ethertype mask\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/*\n+\t * If mask bits of destination MAC address\n+\t * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.\n+\t */\n+\tif (rte_is_broadcast_ether_addr(&eth_mask->dst)) {\n+\t\tfilter->mac_addr = 
eth_spec->dst;\n+\t\tfilter->flags |= RTE_ETHTYPE_FLAGS_MAC;\n+\t} else {\n+\t\tfilter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;\n+\t}\n+\tfilter->ether_type = rte_be_to_cpu_16(eth_spec->type);\n+\n+\t/* Check if the next non-void item is END. */\n+\titem = next_no_void_pattern(pattern, item);\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Not supported by ethertype filter.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (hinic_parse_ethertype_aciton(actions, act, act_q, filter, error))\n+\t\treturn -rte_errno;\n+\n+\tif (hinic_check_ethertype_attr_ele(attr, error))\n+\t\treturn -rte_errno;\n+\n+\treturn 0;\n+}\n+\n+static int\n+hinic_parse_ethertype_filter(struct rte_eth_dev *dev,\n+\t\t\tconst struct rte_flow_attr *attr,\n+\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct rte_eth_ethertype_filter *filter,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tif (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))\n+\t\treturn -rte_errno;\n+\n+\t/* NIC doesn't support MAC address. */\n+\tif (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ethertype_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\tNULL, \"Not supported by ethertype filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (filter->queue >= dev->data->nb_rx_queues) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ethertype_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\tNULL, \"Queue index much too big\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||\n+\t\tfilter->ether_type == RTE_ETHER_TYPE_IPV6) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ethertype_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\tNULL, \"IPv4/IPv6 not supported by ethertype filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ethertype_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\tNULL, \"Drop option is unsupported\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Hinic only support LACP/ARP for ether type */\n+\tif (filter->ether_type != RTE_ETHER_TYPE_SLOW &&\n+\t\tfilter->ether_type != RTE_ETHER_TYPE_ARP) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ethertype_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, NULL,\n+\t\t\t\"only lacp/arp type supported by ethertype filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,\n+\t\t\t\tstruct rte_eth_ntuple_filter *filter,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\t/* Must be input direction */\n+\tif (!attr->ingress) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n+\t\t\t\t   attr, \"Only support ingress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (attr->egress) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n+\t\t\t\t   attr, \"Not support egress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (attr->priority > 0xFFFF) {\n+\t\tmemset(filter, 0, sizeof(struct 
rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n+\t\t\t\t   attr, \"Error priority.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||\n+\t\t    attr->priority > HINIC_MAX_N_TUPLE_PRIO)\n+\t\tfilter->priority = 1;\n+\telse\n+\t\tfilter->priority = (uint16_t)attr->priority;\n+\n+\treturn 0;\n+}\n+\n+static int\n+hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct rte_eth_ntuple_filter *filter,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_action *act;\n+\t/*\n+\t * n-tuple only supports forwarding,\n+\t * check if the first not void action is QUEUE.\n+\t */\n+\tact = next_no_void_action(actions, NULL);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\tact, \"Flow action type is not QUEUE.\");\n+\t\treturn -rte_errno;\n+\t}\n+\tfilter->queue =\n+\t\t((const struct rte_flow_action_queue *)act->conf)->index;\n+\n+\t/* Check if the next not void item is END */\n+\tact = next_no_void_action(actions, act);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_END) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\tact, \"Next not void item is not END.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,\n+\t\t\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item *item;\n+\n+\t/* The first not void item can be MAC or IPv4 */\n+\titem = next_no_void_pattern(pattern, NULL);\n+\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_ETH &&\n+\t\titem->type != RTE_FLOW_ITEM_TYPE_IPV4) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Not supported by ntuple filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Skip Ethernet */\n+\tif (item->type == RTE_FLOW_ITEM_TYPE_ETH) {\n+\t\t/* Not supported last point for range */\n+\t\tif (item->last) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\tEINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\titem, \"Not supported last point for range\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\t/* if the first item is MAC, the content should be NULL */\n+\t\tif (item->spec || item->mask) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\t/* check if the next not void item is IPv4 */\n+\t\titem = next_no_void_pattern(pattern, item);\n+\t\tif (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\tEINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\t*ipv4_item = item;\n+\treturn 0;\n+}\n+\n+static int\n+hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,\n+\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\tstruct rte_eth_ntuple_filter *filter,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_ipv4 *ipv4_spec;\n+\tconst struct rte_flow_item_ipv4 *ipv4_mask;\n+\tconst struct rte_flow_item *item = *in_out_item;\n+\n+\t/* Get the IPv4 info */\n+\tif (!item->spec || !item->mask) 
{\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Invalid ntuple mask\");\n+\t\treturn -rte_errno;\n+\t}\n+\t/* Not supported last point for range */\n+\tif (item->last) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\titem, \"Not supported last point for range\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;\n+\t/*\n+\t * Only support src & dst addresses, protocol,\n+\t * others should be masked.\n+\t */\n+\tif (ipv4_mask->hdr.version_ihl ||\n+\t\tipv4_mask->hdr.type_of_service ||\n+\t\tipv4_mask->hdr.total_length ||\n+\t\tipv4_mask->hdr.packet_id ||\n+\t\tipv4_mask->hdr.fragment_offset ||\n+\t\tipv4_mask->hdr.time_to_live ||\n+\t\tipv4_mask->hdr.hdr_checksum ||\n+\t\t!ipv4_mask->hdr.next_proto_id) {\n+\t\trte_flow_error_set(error,\n+\t\t\tEINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Not supported by ntuple filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tfilter->dst_ip_mask = ipv4_mask->hdr.dst_addr;\n+\tfilter->src_ip_mask = ipv4_mask->hdr.src_addr;\n+\tfilter->proto_mask = ipv4_mask->hdr.next_proto_id;\n+\n+\tipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;\n+\tfilter->dst_ip = ipv4_spec->hdr.dst_addr;\n+\tfilter->src_ip = ipv4_spec->hdr.src_addr;\n+\tfilter->proto  = ipv4_spec->hdr.next_proto_id;\n+\n+\t/* Get next no void item */\n+\t*in_out_item = next_no_void_pattern(pattern, item);\n+\treturn 0;\n+}\n+\n+static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,\n+\t\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\t\tstruct rte_eth_ntuple_filter *filter,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_tcp *tcp_spec;\n+\tconst struct rte_flow_item_tcp *tcp_mask;\n+\tconst struct rte_flow_item_icmp *icmp_mask;\n+\tconst struct rte_flow_item *item = *in_out_item;\n+\tu32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);\n+\n+\tif (item->type == RTE_FLOW_ITEM_TYPE_END)\n+\t\treturn 0;\n+\n+\t/* Get TCP or UDP info */\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_END &&\n+\t\t(!item->spec || !item->mask)) {\n+\t\tmemset(filter, 0, ntuple_filter_size);\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Invalid ntuple mask\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported last point for range */\n+\tif (item->last) {\n+\t\tmemset(filter, 0, ntuple_filter_size);\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\titem, \"Not supported last point for range\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (item->type == RTE_FLOW_ITEM_TYPE_TCP) {\n+\t\ttcp_mask = (const struct rte_flow_item_tcp *)item->mask;\n+\n+\t\t/*\n+\t\t * Only support src & dst ports, tcp flags,\n+\t\t * others should be masked.\n+\t\t */\n+\t\tif (tcp_mask->hdr.sent_seq ||\n+\t\t\ttcp_mask->hdr.recv_ack ||\n+\t\t\ttcp_mask->hdr.data_off ||\n+\t\t\ttcp_mask->hdr.rx_win ||\n+\t\t\ttcp_mask->hdr.cksum ||\n+\t\t\ttcp_mask->hdr.tcp_urp) {\n+\t\t\tmemset(filter, 0, ntuple_filter_size);\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\tfilter->dst_port_mask  = tcp_mask->hdr.dst_port;\n+\t\tfilter->src_port_mask  = tcp_mask->hdr.src_port;\n+\t\tif (tcp_mask->hdr.tcp_flags == 0xFF) {\n+\t\t\tfilter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;\n+\t\t} else if (!tcp_mask->hdr.tcp_flags) {\n+\t\t\tfilter->flags &= 
~RTE_NTUPLE_FLAGS_TCP_FLAG;\n+\t\t} else {\n+\t\t\tmemset(filter, 0, ntuple_filter_size);\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\ttcp_spec = (const struct rte_flow_item_tcp *)item->spec;\n+\t\tfilter->dst_port  = tcp_spec->hdr.dst_port;\n+\t\tfilter->src_port  = tcp_spec->hdr.src_port;\n+\t\tfilter->tcp_flags = tcp_spec->hdr.tcp_flags;\n+\t} else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {\n+\t\ticmp_mask = (const struct rte_flow_item_icmp *)item->mask;\n+\n+\t\t/* ICMP all should be masked. */\n+\t\tif (icmp_mask->hdr.icmp_cksum ||\n+\t\t\ticmp_mask->hdr.icmp_ident ||\n+\t\t\ticmp_mask->hdr.icmp_seq_nb ||\n+\t\t\ticmp_mask->hdr.icmp_type ||\n+\t\t\ticmp_mask->hdr.icmp_code) {\n+\t\t\tmemset(filter, 0, ntuple_filter_size);\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\t/* Get next no void item */\n+\t*in_out_item = next_no_void_pattern(pattern, item);\n+\treturn 0;\n+}\n+\n+static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,\n+\t\t\t\t\tstruct rte_eth_ntuple_filter *filter,\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\t/* Check if the next not void item is END */\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Not supported by ntuple filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\treturn 0;\n+}\n+\n+static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,\n+\t\t\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\t\t\tstruct rte_eth_ntuple_filter *filter,\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tif (hinic_ntuple_item_check_ether(&item, pattern, error) ||\n+\t\thinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||\n+\t\thinic_ntuple_item_check_l4(&item, pattern, filter, error) ||\n+\t\thinic_ntuple_item_check_end(item, filter, error))\n+\t\treturn -rte_errno;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Parse the rule to see if it is a n-tuple rule.\n+ * And get the n-tuple filter info BTW.\n+ * pattern:\n+ * The first not void item can be ETH or IPV4.\n+ * The second not void item must be IPV4 if the first one is ETH.\n+ * The third not void item must be UDP or TCP.\n+ * The next not void item must be END.\n+ * action:\n+ * The first not void action should be QUEUE.\n+ * The next not void action should be END.\n+ * pattern example:\n+ * ITEM\t\tSpec\t\t\tMask\n+ * ETH\t\tNULL\t\t\tNULL\n+ * IPV4\t\tsrc_addr 192.168.1.20\t0xFFFFFFFF\n+ *\t\tdst_addr 192.167.3.50\t0xFFFFFFFF\n+ *\t\tnext_proto_id\t17\t0xFF\n+ * UDP/TCP/\tsrc_port\t80\t0xFFFF\n+ * SCTP\t\tdst_port\t80\t0xFFFF\n+ * END\n+ * other members in mask and spec should set to 0x00.\n+ * item->last should be NULL.\n+ * Please aware there's an asumption for all the parsers.\n+ * rte_flow_item is using big endian, rte_flow_attr and\n+ * rte_flow_action are using CPU order.\n+ * Because the pattern is used to describe the packets,\n+ * normally the packets should use network order.\n+ */\n+static int\n+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,\n+\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct rte_eth_ntuple_filter *filter,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item *item = 
NULL;\n+\n+\tif (hinic_check_filter_arg(attr, pattern, actions, error))\n+\t\treturn -rte_errno;\n+\n+\tif (hinic_check_ntuple_item_ele(item, pattern, filter, error))\n+\t\treturn -rte_errno;\n+\n+\tif (hinic_check_ntuple_act_ele(item, actions, filter, error))\n+\t\treturn -rte_errno;\n+\n+\tif (hinic_check_ntuple_attr_ele(attr, filter, error))\n+\t\treturn -rte_errno;\n+\n+\treturn 0;\n+}\n+\n+static int\n+hinic_parse_ntuple_filter(struct rte_eth_dev *dev,\n+\t\t\tconst struct rte_flow_attr *attr,\n+\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct rte_eth_ntuple_filter *filter,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tint ret;\n+\n+\tret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Hinic doesn't support tcp flags */\n+\tif (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t   NULL, \"Not supported by ntuple filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Hinic doesn't support many priorities */\n+\tif (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||\n+\t    filter->priority > HINIC_MAX_N_TUPLE_PRIO) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\tNULL, \"Priority not supported by ntuple filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (filter->queue >= dev->data->nb_rx_queues)\n+\t\treturn -rte_errno;\n+\n+\t/* Fixed value for hinic */\n+\tfilter->flags = RTE_5TUPLE_FLAGS;\n+\treturn 0;\n+}\n+\n+static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,\n+\t\t\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item *item;\n+\n+\t/* The first not void item can be MAC or IPv4  or TCP or UDP */\n+\titem = next_no_void_pattern(pattern, NULL);\n+\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_ETH &&\n+\t\titem->type != RTE_FLOW_ITEM_TYPE_IPV4 &&\n+\t\titem->type != RTE_FLOW_ITEM_TYPE_TCP &&\n+\t\titem->type != RTE_FLOW_ITEM_TYPE_UDP) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\"Not supported by fdir filter,support mac,ipv4,tcp,udp\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported last point for range */\n+\tif (item->last) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,\n+\t\t\t\"Not supported last point for range\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Skip Ethernet */\n+\tif (item->type == RTE_FLOW_ITEM_TYPE_ETH) {\n+\t\t/* All should be masked. 
*/\n+\t\tif (item->spec || item->mask) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by fdir filter,support mac\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\t/* Check if the next not void item is IPv4 */\n+\t\titem = next_no_void_pattern(pattern, item);\n+\t\tif (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\"Not supported by fdir filter,support mac,ipv4\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\t*ip_item = item;\n+\treturn 0;\n+}\n+\n+static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,\n+\t\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\t\tstruct hinic_fdir_rule *rule,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_ipv4 *ipv4_spec;\n+\tconst struct rte_flow_item_ipv4 *ipv4_mask;\n+\tconst struct rte_flow_item *item = *in_out_item;\n+\n+\t/* Get the IPv4 info */\n+\tif (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {\n+\t\t/* Not supported last point for range */\n+\t\tif (item->last) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\titem, \"Not supported last point for range\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\tif (!item->mask) {\n+\t\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Invalid fdir filter mask\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\tipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;\n+\t\t/*\n+\t\t * Only support src & dst addresses,\n+\t\t * others should be masked.\n+\t\t */\n+\t\tif (ipv4_mask->hdr.version_ihl ||\n+\t\t\tipv4_mask->hdr.type_of_service ||\n+\t\t\tipv4_mask->hdr.total_length ||\n+\t\t\tipv4_mask->hdr.packet_id ||\n+\t\t\tipv4_mask->hdr.fragment_offset ||\n+\t\t\tipv4_mask->hdr.time_to_live ||\n+\t\t\tipv4_mask->hdr.next_proto_id ||\n+\t\t\tipv4_mask->hdr.hdr_checksum) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\tEINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\"Not supported by fdir filter, support src,dst ip\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\trule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;\n+\t\trule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;\n+\n+\t\tif (item->spec) {\n+\t\t\tipv4_spec =\n+\t\t\t\t(const struct rte_flow_item_ipv4 *)item->spec;\n+\t\t\trule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;\n+\t\t\trule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;\n+\t\t}\n+\n+\t\t/*\n+\t\t * Check if the next not void item is\n+\t\t * TCP or UDP or END.\n+\t\t */\n+\t\titem = next_no_void_pattern(pattern, item);\n+\t\tif (item->type != RTE_FLOW_ITEM_TYPE_TCP &&\n+\t\t    item->type != RTE_FLOW_ITEM_TYPE_UDP &&\n+\t\t    item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\"Not supported by fdir filter, support tcp, udp, end\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\t*in_out_item = item;\n+\treturn 0;\n+}\n+\n+static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,\n+\t\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\t\tstruct hinic_fdir_rule *rule,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_tcp *tcp_spec;\n+\tconst struct rte_flow_item_tcp *tcp_mask;\n+\tconst struct rte_flow_item_udp *udp_spec;\n+\tconst struct rte_flow_item_udp *udp_mask;\n+\tconst 
struct rte_flow_item *item = *in_out_item;\n+\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\t/* Not supported last point for range */\n+\t\tif (item->last) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\titem, \"Not supported last point for range\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\t/* Get TCP/UDP info */\n+\t\tif (item->type == RTE_FLOW_ITEM_TYPE_TCP) {\n+\t\t\t/*\n+\t\t\t * Only care about src & dst ports,\n+\t\t\t * others should be masked.\n+\t\t\t */\n+\t\t\tif (!item->mask) {\n+\t\t\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t\"Not supported by fdir filter,support src,dst ports\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\ttcp_mask = (const struct rte_flow_item_tcp *)item->mask;\n+\t\t\tif (tcp_mask->hdr.sent_seq ||\n+\t\t\t\ttcp_mask->hdr.recv_ack ||\n+\t\t\t\ttcp_mask->hdr.data_off ||\n+\t\t\t\ttcp_mask->hdr.tcp_flags ||\n+\t\t\t\ttcp_mask->hdr.rx_win ||\n+\t\t\t\ttcp_mask->hdr.cksum ||\n+\t\t\t\ttcp_mask->hdr.tcp_urp) {\n+\t\t\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\titem, \"Not supported by fdir filter,support tcp\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\trule->mask.src_port_mask = tcp_mask->hdr.src_port;\n+\t\t\trule->mask.dst_port_mask = tcp_mask->hdr.dst_port;\n+\n+\t\t\tif (item->spec) {\n+\t\t\t\ttcp_spec =\n+\t\t\t\t\t(const struct rte_flow_item_tcp *)\n+\t\t\t\t\titem->spec;\n+\t\t\t\trule->hinic_fdir.src_port =\n+\t\t\t\t\ttcp_spec->hdr.src_port;\n+\t\t\t\trule->hinic_fdir.dst_port =\n+\t\t\t\t\ttcp_spec->hdr.dst_port;\n+\t\t\t}\n+\n+\t\t} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {\n+\t\t\t/*\n+\t\t\t * Only care about src & dst ports,\n+\t\t\t * others should be masked.\n+\t\t\t */\n+\t\t\tif (!item->mask) {\n+\t\t\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\titem, \"Not supported by fdir filter,support src,dst ports\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\n+\t\t\tudp_mask = (const struct rte_flow_item_udp *)item->mask;\n+\t\t\tif (udp_mask->hdr.dgram_len ||\n+\t\t\t    udp_mask->hdr.dgram_cksum) {\n+\t\t\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\titem, \"Not supported by fdir filter,support udp\");\n+\t\t\t\treturn -rte_errno;\n+\t\t\t}\n+\t\t\trule->mask.src_port_mask = udp_mask->hdr.src_port;\n+\t\t\trule->mask.dst_port_mask = udp_mask->hdr.dst_port;\n+\n+\t\t\tif (item->spec) {\n+\t\t\t\tudp_spec =\n+\t\t\t\t\t(const struct rte_flow_item_udp *)\n+\t\t\t\t\titem->spec;\n+\t\t\t\trule->hinic_fdir.src_port =\n+\t\t\t\t\tudp_spec->hdr.src_port;\n+\t\t\t\trule->hinic_fdir.dst_port =\n+\t\t\t\t\tudp_spec->hdr.dst_port;\n+\t\t\t}\n+\t\t} else {\n+\t\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by fdir filter,support tcp/udp\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\t/* Get next no void item */\n+\t\t*in_out_item = next_no_void_pattern(pattern, item);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int hinic_normal_item_check_end(const struct rte_flow_item *item,\n+\t\t\t\t\tstruct hinic_fdir_rule *rule,\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\t/* 
Check if the next not void item is END */\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Not supported by fdir filter,support end\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int hinic_check_normal_item_ele(const struct rte_flow_item *item,\n+\t\t\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\t\t\tstruct hinic_fdir_rule *rule,\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tif (hinic_normal_item_check_ether(&item, pattern, error) ||\n+\t\thinic_normal_item_check_ip(&item, pattern, rule, error) ||\n+\t\thinic_normal_item_check_l4(&item, pattern, rule, error) ||\n+\t\thinic_normal_item_check_end(item, rule, error))\n+\t\treturn -rte_errno;\n+\n+\treturn 0;\n+}\n+\n+static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,\n+\t\t\t\t\tstruct hinic_fdir_rule *rule,\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\t/* Must be input direction */\n+\tif (!attr->ingress) {\n+\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n+\t\t\t\t   attr, \"Only support ingress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported */\n+\tif (attr->egress) {\n+\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n+\t\t\t\t   attr, \"Not support egress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* Not supported */\n+\tif (attr->priority) {\n+\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n+\t\t\tattr, \"Not support priority.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int hinic_check_normal_act_ele(const struct rte_flow_item *item,\n+\t\t\t\tconst struct rte_flow_action actions[],\n+\t\t\t\tstruct hinic_fdir_rule *rule,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_action *act;\n+\n+\t/* Check if the first not void action is QUEUE */\n+\tact = next_no_void_action(actions, NULL);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {\n+\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\titem, \"Not supported action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\trule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;\n+\n+\t/* Check if the next not void item is END */\n+\tact = next_no_void_action(actions, act);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_END) {\n+\t\tmemset(rule, 0, sizeof(struct hinic_fdir_rule));\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\tact, \"Not supported action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Parse the rule to see if it is a IP or MAC VLAN flow director rule.\n+ * And get the flow director filter info BTW.\n+ * UDP/TCP/SCTP PATTERN:\n+ * The first not void item can be ETH or IPV4 or IPV6\n+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.\n+ * The next not void item could be UDP or TCP(optional)\n+ * The next not void item must be END.\n+ * ACTION:\n+ * The first not void action should be QUEUE.\n+ * The second not void optional action should be MARK,\n+ * mark_id is a uint32_t number.\n+ * The next not void action should be END.\n+ * UDP/TCP pattern example:\n+ * ITEM          Spec\t                                
    Mask\n+ * ETH            NULL                                    NULL\n+ * IPV4           src_addr  1.2.3.6                 0xFFFFFFFF\n+ *                   dst_addr  1.2.3.5                 0xFFFFFFFF\n+ * UDP/TCP    src_port  80                         0xFFFF\n+ *                   dst_port  80                         0xFFFF\n+ * END\n+ * Other members in mask and spec should set to 0x00.\n+ * Item->last should be NULL.\n+ */\n+static int\n+hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,\n+\t\t\t       const struct rte_flow_item pattern[],\n+\t\t\t       const struct rte_flow_action actions[],\n+\t\t\t       struct hinic_fdir_rule *rule,\n+\t\t\t       struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item *item = NULL;\n+\n+\tif (hinic_check_filter_arg(attr, pattern, actions, error))\n+\t\treturn -rte_errno;\n+\n+\tif (hinic_check_normal_item_ele(item, pattern, rule, error))\n+\t\treturn -rte_errno;\n+\n+\tif (hinic_check_normal_attr_ele(attr, rule, error))\n+\t\treturn -rte_errno;\n+\n+\tif (hinic_check_normal_act_ele(item, actions, rule, error))\n+\t\treturn -rte_errno;\n+\n+\treturn 0;\n+}\n+\n+static int\n+hinic_parse_fdir_filter(struct rte_eth_dev *dev,\n+\t\t\tconst struct rte_flow_attr *attr,\n+\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct hinic_fdir_rule *rule,\n+\t\t\tstruct rte_flow_error *error)\n+{\n+\tint ret;\n+\n+\tret = hinic_parse_fdir_filter_normal(attr, pattern,\n+\t\t\t\t\t\tactions, rule, error);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tif (rule->queue >= dev->data->nb_rx_queues)\n+\t\treturn -ENOTSUP;\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * Check if the flow rule is supported by nic.\n+ * It only checkes the format. Don't guarantee the rule can be programmed into\n+ * the HW. Because there can be no enough room for the rule.\n+ */\n+static int hinic_flow_validate(struct rte_eth_dev *dev,\n+\t\t\t\tconst struct rte_flow_attr *attr,\n+\t\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\t\tconst struct rte_flow_action actions[],\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct rte_eth_ethertype_filter ethertype_filter;\n+\tstruct rte_eth_ntuple_filter ntuple_filter;\n+\tstruct hinic_fdir_rule fdir_rule;\n+\tint ret;\n+\n+\tmemset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\tret = hinic_parse_ntuple_filter(dev, attr, pattern,\n+\t\t\t\tactions, &ntuple_filter, error);\n+\tif (!ret)\n+\t\treturn 0;\n+\n+\tmemset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));\n+\tret = hinic_parse_ethertype_filter(dev, attr, pattern,\n+\t\t\t\tactions, &ethertype_filter, error);\n+\n+\tif (!ret)\n+\t\treturn 0;\n+\n+\tmemset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));\n+\tret = hinic_parse_fdir_filter(dev, attr, pattern,\n+\t\t\t\tactions, &fdir_rule, error);\n+\n+\treturn ret;\n+}\n+\n+const struct rte_flow_ops hinic_flow_ops = {\n+\t.validate = hinic_flow_validate,\n+};\ndiff --git a/drivers/net/hinic/meson.build b/drivers/net/hinic/meson.build\nindex 87c8d16..bc7e246 100644\n--- a/drivers/net/hinic/meson.build\n+++ b/drivers/net/hinic/meson.build\n@@ -8,6 +8,7 @@ sources = files(\n \t'hinic_pmd_ethdev.c',\n \t'hinic_pmd_rx.c',\n \t'hinic_pmd_tx.c',\n+ \t'hinic_pmd_flow.c',\n \t)\n \n includes += include_directories('base')\n",
    "prefixes": [
        "v2",
        "08/17"
    ]
}
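
The patch/put operations listed at the top are limited to logged-in
project maintainers. Below is a hedged sketch of a partial update (the
token value is a placeholder, and "accepted" stands in for any valid
state slug, cf. the "state": "superseded" value in the body above):

    import requests

    # HTTP PATCH performs a partial update of the writable fields
    # (e.g. state, delegate, archived).
    resp = requests.patch(
        "https://patches.dpdk.org/api/patches/59751/",
        headers={"Authorization": "Token <your-api-token>"},
        json={"state": "accepted"},
        timeout=30,
    )
    resp.raise_for_status()
    print(resp.json()["state"])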