get:
Show a patch.

patch:
Partially update a patch (only the submitted fields are changed).

put:
Update a patch (all writable fields are replaced).
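
For example, the endpoint can be exercised with any HTTP client. Below is a minimal sketch using Python's requests library; the token value is a placeholder read from the environment, and it assumes (per Patchwork's token-authentication scheme) that PUT/PATCH require an API token with maintainer rights on the project.

# Minimal sketch: read and update this patch via the REST API.
# PATCHWORK_TOKEN is a placeholder; "state" slugs such as
# "accepted" are assumed writable only by project maintainers.
import os
import requests

BASE = "http://patches.dpdk.org/api"

# GET: show a patch (no authentication needed for public projects).
resp = requests.get(f"{BASE}/patches/83951/")
resp.raise_for_status()
print(resp.json()["state"])  # e.g. "changes-requested"

# PATCH: partial update -- send only the fields being changed.
# PUT would instead replace all writable fields at once.
update = requests.patch(
    f"{BASE}/patches/83951/",
    json={"state": "accepted"},
    headers={"Authorization": f"Token {os.environ['PATCHWORK_TOKEN']}"},
)
update.raise_for_status()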

GET /api/patches/83951/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 83951,
    "url": "http://patches.dpdk.org/api/patches/83951/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201111064936.768604-4-jiawenwu@trustnetic.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201111064936.768604-4-jiawenwu@trustnetic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201111064936.768604-4-jiawenwu@trustnetic.com",
    "date": "2020-11-11T06:49:02",
    "name": "[v2,03/37] net/txgbe: add ntuple parse rule",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "a87f0c1e2643c1b890b1cdb6a101faaf40955464",
    "submitter": {
        "id": 1932,
        "url": "http://patches.dpdk.org/api/people/1932/?format=api",
        "name": "Jiawen Wu",
        "email": "jiawenwu@trustnetic.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201111064936.768604-4-jiawenwu@trustnetic.com/mbox/",
    "series": [
        {
            "id": 13798,
            "url": "http://patches.dpdk.org/api/series/13798/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=13798",
            "date": "2020-11-11T06:49:00",
            "name": "net: add txgbe PMD part 2",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/13798/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/83951/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/83951/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 61CDCA09D2;\n\tWed, 11 Nov 2020 07:50:24 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id C2A066A1C;\n\tWed, 11 Nov 2020 07:47:45 +0100 (CET)",
            "from smtpbguseast2.qq.com (smtpbguseast2.qq.com [54.204.34.130])\n by dpdk.org (Postfix) with ESMTP id BFE8C5AB7\n for <dev@dpdk.org>; Wed, 11 Nov 2020 07:47:38 +0100 (CET)",
            "from localhost.localdomain.com (unknown [183.129.236.74])\n by esmtp10.qq.com (ESMTP) with\n id ; Wed, 11 Nov 2020 14:47:24 +0800 (CST)"
        ],
        "X-QQ-mid": "bizesmtp27t1605077244tix3ui5u",
        "X-QQ-SSF": "01400000000000C0C000B00A0000000",
        "X-QQ-FEAT": "uZ1irgsSB1m6CFdAoquvnre+TaMEWBbLpHh5XI8+sRMSGncr+n6o5zl6A1t1x\n 8OZV9m2r3qykDn5P+g2q+0GJBIToQ9UT4e/EED877PZMjSukhCVSPiu6rsxLMsz/RKdfjEU\n gMwUAx+ZQ+aHBT4eOLUezCuvTxMIK/gg1Hn+xoSmdMZ59Ao9srB3v0azLoBxdxyi/kffgF1\n JRRPV8BrSRNacYr/L6N84xAga9erBcQI9FN+c9z9mpalbWhNYEz7Xdkf6yabEqDAkKA4//C\n vPSBNZMzyEYsOdcJc1e3jtc8h7l9I4WYl6/0Bqijej8KR70X8ueDxAg680ePmwdBOQwwsvu\n ijVxx36wcvjS8Bu44ZuJwoPR9lExw==",
        "X-QQ-GoodBg": "2",
        "From": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "To": "dev@dpdk.org",
        "Cc": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "Date": "Wed, 11 Nov 2020 14:49:02 +0800",
        "Message-Id": "<20201111064936.768604-4-jiawenwu@trustnetic.com>",
        "X-Mailer": "git-send-email 2.18.4",
        "In-Reply-To": "<20201111064936.768604-1-jiawenwu@trustnetic.com>",
        "References": "<20201111064936.768604-1-jiawenwu@trustnetic.com>",
        "X-QQ-SENDSIZE": "520",
        "Feedback-ID": "bizesmtp:trustnetic.com:qybgforeign:qybgforeign6",
        "X-QQ-Bgrelay": "1",
        "Subject": "[dpdk-dev] [PATCH v2 03/37] net/txgbe: add ntuple parse rule",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add support to parse flow for ntuple filter.\n\nSigned-off-by: Jiawen Wu <jiawenwu@trustnetic.com>\n---\n drivers/net/txgbe/meson.build  |   1 +\n drivers/net/txgbe/txgbe_flow.c | 536 +++++++++++++++++++++++++++++++++\n 2 files changed, 537 insertions(+)\n create mode 100644 drivers/net/txgbe/txgbe_flow.c",
    "diff": "diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build\nindex 345dffaf6..45379175d 100644\n--- a/drivers/net/txgbe/meson.build\n+++ b/drivers/net/txgbe/meson.build\n@@ -6,6 +6,7 @@ objs = [base_objs]\n \n sources = files(\n \t'txgbe_ethdev.c',\n+\t'txgbe_flow.c',\n \t'txgbe_ptypes.c',\n \t'txgbe_pf.c',\n \t'txgbe_rxtx.c',\ndiff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c\nnew file mode 100644\nindex 000000000..6f8be3b7f\n--- /dev/null\n+++ b/drivers/net/txgbe/txgbe_flow.c\n@@ -0,0 +1,536 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2015-2020\n+ */\n+\n+#include <sys/queue.h>\n+#include <stdio.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <string.h>\n+#include <unistd.h>\n+#include <stdarg.h>\n+#include <inttypes.h>\n+#include <netinet/in.h>\n+#include <rte_byteorder.h>\n+#include <rte_common.h>\n+#include <rte_cycles.h>\n+\n+#include <rte_flow.h>\n+#include <rte_flow_driver.h>\n+\n+#include \"txgbe_logs.h\"\n+#include \"base/txgbe.h\"\n+#include \"txgbe_ethdev.h\"\n+#include \"txgbe_rxtx.h\"\n+\n+#define TXGBE_MIN_N_TUPLE_PRIO 1\n+#define TXGBE_MAX_N_TUPLE_PRIO 7\n+\n+/**\n+ * Endless loop will never happen with below assumption\n+ * 1. there is at least one no-void item(END)\n+ * 2. cur is before END.\n+ */\n+static inline\n+const struct rte_flow_item *next_no_void_pattern(\n+\t\tconst struct rte_flow_item pattern[],\n+\t\tconst struct rte_flow_item *cur)\n+{\n+\tconst struct rte_flow_item *next =\n+\t\tcur ? cur + 1 : &pattern[0];\n+\twhile (1) {\n+\t\tif (next->type != RTE_FLOW_ITEM_TYPE_VOID)\n+\t\t\treturn next;\n+\t\tnext++;\n+\t}\n+}\n+\n+static inline\n+const struct rte_flow_action *next_no_void_action(\n+\t\tconst struct rte_flow_action actions[],\n+\t\tconst struct rte_flow_action *cur)\n+{\n+\tconst struct rte_flow_action *next =\n+\t\tcur ? 
cur + 1 : &actions[0];\n+\twhile (1) {\n+\t\tif (next->type != RTE_FLOW_ACTION_TYPE_VOID)\n+\t\t\treturn next;\n+\t\tnext++;\n+\t}\n+}\n+\n+/**\n+ * Please aware there's an asumption for all the parsers.\n+ * rte_flow_item is using big endian, rte_flow_attr and\n+ * rte_flow_action are using CPU order.\n+ * Because the pattern is used to describe the packets,\n+ * normally the packets should use network order.\n+ */\n+\n+/**\n+ * Parse the rule to see if it is a n-tuple rule.\n+ * And get the n-tuple filter info BTW.\n+ * pattern:\n+ * The first not void item can be ETH or IPV4.\n+ * The second not void item must be IPV4 if the first one is ETH.\n+ * The third not void item must be UDP or TCP.\n+ * The next not void item must be END.\n+ * action:\n+ * The first not void action should be QUEUE.\n+ * The next not void action should be END.\n+ * pattern example:\n+ * ITEM\t\tSpec\t\t\tMask\n+ * ETH\t\tNULL\t\t\tNULL\n+ * IPV4\t\tsrc_addr 192.168.1.20\t0xFFFFFFFF\n+ *\t\tdst_addr 192.167.3.50\t0xFFFFFFFF\n+ *\t\tnext_proto_id\t17\t0xFF\n+ * UDP/TCP/\tsrc_port\t80\t0xFFFF\n+ * SCTP\t\tdst_port\t80\t0xFFFF\n+ * END\n+ * other members in mask and spec should set to 0x00.\n+ * item->last should be NULL.\n+ */\n+static int\n+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,\n+\t\t\t const struct rte_flow_item pattern[],\n+\t\t\t const struct rte_flow_action actions[],\n+\t\t\t struct rte_eth_ntuple_filter *filter,\n+\t\t\t struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item *item;\n+\tconst struct rte_flow_action *act;\n+\tconst struct rte_flow_item_ipv4 *ipv4_spec;\n+\tconst struct rte_flow_item_ipv4 *ipv4_mask;\n+\tconst struct rte_flow_item_tcp *tcp_spec;\n+\tconst struct rte_flow_item_tcp *tcp_mask;\n+\tconst struct rte_flow_item_udp *udp_spec;\n+\tconst struct rte_flow_item_udp *udp_mask;\n+\tconst struct rte_flow_item_sctp *sctp_spec;\n+\tconst struct rte_flow_item_sctp *sctp_mask;\n+\tconst struct rte_flow_item_eth *eth_spec;\n+\tconst struct rte_flow_item_eth *eth_mask;\n+\tconst struct rte_flow_item_vlan *vlan_spec;\n+\tconst struct rte_flow_item_vlan *vlan_mask;\n+\tstruct rte_flow_item_eth eth_null;\n+\tstruct rte_flow_item_vlan vlan_null;\n+\n+\tif (!pattern) {\n+\t\trte_flow_error_set(error,\n+\t\t\tEINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,\n+\t\t\tNULL, \"NULL pattern.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (!actions) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION_NUM,\n+\t\t\t\t   NULL, \"NULL action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\tif (!attr) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR,\n+\t\t\t\t   NULL, \"NULL attribute.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tmemset(&eth_null, 0, sizeof(struct rte_flow_item_eth));\n+\tmemset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));\n+\n+\t/* the first not void item can be MAC or IPv4 */\n+\titem = next_no_void_pattern(pattern, NULL);\n+\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_ETH &&\n+\t    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Not supported by ntuple filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\t/* Skip Ethernet */\n+\tif (item->type == RTE_FLOW_ITEM_TYPE_ETH) {\n+\t\teth_spec = item->spec;\n+\t\teth_mask = item->mask;\n+\t\t/*Not supported last point for range*/\n+\t\tif (item->last) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t  EINVAL,\n+\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t  item, \"Not supported last point for 
range\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\t/* if the first item is MAC, the content should be NULL */\n+\t\tif ((item->spec || item->mask) &&\n+\t\t\t(memcmp(eth_spec, &eth_null,\n+\t\t\t\tsizeof(struct rte_flow_item_eth)) ||\n+\t\t\t memcmp(eth_mask, &eth_null,\n+\t\t\t\tsizeof(struct rte_flow_item_eth)))) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\t/* check if the next not void item is IPv4 or Vlan */\n+\t\titem = next_no_void_pattern(pattern, item);\n+\t\tif (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&\n+\t\t\titem->type != RTE_FLOW_ITEM_TYPE_VLAN) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\tEINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\tif (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {\n+\t\tvlan_spec = item->spec;\n+\t\tvlan_mask = item->mask;\n+\t\t/*Not supported last point for range*/\n+\t\tif (item->last) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\tEINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\titem, \"Not supported last point for range\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\t/* the content should be NULL */\n+\t\tif ((item->spec || item->mask) &&\n+\t\t\t(memcmp(vlan_spec, &vlan_null,\n+\t\t\t\tsizeof(struct rte_flow_item_vlan)) ||\n+\t\t\t memcmp(vlan_mask, &vlan_null,\n+\t\t\t\tsizeof(struct rte_flow_item_vlan)))) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\t/* check if the next not void item is IPv4 */\n+\t\titem = next_no_void_pattern(pattern, item);\n+\t\tif (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t  item, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\tif (item->mask) {\n+\t\t/* get the IPv4 info */\n+\t\tif (!item->spec || !item->mask) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Invalid ntuple mask\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\t/*Not supported last point for range*/\n+\t\tif (item->last) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\titem, \"Not supported last point for range\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\tipv4_mask = item->mask;\n+\t\t/**\n+\t\t * Only support src & dst addresses, protocol,\n+\t\t * others should be masked.\n+\t\t */\n+\t\tif (ipv4_mask->hdr.version_ihl ||\n+\t\t    ipv4_mask->hdr.type_of_service ||\n+\t\t    ipv4_mask->hdr.total_length ||\n+\t\t    ipv4_mask->hdr.packet_id ||\n+\t\t    ipv4_mask->hdr.fragment_offset ||\n+\t\t    ipv4_mask->hdr.time_to_live ||\n+\t\t    ipv4_mask->hdr.hdr_checksum) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\tEINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tif ((ipv4_mask->hdr.src_addr != 0 &&\n+\t\t\tipv4_mask->hdr.src_addr != UINT32_MAX) ||\n+\t\t\t(ipv4_mask->hdr.dst_addr != 0 &&\n+\t\t\tipv4_mask->hdr.dst_addr != UINT32_MAX) ||\n+\t\t\t(ipv4_mask->hdr.next_proto_id != UINT8_MAX &&\n+\t\t\tipv4_mask->hdr.next_proto_id != 0)) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\tEINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\tfilter->dst_ip_mask = 
ipv4_mask->hdr.dst_addr;\n+\t\tfilter->src_ip_mask = ipv4_mask->hdr.src_addr;\n+\t\tfilter->proto_mask  = ipv4_mask->hdr.next_proto_id;\n+\n+\t\tipv4_spec = item->spec;\n+\t\tfilter->dst_ip = ipv4_spec->hdr.dst_addr;\n+\t\tfilter->src_ip = ipv4_spec->hdr.src_addr;\n+\t\tfilter->proto  = ipv4_spec->hdr.next_proto_id;\n+\t}\n+\n+\t/* check if the next not void item is TCP or UDP */\n+\titem = next_no_void_pattern(pattern, item);\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_TCP &&\n+\t    item->type != RTE_FLOW_ITEM_TYPE_UDP &&\n+\t    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&\n+\t    item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Not supported by ntuple filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_END &&\n+\t\t(!item->spec && !item->mask)) {\n+\t\tgoto action;\n+\t}\n+\n+\t/* get the TCP/UDP/SCTP info */\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_END &&\n+\t\t(!item->spec || !item->mask)) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Invalid ntuple mask\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/*Not supported last point for range*/\n+\tif (item->last) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\titem, \"Not supported last point for range\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (item->type == RTE_FLOW_ITEM_TYPE_TCP) {\n+\t\ttcp_mask = item->mask;\n+\n+\t\t/**\n+\t\t * Only support src & dst ports, tcp flags,\n+\t\t * others should be masked.\n+\t\t */\n+\t\tif (tcp_mask->hdr.sent_seq ||\n+\t\t    tcp_mask->hdr.recv_ack ||\n+\t\t    tcp_mask->hdr.data_off ||\n+\t\t    tcp_mask->hdr.rx_win ||\n+\t\t    tcp_mask->hdr.cksum ||\n+\t\t    tcp_mask->hdr.tcp_urp) {\n+\t\t\tmemset(filter, 0,\n+\t\t\t\tsizeof(struct rte_eth_ntuple_filter));\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tif ((tcp_mask->hdr.src_port != 0 &&\n+\t\t\ttcp_mask->hdr.src_port != UINT16_MAX) ||\n+\t\t\t(tcp_mask->hdr.dst_port != 0 &&\n+\t\t\ttcp_mask->hdr.dst_port != UINT16_MAX)) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\tEINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\tfilter->dst_port_mask  = tcp_mask->hdr.dst_port;\n+\t\tfilter->src_port_mask  = tcp_mask->hdr.src_port;\n+\t\tif (tcp_mask->hdr.tcp_flags == 0xFF) {\n+\t\t\tfilter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;\n+\t\t} else if (!tcp_mask->hdr.tcp_flags) {\n+\t\t\tfilter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;\n+\t\t} else {\n+\t\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\ttcp_spec = item->spec;\n+\t\tfilter->dst_port  = tcp_spec->hdr.dst_port;\n+\t\tfilter->src_port  = tcp_spec->hdr.src_port;\n+\t\tfilter->tcp_flags = tcp_spec->hdr.tcp_flags;\n+\t} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {\n+\t\tudp_mask = item->mask;\n+\n+\t\t/**\n+\t\t * Only support src & dst ports,\n+\t\t * others should be masked.\n+\t\t */\n+\t\tif (udp_mask->hdr.dgram_len ||\n+\t\t    
udp_mask->hdr.dgram_cksum) {\n+\t\t\tmemset(filter, 0,\n+\t\t\t\tsizeof(struct rte_eth_ntuple_filter));\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tif ((udp_mask->hdr.src_port != 0 &&\n+\t\t\tudp_mask->hdr.src_port != UINT16_MAX) ||\n+\t\t\t(udp_mask->hdr.dst_port != 0 &&\n+\t\t\tudp_mask->hdr.dst_port != UINT16_MAX)) {\n+\t\t\trte_flow_error_set(error,\n+\t\t\t\tEINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\tfilter->dst_port_mask = udp_mask->hdr.dst_port;\n+\t\tfilter->src_port_mask = udp_mask->hdr.src_port;\n+\n+\t\tudp_spec = item->spec;\n+\t\tfilter->dst_port = udp_spec->hdr.dst_port;\n+\t\tfilter->src_port = udp_spec->hdr.src_port;\n+\t} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {\n+\t\tsctp_mask = item->mask;\n+\n+\t\t/**\n+\t\t * Only support src & dst ports,\n+\t\t * others should be masked.\n+\t\t */\n+\t\tif (sctp_mask->hdr.tag ||\n+\t\t    sctp_mask->hdr.cksum) {\n+\t\t\tmemset(filter, 0,\n+\t\t\t\tsizeof(struct rte_eth_ntuple_filter));\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by ntuple filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\n+\t\tfilter->dst_port_mask = sctp_mask->hdr.dst_port;\n+\t\tfilter->src_port_mask = sctp_mask->hdr.src_port;\n+\n+\t\tsctp_spec = item->spec;\n+\t\tfilter->dst_port = sctp_spec->hdr.dst_port;\n+\t\tfilter->src_port = sctp_spec->hdr.src_port;\n+\t} else {\n+\t\tgoto action;\n+\t}\n+\n+\t/* check if the next not void item is END */\n+\titem = next_no_void_pattern(pattern, item);\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\titem, \"Not supported by ntuple filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+action:\n+\n+\t/**\n+\t * n-tuple only supports forwarding,\n+\t * check if the first not void action is QUEUE.\n+\t */\n+\tact = next_no_void_action(actions, NULL);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\titem, \"Not supported action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\tfilter->queue =\n+\t\t((const struct rte_flow_action_queue *)act->conf)->index;\n+\n+\t/* check if the next not void item is END */\n+\tact = next_no_void_action(actions, act);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_END) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\tact, \"Not supported action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* parse attr */\n+\t/* must be input direction */\n+\tif (!attr->ingress) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n+\t\t\t\t   attr, \"Only support ingress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* not supported */\n+\tif (attr->egress) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n+\t\t\t\t   attr, \"Not support egress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* not supported */\n+\tif (attr->transfer) {\n+\t\tmemset(filter, 0, sizeof(struct 
rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,\n+\t\t\t\t   attr, \"No support for transfer.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (attr->priority > 0xFFFF) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n+\t\t\t\t   attr, \"Error priority.\");\n+\t\treturn -rte_errno;\n+\t}\n+\tfilter->priority = (uint16_t)attr->priority;\n+\tif (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||\n+\t\tattr->priority > TXGBE_MAX_N_TUPLE_PRIO)\n+\t\tfilter->priority = 1;\n+\n+\treturn 0;\n+}\n+\n+/* a specific function for txgbe because the flags is specific */\n+static int\n+txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,\n+\t\t\t  const struct rte_flow_attr *attr,\n+\t\t\t  const struct rte_flow_item pattern[],\n+\t\t\t  const struct rte_flow_action actions[],\n+\t\t\t  struct rte_eth_ntuple_filter *filter,\n+\t\t\t  struct rte_flow_error *error)\n+{\n+\tint ret;\n+\n+\tret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);\n+\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* txgbe doesn't support tcp flags */\n+\tif (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t   NULL, \"Not supported by ntuple filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* txgbe doesn't support many priorities */\n+\tif (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||\n+\t    filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_ntuple_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\tNULL, \"Priority not supported by ntuple filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (filter->queue >= dev->data->nb_rx_queues)\n+\t\treturn -rte_errno;\n+\n+\t/* fixed value for txgbe */\n+\tfilter->flags = RTE_5TUPLE_FLAGS;\n+\treturn 0;\n+}\n+\n",
    "prefixes": [
        "v2",
        "03/37"
    ]
}
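
The response above links to several related resources. A short sketch, again with the Python requests library and using only fields shown in the response, downloads the patch mbox and expands the project's archive-URL template:

# Sketch: follow links from the patch detail shown above.
import requests

patch = requests.get("http://patches.dpdk.org/api/patches/83951/").json()

# "mbox" points at the raw patch, suitable for `git am`.
with open("patch.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# Each "series" entry identifies the series the patch belongs to.
for s in patch["series"]:
    print(f"series {s['id']}: {s['name']} (v{s['version']})")

# "list_archive_url_format" is a template; substituting the message ID
# (without angle brackets) yields the list-archive URL.
fmt = patch["project"]["list_archive_url_format"]
print(fmt.format(patch["msgid"].strip("<>")))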