get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/79363/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 79363,
    "url": "https://patches.dpdk.org/api/patches/79363/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/cee4f1b6633c0a63efa88e7fcaf3120b83cc7ee5.1601474841.git.dekelp@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<cee4f1b6633c0a63efa88e7fcaf3120b83cc7ee5.1601474841.git.dekelp@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/cee4f1b6633c0a63efa88e7fcaf3120b83cc7ee5.1601474841.git.dekelp@nvidia.com",
    "date": "2020-09-30T14:10:15",
    "name": "[07/10] net/mlx5: support match on IPv4 fragment packets",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f8106d87563db26760881148953a3887fef5f565",
    "submitter": {
        "id": 1897,
        "url": "https://patches.dpdk.org/api/people/1897/?format=api",
        "name": "Dekel Peled",
        "email": "dekelp@nvidia.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/cee4f1b6633c0a63efa88e7fcaf3120b83cc7ee5.1601474841.git.dekelp@nvidia.com/mbox/",
    "series": [
        {
            "id": 12622,
            "url": "https://patches.dpdk.org/api/series/12622/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=12622",
            "date": "2020-09-30T14:10:08",
            "name": "support match on L3 fragmented packets",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/12622/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/79363/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/79363/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 2524CA04B5;\n\tWed, 30 Sep 2020 16:12:59 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id CBCE01DB73;\n\tWed, 30 Sep 2020 16:11:02 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id CD58A1DAFF\n for <dev@dpdk.org>; Wed, 30 Sep 2020 16:10:55 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n dekelp@nvidia.com) with SMTP; 30 Sep 2020 17:10:53 +0300",
            "from mtl-vdi-280.wap.labs.mlnx. (mtl-vdi-280.wap.labs.mlnx\n [10.228.134.250])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 08UEAWtk023904;\n Wed, 30 Sep 2020 17:10:53 +0300"
        ],
        "From": "Dekel Peled <dekelp@nvidia.com>",
        "To": "orika@nvidia.com, thomas@monjalon.net, ferruh.yigit@intel.com,\n arybchenko@solarflare.com, konstantin.ananyev@intel.com,\n olivier.matz@6wind.com, wenzhuo.lu@intel.com, beilei.xing@intel.com,\n bernard.iremonger@intel.com, matan@nvidia.com, shahafs@nvidia.com,\n viacheslavo@nvidia.com",
        "Cc": "dev@dpdk.org",
        "Date": "Wed, 30 Sep 2020 17:10:15 +0300",
        "Message-Id": "\n <cee4f1b6633c0a63efa88e7fcaf3120b83cc7ee5.1601474841.git.dekelp@nvidia.com>",
        "X-Mailer": "git-send-email 1.7.1",
        "In-Reply-To": "<cover.1601474841.git.dekelp@nvidia.com>",
        "References": "<cover.1601474841.git.dekelp@nvidia.com>",
        "Subject": "[dpdk-dev] [PATCH 07/10] net/mlx5: support match on IPv4 fragment\n\tpackets",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch adds to MLX5 PMD the support of matching on IPv4\nfragmented and non-fragmented packets, using the IPv4 header\nfragment_offset field.\n\nSigned-off-by: Dekel Peled <dekelp@nvidia.com>\n---\n drivers/net/mlx5/mlx5_flow.c       |  48 ++++++++----\n drivers/net/mlx5/mlx5_flow.h       |  10 +++\n drivers/net/mlx5/mlx5_flow_dv.c    | 156 +++++++++++++++++++++++++++++++------\n drivers/net/mlx5/mlx5_flow_verbs.c |   9 ++-\n 4 files changed, 178 insertions(+), 45 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex ffa7646..906741f 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -474,6 +474,8 @@ struct mlx5_flow_tunnel_info {\n  *   Bit-masks covering supported fields by the NIC to compare with user mask.\n  * @param[in] size\n  *   Bit-masks size in bytes.\n+ * @param[in] range_accepted\n+ *   True if range of values is accepted for specific fields, false otherwise.\n  * @param[out] error\n  *   Pointer to error structure.\n  *\n@@ -485,6 +487,7 @@ struct mlx5_flow_tunnel_info {\n \t\t\t  const uint8_t *mask,\n \t\t\t  const uint8_t *nic_mask,\n \t\t\t  unsigned int size,\n+\t\t\t  bool range_accepted,\n \t\t\t  struct rte_flow_error *error)\n {\n \tunsigned int i;\n@@ -502,7 +505,7 @@ struct mlx5_flow_tunnel_info {\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n \t\t\t\t\t  \"mask/last without a spec is not\"\n \t\t\t\t\t  \" supported\");\n-\tif (item->spec && item->last) {\n+\tif (item->spec && item->last && !range_accepted) {\n \t\tuint8_t spec[size];\n \t\tuint8_t last[size];\n \t\tunsigned int i;\n@@ -1277,7 +1280,8 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable\n \t\t(item, (const uint8_t *)mask,\n \t\t (const uint8_t *)&rte_flow_item_icmp6_mask,\n-\t\t sizeof(struct rte_flow_item_icmp6), error);\n+\t\t sizeof(struct rte_flow_item_icmp6),\n+\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret < 0)\n \t\treturn ret;\n \treturn 0;\n@@ -1329,7 +1333,8 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable\n \t\t(item, (const uint8_t *)mask,\n \t\t (const uint8_t *)&rte_flow_item_icmp_mask,\n-\t\t sizeof(struct rte_flow_item_icmp), error);\n+\t\t sizeof(struct rte_flow_item_icmp),\n+\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret < 0)\n \t\treturn ret;\n \treturn 0;\n@@ -1384,7 +1389,7 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n 
\t\t\t\t\t(const uint8_t *)&nic_mask,\n \t\t\t\t\tsizeof(struct rte_flow_item_eth),\n-\t\t\t\t\terror);\n+\t\t\t\t\tMLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \treturn ret;\n }\n \n@@ -1438,7 +1443,7 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n \t\t\t\t\t(const uint8_t *)&nic_mask,\n \t\t\t\t\tsizeof(struct rte_flow_item_vlan),\n-\t\t\t\t\terror);\n+\t\t\t\t\tMLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret)\n \t\treturn ret;\n \tif (!tunnel && mask->tci != RTE_BE16(0x0fff)) {\n@@ -1502,6 +1507,7 @@ struct mlx5_flow_tunnel_info {\n \t\t\t     uint64_t last_item,\n \t\t\t     uint16_t ether_type,\n \t\t\t     const struct rte_flow_item_ipv4 *acc_mask,\n+\t\t\t     bool range_accepted,\n \t\t\t     struct rte_flow_error *error)\n {\n \tconst struct rte_flow_item_ipv4 *mask = item->mask;\n@@ -1572,7 +1578,7 @@ struct mlx5_flow_tunnel_info {\n \t\t\t\t\tacc_mask ? (const uint8_t *)acc_mask\n \t\t\t\t\t\t : (const uint8_t *)&nic_mask,\n \t\t\t\t\tsizeof(struct rte_flow_item_ipv4),\n-\t\t\t\t\terror);\n+\t\t\t\t\trange_accepted, error);\n \tif (ret < 0)\n \t\treturn ret;\n \treturn 0;\n@@ -1592,6 +1598,8 @@ struct mlx5_flow_tunnel_info {\n  * @param[in] acc_mask\n  *   Acceptable mask, if NULL default internal default mask\n  *   will be used to check whether item fields are supported.\n+ * @param[in] range_accepted\n+ *   True if range of values is accepted for specific fields, false otherwise.\n  * @param[out] error\n  *   Pointer to error structure.\n  *\n@@ -1671,7 +1679,7 @@ struct mlx5_flow_tunnel_info {\n \t\t\t\t\tacc_mask ? 
(const uint8_t *)acc_mask\n \t\t\t\t\t\t : (const uint8_t *)&nic_mask,\n \t\t\t\t\tsizeof(struct rte_flow_item_ipv6),\n-\t\t\t\t\terror);\n+\t\t\t\t\tMLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret < 0)\n \t\treturn ret;\n \treturn 0;\n@@ -1726,7 +1734,8 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable\n \t\t(item, (const uint8_t *)mask,\n \t\t (const uint8_t *)&rte_flow_item_udp_mask,\n-\t\t sizeof(struct rte_flow_item_udp), error);\n+\t\t sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,\n+\t\t error);\n \tif (ret < 0)\n \t\treturn ret;\n \treturn 0;\n@@ -1781,7 +1790,8 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable\n \t\t(item, (const uint8_t *)mask,\n \t\t (const uint8_t *)flow_mask,\n-\t\t sizeof(struct rte_flow_item_tcp), error);\n+\t\t sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,\n+\t\t error);\n \tif (ret < 0)\n \t\treturn ret;\n \treturn 0;\n@@ -1835,7 +1845,7 @@ struct mlx5_flow_tunnel_info {\n \t\t(item, (const uint8_t *)mask,\n \t\t (const uint8_t *)&rte_flow_item_vxlan_mask,\n \t\t sizeof(struct rte_flow_item_vxlan),\n-\t\t error);\n+\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret < 0)\n \t\treturn ret;\n \tif (spec) {\n@@ -1906,7 +1916,7 @@ struct mlx5_flow_tunnel_info {\n \t\t(item, (const uint8_t *)mask,\n \t\t (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,\n \t\t sizeof(struct rte_flow_item_vxlan_gpe),\n-\t\t error);\n+\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret < 0)\n \t\treturn ret;\n \tif (spec) {\n@@ -1980,7 +1990,7 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable\n \t\t(item, (const uint8_t *)mask,\n \t\t (const uint8_t *)&gre_key_default_mask,\n-\t\t sizeof(rte_be32_t), error);\n+\t\t sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \treturn ret;\n }\n \n@@ -2032,7 +2042,8 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable\n \t\t(item, (const uint8_t *)mask,\n \t\t (const uint8_t 
*)&nic_mask,\n-\t\t sizeof(struct rte_flow_item_gre), error);\n+\t\t sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,\n+\t\t error);\n \tif (ret < 0)\n \t\treturn ret;\n #ifndef HAVE_MLX5DV_DR\n@@ -2107,7 +2118,8 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable\n \t\t\t\t  (item, (const uint8_t *)mask,\n \t\t\t\t   (const uint8_t *)&nic_mask,\n-\t\t\t\t   sizeof(struct rte_flow_item_geneve), error);\n+\t\t\t\t   sizeof(struct rte_flow_item_geneve),\n+\t\t\t\t   MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret)\n \t\treturn ret;\n \tif (spec) {\n@@ -2190,7 +2202,8 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable\n \t\t(item, (const uint8_t *)mask,\n \t\t (const uint8_t *)&rte_flow_item_mpls_mask,\n-\t\t sizeof(struct rte_flow_item_mpls), error);\n+\t\t sizeof(struct rte_flow_item_mpls),\n+\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret < 0)\n \t\treturn ret;\n \treturn 0;\n@@ -2245,7 +2258,8 @@ struct mlx5_flow_tunnel_info {\n \tret = mlx5_flow_item_acceptable\n \t\t(item, (const uint8_t *)mask,\n \t\t (const uint8_t *)&rte_flow_item_nvgre_mask,\n-\t\t sizeof(struct rte_flow_item_nvgre), error);\n+\t\t sizeof(struct rte_flow_item_nvgre),\n+\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret < 0)\n \t\treturn ret;\n \treturn 0;\n@@ -2339,7 +2353,7 @@ struct mlx5_flow_tunnel_info {\n \t\t\t\t\t acc_mask ? (const uint8_t *)acc_mask\n \t\t\t\t\t\t  : (const uint8_t *)&nic_mask,\n \t\t\t\t\t sizeof(struct rte_flow_item_ecpri),\n-\t\t\t\t\t error);\n+\t\t\t\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n }\n \n /* Allocate unique ID for the split Q/RSS subflows. 
*/\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 279daf2..1e30c93 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -330,6 +330,14 @@ enum mlx5_feature_name {\n #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \\\n \t\t\t\t\t  sizeof(struct rte_flow_item_ipv4))\n \n+/* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */\n+#define MLX5_IPV4_FRAG_OFFSET_MASK \\\n+\t\t(RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)\n+\n+/* Specific item's fields can accept a range of values (using spec and last). */\n+#define MLX5_ITEM_RANGE_NOT_ACCEPTED\tfalse\n+#define MLX5_ITEM_RANGE_ACCEPTED\ttrue\n+\n /* Software header modify action numbers of a flow. */\n #define MLX5_ACT_NUM_MDF_IPV4\t\t1\n #define MLX5_ACT_NUM_MDF_IPV6\t\t4\n@@ -985,6 +993,7 @@ int mlx5_flow_item_acceptable(const struct rte_flow_item *item,\n \t\t\t      const uint8_t *mask,\n \t\t\t      const uint8_t *nic_mask,\n \t\t\t      unsigned int size,\n+\t\t\t      bool range_accepted,\n \t\t\t      struct rte_flow_error *error);\n int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,\n \t\t\t\tuint64_t item_flags,\n@@ -1002,6 +1011,7 @@ int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,\n \t\t\t\t uint64_t last_item,\n \t\t\t\t uint16_t ether_type,\n \t\t\t\t const struct rte_flow_item_ipv4 *acc_mask,\n+\t\t\t\t bool range_accepted,\n \t\t\t\t struct rte_flow_error *error);\n int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,\n \t\t\t\t uint64_t item_flags,\ndiff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c\nindex 0a0a5a4..3379caf 100644\n--- a/drivers/net/mlx5/mlx5_flow_dv.c\n+++ b/drivers/net/mlx5/mlx5_flow_dv.c\n@@ -1418,7 +1418,7 @@ struct field_modify_info modify_tcp[] = {\n \tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n \t\t\t\t\t(const uint8_t *)&nic_mask,\n \t\t\t\t\tsizeof(struct 
rte_flow_item_mark),\n-\t\t\t\t\terror);\n+\t\t\t\t\tMLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret < 0)\n \t\treturn ret;\n \treturn 0;\n@@ -1494,7 +1494,7 @@ struct field_modify_info modify_tcp[] = {\n \tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n \t\t\t\t\t(const uint8_t *)&nic_mask,\n \t\t\t\t\tsizeof(struct rte_flow_item_meta),\n-\t\t\t\t\terror);\n+\t\t\t\t\tMLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \treturn ret;\n }\n \n@@ -1547,7 +1547,7 @@ struct field_modify_info modify_tcp[] = {\n \tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n \t\t\t\t\t(const uint8_t *)&nic_mask,\n \t\t\t\t\tsizeof(struct rte_flow_item_tag),\n-\t\t\t\t\terror);\n+\t\t\t\t\tMLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret < 0)\n \t\treturn ret;\n \tif (mask->index != 0xff)\n@@ -1618,7 +1618,7 @@ struct field_modify_info modify_tcp[] = {\n \t\t\t\t(item, (const uint8_t *)mask,\n \t\t\t\t (const uint8_t *)&rte_flow_item_port_id_mask,\n \t\t\t\t sizeof(struct rte_flow_item_port_id),\n-\t\t\t\t error);\n+\t\t\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret)\n \t\treturn ret;\n \tif (!spec)\n@@ -1691,7 +1691,7 @@ struct field_modify_info modify_tcp[] = {\n \tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n \t\t\t\t\t(const uint8_t *)&nic_mask,\n \t\t\t\t\tsizeof(struct rte_flow_item_vlan),\n-\t\t\t\t\terror);\n+\t\t\t\t\tMLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n \tif (ret)\n \t\treturn ret;\n \tif (!tunnel && mask->tci != RTE_BE16(0x0fff)) {\n@@ -1778,11 +1778,126 @@ struct field_modify_info modify_tcp[] = {\n \t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n \t\t\t\t\t  \"Match is supported for GTP\"\n \t\t\t\t\t  \" flags only\");\n-\treturn mlx5_flow_item_acceptable\n-\t\t(item, (const uint8_t *)mask,\n-\t\t (const uint8_t *)&nic_mask,\n-\t\t sizeof(struct rte_flow_item_gtp),\n-\t\t error);\n+\treturn mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n+\t\t\t\t\t (const uint8_t *)&nic_mask,\n+\t\t\t\t\t sizeof(struct 
rte_flow_item_gtp),\n+\t\t\t\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED, error);\n+}\n+\n+/**\n+ * Validate IPV4 item.\n+ * Use existing validation function mlx5_flow_validate_item_ipv4(), and\n+ * add specific validation of fragment_offset field,\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] item_flags\n+ *   Bit-fields that holds the items detected until now.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_dv_validate_item_ipv4(const struct rte_flow_item *item,\n+\t\t\t   uint64_t item_flags,\n+\t\t\t   uint64_t last_item,\n+\t\t\t   uint16_t ether_type,\n+\t\t\t   struct rte_flow_error *error)\n+{\n+\tint ret;\n+\tconst struct rte_flow_item_ipv4 *spec = item->spec;\n+\tconst struct rte_flow_item_ipv4 *last = item->last;\n+\tconst struct rte_flow_item_ipv4 *mask = item->mask;\n+\trte_be16_t fragment_offset_spec = 0;\n+\trte_be16_t fragment_offset_last = 0;\n+\tconst struct rte_flow_item_ipv4 nic_ipv4_mask = {\n+\t\t.hdr = {\n+\t\t\t.src_addr = RTE_BE32(0xffffffff),\n+\t\t\t.dst_addr = RTE_BE32(0xffffffff),\n+\t\t\t.type_of_service = 0xff,\n+\t\t\t.fragment_offset = RTE_BE16(0xffff),\n+\t\t\t.next_proto_id = 0xff,\n+\t\t\t.time_to_live = 0xff,\n+\t\t},\n+\t};\n+\n+\tret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,\n+\t\t\t\t\t   ether_type, &nic_ipv4_mask,\n+\t\t\t\t\t   MLX5_ITEM_RANGE_ACCEPTED, error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\tif (spec && mask)\n+\t\tfragment_offset_spec = spec->hdr.fragment_offset &\n+\t\t\t\t       mask->hdr.fragment_offset;\n+\tif (!fragment_offset_spec)\n+\t\treturn 0;\n+\t/*\n+\t * spec and mask are valid, enforce using full mask to make sure the\n+\t * complete value is used correctly.\n+\t */\n+\tif ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))\n+\t\t\t!= RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))\n+\t\treturn rte_flow_error_set(error, 
EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_MASK,\n+\t\t\t\t\t  item, \"must use full mask for\"\n+\t\t\t\t\t  \" fragment_offset\");\n+\t/*\n+\t * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,\n+\t * indicating this is 1st fragment of fragmented packet.\n+\t * This is not yet supported in MLX5, return appropriate error message.\n+\t */\n+\tif (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"match on first fragment not \"\n+\t\t\t\t\t  \"supported\");\n+\tif (fragment_offset_spec && !last)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM, item,\n+\t\t\t\t\t  \"specified value not supported\");\n+\t/* spec and last are valid, validate the specified range. */\n+\tfragment_offset_last = last->hdr.fragment_offset &\n+\t\t\t       mask->hdr.fragment_offset;\n+\t/*\n+\t * Match on fragment_offset spec 0x2001 and last 0x3fff\n+\t * means MF is 1 and frag-offset is > 0.\n+\t * This packet is fragment 2nd and onward, excluding last.\n+\t * This is not yet supported in MLX5, return appropriate\n+\t * error message.\n+\t */\n+\tif (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&\n+\t    fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_LAST,\n+\t\t\t\t\t  last, \"match on following \"\n+\t\t\t\t\t  \"fragments not supported\");\n+\t/*\n+\t * Match on fragment_offset spec 0x0001 and last 0x1fff\n+\t * means MF is 0 and frag-offset is > 0.\n+\t * This packet is last fragment of fragmented packet.\n+\t * This is not yet supported in MLX5, return appropriate\n+\t * error message.\n+\t */\n+\tif (fragment_offset_spec == RTE_BE16(1) &&\n+\t    fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  
RTE_FLOW_ERROR_TYPE_ITEM_LAST,\n+\t\t\t\t\t  last, \"match on last \"\n+\t\t\t\t\t  \"fragment not supported\");\n+\t/*\n+\t * Match on fragment_offset spec 0x0001 and last 0x3fff\n+\t * means MF and/or frag-offset is not 0.\n+\t * This is a fragmented packet.\n+\t * Other range values are invalid and rejected.\n+\t */\n+\tif (!(fragment_offset_spec == RTE_BE16(1) &&\n+\t      fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,\n+\t\t\t\t\t  \"specified range not supported\");\n+\treturn 0;\n }\n \n /**\n@@ -5084,15 +5199,6 @@ struct field_modify_info modify_tcp[] = {\n \t\t\t.dst_port = RTE_BE16(UINT16_MAX),\n \t\t}\n \t};\n-\tconst struct rte_flow_item_ipv4 nic_ipv4_mask = {\n-\t\t.hdr = {\n-\t\t\t.src_addr = RTE_BE32(0xffffffff),\n-\t\t\t.dst_addr = RTE_BE32(0xffffffff),\n-\t\t\t.type_of_service = 0xff,\n-\t\t\t.next_proto_id = 0xff,\n-\t\t\t.time_to_live = 0xff,\n-\t\t},\n-\t};\n \tconst struct rte_flow_item_ipv6 nic_ipv6_mask = {\n \t\t.hdr = {\n \t\t\t.src_addr =\n@@ -5192,11 +5298,9 @@ struct field_modify_info modify_tcp[] = {\n \t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n \t\t\tmlx5_flow_tunnel_ip_check(items, next_protocol,\n \t\t\t\t\t\t  &item_flags, &tunnel);\n-\t\t\tret = mlx5_flow_validate_item_ipv4(items, item_flags,\n-\t\t\t\t\t\t\t   last_item,\n-\t\t\t\t\t\t\t   ether_type,\n-\t\t\t\t\t\t\t   &nic_ipv4_mask,\n-\t\t\t\t\t\t\t   error);\n+\t\t\tret = flow_dv_validate_item_ipv4(items, item_flags,\n+\t\t\t\t\t\t\t last_item, ether_type,\n+\t\t\t\t\t\t\t error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n \t\t\tlast_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n@@ -6296,6 +6400,10 @@ struct field_modify_info modify_tcp[] = {\n \t\t ipv4_m->hdr.time_to_live);\n \tMLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,\n \t\t ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);\n+\tMLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,\n+\t\t !!(ipv4_m->hdr.fragment_offset));\n+\tMLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,\n+\t\t !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));\n }\n \n /**\ndiff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c\nindex 62c18b8..276bcb5 100644\n--- a/drivers/net/mlx5/mlx5_flow_verbs.c\n+++ b/drivers/net/mlx5/mlx5_flow_verbs.c\n@@ -1312,10 +1312,11 @@\n \t\t\t}\n \t\t\tbreak;\n \t\tcase RTE_FLOW_ITEM_TYPE_IPV4:\n-\t\t\tret = mlx5_flow_validate_item_ipv4(items, item_flags,\n-\t\t\t\t\t\t\t   last_item,\n-\t\t\t\t\t\t\t   ether_type, NULL,\n-\t\t\t\t\t\t\t   error);\n+\t\t\tret = mlx5_flow_validate_item_ipv4\n+\t\t\t\t\t\t(items, item_flags,\n+\t\t\t\t\t\t last_item, ether_type, NULL,\n+\t\t\t\t\t\t MLX5_ITEM_RANGE_NOT_ACCEPTED,\n+\t\t\t\t\t\t error);\n \t\t\tif (ret < 0)\n \t\t\t\treturn ret;\n \t\t\tlast_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :\n",
    "prefixes": [
        "07/10"
    ]
}