get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are modified).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/42951/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 42951,
    "url": "http://patches.dpdk.org/api/patches/42951/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/bd72bc7427895d5f2c61789232df7bbd0e7d4d6b.1531387413.git.nelio.laranjeiro@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<bd72bc7427895d5f2c61789232df7bbd0e7d4d6b.1531387413.git.nelio.laranjeiro@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/bd72bc7427895d5f2c61789232df7bbd0e7d4d6b.1531387413.git.nelio.laranjeiro@6wind.com",
    "date": "2018-07-12T09:30:50",
    "name": "[v4,04/21] net/mlx5: support flow Ethernet item along with drop action",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "d76dae5d3afc9e212b71add787eb4fd3af2c953c",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/?format=api",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 6624,
        "url": "http://patches.dpdk.org/api/users/6624/?format=api",
        "username": "shahafs",
        "first_name": "Shahaf",
        "last_name": "Shuler",
        "email": "shahafs@mellanox.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/bd72bc7427895d5f2c61789232df7bbd0e7d4d6b.1531387413.git.nelio.laranjeiro@6wind.com/mbox/",
    "series": [
        {
            "id": 544,
            "url": "http://patches.dpdk.org/api/series/544/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=544",
            "date": "2018-07-12T09:30:46",
            "name": "net/mlx5: flow rework",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/544/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/42951/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/42951/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id C6E591B5B4;\n\tThu, 12 Jul 2018 11:31:36 +0200 (CEST)",
            "from mail-wr1-f67.google.com (mail-wr1-f67.google.com\n\t[209.85.221.67]) by dpdk.org (Postfix) with ESMTP id 4CDE01B550\n\tfor <dev@dpdk.org>; Thu, 12 Jul 2018 11:31:29 +0200 (CEST)",
            "by mail-wr1-f67.google.com with SMTP id j5-v6so14437685wrr.8\n\tfor <dev@dpdk.org>; Thu, 12 Jul 2018 02:31:29 -0700 (PDT)",
            "from laranjeiro-vm.dev.6wind.com\n\t(host.78.145.23.62.rev.coltfrance.com. [62.23.145.78])\n\tby smtp.gmail.com with ESMTPSA id\n\ts2-v6sm18717603wrn.75.2018.07.12.02.31.27\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tThu, 12 Jul 2018 02:31:27 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=hoQkQAjZ7bwsSg/t8xODg7GCv9KD21Hi7TBOPrDwIX0=;\n\tb=omXO0H6wf+kXRtZuldXKWMAXmpZnM7lpO/05s/zVQY7GKn2eTJR14NgIQ6kxLbCvMI\n\ttTlEXAvVyDGDO0wgjMHe6VJDc/6ThTL0AbSyC6iItH5gJI55H41fRNKQ09UFHHCNYqQW\n\tYh4toR3F3GgdCpPNjwqQ5PU/KSu4lmEWwfUmSARuyPMUQR2GpCjRMV1S45tkVaGYIu73\n\t64P/H+klrxrS/KMBAU4XZFYNNyUg0OtZ0os3DgXSLVkgoQEKVivAKFQCWBMVH9qi8nfZ\n\twfuUdktMmiovskS42Cehp1PmitjIImfxLbSW/TEPENndzyE1v+B5t6854ghsJKJH0mSB\n\tzA/Q==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=hoQkQAjZ7bwsSg/t8xODg7GCv9KD21Hi7TBOPrDwIX0=;\n\tb=C9Wo2Ls1xYxF1ZrU8qzmrpkPWn5H7IUd8lo0p8chrXnwc6e4Ai5bLfmr8AtJbOikk5\n\t+pqKP23NKik48FKmhiUn77oY+ozsmB9bEKUDXlz0gbJNPzLxoPmFUY427pZzH0gi6qIA\n\t+IVIy1ViR42fAj4uS2ScqINT27AwOPdYm9ULFnkGdvKP6FyGGxTN5kKxPkI2533G5gaA\n\t9yMd18Ne07jCn+LNRZfNYPErYmGV1COoJC9jsJ8W4xlpEyyOiWacOm+WwjjOFWGNR41p\n\tleiSd2NUVzJYAGn0qHRTPwE46IAR8P2KwlRL/BrCz/pF/wWkUtRvV4tYcua3re/r3DIh\n\tSLhg==",
        "X-Gm-Message-State": "AOUpUlEFOo16klzlRy8PaZDWO4FjC2kvrGhAveftcbiva7h0rtYCt72T\n\t0WQzZodqKZWAL+74CzXEZZrgMQs3JA==",
        "X-Google-Smtp-Source": "AAOMgpcbZ1tevatg0WSH1BgT9v3NdndCtlZ1eF86c6KyogLdYMR99Ao4X2iLfz26uTXR2gEsc2JCvg==",
        "X-Received": "by 2002:adf:ed41:: with SMTP id\n\tu1-v6mr1069000wro.262.1531387888575; \n\tThu, 12 Jul 2018 02:31:28 -0700 (PDT)",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "To": "dev@dpdk.org,\n\tYongseok Koh <yskoh@mellanox.com>",
        "Cc": "Adrien Mazarguil <adrien.mazarguil@6wind.com>",
        "Date": "Thu, 12 Jul 2018 11:30:50 +0200",
        "Message-Id": "<bd72bc7427895d5f2c61789232df7bbd0e7d4d6b.1531387413.git.nelio.laranjeiro@6wind.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<cover.1531387413.git.nelio.laranjeiro@6wind.com>",
        "References": "<cover.1531293415.git.nelio.laranjeiro@6wind.com>\n\t<cover.1531387413.git.nelio.laranjeiro@6wind.com>",
        "Subject": "[dpdk-dev] [PATCH v4 04/21] net/mlx5: support flow Ethernet item\n\talong with drop action",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\nAcked-by: Yongseok Koh <yskoh@mellanox.com>\n---\n drivers/net/mlx5/mlx5.c      |   1 +\n drivers/net/mlx5/mlx5_flow.c | 664 +++++++++++++++++++++++++++++++++--\n 2 files changed, 627 insertions(+), 38 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 74248f098..6d3421fae 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -242,6 +242,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \t/* In case mlx5_dev_stop() has not been called. */\n \tmlx5_dev_interrupt_handler_uninstall(dev);\n \tmlx5_traffic_disable(dev);\n+\tmlx5_flow_flush(dev, NULL);\n \t/* Prevent crashes when queues are still in use. */\n \tdev->rx_pkt_burst = removed_rx_burst;\n \tdev->tx_pkt_burst = removed_tx_burst;\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 8fdc6d7bb..036a8d440 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -35,11 +35,50 @@\n extern const struct eth_dev_ops mlx5_dev_ops;\n extern const struct eth_dev_ops mlx5_dev_ops_isolate;\n \n+/* Pattern Layer bits. */\n+#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)\n+#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)\n+#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)\n+#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)\n+#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)\n+#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)\n+/* Masks. */\n+#define MLX5_FLOW_LAYER_OUTER_L3 \\\n+\t(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)\n+#define MLX5_FLOW_LAYER_OUTER_L4 \\\n+\t(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)\n+\n+/* Actions that modify the fate of matching traffic. */\n+#define MLX5_FLOW_FATE_DROP (1u << 0)\n+\n+/** Handles information leading to a drop fate. */\n+struct mlx5_flow_verbs {\n+\tunsigned int size; /**< Size of the attribute. */\n+\tstruct {\n+\t\tstruct ibv_flow_attr *attr;\n+\t\t/**< Pointer to the Specification buffer. */\n+\t\tuint8_t *specs; /**< Pointer to the specifications. */\n+\t};\n+\tstruct ibv_flow *flow; /**< Verbs flow pointer. */\n+\tstruct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */\n+};\n+\n+/* Flow structure. 
*/\n struct rte_flow {\n \tTAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */\n+\tstruct rte_flow_attr attributes; /**< User flow attribute. */\n+\tuint32_t layers;\n+\t/**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */\n+\tuint32_t fate;\n+\t/**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */\n+\tstruct mlx5_flow_verbs verbs; /* Verbs drop flow. */\n };\n \n static const struct rte_flow_ops mlx5_flow_ops = {\n+\t.validate = mlx5_flow_validate,\n+\t.create = mlx5_flow_create,\n+\t.destroy = mlx5_flow_destroy,\n+\t.flush = mlx5_flow_flush,\n \t.isolate = mlx5_flow_isolate,\n };\n \n@@ -128,13 +167,415 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)\n }\n \n /**\n- * Convert a flow.\n+ * Verify the @p attributes will be correctly understood by the NIC and store\n+ * them in the @p flow if everything is correct.\n  *\n- * @param dev\n+ * @param[in] dev\n  *   Pointer to Ethernet device.\n- * @param list\n- *   Pointer to a TAILQ flow list.\n- * @param[in] attr\n+ * @param[in] attributes\n+ *   Pointer to flow attributes\n+ * @param[in, out] flow\n+ *   Pointer to the rte_flow structure.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_attributes(struct rte_eth_dev *dev,\n+\t\t     const struct rte_flow_attr *attributes,\n+\t\t     struct rte_flow *flow,\n+\t\t     struct rte_flow_error *error)\n+{\n+\tuint32_t priority_max =\n+\t\t((struct priv *)dev->data->dev_private)->config.flow_prio;\n+\n+\tif (attributes->group)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"groups is not supported\");\n+\tif (attributes->priority >= priority_max)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"priority out of range\");\n+\tif 
(attributes->egress)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"egress is not supported\");\n+\tif (attributes->transfer)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"transfer is not supported\");\n+\tif (!attributes->ingress)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"ingress attribute is mandatory\");\n+\tflow->attributes = *attributes;\n+\treturn 0;\n+}\n+\n+/**\n+ * Verify the @p item specifications (spec, last, mask) are compatible with the\n+ * NIC capabilities.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in] mask\n+ *   @p item->mask or flow default bit-masks.\n+ * @param[in] nic_mask\n+ *   Bit-masks covering supported fields by the NIC to compare with user mask.\n+ * @param[in] size\n+ *   Bit-masks size in bytes.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_item_acceptable(const struct rte_flow_item *item,\n+\t\t\t  const uint8_t *mask,\n+\t\t\t  const uint8_t *nic_mask,\n+\t\t\t  unsigned int size,\n+\t\t\t  struct rte_flow_error *error)\n+{\n+\tunsigned int i;\n+\n+\tassert(nic_mask);\n+\tfor (i = 0; i < size; ++i)\n+\t\tif ((nic_mask[i] | mask[i]) != nic_mask[i])\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"mask enables non supported\"\n+\t\t\t\t\t\t  \" bits\");\n+\tif (!item->spec && (item->mask || item->last))\n+\t\treturn rte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"mask/last without a spec is not\"\n+\t\t\t\t\t  \" supported\");\n+\tif (item->spec && item->last) {\n+\t\tuint8_t 
spec[size];\n+\t\tuint8_t last[size];\n+\t\tunsigned int i;\n+\t\tint ret;\n+\n+\t\tfor (i = 0; i < size; ++i) {\n+\t\t\tspec[i] = ((const uint8_t *)item->spec)[i] & mask[i];\n+\t\t\tlast[i] = ((const uint8_t *)item->last)[i] & mask[i];\n+\t\t}\n+\t\tret = memcmp(spec, last, size);\n+\t\tif (ret != 0)\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  item,\n+\t\t\t\t\t\t  \"range is not supported\");\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * Add a verbs specification into @p flow.\n+ *\n+ * @param[in, out] flow\n+ *   Pointer to flow structure.\n+ * @param[in] src\n+ *   Create specification.\n+ * @param[in] size\n+ *   Size in bytes of the specification to copy.\n+ */\n+static void\n+mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)\n+{\n+\tif (flow->verbs.specs) {\n+\t\tvoid *dst;\n+\n+\t\tdst = (void *)(flow->verbs.specs + flow->verbs.size);\n+\t\tmemcpy(dst, src, size);\n+\t\t++flow->verbs.attr->num_of_specs;\n+\t}\n+\tflow->verbs.size += size;\n+}\n+\n+/**\n+ * Convert the @p item into a Verbs specification after ensuring the NIC\n+ * will understand and process it correctly.\n+ * If the necessary size for the conversion is greater than the @p flow_size,\n+ * nothing is written in @p flow, the validation is still performed.\n+ *\n+ * @param[in] item\n+ *   Item specification.\n+ * @param[in, out] flow\n+ *   Pointer to flow structure.\n+ * @param[in] flow_size\n+ *   Size in bytes of the available space in @p flow, if too small, nothing is\n+ *   written.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   On success the number of bytes consumed/necessary, if the returned value\n+ *   is lesser or equal to @p flow_size, the @p item has fully been converted,\n+ *   otherwise another call with this returned memory size should be done.\n+ *   On error, a negative errno value is returned and rte_errno is set.\n+ */\n+static 
int\n+mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,\n+\t\t   const size_t flow_size, struct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item_eth *spec = item->spec;\n+\tconst struct rte_flow_item_eth *mask = item->mask;\n+\tconst struct rte_flow_item_eth nic_mask = {\n+\t\t.dst.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n+\t\t.src.addr_bytes = \"\\xff\\xff\\xff\\xff\\xff\\xff\",\n+\t\t.type = RTE_BE16(0xffff),\n+\t};\n+\tconst unsigned int size = sizeof(struct ibv_flow_spec_eth);\n+\tstruct ibv_flow_spec_eth eth = {\n+\t\t.type = IBV_FLOW_SPEC_ETH,\n+\t\t.size = size,\n+\t};\n+\tint ret;\n+\n+\tif (flow->layers & MLX5_FLOW_LAYER_OUTER_L2)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t  item,\n+\t\t\t\t\t  \"L2 layers already configured\");\n+\tif (!mask)\n+\t\tmask = &rte_flow_item_eth_mask;\n+\tret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,\n+\t\t\t\t\t(const uint8_t *)&nic_mask,\n+\t\t\t\t\tsizeof(struct rte_flow_item_eth),\n+\t\t\t\t\terror);\n+\tif (ret)\n+\t\treturn ret;\n+\tflow->layers |= MLX5_FLOW_LAYER_OUTER_L2;\n+\tif (size > flow_size)\n+\t\treturn size;\n+\tif (spec) {\n+\t\tunsigned int i;\n+\n+\t\tmemcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);\n+\t\tmemcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);\n+\t\teth.val.ether_type = spec->type;\n+\t\tmemcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);\n+\t\tmemcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);\n+\t\teth.mask.ether_type = mask->type;\n+\t\t/* Remove unwanted bits from values. 
*/\n+\t\tfor (i = 0; i < ETHER_ADDR_LEN; ++i) {\n+\t\t\teth.val.dst_mac[i] &= eth.mask.dst_mac[i];\n+\t\t\teth.val.src_mac[i] &= eth.mask.src_mac[i];\n+\t\t}\n+\t\teth.val.ether_type &= eth.mask.ether_type;\n+\t}\n+\tmlx5_flow_spec_verbs_add(flow, &eth, size);\n+\treturn size;\n+}\n+\n+/**\n+ * Convert the @p pattern into a Verbs specifications after ensuring the NIC\n+ * will understand and process it correctly.\n+ * The conversion is performed item per item, each of them is written into\n+ * the @p flow if its size is lesser or equal to @p flow_size.\n+ * Validation and memory consumption computation are still performed until the\n+ * end of @p pattern, unless an error is encountered.\n+ *\n+ * @param[in] pattern\n+ *   Flow pattern.\n+ * @param[in, out] flow\n+ *   Pointer to the rte_flow structure.\n+ * @param[in] flow_size\n+ *   Size in bytes of the available space in @p flow, if too small some\n+ *   garbage may be present.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   On success the number of bytes consumed/necessary, if the returned value\n+ *   is lesser or equal to @p flow_size, the @pattern  has fully been\n+ *   converted, otherwise another call with this returned memory size should\n+ *   be done.\n+ *   On error, a negative errno value is returned and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_items(const struct rte_flow_item pattern[],\n+\t\tstruct rte_flow *flow, const size_t flow_size,\n+\t\tstruct rte_flow_error *error)\n+{\n+\tint remain = flow_size;\n+\tsize_t size = 0;\n+\n+\tfor (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {\n+\t\tint ret = 0;\n+\n+\t\tswitch (pattern->type) {\n+\t\tcase RTE_FLOW_ITEM_TYPE_VOID:\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ITEM_TYPE_ETH:\n+\t\t\tret = mlx5_flow_item_eth(pattern, flow, remain, error);\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\t\t  pattern,\n+\t\t\t\t\t\t  \"item 
not supported\");\n+\t\t}\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t\tif (remain > ret)\n+\t\t\tremain -= ret;\n+\t\telse\n+\t\t\tremain = 0;\n+\t\tsize += ret;\n+\t}\n+\tif (!flow->layers) {\n+\t\tconst struct rte_flow_item item = {\n+\t\t\t.type = RTE_FLOW_ITEM_TYPE_ETH,\n+\t\t};\n+\n+\t\treturn mlx5_flow_item_eth(&item, flow, flow_size, error);\n+\t}\n+\treturn size;\n+}\n+\n+/**\n+ * Convert the @p action into a Verbs specification after ensuring the NIC\n+ * will understand and process it correctly.\n+ * If the necessary size for the conversion is greater than the @p flow_size,\n+ * nothing is written in @p flow, the validation is still performed.\n+ *\n+ * @param[in] action\n+ *   Action configuration.\n+ * @param[in, out] flow\n+ *   Pointer to flow structure.\n+ * @param[in] flow_size\n+ *   Size in bytes of the available space in @p flow, if too small, nothing is\n+ *   written.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   On success the number of bytes consumed/necessary, if the returned value\n+ *   is lesser or equal to @p flow_size, the @p action has fully been\n+ *   converted, otherwise another call with this returned memory size should\n+ *   be done.\n+ *   On error, a negative errno value is returned and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_action_drop(const struct rte_flow_action *action,\n+\t\t      struct rte_flow *flow, const size_t flow_size,\n+\t\t      struct rte_flow_error *error)\n+{\n+\tunsigned int size = sizeof(struct ibv_flow_spec_action_drop);\n+\tstruct ibv_flow_spec_action_drop drop = {\n+\t\t\t.type = IBV_FLOW_SPEC_ACTION_DROP,\n+\t\t\t.size = size,\n+\t};\n+\n+\tif (flow->fate)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t  action,\n+\t\t\t\t\t  \"multiple fate actions are not\"\n+\t\t\t\t\t  \" supported\");\n+\tif (size < flow_size)\n+\t\tmlx5_flow_spec_verbs_add(flow, &drop, size);\n+\tflow->fate |= 
MLX5_FLOW_FATE_DROP;\n+\treturn size;\n+}\n+\n+/**\n+ * Convert the @p action into @p flow after ensuring the NIC will understand\n+ * and process it correctly.\n+ * The conversion is performed action per action, each of them is written into\n+ * the @p flow if its size is lesser or equal to @p flow_size.\n+ * Validation and memory consumption computation are still performed until the\n+ * end of @p action, unless an error is encountered.\n+ *\n+ * @param[in] dev\n+ *   Pointer to Ethernet device structure.\n+ * @param[in] actions\n+ *   Pointer to flow actions array.\n+ * @param[in, out] flow\n+ *   Pointer to the rte_flow structure.\n+ * @param[in] flow_size\n+ *   Size in bytes of the available space in @p flow, if too small some\n+ *   garbage may be present.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   On success the number of bytes consumed/necessary, if the returned value\n+ *   is lesser or equal to @p flow_size, the @p actions has fully been\n+ *   converted, otherwise another call with this returned memory size should\n+ *   be done.\n+ *   On error, a negative errno value is returned and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_actions(struct rte_eth_dev *dev __rte_unused,\n+\t\t  const struct rte_flow_action actions[],\n+\t\t  struct rte_flow *flow, const size_t flow_size,\n+\t\t  struct rte_flow_error *error)\n+{\n+\tsize_t size = 0;\n+\tint remain = flow_size;\n+\tint ret = 0;\n+\n+\tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {\n+\t\tswitch (actions->type) {\n+\t\tcase RTE_FLOW_ACTION_TYPE_VOID:\n+\t\t\tbreak;\n+\t\tcase RTE_FLOW_ACTION_TYPE_DROP:\n+\t\t\tret = mlx5_flow_action_drop(actions, flow, remain,\n+\t\t\t\t\t\t    error);\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t  actions,\n+\t\t\t\t\t\t  \"action not supported\");\n+\t\t}\n+\t\tif (ret < 0)\n+\t\t\treturn ret;\n+\t\tif (remain > 
ret)\n+\t\t\tremain -= ret;\n+\t\telse\n+\t\t\tremain = 0;\n+\t\tsize += ret;\n+\t}\n+\tif (!flow->fate)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"no fate action found\");\n+\treturn size;\n+}\n+\n+/**\n+ * Convert the @p attributes, @p pattern, @p action, into an flow for the NIC\n+ * after ensuring the NIC will understand and process it correctly.\n+ * The conversion is only performed item/action per item/action, each of\n+ * them is written into the @p flow if its size is lesser or equal to @p\n+ * flow_size.\n+ * Validation and memory consumption computation are still performed until the\n+ * end, unless an error is encountered.\n+ *\n+ * @param[in] dev\n+ *   Pointer to Ethernet device.\n+ * @param[in, out] flow\n+ *   Pointer to flow structure.\n+ * @param[in] flow_size\n+ *   Size in bytes of the available space in @p flow, if too small some\n+ *   garbage may be present.\n+ * @param[in] attributes\n  *   Flow rule attributes.\n  * @param[in] pattern\n  *   Pattern specification (list terminated by the END pattern item).\n@@ -144,21 +585,42 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)\n  *   Perform verbose error reporting if not NULL.\n  *\n  * @return\n- *   A flow on success, NULL otherwise and rte_errno is set.\n+ *   On success the number of bytes consumed/necessary, if the returned value\n+ *   is lesser or equal to @p flow_size, the flow has fully been converted and\n+ *   can be applied, otherwise another call with this returned memory size\n+ *   should be done.\n+ *   On error, a negative errno value is returned and rte_errno is set.\n  */\n-static struct rte_flow *\n-mlx5_flow_list_create(struct rte_eth_dev *dev __rte_unused,\n-\t\t      struct mlx5_flows *list __rte_unused,\n-\t\t      const struct rte_flow_attr *attr __rte_unused,\n-\t\t      const struct rte_flow_item items[] __rte_unused,\n-\t\t      const struct rte_flow_action actions[] 
__rte_unused,\n-\t\t      struct rte_flow_error *error)\n+static int\n+mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,\n+\t\tconst size_t flow_size,\n+\t\tconst struct rte_flow_attr *attributes,\n+\t\tconst struct rte_flow_item pattern[],\n+\t\tconst struct rte_flow_action actions[],\n+\t\tstruct rte_flow_error *error)\n {\n-\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t   NULL,\n-\t\t\t   \"action not supported\");\n-\treturn NULL;\n+\tstruct rte_flow local_flow = { .layers = 0, };\n+\tsize_t size = sizeof(*flow) + sizeof(struct ibv_flow_attr);\n+\tint remain = (flow_size > size) ? flow_size - size : 0;\n+\tint ret;\n+\n+\tif (!remain)\n+\t\tflow = &local_flow;\n+\tret = mlx5_flow_attributes(dev, attributes, flow, error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\tret = mlx5_flow_items(pattern, flow, remain, error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\tsize += ret;\n+\tremain = (flow_size > size) ? flow_size - size : 0;\n+\tret = mlx5_flow_actions(dev, actions, flow, remain, error);\n+\tif (ret < 0)\n+\t\treturn ret;\n+\tsize += ret;\n+\tif (size <= flow_size)\n+\t\tflow->verbs.attr->priority = flow->attributes.priority;\n+\treturn size;\n }\n \n /**\n@@ -168,16 +630,142 @@ mlx5_flow_list_create(struct rte_eth_dev *dev __rte_unused,\n  * @see rte_flow_ops\n  */\n int\n-mlx5_flow_validate(struct rte_eth_dev *dev __rte_unused,\n-\t\t   const struct rte_flow_attr *attr __rte_unused,\n-\t\t   const struct rte_flow_item items[] __rte_unused,\n-\t\t   const struct rte_flow_action actions[] __rte_unused,\n+mlx5_flow_validate(struct rte_eth_dev *dev,\n+\t\t   const struct rte_flow_attr *attr,\n+\t\t   const struct rte_flow_item items[],\n+\t\t   const struct rte_flow_action actions[],\n \t\t   struct rte_flow_error *error)\n {\n-\treturn rte_flow_error_set(error, ENOTSUP,\n-\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t\t  NULL,\n-\t\t\t\t  \"action not supported\");\n+\tint ret = mlx5_flow_merge(dev, NULL, 0, 
attr, items, actions, error);\n+\n+\tif (ret < 0)\n+\t\treturn ret;\n+\treturn 0;\n+}\n+\n+/**\n+ * Remove the flow.\n+ *\n+ * @param[in] dev\n+ *   Pointer to Ethernet device.\n+ * @param[in, out] flow\n+ *   Pointer to flow structure.\n+ */\n+static void\n+mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)\n+{\n+\tif (flow->fate & MLX5_FLOW_FATE_DROP) {\n+\t\tif (flow->verbs.flow) {\n+\t\t\tclaim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));\n+\t\t\tflow->verbs.flow = NULL;\n+\t\t}\n+\t}\n+\tif (flow->verbs.hrxq) {\n+\t\tmlx5_hrxq_drop_release(dev);\n+\t\tflow->verbs.hrxq = NULL;\n+\t}\n+}\n+\n+/**\n+ * Apply the flow.\n+ *\n+ * @param[in] dev\n+ *   Pointer to Ethernet device structure.\n+ * @param[in, out] flow\n+ *   Pointer to flow structure.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,\n+\t\tstruct rte_flow_error *error)\n+{\n+\tflow->verbs.hrxq = mlx5_hrxq_drop_new(dev);\n+\tif (!flow->verbs.hrxq)\n+\t\treturn rte_flow_error_set\n+\t\t\t(error, errno,\n+\t\t\t RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t NULL,\n+\t\t\t \"cannot allocate Drop queue\");\n+\tflow->verbs.flow =\n+\t\tmlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);\n+\tif (!flow->verbs.flow) {\n+\t\tmlx5_hrxq_drop_release(dev);\n+\t\tflow->verbs.hrxq = NULL;\n+\t\treturn rte_flow_error_set(error, errno,\n+\t\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"kernel module refuses to create\"\n+\t\t\t\t\t  \" flow\");\n+\t}\n+\treturn 0;\n+}\n+\n+/**\n+ * Create a flow and add it to @p list.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device.\n+ * @param list\n+ *   Pointer to a TAILQ flow list.\n+ * @param[in] attr\n+ *   Flow rule attributes.\n+ * @param[in] items\n+ *   Pattern specification (list terminated by the END pattern item).\n+ * 
@param[in] actions\n+ *   Associated actions (list terminated by the END action).\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL.\n+ *\n+ * @return\n+ *   A flow on success, NULL otherwise and rte_errno is set.\n+ */\n+static struct rte_flow *\n+mlx5_flow_list_create(struct rte_eth_dev *dev,\n+\t\t      struct mlx5_flows *list,\n+\t\t      const struct rte_flow_attr *attr,\n+\t\t      const struct rte_flow_item items[],\n+\t\t      const struct rte_flow_action actions[],\n+\t\t      struct rte_flow_error *error)\n+{\n+\tstruct rte_flow *flow;\n+\tsize_t size;\n+\tint ret;\n+\n+\tret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);\n+\tif (ret < 0)\n+\t\treturn NULL;\n+\tsize = ret;\n+\tflow = rte_zmalloc(__func__, size, 0);\n+\tif (!flow) {\n+\t\trte_flow_error_set(error, ENOMEM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t   NULL,\n+\t\t\t\t   \"cannot allocate memory\");\n+\t\treturn NULL;\n+\t}\n+\tflow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);\n+\tflow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);\n+\tret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);\n+\tif (ret < 0)\n+\t\tgoto error;\n+\tassert((size_t)ret == size);\n+\tif (dev->data->dev_started) {\n+\t\tret = mlx5_flow_apply(dev, flow, error);\n+\t\tif (ret < 0)\n+\t\t\tgoto error;\n+\t}\n+\tTAILQ_INSERT_TAIL(list, flow, next);\n+\treturn flow;\n+error:\n+\tret = rte_errno; /* Save rte_errno before cleanup. */\n+\tmlx5_flow_remove(dev, flow);\n+\trte_free(flow);\n+\trte_errno = ret; /* Restore rte_errno. 
*/\n+\treturn NULL;\n }\n \n /**\n@@ -187,17 +775,15 @@ mlx5_flow_validate(struct rte_eth_dev *dev __rte_unused,\n  * @see rte_flow_ops\n  */\n struct rte_flow *\n-mlx5_flow_create(struct rte_eth_dev *dev __rte_unused,\n-\t\t const struct rte_flow_attr *attr __rte_unused,\n-\t\t const struct rte_flow_item items[] __rte_unused,\n-\t\t const struct rte_flow_action actions[] __rte_unused,\n+mlx5_flow_create(struct rte_eth_dev *dev,\n+\t\t const struct rte_flow_attr *attr,\n+\t\t const struct rte_flow_item items[],\n+\t\t const struct rte_flow_action actions[],\n \t\t struct rte_flow_error *error)\n {\n-\trte_flow_error_set(error, ENOTSUP,\n-\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n-\t\t\t   NULL,\n-\t\t\t   \"action not supported\");\n-\treturn NULL;\n+\treturn mlx5_flow_list_create\n+\t\t(dev, &((struct priv *)dev->data->dev_private)->flows,\n+\t\t attr, items, actions, error);\n }\n \n /**\n@@ -211,10 +797,12 @@ mlx5_flow_create(struct rte_eth_dev *dev __rte_unused,\n  *   Flow to destroy.\n  */\n static void\n-mlx5_flow_list_destroy(struct rte_eth_dev *dev __rte_unused,\n-\t\t       struct mlx5_flows *list __rte_unused,\n-\t\t       struct rte_flow *flow __rte_unused)\n+mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,\n+\t\t       struct rte_flow *flow)\n {\n+\tmlx5_flow_remove(dev, flow);\n+\tTAILQ_REMOVE(list, flow, next);\n+\trte_free(flow);\n }\n \n /**\n",
    "prefixes": [
        "v4",
        "04/21"
    ]
}