get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Update a patch (full update; all writable fields are replaced).

GET /api/patches/17264/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 17264,
    "url": "http://patches.dpdk.org/api/patches/17264/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/cf5625f8c3b3eb3851a1f025023b10fa77768615.1480096192.git.nelio.laranjeiro@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<cf5625f8c3b3eb3851a1f025023b10fa77768615.1480096192.git.nelio.laranjeiro@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/cf5625f8c3b3eb3851a1f025023b10fa77768615.1480096192.git.nelio.laranjeiro@6wind.com",
    "date": "2016-11-25T18:14:23",
    "name": "[dpdk-dev,3/3] net/mlx5: add rte_flow rule creation",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b65010f352ca3b4f9aac8c03dc5433578fb34aa6",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/?format=api",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/cf5625f8c3b3eb3851a1f025023b10fa77768615.1480096192.git.nelio.laranjeiro@6wind.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/17264/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/17264/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id E1D31FA45;\n\tFri, 25 Nov 2016 19:15:46 +0100 (CET)",
            "from mail-wm0-f49.google.com (mail-wm0-f49.google.com\n\t[74.125.82.49]) by dpdk.org (Postfix) with ESMTP id D4929FA32\n\tfor <dev@dpdk.org>; Fri, 25 Nov 2016 19:14:50 +0100 (CET)",
            "by mail-wm0-f49.google.com with SMTP id t79so98677066wmt.0\n\tfor <dev@dpdk.org>; Fri, 25 Nov 2016 10:14:50 -0800 (PST)",
            "from ping.vm.6wind.com (guy78-3-82-239-227-177.fbx.proxad.net.\n\t[82.239.227.177]) by smtp.gmail.com with ESMTPSA id\n\t135sm14610323wmh.14.2016.11.25.10.14.49\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);\n\tFri, 25 Nov 2016 10:14:49 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references\n\t:in-reply-to:references;\n\tbh=4Fgmr8noW3T3p2zZWVuSrVgan8PVeex5aI0UrJrDMTs=;\n\tb=GtxH3CVBNWMFNv//kodYEYEBZb0nwi4lNGFfnszs+5eMP+P0Z5A9ZzlcUzS+A+ErIy\n\t866Zsyc+aDXHTn2L1yBuGG8UUP/T735R3PnlfeWEZ9135wxrX8IFmyXXUL+kpB/cq/1N\n\tKIFpIR1PkVcOQh0hCRVuuyAfJ9uNa2NAk14wIikRF41GqxslsoxYGfSQJuF0Ghg3LHHA\n\tmoHgZw2C1vjgeXltFxX4J8ewAilIRew+ysvke0YvN5q7n1W8u5YY0BV2VcWFcJcavsN8\n\tlDvMSm/GPG0kRwfbQvu6xPViYBke2Orl7fg6G/rDbuMQzpx8RWR211y1bWouhjuCc7ba\n\tfS+Q==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20130820;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references:in-reply-to:references;\n\tbh=4Fgmr8noW3T3p2zZWVuSrVgan8PVeex5aI0UrJrDMTs=;\n\tb=RabbvInlSLb6sGTNKdci/DWlDjd/Rr72A2ls5XVgaQdZQNT6kVCONssMj6M/gAXE5X\n\toSWHufemEVPWXNo87PaiM50nAYXEWaQIE0Fx8e5J33l+QHq3vaR6EhRF3Hpu4aO8rC/0\n\tCfeSo5vxGWJMrs85huEKvuMoU1WsfxLzdy+2dPLz/aOTzTVVFZh7aHoXKgJWKOucCq8W\n\tHUwC1BH1gcykrqSVUcXcneTnlzt1BowYoqz/WyRVn7EUwQK0HEPAIU4xwvKmE5BnhccB\n\tRGD2qoFpI37mZ4UFJLsAN0EZvDKVdS5q3wKdpu9wuj6TgLc0Es5ro4rVh4BgrXvXppob\n\tDSqw==",
        "X-Gm-Message-State": "AKaTC02n/AMnwmicxT6RMNQdEHe2s2Z0DYEujGd3UZNKhBbyiLAa/8tZKinkD86X1legEqB7",
        "X-Received": "by 10.28.57.197 with SMTP id g188mr8623582wma.26.1480097690066; \n\tFri, 25 Nov 2016 10:14:50 -0800 (PST)",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "To": "dev@dpdk.org",
        "Cc": "Adrien Mazarguil <adrien.mazarguil@6wind.com>",
        "Date": "Fri, 25 Nov 2016 19:14:23 +0100",
        "Message-Id": "<cf5625f8c3b3eb3851a1f025023b10fa77768615.1480096192.git.nelio.laranjeiro@6wind.com>",
        "X-Mailer": "git-send-email 2.1.4",
        "In-Reply-To": [
            "<cover.1480096192.git.nelio.laranjeiro@6wind.com>",
            "<cover.1480096192.git.nelio.laranjeiro@6wind.com>"
        ],
        "References": [
            "<cover.1480096192.git.nelio.laranjeiro@6wind.com>",
            "<cover.1480096192.git.nelio.laranjeiro@6wind.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH 3/3] net/mlx5: add rte_flow rule creation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Convert Ethernet, IPv4, IPv6, TCP, UDP layers into ibv_flow and create\nthose rules when after validation (i.e. NIC supports the rule).\n\nVLAN is still not supported in this commit.\n\nSigned-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\n---\n drivers/net/mlx5/mlx5_flow.c | 645 ++++++++++++++++++++++++++++++++++++++++++-\n 1 file changed, 631 insertions(+), 14 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex 54807ad..e948000 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -31,6 +31,17 @@\n  */\n \n #include <sys/queue.h>\n+#include <string.h>\n+\n+/* Verbs header. */\n+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */\n+#ifdef PEDANTIC\n+#pragma GCC diagnostic ignored \"-Wpedantic\"\n+#endif\n+#include <infiniband/verbs.h>\n+#ifdef PEDANTIC\n+#pragma GCC diagnostic error \"-Wpedantic\"\n+#endif\n \n #include <rte_ethdev.h>\n #include <rte_flow.h>\n@@ -39,11 +50,82 @@\n \n #include \"mlx5.h\"\n \n+/** Define a value to use as index for the drop queue. */\n+#define MLX5_FLOW_DROP_QUEUE ((uint32_t)-1)\n+\n struct rte_flow {\n \tLIST_ENTRY(rte_flow) next;\n+\tstruct ibv_exp_flow_attr *ibv_attr;\n+\tstruct ibv_exp_rwq_ind_table *ind_table;\n+\tstruct ibv_qp *qp;\n+\tstruct ibv_exp_flow *ibv_flow;\n+\tstruct ibv_exp_wq *wq;\n+\tstruct ibv_cq *cq;\n+\tuint8_t drop;\n };\n \n /**\n+ * Check support for a given item.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param mask[in]\n+ *   Bit-mask covering supported fields to compare with spec, last and mask in\n+ *   \\item.\n+ * @param size\n+ *   Bit-Mask size in bytes.\n+ *\n+ * @return\n+ *   0 on success.\n+ */\n+static int\n+mlx5_flow_item_validate(const struct rte_flow_item *item,\n+\t\t\tconst uint8_t *mask, unsigned int size)\n+{\n+\tint ret = 0;\n+\n+\tif (item->spec && !item->mask) {\n+\t\tunsigned int i;\n+\t\tconst uint8_t *spec = item->spec;\n+\n+\t\tfor (i = 0; i < size; ++i)\n+\t\t\tif ((spec[i] | mask[i]) != mask[i])\n+\t\t\t\treturn -1;\n+\t}\n+\tif (item->last && !item->mask) {\n+\t\tunsigned int i;\n+\t\tconst uint8_t *spec = item->last;\n+\n+\t\tfor (i = 0; i < size; ++i)\n+\t\t\tif ((spec[i] | mask[i]) != mask[i])\n+\t\t\t\treturn -1;\n+\t}\n+\tif (item->mask) {\n+\t\tunsigned int i;\n+\t\tconst uint8_t *spec = item->mask;\n+\n+\t\tfor (i = 0; i < 
size; ++i)\n+\t\t\tif ((spec[i] | mask[i]) != mask[i])\n+\t\t\t\treturn -1;\n+\t}\n+\tif (item->spec && item->last) {\n+\t\tuint8_t spec[size];\n+\t\tuint8_t last[size];\n+\t\tconst uint8_t *apply = mask;\n+\t\tunsigned int i;\n+\n+\t\tif (item->mask)\n+\t\t\tapply = item->mask;\n+\t\tfor (i = 0; i < size; ++i) {\n+\t\t\tspec[i] = ((const uint8_t *)item->spec)[i] & apply[i];\n+\t\t\tlast[i] = ((const uint8_t *)item->last)[i] & apply[i];\n+\t\t}\n+\t\tret = memcmp(spec, last, size);\n+\t}\n+\treturn ret;\n+}\n+\n+/**\n  * Validate a flow supported by the NIC.\n  *\n  * @param priv\n@@ -67,9 +149,43 @@ priv_flow_validate(struct priv *priv,\n \t\t   const struct rte_flow_action actions[],\n \t\t   struct rte_flow_error *error)\n {\n-\t(void)priv;\n \tconst struct rte_flow_item *ilast = NULL;\n \tconst struct rte_flow_action *alast = NULL;\n+\t/* Supported mask. */\n+\tconst struct rte_flow_item_eth eth_mask = {\n+\t\t.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },\n+\t\t.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },\n+\t};\n+\tconst struct rte_flow_item_ipv4 ipv4_mask = {\n+\t\t.hdr = {\n+\t\t\t.src_addr = -1,\n+\t\t\t.dst_addr = -1,\n+\t\t},\n+\t};\n+\tconst struct rte_flow_item_ipv6 ipv6_mask = {\n+\t\t.hdr = {\n+\t\t\t.src_addr = {\n+\t\t\t\t0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\n+\t\t\t\t0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\n+\t\t\t},\n+\t\t\t.dst_addr = {\n+\t\t\t\t0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\n+\t\t\t\t0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\n+\t\t\t},\n+\t\t},\n+\t};\n+\tconst struct rte_flow_item_udp udp_mask = {\n+\t\t.hdr = {\n+\t\t\t.src_port = -1,\n+\t\t\t.dst_port = -1,\n+\t\t},\n+\t};\n+\tconst struct rte_flow_item_tcp tcp_mask = {\n+\t\t.hdr = {\n+\t\t\t.src_port = -1,\n+\t\t\t.dst_port = -1,\n+\t\t},\n+\t};\n \n \tif (attr->group) {\n \t\trte_flow_error_set(error, ENOTSUP,\n@@ -100,27 +216,70 @@ priv_flow_validate(struct priv *priv,\n \t\treturn -rte_errno;\n \t}\n \tfor (; 
items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {\n+\t\tint err = 0;\n+\n \t\tif (items->type == RTE_FLOW_ITEM_TYPE_VOID) {\n \t\t\tcontinue;\n \t\t} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {\n \t\t\tif (ilast)\n \t\t\t\tgoto exit_item_not_supported;\n \t\t\tilast = items;\n-\t\t} else if ((items->type == RTE_FLOW_ITEM_TYPE_IPV4) ||\n-\t\t\t   (items->type == RTE_FLOW_ITEM_TYPE_IPV6)) {\n+\t\t\terr = mlx5_flow_item_validate(\n+\t\t\t\t\titems,\n+\t\t\t\t\t(const uint8_t *)&eth_mask,\n+\t\t\t\t\tsizeof(eth_mask));\n+\t\t\tif (err)\n+\t\t\t\tgoto exit_item_not_supported;\n+\t\t} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {\n \t\t\tif (!ilast)\n \t\t\t\tgoto exit_item_not_supported;\n \t\t\telse if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)\n \t\t\t\tgoto exit_item_not_supported;\n \t\t\tilast = items;\n-\t\t} else if ((items->type == RTE_FLOW_ITEM_TYPE_UDP) ||\n-\t\t\t   (items->type == RTE_FLOW_ITEM_TYPE_TCP)) {\n+\t\t\terr = mlx5_flow_item_validate(\n+\t\t\t\t\titems,\n+\t\t\t\t\t(const uint8_t *)&ipv4_mask,\n+\t\t\t\t\tsizeof(ipv4_mask));\n+\t\t\tif (err)\n+\t\t\t\tgoto exit_item_not_supported;\n+\t\t} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {\n+\t\t\tif (!ilast)\n+\t\t\t\tgoto exit_item_not_supported;\n+\t\t\telse if (ilast->type != RTE_FLOW_ITEM_TYPE_ETH)\n+\t\t\t\tgoto exit_item_not_supported;\n+\t\t\tilast = items;\n+\t\t\terr = mlx5_flow_item_validate(\n+\t\t\t\t\titems,\n+\t\t\t\t\t(const uint8_t *)&ipv6_mask,\n+\t\t\t\t\tsizeof(ipv6_mask));\n+\t\t\tif (err)\n+\t\t\t\tgoto exit_item_not_supported;\n+\t\t} else if (items->type == RTE_FLOW_ITEM_TYPE_UDP) {\n \t\t\tif (!ilast)\n \t\t\t\tgoto exit_item_not_supported;\n \t\t\telse if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&\n \t\t\t\t (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))\n \t\t\t\tgoto exit_item_not_supported;\n \t\t\tilast = items;\n+\t\t\terr = mlx5_flow_item_validate(\n+\t\t\t\t\titems,\n+\t\t\t\t\t(const uint8_t *)&udp_mask,\n+\t\t\t\t\tsizeof(udp_mask));\n+\t\t\tif 
(err)\n+\t\t\t\tgoto exit_item_not_supported;\n+\t\t} else if (items->type == RTE_FLOW_ITEM_TYPE_TCP) {\n+\t\t\tif (!ilast)\n+\t\t\t\tgoto exit_item_not_supported;\n+\t\t\telse if ((ilast->type != RTE_FLOW_ITEM_TYPE_IPV4) &&\n+\t\t\t\t (ilast->type != RTE_FLOW_ITEM_TYPE_IPV6))\n+\t\t\t\tgoto exit_item_not_supported;\n+\t\t\tilast = items;\n+\t\t\terr = mlx5_flow_item_validate(\n+\t\t\t\t\titems,\n+\t\t\t\t\t(const uint8_t *)&tcp_mask,\n+\t\t\t\t\tsizeof(tcp_mask));\n+\t\t\tif (err)\n+\t\t\t\tgoto exit_item_not_supported;\n \t\t} else {\n \t\t\tgoto exit_item_not_supported;\n \t\t}\n@@ -128,8 +287,23 @@ priv_flow_validate(struct priv *priv,\n \tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {\n \t\tif (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {\n \t\t\tcontinue;\n-\t\t} else if ((actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) ||\n-\t\t\t   (actions->type == RTE_FLOW_ACTION_TYPE_DROP)) {\n+\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {\n+\t\t\tconst struct rte_flow_action_queue *queue =\n+\t\t\t\t(const struct rte_flow_action_queue *)\n+\t\t\t\tactions->conf;\n+\n+\t\t\tif (alast &&\n+\t\t\t    alast->type != actions->type)\n+\t\t\t\tgoto exit_action_not_supported;\n+\t\t\tif (queue->index > (priv->rxqs_n - 1)) {\n+\t\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t\t   actions,\n+\t\t\t\t\t\t   \"queue index error\");\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\t\t\talast = actions;\n+\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {\n \t\t\tif (alast &&\n \t\t\t    alast->type != actions->type)\n \t\t\t\tgoto exit_action_not_supported;\n@@ -146,6 +320,7 @@ priv_flow_validate(struct priv *priv,\n exit_action_not_supported:\n \trte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t   actions, \"action not supported\");\n+exit:\n \treturn -rte_errno;\n }\n \n@@ -172,6 +347,310 @@ mlx5_flow_validate(struct rte_eth_dev *dev,\n }\n \n /**\n+ * Convert Ethernet item to Verbs 
specification.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param eth[in, out]\n+ *   Verbs Ethernet specification structure.\n+ */\n+static void\n+mlx5_flow_create_eth(const struct rte_flow_item *item,\n+\t\t     struct ibv_exp_flow_spec_eth *eth)\n+{\n+\tconst struct rte_flow_item_eth *spec = item->spec;\n+\tconst struct rte_flow_item_eth *mask = item->mask;\n+\tunsigned int i;\n+\n+\tmemset(eth, 0, sizeof(struct ibv_exp_flow_spec_eth));\n+\t*eth = (struct ibv_exp_flow_spec_eth) {\n+\t\t.type = IBV_EXP_FLOW_SPEC_ETH,\n+\t\t.size = sizeof(struct ibv_exp_flow_spec_eth),\n+\t};\n+\tif (spec) {\n+\t\tmemcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);\n+\t\tmemcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);\n+\t}\n+\tif (mask) {\n+\t\tmemcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);\n+\t\tmemcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);\n+\t}\n+\t/* Remove unwanted bits from values. */\n+\tfor (i = 0; i < ETHER_ADDR_LEN; ++i) {\n+\t\teth->val.dst_mac[i] &= eth->mask.dst_mac[i];\n+\t\teth->val.src_mac[i] &= eth->mask.src_mac[i];\n+\t}\n+\teth->val.ether_type &= eth->mask.ether_type;\n+\teth->val.vlan_tag &= eth->mask.vlan_tag;\n+}\n+\n+/**\n+ * Convert IPv4 item to Verbs specification.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param ipv4[in, out]\n+ *   Verbs IPv4 specification structure.\n+ */\n+static void\n+mlx5_flow_create_ipv4(const struct rte_flow_item *item,\n+\t\t      struct ibv_exp_flow_spec_ipv4 *ipv4)\n+{\n+\tconst struct rte_flow_item_ipv4 *spec = item->spec;\n+\tconst struct rte_flow_item_ipv4 *mask = item->mask;\n+\n+\tmemset(ipv4, 0, sizeof(struct ibv_exp_flow_spec_ipv4));\n+\t*ipv4 = (struct ibv_exp_flow_spec_ipv4) {\n+\t\t.type = IBV_EXP_FLOW_SPEC_IPV4,\n+\t\t.size = sizeof(struct ibv_exp_flow_spec_ipv4),\n+\t};\n+\tif (spec) {\n+\t\tipv4->val = (struct ibv_exp_flow_ipv4_filter){\n+\t\t\t.src_ip = spec->hdr.src_addr,\n+\t\t\t.dst_ip = 
spec->hdr.dst_addr,\n+\t\t};\n+\t}\n+\tif (mask) {\n+\t\tipv4->mask = (struct ibv_exp_flow_ipv4_filter){\n+\t\t\t.src_ip = mask->hdr.src_addr,\n+\t\t\t.dst_ip = mask->hdr.dst_addr,\n+\t\t};\n+\t}\n+\t/* Remove unwanted bits from values. */\n+\tipv4->val.src_ip &= ipv4->mask.src_ip;\n+\tipv4->val.dst_ip &= ipv4->mask.dst_ip;\n+}\n+\n+/**\n+ * Convert IPv6 item to Verbs specification.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param ipv6[in, out]\n+ *   Verbs IPv6 specification structure.\n+ */\n+static void\n+mlx5_flow_create_ipv6(const struct rte_flow_item *item,\n+\t\t      struct ibv_exp_flow_spec_ipv6 *ipv6)\n+{\n+\tconst struct rte_flow_item_ipv6 *spec = item->spec;\n+\tconst struct rte_flow_item_ipv6 *mask = item->mask;\n+\tunsigned int i;\n+\n+\tmemset(ipv6, 0, sizeof(struct ibv_exp_flow_spec_ipv6));\n+\tipv6->type = IBV_EXP_FLOW_SPEC_IPV6;\n+\tipv6->size = sizeof(struct ibv_exp_flow_spec_ipv6);\n+\tif (spec) {\n+\t\tmemcpy(ipv6->val.src_ip, spec->hdr.src_addr,\n+\t\t       RTE_DIM(ipv6->val.src_ip));\n+\t\tmemcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,\n+\t\t       RTE_DIM(ipv6->val.dst_ip));\n+\t}\n+\tif (mask) {\n+\t\tmemcpy(ipv6->mask.src_ip, mask->hdr.src_addr,\n+\t\t       RTE_DIM(ipv6->mask.src_ip));\n+\t\tmemcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,\n+\t\t       RTE_DIM(ipv6->mask.dst_ip));\n+\t}\n+\t/* Remove unwanted bits from values. 
*/\n+\tfor (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {\n+\t\tipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];\n+\t\tipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];\n+\t}\n+}\n+\n+/**\n+ * Convert UDP item to Verbs specification.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param udp[in, out]\n+ *   Verbs UDP specification structure.\n+ */\n+static void\n+mlx5_flow_create_udp(const struct rte_flow_item *item,\n+\t\t     struct ibv_exp_flow_spec_tcp_udp *udp)\n+{\n+\tconst struct rte_flow_item_udp *spec = item->spec;\n+\tconst struct rte_flow_item_udp *mask = item->mask;\n+\n+\tmemset(udp, 0, sizeof(struct ibv_exp_flow_spec_tcp_udp));\n+\t*udp = (struct ibv_exp_flow_spec_tcp_udp) {\n+\t\t.type = IBV_EXP_FLOW_SPEC_UDP,\n+\t\t.size = sizeof(struct ibv_exp_flow_spec_tcp_udp),\n+\t};\n+\tudp->type = IBV_EXP_FLOW_SPEC_UDP;\n+\tif (spec) {\n+\t\tudp->val.dst_port = spec->hdr.dst_port;\n+\t\tudp->val.src_port = spec->hdr.src_port;\n+\t}\n+\tif (mask) {\n+\t\tudp->mask.dst_port = mask->hdr.dst_port;\n+\t\tudp->mask.src_port = mask->hdr.src_port;\n+\t}\n+\t/* Remove unwanted bits from values. 
*/\n+\tudp->val.src_port &= udp->mask.src_port;\n+\tudp->val.dst_port &= udp->mask.dst_port;\n+}\n+\n+/**\n+ * Convert TCP item to Verbs specification.\n+ *\n+ * @param item[in]\n+ *   Item specification.\n+ * @param tcp[in, out]\n+ *   Verbs TCP specification structure.\n+ */\n+static void\n+mlx5_flow_create_tcp(const struct rte_flow_item *item,\n+\t\t     struct ibv_exp_flow_spec_tcp_udp *tcp)\n+{\n+\tconst struct rte_flow_item_tcp *spec = item->spec;\n+\tconst struct rte_flow_item_tcp *mask = item->mask;\n+\n+\tmemset(tcp, 0, sizeof(struct ibv_exp_flow_spec_tcp_udp));\n+\t*tcp = (struct ibv_exp_flow_spec_tcp_udp) {\n+\t\t.type = IBV_EXP_FLOW_SPEC_TCP,\n+\t\t.size = sizeof(struct ibv_exp_flow_spec_tcp_udp),\n+\t};\n+\ttcp->type = IBV_EXP_FLOW_SPEC_TCP;\n+\tif (spec) {\n+\t\ttcp->val.dst_port = spec->hdr.dst_port;\n+\t\ttcp->val.src_port = spec->hdr.src_port;\n+\t}\n+\tif (mask) {\n+\t\ttcp->mask.dst_port = mask->hdr.dst_port;\n+\t\ttcp->mask.src_port = mask->hdr.src_port;\n+\t}\n+\t/* Remove unwanted bits from values. 
*/\n+\ttcp->val.src_port &= tcp->mask.src_port;\n+\ttcp->val.dst_port &= tcp->mask.dst_port;\n+}\n+\n+/**\n+ * Complete flow rule creation.\n+ *\n+ * @param  priv\n+ *   Pointer to private structure.\n+ * @param  ibv_attr\n+ *   Verbs flow attributes.\n+ * @param  queue\n+ *   Destination queue.\n+ * @param[out] error\n+ *   Perform verbose error reporting if not NULL.\n+ *\n+ * @return\n+ *   A flow if the rule could be created.\n+ */\n+static struct rte_flow *\n+priv_flow_create_action_queue(struct priv *priv,\n+\t\t\t      struct ibv_exp_flow_attr *ibv_attr,\n+\t\t\t      uint32_t queue,\n+\t\t\t      struct rte_flow_error *error)\n+{\n+\tstruct rxq_ctrl *rxq;\n+\tstruct rte_flow *rte_flow;\n+\n+\tassert(priv->pd);\n+\tassert(priv->ctx);\n+\trte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);\n+\tif (!rte_flow) {\n+\t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   NULL, \"cannot allocate flow memory\");\n+\t\treturn NULL;\n+\t}\n+\tif (queue == MLX5_FLOW_DROP_QUEUE) {\n+\t\trte_flow->drop = 1;\n+\t\trte_flow->cq =\n+\t\t\tibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,\n+\t\t\t\t\t  &(struct ibv_exp_cq_init_attr){\n+\t\t\t\t\t\t  .comp_mask = 0,\n+\t\t\t\t\t  });\n+\t\tif (!rte_flow->cq) {\n+\t\t\trte_flow_error_set(error, ENOMEM,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t   NULL, \"cannot allocate CQ\");\n+\t\t\tgoto error;\n+\t\t}\n+\t\trte_flow->wq = ibv_exp_create_wq(\n+\t\t\tpriv->ctx,\n+\t\t\t&(struct ibv_exp_wq_init_attr){\n+\t\t\t\t.wq_type = IBV_EXP_WQT_RQ,\n+\t\t\t\t.max_recv_wr = 1,\n+\t\t\t\t.max_recv_sge = 1,\n+\t\t\t\t.pd = priv->pd,\n+\t\t\t\t.cq = rte_flow->cq,\n+\t\t\t});\n+\t} else {\n+\t\trxq = container_of((*priv->rxqs)[queue], struct rxq_ctrl, rxq);\n+\t\trte_flow->drop = 0;\n+\t\trte_flow->wq = rxq->wq;\n+\t}\n+\trte_flow->ibv_attr = ibv_attr;\n+\trte_flow->ind_table = ibv_exp_create_rwq_ind_table(\n+\t\tpriv->ctx,\n+\t\t&(struct ibv_exp_rwq_ind_table_init_attr){\n+\t\t\t.pd = 
priv->pd,\n+\t\t\t.log_ind_tbl_size = 0,\n+\t\t\t.ind_tbl = &rte_flow->wq,\n+\t\t\t.comp_mask = 0,\n+\t\t});\n+\tif (!rte_flow->ind_table) {\n+\t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   NULL, \"cannot allocate indirection table\");\n+\t\tgoto error;\n+\t}\n+\trte_flow->qp = ibv_exp_create_qp(\n+\t\tpriv->ctx,\n+\t\t&(struct ibv_exp_qp_init_attr){\n+\t\t\t.qp_type = IBV_QPT_RAW_PACKET,\n+\t\t\t.comp_mask =\n+\t\t\t\tIBV_EXP_QP_INIT_ATTR_PD |\n+\t\t\t\tIBV_EXP_QP_INIT_ATTR_PORT |\n+\t\t\t\tIBV_EXP_QP_INIT_ATTR_RX_HASH,\n+\t\t\t.pd = priv->pd,\n+\t\t\t.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){\n+\t\t\t\t.rx_hash_function =\n+\t\t\t\t\tIBV_EXP_RX_HASH_FUNC_TOEPLITZ,\n+\t\t\t\t.rx_hash_key_len = rss_hash_default_key_len,\n+\t\t\t\t.rx_hash_key = rss_hash_default_key,\n+\t\t\t\t.rx_hash_fields_mask = 0,\n+\t\t\t\t.rwq_ind_tbl = rte_flow->ind_table,\n+\t\t\t},\n+\t\t\t.port_num = priv->port,\n+\t\t});\n+\tif (!rte_flow->qp) {\n+\t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   NULL, \"cannot allocate QP\");\n+\t\tgoto error;\n+\t}\n+\trte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,\n+\t\t\t\t\t\t rte_flow->ibv_attr);\n+\tif (!rte_flow->ibv_flow) {\n+\t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   NULL, \"flow rule creation failure\");\n+\t\tgoto error;\n+\t}\n+\tif (LIST_EMPTY(&priv->flows))\n+\t\tLIST_INIT(&priv->flows);\n+\tLIST_INSERT_HEAD(&priv->flows, rte_flow, next);\n+\treturn rte_flow;\n+error:\n+\tassert(rte_flow);\n+\tif (rte_flow->qp)\n+\t\tibv_destroy_qp(rte_flow->qp);\n+\tif (rte_flow->ind_table)\n+\t\tibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);\n+\tif (rte_flow->drop && rte_flow->wq)\n+\t\tibv_exp_destroy_wq(rte_flow->wq);\n+\tif (rte_flow->drop && rte_flow->cq)\n+\t\tibv_destroy_cq(rte_flow->cq);\n+\trte_free(rte_flow->ibv_attr);\n+\trte_free(rte_flow);\n+\treturn NULL;\n+}\n+\n+/**\n  * Create a flow.\n  *\n  * @see rte_flow_create()\n@@ 
-185,17 +664,143 @@ mlx5_flow_create(struct rte_eth_dev *dev,\n \t\t struct rte_flow_error *error)\n {\n \tstruct priv *priv = dev->data->dev_private;\n-\tstruct rte_flow *flow;\n+\tstruct rte_flow *rte_flow = NULL;\n+\tstruct ibv_exp_flow_attr *ibv_attr;\n+\tunsigned int flow_size = sizeof(struct ibv_exp_flow_attr);\n \n \tpriv_lock(priv);\n-\tif (priv_flow_validate(priv, attr, items, actions, error)) {\n-\t\tpriv_unlock(priv);\n-\t\treturn NULL;\n+\tif (priv_flow_validate(priv, attr, items, actions, error))\n+\t\tgoto exit;\n+\tibv_attr = rte_malloc(__func__, flow_size, 0);\n+\tif (!ibv_attr) {\n+\t\trte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,\n+\t\t\t\t   NULL, \"cannot allocate ibv_attr memory\");\n+\t\tgoto exit;\n+\t}\n+\t*ibv_attr = (struct ibv_exp_flow_attr){\n+\t\t.type = IBV_EXP_FLOW_ATTR_NORMAL,\n+\t\t.size = sizeof(struct ibv_exp_flow_attr),\n+\t\t.priority = attr->priority,\n+\t\t.num_of_specs = 0,\n+\t\t.port = 0,\n+\t\t.flags = 0,\n+\t\t.reserved = 0,\n+\t};\n+\t/* Update ibv_flow_spec. 
*/\n+\tfor (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {\n+\t\tif (items->type == RTE_FLOW_ITEM_TYPE_VOID) {\n+\t\t\tcontinue;\n+\t\t} else if (items->type == RTE_FLOW_ITEM_TYPE_ETH) {\n+\t\t\tstruct ibv_exp_flow_spec_eth *eth;\n+\t\t\tunsigned int eth_size =\n+\t\t\t\tsizeof(struct ibv_exp_flow_spec_eth);\n+\n+\t\t\tibv_attr = rte_realloc(ibv_attr,\n+\t\t\t\t\t       flow_size + eth_size, 0);\n+\t\t\tif (!ibv_attr)\n+\t\t\t\tgoto error_no_memory;\n+\t\t\teth = (void *)((uintptr_t)ibv_attr + flow_size);\n+\t\t\tmlx5_flow_create_eth(items, eth);\n+\t\t\tflow_size += eth_size;\n+\t\t\t++ibv_attr->num_of_specs;\n+\t\t\tibv_attr->priority = 2;\n+\t\t} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV4) {\n+\t\t\tstruct ibv_exp_flow_spec_ipv4 *ipv4;\n+\t\t\tunsigned int ipv4_size =\n+\t\t\t\tsizeof(struct ibv_exp_flow_spec_ipv4);\n+\n+\t\t\tibv_attr = rte_realloc(ibv_attr,\n+\t\t\t\t\t       flow_size + ipv4_size, 0);\n+\t\t\tif (!ibv_attr)\n+\t\t\t\tgoto error_no_memory;\n+\t\t\tipv4 = (void *)((uintptr_t)ibv_attr + flow_size);\n+\t\t\tmlx5_flow_create_ipv4(items, ipv4);\n+\t\t\tflow_size += ipv4_size;\n+\t\t\t++ibv_attr->num_of_specs;\n+\t\t\tibv_attr->priority = 1;\n+\t\t} else if (items->type == RTE_FLOW_ITEM_TYPE_IPV6) {\n+\t\t\tstruct ibv_exp_flow_spec_ipv6 *ipv6;\n+\t\t\tunsigned int ipv6_size =\n+\t\t\t\tsizeof(struct ibv_exp_flow_spec_ipv6);\n+\n+\t\t\tibv_attr = rte_realloc(ibv_attr,\n+\t\t\t\t\t       flow_size + ipv6_size, 0);\n+\t\t\tif (!ibv_attr)\n+\t\t\t\tgoto error_no_memory;\n+\t\t\tipv6 = (void *)((uintptr_t)ibv_attr + flow_size);\n+\t\t\tmlx5_flow_create_ipv6(items, ipv6);\n+\t\t\tflow_size += ipv6_size;\n+\t\t\t++ibv_attr->num_of_specs;\n+\t\t\tibv_attr->priority = 1;\n+\t\t} else if (items->type == RTE_FLOW_ITEM_TYPE_UDP) {\n+\t\t\tstruct ibv_exp_flow_spec_tcp_udp *udp;\n+\t\t\tunsigned int udp_size =\n+\t\t\t\tsizeof(struct ibv_exp_flow_spec_tcp_udp);\n+\n+\t\t\tibv_attr = rte_realloc(ibv_attr,\n+\t\t\t\t\t       flow_size + udp_size, 
0);\n+\t\t\tif (!ibv_attr)\n+\t\t\t\tgoto error_no_memory;\n+\t\t\tudp = (void *)((uintptr_t)ibv_attr + flow_size);\n+\t\t\tmlx5_flow_create_udp(items, udp);\n+\t\t\tflow_size += udp_size;\n+\t\t\t++ibv_attr->num_of_specs;\n+\t\t\tibv_attr->priority = 0;\n+\t\t} else if (items->type == RTE_FLOW_ITEM_TYPE_TCP) {\n+\t\t\tstruct ibv_exp_flow_spec_tcp_udp *tcp;\n+\t\t\tunsigned int tcp_size =\n+\t\t\t\tsizeof(struct ibv_exp_flow_spec_tcp_udp);\n+\n+\t\t\tibv_attr = rte_realloc(ibv_attr,\n+\t\t\t\t\t       flow_size + tcp_size, 0);\n+\t\t\tif (!ibv_attr)\n+\t\t\t\tgoto error_no_memory;\n+\t\t\ttcp = (void *)((uintptr_t)ibv_attr + flow_size);\n+\t\t\tmlx5_flow_create_tcp(items, tcp);\n+\t\t\tflow_size += tcp_size;\n+\t\t\t++ibv_attr->num_of_specs;\n+\t\t\tibv_attr->priority = 0;\n+\t\t} else {\n+\t\t\t/* This default rule should not happen. */\n+\t\t\trte_free(ibv_attr);\n+\t\t\trte_flow_error_set(\n+\t\t\t\terror, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titems, \"unsupported item\");\n+\t\t\tgoto exit;\n+\t\t}\n \t}\n-\tflow = rte_malloc(__func__, sizeof(struct rte_flow), 0);\n-\tLIST_INSERT_HEAD(&priv->flows, flow, next);\n+\tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {\n+\t\tif (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {\n+\t\t\tcontinue;\n+\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {\n+\t\t\tconst struct rte_flow_action_queue *queue =\n+\t\t\t\t(const struct rte_flow_action_queue *)\n+\t\t\t\tactions->conf;\n+\n+\t\t\trte_flow = priv_flow_create_action_queue(\n+\t\t\t\t\tpriv, ibv_attr,\n+\t\t\t\t\tqueue->index, error);\n+\t\t} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {\n+\t\t\trte_flow = priv_flow_create_action_queue(\n+\t\t\t\t\tpriv, ibv_attr,\n+\t\t\t\t\tMLX5_FLOW_DROP_QUEUE, error);\n+\t\t} else {\n+\t\t\trte_flow_error_set(error, ENOTSUP,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t\t   actions, \"unsupported action\");\n+\t\t\tgoto exit;\n+\t\t}\n+\t}\n+\tpriv_unlock(priv);\n+\treturn 
rte_flow;\n+error_no_memory:\n+\trte_flow_error_set(error, ENOMEM,\n+\t\t\t   RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t   items,\n+\t\t\t   \"cannot allocate memory\");\n+exit:\n \tpriv_unlock(priv);\n-\treturn flow;\n+\treturn NULL;\n }\n \n /**\n@@ -212,6 +817,18 @@ priv_flow_destroy(struct priv *priv,\n {\n \t(void)priv;\n \tLIST_REMOVE(flow, next);\n+\tclaim_zero(ibv_exp_destroy_flow(flow->ibv_flow));\n+\tif (flow->qp)\n+\t\tclaim_zero(ibv_destroy_qp(flow->qp));\n+\tif (flow->ind_table)\n+\t\tclaim_zero(\n+\t\t\tibv_exp_destroy_rwq_ind_table(\n+\t\t\t\tflow->ind_table));\n+\tif (flow->drop && flow->wq)\n+\t\tclaim_zero(ibv_exp_destroy_wq(flow->wq));\n+\tif (flow->drop && flow->cq)\n+\t\tclaim_zero(ibv_destroy_cq(flow->cq));\n+\trte_free(flow->ibv_attr);\n \trte_free(flow);\n }\n \n",
    "prefixes": [
        "dpdk-dev",
        "3/3"
    ]
}