get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (a full update; all writable fields are expected).
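
As a minimal usage sketch (assuming Python with the requests library and a hypothetical API token; neither is part of this page), a client can retrieve this resource with a plain GET and change writable fields such as state with an authenticated PATCH, while PUT behaves the same way but expects every writable field:

import requests

BASE = "https://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # hypothetical token, shown only for illustration

# GET: show a patch (reading public projects needs no authentication).
resp = requests.get(f"{BASE}/patches/24464/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partially update the patch, e.g. move it to another state
# (requires maintainer permissions on the project).
update = requests.patch(
    f"{BASE}/patches/24464/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "superseded"},
)
update.raise_for_status()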

GET /api/patches/24464/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 24464,
    "url": "https://patches.dpdk.org/api/patches/24464/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1495523581-56027-9-git-send-email-wei.zhao1@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1495523581-56027-9-git-send-email-wei.zhao1@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1495523581-56027-9-git-send-email-wei.zhao1@intel.com",
    "date": "2017-05-23T07:12:58",
    "name": "[dpdk-dev,08/11] net/e1000: parse flex filter",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "92d26cb720816b5c4b091308a183fd279c3b7cc6",
    "submitter": {
        "id": 495,
        "url": "https://patches.dpdk.org/api/people/495/?format=api",
        "name": "Zhao1, Wei",
        "email": "wei.zhao1@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1495523581-56027-9-git-send-email-wei.zhao1@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/24464/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/24464/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 1588858F6;\n\tTue, 23 May 2017 09:21:58 +0200 (CEST)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n\tby dpdk.org (Postfix) with ESMTP id 568D3377A\n\tfor <dev@dpdk.org>; Tue, 23 May 2017 09:21:41 +0200 (CEST)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n\tby fmsmga102.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t23 May 2017 00:21:40 -0700",
            "from dpdk1.bj.intel.com ([172.16.182.84])\n\tby fmsmga002.fm.intel.com with ESMTP; 23 May 2017 00:21:39 -0700"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos; i=\"5.38,381,1491289200\"; d=\"scan'208\";\n\ta=\"1173081831\"",
        "From": "Wei Zhao <wei.zhao1@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Wei Zhao <wei.zhao1@intel.com>",
        "Date": "Tue, 23 May 2017 15:12:58 +0800",
        "Message-Id": "<1495523581-56027-9-git-send-email-wei.zhao1@intel.com>",
        "X-Mailer": "git-send-email 2.5.5",
        "In-Reply-To": "<1495523581-56027-1-git-send-email-wei.zhao1@intel.com>",
        "References": "<1495523581-56027-1-git-send-email-wei.zhao1@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 08/11] net/e1000: parse flex filter",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "check if the rule is a flex byte rule, and get the flex info.\n\nSigned-off-by: Wei Zhao <wei.zhao1@intel.com>\n---\n drivers/net/e1000/igb_flow.c | 276 +++++++++++++++++++++++++++++++++++++++++++\n 1 file changed, 276 insertions(+)",
    "diff": "diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c\nindex 61095ab..a587777 100644\n--- a/drivers/net/e1000/igb_flow.c\n+++ b/drivers/net/e1000/igb_flow.c\n@@ -1020,6 +1020,277 @@ igb_parse_syn_filter(struct rte_eth_dev *dev,\n }\n \n /**\n+ * Parse the rule to see if it is a flex byte rule.\n+ * And get the flex byte filter info BTW.\n+ * pattern:\n+ * The first not void item must be RAW.\n+ * The second not void item can be RAW or END.\n+ * The third not void item can be RAW or END.\n+ * The last not void item must be END.\n+ * action:\n+ * The first not void action should be QUEUE.\n+ * The next not void action should be END.\n+ * pattern example:\n+ * ITEM\t\tSpec\t\t\tMask\n+ * RAW\t\trelative\t0\t\t0x1\n+ *\t\t\toffset\t0\t\t0xFFFFFFFF\n+ *\t\t\tpattern\t{0x08, 0x06}\t\t{0xFF, 0xFF}\n+ * RAW\t\trelative\t1\t\t0x1\n+ *\t\t\toffset\t100\t\t0xFFFFFFFF\n+ *\t\t\tpattern\t{0x11, 0x22, 0x33}\t{0xFF, 0xFF, 0xFF}\n+ * END\n+ * other members in mask and spec should set to 0x00.\n+ * item->last should be NULL.\n+ */\n+static int\n+cons_parse_flex_filter(const struct rte_flow_attr *attr,\n+\t\t\t\tconst struct rte_flow_item pattern[],\n+\t\t\t\tconst struct rte_flow_action actions[],\n+\t\t\t\tstruct rte_eth_flex_filter *filter,\n+\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tconst struct rte_flow_item *item;\n+\tconst struct rte_flow_action *act;\n+\tconst struct rte_flow_item_raw *raw_spec;\n+\tconst struct rte_flow_item_raw *raw_mask;\n+\tconst struct rte_flow_action_queue *act_q;\n+\tuint32_t index, i, offset, total_offset = 0;\n+\tint32_t shift;\n+\n+\tif (!pattern) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM_NUM,\n+\t\t\t\tNULL, \"NULL pattern.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (!actions) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION_NUM,\n+\t\t\t\tNULL, \"NULL action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (!attr) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR,\n+\t\t\t\t   NULL, \"NULL attribute.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* parse pattern */\n+\tindex = 0;\n+\n+item_loop:\n+\n+\t/* the first not void item should be RAW */\n+\tNEXT_ITEM_OF_PATTERN(item, pattern, index);\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_RAW) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by flex filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\t\t/*Not supported last point for range*/\n+\tif (item->last) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\titem, \"Not supported last point for range\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\traw_spec = (const struct rte_flow_item_raw *)item->spec;\n+\traw_mask = (const struct rte_flow_item_raw *)item->mask;\n+\n+\tif (!raw_mask->length ||\n+\t    !raw_mask->pattern ||\n+\t    !raw_mask->relative) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_flex_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by flex filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (raw_mask->offset)\n+\t\toffset = raw_spec->offset;\n+\telse\n+\t\toffset = 0;\n+\n+\tfor (index = 0; index < raw_spec->length; index++) {\n+\t\tif (raw_mask->pattern[index] != 0xFF) {\n+\t\t\tmemset(filter, 0, sizeof(struct rte_eth_flex_filter));\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t\titem, \"Not supported by flex 
filter\");\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n+\n+\tif ((raw_spec->length + offset + total_offset) >\n+\t\t\tRTE_FLEX_FILTER_MAXLEN) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_flex_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by flex filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (raw_spec->relative == 0) {\n+\t\tfor (index = 0; index < raw_spec->length; index++)\n+\t\t\tfilter->bytes[index] = raw_spec->pattern[index];\n+\t\tindex = offset / CHAR_BIT;\n+\t} else {\n+\t\tfor (index = 0; index < raw_spec->length; index++)\n+\t\t\tfilter->bytes[total_offset + index] =\n+\t\t\t\traw_spec->pattern[index];\n+\t\tindex = (total_offset + offset) / CHAR_BIT;\n+\t}\n+\n+\ti = 0;\n+\n+\tfor (shift = offset % CHAR_BIT; shift < CHAR_BIT; shift++) {\n+\t\tfilter->mask[index] |= (0x80 >> shift);\n+\t\ti++;\n+\t\tif (i == raw_spec->length)\n+\t\t\tbreak;\n+\t\tif (shift == (CHAR_BIT - 1)) {\n+\t\t\tindex++;\n+\t\t\tshift = -1;\n+\t\t}\n+\t}\n+\n+\ttotal_offset += offset + raw_spec->length;\n+\n+\t/* check if the next not void item is RAW */\n+\tindex++;\n+\tNEXT_ITEM_OF_PATTERN(item, pattern, index);\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_RAW &&\n+\t\titem->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by flex filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* go back to parser */\n+\tif (item->type == RTE_FLOW_ITEM_TYPE_RAW) {\n+\t\t/* if the item is RAW, the content should be parse */\n+\t\tgoto item_loop;\n+\t}\n+\n+\tfilter->len = RTE_ALIGN(total_offset, 8);\n+\n+\t/* check if the next not void item is END */\n+\tif (item->type != RTE_FLOW_ITEM_TYPE_END) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_flex_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\titem, \"Not supported by flex filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* parse action */\n+\tindex = 0;\n+\n+\t/* check if the first not void action is QUEUE. 
*/\n+\tNEXT_ITEM_OF_ACTION(act, actions, index);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_flex_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\tact, \"Not supported action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tact_q = (const struct rte_flow_action_queue *)act->conf;\n+\tfilter->queue = act_q->index;\n+\n+\t/* check if the next not void item is END */\n+\tindex++;\n+\tNEXT_ITEM_OF_ACTION(act, actions, index);\n+\tif (act->type != RTE_FLOW_ACTION_TYPE_END) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_flex_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\tact, \"Not supported action.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* parse attr */\n+\t/* must be input direction */\n+\tif (!attr->ingress) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_flex_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ATTR_INGRESS,\n+\t\t\tattr, \"Only support ingress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\t/* not supported */\n+\tif (attr->egress) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_flex_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ATTR_EGRESS,\n+\t\t\tattr, \"Not support egress.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (attr->priority > 0xFFFF) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_flex_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,\n+\t\t\t\t   attr, \"Error priority.\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tfilter->priority = (uint16_t)attr->priority;\n+\n+\treturn 0;\n+}\n+\n+static int\n+igb_parse_flex_filter(struct rte_eth_dev *dev,\n+\t\t\t\t const struct rte_flow_attr *attr,\n+\t\t\t     const struct rte_flow_item pattern[],\n+\t\t\t     const struct rte_flow_action actions[],\n+\t\t\t     struct rte_eth_flex_filter *filter,\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tstruct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tint ret;\n+\n+\tMAC_TYPE_FILTER_SUP(hw->mac.type);\n+\n+\tret = cons_parse_flex_filter(attr, pattern,\n+\t\t\t\t\tactions, filter, error);\n+\n+\tif (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {\n+\t\tmemset(filter, 0, sizeof(struct rte_eth_flex_filter));\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\tRTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\tNULL, \"queue number not supported by flex filter\");\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN\n+\t    || filter->len % sizeof(uint64_t) != 0) {\n+\t\tPMD_DRV_LOG(ERR, \"filter's length is out of range\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {\n+\t\tPMD_DRV_LOG(ERR, \"filter's priority is out of range\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (ret)\n+\t\treturn ret;\n+\n+\treturn 0;\n+}\n+\n+/**\n  * Check if the flow rule is supported by igb.\n  * It only checkes the format. Don't guarantee the rule can be programmed into\n  * the HW. 
Because there can be no enough room for the rule.\n@@ -1034,6 +1305,7 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,\n \tstruct rte_eth_ntuple_filter ntuple_filter;\n \tstruct rte_eth_ethertype_filter ethertype_filter;\n \tstruct rte_eth_syn_filter syn_filter;\n+\tstruct rte_eth_flex_filter flex_filter;\n \tint ret;\n \n \tmemset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));\n@@ -1054,6 +1326,10 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,\n \tif (!ret)\n \t\treturn 0;\n \n+\tmemset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));\n+\tret = igb_parse_flex_filter(dev, attr, pattern,\n+\t\t\t\tactions, &flex_filter, error);\n+\n \treturn ret;\n }\n \n",
    "prefixes": [
        "dpdk-dev",
        "08/11"
    ]
}
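
The response links to related resources (mbox, comments, checks) rather than embedding them. As a hedged sketch, assuming Python with the requests library and an arbitrary local file name, a reviewer script could follow those links to save the raw mbox for git am and to list the individual CI checks behind the aggregate "check": "fail" field (field names on the check objects are assumed here, hence the defensive .get() calls):

import requests

patch = requests.get("https://patches.dpdk.org/api/patches/24464/").json()

# Download the raw mbox so the patch can be applied locally with `git am`.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("24464.mbox", "wb") as f:  # arbitrary file name, not part of the API
    f.write(mbox.content)

# Inspect the individual checks behind the aggregate "check" field
# (first page of results only in this sketch).
for check in requests.get(patch["checks"]).json():
    print(check.get("context"), check.get("state"))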