get:
Show a patch.

patch:
Partially update a patch — only the fields supplied in the request are changed.

put:
Fully update a patch — the request replaces the writable fields of the resource.

GET /api/patches/71488/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 71488,
    "url": "https://patches.dpdk.org/api/patches/71488/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/95e8f14d0b8c523b69a9c2af29da2ba55e7d0628.1591998771.git.rahul.lakkireddy@chelsio.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<95e8f14d0b8c523b69a9c2af29da2ba55e7d0628.1591998771.git.rahul.lakkireddy@chelsio.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/95e8f14d0b8c523b69a9c2af29da2ba55e7d0628.1591998771.git.rahul.lakkireddy@chelsio.com",
    "date": "2020-06-12T22:07:27",
    "name": "[5/5] net/cxgbe: ignore flow default masks for unrequested fields",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "953d6839669364a1110b4bc3541c1015da730b48",
    "submitter": {
        "id": 241,
        "url": "https://patches.dpdk.org/api/people/241/?format=api",
        "name": "Rahul Lakkireddy",
        "email": "rahul.lakkireddy@chelsio.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/95e8f14d0b8c523b69a9c2af29da2ba55e7d0628.1591998771.git.rahul.lakkireddy@chelsio.com/mbox/",
    "series": [
        {
            "id": 10446,
            "url": "https://patches.dpdk.org/api/series/10446/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=10446",
            "date": "2020-06-12T22:07:22",
            "name": "net/cxgbe: fix rte_flow related hardware resource leaks",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/10446/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/71488/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/71488/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 7FB96A00BE;\n\tSat, 13 Jun 2020 00:20:49 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id E343B1C1C9;\n\tSat, 13 Jun 2020 00:20:33 +0200 (CEST)",
            "from stargate.chelsio.com (stargate.chelsio.com [12.32.117.8])\n by dpdk.org (Postfix) with ESMTP id EEB621C1B9;\n Sat, 13 Jun 2020 00:20:31 +0200 (CEST)",
            "from localhost (scalar.blr.asicdesigners.com [10.193.185.94])\n by stargate.chelsio.com (8.13.8/8.13.8) with ESMTP id 05CMKTCk009581;\n Fri, 12 Jun 2020 15:20:30 -0700"
        ],
        "From": "Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>",
        "To": "dev@dpdk.org",
        "Cc": "stable@dpdk.org, nirranjan@chelsio.com",
        "Date": "Sat, 13 Jun 2020 03:37:27 +0530",
        "Message-Id": "\n <95e8f14d0b8c523b69a9c2af29da2ba55e7d0628.1591998771.git.rahul.lakkireddy@chelsio.com>",
        "X-Mailer": "git-send-email 2.5.3",
        "In-Reply-To": [
            "<cover.1591998771.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1591998771.git.rahul.lakkireddy@chelsio.com>"
        ],
        "References": [
            "<cover.1591998771.git.rahul.lakkireddy@chelsio.com>",
            "<cover.1591998771.git.rahul.lakkireddy@chelsio.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH 5/5] net/cxgbe: ignore flow default masks for\n\tunrequested fields",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "commit 536db938a444 (\"net/cxgbe: add devargs to control filtermode and\nfiltermask\") allows configuring hardware to select specific combination\nof header fields to match in the incoming packets. However, the default\nmask is set for all fields in the requested pattern items, even if the\nfield is not explicitly set in the combination and results in\nvalidation errors. To prevent this, ignore setting the default masks\nfor the unrequested fields and the hardware will also ignore them in\nvalidation, accordingly. Also, tweak the filter spec before finalizing\nthe masks.\n\nFixes: 536db938a444 (\"net/cxgbe: add devargs to control filtermode and filtermask\")\nCc: stable@dpdk.org\n\nSigned-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>\n---\n drivers/net/cxgbe/cxgbe_flow.c | 109 ++++++++++++++++++++++-----------\n 1 file changed, 74 insertions(+), 35 deletions(-)",
    "diff": "diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c\nindex dd8ee7bbd..f7c4f3696 100644\n--- a/drivers/net/cxgbe/cxgbe_flow.c\n+++ b/drivers/net/cxgbe/cxgbe_flow.c\n@@ -188,19 +188,22 @@ ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,\n \t\treturn 0;\n \n \t/* we don't support SRC_MAC filtering*/\n-\tif (!rte_is_zero_ether_addr(&mask->src))\n+\tif (!rte_is_zero_ether_addr(&spec->src) ||\n+\t    (umask && !rte_is_zero_ether_addr(&umask->src)))\n \t\treturn rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,\n \t\t\t\t\t  item,\n \t\t\t\t\t  \"src mac filtering not supported\");\n \n-\tif (!rte_is_zero_ether_addr(&mask->dst)) {\n+\tif (!rte_is_zero_ether_addr(&spec->dst) ||\n+\t    (umask && !rte_is_zero_ether_addr(&umask->dst))) {\n \t\tCXGBE_FILL_FS(0, 0x1ff, macidx);\n \t\tCXGBE_FILL_FS_MEMCPY(spec->dst.addr_bytes, mask->dst.addr_bytes,\n \t\t\t\t     dmac);\n \t}\n \n-\tCXGBE_FILL_FS(be16_to_cpu(spec->type),\n-\t\t      be16_to_cpu(mask->type), ethtype);\n+\tif (spec->type || (umask && umask->type))\n+\t\tCXGBE_FILL_FS(be16_to_cpu(spec->type),\n+\t\t\t      be16_to_cpu(mask->type), ethtype);\n \n \treturn 0;\n }\n@@ -224,7 +227,8 @@ ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,\n \t\t\t\t\t  item,\n \t\t\t\t\t  \"port index up to 0x7 is supported\");\n \n-\tCXGBE_FILL_FS(val->index, mask->index, iport);\n+\tif (val->index || (umask && umask->index))\n+\t\tCXGBE_FILL_FS(val->index, mask->index, iport);\n \n \treturn 0;\n }\n@@ -265,24 +269,24 @@ ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,\n \tif (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {\n \t\tCXGBE_FILL_FS(1, 1, ovlan_vld);\n \t\tif (spec) {\n-\t\t\tCXGBE_FILL_FS(be16_to_cpu(spec->tci),\n-\t\t\t\t      be16_to_cpu(mask->tci), ovlan);\n-\n+\t\t\tif (spec->tci || (umask && umask->tci))\n+\t\t\t\tCXGBE_FILL_FS(be16_to_cpu(spec->tci),\n+\t\t\t\t\t      be16_to_cpu(mask->tci), ovlan);\n 
\t\t\tfs->mask.ethtype = 0;\n \t\t\tfs->val.ethtype = 0;\n \t\t}\n \t} else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {\n \t\tCXGBE_FILL_FS(1, 1, ivlan_vld);\n \t\tif (spec) {\n-\t\t\tCXGBE_FILL_FS(be16_to_cpu(spec->tci),\n-\t\t\t\t      be16_to_cpu(mask->tci), ivlan);\n-\n+\t\t\tif (spec->tci || (umask && umask->tci))\n+\t\t\t\tCXGBE_FILL_FS(be16_to_cpu(spec->tci),\n+\t\t\t\t\t      be16_to_cpu(mask->tci), ivlan);\n \t\t\tfs->mask.ethtype = 0;\n \t\t\tfs->val.ethtype = 0;\n \t\t}\n \t}\n \n-\tif (spec)\n+\tif (spec && (spec->inner_type || (umask && umask->inner_type)))\n \t\tCXGBE_FILL_FS(be16_to_cpu(spec->inner_type),\n \t\t\t      be16_to_cpu(mask->inner_type), ethtype);\n \n@@ -328,7 +332,8 @@ ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,\n \t\t\t\t\t  item,\n \t\t\t\t\t  \"VF ID > MAX(255)\");\n \n-\tCXGBE_FILL_FS(val->id, mask->id, vf);\n+\tif (val->id || (umask && umask->id))\n+\t\tCXGBE_FILL_FS(val->id, mask->id, vf);\n \n \treturn 0;\n }\n@@ -352,10 +357,15 @@ ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,\n \tCXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);\n \tif (!val)\n \t\treturn 0;\n-\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),\n-\t\t      be16_to_cpu(mask->hdr.src_port), fport);\n-\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),\n-\t\t      be16_to_cpu(mask->hdr.dst_port), lport);\n+\n+\tif (val->hdr.src_port || (umask && umask->hdr.src_port))\n+\t\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),\n+\t\t\t      be16_to_cpu(mask->hdr.src_port), fport);\n+\n+\tif (val->hdr.dst_port || (umask && umask->hdr.dst_port))\n+\t\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),\n+\t\t\t      be16_to_cpu(mask->hdr.dst_port), lport);\n+\n \treturn 0;\n }\n \n@@ -380,10 +390,15 @@ ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,\n \tCXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);\n \tif (!val)\n \t\treturn 0;\n-\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),\n-\t\t      
be16_to_cpu(mask->hdr.src_port), fport);\n-\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),\n-\t\t      be16_to_cpu(mask->hdr.dst_port), lport);\n+\n+\tif (val->hdr.src_port || (umask && umask->hdr.src_port))\n+\t\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),\n+\t\t\t      be16_to_cpu(mask->hdr.src_port), fport);\n+\n+\tif (val->hdr.dst_port || (umask && umask->hdr.dst_port))\n+\t\tCXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),\n+\t\t\t      be16_to_cpu(mask->hdr.dst_port), lport);\n+\n \treturn 0;\n }\n \n@@ -411,10 +426,21 @@ ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,\n \tif (!val)\n \t\treturn 0; /* ipv4 wild card */\n \n-\tCXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);\n-\tCXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);\n-\tCXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);\n-\tCXGBE_FILL_FS(val->hdr.type_of_service, mask->hdr.type_of_service, tos);\n+\tif (val->hdr.next_proto_id || (umask && umask->hdr.next_proto_id))\n+\t\tCXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id,\n+\t\t\t      proto);\n+\n+\tif (val->hdr.dst_addr || (umask && umask->hdr.dst_addr))\n+\t\tCXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,\n+\t\t\t\t     lip);\n+\n+\tif (val->hdr.src_addr || (umask && umask->hdr.src_addr))\n+\t\tCXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,\n+\t\t\t\t     fip);\n+\n+\tif (val->hdr.type_of_service || (umask && umask->hdr.type_of_service))\n+\t\tCXGBE_FILL_FS(val->hdr.type_of_service,\n+\t\t\t      mask->hdr.type_of_service, tos);\n \n \treturn 0;\n }\n@@ -428,6 +454,7 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,\n \tconst struct rte_flow_item_ipv6 *umask = item->mask;\n \tconst struct rte_flow_item_ipv6 *mask;\n \tu32 vtc_flow, vtc_flow_mask;\n+\tu8 z[16] = { 0 };\n \n \tmask = umask ? 
umask : (const struct rte_flow_item_ipv6 *)dmask;\n \n@@ -448,17 +475,28 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,\n \tif (!val)\n \t\treturn 0; /* ipv6 wild card */\n \n-\tCXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);\n+\tif (val->hdr.proto || (umask && umask->hdr.proto))\n+\t\tCXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);\n \n \tvtc_flow = be32_to_cpu(val->hdr.vtc_flow);\n-\tCXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>\n-\t\t      RTE_IPV6_HDR_TC_SHIFT,\n-\t\t      (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>\n-\t\t      RTE_IPV6_HDR_TC_SHIFT,\n-\t\t      tos);\n-\n-\tCXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);\n-\tCXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);\n+\tif (val->hdr.vtc_flow || (umask && umask->hdr.vtc_flow))\n+\t\tCXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>\n+\t\t\t      RTE_IPV6_HDR_TC_SHIFT,\n+\t\t\t      (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>\n+\t\t\t      RTE_IPV6_HDR_TC_SHIFT,\n+\t\t\t      tos);\n+\n+\tif (memcmp(val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||\n+\t    (umask &&\n+\t     memcmp(umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))\n+\t\tCXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,\n+\t\t\t\t     lip);\n+\n+\tif (memcmp(val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||\n+\t    (umask &&\n+\t     memcmp(umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))\n+\t\tCXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,\n+\t\t\t\t     fip);\n \n \treturn 0;\n }\n@@ -1051,8 +1089,8 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,\n \t\t}\n \t}\n \n-\tcxgbe_fill_filter_region(adap, &flow->fs);\n \tcxgbe_tweak_filter_spec(adap, &flow->fs);\n+\tcxgbe_fill_filter_region(adap, &flow->fs);\n \n \treturn 0;\n }\n@@ -1310,6 +1348,7 @@ cxgbe_flow_validate(struct rte_eth_dev *dev,\n \n \tflow->item_parser = parseitem;\n \tflow->dev = dev;\n+\tflow->fs.private = (void *)flow;\n \n \tret = cxgbe_flow_parse(flow, 
attr, item, action, e);\n \tif (ret) {\n",
    "prefixes": [
        "5/5"
    ]
}