get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (the complete resource representation is replaced).

GET /api/patches/45542/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 45542,
    "url": "http://patches.dpdk.org/api/patches/45542/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20180928021655.24869-1-johndale@cisco.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20180928021655.24869-1-johndale@cisco.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20180928021655.24869-1-johndale@cisco.com",
    "date": "2018-09-28T02:16:54",
    "name": "[1/2] net/enic: move common Rx functions to a new header file",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b5a4a80bfd719cebf51619df0942e6c42fee456a",
    "submitter": {
        "id": 359,
        "url": "http://patches.dpdk.org/api/people/359/?format=api",
        "name": "John Daley (johndale)",
        "email": "johndale@cisco.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20180928021655.24869-1-johndale@cisco.com/mbox/",
    "series": [
        {
            "id": 1558,
            "url": "http://patches.dpdk.org/api/series/1558/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=1558",
            "date": "2018-09-28T02:16:54",
            "name": "[1/2] net/enic: move common Rx functions to a new header file",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/1558/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/45542/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/45542/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id DF20C5398;\n\tFri, 28 Sep 2018 04:17:03 +0200 (CEST)",
            "from rcdn-iport-7.cisco.com (rcdn-iport-7.cisco.com [173.37.86.78])\n\tby dpdk.org (Postfix) with ESMTP id 0F1294CA1\n\tfor <dev@dpdk.org>; Fri, 28 Sep 2018 04:17:02 +0200 (CEST)",
            "from rcdn-core-8.cisco.com ([173.37.93.144])\n\tby rcdn-iport-7.cisco.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t28 Sep 2018 02:17:02 +0000",
            "from cisco.com (savbu-usnic-a.cisco.com [10.193.184.48])\n\tby rcdn-core-8.cisco.com (8.15.2/8.15.2) with ESMTP id w8S2H1fA012073;\n\tFri, 28 Sep 2018 02:17:02 GMT",
            "by cisco.com (Postfix, from userid 392789)\n\tid D191420F2001; Thu, 27 Sep 2018 19:17:01 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n\td=cisco.com; i=@cisco.com; l=19490; q=dns/txt;\n\ts=iport; t=1538101023; x=1539310623;\n\th=from:to:cc:subject:date:message-id;\n\tbh=rGf58PeunK++Grp/i5MzUxaRANPffHGwUnXdBBk9qxk=;\n\tb=AYIwnAyQ7DI1la97pwRelbvXKn5otMrmqu4klhKpZHioGQumDq5S+2T/\n\tjgPhQ3BbFxJFTbmVTN02l0SnJBbHnZTww2g+TXyDD9ZXbodQ/vWCi8/rH\n\tvYVUaGNRKBg7q/rqMOhGMxGFPnts8Vdgf2NeXwCqo2O/W5+ogIwZ4e12A s=;",
        "X-IronPort-AV": "E=Sophos;i=\"5.54,313,1534809600\"; d=\"scan'208\";a=\"455352937\"",
        "From": "John Daley <johndale@cisco.com>",
        "To": "ferruh.yigit@intel.com",
        "Cc": "dev@dpdk.org, Hyong Youb Kim <hyonkim@cisco.com>",
        "Date": "Thu, 27 Sep 2018 19:16:54 -0700",
        "Message-Id": "<20180928021655.24869-1-johndale@cisco.com>",
        "X-Mailer": "git-send-email 2.16.2",
        "X-Outbound-SMTP-Client": "10.193.184.48, savbu-usnic-a.cisco.com",
        "X-Outbound-Node": "rcdn-core-8.cisco.com",
        "Subject": "[dpdk-dev] [PATCH 1/2] net/enic: move common Rx functions to a new\n\theader file",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Hyong Youb Kim <hyonkim@cisco.com>\n\nMove a number of Rx functions to the header file so that the avx2\nbased Rx handler can use them.\n\nSigned-off-by: Hyong Youb Kim <hyonkim@cisco.com>\nReviewed-by: John Daley <johndale@cisco.com>\n---\n drivers/net/enic/enic_rxtx.c        | 263 +---------------------------------\n drivers/net/enic/enic_rxtx_common.h | 271 ++++++++++++++++++++++++++++++++++++\n 2 files changed, 272 insertions(+), 262 deletions(-)\n create mode 100644 drivers/net/enic/enic_rxtx_common.h",
    "diff": "diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c\nindex 276a2e559..5189ee635 100644\n--- a/drivers/net/enic/enic_rxtx.c\n+++ b/drivers/net/enic/enic_rxtx.c\n@@ -11,6 +11,7 @@\n #include \"enic_compat.h\"\n #include \"rq_enet_desc.h\"\n #include \"enic.h\"\n+#include \"enic_rxtx_common.h\"\n #include <rte_ether.h>\n #include <rte_ip.h>\n #include <rte_tcp.h>\n@@ -30,268 +31,6 @@\n #define rte_packet_prefetch(p) do {} while (0)\n #endif\n \n-static inline uint16_t\n-enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)\n-{\n-\treturn le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;\n-}\n-\n-static inline uint16_t\n-enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)\n-{\n-\treturn le16_to_cpu(crd->bytes_written_flags) &\n-\t\t\t   ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;\n-}\n-\n-static inline uint8_t\n-enic_cq_rx_desc_packet_error(uint16_t bwflags)\n-{\n-\treturn (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==\n-\t\tCQ_ENET_RQ_DESC_FLAGS_TRUNCATED;\n-}\n-\n-static inline uint8_t\n-enic_cq_rx_desc_eop(uint16_t ciflags)\n-{\n-\treturn (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)\n-\t\t== CQ_ENET_RQ_DESC_FLAGS_EOP;\n-}\n-\n-static inline uint8_t\n-enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)\n-{\n-\treturn (le16_to_cpu(cqrd->q_number_rss_type_flags) &\n-\t\tCQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==\n-\t\tCQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;\n-}\n-\n-static inline uint8_t\n-enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)\n-{\n-\treturn (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==\n-\t\tCQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;\n-}\n-\n-static inline uint8_t\n-enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)\n-{\n-\treturn (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==\n-\t\tCQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;\n-}\n-\n-static inline uint8_t\n-enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)\n-{\n-\treturn (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) 
>>\n-\t\tCQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);\n-}\n-\n-static inline uint32_t\n-enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)\n-{\n-\treturn le32_to_cpu(cqrd->rss_hash);\n-}\n-\n-static inline uint16_t\n-enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)\n-{\n-\treturn le16_to_cpu(cqrd->vlan);\n-}\n-\n-static inline uint16_t\n-enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)\n-{\n-\tstruct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;\n-\treturn le16_to_cpu(cqrd->bytes_written_flags) &\n-\t\tCQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;\n-}\n-\n-\n-static inline uint8_t\n-enic_cq_rx_check_err(struct cq_desc *cqd)\n-{\n-\tstruct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;\n-\tuint16_t bwflags;\n-\n-\tbwflags = enic_cq_rx_desc_bwflags(cqrd);\n-\tif (unlikely(enic_cq_rx_desc_packet_error(bwflags)))\n-\t\treturn 1;\n-\treturn 0;\n-}\n-\n-/* Lookup table to translate RX CQ flags to mbuf flags. */\n-static inline uint32_t\n-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)\n-{\n-\tstruct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;\n-\tuint8_t cqrd_flags = cqrd->flags;\n-\t/*\n-\t * Odd-numbered entries are for tunnel packets. All packet type info\n-\t * applies to the inner packet, and there is no info on the outer\n-\t * packet. The outer flags in these entries exist only to avoid\n-\t * changing enic_cq_rx_to_pkt_flags(). 
They are cleared from mbuf\n-\t * afterwards.\n-\t *\n-\t * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set\n-\t * RTE_PTYPE_TUNNEL_GRENAT..\n-\t */\n-\tstatic const uint32_t cq_type_table[128] __rte_cache_aligned = {\n-\t\t[0x00] = RTE_PTYPE_UNKNOWN,\n-\t\t[0x01] = RTE_PTYPE_UNKNOWN |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER,\n-\t\t[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,\n-\t\t[0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_NONFRAG,\n-\t\t[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,\n-\t\t[0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_UDP,\n-\t\t[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,\n-\t\t[0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_TCP,\n-\t\t[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n-\t\t[0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n-\t\t[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n-\t\t[0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n-\t\t[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n-\t\t[0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t 
RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n-\t\t[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,\n-\t\t[0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_NONFRAG,\n-\t\t[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,\n-\t\t[0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_UDP,\n-\t\t[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,\n-\t\t[0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_TCP,\n-\t\t[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n-\t\t[0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n-\t\t[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n-\t\t[0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n-\t\t[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n-\t\t[0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n-\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n-\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n-\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n-\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n-\t\t/* All others reserved */\n-\t};\n-\tcqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT\n-\t\t| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6\n-\t\t| 
CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;\n-\treturn cq_type_table[cqrd_flags + tnl];\n-}\n-\n-static inline void\n-enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)\n-{\n-\tstruct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;\n-\tuint16_t bwflags, pkt_flags = 0, vlan_tci;\n-\tbwflags = enic_cq_rx_desc_bwflags(cqrd);\n-\tvlan_tci = enic_cq_rx_desc_vlan(cqrd);\n-\n-\t/* VLAN STRIPPED flag. The L2 packet type updated here also */\n-\tif (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {\n-\t\tpkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;\n-\t\tmbuf->packet_type |= RTE_PTYPE_L2_ETHER;\n-\t} else {\n-\t\tif (vlan_tci != 0) {\n-\t\t\tpkt_flags |= PKT_RX_VLAN;\n-\t\t\tmbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;\n-\t\t} else {\n-\t\t\tmbuf->packet_type |= RTE_PTYPE_L2_ETHER;\n-\t\t}\n-\t}\n-\tmbuf->vlan_tci = vlan_tci;\n-\n-\tif ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {\n-\t\tstruct cq_enet_rq_clsf_desc *clsf_cqd;\n-\t\tuint16_t filter_id;\n-\t\tclsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;\n-\t\tfilter_id = clsf_cqd->filter_id;\n-\t\tif (filter_id) {\n-\t\t\tpkt_flags |= PKT_RX_FDIR;\n-\t\t\tif (filter_id != ENIC_MAGIC_FILTER_ID) {\n-\t\t\t\tmbuf->hash.fdir.hi = clsf_cqd->filter_id;\n-\t\t\t\tpkt_flags |= PKT_RX_FDIR_ID;\n-\t\t\t}\n-\t\t}\n-\t} else if (enic_cq_rx_desc_rss_type(cqrd)) {\n-\t\t/* RSS flag */\n-\t\tpkt_flags |= PKT_RX_RSS_HASH;\n-\t\tmbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);\n-\t}\n-\n-\t/* checksum flags */\n-\tif (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {\n-\t\tif (!enic_cq_rx_desc_csum_not_calc(cqrd)) {\n-\t\t\tuint32_t l4_flags;\n-\t\t\tl4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;\n-\n-\t\t\t/*\n-\t\t\t * When overlay offload is enabled, the NIC may\n-\t\t\t * set ipv4_csum_ok=1 if the inner packet is IPv6..\n-\t\t\t * So, explicitly check for IPv4 before checking\n-\t\t\t * ipv4_csum_ok.\n-\t\t\t */\n-\t\t\tif (mbuf->packet_type & 
RTE_PTYPE_L3_IPV4) {\n-\t\t\t\tif (enic_cq_rx_desc_ipv4_csum_ok(cqrd))\n-\t\t\t\t\tpkt_flags |= PKT_RX_IP_CKSUM_GOOD;\n-\t\t\t\telse\n-\t\t\t\t\tpkt_flags |= PKT_RX_IP_CKSUM_BAD;\n-\t\t\t}\n-\n-\t\t\tif (l4_flags == RTE_PTYPE_L4_UDP ||\n-\t\t\t    l4_flags == RTE_PTYPE_L4_TCP) {\n-\t\t\t\tif (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))\n-\t\t\t\t\tpkt_flags |= PKT_RX_L4_CKSUM_GOOD;\n-\t\t\t\telse\n-\t\t\t\t\tpkt_flags |= PKT_RX_L4_CKSUM_BAD;\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\tmbuf->ol_flags = pkt_flags;\n-}\n-\n /* dummy receive function to replace actual function in\n  * order to do safe reconfiguration operations.\n  */\ndiff --git a/drivers/net/enic/enic_rxtx_common.h b/drivers/net/enic/enic_rxtx_common.h\nnew file mode 100644\nindex 000000000..bfbb4909e\n--- /dev/null\n+++ b/drivers/net/enic/enic_rxtx_common.h\n@@ -0,0 +1,271 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright 2008-2018 Cisco Systems, Inc.  All rights reserved.\n+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.\n+ */\n+\n+#ifndef _ENIC_RXTX_COMMON_H_\n+#define _ENIC_RXTX_COMMON_H_\n+\n+static inline uint16_t\n+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)\n+{\n+\treturn le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;\n+}\n+\n+static inline uint16_t\n+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)\n+{\n+\treturn le16_to_cpu(crd->bytes_written_flags) &\n+\t\t\t   ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;\n+}\n+\n+static inline uint8_t\n+enic_cq_rx_desc_packet_error(uint16_t bwflags)\n+{\n+\treturn (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==\n+\t\tCQ_ENET_RQ_DESC_FLAGS_TRUNCATED;\n+}\n+\n+static inline uint8_t\n+enic_cq_rx_desc_eop(uint16_t ciflags)\n+{\n+\treturn (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)\n+\t\t== CQ_ENET_RQ_DESC_FLAGS_EOP;\n+}\n+\n+static inline uint8_t\n+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)\n+{\n+\treturn (le16_to_cpu(cqrd->q_number_rss_type_flags) &\n+\t\tCQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) 
==\n+\t\tCQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;\n+}\n+\n+static inline uint8_t\n+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)\n+{\n+\treturn (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==\n+\t\tCQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;\n+}\n+\n+static inline uint8_t\n+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)\n+{\n+\treturn (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==\n+\t\tCQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;\n+}\n+\n+static inline uint8_t\n+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)\n+{\n+\treturn (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>\n+\t\tCQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);\n+}\n+\n+static inline uint32_t\n+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)\n+{\n+\treturn le32_to_cpu(cqrd->rss_hash);\n+}\n+\n+static inline uint16_t\n+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)\n+{\n+\treturn le16_to_cpu(cqrd->vlan);\n+}\n+\n+static inline uint16_t\n+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)\n+{\n+\tstruct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;\n+\treturn le16_to_cpu(cqrd->bytes_written_flags) &\n+\t\tCQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;\n+}\n+\n+\n+static inline uint8_t\n+enic_cq_rx_check_err(struct cq_desc *cqd)\n+{\n+\tstruct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;\n+\tuint16_t bwflags;\n+\n+\tbwflags = enic_cq_rx_desc_bwflags(cqrd);\n+\tif (unlikely(enic_cq_rx_desc_packet_error(bwflags)))\n+\t\treturn 1;\n+\treturn 0;\n+}\n+\n+/* Lookup table to translate RX CQ flags to mbuf flags. */\n+static uint32_t\n+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)\n+{\n+\tstruct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;\n+\tuint8_t cqrd_flags = cqrd->flags;\n+\t/*\n+\t * Odd-numbered entries are for tunnel packets. All packet type info\n+\t * applies to the inner packet, and there is no info on the outer\n+\t * packet. 
The outer flags in these entries exist only to avoid\n+\t * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf\n+\t * afterwards.\n+\t *\n+\t * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set\n+\t * RTE_PTYPE_TUNNEL_GRENAT..\n+\t */\n+\tstatic const uint32_t cq_type_table[128] __rte_cache_aligned = {\n+\t\t[0x00] = RTE_PTYPE_UNKNOWN,\n+\t\t[0x01] = RTE_PTYPE_UNKNOWN |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER,\n+\t\t[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,\n+\t\t[0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_NONFRAG,\n+\t\t[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,\n+\t\t[0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_UDP,\n+\t\t[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,\n+\t\t[0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_TCP,\n+\t\t[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n+\t\t[0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n+\t\t[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n+\t\t[0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n+\t\t[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n+\t\t[0x65] = 
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n+\t\t[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,\n+\t\t[0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_NONFRAG,\n+\t\t[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,\n+\t\t[0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_UDP,\n+\t\t[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,\n+\t\t[0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_TCP,\n+\t\t[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n+\t\t[0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n+\t\t[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n+\t\t[0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n+\t\t[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,\n+\t\t[0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |\n+\t\t\t RTE_PTYPE_TUNNEL_GRENAT |\n+\t\t\t RTE_PTYPE_INNER_L2_ETHER |\n+\t\t\t RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |\n+\t\t\t RTE_PTYPE_INNER_L4_FRAG,\n+\t\t/* All others reserved */\n+\t};\n+\tcqrd_flags &= 
CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT\n+\t\t| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6\n+\t\t| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;\n+\treturn cq_type_table[cqrd_flags + tnl];\n+}\n+\n+static void\n+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)\n+{\n+\tstruct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;\n+\tuint16_t bwflags, pkt_flags = 0, vlan_tci;\n+\tbwflags = enic_cq_rx_desc_bwflags(cqrd);\n+\tvlan_tci = enic_cq_rx_desc_vlan(cqrd);\n+\n+\t/* VLAN STRIPPED flag. The L2 packet type updated here also */\n+\tif (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {\n+\t\tpkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;\n+\t\tmbuf->packet_type |= RTE_PTYPE_L2_ETHER;\n+\t} else {\n+\t\tif (vlan_tci != 0) {\n+\t\t\tpkt_flags |= PKT_RX_VLAN;\n+\t\t\tmbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;\n+\t\t} else {\n+\t\t\tmbuf->packet_type |= RTE_PTYPE_L2_ETHER;\n+\t\t}\n+\t}\n+\tmbuf->vlan_tci = vlan_tci;\n+\n+\tif ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {\n+\t\tstruct cq_enet_rq_clsf_desc *clsf_cqd;\n+\t\tuint16_t filter_id;\n+\t\tclsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;\n+\t\tfilter_id = clsf_cqd->filter_id;\n+\t\tif (filter_id) {\n+\t\t\tpkt_flags |= PKT_RX_FDIR;\n+\t\t\tif (filter_id != ENIC_MAGIC_FILTER_ID) {\n+\t\t\t\tmbuf->hash.fdir.hi = clsf_cqd->filter_id;\n+\t\t\t\tpkt_flags |= PKT_RX_FDIR_ID;\n+\t\t\t}\n+\t\t}\n+\t} else if (enic_cq_rx_desc_rss_type(cqrd)) {\n+\t\t/* RSS flag */\n+\t\tpkt_flags |= PKT_RX_RSS_HASH;\n+\t\tmbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);\n+\t}\n+\n+\t/* checksum flags */\n+\tif (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {\n+\t\tif (!enic_cq_rx_desc_csum_not_calc(cqrd)) {\n+\t\t\tuint32_t l4_flags;\n+\t\t\tl4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;\n+\n+\t\t\t/*\n+\t\t\t * When overlay offload is enabled, the NIC may\n+\t\t\t * set ipv4_csum_ok=1 if the inner packet is IPv6..\n+\t\t\t * So, explicitly check for IPv4 
before checking\n+\t\t\t * ipv4_csum_ok.\n+\t\t\t */\n+\t\t\tif (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {\n+\t\t\t\tif (enic_cq_rx_desc_ipv4_csum_ok(cqrd))\n+\t\t\t\t\tpkt_flags |= PKT_RX_IP_CKSUM_GOOD;\n+\t\t\t\telse\n+\t\t\t\t\tpkt_flags |= PKT_RX_IP_CKSUM_BAD;\n+\t\t\t}\n+\n+\t\t\tif (l4_flags == RTE_PTYPE_L4_UDP ||\n+\t\t\t    l4_flags == RTE_PTYPE_L4_TCP) {\n+\t\t\t\tif (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))\n+\t\t\t\t\tpkt_flags |= PKT_RX_L4_CKSUM_GOOD;\n+\t\t\t\telse\n+\t\t\t\t\tpkt_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tmbuf->ol_flags = pkt_flags;\n+}\n+\n+#endif /* _ENIC_RXTX_COMMON_H_ */\n",
    "prefixes": [
        "1/2"
    ]
}