get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
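
A minimal usage sketch of these methods, assuming Python with the third-party requests library and Patchwork's token authentication (the token value is a placeholder; write access requires maintainer credentials):

import requests

URL = "http://patches.dpdk.org/api/patches/18452/"
HEADERS = {"Authorization": "Token 0123deadbeef"}  # placeholder token

# get: show a patch (reads need no authentication)
patch = requests.get(URL).json()
print(patch["name"], patch["state"])

# patch: partially update a patch, e.g. its state
resp = requests.patch(URL, headers=HEADERS, json={"state": "accepted"})
resp.raise_for_status()

The sample GET request and response below show the full representation returned for this patch.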

GET /api/patches/18452/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 18452,
    "url": "http://patches.dpdk.org/api/patches/18452/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1482454091-12819-4-git-send-email-harish.patil@qlogic.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1482454091-12819-4-git-send-email-harish.patil@qlogic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1482454091-12819-4-git-send-email-harish.patil@qlogic.com",
    "date": "2016-12-23T00:48:08",
    "name": "[dpdk-dev,v2,4/7] net/qede: add fastpath support for VXLAN tunneling",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "0a68e3b32b20f639974db0a31255e8fbf2539cd8",
    "submitter": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/people/319/?format=api",
        "name": "Harish Patil",
        "email": "harish.patil@qlogic.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1482454091-12819-4-git-send-email-harish.patil@qlogic.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/18452/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/18452/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 50EB210DDB;\n\tFri, 23 Dec 2016 01:50:10 +0100 (CET)",
            "from mx0b-0016ce01.pphosted.com (mx0a-0016ce01.pphosted.com\n\t[67.231.148.157]) by dpdk.org (Postfix) with ESMTP id F065F10DC1\n\tfor <dev@dpdk.org>; Fri, 23 Dec 2016 01:49:02 +0100 (CET)",
            "from pps.filterd (m0095336.ppops.net [127.0.0.1])\n\tby mx0a-0016ce01.pphosted.com (8.16.0.20/8.16.0.20) with SMTP id\n\tuBN0m5wc015659; Thu, 22 Dec 2016 16:48:59 -0800",
            "from avcashub1.qlogic.com ([198.186.0.115])\n\tby mx0a-0016ce01.pphosted.com with ESMTP id 27gs6480xr-1\n\t(version=TLSv1 cipher=ECDHE-RSA-AES256-SHA bits=256 verify=NOT);\n\tThu, 22 Dec 2016 16:48:58 -0800",
            "from avluser01.qlc.com (10.1.113.203) by avcashub1.qlogic.org\n\t(10.1.4.190) with Microsoft SMTP Server (TLS) id 14.3.235.1;\n\tThu, 22 Dec 2016 16:48:58 -0800",
            "(from hpatil@localhost)\tby avluser01.qlc.com (8.14.4/8.14.4/Submit)\n\tid uBN0mwja012928;\tThu, 22 Dec 2016 16:48:58 -0800"
        ],
        "From": "Harish Patil <harish.patil@qlogic.com>",
        "To": "<ferruh.yigit@intel.com>",
        "CC": "Harish Patil <harish.patil@qlogic.com>, <dev@dpdk.org>,\n\t<Dept-EngDPDKDev@cavium.com>",
        "Date": "Thu, 22 Dec 2016 16:48:08 -0800",
        "Message-ID": "<1482454091-12819-4-git-send-email-harish.patil@qlogic.com>",
        "X-Mailer": "git-send-email 1.7.10.3",
        "In-Reply-To": "<91ac9846-530c-2dd3-4bd6-2c3b9143405b@intel.com>",
        "References": "<91ac9846-530c-2dd3-4bd6-2c3b9143405b@intel.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "disclaimer": "bypass",
        "X-Proofpoint-Virus-Version": "vendor=nai engine=5800 definitions=8387\n\tsignatures=670789",
        "X-Proofpoint-Spam-Details": "rule=notspam policy=default score=0\n\tpriorityscore=1501 malwarescore=0\n\tsuspectscore=1 phishscore=0 bulkscore=0 spamscore=0 clxscore=1015\n\tlowpriorityscore=0 impostorscore=0 adultscore=0 classifier=spam\n\tadjust=0\n\treason=mlx scancount=1 engine=8.0.1-1612050000\n\tdefinitions=main-1612230012",
        "Subject": "[dpdk-dev] [PATCH v2 4/7] net/qede: add fastpath support for VXLAN\n\ttunneling",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "- Support HW checksum and RSS offload for VXLAN traffic\n- Identify inner/outer packet_types using lookup table\n- Update documentation\n\nSigned-off-by: Harish Patil <harish.patil@qlogic.com>\n---\n doc/guides/nics/features/qede.ini |    3 +\n doc/guides/nics/qede.rst          |    3 +-\n drivers/net/qede/qede_eth_if.h    |    3 +\n drivers/net/qede/qede_ethdev.c    |   18 +--\n drivers/net/qede/qede_ethdev.h    |    3 +\n drivers/net/qede/qede_main.c      |    2 +\n drivers/net/qede/qede_rxtx.c      |  280 ++++++++++++++++++++++++++-----------\n drivers/net/qede/qede_rxtx.h      |   46 ++++++\n 8 files changed, 269 insertions(+), 89 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/qede.ini b/doc/guides/nics/features/qede.ini\nindex 7d75030..8858e5d 100644\n--- a/doc/guides/nics/features/qede.ini\n+++ b/doc/guides/nics/features/qede.ini\n@@ -23,6 +23,9 @@ CRC offload          = Y\n VLAN offload         = Y\n L3 checksum offload  = Y\n L4 checksum offload  = Y\n+Tunnel filter        = Y\n+Inner L3 checksum    = Y\n+Inner L4 checksum    = Y\n Packet type parsing  = Y\n Basic stats          = Y\n Extended stats       = Y\ndiff --git a/doc/guides/nics/qede.rst b/doc/guides/nics/qede.rst\nindex d22ecdd..999df95 100644\n--- a/doc/guides/nics/qede.rst\n+++ b/doc/guides/nics/qede.rst\n@@ -59,12 +59,13 @@ Supported Features\n - MTU change\n - Multiprocess aware\n - Scatter-Gather\n+- VXLAN tunneling offload\n \n Non-supported Features\n ----------------------\n \n - SR-IOV PF\n-- Tunneling offloads\n+- GENEVE and NVGRE Tunneling offloads\n - LRO/TSO\n - NPAR\n \ndiff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h\nindex 9c0db87..d67b312 100644\n--- a/drivers/net/qede/qede_eth_if.h\n+++ b/drivers/net/qede/qede_eth_if.h\n@@ -42,6 +42,9 @@ struct qed_dev_eth_info {\n \tstruct ether_addr port_mac;\n \tuint16_t num_vlan_filters;\n \tuint32_t num_mac_addrs;\n+\n+\t/* Legacy VF - this affects the datapath */\n+\tbool is_legacy;\n };\n \n struct qed_update_vport_rss_params {\ndiff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c\nindex 7a95560..ec48306 100644\n--- a/drivers/net/qede/qede_ethdev.c\n+++ b/drivers/net/qede/qede_ethdev.c\n@@ -919,14 +919,16 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,\n \t\t.txq_flags = QEDE_TXQ_FLAGS,\n \t};\n \n-\tdev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |\n-\t\t\t\t     DEV_RX_OFFLOAD_IPV4_CKSUM |\n-\t\t\t\t     DEV_RX_OFFLOAD_UDP_CKSUM |\n-\t\t\t\t     DEV_RX_OFFLOAD_TCP_CKSUM);\n-\tdev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |\n-\t\t\t\t     DEV_TX_OFFLOAD_IPV4_CKSUM |\n-\t\t\t\t     DEV_TX_OFFLOAD_UDP_CKSUM |\n-\t\t\t\t     DEV_TX_OFFLOAD_TCP_CKSUM);\n+\tdev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP\t|\n+\t\t\t\t     DEV_RX_OFFLOAD_IPV4_CKSUM\t|\n+\t\t\t\t     DEV_RX_OFFLOAD_UDP_CKSUM\t|\n+\t\t\t\t     DEV_RX_OFFLOAD_TCP_CKSUM\t|\n+\t\t\t\t     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM);\n+\tdev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT\t|\n+\t\t\t\t     DEV_TX_OFFLOAD_IPV4_CKSUM\t|\n+\t\t\t\t     DEV_TX_OFFLOAD_UDP_CKSUM\t|\n+\t\t\t\t     DEV_TX_OFFLOAD_TCP_CKSUM\t|\n+\t\t\t\t     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);\n \n \tmemset(&link, 0, sizeof(struct qed_link_output));\n \tqdev->ops->common->get_link(edev, &link);\ndiff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h\nindex 6d0616e..d736246 100644\n--- a/drivers/net/qede/qede_ethdev.h\n+++ b/drivers/net/qede/qede_ethdev.h\n@@ -15,6 +15,7 @@\n #include <rte_ether.h>\n #include <rte_ethdev.h>\n #include <rte_dev.h>\n+#include <rte_ip.h>\n \n /* ecore includes */\n #include \"base/bcm_osal.h\"\n@@ -184,6 +185,8 @@ static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,\n \t\t\t\tstruct rte_eth_rss_reta_entry64 *reta_conf,\n \t\t\t\tuint16_t reta_size);\n \n+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags);\n+\n /* Non-static functions */\n void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf);\n \ndiff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c\nindex efc99ee..b769673 100644\n--- a/drivers/net/qede/qede_main.c\n+++ b/drivers/net/qede/qede_main.c\n@@ -414,6 +414,8 @@ qed_fill_eth_dev_info(struct 
ecore_dev *edev, struct qed_dev_eth_info *info)\n \n \t\tecore_vf_get_port_mac(&edev->hwfns[0],\n \t\t\t\t      (uint8_t *)&info->port_mac);\n+\n+\t\tinfo->is_legacy = ecore_vf_get_pre_fp_hsi(&edev->hwfns[0]);\n \t}\n \n \tqed_fill_dev_info(edev, &info->common);\ndiff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c\nindex 2e181c8..828d3cc 100644\n--- a/drivers/net/qede/qede_rxtx.c\n+++ b/drivers/net/qede/qede_rxtx.c\n@@ -701,79 +701,64 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)\n \treturn 0;\n }\n \n-#ifdef ENC_SUPPORTED\n static bool qede_tunn_exist(uint16_t flag)\n {\n \treturn !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<\n \t\t    PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);\n }\n \n-static inline uint8_t qede_check_tunn_csum(uint16_t flag)\n+/*\n+ * qede_check_tunn_csum_l4:\n+ * Returns:\n+ * 1 : If L4 csum is enabled AND if the validation has failed.\n+ * 0 : Otherwise\n+ */\n+static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)\n {\n-\tuint8_t tcsum = 0;\n-\tuint16_t csum_flag = 0;\n-\n \tif ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<\n \t     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)\n-\t\tcsum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<\n-\t\t    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;\n-\n-\tif ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<\n-\t     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {\n-\t\tcsum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<\n-\t\t    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;\n-\t\ttcsum = QEDE_TUNN_CSUM_UNNECESSARY;\n-\t}\n-\n-\tcsum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<\n-\t    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |\n-\t    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<\n-\t    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;\n-\n-\tif (csum_flag & flag)\n-\t\treturn QEDE_CSUM_ERROR;\n-\n-\treturn QEDE_CSUM_UNNECESSARY | tcsum;\n-}\n-#else\n-static inline uint8_t qede_tunn_exist(uint16_t flag)\n-{\n-\treturn 0;\n-}\n+\t\treturn !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<\n+\t\t\tPARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);\n \n-static inline uint8_t qede_check_tunn_csum(uint16_t flag)\n-{\n \treturn 0;\n }\n-#endif\n \n-static inline uint8_t qede_check_notunn_csum(uint16_t flag)\n+static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)\n {\n-\tuint8_t csum = 0;\n-\tuint16_t csum_flag = 0;\n-\n \tif ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<\n-\t     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {\n-\t\tcsum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<\n-\t\t    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;\n-\t\tcsum = QEDE_CSUM_UNNECESSARY;\n-\t}\n-\n-\tcsum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<\n-\t    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;\n-\n-\tif (csum_flag & flag)\n-\t\treturn QEDE_CSUM_ERROR;\n+\t     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)\n+\t\treturn !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<\n+\t\t\t   PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);\n \n-\treturn csum;\n+\treturn 0;\n }\n \n-static inline uint8_t qede_check_csum(uint16_t flag)\n+static inline uint8_t\n+qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)\n {\n-\tif (likely(!qede_tunn_exist(flag)))\n-\t\treturn qede_check_notunn_csum(flag);\n-\telse\n-\t\treturn qede_check_tunn_csum(flag);\n+\tstruct ipv4_hdr *ip;\n+\tuint16_t pkt_csum;\n+\tuint16_t calc_csum;\n+\tuint16_t val;\n+\n+\tval = 
((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<\n+\t\tPARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);\n+\n+\tif (unlikely(val)) {\n+\t\tm->packet_type = qede_rx_cqe_to_pkt_type(flag);\n+\t\tif (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {\n+\t\t\tip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,\n+\t\t\t\t\t   sizeof(struct ether_hdr));\n+\t\t\tpkt_csum = ip->hdr_checksum;\n+\t\t\tip->hdr_checksum = 0;\n+\t\t\tcalc_csum = rte_ipv4_cksum(ip);\n+\t\t\tip->hdr_checksum = pkt_csum;\n+\t\t\treturn (calc_csum != pkt_csum);\n+\t\t} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {\n+\t\t\treturn 1;\n+\t\t}\n+\t}\n+\treturn 0;\n }\n \n static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)\n@@ -818,22 +803,93 @@ qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,\n \n static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)\n {\n-\tuint32_t p_type;\n-\t/* TBD - L4 indications needed ? */\n-\tuint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<\n-\t\t\t      PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);\n-\n-\t/* protocol = 3 means LLC/SNAP over Ethernet */\n-\tif (unlikely(protocol == 0 || protocol == 3))\n-\t\tp_type = RTE_PTYPE_UNKNOWN;\n-\telse if (protocol == 1)\n-\t\tp_type = RTE_PTYPE_L3_IPV4;\n-\telse if (protocol == 2)\n-\t\tp_type = RTE_PTYPE_L3_IPV6;\n-\n-\treturn RTE_PTYPE_L2_ETHER | p_type;\n+\tuint16_t val;\n+\n+\t/* Lookup table */\n+\tstatic const uint32_t\n+\tptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {\n+\t\t[QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,\n+\t\t[QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,\n+\t\t[QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,\n+\t\t[QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,\n+\t\t[QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,\n+\t\t[QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,\n+\t};\n+\n+\t/* Bits (0..3) provides L3/L4 protocol type */\n+\tval = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<\n+\t       PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |\n+\t       (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<\n+\t\tPARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;\n+\n+\tif (val < QEDE_PKT_TYPE_MAX)\n+\t\treturn ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;\n+\telse\n+\t\treturn RTE_PTYPE_UNKNOWN;\n+}\n+\n+static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)\n+{\n+\tuint32_t val;\n+\n+\t/* Lookup table */\n+\tstatic const uint32_t\n+\tptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {\n+\t\t[QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,\n+\t\t[QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,\n+\t\t[QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,\n+\t\t[QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,\n+\t\t[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,\n+\t\t[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,\n+\t\t[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,\n+\t\t[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,\n+\t\t[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,\n+\t\t[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GRE | 
RTE_PTYPE_L3_IPV4,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,\n+\t\t[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =\n+\t\t\t\tRTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,\n+\t};\n+\n+\t/* Cover bits[4-0] to include tunn_type and next protocol */\n+\tval = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<\n+\t\tETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |\n+\t\t(ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<\n+\t\tETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;\n+\n+\tif (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)\n+\t\treturn ptype_tunn_lkup_tbl[val];\n+\telse\n+\t\treturn RTE_PTYPE_UNKNOWN;\n }\n \n+\n int qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,\n \t\t\t int num_segs, uint16_t pkt_len)\n {\n@@ -904,6 +960,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \tuint16_t len, pad, preload_idx, pkt_len, parse_flag;\n \tuint8_t csum_flag, num_segs;\n \tenum rss_hash_type htype;\n+\tuint8_t tunn_parse_flag;\n \tint ret;\n \n \thw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);\n@@ -950,17 +1007,47 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\t/* If this is an error packet then drop it */\n \t\tparse_flag =\n \t\t    rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);\n-\t\tcsum_flag = qede_check_csum(parse_flag);\n-\t\tif (unlikely(csum_flag == QEDE_CSUM_ERROR)) {\n-\t\t\tPMD_RX_LOG(ERR, rxq,\n-\t\t\t\t   \"CQE in CONS = %u has error, flags = 0x%x \"\n-\t\t\t\t   \"dropping incoming packet\\n\",\n-\t\t\t\t   sw_comp_cons, parse_flag);\n-\t\t\trxq->rx_hw_errors++;\n-\t\t\tqede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);\n-\t\t\tgoto next_cqe;\n+\n+\t\trx_mb->ol_flags = 0;\n+\n+\t\tif (qede_tunn_exist(parse_flag)) {\n+\t\t\tPMD_RX_LOG(DEBUG, rxq, \"Rx tunneled packet\\n\");\n+\t\t\tif (unlikely(qede_check_tunn_csum_l4(parse_flag))) {\n+\t\t\t\tPMD_RX_LOG(ERR, rxq,\n+\t\t\t\t\t    \"L4 csum failed, flags = 0x%x\\n\",\n+\t\t\t\t\t    parse_flag);\n+\t\t\t\trxq->rx_hw_errors++;\n+\t\t\t\trx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t\t\t} else {\n+\t\t\t\ttunn_parse_flag =\n+\t\t\t\t\t\tfp_cqe->tunnel_pars_flags.flags;\n+\t\t\t\trx_mb->packet_type =\n+\t\t\t\t\tqede_rx_cqe_to_tunn_pkt_type(\n+\t\t\t\t\t\t\ttunn_parse_flag);\n+\t\t\t}\n+\t\t} else {\n+\t\t\tPMD_RX_LOG(DEBUG, rxq, \"Rx non-tunneled packet\\n\");\n+\t\t\tif (unlikely(qede_check_notunn_csum_l4(parse_flag))) {\n+\t\t\t\tPMD_RX_LOG(ERR, rxq,\n+\t\t\t\t\t    \"L4 csum failed, flags = 0x%x\\n\",\n+\t\t\t\t\t    parse_flag);\n+\t\t\t\trxq->rx_hw_errors++;\n+\t\t\t\trx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;\n+\t\t\t} else if 
(unlikely(qede_check_notunn_csum_l3(rx_mb,\n+\t\t\t\t\t\t\tparse_flag))) {\n+\t\t\t\tPMD_RX_LOG(ERR, rxq,\n+\t\t\t\t\t   \"IP csum failed, flags = 0x%x\\n\",\n+\t\t\t\t\t   parse_flag);\n+\t\t\t\trxq->rx_hw_errors++;\n+\t\t\t\trx_mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;\n+\t\t\t} else {\n+\t\t\t\trx_mb->packet_type =\n+\t\t\t\t\tqede_rx_cqe_to_pkt_type(parse_flag);\n+\t\t\t}\n \t\t}\n \n+\t\tPMD_RX_LOG(INFO, rxq, \"packet_type 0x%x\\n\", rx_mb->packet_type);\n+\n \t\tif (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {\n \t\t\tPMD_RX_LOG(ERR, rxq,\n \t\t\t\t   \"New buffer allocation failed,\"\n@@ -995,14 +1082,12 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \t\tpreload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);\n \t\trte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);\n \n-\t\t/* Update MBUF fields */\n-\t\trx_mb->ol_flags = 0;\n+\t\t/* Update rest of the MBUF fields */\n \t\trx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;\n \t\trx_mb->nb_segs = fp_cqe->bd_num;\n \t\trx_mb->data_len = len;\n \t\trx_mb->pkt_len = fp_cqe->pkt_len;\n \t\trx_mb->port = rxq->port_id;\n-\t\trx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);\n \n \t\thtype = (uint8_t)GET_FIELD(fp_cqe->bitfields,\n \t\t\t\tETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);\n@@ -1206,8 +1291,39 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \t\tQEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),\n \t\t\t\t     mbuf->pkt_len);\n \n+\t\tif (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type)) {\n+\t\t\tPMD_TX_LOG(INFO, txq, \"Tx tunnel packet\\n\");\n+\t\t\t/* First indicate its a tunnel pkt */\n+\t\t\tbd1->data.bd_flags.bitfields |=\n+\t\t\t\tETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<\n+\t\t\t\tETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;\n+\n+\t\t\t/* Legacy FW had flipped behavior in regard to this bit\n+\t\t\t * i.e. 
it needed to set to prevent FW from touching\n+\t\t\t * encapsulated packets when it didn't need to.\n+\t\t\t */\n+\t\t\tif (unlikely(txq->is_legacy))\n+\t\t\t\tbd1->data.bitfields ^=\n+\t\t\t\t\t1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;\n+\n+\t\t\t/* Outer IP checksum offload */\n+\t\t\tif (mbuf->ol_flags & PKT_TX_OUTER_IP_CKSUM) {\n+\t\t\t\tPMD_TX_LOG(INFO, txq, \"OuterIP csum offload\\n\");\n+\t\t\t\tbd1->data.bd_flags.bitfields |=\n+\t\t\t\t\tETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<\n+\t\t\t\t\tETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;\n+\t\t\t}\n+\n+\t\t\t/* Outer UDP checksum offload */\n+\t\t\tbd1->data.bd_flags.bitfields |=\n+\t\t\t\tETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<\n+\t\t\t\tETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;\n+\t\t}\n+\n \t\t/* Descriptor based VLAN insertion */\n \t\tif (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {\n+\t\t\tPMD_TX_LOG(INFO, txq, \"Insert VLAN 0x%x\\n\",\n+\t\t\t\t   mbuf->vlan_tci);\n \t\t\tbd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);\n \t\t\tbd1->data.bd_flags.bitfields |=\n \t\t\t    1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;\n@@ -1215,12 +1331,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \n \t\t/* Offload the IP checksum in the hardware */\n \t\tif (mbuf->ol_flags & PKT_TX_IP_CKSUM) {\n+\t\t\tPMD_TX_LOG(INFO, txq, \"IP csum offload\\n\");\n \t\t\tbd1->data.bd_flags.bitfields |=\n \t\t\t    1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;\n \t\t}\n \n \t\t/* L4 checksum offload (tcp or udp) */\n \t\tif (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {\n+\t\t\tPMD_TX_LOG(INFO, txq, \"L4 csum offload\\n\");\n \t\t\tbd1->data.bd_flags.bitfields |=\n \t\t\t    1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;\n \t\t\t/* IPv6 + extn. -> later */\n@@ -1278,6 +1396,8 @@ static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)\n \t\t\t\tfp->txqs[tc] =\n \t\t\t\t\teth_dev->data->tx_queues[txq_index];\n \t\t\t\tfp->txqs[tc]->queue_id = txq_index;\n+\t\t\t\tif (qdev->dev_info.is_legacy)\n+\t\t\t\t\tfp->txqs[tc]->is_legacy = true;\n \t\t\t}\n \t\t\ttxq++;\n \t\t}\ndiff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h\nindex 4a50afe..3e1e977 100644\n--- a/drivers/net/qede/qede_rxtx.h\n+++ b/drivers/net/qede/qede_rxtx.h\n@@ -74,6 +74,51 @@\n \n #define for_each_queue(i) for (i = 0; i < qdev->num_queues; i++)\n \n+\n+/* Macros for non-tunnel packet types lkup table */\n+#define QEDE_PKT_TYPE_UNKNOWN\t\t\t\t0x0\n+#define QEDE_PKT_TYPE_MAX\t\t\t\t0xf\n+#define QEDE_PKT_TYPE_IPV4\t\t\t\t0x1\n+#define QEDE_PKT_TYPE_IPV6\t\t\t\t0x2\n+#define QEDE_PKT_TYPE_IPV4_TCP\t\t\t\t0x5\n+#define QEDE_PKT_TYPE_IPV6_TCP\t\t\t\t0x6\n+#define QEDE_PKT_TYPE_IPV4_UDP\t\t\t\t0x9\n+#define QEDE_PKT_TYPE_IPV6_UDP\t\t\t\t0xa\n+\n+/* Macros for tunneled packets with next protocol lkup table */\n+#define QEDE_PKT_TYPE_TUNN_GENEVE\t\t\t0x1\n+#define QEDE_PKT_TYPE_TUNN_GRE\t\t\t\t0x2\n+#define QEDE_PKT_TYPE_TUNN_VXLAN\t\t\t0x3\n+\n+/* Bit 2 is don't care bit */\n+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE\t0x9\n+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE\t0xa\n+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN\t0xb\n+\n+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE\t0xd\n+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE\t\t0xe\n+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN\t0xf\n+\n+\n+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE    0x11\n+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE       0x12\n+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN     0x13\n+\n+#define 
QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE\t0x15\n+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE\t0x16\n+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN\t0x17\n+\n+\n+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE    0x19\n+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE       0x1a\n+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN     0x1b\n+\n+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE      0x1d\n+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE\t\t0x1e\n+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN       0x1f\n+\n+#define QEDE_PKT_TYPE_TUNN_MAX_TYPE\t\t\t0x20 /* 2^5 */\n+\n /*\n  * RX BD descriptor ring\n  */\n@@ -133,6 +178,7 @@ struct qede_tx_queue {\n \tvolatile union db_prod tx_db;\n \tuint16_t port_id;\n \tuint64_t xmit_pkts;\n+\tbool is_legacy;\n \tstruct qede_dev *qdev;\n };\n \n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "4/7"
    ]
}
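
A short follow-up sketch, again assuming requests, showing how the mbox URL carried in the response above can be fetched so the patch is applied locally (the output filename is arbitrary):

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/18452/").json()
with open("18452.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)
# The saved file can then be applied to a DPDK tree with: git am 18452.mbox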