get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (the full resource is replaced).
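
For reference, a minimal sketch of driving these methods from Python using
only the standard library. The API token and the target state below are
placeholders, not values from this response; write access (PUT/PATCH)
assumes an authenticated Patchwork account with maintainer rights on the
project.

import json
import urllib.request

URL = "http://patches.dpdk.org/api/patches/11398/"

# GET: read the patch; the response body is the JSON object shown below.
with urllib.request.urlopen(URL) as resp:
    patch = json.load(resp)
print(patch["name"], patch["state"])

# PATCH: partial update, e.g. changing only the state field.
req = urllib.request.Request(
    URL,
    data=json.dumps({"state": "accepted"}).encode("utf-8"),
    method="PATCH",
    headers={
        "Content-Type": "application/json",
        "Authorization": "Token <your-api-token>",  # placeholder
    },
)
# urllib.request.urlopen(req)  # uncomment once a real token is set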

GET /api/patches/11398/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 11398,
    "url": "http://patches.dpdk.org/api/patches/11398/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1457617548-26252-6-git-send-email-rasesh.mody@qlogic.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1457617548-26252-6-git-send-email-rasesh.mody@qlogic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1457617548-26252-6-git-send-email-rasesh.mody@qlogic.com",
    "date": "2016-03-10T13:45:43",
    "name": "[dpdk-dev,v2,05/10] qede: Add core driver",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "1826fb67e2b5b9966c164dbab29c43702d0259ea",
    "submitter": {
        "id": 325,
        "url": "http://patches.dpdk.org/api/people/325/?format=api",
        "name": "Rasesh Mody",
        "email": "rasesh.mody@qlogic.com"
    },
    "delegate": {
        "id": 10,
        "url": "http://patches.dpdk.org/api/users/10/?format=api",
        "username": "bruce",
        "first_name": "Bruce",
        "last_name": "Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1457617548-26252-6-git-send-email-rasesh.mody@qlogic.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/11398/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/11398/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 149AE3977;\n\tThu, 10 Mar 2016 14:46:30 +0100 (CET)",
            "from mx0a-0016ce01.pphosted.com (mx0a-0016ce01.pphosted.com\n\t[67.231.148.157]) by dpdk.org (Postfix) with ESMTP id 5E928388F\n\tfor <dev@dpdk.org>; Thu, 10 Mar 2016 14:46:27 +0100 (CET)",
            "from pps.filterd (m0045602.ppops.net [127.0.0.1])\n\tby mx0a-0016ce01.pphosted.com (8.16.0.11/8.16.0.11) with SMTP id\n\tu2ADgpJr010949 for <dev@dpdk.org>; Thu, 10 Mar 2016 05:46:26 -0800",
            "from avcashub1.qlogic.com ([198.186.0.117])\n\tby mx0a-0016ce01.pphosted.com with ESMTP id 21fyg1s8k1-1\n\t(version=TLSv1 cipher=AES128-SHA bits=128 verify=NOT)\n\tfor <dev@dpdk.org>; Thu, 10 Mar 2016 05:46:25 -0800",
            "from avluser05.qlc.com (10.1.113.115) by qlc.com (10.1.4.192) with\n\tMicrosoft SMTP Server id 14.3.235.1;\n\tThu, 10 Mar 2016 05:46:24 -0800",
            "(from rmody@localhost)\tby avluser05.qlc.com (8.14.4/8.14.4/Submit)\n\tid u2ADkOhT026346;\tThu, 10 Mar 2016 05:46:24 -0800"
        ],
        "X-Authentication-Warning": "avluser05.qlc.com: rmody set sender to\n\trasesh.mody@qlogic.com using -f",
        "From": "Rasesh Mody <rasesh.mody@qlogic.com>",
        "To": "<dev@dpdk.org>",
        "Date": "Thu, 10 Mar 2016 05:45:43 -0800",
        "Message-ID": "<1457617548-26252-6-git-send-email-rasesh.mody@qlogic.com>",
        "X-Mailer": "git-send-email 1.7.10.3",
        "In-Reply-To": "<1457617548-26252-1-git-send-email-rasesh.mody@qlogic.com>",
        "References": "<1457617548-26252-1-git-send-email-rasesh.mody@qlogic.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "disclaimer": "bypass",
        "X-Proofpoint-Virus-Version": "vendor=nai engine=5800 definitions=8099\n\tsignatures=670697",
        "X-Proofpoint-Spam-Details": "rule=notspam policy=default score=0 suspectscore=4\n\tmalwarescore=0\n\tphishscore=0 bulkscore=0 spamscore=0 clxscore=1015 lowpriorityscore=0\n\tadultscore=0 classifier=spam adjust=0 reason=mlx scancount=1\n\tengine=8.0.1-1601100000 definitions=main-1603100226",
        "Cc": "sony.chacko@qlogic.com",
        "Subject": "[dpdk-dev] [PATCH v2 05/10] qede: Add core driver",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Harish Patil <harish.patil@qlogic.com>\nSigned-off-by: Rasesh Mody <rasesh.mody@qlogic.com>\nSigned-off-by: Sony Chacko <sony.chacko@qlogic.com>\n---\n drivers/net/qede/Makefile                 |   90 +++\n drivers/net/qede/qede_eth_if.h            |  176 +++++\n drivers/net/qede/qede_ethdev.c            |  957 +++++++++++++++++++++++\n drivers/net/qede/qede_ethdev.h            |  155 ++++\n drivers/net/qede/qede_if.h                |  155 ++++\n drivers/net/qede/qede_logs.h              |   93 +++\n drivers/net/qede/qede_main.c              |  548 ++++++++++++++\n drivers/net/qede/qede_rxtx.c              | 1172 +++++++++++++++++++++++++++++\n drivers/net/qede/qede_rxtx.h              |  187 +++++\n drivers/net/qede/rte_pmd_qede_version.map |    4 +\n 10 files changed, 3537 insertions(+)\n create mode 100644 drivers/net/qede/Makefile\n create mode 100644 drivers/net/qede/qede_eth_if.h\n create mode 100644 drivers/net/qede/qede_ethdev.c\n create mode 100644 drivers/net/qede/qede_ethdev.h\n create mode 100644 drivers/net/qede/qede_if.h\n create mode 100644 drivers/net/qede/qede_logs.h\n create mode 100644 drivers/net/qede/qede_main.c\n create mode 100644 drivers/net/qede/qede_rxtx.c\n create mode 100644 drivers/net/qede/qede_rxtx.h\n create mode 100644 drivers/net/qede/rte_pmd_qede_version.map",
    "diff": "diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile\nnew file mode 100644\nindex 0000000..efaefb2\n--- /dev/null\n+++ b/drivers/net/qede/Makefile\n@@ -0,0 +1,90 @@\n+#    Copyright (c) 2016 QLogic Corporation.\n+#    All rights reserved.\n+#    www.qlogic.com\n+#\n+#    See LICENSE.qede_pmd for copyright and licensing details.\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+#\n+# library name\n+#\n+LIB = librte_pmd_qede.a\n+\n+CFLAGS += -O3\n+CFLAGS += $(WERROR_FLAGS)\n+\n+EXPORT_MAP := rte_pmd_qede_version.map\n+\n+LIBABIVER := 1\n+\n+#\n+#OS\n+#\n+OS_TYPE := $(shell uname -s)\n+\n+#\n+# CFLAGS\n+#\n+CFLAGS_ECORE_DRIVER = -Wno-unused-parameter\n+CFLAGS_ECORE_DRIVER += -Wno-unused-value\n+CFLAGS_ECORE_DRIVER += -Wno-sign-compare\n+CFLAGS_ECORE_DRIVER += -Wno-missing-prototypes\n+CFLAGS_ECORE_DRIVER += -Wno-cast-qual\n+CFLAGS_ECORE_DRIVER += -Wno-unused-function\n+CFLAGS_ECORE_DRIVER += -Wno-unused-variable\n+CFLAGS_ECORE_DRIVER += -Wno-strict-aliasing\n+CFLAGS_ECORE_DRIVER += -Wno-missing-prototypes\n+CFLAGS_ECORE_DRIVER += -Wno-format-nonliteral\n+ifeq ($(OS_TYPE),Linux)\n+CFLAGS_ECORE_DRIVER += -Wno-shift-negative-value\n+endif\n+\n+ifneq (,$(filter gcc gcc48,$(CC)))\n+CFLAGS_ECORE_DRIVER += -Wno-unused-but-set-variable\n+CFLAGS_ECORE_DRIVER += -Wno-missing-declarations\n+CFLAGS_ECORE_DRIVER += -Wno-maybe-uninitialized\n+CFLAGS_ECORE_DRIVER += -Wno-strict-prototypes\n+else ifeq ($(CC), clang)\n+CFLAGS_ECORE_DRIVER += -Wno-format-extra-args\n+CFLAGS_ECORE_DRIVER += -Wno-visibility\n+CFLAGS_ECORE_DRIVER += -Wno-empty-body\n+CFLAGS_ECORE_DRIVER += -Wno-invalid-source-encoding\n+CFLAGS_ECORE_DRIVER += -Wno-sometimes-uninitialized\n+CFLAGS_ECORE_DRIVER += -Wno-pointer-bool-conversion\n+else\n+#icc flags\n+endif\n+\n+#\n+# Add extra flags for base ecore driver files\n+# to disable warnings in them\n+#\n+#\n+ECORE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))\n+$(foreach obj, $(ECORE_DRIVER_OBJS), $(eval CFLAGS+=$(CFLAGS_ECORE_DRIVER)))\n+\n+#\n+# all source are stored in SRCS-y\n+#\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dev.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_hw.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_cxt.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sp_commands.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_fw_funcs.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_spq.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c\n+\n+# dependent libs:\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_eal lib/librte_ether\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_mempool lib/librte_mbuf\n+DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_net lib/librte_malloc\n+\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h\nnew file mode 100644\nindex 0000000..47b169d\n--- /dev/null\n+++ b/drivers/net/qede/qede_eth_if.h\n@@ -0,0 +1,176 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#ifndef _QEDE_ETH_IF_H\n+#define _QEDE_ETH_IF_H\n+\n+#include 
\"qede_if.h\"\n+\n+/*forward decl */\n+struct eth_slow_path_rx_cqe;\n+\n+#define INIT_STRUCT_FIELD(field, value) .field = value\n+\n+#define QED_ETH_INTERFACE_VERSION       609\n+\n+enum qed_filter_rx_mode_type {\n+\tQED_FILTER_RX_MODE_TYPE_REGULAR,\n+\tQED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,\n+\tQED_FILTER_RX_MODE_TYPE_PROMISC,\n+};\n+\n+enum qed_filter_xcast_params_type {\n+\tQED_FILTER_XCAST_TYPE_ADD,\n+\tQED_FILTER_XCAST_TYPE_DEL,\n+\tQED_FILTER_XCAST_TYPE_REPLACE,\n+};\n+\n+enum qed_filter_type {\n+\tQED_FILTER_TYPE_UCAST,\n+\tQED_FILTER_TYPE_MCAST,\n+\tQED_FILTER_TYPE_RX_MODE,\n+\tQED_MAX_FILTER_TYPES,\n+};\n+\n+struct qed_dev_eth_info {\n+\tstruct qed_dev_info common;\n+\n+\tuint8_t num_queues;\n+\tuint8_t num_tc;\n+\n+\tstruct ether_addr port_mac;\n+\tuint8_t num_vlan_filters;\n+};\n+\n+struct qed_update_vport_rss_params {\n+\tuint16_t rss_ind_table[128];\n+\tuint32_t rss_key[10];\n+};\n+\n+struct qed_stop_rxq_params {\n+\tuint8_t rss_id;\n+\tuint8_t rx_queue_id;\n+\tuint8_t vport_id;\n+\tbool eq_completion_only;\n+};\n+\n+struct qed_update_vport_params {\n+\tuint8_t vport_id;\n+\tuint8_t update_vport_active_flg;\n+\tuint8_t vport_active_flg;\n+\tuint8_t update_inner_vlan_removal_flg;\n+\tuint8_t inner_vlan_removal_flg;\n+\tuint8_t update_tx_switching_flg;\n+\tuint8_t tx_switching_flg;\n+\tuint8_t update_accept_any_vlan_flg;\n+\tuint8_t accept_any_vlan;\n+\tuint8_t update_rss_flg;\n+\tstruct qed_update_vport_rss_params rss_params;\n+};\n+\n+struct qed_start_vport_params {\n+\tbool remove_inner_vlan;\n+\tbool handle_ptp_pkts;\n+\tbool gro_enable;\n+\tbool drop_ttl0;\n+\tuint8_t vport_id;\n+\tuint16_t mtu;\n+\tbool clear_stats;\n+};\n+\n+struct qed_stop_txq_params {\n+\tuint8_t rss_id;\n+\tuint8_t tx_queue_id;\n+};\n+\n+struct qed_filter_ucast_params {\n+\tenum qed_filter_xcast_params_type type;\n+\tuint8_t vlan_valid;\n+\tuint16_t vlan;\n+\tuint8_t mac_valid;\n+\tunsigned char mac[ETHER_ADDR_LEN];\n+} __attribute__ ((__packed__));\n+\n+struct qed_filter_mcast_params {\n+\tenum qed_filter_xcast_params_type type;\n+\tuint8_t num;\n+\tunsigned char mac[64][ETHER_ADDR_LEN];\n+};\n+\n+union qed_filter_type_params {\n+\tenum qed_filter_rx_mode_type accept_flags;\n+\tstruct qed_filter_ucast_params ucast;\n+\tstruct qed_filter_mcast_params mcast;\n+};\n+\n+struct qed_filter_params {\n+\tenum qed_filter_type type;\n+\tunion qed_filter_type_params filter;\n+};\n+\n+struct qed_eth_ops {\n+\tconst struct qed_common_ops *common;\n+\n+\tint (*fill_dev_info)(struct ecore_dev *edev,\n+\t\t\t     struct qed_dev_eth_info *info);\n+\n+\tint (*vport_start)(struct ecore_dev *edev,\n+\t\t\t   struct qed_start_vport_params *params);\n+\n+\tint (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id);\n+\n+\tint (*vport_update)(struct ecore_dev *edev,\n+\t\t\t    struct qed_update_vport_params *params);\n+\n+\tint (*q_rx_start)(struct ecore_dev *cdev,\n+\t\t\t  uint8_t rss_id, uint8_t rx_queue_id,\n+\t\t\t  uint8_t vport_id, uint16_t sb,\n+\t\t\t  uint8_t sb_index, uint16_t bd_max_bytes,\n+\t\t\t  dma_addr_t bd_chain_phys_addr,\n+\t\t\t  dma_addr_t cqe_pbl_addr,\n+\t\t\t  uint16_t cqe_pbl_size, void OSAL_IOMEM**pp_prod);\n+\n+\tint (*q_rx_stop)(struct ecore_dev *edev,\n+\t\t\t struct qed_stop_rxq_params *params);\n+\n+\tint (*q_tx_start)(struct ecore_dev *edev,\n+\t\t\t  uint8_t rss_id, uint16_t tx_queue_id,\n+\t\t\t  uint8_t vport_id, uint16_t sb,\n+\t\t\t  uint8_t sb_index,\n+\t\t\t  dma_addr_t pbl_addr,\n+\t\t\t  uint16_t pbl_size, void OSAL_IOMEM**pp_doorbell);\n+\n+\tint (*q_tx_stop)(struct ecore_dev 
*edev,\n+\t\t\t struct qed_stop_txq_params *params);\n+\n+\tint (*eth_cqe_completion)(struct ecore_dev *edev,\n+\t\t\t\t  uint8_t rss_id,\n+\t\t\t\t  struct eth_slow_path_rx_cqe *cqe);\n+\n+\tint (*fastpath_stop)(struct ecore_dev *edev);\n+\n+\tvoid (*get_vport_stats)(struct ecore_dev *edev,\n+\t\t\t\tstruct ecore_eth_stats *stats);\n+\n+\tint (*filter_config)(struct ecore_dev *edev,\n+\t\t\t     struct qed_filter_params *params);\n+};\n+\n+/* externs */\n+\n+extern const struct qed_common_ops qed_common_ops_pass;\n+\n+extern int qed_fill_eth_dev_info(struct ecore_dev *edev,\n+\t\t\t\t struct qed_dev_eth_info *info);\n+\n+void qed_put_eth_ops(void);\n+\n+int qed_configure_filter_rx_mode(struct ecore_dev *edev,\n+\t\t\t\t enum qed_filter_rx_mode_type type);\n+\n+#endif /* _QEDE_ETH_IF_H */\ndiff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c\nnew file mode 100644\nindex 0000000..d5f7019\n--- /dev/null\n+++ b/drivers/net/qede/qede_ethdev.c\n@@ -0,0 +1,957 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#include \"qede_ethdev.h\"\n+\n+/* Globals */\n+static const struct qed_eth_ops *qed_ops;\n+static const char *drivername = \"qede pmd\";\n+\n+static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)\n+{\n+\tecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));\n+}\n+\n+static void\n+qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)\n+{\n+\tstruct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\n+\tqede_interrupt_action(ECORE_LEADING_HWFN(edev));\n+\tif (rte_intr_enable(&(eth_dev->pci_dev->intr_handle)))\n+\t\tDP_ERR(edev, \"rte_intr_enable failed\\n\");\n+}\n+\n+static void\n+qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)\n+{\n+\trte_memcpy(&qdev->dev_info, info, sizeof(*info));\n+\tqdev->num_tc = qdev->dev_info.num_tc;\n+\tqdev->ops = qed_ops;\n+}\n+\n+static void qede_print_adapter_info(struct qede_dev *qdev)\n+{\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct qed_dev_info *info = &qdev->dev_info.common;\n+\tchar ver_str[QED_DRV_VER_STR_SIZE] = { 0 };\n+\n+\tRTE_LOG(INFO, PMD,\n+\t\t  \" Chip details : %s%d\\n\",\n+\t\t  ECORE_IS_BB(edev) ? \"BB\" : \"AH\",\n+\t\t  CHIP_REV_IS_A0(edev) ? 
0 : 1);\n+\n+\tsprintf(ver_str, \"%s %s_%d.%d.%d\", QEDE_PMD_VER_PREFIX,\n+\t\tedev->ver_str, QEDE_PMD_VERSION_MAJOR,\n+\t\tQEDE_PMD_VERSION_MINOR, QEDE_PMD_VERSION_PATCH);\n+\tstrcpy(qdev->drv_ver, ver_str);\n+\tRTE_LOG(INFO, PMD, \" Driver version : %s\\n\", ver_str);\n+\n+\tver_str[0] = '\\0';\n+\tsprintf(ver_str, \"%d.%d.%d.%d\", info->fw_major, info->fw_minor,\n+\t\tinfo->fw_rev, info->fw_eng);\n+\tRTE_LOG(INFO, PMD, \" Firmware version : %s\\n\", ver_str);\n+\n+\tver_str[0] = '\\0';\n+\tsprintf(ver_str, \"%d.%d.%d.%d\",\n+\t\t(info->mfw_rev >> 24) & 0xff,\n+\t\t(info->mfw_rev >> 16) & 0xff,\n+\t\t(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);\n+\tRTE_LOG(INFO, PMD, \" Management firmware version : %s\\n\", ver_str);\n+\n+\tRTE_LOG(INFO, PMD, \" Firmware file : %s\\n\", QEDE_FW_FILE_NAME);\n+}\n+\n+static int\n+qede_set_ucast_rx_mac(struct qede_dev *qdev,\n+\t\t      enum qed_filter_xcast_params_type opcode,\n+\t\t      uint8_t mac[ETHER_ADDR_LEN])\n+{\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct qed_filter_params filter_cmd;\n+\n+\tmemset(&filter_cmd, 0, sizeof(filter_cmd));\n+\tfilter_cmd.type = QED_FILTER_TYPE_UCAST;\n+\tfilter_cmd.filter.ucast.type = opcode;\n+\tfilter_cmd.filter.ucast.mac_valid = 1;\n+\trte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);\n+\treturn qdev->ops->filter_config(edev, &filter_cmd);\n+}\n+\n+static void\n+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,\n+\t\t  __rte_unused uint32_t index, __rte_unused uint32_t pool)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tint rc;\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\tDP_NOTICE(edev, false, \"%s\\n\", __func__);\n+\n+\t/* Skip adding macaddr if promiscuous mode is set */\n+\tif (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {\n+\t\tDP_NOTICE(edev, false, \"Port is in promiscuous mode\\n\");\n+\t\treturn;\n+\t}\n+\n+\t/* Add MAC filters according to the unicast secondary macs */\n+\trc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,\n+\t\t\t\t   mac_addr->addr_bytes);\n+\tif (rc)\n+\t\tDP_ERR(edev, \"Unable to add filter\\n\");\n+}\n+\n+static void\n+qede_mac_addr_remove(__rte_unused struct rte_eth_dev *eth_dev,\n+\t\t     __rte_unused uint32_t index)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\n+\t/* TBD: Not implemented currently because DPDK does not provide\n+\t * macaddr and instead just passes the index. So pmd needs to\n+\t * maintain index mapping to macaddr.\n+\t */\n+\tDP_NOTICE(edev, false, \"%s: Unsupported operation\\n\", __func__);\n+}\n+\n+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)\n+{\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct qed_update_vport_params params;\n+\tint rc;\n+\n+\t/* Proceed only if action actually needs to be performed */\n+\tif (qdev->accept_any_vlan == action)\n+\t\treturn;\n+\n+\tmemset(&params, 0, sizeof(params));\n+\n+\tparams.vport_id = 0;\n+\tparams.accept_any_vlan = action;\n+\tparams.update_accept_any_vlan_flg = 1;\n+\n+\trc = qdev->ops->vport_update(edev, &params);\n+\tif (rc) {\n+\t\tDP_ERR(edev, \"Failed to %s accept-any-vlan\\n\",\n+\t\t       action ? \"enable\" : \"disable\");\n+\t} else {\n+\t\tDP_INFO(edev, \"%s accept-any-vlan\\n\",\n+\t\t\taction ? 
\"enabled\" : \"disabled\");\n+\t\tqdev->accept_any_vlan = action;\n+\t}\n+}\n+\n+void qede_config_rx_mode(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\t/* TODO: - QED_FILTER_TYPE_UCAST */\n+\tenum qed_filter_rx_mode_type accept_flags =\n+\t\t\tQED_FILTER_RX_MODE_TYPE_REGULAR;\n+\tstruct qed_filter_params rx_mode;\n+\tint rc;\n+\n+\t/* Configure the struct for the Rx mode */\n+\tmemset(&rx_mode, 0, sizeof(struct qed_filter_params));\n+\trx_mode.type = QED_FILTER_TYPE_RX_MODE;\n+\n+\trc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,\n+\t\t\t\t   eth_dev->data->mac_addrs[0].addr_bytes);\n+\tif (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {\n+\t\taccept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;\n+\t} else {\n+\t\trc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,\n+\t\t\t\t\t   eth_dev->data->\n+\t\t\t\t\t   mac_addrs[0].addr_bytes);\n+\t\tif (rc) {\n+\t\t\tDP_ERR(edev, \"Unable to add filter\\n\");\n+\t\t\treturn;\n+\t\t}\n+\t}\n+\n+\t/* take care of VLAN mode */\n+\tif (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {\n+\t\tqede_config_accept_any_vlan(qdev, true);\n+\t} else if (!qdev->non_configured_vlans) {\n+\t\t/* If we dont have non-configured VLANs and promisc\n+\t\t * is not set, then check if we need to disable\n+\t\t * accept_any_vlan mode.\n+\t\t * Because in this case, accept_any_vlan mode is set\n+\t\t * as part of IFF_RPOMISC flag handling.\n+\t\t */\n+\t\tqede_config_accept_any_vlan(qdev, false);\n+\t}\n+\trx_mode.filter.accept_flags = accept_flags;\n+\t(void)qdev->ops->filter_config(edev, &rx_mode);\n+}\n+\n+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)\n+{\n+\tstruct qed_update_vport_params vport_update_params;\n+\tstruct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);\n+\tstruct ecore_dev *edev = QEDE_INIT_EDEV(qdev);\n+\tint rc;\n+\n+\tmemset(&vport_update_params, 0, sizeof(vport_update_params));\n+\tvport_update_params.vport_id = 0;\n+\tvport_update_params.update_inner_vlan_removal_flg = 1;\n+\tvport_update_params.inner_vlan_removal_flg = set_stripping;\n+\trc = qdev->ops->vport_update(edev, &vport_update_params);\n+\tif (rc) {\n+\t\tDP_ERR(edev, \"Update V-PORT failed %d\\n\", rc);\n+\t\treturn rc;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)\n+{\n+\tstruct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);\n+\tstruct ecore_dev *edev = QEDE_INIT_EDEV(qdev);\n+\n+\tif (mask & ETH_VLAN_STRIP_MASK) {\n+\t\tif (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)\n+\t\t\t(void)qede_vlan_stripping(eth_dev, 1);\n+\t\telse\n+\t\t\t(void)qede_vlan_stripping(eth_dev, 0);\n+\t}\n+\n+\tDP_INFO(edev, \"vlan offload mask %d vlan-strip %d\\n\",\n+\t\tmask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);\n+}\n+\n+static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,\n+\t\t\t\t  enum qed_filter_xcast_params_type opcode,\n+\t\t\t\t  uint16_t vid)\n+{\n+\tstruct qed_filter_params filter_cmd;\n+\tstruct ecore_dev *edev = QEDE_INIT_EDEV(qdev);\n+\n+\tmemset(&filter_cmd, 0, sizeof(filter_cmd));\n+\tfilter_cmd.type = QED_FILTER_TYPE_UCAST;\n+\tfilter_cmd.filter.ucast.type = opcode;\n+\tfilter_cmd.filter.ucast.vlan_valid = 1;\n+\tfilter_cmd.filter.ucast.vlan = vid;\n+\n+\treturn qdev->ops->filter_config(edev, &filter_cmd);\n+}\n+\n+static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,\n+\t\t\t\tuint16_t vlan_id, int on)\n+{\n+\tstruct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);\n+\tstruct 
ecore_dev *edev = QEDE_INIT_EDEV(qdev);\n+\tstruct qed_dev_eth_info *dev_info = &qdev->dev_info;\n+\tint rc;\n+\n+\tif (vlan_id != 0 &&\n+\t    qdev->configured_vlans == dev_info->num_vlan_filters) {\n+\t\tDP_NOTICE(edev, false, \"Reached max VLAN filter limit\"\n+\t\t\t\t     \" enabling accept_any_vlan\\n\");\n+\t\tqede_config_accept_any_vlan(qdev, true);\n+\t\treturn 0;\n+\t}\n+\n+\tif (on) {\n+\t\trc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,\n+\t\t\t\t\t    vlan_id);\n+\t\tif (rc)\n+\t\t\tDP_ERR(edev, \"Failed to add VLAN %u rc %d\\n\", vlan_id,\n+\t\t\t       rc);\n+\t\telse\n+\t\t\tif (vlan_id != 0)\n+\t\t\t\tqdev->configured_vlans++;\n+\t} else {\n+\t\trc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,\n+\t\t\t\t\t    vlan_id);\n+\t\tif (rc)\n+\t\t\tDP_ERR(edev, \"Failed to delete VLAN %u rc %d\\n\",\n+\t\t\t       vlan_id, rc);\n+\t\telse\n+\t\t\tif (vlan_id != 0)\n+\t\t\t\tqdev->configured_vlans--;\n+\t}\n+\n+\tDP_INFO(edev, \"vlan_id %u on %u rc %d configured_vlans %u\\n\",\n+\t\t\tvlan_id, on, rc, qdev->configured_vlans);\n+\n+\treturn rc;\n+}\n+\n+static int qede_dev_configure(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;\n+\tint rc = 0;\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\tif (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {\n+\t\tDP_NOTICE(edev, false,\n+\t\t\t  \"Unequal number of rx/tx queues \"\n+\t\t\t  \"is not supported RX=%u TX=%u\\n\",\n+\t\t\t  eth_dev->data->nb_rx_queues,\n+\t\t\t  eth_dev->data->nb_tx_queues);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tqdev->num_rss = eth_dev->data->nb_rx_queues;\n+\n+\t/* Initial state */\n+\tqdev->state = QEDE_CLOSE;\n+\n+\t/* Sanity checks and throw warnings */\n+\n+\tif (rxmode->enable_scatter == 1) {\n+\t\tDP_ERR(edev, \"RX scatter packets is not supported\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (rxmode->enable_lro == 1) {\n+\t\tDP_INFO(edev, \"LRO is not supported\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (!rxmode->hw_strip_crc)\n+\t\tDP_INFO(edev, \"L2 CRC stripping is always enabled in hw\\n\");\n+\n+\tif (!rxmode->hw_ip_checksum)\n+\t\tDP_INFO(edev, \"IP/UDP/TCP checksum offload is always enabled \"\n+\t\t\t      \"in hw\\n\");\n+\n+\n+\tDP_INFO(edev, \"Allocated %d RSS queues on %d TC/s\\n\",\n+\t\tQEDE_RSS_CNT(qdev), qdev->num_tc);\n+\n+\tDP_INFO(edev, \"my_id %u rel_pf_id %u abs_pf_id %u\"\n+\t\t\" port %u first_on_engine %d\\n\",\n+\t\tedev->hwfns[0].my_id,\n+\t\tedev->hwfns[0].rel_pf_id,\n+\t\tedev->hwfns[0].abs_pf_id,\n+\t\tedev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);\n+\n+\treturn 0;\n+}\n+\n+/* Info about HW descriptor ring limitations */\n+static const struct rte_eth_desc_lim qede_rx_desc_lim = {\n+\t.nb_max = NUM_RX_BDS_MAX,\n+\t.nb_min = 128,\n+\t.nb_align = 128\t\t/* lowest common multiple */\n+};\n+\n+static const struct rte_eth_desc_lim qede_tx_desc_lim = {\n+\t.nb_max = NUM_TX_BDS_MAX,\n+\t.nb_min = 256,\n+\t.nb_align = 256\n+};\n+\n+static void\n+qede_dev_info_get(struct rte_eth_dev *eth_dev,\n+\t\t  struct rte_eth_dev_info *dev_info)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\tdev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +\n+\t\t\t\t\t      QEDE_ETH_OVERHEAD);\n+\tdev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;\n+\tdev_info->rx_desc_lim = qede_rx_desc_lim;\n+\tdev_info->tx_desc_lim 
= qede_tx_desc_lim;\n+\t/* Fix it for 8 queues for now */\n+\tdev_info->max_rx_queues = 8;\n+\tdev_info->max_tx_queues = 8;\n+\tdev_info->max_mac_addrs = (uint32_t)(RESC_NUM(&edev->hwfns[0],\n+\t\t\t\t\t\t      ECORE_MAC));\n+\tdev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);\n+\tdev_info->driver_name = qdev->drv_ver;\n+\tdev_info->reta_size = ETH_RSS_RETA_SIZE_128;\n+\tdev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;\n+\tdev_info->default_txconf = (struct rte_eth_txconf) {\n+\t.txq_flags = QEDE_TXQ_FLAGS,};\n+\tdev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |\n+\t\t\t\t     DEV_RX_OFFLOAD_IPV4_CKSUM |\n+\t\t\t\t     DEV_RX_OFFLOAD_UDP_CKSUM |\n+\t\t\t\t     DEV_RX_OFFLOAD_TCP_CKSUM);\n+\tdev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |\n+\t\t\t\t     DEV_TX_OFFLOAD_IPV4_CKSUM |\n+\t\t\t\t     DEV_TX_OFFLOAD_UDP_CKSUM |\n+\t\t\t\t     DEV_TX_OFFLOAD_TCP_CKSUM);\n+}\n+\n+/* return 0 means link status changed, -1 means not changed */\n+static int\n+qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tuint16_t link_duplex;\n+\tstruct qed_link_output link;\n+\tstruct rte_eth_link *old = &eth_dev->data->dev_link;\n+\n+\tmemset(&link, 0, sizeof(struct qed_link_output));\n+\tqdev->ops->common->get_link(edev, &link);\n+\tif (old->link_status == link.link_up)\n+\t\treturn -1;\n+\n+\t/* Speed */\n+\teth_dev->data->dev_link.link_speed = link.speed;\n+\n+\t/* Duplex/Simplex */\n+\tswitch (link.duplex) {\n+\tcase QEDE_DUPLEX_HALF:\n+\t\tlink_duplex = ETH_LINK_HALF_DUPLEX;\n+\t\tbreak;\n+\tcase QEDE_DUPLEX_FULL:\n+\t\tlink_duplex = ETH_LINK_FULL_DUPLEX;\n+\t\tbreak;\n+\tcase QEDE_DUPLEX_UNKNOWN:\n+\tdefault:\n+\t\tlink_duplex = -1;\n+\t}\n+\n+\teth_dev->data->dev_link.link_duplex = link_duplex;\n+\teth_dev->data->dev_link.link_status = link.link_up;\n+\n+\t/* Link state changed */\n+\treturn 0;\n+}\n+\n+static void\n+qede_rx_mode_setting(struct rte_eth_dev *eth_dev,\n+\t\t     enum qed_filter_rx_mode_type accept_flags)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct qed_filter_params rx_mode;\n+\n+\tDP_INFO(edev, \"%s mode %u\\n\", __func__, accept_flags);\n+\n+\tmemset(&rx_mode, 0, sizeof(struct qed_filter_params));\n+\trx_mode.type = QED_FILTER_TYPE_RX_MODE;\n+\trx_mode.filter.accept_flags = accept_flags;\n+\tqdev->ops->filter_config(edev, &rx_mode);\n+}\n+\n+static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\tenum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;\n+\n+\tif (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)\n+\t\ttype |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;\n+\n+\tqede_rx_mode_setting(eth_dev, type);\n+}\n+\n+static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\tif (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)\n+\t\tqede_rx_mode_setting(eth_dev,\n+\t\t\t\t     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);\n+\telse\n+\t\tqede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);\n+}\n+\n+static void qede_dev_close(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct 
ecore_dev *edev = &qdev->edev;\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\t/* dev_stop() shall cleanup fp resources in hw but without releasing\n+\t * dma memories and sw structures so that dev_start() can be called\n+\t * by the app without reconfiguration. However, in dev_close() we\n+\t * can release all the resources and device can be brought up newly\n+\t */\n+\tif (qdev->state != QEDE_STOP)\n+\t\tqede_dev_stop(eth_dev);\n+\telse\n+\t\tDP_INFO(edev, \"Device is already stopped\\n\");\n+\n+\tqede_free_mem_load(qdev);\n+\n+\tqede_free_fp_arrays(qdev);\n+\n+\tqede_dev_set_link_state(eth_dev, false);\n+\n+\tqdev->ops->common->slowpath_stop(edev);\n+\n+\tqdev->ops->common->remove(edev);\n+\n+\trte_intr_disable(&(eth_dev->pci_dev->intr_handle));\n+\n+\trte_intr_callback_unregister(&(eth_dev->pci_dev->intr_handle),\n+\t\t\t\t     qede_interrupt_handler, (void *)eth_dev);\n+\n+\tqdev->state = QEDE_CLOSE;\n+}\n+\n+static void\n+qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct ecore_eth_stats stats;\n+\n+\tqdev->ops->get_vport_stats(edev, &stats);\n+\n+\t/* RX Stats */\n+\teth_stats->ipackets = stats.rx_ucast_pkts +\n+\t    stats.rx_mcast_pkts + stats.rx_bcast_pkts;\n+\n+\teth_stats->ibytes = stats.rx_ucast_bytes +\n+\t    stats.rx_mcast_bytes + stats.rx_bcast_bytes;\n+\n+\teth_stats->imcasts = stats.rx_mcast_pkts;\n+\n+\teth_stats->ierrors = stats.rx_crc_errors +\n+\t    stats.rx_align_errors +\n+\t    stats.rx_carrier_errors +\n+\t    stats.rx_oversize_packets +\n+\t    stats.rx_jabbers + stats.rx_undersize_packets;\n+\n+\teth_stats->rx_nombuf = stats.no_buff_discards;\n+\n+\teth_stats->imissed = stats.mftag_filter_discards +\n+\t    stats.mac_filter_discards +\n+\t    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;\n+\n+\t/* TX stats */\n+\teth_stats->opackets = stats.tx_ucast_pkts +\n+\t    stats.tx_mcast_pkts + stats.tx_bcast_pkts;\n+\n+\teth_stats->obytes = stats.tx_ucast_bytes +\n+\t    stats.tx_mcast_bytes + stats.tx_bcast_bytes;\n+\n+\teth_stats->oerrors = stats.tx_err_drop_pkts;\n+\n+\tDP_INFO(edev,\n+\t\t\"no_buff_discards=%\" PRIu64 \"\"\n+\t\t\" mac_filter_discards=%\" PRIu64 \"\"\n+\t\t\" brb_truncates=%\" PRIu64 \"\"\n+\t\t\" brb_discards=%\" PRIu64 \"\\n\",\n+\t\tstats.no_buff_discards,\n+\t\tstats.mac_filter_discards,\n+\t\tstats.brb_truncates, stats.brb_discards);\n+}\n+\n+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)\n+{\n+\tstruct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);\n+\tstruct ecore_dev *edev = QEDE_INIT_EDEV(qdev);\n+\tstruct qed_link_params link_params;\n+\tint rc;\n+\n+\tDP_INFO(edev, \"setting link state %d\\n\", link_up);\n+\tmemset(&link_params, 0, sizeof(link_params));\n+\tlink_params.link_up = link_up;\n+\trc = qdev->ops->common->set_link(edev, &link_params);\n+\tif (rc != ECORE_SUCCESS)\n+\t\tDP_ERR(edev, \"Unable to set link state %d\\n\", link_up);\n+\n+\treturn rc;\n+}\n+\n+static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)\n+{\n+\treturn qede_dev_set_link_state(eth_dev, true);\n+}\n+\n+static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)\n+{\n+\treturn qede_dev_set_link_state(eth_dev, false);\n+}\n+\n+static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)\n+{\n+\tenum qed_filter_rx_mode_type type =\n+\t    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;\n+\n+\tif (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)\n+\t\ttype |= 
QED_FILTER_RX_MODE_TYPE_PROMISC;\n+\n+\tqede_rx_mode_setting(eth_dev, type);\n+}\n+\n+static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)\n+{\n+\tif (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)\n+\t\tqede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);\n+\telse\n+\t\tqede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);\n+}\n+\n+static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,\n+\t\t\t      struct rte_eth_fc_conf *fc_conf)\n+{\n+\tstruct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);\n+\tstruct ecore_dev *edev = QEDE_INIT_EDEV(qdev);\n+\tstruct qed_link_output current_link;\n+\tstruct qed_link_params params;\n+\n+\tmemset(&current_link, 0, sizeof(current_link));\n+\tqdev->ops->common->get_link(edev, &current_link);\n+\n+\tmemset(&params, 0, sizeof(params));\n+\tparams.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;\n+\tif (fc_conf->autoneg) {\n+\t\tif (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {\n+\t\t\tDP_ERR(edev, \"Autoneg not supported\\n\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\tparams.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;\n+\t}\n+\n+\t/* Pause is assumed to be supported (SUPPORTED_Pause) */\n+\tif (fc_conf->mode == RTE_FC_FULL)\n+\t\tparams.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |\n+\t\t\t\t\tQED_LINK_PAUSE_RX_ENABLE);\n+\tif (fc_conf->mode == RTE_FC_TX_PAUSE)\n+\t\tparams.pause_config |= QED_LINK_PAUSE_TX_ENABLE;\n+\tif (fc_conf->mode == RTE_FC_RX_PAUSE)\n+\t\tparams.pause_config |= QED_LINK_PAUSE_RX_ENABLE;\n+\n+\tparams.link_up = true;\n+\t(void)qdev->ops->common->set_link(edev, &params);\n+\n+\treturn 0;\n+}\n+\n+static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,\n+\t\t\t      struct rte_eth_fc_conf *fc_conf)\n+{\n+\tstruct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);\n+\tstruct ecore_dev *edev = QEDE_INIT_EDEV(qdev);\n+\tstruct qed_link_output current_link;\n+\n+\tmemset(&current_link, 0, sizeof(current_link));\n+\tqdev->ops->common->get_link(edev, &current_link);\n+\n+\tif (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)\n+\t\tfc_conf->autoneg = true;\n+\n+\tif (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |\n+\t\t\t\t\t QED_LINK_PAUSE_TX_ENABLE))\n+\t\tfc_conf->mode = RTE_FC_FULL;\n+\telse if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)\n+\t\tfc_conf->mode = RTE_FC_RX_PAUSE;\n+\telse if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)\n+\t\tfc_conf->mode = RTE_FC_TX_PAUSE;\n+\telse\n+\t\tfc_conf->mode = RTE_FC_NONE;\n+\n+\treturn 0;\n+}\n+\n+static struct eth_dev_ops qede_eth_dev_ops = {\n+\t.dev_configure = qede_dev_configure,\n+\t.dev_infos_get = qede_dev_info_get,\n+\t.rx_queue_setup = qede_rx_queue_setup,\n+\t.rx_queue_release = qede_rx_queue_release,\n+\t.tx_queue_setup = qede_tx_queue_setup,\n+\t.tx_queue_release = qede_tx_queue_release,\n+\t.dev_start = qede_dev_start,\n+\t.dev_set_link_up = qede_dev_set_link_up,\n+\t.dev_set_link_down = qede_dev_set_link_down,\n+\t.link_update = qede_link_update,\n+\t.promiscuous_enable = qede_promiscuous_enable,\n+\t.promiscuous_disable = qede_promiscuous_disable,\n+\t.allmulticast_enable = qede_allmulticast_enable,\n+\t.allmulticast_disable = qede_allmulticast_disable,\n+\t.dev_stop = qede_dev_stop,\n+\t.dev_close = qede_dev_close,\n+\t.stats_get = qede_get_stats,\n+\t.mac_addr_add = qede_mac_addr_add,\n+\t.mac_addr_remove = qede_mac_addr_remove,\n+\t.vlan_offload_set = qede_vlan_offload_set,\n+\t.vlan_filter_set = qede_vlan_filter_set,\n+\t.flow_ctrl_set = qede_flow_ctrl_set,\n+\t.flow_ctrl_get = 
qede_flow_ctrl_get,\n+};\n+\n+static void qede_update_pf_params(struct ecore_dev *edev)\n+{\n+\tstruct ecore_pf_params pf_params;\n+\t/* 16 rx + 16 tx */\n+\tmemset(&pf_params, 0, sizeof(struct ecore_pf_params));\n+\tpf_params.eth_pf_params.num_cons = 32;\n+\tqed_ops->common->update_pf_params(edev, &pf_params);\n+}\n+\n+static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)\n+{\n+\tstruct rte_pci_device *pci_dev;\n+\tstruct rte_pci_addr pci_addr;\n+\tstruct qede_dev *adapter;\n+\tstruct ecore_dev *edev;\n+\tstruct qed_dev_eth_info dev_info;\n+\tstruct qed_slowpath_params params;\n+\tuint32_t qed_ver;\n+\tstatic bool do_once = true;\n+\tuint8_t bulletin_change;\n+\tuint8_t vf_mac[ETHER_ADDR_LEN];\n+\tuint8_t is_mac_forced;\n+\tbool is_mac_exist;\n+\t/* Fix up ecore debug level */\n+\tuint32_t dp_module = ~0 & ~ECORE_MSG_HW;\n+\tuint8_t dp_level = ECORE_LEVEL_VERBOSE;\n+\tint rc;\n+\n+\t/* Extract key data structures */\n+\tadapter = eth_dev->data->dev_private;\n+\tedev = &adapter->edev;\n+\tpci_addr = eth_dev->pci_dev->addr;\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\tsnprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT \":dpdk-port-%u\",\n+\t\t pci_addr.bus, pci_addr.devid, pci_addr.function,\n+\t\t eth_dev->data->port_id);\n+\n+\teth_dev->rx_pkt_burst = qede_recv_pkts;\n+\teth_dev->tx_pkt_burst = qede_xmit_pkts;\n+\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY) {\n+\t\tDP_NOTICE(edev, false,\n+\t\t\t  \"Skipping device init from secondary process\\n\");\n+\t\treturn 0;\n+\t}\n+\n+\tpci_dev = eth_dev->pci_dev;\n+\n+\trte_eth_copy_pci_info(eth_dev, pci_dev);\n+\n+\tif (qed_ver != QEDE_ETH_INTERFACE_VERSION) {\n+\t\tDP_ERR(edev, \"Version mismatch [%08x != %08x]\\n\",\n+\t\t       qed_ver, QEDE_ETH_INTERFACE_VERSION);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tDP_INFO(edev, \"Starting qede probe\\n\");\n+\n+\trc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,\n+\t\t\t\t    dp_module, dp_level, is_vf);\n+\n+\tif (rc != 0) {\n+\t\tDP_ERR(edev, \"qede probe failed rc 0x%x\\n\", rc);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tqede_update_pf_params(edev);\n+\n+\trte_intr_callback_register(&(eth_dev->pci_dev->intr_handle),\n+\t\t\t\t   qede_interrupt_handler, (void *)eth_dev);\n+\n+\tif (rte_intr_enable(&(eth_dev->pci_dev->intr_handle))) {\n+\t\tDP_ERR(edev, \"rte_intr_enable() failed\\n\");\n+\t\treturn -ENODEV;\n+\t}\n+\n+\t/* Start the Slowpath-process */\n+\tmemset(&params, 0, sizeof(struct qed_slowpath_params));\n+\tparams.int_mode = ECORE_INT_MODE_MSIX;\n+\tparams.drv_major = QEDE_MAJOR_VERSION;\n+\tparams.drv_minor = QEDE_MINOR_VERSION;\n+\tparams.drv_rev = QEDE_REVISION_VERSION;\n+\tparams.drv_eng = QEDE_ENGINEERING_VERSION;\n+\tstrncpy((char *)params.name, \"qede LAN\", QED_DRV_VER_STR_SIZE);\n+\n+\trc = qed_ops->common->slowpath_start(edev, &params);\n+\tif (rc) {\n+\t\tDP_ERR(edev, \"Cannot start slowpath rc=0x%x\\n\", rc);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\trc = qed_ops->fill_dev_info(edev, &dev_info);\n+\tif (rc) {\n+\t\tDP_ERR(edev, \"Cannot get device_info rc=0x%x\\n\", rc);\n+\t\tqed_ops->common->slowpath_stop(edev);\n+\t\tqed_ops->common->remove(edev);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tqede_alloc_etherdev(adapter, &dev_info);\n+\n+\tadapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);\n+\n+\t/* Allocate memory for storing primary macaddr */\n+\teth_dev->data->mac_addrs = rte_zmalloc(edev->name, ETHER_ADDR_LEN,\n+\t\t\t\t\t       RTE_CACHE_LINE_SIZE);\n+\n+\tif (eth_dev->data->mac_addrs == NULL) {\n+\t\tDP_ERR(edev, \"Failed to allocate MAC 
address\\n\");\n+\t\tqed_ops->common->slowpath_stop(edev);\n+\t\tqed_ops->common->remove(edev);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tether_addr_copy((struct ether_addr *)edev->hwfns[0].\n+\t\t\t\thw_info.hw_mac_addr,\n+\t\t\t\t&eth_dev->data->mac_addrs[0]);\n+\n+\teth_dev->dev_ops = &qede_eth_dev_ops;\n+\n+\tif (do_once) {\n+\t\tqede_print_adapter_info(adapter);\n+\t\tdo_once = false;\n+\t}\n+\n+\tDP_NOTICE(edev, false, \"macaddr %02x:%02x:%02x:%02x:%02x:%02x\\n\",\n+\t\t  eth_dev->data->mac_addrs[0].addr_bytes[0],\n+\t\t  eth_dev->data->mac_addrs[0].addr_bytes[1],\n+\t\t  eth_dev->data->mac_addrs[0].addr_bytes[2],\n+\t\t  eth_dev->data->mac_addrs[0].addr_bytes[3],\n+\t\t  eth_dev->data->mac_addrs[0].addr_bytes[4],\n+\t\t  eth_dev->data->mac_addrs[0].addr_bytes[5]);\n+\n+\treturn rc;\n+}\n+\n+static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)\n+{\n+\treturn qede_common_dev_init(eth_dev, 1);\n+}\n+\n+static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)\n+{\n+\treturn qede_common_dev_init(eth_dev, 0);\n+}\n+\n+static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)\n+{\n+\t/* only uninitialize in the primary process */\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn 0;\n+\n+\t/* safe to close dev here */\n+\tqede_dev_close(eth_dev);\n+\n+\teth_dev->dev_ops = NULL;\n+\teth_dev->rx_pkt_burst = NULL;\n+\teth_dev->tx_pkt_burst = NULL;\n+\n+\tif (eth_dev->data->mac_addrs)\n+\t\trte_free(eth_dev->data->mac_addrs);\n+\n+\teth_dev->data->mac_addrs = NULL;\n+\n+\treturn 0;\n+}\n+\n+static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)\n+{\n+\treturn qede_dev_common_uninit(eth_dev);\n+}\n+\n+static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)\n+{\n+\treturn qede_dev_common_uninit(eth_dev);\n+}\n+\n+static struct rte_pci_id pci_id_qedevf_map[] = {\n+#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)\n+\t{\n+\t\tQEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)\n+\t},\n+\t{\n+\t\tQEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)\n+\t},\n+\t{.vendor_id = 0,}\n+};\n+\n+static struct rte_pci_id pci_id_qede_map[] = {\n+#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)\n+\t{\n+\t\tQEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)\n+\t},\n+\t{\n+\t\tQEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)\n+\t},\n+\t{\n+\t\tQEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)\n+\t},\n+\t{\n+\t\tQEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)\n+\t},\n+\t{.vendor_id = 0,}\n+};\n+\n+static struct eth_driver rte_qedevf_pmd = {\n+\t.pci_drv = {\n+\t\t    .name = \"rte_qedevf_pmd\",\n+\t\t    .id_table = pci_id_qedevf_map,\n+\t\t    .drv_flags =\n+\t\t    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,\n+\t\t    },\n+\t.eth_dev_init = qedevf_eth_dev_init,\n+\t.eth_dev_uninit = qedevf_eth_dev_uninit,\n+\t.dev_private_size = sizeof(struct qede_dev),\n+};\n+\n+static struct eth_driver rte_qede_pmd = {\n+\t.pci_drv = {\n+\t\t    .name = \"rte_qede_pmd\",\n+\t\t    .id_table = pci_id_qede_map,\n+\t\t    .drv_flags =\n+\t\t    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,\n+\t\t    },\n+\t.eth_dev_init = qede_eth_dev_init,\n+\t.eth_dev_uninit = qede_eth_dev_uninit,\n+\t.dev_private_size = sizeof(struct qede_dev),\n+};\n+\n+static int\n+rte_qedevf_pmd_init(const char *name __rte_unused,\n+\t\t    const char *params __rte_unused)\n+{\n+\trte_eth_driver_register(&rte_qedevf_pmd);\n+\n+\treturn 0;\n+}\n+\n+static int\n+rte_qede_pmd_init(const char *name __rte_unused,\n+\t\t  const char *params 
__rte_unused)\n+{\n+\trte_eth_driver_register(&rte_qede_pmd);\n+\n+\treturn 0;\n+}\n+\n+static struct rte_driver rte_qedevf_driver = {\n+\t.type = PMD_PDEV,\n+\t.init = rte_qede_pmd_init\n+};\n+\n+static struct rte_driver rte_qede_driver = {\n+\t.type = PMD_PDEV,\n+\t.init = rte_qedevf_pmd_init\n+};\n+\n+PMD_REGISTER_DRIVER(rte_qede_driver);\n+PMD_REGISTER_DRIVER(rte_qedevf_driver);\ndiff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h\nnew file mode 100644\nindex 0000000..3d90b23\n--- /dev/null\n+++ b/drivers/net/qede/qede_ethdev.h\n@@ -0,0 +1,155 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+\n+#ifndef _QEDE_ETHDEV_H_\n+#define _QEDE_ETHDEV_H_\n+\n+#include <rte_ether.h>\n+#include <rte_ethdev.h>\n+#include <rte_dev.h>\n+\n+/* ecore includes */\n+#include \"base/bcm_osal.h\"\n+#include \"base/ecore.h\"\n+#include \"base/ecore_dev_api.h\"\n+#include \"base/ecore_sp_api.h\"\n+#include \"base/ecore_mcp_api.h\"\n+#include \"base/ecore_hsi_common.h\"\n+#include \"base/ecore_int_api.h\"\n+#include \"base/ecore_chain.h\"\n+#include \"base/ecore_status.h\"\n+#include \"base/ecore_hsi_eth.h\"\n+#include \"base/ecore_dev_api.h\"\n+\n+#include \"qede_logs.h\"\n+#include \"qede_if.h\"\n+#include \"qede_eth_if.h\"\n+\n+#include \"qede_rxtx.h\"\n+\n+#define qede_stringify1(x...)\t\t#x\n+#define qede_stringify(x...)\t\tqede_stringify1(x)\n+\n+/* Driver versions */\n+#define QEDE_PMD_VER_PREFIX\t\t\"QEDE PMD\"\n+#define QEDE_PMD_VERSION_MAJOR\t\t1\n+#define QEDE_PMD_VERSION_MINOR\t\t0\n+#define QEDE_PMD_VERSION_PATCH\t\t0\n+\n+#define QEDE_MAJOR_VERSION\t\t8\n+#define QEDE_MINOR_VERSION\t\t7\n+#define QEDE_REVISION_VERSION\t\t9\n+#define QEDE_ENGINEERING_VERSION\t0\n+\n+#define QEDE_DRV_MODULE_VERSION qede_stringify(QEDE_MAJOR_VERSION) \".\"\t\\\n+\t\tqede_stringify(QEDE_MINOR_VERSION) \".\"\t\t\t\\\n+\t\tqede_stringify(QEDE_REVISION_VERSION) \".\"\t\t\\\n+\t\tqede_stringify(QEDE_ENGINEERING_VERSION)\n+\n+#define QEDE_RSS_INDIR_INITED     (1 << 0)\n+#define QEDE_RSS_KEY_INITED       (1 << 1)\n+#define QEDE_RSS_CAPS_INITED      (1 << 2)\n+\n+#define QEDE_MAX_RSS_CNT(edev)  ((edev)->dev_info.num_queues)\n+#define QEDE_MAX_TSS_CNT(edev)  ((edev)->dev_info.num_queues * \\\n+\t\t\t\t\t(edev)->dev_info.num_tc)\n+\n+#define QEDE_RSS_CNT(edev)\t((edev)->num_rss)\n+#define QEDE_TSS_CNT(edev)\t((edev)->num_rss * (edev)->num_tc)\n+\n+#define QEDE_DUPLEX_FULL\t1\n+#define QEDE_DUPLEX_HALF\t2\n+#define QEDE_DUPLEX_UNKNOWN     0xff\n+\n+#define QEDE_SUPPORTED_AUTONEG (1 << 6)\n+#define QEDE_SUPPORTED_PAUSE   (1 << 13)\n+\n+#define QEDE_INIT_QDEV(eth_dev) (eth_dev->data->dev_private)\n+\n+#define QEDE_INIT_EDEV(adapter) (&((struct qede_dev *)adapter)->edev)\n+\n+#define QEDE_INIT(eth_dev) {\t\t\t\t\t\\\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\t\\\n+\tstruct ecore_dev *edev = &qdev->edev;\t\t\t\\\n+}\n+\n+/************* QLogic 25G/40G vendor/devices ids *************/\n+#define PCI_VENDOR_ID_QLOGIC            0x1077\n+\n+#define CHIP_NUM_57980E                 0x1634\n+#define CHIP_NUM_57980S                 0x1629\n+#define CHIP_NUM_VF                     0x1630\n+#define CHIP_NUM_57980S_40              0x1634\n+#define CHIP_NUM_57980S_25              0x1656\n+#define CHIP_NUM_57980S_IOV             0x1664\n+\n+#define PCI_DEVICE_ID_NX2_57980E        CHIP_NUM_57980E\n+#define PCI_DEVICE_ID_NX2_57980S        CHIP_NUM_57980S\n+#define 
PCI_DEVICE_ID_NX2_VF            CHIP_NUM_VF\n+#define PCI_DEVICE_ID_57980S_40         CHIP_NUM_57980S_40\n+#define PCI_DEVICE_ID_57980S_25         CHIP_NUM_57980S_25\n+#define PCI_DEVICE_ID_57980S_IOV        CHIP_NUM_57980S_IOV\n+\n+extern const char *QEDE_FW_FILE_NAME;\n+\n+/* Port/function states */\n+enum dev_state {\n+\tQEDE_START,\n+\tQEDE_STOP,\n+\tQEDE_CLOSE\n+};\n+\n+struct qed_int_param {\n+\tuint32_t int_mode;\n+\tuint8_t num_vectors;\n+\tuint8_t min_msix_cnt;\n+};\n+\n+struct qed_int_params {\n+\tstruct qed_int_param in;\n+\tstruct qed_int_param out;\n+\tbool fp_initialized;\n+};\n+\n+/*\n+ *  Structure to store private data for each port.\n+ */\n+struct qede_dev {\n+\tstruct ecore_dev edev;\n+\tuint8_t protocol;\n+\tconst struct qed_eth_ops *ops;\n+\tstruct qed_dev_eth_info dev_info;\n+\tstruct ecore_sb_info *sb_array;\n+\tstruct qede_fastpath *fp_array;\n+\tuint16_t num_rss;\n+\tuint8_t num_tc;\n+\tuint16_t mtu;\n+\tuint32_t rss_params_inited;\n+\tstruct qed_update_vport_rss_params rss_params;\n+\tuint32_t flags;\n+\tbool gro_disable;\n+\tstruct qede_rx_queue **rx_queues;\n+\tstruct qede_tx_queue **tx_queues;\n+\tenum dev_state state;\n+\n+\t/* Vlans */\n+\tosal_list_t vlan_list;\n+\tuint16_t configured_vlans;\n+\tuint16_t non_configured_vlans;\n+\tbool accept_any_vlan;\n+\tuint16_t vxlan_dst_port;\n+\n+\tbool handle_hw_err;\n+\tchar drv_ver[QED_DRV_VER_STR_SIZE];\n+};\n+\n+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);\n+void qede_config_rx_mode(struct rte_eth_dev *eth_dev);\n+\n+#endif /* _QEDE_ETHDEV_H_ */\ndiff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h\nnew file mode 100644\nindex 0000000..935eed8\n--- /dev/null\n+++ b/drivers/net/qede/qede_if.h\n@@ -0,0 +1,155 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#ifndef _QEDE_IF_H\n+#define _QEDE_IF_H\n+\n+#include \"qede_ethdev.h\"\n+\n+/* forward */\n+struct ecore_dev;\n+struct qed_sb_info;\n+struct qed_pf_params;\n+enum ecore_int_mode;\n+\n+struct qed_dev_info {\n+\tuint8_t num_hwfns;\n+\tuint8_t hw_mac[ETHER_ADDR_LEN];\n+\tbool is_mf_default;\n+\n+\t/* FW version */\n+\tuint16_t fw_major;\n+\tuint16_t fw_minor;\n+\tuint16_t fw_rev;\n+\tuint16_t fw_eng;\n+\n+\t/* MFW version */\n+\tuint32_t mfw_rev;\n+\n+\tuint32_t flash_size;\n+\tuint8_t mf_mode;\n+\tbool tx_switching;\n+\t/* To be added... 
*/\n+};\n+\n+enum qed_sb_type {\n+\tQED_SB_TYPE_L2_QUEUE,\n+\tQED_SB_TYPE_STORAGE,\n+\tQED_SB_TYPE_CNQ,\n+};\n+\n+enum qed_protocol {\n+\tQED_PROTOCOL_ETH,\n+};\n+\n+struct qed_link_params {\n+\tbool link_up;\n+\n+#define QED_LINK_OVERRIDE_SPEED_AUTONEG         (1 << 0)\n+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      (1 << 1)\n+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    (1 << 2)\n+#define QED_LINK_OVERRIDE_PAUSE_CONFIG          (1 << 3)\n+\tuint32_t override_flags;\n+\tbool autoneg;\n+\tuint32_t adv_speeds;\n+\tuint32_t forced_speed;\n+#define QED_LINK_PAUSE_AUTONEG_ENABLE           (1 << 0)\n+#define QED_LINK_PAUSE_RX_ENABLE                (1 << 1)\n+#define QED_LINK_PAUSE_TX_ENABLE                (1 << 2)\n+\tuint32_t pause_config;\n+};\n+\n+struct qed_link_output {\n+\tbool link_up;\n+\tuint32_t supported_caps;\t/* In SUPPORTED defs */\n+\tuint32_t advertised_caps;\t/* In ADVERTISED defs */\n+\tuint32_t lp_caps;\t/* In ADVERTISED defs */\n+\tuint32_t speed;\t\t/* In Mb/s */\n+\tuint8_t duplex;\t\t/* In DUPLEX defs */\n+\tuint8_t port;\t\t/* In PORT defs */\n+\tbool autoneg;\n+\tuint32_t pause_config;\n+};\n+\n+#define QED_DRV_VER_STR_SIZE 80\n+struct qed_slowpath_params {\n+\tuint32_t int_mode;\n+\tuint8_t drv_major;\n+\tuint8_t drv_minor;\n+\tuint8_t drv_rev;\n+\tuint8_t drv_eng;\n+\tuint8_t name[QED_DRV_VER_STR_SIZE];\n+};\n+\n+#define ILT_PAGE_SIZE_TCFC 0x8000\t/* 32KB */\n+\n+struct qed_common_cb_ops {\n+\tvoid (*link_update)(void *dev, struct qed_link_output *link);\n+};\n+\n+struct qed_selftest_ops {\n+/**\n+ * @brief registers - Perform register tests\n+ *\n+ * @param edev\n+ *\n+ * @return 0 on success, error otherwise.\n+ */\n+\tint (*registers)(struct ecore_dev *edev);\n+};\n+\n+struct qed_common_ops {\n+\tint (*probe)(struct ecore_dev *edev,\n+\t\t     struct rte_pci_device *pci_dev,\n+\t\t     enum qed_protocol protocol,\n+\t\t     uint32_t dp_module, uint8_t dp_level, bool is_vf);\n+\tvoid (*set_id)(struct ecore_dev *edev,\n+\t\tchar name[], const char ver_str[]);\n+\tenum _ecore_status_t (*chain_alloc)(struct ecore_dev *edev,\n+\t\t\t\t\t    enum ecore_chain_use_mode\n+\t\t\t\t\t    intended_use,\n+\t\t\t\t\t    enum ecore_chain_mode mode,\n+\t\t\t\t\t    enum ecore_chain_cnt_type cnt_type,\n+\t\t\t\t\t    uint32_t num_elems,\n+\t\t\t\t\t    osal_size_t elem_size,\n+\t\t\t\t\t    struct ecore_chain *p_chain);\n+\n+\tvoid (*chain_free)(struct ecore_dev *edev,\n+\t\t\t   struct ecore_chain *p_chain);\n+\n+\tvoid (*get_link)(struct ecore_dev *edev,\n+\t\t\t struct qed_link_output *if_link);\n+\tint (*set_link)(struct ecore_dev *edev,\n+\t\t\tstruct qed_link_params *params);\n+\n+\tint (*drain)(struct ecore_dev *edev);\n+\n+\tvoid (*remove)(struct ecore_dev *edev);\n+\n+\tint (*slowpath_stop)(struct ecore_dev *edev);\n+\n+\tvoid (*update_pf_params)(struct ecore_dev *edev,\n+\t\t\t\t struct ecore_pf_params *params);\n+\n+\tint (*slowpath_start)(struct ecore_dev *edev,\n+\t\t\t      struct qed_slowpath_params *params);\n+\n+\tint (*set_fp_int)(struct ecore_dev *edev, uint16_t cnt);\n+\n+\tuint32_t (*sb_init)(struct ecore_dev *edev,\n+\t\t\t    struct ecore_sb_info *sb_info,\n+\t\t\t    void *sb_virt_addr,\n+\t\t\t    dma_addr_t sb_phy_addr,\n+\t\t\t    uint16_t sb_id, enum qed_sb_type type);\n+\n+\tbool (*can_link_change)(struct ecore_dev *edev);\n+\tvoid (*update_msglvl)(struct ecore_dev *edev,\n+\t\t\t      uint32_t dp_module, uint8_t dp_level);\n+};\n+\n+#endif /* _QEDE_IF_H */\ndiff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h\nnew file mode 
100644\nindex 0000000..46a54e1\n--- /dev/null\n+++ b/drivers/net/qede/qede_logs.h\n@@ -0,0 +1,93 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#ifndef _QEDE_LOGS_H_\n+#define _QEDE_LOGS_H_\n+\n+#define DP_ERR(p_dev, fmt, ...) \\\n+\trte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, \\\n+\t\t\"[%s:%d(%s)]\" fmt, \\\n+\t\t  __func__, __LINE__, \\\n+\t\t(p_dev)->name ? (p_dev)->name : \"\", \\\n+\t\t##__VA_ARGS__)\n+\n+#define DP_NOTICE(p_dev, is_assert, fmt, ...) \\\n+do {  \\\n+\trte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\\\n+\t\t\"[QEDE PMD: (%s)]%s:\" fmt, \\\n+\t\t(p_dev)->name ? (p_dev)->name : \"\", \\\n+\t\t __func__, \\\n+\t\t##__VA_ARGS__); \\\n+\tOSAL_ASSERT(!is_assert); \\\n+} while (0)\n+\n+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO\n+\n+#define DP_INFO(p_dev, fmt, ...) \\\n+\trte_log(RTE_LOG_INFO, RTE_LOGTYPE_PMD, \\\n+\t\t\"[%s:%d(%s)]\" fmt, \\\n+\t\t__func__, __LINE__, \\\n+\t\t(p_dev)->name ? (p_dev)->name : \"\", \\\n+\t\t##__VA_ARGS__)\n+#else\n+#define DP_INFO(p_dev, fmt, ...) do { } while (0)\n+\n+#endif\n+\n+#ifdef RTE_LIBRTE_QEDE_DEBUG_ECORE\n+#define DP_VERBOSE(p_dev, module, fmt, ...) \\\n+do { \\\n+\tif ((p_dev)->dp_module & module) \\\n+\t\trte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_PMD, \\\n+\t\t\t\"[%s:%d(%s)]\" fmt, \\\n+\t\t      __func__, __LINE__, \\\n+\t\t      (p_dev)->name ? (p_dev)->name : \"\", \\\n+\t\t      ##__VA_ARGS__); \\\n+} while (0)\n+#else\n+#define DP_VERBOSE(p_dev, fmt, ...) do { } while (0)\n+#endif\n+\n+#define PMD_INIT_LOG(level, edev, fmt, args...)\t\\\n+\trte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \\\n+\t\t\"[qede_pmd: %s] %s() \" fmt \"\\n\", \\\n+\t(edev)->name, __func__, ##args)\n+\n+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT\n+#define PMD_INIT_FUNC_TRACE(edev) PMD_INIT_LOG(DEBUG, edev, \" >>\")\n+#else\n+#define PMD_INIT_FUNC_TRACE(edev) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX\n+#define PMD_TX_LOG(level, q, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): port=%u queue=%u \" fmt \"\\n\", \\\n+\t\t__func__, q->port_id, q->queue_id, ## args)\n+#else\n+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX\n+#define PMD_RX_LOG(level, q, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): port=%u queue=%u \" fmt \"\\n\",\t\\\n+\t\t__func__, q->port_id, q->queue_id, ## args)\n+#else\n+#define PMD_RX_LOG(level, q, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER\n+#define PMD_DRV_LOG_RAW(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt, __func__, ## args)\n+#else\n+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#define PMD_DRV_LOG(level, fmt, args...) 
\\\n+\tPMD_DRV_LOG_RAW(level, fmt \"\\n\", ## args)\n+\n+#endif /* _QEDE_LOGS_H_ */\ndiff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c\nnew file mode 100644\nindex 0000000..7a1b986\n--- /dev/null\n+++ b/drivers/net/qede/qede_main.c\n@@ -0,0 +1,548 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#include <sys/stat.h>\n+#include <fcntl.h>\n+#include <unistd.h>\n+#include <zlib.h>\n+\n+#include \"qede_ethdev.h\"\n+\n+\n+static uint8_t npar_tx_switching = 1;\n+\n+#define CONFIG_QED_BINARY_FW\n+\n+#ifdef RTE_LIBRTE_QEDE_TX_SWITCHING\n+static uint8_t tx_switching = 1;\n+#else\n+static uint8_t tx_switching;\n+#endif\n+\n+#ifndef RTE_LIBRTE_QEDE_FW\n+const char *QEDE_FW_FILE_NAME =\n+\t\"/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin\";\n+#else\n+const char *QEDE_FW_FILE_NAME = RTE_LIBRTE_QEDE_FW;\n+#endif\n+\n+static void\n+qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < edev->num_hwfns; i++) {\n+\t\tstruct ecore_hwfn *p_hwfn = &edev->hwfns[i];\n+\t\tp_hwfn->pf_params = *params;\n+\t}\n+}\n+\n+static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)\n+{\n+\tedev->regview = pci_dev->mem_resource[0].addr;\n+\tedev->doorbells = pci_dev->mem_resource[2].addr;\n+}\n+\n+static int\n+qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,\n+\t  enum qed_protocol protocol, uint32_t dp_module,\n+\t  uint8_t dp_level, bool is_vf)\n+{\n+\tstruct qede_dev *qdev = (struct qede_dev *)edev;\n+\tint rc;\n+\n+\tecore_init_struct(edev);\n+\tqdev->protocol = protocol;\n+\tif (is_vf) {\n+\t\tedev->b_is_vf = true;\n+\t\tedev->sriov_info.b_hw_channel = true;\n+\t}\n+\tecore_init_dp(edev, dp_module, dp_level, NULL);\n+\tqed_init_pci(edev, pci_dev);\n+\trc = ecore_hw_prepare(edev, ECORE_PCI_DEFAULT);\n+\tif (rc) {\n+\t\tDP_ERR(edev, \"hw prepare failed\\n\");\n+\t\treturn rc;\n+\t}\n+\n+\treturn rc;\n+}\n+\n+static int qed_nic_setup(struct ecore_dev *edev)\n+{\n+\tint rc, i;\n+\n+\trc = ecore_resc_alloc(edev);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tDP_INFO(edev, \"Allocated qed resources\\n\");\n+\tecore_resc_setup(edev);\n+\n+\treturn rc;\n+}\n+\n+static int qed_alloc_stream_mem(struct ecore_dev *edev)\n+{\n+\tint i;\n+\n+\tfor_each_hwfn(edev, i) {\n+\t\tstruct ecore_hwfn *p_hwfn = &edev->hwfns[i];\n+\n+\t\tp_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,\n+\t\t\t\t\t     sizeof(*p_hwfn->stream));\n+\t\tif (!p_hwfn->stream)\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void qed_free_stream_mem(struct ecore_dev *edev)\n+{\n+\tint i;\n+\n+\tfor_each_hwfn(edev, i) {\n+\t\tstruct ecore_hwfn *p_hwfn = &edev->hwfns[i];\n+\n+\t\tif (!p_hwfn->stream)\n+\t\t\treturn;\n+\n+\t\tOSAL_FREE(p_hwfn->p_dev, p_hwfn->stream);\n+\t}\n+}\n+\n+static int qed_load_firmware_data(struct ecore_dev *edev)\n+{\n+\tint fd;\n+\tstruct stat st;\n+\n+\tfd = open(QEDE_FW_FILE_NAME, O_RDONLY);\n+\tif (fd < 0) {\n+\t\tDP_NOTICE(edev, false, \"Can't open firmware file\\n\");\n+\t\treturn -ENOENT;\n+\t}\n+\n+\tif (fstat(fd, &st) < 0) {\n+\t\tDP_NOTICE(edev, false, \"Can't stat firmware file\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tedev->firmware = rte_zmalloc(\"qede_fw\", st.st_size,\n+\t\t\t\t    RTE_CACHE_LINE_SIZE);\n+\tif (!edev->firmware) {\n+\t\tDP_NOTICE(edev, false, \"Can't allocate memory for firmware\\n\");\n+\t\tclose(fd);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (read(fd, 
edev->firmware, st.st_size) != st.st_size) {\n+\t\tDP_NOTICE(edev, false, \"Can't read firmware data\\n\");\n+\t\tclose(fd);\n+\t\treturn -1;\n+\t}\n+\n+\tedev->fw_len = st.st_size;\n+\tif (edev->fw_len < 104) {\n+\t\tDP_NOTICE(edev, false, \"Invalid fw size: %\" PRIu64\"\\n\",\n+\t\t\t  edev->fw_len);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int qed_slowpath_start(struct ecore_dev *edev,\n+\t\t\t      struct qed_slowpath_params *params)\n+{\n+\tbool allow_npar_tx_switching;\n+\tconst uint8_t *data = NULL;\n+\tstruct ecore_hwfn *hwfn;\n+\tstruct ecore_mcp_drv_version drv_version;\n+\tstruct qede_dev *qdev = (struct qede_dev *)edev;\n+\tint rc;\n+#ifdef QED_ENC_SUPPORTED\n+\tstruct ecore_tunn_start_params tunn_info;\n+#endif\n+\n+#ifdef CONFIG_QED_BINARY_FW\n+\trc = qed_load_firmware_data(edev);\n+\tif (rc) {\n+\t\tDP_NOTICE(edev, true,\n+\t\t\t  \"Failed to find fw file %s\\n\",\n+\t\t\t  QEDE_FW_FILE_NAME);\n+\t\tgoto err;\n+\t}\n+#endif\n+\n+\trc = qed_nic_setup(edev);\n+\tif (rc)\n+\t\tgoto err;\n+\n+\t/* set int_coalescing_mode */\n+\tedev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;\n+\n+\t/* Should go with CONFIG_QED_BINARY_FW */\n+\t/* Allocate stream for unzipping */\n+\trc = qed_alloc_stream_mem(edev);\n+\tif (rc) {\n+\t\tDP_NOTICE(edev, true,\n+\t\t\"Failed to allocate stream memory\\n\");\n+\t\tgoto err2;\n+\t}\n+\n+\t/* Start the slowpath */\n+#ifdef CONFIG_QED_BINARY_FW\n+\tdata = edev->firmware;\n+#endif\n+\tallow_npar_tx_switching = npar_tx_switching ? true : false;\n+\n+#ifdef QED_ENC_SUPPORTED\n+\tmemset(&tunn_info, 0, sizeof(tunn_info));\n+\ttunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |\n+\t    1 << QED_MODE_L2GRE_TUNN |\n+\t    1 << QED_MODE_IPGRE_TUNN |\n+\t    1 << QED_MODE_L2GENEVE_TUNN | 1 << QED_MODE_IPGENEVE_TUNN;\n+\ttunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;\n+\ttunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;\n+\ttunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;\n+\trc = ecore_hw_init(edev, &tunn_info, true, ECORE_INT_MODE_MSIX,\n+\t\t\t   allow_npar_tx_switching, data);\n+#else\n+\trc = ecore_hw_init(edev, NULL, true, ECORE_INT_MODE_MSIX,\n+\t\t\t   allow_npar_tx_switching, data);\n+#endif\n+\tif (rc) {\n+\t\tDP_ERR(edev, \"ecore_hw_init failed\\n\");\n+\t\tgoto err2;\n+\t}\n+\n+\tDP_INFO(edev, \"HW inited and function started\\n\");\n+\n+\thwfn = ECORE_LEADING_HWFN(edev);\n+\tdrv_version.version = (params->drv_major << 24) |\n+\t\t    (params->drv_minor << 16) |\n+\t\t    (params->drv_rev << 8) | (params->drv_eng);\n+\t/* TBD: strlcpy() */\n+\tstrncpy((char *)drv_version.name, (const char *)params->name,\n+\t\t\tMCP_DRV_VER_STR_SIZE - 4);\n+\trc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,\n+\t\t\t\t\t\t&drv_version);\n+\tif (rc) {\n+\t\tDP_NOTICE(edev, true,\n+\t\t\t  \"Failed sending drv version command\\n\");\n+\t\treturn rc;\n+\t}\n+\n+\treturn 0;\n+\n+\tecore_hw_stop(edev);\n+err2:\n+\tecore_resc_free(edev);\n+err:\n+#ifdef CONFIG_QED_BINARY_FW\n+\tif (edev->firmware)\n+\t\trte_free(edev->firmware);\n+\tedev->firmware = NULL;\n+#endif\n+\treturn rc;\n+}\n+\n+static int\n+qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)\n+{\n+\tstruct ecore_ptt *ptt = NULL;\n+\n+\tmemset(dev_info, 0, sizeof(struct qed_dev_info));\n+\tdev_info->num_hwfns = edev->num_hwfns;\n+\tdev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);\n+\trte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,\n+\t       ETHER_ADDR_LEN);\n+\n+\tdev_info->fw_major = FW_MAJOR_VERSION;\n+\tdev_info->fw_minor = 
FW_MINOR_VERSION;\n+\tdev_info->fw_rev = FW_REVISION_VERSION;\n+\tdev_info->fw_eng = FW_ENGINEERING_VERSION;\n+\tdev_info->mf_mode = edev->mf_mode;\n+\tdev_info->tx_switching = tx_switching ? true : false;\n+\n+\tptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));\n+\tif (ptt) {\n+\t\tecore_mcp_get_mfw_ver(edev, ptt,\n+\t\t\t\t\t      &dev_info->mfw_rev, NULL);\n+\n+\t\tecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,\n+\t\t\t\t\t\t &dev_info->flash_size);\n+\n+\t\t/* Workaround to allow PHY-read commands for\n+\t\t * B0 bringup.\n+\t\t */\n+\t\tif (ECORE_IS_BB_B0(edev))\n+\t\t\tdev_info->flash_size = 0xffffffff;\n+\n+\t\tecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)\n+{\n+\tstruct qede_dev *qdev = (struct qede_dev *)edev;\n+\tint i;\n+\n+\tmemset(info, 0, sizeof(*info));\n+\n+\tinfo->num_tc = 1 /* @@@TBD aelior MULTI_COS */;\n+\n+\tinfo->num_queues = 0;\n+\tfor_each_hwfn(edev, i)\n+\t\t    info->num_queues +=\n+\t\t    FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);\n+\n+\tinfo->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN);\n+\n+\trte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,\n+\t\t\t   ETHER_ADDR_LEN);\n+\n+\tqed_fill_dev_info(edev, &info->common);\n+\n+\treturn 0;\n+}\n+\n+static void\n+qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],\n+\t   const char ver_str[VER_SIZE])\n+{\n+\tint i;\n+\n+\trte_memcpy(edev->name, name, NAME_SIZE);\n+\tfor_each_hwfn(edev, i) {\n+\t\tsnprintf(edev->hwfns[i].name, NAME_SIZE, \"%s-%d\", name, i);\n+\t}\n+\trte_memcpy(edev->ver_str, ver_str, VER_SIZE);\n+\tedev->drv_type = DRV_ID_DRV_TYPE_LINUX;\n+}\n+\n+static uint32_t\n+qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,\n+\t    void *sb_virt_addr, dma_addr_t sb_phy_addr,\n+\t    uint16_t sb_id, enum qed_sb_type type)\n+{\n+\tstruct ecore_hwfn *p_hwfn;\n+\tint hwfn_index;\n+\tuint16_t rel_sb_id;\n+\tuint8_t n_hwfns;\n+\tuint32_t rc;\n+\n+\t/* RoCE uses single engine and CMT uses two engines. When using both\n+\t * we force only a single engine. 
Storage uses only engine 0 too.\n+\t */\n+\tif (type == QED_SB_TYPE_L2_QUEUE)\n+\t\tn_hwfns = edev->num_hwfns;\n+\telse\n+\t\tn_hwfns = 1;\n+\n+\thwfn_index = sb_id % n_hwfns;\n+\tp_hwfn = &edev->hwfns[hwfn_index];\n+\trel_sb_id = sb_id / n_hwfns;\n+\n+\tDP_INFO(edev, \"hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\\n\",\n+\t\thwfn_index, rel_sb_id, sb_id);\n+\n+\trc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,\n+\t\t\t       sb_virt_addr, sb_phy_addr, rel_sb_id);\n+\n+\treturn rc;\n+}\n+\n+static void qed_fill_link(struct ecore_hwfn *hwfn,\n+\t\t\t  struct qed_link_output *if_link)\n+{\n+\tstruct ecore_mcp_link_params params;\n+\tstruct ecore_mcp_link_state link;\n+\tstruct ecore_mcp_link_capabilities link_caps;\n+\tuint32_t media_type;\n+\tuint8_t change = 0;\n+\n+\tmemset(if_link, 0, sizeof(*if_link));\n+\n+\t/* Prepare source inputs */\n+\trte_memcpy(&params, ecore_mcp_get_link_params(hwfn),\n+\t\t       sizeof(params));\n+\trte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));\n+\trte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),\n+\t\t       sizeof(link_caps));\n+\n+\t/* Set the link parameters to pass to protocol driver */\n+\tif (link.link_up)\n+\t\tif_link->link_up = true;\n+\n+\tif (link.link_up)\n+\t\tif_link->speed = link.speed;\n+\n+\tif_link->duplex = QEDE_DUPLEX_FULL;\n+\n+\tif (params.speed.autoneg)\n+\t\tif_link->supported_caps |= QEDE_SUPPORTED_AUTONEG;\n+\n+\tif (params.pause.autoneg || params.pause.forced_rx ||\n+\t    params.pause.forced_tx)\n+\t\tif_link->supported_caps |= QEDE_SUPPORTED_PAUSE;\n+\n+\tif (params.pause.autoneg)\n+\t\tif_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;\n+\n+\tif (params.pause.forced_rx)\n+\t\tif_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;\n+\n+\tif (params.pause.forced_tx)\n+\t\tif_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;\n+}\n+\n+static void\n+qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)\n+{\n+\tqed_fill_link(&edev->hwfns[0], if_link);\n+\n+#ifdef CONFIG_QED_SRIOV\n+\tfor_each_hwfn(cdev, i)\n+\t\tqed_inform_vf_link_state(&cdev->hwfns[i]);\n+#endif\n+}\n+\n+static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)\n+{\n+\tstruct ecore_hwfn *hwfn;\n+\tstruct ecore_ptt *ptt;\n+\tstruct ecore_mcp_link_params *link_params;\n+\tint rc;\n+\n+\t/* The link should be set only once per PF */\n+\thwfn = &edev->hwfns[0];\n+\n+\tptt = ecore_ptt_acquire(hwfn);\n+\tif (!ptt)\n+\t\treturn -EBUSY;\n+\n+\tlink_params = ecore_mcp_get_link_params(hwfn);\n+\tif (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)\n+\t\tlink_params->speed.autoneg = params->autoneg;\n+\n+\tif (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {\n+\t\tif (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)\n+\t\t\tlink_params->pause.autoneg = true;\n+\t\telse\n+\t\t\tlink_params->pause.autoneg = false;\n+\t\tif (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)\n+\t\t\tlink_params->pause.forced_rx = true;\n+\t\telse\n+\t\t\tlink_params->pause.forced_rx = false;\n+\t\tif (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)\n+\t\t\tlink_params->pause.forced_tx = true;\n+\t\telse\n+\t\t\tlink_params->pause.forced_tx = false;\n+\t}\n+\n+\trc = ecore_mcp_set_link(hwfn, ptt, params->link_up);\n+\n+\tecore_ptt_release(hwfn, ptt);\n+\n+\treturn rc;\n+}\n+\n+static int qed_drain(struct ecore_dev *edev)\n+{\n+\tstruct ecore_hwfn *hwfn;\n+\tstruct ecore_ptt *ptt;\n+\tint i, rc;\n+\n+\tfor_each_hwfn(edev, i) {\n+\t\thwfn = &edev->hwfns[i];\n+\t\tptt = 
ecore_ptt_acquire(hwfn);\n+\t\tif (!ptt) {\n+\t\t\tDP_NOTICE(hwfn, true, \"Failed to drain NIG; No PTT\\n\");\n+\t\t\treturn -EBUSY;\n+\t\t}\n+\t\trc = ecore_mcp_drain(hwfn, ptt);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t\tecore_ptt_release(hwfn, ptt);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int qed_nic_stop(struct ecore_dev *edev)\n+{\n+\tint i, rc;\n+\n+\trc = ecore_hw_stop(edev);\n+\tfor (i = 0; i < edev->num_hwfns; i++) {\n+\t\tstruct ecore_hwfn *p_hwfn = &edev->hwfns[i];\n+\n+\t\tif (p_hwfn->b_sp_dpc_enabled)\n+\t\t\tp_hwfn->b_sp_dpc_enabled = false;\n+\t}\n+\treturn rc;\n+}\n+\n+static int qed_nic_reset(struct ecore_dev *edev)\n+{\n+\tint rc;\n+\n+\trc = ecore_hw_reset(edev);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tecore_resc_free(edev);\n+\n+\treturn 0;\n+}\n+\n+static int qed_slowpath_stop(struct ecore_dev *edev)\n+{\n+#ifdef CONFIG_QED_SRIOV\n+\tint i;\n+#endif\n+\n+\tif (!edev)\n+\t\treturn -ENODEV;\n+\n+\tqed_free_stream_mem(edev);\n+\n+\tqed_nic_stop(edev);\n+\n+\tqed_nic_reset(edev);\n+\n+\treturn 0;\n+}\n+\n+static void qed_remove(struct ecore_dev *edev)\n+{\n+\tif (!edev)\n+\t\treturn;\n+\n+\tecore_hw_remove(edev);\n+}\n+\n+const struct qed_common_ops qed_common_ops_pass = {\n+\tINIT_STRUCT_FIELD(probe, &qed_probe),\n+\tINIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),\n+\tINIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),\n+\tINIT_STRUCT_FIELD(set_id, &qed_set_id),\n+\tINIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),\n+\tINIT_STRUCT_FIELD(chain_free, &ecore_chain_free),\n+\tINIT_STRUCT_FIELD(sb_init, &qed_sb_init),\n+\tINIT_STRUCT_FIELD(get_link, &qed_get_current_link),\n+\tINIT_STRUCT_FIELD(set_link, &qed_set_link),\n+\tINIT_STRUCT_FIELD(drain, &qed_drain),\n+\tINIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),\n+\tINIT_STRUCT_FIELD(remove, &qed_remove),\n+};\ndiff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c\nnew file mode 100644\nindex 0000000..d0450f7\n--- /dev/null\n+++ b/drivers/net/qede/qede_rxtx.c\n@@ -0,0 +1,1172 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#include \"qede_rxtx.h\"\n+\n+static bool gro_disable = 1;\t/* mod_param */\n+\n+static inline struct\n+rte_mbuf *qede_rxmbuf_alloc(struct rte_mempool *mp)\n+{\n+\tstruct rte_mbuf *m;\n+\n+\tm = __rte_mbuf_raw_alloc(mp);\n+\t__rte_mbuf_sanity_check(m, 0);\n+\n+\treturn m;\n+}\n+\n+static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)\n+{\n+\tstruct rte_mbuf *new_mb = NULL;\n+\tstruct eth_rx_bd *rx_bd;\n+\tdma_addr_t mapping;\n+\tuint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);\n+\n+\tnew_mb = qede_rxmbuf_alloc(rxq->mb_pool);\n+\tif (unlikely(!new_mb)) {\n+\t\tPMD_RX_LOG(ERR, rxq,\n+\t\t\t   \"Failed to allocate rx buffer \"\n+\t\t\t   \"sw_rx_prod %u sw_rx_cons %u mp entries %u free %u\",\n+\t\t\t   idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),\n+\t\t\t   rte_mempool_count(rxq->mb_pool),\n+\t\t\t   rte_mempool_free_count(rxq->mb_pool));\n+\t\treturn -ENOMEM;\n+\t}\n+\trxq->sw_rx_ring[idx].mbuf = new_mb;\n+\trxq->sw_rx_ring[idx].page_offset = 0;\n+\tmapping = new_mb->buf_physaddr;\n+\t/* Advance PROD and get BD pointer */\n+\trx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);\n+\trx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));\n+\trx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));\n+\trxq->sw_rx_prod++;\n+\treturn 0;\n+}\n+\n+static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)\n+{\n+\tuint16_t i;\n+\n+\tif 
(rxq->sw_rx_ring != NULL) {\n+\t\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\t\tif (rxq->sw_rx_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);\n+\t\t\t\trxq->sw_rx_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+void qede_rx_queue_release(void *rx_queue)\n+{\n+\tstruct qede_rx_queue *rxq = rx_queue;\n+\n+\tif (rxq != NULL) {\n+\t\tqede_rx_queue_release_mbufs(rxq);\n+\t\trte_free(rxq->sw_rx_ring);\n+\t\trxq->sw_rx_ring = NULL;\n+\t\trte_free(rxq);\n+\t\trx_queue = NULL;\n+\t}\n+}\n+\n+static uint16_t qede_set_rx_buf_size(struct rte_mempool *mp, uint16_t len)\n+{\n+\tuint16_t data_size;\n+\tuint16_t buf_size;\n+\n+\tdata_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;\n+\tbuf_size = RTE_MAX(len, data_size);\n+\treturn buf_size + QEDE_ETH_OVERHEAD;\n+}\n+\n+int\n+qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_rxconf *rx_conf,\n+\t\t    struct rte_mempool *mp)\n+{\n+\tstruct qede_dev *qdev = dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct rte_eth_dev_data *eth_data = dev->data;\n+\tstruct qede_rx_queue *rxq;\n+\tuint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;\n+\tsize_t size;\n+\tint rc;\n+\tint i;\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\t/* Note: Ring size/align is controlled by struct rte_eth_desc_lim */\n+\tif (!rte_is_power_of_2(nb_desc)) {\n+\t\tDP_NOTICE(edev, false, \"Ring size %u is not power of 2\\n\",\n+\t\t\t  nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Free memory prior to re-allocation if needed... */\n+\tif (dev->data->rx_queues[queue_idx] != NULL) {\n+\t\tqede_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* First allocate the rx queue data structure */\n+\trxq = rte_zmalloc_socket(\"qede_rx_queue\", sizeof(struct qede_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\n+\tif (!rxq) {\n+\t\tDP_NOTICE(edev, false,\n+\t\t\t  \"Unable to allocate memory for rxq on socket %u\",\n+\t\t\t  socket_id);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq->qdev = qdev;\n+\trxq->mb_pool = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->queue_id = queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\n+\trxq->rx_buf_size = qede_set_rx_buf_size(mp, pkt_len);\n+\tif (pkt_len > ETHER_MAX_LEN) {\n+\t\tdev->data->dev_conf.rxmode.jumbo_frame = 1;\n+\t\tDP_NOTICE(edev, false, \"jumbo frame enabled\\n\");\n+\t} else {\n+\t\tdev->data->dev_conf.rxmode.jumbo_frame = 0;\n+\t}\n+\n+\tqdev->mtu = rxq->rx_buf_size;\n+\tDP_INFO(edev, \"rx_buf_size=%u\\n\", qdev->mtu);\n+\n+\t/* Allocate the parallel driver ring for Rx buffers */\n+\tsize = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;\n+\trxq->sw_rx_ring = rte_zmalloc_socket(\"sw_rx_ring\", size,\n+\t\t\t\t\t     RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (!rxq->sw_rx_ring) {\n+\t\tDP_NOTICE(edev, false,\n+\t\t\t  \"Unable to alloc memory for sw_rx_ring on socket %u\\n\",\n+\t\t\t  socket_id);\n+\t\trte_free(rxq);\n+\t\trxq = NULL;\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate FW Rx ring  */\n+\trc = qdev->ops->common->chain_alloc(edev,\n+\t\t\t\t\t    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,\n+\t\t\t\t\t    ECORE_CHAIN_MODE_NEXT_PTR,\n+\t\t\t\t\t    ECORE_CHAIN_CNT_TYPE_U16,\n+\t\t\t\t\t    rxq->nb_rx_desc,\n+\t\t\t\t\t    sizeof(struct eth_rx_bd),\n+\t\t\t\t\t    &rxq->rx_bd_ring);\n+\n+\tif (rc != ECORE_SUCCESS) {\n+\t\tDP_NOTICE(edev, false,\n+\t\t\t  \"Unable to alloc memory for rxbd ring on socket 
%u\\n\",\n+\t\t\t  socket_id);\n+\t\trte_free(rxq->sw_rx_ring);\n+\t\trxq->sw_rx_ring = NULL;\n+\t\trte_free(rxq);\n+\t\trxq = NULL;\n+\t}\n+\n+\t/* Allocate FW completion ring */\n+\trc = qdev->ops->common->chain_alloc(edev,\n+\t\t\t\t\t    ECORE_CHAIN_USE_TO_CONSUME,\n+\t\t\t\t\t    ECORE_CHAIN_MODE_PBL,\n+\t\t\t\t\t    ECORE_CHAIN_CNT_TYPE_U16,\n+\t\t\t\t\t    rxq->nb_rx_desc,\n+\t\t\t\t\t    sizeof(union eth_rx_cqe),\n+\t\t\t\t\t    &rxq->rx_comp_ring);\n+\n+\tif (rc != ECORE_SUCCESS) {\n+\t\tDP_NOTICE(edev, false,\n+\t\t\t  \"Unable to alloc memory for cqe ring on socket %u\\n\",\n+\t\t\t  socket_id);\n+\t\t/* TBD: Freeing RX BD ring */\n+\t\trte_free(rxq->sw_rx_ring);\n+\t\trxq->sw_rx_ring = NULL;\n+\t\trte_free(rxq);\n+\t}\n+\n+\t/* Allocate buffers for the Rx ring */\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\trc = qede_alloc_rx_buffer(rxq);\n+\t\tif (rc) {\n+\t\t\tDP_NOTICE(edev, false,\n+\t\t\t\t  \"RX buffer allocation failed at idx=%d\\n\", i);\n+\t\t\tgoto err4;\n+\t\t}\n+\t}\n+\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\tif (!qdev->rx_queues)\n+\t\tqdev->rx_queues = (struct qede_rx_queue **)dev->data->rx_queues;\n+\n+\tDP_NOTICE(edev, false, \"rxq %d num_desc %u rx_buf_size=%u socket %u\\n\",\n+\t\t  queue_idx, nb_desc, qdev->mtu, socket_id);\n+\n+\treturn 0;\n+err4:\n+\tqede_rx_queue_release(rxq);\n+\treturn -ENOMEM;\n+}\n+\n+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)\n+{\n+\tunsigned i;\n+\n+\tPMD_TX_LOG(DEBUG, txq, \"releasing %u mbufs\\n\", txq->nb_tx_desc);\n+\n+\tif (txq->sw_tx_ring != NULL) {\n+\t\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\t\tif (txq->sw_tx_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);\n+\t\t\t\ttxq->sw_tx_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+void qede_tx_queue_release(void *tx_queue)\n+{\n+\tstruct qede_tx_queue *txq = tx_queue;\n+\n+\tif (txq != NULL) {\n+\t\tqede_tx_queue_release_mbufs(txq);\n+\t\tif (txq->sw_tx_ring) {\n+\t\t\trte_free(txq->sw_tx_ring);\n+\t\t\ttxq->sw_tx_ring = NULL;\n+\t\t}\n+\t\trte_free(txq);\n+\t}\n+\ttx_queue = NULL;\n+}\n+\n+int\n+qede_tx_queue_setup(struct rte_eth_dev *dev,\n+\t\t    uint16_t queue_idx,\n+\t\t    uint16_t nb_desc,\n+\t\t    unsigned int socket_id,\n+\t\t    const struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct qede_dev *qdev = dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct qede_tx_queue *txq;\n+\tint rc;\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\tif (!rte_is_power_of_2(nb_desc)) {\n+\t\tDP_NOTICE(edev, false, \"Ring size %u is not power of 2\\n\",\n+\t\t\t  nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Free memory prior to re-allocation if needed... 
*/\n+\tif (dev->data->tx_queues[queue_idx] != NULL) {\n+\t\tqede_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t}\n+\n+\ttxq = rte_zmalloc_socket(\"qede_tx_queue\", sizeof(struct qede_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\n+\tif (txq == NULL) {\n+\t\tDP_ERR(edev,\n+\t\t       \"Unable to allocate memory for txq on socket %u\",\n+\t\t       socket_id);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->qdev = qdev;\n+\ttxq->port_id = dev->data->port_id;\n+\n+\trc = qdev->ops->common->chain_alloc(edev,\n+\t\t\t\t\t    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,\n+\t\t\t\t\t    ECORE_CHAIN_MODE_PBL,\n+\t\t\t\t\t    ECORE_CHAIN_CNT_TYPE_U16,\n+\t\t\t\t\t    txq->nb_tx_desc,\n+\t\t\t\t\t    sizeof(union eth_tx_bd_types),\n+\t\t\t\t\t    &txq->tx_pbl);\n+\tif (rc != ECORE_SUCCESS) {\n+\t\tDP_ERR(edev,\n+\t\t       \"Unable to allocate memory for txbd ring on socket %u\",\n+\t\t       socket_id);\n+\t\tqede_tx_queue_release(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate software ring */\n+\ttxq->sw_tx_ring = rte_zmalloc_socket(\"txq->sw_tx_ring\",\n+\t\t\t\t\t     (sizeof(struct qede_tx_entry) *\n+\t\t\t\t\t      txq->nb_tx_desc),\n+\t\t\t\t\t     RTE_CACHE_LINE_SIZE, socket_id);\n+\n+\tif (!txq->sw_tx_ring) {\n+\t\tDP_ERR(edev,\n+\t\t       \"Unable to allocate memory for txbd ring on socket %u\",\n+\t\t       socket_id);\n+\t\tqede_tx_queue_release(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->queue_id = queue_idx;\n+\n+\ttxq->nb_tx_avail = txq->nb_tx_desc;\n+\n+\ttxq->tx_free_thresh =\n+\t    tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :\n+\t    (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);\n+\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\tif (!qdev->tx_queues)\n+\t\tqdev->tx_queues = (struct qede_tx_queue **)dev->data->tx_queues;\n+\n+\ttxq->txq_counter = 0;\n+\n+\tDP_NOTICE(edev, false,\n+\t\t  \"txq %u num_desc %u tx_free_thresh %u socket %u\\n\",\n+\t\t  queue_idx, nb_desc, txq->tx_free_thresh, socket_id);\n+\n+\treturn 0;\n+}\n+\n+/* This function inits fp content and resets the SB, RXQ and TXQ arrays */\n+static void qede_init_fp(struct qede_dev *qdev)\n+{\n+\tstruct qede_fastpath *fp;\n+\tint rss_id, txq_index, tc;\n+\n+\tmemset((void *)qdev->fp_array, 0, (QEDE_RSS_CNT(qdev) *\n+\t\t\t\t\t   sizeof(*qdev->fp_array)));\n+\tmemset((void *)qdev->sb_array, 0, (QEDE_RSS_CNT(qdev) *\n+\t\t\t\t\t   sizeof(*qdev->sb_array)));\n+\tfor_each_rss(rss_id) {\n+\t\tfp = &qdev->fp_array[rss_id];\n+\n+\t\tfp->qdev = qdev;\n+\t\tfp->rss_id = rss_id;\n+\n+\t\t/* Point rxq to generic rte queues that was created\n+\t\t * as part of queue creation.\n+\t\t */\n+\t\tfp->rxq = qdev->rx_queues[rss_id];\n+\t\tfp->sb_info = &qdev->sb_array[rss_id];\n+\n+\t\tfor (tc = 0; tc < qdev->num_tc; tc++) {\n+\t\t\ttxq_index = tc * QEDE_RSS_CNT(qdev) + rss_id;\n+\t\t\tfp->txqs[tc] = qdev->tx_queues[txq_index];\n+\t\t\tfp->txqs[tc]->queue_id = txq_index;\n+\t\t\t/* Updating it to main structure */\n+\t\t\tsnprintf(fp->name, sizeof(fp->name), \"%s-fp-%d\",\n+\t\t\t\t \"qdev\", rss_id);\n+\t\t}\n+\t}\n+\n+\tqdev->gro_disable = gro_disable;\n+}\n+\n+void qede_free_fp_arrays(struct qede_dev *qdev)\n+{\n+\t/* It asseumes qede_free_mem_load() is called before */\n+\tif (qdev->fp_array != NULL) {\n+\t\trte_free(qdev->fp_array);\n+\t\tqdev->fp_array = NULL;\n+\t}\n+\n+\tif (qdev->sb_array != NULL) {\n+\t\trte_free(qdev->sb_array);\n+\t\tqdev->sb_array = NULL;\n+\t}\n+}\n+\n+int qede_alloc_fp_array(struct qede_dev *qdev)\n+{\n+\tstruct 
qede_fastpath *fp;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tint i;\n+\n+\tqdev->fp_array = rte_calloc(\"fp\", QEDE_RSS_CNT(qdev),\n+\t\t\t\t    sizeof(*qdev->fp_array),\n+\t\t\t\t    RTE_CACHE_LINE_SIZE);\n+\n+\tif (!qdev->fp_array) {\n+\t\tDP_NOTICE(edev, true, \"fp array allocation failed\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tqdev->sb_array = rte_calloc(\"sb\", QEDE_RSS_CNT(qdev),\n+\t\t\t\t    sizeof(*qdev->sb_array),\n+\t\t\t\t    RTE_CACHE_LINE_SIZE);\n+\n+\tif (!qdev->sb_array) {\n+\t\tDP_NOTICE(edev, true, \"sb array allocation failed\\n\");\n+\t\trte_free(qdev->fp_array);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* This function allocates fast-path status block memory */\n+static int\n+qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,\n+\t\t  uint16_t sb_id)\n+{\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct status_block *sb_virt;\n+\tdma_addr_t sb_phys;\n+\tint rc;\n+\n+\tsb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));\n+\n+\tif (!sb_virt) {\n+\t\tDP_ERR(edev, \"Status block allocation failed\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trc = qdev->ops->common->sb_init(edev, sb_info,\n+\t\t\t\t\tsb_virt, sb_phys, sb_id,\n+\t\t\t\t\tQED_SB_TYPE_L2_QUEUE);\n+\tif (rc) {\n+\t\tDP_ERR(edev, \"Status block initialization failed\\n\");\n+\t\t/* TBD: No dma_free_coherent possible */\n+\t\treturn rc;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int qede_alloc_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)\n+{\n+\treturn qede_alloc_mem_sb(qdev, fp->sb_info, fp->rss_id);\n+}\n+\n+static void qede_shrink_txq(struct qede_dev *qdev, uint16_t num_rss)\n+{\n+\t/* @@@TBD - this should also re-set the qed interrupts */\n+}\n+\n+/* This function allocates all qede memory at NIC load. */\n+static int qede_alloc_mem_load(struct qede_dev *qdev)\n+{\n+\tint rc = 0, rss_id;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\n+\tfor (rss_id = 0; rss_id < QEDE_RSS_CNT(qdev); rss_id++) {\n+\t\tstruct qede_fastpath *fp = &qdev->fp_array[rss_id];\n+\n+\t\trc = qede_alloc_mem_fp(qdev, fp);\n+\t\tif (rc)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (rss_id != QEDE_RSS_CNT(qdev)) {\n+\t\t/* Failed allocating memory for all the queues */\n+\t\tif (!rss_id) {\n+\t\t\tDP_ERR(edev,\n+\t\t\t       \"Failed to alloc memory for leading queue\\n\");\n+\t\t\trc = -ENOMEM;\n+\t\t} else {\n+\t\t\tDP_NOTICE(edev, false,\n+\t\t\t\t  \"Failed to allocate memory for all of \"\n+\t\t\t\t  \"RSS queues\\n\"\n+\t\t\t\t  \"Desired: %d queues, allocated: %d queues\\n\",\n+\t\t\t\t  QEDE_RSS_CNT(qdev), rss_id);\n+\t\t\tqede_shrink_txq(qdev, rss_id);\n+\t\t}\n+\t\tqdev->num_rss = rss_id;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline void\n+qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)\n+{\n+\tuint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);\n+\tuint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);\n+\tstruct eth_rx_prod_data rx_prods = { 0 };\n+\n+\t/* Update producers */\n+\trx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);\n+\trx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);\n+\n+\t/* Make sure that the BD and SGE data is updated before updating the\n+\t * producers since FW might read the BD/SGE right after the producer\n+\t * is updated.\n+\t */\n+\trte_wmb();\n+\n+\tinternal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),\n+\t\t\t(uint32_t *)&rx_prods);\n+\n+\t/* mmiowb is needed to synchronize doorbell writes from more than one\n+\t * processor. 
It guarantees that the write arrives to the device before\n+\t * the napi lock is released and another qede_poll is called (possibly\n+\t * on another CPU). Without this barrier, the next doorbell can bypass\n+\t * this doorbell. This is applicable to IA64/Altix systems.\n+\t */\n+\trte_wmb();\n+\n+\tPMD_RX_LOG(DEBUG, rxq, \"bd_prod %u  cqe_prod %u\\n\", bd_prod, cqe_prod);\n+}\n+\n+static inline uint32_t\n+qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)\n+{\n+\treturn index % n_rx_rings;\n+}\n+\n+#ifdef ENC_SUPPORTED\n+static bool qede_tunn_exist(uint16_t flag)\n+{\n+\treturn !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<\n+\t\t    PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);\n+}\n+\n+static inline uint8_t qede_check_tunn_csum(uint16_t flag)\n+{\n+\tuint8_t tcsum = 0;\n+\tuint16_t csum_flag = 0;\n+\n+\tif ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<\n+\t     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)\n+\t\tcsum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<\n+\t\t    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;\n+\n+\tif ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<\n+\t     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {\n+\t\tcsum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<\n+\t\t    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;\n+\t\ttcsum = QEDE_TUNN_CSUM_UNNECESSARY;\n+\t}\n+\n+\tcsum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<\n+\t    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |\n+\t    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<\n+\t    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;\n+\n+\tif (csum_flag & flag)\n+\t\treturn QEDE_CSUM_ERROR;\n+\n+\treturn QEDE_CSUM_UNNECESSARY | tcsum;\n+}\n+#else\n+static inline uint8_t qede_tunn_exist(uint16_t flag)\n+{\n+\treturn 0;\n+}\n+\n+static inline uint8_t qede_check_tunn_csum(uint16_t flag)\n+{\n+\treturn 0;\n+}\n+#endif\n+\n+static inline uint8_t qede_check_notunn_csum(uint16_t flag)\n+{\n+\tuint8_t csum = 0;\n+\tuint16_t csum_flag = 0;\n+\n+\tif ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<\n+\t     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {\n+\t\tcsum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<\n+\t\t    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;\n+\t\tcsum = QEDE_CSUM_UNNECESSARY;\n+\t}\n+\n+\tcsum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<\n+\t    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;\n+\n+\tif (csum_flag & flag)\n+\t\treturn QEDE_CSUM_ERROR;\n+\n+\treturn csum;\n+}\n+\n+static inline uint8_t qede_check_csum(uint16_t flag)\n+{\n+\tif (likely(!qede_tunn_exist(flag)))\n+\t\treturn qede_check_notunn_csum(flag);\n+\telse\n+\t\treturn qede_check_tunn_csum(flag);\n+}\n+\n+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)\n+{\n+\tecore_chain_consume(&rxq->rx_bd_ring);\n+\trxq->sw_rx_cons++;\n+}\n+\n+static inline void\n+qede_reuse_page(struct qede_dev *qdev,\n+\t\tstruct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)\n+{\n+\tstruct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);\n+\tuint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);\n+\tstruct qede_rx_entry *curr_prod;\n+\tdma_addr_t new_mapping;\n+\n+\tcurr_prod = &rxq->sw_rx_ring[idx];\n+\t*curr_prod = *curr_cons;\n+\n+\tnew_mapping = curr_prod->mbuf->buf_physaddr + curr_prod->page_offset;\n+\n+\trx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));\n+\trx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));\n+\n+\trxq->sw_rx_prod++;\n+}\n+\n+static inline void\n+qede_recycle_rx_bd_ring(struct qede_rx_queue 
*rxq,\n+\t\t\tstruct qede_dev *qdev, uint8_t count)\n+{\n+\tstruct qede_rx_entry *curr_cons;\n+\n+\tfor (; count > 0; count--) {\n+\t\tcurr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];\n+\t\tqede_reuse_page(qdev, rxq, curr_cons);\n+\t\tqede_rx_bd_ring_consume(rxq);\n+\t}\n+}\n+\n+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)\n+{\n+\tuint32_t p_type;\n+\t/* TBD - L4 indications needed ? */\n+\tuint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<\n+\t\t\t      PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);\n+\n+\t/* protocol = 3 means LLC/SNAP over Ethernet */\n+\tif (unlikely(protocol == 0 || protocol == 3))\n+\t\tp_type = RTE_PTYPE_UNKNOWN;\n+\telse if (protocol == 1)\n+\t\tp_type = RTE_PTYPE_L3_IPV4;\n+\telse if (protocol == 2)\n+\t\tp_type = RTE_PTYPE_L3_IPV6;\n+\n+\treturn RTE_PTYPE_L2_ETHER | p_type;\n+}\n+\n+uint16_t\n+qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct qede_rx_queue *rxq = p_rxq;\n+\tstruct qede_dev *qdev = rxq->qdev;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];\n+\tuint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;\n+\tuint16_t rx_pkt = 0;\n+\tunion eth_rx_cqe *cqe;\n+\tstruct eth_fast_path_rx_reg_cqe *fp_cqe;\n+\tregister struct rte_mbuf *rx_mb = NULL;\n+\tenum eth_rx_cqe_type cqe_type;\n+\tuint16_t len, pad;\n+\tuint16_t preload_idx;\n+\tuint8_t csum_flag;\n+\tuint16_t parse_flag;\n+\n+\thw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);\n+\tsw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);\n+\n+\trte_rmb();\n+\n+\tif (hw_comp_cons == sw_comp_cons)\n+\t\treturn 0;\n+\n+\twhile (sw_comp_cons != hw_comp_cons) {\n+\t\t/* Get the CQE from the completion ring */\n+\t\tcqe =\n+\t\t    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);\n+\t\tcqe_type = cqe->fast_path_regular.type;\n+\n+\t\tif (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {\n+\t\t\tPMD_RX_LOG(DEBUG, rxq, \"Got a slowath CQE\\n\");\n+\n+\t\t\tqdev->ops->eth_cqe_completion(edev, fp->rss_id,\n+\t\t\t\t(struct eth_slow_path_rx_cqe *)cqe);\n+\t\t\tgoto next_cqe;\n+\t\t}\n+\n+\t\t/* Get the data from the SW ring */\n+\t\tsw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);\n+\t\trx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;\n+\t\tassert(rx_mb != NULL);\n+\n+\t\t/* non GRO */\n+\t\tfp_cqe = &cqe->fast_path_regular;\n+\n+\t\tlen = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);\n+\t\tpad = fp_cqe->placement_offset;\n+\t\tPMD_RX_LOG(DEBUG, rxq,\n+\t\t\t   \"CQE type = 0x%x, flags = 0x%x, vlan = 0x%x\"\n+\t\t\t   \" len = %u, parsing_flags = %d\\n\",\n+\t\t\t   cqe_type, fp_cqe->bitfields,\n+\t\t\t   rte_le_to_cpu_16(fp_cqe->vlan_tag),\n+\t\t\t   len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));\n+\n+\t\t/* If this is an error packet then drop it */\n+\t\tparse_flag =\n+\t\t    rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);\n+\t\tcsum_flag = qede_check_csum(parse_flag);\n+\t\tif (unlikely(csum_flag == QEDE_CSUM_ERROR)) {\n+\t\t\tPMD_RX_LOG(ERR, rxq,\n+\t\t\t\t   \"CQE in CONS = %u has error, flags = 0x%x \"\n+\t\t\t\t   \"dropping incoming packet\\n\",\n+\t\t\t\t   sw_comp_cons, parse_flag);\n+\t\t\trxq->rx_hw_errors++;\n+\t\t\tqede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);\n+\t\t\tgoto next_cqe;\n+\t\t}\n+\n+\t\tif (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {\n+\t\t\tPMD_RX_LOG(ERR, rxq,\n+\t\t\t\t   \"New buffer allocation failed,\"\n+\t\t\t\t   \"dropping incoming packet\\n\");\n+\t\t\tqede_recycle_rx_bd_ring(rxq, qdev, 
fp_cqe->bd_num);\n+\t\t\trte_eth_devices[rxq->port_id].\n+\t\t\t    data->rx_mbuf_alloc_failed++;\n+\t\t\trxq->rx_alloc_errors++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tqede_rx_bd_ring_consume(rxq);\n+\n+\t\t/* Prefetch next mbuf while processing current one. */\n+\t\tpreload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);\n+\t\trte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);\n+\n+\t\tif (fp_cqe->bd_num != 1)\n+\t\t\tPMD_RX_LOG(DEBUG, rxq,\n+\t\t\t\t   \"Jumbo-over-BD packet not supported\\n\");\n+\n+\t\trx_mb->buf_len = len + pad;\n+\t\trx_mb->data_off = pad;\n+\t\trx_mb->nb_segs = 1;\n+\t\trx_mb->data_len = len;\n+\t\trx_mb->pkt_len = len;\n+\t\trx_mb->port = rxq->port_id;\n+\t\trx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);\n+\t\trte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));\n+\n+\t\tif (CQE_HAS_VLAN(parse_flag) ||\n+\t\t    CQE_HAS_OUTER_VLAN(parse_flag)) {\n+\t\t\trx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);\n+\t\t\trx_mb->ol_flags |= PKT_RX_VLAN_PKT;\n+\t\t}\n+\n+\t\trx_pkts[rx_pkt] = rx_mb;\n+\t\trx_pkt++;\n+next_cqe:\n+\t\tecore_chain_recycle_consumed(&rxq->rx_comp_ring);\n+\t\tsw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);\n+\t\tif (rx_pkt == nb_pkts) {\n+\t\t\tPMD_RX_LOG(DEBUG, rxq,\n+\t\t\t\t   \"Budget reached nb_pkts=%u received=%u\\n\",\n+\t\t\t\t   rx_pkt, nb_pkts);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tqede_update_rx_prod(qdev, rxq);\n+\n+\tPMD_RX_LOG(DEBUG, rxq, \"rx_pkts=%u core=%d\\n\", rx_pkt, rte_lcore_id());\n+\n+\treturn rx_pkt;\n+}\n+\n+static inline int\n+qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)\n+{\n+\tuint16_t idx = TX_CONS(txq);\n+\tstruct eth_tx_bd *tx_data_bd;\n+\tstruct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;\n+\n+\tif (unlikely(!mbuf)) {\n+\t\tPMD_TX_LOG(ERR, txq,\n+\t\t\t   \"null mbuf nb_tx_desc %u nb_tx_avail %u \"\n+\t\t\t   \"sw_tx_cons %u sw_tx_prod %u\\n\",\n+\t\t\t   txq->nb_tx_desc, txq->nb_tx_avail, idx,\n+\t\t\t   TX_PROD(txq));\n+\t\treturn -1;\n+\t}\n+\n+\t/* Free now */\n+\trte_pktmbuf_free_seg(mbuf);\n+\ttxq->sw_tx_ring[idx].mbuf = NULL;\n+\tecore_chain_consume(&txq->tx_pbl);\n+\ttxq->nb_tx_avail++;\n+\n+\treturn 0;\n+}\n+\n+static inline uint16_t\n+qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)\n+{\n+\tuint16_t tx_compl = 0;\n+\tuint16_t hw_bd_cons;\n+\tint rc;\n+\n+\thw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);\n+\trte_compiler_barrier();\n+\n+\twhile (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {\n+\t\trc = qede_free_tx_pkt(edev, txq);\n+\t\tif (rc) {\n+\t\t\tDP_NOTICE(edev, true,\n+\t\t\t\t  \"hw_bd_cons = %d, chain_cons=%d\\n\",\n+\t\t\t\t  hw_bd_cons,\n+\t\t\t\t  ecore_chain_get_cons_idx(&txq->tx_pbl));\n+\t\t\tbreak;\n+\t\t}\n+\t\ttxq->sw_tx_cons++;\t/* Making TXD available */\n+\t\ttx_compl++;\n+\t}\n+\n+\tPMD_TX_LOG(DEBUG, txq, \"Tx compl %u sw_tx_cons %u avail %u\\n\",\n+\t\t   tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);\n+\treturn tx_compl;\n+}\n+\n+uint16_t\n+qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct qede_tx_queue *txq = p_txq;\n+\tstruct qede_dev *qdev = txq->qdev;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct qede_fastpath *fp = &qdev->fp_array[txq->queue_id];\n+\tstruct eth_tx_1st_bd *first_bd;\n+\tuint16_t nb_tx_pkts;\n+\tuint16_t nb_pkt_sent = 0;\n+\tuint16_t bd_prod;\n+\tuint16_t idx;\n+\tuint16_t tx_count;\n+\n+\tif (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {\n+\t\tPMD_TX_LOG(DEBUG, txq, \"send=%u avail=%u free_thresh=%u\\n\",\n+\t\t\t   nb_pkts, txq->nb_tx_avail, 
txq->tx_free_thresh);\n+\t\t(void)qede_process_tx_compl(edev, txq);\n+\t}\n+\n+\tnb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / MAX_NUM_TX_BDS));\n+\tif (unlikely(nb_tx_pkts == 0)) {\n+\t\tPMD_TX_LOG(DEBUG, txq, \"Out of BDs nb_pkts=%u avail=%u\\n\",\n+\t\t\t   nb_pkts, txq->nb_tx_avail);\n+\t\treturn 0;\n+\t}\n+\n+\ttx_count = nb_tx_pkts;\n+\twhile (nb_tx_pkts--) {\n+\t\t/* Fill the entry in the SW ring and the BDs in the FW ring */\n+\t\tidx = TX_PROD(txq);\n+\t\tstruct rte_mbuf *mbuf = *tx_pkts++;\n+\t\ttxq->sw_tx_ring[idx].mbuf = mbuf;\n+\t\tfirst_bd = (struct eth_tx_1st_bd *)\n+\t\t    ecore_chain_produce(&txq->tx_pbl);\n+\t\tfirst_bd->data.bd_flags.bitfields =\n+\t\t    1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;\n+\t\t/* Map mbug linear data for DMA and set in the first BD */\n+\t\tQEDE_BD_SET_ADDR_LEN(first_bd, RTE_MBUF_DATA_DMA_ADDR(mbuf),\n+\t\t\t\t     mbuf->data_len);\n+\n+\t\t/* Descriptor based VLAN insertion */\n+\t\tif (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {\n+\t\t\tfirst_bd->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);\n+\t\t\tfirst_bd->data.bd_flags.bitfields |=\n+\t\t\t    1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;\n+\t\t}\n+\n+\t\t/* Offload the IP checksum in the hardware */\n+\t\tif (mbuf->ol_flags & PKT_TX_IP_CKSUM) {\n+\t\t\tfirst_bd->data.bd_flags.bitfields |=\n+\t\t\t    1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;\n+\t\t}\n+\n+\t\t/* L4 checksum offload (tcp or udp) */\n+\t\tif (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {\n+\t\t\tfirst_bd->data.bd_flags.bitfields |=\n+\t\t\t    1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;\n+\t\t\t/* IPv6 + extn. -> later */\n+\t\t}\n+\t\tfirst_bd->data.nbds = MAX_NUM_TX_BDS;\n+\t\ttxq->sw_tx_prod++;\n+\t\trte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);\n+\t\ttxq->nb_tx_avail--;\n+\t\tbd_prod =\n+\t\t    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));\n+\t\tnb_pkt_sent++;\n+\t}\n+\n+\t/* Write value of prod idx into bd_prod */\n+\ttxq->tx_db.data.bd_prod = bd_prod;\n+\trte_wmb();\n+\trte_compiler_barrier();\n+\tDIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);\n+\trte_wmb();\n+\n+\t/* Check again for Tx completions if enabled */\n+#ifdef RTE_LIBRTE_QEDE_TX_COMP_END\n+\t(void)qede_process_tx_compl(edev, txq);\n+#endif\n+\n+\tPMD_TX_LOG(DEBUG, txq, \"to_send=%u can_send=%u sent=%u core=%d\\n\",\n+\t\t   nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());\n+\n+\treturn nb_pkt_sent;\n+}\n+\n+int qede_dev_start(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tstruct qed_link_output link_output;\n+\tint rc;\n+\n+\tDP_NOTICE(edev, false, \"port %u\\n\", eth_dev->data->port_id);\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\tif (qdev->state == QEDE_START) {\n+\t\tDP_INFO(edev, \"device already started\\n\");\n+\t\treturn 0;\n+\t}\n+\n+\tif (qdev->state == QEDE_CLOSE) {\n+\t\trc = qede_alloc_fp_array(qdev);\n+\t\tqede_init_fp(qdev);\n+\t\trc = qede_alloc_mem_load(qdev);\n+\t\tDP_INFO(edev, \"Allocated %d RSS queues on %d TC/s\\n\",\n+\t\t\tQEDE_RSS_CNT(qdev), qdev->num_tc);\n+\t} else if (qdev->state == QEDE_STOP) {\n+\t\tDP_INFO(edev, \"restarting port %u\\n\", eth_dev->data->port_id);\n+\t} else {\n+\t\tDP_INFO(edev, \"unknown state port %u\\n\",\n+\t\t\teth_dev->data->port_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (rc) {\n+\t\tDP_ERR(edev, \"Failed to start queues\\n\");\n+\t\t/* TBD: free */\n+\t\treturn rc;\n+\t}\n+\n+\tDP_INFO(edev, \"Start VPORT, RXQ and TXQ 
succeeded\\n\");\n+\n+\tqede_dev_set_link_state(eth_dev, true);\n+\n+\t/* Query whether link is already-up */\n+\tmemset(&link_output, 0, sizeof(link_output));\n+\tqdev->ops->common->get_link(edev, &link_output);\n+\tDP_NOTICE(edev, false, \"link status: %s\\n\",\n+\t\t  link_output.link_up ? \"up\" : \"down\");\n+\n+\tqdev->state = QEDE_START;\n+\n+\tqede_config_rx_mode(eth_dev);\n+\n+\tDP_INFO(edev, \"dev_state is QEDE_START\\n\");\n+\n+\treturn 0;\n+}\n+\n+static int qede_drain_txq(struct qede_dev *qdev,\n+\t\t\t  struct qede_tx_queue *txq, bool allow_drain)\n+{\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tint rc, cnt = 1000;\n+\n+\twhile (txq->sw_tx_cons != txq->sw_tx_prod) {\n+\t\tqede_process_tx_compl(edev, txq);\n+\t\tif (!cnt) {\n+\t\t\tif (allow_drain) {\n+\t\t\t\tDP_NOTICE(edev, true,\n+\t\t\t\t\t  \"Tx queue[%u] is stuck,\"\n+\t\t\t\t\t  \"requesting MCP to drain\\n\",\n+\t\t\t\t\t  txq->queue_id);\n+\t\t\t\trc = qdev->ops->common->drain(edev);\n+\t\t\t\tif (rc)\n+\t\t\t\t\treturn rc;\n+\t\t\t\treturn qede_drain_txq(qdev, txq, false);\n+\t\t\t} else {\n+\t\t\t\tDP_NOTICE(edev, true,\n+\t\t\t\t\t  \"Timeout waiting for tx queue[%d]:\"\n+\t\t\t\t\t  \"PROD=%d, CONS=%d\\n\",\n+\t\t\t\t\t  txq->queue_id, txq->sw_tx_prod,\n+\t\t\t\t\t  txq->sw_tx_cons);\n+\t\t\t\treturn -ENODEV;\n+\t\t\t}\n+\t\t}\n+\t\tcnt--;\n+\t\tDELAY(1000);\n+\t\trte_compiler_barrier();\n+\t}\n+\n+\t/* FW finished processing, wait for HW to transmit all tx packets */\n+\tDELAY(2000);\n+\n+\treturn 0;\n+}\n+\n+static int qede_stop_queues(struct qede_dev *qdev)\n+{\n+\tstruct qed_update_vport_params vport_update_params;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tint rc, tc, i;\n+\n+\t/* Disable the vport */\n+\tmemset(&vport_update_params, 0, sizeof(vport_update_params));\n+\tvport_update_params.vport_id = 0;\n+\tvport_update_params.update_vport_active_flg = 1;\n+\tvport_update_params.vport_active_flg = 0;\n+\tvport_update_params.update_rss_flg = 0;\n+\n+\tDP_INFO(edev, \"vport_update\\n\");\n+\n+\trc = qdev->ops->vport_update(edev, &vport_update_params);\n+\tif (rc) {\n+\t\tDP_ERR(edev, \"Failed to update vport\\n\");\n+\t\treturn rc;\n+\t}\n+\n+\tDP_INFO(edev, \"Flushing tx queues\\n\");\n+\n+\t/* Flush Tx queues. 
If needed, request drain from MCP */\n+\tfor_each_rss(i) {\n+\t\tstruct qede_fastpath *fp = &qdev->fp_array[i];\n+\t\tfor (tc = 0; tc < qdev->num_tc; tc++) {\n+\t\t\tstruct qede_tx_queue *txq = fp->txqs[tc];\n+\t\t\trc = qede_drain_txq(qdev, txq, true);\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\t/* Stop all Queues in reverse order */\n+\tfor (i = QEDE_RSS_CNT(qdev) - 1; i >= 0; i--) {\n+\t\tstruct qed_stop_rxq_params rx_params;\n+\n+\t\t/* Stop the Tx Queue(s) */\n+\t\tfor (tc = 0; tc < qdev->num_tc; tc++) {\n+\t\t\tstruct qed_stop_txq_params tx_params;\n+\n+\t\t\ttx_params.rss_id = i;\n+\t\t\ttx_params.tx_queue_id = tc * QEDE_RSS_CNT(qdev) + i;\n+\n+\t\t\tDP_INFO(edev, \"Stopping tx queues\\n\");\n+\t\t\trc = qdev->ops->q_tx_stop(edev, &tx_params);\n+\t\t\tif (rc) {\n+\t\t\t\tDP_ERR(edev, \"Failed to stop TXQ #%d\\n\",\n+\t\t\t\t       tx_params.tx_queue_id);\n+\t\t\t\treturn rc;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Stop the Rx Queue */\n+\t\tmemset(&rx_params, 0, sizeof(rx_params));\n+\t\trx_params.rss_id = i;\n+\t\trx_params.rx_queue_id = i;\n+\t\trx_params.eq_completion_only = 1;\n+\n+\t\tDP_INFO(edev, \"Stopping rx queues\\n\");\n+\n+\t\trc = qdev->ops->q_rx_stop(edev, &rx_params);\n+\t\tif (rc) {\n+\t\t\tDP_ERR(edev, \"Failed to stop RXQ #%d\\n\", i);\n+\t\t\treturn rc;\n+\t\t}\n+\t}\n+\n+\tDP_INFO(edev, \"Stopping vports\\n\");\n+\n+\t/* Stop the vport */\n+\trc = qdev->ops->vport_stop(edev, 0);\n+\tif (rc)\n+\t\tDP_ERR(edev, \"Failed to stop VPORT\\n\");\n+\n+\treturn rc;\n+}\n+\n+void qede_reset_fp_rings(struct qede_dev *qdev)\n+{\n+\tuint16_t rss_id;\n+\tuint8_t tc;\n+\n+\tfor_each_rss(rss_id) {\n+\t\tDP_INFO(&qdev->edev, \"reset fp chain for rss %u\\n\", rss_id);\n+\t\tstruct qede_fastpath *fp = &qdev->fp_array[rss_id];\n+\t\tecore_chain_reset(&fp->rxq->rx_bd_ring);\n+\t\tecore_chain_reset(&fp->rxq->rx_comp_ring);\n+\t\tfor (tc = 0; tc < qdev->num_tc; tc++) {\n+\t\t\tstruct qede_tx_queue *txq = fp->txqs[tc];\n+\t\t\tecore_chain_reset(&txq->tx_pbl);\n+\t\t}\n+\t}\n+}\n+\n+/* This function frees all memory of a single fp */\n+static void qede_free_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)\n+{\n+\tuint8_t tc;\n+\n+\tqede_rx_queue_release(fp->rxq);\n+\tfor (tc = 0; tc < qdev->num_tc; tc++)\n+\t\tqede_tx_queue_release(fp->txqs[tc]);\n+}\n+\n+void qede_free_mem_load(struct qede_dev *qdev)\n+{\n+\tuint8_t rss_id;\n+\n+\tfor_each_rss(rss_id) {\n+\t\tstruct qede_fastpath *fp = &qdev->fp_array[rss_id];\n+\t\tqede_free_mem_fp(qdev, fp);\n+\t}\n+\t/* qdev->num_rss = 0; */\n+}\n+\n+/*\n+ * Stop an Ethernet device. 
The device can be restarted with a call to\n+ * rte_eth_dev_start().\n+ * Do not change link state and do not release sw structures.\n+ */\n+void qede_dev_stop(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct qede_dev *qdev = eth_dev->data->dev_private;\n+\tstruct ecore_dev *edev = &qdev->edev;\n+\tint rc;\n+\n+\tDP_NOTICE(edev, false, \"port %u\\n\", eth_dev->data->port_id);\n+\n+\tPMD_INIT_FUNC_TRACE(edev);\n+\n+\tif (qdev->state != QEDE_START) {\n+\t\tDP_INFO(edev, \"device not yet started\\n\");\n+\t\treturn;\n+\t}\n+\n+\trc = qede_stop_queues(qdev);\n+\n+\tif (rc)\n+\t\tDP_ERR(edev, \"Didn't succeed to close queues\\n\");\n+\n+\tDP_INFO(edev, \"Stopped queues\\n\");\n+\n+\tqdev->ops->fastpath_stop(edev);\n+\n+\tqede_reset_fp_rings(qdev);\n+\n+\tqdev->state = QEDE_STOP;\n+\n+\tDP_INFO(edev, \"dev_state is QEDE_STOP\\n\");\n+}\ndiff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h\nnew file mode 100644\nindex 0000000..5e4e55b\n--- /dev/null\n+++ b/drivers/net/qede/qede_rxtx.h\n@@ -0,0 +1,187 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+\n+#ifndef _QEDE_RXTX_H_\n+#define _QEDE_RXTX_H_\n+\n+#include \"qede_ethdev.h\"\n+\n+/* Ring Descriptors */\n+#define RX_RING_SIZE_POW        16\t/* 64K */\n+#define RX_RING_SIZE            (1ULL << RX_RING_SIZE_POW)\n+#define NUM_RX_BDS_MAX          (RX_RING_SIZE - 1)\n+#define NUM_RX_BDS_MIN          128\n+#define NUM_RX_BDS_DEF          NUM_RX_BDS_MAX\n+#define NUM_RX_BDS(q)           (q->nb_rx_desc - 1)\n+\n+#define TX_RING_SIZE_POW        16\t/* 64K */\n+#define TX_RING_SIZE            (1ULL << TX_RING_SIZE_POW)\n+#define NUM_TX_BDS_MAX          (TX_RING_SIZE - 1)\n+#define NUM_TX_BDS_MIN          128\n+#define NUM_TX_BDS_DEF          NUM_TX_BDS_MAX\n+#define NUM_TX_BDS(q)           (q->nb_tx_desc - 1)\n+\n+#define TX_CONS(txq)            (txq->sw_tx_cons & NUM_TX_BDS(txq))\n+#define TX_PROD(txq)            (txq->sw_tx_prod & NUM_TX_BDS(txq))\n+\n+/* Number of TX BDs per packet used currently */\n+#define MAX_NUM_TX_BDS\t\t\t1\n+\n+#define QEDE_DEFAULT_TX_FREE_THRESH\t32\n+\n+#define QEDE_CSUM_ERROR\t\t\t(1 << 0)\n+#define QEDE_CSUM_UNNECESSARY\t\t(1 << 1)\n+#define QEDE_TUNN_CSUM_UNNECESSARY\t(1 << 2)\n+\n+#define RTE_MBUF_DATA_DMA_ADDR(mb) \\\n+\t((uint64_t)((mb)->buf_physaddr + (mb)->data_off))\n+\n+#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \\\n+\tdo { \\\n+\t\t(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \\\n+\t\t(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \\\n+\t\t(bd)->nbytes = rte_cpu_to_le_16(len); \\\n+\t} while (0)\n+\n+#define CQE_HAS_VLAN(flags) \\\n+\t((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \\\n+\t\t<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))\n+\n+#define CQE_HAS_OUTER_VLAN(flags) \\\n+\t((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \\\n+\t\t<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))\n+\n+#define QEDE_IP_HEADER_ALIGNMENT_PADDING        2\n+\n+/* Max supported alignment is 256 (8 shift)\n+ * minimal alignment shift 6 is optimal for 57xxx HW performance\n+ */\n+#define QEDE_L1_CACHE_SHIFT\t6\n+#define QEDE_RX_ALIGN_SHIFT\t(RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))\n+#define QEDE_FW_RX_ALIGN_END\t(1UL << QEDE_RX_ALIGN_SHIFT)\n+\n+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */\n+#define QEDE_ETH_OVERHEAD       (ETHER_HDR_LEN + ETHER_CRC_LEN + \\\n+\t\t\t\t 8 + 8 + QEDE_IP_HEADER_ALIGNMENT_PADDING + \\\n+\t\t\t\t 
QEDE_FW_RX_ALIGN_END)\n+\n+/* TBD: Excluding IPV6 */\n+#define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \\\n+\t\t\t\t ETH_RSS_NONFRAG_IPV4_UDP)\n+\n+#define QEDE_TXQ_FLAGS\t\t((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)\n+\n+#define MAX_NUM_TC\t\t8\n+\n+#define for_each_rss(i) for (i = 0; i < qdev->num_rss; i++)\n+\n+/*\n+ * RX BD descriptor ring\n+ */\n+struct qede_rx_entry {\n+\tstruct rte_mbuf *mbuf;\n+\tuint32_t page_offset;\n+\t/* allows expansion .. */\n+};\n+\n+/*\n+ * Structure associated with each RX queue.\n+ */\n+struct qede_rx_queue {\n+\tstruct rte_mempool *mb_pool;\n+\tstruct ecore_chain rx_bd_ring;\n+\tstruct ecore_chain rx_comp_ring;\n+\tuint16_t *hw_cons_ptr;\n+\tvoid OSAL_IOMEM *hw_rxq_prod_addr;\n+\tstruct qede_rx_entry *sw_rx_ring;\n+\tuint16_t sw_rx_cons;\n+\tuint16_t sw_rx_prod;\n+\tuint16_t nb_rx_desc;\n+\tuint16_t queue_id;\n+\tuint16_t port_id;\n+\tuint16_t rx_buf_size;\n+\tuint64_t rx_hw_errors;\n+\tuint64_t rx_alloc_errors;\n+\tstruct qede_dev *qdev;\n+};\n+\n+/*\n+ * TX BD descriptor ring\n+ */\n+struct qede_tx_entry {\n+\tstruct rte_mbuf *mbuf;\n+\tuint8_t flags;\n+};\n+\n+union db_prod {\n+\tstruct eth_db_data data;\n+\tuint32_t raw;\n+};\n+\n+struct qede_tx_queue {\n+\tstruct ecore_chain tx_pbl;\n+\tstruct qede_tx_entry *sw_tx_ring;\n+\tuint16_t nb_tx_desc;\n+\tuint16_t nb_tx_avail;\n+\tuint16_t tx_free_thresh;\n+\tuint16_t queue_id;\n+\tuint16_t *hw_cons_ptr;\n+\tuint16_t sw_tx_cons;\n+\tuint16_t sw_tx_prod;\n+\tvoid OSAL_IOMEM *doorbell_addr;\n+\tvolatile union db_prod tx_db;\n+\tuint16_t port_id;\n+\tuint64_t txq_counter;\n+\tstruct qede_dev *qdev;\n+};\n+\n+struct qede_fastpath {\n+\tstruct qede_dev *qdev;\n+\tuint8_t rss_id;\n+\tstruct ecore_sb_info *sb_info;\n+\tstruct qede_rx_queue *rxq;\n+\tstruct qede_tx_queue *txqs[MAX_NUM_TC];\n+\tchar name[80];\n+};\n+\n+/*\n+ * RX/TX function prototypes\n+ */\n+int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\t\tstruct rte_mempool *mp);\n+\n+int qede_tx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\tuint16_t queue_idx,\n+\t\t\tuint16_t nb_desc,\n+\t\t\tunsigned int socket_id,\n+\t\t\tconst struct rte_eth_txconf *tx_conf);\n+\n+void qede_rx_queue_release(void *rx_queue);\n+\n+void qede_tx_queue_release(void *tx_queue);\n+\n+int qede_dev_start(struct rte_eth_dev *eth_dev);\n+\n+void qede_dev_stop(struct rte_eth_dev *eth_dev);\n+\n+void qede_reset_fp_rings(struct qede_dev *qdev);\n+\n+void qede_free_fp_arrays(struct qede_dev *qdev);\n+\n+void qede_free_mem_load(struct qede_dev *qdev);\n+\n+uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,\n+\t\t\tuint16_t nb_pkts);\n+\n+uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,\n+\t\t\tuint16_t nb_pkts);\n+\n+#endif /* _QEDE_RXTX_H_ */\ndiff --git a/drivers/net/qede/rte_pmd_qede_version.map b/drivers/net/qede/rte_pmd_qede_version.map\nnew file mode 100644\nindex 0000000..5151684\n--- /dev/null\n+++ b/drivers/net/qede/rte_pmd_qede_version.map\n@@ -0,0 +1,4 @@\n+DPDK_2.2 {\n+\n+\tlocal: *;\n+};\n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "05/10"
    ]
}
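The logging header qede_logs.h in this patch follows a common compile-time pattern: each debug macro has an enabled variant and a do { } while (0) stub selected by a RTE_LIBRTE_QEDE_DEBUG_* config flag, and the two variants keep the same parameter list so call sites build identically under either configuration (which is why the disabled DP_VERBOSE and PMD_TX_LOG stubs carry the same parameters as their enabled counterparts). A self-contained sketch of the idiom, using hypothetical ENABLE_DEBUG_LOG, DBG_LOG and struct toy_dev names that are not in the patch:

#include <stdio.h>

struct toy_dev {
	const char *name;
};

#ifdef ENABLE_DEBUG_LOG
/* Enabled variant: prints function, line and device name. */
#define DBG_LOG(dev, fmt, ...) \
	fprintf(stderr, "[%s:%d(%s)] " fmt, __func__, __LINE__, \
		(dev)->name ? (dev)->name : "", ##__VA_ARGS__)
#else
/* Stub with the same arity; the call compiles away to nothing. */
#define DBG_LOG(dev, fmt, ...) do { } while (0)
#endif

int main(void)
{
	struct toy_dev dev = { "qede0" };

	/* Builds with or without -DENABLE_DEBUG_LOG. */
	DBG_LOG(&dev, "rx ring size=%d\n", 128);
	return 0;
}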