get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing the full set of writable fields.
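
Example (illustrative only): the patch detail shown below can also be fetched programmatically. This is a minimal Python sketch, assuming the third-party "requests" package is installed; the endpoint and field names are taken from the response that follows.

    import requests

    # Fetch the patch detail resource shown below (read-only GET).
    resp = requests.get("http://patches.dpdk.org/api/patches/11400/")
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields visible in the response body:
    print(patch["name"])   # "[dpdk-dev,v2,07/10] qede: Add SRIOV support"
    print(patch["state"])  # "superseded"
    print(patch["mbox"])   # raw mbox URL, suitable for piping into "git am"

Note that anonymous GETs like the one above are read-only; updating a patch via PUT or PATCH requires authenticating as a user with maintainer rights on the project.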

GET /api/patches/11400/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 11400,
    "url": "http://patches.dpdk.org/api/patches/11400/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1457617548-26252-8-git-send-email-rasesh.mody@qlogic.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1457617548-26252-8-git-send-email-rasesh.mody@qlogic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1457617548-26252-8-git-send-email-rasesh.mody@qlogic.com",
    "date": "2016-03-10T13:45:45",
    "name": "[dpdk-dev,v2,07/10] qede: Add SRIOV support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "db143a15321257fddefbe03f44ef6d3e76ce2995",
    "submitter": {
        "id": 325,
        "url": "http://patches.dpdk.org/api/people/325/?format=api",
        "name": "Rasesh Mody",
        "email": "rasesh.mody@qlogic.com"
    },
    "delegate": {
        "id": 10,
        "url": "http://patches.dpdk.org/api/users/10/?format=api",
        "username": "bruce",
        "first_name": "Bruce",
        "last_name": "Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1457617548-26252-8-git-send-email-rasesh.mody@qlogic.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/11400/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/11400/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id A74D6378B;\n\tThu, 10 Mar 2016 14:46:42 +0100 (CET)",
            "from mx0a-0016ce01.pphosted.com (mx0a-0016ce01.pphosted.com\n\t[67.231.148.157]) by dpdk.org (Postfix) with ESMTP id 01630378B\n\tfor <dev@dpdk.org>; Thu, 10 Mar 2016 14:46:37 +0100 (CET)",
            "from pps.filterd (m0045602.ppops.net [127.0.0.1])\n\tby mx0a-0016ce01.pphosted.com (8.16.0.11/8.16.0.11) with SMTP id\n\tu2ADgpJs010949 for <dev@dpdk.org>; Thu, 10 Mar 2016 05:46:37 -0800",
            "from avcashub1.qlogic.com ([198.186.0.116])\n\tby mx0a-0016ce01.pphosted.com with ESMTP id 21fyg1s8kb-1\n\t(version=TLSv1 cipher=AES128-SHA bits=128 verify=NOT)\n\tfor <dev@dpdk.org>; Thu, 10 Mar 2016 05:46:36 -0800",
            "from avluser05.qlc.com (10.1.113.115) by qlc.com (10.1.4.191) with\n\tMicrosoft SMTP Server id 14.3.235.1;\n\tThu, 10 Mar 2016 05:46:35 -0800",
            "(from rmody@localhost)\tby avluser05.qlc.com (8.14.4/8.14.4/Submit)\n\tid u2ADkZLn026369;\tThu, 10 Mar 2016 05:46:35 -0800"
        ],
        "X-Authentication-Warning": "avluser05.qlc.com: rmody set sender to\n\trasesh.mody@qlogic.com using -f",
        "From": "Rasesh Mody <rasesh.mody@qlogic.com>",
        "To": "<dev@dpdk.org>",
        "Date": "Thu, 10 Mar 2016 05:45:45 -0800",
        "Message-ID": "<1457617548-26252-8-git-send-email-rasesh.mody@qlogic.com>",
        "X-Mailer": "git-send-email 1.7.10.3",
        "In-Reply-To": "<1457617548-26252-1-git-send-email-rasesh.mody@qlogic.com>",
        "References": "<1457617548-26252-1-git-send-email-rasesh.mody@qlogic.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "disclaimer": "bypass",
        "X-Proofpoint-Virus-Version": "vendor=nai engine=5800 definitions=8099\n\tsignatures=670697",
        "X-Proofpoint-Spam-Details": "rule=notspam policy=default score=0 suspectscore=4\n\tmalwarescore=0\n\tphishscore=0 bulkscore=0 spamscore=0 clxscore=1015 lowpriorityscore=0\n\tadultscore=0 classifier=spam adjust=0 reason=mlx scancount=1\n\tengine=8.0.1-1601100000 definitions=main-1603100226",
        "Cc": "sony.chacko@qlogic.com",
        "Subject": "[dpdk-dev] [PATCH v2 07/10] qede: Add SRIOV support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Harish Patil <harish.patil@qlogic.com>\nSigned-off-by: Rasesh Mody <rasesh.mody@qlogic.com>\nSigned-off-by: Sony Chacko <sony.chacko@qlogic.com>\n---\n drivers/net/qede/Makefile              |    2 +\n drivers/net/qede/base/bcm_osal.c       |   57 +-\n drivers/net/qede/base/ecore.h          |    1 +\n drivers/net/qede/base/ecore_dev.c      |  116 +-\n drivers/net/qede/base/ecore_hw.c       |    9 +-\n drivers/net/qede/base/ecore_init_ops.c |    4 +\n drivers/net/qede/base/ecore_int.c      |   31 +-\n drivers/net/qede/base/ecore_iov_api.h  |  933 +++++++++\n drivers/net/qede/base/ecore_l2.c       |  233 ++-\n drivers/net/qede/base/ecore_l2.h       |   50 +\n drivers/net/qede/base/ecore_mcp.c      |   30 +\n drivers/net/qede/base/ecore_spq.c      |    8 +-\n drivers/net/qede/base/ecore_sriov.c    | 3422 ++++++++++++++++++++++++++++++++\n drivers/net/qede/base/ecore_sriov.h    |  390 ++++\n drivers/net/qede/base/ecore_vf.c       | 1322 ++++++++++++\n drivers/net/qede/base/ecore_vf.h       |  415 ++++\n drivers/net/qede/base/ecore_vf_api.h   |  186 ++\n drivers/net/qede/base/ecore_vfpf_if.h  |  590 ++++++\n drivers/net/qede/qede_ethdev.c         |   20 +-\n drivers/net/qede/qede_ethdev.h         |    4 +-\n drivers/net/qede/qede_main.c           |  151 +-\n 21 files changed, 7863 insertions(+), 111 deletions(-)\n create mode 100644 drivers/net/qede/base/ecore_iov_api.h\n create mode 100644 drivers/net/qede/base/ecore_sriov.c\n create mode 100644 drivers/net/qede/base/ecore_sriov.h\n create mode 100644 drivers/net/qede/base/ecore_vf.c\n create mode 100644 drivers/net/qede/base/ecore_vf.h\n create mode 100644 drivers/net/qede/base/ecore_vf_api.h\n create mode 100644 drivers/net/qede/base/ecore_vfpf_if.h",
    "diff": "diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile\nindex eb08635..8970921 100644\n--- a/drivers/net/qede/Makefile\n+++ b/drivers/net/qede/Makefile\n@@ -78,6 +78,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c\n SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c\n SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c\n SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sriov.c\n+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_vf.c\n SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c\n SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c\n SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c\ndiff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c\nindex 00b27ba..e7720c0 100644\n--- a/drivers/net/qede/base/bcm_osal.c\n+++ b/drivers/net/qede/base/bcm_osal.c\n@@ -14,8 +14,9 @@\n #include \"bcm_osal.h\"\n #include \"ecore.h\"\n #include \"ecore_hw.h\"\n+#include \"ecore_iov_api.h\"\n \n-unsigned long log2_align(unsigned long n)\n+unsigned long qede_log2_align(unsigned long n)\n {\n \tunsigned long ret = n ? 1 : 0;\n \tunsigned long _n = n >> 1;\n@@ -31,7 +32,7 @@ unsigned long log2_align(unsigned long n)\n \treturn ret;\n }\n \n-u32 osal_log2(u32 val)\n+u32 qede_osal_log2(u32 val)\n {\n \tu32 log = 0;\n \n@@ -41,6 +42,54 @@ u32 osal_log2(u32 val)\n \treturn log;\n }\n \n+inline void qede_set_bit(u32 nr, unsigned long *addr)\n+{\n+\t__sync_fetch_and_or(addr, (1UL << nr));\n+}\n+\n+inline void qede_clr_bit(u32 nr, unsigned long *addr)\n+{\n+\t__sync_fetch_and_and(addr, ~(1UL << nr));\n+}\n+\n+inline bool qede_test_bit(u32 nr, unsigned long *addr)\n+{\n+\tbool res;\n+\n+\trte_mb();\n+\tres = ((*addr) & (1UL << nr)) != 0;\n+\trte_mb();\n+\treturn res;\n+}\n+\n+static inline u32 qede_ffz(unsigned long word)\n+{\n+\tunsigned long first_zero;\n+\n+\tfirst_zero = __builtin_ffsl(~word);\n+\treturn first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;\n+}\n+\n+inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)\n+{\n+\tu32 i;\n+\tu32 nwords = 0;\n+\tOSAL_BUILD_BUG_ON(!limit);\n+\tnwords = (limit - 1) / OSAL_BITS_PER_UL + 1;\n+\tfor (i = 0; i < nwords; i++)\n+\t\tif (~(addr[i] != 0))\n+\t\t\tbreak;\n+\treturn (i == nwords) ? 
limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);\n+}\n+\n+void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,\n+\t\t\t      __rte_unused struct vf_pf_resc_request *resc_req,\n+\t\t\t      struct ecore_vf_acquire_sw_info *vf_sw_info)\n+{\n+\tvf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;\n+\tvf_sw_info->override_fw_version = 1;\n+}\n+\n void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,\n \t\t\t      dma_addr_t *phys, size_t size)\n {\n@@ -97,8 +146,8 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,\n \treturn mz->addr;\n }\n \n-u32 qed_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,\n-\t\t   u8 *input_buf, u32 max_size, u8 *unzip_buf)\n+u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,\n+\t\t    u8 *input_buf, u32 max_size, u8 *unzip_buf)\n {\n \tint rc;\n \ndiff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h\nindex 2cd7a94..942aaee 100644\n--- a/drivers/net/qede/base/ecore.h\n+++ b/drivers/net/qede/base/ecore.h\n@@ -50,6 +50,7 @@ enum ecore_nvm_cmd {\n #ifndef LINUX_REMOVE\n #if !defined(CONFIG_ECORE_L2)\n #define CONFIG_ECORE_L2\n+#define CONFIG_ECORE_SRIOV\n #endif\n #endif\n \ndiff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c\nindex 734d36e..f84266f 100644\n--- a/drivers/net/qede/base/ecore_dev.c\n+++ b/drivers/net/qede/base/ecore_dev.c\n@@ -21,6 +21,8 @@\n #include \"ecore_init_fw_funcs.h\"\n #include \"ecore_sp_commands.h\"\n #include \"ecore_dev_api.h\"\n+#include \"ecore_sriov.h\"\n+#include \"ecore_vf.h\"\n #include \"ecore_mcp.h\"\n #include \"ecore_hw_defs.h\"\n #include \"mcp_public.h\"\n@@ -126,6 +128,9 @@ void ecore_resc_free(struct ecore_dev *p_dev)\n {\n \tint i;\n \n+\tif (IS_VF(p_dev))\n+\t\treturn;\n+\n \tOSAL_FREE(p_dev, p_dev->fw_data);\n \tp_dev->fw_data = OSAL_NULL;\n \n@@ -149,6 +154,7 @@ void ecore_resc_free(struct ecore_dev *p_dev)\n \t\tecore_eq_free(p_hwfn, p_hwfn->p_eq);\n \t\tecore_consq_free(p_hwfn, p_hwfn->p_consq);\n \t\tecore_int_free(p_hwfn);\n+\t\tecore_iov_free(p_hwfn);\n \t\tecore_dmae_info_free(p_hwfn);\n \t\t/* @@@TBD Flush work-queue ? 
*/\n \t}\n@@ -161,7 +167,11 @@ static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,\n \tstruct ecore_qm_info *qm_info = &p_hwfn->qm_info;\n \tstruct init_qm_port_params *p_qm_port;\n \tu16 num_pqs, multi_cos_tcs = 1;\n+#ifdef CONFIG_ECORE_SRIOV\n+\tu16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;\n+#else\n \tu16 num_vfs = 0;\n+#endif\n \n \tOSAL_MEM_ZERO(qm_info, sizeof(*qm_info));\n \n@@ -363,6 +373,9 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)\n \tstruct ecore_eq *p_eq;\n \tint i;\n \n+\tif (IS_VF(p_dev))\n+\t\treturn rc;\n+\n \tp_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,\n \t\t\t\t     sizeof(struct ecore_fw_data));\n \tif (!p_dev->fw_data)\n@@ -440,6 +453,10 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)\n \t\tif (rc)\n \t\t\tgoto alloc_err;\n \n+\t\trc = ecore_iov_alloc(p_hwfn);\n+\t\tif (rc)\n+\t\t\tgoto alloc_err;\n+\n \t\t/* EQ */\n \t\tp_eq = ecore_eq_alloc(p_hwfn, 256);\n \t\tif (!p_eq)\n@@ -481,6 +498,9 @@ void ecore_resc_setup(struct ecore_dev *p_dev)\n {\n \tint i;\n \n+\tif (IS_VF(p_dev))\n+\t\treturn;\n+\n \tfor_each_hwfn(p_dev, i) {\n \t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];\n \n@@ -496,6 +516,8 @@ void ecore_resc_setup(struct ecore_dev *p_dev)\n \t\t\t    p_hwfn->mcp_info->mfw_mb_length);\n \n \t\tecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);\n+\n+\t\tecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);\n \t}\n }\n \n@@ -1141,23 +1163,6 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,\n \t/* Pure runtime initializations - directly to the HW  */\n \tecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);\n \n-\t/* PCI relaxed ordering causes a decrease in the performance on some\n-\t * systems. Till a root cause is found, disable this attribute in the\n-\t * PCI config space.\n-\t */\n-#if 0\t\t\t\t/* @DPDK */\n-\tpos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);\n-\tif (!pos) {\n-\t\tDP_NOTICE(p_hwfn, true,\n-\t\t\t  \"Failed to find the PCI Express\"\n-\t\t\t  \" Capability structure in the PCI config space\\n\");\n-\t\treturn ECORE_IO;\n-\t}\n-\tOSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);\n-\tctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;\n-\tOSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl);\n-#endif /* @DPDK */\n-\n \trc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);\n \tif (rc)\n \t\treturn rc;\n@@ -1248,13 +1253,22 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,\n \tu32 load_code, param;\n \tint i, j;\n \n-\trc = ecore_init_fw_data(p_dev, bin_fw_data);\n-\tif (rc != ECORE_SUCCESS)\n-\t\treturn rc;\n+\tif (IS_PF(p_dev)) {\n+\t\trc = ecore_init_fw_data(p_dev, bin_fw_data);\n+\t\tif (rc != ECORE_SUCCESS)\n+\t\t\treturn rc;\n+\t}\n \n \tfor_each_hwfn(p_dev, i) {\n \t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];\n \n+\t\tif (IS_VF(p_dev)) {\n+\t\t\trc = ecore_vf_pf_init(p_hwfn);\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\t\t\tcontinue;\n+\t\t}\n+\n \t\t/* Enable DMAE in PXP */\n \t\trc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);\n \n@@ -1414,6 +1428,11 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)\n \n \t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, \"Stopping hw/fw\\n\");\n \n+\t\tif (IS_VF(p_dev)) {\n+\t\t\tecore_vf_pf_int_cleanup(p_hwfn);\n+\t\t\tcontinue;\n+\t\t}\n+\n \t\t/* mark the hw as uninitialized... 
*/\n \t\tp_hwfn->hw_init_done = false;\n \n@@ -1452,14 +1471,16 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)\n \t\tOSAL_MSLEEP(1);\n \t}\n \n-\t/* Disable DMAE in PXP - in CMT, this should only be done for\n-\t * first hw-function, and only after all transactions have\n-\t * stopped for all active hw-functions.\n-\t */\n-\tt_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0],\n-\t\t\t\t     p_dev->hwfns[0].p_main_ptt, false);\n-\tif (t_rc != ECORE_SUCCESS)\n-\t\trc = t_rc;\n+\tif (IS_PF(p_dev)) {\n+\t\t/* Disable DMAE in PXP - in CMT, this should only be done for\n+\t\t * first hw-function, and only after all transactions have\n+\t\t * stopped for all active hw-functions.\n+\t\t */\n+\t\tt_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0],\n+\t\t\t\t\t     p_dev->hwfns[0].p_main_ptt, false);\n+\t\tif (t_rc != ECORE_SUCCESS)\n+\t\t\trc = t_rc;\n+\t}\n \n \treturn rc;\n }\n@@ -1472,6 +1493,11 @@ void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)\n \t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];\n \t\tstruct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;\n \n+\t\tif (IS_VF(p_dev)) {\n+\t\t\tecore_vf_pf_int_cleanup(p_hwfn);\n+\t\t\tcontinue;\n+\t\t}\n+\n \t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,\n \t\t\t   \"Shutting down the fastpath\\n\");\n \n@@ -1497,6 +1523,9 @@ void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)\n {\n \tstruct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;\n \n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn;\n+\n \t/* Re-open incoming traffic */\n \tecore_wr(p_hwfn, p_hwfn->p_main_ptt,\n \t\t NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);\n@@ -1526,6 +1555,13 @@ enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)\n \tfor_each_hwfn(p_dev, i) {\n \t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];\n \n+\t\tif (IS_VF(p_dev)) {\n+\t\t\trc = ecore_vf_pf_reset(p_hwfn);\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\t\t\tcontinue;\n+\t\t}\n+\n \t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, \"Resetting hw/fw\\n\");\n \n \t\t/* Check for incorrect states */\n@@ -1655,7 +1691,11 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn)\n \n \tOSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));\n \n+#ifdef CONFIG_ECORE_SRIOV\n+\tmax_vf_vlan_filters = ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS;\n+#else\n \tmax_vf_vlan_filters = 0;\n+#endif\n \n \tecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);\n \tresc_num[ECORE_SB] = OSAL_MIN_T(u32,\n@@ -2018,6 +2058,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn,\n {\n \tenum _ecore_status_t rc;\n \n+\trc = ecore_iov_hw_info(p_hwfn, p_hwfn->p_main_ptt);\n+\tif (rc)\n+\t\treturn rc;\n+\n \t/* TODO In get_hw_info, amoungst others:\n \t * Get MCP FW revision and determine according to it the supported\n \t * featrues (e.g. 
DCB)\n@@ -2175,6 +2219,9 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)\n {\n \tint j;\n \n+\tif (IS_VF(p_dev))\n+\t\treturn;\n+\n \tfor_each_hwfn(p_dev, j) {\n \t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];\n \n@@ -2274,6 +2321,9 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality)\n \tstruct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);\n \tenum _ecore_status_t rc;\n \n+\tif (IS_VF(p_dev))\n+\t\treturn ecore_vf_hw_prepare(p_dev);\n+\n \t/* Store the precompiled init data ptrs */\n \tecore_init_iro_array(p_dev);\n \n@@ -2325,6 +2375,11 @@ void ecore_hw_remove(struct ecore_dev *p_dev)\n \tfor_each_hwfn(p_dev, i) {\n \t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];\n \n+\t\tif (IS_VF(p_dev)) {\n+\t\t\tecore_vf_pf_release(p_hwfn);\n+\t\t\tcontinue;\n+\t\t}\n+\n \t\tecore_init_free(p_hwfn);\n \t\tecore_hw_hwfn_free(p_hwfn);\n \t\tecore_mcp_free(p_hwfn);\n@@ -2952,6 +3007,11 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,\n {\n \tstruct coalescing_timeset *p_coalesce_timeset;\n \n+\tif (IS_VF(p_hwfn->p_dev)) {\n+\t\tDP_NOTICE(p_hwfn, true, \"VF coalescing config not supported\\n\");\n+\t\treturn ECORE_INVAL;\n+\t}\n+\n \tif (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {\n \t\tDP_NOTICE(p_hwfn, true,\n \t\t\t  \"Coalescing configuration not enabled\\n\");\ndiff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c\nindex 5a1d173..f21783f 100644\n--- a/drivers/net/qede/base/ecore_hw.c\n+++ b/drivers/net/qede/base/ecore_hw.c\n@@ -13,6 +13,7 @@\n #include \"ecore_hw.h\"\n #include \"reg_addr.h\"\n #include \"ecore_utils.h\"\n+#include \"ecore_iov_api.h\"\n \n #ifndef ASIC_ONLY\n #define ECORE_EMUL_FACTOR 2000\n@@ -243,8 +244,12 @@ static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,\n \t\tquota = OSAL_MIN_T(osal_size_t, n - done,\n \t\t\t\t   PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);\n \n-\t\tecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);\n-\t\thw_offset = ecore_ptt_get_bar_addr(p_ptt);\n+\t\tif (IS_PF(p_hwfn->p_dev)) {\n+\t\t\tecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);\n+\t\t\thw_offset = ecore_ptt_get_bar_addr(p_ptt);\n+\t\t} else {\n+\t\t\thw_offset = hw_addr + done;\n+\t\t}\n \n \t\tdw_count = quota / 4;\n \t\thost_addr = (u32 *) ((u8 *) addr + done);\ndiff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c\nindex eeaabb6..326eb92 100644\n--- a/drivers/net/qede/base/ecore_init_ops.c\n+++ b/drivers/net/qede/base/ecore_init_ops.c\n@@ -16,6 +16,7 @@\n #include \"ecore_init_fw_funcs.h\"\n \n #include \"ecore_iro_values.h\"\n+#include \"ecore_sriov.h\"\n #include \"ecore_gtt_values.h\"\n #include \"reg_addr.h\"\n #include \"ecore_init_ops.h\"\n@@ -102,6 +103,9 @@ enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)\n {\n \tstruct ecore_rt_data *rt_data = &p_hwfn->rt_data;\n \n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn ECORE_SUCCESS;\n+\n \trt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,\n \t\t\t\t       sizeof(bool) * RUNTIME_ARRAY_SIZE);\n \tif (!rt_data->b_valid)\ndiff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c\nindex 91e8ad2..f1cc538 100644\n--- a/drivers/net/qede/base/ecore_int.c\n+++ b/drivers/net/qede/base/ecore_int.c\n@@ -16,6 +16,8 @@\n #include \"ecore_int.h\"\n #include \"reg_addr.h\"\n #include \"ecore_hw.h\"\n+#include \"ecore_sriov.h\"\n+#include \"ecore_vf.h\"\n #include \"ecore_hw_defs.h\"\n #include \"ecore_hsi_common.h\"\n #include \"ecore_mcp.h\"\n@@ -373,6 +375,9 
@@ void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,\n \tstruct cau_pi_entry pi_entry;\n \tu32 sb_offset, pi_offset;\n \n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn;\t\t/* @@@TBD MichalK- VF CAU... */\n+\n \tsb_offset = igu_sb_id * PIS_PER_SB;\n \tOSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));\n \n@@ -401,7 +406,8 @@ void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,\n \tsb_info->sb_ack = 0;\n \tOSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));\n \n-\tecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,\n+\tif (IS_PF(p_hwfn->p_dev))\n+\t\tecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,\n \t\t\t\t      sb_info->igu_sb_id, 0, 0);\n }\n \n@@ -421,8 +427,10 @@ static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)\n \t/* Assuming continuous set of IGU SBs dedicated for given PF */\n \tif (sb_id == ECORE_SP_SB_ID)\n \t\tigu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;\n-\telse\n+\telse if (IS_PF(p_hwfn->p_dev))\n \t\tigu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;\n+\telse\n+\t\tigu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);\n \n \tif (sb_id == ECORE_SP_SB_ID)\n \t\tDP_VERBOSE(p_hwfn, ECORE_MSG_INTR,\n@@ -457,9 +465,17 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,\n \t/* The igu address will hold the absolute address that needs to be\n \t * written to for a specific status block\n \t */\n-\tsb_info->igu_addr = (u8 OSAL_IOMEM *) p_hwfn->regview +\n+\tif (IS_PF(p_hwfn->p_dev)) {\n+\t\tsb_info->igu_addr = (u8 OSAL_IOMEM *) p_hwfn->regview +\n \t\t    GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);\n \n+\t} else {\n+\t\tsb_info->igu_addr =\n+\t\t    (u8 OSAL_IOMEM *) p_hwfn->regview +\n+\t\t    PXP_VF_BAR0_START_IGU +\n+\t\t    ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);\n+\t}\n+\n \tsb_info->flags |= ECORE_SB_INFO_INIT;\n \n \tecore_int_sb_setup(p_hwfn, p_ptt, sb_info);\n@@ -687,6 +703,9 @@ void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,\n {\n \tp_hwfn->b_int_enabled = 0;\n \n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn;\n+\n \tecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);\n }\n \n@@ -853,8 +872,14 @@ enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,\n \tp_igu_info->igu_dsb_id = 0xffff;\n \tp_igu_info->igu_base_sb_iov = 0xffff;\n \n+#ifdef CONFIG_ECORE_SRIOV\n+\tmin_vf = p_hwfn->hw_info.first_vf_in_pf;\n+\tmax_vf = p_hwfn->hw_info.first_vf_in_pf +\n+\t    p_hwfn->p_dev->sriov_info.total_vfs;\n+#else\n \tmin_vf = 0;\n \tmax_vf = 0;\n+#endif\n \n \tfor (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);\n \t     sb_id++) {\ndiff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h\nnew file mode 100644\nindex 0000000..6e446f6\n--- /dev/null\n+++ b/drivers/net/qede/base/ecore_iov_api.h\n@@ -0,0 +1,933 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#ifndef __ECORE_SRIOV_API_H__\n+#define __ECORE_SRIOV_API_H__\n+\n+#include \"ecore_status.h\"\n+\n+#define ECORE_VF_ARRAY_LENGTH (3)\n+\n+#define IS_VF(p_dev)\t\t((p_dev)->b_is_vf)\n+#define IS_PF(p_dev)\t\t(!((p_dev)->b_is_vf))\n+#ifdef CONFIG_ECORE_SRIOV\n+#define IS_PF_SRIOV(p_hwfn)\t(!!((p_hwfn)->p_dev->sriov_info.total_vfs))\n+#else\n+#define IS_PF_SRIOV(p_hwfn)\t(0)\n+#endif\n+#define IS_PF_SRIOV_ALLOC(p_hwfn)\t(!!((p_hwfn)->pf_iov_info))\n+#define IS_PF_PDA(p_hwfn)\t0\t/* @@TBD Michalk */\n+\n+/* @@@ TBD MichalK - what should this number 
be*/\n+#define ECORE_MAX_VF_CHAINS_PER_PF 16\n+\n+/* vport update extended feature tlvs flags */\n+enum ecore_iov_vport_update_flag {\n+\tECORE_IOV_VP_UPDATE_ACTIVATE = 0,\n+\tECORE_IOV_VP_UPDATE_VLAN_STRIP = 1,\n+\tECORE_IOV_VP_UPDATE_TX_SWITCH = 2,\n+\tECORE_IOV_VP_UPDATE_MCAST = 3,\n+\tECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4,\n+\tECORE_IOV_VP_UPDATE_RSS = 5,\n+\tECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6,\n+\tECORE_IOV_VP_UPDATE_SGE_TPA = 7,\n+\tECORE_IOV_VP_UPDATE_MAX = 8,\n+};\n+\n+struct ecore_mcp_link_params;\n+struct ecore_mcp_link_state;\n+struct ecore_mcp_link_capabilities;\n+\n+/* These defines are used by the hw-channel; should never change order */\n+#define VFPF_ACQUIRE_OS_LINUX (0)\n+#define VFPF_ACQUIRE_OS_WINDOWS (1)\n+#define VFPF_ACQUIRE_OS_ESX (2)\n+#define VFPF_ACQUIRE_OS_SOLARIS (3)\n+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)\n+\n+struct ecore_vf_acquire_sw_info {\n+\tu32 driver_version;\n+\tu8 os_type;\n+\tbool override_fw_version;\n+};\n+\n+struct ecore_public_vf_info {\n+\t/* These copies will later be reflected in the bulletin board,\n+\t * but this copy should be newer.\n+\t */\n+\tu8 forced_mac[ETH_ALEN];\n+\tu16 forced_vlan;\n+};\n+\n+#ifdef CONFIG_ECORE_SW_CHANNEL\n+/* This is SW channel related only... */\n+enum mbx_state {\n+\tVF_PF_UNKNOWN_STATE = 0,\n+\tVF_PF_WAIT_FOR_START_REQUEST = 1,\n+\tVF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2,\n+\tVF_PF_REQUEST_IN_PROCESSING = 3,\n+\tVF_PF_RESPONSE_READY = 4,\n+};\n+\n+struct ecore_iov_sw_mbx {\n+\tenum mbx_state mbx_state;\n+\n+\tu32 request_size;\n+\tu32 request_offset;\n+\n+\tu32 response_size;\n+\tu32 response_offset;\n+};\n+\n+/**\n+ * @brief Get the vf sw mailbox params\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ *\n+ * @return struct ecore_iov_sw_mbx*\n+ */\n+struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t u16 rel_vf_id);\n+#endif\n+\n+#ifdef CONFIG_ECORE_SRIOV\n+/**\n+ * @brief mark/clear all VFs before/after an incoming PCIe sriov\n+ *        disable.\n+ *\n+ * @param p_hwfn\n+ * @param to_disable\n+ */\n+void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable);\n+\n+/**\n+ * @brief mark/clear chosen VFs before/after an incoming PCIe\n+ *        sriov disable.\n+ *\n+ * @param p_hwfn\n+ * @param to_disable\n+ */\n+void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t u16 rel_vf_id, u8 to_disable);\n+\n+/**\n+ *\n+ * @brief ecore_iov_init_hw_for_vf - initialize the HW for\n+ *        enabling access of a VF. Also includes preparing the\n+ *        IGU for VF access. This needs to be called AFTER hw is\n+ *        initialized and BEFORE VF is loaded inside the VM.\n+ *\n+ * @param p_hwfn\n+ * @param p_ptt\n+ * @param rel_vf_id\n+ * @param num_rx_queues\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      struct ecore_ptt *p_ptt,\n+\t\t\t\t\t      u16 rel_vf_id, u16 num_rx_queues);\n+\n+/**\n+ * @brief ecore_iov_process_mbx_req - process a request received\n+ *        from the VF\n+ *\n+ * @param p_hwfn\n+ * @param p_ptt\n+ * @param vfid\n+ */\n+void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,\n+\t\t\t       struct ecore_ptt *p_ptt, int vfid);\n+\n+/**\n+ * @brief ecore_iov_release_hw_for_vf - called once upper layer\n+ *        knows VF is done with - can release any resources\n+ *        allocated for VF at this point. 
this must be done once\n+ *        we know VF is no longer loaded in VM.\n+ *\n+ * @param p_hwfn\n+ * @param p_ptt\n+ * @param rel_vf_id\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t struct ecore_ptt *p_ptt,\n+\t\t\t\t\t\t u16 rel_vf_id);\n+\n+#ifndef LINUX_REMOVE\n+/**\n+ * @brief ecore_iov_set_vf_ctx - set a context for a given VF\n+ *\n+ * @param p_hwfn\n+ * @param vf_id\n+ * @param ctx\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  u16 vf_id, void *ctx);\n+#endif\n+\n+/**\n+ * @brief FLR cleanup for all VFs\n+ *\n+ * @param p_hwfn\n+ * @param p_ptt\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      struct ecore_ptt *p_ptt);\n+\n+/**\n+ * @brief FLR cleanup for single VF\n+ *\n+ * @param p_hwfn\n+ * @param p_ptt\n+ * @param rel_vf_id\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t\n+ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,\n+\t\t\t\tstruct ecore_ptt *p_ptt, u16 rel_vf_id);\n+\n+/**\n+ * @brief Update the bulletin with link information. Notice this does NOT\n+ *        send a bulletin update, only updates the PF's bulletin.\n+ *\n+ * @param p_hwfn\n+ * @param p_vf\n+ * @param params - the link params to use for the VF link configuration\n+ * @param link - the link output to use for the VF link configuration\n+ * @param p_caps - the link default capabilities.\n+ */\n+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,\n+\t\t\tu16 vfid,\n+\t\t\tstruct ecore_mcp_link_params *params,\n+\t\t\tstruct ecore_mcp_link_state *link,\n+\t\t\tstruct ecore_mcp_link_capabilities *p_caps);\n+\n+/**\n+ * @brief Returns link information as perceived by VF.\n+ *\n+ * @param p_hwfn\n+ * @param p_vf\n+ * @param p_params - the link params visible to vf.\n+ * @param p_link - the link state visible to vf.\n+ * @param p_caps - the link default capabilities visible to vf.\n+ */\n+void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,\n+\t\t\tu16 vfid,\n+\t\t\tstruct ecore_mcp_link_params *params,\n+\t\t\tstruct ecore_mcp_link_state *link,\n+\t\t\tstruct ecore_mcp_link_capabilities *p_caps);\n+\n+/**\n+ * @brief return if the VF is pending FLR\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ *\n+ * @return bool\n+ */\n+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);\n+\n+/**\n+ * @brief Check if given VF ID @vfid is valid\n+ *        w.r.t. 
@b_enabled_only value\n+ *        if b_enabled_only = true - only enabled VF id is valid\n+ *        else any VF id less than max_vfs is valid\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id - Relative VF ID\n+ * @param b_enabled_only - consider only enabled VF\n+ *\n+ * @return bool - true for valid VF ID\n+ */\n+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,\n+\t\t\t     int rel_vf_id, bool b_enabled_only);\n+\n+/**\n+ * @brief Get VF's public info structure\n+ *\n+ * @param p_hwfn\n+ * @param vfid - Relative VF ID\n+ * @param b_enabled_only - false if want to access even if vf is disabled\n+ *\n+ * @return struct ecore_public_vf_info *\n+ */\n+struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn\n+\t\t\t\t\t\t\t  *p_hwfn, u16 vfid,\n+\t\t\t\t\t\t\t  bool b_enabled_only);\n+\n+/**\n+ * @brief Set pending events bitmap for given @vfid\n+ *\n+ * @param p_hwfn\n+ * @param vfid\n+ */\n+void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid);\n+\n+/**\n+ * @brief Copy pending events bitmap in @events and clear\n+ *\t  original copy of events\n+ *\n+ * @param p_hwfn\n+ */\n+void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t       u64 *events);\n+\n+/**\n+ * @brief Copy VF's message to PF's buffer\n+ *\n+ * @param p_hwfn\n+ * @param ptt\n+ * @param vfid\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t   struct ecore_ptt *ptt, int vfid);\n+/**\n+ * @brief Set forced MAC address in PFs copy of bulletin board\n+ *        and configures FW/HW to support the configuration.\n+ *\n+ * @param p_hwfn\n+ * @param mac\n+ * @param vfid\n+ */\n+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       u8 *mac, int vfid);\n+\n+/**\n+ * @brief Set MAC address in PFs copy of bulletin board without\n+ *        configuring FW/HW.\n+ *\n+ * @param p_hwfn\n+ * @param mac\n+ * @param vfid\n+ */\n+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\tu8 *mac, int vfid);\n+\n+/**\n+ * @brief Set forced VLAN [pvid] in PFs copy of bulletin board\n+ *        and configures FW/HW to support the configuration.\n+ *        Setting of pvid 0 would clear the feature.\n+ * @param p_hwfn\n+ * @param pvid\n+ * @param vfid\n+ */\n+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\tu16 pvid, int vfid);\n+\n+/**\n+ * @brief Set default behaviour of VF in case no vlans are configured for it\n+ *        whether to accept only untagged traffic or all.\n+ *        Must be called prior to the VF vport-start.\n+ *\n+ * @param p_hwfn\n+ * @param b_untagged_only\n+ * @param vfid\n+ *\n+ * @return ECORE_SUCCESS if configuration would stick.\n+ */\n+enum _ecore_status_t\n+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t       bool b_untagged_only, int vfid);\n+/**\n+ * @brief Get VFs opaque fid.\n+ *\n+ * @param p_hwfn\n+ * @param vfid\n+ * @param opaque_fid\n+ */\n+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,\n+\t\t\t\t  u16 *opaque_fid);\n+\n+/**\n+ * @brief Get VFs VPORT id.\n+ *\n+ * @param p_hwfn\n+ * @param vfid\n+ * @param vport id\n+ */\n+void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,\n+\t\t\t\tu8 *p_vport_id);\n+\n+/**\n+ * @brief Check if VF has VPORT instance. 
This can be used\n+ *\t  to check if VPORT is active.\n+ *\n+ * @param p_hwfn\n+ */\n+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);\n+\n+/**\n+ * @brief PF posts the bulletin to the VF\n+ *\n+ * @param p_hwfn\n+ * @param p_vf\n+ * @param p_ptt\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\tint vfid,\n+\t\t\t\t\t\tstruct ecore_ptt *p_ptt);\n+\n+/**\n+ * @brief Check if given VF (@vfid) is marked as stopped\n+ *\n+ * @param p_hwfn\n+ * @param vfid\n+ *\n+ * @return bool : true if stopped\n+ */\n+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);\n+\n+/**\n+ * @brief Configure VF anti spoofing\n+ *\n+ * @param p_hwfn\n+ * @param vfid\n+ * @param val - spoofchk value - true/false\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    int vfid, bool val);\n+\n+/**\n+ * @brief Get VF's configured spoof value.\n+ *\n+ * @param p_hwfn\n+ * @param vfid\n+ *\n+ * @return bool - spoofchk value - true/false\n+ */\n+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);\n+\n+/**\n+ * @brief Check for SRIOV sanity by PF.\n+ *\n+ * @param p_hwfn\n+ * @param vfid\n+ *\n+ * @return bool - true if sanity checks passes, else false\n+ */\n+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);\n+\n+/**\n+ * @brief Get the num of VF chains.\n+ *\n+ * @param p_hwfn\n+ *\n+ * @return u8\n+ */\n+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);\n+\n+/**\n+ * @brief Get vf request mailbox params\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ * @param pp_req_virt_addr\n+ * @param p_req_virt_size\n+ */\n+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  u16 rel_vf_id,\n+\t\t\t\t\t  void **pp_req_virt_addr,\n+\t\t\t\t\t  u16 *p_req_virt_size);\n+\n+/**\n+ * @brief Get vf mailbox params\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ * @param pp_reply_virt_addr\n+ * @param p_reply_virt_size\n+ */\n+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    u16 rel_vf_id,\n+\t\t\t\t\t    void **pp_reply_virt_addr,\n+\t\t\t\t\t    u16 *p_reply_virt_size);\n+\n+/**\n+ * @brief Validate if the given length is a valid vfpf message\n+ *        length\n+ *\n+ * @param length\n+ *\n+ * @return bool\n+ */\n+bool ecore_iov_is_valid_vfpf_msg_length(u32 length);\n+\n+/**\n+ * @brief Return the max pfvf message length\n+ *\n+ * @return u32\n+ */\n+u32 ecore_iov_pfvf_msg_length(void);\n+\n+/**\n+ * @brief Returns forced MAC address if one is configured\n+ *\n+ * @parm p_hwfn\n+ * @parm rel_vf_id\n+ *\n+ * @return OSAL_NULL if mac isn't forced; Otherwise, returns MAC.\n+ */\n+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);\n+\n+/**\n+ * @brief Returns pvid if one is configured\n+ *\n+ * @parm p_hwfn\n+ * @parm rel_vf_id\n+ *\n+ * @return 0 if no pvid is configured, otherwise the pvid.\n+ */\n+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       u16 rel_vf_id);\n+/**\n+ * @brief Configure VFs tx rate\n+ *\n+ * @param p_hwfn\n+ * @param p_ptt\n+ * @param vfid\n+ * @param val - tx rate value in Mb/sec.\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t struct ecore_ptt *p_ptt,\n+\t\t\t\t\t\t int vfid, int val);\n+\n+/**\n+ * @brief - Retrieves the statistics associated with a VF\n+ 
*\n+ * @param p_hwfn\n+ * @param p_ptt\n+ * @param vfid\n+ * @param p_stats - this will be filled with the VF statistics\n+ *\n+ * @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.\n+ */\n+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    struct ecore_ptt *p_ptt,\n+\t\t\t\t\t    int vfid,\n+\t\t\t\t\t    struct ecore_eth_stats *p_stats);\n+\n+/**\n+ * @brief - Retrieves num of rxqs chains\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ *\n+ * @return num of rxqs chains.\n+ */\n+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);\n+\n+/**\n+ * @brief - Retrieves num of active rxqs chains\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ *\n+ * @return\n+ */\n+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);\n+\n+/**\n+ * @brief - Retrieves ctx pointer\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ *\n+ * @return\n+ */\n+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);\n+\n+/**\n+ * @brief - Retrieves VF`s num sbs\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ *\n+ * @return\n+ */\n+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);\n+\n+/**\n+ * @brief - Returm true if VF is waiting for acquire\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ *\n+ * @return\n+ */\n+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);\n+\n+/**\n+ * @brief - Returm true if VF is acquired but not initialized\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ *\n+ * @return\n+ */\n+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      u16 rel_vf_id);\n+\n+/**\n+ * @brief - Returm true if VF is acquired and initialized\n+ *\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ *\n+ * @return\n+ */\n+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);\n+\n+/**\n+ * @brief - Get VF's vport min rate configured.\n+ * @param p_hwfn\n+ * @param rel_vf_id\n+ *\n+ * @return - rate in Mbps\n+ */\n+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);\n+\n+/**\n+ * @brief - Configure min rate for VF's vport.\n+ * @param p_dev\n+ * @param vfid\n+ * @param - rate in Mbps\n+ *\n+ * @return\n+ */\n+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,\n+\t\t\t\t\t\t     int vfid, u32 rate);\n+#else\n+static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t     u8 to_disable)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t    u16 rel_vf_id,\n+\t\t\t\t\t\t    u8 to_disable)\n+{\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_init_hw_for_vf(struct\n+\t\t\t\t\t\t\t\t ecore_hwfn\n+\t\t\t\t\t\t\t\t *p_hwfn,\n+\t\t\t\t\t\t\t\t struct\n+\t\t\t\t\t\t\t\t ecore_ptt\n+\t\t\t\t\t\t\t\t *p_ptt,\n+\t\t\t\t\t\t\t\t u16 rel_vf_id,\n+\t\t\t\t\t\t\t\t u16\n+\t\t\t\t\t\t\t\t num_rx_queues)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t  struct ecore_ptt *p_ptt,\n+\t\t\t\t\t\t  int vfid)\n+{\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_release_hw_for_vf(struct\n+\t\t\t\t\t\t\t\t    ecore_hwfn\n+\t\t\t\t\t\t\t\t    *p_hwfn,\n+\t\t\t\t\t\t\t\t    struct\n+\t\t\t\t\t\t\t\t    ecore_ptt\n+\t\t\t\t\t\t\t\t    *p_ptt,\n+\t\t\t\t\t\t\t\t    u16\n+\t\t\t\t\t\t\t\t    rel_vf_id)\n+{\n+\treturn ECORE_SUCCESS;\n+}\n+\n+#ifndef LINUX_REMOVE\n+static OSAL_INLINE enum _ecore_status_t 
ecore_iov_set_vf_ctx(struct ecore_hwfn\n+\t\t\t\t\t\t\t     *p_hwfn, u16 vf_id,\n+\t\t\t\t\t\t\t     void *ctx)\n+{\n+\treturn ECORE_INVAL;\n+}\n+#endif\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct\n+\t\t\t\t\t\t\t\t ecore_hwfn\n+\t\t\t\t\t\t\t\t *p_hwfn,\n+\t\t\t\t\t\t\t\t struct\n+\t\t\t\t\t\t\t\t ecore_ptt\n+\t\t\t\t\t\t\t\t *p_ptt)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(\n+\tstruct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, u16 vfid,\n+\t\t\t\t\t   struct ecore_mcp_link_params *params,\n+\t\t\t\t\t   struct ecore_mcp_link_state *link,\n+\t\t\t\t\t   struct ecore_mcp_link_capabilities\n+\t\t\t\t\t   *p_caps)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, u16 vfid,\n+\t\t\t\t\t   struct ecore_mcp_link_params *params,\n+\t\t\t\t\t   struct ecore_mcp_link_state *link,\n+\t\t\t\t\t   struct ecore_mcp_link_capabilities\n+\t\t\t\t\t   *p_caps)\n+{\n+}\n+\n+static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t    u16 rel_vf_id)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\tint rel_vf_id,\n+\t\t\t\t\t\tbool b_enabled_only)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE struct ecore_public_vf_info *\n+\tecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, u16 vfid,\n+\t\t\t\t  bool b_enabled_only)\n+{\n+\treturn OSAL_NULL;\n+}\n+\n+static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn\n+\t\t\t\t\t\t\t*p_hwfn, u8 vfid)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct\n+\t\t\t\t\t\t\t\t  ecore_hwfn\n+\t\t\t\t\t\t\t\t  *p_hwfn,\n+\t\t\t\t\t\t\t\t  u64 *events)\n+{\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn\n+\t\t\t\t\t\t\t      *p_hwfn,\n+\t\t\t\t\t\t\t      struct ecore_ptt\n+\t\t\t\t\t\t\t      *ptt, int vfid)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn\n+\t\t\t\t\t\t\t  *p_hwfn, u8 *mac,\n+\t\t\t\t\t\t\t  int vfid)\n+{\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct\n+\t\t\t\t\t\t\t\t   ecore_hwfn\n+\t\t\t\t\t\t\t\t   *p_hwfn,\n+\t\t\t\t\t\t\t\t   u8 *mac,\n+\t\t\t\t\t\t\t\t   int vfid)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn\n+\t\t\t\t\t\t\t   p_hwfn, u16 pvid,\n+\t\t\t\t\t\t\t   int vfid)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t     int vfid, u16 *opaque_fid)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t   int vfid, u8 *p_vport_id)\n+{\n+}\n+\n+static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn\n+\t\t\t\t\t\t\t*p_hwfn, int vfid)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct\n+\t\t\t\t\t\t\t\t   ecore_hwfn\n+\t\t\t\t\t\t\t\t   *p_hwfn,\n+\t\t\t\t\t\t\t\t   int vfid,\n+\t\t\t\t\t\t\t\t   struct\n+\t\t\t\t\t\t\t\t   ecore_ptt\n+\t\t\t\t\t\t\t\t   *p_ptt)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\tint vfid)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE enum 
_ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn\n+\t\t\t\t\t\t\t       *p_hwfn,\n+\t\t\t\t\t\t\t       int vfid,\n+\t\t\t\t\t\t\t       bool val)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t       int vfid)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t  int vfid)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)\n+{\n+\treturn 0;\n+}\n+\n+static OSAL_INLINE void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn\n+\t\t\t\t\t\t\t     *p_hwfn,\n+\t\t\t\t\t\t\t     u16 rel_vf_id,\n+\t\t\t\t\t\t\t     void\n+\t\t\t\t\t\t\t     **pp_req_virt_addr,\n+\t\t\t\t\t\t\t     u16 *\n+\t\t\t\t\t\t\t     p_req_virt_size)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn\n+\t\t\t\t\t\t\t       *p_hwfn,\n+\t\t\t\t\t\t\t       u16 rel_vf_id,\n+\t\t\t\t\t\t\t       void\n+\t\t\t\t\t\t       **pp_reply_virt_addr,\n+\t\t\t\t\t\t\t       u16 *\n+\t\t\t\t\t\t       p_reply_virt_size)\n+{\n+}\n+\n+static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 length)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE u32 ecore_iov_pfvf_msg_length(void)\n+{\n+\treturn 0;\n+}\n+\n+static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn\n+\t\t\t\t\t\t\t *p_hwfn, u16 rel_vf_id)\n+{\n+\treturn OSAL_NULL;\n+}\n+\n+static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn\n+\t\t\t\t\t\t\t  *p_hwfn,\n+\t\t\t\t\t\t\t  u16 rel_vf_id)\n+{\n+\treturn 0;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct\n+\t\t\t\t\t\t\t\t    ecore_hwfn\n+\t\t\t\t\t\t\t\t    *p_hwfn,\n+\t\t\t\t\t\t\t\t    struct\n+\t\t\t\t\t\t\t\t    ecore_ptt\n+\t\t\t\t\t\t\t\t    *p_ptt,\n+\t\t\t\t\t\t\t\t    int vfid,\n+\t\t\t\t\t\t\t\t    int val)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\tu16 rel_vf_id)\n+{\n+\treturn 0;\n+}\n+\n+static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn\n+\t\t\t\t\t\t       *p_hwfn, u16 rel_vf_id)\n+{\n+\treturn 0;\n+}\n+\n+static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      u16 rel_vf_id)\n+{\n+\treturn OSAL_NULL;\n+}\n+\n+static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t       u16 rel_vf_id)\n+{\n+\treturn 0;\n+}\n+\n+static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn\n+\t\t\t\t\t\t\t *p_hwfn, u16 rel_vf_id)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE bool ecore_iov_is_vf_acquired_not_initialized(struct\n+\t\t\t\t\t\t\t\t ecore_hwfn\n+\t\t\t\t\t\t\t\t *p_hwfn,\n+\t\t\t\t\t\t\t\t u16 rel_vf_id)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t    u16 rel_vf_id)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t int vfid)\n+{\n+\treturn 0;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(\n+\tstruct ecore_dev *p_dev, int vfid, u32 rate)\n+{\n+\treturn ECORE_INVAL;\n+}\n+#endif\n+#endif\ndiff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c\nindex 8d713e7..23ea426 100644\n--- a/drivers/net/qede/base/ecore_l2.c\n+++ b/drivers/net/qede/base/ecore_l2.c\n@@ -22,6 +22,8 @@\n #include 
\"reg_addr.h\"\n #include \"ecore_int.h\"\n #include \"ecore_hw.h\"\n+#include \"ecore_vf.h\"\n+#include \"ecore_sriov.h\"\n #include \"ecore_mcp.h\"\n \n #define ECORE_MAX_SGES_NUM 16\n@@ -106,6 +108,14 @@ enum _ecore_status_t\n ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,\n \t\t     struct ecore_sp_vport_start_params *p_params)\n {\n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,\n+\t\t\t\t\t       p_params->mtu,\n+\t\t\t\t\t       p_params->remove_inner_vlan,\n+\t\t\t\t\t       p_params->tpa_mode,\n+\t\t\t\t\t       p_params->max_buffers_per_cqe,\n+\t\t\t\t\t       p_params->only_untagged);\n+\n \treturn ecore_sp_eth_vport_start(p_hwfn, p_params);\n }\n \n@@ -339,6 +349,11 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,\n \tu8 abs_vport_id = 0, val;\n \tu16 wordval;\n \n+\tif (IS_VF(p_hwfn->p_dev)) {\n+\t\trc = ecore_vf_pf_vport_update(p_hwfn, p_params);\n+\t\treturn rc;\n+\t}\n+\n \trc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);\n \tif (rc != ECORE_SUCCESS)\n \t\treturn rc;\n@@ -428,6 +443,9 @@ enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,\n \tenum _ecore_status_t rc;\n \tu8 abs_vport_id = 0;\n \n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn ecore_vf_pf_vport_stop(p_hwfn);\n+\n \trc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);\n \tif (rc != ECORE_SUCCESS)\n \t\treturn rc;\n@@ -450,6 +468,19 @@ enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,\n \treturn ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);\n }\n \n+static enum _ecore_status_t\n+ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,\n+\t\t\t struct ecore_filter_accept_flags *p_accept_flags)\n+{\n+\tstruct ecore_sp_vport_update_params s_params;\n+\n+\tOSAL_MEMSET(&s_params, 0, sizeof(s_params));\n+\tOSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,\n+\t\t    sizeof(struct ecore_filter_accept_flags));\n+\n+\treturn ecore_vf_pf_vport_update(p_hwfn, &s_params);\n+}\n+\n enum _ecore_status_t\n ecore_filter_accept_cmd(struct ecore_dev *p_dev,\n \t\t\tu8 vport,\n@@ -474,6 +505,13 @@ ecore_filter_accept_cmd(struct ecore_dev *p_dev,\n \n \t\tupdate_params.opaque_fid = p_hwfn->hw_info.opaque_fid;\n \n+\t\tif (IS_VF(p_dev)) {\n+\t\t\trc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);\n+\t\t\tif (rc != ECORE_SUCCESS)\n+\t\t\t\treturn rc;\n+\t\t\tcontinue;\n+\t\t}\n+\n \t\trc = ecore_sp_vport_update(p_hwfn, &update_params,\n \t\t\t\t\t   comp_mode, p_comp_data);\n \t\tif (rc != ECORE_SUCCESS) {\n@@ -593,6 +631,17 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,\n \tenum _ecore_status_t rc;\n \tu64 init_prod_val = 0;\n \n+\tif (IS_VF(p_hwfn->p_dev)) {\n+\t\treturn ecore_vf_pf_rxq_start(p_hwfn,\n+\t\t\t\t\t     rx_queue_id,\n+\t\t\t\t\t     sb,\n+\t\t\t\t\t     sb_index,\n+\t\t\t\t\t     bd_max_bytes,\n+\t\t\t\t\t     bd_chain_phys_addr,\n+\t\t\t\t\t     cqe_pbl_addr,\n+\t\t\t\t\t     cqe_pbl_size, pp_prod);\n+\t}\n+\n \trc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_l2_queue);\n \tif (rc != ECORE_SUCCESS)\n \t\treturn rc;\n@@ -651,6 +700,13 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,\n \tu16 qid, abs_rx_q_id = 0;\n \tu8 i;\n \n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn ecore_vf_pf_rxqs_update(p_hwfn,\n+\t\t\t\t\t       rx_queue_id,\n+\t\t\t\t\t       num_rxqs,\n+\t\t\t\t\t       complete_cqe_flg,\n+\t\t\t\t\t       complete_event_flg);\n+\n \tOSAL_MEMSET(&init_data, 0, sizeof(init_data));\n \tinit_data.comp_mode = comp_mode;\n \tinit_data.p_comp_data = p_comp_data;\n@@ -697,6 +753,10 @@ 
ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,\n \tstruct ecore_sp_init_data init_data;\n \tu16 abs_rx_q_id = 0;\n \n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,\n+\t\t\t\t\t    cqe_completion);\n+\n \t/* Get SPQ entry */\n \tOSAL_MEMSET(&init_data, 0, sizeof(init_data));\n \tinit_data.cid = p_rx_cid->cid;\n@@ -814,6 +874,14 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,\n \tenum _ecore_status_t rc;\n \tu8 abs_stats_id = 0;\n \n+\tif (IS_VF(p_hwfn->p_dev)) {\n+\t\treturn ecore_vf_pf_txq_start(p_hwfn,\n+\t\t\t\t\t     tx_queue_id,\n+\t\t\t\t\t     sb,\n+\t\t\t\t\t     sb_index,\n+\t\t\t\t\t     pbl_addr, pbl_size, pp_doorbell);\n+\t}\n+\n \trc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);\n \tif (rc != ECORE_SUCCESS)\n \t\treturn rc;\n@@ -867,6 +935,9 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,\n \tenum _ecore_status_t rc = ECORE_NOTIMPL;\n \tstruct ecore_sp_init_data init_data;\n \n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);\n+\n \t/* Get SPQ entry */\n \tOSAL_MEMSET(&init_data, 0, sizeof(init_data));\n \tinit_data.cid = p_tx_cid->cid;\n@@ -1274,6 +1345,11 @@ ecore_filter_mcast_cmd(struct ecore_dev *p_dev,\n \tfor_each_hwfn(p_dev, i) {\n \t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];\n \n+\t\tif (IS_VF(p_dev)) {\n+\t\t\tecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);\n+\t\t\tcontinue;\n+\t\t}\n+\n \t\trc = ecore_sp_eth_filter_mcast(p_hwfn,\n \t\t\t\t\t       p_hwfn->hw_info.opaque_fid,\n \t\t\t\t\t       p_filter_cmd,\n@@ -1297,6 +1373,11 @@ ecore_filter_ucast_cmd(struct ecore_dev *p_dev,\n \tfor_each_hwfn(p_dev, i) {\n \t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];\n \n+\t\tif (IS_VF(p_dev)) {\n+\t\t\trc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);\n+\t\t\tcontinue;\n+\t\t}\n+\n \t\trc = ecore_sp_eth_filter_ucast(p_hwfn,\n \t\t\t\t\t       p_hwfn->hw_info.opaque_fid,\n \t\t\t\t\t       p_filter_cmd,\n@@ -1308,14 +1389,96 @@ ecore_filter_ucast_cmd(struct ecore_dev *p_dev,\n \treturn rc;\n }\n \n+/* IOV related */\n+enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       u32 concrete_vfid, u16 opaque_vfid)\n+{\n+\tstruct vf_start_ramrod_data *p_ramrod = OSAL_NULL;\n+\tstruct ecore_spq_entry *p_ent = OSAL_NULL;\n+\tenum _ecore_status_t rc = ECORE_NOTIMPL;\n+\tstruct ecore_sp_init_data init_data;\n+\n+\t/* Get SPQ entry */\n+\tOSAL_MEMSET(&init_data, 0, sizeof(init_data));\n+\tinit_data.cid = ecore_spq_get_cid(p_hwfn);\n+\tinit_data.opaque_fid = opaque_vfid;\n+\tinit_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;\n+\n+\trc = ecore_sp_init_request(p_hwfn, &p_ent,\n+\t\t\t\t   COMMON_RAMROD_VF_START,\n+\t\t\t\t   PROTOCOLID_COMMON, &init_data);\n+\tif (rc != ECORE_SUCCESS)\n+\t\treturn rc;\n+\n+\tp_ramrod = &p_ent->ramrod.vf_start;\n+\n+\tp_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);\n+\tp_ramrod->opaque_fid = OSAL_CPU_TO_LE16(opaque_vfid);\n+\n+\tswitch (p_hwfn->hw_info.personality) {\n+\tcase ECORE_PCI_ETH:\n+\t\tp_ramrod->personality = PERSONALITY_ETH;\n+\t\tbreak;\n+\tcase ECORE_PCI_ETH_ROCE:\n+\t\tp_ramrod->personality = PERSONALITY_RDMA_AND_ETH;\n+\t\tbreak;\n+\tdefault:\n+\t\tDP_NOTICE(p_hwfn, true, \"Unkown VF personality %d\\n\",\n+\t\t\t  p_hwfn->hw_info.personality);\n+\t\treturn ECORE_INVAL;\n+\t}\n+\n+\treturn ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);\n+}\n+\n+enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn)\n+{\n+\treturn ECORE_NOTIMPL;\n+}\n+\n+enum 
_ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t      u32 concrete_vfid, u16 opaque_vfid)\n+{\n+\tenum _ecore_status_t rc = ECORE_NOTIMPL;\n+\tstruct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;\n+\tstruct ecore_spq_entry *p_ent = OSAL_NULL;\n+\tstruct ecore_sp_init_data init_data;\n+\n+\t/* Get SPQ entry */\n+\tOSAL_MEMSET(&init_data, 0, sizeof(init_data));\n+\tinit_data.cid = ecore_spq_get_cid(p_hwfn);\n+\tinit_data.opaque_fid = opaque_vfid;\n+\tinit_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;\n+\n+\trc = ecore_sp_init_request(p_hwfn, &p_ent,\n+\t\t\t\t   COMMON_RAMROD_VF_STOP,\n+\t\t\t\t   PROTOCOLID_COMMON, &init_data);\n+\tif (rc != ECORE_SUCCESS)\n+\t\treturn rc;\n+\n+\tp_ramrod = &p_ent->ramrod.vf_stop;\n+\n+\tp_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);\n+\n+\treturn ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);\n+}\n+\n /* Statistics related code */\n static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t     u32 *p_addr, u32 *p_len,\n \t\t\t\t\t     u16 statistics_bin)\n {\n-\t*p_addr = BAR0_MAP_REG_PSDM_RAM +\n+\tif (IS_PF(p_hwfn->p_dev)) {\n+\t\t*p_addr = BAR0_MAP_REG_PSDM_RAM +\n \t\t    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);\n-\t*p_len = sizeof(struct eth_pstorm_per_queue_stat);\n+\t\t*p_len = sizeof(struct eth_pstorm_per_queue_stat);\n+\t} else {\n+\t\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\t\tstruct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;\n+\n+\t\t*p_addr = p_resp->pfdev_info.stats_info.pstats.address;\n+\t\t*p_len = p_resp->pfdev_info.stats_info.pstats.len;\n+\t}\n }\n \n static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,\n@@ -1349,9 +1512,17 @@ static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,\n \tstruct tstorm_per_port_stat tstats;\n \tu32 tstats_addr, tstats_len;\n \n-\ttstats_addr = BAR0_MAP_REG_TSDM_RAM +\n+\tif (IS_PF(p_hwfn->p_dev)) {\n+\t\ttstats_addr = BAR0_MAP_REG_TSDM_RAM +\n \t\t    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));\n-\ttstats_len = sizeof(struct tstorm_per_port_stat);\n+\t\ttstats_len = sizeof(struct tstorm_per_port_stat);\n+\t} else {\n+\t\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\t\tstruct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;\n+\n+\t\ttstats_addr = p_resp->pfdev_info.stats_info.tstats.address;\n+\t\ttstats_len = p_resp->pfdev_info.stats_info.tstats.len;\n+\t}\n \n \tOSAL_MEMSET(&tstats, 0, sizeof(tstats));\n \tecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);\n@@ -1366,9 +1537,17 @@ static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t     u32 *p_addr, u32 *p_len,\n \t\t\t\t\t     u16 statistics_bin)\n {\n-\t*p_addr = BAR0_MAP_REG_USDM_RAM +\n+\tif (IS_PF(p_hwfn->p_dev)) {\n+\t\t*p_addr = BAR0_MAP_REG_USDM_RAM +\n \t\t    USTORM_QUEUE_STAT_OFFSET(statistics_bin);\n-\t*p_len = sizeof(struct eth_ustorm_per_queue_stat);\n+\t\t*p_len = sizeof(struct eth_ustorm_per_queue_stat);\n+\t} else {\n+\t\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\t\tstruct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;\n+\n+\t\t*p_addr = p_resp->pfdev_info.stats_info.ustats.address;\n+\t\t*p_len = p_resp->pfdev_info.stats_info.ustats.len;\n+\t}\n }\n \n static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,\n@@ -1397,9 +1576,17 @@ static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t     u32 *p_addr, u32 *p_len,\n \t\t\t\t\t     u16 statistics_bin)\n {\n-\t*p_addr = BAR0_MAP_REG_MSDM_RAM +\n+\tif (IS_PF(p_hwfn->p_dev)) 
{\n+\t\t*p_addr = BAR0_MAP_REG_MSDM_RAM +\n \t\t    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);\n-\t*p_len = sizeof(struct eth_mstorm_per_queue_stat);\n+\t\t*p_len = sizeof(struct eth_mstorm_per_queue_stat);\n+\t} else {\n+\t\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\t\tstruct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;\n+\n+\t\t*p_addr = p_resp->pfdev_info.stats_info.mstats.address;\n+\t\t*p_len = p_resp->pfdev_info.stats_info.mstats.len;\n+\t}\n }\n \n static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,\n@@ -1524,24 +1711,28 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,\n \n \tfor_each_hwfn(p_dev, i) {\n \t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];\n-\t\tstruct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);\n-\n-\t\t/* The main vport index is relative first */\n-\t\tif (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {\n-\t\t\tDP_ERR(p_hwfn, \"No vport available!\\n\");\n-\t\t\tgoto out;\n+\t\tstruct ecore_ptt *p_ptt = IS_PF(p_dev) ?\n+\t\t    ecore_ptt_acquire(p_hwfn) : OSAL_NULL;\n+\n+\t\tif (IS_PF(p_dev)) {\n+\t\t\t/* The main vport index is relative first */\n+\t\t\tif (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {\n+\t\t\t\tDP_ERR(p_hwfn, \"No vport available!\\n\");\n+\t\t\t\tgoto out;\n+\t\t\t}\n \t\t}\n \n-\t\tif (!p_ptt) {\n+\t\tif (IS_PF(p_dev) && !p_ptt) {\n \t\t\tDP_ERR(p_hwfn, \"Failed to acquire ptt\\n\");\n \t\t\tcontinue;\n \t\t}\n \n \t\t__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,\n-\t\t\t\t\ttrue);\n+\t\t\t\t\tIS_PF(p_dev) ? true : false);\n \n out:\n-\t\tecore_ptt_release(p_hwfn, p_ptt);\n+\t\tif (IS_PF(p_dev))\n+\t\t\tecore_ptt_release(p_hwfn, p_ptt);\n \t}\n }\n \n@@ -1575,10 +1766,11 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)\n \t\tstruct eth_mstorm_per_queue_stat mstats;\n \t\tstruct eth_ustorm_per_queue_stat ustats;\n \t\tstruct eth_pstorm_per_queue_stat pstats;\n-\t\tstruct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);\n+\t\tstruct ecore_ptt *p_ptt = IS_PF(p_dev) ?\n+\t\t    ecore_ptt_acquire(p_hwfn) : OSAL_NULL;\n \t\tu32 addr = 0, len = 0;\n \n-\t\tif (!p_ptt) {\n+\t\tif (IS_PF(p_dev) && !p_ptt) {\n \t\t\tDP_ERR(p_hwfn, \"Failed to acquire ptt\\n\");\n \t\t\tcontinue;\n \t\t}\n@@ -1595,7 +1787,8 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)\n \t\t__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);\n \t\tecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);\n \n-\t\tecore_ptt_release(p_hwfn, p_ptt);\n+\t\tif (IS_PF(p_dev))\n+\t\t\tecore_ptt_release(p_hwfn, p_ptt);\n \t}\n \n \t/* PORT statistics are not necessarily reset, so we need to\ndiff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h\nindex 658af45..b0850ca 100644\n--- a/drivers/net/qede/base/ecore_l2.h\n+++ b/drivers/net/qede/base/ecore_l2.h\n@@ -15,6 +15,56 @@\n #include \"ecore_l2_api.h\"\n \n /**\n+ * @brief ecore_sp_vf_start -  VF Function Start\n+ *\n+ * This ramrod is sent when a virtual function (VF) is loaded, to initialize\n+ * it. It will configure the function-related parameters.\n+ *\n+ * @note Final phase API.\n+ *\n+ * @param p_hwfn\n+ * @param concrete_vfid\t\t\t\tVF ID\n+ * @param opaque_vfid\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+\n+enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       u32 concrete_vfid, u16 opaque_vfid);\n+\n+/**\n+ * @brief ecore_sp_vf_update - VF Function Update Ramrod\n+ *\n+ * This ramrod performs updates of a virtual function (VF).\n+ * It currently contains no functionality.\n+ *\n+ * @note Final phase API.\n+ *\n+ * @param p_hwfn\n+ 
*\n+ * @return enum _ecore_status_t\n+ */\n+\n+enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn);\n+\n+/**\n+ * @brief ecore_sp_vf_stop - VF Function Stop Ramrod\n+ *\n+ * This ramrod is sent to unload a virtual function (VF).\n+ *\n+ * @note Final phase API.\n+ *\n+ * @param p_hwfn\n+ * @param concrete_vfid\n+ * @param opaque_vfid\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+\n+enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t      u32 concrete_vfid, u16 opaque_vfid);\n+\n+/**\n  * @brief ecore_sp_eth_tx_queue_update -\n  *\n  * This ramrod updates a TX queue. It is used for setting the active\ndiff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c\nindex e51de24..7dff695 100644\n--- a/drivers/net/qede/base/ecore_mcp.c\n+++ b/drivers/net/qede/base/ecore_mcp.c\n@@ -14,6 +14,8 @@\n #include \"reg_addr.h\"\n #include \"ecore_hw.h\"\n #include \"ecore_init_fw_funcs.h\"\n+#include \"ecore_sriov.h\"\n+#include \"ecore_iov_api.h\"\n #include \"ecore_gtt_reg_addr.h\"\n #include \"ecore_iro.h\"\n \n@@ -517,6 +519,9 @@ static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,\n \t\t\t   \"FLR-ed VFs [%08x,...,%08x] - %08x\\n\",\n \t\t\t   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);\n \t}\n+\n+\tif (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))\n+\t\tOSAL_VF_FLR_UPDATE(p_hwfn);\n }\n \n enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,\n@@ -793,6 +798,10 @@ u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,\n {\n \tu32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;\n \n+\t/* TODO - Add support for VFs */\n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn ECORE_INVAL;\n+\n \tpath_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,\n \t\t\t\t\t\t PUBLIC_PATH);\n \tpath_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);\n@@ -1050,6 +1059,20 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,\n \t}\n #endif\n \n+\tif (IS_VF(p_dev)) {\n+\t\tif (p_hwfn->vf_iov_info) {\n+\t\t\tstruct pfvf_acquire_resp_tlv *p_resp;\n+\n+\t\t\tp_resp = &p_hwfn->vf_iov_info->acquire_resp;\n+\t\t\t*p_mfw_ver = p_resp->pfdev_info.mfw_ver;\n+\t\t\treturn ECORE_SUCCESS;\n+\t\t} else {\n+\t\t\tDP_VERBOSE(p_dev, ECORE_MSG_IOV,\n+\t\t\t\t   \"VF requested MFW vers prior to ACQUIRE\\n\");\n+\t\t\treturn ECORE_INVAL;\n+\t\t}\n+\t}\n+\n \tglobal_offsize = ecore_rd(p_hwfn, p_ptt,\n \t\t\t\t  SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->\n \t\t\t\t\t\t       public_base,\n@@ -1076,6 +1099,10 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,\n \tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];\n \tstruct ecore_ptt *p_ptt;\n \n+\t/* TODO - Add support for VFs */\n+\tif (IS_VF(p_dev))\n+\t\treturn ECORE_INVAL;\n+\n \tif (!ecore_mcp_is_init(p_hwfn)) {\n \t\tDP_NOTICE(p_hwfn, true, \"MFW is not initialized !\\n\");\n \t\treturn ECORE_BUSY;\n@@ -1291,6 +1318,9 @@ enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,\n \t}\n #endif\n \n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn ECORE_INVAL;\n+\n \tflash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);\n \tflash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>\n \t    MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;\ndiff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c\nindex e7743cd..80d234f 100644\n--- a/drivers/net/qede/base/ecore_spq.c\n+++ b/drivers/net/qede/base/ecore_spq.c\n@@ -20,6 +20,7 @@\n #include \"ecore_dev_api.h\"\n #include \"ecore_mcp.h\"\n #include \"ecore_hw.h\"\n+#include 
\"ecore_sriov.h\"\n \n /***************************************************************************\n  * Structures & Definitions\n@@ -250,7 +251,9 @@ ecore_async_event_completion(struct ecore_hwfn *p_hwfn,\n {\n \tswitch (p_eqe->protocol_id) {\n \tcase PROTOCOLID_COMMON:\n-\t\treturn ECORE_SUCCESS;\n+\t\treturn ecore_sriov_eqe_event(p_hwfn,\n+\t\t\t\t\t     p_eqe->opcode,\n+\t\t\t\t\t     p_eqe->echo, &p_eqe->data);\n \tdefault:\n \t\tDP_NOTICE(p_hwfn,\n \t\t\t  true, \"Unknown Async completion for protocol: %d\\n\",\n@@ -386,6 +389,9 @@ static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t\t *cqe,\n \t\t\t\t\t\t enum protocol_type protocol)\n {\n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);\n+\n \t/* @@@tmp - it's possible we'll eventually want to handle some\n \t * actual commands that can arrive here, but for now this is only\n \t * used to complete the ramrod using the echo value on the cqe\ndiff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c\nnew file mode 100644\nindex 0000000..eb74080\n--- /dev/null\n+++ b/drivers/net/qede/base/ecore_sriov.c\n@@ -0,0 +1,3422 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#include \"bcm_osal.h\"\n+#include \"ecore.h\"\n+#include \"reg_addr.h\"\n+#include \"ecore_sriov.h\"\n+#include \"ecore_status.h\"\n+#include \"ecore_hw.h\"\n+#include \"ecore_hw_defs.h\"\n+#include \"ecore_int.h\"\n+#include \"ecore_hsi_eth.h\"\n+#include \"ecore_l2.h\"\n+#include \"ecore_vfpf_if.h\"\n+#include \"ecore_rt_defs.h\"\n+#include \"ecore_init_ops.h\"\n+#include \"ecore_gtt_reg_addr.h\"\n+#include \"ecore_iro.h\"\n+#include \"ecore_mcp.h\"\n+#include \"ecore_cxt.h\"\n+#include \"ecore_vf.h\"\n+#include \"ecore_init_fw_funcs.h\"\n+\n+/* TEMPORARY until we implement print_enums... */\n+const char *ecore_channel_tlvs_string[] = {\n+\t\"CHANNEL_TLV_NONE\",\t/* ends tlv sequence */\n+\t\"CHANNEL_TLV_ACQUIRE\",\n+\t\"CHANNEL_TLV_VPORT_START\",\n+\t\"CHANNEL_TLV_VPORT_UPDATE\",\n+\t\"CHANNEL_TLV_VPORT_TEARDOWN\",\n+\t\"CHANNEL_TLV_START_RXQ\",\n+\t\"CHANNEL_TLV_START_TXQ\",\n+\t\"CHANNEL_TLV_STOP_RXQ\",\n+\t\"CHANNEL_TLV_STOP_TXQ\",\n+\t\"CHANNEL_TLV_UPDATE_RXQ\",\n+\t\"CHANNEL_TLV_INT_CLEANUP\",\n+\t\"CHANNEL_TLV_CLOSE\",\n+\t\"CHANNEL_TLV_RELEASE\",\n+\t\"CHANNEL_TLV_LIST_END\",\n+\t\"CHANNEL_TLV_UCAST_FILTER\",\n+\t\"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE\",\n+\t\"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH\",\n+\t\"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP\",\n+\t\"CHANNEL_TLV_VPORT_UPDATE_MCAST\",\n+\t\"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM\",\n+\t\"CHANNEL_TLV_VPORT_UPDATE_RSS\",\n+\t\"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN\",\n+\t\"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA\",\n+\t\"CHANNEL_TLV_MAX\"\n+};\n+\n+/* TODO - this is linux crc32; Need a way to ifdef it out for linux */\n+u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)\n+{\n+\tint i;\n+\n+\twhile (length--) {\n+\t\tcrc ^= *ptr++;\n+\t\tfor (i = 0; i < 8; i++)\n+\t\t\tcrc = (crc >> 1) ^ ((crc & 1) ? 
0xedb88320 : 0);\n+\t}\n+\treturn crc;\n+}\n+\n+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\tint vfid,\n+\t\t\t\t\t\tstruct ecore_ptt *p_ptt)\n+{\n+\tstruct ecore_bulletin_content *p_bulletin;\n+\tstruct ecore_dmae_params params;\n+\tstruct ecore_vf_info *p_vf;\n+\tint crc_size = sizeof(p_bulletin->crc);\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!p_vf)\n+\t\treturn ECORE_INVAL;\n+\n+\t/* TODO - check VF is in a state where it can accept message */\n+\tif (!p_vf->vf_bulletin)\n+\t\treturn ECORE_INVAL;\n+\n+\tp_bulletin = p_vf->bulletin.p_virt;\n+\n+\t/* Increment bulletin board version and compute crc */\n+\tp_bulletin->version++;\n+\tp_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,\n+\t\t\t\t      p_vf->bulletin.size - crc_size);\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\\n\",\n+\t\t   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);\n+\n+\t/* propagate bulletin board via dmae to vm memory */\n+\tOSAL_MEMSET(&params, 0, sizeof(params));\n+\tparams.flags = ECORE_DMAE_FLAG_VF_DST;\n+\tparams.dst_vfid = p_vf->abs_vf_id;\n+\treturn ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,\n+\t\t\t\t    p_vf->vf_bulletin, p_vf->bulletin.size / 4,\n+\t\t\t\t    &params);\n+}\n+\n+static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)\n+{\n+\tstruct ecore_hw_sriov_info *iov = &p_dev->sriov_info;\n+\tint pos = iov->pos;\n+\n+\tDP_VERBOSE(p_dev, ECORE_MSG_IOV, \"sriov ext pos %d\\n\", pos);\n+\tOSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);\n+\n+\tOSAL_PCI_READ_CONFIG_WORD(p_dev,\n+\t\t\t\t  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);\n+\tOSAL_PCI_READ_CONFIG_WORD(p_dev,\n+\t\t\t\t  pos + PCI_SRIOV_INITIAL_VF,\n+\t\t\t\t  &iov->initial_vfs);\n+\n+\tOSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);\n+\tif (iov->num_vfs) {\n+\t\t/* @@@TODO - in future we might want to add an OSAL here to\n+\t\t * allow each OS to decide on its own how to act.\n+\t\t */\n+\t\tDP_VERBOSE(p_dev, ECORE_MSG_IOV,\n+\t\t\t   \"Number of VFs are already set to non-zero value.\"\n+\t\t\t   \" Ignoring PCI configuration value\\n\");\n+\t\tiov->num_vfs = 0;\n+\t}\n+\n+\tOSAL_PCI_READ_CONFIG_WORD(p_dev,\n+\t\t\t\t  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);\n+\n+\tOSAL_PCI_READ_CONFIG_WORD(p_dev,\n+\t\t\t\t  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);\n+\n+\tOSAL_PCI_READ_CONFIG_WORD(p_dev,\n+\t\t\t\t  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);\n+\n+\tOSAL_PCI_READ_CONFIG_DWORD(p_dev,\n+\t\t\t\t   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);\n+\n+\tOSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);\n+\n+\tOSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);\n+\n+\tDP_VERBOSE(p_dev, ECORE_MSG_IOV, \"IOV info[%d]: nres %d, cap 0x%x,\"\n+\t\t   \"ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,\"\n+\t\t   \" stride %d, page size 0x%x\\n\", 0,\n+\t\t   iov->nres, iov->cap, iov->ctrl,\n+\t\t   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,\n+\t\t   iov->offset, iov->stride, iov->pgsz);\n+\n+\t/* Some sanity checks */\n+\tif (iov->num_vfs > NUM_OF_VFS(p_dev) ||\n+\t    iov->total_vfs > NUM_OF_VFS(p_dev)) {\n+\t\t/* This can happen only due to a bug. 
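ecore_crc32() above is the reflected CRC-32 inner loop (polynomial 0xEDB88320) with the seed left to the caller and no final inversion, which is why the bulletin code seeds it with 0. A standalone sketch of how that raw loop relates to the familiar IEEE CRC-32 check value; only the C standard library is assumed, and the names are illustrative:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Same loop as ecore_crc32(): reflected CRC-32, caller-supplied seed. */
static uint32_t crc32_raw(uint32_t crc, const uint8_t *ptr, size_t len)
{
	while (len--) {
		crc ^= *ptr++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

/* The customary ~0 seed and final inversion yield standard CRC-32. */
static uint32_t crc32_ieee(const void *buf, size_t len)
{
	return crc32_raw(0xffffffffu, (const uint8_t *)buf, len) ^ 0xffffffffu;
}

int main(void)
{
	/* Standard CRC-32 check value for "123456789". */
	assert(crc32_ieee("123456789", 9) == 0xcbf43926u);
	return 0;
}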
In this case we set\n+\t\t * num_vfs to zero to avoid memory corruption in the code that\n+\t\t * assumes max number of vfs\n+\t\t */\n+\t\tDP_NOTICE(p_dev, false,\n+\t\t\t  \"IOV: Unexpected number of vfs set: %d\"\n+\t\t\t  \" setting num_vf to zero\\n\",\n+\t\t\t  iov->num_vfs);\n+\n+\t\tiov->num_vfs = 0;\n+\t\tiov->total_vfs = 0;\n+\t}\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  struct ecore_ptt *p_ptt)\n+{\n+\tstruct ecore_igu_block *p_sb;\n+\tu16 sb_id;\n+\tu32 val;\n+\n+\tif (!p_hwfn->hw_info.p_igu_info) {\n+\t\tDP_ERR(p_hwfn,\n+\t\t       \"ecore_iov_clear_vf_igu_blocks IGU Info not inited\\n\");\n+\t\treturn;\n+\t}\n+\n+\tfor (sb_id = 0;\n+\t     sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {\n+\t\tp_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];\n+\t\tif ((p_sb->status & ECORE_IGU_STATUS_FREE) &&\n+\t\t    !(p_sb->status & ECORE_IGU_STATUS_PF)) {\n+\t\t\tval = ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t       IGU_REG_MAPPING_MEMORY + sb_id * 4);\n+\t\t\tSET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);\n+\t\t\tecore_wr(p_hwfn, p_ptt,\n+\t\t\t\t IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);\n+\t\t}\n+\t}\n+}\n+\n+static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)\n+{\n+\tu16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;\n+\tunion pfvf_tlvs *p_reply_virt_addr;\n+\tunion vfpf_tlvs *p_req_virt_addr;\n+\tstruct ecore_bulletin_content *p_bulletin_virt;\n+\tstruct ecore_pf_iov *p_iov_info;\n+\tdma_addr_t req_p, rply_p, bulletin_p;\n+\tu8 idx = 0;\n+\n+\tp_iov_info = p_hwfn->pf_iov_info;\n+\n+\tOSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));\n+\n+\tp_req_virt_addr = p_iov_info->mbx_msg_virt_addr;\n+\treq_p = p_iov_info->mbx_msg_phys_addr;\n+\tp_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;\n+\trply_p = p_iov_info->mbx_reply_phys_addr;\n+\tp_bulletin_virt = p_iov_info->p_bulletins;\n+\tbulletin_p = p_iov_info->bulletins_phys;\n+\tif (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {\n+\t\tDP_ERR(p_hwfn,\n+\t\t       \"ecore_iov_setup_vfdb called without alloc mem first\\n\");\n+\t\treturn;\n+\t}\n+\n+\tp_iov_info->base_vport_id = 1;\t/* @@@TBD resource allocation */\n+\n+\tfor (idx = 0; idx < num_vfs; idx++) {\n+\t\tstruct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];\n+\t\tu32 concrete;\n+\n+\t\tvf->vf_mbx.req_virt = p_req_virt_addr + idx;\n+\t\tvf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);\n+\t\tvf->vf_mbx.reply_virt = p_reply_virt_addr + idx;\n+\t\tvf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);\n+\n+#ifdef CONFIG_ECORE_SW_CHANNEL\n+\t\tvf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);\n+\t\tvf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;\n+#endif\n+\t\tvf->state = VF_STOPPED;\n+\n+\t\tvf->bulletin.phys = idx *\n+\t\t    sizeof(struct ecore_bulletin_content) + bulletin_p;\n+\t\tvf->bulletin.p_virt = p_bulletin_virt + idx;\n+\t\tvf->bulletin.size = sizeof(struct ecore_bulletin_content);\n+\n+\t\tvf->relative_vf_id = idx;\n+\t\tvf->abs_vf_id = idx + p_hwfn->hw_info.first_vf_in_pf;\n+\t\tconcrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);\n+\t\tvf->concrete_fid = concrete;\n+\t\t/* TODO - need to devise a better way of getting opaque */\n+\t\tvf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |\n+\t\t    (vf->abs_vf_id << 8);\n+\t\t/* @@TBD MichalK - add base vport_id of VFs to equation */\n+\t\tvf->vport_id = p_iov_info->base_vport_id + idx;\n+\t}\n+}\n+\n+static enum _ecore_status_t 
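The fields read in ecore_iov_pci_cfg_info() come from the standard PCIe SR-IOV extended capability, and per the SR-IOV spec they fix each VF's PCI routing ID arithmetically: the first VF sits at the PF's routing ID plus First VF Offset, and later VFs are spaced by VF Stride. A sketch with illustrative struct and values:

#include <stdint.h>
#include <stdio.h>

struct sriov_cap {
	uint16_t total_vfs;	/* PCI_SRIOV_TOTAL_VF */
	uint16_t offset;	/* PCI_SRIOV_VF_OFFSET */
	uint16_t stride;	/* PCI_SRIOV_VF_STRIDE */
};

/* Routing ID of the idx-th VF (0-based), per the SR-IOV capability. */
static uint16_t vf_routing_id(uint16_t pf_rid, const struct sriov_cap *cap,
			      uint16_t idx)
{
	return pf_rid + cap->offset + cap->stride * idx;
}

int main(void)
{
	struct sriov_cap cap = { .total_vfs = 8, .offset = 16, .stride = 1 };

	/* PF at 04:00.0 (RID 0x0400): VF index 2 lands at 04:02.2. */
	printf("VF2 RID = 0x%04x\n", vf_routing_id(0x0400, &cap, 2));
	return 0;
}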
ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)\n+{\n+\tstruct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;\n+\tvoid **p_v_addr;\n+\tu16 num_vfs = 0;\n+\n+\tnum_vfs = p_hwfn->p_dev->sriov_info.total_vfs;\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"ecore_iov_allocate_vfdb for %d VFs\\n\", num_vfs);\n+\n+\t/* Allocate PF Mailbox buffer (per-VF) */\n+\tp_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;\n+\tp_v_addr = &p_iov_info->mbx_msg_virt_addr;\n+\t*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t\t    &p_iov_info->mbx_msg_phys_addr,\n+\t\t\t\t\t    p_iov_info->mbx_msg_size);\n+\tif (!*p_v_addr)\n+\t\treturn ECORE_NOMEM;\n+\n+\t/* Allocate PF Mailbox Reply buffer (per-VF) */\n+\tp_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;\n+\tp_v_addr = &p_iov_info->mbx_reply_virt_addr;\n+\t*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t\t    &p_iov_info->mbx_reply_phys_addr,\n+\t\t\t\t\t    p_iov_info->mbx_reply_size);\n+\tif (!*p_v_addr)\n+\t\treturn ECORE_NOMEM;\n+\n+\tp_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *\n+\t    num_vfs;\n+\tp_v_addr = &p_iov_info->p_bulletins;\n+\t*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t\t    &p_iov_info->bulletins_phys,\n+\t\t\t\t\t    p_iov_info->bulletins_size);\n+\tif (!*p_v_addr)\n+\t\treturn ECORE_NOMEM;\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"PF's Requests mailbox [%p virt 0x%lx phys],  Response\"\n+\t\t   \" mailbox [%p virt 0x%lx phys] Bulletins\"\n+\t\t   \" [%p virt 0x%lx phys]\\n\",\n+\t\t   p_iov_info->mbx_msg_virt_addr,\n+\t\t   (u64)p_iov_info->mbx_msg_phys_addr,\n+\t\t   p_iov_info->mbx_reply_virt_addr,\n+\t\t   (u64)p_iov_info->mbx_reply_phys_addr,\n+\t\t   p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);\n+\n+\t/* @@@TBD MichalK - statistics / RSS */\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)\n+{\n+\tstruct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;\n+\n+\tif (p_hwfn->pf_iov_info->mbx_msg_virt_addr)\n+\t\tOSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t       p_iov_info->mbx_msg_virt_addr,\n+\t\t\t\t       p_iov_info->mbx_msg_phys_addr,\n+\t\t\t\t       p_iov_info->mbx_msg_size);\n+\n+\tif (p_hwfn->pf_iov_info->mbx_reply_virt_addr)\n+\t\tOSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t       p_iov_info->mbx_reply_virt_addr,\n+\t\t\t\t       p_iov_info->mbx_reply_phys_addr,\n+\t\t\t\t       p_iov_info->mbx_reply_size);\n+\n+\tif (p_iov_info->p_bulletins)\n+\t\tOSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t       p_iov_info->p_bulletins,\n+\t\t\t\t       p_iov_info->bulletins_phys,\n+\t\t\t\t       p_iov_info->bulletins_size);\n+\n+\t/* @@@TBD MichalK - statistics / RSS */\n+}\n+\n+enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)\n+{\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\tstruct ecore_pf_iov *p_sriov;\n+\n+\tif (!IS_PF_SRIOV(p_hwfn)) {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"No SR-IOV - no need for IOV db\\n\");\n+\t\treturn rc;\n+\t}\n+\n+\tp_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));\n+\tif (!p_sriov) {\n+\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t  \"Failed to allocate `struct ecore_sriov'\");\n+\t\treturn ECORE_NOMEM;\n+\t}\n+\n+\tp_hwfn->pf_iov_info = p_sriov;\n+\n+\trc = ecore_iov_allocate_vfdb(p_hwfn);\n+\n+\treturn rc;\n+}\n+\n+void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)\n+{\n+\tif (!IS_PF_SRIOV(p_hwfn) || 
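ecore_iov_allocate_vfdb() and ecore_iov_setup_vfdb() carve three single DMA-coherent allocations into fixed-size per-VF slots, so one virtual/physical base pair is enough to locate any VF's mailbox. A minimal sketch of that indexing, with stand-in types:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

struct mbox_slot { uint8_t bytes[4096]; };	/* stand-in for union vfpf_tlvs */

struct vf_mbx {
	struct mbox_slot *req_virt;
	dma_addr_t req_phys;
};

static void assign_slot(struct vf_mbx *mbx, struct mbox_slot *base_virt,
			dma_addr_t base_phys, unsigned int idx)
{
	mbx->req_virt = base_virt + idx;                      /* element stride */
	mbx->req_phys = base_phys + idx * sizeof(*base_virt); /* byte stride */
}

int main(void)
{
	static struct mbox_slot pool[8];	/* stands in for the DMA block */
	struct vf_mbx mbx;

	assign_slot(&mbx, pool, 0x1000000, 3);
	printf("VF3 slot at phys 0x%llx\n", (unsigned long long)mbx.req_phys);
	return 0;
}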
!p_hwfn->pf_iov_info)\n+\t\treturn;\n+\n+\tecore_iov_setup_vfdb(p_hwfn);\n+\tecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);\n+}\n+\n+void ecore_iov_free(struct ecore_hwfn *p_hwfn)\n+{\n+\tif (p_hwfn->pf_iov_info) {\n+\t\tecore_iov_free_vfdb(p_hwfn);\n+\t\tOSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);\n+\t}\n+}\n+\n+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       struct ecore_ptt *p_ptt)\n+{\n+\tenum _ecore_status_t rc;\n+\n+\t/* @@@ TBD get this information from shmem / pci cfg */\n+\tif (IS_VF(p_hwfn->p_dev))\n+\t\treturn ECORE_SUCCESS;\n+\n+\t/* First hwfn should learn the PCI configuration */\n+\tif (IS_LEAD_HWFN(p_hwfn)) {\n+\t\tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n+\t\tint *pos = &p_hwfn->p_dev->sriov_info.pos;\n+\n+\t\t*pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,\n+\t\t\t\t\t\t    PCI_EXT_CAP_ID_SRIOV);\n+\t\tif (!*pos) {\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"No PCIe IOV support\\n\");\n+\t\t\treturn ECORE_SUCCESS;\n+\t\t}\n+\n+\t\trc = ecore_iov_pci_cfg_info(p_dev);\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\t} else if (!p_hwfn->p_dev->sriov_info.pos) {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV, \"No PCIe IOV support\\n\");\n+\t\treturn ECORE_SUCCESS;\n+\t}\n+\n+\t/* Calculate the first VF index - this is a bit tricky; Basically,\n+\t * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin\n+\t * after the first engine's VFs.\n+\t */\n+\tp_hwfn->hw_info.first_vf_in_pf = p_hwfn->p_dev->sriov_info.offset +\n+\t    p_hwfn->abs_pf_id - 16;\n+\tif (ECORE_PATH_ID(p_hwfn))\n+\t\tp_hwfn->hw_info.first_vf_in_pf -= MAX_NUM_VFS_BB;\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"First VF in hwfn 0x%08x\\n\", p_hwfn->hw_info.first_vf_in_pf);\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    u16 relative_vf_id,\n+\t\t\t\t\t    bool b_enabled_only)\n+{\n+\tstruct ecore_vf_info *vf = OSAL_NULL;\n+\n+\tif (!p_hwfn->pf_iov_info) {\n+\t\tDP_NOTICE(p_hwfn->p_dev, true, \"No iov info\\n\");\n+\t\treturn OSAL_NULL;\n+\t}\n+\n+\tif (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))\n+\t\tvf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];\n+\telse\n+\t\tDP_ERR(p_hwfn, \"ecore_iov_get_vf_info: VF[%d] is not enabled\\n\",\n+\t\t       relative_vf_id);\n+\n+\treturn vf;\n+}\n+\n+void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t u16 rel_vf_id, u8 to_disable)\n+{\n+\tstruct ecore_vf_info *vf;\n+\n+\tvf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);\n+\tif (!vf)\n+\t\treturn;\n+\n+\tvf->to_disable = to_disable;\n+}\n+\n+void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable)\n+{\n+\tu16 i;\n+\n+\tfor (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)\n+\t\tecore_iov_set_vf_to_disable(p_hwfn, i, to_disable);\n+}\n+\n+#ifndef LINUX_REMOVE\n+/* @@@TBD Consider taking outside of ecore... 
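A sketch of the first-VF arithmetic in ecore_iov_hw_info() above: VFs begin at routing-ID offset 16 relative to PF0, and a second-engine hwfn subtracts the first engine's VF count. The MAX_NUM_VFS_BB value below is an assumed stand-in for illustration only:

#include <stdint.h>
#include <stdio.h>

#define MAX_NUM_VFS_BB 120	/* assumed per-engine VF count */

static uint16_t first_vf_in_pf(uint16_t sriov_offset, uint8_t abs_pf_id,
			       int path_id)
{
	uint16_t first = sriov_offset + abs_pf_id - 16;

	if (path_id)		/* second engine's VFs follow the first's */
		first -= MAX_NUM_VFS_BB;
	return first;
}

int main(void)
{
	/* PF2 on the first engine with VF offset 16: its VFs start at 2. */
	printf("first VF = %u\n", first_vf_in_pf(16, 2, 0));
	return 0;
}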
*/\n+enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  u16 vf_id, void *ctx)\n+{\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\tstruct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);\n+\n+\tif (vf != OSAL_NULL) {\n+\t\tvf->ctx = ctx;\n+#ifdef CONFIG_ECORE_SW_CHANNEL\n+\t\tvf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;\n+#endif\n+\t} else {\n+\t\trc = ECORE_UNKNOWN_ERROR;\n+\t}\n+\treturn rc;\n+}\n+#endif\n+\n+/**\n+ * VF enable primitives\n+ *\n+ * when pretend is required the caller is responsible\n+ * for calling pretend prior to calling these routines\n+ */\n+\n+/* clears vf error in all semi blocks\n+ * Assumption: called under VF pretend...\n+ */\n+static OSAL_INLINE void ecore_iov_vf_semi_clear_err(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t    struct ecore_ptt *p_ptt)\n+{\n+\tecore_wr(p_hwfn, p_ptt, TSEM_REG_VF_ERROR, 1);\n+\tecore_wr(p_hwfn, p_ptt, USEM_REG_VF_ERROR, 1);\n+\tecore_wr(p_hwfn, p_ptt, MSEM_REG_VF_ERROR, 1);\n+\tecore_wr(p_hwfn, p_ptt, XSEM_REG_VF_ERROR, 1);\n+\tecore_wr(p_hwfn, p_ptt, YSEM_REG_VF_ERROR, 1);\n+\tecore_wr(p_hwfn, p_ptt, PSEM_REG_VF_ERROR, 1);\n+}\n+\n+static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t struct ecore_ptt *p_ptt, u8 abs_vfid)\n+{\n+\tecore_wr(p_hwfn, p_ptt,\n+\t\t PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,\n+\t\t 1 << (abs_vfid & 0x1f));\n+}\n+\n+static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t   struct ecore_ptt *p_ptt,\n+\t\t\t\t   struct ecore_vf_info *vf)\n+{\n+\tint i;\n+\tu16 igu_sb_id;\n+\n+\t/* Set VF masks and configuration - pretend */\n+\tecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);\n+\n+\tecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"value in VF_CONFIGURATION of vf %d after write %x\\n\",\n+\t\t   vf->abs_vf_id,\n+\t\t   ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));\n+\n+\t/* unpretend */\n+\tecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);\n+\n+\t/* iterate over all queues, clear sb consumer */\n+\tfor (i = 0; i < vf->num_sbs; i++) {\n+\t\tigu_sb_id = vf->igu_sbs[i];\n+\t\t/* Set then clear... 
*/\n+\t\tecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,\n+\t\t\t\t\t vf->opaque_fid);\n+\t\tecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,\n+\t\t\t\t\t vf->opaque_fid);\n+\t}\n+}\n+\n+static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t     struct ecore_ptt *p_ptt,\n+\t\t\t\t     struct ecore_vf_info *vf, bool enable)\n+{\n+\tu32 igu_vf_conf;\n+\n+\tecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);\n+\n+\tigu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);\n+\n+\tif (enable)\n+\t\tigu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;\n+\telse\n+\t\tigu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;\n+\n+\tecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);\n+\n+\t/* unpretend */\n+\tecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);\n+}\n+\n+static enum _ecore_status_t\n+ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,\n+\t\t\t   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)\n+{\n+\tu32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;\n+\tenum _ecore_status_t rc;\n+\n+\tif (vf->to_disable)\n+\t\treturn ECORE_SUCCESS;\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"Enable internal access for vf %x [abs %x]\\n\", vf->abs_vf_id,\n+\t\t   ECORE_VF_ABS_ID(p_hwfn, vf));\n+\n+\tecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,\n+\t\t\t\t     ECORE_VF_ABS_ID(p_hwfn, vf));\n+\n+\trc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,\n+\t\t\t\t      vf->abs_vf_id, vf->num_sbs);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);\n+\n+\tSET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);\n+\tSTORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);\n+\n+\tecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,\n+\t\t       p_hwfn->hw_info.hw_mode);\n+\n+\t/* unpretend */\n+\tecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);\n+\n+\tif (vf->state != VF_STOPPED) {\n+\t\tDP_NOTICE(p_hwfn, true, \"VF[%02x] is already started\\n\",\n+\t\t\t  vf->abs_vf_id);\n+\t\treturn ECORE_INVAL;\n+\t}\n+\n+\t/* Start VF */\n+\trc = ecore_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);\n+\tif (rc != ECORE_SUCCESS)\n+\t\tDP_NOTICE(p_hwfn, true, \"Failed to start VF[%02x]\\n\",\n+\t\t\t  vf->abs_vf_id);\n+\n+\tvf->state = VF_FREE;\n+\n+\treturn rc;\n+}\n+\n+/**\n+ *\n+ * @brief ecore_iov_config_perm_table - configure the permission\n+ *      zone table.\n+ *      In E4, queue zone permission table size is 320x9. There\n+ *      are 320 VF queues for single engine device (256 for dual\n+ *      engine device), and each entry has the following format:\n+ *      {Valid, VF[7:0]}\n+ * @param p_hwfn\n+ * @param p_ptt\n+ * @param vf\n+ * @param enable\n+ */\n+static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\tstruct ecore_ptt *p_ptt,\n+\t\t\t\t\tstruct ecore_vf_info *vf, u8 enable)\n+{\n+\tu32 reg_addr;\n+\tu32 val;\n+\tu16 qzone_id = 0;\n+\tint qid;\n+\n+\tfor (qid = 0; qid < vf->num_rxqs; qid++) {\n+\t\tecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,\n+\t\t\t\t  &qzone_id);\n+\n+\t\treg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;\n+\t\tval = enable ? 
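The permission-table write being prepared here packs the documented {Valid, VF[7:0]} entry format into nine bits: bit 8 is the valid flag, bits 7:0 the absolute VF id, and a disabled entry is simply zero. A tiny sketch of the encoding (the macro name is illustrative):

#include <assert.h>
#include <stdint.h>

#define PERM_VALID_BIT (1u << 8)	/* illustrative name for bit 8 */

static uint32_t perm_entry(uint8_t abs_vf_id, int enable)
{
	return enable ? (abs_vf_id | PERM_VALID_BIT) : 0;
}

int main(void)
{
	assert(perm_entry(5, 1) == 0x105);	/* valid bit set, VF id 5 */
	assert(perm_entry(5, 0) == 0);		/* disabled entries are zeroed */
	return 0;
}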
(vf->abs_vf_id | (1 << 8)) : 0;\n+\t\tecore_wr(p_hwfn, p_ptt, reg_addr, val);\n+\t}\n+}\n+\n+static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\tstruct ecore_ptt *p_ptt,\n+\t\t\t\t\tstruct ecore_vf_info *vf)\n+{\n+\t/* Reset vf in IGU - interrupts are still disabled */\n+\tecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);\n+\n+\tecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1 /* enable */);\n+\n+\t/* Permission Table */\n+\tecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true /* enable */);\n+}\n+\n+static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t     struct ecore_ptt *p_ptt,\n+\t\t\t\t     struct ecore_vf_info *vf,\n+\t\t\t\t     u16 num_rx_queues)\n+{\n+\tint igu_id = 0;\n+\tint qid = 0;\n+\tu32 val = 0;\n+\tstruct ecore_igu_block *igu_blocks =\n+\t    p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;\n+\n+\tif (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)\n+\t\tnum_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;\n+\n+\tp_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;\n+\n+\tSET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);\n+\tSET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);\n+\tSET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);\n+\n+\twhile ((qid < num_rx_queues) &&\n+\t       (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {\n+\t\tif (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {\n+\t\t\tstruct cau_sb_entry sb_entry;\n+\n+\t\t\tvf->igu_sbs[qid] = (u16)igu_id;\n+\t\t\tigu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;\n+\n+\t\t\tSET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);\n+\n+\t\t\tecore_wr(p_hwfn, p_ptt,\n+\t\t\t\t IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,\n+\t\t\t\t val);\n+\n+\t\t\t/* Configure igu sb in CAU which were marked valid */\n+\t\t\tecore_init_cau_sb_entry(p_hwfn, &sb_entry,\n+\t\t\t\t\t\tp_hwfn->rel_pf_id,\n+\t\t\t\t\t\tvf->abs_vf_id, 1);\n+\t\t\tecore_dmae_host2grc(p_hwfn, p_ptt,\n+\t\t\t\t\t    (u64)(osal_uintptr_t)&sb_entry,\n+\t\t\t\t\t    CAU_REG_SB_VAR_MEMORY +\n+\t\t\t\t\t    igu_id * sizeof(u64), 2, 0);\n+\t\t\tqid++;\n+\t\t}\n+\t\tigu_id++;\n+\t}\n+\n+\tvf->num_sbs = (u8)num_rx_queues;\n+\n+\treturn vf->num_sbs;\n+}\n+\n+/**\n+ *\n+ * @brief The function invalidates all the VF entries,\n+ *        technically this isn't required, but added for\n+ *        cleanness and ease of debugging in case a VF attempts to\n+ *        produce an interrupt after it has been taken down.\n+ *\n+ * @param p_hwfn\n+ * @param p_ptt\n+ * @param vf\n+ */\n+static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t      struct ecore_ptt *p_ptt,\n+\t\t\t\t      struct ecore_vf_info *vf)\n+{\n+\tstruct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;\n+\tint idx, igu_id;\n+\tu32 addr, val;\n+\n+\t/* Invalidate igu CAM lines and mark them as free */\n+\tfor (idx = 0; idx < vf->num_sbs; idx++) {\n+\t\tigu_id = vf->igu_sbs[idx];\n+\t\taddr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;\n+\n+\t\tval = ecore_rd(p_hwfn, p_ptt, addr);\n+\t\tSET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);\n+\t\tecore_wr(p_hwfn, p_ptt, addr, val);\n+\n+\t\tp_info->igu_map.igu_blocks[igu_id].status |=\n+\t\t    ECORE_IGU_STATUS_FREE;\n+\n+\t\tp_hwfn->hw_info.p_igu_info->free_blks++;\n+\t}\n+\n+\tvf->num_sbs = 0;\n+}\n+\n+enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      struct ecore_ptt *p_ptt,\n+\t\t\t\t\t      u16 rel_vf_id, u16 num_rx_queues)\n+{\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\tstruct ecore_vf_info *vf = OSAL_NULL;\n+\tu8 
num_of_vf_available_chains = 0;\n+\tu32 cids;\n+\tu8 i;\n+\n+\tif (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {\n+\t\tDP_NOTICE(p_hwfn, true, \"VF[%d] is already active.\\n\",\n+\t\t\t  rel_vf_id);\n+\t\treturn ECORE_INVAL;\n+\t}\n+\n+\tvf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);\n+\tif (!vf) {\n+\t\tDP_ERR(p_hwfn, \"ecore_iov_init_hw_for_vf : vf is OSAL_NULL\\n\");\n+\t\treturn ECORE_UNKNOWN_ERROR;\n+\t}\n+\n+\t/* Limit number of queues according to number of CIDs */\n+\tecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"VF[%d] - requesting to initialize for 0x%04x queues\"\n+\t\t   \" [0x%04x CIDs available]\\n\",\n+\t\t   vf->relative_vf_id, num_rx_queues, (u16)cids);\n+\tnum_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));\n+\n+\tnum_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,\n+\t\t\t\t\t\t\t       p_ptt,\n+\t\t\t\t\t\t\t       vf,\n+\t\t\t\t\t\t\t       num_rx_queues);\n+\tif (num_of_vf_available_chains == 0) {\n+\t\tDP_ERR(p_hwfn, \"no available igu sbs\\n\");\n+\t\treturn ECORE_NOMEM;\n+\t}\n+\n+\t/* Choose queue number and index ranges */\n+\tvf->num_rxqs = num_of_vf_available_chains;\n+\tvf->num_txqs = num_of_vf_available_chains;\n+\n+\tfor (i = 0; i < vf->num_rxqs; i++) {\n+\t\tu16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,\n+\t\t\t\t\t\t\t     vf->igu_sbs[i]);\n+\n+\t\tif (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"VF[%d] will require utilizing of\"\n+\t\t\t\t  \" out-of-bounds queues - %04x\\n\",\n+\t\t\t\t  vf->relative_vf_id, queue_id);\n+\t\t\t/* TODO - cleanup the already allocated SBs */\n+\t\t\treturn ECORE_INVAL;\n+\t\t}\n+\n+\t\t/* CIDs are per-VF, so no problem having them 0-based. */\n+\t\tvf->vf_queues[i].fw_rx_qid = queue_id;\n+\t\tvf->vf_queues[i].fw_tx_qid = queue_id;\n+\t\tvf->vf_queues[i].fw_cid = i;\n+\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\\n\",\n+\t\t\t   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);\n+\t}\n+\n+\trc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);\n+\n+\tif (rc == ECORE_SUCCESS) {\n+\t\tstruct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;\n+\t\tu16 vf_id = vf->relative_vf_id;\n+\n+\t\tp_iov->num_vfs++;\n+\t\tp_iov->active_vfs[vf_id / 64] |= (1ULL << (vf_id % 64));\n+\t}\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t struct ecore_ptt *p_ptt,\n+\t\t\t\t\t\t u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *vf = OSAL_NULL;\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\n+\tvf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\tif (!vf) {\n+\t\tDP_ERR(p_hwfn, \"ecore_iov_release_hw_for_vf : vf is NULL\\n\");\n+\t\treturn ECORE_UNKNOWN_ERROR;\n+\t}\n+\n+\tif (vf->state != VF_STOPPED) {\n+\t\t/* Stopping the VF */\n+\t\trc = ecore_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);\n+\n+\t\tif (rc != ECORE_SUCCESS) {\n+\t\t\tDP_ERR(p_hwfn, \"ecore_sp_vf_stop returned error %d\\n\",\n+\t\t\t       rc);\n+\t\t\treturn rc;\n+\t\t}\n+\n+\t\tvf->state = VF_STOPPED;\n+\t}\n+\n+\t/* disabling interrupts and resetting permission table was done during\n+\t * vf-close, however, we could get here without going through vf_close\n+\t */\n+\t/* Disable Interrupts for VF */\n+\tecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);\n+\n+\t/* Reset Permission table */\n+\tecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);\n+\n+\tvf->num_rxqs = 
0;\n+\tvf->num_txqs = 0;\n+\tecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);\n+\n+\tif (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {\n+\t\tstruct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;\n+\t\tu16 vf_id = vf->relative_vf_id;\n+\n+\t\tp_iov->num_vfs--;\n+\t\tp_iov->active_vfs[vf_id / 64] &= ~(1ULL << (vf_id % 64));\n+\t}\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static bool ecore_iov_tlv_supported(u16 tlvtype)\n+{\n+\treturn tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;\n+}\n+\n+static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t struct ecore_vf_info *vf, u16 tlv)\n+{\n+\t/* we don't lock the channel for unsupported tlvs */\n+\tif (!ecore_iov_tlv_supported(tlv))\n+\t\treturn;\n+\n+\t/* lock the channel */\n+\t/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */\n+\n+\t/* record the locking op */\n+\t/* vf->op_current = tlv; @@@TBD MichalK */\n+\n+\t/* log the lock */\n+\tDP_VERBOSE(p_hwfn,\n+\t\t   ECORE_MSG_IOV,\n+\t\t   \"VF[%d]: vf pf channel locked by     %s\\n\",\n+\t\t   vf->abs_vf_id, ecore_channel_tlvs_string[tlv]);\n+}\n+\n+static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t   struct ecore_vf_info *vf,\n+\t\t\t\t\t   u16 expected_tlv)\n+{\n+\t/* we don't unlock the channel for unsupported tlvs */\n+\tif (!ecore_iov_tlv_supported(expected_tlv))\n+\t\treturn;\n+\n+\t/* WARN(expected_tlv != vf->op_current,\n+\t * \"lock mismatch: expected %s found %s\",\n+\t * channel_tlvs_string[expected_tlv],\n+\t * channel_tlvs_string[vf->op_current]);\n+\t * @@@TBD MichalK\n+\t */\n+\n+\t/* lock the channel */\n+\t/* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */\n+\n+\t/* log the unlock */\n+\tDP_VERBOSE(p_hwfn,\n+\t\t   ECORE_MSG_IOV,\n+\t\t   \"VF[%d]: vf pf channel unlocked by %s\\n\",\n+\t\t   vf->abs_vf_id, ecore_channel_tlvs_string[expected_tlv]);\n+\n+\t/* record the locking op */\n+\t/* vf->op_current = CHANNEL_TLV_NONE; */\n+}\n+\n+/* place a given tlv on the tlv buffer, continuing current tlv list */\n+void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,\n+\t\t    u8 **offset, u16 type, u16 length)\n+{\n+\tstruct channel_tlv *tl = (struct channel_tlv *)*offset;\n+\n+\ttl->type = type;\n+\ttl->length = length;\n+\n+\t/* Offset should keep pointing to next TLV (the end of the last) */\n+\t*offset += length;\n+\n+\t/* Return a pointer to the start of the added tlv */\n+\treturn *offset - length;\n+}\n+\n+/* list the types and lengths of the tlvs on the buffer */\n+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)\n+{\n+\tu16 i = 1, total_length = 0;\n+\tstruct channel_tlv *tlv;\n+\n+\tdo {\n+\t\t/* cast current tlv list entry to channel tlv header */\n+\t\ttlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);\n+\n+\t\t/* output tlv */\n+\t\tif (ecore_iov_tlv_supported(tlv->type))\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"TLV number %d: type %s, length %d\\n\",\n+\t\t\t\t   i, ecore_channel_tlvs_string[tlv->type],\n+\t\t\t\t   tlv->length);\n+\t\telse\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"TLV number %d: type %d, length %d\\n\",\n+\t\t\t\t   i, tlv->type, tlv->length);\n+\n+\t\tif (tlv->type == CHANNEL_TLV_LIST_END)\n+\t\t\treturn;\n+\n+\t\t/* Validate entry - protect against malicious VFs */\n+\t\tif (!tlv->length) {\n+\t\t\tDP_NOTICE(p_hwfn, false, \"TLV of length 0 found\\n\");\n+\t\t\treturn;\n+\t\t}\n+\t\ttotal_length += tlv->length;\n+\t\tif (total_length >= sizeof(struct tlv_buffer_size)) {\n+\t\t\tDP_NOTICE(p_hwfn, false, \"TLV 
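ecore_add_tlv() and ecore_dp_tlv_list() define the channel's framing: every entry begins with a {type, length} header, length spans the whole entry, and a LIST_END header terminates the chain. A standalone sketch of building and walking such a list; the constants here are stand-ins, not the driver's CHANNEL_TLV_* values:

#include <stdint.h>
#include <stdio.h>

struct channel_tlv {
	uint16_t type;
	uint16_t length;	/* covers header + payload */
};

#define TLV_LIST_END 0		/* stand-in terminator type */

/* Mirror of ecore_add_tlv(): place a header, advance the cursor. */
static void *add_tlv(uint8_t **offset, uint16_t type, uint16_t length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;
	*offset += length;	/* next TLV starts where this one ends */
	return tl;
}

int main(void)
{
	uint8_t buf[64] = { 0 }, *off = buf, *p;

	add_tlv(&off, 7 /* some request */, 16);
	add_tlv(&off, TLV_LIST_END, sizeof(struct channel_tlv));

	/* Walk the list the way ecore_dp_tlv_list() does. */
	for (p = buf;;) {
		struct channel_tlv *tl = (struct channel_tlv *)p;

		printf("type %u len %u\n", tl->type, tl->length);
		if (tl->type == TLV_LIST_END || !tl->length)
			break;	/* terminator, or malformed zero-length TLV */
		p += tl->length;
	}
	return 0;
}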
==> Buffer overflow\\n\");\n+\t\t\treturn;\n+\t\t}\n+\n+\t\ti++;\n+\t} while (1);\n+}\n+\n+static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t    struct ecore_ptt *p_ptt,\n+\t\t\t\t    struct ecore_vf_info *p_vf,\n+\t\t\t\t    u16 length, u8 status)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;\n+\tstruct ecore_dmae_params params;\n+\tu8 eng_vf_id;\n+\n+\tmbx->reply_virt->default_resp.hdr.status = status;\n+\n+#ifdef CONFIG_ECORE_SW_CHANNEL\n+\tmbx->sw_mbx.response_size =\n+\t    length + sizeof(struct channel_list_end_tlv);\n+#endif\n+\n+\tecore_dp_tlv_list(p_hwfn, mbx->reply_virt);\n+\n+\tif (!p_hwfn->p_dev->sriov_info.b_hw_channel)\n+\t\treturn;\n+\n+\teng_vf_id = p_vf->abs_vf_id;\n+\n+\tOSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));\n+\tparams.flags = ECORE_DMAE_FLAG_VF_DST;\n+\tparams.dst_vfid = eng_vf_id;\n+\n+\tecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),\n+\t\t\t     mbx->req_virt->first_tlv.reply_address +\n+\t\t\t     sizeof(u64),\n+\t\t\t     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,\n+\t\t\t     &params);\n+\n+\tecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,\n+\t\t\t     mbx->req_virt->first_tlv.reply_address,\n+\t\t\t     sizeof(u64) / 4, &params);\n+\n+\tREG_WR(p_hwfn,\n+\t       GTT_BAR0_MAP_REG_USDM_RAM +\n+\t       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);\n+}\n+\n+static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t  enum ecore_iov_vport_update_flag flag)\n+{\n+\tswitch (flag) {\n+\tcase ECORE_IOV_VP_UPDATE_ACTIVATE:\n+\t\treturn CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;\n+\tcase ECORE_IOV_VP_UPDATE_VLAN_STRIP:\n+\t\treturn CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;\n+\tcase ECORE_IOV_VP_UPDATE_TX_SWITCH:\n+\t\treturn CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;\n+\tcase ECORE_IOV_VP_UPDATE_MCAST:\n+\t\treturn CHANNEL_TLV_VPORT_UPDATE_MCAST;\n+\tcase ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:\n+\t\treturn CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;\n+\tcase ECORE_IOV_VP_UPDATE_RSS:\n+\t\treturn CHANNEL_TLV_VPORT_UPDATE_RSS;\n+\tcase ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:\n+\t\treturn CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;\n+\tcase ECORE_IOV_VP_UPDATE_SGE_TPA:\n+\t\treturn CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;\n+\tdefault:\n+\t\treturn 0;\n+\t}\n+}\n+\n+static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      struct ecore_vf_info *p_vf,\n+\t\t\t\t\t      struct ecore_iov_vf_mbx *p_mbx,\n+\t\t\t\t\t      u8 status, u16 tlvs_mask,\n+\t\t\t\t\t      u16 tlvs_accepted)\n+{\n+\tstruct pfvf_def_resp_tlv *resp;\n+\tu16 size, total_len, i;\n+\n+\tOSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));\n+\tp_mbx->offset = (u8 *)(p_mbx->reply_virt);\n+\tsize = sizeof(struct pfvf_def_resp_tlv);\n+\ttotal_len = size;\n+\n+\tecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);\n+\n+\t/* Prepare response for all extended tlvs if they are found by PF */\n+\tfor (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {\n+\t\tif (!(tlvs_mask & (1 << i)))\n+\t\t\tcontinue;\n+\n+\t\tresp = ecore_add_tlv(p_hwfn, &p_mbx->offset,\n+\t\t\t\t     ecore_iov_vport_to_tlv(p_hwfn, i), size);\n+\n+\t\tif (tlvs_accepted & (1 << i))\n+\t\t\tresp->hdr.status = status;\n+\t\telse\n+\t\t\tresp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;\n+\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"VF[%d] - vport_update resp: TLV %d, status %02x\\n\",\n+\t\t\t   p_vf->relative_vf_id,\n+\t\t\t   ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);\n+\n+\t\ttotal_len += size;\n+\t}\n+\n+\tecore_add_tlv(p_hwfn, 
&p_mbx->offset, CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\treturn total_len;\n+}\n+\n+static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t   struct ecore_ptt *p_ptt,\n+\t\t\t\t   struct ecore_vf_info *vf_info,\n+\t\t\t\t   u16 type, u16 length, u8 status)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;\n+\n+\tmbx->offset = (u8 *)(mbx->reply_virt);\n+\n+\tecore_add_tlv(p_hwfn, &mbx->offset, type, length);\n+\tecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\tecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);\n+}\n+\n+static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t struct ecore_vf_info *p_vf)\n+{\n+\tp_vf->vf_bulletin = 0;\n+\tp_vf->vport_instance = 0;\n+\tp_vf->num_mac_filters = 0;\n+\tp_vf->num_vlan_filters = 0;\n+\tp_vf->num_mc_filters = 0;\n+\tp_vf->configured_features = 0;\n+\n+\t/* If VF previously requested less resources, go back to default */\n+\tp_vf->num_rxqs = p_vf->num_sbs;\n+\tp_vf->num_txqs = p_vf->num_sbs;\n+\n+\tp_vf->num_active_rxqs = 0;\n+\n+\tOSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));\n+\tOSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);\n+}\n+\n+static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t     struct ecore_ptt *p_ptt,\n+\t\t\t\t     struct ecore_vf_info *vf)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;\n+\tstruct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;\n+\tstruct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;\n+\tstruct pf_vf_resc *resc = &resp->resc;\n+\tstruct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;\n+\tu16 length;\n+\tu8 i, vfpf_status = PFVF_STATUS_SUCCESS;\n+\n+\t/* Validate FW compatibility */\n+\tif (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||\n+\t    req->vfdev_info.fw_minor != FW_MINOR_VERSION ||\n+\t    req->vfdev_info.fw_revision != FW_REVISION_VERSION ||\n+\t    req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {\n+\t\tDP_INFO(p_hwfn,\n+\t\t\t\"VF[%d] is running an incompatible driver [VF needs\"\n+\t\t\t\" FW %02x:%02x:%02x:%02x but Hypervisor is\"\n+\t\t\t\" using %02x:%02x:%02x:%02x]\\n\",\n+\t\t\tvf->abs_vf_id, req->vfdev_info.fw_major,\n+\t\t\treq->vfdev_info.fw_minor, req->vfdev_info.fw_revision,\n+\t\t\treq->vfdev_info.fw_engineering, FW_MAJOR_VERSION,\n+\t\t\tFW_MINOR_VERSION, FW_REVISION_VERSION,\n+\t\t\tFW_ENGINEERING_VERSION);\n+\t\tvfpf_status = PFVF_STATUS_NOT_SUPPORTED;\n+\t\tgoto out;\n+\t}\n+#ifndef __EXTRACT__LINUX__\n+\tif (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {\n+\t\tvfpf_status = PFVF_STATUS_NOT_SUPPORTED;\n+\t\tgoto out;\n+\t}\n+#endif\n+\n+\tOSAL_MEMSET(resp, 0, sizeof(*resp));\n+\n+\t/* Fill in vf info stuff : @@@TBD MichalK Hard Coded for now... 
*/\n+\tvf->opaque_fid = req->vfdev_info.opaque_fid;\n+\tvf->num_mac_filters = 1;\n+\tvf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;\n+\tvf->num_mc_filters = ECORE_MAX_MC_ADDRS;\n+\n+\tvf->vf_bulletin = req->bulletin_addr;\n+\tvf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?\n+\t    vf->bulletin.size : req->bulletin_size;\n+\n+\t/* fill in pfdev info */\n+\tpfdev_info->chip_num = p_hwfn->p_dev->chip_num;\n+\tpfdev_info->db_size = 0;\t/* @@@ TBD MichalK Vf Doorbells */\n+\tpfdev_info->indices_per_sb = PIS_PER_SB;\n+\tpfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED;\n+\n+\tpfdev_info->stats_info.mstats.address =\n+\t    PXP_VF_BAR0_START_MSDM_ZONE_B +\n+\t    OFFSETOF(struct mstorm_vf_zone, non_trigger.eth_queue_stat);\n+\tpfdev_info->stats_info.mstats.len =\n+\t    sizeof(struct eth_mstorm_per_queue_stat);\n+\n+\tpfdev_info->stats_info.ustats.address =\n+\t    PXP_VF_BAR0_START_USDM_ZONE_B +\n+\t    OFFSETOF(struct ustorm_vf_zone, non_trigger.eth_queue_stat);\n+\tpfdev_info->stats_info.ustats.len =\n+\t    sizeof(struct eth_ustorm_per_queue_stat);\n+\n+\tpfdev_info->stats_info.pstats.address =\n+\t    PXP_VF_BAR0_START_PSDM_ZONE_B +\n+\t    OFFSETOF(struct pstorm_vf_zone, non_trigger.eth_queue_stat);\n+\tpfdev_info->stats_info.pstats.len =\n+\t    sizeof(struct eth_pstorm_per_queue_stat);\n+\n+\tpfdev_info->stats_info.tstats.address = 0;\n+\tpfdev_info->stats_info.tstats.len = 0;\n+\n+\tOSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,\n+\t\t    ETH_ALEN);\n+\n+\tpfdev_info->fw_major = FW_MAJOR_VERSION;\n+\tpfdev_info->fw_minor = FW_MINOR_VERSION;\n+\tpfdev_info->fw_rev = FW_REVISION_VERSION;\n+\tpfdev_info->fw_eng = FW_ENGINEERING_VERSION;\n+\tpfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();\n+\tecore_mcp_get_mfw_ver(p_hwfn->p_dev, p_ptt, &pfdev_info->mfw_ver,\n+\t\t\t      OSAL_NULL);\n+\n+\tpfdev_info->dev_type = p_hwfn->p_dev->type;\n+\tpfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;\n+\n+\t/* Fill in resc : @@@TBD MichalK Hard Coded for now... 
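The ACQUIRE flow above starts by demanding an exact four-part firmware version match and otherwise answers PFVF_STATUS_NOT_SUPPORTED. A minimal sketch of that handshake check, with illustrative version numbers and names:

#include <stdbool.h>
#include <stdint.h>

struct fw_ver { uint8_t major, minor, rev, eng; };

/* The PF's own firmware version; the numbers here are illustrative. */
static const struct fw_ver pf_fw = { 8, 7, 3, 0 };

static bool vf_fw_compatible(const struct fw_ver *vf)
{
	return vf->major == pf_fw.major && vf->minor == pf_fw.minor &&
	       vf->rev == pf_fw.rev && vf->eng == pf_fw.eng;
}

int main(void)
{
	struct fw_ver vf = { 8, 7, 3, 0 };

	return vf_fw_compatible(&vf) ? 0 : 1;
}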
*/\n+\tresc->num_rxqs = vf->num_rxqs;\n+\tresc->num_txqs = vf->num_txqs;\n+\tresc->num_sbs = vf->num_sbs;\n+\tfor (i = 0; i < resc->num_sbs; i++) {\n+\t\tresc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];\n+\t\tresc->hw_sbs[i].sb_qid = 0;\n+\t}\n+\n+\tfor (i = 0; i < resc->num_rxqs; i++) {\n+\t\tecore_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,\n+\t\t\t\t  (u16 *)&resc->hw_qid[i]);\n+\t\tresc->cid[i] = vf->vf_queues[i].fw_cid;\n+\t}\n+\n+\tresc->num_mac_filters = OSAL_MIN_T(u8, vf->num_mac_filters,\n+\t\t\t\t\t   req->resc_request.num_mac_filters);\n+\tresc->num_vlan_filters = OSAL_MIN_T(u8, vf->num_vlan_filters,\n+\t\t\t\t\t    req->resc_request.num_vlan_filters);\n+\tresc->num_mc_filters = OSAL_MIN_T(u8, vf->num_mc_filters,\n+\t\t\t\t\t  req->resc_request.num_mc_filters);\n+\n+\t/* Fill agreed size of bulletin board in response, and post\n+\t * an initial image to the bulletin board.\n+\t */\n+\tresp->bulletin_size = vf->bulletin.size;\n+\tecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,\"\n+\t\t   \" db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\\n\"\n+\t\t   \"resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,\"\n+\t\t   \" n_vlans-%d, n_mcs-%d\\n\",\n+\t\t   vf->abs_vf_id, resp->pfdev_info.chip_num,\n+\t\t   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,\n+\t\t   resp->pfdev_info.capabilities, resc->num_rxqs,\n+\t\t   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,\n+\t\t   resc->num_vlan_filters, resc->num_mc_filters);\n+\n+\tvf->state = VF_ACQUIRED;\n+\n+\t/* Prepare Response */\n+\tlength = sizeof(struct pfvf_acquire_resp_tlv);\n+\n+out:\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,\n+\t\t\t       length, vfpf_status);\n+\n+\t/* @@@TBD Bulletin */\n+}\n+\n+static enum _ecore_status_t\n+__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,\n+\t\t\t struct ecore_vf_info *p_vf, bool val)\n+{\n+\tstruct ecore_sp_vport_update_params params;\n+\tenum _ecore_status_t rc;\n+\n+\tif (val == p_vf->spoof_chk) {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"Spoofchk value[%d] is already configured\\n\", val);\n+\t\treturn ECORE_SUCCESS;\n+\t}\n+\n+\tOSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));\n+\tparams.opaque_fid = p_vf->opaque_fid;\n+\tparams.vport_id = p_vf->vport_id;\n+\tparams.update_anti_spoofing_en_flg = 1;\n+\tparams.anti_spoofing_en = val;\n+\n+\trc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,\n+\t\t\t\t   OSAL_NULL);\n+\tif (rc == ECORE_SUCCESS) {\n+\t\tp_vf->spoof_chk = val;\n+\t\tp_vf->req_spoofchk_val = p_vf->spoof_chk;\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"Spoofchk val[%d] configured\\n\", val);\n+\t} else {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"Spoofchk configuration[val:%d] failed for VF[%d]\\n\",\n+\t\t\t   val, p_vf->relative_vf_id);\n+\t}\n+\n+\treturn rc;\n+}\n+\n+static enum _ecore_status_t\n+ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t   struct ecore_vf_info *p_vf)\n+{\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\tstruct ecore_filter_ucast filter;\n+\tint i;\n+\n+\tOSAL_MEMSET(&filter, 0, sizeof(filter));\n+\tfilter.is_rx_filter = 1;\n+\tfilter.is_tx_filter = 1;\n+\tfilter.vport_to_add_to = p_vf->vport_id;\n+\tfilter.opcode = ECORE_FILTER_ADD;\n+\n+\t/* Reconfigure vlans */\n+\tfor (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {\n+\t\tif (p_vf->shadow_config.vlans[i].used) {\n+\t\t\tfilter.type = 
ECORE_FILTER_VLAN;\n+\t\t\tfilter.vlan = p_vf->shadow_config.vlans[i].vid;\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"Reconfig VLAN [0x%04x] for VF [%04x]\\n\",\n+\t\t\t\t   filter.vlan, p_vf->relative_vf_id);\n+\t\t\trc = ecore_sp_eth_filter_ucast(p_hwfn,\n+\t\t\t\t\t\t       p_vf->opaque_fid,\n+\t\t\t\t\t\t       &filter,\n+\t\t\t\t\t\t       ECORE_SPQ_MODE_CB,\n+\t\t\t\t\t\t       OSAL_NULL);\n+\t\t\tif (rc) {\n+\t\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t\t  \"Failed to configure VLAN [%04x]\"\n+\t\t\t\t\t  \" to VF [%04x]\\n\",\n+\t\t\t\t\t  filter.vlan, p_vf->relative_vf_id);\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn rc;\n+}\n+\n+static enum _ecore_status_t\n+ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t     struct ecore_vf_info *p_vf, u64 events)\n+{\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\n+\t/*TODO - what about MACs? */\n+\n+\tif ((events & (1 << VLAN_ADDR_FORCED)) &&\n+\t    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))\n+\t\trc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);\n+\n+\treturn rc;\n+}\n+\n+static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    struct ecore_vf_info *p_vf,\n+\t\t\t\t\t    u64 events)\n+{\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\tstruct ecore_filter_ucast filter;\n+\n+\tif (!p_vf->vport_instance)\n+\t\treturn ECORE_INVAL;\n+\n+\tif (events & (1 << MAC_ADDR_FORCED)) {\n+\t\t/* Since there's no way [currently] of removing the MAC,\n+\t\t * we can always assume this means we need to force it.\n+\t\t */\n+\t\tOSAL_MEMSET(&filter, 0, sizeof(filter));\n+\t\tfilter.type = ECORE_FILTER_MAC;\n+\t\tfilter.opcode = ECORE_FILTER_REPLACE;\n+\t\tfilter.is_rx_filter = 1;\n+\t\tfilter.is_tx_filter = 1;\n+\t\tfilter.vport_to_add_to = p_vf->vport_id;\n+\t\tOSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);\n+\n+\t\trc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,\n+\t\t\t\t\t       &filter,\n+\t\t\t\t\t       ECORE_SPQ_MODE_CB, OSAL_NULL);\n+\t\tif (rc) {\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"PF failed to configure MAC for VF\\n\");\n+\t\t\treturn rc;\n+\t\t}\n+\n+\t\tp_vf->configured_features |= 1 << MAC_ADDR_FORCED;\n+\t}\n+\n+\tif (events & (1 << VLAN_ADDR_FORCED)) {\n+\t\tstruct ecore_sp_vport_update_params vport_update;\n+\t\tu8 removal;\n+\t\tint i;\n+\n+\t\tOSAL_MEMSET(&filter, 0, sizeof(filter));\n+\t\tfilter.type = ECORE_FILTER_VLAN;\n+\t\tfilter.is_rx_filter = 1;\n+\t\tfilter.is_tx_filter = 1;\n+\t\tfilter.vport_to_add_to = p_vf->vport_id;\n+\t\tfilter.vlan = p_vf->bulletin.p_virt->pvid;\n+\t\tfilter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :\n+\t\t    ECORE_FILTER_FLUSH;\n+\n+\t\t/* Send the ramrod */\n+\t\trc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,\n+\t\t\t\t\t       &filter,\n+\t\t\t\t\t       ECORE_SPQ_MODE_CB, OSAL_NULL);\n+\t\tif (rc) {\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"PF failed to configure VLAN for VF\\n\");\n+\t\t\treturn rc;\n+\t\t}\n+\n+\t\t/* Update the default-vlan & silent vlan stripping */\n+\t\tOSAL_MEMSET(&vport_update, 0, sizeof(vport_update));\n+\t\tvport_update.opaque_fid = p_vf->opaque_fid;\n+\t\tvport_update.vport_id = p_vf->vport_id;\n+\t\tvport_update.update_default_vlan_enable_flg = 1;\n+\t\tvport_update.default_vlan_enable_flg = filter.vlan ? 
1 : 0;\n+\t\tvport_update.update_default_vlan_flg = 1;\n+\t\tvport_update.default_vlan = filter.vlan;\n+\n+\t\tvport_update.update_inner_vlan_removal_flg = 1;\n+\t\tremoval = filter.vlan ?\n+\t\t    1 : p_vf->shadow_config.inner_vlan_removal;\n+\t\tvport_update.inner_vlan_removal_flg = removal;\n+\t\tvport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;\n+\t\trc = ecore_sp_vport_update(p_hwfn, &vport_update,\n+\t\t\t\t\t   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);\n+\t\tif (rc) {\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"PF failed to configure VF vport for vlan\\n\");\n+\t\t\treturn rc;\n+\t\t}\n+\n+\t\t/* Update all the Rx queues */\n+\t\tfor (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {\n+\t\t\tu16 qid;\n+\n+\t\t\tif (!p_vf->vf_queues[i].rxq_active)\n+\t\t\t\tcontinue;\n+\n+\t\t\tqid = p_vf->vf_queues[i].fw_rx_qid;\n+\n+\t\t\trc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,\n+\t\t\t\t\t\t   1, 0, 1,\n+\t\t\t\t\t\t   ECORE_SPQ_MODE_EBLOCK,\n+\t\t\t\t\t\t   OSAL_NULL);\n+\t\t\tif (rc) {\n+\t\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t\t  \"Failed to send Rx update\"\n+\t\t\t\t\t  \" queue[0x%04x]\\n\",\n+\t\t\t\t\t  qid);\n+\t\t\t\treturn rc;\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (filter.vlan)\n+\t\t\tp_vf->configured_features |= 1 << VLAN_ADDR_FORCED;\n+\t\telse\n+\t\t\tp_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);\n+\t}\n+\n+\t/* If forced features are terminated, we need to configure the shadow\n+\t * configuration back again.\n+\t */\n+\tif (events)\n+\t\tecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);\n+\n+\treturn rc;\n+}\n+\n+static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t struct ecore_ptt *p_ptt,\n+\t\t\t\t\t struct ecore_vf_info *vf)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;\n+\tstruct vfpf_vport_start_tlv *start = &mbx->req_virt->start_vport;\n+\tstruct ecore_sp_vport_start_params params = { 0 };\n+\tu8 status = PFVF_STATUS_SUCCESS;\n+\tstruct ecore_vf_info *vf_info;\n+\tenum _ecore_status_t rc;\n+\tu64 *p_bitmap;\n+\tint sb_id;\n+\n+\tvf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);\n+\tif (!vf_info) {\n+\t\tDP_NOTICE(p_hwfn->p_dev, true,\n+\t\t\t  \"Failed to get VF info, invalid vfid [%d]\\n\",\n+\t\t\t  vf->relative_vf_id);\n+\t\treturn;\n+\t}\n+\n+\tvf->state = VF_ENABLED;\n+\n+\t/* Initialize Status block in CAU */\n+\tfor (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {\n+\t\tif (!start->sb_addr[sb_id]) {\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VF[%d] did not fill the address of SB %d\\n\",\n+\t\t\t\t   vf->relative_vf_id, sb_id);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tecore_int_cau_conf_sb(p_hwfn, p_ptt,\n+\t\t\t\t      start->sb_addr[sb_id],\n+\t\t\t\t      vf->igu_sbs[sb_id],\n+\t\t\t\t      vf->abs_vf_id, 1 /* VF Valid */);\n+\t}\n+\tecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);\n+\n+\tvf->mtu = start->mtu;\n+\tvf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;\n+\n+\t/* Take into consideration configuration forced by hypervisor;\n+\t * If none is configured, use the supplied VF values [for old\n+\t * vfs that would still be fine, since they passed '0' as padding].\n+\t */\n+\tp_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;\n+\tif (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {\n+\t\tu8 vf_req = start->only_untagged;\n+\n+\t\tvf_info->bulletin.p_virt->default_only_untagged = vf_req;\n+\t\t*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;\n+\t}\n+\n+\tparams.tpa_mode = start->tpa_mode;\n+\tparams.remove_inner_vlan = 
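ecore_iov_configure_vport_forced() is driven by a bitmap of hypervisor-forced features and records what it actually applied in configured_features; a forced pvid of 0 means the forced VLAN is being removed. A sketch of that bookkeeping, with stand-in bit positions:

#include <stdint.h>

/* Stand-in bit positions; the real ones come from the bulletin ABI. */
enum { MAC_FORCED = 0, VLAN_FORCED = 1 };

struct vf_state {
	uint64_t configured_features;	/* what the PF actually applied */
};

static void apply_forced(struct vf_state *vf, uint64_t events,
			 uint16_t forced_pvid)
{
	if (events & (1ULL << MAC_FORCED))
		vf->configured_features |= 1ULL << MAC_FORCED;

	if (events & (1ULL << VLAN_FORCED)) {
		if (forced_pvid)	/* pvid 0 removes the forced VLAN */
			vf->configured_features |= 1ULL << VLAN_FORCED;
		else
			vf->configured_features &= ~(1ULL << VLAN_FORCED);
	}
}

int main(void)
{
	struct vf_state vf = { 0 };

	apply_forced(&vf, 1ULL << VLAN_FORCED, 100);	/* force pvid 100 */
	return vf.configured_features == (1ULL << VLAN_FORCED) ? 0 : 1;
}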
start->inner_vlan_removal;\n+\tparams.tx_switching = true;\n+\n+#ifndef ASIC_ONLY\n+\tif (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {\n+\t\tDP_NOTICE(p_hwfn, false,\n+\t\t\t  \"FPGA: Don't config VF for Tx-switching [no pVFC]\\n\");\n+\t\tparams.tx_switching = false;\n+\t}\n+#endif\n+\n+\tparams.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;\n+\tparams.drop_ttl0 = false;\n+\tparams.concrete_fid = vf->concrete_fid;\n+\tparams.opaque_fid = vf->opaque_fid;\n+\tparams.vport_id = vf->vport_id;\n+\tparams.max_buffers_per_cqe = start->max_buffers_per_cqe;\n+\tparams.mtu = vf->mtu;\n+\n+\trc = ecore_sp_eth_vport_start(p_hwfn, &params);\n+\tif (rc != ECORE_SUCCESS) {\n+\t\tDP_ERR(p_hwfn,\n+\t\t       \"ecore_iov_vf_mbx_start_vport returned error %d\\n\", rc);\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\t} else {\n+\t\tvf->vport_instance++;\n+\n+\t\t/* Force configuration if needed on the newly opened vport */\n+\t\tecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);\n+\t\tOSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,\n+\t\t\t\t\t  vf->vport_id, vf->opaque_fid);\n+\t\t__ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);\n+\t}\n+\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,\n+\t\t\t       sizeof(struct pfvf_def_resp_tlv), status);\n+}\n+\n+static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\tstruct ecore_ptt *p_ptt,\n+\t\t\t\t\tstruct ecore_vf_info *vf)\n+{\n+\tu8 status = PFVF_STATUS_SUCCESS;\n+\tenum _ecore_status_t rc;\n+\n+\tvf->vport_instance--;\n+\tvf->spoof_chk = false;\n+\n+\trc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);\n+\tif (rc != ECORE_SUCCESS) {\n+\t\tDP_ERR(p_hwfn,\n+\t\t       \"ecore_iov_vf_mbx_stop_vport returned error %d\\n\", rc);\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\t}\n+\n+\t/* Forget the configuration on the vport */\n+\tvf->configured_features = 0;\n+\tOSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));\n+\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,\n+\t\t\t       sizeof(struct pfvf_def_resp_tlv), status);\n+}\n+\n+static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       struct ecore_ptt *p_ptt,\n+\t\t\t\t       struct ecore_vf_info *vf)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;\n+\tstruct vfpf_start_rxq_tlv *req = &mbx->req_virt->start_rxq;\n+\tu16 length = sizeof(struct pfvf_def_resp_tlv);\n+\tu8 status = PFVF_STATUS_SUCCESS;\n+\tenum _ecore_status_t rc;\n+\n+\trc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,\n+\t\t\t\t\t   vf->vf_queues[req->rx_qid].fw_cid,\n+\t\t\t\t\t   vf->vf_queues[req->rx_qid].fw_rx_qid,\n+\t\t\t\t\t   vf->vport_id,\n+\t\t\t\t\t   vf->abs_vf_id + 0x10,\n+\t\t\t\t\t   req->hw_sb,\n+\t\t\t\t\t   req->sb_index,\n+\t\t\t\t\t   req->bd_max_bytes,\n+\t\t\t\t\t   req->rxq_addr,\n+\t\t\t\t\t   req->cqe_pbl_addr,\n+\t\t\t\t\t   req->cqe_pbl_size);\n+\n+\tif (rc) {\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\t} else {\n+\t\tvf->vf_queues[req->rx_qid].rxq_active = true;\n+\t\tvf->num_active_rxqs++;\n+\t}\n+\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_RXQ,\n+\t\t\t       length, status);\n+}\n+\n+static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       struct ecore_ptt *p_ptt,\n+\t\t\t\t       struct ecore_vf_info *vf)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;\n+\tstruct vfpf_start_txq_tlv *req = &mbx->req_virt->start_txq;\n+\tu16 length = sizeof(struct pfvf_def_resp_tlv);\n+\tunion ecore_qm_pq_params pq_params;\n+\tu8 status = 
PFVF_STATUS_SUCCESS;\n+\tenum _ecore_status_t rc;\n+\n+\t/* Prepare the parameters which would choose the right PQ */\n+\tOSAL_MEMSET(&pq_params, 0, sizeof(pq_params));\n+\tpq_params.eth.is_vf = 1;\n+\tpq_params.eth.vf_id = vf->relative_vf_id;\n+\n+\trc = ecore_sp_eth_txq_start_ramrod(p_hwfn,\n+\t\t\t\t\t   vf->opaque_fid,\n+\t\t\t\t\t   vf->vf_queues[req->tx_qid].fw_tx_qid,\n+\t\t\t\t\t   vf->vf_queues[req->tx_qid].fw_cid,\n+\t\t\t\t\t   vf->vport_id,\n+\t\t\t\t\t   vf->abs_vf_id + 0x10,\n+\t\t\t\t\t   req->hw_sb,\n+\t\t\t\t\t   req->sb_index,\n+\t\t\t\t\t   req->pbl_addr,\n+\t\t\t\t\t   req->pbl_size, &pq_params);\n+\n+\tif (rc)\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\telse\n+\t\tvf->vf_queues[req->tx_qid].txq_active = true;\n+\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,\n+\t\t\t       length, status);\n+}\n+\n+static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t   struct ecore_vf_info *vf,\n+\t\t\t\t\t\t   u16 rxq_id,\n+\t\t\t\t\t\t   u8 num_rxqs,\n+\t\t\t\t\t\t   bool cqe_completion)\n+{\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\tint qid;\n+\n+\tif (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))\n+\t\treturn ECORE_INVAL;\n+\n+\tfor (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {\n+\t\tif (vf->vf_queues[qid].rxq_active) {\n+\t\t\trc = ecore_sp_eth_rx_queue_stop(p_hwfn,\n+\t\t\t\t\t\t\tvf->vf_queues[qid].\n+\t\t\t\t\t\t\tfw_rx_qid, false,\n+\t\t\t\t\t\t\tcqe_completion);\n+\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\t\t}\n+\t\tvf->vf_queues[qid].rxq_active = false;\n+\t\tvf->num_active_rxqs--;\n+\t}\n+\n+\treturn rc;\n+}\n+\n+static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t   struct ecore_vf_info *vf,\n+\t\t\t\t\t\t   u16 txq_id, u8 num_txqs)\n+{\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\tint qid;\n+\n+\tif (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))\n+\t\treturn ECORE_INVAL;\n+\n+\tfor (qid = txq_id; qid < txq_id + num_txqs; qid++) {\n+\t\tif (vf->vf_queues[qid].txq_active) {\n+\t\t\trc = ecore_sp_eth_tx_queue_stop(p_hwfn,\n+\t\t\t\t\t\t\tvf->vf_queues[qid].\n+\t\t\t\t\t\t\tfw_tx_qid);\n+\n+\t\t\tif (rc)\n+\t\t\t\treturn rc;\n+\t\t}\n+\t\tvf->vf_queues[qid].txq_active = false;\n+\t}\n+\treturn rc;\n+}\n+\n+static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       struct ecore_ptt *p_ptt,\n+\t\t\t\t       struct ecore_vf_info *vf)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;\n+\tstruct vfpf_stop_rxqs_tlv *req = &mbx->req_virt->stop_rxqs;\n+\tu16 length = sizeof(struct pfvf_def_resp_tlv);\n+\tu8 status = PFVF_STATUS_SUCCESS;\n+\tenum _ecore_status_t rc;\n+\n+\t/* We give the option of starting from qid != 0, in this case we\n+\t * need to make sure that qid + num_qs doesn't exceed the actual\n+\t * amount of queues that exist.\n+\t */\n+\trc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,\n+\t\t\t\t    req->num_rxqs, req->cqe_completion);\n+\tif (rc)\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,\n+\t\t\t       length, status);\n+}\n+\n+static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       struct ecore_ptt *p_ptt,\n+\t\t\t\t       struct ecore_vf_info *vf)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;\n+\tstruct vfpf_stop_txqs_tlv *req = &mbx->req_virt->stop_txqs;\n+\tu16 length = sizeof(struct pfvf_def_resp_tlv);\n+\tu8 status = PFVF_STATUS_SUCCESS;\n+\tenum _ecore_status_t rc;\n+\n+\t/* We give the option of starting from qid != 
0, in this case we\n+\t * need to make sure that qid + num_qs doesn't exceed the actual\n+\t * amount of queues that exist.\n+\t */\n+\trc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);\n+\tif (rc)\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,\n+\t\t\t       length, status);\n+}\n+\n+static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t struct ecore_ptt *p_ptt,\n+\t\t\t\t\t struct ecore_vf_info *vf)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;\n+\tstruct vfpf_update_rxq_tlv *req = &mbx->req_virt->update_rxq;\n+\tu16 length = sizeof(struct pfvf_def_resp_tlv);\n+\tu8 status = PFVF_STATUS_SUCCESS;\n+\tu8 complete_event_flg;\n+\tu8 complete_cqe_flg;\n+\tenum _ecore_status_t rc;\n+\tu16 qid;\n+\tu8 i;\n+\n+\tcomplete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);\n+\tcomplete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);\n+\n+\tfor (i = 0; i < req->num_rxqs; i++) {\n+\t\tqid = req->rx_qid + i;\n+\n+\t\tif (!vf->vf_queues[qid].rxq_active) {\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"VF rx_qid = %d isn't active!\\n\", qid);\n+\t\t\tstatus = PFVF_STATUS_FAILURE;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\trc = ecore_sp_eth_rx_queues_update(p_hwfn,\n+\t\t\t\t\t\t   vf->vf_queues[qid].fw_rx_qid,\n+\t\t\t\t\t\t   1,\n+\t\t\t\t\t\t   complete_cqe_flg,\n+\t\t\t\t\t\t   complete_event_flg,\n+\t\t\t\t\t\t   ECORE_SPQ_MODE_EBLOCK,\n+\t\t\t\t\t\t   OSAL_NULL);\n+\n+\t\tif (rc) {\n+\t\t\tstatus = PFVF_STATUS_FAILURE;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,\n+\t\t\t       length, status);\n+}\n+\n+void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t void *p_tlvs_list, u16 req_type)\n+{\n+\tstruct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;\n+\tint len = 0;\n+\n+\tdo {\n+\t\tif (!p_tlv->length) {\n+\t\t\tDP_NOTICE(p_hwfn, true, \"Zero length TLV found\\n\");\n+\t\t\treturn OSAL_NULL;\n+\t\t}\n+\n+\t\tif (p_tlv->type == req_type) {\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"Extended tlv type %s, length %d found\\n\",\n+\t\t\t\t   ecore_channel_tlvs_string[p_tlv->type],\n+\t\t\t\t   p_tlv->length);\n+\t\t\treturn p_tlv;\n+\t\t}\n+\n+\t\tlen += p_tlv->length;\n+\t\tp_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);\n+\n+\t\tif ((len + p_tlv->length) > TLV_BUFFER_SIZE) {\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"TLVs have overrun the buffer size\\n\");\n+\t\t\treturn OSAL_NULL;\n+\t\t}\n+\t} while (p_tlv->type != CHANNEL_TLV_LIST_END);\n+\n+\treturn OSAL_NULL;\n+}\n+\n+static void\n+ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,\n+\t\t\t      struct ecore_sp_vport_update_params *p_data,\n+\t\t\t      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)\n+{\n+\tstruct vfpf_vport_update_activate_tlv *p_act_tlv;\n+\tu16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;\n+\n+\tp_act_tlv = (struct vfpf_vport_update_activate_tlv *)\n+\t    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);\n+\tif (p_act_tlv) {\n+\t\tp_data->update_vport_active_rx_flg = p_act_tlv->update_rx;\n+\t\tp_data->vport_active_rx_flg = p_act_tlv->active_rx;\n+\t\tp_data->update_vport_active_tx_flg = p_act_tlv->update_tx;\n+\t\tp_data->vport_active_tx_flg = p_act_tlv->active_tx;\n+\t\t*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;\n+\t}\n+}\n+\n+static void\n+ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,\n+\t\t\t       struct ecore_sp_vport_update_params *p_data,\n+\t\t\t       
struct ecore_vf_info *p_vf,\n+\t\t\t       struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)\n+{\n+\tstruct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;\n+\tu16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;\n+\n+\tp_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)\n+\t    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);\n+\tif (!p_vlan_tlv)\n+\t\treturn;\n+\n+\tp_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;\n+\n+\t/* Ignore the VF request if we're forcing a vlan */\n+\tif (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {\n+\t\tp_data->update_inner_vlan_removal_flg = 1;\n+\t\tp_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;\n+\t}\n+\n+\t*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;\n+}\n+\n+static void\n+ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,\n+\t\t\t      struct ecore_sp_vport_update_params *p_data,\n+\t\t\t      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)\n+{\n+\tstruct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;\n+\tu16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;\n+\n+\tp_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)\n+\t    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);\n+\n+#ifndef ASIC_ONLY\n+\tif (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {\n+\t\tDP_NOTICE(p_hwfn, false,\n+\t\t\t  \"FPGA: Ignore tx-switching configuration originating from VFs\\n\");\n+\t\treturn;\n+\t}\n+#endif\n+\n+\tif (p_tx_switch_tlv) {\n+\t\tp_data->update_tx_switching_flg = 1;\n+\t\tp_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;\n+\t\t*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;\n+\t}\n+}\n+\n+static void\n+ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t    struct ecore_sp_vport_update_params *p_data,\n+\t\t\t\t    struct ecore_iov_vf_mbx *p_mbx,\n+\t\t\t\t    u16 *tlvs_mask)\n+{\n+\tstruct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;\n+\tu16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;\n+\n+\tp_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)\n+\t    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);\n+\n+\tif (p_mcast_tlv) {\n+\t\tp_data->update_approx_mcast_flg = 1;\n+\t\tOSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,\n+\t\t\t    sizeof(unsigned long) *\n+\t\t\t    ETH_MULTICAST_MAC_BINS_IN_REGS);\n+\t\t*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;\n+\t}\n+}\n+\n+static void\n+ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,\n+\t\t\t\tstruct ecore_sp_vport_update_params *p_data,\n+\t\t\t\tstruct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)\n+{\n+\tstruct vfpf_vport_update_accept_param_tlv *p_accept_tlv;\n+\tu16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;\n+\n+\tp_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)\n+\t    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);\n+\n+\tif (p_accept_tlv) {\n+\t\tp_data->accept_flags.update_rx_mode_config =\n+\t\t    p_accept_tlv->update_rx_mode;\n+\t\tp_data->accept_flags.rx_accept_filter =\n+\t\t    p_accept_tlv->rx_accept_filter;\n+\t\tp_data->accept_flags.update_tx_mode_config =\n+\t\t    p_accept_tlv->update_tx_mode;\n+\t\tp_data->accept_flags.tx_accept_filter =\n+\t\t    p_accept_tlv->tx_accept_filter;\n+\t\t*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;\n+\t}\n+}\n+\n+static void\n+ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t    struct ecore_sp_vport_update_params *p_data,\n+\t\t\t\t    struct ecore_iov_vf_mbx *p_mbx,\n+\t\t\t\t    u16 *tlvs_mask)\n+{\n+\tstruct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;\n+\tu16 tlv = 
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;\n+\n+\tp_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)\n+\t    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);\n+\n+\tif (p_accept_any_vlan) {\n+\t\tp_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;\n+\t\tp_data->update_accept_any_vlan_flg =\n+\t\t    p_accept_any_vlan->update_accept_any_vlan_flg;\n+\t\t*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;\n+\t}\n+}\n+\n+static void\n+ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,\n+\t\t\t      struct ecore_vf_info *vf,\n+\t\t\t      struct ecore_sp_vport_update_params *p_data,\n+\t\t\t      struct ecore_rss_params *p_rss,\n+\t\t\t      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)\n+{\n+\tstruct vfpf_vport_update_rss_tlv *p_rss_tlv;\n+\tu16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;\n+\tu16 table_size;\n+\tu16 i, q_idx, max_q_idx;\n+\n+\tp_rss_tlv = (struct vfpf_vport_update_rss_tlv *)\n+\t    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);\n+\tif (p_rss_tlv) {\n+\t\tOSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));\n+\n+\t\tp_rss->update_rss_config =\n+\t\t    !!(p_rss_tlv->update_rss_flags &\n+\t\t\tVFPF_UPDATE_RSS_CONFIG_FLAG);\n+\t\tp_rss->update_rss_capabilities =\n+\t\t    !!(p_rss_tlv->update_rss_flags &\n+\t\t\tVFPF_UPDATE_RSS_CAPS_FLAG);\n+\t\tp_rss->update_rss_ind_table =\n+\t\t    !!(p_rss_tlv->update_rss_flags &\n+\t\t\tVFPF_UPDATE_RSS_IND_TABLE_FLAG);\n+\t\tp_rss->update_rss_key =\n+\t\t    !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_KEY_FLAG);\n+\n+\t\tp_rss->rss_enable = p_rss_tlv->rss_enable;\n+\t\tp_rss->rss_eng_id = vf->relative_vf_id + 1;\n+\t\tp_rss->rss_caps = p_rss_tlv->rss_caps;\n+\t\tp_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;\n+\t\tOSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,\n+\t\t\t    sizeof(p_rss->rss_ind_table));\n+\t\tOSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,\n+\t\t\t    sizeof(p_rss->rss_key));\n+\n+\t\ttable_size = OSAL_MIN_T(u16,\n+\t\t\t\t\tOSAL_ARRAY_SIZE(p_rss->rss_ind_table),\n+\t\t\t\t\t(1 << p_rss_tlv->rss_table_size_log));\n+\n+\t\tmax_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);\n+\n+\t\tfor (i = 0; i < table_size; i++) {\n+\t\t\tq_idx = p_rss->rss_ind_table[i];\n+\t\t\tif (q_idx >= max_q_idx) {\n+\t\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t\t  \"rss_ind_table[%d] = %d, rxq is out of range\\n\",\n+\t\t\t\t\t  i, q_idx);\n+\t\t\t\t/* TBD: fail the request and mark VF as malicious */\n+\t\t\t\tp_rss->rss_ind_table[i] =\n+\t\t\t\t    vf->vf_queues[0].fw_rx_qid;\n+\t\t\t} else if (!vf->vf_queues[q_idx].rxq_active) {\n+\t\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t\t  \"rss_ind_table[%d] = %d, rxq is not active\\n\",\n+\t\t\t\t\t  i, q_idx);\n+\t\t\t\t/* TBD: fail the request and mark VF as malicious */\n+\t\t\t\tp_rss->rss_ind_table[i] =\n+\t\t\t\t    vf->vf_queues[0].fw_rx_qid;\n+\t\t\t} else {\n+\t\t\t\tp_rss->rss_ind_table[i] =\n+\t\t\t\t    vf->vf_queues[q_idx].fw_rx_qid;\n+\t\t\t}\n+\t\t}\n+\n+\t\tp_data->rss_params = p_rss;\n+\t\t*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;\n+\t} else {\n+\t\tp_data->rss_params = OSAL_NULL;\n+\t}\n+}\n+\n+static void\n+ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t  struct ecore_vf_info *vf,\n+\t\t\t\t  struct ecore_sp_vport_update_params *p_data,\n+\t\t\t\t  struct ecore_sge_tpa_params *p_sge_tpa,\n+\t\t\t\t  struct ecore_iov_vf_mbx *p_mbx,\n+\t\t\t\t  u16 *tlvs_mask)\n+{\n+\tstruct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;\n+\tu16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;\n+\n+\tp_sge_tpa_tlv = (struct 
vfpf_vport_update_sge_tpa_tlv *)\n+\t    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);\n+\n+\tif (!p_sge_tpa_tlv) {\n+\t\tp_data->sge_tpa_params = OSAL_NULL;\n+\t\treturn;\n+\t}\n+\n+\tOSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));\n+\n+\tp_sge_tpa->update_tpa_en_flg =\n+\t    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);\n+\tp_sge_tpa->update_tpa_param_flg =\n+\t    !!(p_sge_tpa_tlv->update_sge_tpa_flags &\n+\t\tVFPF_UPDATE_TPA_PARAM_FLAG);\n+\n+\tp_sge_tpa->tpa_ipv4_en_flg =\n+\t    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);\n+\tp_sge_tpa->tpa_ipv6_en_flg =\n+\t    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);\n+\tp_sge_tpa->tpa_pkt_split_flg =\n+\t    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);\n+\tp_sge_tpa->tpa_hdr_data_split_flg =\n+\t    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);\n+\tp_sge_tpa->tpa_gro_consistent_flg =\n+\t    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);\n+\n+\tp_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;\n+\tp_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;\n+\tp_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;\n+\tp_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;\n+\tp_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;\n+\n+\tp_data->sge_tpa_params = p_sge_tpa;\n+\n+\t*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;\n+}\n+\n+static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  struct ecore_ptt *p_ptt,\n+\t\t\t\t\t  struct ecore_vf_info *vf)\n+{\n+\tstruct ecore_sp_vport_update_params params;\n+\tstruct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;\n+\tstruct ecore_sge_tpa_params sge_tpa_params;\n+\tstruct ecore_rss_params rss_params;\n+\tu8 status = PFVF_STATUS_SUCCESS;\n+\tenum _ecore_status_t rc;\n+\tu16 tlvs_mask = 0, tlvs_accepted;\n+\tu16 length;\n+\n+\tOSAL_MEMSET(&params, 0, sizeof(params));\n+\tparams.opaque_fid = vf->opaque_fid;\n+\tparams.vport_id = vf->vport_id;\n+\tparams.rss_params = OSAL_NULL;\n+\n+\t/* Search for extended tlvs list and update values\n+\t * from VF in struct ecore_sp_vport_update_params.\n+\t */\n+\tecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);\n+\tecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);\n+\tecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);\n+\tecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);\n+\tecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);\n+\tecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,\n+\t\t\t\t      mbx, &tlvs_mask);\n+\tecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);\n+\tecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,\n+\t\t\t\t\t  &sge_tpa_params, mbx, &tlvs_mask);\n+\n+\t/* Just log a message if there is no single extended tlv in buffer.\n+\t * When all features of vport update ramrod would be requested by VF\n+\t * as extended TLVs in buffer then an error can be returned in response\n+\t * if there is no extended TLV present in buffer.\n+\t */\n+\ttlvs_accepted = tlvs_mask;\n+\n+#ifndef __EXTRACT__LINUX__\n+\tif (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,\n+\t\t\t\t     &params, &tlvs_accepted) !=\n+\t    ECORE_SUCCESS) {\n+\t\ttlvs_accepted = 0;\n+\t\tstatus = PFVF_STATUS_NOT_SUPPORTED;\n+\t\tgoto out;\n+\t}\n+#endif\n+\n+\tif (!tlvs_accepted) {\n+\t\tif (tlvs_mask)\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   
\"Upper-layer prevents said VF configuration\\n\");\n+\t\telse\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"No feature tlvs found for vport update\\n\");\n+\t\tstatus = PFVF_STATUS_NOT_SUPPORTED;\n+\t\tgoto out;\n+\t}\n+\n+\trc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,\n+\t\t\t\t   OSAL_NULL);\n+\n+\tif (rc)\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\n+out:\n+\tlength = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,\n+\t\t\t\t\t\t    tlvs_mask, tlvs_accepted);\n+\tecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);\n+}\n+\n+static enum _ecore_status_t\n+ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t   struct ecore_vf_info *p_vf,\n+\t\t\t\t   struct ecore_filter_ucast *p_params)\n+{\n+\tint i;\n+\n+\t/* TODO - do we need a MAC shadow registery? */\n+\tif (p_params->type == ECORE_FILTER_MAC)\n+\t\treturn ECORE_SUCCESS;\n+\n+\t/* First remove entries and then add new ones */\n+\tif (p_params->opcode == ECORE_FILTER_REMOVE) {\n+\t\tfor (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)\n+\t\t\tif (p_vf->shadow_config.vlans[i].used &&\n+\t\t\t    p_vf->shadow_config.vlans[i].vid ==\n+\t\t\t    p_params->vlan) {\n+\t\t\t\tp_vf->shadow_config.vlans[i].used = false;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\tif (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VF [%d] - Tries to remove a non-existing vlan\\n\",\n+\t\t\t\t   p_vf->relative_vf_id);\n+\t\t\treturn ECORE_INVAL;\n+\t\t}\n+\t} else if (p_params->opcode == ECORE_FILTER_REPLACE ||\n+\t\t   p_params->opcode == ECORE_FILTER_FLUSH) {\n+\t\tfor (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)\n+\t\t\tp_vf->shadow_config.vlans[i].used = false;\n+\t}\n+\n+\t/* In forced mode, we're willing to remove entries - but we don't add\n+\t * new ones.\n+\t */\n+\tif (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))\n+\t\treturn ECORE_SUCCESS;\n+\n+\tif (p_params->opcode == ECORE_FILTER_ADD ||\n+\t    p_params->opcode == ECORE_FILTER_REPLACE) {\n+\t\tfor (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)\n+\t\t\tif (!p_vf->shadow_config.vlans[i].used) {\n+\t\t\t\tp_vf->shadow_config.vlans[i].used = true;\n+\t\t\t\tp_vf->shadow_config.vlans[i].vid =\n+\t\t\t\t    p_params->vlan;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\tif (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VF [%d] - Tries to configure more than %d vlan filters\\n\",\n+\t\t\t\t   p_vf->relative_vf_id,\n+\t\t\t\t   ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);\n+\t\t\treturn ECORE_INVAL;\n+\t\t}\n+\t}\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  struct ecore_ptt *p_ptt,\n+\t\t\t\t\t  struct ecore_vf_info *vf)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;\n+\tstruct vfpf_ucast_filter_tlv *req = &mbx->req_virt->ucast_filter;\n+\tstruct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;\n+\tstruct ecore_filter_ucast params;\n+\tu8 status = PFVF_STATUS_SUCCESS;\n+\tenum _ecore_status_t rc;\n+\n+\t/* Prepare the unicast filter params */\n+\tOSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));\n+\tparams.opcode = (enum ecore_filter_opcode)req->opcode;\n+\tparams.type = (enum ecore_filter_ucast_type)req->type;\n+\n+\t/* @@@TBD - We might need logic on HV side in determining this */\n+\tparams.is_rx_filter = 1;\n+\tparams.is_tx_filter = 1;\n+\tparams.vport_to_remove_from = vf->vport_id;\n+\tparams.vport_to_add_to = 
vf->vport_id;\n+\tOSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);\n+\tparams.vlan = req->vlan;\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\\n\",\n+\t\t   vf->abs_vf_id, params.opcode, params.type,\n+\t\t   params.is_rx_filter ? \"RX\" : \"\",\n+\t\t   params.is_tx_filter ? \"TX\" : \"\",\n+\t\t   params.vport_to_add_to,\n+\t\t   params.mac[0], params.mac[1], params.mac[2],\n+\t\t   params.mac[3], params.mac[4], params.mac[5], params.vlan);\n+\n+\tif (!vf->vport_instance) {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"No VPORT instance available for VF[%d], failing ucast MAC configuration\\n\",\n+\t\t\t   vf->abs_vf_id);\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\t\tgoto out;\n+\t}\n+\n+\t/* Update shadow copy of the VF configuration */\n+\tif (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=\n+\t    ECORE_SUCCESS) {\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\t\tgoto out;\n+\t}\n+\n+\t/* Determine if the unicast filtering is acceptable to the PF */\n+\tif ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&\n+\t    (params.type == ECORE_FILTER_VLAN ||\n+\t     params.type == ECORE_FILTER_MAC_VLAN)) {\n+\t\t/* Once VLAN is forced or PVID is set, do not allow\n+\t\t * to add/replace any further VLANs.\n+\t\t */\n+\t\tif (params.opcode == ECORE_FILTER_ADD ||\n+\t\t    params.opcode == ECORE_FILTER_REPLACE)\n+\t\t\tstatus = PFVF_STATUS_FORCED;\n+\t\tgoto out;\n+\t}\n+\n+\tif ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&\n+\t    (params.type == ECORE_FILTER_MAC ||\n+\t     params.type == ECORE_FILTER_MAC_VLAN)) {\n+\t\tif (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||\n+\t\t    (params.opcode != ECORE_FILTER_ADD &&\n+\t\t     params.opcode != ECORE_FILTER_REPLACE))\n+\t\t\tstatus = PFVF_STATUS_FORCED;\n+\t\tgoto out;\n+\t}\n+\n+\trc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);\n+\tif (rc == ECORE_EXISTS) {\n+\t\tgoto out;\n+\t} else if (rc == ECORE_INVAL) {\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\t\tgoto out;\n+\t}\n+\n+\trc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,\n+\t\t\t\t       ECORE_SPQ_MODE_CB, OSAL_NULL);\n+\tif (rc)\n+\t\tstatus = PFVF_STATUS_FAILURE;\n+\n+out:\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,\n+\t\t\t       sizeof(struct pfvf_def_resp_tlv), status);\n+}\n+\n+static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t struct ecore_ptt *p_ptt,\n+\t\t\t\t\t struct ecore_vf_info *vf)\n+{\n+\tint i;\n+\n+\t/* Reset the SBs */\n+\tfor (i = 0; i < vf->num_sbs; i++)\n+\t\tecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,\n+\t\t\t\t\t\t  vf->igu_sbs[i],\n+\t\t\t\t\t\t  vf->opaque_fid, false);\n+\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,\n+\t\t\t       sizeof(struct pfvf_def_resp_tlv),\n+\t\t\t       PFVF_STATUS_SUCCESS);\n+}\n+\n+static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t   struct ecore_ptt *p_ptt,\n+\t\t\t\t   struct ecore_vf_info *vf)\n+{\n+\tu16 length = sizeof(struct pfvf_def_resp_tlv);\n+\tu8 status = PFVF_STATUS_SUCCESS;\n+\n+\t/* Disable Interrupts for VF */\n+\tecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);\n+\n+\t/* Reset Permission table */\n+\tecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);\n+\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,\n+\t\t\t       length, status);\n+}\n+\n+static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t 
    struct ecore_ptt *p_ptt,\n+\t\t\t\t     struct ecore_vf_info *p_vf)\n+{\n+\tu16 length = sizeof(struct pfvf_def_resp_tlv);\n+\n+\tecore_iov_vf_cleanup(p_hwfn, p_vf);\n+\n+\tecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,\n+\t\t\t       length, PFVF_STATUS_SUCCESS);\n+}\n+\n+static enum _ecore_status_t\n+ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,\n+\t\t\t   struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)\n+{\n+\tint cnt;\n+\tu32 val;\n+\n+\tecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);\n+\n+\tfor (cnt = 0; cnt < 50; cnt++) {\n+\t\tval = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);\n+\t\tif (!val)\n+\t\t\tbreak;\n+\t\tOSAL_MSLEEP(20);\n+\t}\n+\tecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);\n+\n+\tif (cnt == 50) {\n+\t\tDP_ERR(p_hwfn,\n+\t\t       \"VF[%d] - dorq failed to cleanup [usage 0x%08x]\\n\",\n+\t\t       p_vf->abs_vf_id, val);\n+\t\treturn ECORE_TIMEOUT;\n+\t}\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static enum _ecore_status_t\n+ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,\n+\t\t\t  struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)\n+{\n+\tu32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];\n+\tint i, cnt;\n+\n+\t/* Read initial consumers & producers */\n+\tfor (i = 0; i < MAX_NUM_VOQS; i++) {\n+\t\tu32 prod;\n+\n+\t\tcons[i] = ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t   PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +\n+\t\t\t\t   i * 0x40);\n+\t\tprod = ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\tPBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +\n+\t\t\t\ti * 0x40);\n+\t\tdistance[i] = prod - cons[i];\n+\t}\n+\n+\t/* Wait for consumers to pass the producers */\n+\ti = 0;\n+\tfor (cnt = 0; cnt < 50; cnt++) {\n+\t\tfor (; i < MAX_NUM_VOQS; i++) {\n+\t\t\tu32 tmp;\n+\n+\t\t\ttmp = ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t       PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +\n+\t\t\t\t       i * 0x40);\n+\t\t\tif (distance[i] > tmp - cons[i])\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (i == MAX_NUM_VOQS)\n+\t\t\tbreak;\n+\n+\t\tOSAL_MSLEEP(20);\n+\t}\n+\n+\tif (cnt == 50) {\n+\t\tDP_ERR(p_hwfn, \"VF[%d] - pbf polling failed on VOQ %d\\n\",\n+\t\t       p_vf->abs_vf_id, i);\n+\t\treturn ECORE_TIMEOUT;\n+\t}\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static enum _ecore_status_t\n+ecore_iov_vf_flr_poll_prs(struct ecore_hwfn *p_hwfn,\n+\t\t\t  struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)\n+{\n+\tu16 tc_cons[NUM_OF_TCS], tc_lb_cons[NUM_OF_TCS];\n+\tu16 prod[NUM_OF_TCS];\n+\tint i, cnt;\n+\n+\t/* Read initial consumers & producers */\n+\tfor (i = 0; i < NUM_OF_TCS; i++) {\n+\t\ttc_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t\t   PRS_REG_MSG_CT_MAIN_0 + i * 0x4);\n+\t\ttc_lb_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t\t      PRS_REG_MSG_CT_LB_0 + i * 0x4);\n+\t\tprod[i] = (u16)ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t\tBRB_REG_PER_TC_COUNTERS +\n+\t\t\t\t\tp_hwfn->port_id * 0x20 + i * 0x4);\n+\t}\n+\n+\t/* Wait for consumers to pass the producers */\n+\ti = 0;\n+\tfor (cnt = 0; cnt < 50; cnt++) {\n+\t\tfor (; i < NUM_OF_TCS; i++) {\n+\t\t\tu16 cons;\n+\n+\t\t\tcons = (u16)ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t\t     PRS_REG_MSG_CT_MAIN_0 + i * 0x4);\n+\t\t\tif (prod[i] - tc_cons[i] > cons - tc_cons[i])\n+\t\t\t\tbreak;\n+\n+\t\t\tcons = (u16)ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t\t     PRS_REG_MSG_CT_LB_0 + i * 0x4);\n+\t\t\tif (prod[i] - tc_lb_cons[i] > cons - tc_lb_cons[i])\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (i == NUM_OF_TCS)\n+\t\t\tbreak;\n+\n+\t\t/* 16-bit counters; Delay instead of sleep... 
*/\n+\t\tOSAL_UDELAY(10);\n+\t}\n+\n+\t/* This is only optional polling for BB, since registers are only\n+\t * 16-bit wide and guarantee is not good enough. Don't fail things\n+\t * if polling didn't return the expected results.\n+\t */\n+\tif (cnt == 50)\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"VF[%d] - prs polling failed on TC %d\\n\",\n+\t\t\t   p_vf->abs_vf_id, i);\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t  struct ecore_vf_info *p_vf,\n+\t\t\t\t\t\t  struct ecore_ptt *p_ptt)\n+{\n+\tenum _ecore_status_t rc;\n+\n+\t/* TODO - add SRC and TM polling once we add storage IOV */\n+\n+\trc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\trc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\trc = ecore_iov_vf_flr_poll_prs(p_hwfn, p_vf, p_ptt);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static enum _ecore_status_t\n+ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t struct ecore_ptt *p_ptt,\n+\t\t\t\t u16 rel_vf_id, u32 *ack_vfs)\n+{\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\tstruct ecore_vf_info *p_vf;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);\n+\tif (!p_vf)\n+\t\treturn ECORE_SUCCESS;\n+\n+\tif (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &\n+\t    (1ULL << (rel_vf_id % 64))) {\n+\t\tu16 vfid = p_vf->abs_vf_id;\n+\n+\t\t/* TODO - should we lock channel? */\n+\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"VF[%d] - Handling FLR\\n\", vfid);\n+\n+\t\tecore_iov_vf_cleanup(p_hwfn, p_vf);\n+\n+\t\t/* If VF isn't active, no need for anything but SW */\n+\t\tif (!ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, p_vf->relative_vf_id))\n+\t\t\tgoto cleanup;\n+\n+\t\t/* TODO - what to do in case of failure? */\n+\t\trc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);\n+\t\tif (rc != ECORE_SUCCESS)\n+\t\t\tgoto cleanup;\n+\n+\t\trc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);\n+\t\tif (rc) {\n+\t\t\t/* TODO - what's now? What a mess.... */\n+\t\t\tDP_ERR(p_hwfn, \"Failed handle FLR of VF[%d]\\n\", vfid);\n+\t\t\treturn rc;\n+\t\t}\n+\n+\t\t/* VF_STOPPED has to be set only after final cleanup\n+\t\t * but prior to re-enabling the VF.\n+\t\t */\n+\t\tp_vf->state = VF_STOPPED;\n+\n+\t\trc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);\n+\t\tif (rc) {\n+\t\t\t/* TODO - again, a mess... 
*/\n+\t\t\tDP_ERR(p_hwfn, \"Failed to re-enable VF[%d] access\\n\",\n+\t\t\t       vfid);\n+\t\t\treturn rc;\n+\t\t}\n+cleanup:\n+\t\t/* Mark VF for ack and clean pending state */\n+\t\tif (p_vf->state == VF_RESET)\n+\t\t\tp_vf->state = VF_STOPPED;\n+\t\tack_vfs[vfid / 32] |= (1 << (vfid % 32));\n+\t\tp_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=\n+\t\t    ~(1ULL << (rel_vf_id % 64));\n+\t\tp_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=\n+\t\t    ~(1ULL << (rel_vf_id % 64));\n+\t}\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      struct ecore_ptt *p_ptt)\n+{\n+\tu32 ack_vfs[VF_MAX_STATIC / 32];\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\tu16 i;\n+\n+\tOSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));\n+\n+\tfor (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)\n+\t\tecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);\n+\n+\trc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t\n+ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,\n+\t\t\t\tstruct ecore_ptt *p_ptt, u16 rel_vf_id)\n+{\n+\tu32 ack_vfs[VF_MAX_STATIC / 32];\n+\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\n+\tOSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));\n+\n+\tecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);\n+\n+\trc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);\n+\treturn rc;\n+}\n+\n+int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)\n+{\n+\tu16 i, found = 0;\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV, \"Marking FLR-ed VFs\\n\");\n+\tfor (i = 0; i < (VF_MAX_STATIC / 32); i++)\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"[%08x,...,%08x]: %08x\\n\",\n+\t\t\t   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);\n+\n+\t/* Mark VFs */\n+\tfor (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++) {\n+\t\tstruct ecore_vf_info *p_vf;\n+\t\tu8 vfid;\n+\n+\t\tp_vf = ecore_iov_get_vf_info(p_hwfn, i, false);\n+\t\tif (!p_vf)\n+\t\t\tcontinue;\n+\n+\t\tvfid = p_vf->abs_vf_id;\n+\t\tif ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {\n+\t\t\tu64 *p_flr = p_hwfn->pf_iov_info->pending_flr;\n+\t\t\tu16 rel_vf_id = p_vf->relative_vf_id;\n+\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VF[%d] [rel %d] got FLR-ed\\n\",\n+\t\t\t\t   vfid, rel_vf_id);\n+\n+\t\t\tp_vf->state = VF_RESET;\n+\n+\t\t\t/* No need to lock here, since pending_flr should\n+\t\t\t * only change here and before ACKing MFW. 
Since\n+\t\t\t * MFW will not trigger an additional attention for\n+\t\t\t * VF flr until ACKs, we're safe.\n+\t\t\t */\n+\t\t\tp_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);\n+\t\t\tfound = 1;\n+\t\t}\n+\t}\n+\n+\treturn found;\n+}\n+\n+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,\n+\t\t\tu16 vfid,\n+\t\t\tstruct ecore_mcp_link_params *params,\n+\t\t\tstruct ecore_mcp_link_state *link,\n+\t\t\tstruct ecore_mcp_link_capabilities *p_caps)\n+{\n+\tstruct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);\n+\tstruct ecore_bulletin_content *p_bulletin;\n+\n+\tif (!p_vf)\n+\t\treturn;\n+\n+\tp_bulletin = p_vf->bulletin.p_virt;\n+\tp_bulletin->req_autoneg = params->speed.autoneg;\n+\tp_bulletin->req_adv_speed = params->speed.advertised_speeds;\n+\tp_bulletin->req_forced_speed = params->speed.forced_speed;\n+\tp_bulletin->req_autoneg_pause = params->pause.autoneg;\n+\tp_bulletin->req_forced_rx = params->pause.forced_rx;\n+\tp_bulletin->req_forced_tx = params->pause.forced_tx;\n+\tp_bulletin->req_loopback = params->loopback_mode;\n+\n+\tp_bulletin->link_up = link->link_up;\n+\tp_bulletin->speed = link->speed;\n+\tp_bulletin->full_duplex = link->full_duplex;\n+\tp_bulletin->autoneg = link->an;\n+\tp_bulletin->autoneg_complete = link->an_complete;\n+\tp_bulletin->parallel_detection = link->parallel_detection;\n+\tp_bulletin->pfc_enabled = link->pfc_enabled;\n+\tp_bulletin->partner_adv_speed = link->partner_adv_speed;\n+\tp_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;\n+\tp_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;\n+\tp_bulletin->partner_adv_pause = link->partner_adv_pause;\n+\tp_bulletin->sfp_tx_fault = link->sfp_tx_fault;\n+\n+\tp_bulletin->capability_speed = p_caps->speed_capabilities;\n+}\n+\n+void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,\n+\t\t\tu16 vfid,\n+\t\t\tstruct ecore_mcp_link_params *p_params,\n+\t\t\tstruct ecore_mcp_link_state *p_link,\n+\t\t\tstruct ecore_mcp_link_capabilities *p_caps)\n+{\n+\tstruct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);\n+\tstruct ecore_bulletin_content *p_bulletin;\n+\n+\tif (!p_vf)\n+\t\treturn;\n+\n+\tp_bulletin = p_vf->bulletin.p_virt;\n+\n+\tif (p_params)\n+\t\t__ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);\n+\tif (p_link)\n+\t\t__ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);\n+\tif (p_caps)\n+\t\t__ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);\n+}\n+\n+void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,\n+\t\t\t       struct ecore_ptt *p_ptt, int vfid)\n+{\n+\tstruct ecore_iov_vf_mbx *mbx;\n+\tstruct ecore_vf_info *p_vf;\n+\tint i;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!p_vf)\n+\t\treturn;\n+\n+\tmbx = &p_vf->vf_mbx;\n+\n+\t/* ecore_iov_process_mbx_request */\n+\tDP_VERBOSE(p_hwfn,\n+\t\t   ECORE_MSG_IOV,\n+\t\t   \"ecore_iov_process_mbx_req vfid %d\\n\", p_vf->abs_vf_id);\n+\n+\tmbx->first_tlv = mbx->req_virt->first_tlv;\n+\n+\t/* check if tlv type is known */\n+\tif (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {\n+\t\t/* Lock the per vf op mutex and note the locker's identity.\n+\t\t * The unlock will take place in mbx response.\n+\t\t */\n+\t\tecore_iov_lock_vf_pf_channel(p_hwfn,\n+\t\t\t\t\t     p_vf, mbx->first_tlv.tl.type);\n+\n+\t\t/* switch on the opcode */\n+\t\tswitch (mbx->first_tlv.tl.type) {\n+\t\tcase CHANNEL_TLV_ACQUIRE:\n+\t\t\tecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_VPORT_START:\n+\t\t\tecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, 
p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_VPORT_TEARDOWN:\n+\t\t\tecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_START_RXQ:\n+\t\t\tecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_START_TXQ:\n+\t\t\tecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_STOP_RXQS:\n+\t\t\tecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_STOP_TXQS:\n+\t\t\tecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_UPDATE_RXQ:\n+\t\t\tecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_VPORT_UPDATE:\n+\t\t\tecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_UCAST_FILTER:\n+\t\t\tecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_CLOSE:\n+\t\t\tecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_INT_CLEANUP:\n+\t\t\tecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\tcase CHANNEL_TLV_RELEASE:\n+\t\t\tecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tecore_iov_unlock_vf_pf_channel(p_hwfn,\n+\t\t\t\t\t       p_vf, mbx->first_tlv.tl.type);\n+\n+\t} else {\n+\t\t/* unknown TLV - this may belong to a VF driver from the future\n+\t\t * - a version written after this PF driver was written, which\n+\t\t * supports features unknown as of yet. Too bad since we don't\n+\t\t * support them. Or this may be because someone wrote a crappy\n+\t\t * VF driver and is sending garbage over the channel.\n+\t\t */\n+\t\tDP_ERR(p_hwfn,\n+\t\t       \"unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\\n\",\n+\t\t       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);\n+\n+\t\tfor (i = 0; i < 20; i++) {\n+\t\t\tDP_VERBOSE(p_hwfn,\n+\t\t\t\t   ECORE_MSG_IOV,\n+\t\t\t\t   \"%x \",\n+\t\t\t\t   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);\n+\t\t}\n+\n+\t\t/* test whether we can respond to the VF (do we have an address\n+\t\t * for it?)\n+\t\t */\n+\t\tif (p_vf->state == VF_ACQUIRED)\n+\t\t\tDP_ERR(p_hwfn, \"UNKNOWN TLV Not supported yet\\n\");\n+\t}\n+\n+#ifdef CONFIG_ECORE_SW_CHANNEL\n+\tmbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;\n+\tmbx->sw_mbx.response_offset = 0;\n+#endif\n+}\n+\n+static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t __le16 vfid,\n+\t\t\t\t\t\t struct regpair *vf_msg)\n+{\n+\tstruct ecore_vf_info *p_vf;\n+\tu8 min, max;\n+\n+\tif (!p_hwfn->pf_iov_info || !p_hwfn->pf_iov_info->vfs_array) {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"Got a message from VF while PF is not initialized for IOV support\\n\");\n+\t\treturn ECORE_SUCCESS;\n+\t}\n+\n+\t/* Find the VF record - message comes with relative [engine] vfid */\n+\tmin = (u8)p_hwfn->hw_info.first_vf_in_pf;\n+\tmax = min + p_hwfn->p_dev->sriov_info.total_vfs;\n+\t/* @@@TBD - for BE machines, should echo field be reversed? 
*/\n+\tif ((u8)vfid < min || (u8)vfid >= max) {\n+\t\tDP_INFO(p_hwfn,\n+\t\t\t\"Got a message from VF with relative id 0x%08x, but PF's range is [0x%02x,...,0x%02x)\\n\",\n+\t\t\t(u8)vfid, min, max);\n+\t\treturn ECORE_INVAL;\n+\t}\n+\tp_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)vfid - min];\n+\n+\t/* List the physical address of the request so that handler\n+\t * could later on copy the message from it.\n+\t */\n+\tp_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;\n+\n+\treturn OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);\n+}\n+\n+enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t   u8 opcode,\n+\t\t\t\t\t   __le16 echo,\n+\t\t\t\t\t   union event_ring_data *data)\n+{\n+\tswitch (opcode) {\n+\tcase COMMON_EVENT_VF_PF_CHANNEL:\n+\t\treturn ecore_sriov_vfpf_msg(p_hwfn, echo,\n+\t\t\t\t\t    &data->vf_pf_channel.msg_addr);\n+\tcase COMMON_EVENT_VF_FLR:\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"VF-FLR is still not supported\\n\");\n+\t\treturn ECORE_SUCCESS;\n+\tdefault:\n+\t\tDP_INFO(p_hwfn->p_dev, \"Unknown sriov eqe event 0x%02x\\n\",\n+\t\t\topcode);\n+\t\treturn ECORE_INVAL;\n+\t}\n+}\n+\n+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)\n+{\n+\treturn !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &\n+\t\t   (1ULL << (rel_vf_id % 64)));\n+}\n+\n+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,\n+\t\t\t     bool b_enabled_only)\n+{\n+\tif (!p_hwfn->pf_iov_info) {\n+\t\tDP_NOTICE(p_hwfn->p_dev, true, \"No iov info\\n\");\n+\t\treturn false;\n+\t}\n+\n+\treturn b_enabled_only ? ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id) :\n+\t    (rel_vf_id < p_hwfn->p_dev->sriov_info.total_vfs);\n+}\n+\n+struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn\n+\t\t\t\t\t\t\t  *p_hwfn,\n+\t\t\t\t\t\t\t  u16 relative_vf_id,\n+\t\t\t\t\t\t\t  bool b_enabled_only)\n+{\n+\tstruct ecore_vf_info *vf = OSAL_NULL;\n+\n+\tvf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);\n+\tif (!vf)\n+\t\treturn OSAL_NULL;\n+\n+\treturn &vf->p_vf_info;\n+}\n+\n+void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)\n+{\n+\tu64 add_bit = 1ULL << (vfid % 64);\n+\n+\t/* TODO - add locking mechanisms [no atomics in ecore, so we can't\n+\t * add the lock inside the ecore_pf_iov struct].\n+\t */\n+\tp_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;\n+}\n+\n+void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t       u64 *events)\n+{\n+\tu64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;\n+\n+\t/* TODO - Take a lock */\n+\tOSAL_MEMCPY(events, p_pending_events,\n+\t\t    sizeof(u64) * ECORE_VF_ARRAY_LENGTH);\n+\tOSAL_MEMSET(p_pending_events, 0, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);\n+}\n+\n+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t   struct ecore_ptt *ptt, int vfid)\n+{\n+\tstruct ecore_dmae_params params;\n+\tstruct ecore_vf_info *vf_info;\n+\n+\tvf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf_info)\n+\t\treturn ECORE_INVAL;\n+\n+\tOSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));\n+\tparams.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;\n+\tparams.src_vfid = vf_info->abs_vf_id;\n+\n+\tif (ecore_dmae_host2host(p_hwfn, ptt,\n+\t\t\t\t vf_info->vf_mbx.pending_req,\n+\t\t\t\t vf_info->vf_mbx.req_phys,\n+\t\t\t\t sizeof(union vfpf_tlvs) / 4, &params)) {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"Failed to copy message from 
VF 0x%02x\\n\", vfid);\n+\n+\t\treturn ECORE_IO;\n+\t}\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       u8 *mac, int vfid)\n+{\n+\tstruct ecore_vf_info *vf_info;\n+\tu64 feature;\n+\n+\tvf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf_info) {\n+\t\tDP_NOTICE(p_hwfn->p_dev, true,\n+\t\t\t  \"Can not set forced MAC, invalid vfid [%d]\\n\", vfid);\n+\t\treturn;\n+\t}\n+\n+\tfeature = 1 << MAC_ADDR_FORCED;\n+\tOSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);\n+\n+\tvf_info->bulletin.p_virt->valid_bitmap |= feature;\n+\t/* Forced MAC will disable MAC_ADDR */\n+\tvf_info->bulletin.p_virt->valid_bitmap &=\n+\t    ~(1 << VFPF_BULLETIN_MAC_ADDR);\n+\n+\tecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);\n+}\n+\n+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\tu8 *mac, int vfid)\n+{\n+\tstruct ecore_vf_info *vf_info;\n+\tu64 feature;\n+\n+\tvf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf_info) {\n+\t\tDP_NOTICE(p_hwfn->p_dev, true,\n+\t\t\t  \"Can not set MAC, invalid vfid [%d]\\n\", vfid);\n+\t\treturn ECORE_INVAL;\n+\t}\n+\n+\tif (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"Can not set MAC, Forced MAC is configured\\n\");\n+\t\treturn ECORE_INVAL;\n+\t}\n+\n+\tfeature = 1 << VFPF_BULLETIN_MAC_ADDR;\n+\tOSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);\n+\n+\tvf_info->bulletin.p_virt->valid_bitmap |= feature;\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\tu16 pvid, int vfid)\n+{\n+\tstruct ecore_vf_info *vf_info;\n+\tu64 feature;\n+\n+\tvf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf_info) {\n+\t\tDP_NOTICE(p_hwfn->p_dev, true,\n+\t\t\t  \"Can not set forced MAC, invalid vfid [%d]\\n\", vfid);\n+\t\treturn;\n+\t}\n+\n+\tfeature = 1 << VLAN_ADDR_FORCED;\n+\tvf_info->bulletin.p_virt->pvid = pvid;\n+\tif (pvid)\n+\t\tvf_info->bulletin.p_virt->valid_bitmap |= feature;\n+\telse\n+\t\tvf_info->bulletin.p_virt->valid_bitmap &= ~feature;\n+\n+\tecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);\n+}\n+\n+enum _ecore_status_t\n+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t       bool b_untagged_only, int vfid)\n+{\n+\tstruct ecore_vf_info *vf_info;\n+\tu64 feature;\n+\n+\tvf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf_info) {\n+\t\tDP_NOTICE(p_hwfn->p_dev, true,\n+\t\t\t  \"Can not set forced MAC, invalid vfid [%d]\\n\", vfid);\n+\t\treturn ECORE_INVAL;\n+\t}\n+\n+\t/* Since this is configurable only during vport-start, don't take it\n+\t * if we're past that point.\n+\t */\n+\tif (vf_info->state == VF_ENABLED) {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"Can't support untagged change for vfid[%d] - VF is already active\\n\",\n+\t\t\t   vfid);\n+\t\treturn ECORE_INVAL;\n+\t}\n+\n+\t/* Set configuration; This will later be taken into account during the\n+\t * VF initialization.\n+\t */\n+\tfeature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |\n+\t    (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);\n+\tvf_info->bulletin.p_virt->valid_bitmap |= feature;\n+\n+\tvf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 
1\n+\t    : 0;\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,\n+\t\t\t\t  u16 *opaque_fid)\n+{\n+\tstruct ecore_vf_info *vf_info;\n+\n+\tvf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf_info)\n+\t\treturn;\n+\n+\t*opaque_fid = vf_info->opaque_fid;\n+}\n+\n+void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,\n+\t\t\t\tu8 *p_vort_id)\n+{\n+\tstruct ecore_vf_info *vf_info;\n+\n+\tvf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf_info)\n+\t\treturn;\n+\n+\t*p_vort_id = vf_info->vport_id;\n+}\n+\n+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)\n+{\n+\tstruct ecore_vf_info *p_vf_info;\n+\n+\tp_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!p_vf_info)\n+\t\treturn false;\n+\n+\treturn !!p_vf_info->vport_instance;\n+}\n+\n+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)\n+{\n+\tstruct ecore_vf_info *p_vf_info;\n+\n+\tp_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\n+\treturn p_vf_info->state == VF_STOPPED;\n+}\n+\n+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)\n+{\n+\tstruct ecore_vf_info *vf_info;\n+\n+\tvf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf_info)\n+\t\treturn false;\n+\n+\treturn vf_info->spoof_chk;\n+}\n+\n+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)\n+{\n+\tif (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||\n+\t    !IS_PF_SRIOV_ALLOC(p_hwfn) ||\n+\t    !ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, vfid))\n+\t\treturn false;\n+\telse\n+\t\treturn true;\n+}\n+\n+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    int vfid, bool val)\n+{\n+\tenum _ecore_status_t rc = ECORE_INVAL;\n+\tstruct ecore_vf_info *vf;\n+\n+\tif (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {\n+\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t  \"SR-IOV sanity check failed, can't set spoofchk\\n\");\n+\t\tgoto out;\n+\t}\n+\n+\tvf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf)\n+\t\tgoto out;\n+\n+\tif (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {\n+\t\t/* After VF VPORT start PF will configure spoof check */\n+\t\tvf->req_spoofchk_val = val;\n+\t\trc = ECORE_SUCCESS;\n+\t\tgoto out;\n+\t}\n+\n+\trc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);\n+\n+out:\n+\treturn rc;\n+}\n+\n+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)\n+{\n+\tu8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;\n+\n+\tmax_chains_per_vf = (max_chains_per_vf) ? 
max_chains_per_vf\n+\t    : ECORE_MAX_VF_CHAINS_PER_PF;\n+\n+\treturn max_chains_per_vf;\n+}\n+\n+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  u16 rel_vf_id,\n+\t\t\t\t\t  void **pp_req_virt_addr,\n+\t\t\t\t\t  u16 *p_req_virt_size)\n+{\n+\tstruct ecore_vf_info *vf_info =\n+\t    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\n+\tif (!vf_info)\n+\t\treturn;\n+\n+\tif (pp_req_virt_addr)\n+\t\t*pp_req_virt_addr = vf_info->vf_mbx.req_virt;\n+\n+\tif (p_req_virt_size)\n+\t\t*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);\n+}\n+\n+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    u16 rel_vf_id,\n+\t\t\t\t\t    void **pp_reply_virt_addr,\n+\t\t\t\t\t    u16 *p_reply_virt_size)\n+{\n+\tstruct ecore_vf_info *vf_info =\n+\t    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\n+\tif (!vf_info)\n+\t\treturn;\n+\n+\tif (pp_reply_virt_addr)\n+\t\t*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;\n+\n+\tif (p_reply_virt_size)\n+\t\t*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);\n+}\n+\n+#ifdef CONFIG_ECORE_SW_CHANNEL\n+struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *vf_info =\n+\t    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\n+\tif (!vf_info)\n+\t\treturn OSAL_NULL;\n+\n+\treturn &vf_info->vf_mbx.sw_mbx;\n+}\n+#endif\n+\n+bool ecore_iov_is_valid_vfpf_msg_length(u32 length)\n+{\n+\treturn (length >= sizeof(struct vfpf_first_tlv) &&\n+\t\t(length <= sizeof(union vfpf_tlvs)));\n+}\n+\n+u32 ecore_iov_pfvf_msg_length(void)\n+{\n+\treturn sizeof(union pfvf_tlvs);\n+}\n+\n+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *p_vf;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\tif (!p_vf || !p_vf->bulletin.p_virt)\n+\t\treturn OSAL_NULL;\n+\n+\tif (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))\n+\t\treturn OSAL_NULL;\n+\n+\treturn p_vf->bulletin.p_virt->mac;\n+}\n+\n+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *p_vf;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\tif (!p_vf || !p_vf->bulletin.p_virt)\n+\t\treturn 0;\n+\n+\tif (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))\n+\t\treturn 0;\n+\n+\treturn p_vf->bulletin.p_virt->pvid;\n+}\n+\n+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t struct ecore_ptt *p_ptt,\n+\t\t\t\t\t\t int vfid, int val)\n+{\n+\tstruct ecore_vf_info *vf;\n+\tenum _ecore_status_t rc;\n+\tu8 abs_vp_id = 0;\n+\n+\tvf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\n+\tif (!vf)\n+\t\treturn ECORE_INVAL;\n+\n+\trc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);\n+\tif (rc != ECORE_SUCCESS)\n+\t\treturn rc;\n+\n+\trc = ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,\n+\t\t\t\t\t\t     int vfid, u32 rate)\n+{\n+\tstruct ecore_vf_info *vf;\n+\tenum _ecore_status_t rc;\n+\tu8 vport_id;\n+\tint i;\n+\n+\tfor_each_hwfn(p_dev, i) {\n+\t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];\n+\n+\t\tif (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"SR-IOV sanity check failed, can't set min rate\\n\");\n+\t\t\treturn ECORE_INVAL;\n+\t\t}\n+\t}\n+\n+\tvf = 
ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);\n+\tvport_id = vf->vport_id;\n+\n+\trc = ecore_configure_vport_wfq(p_dev, vport_id, rate);\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    struct ecore_ptt *p_ptt,\n+\t\t\t\t\t    int vfid,\n+\t\t\t\t\t    struct ecore_eth_stats *p_stats)\n+{\n+\tstruct ecore_vf_info *vf;\n+\n+\tvf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf)\n+\t\treturn ECORE_INVAL;\n+\n+\tif (vf->state != VF_ENABLED)\n+\t\treturn ECORE_INVAL;\n+\n+\t__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,\n+\t\t\t\tvf->abs_vf_id + 0x10, false);\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *p_vf;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\tif (!p_vf)\n+\t\treturn 0;\n+\n+\treturn p_vf->num_rxqs;\n+}\n+\n+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *p_vf;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\tif (!p_vf)\n+\t\treturn 0;\n+\n+\treturn p_vf->num_active_rxqs;\n+}\n+\n+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *p_vf;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\tif (!p_vf)\n+\t\treturn OSAL_NULL;\n+\n+\treturn p_vf->ctx;\n+}\n+\n+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *p_vf;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\tif (!p_vf)\n+\t\treturn 0;\n+\n+\treturn p_vf->num_sbs;\n+}\n+\n+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *p_vf;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\tif (!p_vf)\n+\t\treturn false;\n+\n+\treturn (p_vf->state == VF_FREE);\n+}\n+\n+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *p_vf;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\tif (!p_vf)\n+\t\treturn false;\n+\n+\treturn (p_vf->state == VF_ACQUIRED);\n+}\n+\n+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)\n+{\n+\tstruct ecore_vf_info *p_vf;\n+\n+\tp_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);\n+\tif (!p_vf)\n+\t\treturn false;\n+\n+\treturn (p_vf->state == VF_ENABLED);\n+}\n+\n+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)\n+{\n+\tstruct ecore_wfq_data *vf_vp_wfq;\n+\tstruct ecore_vf_info *vf_info;\n+\n+\tvf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n+\tif (!vf_info)\n+\t\treturn 0;\n+\n+\tvf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];\n+\n+\tif (vf_vp_wfq->configured)\n+\t\treturn vf_vp_wfq->min_speed;\n+\telse\n+\t\treturn 0;\n+}\ndiff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h\nnew file mode 100644\nindex 0000000..9ddc9aa\n--- /dev/null\n+++ b/drivers/net/qede/base/ecore_sriov.h\n@@ -0,0 +1,390 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#ifndef __ECORE_SRIOV_H__\n+#define __ECORE_SRIOV_H__\n+\n+#include \"ecore_status.h\"\n+#include \"ecore_vfpf_if.h\"\n+#include \"ecore_iov_api.h\"\n+#include \"ecore_hsi_common.h\"\n+\n+#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2\n+\n+#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS 
\\\n+\t(MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)\n+\n+/* Represents a full message. Both the request filled by VF\n+ * and the response filled by the PF. The VF needs one copy\n+ * of this message; it fills the request part and sends it to\n+ * the PF. The PF will copy the response to the response part for\n+ * the VF to later read it. The PF needs to hold a message like this\n+ * per VF, the request that is copied to the PF is placed in the\n+ * request part, and the response is filled by the PF before sending\n+ * it to the VF.\n+ */\n+struct ecore_vf_mbx_msg {\n+\tunion vfpf_tlvs req;\n+\tunion pfvf_tlvs resp;\n+};\n+\n+/* This data is held in the ecore_hwfn structure for VFs only. */\n+struct ecore_vf_iov {\n+\tunion vfpf_tlvs *vf2pf_request;\n+\tdma_addr_t vf2pf_request_phys;\n+\tunion pfvf_tlvs *pf2vf_reply;\n+\tdma_addr_t pf2vf_reply_phys;\n+\n+\t/* Should be taken whenever the mailbox buffers are accessed */\n+\tosal_mutex_t mutex;\n+\tu8 *offset;\n+\n+\t/* Bulletin Board */\n+\tstruct ecore_bulletin bulletin;\n+\tstruct ecore_bulletin_content bulletin_shadow;\n+\n+\t/* we set aside a copy of the acquire response */\n+\tstruct pfvf_acquire_resp_tlv acquire_resp;\n+};\n+\n+/* This mailbox is maintained per VF in its PF and\n+ * contains all information required for sending / receiving\n+ * a message.\n+ */\n+struct ecore_iov_vf_mbx {\n+\tunion vfpf_tlvs *req_virt;\n+\tdma_addr_t req_phys;\n+\tunion pfvf_tlvs *reply_virt;\n+\tdma_addr_t reply_phys;\n+\n+\t/* Address in VF where a pending message is located */\n+\tdma_addr_t pending_req;\n+\n+\tu8 *offset;\n+\n+#ifdef CONFIG_ECORE_SW_CHANNEL\n+\tstruct ecore_iov_sw_mbx sw_mbx;\n+#endif\n+\n+\t/* VF GPA address */\n+\tu32 vf_addr_lo;\n+\tu32 vf_addr_hi;\n+\n+\tstruct vfpf_first_tlv first_tlv;\t/* saved VF request header */\n+\n+\tu8 flags;\n+#define VF_MSG_INPROCESS\t0x1\t/* failsafe - the FW should prevent\n+\t\t\t\t\t * more than one pending msg\n+\t\t\t\t\t */\n+};\n+\n+struct ecore_vf_q_info {\n+\tu16 fw_rx_qid;\n+\tu16 fw_tx_qid;\n+\tu8 fw_cid;\n+\tu8 rxq_active;\n+\tu8 txq_active;\n+};\n+\n+enum int_mod {\n+\tVPORT_INT_MOD_UNDEFINED = 0,\n+\tVPORT_INT_MOD_ADAPTIVE = 1,\n+\tVPORT_INT_MOD_OFF = 2,\n+\tVPORT_INT_MOD_LOW = 100,\n+\tVPORT_INT_MOD_MEDIUM = 200,\n+\tVPORT_INT_MOD_HIGH = 300\n+};\n+\n+enum vf_state {\n+\tVF_FREE = 0,\t\t/* VF ready to be acquired; holds no resc */\n+\tVF_ACQUIRED = 1,\t/* VF, acquired, but not initialized */\n+\tVF_ENABLED = 2,\t\t/* VF, Enabled */\n+\tVF_RESET = 3,\t\t/* VF, FLR'd, pending cleanup */\n+\tVF_STOPPED = 4\t\t/* VF, Stopped */\n+};\n+\n+struct ecore_vf_vlan_shadow {\n+\tbool used;\n+\tu16 vid;\n+};\n+\n+struct ecore_vf_shadow_config {\n+\t/* Shadow copy of all guest vlans */\n+\tstruct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];\n+\n+\tu8 inner_vlan_removal;\n+};\n+\n+/* PFs maintain an array of this structure, per VF */\n+struct ecore_vf_info {\n+\tstruct ecore_iov_vf_mbx vf_mbx;\n+\tenum vf_state state;\n+\tu8 to_disable;\n+\n+\tstruct ecore_bulletin bulletin;\n+\tdma_addr_t vf_bulletin;\n+\n+\tu32 concrete_fid;\n+\tu16 opaque_fid;\n+\tu16 mtu;\n+\n+\tu8 vport_id;\n+\tu8 relative_vf_id;\n+\tu8 abs_vf_id;\n+#define ECORE_VF_ABS_ID(p_hwfn, p_vf)\t(ECORE_PATH_ID(p_hwfn) ? 
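/* Editor's sketch (not part of the original patch): the vf_state enum above
 * describes the VF lifecycle; a debug helper such as this hypothetical
 * vf_state_str() makes the transitions easier to trace in logs.
 */
static const char *vf_state_str(enum vf_state state)
{
	switch (state) {
	case VF_FREE:     return "FREE";     /* ready to be acquired */
	case VF_ACQUIRED: return "ACQUIRED"; /* acquired, not yet initialized */
	case VF_ENABLED:  return "ENABLED";
	case VF_RESET:    return "RESET";    /* FLR'd, pending cleanup */
	case VF_STOPPED:  return "STOPPED";
	default:          return "UNKNOWN";
	}
}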
\\\n+\t\t\t\t\t (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \\\n+\t\t\t\t\t (p_vf)->abs_vf_id)\n+\n+\tu8 vport_instance;\t/* Number of active vports */\n+\tu8 num_rxqs;\n+\tu8 num_txqs;\n+\n+\tu8 num_sbs;\n+\n+\tu8 num_mac_filters;\n+\tu8 num_vlan_filters;\n+\tu8 num_mc_filters;\n+\n+\tstruct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];\n+\tu16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];\n+\n+\t/* TODO - Only Windows uses it - should be removed */\n+\tu8 was_malicious;\n+\tu8 num_active_rxqs;\n+\tvoid *ctx;\n+\tstruct ecore_public_vf_info p_vf_info;\n+\tbool spoof_chk;\t\t/* Currently configured on HW */\n+\tbool req_spoofchk_val;\t/* Requested value */\n+\n+\t/* Stores the configuration requested by VF */\n+\tstruct ecore_vf_shadow_config shadow_config;\n+\n+\t/* A bitfield using bulletin's valid-map bits, used to indicate\n+\t * which of the bulletin board features have been configured.\n+\t */\n+\tu64 configured_features;\n+#define ECORE_IOV_CONFIGURED_FEATURES_MASK\t((1 << MAC_ADDR_FORCED) | \\\n+\t\t\t\t\t\t (1 << VLAN_ADDR_FORCED))\n+};\n+\n+/* This structure is part of ecore_hwfn and used only for PFs that have sriov\n+ * capability enabled.\n+ */\n+struct ecore_pf_iov {\n+\tstruct ecore_vf_info vfs_array[MAX_NUM_VFS];\n+\tu64 pending_events[ECORE_VF_ARRAY_LENGTH];\n+\tu64 pending_flr[ECORE_VF_ARRAY_LENGTH];\n+\tu16 base_vport_id;\n+\n+\t/* Message buffers are allocated contiguously and split among the VFs */\n+\tvoid *mbx_msg_virt_addr;\n+\tdma_addr_t mbx_msg_phys_addr;\n+\tu32 mbx_msg_size;\n+\tvoid *mbx_reply_virt_addr;\n+\tdma_addr_t mbx_reply_phys_addr;\n+\tu32 mbx_reply_size;\n+\tvoid *p_bulletins;\n+\tdma_addr_t bulletins_phys;\n+\tu32 bulletins_size;\n+};\n+\n+#ifdef CONFIG_ECORE_SRIOV\n+/**\n+ * @brief Read sriov related information and allocate resources -\n+ *  reads from configuration space, shmem, and allocates the VF\n+ *  database in the PF.\n+ *\n+ * @param p_hwfn\n+ * @param p_ptt\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       struct ecore_ptt *p_ptt);\n+\n+/**\n+ * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset\n+ *\n+ * @param p_hwfn\n+ * @param offset\n+ * @param type\n+ * @param length\n+ *\n+ * @return pointer to the newly placed tlv\n+ */\n+void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,\n+\t\t    u8 **offset, u16 type, u16 length);\n+\n+/**\n+ * @brief list the types and lengths of the tlvs on the buffer\n+ *\n+ * @param p_hwfn\n+ * @param tlvs_list\n+ */\n+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list);\n+\n+/**\n+ * @brief ecore_iov_alloc - allocate sriov related resources\n+ *\n+ * @param p_hwfn\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);\n+\n+/**\n+ * @brief ecore_iov_setup - setup sriov related resources\n+ *\n+ * @param p_hwfn\n+ * @param p_ptt\n+ */\n+void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);\n+\n+/**\n+ * @brief ecore_iov_free - free sriov related resources\n+ *\n+ * @param p_hwfn\n+ */\n+void ecore_iov_free(struct ecore_hwfn *p_hwfn);\n+\n+/**\n+ * @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.\n+ *\n+ * @param p_hwfn\n+ * @param opcode\n+ * @param echo\n+ * @param data\n+ */\n+enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t   u8 opcode,\n+\t\t\t\t\t   __le16 echo,\n+\t\t\t\t\t   union event_ring_data *data);\n+\n+/**\n+ * @brief calculate CRC for bulletin board validation\n+ 
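/* Editor's sketch (not part of the original patch): every vf2pf message is
 * built with ecore_add_tlv(), which places one typed TLV at *offset and
 * advances the offset; a chain is always closed with CHANNEL_TLV_LIST_END
 * so the parser knows where to stop. `buf' stands in for the request
 * mailbox used by the real flows later in this patch.
 */
static void example_build_tlv_chain(struct ecore_hwfn *p_hwfn, u8 *buf)
{
	u8 *offset = buf;

	/* first TLV: the typed request header */
	ecore_add_tlv(p_hwfn, &offset, CHANNEL_TLV_ACQUIRE,
		      sizeof(struct vfpf_acquire_tlv));

	/* last TLV: list terminator */
	ecore_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
}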
*\n+ * @param crc - basic crc seed\n+ * @param ptr - pointer to beginning of buffer\n+ * @param length - length of buffer in bytes\n+ *\n+ * @return calculated crc over buffer [with respect to seed].\n+ */\n+u32 ecore_crc32(u32 crc, u8 *ptr, u32 length);\n+\n+/**\n+ * @brief Mark structs of vfs that have been FLR-ed.\n+ *\n+ * @param p_hwfn\n+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed\n+ *\n+ * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.\n+ */\n+int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *disabled_vfs);\n+\n+/**\n+ * @brief Search extended TLVs in request/reply buffer.\n+ *\n+ * @param p_hwfn\n+ * @param p_tlvs_list - Pointer to tlvs list\n+ * @param req_type - Type of TLV\n+ *\n+ * @return pointer to tlv type if found, otherwise returns NULL.\n+ */\n+void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t void *p_tlvs_list, u16 req_type);\n+\n+/**\n+ * @brief ecore_iov_get_vf_info - return the database of a\n+ *        specific VF\n+ *\n+ * @param p_hwfn\n+ * @param relative_vf_id - relative id of the VF for which info\n+ *\t\t\t is requested\n+ * @param b_enabled_only - pass false to access the VF even if it is disabled\n+ *\n+ * @return struct ecore_vf_info*\n+ */\n+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    u16 relative_vf_id,\n+\t\t\t\t\t    bool b_enabled_only);\n+#else\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn\n+\t\t\t\t\t\t\t  *p_hwfn,\n+\t\t\t\t\t\t\t  struct ecore_ptt\n+\t\t\t\t\t\t\t  *p_ptt)\n+{\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn *p_hwfn, u8 **offset,\n+\t\t\t\t       u16 type, u16 length)\n+{\n+\treturn OSAL_NULL;\n+}\n+\n+static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  void *tlvs_list)\n+{\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn\n+\t\t\t\t\t\t\t*p_hwfn)\n+{\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\tstruct ecore_ptt *p_ptt)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn *p_hwfn)\n+{\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn\n+\t\t\t\t\t\t\t      *p_hwfn,\n+\t\t\t\t\t\t\t      u8 opcode,\n+\t\t\t\t\t\t\t      __le16 echo,\n+\t\t\t\t\t\t\t      union\n+\t\t\t\t\t\t\t      event_ring_data\n+\t\t\t\t\t\t\t      *data)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)\n+{\n+\treturn 0;\n+}\n+\n+static OSAL_INLINE int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t     u32 *disabled_vfs)\n+{\n+\treturn 0;\n+}\n+\n+static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t    void *p_tlvs_list,\n+\t\t\t\t\t\t    u16 req_type)\n+{\n+\treturn OSAL_NULL;\n+}\n+\n+static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn\n+\t\t\t\t\t\t\t       *p_hwfn,\n+\t\t\t\t\t\t\t       u16\n+\t\t\t\t\t\t\t       relative_vf_id,\n+\t\t\t\t\t\t\t       bool\n+\t\t\t\t\t\t\t       b_enabled_only)\n+{\n+\treturn OSAL_NULL;\n+}\n+\n+#endif\n+#endif /* __ECORE_SRIOV_H__ */\ndiff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c\nnew file mode 100644\nindex 0000000..a452f3d\n--- /dev/null\n+++ b/drivers/net/qede/base/ecore_vf.c\n@@ -0,0 +1,1322 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for 
copyright and licensing details.\n+ */\n+\n+#include \"bcm_osal.h\"\n+#include \"ecore.h\"\n+#include \"ecore_hsi_eth.h\"\n+#include \"ecore_sriov.h\"\n+#include \"ecore_l2_api.h\"\n+#include \"ecore_vf.h\"\n+#include \"ecore_vfpf_if.h\"\n+#include \"ecore_status.h\"\n+#include \"reg_addr.h\"\n+#include \"ecore_int.h\"\n+#include \"ecore_l2.h\"\n+#include \"ecore_mcp_api.h\"\n+#include \"ecore_vf_api.h\"\n+\n+static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tvoid *p_tlv;\n+\n+\t/* This lock is released when we receive PF's response\n+\t * in ecore_send_msg2pf().\n+\t * So, ecore_vf_pf_prep() and ecore_send_msg2pf()\n+\t * must come in sequence.\n+\t */\n+\tOSAL_MUTEX_ACQUIRE(&(p_iov->mutex));\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"preparing to send %s tlv over vf pf channel\\n\",\n+\t\t   ecore_channel_tlvs_string[type]);\n+\n+\t/* Reset Request offset */\n+\tp_iov->offset = (u8 *)(p_iov->vf2pf_request);\n+\n+\t/* Clear mailbox - both request and reply */\n+\tOSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));\n+\tOSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));\n+\n+\t/* Init type and length */\n+\tp_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);\n+\n+\t/* Init first tlv header */\n+\t((struct vfpf_first_tlv *)p_tlv)->reply_address =\n+\t    (u64)p_iov->pf2vf_reply_phys;\n+\n+\treturn p_tlv;\n+}\n+\n+static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,\n+\t\t\t     u8 *done, u32 resp_size)\n+{\n+\tstruct ustorm_vf_zone *zone_data = (struct ustorm_vf_zone *)\n+\t    ((u8 *)PXP_VF_BAR0_START_USDM_ZONE_B);\n+\tunion vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;\n+\tstruct ustorm_trigger_vf_zone trigger;\n+\tint rc = ECORE_SUCCESS, time = 100;\n+\tu8 pf_id;\n+\n+\t/* output tlvs list */\n+\tecore_dp_tlv_list(p_hwfn, p_req);\n+\n+\t/* need to add the END TLV to the message size */\n+\tresp_size += sizeof(struct channel_list_end_tlv);\n+\n+\tif (!p_hwfn->p_dev->sriov_info.b_hw_channel) {\n+\t\trc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,\n+\t\t\t\t\t done,\n+\t\t\t\t\t p_req,\n+\t\t\t\t\t p_hwfn->vf_iov_info->pf2vf_reply,\n+\t\t\t\t\t sizeof(union vfpf_tlvs), resp_size);\n+\t\t/* TODO - no prints about message ? 
*/\n+\t\tgoto exit;\n+\t}\n+\n+\t/* Send TLVs over HW channel */\n+\tOSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));\n+\ttrigger.vf_pf_msg_valid = 1;\n+\t/* TODO - FW should remove this requirement */\n+\tpf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, PXP_CONCRETE_FID_PFID);\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\\n\",\n+\t\t   pf_id,\n+\t\t   U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),\n+\t\t   U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),\n+\t\t   &zone_data->non_trigger.vf_pf_msg_addr,\n+\t\t   *((u32 *)&trigger), &zone_data->trigger);\n+\n+\tREG_WR(p_hwfn,\n+\t       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,\n+\t       U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));\n+\n+\tREG_WR(p_hwfn,\n+\t       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,\n+\t       U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));\n+\n+\t/* The message data must be written first, to prevent trigger before\n+\t * data is written.\n+\t */\n+\tOSAL_WMB(p_hwfn->p_dev);\n+\n+\tREG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,\n+\t       *((u32 *)&trigger));\n+\n+\twhile ((!*done) && time) {\n+\t\tOSAL_MSLEEP(25);\n+\t\ttime--;\n+\t}\n+\n+\tif (!*done) {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"VF <-- PF Timeout [Type %d]\\n\",\n+\t\t\t   p_req->first_tlv.tl.type);\n+\t\trc = ECORE_TIMEOUT;\n+\t\tgoto exit;\n+\t} else {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"PF response: %d [Type %d]\\n\",\n+\t\t\t   *done, p_req->first_tlv.tl.type);\n+\t}\n+\n+exit:\n+\tOSAL_MUTEX_RELEASE(&(p_hwfn->vf_iov_info->mutex));\n+\n+\treturn rc;\n+}\n+\n+#define VF_ACQUIRE_THRESH 3\n+#define VF_ACQUIRE_MAC_FILTERS 1\n+#define VF_ACQUIRE_MC_FILTERS 10\n+\n+static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;\n+\tstruct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;\n+\tstruct ecore_vf_acquire_sw_info vf_sw_info;\n+\tstruct vfpf_acquire_tlv *req;\n+\tint rc = 0, attempts = 0;\n+\tbool resources_acquired = false;\n+\n+\t/* @@@ TBD: MichalK take this from somewhere else... */\n+\tu8 rx_count = 1, tx_count = 1, num_sbs = 1;\n+\tu8 num_mac = VF_ACQUIRE_MAC_FILTERS, num_mc = VF_ACQUIRE_MC_FILTERS;\n+\n+\t/* clear mailbox and prep first tlv */\n+\treq = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));\n+\n+\t/* @@@ TBD: PF may not be ready bnx2x_get_vf_id... 
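/* Editor's sketch (not part of the original patch): the canonical pairing
 * used by every request below. ecore_vf_pf_prep() takes the mailbox mutex
 * and ecore_send_msg2pf() releases it, so the two must always be called in
 * sequence; this mirrors ecore_vf_pf_int_cleanup() further down.
 */
static enum _ecore_status_t example_simple_request(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clears both mailboxes, places the first TLV, takes the mutex */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
			 sizeof(struct vfpf_first_tlv));

	ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* sends over the HW channel (or OSAL fallback) and drops the mutex */
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	return (resp->hdr.status == PFVF_STATUS_SUCCESS) ? ECORE_SUCCESS
							 : ECORE_INVAL;
}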
*/\n+\treq->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;\n+\n+\treq->resc_request.num_rxqs = rx_count;\n+\treq->resc_request.num_txqs = tx_count;\n+\treq->resc_request.num_sbs = num_sbs;\n+\treq->resc_request.num_mac_filters = num_mac;\n+\treq->resc_request.num_mc_filters = num_mc;\n+\treq->resc_request.num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;\n+\n+\tOSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));\n+\tOSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);\n+\n+\treq->vfdev_info.os_type = vf_sw_info.os_type;\n+\treq->vfdev_info.driver_version = vf_sw_info.driver_version;\n+\treq->vfdev_info.fw_major = FW_MAJOR_VERSION;\n+\treq->vfdev_info.fw_minor = FW_MINOR_VERSION;\n+\treq->vfdev_info.fw_revision = FW_REVISION_VERSION;\n+\treq->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;\n+\n+\tif (vf_sw_info.override_fw_version)\n+\t\treq->vfdev_info.capabilties |= VFPF_ACQUIRE_CAP_OVERRIDE_FW_VER;\n+\n+\t/* pf 2 vf bulletin board address */\n+\treq->bulletin_addr = p_iov->bulletin.phys;\n+\treq->bulletin_size = p_iov->bulletin.size;\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\twhile (!resources_acquired) {\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t   \"attempting to acquire resources\\n\");\n+\n+\t\t/* send acquire request */\n+\t\trc = ecore_send_msg2pf(p_hwfn,\n+\t\t\t\t       &resp->hdr.status, sizeof(*resp));\n+\n+\t\t/* PF timeout */\n+\t\tif (rc)\n+\t\t\treturn rc;\n+\n+\t\t/* copy acquire response from buffer to p_hwfn */\n+\t\tOSAL_MEMCPY(&p_iov->acquire_resp,\n+\t\t\t    resp, sizeof(p_iov->acquire_resp));\n+\n+\t\tattempts++;\n+\n+\t\t/* PF agrees to allocate our resources */\n+\t\tif (resp->hdr.status == PFVF_STATUS_SUCCESS) {\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"resources acquired\\n\");\n+\t\t\tresources_acquired = true;\n+\t\t} /* PF refuses to allocate our resources */\n+\t\telse if (resp->hdr.status ==\n+\t\t\t PFVF_STATUS_NO_RESOURCE &&\n+\t\t\t attempts < VF_ACQUIRE_THRESH) {\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"PF unwilling to fulfill resource request. 
Try PF recommended amount\\n\");\n+\n+\t\t\t/* humble our request */\n+\t\t\treq->resc_request.num_txqs = resp->resc.num_txqs;\n+\t\t\treq->resc_request.num_rxqs = resp->resc.num_rxqs;\n+\t\t\treq->resc_request.num_sbs = resp->resc.num_sbs;\n+\t\t\treq->resc_request.num_mac_filters =\n+\t\t\t    resp->resc.num_mac_filters;\n+\t\t\treq->resc_request.num_vlan_filters =\n+\t\t\t    resp->resc.num_vlan_filters;\n+\t\t\treq->resc_request.num_mc_filters =\n+\t\t\t    resp->resc.num_mc_filters;\n+\n+\t\t\t/* Clear response buffer */\n+\t\t\tOSAL_MEMSET(p_iov->pf2vf_reply, 0,\n+\t\t\t\t    sizeof(union pfvf_tlvs));\n+\t\t} else {\n+\t\t\tDP_ERR(p_hwfn,\n+\t\t\t       \"PF returned error %d to VF acquisition request\\n\",\n+\t\t\t       resp->hdr.status);\n+\t\t\treturn ECORE_AGAIN;\n+\t\t}\n+\t}\n+\n+\trc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);\n+\tif (rc) {\n+\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t  \"VF_UPDATE_ACQUIRE_RESC_RESP Failed: status = 0x%x.\\n\",\n+\t\t\t  rc);\n+\t\treturn ECORE_AGAIN;\n+\t}\n+\n+\t/* Update bulletin board size with response from PF */\n+\tp_iov->bulletin.size = resp->bulletin_size;\n+\n+\t/* get HW info */\n+\tp_hwfn->p_dev->type = resp->pfdev_info.dev_type;\n+\tp_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;\n+\n+\tDP_INFO(p_hwfn, \"Chip details - %s%d\\n\",\n+\t\tECORE_IS_BB(p_hwfn->p_dev) ? \"BB\" : \"AH\",\n+\t\tCHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);\n+\n+\t/* @@@TBD MichalK: Fw ver... */\n+\t/* strlcpy(p_hwfn->fw_ver, p_hwfn->acquire_resp.pfdev_info.fw_ver,\n+\t *  sizeof(p_hwfn->fw_ver));\n+\t */\n+\n+\tp_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;\n+\n+\treturn 0;\n+}\n+\n+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev)\n+{\n+\tenum _ecore_status_t rc = ECORE_NOMEM;\n+\tstruct ecore_vf_iov *p_sriov;\n+\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];\t/* @@@TBD CMT */\n+\n+\tp_dev->num_hwfns = 1;\t/* @@@TBD CMT must be fixed... */\n+\n+\tp_hwfn->regview = p_dev->regview;\n+\tif (p_hwfn->regview == OSAL_NULL) {\n+\t\tDP_ERR(p_hwfn,\n+\t\t       \"regview should be initialized before\"\n+\t\t\t\" ecore_vf_hw_prepare is called\\n\");\n+\t\treturn ECORE_INVAL;\n+\t}\n+\n+\t/* Set the doorbell bar. 
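/* Editor's note (not part of the original patch): after the negotiation
 * loop above succeeds, the authoritative resource counts are the ones the
 * PF granted in the saved response, not the ones first requested; e.g.:
 */
static u8 example_granted_rxqs(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;

	/* what the PF actually granted, post-negotiation */
	return p_iov->acquire_resp.resc.num_rxqs;
}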
Assumption: regview is set */\n+\tp_hwfn->doorbells = (u8 OSAL_IOMEM *) p_hwfn->regview +\n+\t    PXP_VF_BAR0_START_DQ;\n+\n+\tp_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,\n+\t\t\t\t\t  PXP_VF_BAR0_ME_OPAQUE_ADDRESS);\n+\n+\tp_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn,\n+\t\t\t\t      PXP_VF_BAR0_ME_CONCRETE_ADDRESS);\n+\n+\t/* Allocate vf sriov info */\n+\tp_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));\n+\tif (!p_sriov) {\n+\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t  \"Failed to allocate `struct ecore_sriov'\\n\");\n+\t\treturn ECORE_NOMEM;\n+\t}\n+\n+\tOSAL_MEMSET(p_sriov, 0, sizeof(*p_sriov));\n+\n+\t/* Allocate vf2pf msg */\n+\tp_sriov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t\t\t\t &p_sriov->\n+\t\t\t\t\t\t\t vf2pf_request_phys,\n+\t\t\t\t\t\t\t sizeof(union\n+\t\t\t\t\t\t\t\tvfpf_tlvs));\n+\tif (!p_sriov->vf2pf_request) {\n+\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t  \"Failed to allocate `vf2pf_request' DMA memory\\n\");\n+\t\tgoto free_p_sriov;\n+\t}\n+\n+\tp_sriov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t\t\t       &p_sriov->\n+\t\t\t\t\t\t       pf2vf_reply_phys,\n+\t\t\t\t\t\t       sizeof(union pfvf_tlvs));\n+\tif (!p_sriov->pf2vf_reply) {\n+\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t  \"Failed to allocate `pf2vf_reply' DMA memory\\n\");\n+\t\tgoto free_vf2pf_request;\n+\t}\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"VF's Request mailbox [%p virt 0x%lx phys], Response\"\n+\t\t   \" mailbox [%p virt 0x%lx phys]\\n\",\n+\t\t   p_sriov->vf2pf_request,\n+\t\t   (u64)p_sriov->vf2pf_request_phys,\n+\t\t   p_sriov->pf2vf_reply, (u64)p_sriov->pf2vf_reply_phys);\n+\n+\t/* Allocate Bulletin board */\n+\tp_sriov->bulletin.size = sizeof(struct ecore_bulletin_content);\n+\tp_sriov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t\t\t\t   &p_sriov->bulletin.\n+\t\t\t\t\t\t\t   phys,\n+\t\t\t\t\t\t\t   p_sriov->bulletin.\n+\t\t\t\t\t\t\t   size);\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\\n\",\n+\t\t   p_sriov->bulletin.p_virt, (u64)p_sriov->bulletin.phys,\n+\t\t   p_sriov->bulletin.size);\n+\n+\tOSAL_MUTEX_ALLOC(p_hwfn, &p_sriov->mutex);\n+\tOSAL_MUTEX_INIT(&p_sriov->mutex);\n+\n+\tp_hwfn->vf_iov_info = p_sriov;\n+\n+\tp_hwfn->hw_info.personality = ECORE_PCI_ETH;\n+\n+\t/* First VF needs to query for information from PF */\n+\tif (!p_hwfn->my_id)\n+\t\trc = ecore_vf_pf_acquire(p_hwfn);\n+\n+\treturn rc;\n+\n+free_vf2pf_request:\n+\tOSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sriov->vf2pf_request,\n+\t\t\t       p_sriov->vf2pf_request_phys,\n+\t\t\t       sizeof(union vfpf_tlvs));\n+free_p_sriov:\n+\tOSAL_FREE(p_hwfn->p_dev, p_sriov);\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn)\n+{\n+\tp_hwfn->b_int_enabled = 1;\n+\n+\treturn 0;\n+}\n+\n+/* TEMP TEMP until in HSI */\n+#define TSTORM_QZONE_START   PXP_VF_BAR0_START_SDM_ZONE_A\n+#define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START + \\\n+\t\t\t\t   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))\n+#define USTORM_QZONE_START(dev)   (MSTORM_QZONE_START + \\\n+\t\t\t\t   (MSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))\n+\n+enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t   u8 rx_qid,\n+\t\t\t\t\t   u16 sb,\n+\t\t\t\t\t   u8 sb_index,\n+\t\t\t\t\t   u16 bd_max_bytes,\n+\t\t\t\t\t   dma_addr_t bd_chain_phys_addr,\n+\t\t\t\t\t   dma_addr_t cqe_pbl_addr,\n+\t\t\t\t\t   u16 cqe_pbl_size,\n+\t\t\t\t\t   void OSAL_IOMEM 
**pp_prod)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct vfpf_start_rxq_tlv *req;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tint rc;\n+\tu8 hw_qid;\n+\tu64 init_prod_val = 0;\n+\n+\t/* clear mailbox and prep first tlv */\n+\treq = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));\n+\n+\t/* @@@TBD MichalK TPA */\n+\n+\treq->rx_qid = rx_qid;\n+\treq->cqe_pbl_addr = cqe_pbl_addr;\n+\treq->cqe_pbl_size = cqe_pbl_size;\n+\treq->rxq_addr = bd_chain_phys_addr;\n+\treq->hw_sb = sb;\n+\treq->sb_index = sb_index;\n+\treq->hc_rate = 0;\t/* @@@TBD MichalK -> host coalescing! */\n+\treq->bd_max_bytes = bd_max_bytes;\n+\treq->stat_id = -1;\t/* No stats at the moment */\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\tif (pp_prod) {\n+\t\thw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];\n+\n+\t\t*pp_prod = (u8 OSAL_IOMEM *) p_hwfn->regview +\n+\t\t    MSTORM_QZONE_START(p_hwfn->p_dev) +\n+\t\t    (hw_qid) * MSTORM_QZONE_SIZE +\n+\t\t    OFFSETOF(struct mstorm_eth_queue_zone, rx_producers);\n+\n+\t\t/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */\n+\t\t__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),\n+\t\t\t\t  (u32 *)(&init_prod_val));\n+\t}\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_INVAL;\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  u16 rx_qid, bool cqe_completion)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct vfpf_stop_rxqs_tlv *req;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tint rc;\n+\n+\t/* clear mailbox and prep first tlv */\n+\treq = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));\n+\n+\t/* @@@TBD MichalK TPA */\n+\n+\t/* @@@TBD MichalK - relevant ???\n+\t * flags  VFPF_QUEUE_FLG_OV VFPF_QUEUE_FLG_VLAN\n+\t */\n+\treq->rx_qid = rx_qid;\n+\treq->num_rxqs = 1;\n+\treq->cqe_completion = cqe_completion;\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_INVAL;\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t   u16 tx_queue_id,\n+\t\t\t\t\t   u16 sb,\n+\t\t\t\t\t   u8 sb_index,\n+\t\t\t\t\t   dma_addr_t pbl_addr,\n+\t\t\t\t\t   u16 pbl_size,\n+\t\t\t\t\t   void OSAL_IOMEM **pp_doorbell)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct vfpf_start_txq_tlv *req;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tint rc;\n+\n+\t/* clear mailbox and prep first tlv */\n+\treq = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));\n+\n+\t/* @@@TBD MichalK TPA */\n+\n+\treq->tx_qid = tx_queue_id;\n+\n+\t/* Tx */\n+\treq->pbl_addr = pbl_addr;\n+\treq->pbl_size = pbl_size;\n+\treq->hw_sb = sb;\n+\treq->sb_index = sb_index;\n+\treq->hc_rate = 0;\t/* @@@TBD MichalK -> host coalescing! */\n+\treq->flags = 0;\t\t/* @@@TBD MichalK -> flags... 
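/* Editor's sketch (not part of the original patch): minimal RX queue
 * bring-up through the helper above. The ring addresses/sizes are assumed
 * to come from the caller's allocation; 0x2000 bytes per bd is purely
 * illustrative. Note the helper itself zeroes the producers in the
 * MSTORM queue zone via __internal_ram_wr().
 */
static enum _ecore_status_t example_start_rxq0(struct ecore_hwfn *p_hwfn,
					       dma_addr_t bd_chain_phys,
					       dma_addr_t cqe_pbl_phys,
					       u16 cqe_pbl_size)
{
	void OSAL_IOMEM *p_prod = OSAL_NULL;

	return ecore_vf_pf_rxq_start(p_hwfn, 0 /* rx_qid */,
				     ecore_vf_get_igu_sb_id(p_hwfn, 0),
				     0 /* sb_index */,
				     0x2000 /* bd_max_bytes */,
				     bd_chain_phys, cqe_pbl_phys,
				     cqe_pbl_size, &p_prod);
}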
*/\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_INVAL;\n+\n+\tif (pp_doorbell) {\n+\t\tu8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];\n+\n+\t\t*pp_doorbell = (u8 OSAL_IOMEM *) p_hwfn->doorbells +\n+\t\t    DB_ADDR(cid, DQ_DEMS_LEGACY);\n+\t}\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct vfpf_stop_txqs_tlv *req;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tint rc;\n+\n+\t/* clear mailbox and prep first tlv */\n+\treq = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));\n+\n+\t/* @@@TBD MichalK TPA */\n+\n+\t/* @@@TBD MichalK - relevant ??? flags\n+\t * VFPF_QUEUE_FLG_OV VFPF_QUEUE_FLG_VLAN\n+\t */\n+\treq->tx_qid = tx_qid;\n+\treq->num_txqs = 1;\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_INVAL;\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t     u16 rx_queue_id,\n+\t\t\t\t\t     u8 num_rxqs,\n+\t\t\t\t\t     u8 comp_cqe_flg, u8 comp_event_flg)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tstruct vfpf_update_rxq_tlv *req;\n+\tint rc;\n+\n+\t/* clear mailbox and prep first tlv */\n+\treq = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));\n+\n+\treq->rx_qid = rx_queue_id;\n+\treq->num_rxqs = num_rxqs;\n+\n+\tif (comp_cqe_flg)\n+\t\treq->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;\n+\tif (comp_event_flg)\n+\t\treq->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_INVAL;\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t\n+ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,\n+\t\t\tu16 mtu, u8 inner_vlan_removal,\n+\t\t\tenum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe,\n+\t\t\tu8 only_untagged)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct vfpf_vport_start_tlv *req;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tint rc, i;\n+\n+\t/* clear mailbox and prep first tlv */\n+\treq = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));\n+\n+\treq->mtu = mtu;\n+\treq->vport_id = vport_id;\n+\treq->inner_vlan_removal = inner_vlan_removal;\n+\treq->tpa_mode = tpa_mode;\n+\treq->max_buffers_per_cqe = max_buffers_per_cqe;\n+\treq->only_untagged = only_untagged;\n+\n+\t/* status blocks */\n+\tfor (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)\n+\t\tif (p_hwfn->sbs_info[i])\n+\t\t\treq->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;\n+\n+\t/* add list termination tlv 
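/* Editor's sketch (not part of the original patch): TX queue bring-up; on
 * success *pp_db points into the doorbell BAR (doorbells +
 * DB_ADDR(cid, DQ_DEMS_LEGACY)) and is what the fast path rings with new
 * producer values.
 */
static enum _ecore_status_t example_start_txq0(struct ecore_hwfn *p_hwfn,
					       dma_addr_t pbl_addr,
					       u16 pbl_size,
					       void OSAL_IOMEM **pp_db)
{
	return ecore_vf_pf_txq_start(p_hwfn, 0 /* tx_queue_id */,
				     ecore_vf_get_igu_sb_id(p_hwfn, 0),
				     0 /* sb_index */,
				     pbl_addr, pbl_size, pp_db);
}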
*/\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_INVAL;\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tint rc;\n+\n+\t/* clear mailbox and prep first tlv */\n+\tecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,\n+\t\t\t sizeof(struct vfpf_first_tlv));\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_INVAL;\n+\n+\treturn rc;\n+}\n+\n+static void\n+ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t    struct ecore_sp_vport_update_params *p_data)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct pfvf_def_resp_tlv *p_resp;\n+\tu16 tlv;\n+\n+\tif (p_data->update_vport_active_rx_flg ||\n+\t    p_data->update_vport_active_tx_flg) {\n+\t\ttlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;\n+\t\tp_resp = (struct pfvf_def_resp_tlv *)\n+\t\t    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);\n+\t\tif (p_resp && p_resp->hdr.status)\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VP update activate tlv configured\\n\");\n+\t\telse\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"VP update activate tlv config failed\\n\");\n+\t}\n+\n+\tif (p_data->update_tx_switching_flg) {\n+\t\ttlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;\n+\t\tp_resp = (struct pfvf_def_resp_tlv *)\n+\t\t    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);\n+\t\tif (p_resp && p_resp->hdr.status)\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VP update tx switch tlv configured\\n\");\n+#ifndef ASIC_ONLY\n+\t\telse if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))\n+\t\t\tDP_NOTICE(p_hwfn, false,\n+\t\t\t\t  \"FPGA: Skip checking whether PF\"\n+\t\t\t\t  \" replied to Tx-switching request\\n\");\n+#endif\n+\t\telse\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"VP update tx switch tlv config failed\\n\");\n+\t}\n+\n+\tif (p_data->update_inner_vlan_removal_flg) {\n+\t\ttlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;\n+\t\tp_resp = (struct pfvf_def_resp_tlv *)\n+\t\t    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);\n+\t\tif (p_resp && p_resp->hdr.status)\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VP update vlan strip tlv configured\\n\");\n+\t\telse\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"VP update vlan strip tlv config failed\\n\");\n+\t}\n+\n+\tif (p_data->update_approx_mcast_flg) {\n+\t\ttlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;\n+\t\tp_resp = (struct pfvf_def_resp_tlv *)\n+\t\t    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);\n+\t\tif (p_resp && p_resp->hdr.status)\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VP update mcast tlv configured\\n\");\n+\t\telse\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"VP update mcast tlv config failed\\n\");\n+\t}\n+\n+\tif (p_data->accept_flags.update_rx_mode_config ||\n+\t    p_data->accept_flags.update_tx_mode_config) {\n+\t\ttlv = 
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;\n+\t\tp_resp = (struct pfvf_def_resp_tlv *)\n+\t\t    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);\n+\t\tif (p_resp && p_resp->hdr.status)\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VP update accept_mode tlv configured\\n\");\n+\t\telse\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"VP update accept_mode tlv config failed\\n\");\n+\t}\n+\n+\tif (p_data->rss_params) {\n+\t\ttlv = CHANNEL_TLV_VPORT_UPDATE_RSS;\n+\t\tp_resp = (struct pfvf_def_resp_tlv *)\n+\t\t    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);\n+\t\tif (p_resp && p_resp->hdr.status)\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VP update rss tlv configured\\n\");\n+\t\telse\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"VP update rss tlv config failed\\n\");\n+\t}\n+\n+\tif (p_data->sge_tpa_params) {\n+\t\ttlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;\n+\t\tp_resp = (struct pfvf_def_resp_tlv *)\n+\t\t    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);\n+\t\tif (p_resp && p_resp->hdr.status)\n+\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t\t\t   \"VP update sge tpa tlv configured\\n\");\n+\t\telse\n+\t\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t\t  \"VP update sge tpa tlv config failed\\n\");\n+\t}\n+}\n+\n+enum _ecore_status_t\n+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,\n+\t\t\t struct ecore_sp_vport_update_params *p_params)\n+{\n+\tstruct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;\n+\tstruct vfpf_vport_update_accept_param_tlv *p_accept_tlv;\n+\tstruct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;\n+\tstruct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;\n+\tstruct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;\n+\tstruct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;\n+\tstruct vfpf_vport_update_activate_tlv *p_act_tlv;\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct vfpf_vport_update_rss_tlv *p_rss_tlv;\n+\tstruct vfpf_vport_update_tlv *req;\n+\tstruct pfvf_def_resp_tlv *resp;\n+\tu8 update_rx, update_tx;\n+\tu32 resp_size = 0;\n+\tu16 size, tlv;\n+\tint rc;\n+\n+\tresp = &p_iov->pf2vf_reply->default_resp;\n+\tresp_size = sizeof(*resp);\n+\n+\tupdate_rx = p_params->update_vport_active_rx_flg;\n+\tupdate_tx = p_params->update_vport_active_tx_flg;\n+\n+\t/* clear mailbox and prep header tlv */\n+\tecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));\n+\n+\t/* Prepare extended tlvs */\n+\tif (update_rx || update_tx) {\n+\t\tsize = sizeof(struct vfpf_vport_update_activate_tlv);\n+\t\tp_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t\t\t\t  CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,\n+\t\t\t\t\t  size);\n+\t\tresp_size += sizeof(struct pfvf_def_resp_tlv);\n+\n+\t\tif (update_rx) {\n+\t\t\tp_act_tlv->update_rx = update_rx;\n+\t\t\tp_act_tlv->active_rx = p_params->vport_active_rx_flg;\n+\t\t}\n+\n+\t\tif (update_tx) {\n+\t\t\tp_act_tlv->update_tx = update_tx;\n+\t\t\tp_act_tlv->active_tx = p_params->vport_active_tx_flg;\n+\t\t}\n+\t}\n+\n+\tif (p_params->update_inner_vlan_removal_flg) {\n+\t\tsize = sizeof(struct vfpf_vport_update_vlan_strip_tlv);\n+\t\tp_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t\t\t\t   CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,\n+\t\t\t\t\t   size);\n+\t\tresp_size += sizeof(struct pfvf_def_resp_tlv);\n+\n+\t\tp_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;\n+\t}\n+\n+\tif (p_params->update_tx_switching_flg) {\n+\t\tsize = sizeof(struct vfpf_vport_update_tx_switch_tlv);\n+\t\ttlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;\n+\t\tp_tx_switch_tlv = 
ecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t\t\t\t\ttlv, size);\n+\t\tresp_size += sizeof(struct pfvf_def_resp_tlv);\n+\n+\t\tp_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;\n+\t}\n+\n+\tif (p_params->update_approx_mcast_flg) {\n+\t\tsize = sizeof(struct vfpf_vport_update_mcast_bin_tlv);\n+\t\tp_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t\t\t\t    CHANNEL_TLV_VPORT_UPDATE_MCAST,\n+\t\t\t\t\t    size);\n+\t\tresp_size += sizeof(struct pfvf_def_resp_tlv);\n+\n+\t\tOSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,\n+\t\t\t    sizeof(unsigned long) *\n+\t\t\t    ETH_MULTICAST_MAC_BINS_IN_REGS);\n+\t}\n+\n+\tupdate_rx = p_params->accept_flags.update_rx_mode_config;\n+\tupdate_tx = p_params->accept_flags.update_tx_mode_config;\n+\n+\tif (update_rx || update_tx) {\n+\t\ttlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;\n+\t\tsize = sizeof(struct vfpf_vport_update_accept_param_tlv);\n+\t\tp_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);\n+\t\tresp_size += sizeof(struct pfvf_def_resp_tlv);\n+\n+\t\tif (update_rx) {\n+\t\t\tp_accept_tlv->update_rx_mode = update_rx;\n+\t\t\tp_accept_tlv->rx_accept_filter =\n+\t\t\t    p_params->accept_flags.rx_accept_filter;\n+\t\t}\n+\n+\t\tif (update_tx) {\n+\t\t\tp_accept_tlv->update_tx_mode = update_tx;\n+\t\t\tp_accept_tlv->tx_accept_filter =\n+\t\t\t    p_params->accept_flags.tx_accept_filter;\n+\t\t}\n+\t}\n+\n+\tif (p_params->rss_params) {\n+\t\tstruct ecore_rss_params *rss_params = p_params->rss_params;\n+\n+\t\tsize = sizeof(struct vfpf_vport_update_rss_tlv);\n+\t\tp_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t\t\t\t  CHANNEL_TLV_VPORT_UPDATE_RSS, size);\n+\t\tresp_size += sizeof(struct pfvf_def_resp_tlv);\n+\n+\t\tif (rss_params->update_rss_config)\n+\t\t\tp_rss_tlv->update_rss_flags |=\n+\t\t\t    VFPF_UPDATE_RSS_CONFIG_FLAG;\n+\t\tif (rss_params->update_rss_capabilities)\n+\t\t\tp_rss_tlv->update_rss_flags |=\n+\t\t\t    VFPF_UPDATE_RSS_CAPS_FLAG;\n+\t\tif (rss_params->update_rss_ind_table)\n+\t\t\tp_rss_tlv->update_rss_flags |=\n+\t\t\t    VFPF_UPDATE_RSS_IND_TABLE_FLAG;\n+\t\tif (rss_params->update_rss_key)\n+\t\t\tp_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;\n+\n+\t\tp_rss_tlv->rss_enable = rss_params->rss_enable;\n+\t\tp_rss_tlv->rss_caps = rss_params->rss_caps;\n+\t\tp_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;\n+\t\tOSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,\n+\t\t\t    sizeof(rss_params->rss_ind_table));\n+\t\tOSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,\n+\t\t\t    sizeof(rss_params->rss_key));\n+\t}\n+\n+\tif (p_params->update_accept_any_vlan_flg) {\n+\t\tsize = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);\n+\t\ttlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;\n+\t\tp_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t\t\t\t       tlv, size);\n+\n+\t\tresp_size += sizeof(struct pfvf_def_resp_tlv);\n+\t\tp_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;\n+\t\tp_any_vlan_tlv->update_accept_any_vlan_flg =\n+\t\t    p_params->update_accept_any_vlan_flg;\n+\t}\n+\n+\tif (p_params->sge_tpa_params) {\n+\t\tstruct ecore_sge_tpa_params *sge_tpa_params =\n+\t\t    p_params->sge_tpa_params;\n+\n+\t\tsize = sizeof(struct vfpf_vport_update_sge_tpa_tlv);\n+\t\tp_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t\t\t\t      CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,\n+\t\t\t\t\t      size);\n+\t\tresp_size += sizeof(struct pfvf_def_resp_tlv);\n+\n+\t\tif (sge_tpa_params->update_tpa_en_flg)\n+\t\t\tp_sge_tpa_tlv->update_sge_tpa_flags 
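/* Editor's sketch (not part of the original patch): filling the RSS leg of
 * a vport-update request. Assumes the usual ECORE_RSS_IND_TABLE_SIZE
 * indirection table from ecore_l2_api.h; spreading queues round-robin and
 * a 128-entry table (log = 7) are illustrative choices.
 */
static enum _ecore_status_t example_update_rss(struct ecore_hwfn *p_hwfn,
					       u8 num_rxqs)
{
	struct ecore_sp_vport_update_params params;
	struct ecore_rss_params rss;
	int i;

	OSAL_MEMSET(&params, 0, sizeof(params));
	OSAL_MEMSET(&rss, 0, sizeof(rss));

	rss.update_rss_config = 1;
	rss.update_rss_ind_table = 1;
	rss.rss_enable = 1;
	rss.rss_table_size_log = 7;
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		rss.rss_ind_table[i] = i % num_rxqs;

	params.rss_params = &rss;
	return ecore_vf_pf_vport_update(p_hwfn, &params);
}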
|=\n+\t\t\t    VFPF_UPDATE_TPA_EN_FLAG;\n+\t\tif (sge_tpa_params->update_tpa_param_flg)\n+\t\t\tp_sge_tpa_tlv->update_sge_tpa_flags |=\n+\t\t\t    VFPF_UPDATE_TPA_PARAM_FLAG;\n+\n+\t\tif (sge_tpa_params->tpa_ipv4_en_flg)\n+\t\t\tp_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG;\n+\t\tif (sge_tpa_params->tpa_ipv6_en_flg)\n+\t\t\tp_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG;\n+\t\tif (sge_tpa_params->tpa_pkt_split_flg)\n+\t\t\tp_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG;\n+\t\tif (sge_tpa_params->tpa_hdr_data_split_flg)\n+\t\t\tp_sge_tpa_tlv->sge_tpa_flags |=\n+\t\t\t    VFPF_TPA_HDR_DATA_SPLIT_FLAG;\n+\t\tif (sge_tpa_params->tpa_gro_consistent_flg)\n+\t\t\tp_sge_tpa_tlv->sge_tpa_flags |=\n+\t\t\t    VFPF_TPA_GRO_CONSIST_FLAG;\n+\n+\t\tp_sge_tpa_tlv->tpa_max_aggs_num =\n+\t\t    sge_tpa_params->tpa_max_aggs_num;\n+\t\tp_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;\n+\t\tp_sge_tpa_tlv->tpa_min_size_to_start =\n+\t\t    sge_tpa_params->tpa_min_size_to_start;\n+\t\tp_sge_tpa_tlv->tpa_min_size_to_cont =\n+\t\t    sge_tpa_params->tpa_min_size_to_cont;\n+\n+\t\tp_sge_tpa_tlv->max_buffers_per_cqe =\n+\t\t    sge_tpa_params->max_buffers_per_cqe;\n+\t}\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_INVAL;\n+\n+\tecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);\n+\n+\treturn rc;\n+}\n+\n+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct vfpf_first_tlv *req;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tint rc;\n+\n+\t/* clear mailbox and prep first tlv */\n+\treq = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_AGAIN;\n+\n+\tp_hwfn->b_int_enabled = 0;\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct vfpf_first_tlv *req;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tu32 size;\n+\tint rc;\n+\n+\t/* clear mailbox and prep first tlv */\n+\treq = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\n+\tif (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\trc = ECORE_AGAIN;\n+\n+\tp_hwfn->b_int_enabled = 0;\n+\n+\t/* TODO - might need to revise this for 100g */\n+\tif (IS_LEAD_HWFN(p_hwfn))\n+\t\tOSAL_MUTEX_DEALLOC(&p_iov->mutex);\n+\n+\tif (p_iov->vf2pf_request)\n+\t\tOSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t       p_iov->vf2pf_request,\n+\t\t\t\t       p_iov->vf2pf_request_phys,\n+\t\t\t\t       sizeof(union vfpf_tlvs));\n+\tif (p_iov->pf2vf_reply)\n+\t\tOSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t  
     p_iov->pf2vf_reply,\n+\t\t\t\t       p_iov->pf2vf_reply_phys,\n+\t\t\t\t       sizeof(union pfvf_tlvs));\n+\n+\tif (p_iov->bulletin.p_virt) {\n+\t\tsize = sizeof(struct ecore_bulletin_content);\n+\t\tOSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t       p_iov->bulletin.p_virt,\n+\t\t\t\t       p_iov->bulletin.phys, size);\n+\t}\n+\n+\tOSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);\n+\tp_hwfn->vf_iov_info = OSAL_NULL;\n+\n+\treturn rc;\n+}\n+\n+void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,\n+\t\t\t      struct ecore_filter_mcast *p_filter_cmd)\n+{\n+\tstruct ecore_sp_vport_update_params sp_params;\n+\tint i;\n+\n+\tOSAL_MEMSET(&sp_params, 0, sizeof(sp_params));\n+\tsp_params.update_approx_mcast_flg = 1;\n+\n+\tif (p_filter_cmd->opcode == ECORE_FILTER_ADD) {\n+\t\tfor (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {\n+\t\t\tu32 bit;\n+\n+\t\t\tbit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);\n+\t\t\tOSAL_SET_BIT(bit, sp_params.bins);\n+\t\t}\n+\t}\n+\n+\tecore_vf_pf_vport_update(p_hwfn, &sp_params);\n+}\n+\n+enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      struct ecore_filter_ucast\n+\t\t\t\t\t      *p_ucast)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct vfpf_ucast_filter_tlv *req;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tint rc;\n+\n+\t/* Sanitize */\n+\tif (p_ucast->opcode == ECORE_FILTER_MOVE) {\n+\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t  \"VFs don't support Moving of filters\\n\");\n+\t\treturn ECORE_INVAL;\n+\t}\n+\n+\t/* clear mailbox and prep first tlv */\n+\treq = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));\n+\treq->opcode = (u8)p_ucast->opcode;\n+\treq->type = (u8)p_ucast->type;\n+\tOSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);\n+\treq->vlan = p_ucast->vlan;\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_AGAIN;\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tstruct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;\n+\tint rc;\n+\n+\t/* clear mailbox and prep first tlv */\n+\tecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,\n+\t\t\t sizeof(struct vfpf_first_tlv));\n+\n+\t/* add list termination tlv */\n+\tecore_add_tlv(p_hwfn, &p_iov->offset,\n+\t\t      CHANNEL_TLV_LIST_END,\n+\t\t      sizeof(struct channel_list_end_tlv));\n+\n+\trc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tif (resp->hdr.status != PFVF_STATUS_SUCCESS)\n+\t\treturn ECORE_INVAL;\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    u8 *p_change)\n+{\n+\tstruct ecore_bulletin_content shadow;\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\tu32 crc, crc_size = sizeof(p_iov->bulletin.p_virt->crc);\n+\n+\t*p_change = 0;\n+\n+\t/* Need to guarantee PF is not in the middle of writing it */\n+\tOSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);\n+\n+\t/* If version did not update, no need to do anything */\n+\tif (shadow.version == p_iov->bulletin_shadow.version)\n+\t\treturn ECORE_SUCCESS;\n+\n+\t/* Verify the 
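/* Editor's sketch (not part of the original patch): adding one unicast MAC
 * over the channel. Fields mirror how ecore_vf_pf_filter_ucast() consumes
 * them above; ECORE_FILTER_MAC is the filter type from ecore_l2_api.h.
 * Remember that ECORE_FILTER_MOVE is rejected for VFs.
 */
static enum _ecore_status_t example_add_mac(struct ecore_hwfn *p_hwfn,
					    u8 *mac)
{
	struct ecore_filter_ucast ucast;

	OSAL_MEMSET(&ucast, 0, sizeof(ucast));
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	OSAL_MEMCPY(ucast.mac, mac, ETH_ALEN);

	return ecore_vf_pf_filter_ucast(p_hwfn, &ucast);
}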
bulletin we see is valid */\n+\tcrc = ecore_crc32(0, (u8 *)&shadow + crc_size,\n+\t\t\t  p_iov->bulletin.size - crc_size);\n+\tif (crc != shadow.crc)\n+\t\treturn ECORE_AGAIN;\n+\n+\t/* Set the shadow bulletin and process it */\n+\tOSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n+\t\t   \"Read a bulletin update %08x\\n\", shadow.version);\n+\n+\t*p_change = 1;\n+\n+\treturn ECORE_SUCCESS;\n+}\n+\n+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)\n+{\n+\tstruct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;\n+\n+\tif (!p_iov) {\n+\t\tDP_NOTICE(p_hwfn, true, \"vf_sriov_info isn't initialized\\n\");\n+\t\treturn 0;\n+\t}\n+\n+\treturn p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;\n+}\n+\n+void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,\n+\t\t\t\tstruct ecore_mcp_link_params *p_params,\n+\t\t\t\tstruct ecore_bulletin_content *p_bulletin)\n+{\n+\tOSAL_MEMSET(p_params, 0, sizeof(*p_params));\n+\n+\tp_params->speed.autoneg = p_bulletin->req_autoneg;\n+\tp_params->speed.advertised_speeds = p_bulletin->req_adv_speed;\n+\tp_params->speed.forced_speed = p_bulletin->req_forced_speed;\n+\tp_params->pause.autoneg = p_bulletin->req_autoneg_pause;\n+\tp_params->pause.forced_rx = p_bulletin->req_forced_rx;\n+\tp_params->pause.forced_tx = p_bulletin->req_forced_tx;\n+\tp_params->loopback_mode = p_bulletin->req_loopback;\n+}\n+\n+void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,\n+\t\t\t      struct ecore_mcp_link_params *params)\n+{\n+\t__ecore_vf_get_link_params(p_hwfn, params,\n+\t\t\t\t   &(p_hwfn->vf_iov_info->bulletin_shadow));\n+}\n+\n+void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,\n+\t\t\t       struct ecore_mcp_link_state *p_link,\n+\t\t\t       struct ecore_bulletin_content *p_bulletin)\n+{\n+\tOSAL_MEMSET(p_link, 0, sizeof(*p_link));\n+\n+\tp_link->link_up = p_bulletin->link_up;\n+\tp_link->speed = p_bulletin->speed;\n+\tp_link->full_duplex = p_bulletin->full_duplex;\n+\tp_link->an = p_bulletin->autoneg;\n+\tp_link->an_complete = p_bulletin->autoneg_complete;\n+\tp_link->parallel_detection = p_bulletin->parallel_detection;\n+\tp_link->pfc_enabled = p_bulletin->pfc_enabled;\n+\tp_link->partner_adv_speed = p_bulletin->partner_adv_speed;\n+\tp_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;\n+\tp_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;\n+\tp_link->partner_adv_pause = p_bulletin->partner_adv_pause;\n+\tp_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;\n+}\n+\n+void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,\n+\t\t\t     struct ecore_mcp_link_state *link)\n+{\n+\t__ecore_vf_get_link_state(p_hwfn, link,\n+\t\t\t\t  &(p_hwfn->vf_iov_info->bulletin_shadow));\n+}\n+\n+void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,\n+\t\t\t      struct ecore_mcp_link_capabilities *p_link_caps,\n+\t\t\t      struct ecore_bulletin_content *p_bulletin)\n+{\n+\tOSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));\n+\tp_link_caps->speed_capabilities = p_bulletin->capability_speed;\n+}\n+\n+void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,\n+\t\t\t    struct ecore_mcp_link_capabilities *p_link_caps)\n+{\n+\t__ecore_vf_get_link_caps(p_hwfn, p_link_caps,\n+\t\t\t\t &(p_hwfn->vf_iov_info->bulletin_shadow));\n+}\n+\n+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)\n+{\n+\t*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;\n+}\n+\n+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 
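/* Editor's sketch (not part of the original patch): a periodic bulletin
 * poll. ecore_vf_read_bulletin() above returns ECORE_AGAIN on a CRC
 * mismatch (PF mid-write; simply retry later) and flags `change' once a
 * newer, CRC-valid version has been copied into the shadow that the link
 * getters read from.
 */
static void example_poll_bulletin(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_link_state link;
	u8 change = 0;

	if (ecore_vf_read_bulletin(p_hwfn, &change) != ECORE_SUCCESS)
		return;

	if (change) {
		ecore_vf_get_link_state(p_hwfn, &link);
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "bulletin: link %s\n",
			   link.link_up ? "up" : "down");
	}
}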
*port_mac)\n+{\n+\tOSAL_MEMCPY(port_mac,\n+\t\t    p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,\n+\t\t    ETH_ALEN);\n+}\n+\n+void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t   u8 *num_vlan_filters)\n+{\n+\tstruct ecore_vf_iov *p_vf;\n+\n+\tp_vf = p_hwfn->vf_iov_info;\n+\t*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;\n+}\n+\n+bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)\n+{\n+\tstruct ecore_bulletin_content *bulletin;\n+\n+\tbulletin = &p_hwfn->vf_iov_info->bulletin_shadow;\n+\tif (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))\n+\t\treturn true;\n+\n+\t/* Forbid VF from changing a MAC enforced by PF */\n+\tif (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))\n+\t\treturn false;\n+\n+\treturn false;\n+}\n+\n+bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,\n+\t\t\t\t      u8 *p_is_forced)\n+{\n+\tstruct ecore_bulletin_content *bulletin;\n+\n+\tbulletin = &hwfn->vf_iov_info->bulletin_shadow;\n+\n+\tif (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {\n+\t\tif (p_is_forced)\n+\t\t\t*p_is_forced = 1;\n+\t} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {\n+\t\tif (p_is_forced)\n+\t\t\t*p_is_forced = 0;\n+\t} else {\n+\t\treturn false;\n+\t}\n+\n+\tOSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);\n+\n+\treturn true;\n+}\n+\n+bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)\n+{\n+\tstruct ecore_bulletin_content *bulletin;\n+\n+\tbulletin = &hwfn->vf_iov_info->bulletin_shadow;\n+\n+\tif (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))\n+\t\treturn false;\n+\n+\tif (dst_pvid)\n+\t\t*dst_pvid = bulletin->pvid;\n+\n+\treturn true;\n+}\n+\n+void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,\n+\t\t\t     u16 *fw_major, u16 *fw_minor, u16 *fw_rev,\n+\t\t\t     u16 *fw_eng)\n+{\n+\tstruct pf_vf_pfdev_info *info;\n+\n+\tinfo = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;\n+\n+\t*fw_major = info->fw_major;\n+\t*fw_minor = info->fw_minor;\n+\t*fw_rev = info->fw_rev;\n+\t*fw_eng = info->fw_eng;\n+}\ndiff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h\nnew file mode 100644\nindex 0000000..a006dac\n--- /dev/null\n+++ b/drivers/net/qede/base/ecore_vf.h\n@@ -0,0 +1,415 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#ifndef __ECORE_VF_H__\n+#define __ECORE_VF_H__\n+\n+#include \"ecore_status.h\"\n+#include \"ecore_vf_api.h\"\n+#include \"ecore_l2_api.h\"\n+#include \"ecore_vfpf_if.h\"\n+\n+#ifdef CONFIG_ECORE_SRIOV\n+/**\n+ *\n+ * @brief hw preparation for VF\n+ *\tsends ACQUIRE message\n+ *\n+ * @param p_dev\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev);\n+\n+/**\n+ *\n+ * @brief VF init in hw (equivalent to hw_init in PF)\n+ *      mark interrupts as enabled\n+ *\n+ * @param p_hwfn\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn);\n+\n+/**\n+ *\n+ * @brief VF - start the RX Queue by sending a message to the PF\n+ *\n+ * @param p_hwfn\n+ * @param cid\t\t\t- zero based within the VF\n+ * @param rx_queue_id\t\t- zero based within the VF\n+ * @param sb\t\t\t- VF status block for this queue\n+ * @param sb_index\t\t- Index within the status block\n+ * @param bd_max_bytes\t\t- maximum number of bytes per bd\n+ * @param bd_chain_phys_addr\t- physical address of bd chain\n+ * @param 
cqe_pbl_addr\t\t- physical address of pbl\n+ * @param cqe_pbl_size\t\t- pbl size\n+ * @param pp_prod\t\t- pointer to the producer to be\n+ *\t    used in fastpath\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t   u8 rx_queue_id,\n+\t\t\t\t\t   u16 sb,\n+\t\t\t\t\t   u8 sb_index,\n+\t\t\t\t\t   u16 bd_max_bytes,\n+\t\t\t\t\t   dma_addr_t bd_chain_phys_addr,\n+\t\t\t\t\t   dma_addr_t cqe_pbl_addr,\n+\t\t\t\t\t   u16 cqe_pbl_size,\n+\t\t\t\t\t   void OSAL_IOMEM **pp_prod);\n+\n+/**\n+ *\n+ * @brief VF - start the TX queue by sending a message to the\n+ *        PF.\n+ *\n+ * @param p_hwfn\n+ * @param tx_queue_id\t\t- zero based within the VF\n+ * @param sb\t\t\t- status block for this queue\n+ * @param sb_index\t\t- index within the status block\n+ * @param bd_chain_phys_addr\t- physical address of tx chain\n+ * @param pp_doorbell\t\t- pointer to address to which to\n+ *\t\twrite the doorbell to\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t   u16 tx_queue_id,\n+\t\t\t\t\t   u16 sb,\n+\t\t\t\t\t   u8 sb_index,\n+\t\t\t\t\t   dma_addr_t pbl_addr,\n+\t\t\t\t\t   u16 pbl_size,\n+\t\t\t\t\t   void OSAL_IOMEM **pp_doorbell);\n+\n+/**\n+ *\n+ * @brief VF - stop the RX queue by sending a message to the PF\n+ *\n+ * @param p_hwfn\n+ * @param rx_qid\n+ * @param cqe_completion\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  u16 rx_qid, bool cqe_completion);\n+\n+/**\n+ *\n+ * @brief VF - stop the TX queue by sending a message to the PF\n+ *\n+ * @param p_hwfn\n+ * @param tx_qid\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  u16 tx_qid);\n+\n+/**\n+ * @brief VF - update the RX queue by sending a message to the\n+ *        PF\n+ *\n+ * @param p_hwfn\n+ * @param rx_queue_id\n+ * @param num_rxqs\n+ * @param comp_cqe_flg\n+ * @param comp_event_flg\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t     u16 rx_queue_id,\n+\t\t\t\t\t     u8 num_rxqs,\n+\t\t\t\t\t     u8 comp_cqe_flg,\n+\t\t\t\t\t     u8 comp_event_flg);\n+\n+/**\n+ *\n+ * @brief VF - send a vport update command\n+ *\n+ * @param p_hwfn\n+ * @param params\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t\n+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,\n+\t\t\t struct ecore_sp_vport_update_params *p_params);\n+\n+/**\n+ *\n+ * @brief VF - send a close message to PF\n+ *\n+ * @param p_hwfn\n+ *\n+ * @return enum _ecore_status\n+ */\n+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn);\n+\n+/**\n+ *\n+ * @brief VF - free the VF's memories\n+ *\n+ * @param p_hwfn\n+ *\n+ * @return enum _ecore_status\n+ */\n+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);\n+\n+/**\n+ *\n+ * @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for a given\n+ *        sb_id. 
\n+\n+/**\n+ *\n+ * @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for a given\n+ *        sb_id. For VFs, IGU SBs don't have to be contiguous\n+ *\n+ * @param p_hwfn\n+ * @param sb_id\n+ *\n+ * @return u16\n+ */\n+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);\n+\n+/**\n+ * @brief ecore_vf_pf_vport_start - perform vport start for VF.\n+ *\n+ * @param p_hwfn\n+ * @param vport_id\n+ * @param mtu\n+ * @param inner_vlan_removal\n+ * @param tpa_mode\n+ * @param max_buffers_per_cqe\n+ * @param only_untagged - default behavior regarding vlan acceptance\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t     u8 vport_id,\n+\t\t\t\t\t     u16 mtu,\n+\t\t\t\t\t     u8 inner_vlan_removal,\n+\t\t\t\t\t     enum ecore_tpa_mode tpa_mode,\n+\t\t\t\t\t     u8 max_buffers_per_cqe,\n+\t\t\t\t\t     u8 only_untagged);\n+\n+/**\n+ * @brief ecore_vf_pf_vport_stop - stop the VF's vport\n+ *\n+ * @param p_hwfn\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn);\n+\n+enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      struct ecore_filter_ucast\n+\t\t\t\t\t      *p_param);\n+\n+void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,\n+\t\t\t      struct ecore_filter_mcast *p_filter_cmd);\n+\n+/**\n+ * @brief ecore_vf_pf_int_cleanup - clean the SB of the VF\n+ *\n+ * @param p_hwfn\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn);\n+\n+/**\n+ * @brief - return the link params in a given bulletin board\n+ *\n+ * @param p_hwfn\n+ * @param p_params - pointer to a struct to fill with link params\n+ * @param p_bulletin\n+ */\n+void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,\n+\t\t\t\tstruct ecore_mcp_link_params *p_params,\n+\t\t\t\tstruct ecore_bulletin_content *p_bulletin);\n+\n+/**\n+ * @brief - return the link state in a given bulletin board\n+ *\n+ * @param p_hwfn\n+ * @param p_link - pointer to a struct to fill with link state\n+ * @param p_bulletin\n+ */\n+void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,\n+\t\t\t       struct ecore_mcp_link_state *p_link,\n+\t\t\t       struct ecore_bulletin_content *p_bulletin);\n+\n+/**\n+ * @brief - return the link capabilities in a given bulletin board\n+ *\n+ * @param p_hwfn\n+ * @param p_link_caps - pointer to a struct to fill with link capabilities\n+ * @param p_bulletin\n+ */\n+void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,\n+\t\t\t      struct ecore_mcp_link_capabilities *p_link_caps,\n+\t\t\t      struct ecore_bulletin_content *p_bulletin);\n+\n+#else\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev\n+\t\t\t\t\t\t\t    *p_dev)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn\n+\t\t\t\t\t\t\t *p_hwfn)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn\n+\t\t\t\t\t\t\t      *p_hwfn,\n+\t\t\t\t\t\t\t      u8 rx_queue_id,\n+\t\t\t\t\t\t\t      u16 sb,\n+\t\t\t\t\t\t\t      u8 sb_index,\n+\t\t\t\t\t\t\t      u16 bd_max_bytes,\n+\t\t\t\t\t\t\t      dma_addr_t\n+\t\t\t\t\t\t\t      bd_chain_phys_addr,\n+\t\t\t\t\t\t\t      dma_addr_t\n+\t\t\t\t\t\t\t      cqe_pbl_addr,\n+\t\t\t\t\t\t\t      u16 cqe_pbl_size,\n+\t\t\t\t\t\t\t      void OSAL_IOMEM **\n+\t\t\t\t\t\t\t      pp_prod)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn\n+\t\t\t\t\t\t\t      
*p_hwfn,\n+\t\t\t\t\t\t\t      u16 tx_queue_id,\n+\t\t\t\t\t\t\t      u16 sb,\n+\t\t\t\t\t\t\t      u8 sb_index,\n+\t\t\t\t\t\t\t      dma_addr_t\n+\t\t\t\t\t\t\t      pbl_addr,\n+\t\t\t\t\t\t\t      u16 pbl_size,\n+\t\t\t\t\t\t\t      void OSAL_IOMEM **\n+\t\t\t\t\t\t\t      pp_doorbell)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn\n+\t\t\t\t\t\t\t     *p_hwfn,\n+\t\t\t\t\t\t\t     u16 rx_qid,\n+\t\t\t\t\t\t\t     bool\n+\t\t\t\t\t\t\t     cqe_completion)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn\n+\t\t\t\t\t\t\t     *p_hwfn,\n+\t\t\t\t\t\t\t     u16 tx_qid)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxqs_update(struct\n+\t\t\t\t\t\t\t\tecore_hwfn\n+\t\t\t\t\t\t\t\t*p_hwfn,\n+\t\t\t\t\t\t\t\tu16 rx_queue_id,\n+\t\t\t\t\t\t\t\tu8 num_rxqs,\n+\t\t\t\t\t\t\t\tu8 comp_cqe_flg,\n+\t\t\t\t\t\t\t\tu8\n+\t\t\t\t\t\t\t\tcomp_event_flg)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_update(\n+\tstruct ecore_hwfn *p_hwfn,\n+\tstruct ecore_sp_vport_update_params *p_params)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn\n+\t\t\t\t\t\t\t  *p_hwfn)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn\n+\t\t\t\t\t\t\t    *p_hwfn)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      u16 sb_id)\n+{\n+\treturn 0;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(\n+\tstruct ecore_hwfn *p_hwfn, u8 vport_id, u16 mtu,\n+\tu8 inner_vlan_removal, enum ecore_tpa_mode tpa_mode,\n+\tu8 max_buffers_per_cqe, u8 only_untagged)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop(\n+\tstruct ecore_hwfn *p_hwfn)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast(\n+\t struct ecore_hwfn *p_hwfn, struct ecore_filter_ucast *p_param)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t struct ecore_filter_mcast\n+\t\t\t\t\t\t *p_filter_cmd)\n+{\n+}\n+\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_int_cleanup(struct\n+\t\t\t\t\t\t\t\tecore_hwfn\n+\t\t\t\t\t\t\t\t*p_hwfn)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t   struct ecore_mcp_link_params\n+\t\t\t\t\t\t   *p_params,\n+\t\t\t\t\t\t   struct ecore_bulletin_content\n+\t\t\t\t\t\t   *p_bulletin)\n+{\n+}\n+\n+static OSAL_INLINE void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t  struct ecore_mcp_link_state\n+\t\t\t\t\t\t  *p_link,\n+\t\t\t\t\t\t  struct ecore_bulletin_content\n+\t\t\t\t\t\t  *p_bulletin)\n+{\n+}\n+\n+static OSAL_INLINE void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t struct\n+\t\t\t\t\t\t ecore_mcp_link_capabilities\n+\t\t\t\t\t\t *p_link_caps,\n+\t\t\t\t\t\t struct ecore_bulletin_content\n+\t\t\t\t\t\t *p_bulletin)\n+{\n+}\n+#endif\n+\n+#endif /* __ECORE_VF_H__ */\ndiff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h\nnew file mode 100644\nindex 0000000..cce1813\n--- /dev/null\n+++ b/drivers/net/qede/base/ecore_vf_api.h\n@@ -0,0 +1,186 @@\n+/*\n+ * Copyright 
(c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#ifndef __ECORE_VF_API_H__\n+#define __ECORE_VF_API_H__\n+\n+#include \"ecore_sp_api.h\"\n+#include \"ecore_mcp_api.h\"\n+\n+#ifdef CONFIG_ECORE_SRIOV\n+/**\n+ * @brief Read the VF bulletin and act on it if needed\n+ *\n+ * @param p_hwfn\n+ * @param p_change - ecore fills 1 iff bulletin board has changed, 0 otherwise.\n+ *\n+ * @return enum _ecore_status_t\n+ */\n+enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t    u8 *p_change);\n+\n+/**\n+ * @brief Get link parameters for VF from ecore\n+ *\n+ * @param p_hwfn\n+ * @param params - the link params structure to be filled for the VF\n+ */\n+void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,\n+\t\t\t      struct ecore_mcp_link_params *params);\n+\n+/**\n+ * @brief Get link state for VF from ecore\n+ *\n+ * @param p_hwfn\n+ * @param link - the link state structure to be filled for the VF\n+ */\n+void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,\n+\t\t\t     struct ecore_mcp_link_state *link);\n+\n+/**\n+ * @brief Get link capabilities for VF from ecore\n+ *\n+ * @param p_hwfn\n+ * @param p_link_caps - the link capabilities structure to be filled for the VF\n+ */\n+void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,\n+\t\t\t    struct ecore_mcp_link_capabilities *p_link_caps);\n+\n+/**\n+ * @brief Get number of Rx queues allocated for VF by ecore\n+ *\n+ *  @param p_hwfn\n+ *  @param num_rxqs - allocated RX queues\n+ */\n+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs);\n+\n+/**\n+ * @brief Get port mac address for VF\n+ *\n+ * @param p_hwfn\n+ * @param port_mac - destination location for port mac\n+ */\n+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac);\n+\n+/**\n+ * @brief Get number of VLAN filters allocated for VF by ecore\n+ *\n+ *  @param p_hwfn\n+ *  @param num_vlan_filters - allocated VLAN filters\n+ */\n+void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t   u8 *num_vlan_filters);\n+\n+/**\n+ * @brief Check if VF can set a MAC address\n+ *\n+ * @param p_hwfn\n+ * @param mac\n+ *\n+ * @return bool\n+ */\n+bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac);\n+\n+/**\n+ * @brief Copy forced MAC address from bulletin board\n+ *\n+ * @param hwfn\n+ * @param dst_mac\n+ * @param p_is_forced - out param indicating whether an existing\n+ *\t        MAC is forced or not.\n+ *\n+ * @return bool       - true if a MAC address exists, false\n+ *                      otherwise.\n+ */\n+bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,\n+\t\t\t\t      u8 *p_is_forced);\n+\n+/**\n+ * @brief Check if a forced vlan is set and copy the forced vlan\n+ *        from bulletin board\n+ *\n+ * @param hwfn\n+ * @param dst_pvid\n+ * @return bool\n+ */\n+bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid);\n+\n+/**\n+ * @brief Set firmware version information in dev_info from VF's acquire\n+ *        response TLV\n+ *\n+ * @param p_hwfn\n+ * @param fw_major\n+ * @param fw_minor\n+ * @param fw_rev\n+ * @param fw_eng\n+ */\n+void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,\n+\t\t\t     u16 *fw_major,\n+\t\t\t     u16 *fw_minor, u16 *fw_rev, u16 *fw_eng);
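\n+\n+/* Usage sketch (illustrative only; edev and the locals below are\n+ * assumptions, not part of this API): a VF driver samples the bulletin\n+ * board and prefers a PF-enforced MAC/VLAN over its own choice:\n+ *\n+ *\tu8 change = 0, is_forced = 0;\n+ *\tu8 vf_mac[ETH_ALEN];\n+ *\tu16 pvid;\n+ *\tbool has_mac, has_pvid;\n+ *\n+ *\tecore_vf_read_bulletin(&edev->hwfns[0], &change);\n+ *\thas_mac = ecore_vf_bulletin_get_forced_mac(&edev->hwfns[0], vf_mac,\n+ *\t\t\t\t\t\t   &is_forced);\n+ *\thas_pvid = ecore_vf_bulletin_get_forced_vlan(&edev->hwfns[0], &pvid);\n+ *\n+ * When has_mac and is_forced are set the VF must keep vf_mac; the\n+ * qede_ethdev.c hunk further below uses exactly this pattern at init time.\n+ */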
\n+#else\n+static OSAL_INLINE enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn\n+\t\t\t\t\t\t\t       *p_hwfn,\n+\t\t\t\t\t\t\t       u8 *p_change)\n+{\n+\treturn ECORE_INVAL;\n+}\n+\n+static OSAL_INLINE void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t struct ecore_mcp_link_params\n+\t\t\t\t\t\t *params)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\tstruct ecore_mcp_link_state\n+\t\t\t\t\t\t*link)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t       struct\n+\t\t\t\t\t       ecore_mcp_link_capabilities\n+\t\t\t\t\t       *p_link_caps)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      u8 *num_rxqs)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t      u8 *port_mac)\n+{\n+}\n+\n+static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\t      u8 *num_vlan_filters)\n+{\n+}\n+\n+static OSAL_INLINE bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn\n+\t\t\t\t\t\t\t *hwfn, u8 *dst_mac,\n+\t\t\t\t\t\t\t u8 *p_is_forced)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn\n+\t\t\t\t\t\t\t  *hwfn,\n+\t\t\t\t\t\t\t  u16 *dst_pvid)\n+{\n+\treturn false;\n+}\n+\n+static OSAL_INLINE void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t\tu16 *fw_major, u16 *fw_minor,\n+\t\t\t\t\t\tu16 *fw_rev, u16 *fw_eng)\n+{\n+}\n+#endif\n+#endif\ndiff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h\nnew file mode 100644\nindex 0000000..e5cf097\n--- /dev/null\n+++ b/drivers/net/qede/base/ecore_vfpf_if.h\n@@ -0,0 +1,590 @@\n+/*\n+ * Copyright (c) 2016 QLogic Corporation.\n+ * All rights reserved.\n+ * www.qlogic.com\n+ *\n+ * See LICENSE.qede_pmd for copyright and licensing details.\n+ */\n+\n+#ifndef __ECORE_VF_PF_IF_H__\n+#define __ECORE_VF_PF_IF_H__\n+\n+#define T_ETH_INDIRECTION_TABLE_SIZE 128\n+#define T_ETH_RSS_KEY_SIZE 10\n+#ifndef aligned_u64\n+#define aligned_u64 u64\n+#endif\n+\n+/***********************************************\n+ *\n+ * Common definitions for all HVs\n+ *\n+ **/\n+struct vf_pf_resc_request {\n+\tu8 num_rxqs;\n+\tu8 num_txqs;\n+\tu8 num_sbs;\n+\tu8 num_mac_filters;\n+\tu8 num_vlan_filters;\n+\tu8 num_mc_filters;\t/* No limit, so superfluous */\n+\tu16 padding;\n+};\n+\n+struct hw_sb_info {\n+\tu16 hw_sb_id;\t\t/* aka absolute igu id, used to ack the sb */\n+\tu8 sb_qid;\t\t/* used to update DHC for sb */\n+\tu8 padding[5];\n+};\n+\n+/***********************************************\n+ *\n+ * HW VF-PF channel definitions\n+ *\n+ * A.K.A VF-PF mailbox\n+ *\n+ **/\n+#define TLV_BUFFER_SIZE\t\t1024\n+#define TLV_ALIGN\t\tsizeof(u64)\n+#define PF_VF_BULLETIN_SIZE\t512\n+\n+#define VFPF_RX_MASK_ACCEPT_NONE\t\t0x00000000\n+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST     0x00000001\n+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST   0x00000002\n+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST\t0x00000004\n+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST       0x00000008\n+#define VFPF_RX_MASK_ACCEPT_BROADCAST\t0x00000010\n+/* TODO: #define VFPF_RX_MASK_ACCEPT_ANY_VLAN   0x00000020 */\n+\n+#define BULLETIN_CONTENT_SIZE\t(sizeof(struct ecore_bulletin_content))\n+#define BULLETIN_ATTEMPTS       5\t/* crc failures before throwing towel */\n+#define BULLETIN_CRC_SEED       0\n+\n+enum {\n+\tPFVF_STATUS_WAITING = 0,\n+\tPFVF_STATUS_SUCCESS,\n+\tPFVF_STATUS_FAILURE,\n+\tPFVF_STATUS_NOT_SUPPORTED,\n+\tPFVF_STATUS_NO_RESOURCE,\n+\tPFVF_STATUS_FORCED,\n+};
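\n+\n+/* Channel flow sketch (illustrative only; p_iov and the buffer names are\n+ * assumptions): every VF request is a TLV chain inside a TLV_BUFFER_SIZE\n+ * buffer. The first TLV carries the DMA address the PF should write its\n+ * reply to, and the PF reports one of the PFVF_STATUS_* values above in\n+ * the reply header:\n+ *\n+ *\tstruct vfpf_acquire_tlv *req = &p_iov->vf2pf_request->acquire;\n+ *\n+ *\treq->first_tlv.tl.type = CHANNEL_TLV_ACQUIRE;\n+ *\treq->first_tlv.tl.length = sizeof(*req);\n+ *\treq->first_tlv.reply_address = (u64)p_iov->pf2vf_reply_phys;\n+ *\n+ * The chain is terminated by a channel_list_end_tlv before being posted\n+ * to the PF.\n+ */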
\n+\n+/* vf pf channel tlvs */\n+/* general tlv header (used for both vf->pf request and pf->vf response) */\n+struct channel_tlv {\n+\tu16 type;\n+\tu16 length;\n+};\n+\n+/* header of first vf->pf tlv carries the offset used to calculate response\n+ * buffer address\n+ */\n+struct vfpf_first_tlv {\n+\tstruct channel_tlv tl;\n+\tu32 padding;\n+\taligned_u64 reply_address;\n+};\n+\n+/* header of pf->vf tlvs, carries the status of handling the request */\n+struct pfvf_tlv {\n+\tstruct channel_tlv tl;\n+\tu8 status;\n+\tu8 padding[3];\n+};\n+\n+/* response tlv used for most tlvs */\n+struct pfvf_def_resp_tlv {\n+\tstruct pfvf_tlv hdr;\n+};\n+\n+/* used to terminate and pad a tlv list */\n+struct channel_list_end_tlv {\n+\tstruct channel_tlv tl;\n+\tu8 padding[4];\n+};\n+\n+/* Acquire */\n+struct vfpf_acquire_tlv {\n+\tstruct vfpf_first_tlv first_tlv;\n+\n+\tstruct vf_pf_vfdev_info {\n+#define VFPF_ACQUIRE_CAP_OVERRIDE_FW_VER\t\t(1 << 0)\n+\t\taligned_u64 capabilties;\n+\t\tu8 fw_major;\n+\t\tu8 fw_minor;\n+\t\tu8 fw_revision;\n+\t\tu8 fw_engineering;\n+\t\tu32 driver_version;\n+\t\tu16 opaque_fid;\t/* ME register value */\n+\t\tu8 os_type;\t/* VFPF_ACQUIRE_OS_* value */\n+\t\tu8 padding[5];\n+\t} vfdev_info;\n+\n+\tstruct vf_pf_resc_request resc_request;\n+\n+\taligned_u64 bulletin_addr;\n+\tu32 bulletin_size;\n+\tu32 padding;\n+};\n+\n+/* receive side scaling tlv */\n+struct vfpf_vport_update_rss_tlv {\n+\tstruct channel_tlv tl;\n+\n+\tu8 update_rss_flags;\n+#define VFPF_UPDATE_RSS_CONFIG_FLAG\t  (1 << 0)\n+#define VFPF_UPDATE_RSS_CAPS_FLAG\t  (1 << 1)\n+#define VFPF_UPDATE_RSS_IND_TABLE_FLAG\t  (1 << 2)\n+#define VFPF_UPDATE_RSS_KEY_FLAG\t  (1 << 3)\n+\n+\tu8 rss_enable;\n+\tu8 rss_caps;\n+\tu8 rss_table_size_log;\t/* The table size is 2 ^ rss_table_size_log */\n+\tu16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];\n+\tu32 rss_key[T_ETH_RSS_KEY_SIZE];\n+};\n+\n+struct pfvf_storm_stats {\n+\tu32 address;\n+\tu32 len;\n+};\n+\n+struct pfvf_stats_info {\n+\tstruct pfvf_storm_stats mstats;\n+\tstruct pfvf_storm_stats pstats;\n+\tstruct pfvf_storm_stats tstats;\n+\tstruct pfvf_storm_stats ustats;\n+};\n+\n+/* acquire response tlv - carries the allocated resources */\n+struct pfvf_acquire_resp_tlv {\n+\tstruct pfvf_tlv hdr;\n+\n+\tstruct pf_vf_pfdev_info {\n+\t\tu32 chip_num;\n+\t\tu32 mfw_ver;\n+\n+\t\tu16 fw_major;\n+\t\tu16 fw_minor;\n+\t\tu16 fw_rev;\n+\t\tu16 fw_eng;\n+\n+\t\taligned_u64 capabilities;\n+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED\t(1 << 0)\n+\n+\t\tu16 db_size;\n+\t\tu8 indices_per_sb;\n+\t\tu8 os_type;\n+\n+\t\t/* These should match the PF's ecore_dev values */\n+\t\tu16 chip_rev;\n+\t\tu8 dev_type;\n+\n+\t\tu8 padding;\n+\n+\t\tstruct pfvf_stats_info stats_info;\n+\n+\t\tu8 port_mac[ETH_ALEN];\n+\t\tu8 padding2[2];\n+\t} pfdev_info;\n+\n+\tstruct pf_vf_resc {\n+\t\t/* in case of status NO_RESOURCE in message hdr, pf will fill\n+\t\t * this struct with suggested amount of resources for next\n+\t\t * acquire request\n+\t\t */\n+#define PFVF_MAX_QUEUES_PER_VF         16\n+#define PFVF_MAX_SBS_PER_VF            16\n+\t\tstruct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];\n+\t\tu8 hw_qid[PFVF_MAX_QUEUES_PER_VF];\n+\t\tu8 cid[PFVF_MAX_QUEUES_PER_VF];\n+\n+\t\tu8 num_rxqs;\n+\t\tu8 num_txqs;\n+\t\tu8 num_sbs;\n+\t\tu8 num_mac_filters;\n+\t\tu8 num_vlan_filters;\n+\t\tu8 num_mc_filters;\n+\t\tu8 padding[2];\n+\t} resc;\n+\n+\tu32 bulletin_size;\n+\tu32 padding;\n+};
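\n+\n+/* Acquire negotiation sketch (illustrative only; req, p_resp and\n+ * resend_acquire are assumptions): the VF asks for resources via\n+ * vf_pf_resc_request inside the acquire TLV. If the PF cannot satisfy\n+ * the request it answers PFVF_STATUS_NO_RESOURCE and fills pf_vf_resc\n+ * with a suggested allocation, which the VF may adopt for a retry:\n+ *\n+ *\tif (p_resp->hdr.status == PFVF_STATUS_NO_RESOURCE) {\n+ *\t\treq->resc_request.num_rxqs = p_resp->resc.num_rxqs;\n+ *\t\treq->resc_request.num_txqs = p_resp->resc.num_txqs;\n+ *\t\tresend_acquire = true;\n+ *\t}\n+ */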
\n+\n+/* Init VF */\n+struct vfpf_init_tlv {\n+\tstruct vfpf_first_tlv first_tlv;\n+\taligned_u64 stats_addr;\n+\n+\tu16 rx_mask;\n+\tu16 tx_mask;\n+\tu8 drop_ttl0_flg;\n+\tu8 padding[3];\n+};\n+\n+/* Setup Queue */\n+struct vfpf_start_rxq_tlv {\n+\tstruct vfpf_first_tlv first_tlv;\n+\n+\t/* physical addresses */\n+\taligned_u64 rxq_addr;\n+\taligned_u64 deprecated_sge_addr;\n+\taligned_u64 cqe_pbl_addr;\n+\n+\tu16 cqe_pbl_size;\n+\tu16 hw_sb;\n+\tu16 rx_qid;\n+\tu16 hc_rate;\t\t/* desired interrupts per sec. */\n+\n+\tu16 bd_max_bytes;\n+\tu16 stat_id;\n+\tu8 sb_index;\n+\tu8 padding[3];\n+};\n+\n+struct vfpf_start_txq_tlv {\n+\tstruct vfpf_first_tlv first_tlv;\n+\n+\t/* physical addresses */\n+\taligned_u64 pbl_addr;\n+\tu16 pbl_size;\n+\tu16 stat_id;\n+\tu16 tx_qid;\n+\tu16 hw_sb;\n+\n+\tu32 flags;\t\t/* VFPF_QUEUE_FLG_X flags */\n+\tu16 hc_rate;\t\t/* desired interrupts per sec. */\n+\tu8 sb_index;\n+\tu8 padding[3];\n+};\n+\n+/* Stop RX Queue */\n+struct vfpf_stop_rxqs_tlv {\n+\tstruct vfpf_first_tlv first_tlv;\n+\n+\tu16 rx_qid;\n+\tu8 num_rxqs;\n+\tu8 cqe_completion;\n+\tu8 padding[4];\n+};\n+\n+/* Stop TX Queues */\n+struct vfpf_stop_txqs_tlv {\n+\tstruct vfpf_first_tlv first_tlv;\n+\n+\tu16 tx_qid;\n+\tu8 num_txqs;\n+\tu8 padding[5];\n+};\n+\n+struct vfpf_update_rxq_tlv {\n+\tstruct vfpf_first_tlv first_tlv;\n+\n+\taligned_u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];\n+\n+\tu16 rx_qid;\n+\tu8 num_rxqs;\n+\tu8 flags;\n+#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG\t(1 << 0)\n+#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG\t\t(1 << 1)\n+#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG\t(1 << 2)\n+\n+\tu8 padding[4];\n+};\n+\n+/* Set Queue Filters */\n+struct vfpf_q_mac_vlan_filter {\n+\tu32 flags;\n+#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01\n+#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02\n+#define VFPF_Q_FILTER_SET_MAC\t0x100\t/* set/clear */\n+\n+\tu8 mac[ETH_ALEN];\n+\tu16 vlan_tag;\n+\n+\tu8 padding[4];\n+};\n+\n+/* Start a vport */\n+struct vfpf_vport_start_tlv {\n+\tstruct vfpf_first_tlv first_tlv;\n+\n+\taligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF];\n+\n+\tu32 tpa_mode;\n+\tu16 dep1;\n+\tu16 mtu;\n+\n+\tu8 vport_id;\n+\tu8 inner_vlan_removal;\n+\n+\tu8 only_untagged;\n+\tu8 max_buffers_per_cqe;\n+\n+\tu8 padding[4];\n+};\n+\n+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */\n+struct vfpf_vport_update_activate_tlv {\n+\tstruct channel_tlv tl;\n+\tu8 update_rx;\n+\tu8 update_tx;\n+\tu8 active_rx;\n+\tu8 active_tx;\n+};\n+\n+struct vfpf_vport_update_tx_switch_tlv {\n+\tstruct channel_tlv tl;\n+\tu8 tx_switching;\n+\tu8 padding[3];\n+};\n+\n+struct vfpf_vport_update_vlan_strip_tlv {\n+\tstruct channel_tlv tl;\n+\tu8 remove_vlan;\n+\tu8 padding[3];\n+};\n+\n+struct vfpf_vport_update_mcast_bin_tlv {\n+\tstruct channel_tlv tl;\n+\tu8 padding[4];\n+\n+\taligned_u64 bins[8];\n+};\n+\n+struct vfpf_vport_update_accept_param_tlv {\n+\tstruct channel_tlv tl;\n+\tu8 update_rx_mode;\n+\tu8 update_tx_mode;\n+\tu8 rx_accept_filter;\n+\tu8 tx_accept_filter;\n+};\n+\n+struct vfpf_vport_update_accept_any_vlan_tlv {\n+\tstruct channel_tlv tl;\n+\tu8 update_accept_any_vlan_flg;\n+\tu8 accept_any_vlan;\n+\n+\tu8 padding[2];\n+};\n+\n+struct vfpf_vport_update_sge_tpa_tlv {\n+\tstruct channel_tlv tl;\n+\n+\tu16 sge_tpa_flags;\n+#define VFPF_TPA_IPV4_EN_FLAG\t     (1 << 0)\n+#define VFPF_TPA_IPV6_EN_FLAG        (1 << 1)\n+#define VFPF_TPA_PKT_SPLIT_FLAG      (1 << 2)\n+#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)\n+#define VFPF_TPA_GRO_CONSIST_FLAG    (1 << 4)\n+\n+\tu8 update_sge_tpa_flags;\n+#define VFPF_UPDATE_SGE_DEPRECATED_FLAG\t   (1 << 0)\n+#define VFPF_UPDATE_TPA_EN_FLAG    (1 << 1)\n+#define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)\n+\n+\tu8 max_buffers_per_cqe;\n+\n+\tu16 deprecated_sge_buff_size;\n+\tu16 tpa_max_size;
\n+\tu16 tpa_min_size_to_start;\n+\tu16 tpa_min_size_to_cont;\n+\n+\tu8 tpa_max_aggs_num;\n+\tu8 padding[7];\n+};\n+\n+/* Primary tlv as a header for various extended tlvs for\n+ * various functionalities in vport update ramrod.\n+ */\n+struct vfpf_vport_update_tlv {\n+\tstruct vfpf_first_tlv first_tlv;\n+};\n+\n+struct vfpf_ucast_filter_tlv {\n+\tstruct vfpf_first_tlv first_tlv;\n+\n+\tu8 opcode;\n+\tu8 type;\n+\n+\tu8 mac[ETH_ALEN];\n+\n+\tu16 vlan;\n+\tu16 padding[3];\n+};\n+\n+struct tlv_buffer_size {\n+\tu8 tlv_buffer[TLV_BUFFER_SIZE];\n+};\n+\n+union vfpf_tlvs {\n+\tstruct vfpf_first_tlv first_tlv;\n+\tstruct vfpf_acquire_tlv acquire;\n+\tstruct vfpf_init_tlv init;\n+\tstruct vfpf_start_rxq_tlv start_rxq;\n+\tstruct vfpf_start_txq_tlv start_txq;\n+\tstruct vfpf_stop_rxqs_tlv stop_rxqs;\n+\tstruct vfpf_stop_txqs_tlv stop_txqs;\n+\tstruct vfpf_update_rxq_tlv update_rxq;\n+\tstruct vfpf_vport_start_tlv start_vport;\n+\tstruct vfpf_vport_update_tlv vport_update;\n+\tstruct vfpf_ucast_filter_tlv ucast_filter;\n+\tstruct channel_list_end_tlv list_end;\n+\tstruct tlv_buffer_size tlv_buf_size;\n+};\n+\n+union pfvf_tlvs {\n+\tstruct pfvf_def_resp_tlv default_resp;\n+\tstruct pfvf_acquire_resp_tlv acquire_resp;\n+\tstruct channel_list_end_tlv list_end;\n+\tstruct tlv_buffer_size tlv_buf_size;\n+};\n+\n+/* This is a structure which is allocated in the VF, which the PF may update\n+ * when it deems it necessary to do so. The bulletin board is sampled\n+ * periodically by the VF. A copy per VF is maintained in the PF (to prevent\n+ * loss of data upon multiple updates (or the need for read modify write)).\n+ */\n+enum ecore_bulletin_bit {\n+\t/* Alert the VF that a forced MAC was set by the PF */\n+\tMAC_ADDR_FORCED = 0,\n+\n+\t/* The VF should not access the vfpf channel */\n+\tVFPF_CHANNEL_INVALID = 1,\n+\n+\t/* Alert the VF that a forced VLAN was set by the PF */\n+\tVLAN_ADDR_FORCED = 2,\n+\n+\t/* Indicate that `default_only_untagged' contains actual data */\n+\tVFPF_BULLETIN_UNTAGGED_DEFAULT = 3,\n+\tVFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,\n+\n+\t/* Alert the VF that suggested mac was sent by the PF.\n+\t * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set\n+\t */\n+\tVFPF_BULLETIN_MAC_ADDR = 5\n+};\n+\n+struct ecore_bulletin_content {\n+\tu32 crc;\t\t/* crc of the structure, to ensure it is\n+\t\t\t\t * not read mid-update\n+\t\t\t\t */\n+\tu32 version;\n+\n+\taligned_u64 valid_bitmap;\t/* bitmap indicating which fields\n+\t\t\t\t\t * hold valid values\n+\t\t\t\t\t */\n+\n+\tu8 mac[ETH_ALEN];\t/* used for MAC_ADDR or MAC_ADDR_FORCED */\n+\n+\tu8 default_only_untagged;\t/* If valid, 1 => only untagged Rx\n+\t\t\t\t\t * if no vlan filter is configured.\n+\t\t\t\t\t */\n+\tu8 padding;\n+\n+\t/* The following is a 'copy' of ecore_mcp_link_state,\n+\t * ecore_mcp_link_params and ecore_mcp_link_capabilities.
\n+\t * Since these structs may grow in the future, we cannot include\n+\t * them here directly; instead we replicate all of their fields.\n+\t */\n+\tu8 req_autoneg;\n+\tu8 req_autoneg_pause;\n+\tu8 req_forced_rx;\n+\tu8 req_forced_tx;\n+\tu8 padding2[4];\n+\n+\tu32 req_adv_speed;\n+\tu32 req_forced_speed;\n+\tu32 req_loopback;\n+\tu32 padding3;\n+\n+\tu8 link_up;\n+\tu8 full_duplex;\n+\tu8 autoneg;\n+\tu8 autoneg_complete;\n+\tu8 parallel_detection;\n+\tu8 pfc_enabled;\n+\tu8 partner_tx_flow_ctrl_en;\n+\tu8 partner_rx_flow_ctrl_en;\n+\tu8 partner_adv_pause;\n+\tu8 sfp_tx_fault;\n+\tu8 padding4[6];\n+\n+\tu32 speed;\n+\tu32 partner_adv_speed;\n+\n+\tu32 capability_speed;\n+\n+\t/* Forced vlan */\n+\tu16 pvid;\n+\tu16 padding5;\n+};\n+\n+struct ecore_bulletin {\n+\tdma_addr_t phys;\n+\tstruct ecore_bulletin_content *p_virt;\n+\tu32 size;\n+};\n+\n+#ifndef print_enum\n+enum {\n+/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/\n+\n+\tCHANNEL_TLV_NONE,\t/* ends tlv sequence */\n+\tCHANNEL_TLV_ACQUIRE,\n+\tCHANNEL_TLV_VPORT_START,\n+\tCHANNEL_TLV_VPORT_UPDATE,\n+\tCHANNEL_TLV_VPORT_TEARDOWN,\n+\tCHANNEL_TLV_START_RXQ,\n+\tCHANNEL_TLV_START_TXQ,\n+\tCHANNEL_TLV_STOP_RXQS,\n+\tCHANNEL_TLV_STOP_TXQS,\n+\tCHANNEL_TLV_UPDATE_RXQ,\n+\tCHANNEL_TLV_INT_CLEANUP,\n+\tCHANNEL_TLV_CLOSE,\n+\tCHANNEL_TLV_RELEASE,\n+\tCHANNEL_TLV_LIST_END,\n+\tCHANNEL_TLV_UCAST_FILTER,\n+\tCHANNEL_TLV_VPORT_UPDATE_ACTIVATE,\n+\tCHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,\n+\tCHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,\n+\tCHANNEL_TLV_VPORT_UPDATE_MCAST,\n+\tCHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,\n+\tCHANNEL_TLV_VPORT_UPDATE_RSS,\n+\tCHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,\n+\tCHANNEL_TLV_VPORT_UPDATE_SGE_TPA,\n+\tCHANNEL_TLV_MAX\n+/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/\n+};\n+extern const char *ecore_channel_tlvs_string[];\n+\n+#else\n+print_enum(channel_tlvs, CHANNEL_TLV_NONE,\t/* ends tlv sequence */\n+\t   CHANNEL_TLV_ACQUIRE,\n+\t   CHANNEL_TLV_VPORT_START,\n+\t   CHANNEL_TLV_VPORT_UPDATE,\n+\t   CHANNEL_TLV_VPORT_TEARDOWN,\n+\t   CHANNEL_TLV_START_RXQ,\n+\t   CHANNEL_TLV_START_TXQ,\n+\t   CHANNEL_TLV_STOP_RXQS,\n+\t   CHANNEL_TLV_STOP_TXQS,\n+\t   CHANNEL_TLV_UPDATE_RXQ,\n+\t   CHANNEL_TLV_INT_CLEANUP,\n+\t   CHANNEL_TLV_CLOSE,\n+\t   CHANNEL_TLV_RELEASE,\n+\t   CHANNEL_TLV_LIST_END,\n+\t   CHANNEL_TLV_UCAST_FILTER,\n+\t   CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,\n+\t   CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,\n+\t   CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,\n+\t   CHANNEL_TLV_VPORT_UPDATE_MCAST,\n+\t   CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,\n+\t   CHANNEL_TLV_VPORT_UPDATE_RSS,\n+\t   CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,\n+\t   CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_MAX);\n+#endif\n+\n+#endif /* __ECORE_VF_PF_IF_H__ */\ndiff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c\nindex 530b2c1..33f3f78 100644\n--- a/drivers/net/qede/qede_ethdev.c\n+++ b/drivers/net/qede/qede_ethdev.c\n@@ -821,9 +821,27 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)\n \t\treturn -ENOMEM;\n \t}\n \n-\tether_addr_copy((struct ether_addr *)edev->hwfns[0].\n+\tif (!is_vf)\n+\t\tether_addr_copy((struct ether_addr *)edev->hwfns[0].\n \t\t\t\thw_info.hw_mac_addr,\n \t\t\t\t&eth_dev->data->mac_addrs[0]);\n+\telse {\n+\t\tecore_vf_read_bulletin(&edev->hwfns[0], &bulletin_change);\n+\t\tif (bulletin_change) {\n+\t\t\tis_mac_exist =\n+\t\t\t    ecore_vf_bulletin_get_forced_mac(&edev->hwfns[0],\n+\t\t\t\t\t\t\t     vf_mac,\n+\t\t\t\t\t\t\t     
&is_mac_forced);\n+\t\t\tif (is_mac_exist && is_mac_forced) {\n+\t\t\t\tDP_INFO(edev, \"VF macaddr received from PF\\n\");\n+\t\t\t\tether_addr_copy((struct ether_addr *)&vf_mac,\n+\t\t\t\t\t\t&eth_dev->data->mac_addrs[0]);\n+\t\t\t} else {\n+\t\t\t\tDP_NOTICE(edev, false,\n+\t\t\t\t\t  \"No VF macaddr assigned\\n\");\n+\t\t\t}\n+\t\t}\n+\t}\n \n \teth_dev->dev_ops = &qede_eth_dev_ops;\n \ndiff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h\nindex 5550349..eb44e05 100644\n--- a/drivers/net/qede/qede_ethdev.h\n+++ b/drivers/net/qede/qede_ethdev.h\n@@ -19,14 +19,14 @@\n #include \"base/ecore.h\"\n #include \"base/ecore_dev_api.h\"\n #include \"base/ecore_l2_api.h\"\n-#include \"base/ecore_sp_api.h\"\n-#include \"base/ecore_mcp_api.h\"\n+#include \"base/ecore_vf_api.h\"\n #include \"base/ecore_hsi_common.h\"\n #include \"base/ecore_int_api.h\"\n #include \"base/ecore_chain.h\"\n #include \"base/ecore_status.h\"\n #include \"base/ecore_hsi_eth.h\"\n #include \"base/ecore_dev_api.h\"\n+#include \"base/ecore_iov_api.h\"\n \n #include \"qede_logs.h\"\n #include \"qede_if.h\"\ndiff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c\nindex 1f25908..46d4b6c 100644\n--- a/drivers/net/qede/qede_main.c\n+++ b/drivers/net/qede/qede_main.c\n@@ -171,12 +171,14 @@ static int qed_slowpath_start(struct ecore_dev *edev,\n #endif\n \n #ifdef CONFIG_QED_BINARY_FW\n-\trc = qed_load_firmware_data(edev);\n-\tif (rc) {\n-\t\tDP_NOTICE(edev, true,\n-\t\t\t  \"Failed to find fw file %s\\n\",\n-\t\t\t  QEDE_FW_FILE_NAME);\n-\t\tgoto err;\n+\tif (IS_PF(edev)) {\n+\t\trc = qed_load_firmware_data(edev);\n+\t\tif (rc) {\n+\t\t\tDP_NOTICE(edev, true,\n+\t\t\t\t  \"Failed to find fw file %s\\n\",\n+\t\t\t\t  QEDE_FW_FILE_NAME);\n+\t\t\tgoto err;\n+\t\t}\n \t}\n #endif\n \n@@ -188,17 +190,20 @@ static int qed_slowpath_start(struct ecore_dev *edev,\n \tedev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;\n \n \t/* Should go with CONFIG_QED_BINARY_FW */\n-\t/* Allocate stream for unzipping */\n-\trc = qed_alloc_stream_mem(edev);\n-\tif (rc) {\n-\t\tDP_NOTICE(edev, true,\n-\t\t\"Failed to allocate stream memory\\n\");\n-\t\tgoto err2;\n+\tif (IS_PF(edev)) {\n+\t\t/* Allocate stream for unzipping */\n+\t\trc = qed_alloc_stream_mem(edev);\n+\t\tif (rc) {\n+\t\t\tDP_NOTICE(edev, true,\n+\t\t\t\"Failed to allocate stream memory\\n\");\n+\t\t\tgoto err2;\n+\t\t}\n \t}\n \n \t/* Start the slowpath */\n #ifdef CONFIG_QED_BINARY_FW\n-\tdata = edev->firmware;\n+\tif (IS_PF(edev))\n+\t\tdata = edev->firmware;\n #endif\n \tallow_npar_tx_switching = npar_tx_switching ? 
true : false;\n \n@@ -224,19 +229,21 @@ static int qed_slowpath_start(struct ecore_dev *edev,\n \n \tDP_INFO(edev, \"HW inited and function started\\n\");\n \n-\thwfn = ECORE_LEADING_HWFN(edev);\n-\tdrv_version.version = (params->drv_major << 24) |\n+\tif (IS_PF(edev)) {\n+\t\thwfn = ECORE_LEADING_HWFN(edev);\n+\t\tdrv_version.version = (params->drv_major << 24) |\n \t\t    (params->drv_minor << 16) |\n \t\t    (params->drv_rev << 8) | (params->drv_eng);\n-\t/* TBD: strlcpy() */\n-\tstrncpy((char *)drv_version.name, (const char *)params->name,\n+\t\t/* TBD: strlcpy() */\n+\t\tstrncpy((char *)drv_version.name, (const char *)params->name,\n \t\t\tMCP_DRV_VER_STR_SIZE - 4);\n-\trc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,\n+\t\trc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,\n \t\t\t\t\t\t&drv_version);\n-\tif (rc) {\n-\t\tDP_NOTICE(edev, true,\n-\t\t\t  \"Failed sending drv version command\\n\");\n-\t\treturn rc;\n+\t\tif (rc) {\n+\t\t\tDP_NOTICE(edev, true,\n+\t\t\t\t  \"Failed sending drv version command\\n\");\n+\t\t\treturn rc;\n+\t\t}\n \t}\n \n \tecore_reset_vport_stats(edev);\n@@ -248,9 +255,11 @@ err2:\n \tecore_resc_free(edev);\n err:\n #ifdef CONFIG_QED_BINARY_FW\n-\tif (edev->firmware)\n-\t\trte_free(edev->firmware);\n-\tedev->firmware = NULL;\n+\tif (IS_PF(edev)) {\n+\t\tif (edev->firmware)\n+\t\t\trte_free(edev->firmware);\n+\t\tedev->firmware = NULL;\n+\t}\n #endif\n \treturn rc;\n }\n@@ -266,28 +275,38 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)\n \trte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,\n \t       ETHER_ADDR_LEN);\n \n-\tdev_info->fw_major = FW_MAJOR_VERSION;\n-\tdev_info->fw_minor = FW_MINOR_VERSION;\n-\tdev_info->fw_rev = FW_REVISION_VERSION;\n-\tdev_info->fw_eng = FW_ENGINEERING_VERSION;\n-\tdev_info->mf_mode = edev->mf_mode;\n-\tdev_info->tx_switching = tx_switching ? true : false;\n+\tif (IS_PF(edev)) {\n+\t\tdev_info->fw_major = FW_MAJOR_VERSION;\n+\t\tdev_info->fw_minor = FW_MINOR_VERSION;\n+\t\tdev_info->fw_rev = FW_REVISION_VERSION;\n+\t\tdev_info->fw_eng = FW_ENGINEERING_VERSION;\n+\t\tdev_info->mf_mode = edev->mf_mode;\n+\t\tdev_info->tx_switching = tx_switching ? 
true : false;\n+\t} else {\n+\t\tecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major,\n+\t\t\t\t\t&dev_info->fw_minor, &dev_info->fw_rev,\n+\t\t\t\t\t&dev_info->fw_eng);\n+\t}\n \n-\tptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));\n-\tif (ptt) {\n-\t\tecore_mcp_get_mfw_ver(edev, ptt,\n+\tif (IS_PF(edev)) {\n+\t\tptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));\n+\t\tif (ptt) {\n+\t\t\tecore_mcp_get_mfw_ver(edev, ptt,\n \t\t\t\t\t      &dev_info->mfw_rev, NULL);\n \n-\t\tecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,\n+\t\t\tecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,\n \t\t\t\t\t\t &dev_info->flash_size);\n \n-\t\t/* Workaround to allow PHY-read commands for\n-\t\t * B0 bringup.\n-\t\t */\n-\t\tif (ECORE_IS_BB_B0(edev))\n-\t\t\tdev_info->flash_size = 0xffffffff;\n+\t\t\t/* Workaround to allow PHY-read commands for\n+\t\t\t * B0 bringup.\n+\t\t\t */\n+\t\t\tif (ECORE_IS_BB_B0(edev))\n+\t\t\t\tdev_info->flash_size = 0xffffffff;\n \n-\t\tecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);\n+\t\t\tecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);\n+\t\t}\n+\t} else {\n+\t\tecore_mcp_get_mfw_ver(edev, ptt, &dev_info->mfw_rev, NULL);\n \t}\n \n \treturn 0;\n@@ -303,18 +322,31 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)\n \n \tinfo->num_tc = 1 /* @@@TBD aelior MULTI_COS */;\n \n-\tinfo->num_queues = 0;\n-\tfor_each_hwfn(edev, i)\n+\tif (IS_PF(edev)) {\n+\t\tinfo->num_queues = 0;\n+\t\tfor_each_hwfn(edev, i)\n \t\t    info->num_queues +=\n \t\t    FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);\n \n-\tinfo->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN);\n+\t\tinfo->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN);\n \n-\trte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,\n+\t\trte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,\n \t\t\t   ETHER_ADDR_LEN);\n+\t} else {\n+\t\tecore_vf_get_num_rxqs(&edev->hwfns[0], &info->num_queues);\n+\n+\t\tecore_vf_get_num_vlan_filters(&edev->hwfns[0],\n+\t\t\t\t\t      &info->num_vlan_filters);\n+\n+\t\tecore_vf_get_port_mac(&edev->hwfns[0],\n+\t\t\t\t      (uint8_t *) &info->port_mac);\n+\t}\n \n \tqed_fill_dev_info(edev, &info->common);\n \n+\tif (IS_VF(edev))\n+\t\tmemset(&info->common.hw_mac, 0, ETHER_ADDR_LEN);\n+\n \treturn 0;\n }\n \n@@ -376,11 +408,18 @@ static void qed_fill_link(struct ecore_hwfn *hwfn,\n \tmemset(if_link, 0, sizeof(*if_link));\n \n \t/* Prepare source inputs */\n-\trte_memcpy(&params, ecore_mcp_get_link_params(hwfn),\n+\tif (IS_PF(hwfn->p_dev)) {\n+\t\trte_memcpy(&params, ecore_mcp_get_link_params(hwfn),\n \t\t       sizeof(params));\n-\trte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));\n-\trte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),\n+\t\trte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));\n+\t\trte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),\n \t\t       sizeof(link_caps));\n+\t} else {\n+\t\tecore_vf_read_bulletin(hwfn, &change);\n+\t\tecore_vf_get_link_params(hwfn, &params);\n+\t\tecore_vf_get_link_state(hwfn, &link);\n+\t\tecore_vf_get_link_caps(hwfn, &link_caps);\n+\t}\n \n \t/* Set the link parameters to pass to protocol driver */\n \tif (link.link_up)\n@@ -426,6 +465,9 @@ static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)\n \tstruct ecore_mcp_link_params *link_params;\n \tint rc;\n \n+\tif (IS_VF(edev))\n+\t\treturn 0;\n+\n \t/* The link should be set only once per PF */\n \thwfn = &edev->hwfns[0];\n \n@@ -465,6 +507,9 @@ static 
int qed_drain(struct ecore_dev *edev)\n \tstruct ecore_ptt *ptt;\n \tint i, rc;\n \n+\tif (IS_VF(edev))\n+\t\treturn 0;\n+\n \tfor_each_hwfn(edev, i) {\n \t\thwfn = &edev->hwfns[i];\n \t\tptt = ecore_ptt_acquire(hwfn);\n@@ -517,9 +562,15 @@ static int qed_slowpath_stop(struct ecore_dev *edev)\n \tif (!edev)\n \t\treturn -ENODEV;\n \n-\tqed_free_stream_mem(edev);\n+\tif (IS_PF(edev)) {\n+\t\tqed_free_stream_mem(edev);\n \n-\tqed_nic_stop(edev);\n+#ifdef CONFIG_QED_SRIOV\n+\t\tif (IS_QED_ETH_IF(edev))\n+\t\t\tqed_sriov_disable(edev, true);\n+#endif\n+\t\tqed_nic_stop(edev);\n+\t}\n \n \tqed_nic_reset(edev);\n \n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "07/10"
    ]
}