get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).
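
As a minimal sketch of using these methods (an illustration only, assuming Python with the third-party requests library and, for updates, a project maintainer's API token; the token value and target state below are placeholders, not taken from this page):

import requests

API = "https://patches.dpdk.org/api/patches/17314/"

# GET: show the patch; public data needs no authentication.
resp = requests.get(API)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partial update of selected fields, e.g. the state.
# Requires a maintainer API token (placeholder value below).
headers = {"Authorization": "Token 0123456789abcdef"}
update = requests.patch(API, headers=headers, json={"state": "accepted"})
update.raise_for_status()
print(update.json()["state"])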

GET /api/patches/17314/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 17314,
    "url": "https://patches.dpdk.org/api/patches/17314/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1480436367-20749-11-git-send-email-arybchenko@solarflare.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1480436367-20749-11-git-send-email-arybchenko@solarflare.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1480436367-20749-11-git-send-email-arybchenko@solarflare.com",
    "date": "2016-11-29T16:18:42",
    "name": "[dpdk-dev,v2,10/55] net/sfc: import libefx SFN7xxx family support",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "5b521ca722193d81dfb2bc1b9a001f421811196a",
    "submitter": {
        "id": 607,
        "url": "https://patches.dpdk.org/api/people/607/?format=api",
        "name": "Andrew Rybchenko",
        "email": "arybchenko@solarflare.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1480436367-20749-11-git-send-email-arybchenko@solarflare.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/17314/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/17314/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 35F2CFAA1;\n\tTue, 29 Nov 2016 17:22:34 +0100 (CET)",
            "from nbfkord-smmo01.seg.att.com (nbfkord-smmo01.seg.att.com\n\t[209.65.160.76]) by dpdk.org (Postfix) with ESMTP id 6480A5583\n\tfor <dev@dpdk.org>; Tue, 29 Nov 2016 17:20:52 +0100 (CET)",
            "from unknown [12.187.104.26] (EHLO nbfkord-smmo01.seg.att.com)\n\tby nbfkord-smmo01.seg.att.com(mxl_mta-7.2.4-7) with ESMTP id\n\t4eaad385.2b3e4f204940.83579.00-2489.173893.nbfkord-smmo01.seg.att.com\n\t(envelope-from <arybchenko@solarflare.com>); \n\tTue, 29 Nov 2016 16:20:52 +0000 (UTC)",
            "from unknown [12.187.104.26]\n\tby nbfkord-smmo01.seg.att.com(mxl_mta-7.2.4-7) with SMTP id\n\tfdaad385.0.83400.00-2358.173788.nbfkord-smmo01.seg.att.com\n\t(envelope-from <arybchenko@solarflare.com>); \n\tTue, 29 Nov 2016 16:20:48 +0000 (UTC)",
            "from ocex03.SolarFlarecom.com (10.20.40.36) by\n\tocex03.SolarFlarecom.com (10.20.40.36) with Microsoft SMTP Server\n\t(TLS) id 15.0.1044.25; Tue, 29 Nov 2016 08:20:25 -0800",
            "from opal.uk.solarflarecom.com (10.17.10.1) by\n\tocex03.SolarFlarecom.com (10.20.40.36) with Microsoft SMTP Server\n\t(TLS) id\n\t15.0.1044.25 via Frontend Transport; Tue, 29 Nov 2016 08:20:24 -0800",
            "from uklogin.uk.solarflarecom.com (uklogin.uk.solarflarecom.com\n\t[10.17.10.10])\n\tby opal.uk.solarflarecom.com (8.13.8/8.13.8) with ESMTP id\n\tuATGKNY4029923; Tue, 29 Nov 2016 16:20:23 GMT",
            "from uklogin.uk.solarflarecom.com (localhost.localdomain\n\t[127.0.0.1])\n\tby uklogin.uk.solarflarecom.com (8.13.8/8.13.8) with ESMTP id\n\tuATGKM1I021233; Tue, 29 Nov 2016 16:20:23 GMT"
        ],
        "X-MXL-Hash": [
            "583daae42e70a768-d7aa7da046e8b1b07473ca860ba352341339c53a",
            "583daae07d2ed8bf-87c567d8aa7ab1089c9a0bf99c9f6f77a5eefc72"
        ],
        "From": "Andrew Rybchenko <arybchenko@solarflare.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<ferruh.yigit@intel.com>",
        "Date": "Tue, 29 Nov 2016 16:18:42 +0000",
        "Message-ID": "<1480436367-20749-11-git-send-email-arybchenko@solarflare.com>",
        "X-Mailer": "git-send-email 1.8.2.3",
        "In-Reply-To": "<1480436367-20749-1-git-send-email-arybchenko@solarflare.com>",
        "References": "<1479740470-6723-1-git-send-email-arybchenko@solarflare.com>\n\t<1480436367-20749-1-git-send-email-arybchenko@solarflare.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-AnalysisOut": [
            "[v=2.1 cv=UoJlQrEB c=1 sm=1 tr=0 a=8BlWFWvVlq5taO8ncb8nKg==]",
            "[:17 a=L24OOQBejmoA:10 a=zRKbQ67AAAAA:8 a=mUuAItkuXXrDV54Ka]",
            "[RsA:9 a=7Zwj6sZBwVKJAoWSPKxL6X1jA+E=:19 a=fgkPLIpnakGBFbTP]",
            "[:21 a=aD9_xCxGz2wJfzCp:21 a=_1Oq16MHKujymDo1:21 a=AkPqwACD]",
            "[X4TV8K8X:21 a=PA03WX8tBzeizutn5_OT:22]"
        ],
        "X-Spam": "[F=0.2000000000; CM=0.500; S=0.200(2015072901)]",
        "X-MAIL-FROM": "<arybchenko@solarflare.com>",
        "X-SOURCE-IP": "[12.187.104.26]",
        "Subject": "[dpdk-dev] [PATCH v2 10/55] net/sfc: import libefx SFN7xxx family\n\tsupport",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "SFN7xxx is the first family based on EF10 architecture.\n\nEFSYS_OPT_HUNTINGTON should be enabled to use it.\n\nFrom Solarflare Communications Inc.\n\nSigned-off-by: Andrew Rybchenko <arybchenko@solarflare.com>\n---\n drivers/net/sfc/base/ef10_ev.c         | 1226 ++++++++++++++++++++++\n drivers/net/sfc/base/ef10_filter.c     | 1469 ++++++++++++++++++++++++++\n drivers/net/sfc/base/ef10_impl.h       |  718 +++++++++++++\n drivers/net/sfc/base/ef10_intr.c       |  197 ++++\n drivers/net/sfc/base/ef10_mac.c        |  446 ++++++++\n drivers/net/sfc/base/ef10_mcdi.c       |  342 ++++++\n drivers/net/sfc/base/ef10_nic.c        | 1769 ++++++++++++++++++++++++++++++++\n drivers/net/sfc/base/ef10_phy.c        |  393 +++++++\n drivers/net/sfc/base/ef10_rx.c         |  397 +++++++\n drivers/net/sfc/base/ef10_tlv_layout.h |  941 +++++++++++++++++\n drivers/net/sfc/base/ef10_tx.c         |  683 ++++++++++++\n drivers/net/sfc/base/efx.h             |   10 +\n drivers/net/sfc/base/efx_check.h       |    6 +\n drivers/net/sfc/base/efx_ev.c          |   18 +\n drivers/net/sfc/base/efx_filter.c      |   18 +\n drivers/net/sfc/base/efx_impl.h        |   36 +\n drivers/net/sfc/base/efx_intr.c        |   20 +\n drivers/net/sfc/base/efx_mac.c         |   22 +\n drivers/net/sfc/base/efx_mcdi.c        |  122 +++\n drivers/net/sfc/base/efx_nic.c         |   53 +\n drivers/net/sfc/base/efx_phy.c         |   15 +\n drivers/net/sfc/base/efx_regs_ef10.h   |  571 +++++++++++\n drivers/net/sfc/base/efx_rx.c          |   20 +\n drivers/net/sfc/base/efx_sram.c        |   32 +\n drivers/net/sfc/base/efx_tx.c          |   29 +\n drivers/net/sfc/base/hunt_impl.h       |   74 ++\n drivers/net/sfc/base/hunt_nic.c        |  395 +++++++\n 27 files changed, 10022 insertions(+)\n create mode 100644 drivers/net/sfc/base/ef10_ev.c\n create mode 100644 drivers/net/sfc/base/ef10_filter.c\n create mode 100644 drivers/net/sfc/base/ef10_impl.h\n create mode 100644 drivers/net/sfc/base/ef10_intr.c\n create mode 100644 drivers/net/sfc/base/ef10_mac.c\n create mode 100644 drivers/net/sfc/base/ef10_mcdi.c\n create mode 100644 drivers/net/sfc/base/ef10_nic.c\n create mode 100644 drivers/net/sfc/base/ef10_phy.c\n create mode 100644 drivers/net/sfc/base/ef10_rx.c\n create mode 100644 drivers/net/sfc/base/ef10_tlv_layout.h\n create mode 100644 drivers/net/sfc/base/ef10_tx.c\n create mode 100644 drivers/net/sfc/base/efx_regs_ef10.h\n create mode 100644 drivers/net/sfc/base/hunt_impl.h\n create mode 100644 drivers/net/sfc/base/hunt_nic.c",
    "diff": "diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c\nnew file mode 100644\nindex 0000000..46ecd42\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_ev.c\n@@ -0,0 +1,1226 @@\n+/*\n+ * Copyright (c) 2012-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#include \"efx.h\"\n+#include \"efx_impl.h\"\n+\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+#define\tEFX_EV_QSTAT_INCR(_eep, _stat)\n+\n+/*\n+ * Non-interrupting event queue requires interrrupting event queue to\n+ * refer to for wake-up events even if wake ups are never used.\n+ * It could be even non-allocated event queue.\n+ */\n+#define\tEFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX\t(0)\n+\n+static\t__checkReturn\tboolean_t\n+ef10_ev_rx(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__in\t\tconst efx_ev_callbacks_t *eecp,\n+\t__in_opt\tvoid *arg);\n+\n+static\t__checkReturn\tboolean_t\n+ef10_ev_tx(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__in\t\tconst efx_ev_callbacks_t *eecp,\n+\t__in_opt\tvoid *arg);\n+\n+static\t__checkReturn\tboolean_t\n+ef10_ev_driver(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__in\t\tconst efx_ev_callbacks_t *eecp,\n+\t__in_opt\tvoid *arg);\n+\n+static\t__checkReturn\tboolean_t\n+ef10_ev_drv_gen(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__in\t\tconst efx_ev_callbacks_t *eecp,\n+\t__in_opt\tvoid *arg);\n+\n+static\t__checkReturn\tboolean_t\n+ef10_ev_mcdi(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__in\t\tconst efx_ev_callbacks_t *eecp,\n+\t__in_opt\tvoid *arg);\n+\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_set_evq_tmr(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t instance,\n+\t__in\t\tuint32_t mode,\n+\t__in\t\tuint32_t timer_ns)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,\n+\t\t\t    MC_CMD_SET_EVQ_TMR_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, 
sizeof (payload));\n+\treq.emr_cmd = MC_CMD_SET_EVQ_TMR;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);\n+\tMCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);\n+\tMCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);\n+\tMCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_init_evq(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int instance,\n+\t__in\t\tefsys_mem_t *esmp,\n+\t__in\t\tsize_t nevs,\n+\t__in\t\tuint32_t irq,\n+\t__in\t\tuint32_t us,\n+\t__in\t\tuint32_t flags,\n+\t__in\t\tboolean_t low_latency)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[\n+\t    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),\n+\t\tMC_CMD_INIT_EVQ_OUT_LEN)];\n+\tefx_qword_t *dma_addr;\n+\tuint64_t addr;\n+\tint npages;\n+\tint i;\n+\tboolean_t interrupting;\n+\tint ev_cut_through;\n+\tefx_rc_t rc;\n+\n+\tnpages = EFX_EVQ_NBUFS(nevs);\n+\tif (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_INIT_EVQ;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);\n+\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);\n+\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);\n+\n+\tinterrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==\n+\t    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);\n+\n+\t/*\n+\t * On Huntington RX and TX event batching can only be requested together\n+\t * (even if the datapath firmware doesn't actually support RX\n+\t * batching). If event cut through is enabled no RX batching will occur.\n+\t *\n+\t * So always enable RX and TX event batching, and enable event cut\n+\t * through if we want low latency operation.\n+\t */\n+\tswitch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {\n+\tcase EFX_EVQ_FLAGS_TYPE_AUTO:\n+\t\tev_cut_through = low_latency ? 
1 : 0;\n+\t\tbreak;\n+\tcase EFX_EVQ_FLAGS_TYPE_THROUGHPUT:\n+\t\tev_cut_through = 0;\n+\t\tbreak;\n+\tcase EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:\n+\t\tev_cut_through = 1;\n+\t\tbreak;\n+\tdefault:\n+\t\trc = EINVAL;\n+\t\tgoto fail2;\n+\t}\n+\tMCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,\n+\t    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,\n+\t    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,\n+\t    INIT_EVQ_IN_FLAG_INT_ARMD, 0,\n+\t    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,\n+\t    INIT_EVQ_IN_FLAG_RX_MERGE, 1,\n+\t    INIT_EVQ_IN_FLAG_TX_MERGE, 1);\n+\n+\t/* If the value is zero then disable the timer */\n+\tif (us == 0) {\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,\n+\t\t    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);\n+\t} else {\n+\t\tunsigned int ticks;\n+\n+\t\tif ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)\n+\t\t\tgoto fail3;\n+\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,\n+\t\t    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);\n+\t}\n+\n+\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,\n+\t    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);\n+\tMCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);\n+\n+\tdma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);\n+\taddr = EFSYS_MEM_ADDR(esmp);\n+\n+\tfor (i = 0; i < npages; i++) {\n+\t\tEFX_POPULATE_QWORD_2(*dma_addr,\n+\t\t    EFX_DWORD_1, (uint32_t)(addr >> 32),\n+\t\t    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));\n+\n+\t\tdma_addr++;\n+\t\taddr += EFX_BUF_SIZE;\n+\t}\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail4;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail5;\n+\t}\n+\n+\t/* NOTE: ignore the returned IRQ param as firmware does not set it. 
*/\n+\n+\treturn (0);\n+\n+fail5:\n+\tEFSYS_PROBE(fail5);\n+fail4:\n+\tEFSYS_PROBE(fail4);\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_init_evq_v2(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int instance,\n+\t__in\t\tefsys_mem_t *esmp,\n+\t__in\t\tsize_t nevs,\n+\t__in\t\tuint32_t irq,\n+\t__in\t\tuint32_t us,\n+\t__in\t\tuint32_t flags)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[\n+\t\tMAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),\n+\t\t    MC_CMD_INIT_EVQ_V2_OUT_LEN)];\n+\tboolean_t interrupting;\n+\tunsigned int evq_type;\n+\tefx_qword_t *dma_addr;\n+\tuint64_t addr;\n+\tint npages;\n+\tint i;\n+\tefx_rc_t rc;\n+\n+\tnpages = EFX_EVQ_NBUFS(nevs);\n+\tif (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_INIT_EVQ;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);\n+\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);\n+\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);\n+\n+\tinterrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==\n+\t    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);\n+\n+\tswitch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {\n+\tcase EFX_EVQ_FLAGS_TYPE_AUTO:\n+\t\tevq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;\n+\t\tbreak;\n+\tcase EFX_EVQ_FLAGS_TYPE_THROUGHPUT:\n+\t\tevq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;\n+\t\tbreak;\n+\tcase EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:\n+\t\tevq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;\n+\t\tbreak;\n+\tdefault:\n+\t\trc = EINVAL;\n+\t\tgoto fail2;\n+\t}\n+\tMCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,\n+\t    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,\n+\t    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,\n+\t    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,\n+\t    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);\n+\n+\t/* If the value is zero then disable the timer */\n+\tif (us == 0) {\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,\n+\t\t    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);\n+\t} else {\n+\t\tunsigned int ticks;\n+\n+\t\tif ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)\n+\t\t\tgoto fail3;\n+\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,\n+\t\t    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);\n+\t\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);\n+\t}\n+\n+\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,\n+\t    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);\n+\tMCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);\n+\n+\tdma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);\n+\taddr = EFSYS_MEM_ADDR(esmp);\n+\n+\tfor (i = 0; i < npages; i++) {\n+\t\tEFX_POPULATE_QWORD_2(*dma_addr,\n+\t\t    EFX_DWORD_1, (uint32_t)(addr >> 32),\n+\t\t    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));\n+\n+\t\tdma_addr++;\n+\t\taddr += EFX_BUF_SIZE;\n+\t}\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail4;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto 
fail5;\n+\t}\n+\n+\t/* NOTE: ignore the returned IRQ param as firmware does not set it. */\n+\n+\tEFSYS_PROBE1(mcdi_evq_flags, uint32_t,\n+\t\t    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));\n+\n+\treturn (0);\n+\n+fail5:\n+\tEFSYS_PROBE(fail5);\n+fail4:\n+\tEFSYS_PROBE(fail4);\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_fini_evq(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t instance)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,\n+\t\t\t    MC_CMD_FINI_EVQ_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_FINI_EVQ;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);\n+\n+\tefx_mcdi_execute_quiet(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_ev_init(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\t_NOTE(ARGUNUSED(enp))\n+\treturn (0);\n+}\n+\n+\t\t\tvoid\n+ef10_ev_fini(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\t_NOTE(ARGUNUSED(enp))\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_ev_qcreate(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int index,\n+\t__in\t\tefsys_mem_t *esmp,\n+\t__in\t\tsize_t n,\n+\t__in\t\tuint32_t id,\n+\t__in\t\tuint32_t us,\n+\t__in\t\tuint32_t flags,\n+\t__in\t\tefx_evq_t *eep)\n+{\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tuint32_t irq;\n+\tefx_rc_t rc;\n+\n+\t_NOTE(ARGUNUSED(id))\t/* buftbl id managed by MC */\n+\tEFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));\n+\tEFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));\n+\n+\tif (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (index >= encp->enc_evq_limit) {\n+\t\trc = EINVAL;\n+\t\tgoto fail2;\n+\t}\n+\n+\tif (us > encp->enc_evq_timer_max_us) {\n+\t\trc = EINVAL;\n+\t\tgoto fail3;\n+\t}\n+\n+\t/* Set up the handler table */\n+\teep->ee_rx\t= ef10_ev_rx;\n+\teep->ee_tx\t= ef10_ev_tx;\n+\teep->ee_driver\t= ef10_ev_driver;\n+\teep->ee_drv_gen\t= ef10_ev_drv_gen;\n+\teep->ee_mcdi\t= ef10_ev_mcdi;\n+\n+\t/* Set up the event queue */\n+\t/* INIT_EVQ expects function-relative vector number */\n+\tif ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==\n+\t    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {\n+\t\tirq = index;\n+\t} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {\n+\t\tirq = index;\n+\t\tflags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |\n+\t\t    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;\n+\t} else {\n+\t\tirq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;\n+\t}\n+\n+\t/*\n+\t * Interrupts may be raised for events immediately after the queue is\n+\t * created. See bug58606.\n+\t */\n+\n+\tif (encp->enc_init_evq_v2_supported) {\n+\t\t/*\n+\t\t * On Medford the low latency license is required to enable RX\n+\t\t * and event cut through and to disable RX batching.  If event\n+\t\t * queue type in flags is auto, we let the firmware decide the\n+\t\t * settings to use. 
If the adapter has a low latency license,\n+\t\t * it will choose the best settings for low latency, otherwise\n+\t\t * it will choose the best settings for throughput.\n+\t\t */\n+\t\trc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);\n+\t\tif (rc != 0)\n+\t\t\tgoto fail4;\n+\t} else {\n+\t\t/*\n+\t\t * On Huntington we need to specify the settings to use.\n+\t\t * If event queue type in flags is auto, we favour throughput\n+\t\t * if the adapter is running virtualization supporting firmware\n+\t\t * (i.e. the full featured firmware variant)\n+\t\t * and latency otherwise. The Ethernet Virtual Bridging\n+\t\t * capability is used to make this decision. (Note though that\n+\t\t * the low latency firmware variant is also best for\n+\t\t * throughput and corresponding type should be specified\n+\t\t * to choose it.)\n+\t\t */\n+\t\tboolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;\n+\t\trc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,\n+\t\t    low_latency);\n+\t\tif (rc != 0)\n+\t\t\tgoto fail5;\n+\t}\n+\n+\treturn (0);\n+\n+fail5:\n+\tEFSYS_PROBE(fail5);\n+fail4:\n+\tEFSYS_PROBE(fail4);\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\t\tvoid\n+ef10_ev_qdestroy(\n+\t__in\t\tefx_evq_t *eep)\n+{\n+\tefx_nic_t *enp = eep->ee_enp;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t(void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_ev_qprime(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tunsigned int count)\n+{\n+\tefx_nic_t *enp = eep->ee_enp;\n+\tuint32_t rptr;\n+\tefx_dword_t dword;\n+\n+\trptr = count & eep->ee_mask;\n+\n+\tif (enp->en_nic_cfg.enc_bug35388_workaround) {\n+\t\tEFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >\n+\t\t    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));\n+\t\tEFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <\n+\t\t    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));\n+\n+\t\tEFX_POPULATE_DWORD_2(dword,\n+\t\t    ERF_DD_EVQ_IND_RPTR_FLAGS,\n+\t\t    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,\n+\t\t    ERF_DD_EVQ_IND_RPTR,\n+\t\t    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));\n+\t\tEFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,\n+\t\t    &dword, B_FALSE);\n+\n+\t\tEFX_POPULATE_DWORD_2(dword,\n+\t\t    ERF_DD_EVQ_IND_RPTR_FLAGS,\n+\t\t    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,\n+\t\t    ERF_DD_EVQ_IND_RPTR,\n+\t\t    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));\n+\t\tEFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,\n+\t\t    &dword, B_FALSE);\n+\t} else {\n+\t\tEFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);\n+\t\tEFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,\n+\t\t    &dword, B_FALSE);\n+\t}\n+\n+\treturn (0);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_driver_event(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t evq,\n+\t__in\t\tefx_qword_t data)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,\n+\t\t\t    MC_CMD_DRIVER_EVENT_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\treq.emr_cmd = MC_CMD_DRIVER_EVENT;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);\n+\n+\tMCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,\n+\t    EFX_QWORD_FIELD(data, EFX_DWORD_0));\n+\tMCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,\n+\t    EFX_QWORD_FIELD(data, 
EFX_DWORD_1));\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\t\tvoid\n+ef10_ev_qpost(\n+\t__in\tefx_evq_t *eep,\n+\t__in\tuint16_t data)\n+{\n+\tefx_nic_t *enp = eep->ee_enp;\n+\tefx_qword_t event;\n+\n+\tEFX_POPULATE_QWORD_3(event,\n+\t    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,\n+\t    ESF_DZ_DRV_SUB_CODE, 0,\n+\t    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);\n+\n+\t(void) efx_mcdi_driver_event(enp, eep->ee_index, event);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_ev_qmoderate(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tunsigned int us)\n+{\n+\tefx_nic_t *enp = eep->ee_enp;\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tefx_dword_t dword;\n+\tuint32_t mode;\n+\tefx_rc_t rc;\n+\n+\t/* Check that hardware and MCDI use the same timer MODE values */\n+\tEFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==\n+\t    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);\n+\tEFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==\n+\t    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);\n+\tEFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==\n+\t    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);\n+\tEFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==\n+\t    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);\n+\n+\tif (us > encp->enc_evq_timer_max_us) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t/* If the value is zero then disable the timer */\n+\tif (us == 0) {\n+\t\tmode = FFE_CZ_TIMER_MODE_DIS;\n+\t} else {\n+\t\tmode = FFE_CZ_TIMER_MODE_INT_HLDOFF;\n+\t}\n+\n+\tif (encp->enc_bug61265_workaround) {\n+\t\tuint32_t ns = us * 1000;\n+\n+\t\trc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);\n+\t\tif (rc != 0)\n+\t\t\tgoto fail2;\n+\t} else {\n+\t\tunsigned int ticks;\n+\n+\t\tif ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)\n+\t\t\tgoto fail3;\n+\n+\t\tif (encp->enc_bug35388_workaround) {\n+\t\t\tEFX_POPULATE_DWORD_3(dword,\n+\t\t\t    ERF_DD_EVQ_IND_TIMER_FLAGS,\n+\t\t\t    EFE_DD_EVQ_IND_TIMER_FLAGS,\n+\t\t\t    ERF_DD_EVQ_IND_TIMER_MODE, mode,\n+\t\t\t    ERF_DD_EVQ_IND_TIMER_VAL, ticks);\n+\t\t\tEFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,\n+\t\t\t    eep->ee_index, &dword, 0);\n+\t\t} else {\n+\t\t\tEFX_POPULATE_DWORD_2(dword,\n+\t\t\t    ERF_DZ_TC_TIMER_MODE, mode,\n+\t\t\t    ERF_DZ_TC_TIMER_VAL, ticks);\n+\t\t\tEFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,\n+\t\t\t    eep->ee_index, &dword, 0);\n+\t\t}\n+\t}\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+static\t__checkReturn\tboolean_t\n+ef10_ev_rx(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__in\t\tconst efx_ev_callbacks_t *eecp,\n+\t__in_opt\tvoid *arg)\n+{\n+\tefx_nic_t *enp = eep->ee_enp;\n+\tuint32_t size;\n+\tuint32_t label;\n+\tuint32_t mac_class;\n+\tuint32_t eth_tag_class;\n+\tuint32_t l3_class;\n+\tuint32_t l4_class;\n+\tuint32_t next_read_lbits;\n+\tuint16_t flags;\n+\tboolean_t cont;\n+\tboolean_t should_abort;\n+\tefx_evq_rxq_state_t *eersp;\n+\tunsigned int desc_count;\n+\tunsigned int last_used_id;\n+\n+\tEFX_EV_QSTAT_INCR(eep, EV_RX);\n+\n+\t/* Discard events after RXQ/TXQ errors */\n+\tif (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))\n+\t\treturn (B_FALSE);\n+\n+\t/* Basic packet information */\n+\tlabel = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);\n+\teersp = &eep->ee_rxq_state[label];\n+\n+\tsize = EFX_QWORD_FIELD(*eqp, 
ESF_DZ_RX_BYTES);\n+\tnext_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);\n+\teth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);\n+\tmac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);\n+\tl3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);\n+\tl4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);\n+\tcont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);\n+\n+\tif (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {\n+\t\t/* Drop this event */\n+\t\treturn (B_FALSE);\n+\t}\n+\tflags = 0;\n+\n+\tif (cont != 0) {\n+\t\t/*\n+\t\t * This may be part of a scattered frame, or it may be a\n+\t\t * truncated frame if scatter is disabled on this RXQ.\n+\t\t * Overlength frames can be received if e.g. a VF is configured\n+\t\t * for 1500 MTU but connected to a port set to 9000 MTU\n+\t\t * (see bug56567).\n+\t\t * FIXME: There is not yet any driver that supports scatter on\n+\t\t * Huntington.  Scatter support is required for OSX.\n+\t\t */\n+\t\tflags |= EFX_PKT_CONT;\n+\t}\n+\n+\tif (mac_class == ESE_DZ_MAC_CLASS_UCAST)\n+\t\tflags |= EFX_PKT_UNICAST;\n+\n+\t/* Increment the count of descriptors read */\n+\tdesc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &\n+\t    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);\n+\teersp->eers_rx_read_ptr += desc_count;\n+\n+\t/*\n+\t * FIXME: add error checking to make sure this a batched event.\n+\t * This could also be an aborted scatter, see Bug36629.\n+\t */\n+\tif (desc_count > 1) {\n+\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);\n+\t\tflags |= EFX_PKT_PREFIX_LEN;\n+\t}\n+\n+\t/* Calculate the index of the last descriptor consumed */\n+\tlast_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;\n+\n+\t/* Check for errors that invalidate checksum and L3/L4 fields */\n+\tif (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {\n+\t\t/* RX frame truncated (error flag is misnamed) */\n+\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);\n+\t\tflags |= EFX_DISCARD;\n+\t\tgoto deliver;\n+\t}\n+\tif (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {\n+\t\t/* Bad Ethernet frame CRC */\n+\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);\n+\t\tflags |= EFX_DISCARD;\n+\t\tgoto deliver;\n+\t}\n+\tif (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {\n+\t\t/*\n+\t\t * Hardware parse failed, due to malformed headers\n+\t\t * or headers that are too long for the parser.\n+\t\t * Headers and checksums must be validated by the host.\n+\t\t */\n+\t\t/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */\n+\t\tgoto deliver;\n+\t}\n+\n+\tif ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||\n+\t    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {\n+\t\tflags |= EFX_PKT_VLAN_TAGGED;\n+\t}\n+\n+\tswitch (l3_class) {\n+\tcase ESE_DZ_L3_CLASS_IP4:\n+\tcase ESE_DZ_L3_CLASS_IP4_FRAG:\n+\t\tflags |= EFX_PKT_IPV4;\n+\t\tif (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {\n+\t\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);\n+\t\t} else {\n+\t\t\tflags |= EFX_CKSUM_IPV4;\n+\t\t}\n+\n+\t\tif (l4_class == ESE_DZ_L4_CLASS_TCP) {\n+\t\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);\n+\t\t\tflags |= EFX_PKT_TCP;\n+\t\t} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {\n+\t\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);\n+\t\t\tflags |= EFX_PKT_UDP;\n+\t\t} else {\n+\t\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);\n+\t\t}\n+\t\tbreak;\n+\n+\tcase ESE_DZ_L3_CLASS_IP6:\n+\tcase ESE_DZ_L3_CLASS_IP6_FRAG:\n+\t\tflags |= EFX_PKT_IPV6;\n+\n+\t\tif (l4_class == ESE_DZ_L4_CLASS_TCP) {\n+\t\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);\n+\t\t\tflags |= EFX_PKT_TCP;\n+\t\t} else if (l4_class 
== ESE_DZ_L4_CLASS_UDP) {\n+\t\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);\n+\t\t\tflags |= EFX_PKT_UDP;\n+\t\t} else {\n+\t\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);\n+\t\t}\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);\n+\t\tbreak;\n+\t}\n+\n+\tif (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {\n+\t\tif (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {\n+\t\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);\n+\t\t} else {\n+\t\t\tflags |= EFX_CKSUM_TCPUDP;\n+\t\t}\n+\t}\n+\n+deliver:\n+\t/* If we're not discarding the packet then it is ok */\n+\tif (~flags & EFX_DISCARD)\n+\t\tEFX_EV_QSTAT_INCR(eep, EV_RX_OK);\n+\n+\tEFSYS_ASSERT(eecp->eec_rx != NULL);\n+\tshould_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);\n+\n+\treturn (should_abort);\n+}\n+\n+static\t__checkReturn\tboolean_t\n+ef10_ev_tx(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__in\t\tconst efx_ev_callbacks_t *eecp,\n+\t__in_opt\tvoid *arg)\n+{\n+\tefx_nic_t *enp = eep->ee_enp;\n+\tuint32_t id;\n+\tuint32_t label;\n+\tboolean_t should_abort;\n+\n+\tEFX_EV_QSTAT_INCR(eep, EV_TX);\n+\n+\t/* Discard events after RXQ/TXQ errors */\n+\tif (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))\n+\t\treturn (B_FALSE);\n+\n+\tif (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {\n+\t\t/* Drop this event */\n+\t\treturn (B_FALSE);\n+\t}\n+\n+\t/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */\n+\tid = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);\n+\tlabel = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);\n+\n+\tEFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);\n+\n+\tEFSYS_ASSERT(eecp->eec_tx != NULL);\n+\tshould_abort = eecp->eec_tx(arg, label, id);\n+\n+\treturn (should_abort);\n+}\n+\n+static\t__checkReturn\tboolean_t\n+ef10_ev_driver(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__in\t\tconst efx_ev_callbacks_t *eecp,\n+\t__in_opt\tvoid *arg)\n+{\n+\tunsigned int code;\n+\tboolean_t should_abort;\n+\n+\tEFX_EV_QSTAT_INCR(eep, EV_DRIVER);\n+\tshould_abort = B_FALSE;\n+\n+\tcode = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);\n+\tswitch (code) {\n+\tcase ESE_DZ_DRV_TIMER_EV: {\n+\t\tuint32_t id;\n+\n+\t\tid = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);\n+\n+\t\tEFSYS_ASSERT(eecp->eec_timer != NULL);\n+\t\tshould_abort = eecp->eec_timer(arg, id);\n+\t\tbreak;\n+\t}\n+\n+\tcase ESE_DZ_DRV_WAKE_UP_EV: {\n+\t\tuint32_t id;\n+\n+\t\tid = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);\n+\n+\t\tEFSYS_ASSERT(eecp->eec_wake_up != NULL);\n+\t\tshould_abort = eecp->eec_wake_up(arg, id);\n+\t\tbreak;\n+\t}\n+\n+\tcase ESE_DZ_DRV_START_UP_EV:\n+\t\tEFSYS_ASSERT(eecp->eec_initialized != NULL);\n+\t\tshould_abort = eecp->eec_initialized(arg);\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tEFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,\n+\t\t    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),\n+\t\t    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));\n+\t\tbreak;\n+\t}\n+\n+\treturn (should_abort);\n+}\n+\n+static\t__checkReturn\tboolean_t\n+ef10_ev_drv_gen(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__in\t\tconst efx_ev_callbacks_t *eecp,\n+\t__in_opt\tvoid *arg)\n+{\n+\tuint32_t data;\n+\tboolean_t should_abort;\n+\n+\tEFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);\n+\tshould_abort = B_FALSE;\n+\n+\tdata = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);\n+\tif (data >= ((uint32_t)1 << 16)) {\n+\t\tEFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,\n+\t\t    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),\n+\t\t    uint32_t, EFX_QWORD_FIELD(*eqp, 
EFX_DWORD_0));\n+\n+\t\treturn (B_TRUE);\n+\t}\n+\n+\tEFSYS_ASSERT(eecp->eec_software != NULL);\n+\tshould_abort = eecp->eec_software(arg, (uint16_t)data);\n+\n+\treturn (should_abort);\n+}\n+\n+static\t__checkReturn\tboolean_t\n+ef10_ev_mcdi(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__in\t\tconst efx_ev_callbacks_t *eecp,\n+\t__in_opt\tvoid *arg)\n+{\n+\tefx_nic_t *enp = eep->ee_enp;\n+\tunsigned int code;\n+\tboolean_t should_abort = B_FALSE;\n+\n+\tEFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);\n+\n+\tcode = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);\n+\tswitch (code) {\n+\tcase MCDI_EVENT_CODE_BADSSERT:\n+\t\tefx_mcdi_ev_death(enp, EINTR);\n+\t\tbreak;\n+\n+\tcase MCDI_EVENT_CODE_CMDDONE:\n+\t\tefx_mcdi_ev_cpl(enp,\n+\t\t    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),\n+\t\t    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),\n+\t\t    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));\n+\t\tbreak;\n+\n+#if EFSYS_OPT_MCDI_PROXY_AUTH\n+\tcase MCDI_EVENT_CODE_PROXY_RESPONSE:\n+\t\t/*\n+\t\t * This event notifies a function that an authorization request\n+\t\t * has been processed. If the request was authorized then the\n+\t\t * function can now re-send the original MCDI request.\n+\t\t * See SF-113652-SW \"SR-IOV Proxied Network Access Control\".\n+\t\t */\n+\t\tefx_mcdi_ev_proxy_response(enp,\n+\t\t    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),\n+\t\t    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));\n+\t\tbreak;\n+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */\n+\n+\tcase MCDI_EVENT_CODE_LINKCHANGE: {\n+\t\tefx_link_mode_t link_mode;\n+\n+\t\tef10_phy_link_ev(enp, eqp, &link_mode);\n+\t\tshould_abort = eecp->eec_link_change(arg, link_mode);\n+\t\tbreak;\n+\t}\n+\n+\tcase MCDI_EVENT_CODE_SENSOREVT: {\n+\t\tbreak;\n+\t}\n+\n+\tcase MCDI_EVENT_CODE_SCHEDERR:\n+\t\t/* Informational only */\n+\t\tbreak;\n+\n+\tcase MCDI_EVENT_CODE_REBOOT:\n+\t\t/* Falcon/Siena only (should not been seen with Huntington). */\n+\t\tefx_mcdi_ev_death(enp, EIO);\n+\t\tbreak;\n+\n+\tcase MCDI_EVENT_CODE_MC_REBOOT:\n+\t\t/* MC_REBOOT event is used for Huntington (EF10) and later. */\n+\t\tefx_mcdi_ev_death(enp, EIO);\n+\t\tbreak;\n+\n+\tcase MCDI_EVENT_CODE_MAC_STATS_DMA:\n+\t\tbreak;\n+\n+\tcase MCDI_EVENT_CODE_FWALERT: {\n+\t\tuint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);\n+\n+\t\tif (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)\n+\t\t\tshould_abort = eecp->eec_exception(arg,\n+\t\t\t\tEFX_EXCEPTION_FWALERT_SRAM,\n+\t\t\t\tMCDI_EV_FIELD(eqp, FWALERT_DATA));\n+\t\telse\n+\t\t\tshould_abort = eecp->eec_exception(arg,\n+\t\t\t\tEFX_EXCEPTION_UNKNOWN_FWALERT,\n+\t\t\t\tMCDI_EV_FIELD(eqp, DATA));\n+\t\tbreak;\n+\t}\n+\n+\tcase MCDI_EVENT_CODE_TX_ERR: {\n+\t\t/*\n+\t\t * After a TXQ error is detected, firmware sends a TX_ERR event.\n+\t\t * This may be followed by TX completions (which we discard),\n+\t\t * and then finally by a TX_FLUSH event. Firmware destroys the\n+\t\t * TXQ automatically after sending the TX_FLUSH event.\n+\t\t */\n+\t\tenp->en_reset_flags |= EFX_RESET_TXQ_ERR;\n+\n+\t\tEFSYS_PROBE2(tx_descq_err,\n+\t\t\t    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),\n+\t\t\t    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));\n+\n+\t\t/* Inform the driver that a reset is required. 
*/\n+\t\teecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,\n+\t\t    MCDI_EV_FIELD(eqp, TX_ERR_DATA));\n+\t\tbreak;\n+\t}\n+\n+\tcase MCDI_EVENT_CODE_TX_FLUSH: {\n+\t\tuint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);\n+\n+\t\t/*\n+\t\t * EF10 firmware sends two TX_FLUSH events: one to the txq's\n+\t\t * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).\n+\t\t * We want to wait for all completions, so ignore the events\n+\t\t * with TX_FLUSH_TO_DRIVER.\n+\t\t */\n+\t\tif (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {\n+\t\t\tshould_abort = B_FALSE;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tEFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);\n+\n+\t\tEFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);\n+\n+\t\tEFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);\n+\t\tshould_abort = eecp->eec_txq_flush_done(arg, txq_index);\n+\t\tbreak;\n+\t}\n+\n+\tcase MCDI_EVENT_CODE_RX_ERR: {\n+\t\t/*\n+\t\t * After an RXQ error is detected, firmware sends an RX_ERR\n+\t\t * event. This may be followed by RX events (which we discard),\n+\t\t * and then finally by an RX_FLUSH event. Firmware destroys the\n+\t\t * RXQ automatically after sending the RX_FLUSH event.\n+\t\t */\n+\t\tenp->en_reset_flags |= EFX_RESET_RXQ_ERR;\n+\n+\t\tEFSYS_PROBE2(rx_descq_err,\n+\t\t\t    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),\n+\t\t\t    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));\n+\n+\t\t/* Inform the driver that a reset is required. */\n+\t\teecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,\n+\t\t    MCDI_EV_FIELD(eqp, RX_ERR_DATA));\n+\t\tbreak;\n+\t}\n+\n+\tcase MCDI_EVENT_CODE_RX_FLUSH: {\n+\t\tuint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);\n+\n+\t\t/*\n+\t\t * EF10 firmware sends two RX_FLUSH events: one to the rxq's\n+\t\t * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).\n+\t\t * We want to wait for all completions, so ignore the events\n+\t\t * with RX_FLUSH_TO_DRIVER.\n+\t\t */\n+\t\tif (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {\n+\t\t\tshould_abort = B_FALSE;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tEFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);\n+\n+\t\tEFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);\n+\n+\t\tEFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);\n+\t\tshould_abort = eecp->eec_rxq_flush_done(arg, rxq_index);\n+\t\tbreak;\n+\t}\n+\n+\tdefault:\n+\t\tEFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,\n+\t\t    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),\n+\t\t    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));\n+\t\tbreak;\n+\t}\n+\n+\treturn (should_abort);\n+}\n+\n+\t\tvoid\n+ef10_ev_rxlabel_init(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_rxq_t *erp,\n+\t__in\t\tunsigned int label,\n+\t__in\t\tboolean_t packed_stream)\n+{\n+\tefx_evq_rxq_state_t *eersp;\n+\n+\tEFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));\n+\teersp = &eep->ee_rxq_state[label];\n+\n+\tEFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);\n+\n+\teersp->eers_rx_read_ptr = 0;\n+\teersp->eers_rx_mask = erp->er_mask;\n+\tEFSYS_ASSERT(!packed_stream);\n+}\n+\n+\t\tvoid\n+ef10_ev_rxlabel_fini(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tunsigned int label)\n+{\n+\tefx_evq_rxq_state_t *eersp;\n+\n+\tEFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));\n+\teersp = &eep->ee_rxq_state[label];\n+\n+\tEFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);\n+\n+\teersp->eers_rx_read_ptr = 0;\n+\teersp->eers_rx_mask = 0;\n+}\n+\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\ndiff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c\nnew file mode 100644\nindex 
0000000..4514160\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_filter.c\n@@ -0,0 +1,1469 @@\n+/*\n+ * Copyright (c) 2007-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#include \"efx.h\"\n+#include \"efx_impl.h\"\n+\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+#if EFSYS_OPT_FILTER\n+\n+#define\tEFE_SPEC(eftp, index)\t((eftp)->eft_entry[(index)].efe_spec)\n+\n+static\t\t\tefx_filter_spec_t *\n+ef10_filter_entry_spec(\n+\t__in\t\tconst ef10_filter_table_t *eftp,\n+\t__in\t\tunsigned int index)\n+{\n+\treturn ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) &\n+\t\t~(uintptr_t)EFX_EF10_FILTER_FLAGS));\n+}\n+\n+static\t\t\tboolean_t\n+ef10_filter_entry_is_busy(\n+\t__in\t\tconst ef10_filter_table_t *eftp,\n+\t__in\t\tunsigned int index)\n+{\n+\tif (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY)\n+\t\treturn (B_TRUE);\n+\telse\n+\t\treturn (B_FALSE);\n+}\n+\n+static\t\t\tboolean_t\n+ef10_filter_entry_is_auto_old(\n+\t__in\t\tconst ef10_filter_table_t *eftp,\n+\t__in\t\tunsigned int index)\n+{\n+\tif (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD)\n+\t\treturn (B_TRUE);\n+\telse\n+\t\treturn (B_FALSE);\n+}\n+\n+static\t\t\tvoid\n+ef10_filter_set_entry(\n+\t__inout\t\tef10_filter_table_t *eftp,\n+\t__in\t\tunsigned int index,\n+\t__in_opt\tconst efx_filter_spec_t *efsp)\n+{\n+\tEFE_SPEC(eftp, index) = (uintptr_t)efsp;\n+}\n+\n+static\t\t\tvoid\n+ef10_filter_set_entry_busy(\n+\t__inout\t\tef10_filter_table_t *eftp,\n+\t__in\t\tunsigned int index)\n+{\n+\tEFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;\n+}\n+\n+static\t\t\tvoid\n+ef10_filter_set_entry_not_busy(\n+\t__inout\t\tef10_filter_table_t *eftp,\n+\t__in\t\tunsigned int index)\n+{\n+\tEFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;\n+}\n+\n+static\t\t\tvoid\n+ef10_filter_set_entry_auto_old(\n+\t__inout\t\tef10_filter_table_t *eftp,\n+\t__in\t\tunsigned int index)\n+{\n+\tEFSYS_ASSERT(ef10_filter_entry_spec(eftp, 
index) != NULL);\n+\tEFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;\n+}\n+\n+static\t\t\tvoid\n+ef10_filter_set_entry_not_auto_old(\n+\t__inout\t\tef10_filter_table_t *eftp,\n+\t__in\t\tunsigned int index)\n+{\n+\tEFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;\n+\tEFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_filter_init(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_rc_t rc;\n+\tef10_filter_table_t *eftp;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+#define\tMATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))\n+\tEFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==\n+\t    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_IP));\n+\tEFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST ==\n+\t    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_IP));\n+\tEFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC ==\n+\t    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC));\n+\tEFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT ==\n+\t    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT));\n+\tEFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC ==\n+\t    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_MAC));\n+\tEFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT ==\n+\t    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_PORT));\n+\tEFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE ==\n+\t    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE));\n+\tEFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID ==\n+\t    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN));\n+\tEFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID ==\n+\t    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN));\n+\tEFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==\n+\t    MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO));\n+#undef MATCH_MASK\n+\n+\tEFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp);\n+\n+\tif (!eftp) {\n+\t\trc = ENOMEM;\n+\t\tgoto fail1;\n+\t}\n+\n+\tenp->en_filter.ef_ef10_filter_table = eftp;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\t\tvoid\n+ef10_filter_fini(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\tif (enp->en_filter.ef_ef10_filter_table != NULL) {\n+\t\tEFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),\n+\t\t    enp->en_filter.ef_ef10_filter_table);\n+\t}\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_filter_op_add(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_filter_spec_t *spec,\n+\t__in\t\tunsigned int filter_op,\n+\t__inout\t\tef10_filter_handle_t *handle)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,\n+\t\t\t    MC_CMD_FILTER_OP_OUT_LEN)];\n+\tuint32_t match_fields = 0;\n+\tefx_rc_t rc;\n+\n+\tmemset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_FILTER_OP;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;\n+\n+\tswitch (filter_op) {\n+\tcase MC_CMD_FILTER_OP_IN_OP_REPLACE:\n+\t\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO,\n+\t\t    handle->efh_lo);\n+\t\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI,\n+\t\t    handle->efh_hi);\n+\t\t/* Fall through */\n+\tcase MC_CMD_FILTER_OP_IN_OP_INSERT:\n+\tcase MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE:\n+\t\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op);\n+\t\tbreak;\n+\tdefault:\n+\t\tEFSYS_ASSERT(0);\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif 
(spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {\n+\t\t/*\n+\t\t * The LOC_MAC_IG match flag can represent unknown unicast\n+\t\t *  or multicast filters - use the MAC address to distinguish\n+\t\t *  them.\n+\t\t */\n+\t\tif (EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))\n+\t\t\tmatch_fields |= 1U <<\n+\t\t\t\tMC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN;\n+\t\telse\n+\t\t\tmatch_fields |= 1U <<\n+\t\t\t\tMC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;\n+\t}\n+\n+\tmatch_fields |= spec->efs_match_flags & (~EFX_FILTER_MATCH_LOC_MAC_IG);\n+\n+\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_PORT_ID,\n+\t    EVB_PORT_ID_ASSIGNED);\n+\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_MATCH_FIELDS,\n+\t    match_fields);\n+\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_DEST,\n+\t    MC_CMD_FILTER_OP_IN_RX_DEST_HOST);\n+\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_QUEUE,\n+\t    spec->efs_dmaq_id);\n+\tif (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {\n+\t\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_CONTEXT,\n+\t\t    spec->efs_rss_context);\n+\t}\n+\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_MODE,\n+\t    spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ?\n+\t    MC_CMD_FILTER_OP_IN_RX_MODE_RSS :\n+\t    MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);\n+\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_TX_DEST,\n+\t    MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);\n+\n+\tif (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) {\n+\t\t/*\n+\t\t * NOTE: Unlike most MCDI requests, the filter fields\n+\t\t * are presented in network (big endian) byte order.\n+\t\t */\n+\t\tmemcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_MAC),\n+\t\t    spec->efs_rem_mac, EFX_MAC_ADDR_LEN);\n+\t\tmemcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_MAC),\n+\t\t    spec->efs_loc_mac, EFX_MAC_ADDR_LEN);\n+\n+\t\tMCDI_IN_SET_WORD(req, FILTER_OP_IN_SRC_PORT,\n+\t\t    __CPU_TO_BE_16(spec->efs_rem_port));\n+\t\tMCDI_IN_SET_WORD(req, FILTER_OP_IN_DST_PORT,\n+\t\t    __CPU_TO_BE_16(spec->efs_loc_port));\n+\n+\t\tMCDI_IN_SET_WORD(req, FILTER_OP_IN_ETHER_TYPE,\n+\t\t    __CPU_TO_BE_16(spec->efs_ether_type));\n+\n+\t\tMCDI_IN_SET_WORD(req, FILTER_OP_IN_INNER_VLAN,\n+\t\t    __CPU_TO_BE_16(spec->efs_inner_vid));\n+\t\tMCDI_IN_SET_WORD(req, FILTER_OP_IN_OUTER_VLAN,\n+\t\t    __CPU_TO_BE_16(spec->efs_outer_vid));\n+\n+\t\t/* IP protocol (in low byte, high byte is zero) */\n+\t\tMCDI_IN_SET_BYTE(req, FILTER_OP_IN_IP_PROTO,\n+\t\t    spec->efs_ip_proto);\n+\n+\t\tEFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) ==\n+\t\t    MC_CMD_FILTER_OP_IN_SRC_IP_LEN);\n+\t\tEFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) ==\n+\t\t    MC_CMD_FILTER_OP_IN_DST_IP_LEN);\n+\n+\t\tmemcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_IP),\n+\t\t    &spec->efs_rem_host.eo_byte[0],\n+\t\t    MC_CMD_FILTER_OP_IN_SRC_IP_LEN);\n+\t\tmemcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_IP),\n+\t\t    &spec->efs_loc_host.eo_byte[0],\n+\t\t    MC_CMD_FILTER_OP_IN_DST_IP_LEN);\n+\t}\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail2;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail3;\n+\t}\n+\n+\thandle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_LO);\n+\thandle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_HI);\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_filter_op_delete(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int 
filter_op,\n+\t__inout\t\tef10_filter_handle_t *handle)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,\n+\t\t\t    MC_CMD_FILTER_OP_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\tmemset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_FILTER_OP;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;\n+\n+\tswitch (filter_op) {\n+\tcase MC_CMD_FILTER_OP_IN_OP_REMOVE:\n+\t\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,\n+\t\t    MC_CMD_FILTER_OP_IN_OP_REMOVE);\n+\t\tbreak;\n+\tcase MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE:\n+\t\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,\n+\t\t    MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);\n+\t\tbreak;\n+\tdefault:\n+\t\tEFSYS_ASSERT(0);\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->efh_lo);\n+\tMCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->efh_hi);\n+\n+\tefx_mcdi_execute_quiet(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail2;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail3;\n+\t}\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tboolean_t\n+ef10_filter_equal(\n+\t__in\t\tconst efx_filter_spec_t *left,\n+\t__in\t\tconst efx_filter_spec_t *right)\n+{\n+\t/* FIXME: Consider rx vs tx filters (look at efs_flags) */\n+\tif (left->efs_match_flags != right->efs_match_flags)\n+\t\treturn (B_FALSE);\n+\tif (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host))\n+\t\treturn (B_FALSE);\n+\tif (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host))\n+\t\treturn (B_FALSE);\n+\tif (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN))\n+\t\treturn (B_FALSE);\n+\tif (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN))\n+\t\treturn (B_FALSE);\n+\tif (left->efs_rem_port != right->efs_rem_port)\n+\t\treturn (B_FALSE);\n+\tif (left->efs_loc_port != right->efs_loc_port)\n+\t\treturn (B_FALSE);\n+\tif (left->efs_inner_vid != right->efs_inner_vid)\n+\t\treturn (B_FALSE);\n+\tif (left->efs_outer_vid != right->efs_outer_vid)\n+\t\treturn (B_FALSE);\n+\tif (left->efs_ether_type != right->efs_ether_type)\n+\t\treturn (B_FALSE);\n+\tif (left->efs_ip_proto != right->efs_ip_proto)\n+\t\treturn (B_FALSE);\n+\n+\treturn (B_TRUE);\n+\n+}\n+\n+static\t__checkReturn\tboolean_t\n+ef10_filter_same_dest(\n+\t__in\t\tconst efx_filter_spec_t *left,\n+\t__in\t\tconst efx_filter_spec_t *right)\n+{\n+\tif ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&\n+\t    (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) {\n+\t\tif (left->efs_rss_context == right->efs_rss_context)\n+\t\t\treturn (B_TRUE);\n+\t} else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) &&\n+\t    (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) {\n+\t\tif (left->efs_dmaq_id == right->efs_dmaq_id)\n+\t\t\treturn (B_TRUE);\n+\t}\n+\treturn (B_FALSE);\n+}\n+\n+static\t__checkReturn\tuint32_t\n+ef10_filter_hash(\n+\t__in\t\tefx_filter_spec_t *spec)\n+{\n+\tEFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t))\n+\t\t\t    == 0);\n+\tEFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) %\n+\t\t\t    sizeof (uint32_t)) == 0);\n+\n+\t/*\n+\t * As the area of the efx_filter_spec_t we need to hash is DWORD\n+\t * aligned and an exact number of DWORDs in size we can use 
the\n+\t * optimised efx_hash_dwords() rather than efx_hash_bytes()\n+\t */\n+\treturn (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid,\n+\t\t\t(sizeof (efx_filter_spec_t) -\n+\t\t\tEFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) /\n+\t\t\tsizeof (uint32_t), 0));\n+}\n+\n+/*\n+ * Decide whether a filter should be exclusive or else should allow\n+ * delivery to additional recipients.  Currently we decide that\n+ * filters for specific local unicast MAC and IP addresses are\n+ * exclusive.\n+ */\n+static\t__checkReturn\tboolean_t\n+ef10_filter_is_exclusive(\n+\t__in\t\tefx_filter_spec_t *spec)\n+{\n+\tif ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) &&\n+\t    !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))\n+\t\treturn (B_TRUE);\n+\n+\tif ((spec->efs_match_flags &\n+\t\t(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==\n+\t    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {\n+\t\tif ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) &&\n+\t\t    ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe))\n+\t\t\treturn (B_TRUE);\n+\t\tif ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) &&\n+\t\t    (spec->efs_loc_host.eo_u8[0] != 0xff))\n+\t\t\treturn (B_TRUE);\n+\t}\n+\n+\treturn (B_FALSE);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_filter_restore(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tint tbl_id;\n+\tefx_filter_spec_t *spec;\n+\tef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;\n+\tboolean_t restoring;\n+\tefsys_lock_state_t state;\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\tfor (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) {\n+\n+\t\tEFSYS_LOCK(enp->en_eslp, state);\n+\n+\t\tspec = ef10_filter_entry_spec(eftp, tbl_id);\n+\t\tif (spec == NULL) {\n+\t\t\trestoring = B_FALSE;\n+\t\t} else if (ef10_filter_entry_is_busy(eftp, tbl_id)) {\n+\t\t\t/* Ignore busy entries. */\n+\t\t\trestoring = B_FALSE;\n+\t\t} else {\n+\t\t\tef10_filter_set_entry_busy(eftp, tbl_id);\n+\t\t\trestoring = B_TRUE;\n+\t\t}\n+\n+\t\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\n+\t\tif (restoring == B_FALSE)\n+\t\t\tcontinue;\n+\n+\t\tif (ef10_filter_is_exclusive(spec)) {\n+\t\t\trc = efx_mcdi_filter_op_add(enp, spec,\n+\t\t\t    MC_CMD_FILTER_OP_IN_OP_INSERT,\n+\t\t\t    &eftp->eft_entry[tbl_id].efe_handle);\n+\t\t} else {\n+\t\t\trc = efx_mcdi_filter_op_add(enp, spec,\n+\t\t\t    MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,\n+\t\t\t    &eftp->eft_entry[tbl_id].efe_handle);\n+\t\t}\n+\n+\t\tif (rc != 0)\n+\t\t\tgoto fail1;\n+\n+\t\tEFSYS_LOCK(enp->en_eslp, state);\n+\n+\t\tef10_filter_set_entry_not_busy(eftp, tbl_id);\n+\n+\t\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+/*\n+ * An arbitrary search limit for the software hash table. 
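The unknown-unicast/unknown-multicast split handled via EFX_FILTER_MATCH_LOC_MAC_IG earlier, and the exclusivity test in ef10_filter_is_exclusive() just above, both hinge on the Ethernet I/G bit: a MAC address is a group (multicast or broadcast) address exactly when the least significant bit of its first octet is set, which is what EFX_MAC_ADDR_IS_MULTICAST checks. A minimal standalone sketch of that test (the helper name is illustrative, not the libefx macro):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * A MAC address is a group (multicast/broadcast) address iff the I/G bit,
 * the least significant bit of the first octet, is set.
 */
static bool mac_is_group_address(const uint8_t mac[6])
{
	return (mac[0] & 0x01) != 0;
}

int main(void)
{
	const uint8_t ucast[6] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("unicast -> %d, broadcast -> %d\n",
	    mac_is_group_address(ucast), mac_is_group_address(bcast));
	return 0;
}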
As per the linux net\n+ * driver.\n+ */\n+#define\tEF10_FILTER_SEARCH_LIMIT 200\n+\n+static\t__checkReturn\tefx_rc_t\n+ef10_filter_add_internal(\n+\t__in\t\tefx_nic_t *enp,\n+\t__inout\t\tefx_filter_spec_t *spec,\n+\t__in\t\tboolean_t may_replace,\n+\t__out_opt\tuint32_t *filter_id)\n+{\n+\tefx_rc_t rc;\n+\tef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;\n+\tefx_filter_spec_t *saved_spec;\n+\tuint32_t hash;\n+\tunsigned int depth;\n+\tint ins_index;\n+\tboolean_t replacing = B_FALSE;\n+\tunsigned int i;\n+\tefsys_lock_state_t state;\n+\tboolean_t locked = B_FALSE;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\thash = ef10_filter_hash(spec);\n+\n+\t/*\n+\t * FIXME: Add support for inserting filters of different priorities\n+\t * and removing lower priority multicast filters (bug 42378)\n+\t */\n+\n+\t/*\n+\t * Find any existing filters with the same match tuple or\n+\t * else a free slot to insert at.  If any of them are busy,\n+\t * we have to wait and retry.\n+\t */\n+\tfor (;;) {\n+\t\tins_index = -1;\n+\t\tdepth = 1;\n+\t\tEFSYS_LOCK(enp->en_eslp, state);\n+\t\tlocked = B_TRUE;\n+\n+\t\tfor (;;) {\n+\t\t\ti = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);\n+\t\t\tsaved_spec = ef10_filter_entry_spec(eftp, i);\n+\n+\t\t\tif (!saved_spec) {\n+\t\t\t\tif (ins_index < 0) {\n+\t\t\t\t\tins_index = i;\n+\t\t\t\t}\n+\t\t\t} else if (ef10_filter_equal(spec, saved_spec)) {\n+\t\t\t\tif (ef10_filter_entry_is_busy(eftp, i))\n+\t\t\t\t\tbreak;\n+\t\t\t\tif (saved_spec->efs_priority\n+\t\t\t\t\t    == EFX_FILTER_PRI_AUTO) {\n+\t\t\t\t\tins_index = i;\n+\t\t\t\t\tgoto found;\n+\t\t\t\t} else if (ef10_filter_is_exclusive(spec)) {\n+\t\t\t\t\tif (may_replace) {\n+\t\t\t\t\t\tins_index = i;\n+\t\t\t\t\t\tgoto found;\n+\t\t\t\t\t} else {\n+\t\t\t\t\t\trc = EEXIST;\n+\t\t\t\t\t\tgoto fail1;\n+\t\t\t\t\t}\n+\t\t\t\t}\n+\n+\t\t\t\t/* Leave existing */\n+\t\t\t}\n+\n+\t\t\t/*\n+\t\t\t * Once we reach the maximum search depth, use\n+\t\t\t * the first suitable slot or return EBUSY if\n+\t\t\t * there was none.\n+\t\t\t */\n+\t\t\tif (depth == EF10_FILTER_SEARCH_LIMIT) {\n+\t\t\t\tif (ins_index < 0) {\n+\t\t\t\t\trc = EBUSY;\n+\t\t\t\t\tgoto fail2;\n+\t\t\t\t}\n+\t\t\t\tgoto found;\n+\t\t\t}\n+\t\t\tdepth++;\n+\t\t}\n+\t\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\t\tlocked = B_FALSE;\n+\t}\n+\n+found:\n+\t/*\n+\t * Create a software table entry if necessary, and mark it\n+\t * busy.  
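ef10_filter_add_internal() below searches the software table with open addressing: starting from the spec hash it probes (hash + depth) masked to the power-of-two table size, remembers the first free slot, stops early on an equal spec, and gives up at EF10_FILTER_SEARCH_LIMIT. A standalone sketch of that probing pattern under the same conventions; the trivial key type and the function names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define TBL_ROWS	8192u	/* power of two, as EFX_EF10_FILTER_TBL_ROWS */
#define SEARCH_LIMIT	200u	/* as EF10_FILTER_SEARCH_LIMIT */

struct entry {
	int	 in_use;
	uint32_t key;
};

/*
 * Probe the table starting at (hash + 1).  Return the index of a matching
 * entry, otherwise the first free slot seen, otherwise -1 once the search
 * limit is reached.
 */
static int probe(const struct entry *tbl, uint32_t hash, uint32_t key)
{
	int free_idx = -1;
	unsigned int depth;
	unsigned int i;

	for (depth = 1; depth <= SEARCH_LIMIT; depth++) {
		i = (hash + depth) & (TBL_ROWS - 1);

		if (!tbl[i].in_use) {
			if (free_idx < 0)
				free_idx = (int)i;
		} else if (tbl[i].key == key) {
			return (int)i;		/* existing match */
		}
	}
	return free_idx;			/* first free slot, or -1 */
}

int main(void)
{
	static struct entry tbl[TBL_ROWS];
	uint32_t key = 0x1234u;
	uint32_t hash = key * 2654435761u;	/* any hash function will do */

	printf("insert slot: %d\n", probe(tbl, hash, key));
	return 0;
}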
We might yet fail to insert, but any attempt to\n+\t * insert a conflicting filter while we're waiting for the\n+\t * firmware must find the busy entry.\n+\t */\n+\tsaved_spec = ef10_filter_entry_spec(eftp, ins_index);\n+\tif (saved_spec) {\n+\t\tif (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) {\n+\t\t\t/* This is a filter we are refreshing */\n+\t\t\tef10_filter_set_entry_not_auto_old(eftp, ins_index);\n+\t\t\tgoto out_unlock;\n+\n+\t\t}\n+\t\treplacing = B_TRUE;\n+\t} else {\n+\t\tEFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec);\n+\t\tif (!saved_spec) {\n+\t\t\trc = ENOMEM;\n+\t\t\tgoto fail3;\n+\t\t}\n+\t\t*saved_spec = *spec;\n+\t\tef10_filter_set_entry(eftp, ins_index, saved_spec);\n+\t}\n+\tef10_filter_set_entry_busy(eftp, ins_index);\n+\n+\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\tlocked = B_FALSE;\n+\n+\t/*\n+\t * On replacing the filter handle may change after after a successful\n+\t * replace operation.\n+\t */\n+\tif (replacing) {\n+\t\trc = efx_mcdi_filter_op_add(enp, spec,\n+\t\t    MC_CMD_FILTER_OP_IN_OP_REPLACE,\n+\t\t    &eftp->eft_entry[ins_index].efe_handle);\n+\t} else if (ef10_filter_is_exclusive(spec)) {\n+\t\trc = efx_mcdi_filter_op_add(enp, spec,\n+\t\t    MC_CMD_FILTER_OP_IN_OP_INSERT,\n+\t\t    &eftp->eft_entry[ins_index].efe_handle);\n+\t} else {\n+\t\trc = efx_mcdi_filter_op_add(enp, spec,\n+\t\t    MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,\n+\t\t    &eftp->eft_entry[ins_index].efe_handle);\n+\t}\n+\n+\tif (rc != 0)\n+\t\tgoto fail4;\n+\n+\tEFSYS_LOCK(enp->en_eslp, state);\n+\tlocked = B_TRUE;\n+\n+\tif (replacing) {\n+\t\t/* Update the fields that may differ */\n+\t\tsaved_spec->efs_priority = spec->efs_priority;\n+\t\tsaved_spec->efs_flags = spec->efs_flags;\n+\t\tsaved_spec->efs_rss_context = spec->efs_rss_context;\n+\t\tsaved_spec->efs_dmaq_id = spec->efs_dmaq_id;\n+\t}\n+\n+\tef10_filter_set_entry_not_busy(eftp, ins_index);\n+\n+out_unlock:\n+\n+\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\tlocked = B_FALSE;\n+\n+\tif (filter_id)\n+\t\t*filter_id = ins_index;\n+\n+\treturn (0);\n+\n+fail4:\n+\tEFSYS_PROBE(fail4);\n+\n+\tif (!replacing) {\n+\t\tEFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec);\n+\t\tsaved_spec = NULL;\n+\t}\n+\tef10_filter_set_entry_not_busy(eftp, ins_index);\n+\tef10_filter_set_entry(eftp, ins_index, NULL);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\tif (locked)\n+\t\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_filter_add(\n+\t__in\t\tefx_nic_t *enp,\n+\t__inout\t\tefx_filter_spec_t *spec,\n+\t__in\t\tboolean_t may_replace)\n+{\n+\tefx_rc_t rc;\n+\n+\trc = ef10_filter_add_internal(enp, spec, may_replace, NULL);\n+\tif (rc != 0)\n+\t\tgoto fail1;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+static\t__checkReturn\tefx_rc_t\n+ef10_filter_delete_internal(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t filter_id)\n+{\n+\tefx_rc_t rc;\n+\tef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;\n+\tefx_filter_spec_t *spec;\n+\tefsys_lock_state_t state;\n+\tuint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS;\n+\n+\t/*\n+\t * Find the software table entry and mark it busy.  
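The busy marking used here (and the AUTO_OLD marking used by reconfiguration) is kept in the low bits of each entry's spec pointer: the ef10_filter_entry_t definition later in this patch stores the pointer in a uintptr_t with EFX_EF10_FILTER_FLAG_BUSY (1) and EFX_EF10_FILTER_FLAG_AUTO_OLD (2) OR-ed in. A standalone sketch of that low-bit pointer tagging; the accessor names are illustrative, not the libefx helpers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_BUSY	1u
#define FLAG_AUTO_OLD	2u
#define FLAG_MASK	3u	/* spec allocations are at least 4-byte aligned */

struct spec { int dummy; };

struct entry { uintptr_t spec_and_flags; };

static struct spec *entry_spec(const struct entry *e)
{
	return (struct spec *)(e->spec_and_flags & ~(uintptr_t)FLAG_MASK);
}

static int entry_is_busy(const struct entry *e)
{
	return (e->spec_and_flags & FLAG_BUSY) != 0;
}

static void entry_set_busy(struct entry *e)
{
	e->spec_and_flags |= FLAG_BUSY;
}

static void entry_clear_busy(struct entry *e)
{
	e->spec_and_flags &= ~(uintptr_t)FLAG_BUSY;
}

int main(void)
{
	struct entry e;
	struct spec *s = malloc(sizeof (*s));	/* malloc is suitably aligned */

	e.spec_and_flags = (uintptr_t)s;
	entry_set_busy(&e);
	printf("busy=%d spec=%p\n", entry_is_busy(&e), (void *)entry_spec(&e));
	entry_clear_busy(&e);
	printf("busy=%d\n", entry_is_busy(&e));
	free(s);
	return 0;
}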
Don't\n+\t * remove it yet; any attempt to update while we're waiting\n+\t * for the firmware must find the busy entry.\n+\t *\n+\t * FIXME: What if the busy flag is never cleared?\n+\t */\n+\tEFSYS_LOCK(enp->en_eslp, state);\n+\twhile (ef10_filter_entry_is_busy(table, filter_idx)) {\n+\t\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\t\tEFSYS_SPIN(1);\n+\t\tEFSYS_LOCK(enp->en_eslp, state);\n+\t}\n+\tif ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) {\n+\t\tef10_filter_set_entry_busy(table, filter_idx);\n+\t}\n+\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\n+\tif (spec == NULL) {\n+\t\trc = ENOENT;\n+\t\tgoto fail1;\n+\t}\n+\n+\t/*\n+\t * Try to remove the hardware filter. This may fail if the MC has\n+\t * rebooted (which frees all hardware filter resources).\n+\t */\n+\tif (ef10_filter_is_exclusive(spec)) {\n+\t\trc = efx_mcdi_filter_op_delete(enp,\n+\t\t    MC_CMD_FILTER_OP_IN_OP_REMOVE,\n+\t\t    &table->eft_entry[filter_idx].efe_handle);\n+\t} else {\n+\t\trc = efx_mcdi_filter_op_delete(enp,\n+\t\t    MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE,\n+\t\t    &table->eft_entry[filter_idx].efe_handle);\n+\t}\n+\n+\t/* Free the software table entry */\n+\tEFSYS_LOCK(enp->en_eslp, state);\n+\tef10_filter_set_entry_not_busy(table, filter_idx);\n+\tef10_filter_set_entry(table, filter_idx, NULL);\n+\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\n+\tEFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec);\n+\n+\t/* Check result of hardware filter removal */\n+\tif (rc != 0)\n+\t\tgoto fail2;\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_filter_delete(\n+\t__in\t\tefx_nic_t *enp,\n+\t__inout\t\tefx_filter_spec_t *spec)\n+{\n+\tefx_rc_t rc;\n+\tef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;\n+\tefx_filter_spec_t *saved_spec;\n+\tunsigned int hash;\n+\tunsigned int depth;\n+\tunsigned int i;\n+\tefsys_lock_state_t state;\n+\tboolean_t locked = B_FALSE;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\thash = ef10_filter_hash(spec);\n+\n+\tEFSYS_LOCK(enp->en_eslp, state);\n+\tlocked = B_TRUE;\n+\n+\tdepth = 1;\n+\tfor (;;) {\n+\t\ti = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);\n+\t\tsaved_spec = ef10_filter_entry_spec(table, i);\n+\t\tif (saved_spec && ef10_filter_equal(spec, saved_spec) &&\n+\t\t    ef10_filter_same_dest(spec, saved_spec)) {\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (depth == EF10_FILTER_SEARCH_LIMIT) {\n+\t\t\trc = ENOENT;\n+\t\t\tgoto fail1;\n+\t\t}\n+\t\tdepth++;\n+\t}\n+\n+\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\tlocked = B_FALSE;\n+\n+\trc = ef10_filter_delete_internal(enp, i);\n+\tif (rc != 0)\n+\t\tgoto fail2;\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\tif (locked)\n+\t\tEFSYS_UNLOCK(enp->en_eslp, state);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_parser_disp_info(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *list,\n+\t__out\t\tsize_t *length)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,\n+\t\t\t    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)];\n+\tefx_rc_t rc;\n+\tuint32_t i;\n+\tboolean_t support_unknown_ucast = B_FALSE;\n+\tboolean_t support_unknown_mcast = B_FALSE;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = 
MC_CMD_GET_PARSER_DISP_INFO_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX;\n+\n+\tMCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP,\n+\t    MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\t*length = MCDI_OUT_DWORD(req,\n+\t    GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES);\n+\n+\tif (req.emr_out_length_used <\n+\t    MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(*length)) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\tmemcpy(list,\n+\t    MCDI_OUT2(req,\n+\t    uint32_t,\n+\t    GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES),\n+\t    (*length) * sizeof (uint32_t));\n+\tEFX_STATIC_ASSERT(sizeof (uint32_t) ==\n+\t    MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN);\n+\n+\t/*\n+\t * Remove UNKNOWN UCAST and MCAST flags, and if both are present, change\n+\t * the lower priority one to LOC_MAC_IG.\n+\t */\n+\tfor (i = 0; i < *length; i++) {\n+\t\tif (list[i] & MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN) {\n+\t\t\tlist[i] &=\n+\t\t\t(~MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);\n+\t\t\tsupport_unknown_ucast = B_TRUE;\n+\t\t}\n+\t\tif (list[i] & MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) {\n+\t\t\tlist[i] &=\n+\t\t\t(~MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN);\n+\t\t\tsupport_unknown_mcast = B_TRUE;\n+\t\t}\n+\n+\t\tif (support_unknown_ucast && support_unknown_mcast) {\n+\t\t\tlist[i] &= EFX_FILTER_MATCH_LOC_MAC_IG;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_filter_supported_filters(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *list,\n+\t__out\t\tsize_t *length)\n+{\n+\tefx_rc_t rc;\n+\n+\tif ((rc = efx_mcdi_get_parser_disp_info(enp, list, length)) != 0)\n+\t\tgoto fail1;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+ef10_filter_insert_unicast(\n+\t__in\t\t\t\tefx_nic_t *enp,\n+\t__in_ecount(6)\t\t\tuint8_t const *addr,\n+\t__in\t\t\t\tefx_filter_flags_t filter_flags)\n+{\n+\tef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;\n+\tefx_filter_spec_t spec;\n+\tefx_rc_t rc;\n+\n+\t/* Insert the filter for the local station address */\n+\tefx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,\n+\t    filter_flags,\n+\t    eftp->eft_default_rxq);\n+\tefx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr);\n+\n+\trc = ef10_filter_add_internal(enp, &spec, B_TRUE,\n+\t    &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);\n+\tif (rc != 0)\n+\t\tgoto fail1;\n+\n+\teftp->eft_unicst_filter_count++;\n+\tEFSYS_ASSERT(eftp->eft_unicst_filter_count <=\n+\t\t    EFX_EF10_FILTER_UNICAST_FILTERS_MAX);\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+ef10_filter_insert_all_unicast(\n+\t__in\t\t\t\tefx_nic_t *enp,\n+\t__in\t\t\t\tefx_filter_flags_t filter_flags)\n+{\n+\tef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;\n+\tefx_filter_spec_t spec;\n+\tefx_rc_t rc;\n+\n+\t/* Insert the unknown unicast filter */\n+\tefx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,\n+\t    filter_flags,\n+\t    eftp->eft_default_rxq);\n+\tefx_filter_spec_set_uc_def(&spec);\n+\trc = ef10_filter_add_internal(enp, &spec, B_TRUE,\n+\t    
&eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);\n+\tif (rc != 0)\n+\t\tgoto fail1;\n+\n+\teftp->eft_unicst_filter_count++;\n+\tEFSYS_ASSERT(eftp->eft_unicst_filter_count <=\n+\t\t    EFX_EF10_FILTER_UNICAST_FILTERS_MAX);\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+ef10_filter_insert_multicast_list(\n+\t__in\t\t\t\tefx_nic_t *enp,\n+\t__in\t\t\t\tboolean_t mulcst,\n+\t__in\t\t\t\tboolean_t brdcst,\n+\t__in_ecount(6*count)\t\tuint8_t const *addrs,\n+\t__in\t\t\t\tuint32_t count,\n+\t__in\t\t\t\tefx_filter_flags_t filter_flags,\n+\t__in\t\t\t\tboolean_t rollback)\n+{\n+\tef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;\n+\tefx_filter_spec_t spec;\n+\tuint8_t addr[6];\n+\tuint32_t i;\n+\tuint32_t filter_index;\n+\tuint32_t filter_count;\n+\tefx_rc_t rc;\n+\n+\tif (mulcst == B_FALSE)\n+\t\tcount = 0;\n+\n+\tif (count + (brdcst ? 1 : 0) >\n+\t    EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) {\n+\t\t/* Too many MAC addresses */\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t/* Insert/renew multicast address list filters */\n+\tfilter_count = 0;\n+\tfor (i = 0; i < count; i++) {\n+\t\tefx_filter_spec_init_rx(&spec,\n+\t\t    EFX_FILTER_PRI_AUTO,\n+\t\t    filter_flags,\n+\t\t    eftp->eft_default_rxq);\n+\n+\t\tefx_filter_spec_set_eth_local(&spec,\n+\t\t    EFX_FILTER_SPEC_VID_UNSPEC,\n+\t\t    &addrs[i * EFX_MAC_ADDR_LEN]);\n+\n+\t\trc = ef10_filter_add_internal(enp, &spec, B_TRUE,\n+\t\t\t\t\t    &filter_index);\n+\n+\t\tif (rc == 0) {\n+\t\t\teftp->eft_mulcst_filter_indexes[filter_count] =\n+\t\t\t\tfilter_index;\n+\t\t\tfilter_count++;\n+\t\t} else if (rollback == B_TRUE) {\n+\t\t\t/* Only stop upon failure if told to rollback */\n+\t\t\tgoto rollback;\n+\t\t}\n+\n+\t}\n+\n+\tif (brdcst == B_TRUE) {\n+\t\t/* Insert/renew broadcast address filter */\n+\t\tefx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,\n+\t\t    filter_flags,\n+\t\t    eftp->eft_default_rxq);\n+\n+\t\tEFX_MAC_BROADCAST_ADDR_SET(addr);\n+\t\tefx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,\n+\t\t    addr);\n+\n+\t\trc = ef10_filter_add_internal(enp, &spec, B_TRUE,\n+\t\t\t\t\t    &filter_index);\n+\n+\t\tif (rc == 0) {\n+\t\t\teftp->eft_mulcst_filter_indexes[filter_count] =\n+\t\t\t\tfilter_index;\n+\t\t\tfilter_count++;\n+\t\t} else if (rollback == B_TRUE) {\n+\t\t\t/* Only stop upon failure if told to rollback */\n+\t\t\tgoto rollback;\n+\t\t}\n+\t}\n+\n+\teftp->eft_mulcst_filter_count = filter_count;\n+\teftp->eft_using_all_mulcst = B_FALSE;\n+\n+\treturn (0);\n+\n+rollback:\n+\t/* Remove any filters we have inserted */\n+\ti = filter_count;\n+\twhile (i--) {\n+\t\t(void) ef10_filter_delete_internal(enp,\n+\t\t    eftp->eft_mulcst_filter_indexes[i]);\n+\t}\n+\teftp->eft_mulcst_filter_count = 0;\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+ef10_filter_insert_all_multicast(\n+\t__in\t\t\t\tefx_nic_t *enp,\n+\t__in\t\t\t\tefx_filter_flags_t filter_flags)\n+{\n+\tef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;\n+\tefx_filter_spec_t spec;\n+\tefx_rc_t rc;\n+\n+\t/* Insert the unknown multicast filter */\n+\tefx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,\n+\t    filter_flags,\n+\t    eftp->eft_default_rxq);\n+\tefx_filter_spec_set_mc_def(&spec);\n+\n+\trc = ef10_filter_add_internal(enp, &spec, B_TRUE,\n+\t    &eftp->eft_mulcst_filter_indexes[0]);\n+\tif (rc != 0)\n+\t\tgoto 
fail1;\n+\n+\teftp->eft_mulcst_filter_count = 1;\n+\teftp->eft_using_all_mulcst = B_TRUE;\n+\n+\t/*\n+\t * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic.\n+\t */\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t\t\tvoid\n+ef10_filter_remove_old(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {\n+\t\tif (ef10_filter_entry_is_auto_old(table, i)) {\n+\t\t\t(void) ef10_filter_delete_internal(enp, i);\n+\t\t}\n+\t}\n+}\n+\n+\n+static\t__checkReturn\tefx_rc_t\n+ef10_filter_get_workarounds(\n+\t__in\t\t\t\tefx_nic_t *enp)\n+{\n+\tefx_nic_cfg_t *encp = &enp->en_nic_cfg;\n+\tuint32_t implemented = 0;\n+\tuint32_t enabled = 0;\n+\tefx_rc_t rc;\n+\n+\trc = efx_mcdi_get_workarounds(enp, &implemented, &enabled);\n+\tif (rc == 0) {\n+\t\t/* Check if chained multicast filter support is enabled */\n+\t\tif (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807)\n+\t\t\tencp->enc_bug26807_workaround = B_TRUE;\n+\t\telse\n+\t\t\tencp->enc_bug26807_workaround = B_FALSE;\n+\t} else if (rc == ENOTSUP) {\n+\t\t/*\n+\t\t * Firmware is too old to support GET_WORKAROUNDS, and support\n+\t\t * for this workaround was implemented later.\n+\t\t */\n+\t\tencp->enc_bug26807_workaround = B_FALSE;\n+\t} else {\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+\n+}\n+\n+\n+/*\n+ * Reconfigure all filters.\n+ * If all_unicst and/or all mulcst filters cannot be applied then\n+ * return ENOTSUP (Note the filters for the specified addresses are\n+ * still applied in this case).\n+ */\n+\t__checkReturn\tefx_rc_t\n+ef10_filter_reconfigure(\n+\t__in\t\t\t\tefx_nic_t *enp,\n+\t__in_ecount(6)\t\t\tuint8_t const *mac_addr,\n+\t__in\t\t\t\tboolean_t all_unicst,\n+\t__in\t\t\t\tboolean_t mulcst,\n+\t__in\t\t\t\tboolean_t all_mulcst,\n+\t__in\t\t\t\tboolean_t brdcst,\n+\t__in_ecount(6*count)\t\tuint8_t const *addrs,\n+\t__in\t\t\t\tuint32_t count)\n+{\n+\tefx_nic_cfg_t *encp = &enp->en_nic_cfg;\n+\tef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;\n+\tefx_filter_flags_t filter_flags;\n+\tunsigned int i;\n+\tefx_rc_t all_unicst_rc = 0;\n+\tefx_rc_t all_mulcst_rc = 0;\n+\tefx_rc_t rc;\n+\n+\tif (table->eft_default_rxq == NULL) {\n+\t\t/*\n+\t\t * Filters direct traffic to the default RXQ, and so cannot be\n+\t\t * inserted until it is available. 
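ef10_filter_insert_multicast_list() above applies a simple rollback pattern: record each filter index as it is inserted and, if an insertion fails while rollback is requested, walk the recorded indexes backwards and delete them. A standalone sketch of that pattern; the insert/delete functions are stand-ins for the real MCDI-backed calls:

#include <stdint.h>
#include <stdio.h>

#define MAX_FILTERS 8

/* Stand-in for ef10_filter_add_internal(): fails on the 4th address. */
static int fake_insert(uint32_t addr_index, uint32_t *filter_index)
{
	if (addr_index == 3)
		return -1;
	*filter_index = 100 + addr_index;
	return 0;
}

/* Stand-in for ef10_filter_delete_internal(). */
static void fake_delete(uint32_t filter_index)
{
	printf("rolled back filter %u\n", filter_index);
}

static int insert_list(uint32_t count)
{
	uint32_t indexes[MAX_FILTERS];
	uint32_t filter_count = 0;
	uint32_t filter_index;
	uint32_t i;

	for (i = 0; i < count && i < MAX_FILTERS; i++) {
		if (fake_insert(i, &filter_index) != 0)
			goto rollback;
		indexes[filter_count++] = filter_index;
	}
	return 0;

rollback:
	/* Remove any filters inserted so far, newest first. */
	i = filter_count;
	while (i--)
		fake_delete(indexes[i]);
	return -1;
}

int main(void)
{
	printf("result: %d\n", insert_list(6));
	return 0;
}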
Any currently configured\n+\t\t * filters must be removed (ignore errors in case the MC\n+\t\t * has rebooted, which removes hardware filters).\n+\t\t */\n+\t\tfor (i = 0; i < table->eft_unicst_filter_count; i++) {\n+\t\t\t(void) ef10_filter_delete_internal(enp,\n+\t\t\t\t\ttable->eft_unicst_filter_indexes[i]);\n+\t\t}\n+\t\ttable->eft_unicst_filter_count = 0;\n+\n+\t\tfor (i = 0; i < table->eft_mulcst_filter_count; i++) {\n+\t\t\t(void) ef10_filter_delete_internal(enp,\n+\t\t\t\t\ttable->eft_mulcst_filter_indexes[i]);\n+\t\t}\n+\t\ttable->eft_mulcst_filter_count = 0;\n+\n+\t\treturn (0);\n+\t}\n+\n+\tif (table->eft_using_rss)\n+\t\tfilter_flags = EFX_FILTER_FLAG_RX_RSS;\n+\telse\n+\t\tfilter_flags = 0;\n+\n+\t/* Mark old filters which may need to be removed */\n+\tfor (i = 0; i < table->eft_unicst_filter_count; i++) {\n+\t\tef10_filter_set_entry_auto_old(table,\n+\t\t\t\t\ttable->eft_unicst_filter_indexes[i]);\n+\t}\n+\tfor (i = 0; i < table->eft_mulcst_filter_count; i++) {\n+\t\tef10_filter_set_entry_auto_old(table,\n+\t\t\t\t\ttable->eft_mulcst_filter_indexes[i]);\n+\t}\n+\n+\t/*\n+\t * Insert or renew unicast filters.\n+\t *\n+\t * Frimware does not perform chaining on unicast filters. As traffic is\n+\t * therefore only delivered to the first matching filter, we should\n+\t * always insert the specific filter for our MAC address, to try and\n+\t * ensure we get that traffic.\n+\t *\n+\t * (If the filter for our MAC address has already been inserted by\n+\t * another function, we won't receive traffic sent to us, even if we\n+\t * insert a unicast mismatch filter. To prevent traffic stealing, this\n+\t * therefore relies on the privilege model only allowing functions to\n+\t * insert filters for their own MAC address unless explicitly given\n+\t * additional privileges by the user. This also means that, even on a\n+\t * priviliged function, inserting a unicast mismatch filter may not\n+\t * catch all traffic in multi PCI function scenarios.)\n+\t */\n+\ttable->eft_unicst_filter_count = 0;\n+\trc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags);\n+\tif (all_unicst || (rc != 0)) {\n+\t\tall_unicst_rc = ef10_filter_insert_all_unicast(enp,\n+\t\t\t\t\t\t    filter_flags);\n+\t\tif ((rc != 0) && (all_unicst_rc != 0))\n+\t\t\tgoto fail1;\n+\t}\n+\n+\t/*\n+\t * WORKAROUND_BUG26807 controls firmware support for chained multicast\n+\t * filters, and can only be enabled or disabled when the hardware filter\n+\t * table is empty.\n+\t *\n+\t * Chained multicast filters require support from the datapath firmware,\n+\t * and may not be available (e.g. low-latency variants or old Huntington\n+\t * firmware).\n+\t *\n+\t * Firmware will reset (FLR) functions which have inserted filters in\n+\t * the hardware filter table when the workaround is enabled/disabled.\n+\t * Functions without any hardware filters are not reset.\n+\t *\n+\t * Re-check if the workaround is enabled after adding unicast hardware\n+\t * filters. This ensures that encp->enc_bug26807_workaround matches the\n+\t * firmware state, and that later changes to enable/disable the\n+\t * workaround will result in this function seeing a reset (FLR).\n+\t *\n+\t * In common-code drivers, we only support multiple PCI function\n+\t * scenarios with firmware that supports multicast chaining, so we can\n+\t * assume it is enabled for such cases and hence simplify the filter\n+\t * insertion logic. 
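The reconfigure path treats the AUTO_OLD flag as a mark-and-sweep: every currently installed automatic filter is marked old, re-inserting a filter with the same spec clears the mark (the refresh path in ef10_filter_add_internal()), and ef10_filter_remove_old() finally deletes whatever is still marked. A standalone sketch of the same mark/refresh/sweep sequence over a trivial table; the names are illustrative:

#include <stdio.h>

#define ROWS 8

struct entry {
	int present;
	int key;
	int auto_old;
};

static struct entry table[ROWS];

static void mark_all_old(void)
{
	int i;

	for (i = 0; i < ROWS; i++) {
		if (table[i].present)
			table[i].auto_old = 1;
	}
}

/* Re-insert a key: refresh an existing entry (clearing AUTO_OLD) or add it. */
static void insert_or_refresh(int key)
{
	int free_idx = -1;
	int i;

	for (i = 0; i < ROWS; i++) {
		if (table[i].present && table[i].key == key) {
			table[i].auto_old = 0;	/* refreshed, keep it */
			return;
		}
		if (!table[i].present && free_idx < 0)
			free_idx = i;
	}
	if (free_idx >= 0) {
		table[free_idx].present = 1;
		table[free_idx].key = key;
		table[free_idx].auto_old = 0;
	}
}

/* Sweep: anything still marked old was not renewed, so remove it. */
static void remove_old(void)
{
	int i;

	for (i = 0; i < ROWS; i++) {
		if (table[i].present && table[i].auto_old)
			table[i].present = 0;
	}
}

int main(void)
{
	int i;

	insert_or_refresh(1);
	insert_or_refresh(2);

	mark_all_old();
	insert_or_refresh(2);	/* renewed */
	insert_or_refresh(3);	/* new */
	remove_old();		/* key 1 was not renewed and is dropped */

	for (i = 0; i < ROWS; i++) {
		if (table[i].present)
			printf("kept key %d\n", table[i].key);
	}
	return 0;
}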
Firmware that does not support multicast chaining\n+\t * does not support multiple PCI function configurations either, so\n+\t * filter insertion is much simpler and the same strategies can still be\n+\t * used.\n+\t */\n+\tif ((rc = ef10_filter_get_workarounds(enp)) != 0)\n+\t\tgoto fail2;\n+\n+\tif ((table->eft_using_all_mulcst != all_mulcst) &&\n+\t    (encp->enc_bug26807_workaround == B_TRUE)) {\n+\t\t/*\n+\t\t * Multicast filter chaining is enabled, so traffic that matches\n+\t\t * more than one multicast filter will be replicated and\n+\t\t * delivered to multiple recipients.  To avoid this duplicate\n+\t\t * delivery, remove old multicast filters before inserting new\n+\t\t * multicast filters.\n+\t\t */\n+\t\tef10_filter_remove_old(enp);\n+\t}\n+\n+\t/* Insert or renew multicast filters */\n+\tif (all_mulcst == B_TRUE) {\n+\t\t/*\n+\t\t * Insert the all multicast filter. If that fails, try to insert\n+\t\t * all of our multicast filters (but without rollback on\n+\t\t * failure).\n+\t\t */\n+\t\tall_mulcst_rc = ef10_filter_insert_all_multicast(enp,\n+\t\t\t\t\t\t\t    filter_flags);\n+\t\tif (all_mulcst_rc != 0) {\n+\t\t\trc = ef10_filter_insert_multicast_list(enp, B_TRUE,\n+\t\t\t    brdcst, addrs, count, filter_flags, B_FALSE);\n+\t\t\tif (rc != 0)\n+\t\t\t\tgoto fail3;\n+\t\t}\n+\t} else {\n+\t\t/*\n+\t\t * Insert filters for multicast addresses.\n+\t\t * If any insertion fails, then rollback and try to insert the\n+\t\t * all multicast filter instead.\n+\t\t * If that also fails, try to insert all of the multicast\n+\t\t * filters (but without rollback on failure).\n+\t\t */\n+\t\trc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst,\n+\t\t\t    addrs, count, filter_flags, B_TRUE);\n+\t\tif (rc != 0) {\n+\t\t\tif ((table->eft_using_all_mulcst == B_FALSE) &&\n+\t\t\t    (encp->enc_bug26807_workaround == B_TRUE)) {\n+\t\t\t\t/*\n+\t\t\t\t * Multicast filter chaining is on, so remove\n+\t\t\t\t * old filters before inserting the multicast\n+\t\t\t\t * all filter to avoid duplicate delivery caused\n+\t\t\t\t * by packets matching multiple filters.\n+\t\t\t\t */\n+\t\t\t\tef10_filter_remove_old(enp);\n+\t\t\t}\n+\n+\t\t\trc = ef10_filter_insert_all_multicast(enp,\n+\t\t\t\t\t\t\t    filter_flags);\n+\t\t\tif (rc != 0) {\n+\t\t\t\trc = ef10_filter_insert_multicast_list(enp,\n+\t\t\t\t    mulcst, brdcst,\n+\t\t\t\t    addrs, count, filter_flags, B_FALSE);\n+\t\t\t\tif (rc != 0)\n+\t\t\t\t\tgoto fail4;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t/* Remove old filters which were not renewed */\n+\tef10_filter_remove_old(enp);\n+\n+\t/* report if any optional flags were rejected */\n+\tif (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) ||\n+\t    ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) {\n+\t\trc = ENOTSUP;\n+\t}\n+\n+\treturn (rc);\n+\n+fail4:\n+\tEFSYS_PROBE(fail4);\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\t/* Clear auto old flags */\n+\tfor (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {\n+\t\tif (ef10_filter_entry_is_auto_old(table, i)) {\n+\t\t\tef10_filter_set_entry_not_auto_old(table, i);\n+\t\t}\n+\t}\n+\n+\treturn (rc);\n+}\n+\n+\t\tvoid\n+ef10_filter_get_default_rxq(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tefx_rxq_t **erpp,\n+\t__out\t\tboolean_t *using_rss)\n+{\n+\tef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;\n+\n+\t*erpp = table->eft_default_rxq;\n+\t*using_rss = table->eft_using_rss;\n+}\n+\n+\n+\t\tvoid\n+ef10_filter_default_rxq_set(\n+\t__in\t\tefx_nic_t 
*enp,\n+\t__in\t\tefx_rxq_t *erp,\n+\t__in\t\tboolean_t using_rss)\n+{\n+\tef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;\n+\n+\tEFSYS_ASSERT(using_rss == B_FALSE);\n+\ttable->eft_using_rss = B_FALSE;\n+\ttable->eft_default_rxq = erp;\n+}\n+\n+\t\tvoid\n+ef10_filter_default_rxq_clear(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;\n+\n+\ttable->eft_default_rxq = NULL;\n+\ttable->eft_using_rss = B_FALSE;\n+}\n+\n+\n+#endif /* EFSYS_OPT_FILTER */\n+\n+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\ndiff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h\nnew file mode 100644\nindex 0000000..b901ccc\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_impl.h\n@@ -0,0 +1,718 @@\n+/*\n+ * Copyright (c) 2015-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#ifndef\t_SYS_EF10_IMPL_H\n+#define\t_SYS_EF10_IMPL_H\n+\n+#ifdef\t__cplusplus\n+extern \"C\" {\n+#endif\n+\n+#if   EFSYS_OPT_HUNTINGTON\n+#define\tEF10_MAX_PIOBUF_NBUFS\tHUNT_PIOBUF_NBUFS\n+#endif\n+\n+/*\n+ * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could\n+ * possibly be increased, or the write size reported by newer firmware used\n+ * instead.\n+ */\n+#define\tEF10_NVRAM_CHUNK 0x80\n+\n+/* Alignment requirement for value written to RX WPTR:\n+ *  the WPTR must be aligned to an 8 descriptor boundary\n+ */\n+#define\tEF10_RX_WPTR_ALIGN 8\n+\n+/*\n+ * Max byte offset into the packet the TCP header must start for the hardware\n+ * to be able to parse the packet correctly.\n+ */\n+#define\tEF10_TCP_HEADER_OFFSET_LIMIT\t208\n+\n+/* Invalid RSS context handle */\n+#define\tEF10_RSS_CONTEXT_INVALID\t(0xffffffff)\n+\n+\n+/* EV */\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_ev_init(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\t\t\tvoid\n+ef10_ev_fini(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_ev_qcreate(\n+\t__in\t\tefx_nic_t 
*enp,\n+\t__in\t\tunsigned int index,\n+\t__in\t\tefsys_mem_t *esmp,\n+\t__in\t\tsize_t n,\n+\t__in\t\tuint32_t id,\n+\t__in\t\tuint32_t us,\n+\t__in\t\tuint32_t flags,\n+\t__in\t\tefx_evq_t *eep);\n+\n+\t\t\tvoid\n+ef10_ev_qdestroy(\n+\t__in\t\tefx_evq_t *eep);\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_ev_qprime(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tunsigned int count);\n+\n+\t\t\tvoid\n+ef10_ev_qpost(\n+\t__in\tefx_evq_t *eep,\n+\t__in\tuint16_t data);\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_ev_qmoderate(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tunsigned int us);\n+\n+\t\tvoid\n+ef10_ev_rxlabel_init(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_rxq_t *erp,\n+\t__in\t\tunsigned int label,\n+\t__in\t\tboolean_t packed_stream);\n+\n+\t\tvoid\n+ef10_ev_rxlabel_fini(\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tunsigned int label);\n+\n+/* INTR */\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_intr_init(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_intr_type_t type,\n+\t__in\t\tefsys_mem_t *esmp);\n+\n+\t\t\tvoid\n+ef10_intr_enable(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\t\t\tvoid\n+ef10_intr_disable(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\t\t\tvoid\n+ef10_intr_disable_unlocked(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_intr_trigger(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int level);\n+\n+\t\t\tvoid\n+ef10_intr_status_line(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tboolean_t *fatalp,\n+\t__out\t\tuint32_t *qmaskp);\n+\n+\t\t\tvoid\n+ef10_intr_status_message(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int message,\n+\t__out\t\tboolean_t *fatalp);\n+\n+\t\t\tvoid\n+ef10_intr_fatal(\n+\t__in\t\tefx_nic_t *enp);\n+\t\t\tvoid\n+ef10_intr_fini(\n+\t__in\t\tefx_nic_t *enp);\n+\n+/* NIC */\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_probe(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_set_drv_limits(\n+\t__inout\t\tefx_nic_t *enp,\n+\t__in\t\tefx_drv_limits_t *edlp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_get_vi_pool(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *vi_countp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_get_bar_region(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_nic_region_t region,\n+\t__out\t\tuint32_t *offsetp,\n+\t__out\t\tsize_t *sizep);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_reset(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_init(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t\t\tvoid\n+ef10_nic_fini(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t\t\tvoid\n+ef10_nic_unprobe(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\n+/* MAC */\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_mac_poll(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tefx_link_mode_t *link_modep);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_mac_up(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tboolean_t *mac_upp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_mac_addr_set(\n+\t__in\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_mac_pdu_set(\n+\t__in\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_mac_pdu_get(\n+\t__in\tefx_nic_t *enp,\n+\t__out\tsize_t *pdu);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_mac_reconfigure(\n+\t__in\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_mac_multicast_list_set(\n+\t__in\t\t\t\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_mac_filter_default_rxq_set(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_rxq_t *erp,\n+\t__in\t\tboolean_t using_rss);\n+\n+extern\t\t\tvoid\n+ef10_mac_filter_default_rxq_clear(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\n+/* MCDI 
*/\n+\n+#if EFSYS_OPT_MCDI\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_mcdi_init(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tconst efx_mcdi_transport_t *mtp);\n+\n+extern\t\t\tvoid\n+ef10_mcdi_fini(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t\t\tvoid\n+ef10_mcdi_send_request(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__in_bcount(hdr_len)\tvoid *hdrp,\n+\t__in\t\t\tsize_t hdr_len,\n+\t__in_bcount(sdu_len)\tvoid *sdup,\n+\t__in\t\t\tsize_t sdu_len);\n+\n+extern\t__checkReturn\tboolean_t\n+ef10_mcdi_poll_response(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t\t\tvoid\n+ef10_mcdi_read_response(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__out_bcount(length)\tvoid *bufferp,\n+\t__in\t\t\tsize_t offset,\n+\t__in\t\t\tsize_t length);\n+\n+extern\t\t\tefx_rc_t\n+ef10_mcdi_poll_reboot(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_mcdi_feature_supported(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_mcdi_feature_id_t id,\n+\t__out\t\tboolean_t *supportedp);\n+\n+extern\t\t\tvoid\n+ef10_mcdi_get_timeout(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_mcdi_req_t *emrp,\n+\t__out\t\tuint32_t *timeoutp);\n+\n+#endif /* EFSYS_OPT_MCDI */\n+\n+/* NVRAM */\n+\n+\n+/* PHY */\n+\n+typedef struct ef10_link_state_s {\n+\tuint32_t\t\tels_adv_cap_mask;\n+\tuint32_t\t\tels_lp_cap_mask;\n+\tunsigned int\t\tels_fcntl;\n+\tefx_link_mode_t\t\tels_link_mode;\n+\tboolean_t\t\tels_mac_up;\n+} ef10_link_state_t;\n+\n+extern\t\t\tvoid\n+ef10_phy_link_ev(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__out\t\tefx_link_mode_t *link_modep);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_phy_get_link(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tef10_link_state_t *elsp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_phy_power(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tboolean_t on);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_phy_reconfigure(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_phy_verify(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_phy_oui_get(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *ouip);\n+\n+/* TX */\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_tx_init(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t\t\tvoid\n+ef10_tx_fini(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_tx_qcreate(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int index,\n+\t__in\t\tunsigned int label,\n+\t__in\t\tefsys_mem_t *esmp,\n+\t__in\t\tsize_t n,\n+\t__in\t\tuint32_t id,\n+\t__in\t\tuint16_t flags,\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_txq_t *etp,\n+\t__out\t\tunsigned int *addedp);\n+\n+extern\t\tvoid\n+ef10_tx_qdestroy(\n+\t__in\t\tefx_txq_t *etp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_tx_qpost(\n+\t__in\t\tefx_txq_t *etp,\n+\t__in_ecount(n)\tefx_buffer_t *eb,\n+\t__in\t\tunsigned int n,\n+\t__in\t\tunsigned int completed,\n+\t__inout\t\tunsigned int *addedp);\n+\n+extern\t\t\tvoid\n+ef10_tx_qpush(\n+\t__in\t\tefx_txq_t *etp,\n+\t__in\t\tunsigned int added,\n+\t__in\t\tunsigned int pushed);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_tx_qpace(\n+\t__in\t\tefx_txq_t *etp,\n+\t__in\t\tunsigned int ns);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_tx_qflush(\n+\t__in\t\tefx_txq_t *etp);\n+\n+extern\t\t\tvoid\n+ef10_tx_qenable(\n+\t__in\t\tefx_txq_t *etp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_tx_qpio_enable(\n+\t__in\t\tefx_txq_t *etp);\n+\n+extern\t\t\tvoid\n+ef10_tx_qpio_disable(\n+\t__in\t\tefx_txq_t *etp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_tx_qpio_write(\n+\t__in\t\t\tefx_txq_t 
*etp,\n+\t__in_ecount(buf_length)\tuint8_t *buffer,\n+\t__in\t\t\tsize_t buf_length,\n+\t__in\t\t\tsize_t pio_buf_offset);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_tx_qpio_post(\n+\t__in\t\t\tefx_txq_t *etp,\n+\t__in\t\t\tsize_t pkt_length,\n+\t__in\t\t\tunsigned int completed,\n+\t__inout\t\t\tunsigned int *addedp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_tx_qdesc_post(\n+\t__in\t\tefx_txq_t *etp,\n+\t__in_ecount(n)\tefx_desc_t *ed,\n+\t__in\t\tunsigned int n,\n+\t__in\t\tunsigned int completed,\n+\t__inout\t\tunsigned int *addedp);\n+\n+extern\tvoid\n+ef10_tx_qdesc_dma_create(\n+\t__in\tefx_txq_t *etp,\n+\t__in\tefsys_dma_addr_t addr,\n+\t__in\tsize_t size,\n+\t__in\tboolean_t eop,\n+\t__out\tefx_desc_t *edp);\n+\n+extern\tvoid\n+ef10_tx_qdesc_tso_create(\n+\t__in\tefx_txq_t *etp,\n+\t__in\tuint16_t ipv4_id,\n+\t__in\tuint32_t tcp_seq,\n+\t__in\tuint8_t\t tcp_flags,\n+\t__out\tefx_desc_t *edp);\n+\n+extern\tvoid\n+ef10_tx_qdesc_tso2_create(\n+\t__in\t\t\tefx_txq_t *etp,\n+\t__in\t\t\tuint16_t ipv4_id,\n+\t__in\t\t\tuint32_t tcp_seq,\n+\t__in\t\t\tuint16_t tcp_mss,\n+\t__out_ecount(count)\tefx_desc_t *edp,\n+\t__in\t\t\tint count);\n+\n+extern\tvoid\n+ef10_tx_qdesc_vlantci_create(\n+\t__in\tefx_txq_t *etp,\n+\t__in\tuint16_t vlan_tci,\n+\t__out\tefx_desc_t *edp);\n+\n+\n+typedef uint32_t\tefx_piobuf_handle_t;\n+\n+#define\tEFX_PIOBUF_HANDLE_INVALID\t((efx_piobuf_handle_t) -1)\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_pio_alloc(\n+\t__inout\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *bufnump,\n+\t__out\t\tefx_piobuf_handle_t *handlep,\n+\t__out\t\tuint32_t *blknump,\n+\t__out\t\tuint32_t *offsetp,\n+\t__out\t\tsize_t *sizep);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_pio_free(\n+\t__inout\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t bufnum,\n+\t__in\t\tuint32_t blknum);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_pio_link(\n+\t__inout\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t vi_index,\n+\t__in\t\tefx_piobuf_handle_t handle);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_pio_unlink(\n+\t__inout\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t vi_index);\n+\n+\n+/* VPD */\n+\n+\n+/* RX */\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_rx_init(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_rx_prefix_pktlen(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint8_t *buffer,\n+\t__out\t\tuint16_t *lengthp);\n+\n+extern\t\t\tvoid\n+ef10_rx_qpost(\n+\t__in\t\tefx_rxq_t *erp,\n+\t__in_ecount(n)\tefsys_dma_addr_t *addrp,\n+\t__in\t\tsize_t size,\n+\t__in\t\tunsigned int n,\n+\t__in\t\tunsigned int completed,\n+\t__in\t\tunsigned int added);\n+\n+extern\t\t\tvoid\n+ef10_rx_qpush(\n+\t__in\t\tefx_rxq_t *erp,\n+\t__in\t\tunsigned int added,\n+\t__inout\t\tunsigned int *pushedp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_rx_qflush(\n+\t__in\t\tefx_rxq_t *erp);\n+\n+extern\t\tvoid\n+ef10_rx_qenable(\n+\t__in\t\tefx_rxq_t *erp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_rx_qcreate(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int index,\n+\t__in\t\tunsigned int label,\n+\t__in\t\tefx_rxq_type_t type,\n+\t__in\t\tefsys_mem_t *esmp,\n+\t__in\t\tsize_t n,\n+\t__in\t\tuint32_t id,\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_rxq_t *erp);\n+\n+extern\t\t\tvoid\n+ef10_rx_qdestroy(\n+\t__in\t\tefx_rxq_t *erp);\n+\n+extern\t\t\tvoid\n+ef10_rx_fini(\n+\t__in\t\tefx_nic_t *enp);\n+\n+#if EFSYS_OPT_FILTER\n+\n+typedef struct ef10_filter_handle_s {\n+\tuint32_t\tefh_lo;\n+\tuint32_t\tefh_hi;\n+} ef10_filter_handle_t;\n+\n+typedef struct ef10_filter_entry_s {\n+\tuintptr_t efe_spec; /* 
pointer to filter spec plus busy bit */\n+\tef10_filter_handle_t efe_handle;\n+} ef10_filter_entry_t;\n+\n+/*\n+ * BUSY flag indicates that an update is in progress.\n+ * AUTO_OLD flag is used to mark and sweep MAC packet filters.\n+ */\n+#define\tEFX_EF10_FILTER_FLAG_BUSY\t1U\n+#define\tEFX_EF10_FILTER_FLAG_AUTO_OLD\t2U\n+#define\tEFX_EF10_FILTER_FLAGS\t\t3U\n+\n+/*\n+ * Size of the hash table used by the driver. Doesn't need to be the\n+ * same size as the hardware's table.\n+ */\n+#define\tEFX_EF10_FILTER_TBL_ROWS 8192\n+\n+/* Only need to allow for one directed and one unknown unicast filter */\n+#define\tEFX_EF10_FILTER_UNICAST_FILTERS_MAX\t2\n+\n+/* Allow for the broadcast address to be added to the multicast list */\n+#define\tEFX_EF10_FILTER_MULTICAST_FILTERS_MAX\t(EFX_MAC_MULTICAST_LIST_MAX + 1)\n+\n+typedef struct ef10_filter_table_s {\n+\tef10_filter_entry_t\teft_entry[EFX_EF10_FILTER_TBL_ROWS];\n+\tefx_rxq_t\t\t*eft_default_rxq;\n+\tboolean_t\t\teft_using_rss;\n+\tuint32_t\t\teft_unicst_filter_indexes[\n+\t    EFX_EF10_FILTER_UNICAST_FILTERS_MAX];\n+\tuint32_t\t\teft_unicst_filter_count;\n+\tuint32_t\t\teft_mulcst_filter_indexes[\n+\t    EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];\n+\tuint32_t\t\teft_mulcst_filter_count;\n+\tboolean_t\t\teft_using_all_mulcst;\n+} ef10_filter_table_t;\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_filter_init(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\t\t\tvoid\n+ef10_filter_fini(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_filter_restore(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_filter_add(\n+\t__in\t\tefx_nic_t *enp,\n+\t__inout\t\tefx_filter_spec_t *spec,\n+\t__in\t\tboolean_t may_replace);\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_filter_delete(\n+\t__in\t\tefx_nic_t *enp,\n+\t__inout\t\tefx_filter_spec_t *spec);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_filter_supported_filters(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *list,\n+\t__out\t\tsize_t *length);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_filter_reconfigure(\n+\t__in\t\t\t\tefx_nic_t *enp,\n+\t__in_ecount(6)\t\t\tuint8_t const *mac_addr,\n+\t__in\t\t\t\tboolean_t all_unicst,\n+\t__in\t\t\t\tboolean_t mulcst,\n+\t__in\t\t\t\tboolean_t all_mulcst,\n+\t__in\t\t\t\tboolean_t brdcst,\n+\t__in_ecount(6*count)\t\tuint8_t const *addrs,\n+\t__in\t\t\t\tuint32_t count);\n+\n+extern\t\tvoid\n+ef10_filter_get_default_rxq(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tefx_rxq_t **erpp,\n+\t__out\t\tboolean_t *using_rss);\n+\n+extern\t\tvoid\n+ef10_filter_default_rxq_set(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_rxq_t *erp,\n+\t__in\t\tboolean_t using_rss);\n+\n+extern\t\tvoid\n+ef10_filter_default_rxq_clear(\n+\t__in\t\tefx_nic_t *enp);\n+\n+\n+#endif /* EFSYS_OPT_FILTER */\n+\n+extern\t__checkReturn\t\t\tefx_rc_t\n+efx_mcdi_get_function_info(\n+\t__in\t\t\t\tefx_nic_t *enp,\n+\t__out\t\t\t\tuint32_t *pfp,\n+\t__out_opt\t\t\tuint32_t *vfp);\n+\n+extern\t__checkReturn\t\tefx_rc_t\n+efx_mcdi_privilege_mask(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__in\t\t\tuint32_t pf,\n+\t__in\t\t\tuint32_t vf,\n+\t__out\t\t\tuint32_t *maskp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_port_assignment(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *portp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_port_modes(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *modesp,\n+\t__out_opt\tuint32_t *current_modep);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_nic_get_port_mode_bandwidth(\n+\t__in\t\tuint32_t port_mode,\n+\t__out\t\tuint32_t 
*bandwidth_mbpsp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_mac_address_pf(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__out_ecount_opt(6)\tuint8_t mac_addrp[6]);\n+\n+extern\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_mac_address_vf(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__out_ecount_opt(6)\tuint8_t mac_addrp[6]);\n+\n+extern\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_clock(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *sys_freqp,\n+\t__out\t\tuint32_t *dpcpu_freqp);\n+\n+\n+extern\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_vector_cfg(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out_opt\tuint32_t *vec_basep,\n+\t__out_opt\tuint32_t *pf_nvecp,\n+\t__out_opt\tuint32_t *vf_nvecp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_get_datapath_caps(\n+\t__in\t\tefx_nic_t *enp);\n+\n+extern\t__checkReturn\t\tefx_rc_t\n+ef10_get_privilege_mask(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__out\t\t\tuint32_t *maskp);\n+\n+extern\t__checkReturn\tefx_rc_t\n+ef10_external_port_mapping(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t port,\n+\t__out\t\tuint8_t *external_portp);\n+\n+#ifdef\t__cplusplus\n+}\n+#endif\n+\n+#endif\t/* _SYS_EF10_IMPL_H */\ndiff --git a/drivers/net/sfc/base/ef10_intr.c b/drivers/net/sfc/base/ef10_intr.c\nnew file mode 100644\nindex 0000000..16be3d8\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_intr.c\n@@ -0,0 +1,197 @@\n+/*\n+ * Copyright (c) 2012-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#include \"efx.h\"\n+#include \"efx_impl.h\"\n+\n+\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_intr_init(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_intr_type_t type,\n+\t__in\t\tefsys_mem_t *esmp)\n+{\n+\t_NOTE(ARGUNUSED(enp, type, esmp))\n+\treturn (0);\n+}\n+\n+\n+\t\t\tvoid\n+ef10_intr_enable(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\t_NOTE(ARGUNUSED(enp))\n+}\n+\n+\n+\t\t\tvoid\n+ef10_intr_disable(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\t_NOTE(ARGUNUSED(enp))\n+}\n+\n+\n+\t\t\tvoid\n+ef10_intr_disable_unlocked(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\t_NOTE(ARGUNUSED(enp))\n+}\n+\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_trigger_interrupt(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int level)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_TRIGGER_INTERRUPT_IN_LEN,\n+\t\t\t    MC_CMD_TRIGGER_INTERRUPT_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\tif (level >= enp->en_nic_cfg.enc_intr_limit) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_TRIGGER_INTERRUPT;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_TRIGGER_INTERRUPT_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail2;\n+\t}\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_intr_trigger(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int level)\n+{\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tefx_rc_t rc;\n+\n+\tif (encp->enc_bug41750_workaround) {\n+\t\t/*\n+\t\t * bug 41750: Test interrupts don't work on Greenport\n+\t\t * bug 50084: Test interrupts don't work on VFs\n+\t\t */\n+\t\trc = ENOTSUP;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif ((rc = efx_mcdi_trigger_interrupt(enp, level)) != 0)\n+\t\tgoto fail2;\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\t\tvoid\n+ef10_intr_status_line(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tboolean_t *fatalp,\n+\t__out\t\tuint32_t *qmaskp)\n+{\n+\tefx_dword_t dword;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t/* Read the queue mask and implicitly acknowledge the interrupt. 
*/\n+\tEFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE);\n+\t*qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);\n+\n+\tEFSYS_PROBE1(qmask, uint32_t, *qmaskp);\n+\n+\t*fatalp = B_FALSE;\n+}\n+\n+\t\t\tvoid\n+ef10_intr_status_message(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int message,\n+\t__out\t\tboolean_t *fatalp)\n+{\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t_NOTE(ARGUNUSED(enp, message))\n+\n+\t/* EF10 fatal errors are reported via events */\n+\t*fatalp = B_FALSE;\n+}\n+\n+\t\t\tvoid\n+ef10_intr_fatal(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\t/* EF10 fatal errors are reported via events */\n+\t_NOTE(ARGUNUSED(enp))\n+}\n+\n+\t\t\tvoid\n+ef10_intr_fini(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\t_NOTE(ARGUNUSED(enp))\n+}\n+\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\ndiff --git a/drivers/net/sfc/base/ef10_mac.c b/drivers/net/sfc/base/ef10_mac.c\nnew file mode 100644\nindex 0000000..7960067\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_mac.c\n@@ -0,0 +1,446 @@\n+/*\n+ * Copyright (c) 2012-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#include \"efx.h\"\n+#include \"efx_impl.h\"\n+\n+\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_mac_poll(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tefx_link_mode_t *link_modep)\n+{\n+\tefx_port_t *epp = &(enp->en_port);\n+\tef10_link_state_t els;\n+\tefx_rc_t rc;\n+\n+\tif ((rc = ef10_phy_get_link(enp, &els)) != 0)\n+\t\tgoto fail1;\n+\n+\tepp->ep_adv_cap_mask = els.els_adv_cap_mask;\n+\tepp->ep_fcntl = els.els_fcntl;\n+\n+\t*link_modep = els.els_link_mode;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\t*link_modep = EFX_LINK_UNKNOWN;\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_mac_up(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tboolean_t *mac_upp)\n+{\n+\tef10_link_state_t els;\n+\tefx_rc_t rc;\n+\n+\t/*\n+\t * Because EF10 doesn't *require* polling, we can't rely on\n+\t * ef10_mac_poll() being executed to populate epp->ep_mac_up.\n+\t */\n+\tif ((rc = ef10_phy_get_link(enp, &els)) != 0)\n+\t\tgoto fail1;\n+\n+\t*mac_upp = els.els_mac_up;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+/*\n+ * EF10 adapters use MC_CMD_VADAPTOR_SET_MAC to set the\n+ * MAC address; the address field in MC_CMD_SET_MAC has no\n+ * effect.\n+ * MC_CMD_VADAPTOR_SET_MAC requires mac-spoofing privilege and\n+ * the port to have no filters or queues active.\n+ */\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_vadapter_set_mac(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_port_t *epp = &(enp->en_port);\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN,\n+\t\t\t    MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_VADAPTOR_SET_MAC;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_VADAPTOR_SET_MAC_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,\n+\t    enp->en_vport_id);\n+\tEFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, VADAPTOR_SET_MAC_IN_MACADDR),\n+\t    epp->ep_mac_addr);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_mac_addr_set(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_rc_t rc;\n+\n+\tif ((rc = efx_mcdi_vadapter_set_mac(enp)) != 0) {\n+\t\tif (rc != ENOTSUP)\n+\t\t\tgoto fail1;\n+\n+\t\t/*\n+\t\t * Fallback for older Huntington firmware without Vadapter\n+\t\t * support.\n+\t\t */\n+\t\tif ((rc = ef10_mac_reconfigure(enp)) != 0)\n+\t\t\tgoto fail2;\n+\t}\n+\n+\treturn 
(0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_mtu_set(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t mtu)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,\n+\t\t\t    MC_CMD_SET_MAC_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_SET_MAC;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;\n+\n+\t/* Only configure the MTU in this call to MC_CMD_SET_MAC */\n+\tMCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_MTU, mtu);\n+\tMCDI_IN_POPULATE_DWORD_1(req, SET_MAC_EXT_IN_CONTROL,\n+\t\t\t    SET_MAC_EXT_IN_CFG_MTU, 1);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\t\tefx_rc_t\n+efx_mcdi_mtu_get(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tsize_t *mtu)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,\n+\t\t\t    MC_CMD_SET_MAC_V2_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_SET_MAC;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_SET_MAC_V2_OUT_LEN;\n+\n+\t/*\n+\t * With MC_CMD_SET_MAC_EXT_IN_CONTROL set to 0, this just queries the\n+\t * MTU.  This should always be supported on Medford, but it is not\n+\t * supported on older Huntington firmware.\n+\t */\n+\tMCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_CONTROL, 0);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\tif (req.emr_out_length_used < MC_CMD_SET_MAC_V2_OUT_MTU_OFST + 4) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\t*mtu = MCDI_OUT_DWORD(req, SET_MAC_V2_OUT_MTU);\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_mac_pdu_set(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_port_t *epp = &(enp->en_port);\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tefx_rc_t rc;\n+\n+\tif (encp->enc_enhanced_set_mac_supported) {\n+\t\tif ((rc = efx_mcdi_mtu_set(enp, epp->ep_mac_pdu)) != 0)\n+\t\t\tgoto fail1;\n+\t} else {\n+\t\t/*\n+\t\t * Fallback for older Huntington firmware, which always\n+\t\t * configure all of the parameters to MC_CMD_SET_MAC. 
This isn't\n+\t\t * suitable for setting the MTU on unpriviliged functions.\n+\t\t */\n+\t\tif ((rc = ef10_mac_reconfigure(enp)) != 0)\n+\t\t\tgoto fail2;\n+\t}\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\t\tefx_rc_t\n+ef10_mac_pdu_get(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tsize_t *pdu)\n+{\n+\tefx_rc_t rc;\n+\n+\tif ((rc = efx_mcdi_mtu_get(enp, pdu)) != 0)\n+\t\tgoto fail1;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+__checkReturn\tefx_rc_t\n+ef10_mac_reconfigure(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_port_t *epp = &(enp->en_port);\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,\n+\t\t\t    MC_CMD_SET_MAC_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_SET_MAC;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_SET_MAC_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);\n+\tMCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);\n+\tEFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),\n+\t\t\t    epp->ep_mac_addr);\n+\n+\t/*\n+\t * Note: The Huntington MAC does not support REJECT_BRDCST.\n+\t * The REJECT_UNCST flag will also prevent multicast traffic\n+\t * from reaching the filters. As Huntington filters drop any\n+\t * traffic that does not match a filter it is ok to leave the\n+\t * MAC running in promiscuous mode. See bug41141.\n+\t *\n+\t * FIXME: Does REJECT_UNCST behave the same way on Medford?\n+\t */\n+\tMCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,\n+\t\t\t\t    SET_MAC_IN_REJECT_UNCST, 0,\n+\t\t\t\t    SET_MAC_IN_REJECT_BRDCST, 0);\n+\n+\t/*\n+\t * Flow control, whether it is auto-negotiated or not,\n+\t * is set via the PHY advertised capabilities.  
When set to\n+\t * automatic the MAC will use the PHY settings to determine\n+\t * the flow control settings.\n+\t */\n+\tMCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, MC_CMD_FCNTL_AUTO);\n+\n+\t/* Do not include the Ethernet frame checksum in RX packets */\n+\tMCDI_IN_POPULATE_DWORD_1(req, SET_MAC_IN_FLAGS,\n+\t\t\t\t    SET_MAC_IN_FLAG_INCLUDE_FCS, 0);\n+\n+\tefx_mcdi_execute_quiet(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\t/*\n+\t\t * Unprivileged functions cannot control link state,\n+\t\t * but still need to configure filters.\n+\t\t */\n+\t\tif (req.emr_rc != EACCES) {\n+\t\t\trc = req.emr_rc;\n+\t\t\tgoto fail1;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Apply the filters for the MAC configuration.\n+\t * If the NIC isn't ready to accept filters this may\n+\t * return success without setting anything.\n+\t */\n+\trc = efx_filter_reconfigure(enp, epp->ep_mac_addr,\n+\t\t\t\t    epp->ep_all_unicst, epp->ep_mulcst,\n+\t\t\t\t    epp->ep_all_mulcst, epp->ep_brdcst,\n+\t\t\t\t    epp->ep_mulcst_addr_list,\n+\t\t\t\t    epp->ep_mulcst_addr_count);\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\t\t\tefx_rc_t\n+ef10_mac_multicast_list_set(\n+\t__in\t\t\t\tefx_nic_t *enp)\n+{\n+\tefx_port_t *epp = &(enp->en_port);\n+\tconst efx_mac_ops_t *emop = epp->ep_emop;\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\tif ((rc = emop->emo_reconfigure(enp)) != 0)\n+\t\tgoto fail1;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_mac_filter_default_rxq_set(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_rxq_t *erp,\n+\t__in\t\tboolean_t using_rss)\n+{\n+\tefx_port_t *epp = &(enp->en_port);\n+\tefx_rxq_t *old_rxq;\n+\tboolean_t old_using_rss;\n+\tefx_rc_t rc;\n+\n+\tef10_filter_get_default_rxq(enp, &old_rxq, &old_using_rss);\n+\n+\tef10_filter_default_rxq_set(enp, erp, using_rss);\n+\n+\trc = efx_filter_reconfigure(enp, epp->ep_mac_addr,\n+\t\t\t\t    epp->ep_all_unicst, epp->ep_mulcst,\n+\t\t\t\t    epp->ep_all_mulcst, epp->ep_brdcst,\n+\t\t\t\t    epp->ep_mulcst_addr_list,\n+\t\t\t\t    epp->ep_mulcst_addr_count);\n+\n+\tif (rc != 0)\n+\t\tgoto fail1;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\tef10_filter_default_rxq_set(enp, old_rxq, old_using_rss);\n+\n+\treturn (rc);\n+}\n+\n+\t\t\tvoid\n+ef10_mac_filter_default_rxq_clear(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_port_t *epp = &(enp->en_port);\n+\n+\tef10_filter_default_rxq_clear(enp);\n+\n+\tefx_filter_reconfigure(enp, epp->ep_mac_addr,\n+\t\t\t\t    epp->ep_all_unicst, epp->ep_mulcst,\n+\t\t\t\t    epp->ep_all_mulcst, epp->ep_brdcst,\n+\t\t\t\t    epp->ep_mulcst_addr_list,\n+\t\t\t\t    epp->ep_mulcst_addr_count);\n+}\n+\n+\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\ndiff --git a/drivers/net/sfc/base/ef10_mcdi.c b/drivers/net/sfc/base/ef10_mcdi.c\nnew file mode 100644\nindex 0000000..5a26bda\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_mcdi.c\n@@ -0,0 +1,342 @@\n+/*\n+ * Copyright (c) 2012-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. 
Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#include \"efx.h\"\n+#include \"efx_impl.h\"\n+\n+\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+#if EFSYS_OPT_MCDI\n+\n+#ifndef WITH_MCDI_V2\n+#error \"WITH_MCDI_V2 required for EF10 MCDIv2 commands.\"\n+#endif\n+\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_mcdi_init(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tconst efx_mcdi_transport_t *emtp)\n+{\n+\tefx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);\n+\tefsys_mem_t *esmp = emtp->emt_dma_mem;\n+\tefx_dword_t dword;\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\tEFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA);\n+\n+\t/*\n+\t * All EF10 firmware supports MCDIv2 and MCDIv1.\n+\t * Medford BootROM supports MCDIv2 and MCDIv1.\n+\t * Huntington BootROM supports MCDIv1 only.\n+\t */\n+\temip->emi_max_version = 2;\n+\n+\t/* A host DMA buffer is required for EF10 MCDI */\n+\tif (esmp == NULL) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t/*\n+\t * Ensure that the MC doorbell is in a known state before issuing MCDI\n+\t * commands. The recovery algorithm requires that the MC command buffer\n+\t * must be 256 byte aligned. 
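ef10_mcdi_init() below refuses an MCDI DMA buffer whose bus address is not 256-byte aligned, since the doorbell recovery algorithm depends on that alignment. A small illustration of the test, and of rounding an address up to the boundary at allocation time (helper names are invented for the example; the real code checks EFSYS_MEM_ADDR(esmp) directly):

#define	EXAMPLE_MCDI_BUF_ALIGN	256u

/* The alignment test ef10_mcdi_init() applies to the buffer address. */
static	boolean_t
example_mcdi_buf_aligned(uint64_t bus_addr)
{
	return (((bus_addr & (EXAMPLE_MCDI_BUF_ALIGN - 1)) == 0) ? B_TRUE : B_FALSE);
}

/* Rounding an address up to the next 256-byte boundary. */
static	uint64_t
example_mcdi_buf_align_up(uint64_t bus_addr)
{
	return ((bus_addr + EXAMPLE_MCDI_BUF_ALIGN - 1) &
	    ~(uint64_t)(EXAMPLE_MCDI_BUF_ALIGN - 1));
}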
See bug24769.\n+\t */\n+\tif ((EFSYS_MEM_ADDR(esmp) & 0xFF) != 0) {\n+\t\trc = EINVAL;\n+\t\tgoto fail2;\n+\t}\n+\tEFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 1);\n+\tEFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);\n+\n+\t/* Save initial MC reboot status */\n+\t(void) ef10_mcdi_poll_reboot(enp);\n+\n+\t/* Start a new epoch (allow fresh MCDI requests to succeed) */\n+\tefx_mcdi_new_epoch(enp);\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\t\tvoid\n+ef10_mcdi_fini(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);\n+\n+\temip->emi_new_epoch = B_FALSE;\n+}\n+\n+/*\n+ * In older firmware all commands are processed in a single thread, so a long\n+ * running command for one PCIe function can block processing for another\n+ * function (see bug 61269).\n+ *\n+ * In newer firmware that supports multithreaded MCDI processing, we can extend\n+ * the timeout for long-running requests which we know firmware may choose to\n+ * process in a background thread.\n+ */\n+#define\tEF10_MCDI_CMD_TIMEOUT_US\t(10 * 1000 * 1000)\n+#define\tEF10_MCDI_CMD_LONG_TIMEOUT_US\t(60 * 1000 * 1000)\n+\n+\t\t\tvoid\n+ef10_mcdi_get_timeout(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_mcdi_req_t *emrp,\n+\t__out\t\tuint32_t *timeoutp)\n+{\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\n+\tswitch (emrp->emr_cmd) {\n+\tcase MC_CMD_POLL_BIST:\n+\tcase MC_CMD_NVRAM_ERASE:\n+\tcase MC_CMD_LICENSING_V3:\n+\tcase MC_CMD_NVRAM_UPDATE_FINISH:\n+\t\tif (encp->enc_fw_verified_nvram_update_required != B_FALSE) {\n+\t\t\t/*\n+\t\t\t * Potentially longer running commands, which firmware\n+\t\t\t * may choose to process in a background thread.\n+\t\t\t */\n+\t\t\t*timeoutp = EF10_MCDI_CMD_LONG_TIMEOUT_US;\n+\t\t\tbreak;\n+\t\t}\n+\t\t/* FALLTHRU */\n+\tdefault:\n+\t\t*timeoutp = EF10_MCDI_CMD_TIMEOUT_US;\n+\t\tbreak;\n+\t}\n+}\n+\n+\t\t\tvoid\n+ef10_mcdi_send_request(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__in_bcount(hdr_len)\tvoid *hdrp,\n+\t__in\t\t\tsize_t hdr_len,\n+\t__in_bcount(sdu_len)\tvoid *sdup,\n+\t__in\t\t\tsize_t sdu_len)\n+{\n+\tconst efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;\n+\tefsys_mem_t *esmp = emtp->emt_dma_mem;\n+\tefx_dword_t dword;\n+\tunsigned int pos;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t/* Write the header */\n+\tfor (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) {\n+\t\tdword = *(efx_dword_t *)((uint8_t *)hdrp + pos);\n+\t\tEFSYS_MEM_WRITED(esmp, pos, &dword);\n+\t}\n+\n+\t/* Write the payload */\n+\tfor (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {\n+\t\tdword = *(efx_dword_t *)((uint8_t *)sdup + pos);\n+\t\tEFSYS_MEM_WRITED(esmp, hdr_len + pos, &dword);\n+\t}\n+\n+\t/* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */\n+\tEFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, hdr_len + sdu_len);\n+\tEFSYS_PIO_WRITE_BARRIER();\n+\n+\t/* Ring the doorbell to post the command DMA address to the MC */\n+\tEFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,\n+\t    EFSYS_MEM_ADDR(esmp) >> 32);\n+\tEFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE);\n+\n+\tEFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,\n+\t    EFSYS_MEM_ADDR(esmp) & 0xffffffff);\n+\tEFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);\n+}\n+\n+\t__checkReturn\tboolean_t\n+ef10_mcdi_poll_response(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tconst efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;\n+\tefsys_mem_t *esmp = 
emtp->emt_dma_mem;\n+\tefx_dword_t hdr;\n+\n+\tEFSYS_MEM_READD(esmp, 0, &hdr);\n+\tEFSYS_MEM_READ_BARRIER();\n+\n+\treturn (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);\n+}\n+\n+\t\t\tvoid\n+ef10_mcdi_read_response(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__out_bcount(length)\tvoid *bufferp,\n+\t__in\t\t\tsize_t offset,\n+\t__in\t\t\tsize_t length)\n+{\n+\tconst efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;\n+\tefsys_mem_t *esmp = emtp->emt_dma_mem;\n+\tunsigned int pos;\n+\tefx_dword_t data;\n+\n+\tfor (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {\n+\t\tEFSYS_MEM_READD(esmp, offset + pos, &data);\n+\t\tmemcpy((uint8_t *)bufferp + pos, &data,\n+\t\t    MIN(sizeof (data), length - pos));\n+\t}\n+}\n+\n+\t\t\tefx_rc_t\n+ef10_mcdi_poll_reboot(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);\n+\tefx_dword_t dword;\n+\tuint32_t old_status;\n+\tuint32_t new_status;\n+\tefx_rc_t rc;\n+\n+\told_status = emip->emi_mc_reboot_status;\n+\n+\t/* Update MC reboot status word */\n+\tEFX_BAR_TBL_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, 0, &dword, B_FALSE);\n+\tnew_status = dword.ed_u32[0];\n+\n+\t/* MC has rebooted if the value has changed */\n+\tif (new_status != old_status) {\n+\t\temip->emi_mc_reboot_status = new_status;\n+\n+\t\t/*\n+\t\t * FIXME: Ignore detected MC REBOOT for now.\n+\t\t *\n+\t\t * The Siena support for checking for MC reboot from status\n+\t\t * flags is broken - see comments in siena_mcdi_poll_reboot().\n+\t\t * As the generic MCDI code is shared the EF10 reboot\n+\t\t * detection suffers similar problems.\n+\t\t *\n+\t\t * Do not report an error when the boot status changes until\n+\t\t * this can be handled by common code drivers (and reworked to\n+\t\t * support Siena too).\n+\t\t */\n+\t\t_NOTE(CONSTANTCONDITION)\n+\t\tif (B_FALSE) {\n+\t\t\trc = EIO;\n+\t\t\tgoto fail1;\n+\t\t}\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_mcdi_feature_supported(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_mcdi_feature_id_t id,\n+\t__out\t\tboolean_t *supportedp)\n+{\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tuint32_t privilege_mask = encp->enc_privilege_mask;\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t/*\n+\t * Use privilege mask state at MCDI attach.\n+\t */\n+\n+\tswitch (id) {\n+\tcase EFX_MCDI_FEATURE_FW_UPDATE:\n+\t\t/*\n+\t\t * Admin privilege must be used prior to introduction of\n+\t\t * specific flag.\n+\t\t */\n+\t\t*supportedp =\n+\t\t    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);\n+\t\tbreak;\n+\tcase EFX_MCDI_FEATURE_LINK_CONTROL:\n+\t\t/*\n+\t\t * Admin privilege used prior to introduction of\n+\t\t * specific flag.\n+\t\t */\n+\t\t*supportedp =\n+\t\t    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, LINK) ||\n+\t\t    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);\n+\t\tbreak;\n+\tcase EFX_MCDI_FEATURE_MACADDR_CHANGE:\n+\t\t/*\n+\t\t * Admin privilege must be used prior to introduction of\n+\t\t * mac spoofing privilege (at v4.6), which is used up to\n+\t\t * introduction of change mac spoofing privilege (at v4.7)\n+\t\t */\n+\t\t*supportedp =\n+\t\t    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, CHANGE_MAC) ||\n+\t\t    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||\n+\t\t    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);\n+\t\tbreak;\n+\tcase EFX_MCDI_FEATURE_MAC_SPOOFING:\n+\t\t/*\n+\t\t * Admin privilege must be 
used prior to introduction of\n+\t\t * mac spoofing privilege (at v4.6), which is used up to\n+\t\t * introduction of mac spoofing TX privilege (at v4.7)\n+\t\t */\n+\t\t*supportedp =\n+\t\t    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING_TX) ||\n+\t\t    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||\n+\t\t    EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);\n+\t\tbreak;\n+\tdefault:\n+\t\trc = ENOTSUP;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+#endif\t/* EFSYS_OPT_MCDI */\n+\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\ndiff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c\nnew file mode 100644\nindex 0000000..538e18c\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_nic.c\n@@ -0,0 +1,1769 @@\n+/*\n+ * Copyright (c) 2012-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#include \"efx.h\"\n+#include \"efx_impl.h\"\n+\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+#include \"ef10_tlv_layout.h\"\n+\n+\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_port_assignment(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *portp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,\n+\t\t\t    MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto 
fail2;\n+\t}\n+\n+\t*portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_port_modes(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *modesp,\n+\t__out_opt\tuint32_t *current_modep)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,\n+\t\t\t    MC_CMD_GET_PORT_MODES_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_GET_PORT_MODES;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\t/*\n+\t * Require only Modes and DefaultMode fields, unless the current mode\n+\t * was requested (CurrentMode field was added for Medford).\n+\t */\n+\tif (req.emr_out_length_used <\n+\t    MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\tif ((current_modep != NULL) && (req.emr_out_length_used <\n+\t    MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail3;\n+\t}\n+\n+\t*modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);\n+\n+\tif (current_modep != NULL) {\n+\t\t*current_modep = MCDI_OUT_DWORD(req,\n+\t\t\t\t\t    GET_PORT_MODES_OUT_CURRENT_MODE);\n+\t}\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_get_port_mode_bandwidth(\n+\t__in\t\tuint32_t port_mode,\n+\t__out\t\tuint32_t *bandwidth_mbpsp)\n+{\n+\tuint32_t bandwidth;\n+\tefx_rc_t rc;\n+\n+\tswitch (port_mode) {\n+\tcase TLV_PORT_MODE_10G:\n+\t\tbandwidth = 10000;\n+\t\tbreak;\n+\tcase TLV_PORT_MODE_10G_10G:\n+\t\tbandwidth = 10000 * 2;\n+\t\tbreak;\n+\tcase TLV_PORT_MODE_10G_10G_10G_10G:\n+\tcase TLV_PORT_MODE_10G_10G_10G_10G_Q:\n+\tcase TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:\n+\tcase TLV_PORT_MODE_10G_10G_10G_10G_Q2:\n+\t\tbandwidth = 10000 * 4;\n+\t\tbreak;\n+\tcase TLV_PORT_MODE_40G:\n+\t\tbandwidth = 40000;\n+\t\tbreak;\n+\tcase TLV_PORT_MODE_40G_40G:\n+\t\tbandwidth = 40000 * 2;\n+\t\tbreak;\n+\tcase TLV_PORT_MODE_40G_10G_10G:\n+\tcase TLV_PORT_MODE_10G_10G_40G:\n+\t\tbandwidth = 40000 + (10000 * 2);\n+\t\tbreak;\n+\tdefault:\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t*bandwidth_mbpsp = bandwidth;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\t\tefx_rc_t\n+efx_mcdi_vadaptor_alloc(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__in\t\t\tuint32_t port_id)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,\n+\t\t\t    MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_VADAPTOR_ALLOC;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);\n+\tMCDI_IN_POPULATE_DWORD_1(req, 
VADAPTOR_ALLOC_IN_FLAGS,\n+\t    VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,\n+\t    enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\t\tefx_rc_t\n+efx_mcdi_vadaptor_free(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__in\t\t\tuint32_t port_id)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,\n+\t\t\t    MC_CMD_VADAPTOR_FREE_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_VADAPTOR_FREE;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_mac_address_pf(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__out_ecount_opt(6)\tuint8_t mac_addrp[6])\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,\n+\t\t\t    MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\tif (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {\n+\t\trc = ENOENT;\n+\t\tgoto fail3;\n+\t}\n+\n+\tif (mac_addrp != NULL) {\n+\t\tuint8_t *addrp;\n+\n+\t\taddrp = MCDI_OUT2(req, uint8_t,\n+\t\t    GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);\n+\n+\t\tEFX_MAC_ADDR_COPY(mac_addrp, addrp);\n+\t}\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_mac_address_vf(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__out_ecount_opt(6)\tuint8_t mac_addrp[6])\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,\n+\t\t\t    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;\n+\n+\tMCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,\n+\t    EVB_PORT_ID_ASSIGNED);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used <\n+\t    
MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\tif (MCDI_OUT_DWORD(req,\n+\t\tVPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {\n+\t\trc = ENOENT;\n+\t\tgoto fail3;\n+\t}\n+\n+\tif (mac_addrp != NULL) {\n+\t\tuint8_t *addrp;\n+\n+\t\taddrp = MCDI_OUT2(req, uint8_t,\n+\t\t    VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);\n+\n+\t\tEFX_MAC_ADDR_COPY(mac_addrp, addrp);\n+\t}\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_clock(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *sys_freqp,\n+\t__out\t\tuint32_t *dpcpu_freqp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,\n+\t\t\t    MC_CMD_GET_CLOCK_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_GET_CLOCK;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\t*sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);\n+\tif (*sys_freqp == 0) {\n+\t\trc = EINVAL;\n+\t\tgoto fail3;\n+\t}\n+\t*dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);\n+\tif (*dpcpu_freqp == 0) {\n+\t\trc = EINVAL;\n+\t\tgoto fail4;\n+\t}\n+\n+\treturn (0);\n+\n+fail4:\n+\tEFSYS_PROBE(fail4);\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_vector_cfg(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out_opt\tuint32_t *vec_basep,\n+\t__out_opt\tuint32_t *pf_nvecp,\n+\t__out_opt\tuint32_t *vf_nvecp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,\n+\t\t\t    MC_CMD_GET_VECTOR_CFG_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_GET_VECTOR_CFG;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\tif (vec_basep != NULL)\n+\t\t*vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);\n+\tif (pf_nvecp != NULL)\n+\t\t*pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);\n+\tif (vf_nvecp != NULL)\n+\t\t*vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_get_capabilities(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *flagsp,\n+\t__out\t\tuint32_t *flags2p,\n+\t__out\t\tuint32_t *tso2ncp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,\n+\t\t\t    MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof 
(payload));\n+\treq.emr_cmd = MC_CMD_GET_CAPABILITIES;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\t*flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);\n+\n+\tif (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {\n+\t\t*flags2p = 0;\n+\t\t*tso2ncp = 0;\n+\t} else {\n+\t\t*flags2p = MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2);\n+\t\t*tso2ncp = MCDI_OUT_WORD(req,\n+\t\t\t\tGET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);\n+\t}\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_alloc_vis(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t min_vi_count,\n+\t__in\t\tuint32_t max_vi_count,\n+\t__out\t\tuint32_t *vi_basep,\n+\t__out\t\tuint32_t *vi_countp,\n+\t__out\t\tuint32_t *vi_shiftp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,\n+\t\t\t    MC_CMD_ALLOC_VIS_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\tif (vi_countp == NULL) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_ALLOC_VIS;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_ALLOC_VIS_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);\n+\tMCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail2;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail3;\n+\t}\n+\n+\t*vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);\n+\t*vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);\n+\n+\t/* Report VI_SHIFT if available (always zero for Huntington) */\n+\tif (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)\n+\t\t*vi_shiftp = 0;\n+\telse\n+\t\t*vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_free_vis(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_mcdi_req_t req;\n+\tefx_rc_t rc;\n+\n+\tEFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);\n+\tEFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);\n+\n+\treq.emr_cmd = MC_CMD_FREE_VIS;\n+\treq.emr_in_buf = NULL;\n+\treq.emr_in_length = 0;\n+\treq.emr_out_buf = NULL;\n+\treq.emr_out_length = 0;\n+\n+\tefx_mcdi_execute_quiet(enp, &req);\n+\n+\t/* Ignore ELREADY (no allocated VIs, so nothing to free) */\n+\tif ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_alloc_piobuf(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tefx_piobuf_handle_t *handlep)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,\n+\t\t\t    MC_CMD_ALLOC_PIOBUF_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\tif (handlep == 
NULL) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_ALLOC_PIOBUF;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;\n+\n+\tefx_mcdi_execute_quiet(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail2;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail3;\n+\t}\n+\n+\t*handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_free_piobuf(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_piobuf_handle_t handle)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,\n+\t\t\t    MC_CMD_FREE_PIOBUF_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_FREE_PIOBUF;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);\n+\n+\tefx_mcdi_execute_quiet(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_link_piobuf(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t vi_index,\n+\t__in\t\tefx_piobuf_handle_t handle)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,\n+\t\t\t    MC_CMD_LINK_PIOBUF_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_LINK_PIOBUF;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);\n+\tMCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_unlink_piobuf(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t vi_index)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,\n+\t\t\t    MC_CMD_UNLINK_PIOBUF_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_UNLINK_PIOBUF;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);\n+\n+\tefx_mcdi_execute_quiet(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t\t\tvoid\n+ef10_nic_alloc_piobufs(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t max_piobuf_count)\n+{\n+\tefx_piobuf_handle_t *handlep;\n+\tunsigned int i;\n+\n+\tEFSYS_ASSERT3U(max_piobuf_count, <=,\n+\t    
EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));\n+\n+\tenp->en_arch.ef10.ena_piobuf_count = 0;\n+\n+\tfor (i = 0; i < max_piobuf_count; i++) {\n+\t\thandlep = &enp->en_arch.ef10.ena_piobuf_handle[i];\n+\n+\t\tif (efx_mcdi_alloc_piobuf(enp, handlep) != 0)\n+\t\t\tgoto fail1;\n+\n+\t\tenp->en_arch.ef10.ena_pio_alloc_map[i] = 0;\n+\t\tenp->en_arch.ef10.ena_piobuf_count++;\n+\t}\n+\n+\treturn;\n+\n+fail1:\n+\tfor (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {\n+\t\thandlep = &enp->en_arch.ef10.ena_piobuf_handle[i];\n+\n+\t\tefx_mcdi_free_piobuf(enp, *handlep);\n+\t\t*handlep = EFX_PIOBUF_HANDLE_INVALID;\n+\t}\n+\tenp->en_arch.ef10.ena_piobuf_count = 0;\n+}\n+\n+\n+static\t\t\tvoid\n+ef10_nic_free_piobufs(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_piobuf_handle_t *handlep;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {\n+\t\thandlep = &enp->en_arch.ef10.ena_piobuf_handle[i];\n+\n+\t\tefx_mcdi_free_piobuf(enp, *handlep);\n+\t\t*handlep = EFX_PIOBUF_HANDLE_INVALID;\n+\t}\n+\tenp->en_arch.ef10.ena_piobuf_count = 0;\n+}\n+\n+/* Sub-allocate a block from a piobuf */\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_pio_alloc(\n+\t__inout\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *bufnump,\n+\t__out\t\tefx_piobuf_handle_t *handlep,\n+\t__out\t\tuint32_t *blknump,\n+\t__out\t\tuint32_t *offsetp,\n+\t__out\t\tsize_t *sizep)\n+{\n+\tefx_nic_cfg_t *encp = &enp->en_nic_cfg;\n+\tefx_drv_cfg_t *edcp = &enp->en_drv_cfg;\n+\tuint32_t blk_per_buf;\n+\tuint32_t buf, blk;\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\tEFSYS_ASSERT(bufnump);\n+\tEFSYS_ASSERT(handlep);\n+\tEFSYS_ASSERT(blknump);\n+\tEFSYS_ASSERT(offsetp);\n+\tEFSYS_ASSERT(sizep);\n+\n+\tif ((edcp->edc_pio_alloc_size == 0) ||\n+\t    (enp->en_arch.ef10.ena_piobuf_count == 0)) {\n+\t\trc = ENOMEM;\n+\t\tgoto fail1;\n+\t}\n+\tblk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;\n+\n+\tfor (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {\n+\t\tuint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];\n+\n+\t\tif (~(*map) == 0)\n+\t\t\tcontinue;\n+\n+\t\tEFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));\n+\t\tfor (blk = 0; blk < blk_per_buf; blk++) {\n+\t\t\tif ((*map & (1u << blk)) == 0) {\n+\t\t\t\t*map |= (1u << blk);\n+\t\t\t\tgoto done;\n+\t\t\t}\n+\t\t}\n+\t}\n+\trc = ENOMEM;\n+\tgoto fail2;\n+\n+done:\n+\t*handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];\n+\t*bufnump = buf;\n+\t*blknump = blk;\n+\t*sizep = edcp->edc_pio_alloc_size;\n+\t*offsetp = blk * (*sizep);\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+/* Free a piobuf sub-allocated block */\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_pio_free(\n+\t__inout\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t bufnum,\n+\t__in\t\tuint32_t blknum)\n+{\n+\tuint32_t *map;\n+\tefx_rc_t rc;\n+\n+\tif ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||\n+\t    (blknum >= (8 * sizeof (*map)))) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\tmap = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];\n+\tif ((*map & (1u << blknum)) == 0) {\n+\t\trc = ENOENT;\n+\t\tgoto fail2;\n+\t}\n+\t*map &= ~(1u << blknum);\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_pio_link(\n+\t__inout\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t vi_index,\n+\t__in\t\tefx_piobuf_handle_t 
handle)\n+{\n+\treturn (efx_mcdi_link_piobuf(enp, vi_index, handle));\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_pio_unlink(\n+\t__inout\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t vi_index)\n+{\n+\treturn (efx_mcdi_unlink_piobuf(enp, vi_index));\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+ef10_mcdi_get_pf_count(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *pf_countp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,\n+\t\t\t    MC_CMD_GET_PF_COUNT_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_GET_PF_COUNT;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\t*pf_countp = *MCDI_OUT(req, uint8_t,\n+\t\t\t\tMC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);\n+\n+\tEFSYS_ASSERT(*pf_countp != 0);\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_get_datapath_caps(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tuint32_t flags;\n+\tuint32_t flags2;\n+\tuint32_t tso2nc;\n+\tefx_rc_t rc;\n+\n+\tif ((rc = efx_mcdi_get_capabilities(enp, &flags, &flags2,\n+\t\t\t\t\t    &tso2nc)) != 0)\n+\t\tgoto fail1;\n+\n+\tif ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)\n+\t\tgoto fail1;\n+\n+#define\tCAP_FLAG(flags1, field)\t\t\\\n+\t((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))\n+\n+#define\tCAP_FLAG2(flags2, field)\t\\\n+\t((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))\n+\n+\t/*\n+\t * Huntington RXDP firmware inserts a 0 or 14 byte prefix.\n+\t * We only support the 14 byte prefix here.\n+\t */\n+\tif (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {\n+\t\trc = ENOTSUP;\n+\t\tgoto fail2;\n+\t}\n+\tencp->enc_rx_prefix_size = 14;\n+\n+\t/* Check if the firmware supports TSO */\n+\tencp->enc_fw_assisted_tso_enabled =\n+\t    CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;\n+\n+\t/* Check if the firmware supports FATSOv2 */\n+\tencp->enc_fw_assisted_tso_v2_enabled =\n+\t    CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;\n+\n+\t/* Get the number of TSO contexts (FATSOv2) */\n+\tencp->enc_fw_assisted_tso_v2_n_contexts =\n+\t\tCAP_FLAG2(flags2, TX_TSO_V2) ? tso2nc : 0;\n+\n+\t/* Check if the firmware has vadapter/vport/vswitch support */\n+\tencp->enc_datapath_cap_evb =\n+\t    CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;\n+\n+\t/* Check if the firmware supports VLAN insertion */\n+\tencp->enc_hw_tx_insert_vlan_enabled =\n+\t    CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;\n+\n+\t/* Check if the firmware supports RX event batching */\n+\tencp->enc_rx_batching_enabled =\n+\t    CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;\n+\n+\t/*\n+\t * Even if batching isn't reported as supported, we may still get\n+\t * batched events (see bug61153).\n+\t */\n+\tencp->enc_rx_batch_max = 16;\n+\n+\t/* Check if the firmware supports disabling scatter on RXQs */\n+\tencp->enc_rx_disable_scatter_supported =\n+\t    CAP_FLAG(flags, RX_DISABLE_SCATTER) ? 
B_TRUE : B_FALSE;\n+\n+\t/* Check if the firmware supports packed stream mode */\n+\tencp->enc_rx_packed_stream_supported =\n+\t    CAP_FLAG(flags, RX_PACKED_STREAM) ? B_TRUE : B_FALSE;\n+\n+\t/*\n+\t * Check if the firmware supports configurable buffer sizes\n+\t * for packed stream mode (otherwise buffer size is 1Mbyte)\n+\t */\n+\tencp->enc_rx_var_packed_stream_supported =\n+\t    CAP_FLAG(flags, RX_PACKED_STREAM_VAR_BUFFERS) ? B_TRUE : B_FALSE;\n+\n+\t/* Check if the firmware supports set mac with running filters */\n+\tencp->enc_allow_set_mac_with_installed_filters =\n+\t    CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?\n+\t    B_TRUE : B_FALSE;\n+\n+\t/*\n+\t * Check if firmware supports the extended MC_CMD_SET_MAC, which allows\n+\t * specifying which parameters to configure.\n+\t */\n+\tencp->enc_enhanced_set_mac_supported =\n+\t\tCAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;\n+\n+\t/*\n+\t * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows\n+\t * us to let the firmware choose the settings to use on an EVQ.\n+\t */\n+\tencp->enc_init_evq_v2_supported =\n+\t\tCAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE;\n+\n+\t/*\n+\t * Check if firmware-verified NVRAM updates must be used.\n+\t *\n+\t * The firmware trusted installer requires all NVRAM updates to use\n+\t * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)\n+\t * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated\n+\t * partition and report the result).\n+\t */\n+\tencp->enc_fw_verified_nvram_update_required =\n+\t    CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ?\n+\t    B_TRUE : B_FALSE;\n+\n+\t/*\n+\t * Check if firmware provides packet memory and Rx datapath\n+\t * counters.\n+\t */\n+\tencp->enc_pm_and_rxdp_counters =\n+\t    CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE;\n+\n+\t/*\n+\t * Check if the 40G MAC hardware is capable of reporting\n+\t * statistics for Tx size bins.\n+\t */\n+\tencp->enc_mac_stats_40g_tx_size_bins =\n+\t    CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? 
B_TRUE : B_FALSE;\n+\n+#undef CAP_FLAG\n+#undef CAP_FLAG2\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+#define\tEF10_LEGACY_PF_PRIVILEGE_MASK\t\t\t\t\t\\\n+\t(MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN\t\t\t|\t\\\n+\tMC_CMD_PRIVILEGE_MASK_IN_GRP_LINK\t\t\t|\t\\\n+\tMC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD\t\t\t|\t\\\n+\tMC_CMD_PRIVILEGE_MASK_IN_GRP_PTP\t\t\t|\t\\\n+\tMC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS\t\t|\t\\\n+\tMC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING\t\t|\t\\\n+\tMC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST\t\t\t|\t\\\n+\tMC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST\t\t\t|\t\\\n+\tMC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST\t\t\t|\t\\\n+\tMC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST\t\t|\t\\\n+\tMC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)\n+\n+#define\tEF10_LEGACY_VF_PRIVILEGE_MASK\t0\n+\n+\n+\t__checkReturn\t\tefx_rc_t\n+ef10_get_privilege_mask(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__out\t\t\tuint32_t *maskp)\n+{\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tuint32_t mask;\n+\tefx_rc_t rc;\n+\n+\tif ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,\n+\t\t\t\t\t    &mask)) != 0) {\n+\t\tif (rc != ENOTSUP)\n+\t\t\tgoto fail1;\n+\n+\t\t/* Fallback for old firmware without privilege mask support */\n+\t\tif (EFX_PCI_FUNCTION_IS_PF(encp)) {\n+\t\t\t/* Assume PF has admin privilege */\n+\t\t\tmask = EF10_LEGACY_PF_PRIVILEGE_MASK;\n+\t\t} else {\n+\t\t\t/* VF is always unprivileged by default */\n+\t\t\tmask = EF10_LEGACY_VF_PRIVILEGE_MASK;\n+\t\t}\n+\t}\n+\n+\t*maskp = mask;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+/*\n+ * Table of mapping schemes from port number to the number of the external\n+ * connector on the board. The external numbering does not distinguish\n+ * off-board separated outputs such as from multi-headed cables.\n+ *\n+ * The count of adjacent port numbers that map to each external port\n+ * and the offset in the numbering, is determined by the chip family and\n+ * current port mode.\n+ *\n+ * For the Huntington family, the current port mode cannot be discovered,\n+ * so the mapping used is instead the last match in the table to the full\n+ * set of port modes to which the NIC can be configured. 
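The mapping table and ef10_external_port_mapping() that follow boil down to one line of arithmetic: divide the internal port number by the per-external-port count and add the numbering offset. With a matched entry of count 2 and offset 1, for instance, internal ports 0 through 3 map to external connectors 1, 1, 2 and 2. A sketch of just that step (the helper name is invented):

/* The calculation at the end of ef10_external_port_mapping(). */
static	uint8_t
example_external_port(uint32_t port, int32_t count, int32_t offset)
{
	/* count: internal ports per connector; offset: first external number */
	return ((uint8_t)((port / count) + offset));
}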
Therefore the\n+ * ordering of entries in the the mapping table is significant.\n+ */\n+static struct {\n+\tefx_family_t\tfamily;\n+\tuint32_t\tmodes_mask;\n+\tint32_t\t\tcount;\n+\tint32_t\t\toffset;\n+}\t__ef10_external_port_mappings[] = {\n+\t/* Supported modes with 1 output per external port */\n+\t{\n+\t\tEFX_FAMILY_HUNTINGTON,\n+\t\t(1 << TLV_PORT_MODE_10G) |\n+\t\t(1 << TLV_PORT_MODE_10G_10G) |\n+\t\t(1 << TLV_PORT_MODE_10G_10G_10G_10G),\n+\t\t1,\n+\t\t1\n+\t},\n+\t{\n+\t\tEFX_FAMILY_MEDFORD,\n+\t\t(1 << TLV_PORT_MODE_10G) |\n+\t\t(1 << TLV_PORT_MODE_10G_10G),\n+\t\t1,\n+\t\t1\n+\t},\n+\t/* Supported modes with 2 outputs per external port */\n+\t{\n+\t\tEFX_FAMILY_HUNTINGTON,\n+\t\t(1 << TLV_PORT_MODE_40G) |\n+\t\t(1 << TLV_PORT_MODE_40G_40G) |\n+\t\t(1 << TLV_PORT_MODE_40G_10G_10G) |\n+\t\t(1 << TLV_PORT_MODE_10G_10G_40G),\n+\t\t2,\n+\t\t1\n+\t},\n+\t{\n+\t\tEFX_FAMILY_MEDFORD,\n+\t\t(1 << TLV_PORT_MODE_40G) |\n+\t\t(1 << TLV_PORT_MODE_40G_40G) |\n+\t\t(1 << TLV_PORT_MODE_40G_10G_10G) |\n+\t\t(1 << TLV_PORT_MODE_10G_10G_40G) |\n+\t\t(1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),\n+\t\t2,\n+\t\t1\n+\t},\n+\t/* Supported modes with 4 outputs per external port */\n+\t{\n+\t\tEFX_FAMILY_MEDFORD,\n+\t\t(1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |\n+\t\t(1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1),\n+\t\t4,\n+\t\t1,\n+\t},\n+\t{\n+\t\tEFX_FAMILY_MEDFORD,\n+\t\t(1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),\n+\t\t4,\n+\t\t2\n+\t},\n+};\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_external_port_mapping(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t port,\n+\t__out\t\tuint8_t *external_portp)\n+{\n+\tefx_rc_t rc;\n+\tint i;\n+\tuint32_t port_modes;\n+\tuint32_t matches;\n+\tuint32_t current;\n+\tint32_t count = 1; /* Default 1-1 mapping */\n+\tint32_t offset = 1; /* Default starting external port number */\n+\n+\tif ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {\n+\t\t/*\n+\t\t * No current port mode information\n+\t\t * - infer mapping from available modes\n+\t\t */\n+\t\tif ((rc = efx_mcdi_get_port_modes(enp,\n+\t\t\t    &port_modes, NULL)) != 0) {\n+\t\t\t/*\n+\t\t\t * No port mode information available\n+\t\t\t * - use default mapping\n+\t\t\t */\n+\t\t\tgoto out;\n+\t\t}\n+\t} else {\n+\t\t/* Only need to scan the current mode */\n+\t\tport_modes = 1 << current;\n+\t}\n+\n+\t/*\n+\t * Infer the internal port -> external port mapping from\n+\t * the possible port modes for this NIC.\n+\t */\n+\tfor (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {\n+\t\tif (__ef10_external_port_mappings[i].family !=\n+\t\t    enp->en_family)\n+\t\t\tcontinue;\n+\t\tmatches = (__ef10_external_port_mappings[i].modes_mask &\n+\t\t    port_modes);\n+\t\tif (matches != 0) {\n+\t\t\tcount = __ef10_external_port_mappings[i].count;\n+\t\t\toffset = __ef10_external_port_mappings[i].offset;\n+\t\t\tport_modes &= ~matches;\n+\t\t}\n+\t}\n+\n+\tif (port_modes != 0) {\n+\t\t/* Some advertised modes are not supported */\n+\t\trc = ENOTSUP;\n+\t\tgoto fail1;\n+\t}\n+\n+out:\n+\t/*\n+\t * Scale as required by last matched mode and then convert to\n+\t * correctly offset numbering\n+\t */\n+\t*external_portp = (uint8_t)((port / count) + offset);\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_probe(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tconst efx_nic_ops_t *enop = enp->en_enop;\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tefx_drv_cfg_t *edcp = &(enp->en_drv_cfg);\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family 
== EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t/* Read and clear any assertion state */\n+\tif ((rc = efx_mcdi_read_assertion(enp)) != 0)\n+\t\tgoto fail1;\n+\n+\t/* Exit the assertion handler */\n+\tif ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)\n+\t\tif (rc != EACCES)\n+\t\t\tgoto fail2;\n+\n+\tif ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)\n+\t\tgoto fail3;\n+\n+\tif ((rc = enop->eno_board_cfg(enp)) != 0)\n+\t\tif (rc != EACCES)\n+\t\t\tgoto fail4;\n+\n+\t/*\n+\t * Set default driver config limits (based on board config).\n+\t *\n+\t * FIXME: For now allocate a fixed number of VIs which is likely to be\n+\t * sufficient and small enough to allow multiple functions on the same\n+\t * port.\n+\t */\n+\tedcp->edc_min_vi_count = edcp->edc_max_vi_count =\n+\t    MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));\n+\n+\t/* The client driver must configure and enable PIO buffer support */\n+\tedcp->edc_max_piobuf_count = 0;\n+\tedcp->edc_pio_alloc_size = 0;\n+\n+\tencp->enc_features = enp->en_features;\n+\n+\treturn (0);\n+\n+fail4:\n+\tEFSYS_PROBE(fail4);\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_set_drv_limits(\n+\t__inout\t\tefx_nic_t *enp,\n+\t__in\t\tefx_drv_limits_t *edlp)\n+{\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tefx_drv_cfg_t *edcp = &(enp->en_drv_cfg);\n+\tuint32_t min_evq_count, max_evq_count;\n+\tuint32_t min_rxq_count, max_rxq_count;\n+\tuint32_t min_txq_count, max_txq_count;\n+\tefx_rc_t rc;\n+\n+\tif (edlp == NULL) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t/* Get minimum required and maximum usable VI limits */\n+\tmin_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);\n+\tmin_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);\n+\tmin_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);\n+\n+\tedcp->edc_min_vi_count =\n+\t    MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));\n+\n+\tmax_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);\n+\tmax_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);\n+\tmax_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);\n+\n+\tedcp->edc_max_vi_count =\n+\t    MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));\n+\n+\t/*\n+\t * Check limits for sub-allocated piobuf blocks.\n+\t * PIO is optional, so don't fail if the limits are incorrect.\n+\t */\n+\tif ((encp->enc_piobuf_size == 0) ||\n+\t    (encp->enc_piobuf_limit == 0) ||\n+\t    (edlp->edl_min_pio_alloc_size == 0) ||\n+\t    (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {\n+\t\t/* Disable PIO */\n+\t\tedcp->edc_max_piobuf_count = 0;\n+\t\tedcp->edc_pio_alloc_size = 0;\n+\t} else {\n+\t\tuint32_t blk_size, blk_count, blks_per_piobuf;\n+\n+\t\tblk_size =\n+\t\t    MAX(edlp->edl_min_pio_alloc_size,\n+\t\t\t    encp->enc_piobuf_min_alloc_size);\n+\n+\t\tblks_per_piobuf = encp->enc_piobuf_size / blk_size;\n+\t\tEFSYS_ASSERT3U(blks_per_piobuf, <=, 32);\n+\n+\t\tblk_count = (encp->enc_piobuf_limit * blks_per_piobuf);\n+\n+\t\t/* A zero max pio alloc count means unlimited */\n+\t\tif ((edlp->edl_max_pio_alloc_count > 0) &&\n+\t\t    (edlp->edl_max_pio_alloc_count < blk_count)) {\n+\t\t\tblk_count = edlp->edl_max_pio_alloc_count;\n+\t\t}\n+\n+\t\tedcp->edc_pio_alloc_size = blk_size;\n+\t\tedcp->edc_max_piobuf_count =\n+\t\t    (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;\n+\t}\n+\n+\treturn 
(0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_reset(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,\n+\t\t\t    MC_CMD_ENTITY_RESET_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t/* ef10_nic_reset() is called to recover from BADASSERT failures. */\n+\tif ((rc = efx_mcdi_read_assertion(enp)) != 0)\n+\t\tgoto fail1;\n+\tif ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)\n+\t\tgoto fail2;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_ENTITY_RESET;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;\n+\n+\tMCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,\n+\t    ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail3;\n+\t}\n+\n+\t/* Clear RX/TX DMA queue errors */\n+\tenp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_init(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_drv_cfg_t *edcp = &(enp->en_drv_cfg);\n+\tuint32_t min_vi_count, max_vi_count;\n+\tuint32_t vi_count, vi_base, vi_shift;\n+\tuint32_t i;\n+\tuint32_t retry;\n+\tuint32_t delay_us;\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t/* Enable reporting of some events (e.g. link change) */\n+\tif ((rc = efx_mcdi_log_ctrl(enp)) != 0)\n+\t\tgoto fail1;\n+\n+\t/* Allocate (optional) on-chip PIO buffers */\n+\tef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);\n+\n+\t/*\n+\t * For best performance, PIO writes should use a write-combined\n+\t * (WC) memory mapping. Using a separate WC mapping for the PIO\n+\t * aperture of each VI would be a burden to drivers (and not\n+\t * possible if the host page size is >4Kbyte).\n+\t *\n+\t * To avoid this we use a single uncached (UC) mapping for VI\n+\t * register access, and a single WC mapping for extra VIs used\n+\t * for PIO writes.\n+\t *\n+\t * Each piobuf must be linked to a VI in the WC mapping, and to\n+\t * each VI that is using a sub-allocated block from the piobuf.\n+\t */\n+\tmin_vi_count = edcp->edc_min_vi_count;\n+\tmax_vi_count =\n+\t    edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;\n+\n+\t/* Ensure that the previously attached driver's VIs are freed */\n+\tif ((rc = efx_mcdi_free_vis(enp)) != 0)\n+\t\tgoto fail2;\n+\n+\t/*\n+\t * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. 
If this\n+\t * fails then retrying the request for fewer VI resources may succeed.\n+\t */\n+\tvi_count = 0;\n+\tif ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,\n+\t\t    &vi_base, &vi_count, &vi_shift)) != 0)\n+\t\tgoto fail3;\n+\n+\tEFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);\n+\n+\tif (vi_count < min_vi_count) {\n+\t\trc = ENOMEM;\n+\t\tgoto fail4;\n+\t}\n+\n+\tenp->en_arch.ef10.ena_vi_base = vi_base;\n+\tenp->en_arch.ef10.ena_vi_count = vi_count;\n+\tenp->en_arch.ef10.ena_vi_shift = vi_shift;\n+\n+\tif (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {\n+\t\t/* Not enough extra VIs to map piobufs */\n+\t\tef10_nic_free_piobufs(enp);\n+\t}\n+\n+\tenp->en_arch.ef10.ena_pio_write_vi_base =\n+\t    vi_count - enp->en_arch.ef10.ena_piobuf_count;\n+\n+\t/* Save UC memory mapping details */\n+\tenp->en_arch.ef10.ena_uc_mem_map_offset = 0;\n+\tif (enp->en_arch.ef10.ena_piobuf_count > 0) {\n+\t\tenp->en_arch.ef10.ena_uc_mem_map_size =\n+\t\t    (ER_DZ_TX_PIOBUF_STEP *\n+\t\t    enp->en_arch.ef10.ena_pio_write_vi_base);\n+\t} else {\n+\t\tenp->en_arch.ef10.ena_uc_mem_map_size =\n+\t\t    (ER_DZ_TX_PIOBUF_STEP *\n+\t\t    enp->en_arch.ef10.ena_vi_count);\n+\t}\n+\n+\t/* Save WC memory mapping details */\n+\tenp->en_arch.ef10.ena_wc_mem_map_offset =\n+\t    enp->en_arch.ef10.ena_uc_mem_map_offset +\n+\t    enp->en_arch.ef10.ena_uc_mem_map_size;\n+\n+\tenp->en_arch.ef10.ena_wc_mem_map_size =\n+\t    (ER_DZ_TX_PIOBUF_STEP *\n+\t    enp->en_arch.ef10.ena_piobuf_count);\n+\n+\t/* Link piobufs to extra VIs in WC mapping */\n+\tif (enp->en_arch.ef10.ena_piobuf_count > 0) {\n+\t\tfor (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {\n+\t\t\trc = efx_mcdi_link_piobuf(enp,\n+\t\t\t    enp->en_arch.ef10.ena_pio_write_vi_base + i,\n+\t\t\t    enp->en_arch.ef10.ena_piobuf_handle[i]);\n+\t\t\tif (rc != 0)\n+\t\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Allocate a vAdaptor attached to our upstream vPort/pPort.\n+\t *\n+\t * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF\n+\t * driver has yet to bring up the EVB port. See bug 56147. In this case,\n+\t * retry the request several times after waiting a while. The wait time\n+\t * between retries starts small (10ms) and exponentially increases.\n+\t * Total wait time is a little over two seconds. Retry logic in the\n+\t * client driver may mean this whole loop is repeated if it continues to\n+\t * fail.\n+\t */\n+\tretry = 0;\n+\tdelay_us = 10000;\n+\twhile ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {\n+\t\tif (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||\n+\t\t    (rc != ENOENT)) {\n+\t\t\t/*\n+\t\t\t * Do not retry alloc for PF, or for other errors on\n+\t\t\t * a VF.\n+\t\t\t */\n+\t\t\tgoto fail5;\n+\t\t}\n+\n+\t\t/* VF startup before PF is ready. Retry allocation. 
*/\n+\t\tif (retry > 5) {\n+\t\t\t/* Too many attempts */\n+\t\t\trc = EINVAL;\n+\t\t\tgoto fail6;\n+\t\t}\n+\t\tEFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);\n+\t\tEFSYS_SLEEP(delay_us);\n+\t\tretry++;\n+\t\tif (delay_us < 500000)\n+\t\t\tdelay_us <<= 2;\n+\t}\n+\n+\tenp->en_vport_id = EVB_PORT_ID_ASSIGNED;\n+\tenp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;\n+\n+\treturn (0);\n+\n+fail6:\n+\tEFSYS_PROBE(fail6);\n+fail5:\n+\tEFSYS_PROBE(fail5);\n+fail4:\n+\tEFSYS_PROBE(fail4);\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+\n+\tef10_nic_free_piobufs(enp);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_get_vi_pool(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *vi_countp)\n+{\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t/*\n+\t * Report VIs that the client driver can use.\n+\t * Do not include VIs used for PIO buffer writes.\n+\t */\n+\t*vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;\n+\n+\treturn (0);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_nic_get_bar_region(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_nic_region_t region,\n+\t__out\t\tuint32_t *offsetp,\n+\t__out\t\tsize_t *sizep)\n+{\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t\t    enp->en_family == EFX_FAMILY_MEDFORD);\n+\n+\t/*\n+\t * TODO: Specify host memory mapping alignment and granularity\n+\t * in efx_drv_limits_t so that they can be taken into account\n+\t * when allocating extra VIs for PIO writes.\n+\t */\n+\tswitch (region) {\n+\tcase EFX_REGION_VI:\n+\t\t/* UC mapped memory BAR region for VI registers */\n+\t\t*offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;\n+\t\t*sizep = enp->en_arch.ef10.ena_uc_mem_map_size;\n+\t\tbreak;\n+\n+\tcase EFX_REGION_PIO_WRITE_VI:\n+\t\t/* WC mapped memory BAR region for piobuf writes */\n+\t\t*offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;\n+\t\t*sizep = enp->en_arch.ef10.ena_wc_mem_map_size;\n+\t\tbreak;\n+\n+\tdefault:\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\t\tvoid\n+ef10_nic_fini(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tuint32_t i;\n+\tefx_rc_t rc;\n+\n+\t(void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);\n+\tenp->en_vport_id = 0;\n+\n+\t/* Unlink piobufs from extra VIs in WC mapping */\n+\tif (enp->en_arch.ef10.ena_piobuf_count > 0) {\n+\t\tfor (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {\n+\t\t\trc = efx_mcdi_unlink_piobuf(enp,\n+\t\t\t    enp->en_arch.ef10.ena_pio_write_vi_base + i);\n+\t\t\tif (rc != 0)\n+\t\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tef10_nic_free_piobufs(enp);\n+\n+\t(void) efx_mcdi_free_vis(enp);\n+\tenp->en_arch.ef10.ena_vi_count = 0;\n+}\n+\n+\t\t\tvoid\n+ef10_nic_unprobe(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\t(void) efx_mcdi_drv_attach(enp, B_FALSE);\n+}\n+\n+\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\ndiff --git a/drivers/net/sfc/base/ef10_phy.c b/drivers/net/sfc/base/ef10_phy.c\nnew file mode 100644\nindex 0000000..36e2603\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_phy.c\n@@ -0,0 +1,393 @@\n+/*\n+ * Copyright (c) 2012-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. 
Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#include \"efx.h\"\n+#include \"efx_impl.h\"\n+\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+static\t\t\tvoid\n+mcdi_phy_decode_cap(\n+\t__in\t\tuint32_t mcdi_cap,\n+\t__out\t\tuint32_t *maskp)\n+{\n+\tuint32_t mask;\n+\n+\tmask = 0;\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_10HDX);\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_10FDX);\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_100HDX);\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_100FDX);\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_1000HDX);\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_1000FDX);\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_10000FDX);\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_40000FDX);\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_PAUSE);\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_ASYM);\n+\tif (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))\n+\t\tmask |= (1 << EFX_PHY_CAP_AN);\n+\n+\t*maskp = mask;\n+}\n+\n+static\t\t\tvoid\n+mcdi_phy_decode_link_mode(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t link_flags,\n+\t__in\t\tunsigned int speed,\n+\t__in\t\tunsigned int fcntl,\n+\t__out\t\tefx_link_mode_t *link_modep,\n+\t__out\t\tunsigned int *fcntlp)\n+{\n+\tboolean_t fd = !!(link_flags &\n+\t\t    (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));\n+\tboolean_t up = !!(link_flags &\n+\t\t    (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));\n+\n+\t_NOTE(ARGUNUSED(enp))\n+\n+\tif (!up)\n+\t\t*link_modep = EFX_LINK_DOWN;\n+\telse if (speed == 40000 && fd)\n+\t\t*link_modep = EFX_LINK_40000FDX;\n+\telse if (speed == 10000 && fd)\n+\t\t*link_modep = EFX_LINK_10000FDX;\n+\telse if (speed == 1000)\n+\t\t*link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;\n+\telse if (speed == 100)\n+\t\t*link_modep = fd ? 
EFX_LINK_100FDX : EFX_LINK_100HDX;\n+\telse if (speed == 10)\n+\t\t*link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;\n+\telse\n+\t\t*link_modep = EFX_LINK_UNKNOWN;\n+\n+\tif (fcntl == MC_CMD_FCNTL_OFF)\n+\t\t*fcntlp = 0;\n+\telse if (fcntl == MC_CMD_FCNTL_RESPOND)\n+\t\t*fcntlp = EFX_FCNTL_RESPOND;\n+\telse if (fcntl == MC_CMD_FCNTL_GENERATE)\n+\t\t*fcntlp = EFX_FCNTL_GENERATE;\n+\telse if (fcntl == MC_CMD_FCNTL_BIDIR)\n+\t\t*fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;\n+\telse {\n+\t\tEFSYS_PROBE1(mc_pcol_error, int, fcntl);\n+\t\t*fcntlp = 0;\n+\t}\n+}\n+\n+\n+\t\t\tvoid\n+ef10_phy_link_ev(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tefx_qword_t *eqp,\n+\t__out\t\tefx_link_mode_t *link_modep)\n+{\n+\tefx_port_t *epp = &(enp->en_port);\n+\tunsigned int link_flags;\n+\tunsigned int speed;\n+\tunsigned int fcntl;\n+\tefx_link_mode_t link_mode;\n+\tuint32_t lp_cap_mask;\n+\n+\t/*\n+\t * Convert the LINKCHANGE speed enumeration into mbit/s, in the\n+\t * same way as GET_LINK encodes the speed\n+\t */\n+\tswitch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {\n+\tcase MCDI_EVENT_LINKCHANGE_SPEED_100M:\n+\t\tspeed = 100;\n+\t\tbreak;\n+\tcase MCDI_EVENT_LINKCHANGE_SPEED_1G:\n+\t\tspeed = 1000;\n+\t\tbreak;\n+\tcase MCDI_EVENT_LINKCHANGE_SPEED_10G:\n+\t\tspeed = 10000;\n+\t\tbreak;\n+\tcase MCDI_EVENT_LINKCHANGE_SPEED_40G:\n+\t\tspeed = 40000;\n+\t\tbreak;\n+\tdefault:\n+\t\tspeed = 0;\n+\t\tbreak;\n+\t}\n+\n+\tlink_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);\n+\tmcdi_phy_decode_link_mode(enp, link_flags, speed,\n+\t\t\t\t    MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),\n+\t\t\t\t    &link_mode, &fcntl);\n+\tmcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),\n+\t\t\t    &lp_cap_mask);\n+\n+\t/*\n+\t * It's safe to update ep_lp_cap_mask without the driver's port lock\n+\t * because presumably any concurrently running efx_port_poll() is\n+\t * only going to arrive at the same value.\n+\t *\n+\t * ep_fcntl has two meanings. It's either the link common fcntl\n+\t * (if the PHY supports AN), or it's the forced link state. If\n+\t * the former, it's safe to update the value for the same reason as\n+\t * for ep_lp_cap_mask. 
If the latter, then just ignore the value,\n+\t * because we can race with efx_mac_fcntl_set().\n+\t */\n+\tepp->ep_lp_cap_mask = lp_cap_mask;\n+\tepp->ep_fcntl = fcntl;\n+\n+\t*link_modep = link_mode;\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_phy_power(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tboolean_t power)\n+{\n+\tefx_rc_t rc;\n+\n+\tif (!power)\n+\t\treturn (0);\n+\n+\t/* Check if the PHY is a zombie */\n+\tif ((rc = ef10_phy_verify(enp)) != 0)\n+\t\tgoto fail1;\n+\n+\tenp->en_reset_flags |= EFX_RESET_PHY;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_phy_get_link(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tef10_link_state_t *elsp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,\n+\t\t\t    MC_CMD_GET_LINK_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_GET_LINK;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_GET_LINK_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\tmcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),\n+\t\t\t    &elsp->els_adv_cap_mask);\n+\tmcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),\n+\t\t\t    &elsp->els_lp_cap_mask);\n+\n+\tmcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),\n+\t\t\t    MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),\n+\t\t\t    MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),\n+\t\t\t    &elsp->els_link_mode, &elsp->els_fcntl);\n+\n+\telsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_phy_reconfigure(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_port_t *epp = &(enp->en_port);\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,\n+\t\t\t    MC_CMD_SET_LINK_OUT_LEN)];\n+\tuint32_t cap_mask;\n+\tunsigned int led_mode;\n+\tunsigned int speed;\n+\tboolean_t supported;\n+\tefx_rc_t rc;\n+\n+\tif ((rc = efx_mcdi_link_control_supported(enp, &supported)) != 0)\n+\t\tgoto fail1;\n+\tif (supported == B_FALSE)\n+\t\tgoto out;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_SET_LINK;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_SET_LINK_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;\n+\n+\tcap_mask = epp->ep_adv_cap_mask;\n+\tMCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,\n+\t\tPHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,\n+\t\tPHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,\n+\t\tPHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,\n+\t\tPHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,\n+\t\tPHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,\n+\t\tPHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,\n+\t\tPHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,\n+\t\tPHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,\n+\t\tPHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,\n+\t\tPHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);\n+\t/* Too many fields for for POPULATE macros, so insert this afterwards */\n+\tMCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,\n+\t   
 PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);\n+\n+\tMCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);\n+\tspeed = 0;\n+\tMCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);\n+\n+\tMCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail2;\n+\t}\n+\n+\t/* And set the blink mode */\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_SET_ID_LED;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail3;\n+\t}\n+out:\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_phy_verify(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,\n+\t\t\t    MC_CMD_GET_PHY_STATE_OUT_LEN)];\n+\tuint32_t state;\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_GET_PHY_STATE;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\tstate = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);\n+\tif (state != MC_CMD_PHY_STATE_OK) {\n+\t\tif (state != MC_CMD_PHY_STATE_ZOMBIE)\n+\t\t\tEFSYS_PROBE1(mc_pcol_error, int, state);\n+\t\trc = ENOTACTIVE;\n+\t\tgoto fail3;\n+\t}\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_phy_oui_get(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *ouip)\n+{\n+\t_NOTE(ARGUNUSED(enp, ouip))\n+\n+\treturn (ENOTSUP);\n+}\n+\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\ndiff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c\nnew file mode 100644\nindex 0000000..170125e\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_rx.c\n@@ -0,0 +1,397 @@\n+/*\n+ * Copyright (c) 2012-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#include \"efx.h\"\n+#include \"efx_impl.h\"\n+\n+\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_init_rxq(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t size,\n+\t__in\t\tuint32_t target_evq,\n+\t__in\t\tuint32_t label,\n+\t__in\t\tuint32_t instance,\n+\t__in\t\tefsys_mem_t *esmp,\n+\t__in\t\tboolean_t disable_scatter,\n+\t__in\t\tuint32_t ps_bufsize)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN,\n+\t\t\t    MC_CMD_INIT_RXQ_EXT_OUT_LEN)];\n+\tint npages = EFX_RXQ_NBUFS(size);\n+\tint i;\n+\tefx_qword_t *dma_addr;\n+\tuint64_t addr;\n+\tefx_rc_t rc;\n+\tuint32_t dma_mode;\n+\n+\t/* If this changes, then the payload size might need to change. */\n+\tEFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0);\n+\tEFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS);\n+\n+\tif (ps_bufsize > 0)\n+\t\tdma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;\n+\telse\n+\t\tdma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_INIT_RXQ;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, size);\n+\tMCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);\n+\tMCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);\n+\tMCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);\n+\tMCDI_IN_POPULATE_DWORD_8(req, INIT_RXQ_EXT_IN_FLAGS,\n+\t    INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,\n+\t    INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,\n+\t    INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,\n+\t    INIT_RXQ_EXT_IN_CRC_MODE, 0,\n+\t    INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,\n+\t    INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,\n+\t    INIT_RXQ_EXT_IN_DMA_MODE,\n+\t    dma_mode,\n+\t    INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize);\n+\tMCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);\n+\tMCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);\n+\n+\tdma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);\n+\taddr = EFSYS_MEM_ADDR(esmp);\n+\n+\tfor (i = 0; i < npages; i++) {\n+\t\tEFX_POPULATE_QWORD_2(*dma_addr,\n+\t\t    EFX_DWORD_1, (uint32_t)(addr >> 32),\n+\t\t    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));\n+\n+\t\tdma_addr++;\n+\t\taddr += EFX_BUF_SIZE;\n+\t}\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_fini_rxq(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t instance)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t 
payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,\n+\t\t\t    MC_CMD_FINI_RXQ_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_FINI_RXQ;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);\n+\n+\tefx_mcdi_execute_quiet(enp, &req);\n+\n+\tif ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_rx_init(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\n+\treturn (0);\n+}\n+\n+\n+/*\n+ * EF10 RX pseudo-header\n+ * ---------------------\n+ *\n+ * Receive packets are prefixed by an (optional) 14 byte pseudo-header:\n+ *\n+ *  +00: Toeplitz hash value.\n+ *       (32bit little-endian)\n+ *  +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.\n+ *       (16bit big-endian)\n+ *  +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.\n+ *       (16bit big-endian)\n+ *  +08: Packet Length. Zero if the RX datapath was in cut-through mode.\n+ *       (16bit little-endian)\n+ *  +10: MAC timestamp. Zero if timestamping is not enabled.\n+ *       (32bit little-endian)\n+ *\n+ * See \"The RX Pseudo-header\" in SF-109306-TC.\n+ */\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_rx_prefix_pktlen(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint8_t *buffer,\n+\t__out\t\tuint16_t *lengthp)\n+{\n+\t_NOTE(ARGUNUSED(enp))\n+\n+\t/*\n+\t * The RX pseudo-header contains the packet length, excluding the\n+\t * pseudo-header. If the hardware receive datapath was operating in\n+\t * cut-through mode then the length in the RX pseudo-header will be\n+\t * zero, and the packet length must be obtained from the DMA length\n+\t * reported in the RX event.\n+\t */\n+\t*lengthp = buffer[8] | (buffer[9] << 8);\n+\treturn (0);\n+}\n+\n+\t\t\tvoid\n+ef10_rx_qpost(\n+\t__in\t\tefx_rxq_t *erp,\n+\t__in_ecount(n)\tefsys_dma_addr_t *addrp,\n+\t__in\t\tsize_t size,\n+\t__in\t\tunsigned int n,\n+\t__in\t\tunsigned int completed,\n+\t__in\t\tunsigned int added)\n+{\n+\tefx_qword_t qword;\n+\tunsigned int i;\n+\tunsigned int offset;\n+\tunsigned int id;\n+\n+\t/* The client driver must not overfill the queue */\n+\tEFSYS_ASSERT3U(added - completed + n, <=,\n+\t    EFX_RXQ_LIMIT(erp->er_mask + 1));\n+\n+\tid = added & (erp->er_mask);\n+\tfor (i = 0; i < n; i++) {\n+\t\tEFSYS_PROBE4(rx_post, unsigned int, erp->er_index,\n+\t\t    unsigned int, id, efsys_dma_addr_t, addrp[i],\n+\t\t    size_t, size);\n+\n+\t\tEFX_POPULATE_QWORD_3(qword,\n+\t\t    ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),\n+\t\t    ESF_DZ_RX_KER_BUF_ADDR_DW0,\n+\t\t    (uint32_t)(addrp[i] & 0xffffffff),\n+\t\t    ESF_DZ_RX_KER_BUF_ADDR_DW1,\n+\t\t    (uint32_t)(addrp[i] >> 32));\n+\n+\t\toffset = id * sizeof (efx_qword_t);\n+\t\tEFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);\n+\n+\t\tid = (id + 1) & (erp->er_mask);\n+\t}\n+}\n+\n+\t\t\tvoid\n+ef10_rx_qpush(\n+\t__in\tefx_rxq_t *erp,\n+\t__in\tunsigned int added,\n+\t__inout\tunsigned int *pushedp)\n+{\n+\tefx_nic_t *enp = erp->er_enp;\n+\tunsigned int pushed = *pushedp;\n+\tuint32_t wptr;\n+\tefx_dword_t dword;\n+\n+\t/* Hardware has alignment restriction for WPTR */\n+\twptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);\n+\tif (pushed == wptr)\n+\t\treturn;\n+\n+\t*pushedp = wptr;\n+\n+\t/* Push the populated descriptors out 
*/\n+\twptr &= erp->er_mask;\n+\n+\tEFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);\n+\n+\t/* Guarantee ordering of memory (descriptors) and PIO (doorbell) */\n+\tEFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,\n+\t    wptr, pushed & erp->er_mask);\n+\tEFSYS_PIO_WRITE_BARRIER();\n+\tEFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,\n+\t\t\t    erp->er_index, &dword, B_FALSE);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_rx_qflush(\n+\t__in\tefx_rxq_t *erp)\n+{\n+\tefx_nic_t *enp = erp->er_enp;\n+\tefx_rc_t rc;\n+\n+\tif ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)\n+\t\tgoto fail1;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\tvoid\n+ef10_rx_qenable(\n+\t__in\tefx_rxq_t *erp)\n+{\n+\t/* FIXME */\n+\t_NOTE(ARGUNUSED(erp))\n+\t/* FIXME */\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_rx_qcreate(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int index,\n+\t__in\t\tunsigned int label,\n+\t__in\t\tefx_rxq_type_t type,\n+\t__in\t\tefsys_mem_t *esmp,\n+\t__in\t\tsize_t n,\n+\t__in\t\tuint32_t id,\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_rxq_t *erp)\n+{\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tefx_rc_t rc;\n+\tboolean_t disable_scatter;\n+\tunsigned int ps_buf_size;\n+\n+\t_NOTE(ARGUNUSED(id, erp))\n+\n+\tEFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));\n+\tEFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);\n+\tEFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);\n+\n+\tEFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));\n+\tEFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));\n+\n+\tif (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\tif (index >= encp->enc_rxq_limit) {\n+\t\trc = EINVAL;\n+\t\tgoto fail2;\n+\t}\n+\n+\tswitch (type) {\n+\tcase EFX_RXQ_TYPE_DEFAULT:\n+\tcase EFX_RXQ_TYPE_SCATTER:\n+\t\tps_buf_size = 0;\n+\t\tbreak;\n+\tdefault:\n+\t\trc = ENOTSUP;\n+\t\tgoto fail3;\n+\t}\n+\n+\tEFSYS_ASSERT(ps_buf_size == 0);\n+\n+\t/* Scatter can only be disabled if the firmware supports doing so */\n+\tif (type == EFX_RXQ_TYPE_SCATTER)\n+\t\tdisable_scatter = B_FALSE;\n+\telse\n+\t\tdisable_scatter = encp->enc_rx_disable_scatter_supported;\n+\n+\tif ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index,\n+\t\t    esmp, disable_scatter, ps_buf_size)) != 0)\n+\t\tgoto fail6;\n+\n+\terp->er_eep = eep;\n+\terp->er_label = label;\n+\n+\tef10_ev_rxlabel_init(eep, erp, label, ps_buf_size != 0);\n+\n+\treturn (0);\n+\n+fail6:\n+\tEFSYS_PROBE(fail6);\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\tvoid\n+ef10_rx_qdestroy(\n+\t__in\tefx_rxq_t *erp)\n+{\n+\tefx_nic_t *enp = erp->er_enp;\n+\tefx_evq_t *eep = erp->er_eep;\n+\tunsigned int label = erp->er_label;\n+\n+\tef10_ev_rxlabel_fini(eep, label);\n+\n+\tEFSYS_ASSERT(enp->en_rx_qcount != 0);\n+\t--enp->en_rx_qcount;\n+\n+\tEFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);\n+}\n+\n+\t\tvoid\n+ef10_rx_fini(\n+\t__in\tefx_nic_t *enp)\n+{\n+\t_NOTE(ARGUNUSED(enp))\n+}\n+\n+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\ndiff --git a/drivers/net/sfc/base/ef10_tlv_layout.h b/drivers/net/sfc/base/ef10_tlv_layout.h\nnew file mode 100644\nindex 0000000..7d099b8\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_tlv_layout.h\n@@ -0,0 +1,941 @@\n+/**************************************************************************\\\n+*//*! 
\\file\n+** <L5_PRIVATE L5_SOURCE>\n+** \\author  mjs\n+**  \\brief  TLV item layouts for EF10 static and dynamic config in NVRAM\n+**   \\date  2012/11/20\n+**    \\cop  (c) Solarflare Communications Inc.\n+** </L5_PRIVATE>\n+*//*\n+\\**************************************************************************/\n+\n+/* These structures define the layouts for the TLV items stored in static and\n+ * dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).\n+ *\n+ * They contain the same sort of information that was kept in the\n+ * siena_mc_static_config_hdr_t and siena_mc_dynamic_config_hdr_t structures\n+ * (defined in <ci/mgmt/mc_flash_layout.h> and <ci/mgmt/mc_dynamic_cfg.h>) for\n+ * Siena.\n+ *\n+ * These are used directly by the MC and should also be usable directly on host\n+ * systems which are little-endian and do not do strange things with structure\n+ * padding.  (Big-endian host systems will require some byte-swapping.)\n+ *\n+ *                                    -----\n+ *\n+ * Please refer to SF-108797-SW for a general overview of the TLV partition\n+ * format.\n+ *\n+ *                                    -----\n+ *\n+ * The current tag IDs have a general structure: with the exception of the\n+ * special values defined in the document, they are of the form 0xLTTTNNNN,\n+ * where:\n+ *\n+ *   -  L is a location, indicating where this tag is expected to be found:\n+ *        0: static configuration\n+ *        1: dynamic configuration\n+ *        2: firmware internal use\n+ *        3: license partition\n+ *\n+ *   -  TTT is a type, which is just a unique value.  The same type value\n+ *      might appear in both locations, indicating a relationship between\n+ *      the items (e.g. static and dynamic VPD below).\n+ *\n+ *   -  NNNN is an index of some form.  Some item types are per-port, some\n+ *      are per-PF, some are per-partition-type.\n+ *\n+ *                                    -----\n+ *\n+ * As with the previous Siena structures, each structure here is laid out\n+ * carefully: values are aligned to their natural boundary, with explicit\n+ * padding fields added where necessary.  (No, technically this does not\n+ * absolutely guarantee portability.  
But, in practice, compilers are generally\n+ * sensible enough not to introduce completely pointless padding, and it works\n+ * well enough.)\n+ */\n+\n+\n+#ifndef CI_MGMT_TLV_LAYOUT_H\n+#define CI_MGMT_TLV_LAYOUT_H\n+\n+\n+/* ----------------------------------------------------------------------------\n+ *  General structure (defined by SF-108797-SW)\n+ * ----------------------------------------------------------------------------\n+ */\n+\n+\n+/* The \"end\" tag.\n+ *\n+ * (Note that this is *not* followed by length or value fields: anything after\n+ * the tag itself is irrelevant.)\n+ */\n+\n+#define TLV_TAG_END                     (0xEEEEEEEE)\n+\n+\n+/* Other special reserved tag values.\n+ */\n+\n+#define TLV_TAG_SKIP                    (0x00000000)\n+#define TLV_TAG_INVALID                 (0xFFFFFFFF)\n+\n+\n+/* TLV partition header.\n+ *\n+ * In a TLV partition, this must be the first item in the sequence, at offset\n+ * 0.\n+ */\n+\n+#define TLV_TAG_PARTITION_HEADER        (0xEF10DA7A)\n+\n+struct tlv_partition_header {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint16_t type_id;\n+/* 0 indicates the default segment (always located at offset 0), while other values\n+ * are for RFID-selectable presets that should immediately follow the default segment.\n+ * The default segment may also have preset > 0, which means that it is a preset\n+ * selected through an RFID command and copied by FW to the location at offset 0. */\n+  uint16_t preset;\n+  uint32_t generation;\n+  uint32_t total_length;\n+};\n+\n+\n+/* TLV partition trailer.\n+ *\n+ * In a TLV partition, this must be the last item in the sequence, immediately\n+ * preceding the TLV_TAG_END word.\n+ */\n+\n+#define TLV_TAG_PARTITION_TRAILER       (0xEF101A57)\n+\n+struct tlv_partition_trailer {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t generation;\n+  uint32_t checksum;\n+};\n+\n+\n+/* Appendable TLV partition header.\n+ *\n+ * In an appendable TLV partition, this must be the first item in the sequence,\n+ * at offset 0.  (Note that, unlike the configuration partitions, there is no\n+ * trailer before the TLV_TAG_END word.)\n+ */\n+\n+#define TLV_TAG_APPENDABLE_PARTITION_HEADER (0xEF10ADA7)\n+\n+struct tlv_appendable_partition_header {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint16_t type_id;\n+  uint16_t reserved;\n+};\n+\n+\n+/* ----------------------------------------------------------------------------\n+ *  Configuration items\n+ * ----------------------------------------------------------------------------\n+ */\n+\n+\n+/* NIC global capabilities.\n+ */\n+\n+#define TLV_TAG_GLOBAL_CAPABILITIES     (0x00010000)\n+\n+struct tlv_global_capabilities {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t flags;\n+};\n+\n+\n+/* Siena-style per-port MAC address allocation.\n+ *\n+ * There are <count> addresses, starting at <base_address> and incrementing\n+ * by adding <stride> to the low-order byte(s).\n+ *\n+ * (See also TLV_TAG_GLOBAL_MAC for an alternative, specifying a global pool\n+ * of contiguous MAC addresses for the firmware to allocate as it sees fit.)\n+ */\n+\n+#define TLV_TAG_PORT_MAC(port)          (0x00020000 + (port))\n+\n+struct tlv_port_mac {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  base_address[6];\n+  uint16_t reserved;\n+  uint16_t count;\n+  uint16_t stride;\n+};\n+\n+\n+/* Static VPD.\n+ *\n+ * This is the portion of VPD which is set at manufacturing time and not\n+ * expected to change.  It is formatted as a standard PCI VPD block. 
There are\n+ * global and per-pf TLVs for this, the global TLV is new for Medford and is\n+ * used in preference to the per-pf TLV.\n+ */\n+\n+#define TLV_TAG_PF_STATIC_VPD(pf)       (0x00030000 + (pf))\n+\n+struct tlv_pf_static_vpd {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  bytes[];\n+};\n+\n+#define TLV_TAG_GLOBAL_STATIC_VPD       (0x001f0000)\n+\n+struct tlv_global_static_vpd {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  bytes[];\n+};\n+\n+\n+/* Dynamic VPD.\n+ *\n+ * This is the portion of VPD which may be changed (e.g. by firmware updates).\n+ * It is formatted as a standard PCI VPD block. There are global and per-pf TLVs\n+ * for this, the global TLV is new for Medford and is used in preference to the\n+ * per-pf TLV.\n+ */\n+\n+#define TLV_TAG_PF_DYNAMIC_VPD(pf)      (0x10030000 + (pf))\n+\n+struct tlv_pf_dynamic_vpd {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  bytes[];\n+};\n+\n+#define TLV_TAG_GLOBAL_DYNAMIC_VPD      (0x10200000)\n+\n+struct tlv_global_dynamic_vpd {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  bytes[];\n+};\n+\n+\n+/* \"DBI\" PCI config space changes.\n+ *\n+ * This is a set of edits made to the default PCI config space values before\n+ * the device is allowed to enumerate. There are global and per-pf TLVs for\n+ * this, the global TLV is new for Medford and is used in preference to the\n+ * per-pf TLV.\n+ */\n+\n+#define TLV_TAG_PF_DBI(pf)              (0x00040000 + (pf))\n+\n+struct tlv_pf_dbi {\n+  uint32_t tag;\n+  uint32_t length;\n+  struct {\n+    uint16_t addr;\n+    uint16_t byte_enables;\n+    uint32_t value;\n+  } items[];\n+};\n+\n+\n+#define TLV_TAG_GLOBAL_DBI              (0x00210000)\n+\n+struct tlv_global_dbi {\n+  uint32_t tag;\n+  uint32_t length;\n+  struct {\n+    uint16_t addr;\n+    uint16_t byte_enables;\n+    uint32_t value;\n+  } items[];\n+};\n+\n+\n+/* Partition subtype codes.\n+ *\n+ * A subtype may optionally be stored for each type of partition present in\n+ * the NVRAM.  For example, this may be used to allow a generic firmware update\n+ * utility to select a specific variant of firmware for a specific variant of\n+ * board.\n+ *\n+ * The description[] field is an optional string which is returned in the\n+ * MC_CMD_NVRAM_METADATA response if present.\n+ */\n+\n+#define TLV_TAG_PARTITION_SUBTYPE(type) (0x00050000 + (type))\n+\n+struct tlv_partition_subtype {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t subtype;\n+  uint8_t  description[];\n+};\n+\n+\n+/* Partition version codes.\n+ *\n+ * A version may optionally be stored for each type of partition present in\n+ * the NVRAM.  
This provides a standard way of tracking the currently stored\n+ * version of each of the various component images.\n+ */\n+\n+#define TLV_TAG_PARTITION_VERSION(type) (0x10060000 + (type))\n+\n+struct tlv_partition_version {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint16_t version_w;\n+  uint16_t version_x;\n+  uint16_t version_y;\n+  uint16_t version_z;\n+};\n+\n+/* Global PCIe configuration */\n+\n+#define TLV_TAG_GLOBAL_PCIE_CONFIG (0x10070000)\n+\n+struct tlv_pcie_config {\n+  uint32_t tag;\n+  uint32_t length;\n+  int16_t max_pf_number;                        /**< Largest PF RID (lower PFs may be hidden) */\n+  uint16_t pf_aper;                             /**< BIU aperture for PF BAR2 */\n+  uint16_t vf_aper;                             /**< BIU aperture for VF BAR0 */\n+  uint16_t int_aper;                            /**< BIU aperture for PF BAR4 and VF BAR2 */\n+#define TLV_MAX_PF_DEFAULT (-1)                 /* Use FW default for largest PF RID  */\n+#define TLV_APER_DEFAULT (0xFFFF)               /* Use FW default for a given aperture */\n+};\n+\n+/* Per-PF configuration. Note that not all these fields are necessarily useful\n+ * as the apertures are constrained by the BIU settings (the one case we do\n+ * use is to make BAR2 bigger than the BIU thinks to reserve space), but we can\n+ * tidy things up later */\n+\n+#define TLV_TAG_PF_PCIE_CONFIG(pf)  (0x10080000 + (pf))\n+\n+struct tlv_per_pf_pcie_config {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t vfs_total;\n+  uint8_t port_allocation;\n+  uint16_t vectors_per_pf;\n+  uint16_t vectors_per_vf;\n+  uint8_t pf_bar0_aperture;\n+  uint8_t pf_bar2_aperture;\n+  uint8_t vf_bar0_aperture;\n+  uint8_t vf_base;\n+  uint16_t supp_pagesz;\n+  uint16_t msix_vec_base;\n+};\n+\n+\n+/* Development ONLY. This is a single TLV tag for all the gubbins\n+ * that can be set through the MC command-line other than the PCIe\n+ * settings. This is a temporary measure. */\n+#define TLV_TAG_TMP_GUBBINS (0x10090000)        /* legacy symbol - do not use */\n+#define TLV_TAG_TMP_GUBBINS_HUNT TLV_TAG_TMP_GUBBINS\n+\n+struct tlv_tmp_gubbins {\n+  uint32_t tag;\n+  uint32_t length;\n+  /* Consumed by dpcpu.c */\n+  uint64_t tx0_tags;     /* Bitmap */\n+  uint64_t tx1_tags;     /* Bitmap */\n+  uint64_t dl_tags;      /* Bitmap */\n+  uint32_t flags;\n+#define TLV_DPCPU_TX_STRIPE (1) /* No longer used, has no effect */\n+#define TLV_DPCPU_BIU_TAGS  (2) /* Use BIU tag manager */\n+#define TLV_DPCPU_TX0_TAGS  (4) /* tx0_tags is valid */\n+#define TLV_DPCPU_TX1_TAGS  (8) /* tx1_tags is valid */\n+#define TLV_DPCPU_DL_TAGS  (16) /* dl_tags is valid */\n+  /* Consumed by features.c */\n+  uint32_t dut_features;        /* All 1s -> leave alone */\n+  int8_t with_rmon;             /* 0 -> off, 1 -> on, -1 -> leave alone */\n+  /* Consumed by clocks_hunt.c */\n+  int8_t clk_mode;             /* 0 -> off, 1 -> on, -1 -> leave alone */\n+  /* No longer used, superseded by TLV_TAG_DESCRIPTOR_CACHE_CONFIG. 
*/\n+  int8_t rx_dc_size;           /* -1 -> leave alone */\n+  int8_t tx_dc_size;\n+  int16_t num_q_allocs;\n+};\n+\n+/* Global port configuration\n+ *\n+ * This is now deprecated in favour of a platform-provided default\n+ * and dynamic config override via tlv_global_port_options.\n+ */\n+#define TLV_TAG_GLOBAL_PORT_CONFIG      (0x000a0000)\n+\n+struct tlv_global_port_config {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t ports_per_core;\n+  uint32_t max_port_speed;\n+};\n+\n+\n+/* Firmware options.\n+ *\n+ * This is intended for user-configurable selection of optional firmware\n+ * features and variants.\n+ *\n+ * Initially, this consists only of the satellite CPU firmware variant\n+ * selection, but this tag could be extended in the future (using the\n+ * tag length to determine whether additional fields are present).\n+ */\n+\n+#define TLV_TAG_FIRMWARE_OPTIONS        (0x100b0000)\n+\n+struct tlv_firmware_options {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t firmware_variant;\n+#define TLV_FIRMWARE_VARIANT_DRIVER_SELECTED (0xffffffff)\n+\n+/* These are the values for overriding the driver's choice; the definitions\n+ * are taken from MCDI so that they don't get out of step.  Include\n+ * <ci/mgmt/mc_driver_pcol.h> or the equivalent from your driver's tree if\n+ * you need to use these constants.\n+ */\n+#define TLV_FIRMWARE_VARIANT_FULL_FEATURED   MC_CMD_FW_FULL_FEATURED\n+#define TLV_FIRMWARE_VARIANT_LOW_LATENCY     MC_CMD_FW_LOW_LATENCY\n+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM   MC_CMD_FW_PACKED_STREAM\n+#define TLV_FIRMWARE_VARIANT_HIGH_TX_RATE    MC_CMD_FW_HIGH_TX_RATE\n+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \\\n+                                             MC_CMD_FW_PACKED_STREAM_HASH_MODE_1\n+#define TLV_FIRMWARE_VARIANT_RULES_ENGINE    MC_CMD_FW_RULES_ENGINE\n+};\n+\n+/* Voltage settings\n+ *\n+ * Intended for boards with A0 silicon where the core voltage may\n+ * need tweaking. Most likely set once when the pass voltage is\n+ * determined. */\n+\n+#define TLV_TAG_0V9_SETTINGS (0x000c0000)\n+\n+struct tlv_0v9_settings {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint16_t flags; /* Boards with high 0v9 settings may need active cooling */\n+#define TLV_TAG_0V9_REQUIRES_FAN (1)\n+  uint16_t target_voltage; /* In millivolts */\n+  /* Since the limits are meant to be centred to the target (and must at least\n+   * contain it) they need setting as well. 
*/\n+  uint16_t warn_low;       /* In millivolts */\n+  uint16_t warn_high;      /* In millivolts */\n+  uint16_t panic_low;      /* In millivolts */\n+  uint16_t panic_high;     /* In millivolts */\n+};\n+\n+\n+/* Clock configuration */\n+\n+#define TLV_TAG_CLOCK_CONFIG       (0x000d0000) /* legacy symbol - do not use */\n+#define TLV_TAG_CLOCK_CONFIG_HUNT  TLV_TAG_CLOCK_CONFIG\n+\n+struct tlv_clock_config {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint16_t clk_sys;        /* MHz */\n+  uint16_t clk_dpcpu;      /* MHz */\n+  uint16_t clk_icore;      /* MHz */\n+  uint16_t clk_pcs;        /* MHz */\n+};\n+\n+#define TLV_TAG_CLOCK_CONFIG_MEDFORD      (0x00100000)\n+\n+struct tlv_clock_config_medford {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint16_t clk_sys;        /* MHz */\n+  uint16_t clk_mc;         /* MHz */\n+  uint16_t clk_rmon;       /* MHz */\n+  uint16_t clk_vswitch;    /* MHz */\n+  uint16_t clk_dpcpu;      /* MHz */\n+  uint16_t clk_pcs;        /* MHz */\n+};\n+\n+\n+/* EF10-style global pool of MAC addresses.\n+ *\n+ * There are <count> addresses, starting at <base_address>, which are\n+ * contiguous.  Firmware is responsible for allocating addresses from this\n+ * pool to ports / PFs as appropriate.\n+ */\n+\n+#define TLV_TAG_GLOBAL_MAC              (0x000e0000)\n+\n+struct tlv_global_mac {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  base_address[6];\n+  uint16_t reserved1;\n+  uint16_t count;\n+  uint16_t reserved2;\n+};\n+\n+#define TLV_TAG_ATB_0V9_TARGET     (0x000f0000) /* legacy symbol - do not use */\n+#define TLV_TAG_ATB_0V9_TARGET_HUNT     TLV_TAG_ATB_0V9_TARGET\n+\n+/* The target value for the 0v9 power rail measured on-chip at the\n+ * analogue test bus */\n+struct tlv_0v9_atb_target {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint16_t millivolts;\n+  uint16_t reserved;\n+};\n+\n+/* Factory settings for amplitude calibration of the PCIE TX serdes */\n+#define TLV_TAG_TX_PCIE_AMP_CONFIG  (0x00220000)\n+struct tlv_pcie_tx_amp_config {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t quad_tx_imp2k[4];\n+  uint8_t quad_tx_imp50[4];\n+  uint8_t lane_amp[16];\n+};\n+\n+\n+/* Global PCIe configuration, second revision. This represents the visible PFs\n+ * by a bitmap rather than having the number of the highest visible one. As such\n+ * it can (for a 16-PF chip) represent a superset of what TLV_TAG_GLOBAL_PCIE_CONFIG\n+ * can and it should be used in place of that tag in future (but compatibility with\n+ * the old tag will be left in the firmware indefinitely).  */\n+\n+#define TLV_TAG_GLOBAL_PCIE_CONFIG_R2 (0x10100000)\n+\n+struct tlv_pcie_config_r2 {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint16_t visible_pfs;                         /**< Bitmap of visible PFs */\n+  uint16_t pf_aper;                             /**< BIU aperture for PF BAR2 */\n+  uint16_t vf_aper;                             /**< BIU aperture for VF BAR0 */\n+  uint16_t int_aper;                            /**< BIU aperture for PF BAR4 and VF BAR2 */\n+};\n+\n+/* Dynamic port mode.\n+ *\n+ * Allows selecting alternate port configuration for platforms that support it\n+ * (e.g. 1x40G vs 2x10G on Milano, 1x40G vs 4x10G on Medford). 
This affects the\n+ * number of externally visible ports (and, hence, PF to port mapping), so must\n+ * be done at boot time.\n+ *\n+ * This tag supercedes tlv_global_port_config.\n+ */\n+\n+#define TLV_TAG_GLOBAL_PORT_MODE         (0x10110000)\n+\n+struct tlv_global_port_mode {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t port_mode;\n+#define TLV_PORT_MODE_DEFAULT           (0xffffffff) /* Default for given platform */\n+#define TLV_PORT_MODE_10G                        (0) /* 10G, single SFP/10G-KR */\n+#define TLV_PORT_MODE_40G                        (1) /* 40G, single QSFP/40G-KR */\n+#define TLV_PORT_MODE_10G_10G                    (2) /* 2x10G, dual SFP/10G-KR or single QSFP */\n+#define TLV_PORT_MODE_40G_40G                    (3) /* 40G + 40G, dual QSFP/40G-KR (Greenport, Medford) */\n+#define TLV_PORT_MODE_10G_10G_10G_10G            (4) /* 2x10G + 2x10G, quad SFP/10G-KR or dual QSFP (Greenport) */\n+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1         (4) /* 4x10G, single QSFP, cage 0 (Medford) */\n+#define TLV_PORT_MODE_10G_10G_10G_10G_Q          (5) /* 4x10G, single QSFP, cage 0 (Medford) OBSOLETE DO NOT USE */\n+#define TLV_PORT_MODE_40G_10G_10G                (6) /* 1x40G + 2x10G, dual QSFP (Greenport, Medford) */\n+#define TLV_PORT_MODE_10G_10G_40G                (7) /* 2x10G + 1x40G, dual QSFP (Greenport, Medford) */\n+#define TLV_PORT_MODE_10G_10G_10G_10G_Q2         (8) /* 4x10G, single QSFP, cage 1 (Medford) */\n+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2      (9) /* 2x10G + 2x10G, dual QSFP (Medford) */\n+#define TLV_PORT_MODE_MAX TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2\n+};\n+\n+/* Type of the v-switch created implicitly by the firmware */\n+\n+#define TLV_TAG_VSWITCH_TYPE(port)       (0x10120000 + (port))\n+\n+struct tlv_vswitch_type {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t vswitch_type;\n+#define TLV_VSWITCH_TYPE_DEFAULT        (0xffffffff) /* Firmware default; equivalent to no TLV present for a given port */\n+#define TLV_VSWITCH_TYPE_NONE                    (0)\n+#define TLV_VSWITCH_TYPE_VLAN                    (1)\n+#define TLV_VSWITCH_TYPE_VEB                     (2)\n+#define TLV_VSWITCH_TYPE_VEPA                    (3)\n+#define TLV_VSWITCH_TYPE_MUX                     (4)\n+#define TLV_VSWITCH_TYPE_TEST                    (5)\n+};\n+\n+/* A VLAN tag for the v-port created implicitly by the firmware */\n+\n+#define TLV_TAG_VPORT_VLAN_TAG(pf)               (0x10130000 + (pf))\n+\n+struct tlv_vport_vlan_tag {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t vlan_tag;\n+#define TLV_VPORT_NO_VLAN_TAG                    (0xFFFFFFFF) /* Default in the absence of TLV for a given PF */\n+};\n+\n+/* Offset to be applied to the 0v9 setting, wherever it came from */\n+\n+#define TLV_TAG_ATB_0V9_OFFSET           (0x10140000)\n+\n+struct tlv_0v9_atb_offset {\n+  uint32_t tag;\n+  uint32_t length;\n+  int16_t  offset_millivolts;\n+  uint16_t reserved;\n+};\n+\n+/* A privilege mask given on reset to all non-admin PCIe functions (that is other than first-PF-per-port).\n+ * The meaning of particular bits is defined in mcdi_ef10.yml under MC_CMD_PRIVILEGE_MASK, see also bug 44583.\n+ * TLV_TAG_PRIVILEGE_MASK_ADD specifies bits that should be added (ORed) to firmware default while\n+ * TLV_TAG_PRIVILEGE_MASK_REM specifies bits that should be removed (ANDed) from firmware default:\n+ * Initial_privilege_mask = (firmware_default_mask | privilege_mask_add) & ~privilege_mask_rem */\n+\n+#define TLV_TAG_PRIVILEGE_MASK          (0x10150000) /* legacy symbol - do not use 
*/\n+\n+struct tlv_privilege_mask {                          /* legacy structure - do not use */\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t privilege_mask;\n+};\n+\n+#define TLV_TAG_PRIVILEGE_MASK_ADD      (0x10150000)\n+\n+struct tlv_privilege_mask_add {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t privilege_mask_add;\n+};\n+\n+#define TLV_TAG_PRIVILEGE_MASK_REM      (0x10160000)\n+\n+struct tlv_privilege_mask_rem {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t privilege_mask_rem;\n+};\n+\n+/* Additional privileges given to all PFs.\n+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */\n+\n+#define TLV_TAG_PRIVILEGE_MASK_ADD_ALL_PFS         (0x10190000)\n+\n+struct tlv_privilege_mask_add_all_pfs {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t privilege_mask_add;\n+};\n+\n+/* Additional privileges given to a selected PF.\n+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */\n+\n+#define TLV_TAG_PRIVILEGE_MASK_ADD_SINGLE_PF(pf)   (0x101A0000 + (pf))\n+\n+struct tlv_privilege_mask_add_single_pf {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t privilege_mask_add;\n+};\n+\n+/* Turning on/off the PFIOV mode.\n+ * This tag only takes effect if TLV_TAG_VSWITCH_TYPE is missing or set to DEFAULT. */\n+\n+#define TLV_TAG_PFIOV(port)             (0x10170000 + (port))\n+\n+struct tlv_pfiov {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t pfiov;\n+#define TLV_PFIOV_OFF                    (0) /* Default */\n+#define TLV_PFIOV_ON                     (1)\n+};\n+\n+/* Multicast filter chaining mode selection.\n+ *\n+ * When enabled, multicast packets are delivered to all recipients of all\n+ * matching multicast filters, with the exception that IP multicast filters\n+ * will steal traffic from MAC multicast filters on a per-function basis.\n+ * (New behaviour.)\n+ *\n+ * When disabled, multicast packets will always be delivered only to the\n+ * recipients of the highest priority matching multicast filter.\n+ * (Legacy behaviour.)\n+ *\n+ * The DEFAULT mode (which is the same as the tag not being present at all)\n+ * is equivalent to ENABLED in production builds, and DISABLED in eftest\n+ * builds.\n+ *\n+ * This option is intended to provide run-time control over this feature\n+ * while it is being stabilised and may be withdrawn at some point in the\n+ * future; the new behaviour is intended to become the standard behaviour.\n+ */\n+\n+#define TLV_TAG_MCAST_FILTER_CHAINING   (0x10180000)\n+\n+struct tlv_mcast_filter_chaining {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t mode;\n+#define TLV_MCAST_FILTER_CHAINING_DEFAULT  (0xffffffff)\n+#define TLV_MCAST_FILTER_CHAINING_DISABLED (0)\n+#define TLV_MCAST_FILTER_CHAINING_ENABLED  (1)\n+};\n+\n+/* Pacer rate limit per PF */\n+#define TLV_TAG_RATE_LIMIT(pf)    (0x101b0000 + (pf))\n+\n+struct tlv_rate_limit {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t rate_mbps;\n+};\n+\n+/* OCSD Enable/Disable\n+ *\n+ * This setting allows OCSD to be disabled. 
This is a requirement for HP\n+ * servers to support PCI passthrough for virtualization.\n+ *\n+ * The DEFAULT mode (which is the same as the tag not being present) is\n+ * equivalent to ENABLED.\n+ *\n+ * This option is not used by the MCFW, and is entirely handled by the various\n+ * drivers that support OCSD, by reading the setting before they attempt\n+ * to enable OCSD.\n+ *\n+ * bit0: OCSD Disabled/Enabled\n+ */\n+\n+#define TLV_TAG_OCSD (0x101C0000)\n+\n+struct tlv_ocsd {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t mode;\n+#define TLV_OCSD_DISABLED 0\n+#define TLV_OCSD_ENABLED 1 /* Default */\n+};\n+\n+/* Descriptor cache config.\n+ *\n+ * Sets the sizes of the TX and RX descriptor caches as a power of 2. It also\n+ * sets the total number of VIs. When the number of VIs is reduced VIs are taken\n+ * away from the highest numbered port first, so a vi_count of 1024 means 1024\n+ * VIs on the first port and 0 on the second (on a Torino).\n+ */\n+\n+#define TLV_TAG_DESCRIPTOR_CACHE_CONFIG    (0x101d0000)\n+\n+struct tlv_descriptor_cache_config {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t rx_desc_cache_size;\n+  uint8_t tx_desc_cache_size;\n+  uint16_t vi_count;\n+};\n+#define TLV_DESC_CACHE_DEFAULT (0xff)\n+#define TLV_VI_COUNT_DEFAULT   (0xffff)\n+\n+/* RX event merging config (read batching).\n+ *\n+ * Sets the global maximum number of events for the merging bins, and the\n+ * global timeout configuration for the bins.\n+ */\n+\n+#define TLV_TAG_RX_EVENT_MERGING_CONFIG    (0x101e0000)\n+\n+struct tlv_rx_event_merging_config {\n+  uint32_t  tag;\n+  uint32_t  length;\n+  uint32_t  max_events;\n+#define TLV_RX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)\n+  uint32_t  timeout_ns;\n+};\n+#define TLV_RX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)\n+#define TLV_RX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)\n+\n+#define TLV_TAG_PCIE_LINK_SETTINGS (0x101f0000)\n+struct tlv_pcie_link_settings {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint16_t gen;   /* Target PCIe generation: 1, 2, 3 */\n+  uint16_t width; /* Number of lanes */\n+};\n+\n+/* TX event merging config.\n+ *\n+ * Sets the global maximum number of events for the merging bins, and the\n+ * global timeout configuration for the bins, and the global timeout for\n+ * empty queues.\n+ */\n+#define TLV_TAG_TX_EVENT_MERGING_CONFIG    (0x10210000)\n+struct tlv_tx_event_merging_config {\n+  uint32_t  tag;\n+  uint32_t  length;\n+  uint32_t  max_events;\n+#define TLV_TX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)\n+  uint32_t  timeout_ns;\n+  uint32_t  qempty_timeout_ns; /* Medford only */\n+};\n+#define TLV_TX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)\n+#define TLV_TX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)\n+#define TLV_TX_EVENT_MERGING_QEMPTY_TIMEOUT_NS_DEFAULT (0xffffffff)\n+\n+#define TLV_TAG_LICENSE (0x30800000)\n+\n+typedef struct tlv_license {\n+  uint32_t  tag;\n+  uint32_t  length;\n+  uint8_t   data[];\n+} tlv_license_t;\n+\n+/* TSA NIC IP address configuration\n+ *\n+ * Sets the TSA NIC IP address statically via configuration tool or dynamically\n+ * via DHCP via snooping based on the mode selection (0=Static, 1=DHCP, 2=Snoop)\n+ *\n+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will\n+ * be moved to a private partition during TSA development. 
It is not used in any\n+ * released code yet.\n+ */\n+\n+#define TLV_TAG_TMP_TSAN_CONFIG         (0x10220000)\n+\n+#define TLV_TSAN_IP_MODE_STATIC         (0)\n+#define TLV_TSAN_IP_MODE_DHCP           (1)\n+#define TLV_TSAN_IP_MODE_SNOOP          (2)\n+typedef struct tlv_tsan_config {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t mode;\n+  uint32_t ip;\n+  uint32_t netmask;\n+  uint32_t gateway;\n+  uint32_t port;\n+  uint32_t bind_retry;  /* DEPRECATED */\n+  uint32_t bind_bkout;  /* DEPRECATED */\n+} tlv_tsan_config_t;\n+\n+/* TSA Controller IP address configuration\n+ *\n+ * Sets the TSA Controller IP address statically via configuration tool\n+ *\n+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will\n+ * be moved to a private partition during TSA development. It is not used in any\n+ * released code yet.\n+ */\n+\n+#define TLV_TAG_TMP_TSAC_CONFIG         (0x10230000)\n+\n+#define TLV_MAX_TSACS (4)\n+typedef struct tlv_tsac_config {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint32_t num_tsacs;\n+  uint32_t ip[TLV_MAX_TSACS];\n+  uint32_t port[TLV_MAX_TSACS];\n+} tlv_tsac_config_t;\n+\n+/* Binding ticket\n+ *\n+ * Sets the TSA NIC binding ticket used for binding process between the TSA NIC\n+ * and the TSA Controller\n+ *\n+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will\n+ * be moved to a private partition during TSA development. It is not used in any\n+ * released code yet.\n+ */\n+\n+#define TLV_TAG_TMP_BINDING_TICKET      (0x10240000)\n+\n+typedef struct tlv_binding_ticket {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  bytes[];\n+} tlv_binding_ticket_t;\n+\n+/* Solarflare private key  (DEPRECATED)\n+ *\n+ * Sets the Solareflare private key used for signing during the binding process\n+ *\n+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will\n+ * be moved to a private partition during TSA development. It is not used in any\n+ * released code yet.\n+ */\n+\n+#define TLV_TAG_TMP_PIK_SF              (0x10250000)    /* DEPRECATED */\n+\n+typedef struct tlv_pik_sf {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  bytes[];\n+} tlv_pik_sf_t;\n+\n+/* CA root certificate\n+ *\n+ * Sets the CA root certificate used for TSA Controller verfication during\n+ * TLS connection setup between the TSA NIC and the TSA Controller\n+ *\n+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will\n+ * be moved to a private partition during TSA development. 
It is not used in any\n+ * released code yet.\n+ */\n+\n+#define TLV_TAG_TMP_CA_ROOT_CERT        (0x10260000)\n+\n+typedef struct tlv_ca_root_cert {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  bytes[];\n+} tlv_ca_root_cert_t;\n+\n+/* Tx vFIFO Low latency configuration\n+ *\n+ * To keep the desired booting behaviour for the switch, it just requires to\n+ * know if the low latency mode is enabled.\n+ */\n+\n+#define TLV_TAG_TX_VFIFO_ULL_MODE       (0x10270000)\n+struct tlv_tx_vfifo_ull_mode {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  mode;\n+#define TLV_TX_VFIFO_ULL_MODE_DEFAULT    0\n+};\n+\n+/* BIU mode\n+ *\n+ * Medford2 tag for selecting VI window decode (see values below)\n+ */\n+#define TLV_TAG_BIU_VI_WINDOW_MODE       (0x10280000)\n+struct tlv_biu_vi_window_mode {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  mode;\n+#define TLV_BIU_VI_WINDOW_MODE_8K    0  /*  8k per VI, CTPIO not mapped, medford/hunt compatible */\n+#define TLV_BIU_VI_WINDOW_MODE_16K   1  /* 16k per VI, CTPIO mapped */\n+#define TLV_BIU_VI_WINDOW_MODE_64K   2  /* 64k per VI, CTPIO mapped, POWER-friendly */\n+};\n+\n+/* FastPD mode\n+ *\n+ * Medford2 tag for configuring the FastPD mode (see values below)\n+ */\n+#define TLV_TAG_FASTPD_MODE(port)       (0x10290000 + (port))\n+struct tlv_fastpd_mode {\n+  uint32_t tag;\n+  uint32_t length;\n+  uint8_t  mode;\n+#define TLV_FASTPD_MODE_SOFT_ALL       0  /* All packets to the SoftPD */\n+#define TLV_FASTPD_MODE_FAST_ALL       1  /* All packets to the FastPD */\n+#define TLV_FASTPD_MODE_FAST_SUPPORTED 2  /* Supported packet types to the FastPD; everything else to the SoftPD  */\n+};\n+\n+#endif /* CI_MGMT_TLV_LAYOUT_H */\ndiff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c\nnew file mode 100644\nindex 0000000..59343a3\n--- /dev/null\n+++ b/drivers/net/sfc/base/ef10_tx.c\n@@ -0,0 +1,683 @@\n+/*\n+ * Copyright (c) 2012-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#include \"efx.h\"\n+#include \"efx_impl.h\"\n+\n+\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+#define\tEFX_TX_QSTAT_INCR(_etp, _stat)\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_init_txq(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t size,\n+\t__in\t\tuint32_t target_evq,\n+\t__in\t\tuint32_t label,\n+\t__in\t\tuint32_t instance,\n+\t__in\t\tuint16_t flags,\n+\t__in\t\tefsys_mem_t *esmp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),\n+\t\t\t    MC_CMD_INIT_TXQ_OUT_LEN)];\n+\tefx_qword_t *dma_addr;\n+\tuint64_t addr;\n+\tint npages;\n+\tint i;\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=\n+\t    EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));\n+\n+\tnpages = EFX_TXQ_NBUFS(size);\n+\tif (npages > MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM) {\n+\t\trc = EINVAL;\n+\t\tgoto fail1;\n+\t}\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_INIT_TXQ;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);\n+\tMCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);\n+\tMCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);\n+\tMCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);\n+\n+\tMCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,\n+\t    INIT_TXQ_IN_FLAG_BUFF_MODE, 0,\n+\t    INIT_TXQ_IN_FLAG_IP_CSUM_DIS,\n+\t    (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,\n+\t    INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,\n+\t    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,\n+\t    INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 
1 : 0,\n+\t    INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,\n+\t    INIT_TXQ_IN_CRC_MODE, 0,\n+\t    INIT_TXQ_IN_FLAG_TIMESTAMP, 0);\n+\n+\tMCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);\n+\tMCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);\n+\n+\tdma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);\n+\taddr = EFSYS_MEM_ADDR(esmp);\n+\n+\tfor (i = 0; i < npages; i++) {\n+\t\tEFX_POPULATE_QWORD_2(*dma_addr,\n+\t\t    EFX_DWORD_1, (uint32_t)(addr >> 32),\n+\t\t    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));\n+\n+\t\tdma_addr++;\n+\t\taddr += EFX_BUF_SIZE;\n+\t}\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail2;\n+\t}\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+static\t__checkReturn\tefx_rc_t\n+efx_mcdi_fini_txq(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tuint32_t instance)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,\n+\t\t\t    MC_CMD_FINI_TXQ_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_FINI_TXQ;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;\n+\n+\tMCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);\n+\n+\tefx_mcdi_execute_quiet(enp, &req);\n+\n+\tif ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_tx_init(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\t_NOTE(ARGUNUSED(enp))\n+\treturn (0);\n+}\n+\n+\t\t\tvoid\n+ef10_tx_fini(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\t_NOTE(ARGUNUSED(enp))\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_tx_qcreate(\n+\t__in\t\tefx_nic_t *enp,\n+\t__in\t\tunsigned int index,\n+\t__in\t\tunsigned int label,\n+\t__in\t\tefsys_mem_t *esmp,\n+\t__in\t\tsize_t n,\n+\t__in\t\tuint32_t id,\n+\t__in\t\tuint16_t flags,\n+\t__in\t\tefx_evq_t *eep,\n+\t__in\t\tefx_txq_t *etp,\n+\t__out\t\tunsigned int *addedp)\n+{\n+\tefx_qword_t desc;\n+\tefx_rc_t rc;\n+\n+\t_NOTE(ARGUNUSED(id))\n+\n+\tif ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,\n+\t    esmp)) != 0)\n+\t\tgoto fail1;\n+\n+\t/*\n+\t * A previous user of this TX queue may have written a descriptor to the\n+\t * TX push collector, but not pushed the doorbell (e.g. after a crash).\n+\t * The next doorbell write would then push the stale descriptor.\n+\t *\n+\t * Ensure the (per network port) TX push collector is cleared by writing\n+\t * a no-op TX option descriptor. See bug29981 for details.\n+\t */\n+\t*addedp = 1;\n+\tEFX_POPULATE_QWORD_4(desc,\n+\t    ESF_DZ_TX_DESC_IS_OPT, 1,\n+\t    ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,\n+\t    ESF_DZ_TX_OPTION_UDP_TCP_CSUM,\n+\t    (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,\n+\t    ESF_DZ_TX_OPTION_IP_CSUM,\n+\t    (flags & EFX_TXQ_CKSUM_IPV4) ? 
1 : 0);\n+\n+\tEFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);\n+\tef10_tx_qpush(etp, *addedp, 0);\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\tvoid\n+ef10_tx_qdestroy(\n+\t__in\tefx_txq_t *etp)\n+{\n+\t/* FIXME */\n+\t_NOTE(ARGUNUSED(etp))\n+\t/* FIXME */\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_tx_qpio_enable(\n+\t__in\t\tefx_txq_t *etp)\n+{\n+\tefx_nic_t *enp = etp->et_enp;\n+\tefx_piobuf_handle_t handle;\n+\tefx_rc_t rc;\n+\n+\tif (etp->et_pio_size != 0) {\n+\t\trc = EALREADY;\n+\t\tgoto fail1;\n+\t}\n+\n+\t/* Sub-allocate a PIO block from a piobuf */\n+\tif ((rc = ef10_nic_pio_alloc(enp,\n+\t\t    &etp->et_pio_bufnum,\n+\t\t    &handle,\n+\t\t    &etp->et_pio_blknum,\n+\t\t    &etp->et_pio_offset,\n+\t\t    &etp->et_pio_size)) != 0) {\n+\t\tgoto fail2;\n+\t}\n+\tEFSYS_ASSERT3U(etp->et_pio_size, !=, 0);\n+\n+\t/* Link the piobuf to this TXQ */\n+\tif ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {\n+\t\tgoto fail3;\n+\t}\n+\n+\t/*\n+\t * et_pio_offset is the offset of the sub-allocated block within the\n+\t * hardware PIO buffer. It is used as the buffer address in the PIO\n+\t * option descriptor.\n+\t *\n+\t * et_pio_write_offset is the offset of the sub-allocated block from the\n+\t * start of the write-combined memory mapping, and is used for writing\n+\t * data into the PIO buffer.\n+\t */\n+\tetp->et_pio_write_offset =\n+\t    (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +\n+\t    ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;\n+\n+\treturn (0);\n+\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+\tef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);\n+\tetp->et_pio_size = 0;\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\t\tvoid\n+ef10_tx_qpio_disable(\n+\t__in\t\tefx_txq_t *etp)\n+{\n+\tefx_nic_t *enp = etp->et_enp;\n+\n+\tif (etp->et_pio_size != 0) {\n+\t\t/* Unlink the piobuf from this TXQ */\n+\t\tef10_nic_pio_unlink(enp, etp->et_index);\n+\n+\t\t/* Free the sub-allocated PIO block */\n+\t\tef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);\n+\t\tetp->et_pio_size = 0;\n+\t\tetp->et_pio_write_offset = 0;\n+\t}\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_tx_qpio_write(\n+\t__in\t\t\tefx_txq_t *etp,\n+\t__in_ecount(length)\tuint8_t *buffer,\n+\t__in\t\t\tsize_t length,\n+\t__in\t\t\tsize_t offset)\n+{\n+\tefx_nic_t *enp = etp->et_enp;\n+\tefsys_bar_t *esbp = enp->en_esbp;\n+\tuint32_t write_offset;\n+\tuint32_t write_offset_limit;\n+\tefx_qword_t *eqp;\n+\tefx_rc_t rc;\n+\n+\tEFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);\n+\n+\tif (etp->et_pio_size == 0) {\n+\t\trc = ENOENT;\n+\t\tgoto fail1;\n+\t}\n+\tif (offset + length > etp->et_pio_size)\t{\n+\t\trc = ENOSPC;\n+\t\tgoto fail2;\n+\t}\n+\n+\t/*\n+\t * Writes to PIO buffers must be 64 bit aligned, and multiples of\n+\t * 64 bits.\n+\t */\n+\twrite_offset = etp->et_pio_write_offset + offset;\n+\twrite_offset_limit = write_offset + length;\n+\teqp = (efx_qword_t *)buffer;\n+\twhile (write_offset < write_offset_limit) {\n+\t\tEFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);\n+\t\teqp++;\n+\t\twrite_offset += sizeof (efx_qword_t);\n+\t}\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_tx_qpio_post(\n+\t__in\t\t\tefx_txq_t *etp,\n+\t__in\t\t\tsize_t pkt_length,\n+\t__in\t\t\tunsigned int completed,\n+\t__inout\t\t\tunsigned int *addedp)\n+{\n+\tefx_qword_t pio_desc;\n+\tunsigned int 
id;\n+\tsize_t offset;\n+\tunsigned int added = *addedp;\n+\tefx_rc_t rc;\n+\n+\n+\tif (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {\n+\t\trc = ENOSPC;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (etp->et_pio_size == 0) {\n+\t\trc = ENOENT;\n+\t\tgoto fail2;\n+\t}\n+\n+\tid = added++ & etp->et_mask;\n+\toffset = id * sizeof (efx_qword_t);\n+\n+\tEFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,\n+\t\t    unsigned int, id, uint32_t, etp->et_pio_offset,\n+\t\t    size_t, pkt_length);\n+\n+\tEFX_POPULATE_QWORD_5(pio_desc,\n+\t\t\tESF_DZ_TX_DESC_IS_OPT, 1,\n+\t\t\tESF_DZ_TX_OPTION_TYPE, 1,\n+\t\t\tESF_DZ_TX_PIO_CONT, 0,\n+\t\t\tESF_DZ_TX_PIO_BYTE_CNT, pkt_length,\n+\t\t\tESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);\n+\n+\tEFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);\n+\n+\tEFX_TX_QSTAT_INCR(etp, TX_POST_PIO);\n+\n+\t*addedp = added;\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_tx_qpost(\n+\t__in\t\tefx_txq_t *etp,\n+\t__in_ecount(n)\tefx_buffer_t *eb,\n+\t__in\t\tunsigned int n,\n+\t__in\t\tunsigned int completed,\n+\t__inout\t\tunsigned int *addedp)\n+{\n+\tunsigned int added = *addedp;\n+\tunsigned int i;\n+\tefx_rc_t rc;\n+\n+\tif (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {\n+\t\trc = ENOSPC;\n+\t\tgoto fail1;\n+\t}\n+\n+\tfor (i = 0; i < n; i++) {\n+\t\tefx_buffer_t *ebp = &eb[i];\n+\t\tefsys_dma_addr_t addr = ebp->eb_addr;\n+\t\tsize_t size = ebp->eb_size;\n+\t\tboolean_t eop = ebp->eb_eop;\n+\t\tunsigned int id;\n+\t\tsize_t offset;\n+\t\tefx_qword_t qword;\n+\n+\t\t/* Fragments must not span 4k boundaries. */\n+\t\tEFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= (addr + size));\n+\n+\t\tid = added++ & etp->et_mask;\n+\t\toffset = id * sizeof (efx_qword_t);\n+\n+\t\tEFSYS_PROBE5(tx_post, unsigned int, etp->et_index,\n+\t\t    unsigned int, id, efsys_dma_addr_t, addr,\n+\t\t    size_t, size, boolean_t, eop);\n+\n+\t\tEFX_POPULATE_QWORD_5(qword,\n+\t\t    ESF_DZ_TX_KER_TYPE, 0,\n+\t\t    ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,\n+\t\t    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),\n+\t\t    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),\n+\t\t    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));\n+\n+\t\tEFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);\n+\t}\n+\n+\tEFX_TX_QSTAT_INCR(etp, TX_POST);\n+\n+\t*addedp = added;\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+/*\n+ * This improves performance by pushing a TX descriptor at the same time as the\n+ * doorbell. 
The descriptor must be added to the TXQ, so that can be used if the\n+ * hardware decides not to use the pushed descriptor.\n+ */\n+\t\t\tvoid\n+ef10_tx_qpush(\n+\t__in\t\tefx_txq_t *etp,\n+\t__in\t\tunsigned int added,\n+\t__in\t\tunsigned int pushed)\n+{\n+\tefx_nic_t *enp = etp->et_enp;\n+\tunsigned int wptr;\n+\tunsigned int id;\n+\tsize_t offset;\n+\tefx_qword_t desc;\n+\tefx_oword_t oword;\n+\n+\twptr = added & etp->et_mask;\n+\tid = pushed & etp->et_mask;\n+\toffset = id * sizeof (efx_qword_t);\n+\n+\tEFSYS_MEM_READQ(etp->et_esmp, offset, &desc);\n+\tEFX_POPULATE_OWORD_3(oword,\n+\t    ERF_DZ_TX_DESC_WPTR, wptr,\n+\t    ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),\n+\t    ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));\n+\n+\t/* Guarantee ordering of memory (descriptors) and PIO (doorbell) */\n+\tEFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);\n+\tEFSYS_PIO_WRITE_BARRIER();\n+\tEFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,\n+\t\t\t\t    &oword);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_tx_qdesc_post(\n+\t__in\t\tefx_txq_t *etp,\n+\t__in_ecount(n)\tefx_desc_t *ed,\n+\t__in\t\tunsigned int n,\n+\t__in\t\tunsigned int completed,\n+\t__inout\t\tunsigned int *addedp)\n+{\n+\tunsigned int added = *addedp;\n+\tunsigned int i;\n+\tefx_rc_t rc;\n+\n+\tif (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {\n+\t\trc = ENOSPC;\n+\t\tgoto fail1;\n+\t}\n+\n+\tfor (i = 0; i < n; i++) {\n+\t\tefx_desc_t *edp = &ed[i];\n+\t\tunsigned int id;\n+\t\tsize_t offset;\n+\n+\t\tid = added++ & etp->et_mask;\n+\t\toffset = id * sizeof (efx_desc_t);\n+\n+\t\tEFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);\n+\t}\n+\n+\tEFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,\n+\t\t    unsigned int, added, unsigned int, n);\n+\n+\tEFX_TX_QSTAT_INCR(etp, TX_POST);\n+\n+\t*addedp = added;\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\tvoid\n+ef10_tx_qdesc_dma_create(\n+\t__in\tefx_txq_t *etp,\n+\t__in\tefsys_dma_addr_t addr,\n+\t__in\tsize_t size,\n+\t__in\tboolean_t eop,\n+\t__out\tefx_desc_t *edp)\n+{\n+\t/* Fragments must not span 4k boundaries. */\n+\tEFSYS_ASSERT(P2ROUNDUP(addr + 1, 4096) >= addr + size);\n+\n+\tEFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,\n+\t\t    efsys_dma_addr_t, addr,\n+\t\t    size_t, size, boolean_t, eop);\n+\n+\tEFX_POPULATE_QWORD_5(edp->ed_eq,\n+\t\t    ESF_DZ_TX_KER_TYPE, 0,\n+\t\t    ESF_DZ_TX_KER_CONT, (eop) ? 
0 : 1,\n+\t\t    ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),\n+\t\t    ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),\n+\t\t    ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));\n+}\n+\n+\tvoid\n+ef10_tx_qdesc_tso_create(\n+\t__in\tefx_txq_t *etp,\n+\t__in\tuint16_t ipv4_id,\n+\t__in\tuint32_t tcp_seq,\n+\t__in\tuint8_t  tcp_flags,\n+\t__out\tefx_desc_t *edp)\n+{\n+\tEFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,\n+\t\t    uint16_t, ipv4_id, uint32_t, tcp_seq,\n+\t\t    uint8_t, tcp_flags);\n+\n+\tEFX_POPULATE_QWORD_5(edp->ed_eq,\n+\t\t\t    ESF_DZ_TX_DESC_IS_OPT, 1,\n+\t\t\t    ESF_DZ_TX_OPTION_TYPE,\n+\t\t\t    ESE_DZ_TX_OPTION_DESC_TSO,\n+\t\t\t    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,\n+\t\t\t    ESF_DZ_TX_TSO_IP_ID, ipv4_id,\n+\t\t\t    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);\n+}\n+\n+\tvoid\n+ef10_tx_qdesc_tso2_create(\n+\t__in\t\t\tefx_txq_t *etp,\n+\t__in\t\t\tuint16_t ipv4_id,\n+\t__in\t\t\tuint32_t tcp_seq,\n+\t__in\t\t\tuint16_t tcp_mss,\n+\t__out_ecount(count)\tefx_desc_t *edp,\n+\t__in\t\t\tint count)\n+{\n+\tEFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,\n+\t\t    uint16_t, ipv4_id, uint32_t, tcp_seq,\n+\t\t    uint16_t, tcp_mss);\n+\n+\tEFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);\n+\n+\tEFX_POPULATE_QWORD_5(edp[0].ed_eq,\n+\t\t\t    ESF_DZ_TX_DESC_IS_OPT, 1,\n+\t\t\t    ESF_DZ_TX_OPTION_TYPE,\n+\t\t\t    ESE_DZ_TX_OPTION_DESC_TSO,\n+\t\t\t    ESF_DZ_TX_TSO_OPTION_TYPE,\n+\t\t\t    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,\n+\t\t\t    ESF_DZ_TX_TSO_IP_ID, ipv4_id,\n+\t\t\t    ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);\n+\tEFX_POPULATE_QWORD_4(edp[1].ed_eq,\n+\t\t\t    ESF_DZ_TX_DESC_IS_OPT, 1,\n+\t\t\t    ESF_DZ_TX_OPTION_TYPE,\n+\t\t\t    ESE_DZ_TX_OPTION_DESC_TSO,\n+\t\t\t    ESF_DZ_TX_TSO_OPTION_TYPE,\n+\t\t\t    ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,\n+\t\t\t    ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);\n+}\n+\n+\tvoid\n+ef10_tx_qdesc_vlantci_create(\n+\t__in\tefx_txq_t *etp,\n+\t__in\tuint16_t  tci,\n+\t__out\tefx_desc_t *edp)\n+{\n+\tEFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,\n+\t\t    uint16_t, tci);\n+\n+\tEFX_POPULATE_QWORD_4(edp->ed_eq,\n+\t\t\t    ESF_DZ_TX_DESC_IS_OPT, 1,\n+\t\t\t    ESF_DZ_TX_OPTION_TYPE,\n+\t\t\t    ESE_DZ_TX_OPTION_DESC_VLAN,\n+\t\t\t    ESF_DZ_TX_VLAN_OP, tci ? 
1 : 0,\n+\t\t\t    ESF_DZ_TX_VLAN_TAG1, tci);\n+}\n+\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_tx_qpace(\n+\t__in\t\tefx_txq_t *etp,\n+\t__in\t\tunsigned int ns)\n+{\n+\tefx_rc_t rc;\n+\n+\t/* FIXME */\n+\t_NOTE(ARGUNUSED(etp, ns))\n+\t_NOTE(CONSTANTCONDITION)\n+\tif (B_FALSE) {\n+\t\trc = ENOTSUP;\n+\t\tgoto fail1;\n+\t}\n+\t/* FIXME */\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+ef10_tx_qflush(\n+\t__in\t\tefx_txq_t *etp)\n+{\n+\tefx_nic_t *enp = etp->et_enp;\n+\tefx_rc_t rc;\n+\n+\tif ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)\n+\t\tgoto fail1;\n+\n+\treturn (0);\n+\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t\t\tvoid\n+ef10_tx_qenable(\n+\t__in\t\tefx_txq_t *etp)\n+{\n+\t/* FIXME */\n+\t_NOTE(ARGUNUSED(etp))\n+\t/* FIXME */\n+}\n+\n+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\ndiff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h\nindex d68a36b..df56aa2 100644\n--- a/drivers/net/sfc/base/efx.h\n+++ b/drivers/net/sfc/base/efx.h\n@@ -184,6 +184,11 @@ efx_nic_check_pcie_link_speed(\n \n #if EFSYS_OPT_MCDI\n \n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+/* Huntington and Medford require MCDIv2 commands */\n+#define\tWITH_MCDI_V2 1\n+#endif\n+\n typedef struct efx_mcdi_req_s efx_mcdi_req_t;\n \n typedef enum efx_mcdi_exception_e {\n@@ -581,6 +586,11 @@ typedef struct efx_nic_cfg_s {\n #if EFSYS_OPT_MCDI\n \tuint8_t\t\t\tenc_mcdi_mdio_channel;\n #endif\t/* EFSYS_OPT_MCDI */\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\tuint32_t\t\tenc_pf;\n+\tuint32_t\t\tenc_vf;\n+\tuint32_t\t\tenc_privilege_mask;\n+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n \tboolean_t\t\tenc_bug26807_workaround;\n \tboolean_t\t\tenc_bug35388_workaround;\n \tboolean_t\t\tenc_bug41750_workaround;\ndiff --git a/drivers/net/sfc/base/efx_check.h b/drivers/net/sfc/base/efx_check.h\nindex 190ac46..63c809c 100644\n--- a/drivers/net/sfc/base/efx_check.h\n+++ b/drivers/net/sfc/base/efx_check.h\n@@ -70,6 +70,12 @@\n # endif\n #endif /* EFSYS_OPT_FILTER */\n \n+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)\n+# if !EFSYS_OPT_FILTER\n+#  error \"HUNTINGTON or MEDFORD requires FILTER\"\n+# endif\n+#endif /* EFSYS_OPT_HUNTINGTON */\n+\n #ifdef EFSYS_OPT_MAC_FALCON_GMAC\n # error \"MAC_FALCON_GMAC is obsolete and is not supported.\"\n #endif\ndiff --git a/drivers/net/sfc/base/efx_ev.c b/drivers/net/sfc/base/efx_ev.c\nindex 59f4d02..65094c1 100644\n--- a/drivers/net/sfc/base/efx_ev.c\n+++ b/drivers/net/sfc/base/efx_ev.c\n@@ -93,6 +93,18 @@ static const efx_ev_ops_t\t__efx_ev_siena_ops = {\n };\n #endif /* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+static const efx_ev_ops_t\t__efx_ev_ef10_ops = {\n+\tef10_ev_init,\t\t\t\t/* eevo_init */\n+\tef10_ev_fini,\t\t\t\t/* eevo_fini */\n+\tef10_ev_qcreate,\t\t\t/* eevo_qcreate */\n+\tef10_ev_qdestroy,\t\t\t/* eevo_qdestroy */\n+\tef10_ev_qprime,\t\t\t\t/* eevo_qprime */\n+\tef10_ev_qpost,\t\t\t\t/* eevo_qpost */\n+\tef10_ev_qmoderate,\t\t\t/* eevo_qmoderate */\n+};\n+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n+\n \n \t__checkReturn\tefx_rc_t\n efx_ev_init(\n@@ -116,6 +128,12 @@ efx_ev_init(\n \t\tbreak;\n #endif /* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+\tcase EFX_FAMILY_HUNTINGTON:\n+\t\teevop = &__efx_ev_ef10_ops;\n+\t\tbreak;\n+#endif /* EFSYS_OPT_HUNTINGTON */\n+\n \tdefault:\n \t\tEFSYS_ASSERT(0);\n \t\trc = ENOTSUP;\ndiff --git a/drivers/net/sfc/base/efx_filter.c 
b/drivers/net/sfc/base/efx_filter.c\nindex 429f2b3..47e2ae4 100644\n--- a/drivers/net/sfc/base/efx_filter.c\n+++ b/drivers/net/sfc/base/efx_filter.c\n@@ -79,6 +79,18 @@ static const efx_filter_ops_t\t__efx_filter_siena_ops = {\n };\n #endif /* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+static const efx_filter_ops_t\t__efx_filter_ef10_ops = {\n+\tef10_filter_init,\t\t/* efo_init */\n+\tef10_filter_fini,\t\t/* efo_fini */\n+\tef10_filter_restore,\t\t/* efo_restore */\n+\tef10_filter_add,\t\t/* efo_add */\n+\tef10_filter_delete,\t\t/* efo_delete */\n+\tef10_filter_supported_filters,\t/* efo_supported_filters */\n+\tef10_filter_reconfigure,\t/* efo_reconfigure */\n+};\n+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n+\n \t__checkReturn\tefx_rc_t\n efx_filter_insert(\n \t__in\t\tefx_nic_t *enp,\n@@ -147,6 +159,12 @@ efx_filter_init(\n \t\tbreak;\n #endif /* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+\tcase EFX_FAMILY_HUNTINGTON:\n+\t\tefop = &__efx_filter_ef10_ops;\n+\t\tbreak;\n+#endif /* EFSYS_OPT_HUNTINGTON */\n+\n \tdefault:\n \t\tEFSYS_ASSERT(0);\n \t\trc = ENOTSUP;\ndiff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h\nindex 8d85f3f..10ab36b 100644\n--- a/drivers/net/sfc/base/efx_impl.h\n+++ b/drivers/net/sfc/base/efx_impl.h\n@@ -45,6 +45,14 @@\n #include \"siena_impl.h\"\n #endif\t/* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+#include \"hunt_impl.h\"\n+#endif\t/* EFSYS_OPT_HUNTINGTON */\n+\n+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)\n+#include \"ef10_impl.h\"\n+#endif\t/* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */\n+\n #ifdef\t__cplusplus\n extern \"C\" {\n #endif\n@@ -331,6 +339,9 @@ typedef struct efx_filter_s {\n #if EFSYS_OPT_SIENA\n \tsiena_filter_t\t\t*ef_siena_filter;\n #endif /* EFSYS_OPT_SIENA */\n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\tef10_filter_table_t\t*ef_ef10_filter_table;\n+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n } efx_filter_t;\n \n #if EFSYS_OPT_SIENA\n@@ -413,6 +424,24 @@ struct efx_nic_s {\n #endif\t/* EFSYS_OPT_SIENA */\n \t\tint\tenu_unused;\n \t} en_u;\n+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)\n+\tunion en_arch {\n+\t\tstruct {\n+\t\t\tint\t\t\tena_vi_base;\n+\t\t\tint\t\t\tena_vi_count;\n+\t\t\tint\t\t\tena_vi_shift;\n+\t\t\tefx_piobuf_handle_t\tena_piobuf_handle[EF10_MAX_PIOBUF_NBUFS];\n+\t\t\tuint32_t\t\tena_piobuf_count;\n+\t\t\tuint32_t\t\tena_pio_alloc_map[EF10_MAX_PIOBUF_NBUFS];\n+\t\t\tuint32_t\t\tena_pio_write_vi_base;\n+\t\t\t/* Memory BAR mapping regions */\n+\t\t\tuint32_t\t\tena_uc_mem_map_offset;\n+\t\t\tsize_t\t\t\tena_uc_mem_map_size;\n+\t\t\tuint32_t\t\tena_wc_mem_map_offset;\n+\t\t\tsize_t\t\t\tena_wc_mem_map_size;\n+\t\t} ef10;\n+\t} en_arch;\n+#endif\t/* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */\n };\n \n \n@@ -469,6 +498,13 @@ struct efx_txq_s {\n \tunsigned int\t\t\tet_index;\n \tunsigned int\t\t\tet_mask;\n \tefsys_mem_t\t\t\t*et_esmp;\n+#if EFSYS_OPT_HUNTINGTON\n+\tuint32_t\t\t\tet_pio_bufnum;\n+\tuint32_t\t\t\tet_pio_blknum;\n+\tuint32_t\t\t\tet_pio_write_offset;\n+\tuint32_t\t\t\tet_pio_offset;\n+\tsize_t\t\t\t\tet_pio_size;\n+#endif\n };\n \n #define\tEFX_TXQ_MAGIC\t0x05092005\ndiff --git a/drivers/net/sfc/base/efx_intr.c b/drivers/net/sfc/base/efx_intr.c\nindex ecc09d3..50cf388 100644\n--- a/drivers/net/sfc/base/efx_intr.c\n+++ b/drivers/net/sfc/base/efx_intr.c\n@@ -99,6 +99,20 @@ static const efx_intr_ops_t\t__efx_intr_siena_ops = {\n };\n #endif\t/* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON || 
EFSYS_OPT_MEDFORD\n+static const efx_intr_ops_t\t__efx_intr_ef10_ops = {\n+\tef10_intr_init,\t\t\t/* eio_init */\n+\tef10_intr_enable,\t\t/* eio_enable */\n+\tef10_intr_disable,\t\t/* eio_disable */\n+\tef10_intr_disable_unlocked,\t/* eio_disable_unlocked */\n+\tef10_intr_trigger,\t\t/* eio_trigger */\n+\tef10_intr_status_line,\t\t/* eio_status_line */\n+\tef10_intr_status_message,\t/* eio_status_message */\n+\tef10_intr_fatal,\t\t/* eio_fatal */\n+\tef10_intr_fini,\t\t\t/* eio_fini */\n+};\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n+\n \t__checkReturn\tefx_rc_t\n efx_intr_init(\n \t__in\t\tefx_nic_t *enp,\n@@ -130,6 +144,12 @@ efx_intr_init(\n \t\tbreak;\n #endif\t/* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+\tcase EFX_FAMILY_HUNTINGTON:\n+\t\teiop = &__efx_intr_ef10_ops;\n+\t\tbreak;\n+#endif\t/* EFSYS_OPT_HUNTINGTON */\n+\n \tdefault:\n \t\tEFSYS_ASSERT(B_FALSE);\n \t\trc = ENOTSUP;\ndiff --git a/drivers/net/sfc/base/efx_mac.c b/drivers/net/sfc/base/efx_mac.c\nindex ce27376..1d50128 100644\n--- a/drivers/net/sfc/base/efx_mac.c\n+++ b/drivers/net/sfc/base/efx_mac.c\n@@ -53,6 +53,21 @@ static const efx_mac_ops_t\t__efx_siena_mac_ops = {\n };\n #endif\t/* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+static const efx_mac_ops_t\t__efx_ef10_mac_ops = {\n+\tef10_mac_poll,\t\t\t\t/* emo_poll */\n+\tef10_mac_up,\t\t\t\t/* emo_up */\n+\tef10_mac_addr_set,\t\t\t/* emo_addr_set */\n+\tef10_mac_pdu_set,\t\t\t/* emo_pdu_set */\n+\tef10_mac_pdu_get,\t\t\t/* emo_pdu_get */\n+\tef10_mac_reconfigure,\t\t\t/* emo_reconfigure */\n+\tef10_mac_multicast_list_set,\t\t/* emo_multicast_list_set */\n+\tef10_mac_filter_default_rxq_set,\t/* emo_filter_default_rxq_set */\n+\tef10_mac_filter_default_rxq_clear,\n+\t\t\t\t\t/* emo_filter_default_rxq_clear */\n+};\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n+\n \t__checkReturn\t\t\tefx_rc_t\n efx_mac_pdu_set(\n \t__in\t\t\t\tefx_nic_t *enp,\n@@ -494,6 +509,13 @@ efx_mac_select(\n \t\tbreak;\n #endif /* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+\tcase EFX_FAMILY_HUNTINGTON:\n+\t\temop = &__efx_ef10_mac_ops;\n+\t\ttype = EFX_MAC_HUNTINGTON;\n+\t\tbreak;\n+#endif /* EFSYS_OPT_HUNTINGTON */\n+\n \tdefault:\n \t\trc = EINVAL;\n \t\tgoto fail1;\ndiff --git a/drivers/net/sfc/base/efx_mcdi.c b/drivers/net/sfc/base/efx_mcdi.c\nindex deb5d87..338ff49 100644\n--- a/drivers/net/sfc/base/efx_mcdi.c\n+++ b/drivers/net/sfc/base/efx_mcdi.c\n@@ -69,6 +69,21 @@ static const efx_mcdi_ops_t\t__efx_mcdi_siena_ops = {\n \n #endif\t/* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+static const efx_mcdi_ops_t\t__efx_mcdi_ef10_ops = {\n+\tef10_mcdi_init,\t\t\t/* emco_init */\n+\tef10_mcdi_send_request,\t\t/* emco_send_request */\n+\tef10_mcdi_poll_reboot,\t\t/* emco_poll_reboot */\n+\tef10_mcdi_poll_response,\t/* emco_poll_response */\n+\tef10_mcdi_read_response,\t/* emco_read_response */\n+\tef10_mcdi_fini,\t\t\t/* emco_fini */\n+\tef10_mcdi_feature_supported,\t/* emco_feature_supported */\n+\tef10_mcdi_get_timeout,\t\t/* emco_get_timeout */\n+};\n+\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n+\n \n \n \t__checkReturn\tefx_rc_t\n@@ -89,6 +104,12 @@ efx_mcdi_init(\n \t\tbreak;\n #endif\t/* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+\tcase EFX_FAMILY_HUNTINGTON:\n+\t\temcop = &__efx_mcdi_ef10_ops;\n+\t\tbreak;\n+#endif\t/* EFSYS_OPT_HUNTINGTON */\n+\n \tdefault:\n \t\tEFSYS_ASSERT(0);\n \t\trc = ENOTSUP;\n@@ -1553,6 +1574,107 @@ efx_mcdi_log_ctrl(\n }\n \n \n+#if 
EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\n+/*\n+ * This function returns the pf and vf number of a function.  If it is a pf the\n+ * vf number is 0xffff.  The vf number is the index of the vf on that\n+ * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0),\n+ * (pf=0,vf=1), (pf=0,vf=2) aand the pf will return (pf=0, vf=0xffff).\n+ */\n+\t__checkReturn\t\tefx_rc_t\n+efx_mcdi_get_function_info(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__out\t\t\tuint32_t *pfp,\n+\t__out_opt\t\tuint32_t *vfp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN,\n+\t\t\t    MC_CMD_GET_FUNCTION_INFO_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_GET_FUNCTION_INFO;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN;\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\t*pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);\n+\tif (vfp != NULL)\n+\t\t*vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\t\tefx_rc_t\n+efx_mcdi_privilege_mask(\n+\t__in\t\t\tefx_nic_t *enp,\n+\t__in\t\t\tuint32_t pf,\n+\t__in\t\t\tuint32_t vf,\n+\t__out\t\t\tuint32_t *maskp)\n+{\n+\tefx_mcdi_req_t req;\n+\tuint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN,\n+\t\t\t    MC_CMD_PRIVILEGE_MASK_OUT_LEN)];\n+\tefx_rc_t rc;\n+\n+\t(void) memset(payload, 0, sizeof (payload));\n+\treq.emr_cmd = MC_CMD_PRIVILEGE_MASK;\n+\treq.emr_in_buf = payload;\n+\treq.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;\n+\treq.emr_out_buf = payload;\n+\treq.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;\n+\n+\tMCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,\n+\t    PRIVILEGE_MASK_IN_FUNCTION_PF, pf,\n+\t    PRIVILEGE_MASK_IN_FUNCTION_VF, vf);\n+\n+\tefx_mcdi_execute(enp, &req);\n+\n+\tif (req.emr_rc != 0) {\n+\t\trc = req.emr_rc;\n+\t\tgoto fail1;\n+\t}\n+\n+\tif (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {\n+\t\trc = EMSGSIZE;\n+\t\tgoto fail2;\n+\t}\n+\n+\t*maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n+\n \t__checkReturn\t\tefx_rc_t\n efx_mcdi_set_workaround(\n \t__in\t\t\tefx_nic_t *enp,\ndiff --git a/drivers/net/sfc/base/efx_nic.c b/drivers/net/sfc/base/efx_nic.c\nindex 5a758ea..701c6e3 100644\n--- a/drivers/net/sfc/base/efx_nic.c\n+++ b/drivers/net/sfc/base/efx_nic.c\n@@ -54,6 +54,26 @@ efx_family(\n \t\t\treturn (0);\n #endif /* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+\t\tcase EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT:\n+\t\t\t/*\n+\t\t\t * Hardware default for PF0 of uninitialised Huntington.\n+\t\t\t * manftest must be able to cope with this device id.\n+\t\t\t */\n+\t\t\t*efp = EFX_FAMILY_HUNTINGTON;\n+\t\t\treturn (0);\n+\n+\t\tcase EFX_PCI_DEVID_FARMINGDALE:\n+\t\tcase EFX_PCI_DEVID_GREENPORT:\n+\t\t\t*efp = EFX_FAMILY_HUNTINGTON;\n+\t\t\treturn (0);\n+\n+\t\tcase EFX_PCI_DEVID_FARMINGDALE_VF:\n+\t\tcase EFX_PCI_DEVID_GREENPORT_VF:\n+\t\t\t*efp = 
EFX_FAMILY_HUNTINGTON;\n+\t\t\treturn (0);\n+#endif /* EFSYS_OPT_HUNTINGTON */\n+\n \t\tcase EFX_PCI_DEVID_FALCON:\t/* Obsolete, not supported */\n \t\tdefault:\n \t\t\tbreak;\n@@ -152,6 +172,22 @@ static const efx_nic_ops_t\t__efx_nic_siena_ops = {\n \n #endif\t/* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+\n+static const efx_nic_ops_t\t__efx_nic_hunt_ops = {\n+\tef10_nic_probe,\t\t\t/* eno_probe */\n+\thunt_board_cfg,\t\t\t/* eno_board_cfg */\n+\tef10_nic_set_drv_limits,\t/* eno_set_drv_limits */\n+\tef10_nic_reset,\t\t\t/* eno_reset */\n+\tef10_nic_init,\t\t\t/* eno_init */\n+\tef10_nic_get_vi_pool,\t\t/* eno_get_vi_pool */\n+\tef10_nic_get_bar_region,\t/* eno_get_bar_region */\n+\tef10_nic_fini,\t\t\t/* eno_fini */\n+\tef10_nic_unprobe,\t\t/* eno_unprobe */\n+};\n+\n+#endif\t/* EFSYS_OPT_HUNTINGTON */\n+\n \n \t__checkReturn\tefx_rc_t\n efx_nic_create(\n@@ -193,6 +229,23 @@ efx_nic_create(\n \t\tbreak;\n #endif\t/* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+\tcase EFX_FAMILY_HUNTINGTON:\n+\t\tenp->en_enop = &__efx_nic_hunt_ops;\n+\t\tenp->en_features =\n+\t\t    EFX_FEATURE_IPV6 |\n+\t\t    EFX_FEATURE_LINK_EVENTS |\n+\t\t    EFX_FEATURE_PERIODIC_MAC_STATS |\n+\t\t    EFX_FEATURE_MCDI |\n+\t\t    EFX_FEATURE_MAC_HEADER_FILTERS |\n+\t\t    EFX_FEATURE_MCDI_DMA |\n+\t\t    EFX_FEATURE_PIO_BUFFERS |\n+\t\t    EFX_FEATURE_FW_ASSISTED_TSO |\n+\t\t    EFX_FEATURE_FW_ASSISTED_TSO_V2 |\n+\t\t    EFX_FEATURE_PACKED_STREAM;\n+\t\tbreak;\n+#endif\t/* EFSYS_OPT_HUNTINGTON */\n+\n \tdefault:\n \t\trc = ENOTSUP;\n \t\tgoto fail2;\ndiff --git a/drivers/net/sfc/base/efx_phy.c b/drivers/net/sfc/base/efx_phy.c\nindex a6a2af4..b663cf8 100644\n--- a/drivers/net/sfc/base/efx_phy.c\n+++ b/drivers/net/sfc/base/efx_phy.c\n@@ -42,6 +42,16 @@ static const efx_phy_ops_t\t__efx_phy_siena_ops = {\n };\n #endif\t/* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+static const efx_phy_ops_t\t__efx_phy_ef10_ops = {\n+\tef10_phy_power,\t\t\t/* epo_power */\n+\tNULL,\t\t\t\t/* epo_reset */\n+\tef10_phy_reconfigure,\t\t/* epo_reconfigure */\n+\tef10_phy_verify,\t\t/* epo_verify */\n+\tef10_phy_oui_get,\t\t/* epo_oui_get */\n+};\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n+\n \t__checkReturn\tefx_rc_t\n efx_phy_probe(\n \t__in\t\tefx_nic_t *enp)\n@@ -63,6 +73,11 @@ efx_phy_probe(\n \t\tepop = &__efx_phy_siena_ops;\n \t\tbreak;\n #endif\t/* EFSYS_OPT_SIENA */\n+#if EFSYS_OPT_HUNTINGTON\n+\tcase EFX_FAMILY_HUNTINGTON:\n+\t\tepop = &__efx_phy_ef10_ops;\n+\t\tbreak;\n+#endif\t/* EFSYS_OPT_HUNTINGTON */\n \tdefault:\n \t\trc = ENOTSUP;\n \t\tgoto fail1;\ndiff --git a/drivers/net/sfc/base/efx_regs_ef10.h b/drivers/net/sfc/base/efx_regs_ef10.h\nnew file mode 100644\nindex 0000000..11a9184\n--- /dev/null\n+++ b/drivers/net/sfc/base/efx_regs_ef10.h\n@@ -0,0 +1,571 @@\n+/*\n+ * Copyright (c) 2007-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. 
Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#ifndef\t_SYS_EFX_EF10_REGS_H\n+#define\t_SYS_EFX_EF10_REGS_H\n+\n+#ifdef\t__cplusplus\n+extern \"C\" {\n+#endif\n+\n+/**************************************************************************\n+ * NOTE: the line below marks the start of the autogenerated section\n+ * EF10 registers and descriptors\n+ *\n+ **************************************************************************\n+ */\n+\n+/*\n+ * BIU_HW_REV_ID_REG(32bit):\n+ *\n+ */\n+\n+#define\tER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000\n+/* hunta0,medforda0=pcie_pf_bar2 */\n+#define\tER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face\n+\n+\n+#define\tERF_DZ_HW_REV_ID_LBN 0\n+#define\tERF_DZ_HW_REV_ID_WIDTH 32\n+\n+\n+/*\n+ * BIU_MC_SFT_STATUS_REG(32bit):\n+ *\n+ */\n+\n+#define\tER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010\n+/* hunta0,medforda0=pcie_pf_bar2 */\n+#define\tER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4\n+#define\tER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8\n+#define\tER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face\n+\n+\n+#define\tERF_DZ_MC_SFT_STATUS_LBN 0\n+#define\tERF_DZ_MC_SFT_STATUS_WIDTH 32\n+\n+\n+/*\n+ * BIU_INT_ISR_REG(32bit):\n+ *\n+ */\n+\n+#define\tER_DZ_BIU_INT_ISR_REG_OFST 0x00000090\n+/* hunta0,medforda0=pcie_pf_bar2 */\n+#define\tER_DZ_BIU_INT_ISR_REG_RESET 0x0\n+\n+\n+#define\tERF_DZ_ISR_REG_LBN 0\n+#define\tERF_DZ_ISR_REG_WIDTH 32\n+\n+\n+/*\n+ * MC_DB_LWRD_REG(32bit):\n+ *\n+ */\n+\n+#define\tER_DZ_MC_DB_LWRD_REG_OFST 0x00000200\n+/* hunta0,medforda0=pcie_pf_bar2 */\n+#define\tER_DZ_MC_DB_LWRD_REG_RESET 0x0\n+\n+\n+#define\tERF_DZ_MC_DOORBELL_L_LBN 0\n+#define\tERF_DZ_MC_DOORBELL_L_WIDTH 32\n+\n+\n+/*\n+ * MC_DB_HWRD_REG(32bit):\n+ *\n+ */\n+\n+#define\tER_DZ_MC_DB_HWRD_REG_OFST 0x00000204\n+/* hunta0,medforda0=pcie_pf_bar2 */\n+#define\tER_DZ_MC_DB_HWRD_REG_RESET 0x0\n+\n+\n+#define\tERF_DZ_MC_DOORBELL_H_LBN 0\n+#define\tERF_DZ_MC_DOORBELL_H_WIDTH 32\n+\n+\n+/*\n+ * EVQ_RPTR_REG(32bit):\n+ *\n+ */\n+\n+#define\tER_DZ_EVQ_RPTR_REG_OFST 0x00000400\n+/* hunta0,medforda0=pcie_pf_bar2 */\n+#define\tER_DZ_EVQ_RPTR_REG_STEP 8192\n+#define\tER_DZ_EVQ_RPTR_REG_ROWS 2048\n+#define\tER_DZ_EVQ_RPTR_REG_RESET 0x0\n+\n+\n+#define\tERF_DZ_EVQ_RPTR_VLD_LBN 15\n+#define\tERF_DZ_EVQ_RPTR_VLD_WIDTH 1\n+#define\tERF_DZ_EVQ_RPTR_LBN 0\n+#define\tERF_DZ_EVQ_RPTR_WIDTH 15\n+\n+\n+/*\n+ * 
EVQ_TMR_REG(32bit):\n+ *\n+ */\n+\n+#define\tER_DZ_EVQ_TMR_REG_OFST 0x00000420\n+/* hunta0,medforda0=pcie_pf_bar2 */\n+#define\tER_DZ_EVQ_TMR_REG_STEP 8192\n+#define\tER_DZ_EVQ_TMR_REG_ROWS 2048\n+#define\tER_DZ_EVQ_TMR_REG_RESET 0x0\n+\n+\n+#define\tERF_DZ_TC_TIMER_MODE_LBN 14\n+#define\tERF_DZ_TC_TIMER_MODE_WIDTH 2\n+#define\tERF_DZ_TC_TIMER_VAL_LBN 0\n+#define\tERF_DZ_TC_TIMER_VAL_WIDTH 14\n+\n+\n+/*\n+ * RX_DESC_UPD_REG(32bit):\n+ *\n+ */\n+\n+#define\tER_DZ_RX_DESC_UPD_REG_OFST 0x00000830\n+/* hunta0,medforda0=pcie_pf_bar2 */\n+#define\tER_DZ_RX_DESC_UPD_REG_STEP 8192\n+#define\tER_DZ_RX_DESC_UPD_REG_ROWS 2048\n+#define\tER_DZ_RX_DESC_UPD_REG_RESET 0x0\n+\n+\n+#define\tERF_DZ_RX_DESC_WPTR_LBN 0\n+#define\tERF_DZ_RX_DESC_WPTR_WIDTH 12\n+\n+/*\n+ * TX_DESC_UPD_REG(96bit):\n+ *\n+ */\n+\n+#define\tER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10\n+/* hunta0,medforda0=pcie_pf_bar2 */\n+#define\tER_DZ_TX_DESC_UPD_REG_STEP 8192\n+#define\tER_DZ_TX_DESC_UPD_REG_ROWS 2048\n+#define\tER_DZ_TX_DESC_UPD_REG_RESET 0x0\n+\n+\n+#define\tERF_DZ_RSVD_LBN 76\n+#define\tERF_DZ_RSVD_WIDTH 20\n+#define\tERF_DZ_TX_DESC_WPTR_LBN 64\n+#define\tERF_DZ_TX_DESC_WPTR_WIDTH 12\n+#define\tERF_DZ_TX_DESC_HWORD_LBN 32\n+#define\tERF_DZ_TX_DESC_HWORD_WIDTH 32\n+#define\tERF_DZ_TX_DESC_LWORD_LBN 0\n+#define\tERF_DZ_TX_DESC_LWORD_WIDTH 32\n+\n+\n+/* ES_DRIVER_EV */\n+#define\tESF_DZ_DRV_CODE_LBN 60\n+#define\tESF_DZ_DRV_CODE_WIDTH 4\n+#define\tESF_DZ_DRV_SUB_CODE_LBN 56\n+#define\tESF_DZ_DRV_SUB_CODE_WIDTH 4\n+#define\tESE_DZ_DRV_TIMER_EV 3\n+#define\tESE_DZ_DRV_START_UP_EV 2\n+#define\tESE_DZ_DRV_WAKE_UP_EV 1\n+#define\tESF_DZ_DRV_SUB_DATA_DW0_LBN 0\n+#define\tESF_DZ_DRV_SUB_DATA_DW0_WIDTH 32\n+#define\tESF_DZ_DRV_SUB_DATA_DW1_LBN 32\n+#define\tESF_DZ_DRV_SUB_DATA_DW1_WIDTH 24\n+#define\tESF_DZ_DRV_SUB_DATA_LBN 0\n+#define\tESF_DZ_DRV_SUB_DATA_WIDTH 56\n+#define\tESF_DZ_DRV_EVQ_ID_LBN 0\n+#define\tESF_DZ_DRV_EVQ_ID_WIDTH 14\n+#define\tESF_DZ_DRV_TMR_ID_LBN 0\n+#define\tESF_DZ_DRV_TMR_ID_WIDTH 14\n+\n+\n+/* ES_EVENT_ENTRY */\n+#define\tESF_DZ_EV_CODE_LBN 60\n+#define\tESF_DZ_EV_CODE_WIDTH 4\n+#define\tESE_DZ_EV_CODE_MCDI_EV 12\n+#define\tESE_DZ_EV_CODE_DRIVER_EV 5\n+#define\tESE_DZ_EV_CODE_TX_EV 2\n+#define\tESE_DZ_EV_CODE_RX_EV 0\n+#define\tESE_DZ_OTHER other\n+#define\tESF_DZ_EV_DATA_DW0_LBN 0\n+#define\tESF_DZ_EV_DATA_DW0_WIDTH 32\n+#define\tESF_DZ_EV_DATA_DW1_LBN 32\n+#define\tESF_DZ_EV_DATA_DW1_WIDTH 28\n+#define\tESF_DZ_EV_DATA_LBN 0\n+#define\tESF_DZ_EV_DATA_WIDTH 60\n+\n+\n+/* ES_MC_EVENT */\n+#define\tESF_DZ_MC_CODE_LBN 60\n+#define\tESF_DZ_MC_CODE_WIDTH 4\n+#define\tESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59\n+#define\tESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1\n+#define\tESF_DZ_MC_DROP_EVENT_LBN 58\n+#define\tESF_DZ_MC_DROP_EVENT_WIDTH 1\n+#define\tESF_DZ_MC_SOFT_DW0_LBN 0\n+#define\tESF_DZ_MC_SOFT_DW0_WIDTH 32\n+#define\tESF_DZ_MC_SOFT_DW1_LBN 32\n+#define\tESF_DZ_MC_SOFT_DW1_WIDTH 26\n+#define\tESF_DZ_MC_SOFT_LBN 0\n+#define\tESF_DZ_MC_SOFT_WIDTH 58\n+\n+\n+/* ES_RX_EVENT */\n+#define\tESF_DZ_RX_CODE_LBN 60\n+#define\tESF_DZ_RX_CODE_WIDTH 4\n+#define\tESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59\n+#define\tESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1\n+#define\tESF_DZ_RX_DROP_EVENT_LBN 58\n+#define\tESF_DZ_RX_DROP_EVENT_WIDTH 1\n+#define\tESF_DD_RX_EV_RSVD2_LBN 54\n+#define\tESF_DD_RX_EV_RSVD2_WIDTH 4\n+#define\tESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57\n+#define\tESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1\n+#define\tESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56\n+#define\tESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1\n+#define\tESF_EZ_RX_EV_RSVD2_LBN 
54\n+#define\tESF_EZ_RX_EV_RSVD2_WIDTH 2\n+#define\tESF_DZ_RX_EV_SOFT2_LBN 52\n+#define\tESF_DZ_RX_EV_SOFT2_WIDTH 2\n+#define\tESF_DZ_RX_DSC_PTR_LBITS_LBN 48\n+#define\tESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4\n+#define\tESF_DZ_RX_L4_CLASS_LBN 45\n+#define\tESF_DZ_RX_L4_CLASS_WIDTH 3\n+#define\tESE_DZ_L4_CLASS_RSVD7 7\n+#define\tESE_DZ_L4_CLASS_RSVD6 6\n+#define\tESE_DZ_L4_CLASS_RSVD5 5\n+#define\tESE_DZ_L4_CLASS_RSVD4 4\n+#define\tESE_DZ_L4_CLASS_RSVD3 3\n+#define\tESE_DZ_L4_CLASS_UDP 2\n+#define\tESE_DZ_L4_CLASS_TCP 1\n+#define\tESE_DZ_L4_CLASS_UNKNOWN 0\n+#define\tESF_DZ_RX_L3_CLASS_LBN 42\n+#define\tESF_DZ_RX_L3_CLASS_WIDTH 3\n+#define\tESE_DZ_L3_CLASS_RSVD7 7\n+#define\tESE_DZ_L3_CLASS_IP6_FRAG 6\n+#define\tESE_DZ_L3_CLASS_ARP 5\n+#define\tESE_DZ_L3_CLASS_IP4_FRAG 4\n+#define\tESE_DZ_L3_CLASS_FCOE 3\n+#define\tESE_DZ_L3_CLASS_IP6 2\n+#define\tESE_DZ_L3_CLASS_IP4 1\n+#define\tESE_DZ_L3_CLASS_UNKNOWN 0\n+#define\tESF_DZ_RX_ETH_TAG_CLASS_LBN 39\n+#define\tESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3\n+#define\tESE_DZ_ETH_TAG_CLASS_RSVD7 7\n+#define\tESE_DZ_ETH_TAG_CLASS_RSVD6 6\n+#define\tESE_DZ_ETH_TAG_CLASS_RSVD5 5\n+#define\tESE_DZ_ETH_TAG_CLASS_RSVD4 4\n+#define\tESE_DZ_ETH_TAG_CLASS_RSVD3 3\n+#define\tESE_DZ_ETH_TAG_CLASS_VLAN2 2\n+#define\tESE_DZ_ETH_TAG_CLASS_VLAN1 1\n+#define\tESE_DZ_ETH_TAG_CLASS_NONE 0\n+#define\tESF_DZ_RX_ETH_BASE_CLASS_LBN 36\n+#define\tESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3\n+#define\tESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2\n+#define\tESE_DZ_ETH_BASE_CLASS_LLC 1\n+#define\tESE_DZ_ETH_BASE_CLASS_ETH2 0\n+#define\tESF_DZ_RX_MAC_CLASS_LBN 35\n+#define\tESF_DZ_RX_MAC_CLASS_WIDTH 1\n+#define\tESE_DZ_MAC_CLASS_MCAST 1\n+#define\tESE_DZ_MAC_CLASS_UCAST 0\n+#define\tESF_DD_RX_EV_SOFT1_LBN 32\n+#define\tESF_DD_RX_EV_SOFT1_WIDTH 3\n+#define\tESF_EZ_RX_EV_SOFT1_LBN 34\n+#define\tESF_EZ_RX_EV_SOFT1_WIDTH 1\n+#define\tESF_EZ_RX_ENCAP_HDR_LBN 32\n+#define\tESF_EZ_RX_ENCAP_HDR_WIDTH 2\n+#define\tESE_EZ_ENCAP_HDR_GRE 2\n+#define\tESE_EZ_ENCAP_HDR_VXLAN 1\n+#define\tESE_EZ_ENCAP_HDR_NONE 0\n+#define\tESF_DD_RX_EV_RSVD1_LBN 30\n+#define\tESF_DD_RX_EV_RSVD1_WIDTH 2\n+#define\tESF_EZ_RX_EV_RSVD1_LBN 31\n+#define\tESF_EZ_RX_EV_RSVD1_WIDTH 1\n+#define\tESF_EZ_RX_ABORT_LBN 30\n+#define\tESF_EZ_RX_ABORT_WIDTH 1\n+#define\tESF_DZ_RX_ECC_ERR_LBN 29\n+#define\tESF_DZ_RX_ECC_ERR_WIDTH 1\n+#define\tESF_DZ_RX_CRC1_ERR_LBN 28\n+#define\tESF_DZ_RX_CRC1_ERR_WIDTH 1\n+#define\tESF_DZ_RX_CRC0_ERR_LBN 27\n+#define\tESF_DZ_RX_CRC0_ERR_WIDTH 1\n+#define\tESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26\n+#define\tESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1\n+#define\tESF_DZ_RX_IPCKSUM_ERR_LBN 25\n+#define\tESF_DZ_RX_IPCKSUM_ERR_WIDTH 1\n+#define\tESF_DZ_RX_ECRC_ERR_LBN 24\n+#define\tESF_DZ_RX_ECRC_ERR_WIDTH 1\n+#define\tESF_DZ_RX_QLABEL_LBN 16\n+#define\tESF_DZ_RX_QLABEL_WIDTH 5\n+#define\tESF_DZ_RX_PARSE_INCOMPLETE_LBN 15\n+#define\tESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1\n+#define\tESF_DZ_RX_CONT_LBN 14\n+#define\tESF_DZ_RX_CONT_WIDTH 1\n+#define\tESF_DZ_RX_BYTES_LBN 0\n+#define\tESF_DZ_RX_BYTES_WIDTH 14\n+\n+\n+/* ES_RX_KER_DESC */\n+#define\tESF_DZ_RX_KER_RESERVED_LBN 62\n+#define\tESF_DZ_RX_KER_RESERVED_WIDTH 2\n+#define\tESF_DZ_RX_KER_BYTE_CNT_LBN 48\n+#define\tESF_DZ_RX_KER_BYTE_CNT_WIDTH 14\n+#define\tESF_DZ_RX_KER_BUF_ADDR_DW0_LBN 0\n+#define\tESF_DZ_RX_KER_BUF_ADDR_DW0_WIDTH 32\n+#define\tESF_DZ_RX_KER_BUF_ADDR_DW1_LBN 32\n+#define\tESF_DZ_RX_KER_BUF_ADDR_DW1_WIDTH 16\n+#define\tESF_DZ_RX_KER_BUF_ADDR_LBN 0\n+#define\tESF_DZ_RX_KER_BUF_ADDR_WIDTH 48\n+\n+\n+/* ES_TX_CSUM_TSTAMP_DESC */\n+#define\tESF_DZ_TX_DESC_IS_OPT_LBN 
63\n+#define\tESF_DZ_TX_DESC_IS_OPT_WIDTH 1\n+#define\tESF_DZ_TX_OPTION_TYPE_LBN 60\n+#define\tESF_DZ_TX_OPTION_TYPE_WIDTH 3\n+#define\tESE_DZ_TX_OPTION_DESC_TSO 7\n+#define\tESE_DZ_TX_OPTION_DESC_VLAN 6\n+#define\tESE_DZ_TX_OPTION_DESC_CRC_CSUM 0\n+#define\tESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8\n+#define\tESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1\n+#define\tESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7\n+#define\tESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1\n+#define\tESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6\n+#define\tESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1\n+#define\tESF_DZ_TX_TIMESTAMP_LBN 5\n+#define\tESF_DZ_TX_TIMESTAMP_WIDTH 1\n+#define\tESF_DZ_TX_OPTION_CRC_MODE_LBN 2\n+#define\tESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3\n+#define\tESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5\n+#define\tESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4\n+#define\tESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3\n+#define\tESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2\n+#define\tESE_DZ_TX_OPTION_CRC_FCOE 1\n+#define\tESE_DZ_TX_OPTION_CRC_OFF 0\n+#define\tESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1\n+#define\tESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1\n+#define\tESF_DZ_TX_OPTION_IP_CSUM_LBN 0\n+#define\tESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1\n+\n+\n+/* ES_TX_EVENT */\n+#define\tESF_DZ_TX_CODE_LBN 60\n+#define\tESF_DZ_TX_CODE_WIDTH 4\n+#define\tESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59\n+#define\tESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1\n+#define\tESF_DZ_TX_DROP_EVENT_LBN 58\n+#define\tESF_DZ_TX_DROP_EVENT_WIDTH 1\n+#define\tESF_DD_TX_EV_RSVD_LBN 48\n+#define\tESF_DD_TX_EV_RSVD_WIDTH 10\n+#define\tESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57\n+#define\tESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1\n+#define\tESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56\n+#define\tESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1\n+#define\tESF_EZ_TX_EV_RSVD_LBN 48\n+#define\tESF_EZ_TX_EV_RSVD_WIDTH 8\n+#define\tESF_DZ_TX_SOFT2_LBN 32\n+#define\tESF_DZ_TX_SOFT2_WIDTH 16\n+#define\tESF_DD_TX_SOFT1_LBN 24\n+#define\tESF_DD_TX_SOFT1_WIDTH 8\n+#define\tESF_EZ_TX_CAN_MERGE_LBN 31\n+#define\tESF_EZ_TX_CAN_MERGE_WIDTH 1\n+#define\tESF_EZ_TX_SOFT1_LBN 24\n+#define\tESF_EZ_TX_SOFT1_WIDTH 7\n+#define\tESF_DZ_TX_QLABEL_LBN 16\n+#define\tESF_DZ_TX_QLABEL_WIDTH 5\n+#define\tESF_DZ_TX_DESCR_INDX_LBN 0\n+#define\tESF_DZ_TX_DESCR_INDX_WIDTH 16\n+\n+\n+/* ES_TX_KER_DESC */\n+#define\tESF_DZ_TX_KER_TYPE_LBN 63\n+#define\tESF_DZ_TX_KER_TYPE_WIDTH 1\n+#define\tESF_DZ_TX_KER_CONT_LBN 62\n+#define\tESF_DZ_TX_KER_CONT_WIDTH 1\n+#define\tESF_DZ_TX_KER_BYTE_CNT_LBN 48\n+#define\tESF_DZ_TX_KER_BYTE_CNT_WIDTH 14\n+#define\tESF_DZ_TX_KER_BUF_ADDR_DW0_LBN 0\n+#define\tESF_DZ_TX_KER_BUF_ADDR_DW0_WIDTH 32\n+#define\tESF_DZ_TX_KER_BUF_ADDR_DW1_LBN 32\n+#define\tESF_DZ_TX_KER_BUF_ADDR_DW1_WIDTH 16\n+#define\tESF_DZ_TX_KER_BUF_ADDR_LBN 0\n+#define\tESF_DZ_TX_KER_BUF_ADDR_WIDTH 48\n+\n+\n+/* ES_TX_PIO_DESC */\n+#define\tESF_DZ_TX_PIO_TYPE_LBN 63\n+#define\tESF_DZ_TX_PIO_TYPE_WIDTH 1\n+#define\tESF_DZ_TX_PIO_OPT_LBN 60\n+#define\tESF_DZ_TX_PIO_OPT_WIDTH 3\n+#define\tESF_DZ_TX_PIO_CONT_LBN 59\n+#define\tESF_DZ_TX_PIO_CONT_WIDTH 1\n+#define\tESF_DZ_TX_PIO_BYTE_CNT_LBN 32\n+#define\tESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12\n+#define\tESF_DZ_TX_PIO_BUF_ADDR_LBN 0\n+#define\tESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12\n+\n+\n+/* ES_TX_TSO_DESC */\n+#define\tESF_DZ_TX_DESC_IS_OPT_LBN 63\n+#define\tESF_DZ_TX_DESC_IS_OPT_WIDTH 1\n+#define\tESF_DZ_TX_OPTION_TYPE_LBN 60\n+#define\tESF_DZ_TX_OPTION_TYPE_WIDTH 3\n+#define\tESE_DZ_TX_OPTION_DESC_TSO 7\n+#define\tESE_DZ_TX_OPTION_DESC_VLAN 6\n+#define\tESE_DZ_TX_OPTION_DESC_CRC_CSUM 0\n+#define\tESF_DZ_TX_TSO_OPTION_TYPE_LBN 56\n+#define\tESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 
4\n+#define\tESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1\n+#define\tESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0\n+#define\tESF_DZ_TX_TSO_TCP_FLAGS_LBN 48\n+#define\tESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8\n+#define\tESF_DZ_TX_TSO_IP_ID_LBN 32\n+#define\tESF_DZ_TX_TSO_IP_ID_WIDTH 16\n+#define\tESF_DZ_TX_TSO_TCP_SEQNO_LBN 0\n+#define\tESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32\n+\n+\n+/* TX_TSO_FATSO2A_DESC */\n+#define\tESF_DZ_TX_DESC_IS_OPT_LBN 63\n+#define\tESF_DZ_TX_DESC_IS_OPT_WIDTH 1\n+#define\tESF_DZ_TX_OPTION_TYPE_LBN 60\n+#define\tESF_DZ_TX_OPTION_TYPE_WIDTH 3\n+#define\tESE_DZ_TX_OPTION_DESC_TSO 7\n+#define\tESE_DZ_TX_OPTION_DESC_VLAN 6\n+#define\tESE_DZ_TX_OPTION_DESC_CRC_CSUM 0\n+#define\tESF_DZ_TX_TSO_OPTION_TYPE_LBN 56\n+#define\tESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4\n+#define\tESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3\n+#define\tESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2\n+#define\tESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1\n+#define\tESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0\n+#define\tESF_DZ_TX_TSO_IP_ID_LBN 32\n+#define\tESF_DZ_TX_TSO_IP_ID_WIDTH 16\n+#define\tESF_DZ_TX_TSO_TCP_SEQNO_LBN 0\n+#define\tESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32\n+\n+\n+/* TX_TSO_FATSO2B_DESC */\n+#define\tESF_DZ_TX_DESC_IS_OPT_LBN 63\n+#define\tESF_DZ_TX_DESC_IS_OPT_WIDTH 1\n+#define\tESF_DZ_TX_OPTION_TYPE_LBN 60\n+#define\tESF_DZ_TX_OPTION_TYPE_WIDTH 3\n+#define\tESE_DZ_TX_OPTION_DESC_TSO 7\n+#define\tESE_DZ_TX_OPTION_DESC_VLAN 6\n+#define\tESE_DZ_TX_OPTION_DESC_CRC_CSUM 0\n+#define\tESF_DZ_TX_TSO_OPTION_TYPE_LBN 56\n+#define\tESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4\n+#define\tESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3\n+#define\tESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2\n+#define\tESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1\n+#define\tESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0\n+#define\tESF_DZ_TX_TSO_OUTER_IP_ID_LBN 16\n+#define\tESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16\n+#define\tESF_DZ_TX_TSO_TCP_MSS_LBN 32\n+#define\tESF_DZ_TX_TSO_TCP_MSS_WIDTH 16\n+#define\tESF_DZ_TX_TSO_INNER_PE_CSUM_LBN 0\n+#define\tESF_DZ_TX_TSO_INNER_PE_CSUM_WIDTH 16\n+\n+\n+/* ES_TX_VLAN_DESC */\n+#define\tESF_DZ_TX_DESC_IS_OPT_LBN 63\n+#define\tESF_DZ_TX_DESC_IS_OPT_WIDTH 1\n+#define\tESF_DZ_TX_OPTION_TYPE_LBN 60\n+#define\tESF_DZ_TX_OPTION_TYPE_WIDTH 3\n+#define\tESE_DZ_TX_OPTION_DESC_TSO 7\n+#define\tESE_DZ_TX_OPTION_DESC_VLAN 6\n+#define\tESE_DZ_TX_OPTION_DESC_CRC_CSUM 0\n+#define\tESF_DZ_TX_VLAN_OP_LBN 32\n+#define\tESF_DZ_TX_VLAN_OP_WIDTH 2\n+#define\tESF_DZ_TX_VLAN_TAG2_LBN 16\n+#define\tESF_DZ_TX_VLAN_TAG2_WIDTH 16\n+#define\tESF_DZ_TX_VLAN_TAG1_LBN 0\n+#define\tESF_DZ_TX_VLAN_TAG1_WIDTH 16\n+\n+\n+/*************************************************************************\n+ * NOTE: the comment line above marks the end of the autogenerated section\n+ */\n+\n+/*\n+ * The workaround for bug 35388 requires multiplexing writes through\n+ * the ERF_DZ_TX_DESC_WPTR address.\n+ * TX_DESC_UPD: 0ppppppppppp               (bit 11 lost)\n+ * EVQ_RPTR:    1000hhhhhhhh, 1001llllllll (split into high and low bits)\n+ * EVQ_TMR:     11mmvvvvvvvv               (bits 8:13 of value lost)\n+ */\n+#define\tER_DD_EVQ_INDIRECT_OFST (ER_DZ_TX_DESC_UPD_REG_OFST + 2 * 4)\n+#define\tER_DD_EVQ_INDIRECT_STEP ER_DZ_TX_DESC_UPD_REG_STEP\n+#define\tERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8\n+#define\tERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4\n+#define\tEFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8\n+#define\tEFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9\n+#define\tERF_DD_EVQ_IND_RPTR_LBN 0\n+#define\tERF_DD_EVQ_IND_RPTR_WIDTH 8\n+#define\tERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10\n+#define\tERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2\n+#define\tEFE_DD_EVQ_IND_TIMER_FLAGS 3\n+#define\tERF_DD_EVQ_IND_TIMER_MODE_LBN 
8\n+#define\tERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2\n+#define\tERF_DD_EVQ_IND_TIMER_VAL_LBN 0\n+#define\tERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8\n+\n+/* Packed stream magic doorbell command */\n+#define\tERF_DZ_RX_DESC_MAGIC_DOORBELL_LBN 11\n+#define\tERF_DZ_RX_DESC_MAGIC_DOORBELL_WIDTH 1\n+\n+#define\tERF_DZ_RX_DESC_MAGIC_CMD_LBN 8\n+#define\tERF_DZ_RX_DESC_MAGIC_CMD_WIDTH 3\n+#define\tERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS 0\n+\n+#define\tERF_DZ_RX_DESC_MAGIC_DATA_LBN 0\n+#define\tERF_DZ_RX_DESC_MAGIC_DATA_WIDTH 8\n+\n+/* Packed stream RX packet prefix */\n+#define\tES_DZ_PS_RX_PREFIX_TSTAMP_LBN 0\n+#define\tES_DZ_PS_RX_PREFIX_TSTAMP_WIDTH 32\n+#define\tES_DZ_PS_RX_PREFIX_CAP_LEN_LBN 32\n+#define\tES_DZ_PS_RX_PREFIX_CAP_LEN_WIDTH 16\n+#define\tES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48\n+#define\tES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16\n+\n+/*\n+ * An extra flag for the packed stream mode,\n+ * signalling the start of a new buffer\n+ */\n+#define\tESF_DZ_RX_EV_ROTATE_LBN 53\n+#define\tESF_DZ_RX_EV_ROTATE_WIDTH 1\n+\n+#ifdef\t__cplusplus\n+}\n+#endif\n+\n+#endif /* _SYS_EFX_EF10_REGS_H */\ndiff --git a/drivers/net/sfc/base/efx_rx.c b/drivers/net/sfc/base/efx_rx.c\nindex c1da6b8..8ca5731 100644\n--- a/drivers/net/sfc/base/efx_rx.c\n+++ b/drivers/net/sfc/base/efx_rx.c\n@@ -104,6 +104,20 @@ static const efx_rx_ops_t __efx_rx_siena_ops = {\n };\n #endif\t/* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+static const efx_rx_ops_t __efx_rx_ef10_ops = {\n+\tef10_rx_init,\t\t\t\t/* erxo_init */\n+\tef10_rx_fini,\t\t\t\t/* erxo_fini */\n+\tef10_rx_prefix_pktlen,\t\t\t/* erxo_prefix_pktlen */\n+\tef10_rx_qpost,\t\t\t\t/* erxo_qpost */\n+\tef10_rx_qpush,\t\t\t\t/* erxo_qpush */\n+\tef10_rx_qflush,\t\t\t\t/* erxo_qflush */\n+\tef10_rx_qenable,\t\t\t/* erxo_qenable */\n+\tef10_rx_qcreate,\t\t\t/* erxo_qcreate */\n+\tef10_rx_qdestroy,\t\t\t/* erxo_qdestroy */\n+};\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n+\n \n \t__checkReturn\tefx_rc_t\n efx_rx_init(\n@@ -132,6 +146,12 @@ efx_rx_init(\n \t\tbreak;\n #endif /* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+\tcase EFX_FAMILY_HUNTINGTON:\n+\t\terxop = &__efx_rx_ef10_ops;\n+\t\tbreak;\n+#endif /* EFSYS_OPT_HUNTINGTON */\n+\n \tdefault:\n \t\tEFSYS_ASSERT(0);\n \t\trc = ENOTSUP;\ndiff --git a/drivers/net/sfc/base/efx_sram.c b/drivers/net/sfc/base/efx_sram.c\nindex 0f16376..a55b06e 100644\n--- a/drivers/net/sfc/base/efx_sram.c\n+++ b/drivers/net/sfc/base/efx_sram.c\n@@ -49,6 +49,22 @@ efx_sram_buf_tbl_set(\n \tEFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);\n \tEFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);\n \n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\tif (enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t    enp->en_family == EFX_FAMILY_MEDFORD) {\n+\t\t/*\n+\t\t * FIXME: the efx_sram_buf_tbl_*() functionality needs to be\n+\t\t * pulled inside the Falcon/Siena queue create/destroy code,\n+\t\t * and then the original functions can be removed (see bug30834\n+\t\t * comment #1).  
But, for now, we just ensure that they are\n+\t\t * no-ops for EF10, to allow bringing up existing drivers\n+\t\t * without modification.\n+\t\t */\n+\n+\t\treturn (0);\n+\t}\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n+\n \tif (stop >= EFX_BUF_TBL_SIZE) {\n \t\trc = EFBIG;\n \t\tgoto fail1;\n@@ -155,6 +171,22 @@ efx_sram_buf_tbl_clear(\n \tEFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);\n \tEFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);\n \n+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD\n+\tif (enp->en_family == EFX_FAMILY_HUNTINGTON ||\n+\t    enp->en_family == EFX_FAMILY_MEDFORD) {\n+\t\t/*\n+\t\t * FIXME: the efx_sram_buf_tbl_*() functionality needs to be\n+\t\t * pulled inside the Falcon/Siena queue create/destroy code,\n+\t\t * and then the original functions can be removed (see bug30834\n+\t\t * comment #1).  But, for now, we just ensure that they are\n+\t\t * no-ops for EF10, to allow bringing up existing drivers\n+\t\t * without modification.\n+\t\t */\n+\n+\t\treturn;\n+\t}\n+#endif\t/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */\n+\n \tEFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);\n \n \tEFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);\ndiff --git a/drivers/net/sfc/base/efx_tx.c b/drivers/net/sfc/base/efx_tx.c\nindex 7333f0a..ed66695 100644\n--- a/drivers/net/sfc/base/efx_tx.c\n+++ b/drivers/net/sfc/base/efx_tx.c\n@@ -129,6 +129,29 @@ static const efx_tx_ops_t\t__efx_tx_siena_ops = {\n };\n #endif /* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+static const efx_tx_ops_t\t__efx_tx_hunt_ops = {\n+\tef10_tx_init,\t\t\t\t/* etxo_init */\n+\tef10_tx_fini,\t\t\t\t/* etxo_fini */\n+\tef10_tx_qcreate,\t\t\t/* etxo_qcreate */\n+\tef10_tx_qdestroy,\t\t\t/* etxo_qdestroy */\n+\tef10_tx_qpost,\t\t\t\t/* etxo_qpost */\n+\tef10_tx_qpush,\t\t\t\t/* etxo_qpush */\n+\tef10_tx_qpace,\t\t\t\t/* etxo_qpace */\n+\tef10_tx_qflush,\t\t\t\t/* etxo_qflush */\n+\tef10_tx_qenable,\t\t\t/* etxo_qenable */\n+\tef10_tx_qpio_enable,\t\t\t/* etxo_qpio_enable */\n+\tef10_tx_qpio_disable,\t\t\t/* etxo_qpio_disable */\n+\tef10_tx_qpio_write,\t\t\t/* etxo_qpio_write */\n+\tef10_tx_qpio_post,\t\t\t/* etxo_qpio_post */\n+\tef10_tx_qdesc_post,\t\t\t/* etxo_qdesc_post */\n+\tef10_tx_qdesc_dma_create,\t\t/* etxo_qdesc_dma_create */\n+\tef10_tx_qdesc_tso_create,\t\t/* etxo_qdesc_tso_create */\n+\tef10_tx_qdesc_tso2_create,\t\t/* etxo_qdesc_tso2_create */\n+\tef10_tx_qdesc_vlantci_create,\t\t/* etxo_qdesc_vlantci_create */\n+};\n+#endif /* EFSYS_OPT_HUNTINGTON */\n+\n \t__checkReturn\tefx_rc_t\n efx_tx_init(\n \t__in\t\tefx_nic_t *enp)\n@@ -156,6 +179,12 @@ efx_tx_init(\n \t\tbreak;\n #endif /* EFSYS_OPT_SIENA */\n \n+#if EFSYS_OPT_HUNTINGTON\n+\tcase EFX_FAMILY_HUNTINGTON:\n+\t\tetxop = &__efx_tx_hunt_ops;\n+\t\tbreak;\n+#endif /* EFSYS_OPT_HUNTINGTON */\n+\n \tdefault:\n \t\tEFSYS_ASSERT(0);\n \t\trc = ENOTSUP;\ndiff --git a/drivers/net/sfc/base/hunt_impl.h b/drivers/net/sfc/base/hunt_impl.h\nnew file mode 100644\nindex 0000000..0e0c870\n--- /dev/null\n+++ b/drivers/net/sfc/base/hunt_impl.h\n@@ -0,0 +1,74 @@\n+/*\n+ * Copyright (c) 2012-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. 
Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#ifndef _SYS_HUNT_IMPL_H\n+#define\t_SYS_HUNT_IMPL_H\n+\n+#include \"efx.h\"\n+#include \"efx_regs.h\"\n+#include \"efx_regs_ef10.h\"\n+#include \"efx_mcdi.h\"\n+\n+#ifdef\t__cplusplus\n+extern \"C\" {\n+#endif\n+\n+/* Missing register definitions */\n+#ifndef\tER_DZ_TX_PIOBUF_OFST\n+#define\tER_DZ_TX_PIOBUF_OFST 0x00001000\n+#endif\n+#ifndef\tER_DZ_TX_PIOBUF_STEP\n+#define\tER_DZ_TX_PIOBUF_STEP 8192\n+#endif\n+#ifndef\tER_DZ_TX_PIOBUF_ROWS\n+#define\tER_DZ_TX_PIOBUF_ROWS 2048\n+#endif\n+\n+#ifndef\tER_DZ_TX_PIOBUF_SIZE\n+#define\tER_DZ_TX_PIOBUF_SIZE 2048\n+#endif\n+\n+#define\tHUNT_PIOBUF_NBUFS\t(16)\n+#define\tHUNT_PIOBUF_SIZE\t(ER_DZ_TX_PIOBUF_SIZE)\n+\n+#define\tHUNT_MIN_PIO_ALLOC_SIZE\t(HUNT_PIOBUF_SIZE / 32)\n+\n+\n+/* NIC */\n+\n+extern\t__checkReturn\tefx_rc_t\n+hunt_board_cfg(\n+\t__in\t\tefx_nic_t *enp);\n+\n+#ifdef\t__cplusplus\n+}\n+#endif\n+\n+#endif\t/* _SYS_HUNT_IMPL_H */\ndiff --git a/drivers/net/sfc/base/hunt_nic.c b/drivers/net/sfc/base/hunt_nic.c\nnew file mode 100644\nindex 0000000..263f474\n--- /dev/null\n+++ b/drivers/net/sfc/base/hunt_nic.c\n@@ -0,0 +1,395 @@\n+/*\n+ * Copyright (c) 2012-2016 Solarflare Communications Inc.\n+ * All rights reserved.\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ *    this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ *    this list of conditions and the following disclaimer in the documentation\n+ *    and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of the FreeBSD Project.\n+ */\n+\n+#include \"efx.h\"\n+#include \"efx_impl.h\"\n+\n+#if EFSYS_OPT_HUNTINGTON\n+\n+#include \"ef10_tlv_layout.h\"\n+\n+static\t__checkReturn\tefx_rc_t\n+hunt_nic_get_required_pcie_bandwidth(\n+\t__in\t\tefx_nic_t *enp,\n+\t__out\t\tuint32_t *bandwidth_mbpsp)\n+{\n+\tuint32_t port_modes;\n+\tuint32_t max_port_mode;\n+\tuint32_t bandwidth;\n+\tefx_rc_t rc;\n+\n+\t/*\n+\t * On Huntington, the firmware may not give us the current port mode, so\n+\t * we need to go by the set of available port modes and assume the most\n+\t * capable mode is in use.\n+\t */\n+\n+\tif ((rc = efx_mcdi_get_port_modes(enp, &port_modes, NULL)) != 0) {\n+\t\t/* No port mode info available */\n+\t\tbandwidth = 0;\n+\t\tgoto out;\n+\t}\n+\n+\tif (port_modes & (1 << TLV_PORT_MODE_40G_40G)) {\n+\t\t/*\n+\t\t * This needs the full PCIe bandwidth (and could use\n+\t\t * more) - roughly 64 Gbit/s for 8 lanes of Gen3.\n+\t\t */\n+\t\tif ((rc = efx_nic_calculate_pcie_link_bandwidth(8,\n+\t\t\t    EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0)\n+\t\t\tgoto fail1;\n+\t} else {\n+\t\tif (port_modes & (1 << TLV_PORT_MODE_40G)) {\n+\t\t\tmax_port_mode = TLV_PORT_MODE_40G;\n+\t\t} else if (port_modes & (1 << TLV_PORT_MODE_10G_10G_10G_10G)) {\n+\t\t\tmax_port_mode = TLV_PORT_MODE_10G_10G_10G_10G;\n+\t\t} else {\n+\t\t\t/* Assume two 10G ports */\n+\t\t\tmax_port_mode = TLV_PORT_MODE_10G_10G;\n+\t\t}\n+\n+\t\tif ((rc = ef10_nic_get_port_mode_bandwidth(max_port_mode,\n+\t\t\t\t\t\t\t    &bandwidth)) != 0)\n+\t\t\tgoto fail2;\n+\t}\n+\n+out:\n+\t*bandwidth_mbpsp = bandwidth;\n+\n+\treturn (0);\n+\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\t__checkReturn\tefx_rc_t\n+hunt_board_cfg(\n+\t__in\t\tefx_nic_t *enp)\n+{\n+\tefx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);\n+\tefx_nic_cfg_t *encp = &(enp->en_nic_cfg);\n+\tuint8_t mac_addr[6] = { 0 };\n+\tuint32_t board_type = 0;\n+\tef10_link_state_t els;\n+\tefx_port_t *epp = &(enp->en_port);\n+\tuint32_t port;\n+\tuint32_t pf;\n+\tuint32_t vf;\n+\tuint32_t mask;\n+\tuint32_t flags;\n+\tuint32_t sysclk, dpcpu_clk;\n+\tuint32_t base, nvec;\n+\tuint32_t bandwidth;\n+\tefx_rc_t rc;\n+\n+\tif ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)\n+\t\tgoto fail1;\n+\n+\t/*\n+\t * NOTE: The MCDI protocol numbers ports from zero.\n+\t * The common code MCDI interface numbers ports from one.\n+\t */\n+\temip->emi_port = port + 1;\n+\n+\tif ((rc = ef10_external_port_mapping(enp, port,\n+\t\t    &encp->enc_external_port)) != 0)\n+\t\tgoto fail2;\n+\n+\t/*\n+\t * Get PCIe function number from firmware (used for\n+\t * per-function privilege and dynamic config info).\n+\t *  - PCIe PF: pf = PF number, vf = 0xffff.\n+\t *  - PCIe VF: pf = parent PF, vf = VF 
number.\n+\t */\n+\tif ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)\n+\t\tgoto fail3;\n+\n+\tencp->enc_pf = pf;\n+\tencp->enc_vf = vf;\n+\n+\t/* MAC address for this function */\n+\tif (EFX_PCI_FUNCTION_IS_PF(encp)) {\n+\t\trc = efx_mcdi_get_mac_address_pf(enp, mac_addr);\n+\t\tif ((rc == 0) && (mac_addr[0] & 0x02)) {\n+\t\t\t/*\n+\t\t\t * If the static config does not include a global MAC\n+\t\t\t * address pool then the board may return a locally\n+\t\t\t * administered MAC address (this should only happen on\n+\t\t\t * incorrectly programmed boards).\n+\t\t\t */\n+\t\t\trc = EINVAL;\n+\t\t}\n+\t} else {\n+\t\trc = efx_mcdi_get_mac_address_vf(enp, mac_addr);\n+\t}\n+\tif (rc != 0)\n+\t\tgoto fail4;\n+\n+\tEFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);\n+\n+\t/* Board configuration */\n+\trc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);\n+\tif (rc != 0) {\n+\t\t/* Unprivileged functions may not be able to read board cfg */\n+\t\tif (rc == EACCES)\n+\t\t\tboard_type = 0;\n+\t\telse\n+\t\t\tgoto fail5;\n+\t}\n+\n+\tencp->enc_board_type = board_type;\n+\tencp->enc_clk_mult = 1; /* not used for Huntington */\n+\n+\t/* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */\n+\tif ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)\n+\t\tgoto fail6;\n+\n+\t/* Obtain the default PHY advertised capabilities */\n+\tif ((rc = ef10_phy_get_link(enp, &els)) != 0)\n+\t\tgoto fail7;\n+\tepp->ep_default_adv_cap_mask = els.els_adv_cap_mask;\n+\tepp->ep_adv_cap_mask = els.els_adv_cap_mask;\n+\n+\t/*\n+\t * Enable firmware workarounds for hardware errata.\n+\t * Expected responses are:\n+\t *  - 0 (zero):\n+\t *\tSuccess: workaround enabled or disabled as requested.\n+\t *  - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):\n+\t *\tFirmware does not support the MC_CMD_WORKAROUND request.\n+\t *\t(assume that the workaround is not supported).\n+\t *  - MC_CMD_ERR_ENOENT (reported as ENOENT):\n+\t *\tFirmware does not support the requested workaround.\n+\t *  - MC_CMD_ERR_EPERM  (reported as EACCES):\n+\t *\tUnprivileged function cannot enable/disable workarounds.\n+\t *\n+\t * See efx_mcdi_request_errcode() for MCDI error translations.\n+\t */\n+\n+\t/*\n+\t * If the bug35388 workaround is enabled, then use an indirect access\n+\t * method to avoid unsafe EVQ writes.\n+\t */\n+\trc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE,\n+\t    NULL);\n+\tif ((rc == 0) || (rc == EACCES))\n+\t\tencp->enc_bug35388_workaround = B_TRUE;\n+\telse if ((rc == ENOTSUP) || (rc == ENOENT))\n+\t\tencp->enc_bug35388_workaround = B_FALSE;\n+\telse\n+\t\tgoto fail8;\n+\n+\t/*\n+\t * If the bug41750 workaround is enabled, then do not test interrupts,\n+\t * as the test will fail (seen with Greenport controllers).\n+\t */\n+\trc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE,\n+\t    NULL);\n+\tif (rc == 0) {\n+\t\tencp->enc_bug41750_workaround = B_TRUE;\n+\t} else if (rc == EACCES) {\n+\t\t/* Assume a controller with 40G ports needs the workaround. */\n+\t\tif (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX)\n+\t\t\tencp->enc_bug41750_workaround = B_TRUE;\n+\t\telse\n+\t\t\tencp->enc_bug41750_workaround = B_FALSE;\n+\t} else if ((rc == ENOTSUP) || (rc == ENOENT)) {\n+\t\tencp->enc_bug41750_workaround = B_FALSE;\n+\t} else {\n+\t\tgoto fail9;\n+\t}\n+\tif (EFX_PCI_FUNCTION_IS_VF(encp)) {\n+\t\t/* Interrupt testing does not work for VFs. See bug50084. 
*/\n+\t\tencp->enc_bug41750_workaround = B_TRUE;\n+\t}\n+\n+\t/*\n+\t * If the bug26807 workaround is enabled, then firmware has enabled\n+\t * support for chained multicast filters. Firmware will reset (FLR)\n+\t * functions which have filters in the hardware filter table when the\n+\t * workaround is enabled/disabled.\n+\t *\n+\t * We must recheck if the workaround is enabled after inserting the\n+\t * first hardware filter, in case it has been changed since this check.\n+\t */\n+\trc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,\n+\t    B_TRUE, &flags);\n+\tif (rc == 0) {\n+\t\tencp->enc_bug26807_workaround = B_TRUE;\n+\t\tif (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {\n+\t\t\t/*\n+\t\t\t * Other functions had installed filters before the\n+\t\t\t * workaround was enabled, and they have been reset\n+\t\t\t * by firmware.\n+\t\t\t */\n+\t\t\tEFSYS_PROBE(bug26807_workaround_flr_done);\n+\t\t\t/* FIXME: bump MC warm boot count ? */\n+\t\t}\n+\t} else if (rc == EACCES) {\n+\t\t/*\n+\t\t * Unprivileged functions cannot enable the workaround in older\n+\t\t * firmware.\n+\t\t */\n+\t\tencp->enc_bug26807_workaround = B_FALSE;\n+\t} else if ((rc == ENOTSUP) || (rc == ENOENT)) {\n+\t\tencp->enc_bug26807_workaround = B_FALSE;\n+\t} else {\n+\t\tgoto fail10;\n+\t}\n+\n+\t/* Get clock frequencies (in MHz). */\n+\tif ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)\n+\t\tgoto fail11;\n+\n+\t/*\n+\t * The Huntington timer quantum is 1536 sysclk cycles, documented for\n+\t * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.\n+\t */\n+\tencp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */\n+\tif (encp->enc_bug35388_workaround) {\n+\t\tencp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<\n+\t\tERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000;\n+\t} else {\n+\t\tencp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<\n+\t\tFRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;\n+\t}\n+\n+\tencp->enc_bug61265_workaround = B_FALSE; /* Medford only */\n+\n+\t/* Check capabilities of running datapath firmware */\n+\tif ((rc = ef10_get_datapath_caps(enp)) != 0)\n+\t\tgoto fail12;\n+\n+\t/* Alignment for receive packet DMA buffers */\n+\tencp->enc_rx_buf_align_start = 1;\n+\tencp->enc_rx_buf_align_end = 64; /* RX DMA end padding */\n+\n+\t/* Alignment for WPTR updates */\n+\tencp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;\n+\n+\t/*\n+\t * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use\n+\t * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available\n+\t * resources (allocated to this PCIe function), which is zero until\n+\t * after we have allocated VIs.\n+\t */\n+\tencp->enc_evq_limit = 1024;\n+\tencp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;\n+\tencp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;\n+\n+\t/*\n+\t * The workaround for bug35388 uses the top bit of transmit queue\n+\t * descriptor writes, preventing the use of 4096 descriptor TXQs.\n+\t */\n+\tencp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096;\n+\n+\tencp->enc_buftbl_limit = 0xFFFFFFFF;\n+\n+\tencp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;\n+\tencp->enc_piobuf_size = HUNT_PIOBUF_SIZE;\n+\tencp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;\n+\n+\t/*\n+\t * Get the current privilege mask. Note that this may be modified\n+\t * dynamically, so this value is informational only. 
DO NOT use\n+\t * the privilege mask to check for sufficient privileges, as that\n+\t * can result in time-of-check/time-of-use bugs.\n+\t */\n+\tif ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)\n+\t\tgoto fail13;\n+\tencp->enc_privilege_mask = mask;\n+\n+\t/* Get interrupt vector limits */\n+\tif ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {\n+\t\tif (EFX_PCI_FUNCTION_IS_PF(encp))\n+\t\t\tgoto fail14;\n+\n+\t\t/* Ignore error (cannot query vector limits from a VF). */\n+\t\tbase = 0;\n+\t\tnvec = 1024;\n+\t}\n+\tencp->enc_intr_vec_base = base;\n+\tencp->enc_intr_limit = nvec;\n+\n+\t/*\n+\t * Maximum number of bytes into the frame the TCP header can start for\n+\t * firmware assisted TSO to work.\n+\t */\n+\tencp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;\n+\n+\tif ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)\n+\t\tgoto fail15;\n+\tencp->enc_required_pcie_bandwidth_mbps = bandwidth;\n+\n+\t/* All Huntington devices have a PCIe Gen3, 8 lane connector */\n+\tencp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;\n+\n+\treturn (0);\n+\n+fail15:\n+\tEFSYS_PROBE(fail15);\n+fail14:\n+\tEFSYS_PROBE(fail14);\n+fail13:\n+\tEFSYS_PROBE(fail13);\n+fail12:\n+\tEFSYS_PROBE(fail12);\n+fail11:\n+\tEFSYS_PROBE(fail11);\n+fail10:\n+\tEFSYS_PROBE(fail10);\n+fail9:\n+\tEFSYS_PROBE(fail9);\n+fail8:\n+\tEFSYS_PROBE(fail8);\n+fail7:\n+\tEFSYS_PROBE(fail7);\n+fail6:\n+\tEFSYS_PROBE(fail6);\n+fail5:\n+\tEFSYS_PROBE(fail5);\n+fail4:\n+\tEFSYS_PROBE(fail4);\n+fail3:\n+\tEFSYS_PROBE(fail3);\n+fail2:\n+\tEFSYS_PROBE(fail2);\n+fail1:\n+\tEFSYS_PROBE1(fail1, efx_rc_t, rc);\n+\n+\treturn (rc);\n+}\n+\n+\n+#endif\t/* EFSYS_OPT_HUNTINGTON */\n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "10/55"
    ]
}
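
Illustrative sketch only, not part of the patch above: the ESF_*_LBN / ESF_*_WIDTH pairs in efx_regs_ef10.h describe bit-fields inside 64-bit descriptors, with LBN giving the lowest bit number and WIDTH the field width. The real libefx code builds descriptors with its own EFX_*_QWORD-style macros; the stand-alone helper below only demonstrates the packing convention, reusing a few values copied from the ES_TX_KER_DESC section.

/*
 * Hedged sketch: plain-C insert/extract of LBN/WIDTH bit-fields.
 * Not the driver's macros; values copied from ES_TX_KER_DESC above.
 */
#include <stdint.h>
#include <stdio.h>

#define ESF_DZ_TX_KER_TYPE_LBN       63
#define ESF_DZ_TX_KER_TYPE_WIDTH      1
#define ESF_DZ_TX_KER_CONT_LBN       62
#define ESF_DZ_TX_KER_CONT_WIDTH      1
#define ESF_DZ_TX_KER_BYTE_CNT_LBN   48
#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
#define ESF_DZ_TX_KER_BUF_ADDR_LBN    0
#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48

/* Insert a value into bits [lbn, lbn + width) of a 64-bit word. */
static uint64_t
field_insert(uint64_t qword, unsigned int lbn, unsigned int width,
    uint64_t value)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	return (qword & ~(mask << lbn)) | ((value & mask) << lbn);
}

/* Extract bits [lbn, lbn + width) from a 64-bit word. */
static uint64_t
field_extract(uint64_t qword, unsigned int lbn, unsigned int width)
{
	uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

	return (qword >> lbn) & mask;
}

int
main(void)
{
	uint64_t desc = 0;

	/* Build a physically-addressed TX descriptor for a 1500-byte buffer
	 * at a made-up DMA address; the address is purely illustrative. */
	desc = field_insert(desc, ESF_DZ_TX_KER_TYPE_LBN,
	    ESF_DZ_TX_KER_TYPE_WIDTH, 0);	/* not an option descriptor */
	desc = field_insert(desc, ESF_DZ_TX_KER_CONT_LBN,
	    ESF_DZ_TX_KER_CONT_WIDTH, 0);	/* last fragment of the packet */
	desc = field_insert(desc, ESF_DZ_TX_KER_BYTE_CNT_LBN,
	    ESF_DZ_TX_KER_BYTE_CNT_WIDTH, 1500);
	desc = field_insert(desc, ESF_DZ_TX_KER_BUF_ADDR_LBN,
	    ESF_DZ_TX_KER_BUF_ADDR_WIDTH, 0x12345678ULL);

	printf("byte count = %llu\n", (unsigned long long)
	    field_extract(desc, ESF_DZ_TX_KER_BYTE_CNT_LBN,
	    ESF_DZ_TX_KER_BYTE_CNT_WIDTH));
	return 0;
}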
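
A second sketch, also not part of the patch: the event-queue timer arithmetic from hunt_board_cfg(). The timer quantum is 1536 sysclk cycles, and with the bug35388 workaround the timeout is limited by the 8-bit ERF_DD_EVQ_IND_TIMER_VAL field. The 800 MHz sysclk below is an assumed example value, not a Huntington specification.

/*
 * Hedged sketch: EVQ timer quantum and maximum timeout, following the
 * computation in hunt_board_cfg().  sysclk_mhz is a hypothetical value.
 */
#include <stdint.h>
#include <stdio.h>

#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8	/* from the header above */

int
main(void)
{
	uint32_t sysclk_mhz = 800;			/* assumed clock (MHz) */
	uint32_t quantum_ns = 1536000UL / sysclk_mhz;	/* 1536 cycles, in ns */
	uint32_t max_us =
	    (quantum_ns << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000;

	/* For 800 MHz this prints: quantum = 1920 ns, max timer = 491 us. */
	printf("quantum = %u ns, max timer = %u us\n", quantum_ns, max_us);
	return 0;
}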