get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.

GET /api/patches/39477/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 39477,
    "url": "http://patches.dpdk.org/api/patches/39477/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1525789143-138168-3-git-send-email-rosen.xu@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1525789143-138168-3-git-send-email-rosen.xu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1525789143-138168-3-git-send-email-rosen.xu@intel.com",
    "date": "2018-05-08T14:19:01",
    "name": "[dpdk-dev,v9,2/4] iFPGA: Add Intel FPGA OPAE Share Code",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "8635a9586df577565f4469f4b8fb397883f53096",
    "submitter": {
        "id": 946,
        "url": "http://patches.dpdk.org/api/people/946/?format=api",
        "name": "Xu, Rosen",
        "email": "rosen.xu@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1525789143-138168-3-git-send-email-rosen.xu@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/39477/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/39477/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 6EF079605;\n\tTue,  8 May 2018 16:17:55 +0200 (CEST)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby dpdk.org (Postfix) with ESMTP id BADB09605\n\tfor <dev@dpdk.org>; Tue,  8 May 2018 16:17:53 +0200 (CEST)",
            "from fmsmga006.fm.intel.com ([10.253.24.20])\n\tby fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t08 May 2018 07:17:53 -0700",
            "from dpdkx8602.sh.intel.com ([10.67.110.200])\n\tby fmsmga006.fm.intel.com with ESMTP; 08 May 2018 07:17:49 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.49,378,1520924400\"; d=\"scan'208\";a=\"226823416\"",
        "From": "\"Xu, Rosen\" <rosen.xu@intel.com>",
        "To": "dev@dpdk.org,\n\tthomas@monjalon.net",
        "Cc": "rosen.xu@intel.com, declan.doherty@intel.com, bruce.richardson@intel.com,\n\tshreyansh.jain@nxp.com, ferruh.yigit@intel.com,\n\tkonstantin.ananyev@intel.com, tianfei.zhang@intel.com,\n\tsong.liu@intel.com, hao.wu@intel.com, gaetan.rivet@6wind.com, \"Xu,\n\tYilun\" <yilun.xu@intel.com>",
        "Date": "Tue,  8 May 2018 22:19:01 +0800",
        "Message-Id": "<1525789143-138168-3-git-send-email-rosen.xu@intel.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1525789143-138168-1-git-send-email-rosen.xu@intel.com>",
        "References": "<1521553556-62982-1-git-send-email-rosen.xu@intel.com>\n\t<1525789143-138168-1-git-send-email-rosen.xu@intel.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=y",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v9 2/4] iFPGA: Add Intel FPGA OPAE Share Code",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Rosen Xu <rosen.xu@intel.com>\n\nThis patch adds Intel FPGA Open Programmable Acceleration\nEngine (OPAE)[1] base driver code, in order to support Intel\nFPGA devices under DPDK. The base code currently supports\nIntel FPGA solutions including integrated solution (Intel(R)\nXeon(R) CPU with FPGAs) and discrete solution (Intel(R)\nProgrammable Acceleration Card with Intel(R) Arria(R) 10 FPGA)\nand it could be extended to support more FPGA devices in the\nfuture. Please refer to [1][2] for more introduction on OPAE\nand Intel FPGAs.\n\n[1] https://01.org/OPAE\n[2] https://www.altera.com/solutions/acceleration-hub/overview.html\n\nSigned-off-by: Figo Zhang <tianfei.zhang@intel.com>\nSigned-off-by: Wu, Hao <hao.wu@intel.com>\nSigned-off-by: Xu, Yilun <yilun.xu@intel.com>\n---\n drivers/raw/ifpga_rawdev/base/Makefile             |   30 +\n drivers/raw/ifpga_rawdev/base/README               |   31 +\n drivers/raw/ifpga_rawdev/base/ifpga_api.c          |  294 ++++\n drivers/raw/ifpga_rawdev/base/ifpga_api.h          |   28 +\n drivers/raw/ifpga_rawdev/base/ifpga_compat.h       |   57 +\n drivers/raw/ifpga_rawdev/base/ifpga_defines.h      | 1661 ++++++++++++++++++++\n drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c    |  824 ++++++++++\n drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h    |   11 +\n drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c  |  314 ++++\n drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h  |  164 ++\n drivers/raw/ifpga_rawdev/base/ifpga_fme.c          |  734 +++++++++\n drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c    |  301 ++++\n drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c    |  403 +++++\n drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c    |  715 +++++++++\n drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c       |  372 +++++\n drivers/raw/ifpga_rawdev/base/ifpga_hw.h           |  127 ++\n drivers/raw/ifpga_rawdev/base/ifpga_port.c         |  408 +++++\n drivers/raw/ifpga_rawdev/base/ifpga_port_error.c   |  165 ++\n 
drivers/raw/ifpga_rawdev/base/meson.build          |   34 +\n drivers/raw/ifpga_rawdev/base/opae_debug.c         |   99 ++\n drivers/raw/ifpga_rawdev/base/opae_debug.h         |   19 +\n drivers/raw/ifpga_rawdev/base/opae_hw_api.c        |  381 +++++\n drivers/raw/ifpga_rawdev/base/opae_hw_api.h        |  253 +++\n drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c  |  145 ++\n drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h  |  279 ++++\n drivers/raw/ifpga_rawdev/base/opae_osdep.h         |   81 +\n .../ifpga_rawdev/base/osdep_raw/osdep_generic.h    |   75 +\n .../ifpga_rawdev/base/osdep_rte/osdep_generic.h    |   45 +\n 28 files changed, 8050 insertions(+)\n create mode 100644 drivers/raw/ifpga_rawdev/base/Makefile\n create mode 100644 drivers/raw/ifpga_rawdev/base/README\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_api.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_api.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_compat.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_defines.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_hw.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_port.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/ifpga_port_error.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/meson.build\n create mode 100644 drivers/raw/ifpga_rawdev/base/opae_debug.c\n create mode 
100644 drivers/raw/ifpga_rawdev/base/opae_debug.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/opae_hw_api.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/opae_hw_api.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c\n create mode 100644 drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/opae_osdep.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h\n create mode 100644 drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h",
    "diff": "diff --git a/drivers/raw/ifpga_rawdev/base/Makefile b/drivers/raw/ifpga_rawdev/base/Makefile\nnew file mode 100644\nindex 0000000..ade3551\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/Makefile\n@@ -0,0 +1,30 @@\n+#SPDX-License-Identifier: BSD-3-Clause\n+#Copyright(c) 2010-2018 Intel Corporation\n+\n+ifneq ($(CONFIG_RTE_LIBRTE_EAL),)\n+OSDEP := osdep_rte\n+else\n+OSDEP := osdep_raw\n+endif\n+\n+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev/base/$(OSDEP)\n+\n+SRCS-y += ifpga_api.c\n+SRCS-y += ifpga_enumerate.c\n+SRCS-y += ifpga_feature_dev.c\n+SRCS-y += ifpga_fme.c\n+SRCS-y += ifpga_fme_iperf.c\n+SRCS-y += ifpga_fme_dperf.c\n+SRCS-y += ifpga_fme_error.c\n+SRCS-y += ifpga_port.c\n+SRCS-y += ifpga_port_error.c\n+SRCS-y += opae_hw_api.c\n+SRCS-y += opae_ifpga_hw_api.c\n+SRCS-y += opae_debug.c\n+SRCS-y += ifpga_fme_pr.c\n+\n+ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)\n+\tCFLAGS_ifpga_fme_pr.o += -march=knl\n+endif\n+\n+SRCS-y += $(wildcard $(SRCDIR)/base/$(OSDEP)/*.c)\ndiff --git a/drivers/raw/ifpga_rawdev/base/README b/drivers/raw/ifpga_rawdev/base/README\nnew file mode 100644\nindex 0000000..3636ac7\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/README\n@@ -0,0 +1,31 @@\n+..\n+\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+Intel® iFPGA driver\n+==================\n+\n+This directory contains source code of Intel FPGA driver released by\n+the team which develops Intel FPGA Open Programmable Acceleration Engine (OPAE).\n+The directory of base/ contains the original source package. 
The base code\n+currently supports Intel FPGA solutions including integrated solution (Intel(R)\n+Xeon(R) CPU with FPGAs) and discrete solution (Intel(R) Programmable Acceleration\n+Card with Intel(R) Arria(R) 10 FPGA) and it could be extended to support more FPGA\n+devices in the future.\n+\n+Please refer to [1][2] for more introduction on OPAE and Intel FPGAs.\n+\n+[1] https://01.org/OPAE\n+[2] https://www.altera.com/solutions/acceleration-hub/overview.html\n+\n+\n+Updating the driver\n+===================\n+\n+NOTE: The source code in this directory should not be modified apart from\n+the following file(s):\n+\n+\tosdep_raw/osdep_generic.h\n+\tosdep_rte/osdep_generic.h\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_api.c b/drivers/raw/ifpga_rawdev/base/ifpga_api.c\nnew file mode 100644\nindex 0000000..540e171\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_api.c\n@@ -0,0 +1,294 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include \"ifpga_api.h\"\n+#include \"ifpga_enumerate.h\"\n+#include \"ifpga_feature_dev.h\"\n+\n+#include \"opae_hw_api.h\"\n+\n+/* Accelerator APIs */\n+static int ifpga_acc_get_uuid(struct opae_accelerator *acc,\n+\t\t\t      struct uuid *uuid)\n+{\n+\tstruct opae_bridge *br = acc->br;\n+\tstruct ifpga_port_hw *port;\n+\n+\tif (!br || !br->data)\n+\t\treturn -EINVAL;\n+\n+\tport = br->data;\n+\n+\treturn fpga_get_afu_uuid(port, uuid);\n+}\n+\n+static int ifpga_acc_set_irq(struct opae_accelerator *acc,\n+\t\t\t     u32 start, u32 count, s32 evtfds[])\n+{\n+\tstruct ifpga_afu_info *afu_info = acc->data;\n+\tstruct opae_bridge *br = acc->br;\n+\tstruct ifpga_port_hw *port;\n+\tstruct fpga_uafu_irq_set irq_set;\n+\n+\tif (!br || !br->data)\n+\t\treturn -EINVAL;\n+\n+\tif (start >= afu_info->num_irqs || start + count > afu_info->num_irqs)\n+\t\treturn -EINVAL;\n+\n+\tport = br->data;\n+\n+\tirq_set.start = start;\n+\tirq_set.count = count;\n+\tirq_set.evtfds = 
evtfds;\n+\n+\treturn ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,\n+\t\t\t     IFPGA_PORT_FEATURE_ID_UINT, &irq_set);\n+}\n+\n+static int ifpga_acc_get_info(struct opae_accelerator *acc,\n+\t\t\t      struct opae_acc_info *info)\n+{\n+\tstruct ifpga_afu_info *afu_info = acc->data;\n+\n+\tif (!afu_info)\n+\t\treturn -ENODEV;\n+\n+\tinfo->num_regions = afu_info->num_regions;\n+\tinfo->num_irqs = afu_info->num_irqs;\n+\n+\treturn 0;\n+}\n+\n+static int ifpga_acc_get_region_info(struct opae_accelerator *acc,\n+\t\t\t\t     struct opae_acc_region_info *info)\n+{\n+\tstruct ifpga_afu_info *afu_info = acc->data;\n+\n+\tif (!afu_info)\n+\t\treturn -EINVAL;\n+\n+\tif (info->index >= afu_info->num_regions)\n+\t\treturn -EINVAL;\n+\n+\t/* always one RW region only for AFU now */\n+\tinfo->flags = ACC_REGION_READ | ACC_REGION_WRITE | ACC_REGION_MMIO;\n+\tinfo->len = afu_info->region[info->index].len;\n+\tinfo->addr = afu_info->region[info->index].addr;\n+\n+\treturn 0;\n+}\n+\n+static int ifpga_acc_read(struct opae_accelerator *acc, unsigned int region_idx,\n+\t\t\t  u64 offset, unsigned int byte, void *data)\n+{\n+\tstruct ifpga_afu_info *afu_info = acc->data;\n+\tstruct opae_reg_region *region;\n+\n+\tif (!afu_info)\n+\t\treturn -EINVAL;\n+\n+\tif (offset + byte <= offset)\n+\t\treturn -EINVAL;\n+\n+\tif (region_idx >= afu_info->num_regions)\n+\t\treturn -EINVAL;\n+\n+\tregion = &afu_info->region[region_idx];\n+\tif (offset + byte > region->len)\n+\t\treturn -EINVAL;\n+\n+\tswitch (byte) {\n+\tcase 8:\n+\t\t*(u64  *)data = opae_readq(region->addr + offset);\n+\t\tbreak;\n+\tcase 4:\n+\t\t*(u32 *)data = opae_readl(region->addr + offset);\n+\t\tbreak;\n+\tcase 2:\n+\t\t*(u16 *)data = opae_readw(region->addr + offset);\n+\t\tbreak;\n+\tcase 1:\n+\t\t*(u8 *)data = opae_readb(region->addr + offset);\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int ifpga_acc_write(struct opae_accelerator *acc,\n+\t\t\t   unsigned int 
region_idx, u64 offset,\n+\t\t\t   unsigned int byte, void *data)\n+{\n+\tstruct ifpga_afu_info *afu_info = acc->data;\n+\tstruct opae_reg_region *region;\n+\n+\tif (!afu_info)\n+\t\treturn -EINVAL;\n+\n+\tif (offset + byte <= offset)\n+\t\treturn -EINVAL;\n+\n+\tif (region_idx >= afu_info->num_regions)\n+\t\treturn -EINVAL;\n+\n+\tregion = &afu_info->region[region_idx];\n+\tif (offset + byte > region->len)\n+\t\treturn -EINVAL;\n+\n+\t/* normal mmio case */\n+\tswitch (byte) {\n+\tcase 8:\n+\t\topae_writeq(*(u64 *)data, region->addr + offset);\n+\t\tbreak;\n+\tcase 4:\n+\t\topae_writel(*(u32 *)data, region->addr + offset);\n+\t\tbreak;\n+\tcase 2:\n+\t\topae_writew(*(u16 *)data, region->addr + offset);\n+\t\tbreak;\n+\tcase 1:\n+\t\topae_writeb(*(u8 *)data, region->addr + offset);\n+\t\tbreak;\n+\tdefault:\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+struct opae_accelerator_ops ifpga_acc_ops = {\n+\t.read = ifpga_acc_read,\n+\t.write = ifpga_acc_write,\n+\t.set_irq = ifpga_acc_set_irq,\n+\t.get_info = ifpga_acc_get_info,\n+\t.get_region_info = ifpga_acc_get_region_info,\n+\t.get_uuid = ifpga_acc_get_uuid,\n+};\n+\n+/* Bridge APIs */\n+\n+static int ifpga_br_reset(struct opae_bridge *br)\n+{\n+\tstruct ifpga_port_hw *port = br->data;\n+\n+\treturn fpga_port_reset(port);\n+}\n+\n+struct opae_bridge_ops ifpga_br_ops = {\n+\t.reset = ifpga_br_reset,\n+};\n+\n+/* Manager APIs */\n+static int ifpga_mgr_flash(struct opae_manager *mgr, int id, void *buf,\n+\t\t\t   u32 size, u64 *status)\n+{\n+\tstruct ifpga_fme_hw *fme = mgr->data;\n+\tstruct ifpga_hw *hw = fme->parent;\n+\n+\treturn ifpga_pr(hw, id, buf, size, status);\n+}\n+\n+struct opae_manager_ops ifpga_mgr_ops = {\n+\t.flash = ifpga_mgr_flash,\n+};\n+\n+/* Adapter APIs */\n+static int ifpga_adapter_enumerate(struct opae_adapter *adapter)\n+{\n+\tstruct ifpga_hw *hw = malloc(sizeof(*hw));\n+\n+\tif (hw) {\n+\t\tmemset(hw, 0, sizeof(*hw));\n+\t\thw->pci_data = adapter->data;\n+\t\thw->adapter = 
adapter;\n+\t\tif (ifpga_bus_enumerate(hw))\n+\t\t\tgoto error;\n+\t\treturn ifpga_bus_init(hw);\n+\t}\n+\n+error:\n+\treturn -ENOMEM;\n+}\n+\n+struct opae_adapter_ops ifpga_adapter_ops = {\n+\t.enumerate = ifpga_adapter_enumerate,\n+};\n+\n+/**\n+ *  ifpga_pr - do the partial reconfiguration for a given port device\n+ *  @hw: pointer to the HW structure\n+ *  @port_id: the port device id\n+ *  @buffer: the buffer of the bitstream\n+ *  @size: the size of the bitstream\n+ *  @status: hardware status including PR error code if return -EIO.\n+ *\n+ *  @return\n+ *   - 0: Success, partial reconfiguration finished.\n+ *   - <0: Error code returned in partial reconfiguration.\n+ **/\n+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,\n+\t     u64 *status)\n+{\n+\tif (!is_valid_port_id(hw, port_id))\n+\t\treturn -ENODEV;\n+\n+\treturn do_pr(hw, port_id, buffer, size, status);\n+}\n+\n+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,\n+\t\t   struct feature_prop *prop)\n+{\n+\tif (!hw || !prop)\n+\t\treturn -EINVAL;\n+\n+\tswitch (fiu_id) {\n+\tcase FEATURE_FIU_ID_FME:\n+\t\treturn fme_get_prop(&hw->fme, prop);\n+\tcase FEATURE_FIU_ID_PORT:\n+\t\tif (!is_valid_port_id(hw, port_id))\n+\t\t\treturn -ENODEV;\n+\t\treturn port_get_prop(&hw->port[port_id], prop);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,\n+\t\t   struct feature_prop *prop)\n+{\n+\tif (!hw || !prop)\n+\t\treturn -EINVAL;\n+\n+\tswitch (fiu_id) {\n+\tcase FEATURE_FIU_ID_FME:\n+\t\treturn fme_set_prop(&hw->fme, prop);\n+\tcase FEATURE_FIU_ID_PORT:\n+\t\tif (!is_valid_port_id(hw, port_id))\n+\t\t\treturn -ENODEV;\n+\t\treturn port_set_prop(&hw->port[port_id], prop);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,\n+\t\t  u32 feature_id, void *irq_set)\n+{\n+\tif (!hw || !irq_set)\n+\t\treturn -EINVAL;\n+\n+\tswitch (fiu_id) {\n+\tcase 
FEATURE_FIU_ID_FME:\n+\t\treturn fme_set_irq(&hw->fme, feature_id, irq_set);\n+\tcase FEATURE_FIU_ID_PORT:\n+\t\tif (!is_valid_port_id(hw, port_id))\n+\t\t\treturn -ENODEV;\n+\t\treturn port_set_irq(&hw->port[port_id], feature_id, irq_set);\n+\t}\n+\n+\treturn -ENOENT;\n+}\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_api.h b/drivers/raw/ifpga_rawdev/base/ifpga_api.h\nnew file mode 100644\nindex 0000000..dae7ca1\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_api.h\n@@ -0,0 +1,28 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _IFPGA_API_H_\n+#define _IFPGA_API_H_\n+\n+#include \"opae_hw_api.h\"\n+#include \"ifpga_hw.h\"\n+\n+extern struct opae_adapter_ops ifpga_adapter_ops;\n+extern struct opae_manager_ops ifpga_mgr_ops;\n+extern struct opae_bridge_ops ifpga_br_ops;\n+extern struct opae_accelerator_ops ifpga_acc_ops;\n+\n+/* common APIs */\n+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,\n+\t\t   struct feature_prop *prop);\n+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,\n+\t\t   struct feature_prop *prop);\n+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,\n+\t\t  u32 feature_id, void *irq_set);\n+\n+/* FME APIs */\n+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,\n+\t     u64 *status);\n+\n+#endif /* _IFPGA_API_H_ */\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_compat.h b/drivers/raw/ifpga_rawdev/base/ifpga_compat.h\nnew file mode 100644\nindex 0000000..78b904e\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_compat.h\n@@ -0,0 +1,57 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _IFPGA_COMPAT_H_\n+#define _IFPGA_COMPAT_H_\n+\n+#include \"opae_osdep.h\"\n+\n+#undef container_of\n+#define container_of(ptr, type, member) ({ \\\n+\t\ttypeof(((type *)0)->member)(*__mptr) = (ptr); \\\n+\t\t(type *)((char *)__mptr - offsetof(type, member)); 
})\n+\n+#define PAGE_SHIFT       12\n+#define PAGE_SIZE        (1 << PAGE_SHIFT)\n+#define PAGE_MASK        (~(PAGE_SIZE - 1))\n+#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)\n+#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))\n+\n+#define IS_ALIGNED(x, a)\t\t(((x) & ((typeof(x))(a) - 1)) == 0)\n+#define PAGE_ALIGNED(addr)\tIS_ALIGNED((unsigned long)(addr), PAGE_SIZE)\n+\n+#define readl(addr) opae_readl(addr)\n+#define readq(addr) opae_readq(addr)\n+#define writel(value, addr) opae_writel(value, addr)\n+#define writeq(value, addr) opae_writeq(value, addr)\n+\n+#define malloc(size) opae_malloc(size)\n+#define zmalloc(size) opae_zmalloc(size)\n+#define free(size) opae_free(size)\n+\n+/*\n+ * Wait register's _field to be changed to the given value (_expect's _field)\n+ * by polling with given interval and timeout.\n+ */\n+#define fpga_wait_register_field(_field, _expect, _reg_addr, _timeout, _invl)\\\n+({\t\t\t\t\t\t\t\t\t     \\\n+\tint wait = 0;\t\t\t\t\t\t\t     \\\n+\tint ret = -ETIMEDOUT;\t\t\t\t\t\t     \\\n+\ttypeof(_expect) value;\t\t\t\t\t\t     \\\n+\tfor (; wait <= _timeout; wait += _invl) {\t\t\t     \\\n+\t\tvalue.csr = readq(_reg_addr);\t\t\t\t     \\\n+\t\tif (_expect._field == value._field) {\t\t\t     \\\n+\t\t\tret = 0;\t\t\t\t\t     \\\n+\t\t\tbreak;\t\t\t\t\t\t     \\\n+\t\t}\t\t\t\t\t\t\t     \\\n+\t\tudelay(_invl);\t\t\t\t\t\t     \\\n+\t}\t\t\t\t\t\t\t\t     \\\n+\tret;\t\t\t\t\t\t\t\t     \\\n+})\n+\n+#define __maybe_unused __attribute__((__unused__))\n+\n+#define UNUSED(x)\t(void)(x)\n+\n+#endif /* _IFPGA_COMPAT_H_ */\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_defines.h b/drivers/raw/ifpga_rawdev/base/ifpga_defines.h\nnew file mode 100644\nindex 0000000..8989280\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_defines.h\n@@ -0,0 +1,1661 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _IFPGA_DEFINES_H_\n+#define _IFPGA_DEFINES_H_\n+\n+#include 
\"ifpga_compat.h\"\n+\n+#define MAX_FPGA_PORT_NUM  4\n+\n+#define FME_FEATURE_HEADER          \"fme_hdr\"\n+#define FME_FEATURE_THERMAL_MGMT    \"fme_thermal\"\n+#define FME_FEATURE_POWER_MGMT      \"fme_power\"\n+#define FME_FEATURE_GLOBAL_IPERF    \"fme_iperf\"\n+#define FME_FEATURE_GLOBAL_ERR      \"fme_error\"\n+#define FME_FEATURE_PR_MGMT         \"fme_pr\"\n+#define FME_FEATURE_HSSI_ETH        \"fme_hssi\"\n+#define FME_FEATURE_GLOBAL_DPERF    \"fme_dperf\"\n+#define FME_FEATURE_QSPI_FLASH\t    \"fme_qspi_flash\"\n+\n+#define PORT_FEATURE_HEADER         \"port_hdr\"\n+#define PORT_FEATURE_UAFU           \"port_uafu\"\n+#define PORT_FEATURE_ERR            \"port_err\"\n+#define PORT_FEATURE_UMSG           \"port_umsg\"\n+#define PORT_FEATURE_PR             \"port_pr\"\n+#define PORT_FEATURE_UINT           \"port_uint\"\n+#define PORT_FEATURE_STP            \"port_stp\"\n+\n+/*\n+ * do not check the revision id as id may be dynamic under\n+ * some cases, e.g, UAFU.\n+ */\n+#define SKIP_REVISION_CHECK\t\t0xff\n+\n+#define FME_HEADER_REVISION\t\t1\n+#define FME_THERMAL_MGMT_REVISION\t0\n+#define FME_POWER_MGMT_REVISION\t\t1\n+#define FME_GLOBAL_IPERF_REVISION\t1\n+#define FME_GLOBAL_ERR_REVISION\t\t1\n+#define FME_PR_MGMT_REVISION\t\t2\n+#define FME_HSSI_ETH_REVISION\t\t0\n+#define FME_GLOBAL_DPERF_REVISION\t0\n+#define FME_QSPI_REVISION\t\t0\n+\n+#define PORT_HEADER_REVISION\t\t0\n+/* UAFU's header info depends on the downloaded GBS */\n+#define PORT_UAFU_REVISION\t\tSKIP_REVISION_CHECK\n+#define PORT_ERR_REVISION\t\t1\n+#define PORT_UMSG_REVISION\t\t0\n+#define PORT_UINT_REVISION\t\t0\n+#define PORT_STP_REVISION\t\t1\n+\n+#define FEATURE_TYPE_AFU\t0x1\n+#define FEATURE_TYPE_BBB        0x2\n+#define FEATURE_TYPE_PRIVATE\t0x3\n+#define FEATURE_TYPE_FIU\t0x4\n+\n+#define FEATURE_FIU_ID_FME\t0x0\n+#define FEATURE_FIU_ID_PORT\t0x1\n+\n+#define FEATURE_ID_HEADER\t0x0\n+#define FEATURE_ID_AFU\t\t0xff\n+\n+enum fpga_id_type 
{\n+\tFME_ID,\n+\tPORT_ID,\n+\tFPGA_ID_MAX,\n+};\n+\n+enum fme_feature_id {\n+\tFME_FEATURE_ID_HEADER = 0x0,\n+\n+\tFME_FEATURE_ID_THERMAL_MGMT\t= 0x1,\n+\tFME_FEATURE_ID_POWER_MGMT = 0x2,\n+\tFME_FEATURE_ID_GLOBAL_IPERF = 0x3,\n+\tFME_FEATURE_ID_GLOBAL_ERR = 0x4,\n+\tFME_FEATURE_ID_PR_MGMT = 0x5,\n+\tFME_FEATURE_ID_HSSI_ETH = 0x6,\n+\tFME_FEATURE_ID_GLOBAL_DPERF = 0x7,\n+\tFME_FEATURE_ID_QSPI_FLASH = 0x8,\n+\n+\t/* one for fme header. */\n+\tFME_FEATURE_ID_MAX = 0x9,\n+};\n+\n+enum port_feature_id {\n+\tPORT_FEATURE_ID_HEADER = 0x0,\n+\tPORT_FEATURE_ID_ERROR = 0x1,\n+\tPORT_FEATURE_ID_UMSG = 0x2,\n+\tPORT_FEATURE_ID_UINT = 0x3,\n+\tPORT_FEATURE_ID_STP = 0x4,\n+\tPORT_FEATURE_ID_UAFU = 0x5,\n+\tPORT_FEATURE_ID_MAX = 0x6,\n+};\n+\n+/*\n+ * All headers and structures must be byte-packed to match the spec.\n+ */\n+#pragma pack(1)\n+\n+struct feature_header {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu16 id:12;\n+\t\t\tu8  revision:4;\n+\t\t\tu32 next_header_offset:24;\n+\t\t\tu8  end_of_list:1;\n+\t\t\tu32 reserved:19;\n+\t\t\tu8  type:4;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_bbb_header {\n+\tstruct uuid guid;\n+};\n+\n+struct feature_afu_header {\n+\tstruct uuid guid;\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 next_afu:24;\n+\t\t\tu64 reserved:40;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fiu_header {\n+\tstruct uuid guid;\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 next_afu:24;\n+\t\t\tu64 reserved:40;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_capability {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  fabric_verid;\t/* Fabric version ID */\n+\t\t\tu8  socket_id:1;\t/* Socket id */\n+\t\t\tu8  rsvd1:3;\t\t/* Reserved */\n+\t\t\t/* pci0 link available yes /no */\n+\t\t\tu8  pci0_link_avile:1;\n+\t\t\t/* pci1 link available yes /no */\n+\t\t\tu8  pci1_link_avile:1;\n+\t\t\t/* Coherent (QPI/UPI) link available yes /no */\n+\t\t\tu8  qpi_link_avile:1;\n+\t\t\tu8  rsvd2:1;\t\t/* Reserved */\n+\t\t\t/* IOMMU or VT-d supported  
yes/no */\n+\t\t\tu8  iommu_support:1;\n+\t\t\tu8  num_ports:3;\t/* Number of ports */\n+\t\t\tu8  sf_fab_ctl:1;\t/* Internal validation bit */\n+\t\t\tu8  rsvd3:3;\t\t/* Reserved */\n+\t\t\t/*\n+\t\t\t * Address width supported in bits\n+\t\t\t * BXT -0x26 , SKX -0x30\n+\t\t\t */\n+\t\t\tu8  address_width_bits:6;\n+\t\t\tu8  rsvd4:2;\t\t/* Reserved */\n+\t\t\t/* Size of cache supported in kb */\n+\t\t\tu16 cache_size:12;\n+\t\t\tu8  cache_assoc:4;\t/* Cache Associativity */\n+\t\t\tu16 rsvd5:15;\t\t/* Reserved */\n+\t\t\tu8  lock_bit:1;\t\t/* Lock bit */\n+\t\t};\n+\t};\n+};\n+\n+#define FME_AFU_ACCESS_PF\t\t0\n+#define FME_AFU_ACCESS_VF\t\t1\n+\n+struct feature_fme_port {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu32 port_offset:24;\n+\t\t\tu8  reserved1;\n+\t\t\tu8  port_bar:3;\n+\t\t\tu32 reserved2:20;\n+\t\t\tu8  afu_access_control:1;\n+\t\t\tu8  reserved3:4;\n+\t\t\tu8  port_implemented:1;\n+\t\t\tu8  reserved4:3;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_fab_status {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  upilink_status:4;   /* UPI Link Status */\n+\t\t\tu8  rsvd1:4;\t\t/* Reserved */\n+\t\t\tu8  pci0link_status:1;  /* pci0 link status */\n+\t\t\tu8  rsvd2:3;            /* Reserved */\n+\t\t\tu8  pci1link_status:1;  /* pci1 link status */\n+\t\t\tu64 rsvd3:51;           /* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_genprotrange2_base {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu16 rsvd1;           /* Reserved */\n+\t\t\t/* Base Address of memory range */\n+\t\t\tu8  protected_base_addrss:4;\n+\t\t\tu64 rsvd2:44;           /* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_genprotrange2_limit {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu16 rsvd1;           /* Reserved */\n+\t\t\t/* Limit Address of memory range */\n+\t\t\tu8  protected_limit_addrss:4;\n+\t\t\tu16 rsvd2:11;           /* Reserved */\n+\t\t\tu8  enable:1;        /* Enable GENPROTRANGE check */\n+\t\t\tu32 rsvd3;           /* 
Reserved */\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_dxe_lock {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/*\n+\t\t\t * Determines write access to the DXE region CSRs\n+\t\t\t * 1 - CSR region is locked;\n+\t\t\t * 0 - it is open for write access.\n+\t\t\t */\n+\t\t\tu8  dxe_early_lock:1;\n+\t\t\t/*\n+\t\t\t * Determines write access to the HSSI CSR\n+\t\t\t * 1 - CSR region is locked;\n+\t\t\t * 0 - it is open for write access.\n+\t\t\t */\n+\t\t\tu8  dxe_late_lock:1;\n+\t\t\tu64 rsvd:62;\n+\t\t};\n+\t};\n+};\n+\n+#define HSSI_ID_NO_HASSI\t0\n+#define HSSI_ID_PCIE_RP\t\t1\n+#define HSSI_ID_ETHERNET\t2\n+\n+struct feature_fme_bitstream_id {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu32 gitrepo_hash:32;\t/* GIT repository hash */\n+\t\t\t/*\n+\t\t\t * HSSI configuration identifier:\n+\t\t\t * 0 - No HSSI\n+\t\t\t * 1 - PCIe-RP\n+\t\t\t * 2 - Ethernet\n+\t\t\t */\n+\t\t\tu8  hssi_id:4;\n+\t\t\tu16 rsvd1:12;\t\t/* Reserved */\n+\t\t\t/* Bitstream version patch number */\n+\t\t\tu8  bs_verpatch:4;\n+\t\t\t/* Bitstream version minor number */\n+\t\t\tu8  bs_verminor:4;\n+\t\t\t/* Bitstream version major number */\n+\t\t\tu8  bs_vermajor:4;\n+\t\t\t/* Bitstream version debug number */\n+\t\t\tu8  bs_verdebug:4;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_bitstream_md {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* Seed number userd for synthesis flow */\n+\t\t\tu8  synth_seed:4;\n+\t\t\t/* Synthesis date(day number - 2 digits) */\n+\t\t\tu8  synth_day:8;\n+\t\t\t/* Synthesis date(month number - 2 digits) */\n+\t\t\tu8  synth_month:8;\n+\t\t\t/* Synthesis date(year number - 2 digits) */\n+\t\t\tu8  synth_year:8;\n+\t\t\tu64 rsvd:36;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_iommu_ctrl {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* Disables IOMMU prefetcher for C0 channel */\n+\t\t\tu8 prefetch_disableC0:1;\n+\t\t\t/* Disables IOMMU prefetcher for C1 channel */\n+\t\t\tu8 prefetch_disableC1:1;\n+\t\t\t/* Disables 
IOMMU partial cache line writes */\n+\t\t\tu8 prefetch_wrdisable:1;\n+\t\t\tu8 rsvd1:1;\t\t/* Reserved */\n+\t\t\t/*\n+\t\t\t * Select counter and read value from register\n+\t\t\t * iommu_stat.dbg_counters\n+\t\t\t * 0 - Number of 4K page translation response\n+\t\t\t * 1 - Number of 2M page translation response\n+\t\t\t * 2 - Number of 1G page translation response\n+\t\t\t */\n+\t\t\tu8 counter_sel:2;\n+\t\t\tu32 rsvd2:26;\t\t/* Reserved */\n+\t\t\t/* Connected to IOMMU SIP Capabilities */\n+\t\t\tu32 capecap_defeature;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_iommu_stat {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* Translation Enable bit from IOMMU SIP */\n+\t\t\tu8 translation_enable:1;\n+\t\t\t/* Drain request in progress */\n+\t\t\tu8 drain_req_inprog:1;\n+\t\t\t/* Invalidation current state */\n+\t\t\tu8 inv_state:3;\n+\t\t\t/* C0 Response Buffer current state */\n+\t\t\tu8 respbuffer_stateC0:3;\n+\t\t\t/* C1 Response Buffer current state */\n+\t\t\tu8 respbuffer_stateC1:3;\n+\t\t\t/* Last request ID to IOMMU SIP */\n+\t\t\tu8 last_reqID:4;\n+\t\t\t/* Last IOMMU SIP response ID value */\n+\t\t\tu8 last_respID:4;\n+\t\t\t/* Last IOMMU SIP response status value */\n+\t\t\tu8 last_respstatus:3;\n+\t\t\t/* C0 Transaction Buffer is not empty */\n+\t\t\tu8 transbuf_notEmptyC0:1;\n+\t\t\t/* C1 Transaction Buffer is not empty */\n+\t\t\tu8 transbuf_notEmptyC1:1;\n+\t\t\t/* C0 Request FIFO is not empty */\n+\t\t\tu8 reqFIFO_notemptyC0:1;\n+\t\t\t/* C1 Request FIFO is not empty */\n+\t\t\tu8 reqFIFO_notemptyC1:1;\n+\t\t\t/* C0 Response FIFO is not empty */\n+\t\t\tu8 respFIFO_notemptyC0:1;\n+\t\t\t/* C1 Response FIFO is not empty */\n+\t\t\tu8 respFIFO_notemptyC1:1;\n+\t\t\t/* C0 Response FIFO overflow detected */\n+\t\t\tu8 respFIFO_overflowC0:1;\n+\t\t\t/* C1 Response FIFO overflow detected */\n+\t\t\tu8 respFIFO_overflowC1:1;\n+\t\t\t/* C0 Transaction Buffer overflow detected */\n+\t\t\tu8 tranbuf_overflowC0:1;\n+\t\t\t/* C1 Transaction Buffer overflow 
detected */\n+\t\t\tu8 tranbuf_overflowC1:1;\n+\t\t\t/* Request FIFO overflow detected */\n+\t\t\tu8 reqFIFO_overflow:1;\n+\t\t\t/* IOMMU memory read in progress */\n+\t\t\tu8 memrd_inprog:1;\n+\t\t\t/* IOMMU memory write in progress */\n+\t\t\tu8 memwr_inprog:1;\n+\t\t\tu8 rsvd1:1;\t/* Reserved */\n+\t\t\t/* Value of counter selected by iommu_ctl.counter_sel */\n+\t\t\tu16 dbg_counters:16;\n+\t\t\tu16 rsvd2:12;\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_pcie0_ctrl {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 vtd_bar_lock:1;\t/* Lock VT-D BAR register */\n+\t\t\tu64 rsvd1:3;\n+\t\t\tu64 rciep:1;\t\t/* Configure PCIE0 as RCiEP */\n+\t\t\tu64 rsvd2:59;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_llpr_smrr_base {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 rsvd1:12;\n+\t\t\tu64 base:20;\t/* SMRR2 memory range base address */\n+\t\t\tu64 rsvd2:32;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_llpr_smrr_mask {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 rsvd1:11;\n+\t\t\tu64 valid:1;\t/* LLPR_SMRR rule is valid or not */\n+\t\t\t/*\n+\t\t\t * SMRR memory range mask which determines the range\n+\t\t\t * of region being mapped\n+\t\t\t */\n+\t\t\tu64 phys_mask:20;\n+\t\t\tu64 rsvd2:32;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_llpr_smrr2_base {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 rsvd1:12;\n+\t\t\tu64 base:20;\t/* SMRR2 memory range base address */\n+\t\t\tu64 rsvd2:32;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_llpr_smrr2_mask {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 rsvd1:11;\n+\t\t\tu64 valid:1;\t/* LLPR_SMRR2 rule is valid or not */\n+\t\t\t/*\n+\t\t\t * SMRR2 memory range mask which determines the range\n+\t\t\t * of region being mapped\n+\t\t\t */\n+\t\t\tu64 phys_mask:20;\n+\t\t\tu64 rsvd2:32;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_llpr_meseg_base {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* A[45:19] of base address memory range */\n+\t\t\tu64 
me_base:27;\n+\t\t\tu64 rsvd:37;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_llpr_meseg_limit {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* A[45:19] of limit address memory range */\n+\t\t\tu64 me_limit:27;\n+\t\t\tu64 rsvd1:4;\n+\t\t\tu64 enable:1;\t/* Enable LLPR MESEG rule */\n+\t\t\tu64 rsvd2:32;\n+\t\t};\n+\t};\n+};\n+\n+struct feature_fme_header {\n+\tstruct feature_header header;\n+\tstruct feature_afu_header afu_header;\n+\tu64 reserved;\n+\tu64 scratchpad;\n+\tstruct feature_fme_capability capability;\n+\tstruct feature_fme_port port[MAX_FPGA_PORT_NUM];\n+\tstruct feature_fme_fab_status fab_status;\n+\tstruct feature_fme_bitstream_id bitstream_id;\n+\tstruct feature_fme_bitstream_md bitstream_md;\n+\tstruct feature_fme_genprotrange2_base genprotrange2_base;\n+\tstruct feature_fme_genprotrange2_limit genprotrange2_limit;\n+\tstruct feature_fme_dxe_lock dxe_lock;\n+\tstruct feature_fme_iommu_ctrl iommu_ctrl;\n+\tstruct feature_fme_iommu_stat iommu_stat;\n+\tstruct feature_fme_pcie0_ctrl pcie0_control;\n+\tstruct feature_fme_llpr_smrr_base smrr_base;\n+\tstruct feature_fme_llpr_smrr_mask smrr_mask;\n+\tstruct feature_fme_llpr_smrr2_base smrr2_base;\n+\tstruct feature_fme_llpr_smrr2_mask smrr2_mask;\n+\tstruct feature_fme_llpr_meseg_base meseg_base;\n+\tstruct feature_fme_llpr_meseg_limit meseg_limit;\n+};\n+\n+struct feature_port_capability {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8 port_number:2;\t/* Port Number 0-3 */\n+\t\t\tu8 rsvd1:6;\t\t/* Reserved */\n+\t\t\tu16 mmio_size;\t\t/* User MMIO size in KB */\n+\t\t\tu8 rsvd2;\t\t/* Reserved */\n+\t\t\tu8 sp_intr_num:4;\t/* Supported interrupts num */\n+\t\t\tu32 rsvd3:28;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+struct feature_port_control {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8 port_sftrst:1;\t/* Port Soft Reset */\n+\t\t\tu8 rsvd1:1;\t\t/* Reserved */\n+\t\t\tu8 latency_tolerance:1;/* '1' >= 40us, '0' < 40us */\n+\t\t\tu8 rsvd2:1;\t\t/* Reserved */\n+\t\t\tu8 
port_sftrst_ack:1;\t/* HW ACK for Soft Reset */\n+\t\t\tu64 rsvd3:59;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+#define PORT_POWER_STATE_NORMAL\t\t0\n+#define PORT_POWER_STATE_AP1\t\t1\n+#define PORT_POWER_STATE_AP2\t\t2\n+#define PORT_POWER_STATE_AP6\t\t6\n+\n+struct feature_port_status {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8 port_freeze:1;\t/* '1' - freezed '0' - normal */\n+\t\t\tu8 rsvd1:7;\t\t/* Reserved */\n+\t\t\tu8 power_state:4;\t/* Power State */\n+\t\t\tu8 ap1_event:1;\t\t/* AP1 event was detected  */\n+\t\t\tu8 ap2_event:1;\t\t/* AP2 event was detected  */\n+\t\t\tu64 rsvd2:50;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* Port Header Register Set */\n+struct feature_port_header {\n+\tstruct feature_header header;\n+\tstruct feature_afu_header afu_header;\n+\tu64 port_mailbox;\n+\tu64 scratchpad;\n+\tstruct feature_port_capability capability;\n+\tstruct feature_port_control control;\n+\tstruct feature_port_status status;\n+\tu64 rsvd2;\n+\tu64 user_clk_freq_cmd0;\n+\tu64 user_clk_freq_cmd1;\n+\tu64 user_clk_freq_sts0;\n+\tu64 user_clk_freq_sts1;\n+};\n+\n+struct feature_fme_tmp_threshold {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  tmp_thshold1:7;\t  /* temperature Threshold 1 */\n+\t\t\t/* temperature Threshold 1 enable/disable */\n+\t\t\tu8  tmp_thshold1_enable:1;\n+\t\t\tu8  tmp_thshold2:7;       /* temperature Threshold 2 */\n+\t\t\t/* temperature Threshold 2 enable /disable */\n+\t\t\tu8  tmp_thshold2_enable:1;\n+\t\t\tu8  pro_hot_setpoint:7;   /* Proc Hot set point */\n+\t\t\tu8  rsvd4:1;              /* Reserved */\n+\t\t\tu8  therm_trip_thshold:7; /* Thermeal Trip Threshold */\n+\t\t\tu8  rsvd3:1;              /* Reserved */\n+\t\t\tu8  thshold1_status:1;\t  /* Threshold 1 Status */\n+\t\t\tu8  thshold2_status:1;    /* Threshold 2 Status */\n+\t\t\tu8  rsvd5:1;              /* Reserved */\n+\t\t\t/* Thermeal Trip Threshold status */\n+\t\t\tu8  therm_trip_thshold_status:1;\n+\t\t\tu8  rsvd6:4;\t\t  /* Reserved 
*/\n+\t\t\t/* Validation mode- Force Proc Hot */\n+\t\t\tu8  valmodeforce:1;\n+\t\t\t/* Validation mode - Therm trip Hot */\n+\t\t\tu8  valmodetherm:1;\n+\t\t\tu8  rsvd2:2;              /* Reserved */\n+\t\t\tu8  thshold_policy:1;     /* threshold policy */\n+\t\t\tu32 rsvd:19;              /* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* Temperature Sensor Read values format 1 */\n+struct feature_fme_temp_rdsensor_fmt1 {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* Reads out FPGA temperature in celsius */\n+\t\t\tu8  fpga_temp:7;\n+\t\t\tu8  rsvd0:1;\t\t\t/* Reserved */\n+\t\t\t/* Temperature reading sequence number */\n+\t\t\tu16 tmp_reading_seq_num;\n+\t\t\t/* Temperature reading is valid */\n+\t\t\tu8  tmp_reading_valid:1;\n+\t\t\tu8  rsvd1:7;\t\t\t/* Reserved */\n+\t\t\tu16 dbg_mode:10;\t\t/* Debug mode */\n+\t\t\tu32 rsvd2:22;\t\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* Temperature sensor read values format 2 */\n+struct feature_fme_temp_rdsensor_fmt2 {\n+\tu64 rsvd;\t/* Reserved */\n+};\n+\n+/* Temperature Threshold Capability Register */\n+struct feature_fme_tmp_threshold_cap {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* Temperature Threshold Unsupported */\n+\t\t\tu8  tmp_thshold_disabled:1;\n+\t\t\tu64 rsvd:63;\t\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* FME THERNAL FEATURE */\n+struct feature_fme_thermal {\n+\tstruct feature_header header;\n+\tstruct feature_fme_tmp_threshold threshold;\n+\tstruct feature_fme_temp_rdsensor_fmt1 rdsensor_fm1;\n+\tstruct feature_fme_temp_rdsensor_fmt2 rdsensor_fm2;\n+\tstruct feature_fme_tmp_threshold_cap threshold_cap;\n+};\n+\n+/* Power Status register */\n+struct feature_fme_pm_status {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* FPGA Power consumed, The format is to be defined */\n+\t\t\tu32 pwr_consumed:18;\n+\t\t\t/* FPGA Latency Tolerance Reporting */\n+\t\t\tu8  fpga_latency_report:1;\n+\t\t\tu64 rsvd:45;\t\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* AP Thresholds */\n+struct 
feature_fme_pm_ap_threshold {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/*\n+\t\t\t * Number of clocks (5ns period) for assertion\n+\t\t\t * of FME_data\n+\t\t\t */\n+\t\t\tu8  threshold1:7;\n+\t\t\tu8  rsvd1:1;\n+\t\t\tu8  threshold2:7;\n+\t\t\tu8  rsvd2:1;\n+\t\t\tu8  threshold1_status:1;\n+\t\t\tu8  threshold2_status:1;\n+\t\t\tu64 rsvd3:46;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* Xeon Power Limit */\n+struct feature_fme_pm_xeon_limit {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* Power limit in Watts in 12.3 format */\n+\t\t\tu16 pwr_limit:15;\n+\t\t\t/* Indicates that power limit has been written */\n+\t\t\tu8  enable:1;\n+\t\t\t/* 0 - Turbe range, 1 - Entire range */\n+\t\t\tu8  clamping:1;\n+\t\t\t/* Time constant in XXYYY format */\n+\t\t\tu8  time:7;\n+\t\t\tu64 rsvd:40;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* FPGA Power Limit */\n+struct feature_fme_pm_fpga_limit {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* Power limit in Watts in 12.3 format */\n+\t\t\tu16 pwr_limit:15;\n+\t\t\t/* Indicates that power limit has been written */\n+\t\t\tu8  enable:1;\n+\t\t\t/* 0 - Turbe range, 1 - Entire range */\n+\t\t\tu8  clamping:1;\n+\t\t\t/* Time constant in XXYYY format */\n+\t\t\tu8  time:7;\n+\t\t\tu64 rsvd:40;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* FME POWER FEATURE */\n+struct feature_fme_power {\n+\tstruct feature_header header;\n+\tstruct feature_fme_pm_status status;\n+\tstruct feature_fme_pm_ap_threshold threshold;\n+\tstruct feature_fme_pm_xeon_limit xeon_limit;\n+\tstruct feature_fme_pm_fpga_limit fpga_limit;\n+};\n+\n+#define CACHE_CHANNEL_RD\t0\n+#define CACHE_CHANNEL_WR\t1\n+\n+enum iperf_cache_events {\n+\tIPERF_CACHE_RD_HIT,\n+\tIPERF_CACHE_WR_HIT,\n+\tIPERF_CACHE_RD_MISS,\n+\tIPERF_CACHE_WR_MISS,\n+\tIPERF_CACHE_RSVD, /* reserved 
*/\n+\tIPERF_CACHE_HOLD_REQ,\n+\tIPERF_CACHE_DATA_WR_PORT_CONTEN,\n+\tIPERF_CACHE_TAG_WR_PORT_CONTEN,\n+\tIPERF_CACHE_TX_REQ_STALL,\n+\tIPERF_CACHE_RX_REQ_STALL,\n+\tIPERF_CACHE_EVICTIONS,\n+};\n+\n+/* FPMON Cache Control */\n+struct feature_fme_ifpmon_ch_ctl {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  reset_counters:1;\t/* Reset Counters */\n+\t\t\tu8  rsvd1:7;\t\t/* Reserved */\n+\t\t\tu8  freeze:1;\t\t/* Freeze if set to 1 */\n+\t\t\tu8  rsvd2:7;\t\t/* Reserved */\n+\t\t\tu8  cache_event:4;\t/* Select the cache event */\n+\t\t\tu8  cci_chsel:1;\t/* Select the channel */\n+\t\t\tu64 rsvd3:43;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* FPMON Cache Counter */\n+struct feature_fme_ifpmon_ch_ctr {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* Cache Counter for even addresse */\n+\t\t\tu64 cache_counter:48;\n+\t\t\tu16 rsvd:12;\t\t/* Reserved */\n+\t\t\t/* Cache Event being reported */\n+\t\t\tu8  event_code:4;\n+\t\t};\n+\t};\n+};\n+\n+enum iperf_fab_events {\n+\tIPERF_FAB_PCIE0_RD,\n+\tIPERF_FAB_PCIE0_WR,\n+\tIPERF_FAB_PCIE1_RD,\n+\tIPERF_FAB_PCIE1_WR,\n+\tIPERF_FAB_UPI_RD,\n+\tIPERF_FAB_UPI_WR,\n+\tIPERF_FAB_MMIO_RD,\n+\tIPERF_FAB_MMIO_WR,\n+};\n+\n+#define FAB_DISABLE_FILTER     0\n+#define FAB_ENABLE_FILTER      1\n+\n+/* FPMON FAB Control */\n+struct feature_fme_ifpmon_fab_ctl {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  reset_counters:1;\t/* Reset Counters */\n+\t\t\tu8  rsvd:7;\t\t/* Reserved */\n+\t\t\tu8  freeze:1;\t\t/* Set to 1 frozen counter */\n+\t\t\tu8  rsvd1:7;\t\t/* Reserved */\n+\t\t\tu8  fab_evtcode:4;\t/* Fabric Event Code */\n+\t\t\tu8  port_id:2;\t\t/* Port ID */\n+\t\t\tu8  rsvd2:1;\t\t/* Reserved */\n+\t\t\tu8  port_filter:1;\t/* Port Filter */\n+\t\t\tu64 rsvd3:40;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* FPMON Event Counter */\n+struct feature_fme_ifpmon_fab_ctr {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 fab_cnt:60;\t/* Fabric event counter */\n+\t\t\t/* Fabric event code being reported 
*/\n+\t\t\tu8  event_code:4;\n+\t\t};\n+\t};\n+};\n+\n+/* FPMON Clock Counter */\n+struct feature_fme_ifpmon_clk_ctr {\n+\tu64 afu_interf_clock;\t\t/* Clk_16UI (AFU clock) counter. */\n+};\n+\n+enum iperf_vtd_events {\n+\tIPERF_VTD_AFU_MEM_RD_TRANS,\n+\tIPERF_VTD_AFU_MEM_WR_TRANS,\n+\tIPERF_VTD_AFU_DEVTLB_RD_HIT,\n+\tIPERF_VTD_AFU_DEVTLB_WR_HIT,\n+\tIPERF_VTD_DEVTLB_4K_FILL,\n+\tIPERF_VTD_DEVTLB_2M_FILL,\n+\tIPERF_VTD_DEVTLB_1G_FILL,\n+};\n+\n+/* VT-d control register */\n+struct feature_fme_ifpmon_vtd_ctl {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  reset_counters:1;\t/* Reset Counters */\n+\t\t\tu8  rsvd:7;\t\t/* Reserved */\n+\t\t\tu8  freeze:1;\t\t/* Set to 1 frozen counter */\n+\t\t\tu8  rsvd1:7;\t\t/* Reserved */\n+\t\t\tu8  vtd_evtcode:4;\t/* VTd and TLB event code */\n+\t\t\tu64 rsvd2:44;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* VT-d event counter */\n+struct feature_fme_ifpmon_vtd_ctr {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 vtd_counter:48;\t/* VTd event counter */\n+\t\t\tu16 rsvd:12;\t\t/* Reserved */\n+\t\t\tu8  event_code:4;\t/* VTd event code */\n+\t\t};\n+\t};\n+};\n+\n+enum iperf_vtd_sip_events {\n+\tIPERF_VTD_SIP_IOTLB_4K_HIT,\n+\tIPERF_VTD_SIP_IOTLB_2M_HIT,\n+\tIPERF_VTD_SIP_IOTLB_1G_HIT,\n+\tIPERF_VTD_SIP_SLPWC_L3_HIT,\n+\tIPERF_VTD_SIP_SLPWC_L4_HIT,\n+\tIPERF_VTD_SIP_RCC_HIT,\n+\tIPERF_VTD_SIP_IOTLB_4K_MISS,\n+\tIPERF_VTD_SIP_IOTLB_2M_MISS,\n+\tIPERF_VTD_SIP_IOTLB_1G_MISS,\n+\tIPERF_VTD_SIP_SLPWC_L3_MISS,\n+\tIPERF_VTD_SIP_SLPWC_L4_MISS,\n+\tIPERF_VTD_SIP_RCC_MISS,\n+};\n+\n+/* VT-d SIP control register */\n+struct feature_fme_ifpmon_vtd_sip_ctl {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  reset_counters:1;\t/* Reset Counters */\n+\t\t\tu8  rsvd:7;\t\t/* Reserved */\n+\t\t\tu8  freeze:1;\t\t/* Set to 1 frozen counter */\n+\t\t\tu8  rsvd1:7;\t\t/* Reserved */\n+\t\t\tu8  vtd_evtcode:4;\t/* VTd and TLB event code */\n+\t\t\tu64 rsvd2:44;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* VT-d SIP event 
counter */\n+struct feature_fme_ifpmon_vtd_sip_ctr {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 vtd_counter:48;\t/* VTd event counter */\n+\t\t\tu16 rsvd:12;\t\t/* Reserved */\n+\t\t\tu8 event_code:4;\t/* VTd event code */\n+\t\t};\n+\t};\n+};\n+\n+/* FME IPERF FEATURE */\n+struct feature_fme_iperf {\n+\tstruct feature_header header;\n+\tstruct feature_fme_ifpmon_ch_ctl ch_ctl;\n+\tstruct feature_fme_ifpmon_ch_ctr ch_ctr0;\n+\tstruct feature_fme_ifpmon_ch_ctr ch_ctr1;\n+\tstruct feature_fme_ifpmon_fab_ctl fab_ctl;\n+\tstruct feature_fme_ifpmon_fab_ctr fab_ctr;\n+\tstruct feature_fme_ifpmon_clk_ctr clk;\n+\tstruct feature_fme_ifpmon_vtd_ctl vtd_ctl;\n+\tstruct feature_fme_ifpmon_vtd_ctr vtd_ctr;\n+\tstruct feature_fme_ifpmon_vtd_sip_ctl vtd_sip_ctl;\n+\tstruct feature_fme_ifpmon_vtd_sip_ctr vtd_sip_ctr;\n+};\n+\n+enum dperf_fab_events {\n+\tDPERF_FAB_PCIE0_RD,\n+\tDPERF_FAB_PCIE0_WR,\n+\tDPERF_FAB_MMIO_RD = 6,\n+\tDPERF_FAB_MMIO_WR,\n+};\n+\n+/* FPMON FAB Control */\n+struct feature_fme_dfpmon_fab_ctl {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  reset_counters:1;\t/* Reset Counters */\n+\t\t\tu8  rsvd:7;\t\t/* Reserved */\n+\t\t\tu8  freeze:1;\t\t/* Set to 1 frozen counter */\n+\t\t\tu8  rsvd1:7;\t\t/* Reserved */\n+\t\t\tu8  fab_evtcode:4;\t/* Fabric Event Code */\n+\t\t\tu8  port_id:2;\t\t/* Port ID */\n+\t\t\tu8  rsvd2:1;\t\t/* Reserved */\n+\t\t\tu8  port_filter:1;\t/* Port Filter */\n+\t\t\tu64 rsvd3:40;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* FPMON Event Counter */\n+struct feature_fme_dfpmon_fab_ctr {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 fab_cnt:60;\t/* Fabric event counter */\n+\t\t\t/* Fabric event code being reported */\n+\t\t\tu8  event_code:4;\n+\t\t};\n+\t};\n+};\n+\n+/* FPMON Clock Counter */\n+struct feature_fme_dfpmon_clk_ctr {\n+\tu64 afu_interf_clock;\t\t/* Clk_16UI (AFU clock) counter. 
*/\n+};\n+\n+/* FME DPERF FEATURE */\n+struct feature_fme_dperf {\n+\tstruct feature_header header;\n+\tu64 rsvd[3];\n+\tstruct feature_fme_dfpmon_fab_ctl fab_ctl;\n+\tstruct feature_fme_dfpmon_fab_ctr fab_ctr;\n+\tstruct feature_fme_dfpmon_clk_ctr clk;\n+};\n+\n+struct feature_fme_error0 {\n+#define FME_ERROR0_MASK        0xFFUL\n+#define FME_ERROR0_MASK_DEFAULT 0x40UL  /* pcode workaround */\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  fabric_err:1;\t/* Fabric error */\n+\t\t\tu8  fabfifo_overflow:1;\t/* Fabric fifo overflow */\n+\t\t\tu8  kticdc_parity_err:2;/* KTI CDC Parity Error */\n+\t\t\tu8  iommu_parity_err:1;\t/* IOMMU Parity error */\n+\t\t\t/* AFU PF/VF access mismatch detected */\n+\t\t\tu8  afu_acc_mode_err:1;\n+\t\t\tu8  mbp_err:1;\t\t/* Indicates an MBP event */\n+\t\t\t/* PCIE0 CDC Parity Error */\n+\t\t\tu8  pcie0cdc_parity_err:5;\n+\t\t\t/* PCIE1 CDC Parity Error */\n+\t\t\tu8  pcie1cdc_parity_err:5;\n+\t\t\t/* CVL CDC Parity Error */\n+\t\t\tu8  cvlcdc_parity_err:3;\n+\t\t\tu64 rsvd:44;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* PCIe0 Error Status register */\n+struct feature_fme_pcie0_error {\n+#define FME_PCIE0_ERROR_MASK   0xFFUL\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  formattype_err:1;\t/* TLP format/type error */\n+\t\t\tu8  MWAddr_err:1;\t/* TLP MW address error */\n+\t\t\tu8  MWAddrLength_err:1;\t/* TLP MW length error */\n+\t\t\tu8  MRAddr_err:1;\t/* TLP MR address error */\n+\t\t\tu8  MRAddrLength_err:1;\t/* TLP MR length error */\n+\t\t\tu8  cpl_tag_err:1;\t/* TLP CPL tag error */\n+\t\t\tu8  cpl_status_err:1;\t/* TLP CPL status error */\n+\t\t\tu8  cpl_timeout_err:1;\t/* TLP CPL timeout */\n+\t\t\tu8  cci_parity_err:1;\t/* CCI bridge parity error */\n+\t\t\tu8  rxpoison_tlp_err:1;\t/* Received a TLP with EP set */\n+\t\t\tu64 rsvd:52;\t\t/* Reserved */\n+\t\t\tu8  vfnumb_err:1;\t/* Number of error VF */\n+\t\t\tu8  funct_type_err:1;\t/* Virtual (1) or Physical */\n+\t\t};\n+\t};\n+};\n+\n+/* PCIe1 Error 
Status register */\n+struct feature_fme_pcie1_error {\n+#define FME_PCIE1_ERROR_MASK   0xFFUL\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  formattype_err:1;\t/* TLP format/type error */\n+\t\t\tu8  MWAddr_err:1;\t/* TLP MW address error */\n+\t\t\tu8  MWAddrLength_err:1;\t/* TLP MW length error */\n+\t\t\tu8  MRAddr_err:1;\t/* TLP MR address error */\n+\t\t\tu8  MRAddrLength_err:1;\t/* TLP MR length error */\n+\t\t\tu8  cpl_tag_err:1;\t/* TLP CPL tag error */\n+\t\t\tu8  cpl_status_err:1;\t/* TLP CPL status error */\n+\t\t\tu8  cpl_timeout_err:1;\t/* TLP CPL timeout */\n+\t\t\tu8  cci_parity_err:1;\t/* CCI bridge parity error */\n+\t\t\tu8  rxpoison_tlp_err:1;\t/* Received a TLP with EP set */\n+\t\t\tu64 rsvd:54;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* FME First Error register */\n+struct feature_fme_first_error {\n+#define FME_FIRST_ERROR_MASK   ((1UL << 60) - 1)\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/*\n+\t\t\t * Indicates the Error Register that was\n+\t\t\t * triggered first\n+\t\t\t */\n+\t\t\tu64 err_reg_status:60;\n+\t\t\t/*\n+\t\t\t * Holds 60 LSBs from the Error register that was\n+\t\t\t * triggered first\n+\t\t\t */\n+\t\t\tu8 errReg_id:4;\n+\t\t};\n+\t};\n+};\n+\n+/* FME Next Error register */\n+struct feature_fme_next_error {\n+#define FME_NEXT_ERROR_MASK    ((1UL << 60) - 1)\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/*\n+\t\t\t * Indicates the Error Register that was\n+\t\t\t * triggered second\n+\t\t\t */\n+\t\t\tu64 err_reg_status:60;\n+\t\t\t/*\n+\t\t\t * Holds 60 LSBs from the Error register that was\n+\t\t\t * triggered second\n+\t\t\t */\n+\t\t\tu8  errReg_id:4;\n+\t\t};\n+\t};\n+};\n+\n+/* RAS Non Fatal Error Status register */\n+struct feature_fme_ras_nonfaterror {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* thremal threshold AP1 */\n+\t\t\tu8  temp_thresh_ap1:1;\n+\t\t\t/* thremal threshold AP2 */\n+\t\t\tu8  temp_thresh_ap2:1;\n+\t\t\tu8  pcie_error:1;\t/* pcie Error */\n+\t\t\tu8  
portfatal_error:1;\t/* port fatal error */\n+\t\t\tu8  proc_hot:1;\t\t/* Indicates a ProcHot event */\n+\t\t\t/* Indicates an AFU PF/VF access mismatch */\n+\t\t\tu8  afu_acc_mode_err:1;\n+\t\t\t/* Injected nonfata Error */\n+\t\t\tu8  injected_nonfata_err:1;\n+\t\t\tu8  rsvd1:2;\n+\t\t\t/* Temperature threshold triggered AP6*/\n+\t\t\tu8  temp_thresh_AP6:1;\n+\t\t\t/* Power threshold triggered AP1 */\n+\t\t\tu8  power_thresh_AP1:1;\n+\t\t\t/* Power threshold triggered AP2 */\n+\t\t\tu8  power_thresh_AP2:1;\n+\t\t\t/* Indicates a MBP event */\n+\t\t\tu8  mbp_err:1;\n+\t\t\tu64 rsvd2:51;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* RAS Catastrophic Fatal Error Status register */\n+struct feature_fme_ras_catfaterror {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* KTI Link layer error detected */\n+\t\t\tu8  ktilink_fatal_err:1;\n+\t\t\t/* tag-n-cache error detected */\n+\t\t\tu8  tagcch_fatal_err:1;\n+\t\t\t/* CCI error detected */\n+\t\t\tu8  cci_fatal_err:1;\n+\t\t\t/* KTI Protocol error detected */\n+\t\t\tu8  ktiprpto_fatal_err:1;\n+\t\t\t/* Fatal DRAM error detected */\n+\t\t\tu8  dram_fatal_err:1;\n+\t\t\t/* IOMMU detected */\n+\t\t\tu8  iommu_fatal_err:1;\n+\t\t\t/* Fabric Fatal Error */\n+\t\t\tu8  fabric_fatal_err:1;\n+\t\t\t/* PCIe possion Error */\n+\t\t\tu8  pcie_poison_err:1;\n+\t\t\t/* Injected fatal Error */\n+\t\t\tu8  inject_fata_err:1;\n+\t\t\t/* Catastrophic CRC Error */\n+\t\t\tu8  crc_catast_err:1;\n+\t\t\t/* Catastrophic Thermal Error */\n+\t\t\tu8  therm_catast_err:1;\n+\t\t\t/* Injected Catastrophic Error */\n+\t\t\tu8  injected_catast_err:1;\n+\t\t\tu64 rsvd:52;\n+\t\t};\n+\t};\n+};\n+\n+/* RAS Error injection register */\n+struct feature_fme_ras_error_inj {\n+#define FME_RAS_ERROR_INJ_MASK      0x7UL\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  catast_error:1;\t/* Catastrophic error flag */\n+\t\t\tu8  fatal_error:1;\t/* Fatal error flag */\n+\t\t\tu8  nonfatal_error:1;\t/* NonFatal error flag */\n+\t\t\tu64 rsvd:61;\t\t/* 
Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* FME error capabilities */\n+struct feature_fme_error_capability {\n+\tunion {\n+\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8 support_intr:1;\n+\t\t\t/* MSI-X vector table entry number */\n+\t\t\tu16 intr_vector_num:12;\n+\t\t\tu64 rsvd:51;\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* FME ERR FEATURE */\n+struct feature_fme_err {\n+\tstruct feature_header header;\n+\tstruct feature_fme_error0 fme_err_mask;\n+\tstruct feature_fme_error0 fme_err;\n+\tstruct feature_fme_pcie0_error pcie0_err_mask;\n+\tstruct feature_fme_pcie0_error pcie0_err;\n+\tstruct feature_fme_pcie1_error pcie1_err_mask;\n+\tstruct feature_fme_pcie1_error pcie1_err;\n+\tstruct feature_fme_first_error fme_first_err;\n+\tstruct feature_fme_next_error fme_next_err;\n+\tstruct feature_fme_ras_nonfaterror ras_nonfat_mask;\n+\tstruct feature_fme_ras_nonfaterror ras_nonfaterr;\n+\tstruct feature_fme_ras_catfaterror ras_catfat_mask;\n+\tstruct feature_fme_ras_catfaterror ras_catfaterr;\n+\tstruct feature_fme_ras_error_inj ras_error_inj;\n+\tstruct feature_fme_error_capability fme_err_capability;\n+};\n+\n+/* FME Partial Reconfiguration Control */\n+struct feature_fme_pr_ctl {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  pr_reset:1;\t\t/* Reset PR Engine */\n+\t\t\tu8  rsvd3:3;\t\t/* Reserved */\n+\t\t\tu8  pr_reset_ack:1;\t/* Reset PR Engine Ack */\n+\t\t\tu8  rsvd4:3;\t\t/* Reserved */\n+\t\t\tu8  pr_regionid:2;\t/* PR Region ID */\n+\t\t\tu8  rsvd1:2;\t\t/* Reserved */\n+\t\t\tu8  pr_start_req:1;\t/* PR Start Request */\n+\t\t\tu8  pr_push_complete:1;\t/* PR Data push complete */\n+\t\t\tu8  pr_kind:1;\t\t/* PR Data push complete */\n+\t\t\tu32 rsvd:17;\t\t/* Reserved */\n+\t\t\tu32 config_data;\t/* Config data TBD */\n+\t\t};\n+\t};\n+};\n+\n+/* FME Partial Reconfiguration Status */\n+struct feature_fme_pr_status {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu16 pr_credit:9;\t/* PR Credits */\n+\t\t\tu8  rsvd2:7;\t\t/* Reserved */\n+\t\t\tu8  
pr_status:1;\t/* PR status */\n+\t\t\tu8  rsvd:3;\t\t/* Reserved */\n+\t\t\t/* Altra PR Controller Block status */\n+\t\t\tu8  pr_controller_status:3;\n+\t\t\tu8  rsvd1:1;            /* Reserved */\n+\t\t\tu8  pr_host_status:4;   /* PR Host status */\n+\t\t\tu8  rsvd3:4;\t\t/* Reserved */\n+\t\t\t/* Security Block Status fields (TBD) */\n+\t\t\tu32 security_bstatus;\n+\t\t};\n+\t};\n+};\n+\n+/* FME Partial Reconfiguration Data */\n+struct feature_fme_pr_data {\n+\tunion {\n+\t\tu64 csr;\t/* PR data from the raw-binary file */\n+\t\tstruct {\n+\t\t\t/* PR data from the raw-binary file */\n+\t\t\tu32 pr_data_raw;\n+\t\t\tu32 rsvd;\n+\t\t};\n+\t};\n+};\n+\n+/* FME PR Public Key */\n+struct feature_fme_pr_key {\n+\tu64 key;\t\t/* FME PR Public Hash */\n+};\n+\n+/* FME PR FEATURE */\n+struct feature_fme_pr {\n+\tstruct feature_header header;\n+\t/*Partial Reconfiguration control */\n+\tstruct feature_fme_pr_ctl\tccip_fme_pr_control;\n+\n+\t/* Partial Reconfiguration Status */\n+\tstruct feature_fme_pr_status\tccip_fme_pr_status;\n+\n+\t/* Partial Reconfiguration data */\n+\tstruct feature_fme_pr_data\tccip_fme_pr_data;\n+\n+\t/* Partial Reconfiguration data */\n+\tu64\t\t\t\tccip_fme_pr_err;\n+\n+\tu64 rsvd1[3];\n+\n+\t/* Partial Reconfiguration data registers */\n+\tu64 fme_pr_data1;\n+\tu64 fme_pr_data2;\n+\tu64 fme_pr_data3;\n+\tu64 fme_pr_data4;\n+\tu64 fme_pr_data5;\n+\tu64 fme_pr_data6;\n+\tu64 fme_pr_data7;\n+\tu64 fme_pr_data8;\n+\n+\tu64 rsvd2[5];\n+\n+\t/* PR Interface ID */\n+\tu64 fme_pr_intfc_id_l;\n+\tu64 fme_pr_intfc_id_h;\n+\n+\t/* MSIX filed to be Added */\n+};\n+\n+/* FME HSSI Control */\n+struct feature_fme_hssi_eth_ctrl {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu32 data:32;\t\t/* HSSI data */\n+\t\t\tu16 address:16;\t\t/* HSSI address */\n+\t\t\t/*\n+\t\t\t * HSSI comamnd\n+\t\t\t * 0x0 - No request\n+\t\t\t * 0x08 - SW register RD request\n+\t\t\t * 0x10 - SW register WR request\n+\t\t\t * 0x40 - Auxiliar bus RD request\n+\t\t\t * 0x80 - 
Auxiliar bus WR request\n+\t\t\t */\n+\t\t\tu16 cmd:16;\n+\t\t};\n+\t};\n+};\n+\n+/* FME HSSI Status */\n+struct feature_fme_hssi_eth_stat {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu32 data:32;\t\t/* HSSI data */\n+\t\t\tu8  acknowledge:1;\t/* HSSI acknowledge */\n+\t\t\tu8  spare:1;\t\t/* HSSI spare */\n+\t\t\tu32 rsvd:30;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* FME HSSI FEATURE */\n+struct feature_fme_hssi {\n+\tstruct feature_header header;\n+\tstruct feature_fme_hssi_eth_ctrl\thssi_control;\n+\tstruct feature_fme_hssi_eth_stat\thssi_status;\n+};\n+\n+#define PORT_ERR_MASK\t\t0xfff0703ff001f\n+struct feature_port_err_key {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* Tx Channel0: Overflow */\n+\t\t\tu8 tx_ch0_overflow:1;\n+\t\t\t/* Tx Channel0: Invalid request encoding */\n+\t\t\tu8 tx_ch0_invaldreq :1;\n+\t\t\t/* Tx Channel0: Request with cl_len=3 not supported */\n+\t\t\tu8 tx_ch0_cl_len3:1;\n+\t\t\t/* Tx Channel0: Request with cl_len=2 not aligned 2CL */\n+\t\t\tu8 tx_ch0_cl_len2:1;\n+\t\t\t/* Tx Channel0: Request with cl_len=4 not aligned 4CL */\n+\t\t\tu8 tx_ch0_cl_len4:1;\n+\n+\t\t\tu16 rsvd1:4;\t\t\t/* Reserved */\n+\n+\t\t\t/* AFU MMIO RD received while PORT is in reset */\n+\t\t\tu8 mmio_rd_whilerst:1;\n+\t\t\t/* AFU MMIO WR received while PORT is in reset */\n+\t\t\tu8 mmio_wr_whilerst:1;\n+\n+\t\t\tu16 rsvd2:5;\t\t\t/* Reserved */\n+\n+\t\t\t/* Tx Channel1: Overflow */\n+\t\t\tu8 tx_ch1_overflow:1;\n+\t\t\t/* Tx Channel1: Invalid request encoding */\n+\t\t\tu8 tx_ch1_invaldreq:1;\n+\t\t\t/* Tx Channel1: Request with cl_len=3 not supported */\n+\t\t\tu8 tx_ch1_cl_len3:1;\n+\t\t\t/* Tx Channel1: Request with cl_len=2 not aligned 2CL */\n+\t\t\tu8 tx_ch1_cl_len2:1;\n+\t\t\t/* Tx Channel1: Request with cl_len=4 not aligned 4CL */\n+\t\t\tu8 tx_ch1_cl_len4:1;\n+\n+\t\t\t/* Tx Channel1: Insufficient data payload */\n+\t\t\tu8 tx_ch1_insuff_data:1;\n+\t\t\t/* Tx Channel1: Data payload overrun */\n+\t\t\tu8 
tx_ch1_data_overrun:1;\n+\t\t\t/* Tx Channel1 : Incorrect address */\n+\t\t\tu8 tx_ch1_incorr_addr:1;\n+\t\t\t/* Tx Channel1 : NON-Zero SOP Detected */\n+\t\t\tu8 tx_ch1_nzsop:1;\n+\t\t\t/* Tx Channel1 : Illegal VC_SEL, atomic request VLO */\n+\t\t\tu8 tx_ch1_illegal_vcsel:1;\n+\n+\t\t\tu8 rsvd3:6;\t\t\t/* Reserved */\n+\n+\t\t\t/* MMIO Read Timeout in AFU */\n+\t\t\tu8 mmioread_timeout:1;\n+\n+\t\t\t/* Tx Channel2: FIFO Overflow */\n+\t\t\tu8 tx_ch2_fifo_overflow:1;\n+\n+\t\t\t/* MMIO read is not matching pending request */\n+\t\t\tu8 unexp_mmio_resp:1;\n+\n+\t\t\tu8 rsvd4:5;\t\t\t/* Reserved */\n+\n+\t\t\t/* Number of pending Requests: counter overflow */\n+\t\t\tu8 tx_req_counter_overflow:1;\n+\t\t\t/* Req with Address violating SMM Range */\n+\t\t\tu8 llpr_smrr_err:1;\n+\t\t\t/* Req with Address violating second SMM Range */\n+\t\t\tu8 llpr_smrr2_err:1;\n+\t\t\t/* Req with Address violating ME Stolen message */\n+\t\t\tu8 llpr_mesg_err:1;\n+\t\t\t/* Req with Address violating Generic Protected Range */\n+\t\t\tu8 genprot_range_err:1;\n+\t\t\t/* Req with Address violating Legacy Range low */\n+\t\t\tu8 legrange_low_err:1;\n+\t\t\t/* Req with Address violating Legacy Range High */\n+\t\t\tu8 legrange_high_err:1;\n+\t\t\t/* Req with Address violating VGA memory range */\n+\t\t\tu8 vgmem_range_err:1;\n+\t\t\tu8 page_fault_err:1;\t\t/* Page fault */\n+\t\t\tu8 pmr_err:1;\t\t\t/* PMR Error */\n+\t\t\tu8 ap6_event:1;\t\t\t/* AP6 event */\n+\t\t\t/* VF FLR detected on Port with PF access control */\n+\t\t\tu8 vfflr_access_err:1;\n+\t\t\tu16 rsvd5:12;\t\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* Port first error register, not contain all error bits in error register. 
*/\n+struct feature_port_first_err_key {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8 tx_ch0_overflow:1;\n+\t\t\tu8 tx_ch0_invaldreq :1;\n+\t\t\tu8 tx_ch0_cl_len3:1;\n+\t\t\tu8 tx_ch0_cl_len2:1;\n+\t\t\tu8 tx_ch0_cl_len4:1;\n+\t\t\tu8 rsvd1:4;\t\t\t/* Reserved */\n+\t\t\tu8 mmio_rd_whilerst:1;\n+\t\t\tu8 mmio_wr_whilerst:1;\n+\t\t\tu8 rsvd2:5;\t\t\t/* Reserved */\n+\t\t\tu8 tx_ch1_overflow:1;\n+\t\t\tu8 tx_ch1_invaldreq:1;\n+\t\t\tu8 tx_ch1_cl_len3:1;\n+\t\t\tu8 tx_ch1_cl_len2:1;\n+\t\t\tu8 tx_ch1_cl_len4:1;\n+\t\t\tu8 tx_ch1_insuff_data:1;\n+\t\t\tu8 tx_ch1_data_overrun:1;\n+\t\t\tu8 tx_ch1_incorr_addr:1;\n+\t\t\tu8 tx_ch1_nzsop:1;\n+\t\t\tu8 tx_ch1_illegal_vcsel:1;\n+\t\t\tu8 rsvd3:6;\t\t\t/* Reserved */\n+\t\t\tu8 mmioread_timeout:1;\n+\t\t\tu8 tx_ch2_fifo_overflow:1;\n+\t\t\tu8 rsvd4:6;\t\t\t/* Reserved */\n+\t\t\tu8 tx_req_counter_overflow:1;\n+\t\t\tu32 rsvd5:23;\t\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* Port malformed Req0 */\n+struct feature_port_malformed_req0 {\n+\tu64 header_lsb;\n+};\n+\n+/* Port malformed Req1 */\n+struct feature_port_malformed_req1 {\n+\tu64 header_msb;\n+};\n+\n+/* Port debug register */\n+struct feature_port_debug {\n+\tu64 port_debug;\n+};\n+\n+/* Port error capabilities */\n+struct feature_port_err_capability {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu8  support_intr:1;\n+\t\t\t/* MSI-X vector table entry number */\n+\t\t\tu16 intr_vector_num:12;\n+\t\t\tu64 rsvd:51;            /* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* PORT FEATURE ERROR */\n+struct feature_port_error {\n+\tstruct feature_header header;\n+\tstruct feature_port_err_key error_mask;\n+\tstruct feature_port_err_key port_error;\n+\tstruct feature_port_first_err_key port_first_error;\n+\tstruct feature_port_malformed_req0 malreq0;\n+\tstruct feature_port_malformed_req1 malreq1;\n+\tstruct feature_port_debug port_debug;\n+\tstruct feature_port_err_capability error_capability;\n+};\n+\n+/* Port UMSG Capability */\n+struct feature_port_umsg_cap 
{\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* Number of umsg allocated to this port */\n+\t\t\tu8 umsg_allocated;\n+\t\t\t/* Enable / Disable UMsg engine for this port */\n+\t\t\tu8 umsg_enable:1;\n+\t\t\t/* Usmg initialization status */\n+\t\t\tu8 umsg_init_complete:1;\n+\t\t\t/* IOMMU can not translate the umsg base address */\n+\t\t\tu8 umsg_trans_error:1;\n+\t\t\tu64 rsvd:53;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* Port UMSG base address */\n+struct feature_port_umsg_baseaddr {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu64 base_addr:48;\t/* 48 bit physical address */\n+\t\t\tu16 rsvd;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+struct feature_port_umsg_mode {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu32 umsg_hint_enable;\t/* UMSG hint enable/disable */\n+\t\t\tu32 rsvd;\t\t/* Reserved */\n+\t\t};\n+\t};\n+};\n+\n+/* PORT FEATURE UMSG */\n+struct feature_port_umsg {\n+\tstruct feature_header header;\n+\tstruct feature_port_umsg_cap capability;\n+\tstruct feature_port_umsg_baseaddr baseaddr;\n+\tstruct feature_port_umsg_mode mode;\n+};\n+\n+#define UMSG_EN_POLL_INVL 10 /* us */\n+#define UMSG_EN_POLL_TIMEOUT 1000 /* us */\n+\n+/* Port UINT Capability */\n+struct feature_port_uint_cap {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\tu16 intr_num:12;\t/* Supported interrupts num */\n+\t\t\t/* First MSI-X vector table entry number */\n+\t\t\tu16 first_vec_num:12;\n+\t\t\tu64 rsvd:40;\n+\t\t};\n+\t};\n+};\n+\n+/* PORT FEATURE UINT */\n+struct feature_port_uint {\n+\tstruct feature_header header;\n+\tstruct feature_port_uint_cap capability;\n+};\n+\n+/* STP region supports mmap operation, so use page aligned size. 
*/\n+#define PORT_FEATURE_STP_REGION_SIZE PAGE_ALIGN(sizeof(struct feature_port_stp))\n+\n+/* Port STP status register (for debug only)*/\n+struct feature_port_stp_status {\n+\tunion {\n+\t\tu64 csr;\n+\t\tstruct {\n+\t\t\t/* SLD Hub end-point read/write timeout */\n+\t\t\tu8 sld_ep_timeout:1;\n+\t\t\t/* Remote STP in reset/disable */\n+\t\t\tu8 rstp_disabled:1;\n+\t\t\tu8 unsupported_read:1;\n+\t\t\t/* MMIO timeout detected and faked with a response */\n+\t\t\tu8 mmio_timeout:1;\n+\t\t\tu8 txfifo_count:4;\n+\t\t\tu8 rxfifo_count:4;\n+\t\t\tu8 txfifo_overflow:1;\n+\t\t\tu8 txfifo_underflow:1;\n+\t\t\tu8 rxfifo_overflow:1;\n+\t\t\tu8 rxfifo_underflow:1;\n+\t\t\t/* Number of MMIO write requests */\n+\t\t\tu16 write_requests;\n+\t\t\t/* Number of MMIO read requests */\n+\t\t\tu16 read_requests;\n+\t\t\t/* Number of MMIO read responses */\n+\t\t\tu16 read_responses;\n+\t\t};\n+\t};\n+};\n+\n+/*\n+ * PORT FEATURE STP\n+ * Most registers in STP region are not touched by driver, but mmapped to user\n+ * space. 
So they are not defined in below data structure, as its actual size\n+ * is 0x18c per spec.\n+ */\n+struct feature_port_stp {\n+\tstruct feature_header header;\n+\tstruct feature_port_stp_status stp_status;\n+};\n+\n+/**\n+ * enum fpga_pr_states - fpga PR states\n+ * @FPGA_PR_STATE_UNKNOWN: can't determine state\n+ * @FPGA_PR_STATE_WRITE_INIT: preparing FPGA for programming\n+ * @FPGA_PR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage\n+ * @FPGA_PR_STATE_WRITE: writing image to FPGA\n+ * @FPGA_PR_STATE_WRITE_ERR: Error while writing FPGA\n+ * @FPGA_PR_STATE_WRITE_COMPLETE: Doing post programming steps\n+ * @FPGA_PR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE\n+ * @FPGA_PR_STATE_OPERATING: FPGA PR done\n+ */\n+enum fpga_pr_states {\n+\t/* canot determine state states */\n+\tFPGA_PR_STATE_UNKNOWN,\n+\n+\t/* write sequence: init, write, complete */\n+\tFPGA_PR_STATE_WRITE_INIT,\n+\tFPGA_PR_STATE_WRITE_INIT_ERR,\n+\tFPGA_PR_STATE_WRITE,\n+\tFPGA_PR_STATE_WRITE_ERR,\n+\tFPGA_PR_STATE_WRITE_COMPLETE,\n+\tFPGA_PR_STATE_WRITE_COMPLETE_ERR,\n+\n+\t/* FPGA PR done */\n+\tFPGA_PR_STATE_DONE,\n+};\n+\n+/*\n+ * FPGA Manager flags\n+ * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported\n+ */\n+#define FPGA_MGR_PARTIAL_RECONFIG\tBIT(0)\n+\n+/**\n+ * struct fpga_pr_info - specific information to a FPGA PR\n+ * @flags: boolean flags as defined above\n+ * @pr_err: PR error code\n+ * @state: fpga manager state\n+ * @port_id: port id\n+ */\n+struct fpga_pr_info {\n+\tu32 flags;\n+\tu64 pr_err;\n+\tenum fpga_pr_states state;\n+\tint port_id;\n+};\n+\n+#define DEFINE_FPGA_PR_ERR_MSG(_name_)\t\t\t\\\n+static const char * const _name_[] = {\t\t\t\\\n+\t\"PR operation error detected\",\t\t\t\\\n+\t\"PR CRC error detected\",\t\t\t\\\n+\t\"PR incompatiable bitstream error detected\",\t\\\n+\t\"PR IP protocol error detected\",\t\t\\\n+\t\"PR FIFO overflow error detected\",\t\t\\\n+\t\"PR timeout error detected\",\t\t\t\\\n+\t\"PR secure load error 
detected\",\t\t\\\n+}\n+\n+#define RST_POLL_INVL 10 /* us */\n+#define RST_POLL_TIMEOUT 1000 /* us */\n+\n+#define PR_WAIT_TIMEOUT   15000000\n+\n+#define PR_HOST_STATUS_IDLE\t0\n+#define PR_MAX_ERR_NUM\t7\n+\n+DEFINE_FPGA_PR_ERR_MSG(pr_err_msg);\n+\n+/*\n+ * green bitstream header must be byte-packed to match the\n+ * real file format.\n+ */\n+struct bts_header {\n+\tu64 guid_h;\n+\tu64 guid_l;\n+\tu32 metadata_len;\n+};\n+\n+#define GBS_GUID_H\t\t0x414750466e6f6558\n+#define GBS_GUID_L\t\t0x31303076534247b7\n+#define is_valid_bts(bts_hdr)\t\t\t\t\\\n+\t(((bts_hdr)->guid_h == GBS_GUID_H) &&\t\t\\\n+\t((bts_hdr)->guid_l == GBS_GUID_L))\n+\n+#endif /* _BASE_IFPGA_DEFINES_H_ */\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c\nnew file mode 100644\nindex 0000000..10a8f06\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c\n@@ -0,0 +1,824 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include \"opae_hw_api.h\"\n+#include \"ifpga_api.h\"\n+\n+#include \"ifpga_hw.h\"\n+#include \"ifpga_enumerate.h\"\n+#include \"ifpga_feature_dev.h\"\n+\n+struct build_feature_devs_info {\n+\tstruct opae_adapter_data_pci *pci_data;\n+\n+\tstruct ifpga_afu_info *acc_info;\n+\n+\tvoid *fiu;\n+\tenum fpga_id_type current_type;\n+\tint current_port_id;\n+\n+\tvoid *ioaddr;\n+\tvoid *ioend;\n+\tuint64_t phys_addr;\n+\tint current_bar;\n+\n+\tvoid *pfme_hdr;\n+\n+\tstruct ifpga_hw *hw;\n+};\n+\n+struct feature_info {\n+\tconst char *name;\n+\tuint32_t resource_size;\n+\tint feature_index;\n+\tint revision_id;\n+\tunsigned int vec_start;\n+\tunsigned int vec_cnt;\n+\n+\tstruct feature_ops *ops;\n+};\n+\n+/* indexed by fme feature IDs which are defined in 'enum fme_feature_id'. 
*/\n+static struct feature_info fme_features[] = {\n+\t{\n+\t\t.name = FME_FEATURE_HEADER,\n+\t\t.resource_size = sizeof(struct feature_fme_header),\n+\t\t.feature_index = FME_FEATURE_ID_HEADER,\n+\t\t.revision_id = FME_HEADER_REVISION,\n+\t\t.ops = &fme_hdr_ops,\n+\t},\n+\t{\n+\t\t.name = FME_FEATURE_THERMAL_MGMT,\n+\t\t.resource_size = sizeof(struct feature_fme_thermal),\n+\t\t.feature_index = FME_FEATURE_ID_THERMAL_MGMT,\n+\t\t.revision_id = FME_THERMAL_MGMT_REVISION,\n+\t\t.ops = &fme_thermal_mgmt_ops,\n+\t},\n+\t{\n+\t\t.name = FME_FEATURE_POWER_MGMT,\n+\t\t.resource_size = sizeof(struct feature_fme_power),\n+\t\t.feature_index = FME_FEATURE_ID_POWER_MGMT,\n+\t\t.revision_id = FME_POWER_MGMT_REVISION,\n+\t\t.ops = &fme_power_mgmt_ops,\n+\t},\n+\t{\n+\t\t.name = FME_FEATURE_GLOBAL_IPERF,\n+\t\t.resource_size = sizeof(struct feature_fme_iperf),\n+\t\t.feature_index = FME_FEATURE_ID_GLOBAL_IPERF,\n+\t\t.revision_id = FME_GLOBAL_IPERF_REVISION,\n+\t\t.ops = &fme_global_iperf_ops,\n+\t},\n+\t{\n+\t\t.name = FME_FEATURE_GLOBAL_ERR,\n+\t\t.resource_size = sizeof(struct feature_fme_err),\n+\t\t.feature_index = FME_FEATURE_ID_GLOBAL_ERR,\n+\t\t.revision_id = FME_GLOBAL_ERR_REVISION,\n+\t\t.ops = &fme_global_err_ops,\n+\t},\n+\t{\n+\t\t.name = FME_FEATURE_PR_MGMT,\n+\t\t.resource_size = sizeof(struct feature_fme_pr),\n+\t\t.feature_index = FME_FEATURE_ID_PR_MGMT,\n+\t\t.revision_id = FME_PR_MGMT_REVISION,\n+\t\t.ops = &fme_pr_mgmt_ops,\n+\t},\n+\t{\n+\t\t.name = FME_FEATURE_HSSI_ETH,\n+\t\t.resource_size = sizeof(struct feature_fme_hssi),\n+\t\t.feature_index = FME_FEATURE_ID_HSSI_ETH,\n+\t\t.revision_id = FME_HSSI_ETH_REVISION\n+\t},\n+\t{\n+\t\t.name = FME_FEATURE_GLOBAL_DPERF,\n+\t\t.resource_size = sizeof(struct feature_fme_dperf),\n+\t\t.feature_index = FME_FEATURE_ID_GLOBAL_DPERF,\n+\t\t.revision_id = FME_GLOBAL_DPERF_REVISION,\n+\t\t.ops = &fme_global_dperf_ops,\n+\t}\n+};\n+\n+static struct feature_info port_features[] = {\n+\t{\n+\t\t.name = 
PORT_FEATURE_HEADER,\n+\t\t.resource_size = sizeof(struct feature_port_header),\n+\t\t.feature_index = PORT_FEATURE_ID_HEADER,\n+\t\t.revision_id = PORT_HEADER_REVISION,\n+\t\t.ops = &port_hdr_ops,\n+\t},\n+\t{\n+\t\t.name = PORT_FEATURE_ERR,\n+\t\t.resource_size = sizeof(struct feature_port_error),\n+\t\t.feature_index = PORT_FEATURE_ID_ERROR,\n+\t\t.revision_id = PORT_ERR_REVISION,\n+\t\t.ops = &port_error_ops,\n+\t},\n+\t{\n+\t\t.name = PORT_FEATURE_UMSG,\n+\t\t.resource_size = sizeof(struct feature_port_umsg),\n+\t\t.feature_index = PORT_FEATURE_ID_UMSG,\n+\t\t.revision_id = PORT_UMSG_REVISION,\n+\t},\n+\t{\n+\t\t.name = PORT_FEATURE_UINT,\n+\t\t.resource_size = sizeof(struct feature_port_uint),\n+\t\t.feature_index = PORT_FEATURE_ID_UINT,\n+\t\t.revision_id = PORT_UINT_REVISION,\n+\t\t.ops = &port_uint_ops,\n+\t},\n+\t{\n+\t\t.name = PORT_FEATURE_STP,\n+\t\t.resource_size = PORT_FEATURE_STP_REGION_SIZE,\n+\t\t.feature_index = PORT_FEATURE_ID_STP,\n+\t\t.revision_id = PORT_STP_REVISION,\n+\t\t.ops = &port_stp_ops,\n+\t},\n+\t{\n+\t\t.name = PORT_FEATURE_UAFU,\n+\t\t/* UAFU feature size should be read from PORT_CAP.MMIOSIZE.\n+\t\t * Will set uafu feature size while parse port device.\n+\t\t */\n+\t\t.resource_size = 0,\n+\t\t.feature_index = PORT_FEATURE_ID_UAFU,\n+\t\t.revision_id = PORT_UAFU_REVISION\n+\t},\n+};\n+\n+static u64 feature_id(void __iomem *start)\n+{\n+\tstruct feature_header header;\n+\n+\theader.csr = readq(start);\n+\n+\tswitch (header.type) {\n+\tcase FEATURE_TYPE_FIU:\n+\t\treturn FEATURE_ID_HEADER;\n+\tcase FEATURE_TYPE_PRIVATE:\n+\t\treturn header.id;\n+\tcase FEATURE_TYPE_AFU:\n+\t\treturn FEATURE_ID_AFU;\n+\t}\n+\n+\tWARN_ON(1);\n+\treturn 0;\n+}\n+\n+static int\n+build_info_add_sub_feature(struct build_feature_devs_info *binfo,\n+\t\t\t   struct feature_info *finfo, void __iomem *start)\n+{\n+\tstruct ifpga_hw *hw = binfo->hw;\n+\tstruct feature *feature = NULL;\n+\tint feature_idx = finfo->feature_index;\n+\tunsigned int vec_start = 
finfo->vec_start;\n+\tunsigned int vec_cnt = finfo->vec_cnt;\n+\tstruct feature_irq_ctx *ctx = NULL;\n+\tint port_id, ret = 0;\n+\tunsigned int i;\n+\n+\tif (binfo->current_type == FME_ID) {\n+\t\tfeature = &hw->fme.sub_feature[feature_idx];\n+\t\tfeature->parent = &hw->fme;\n+\t} else if (binfo->current_type == PORT_ID) {\n+\t\tport_id = binfo->current_port_id;\n+\t\tfeature = &hw->port[port_id].sub_feature[feature_idx];\n+\t\tfeature->parent = &hw->port[port_id];\n+\t} else {\n+\t\treturn -EFAULT;\n+\t}\n+\n+\tfeature->state = IFPGA_FEATURE_ATTACHED;\n+\tfeature->addr = start;\n+\tfeature->id = feature_id(start);\n+\tfeature->size = finfo->resource_size;\n+\tfeature->name = finfo->name;\n+\tfeature->revision = finfo->revision_id;\n+\tfeature->ops = finfo->ops;\n+\tfeature->phys_addr = binfo->phys_addr +\n+\t\t\t\t((u8 *)start - (u8 *)binfo->ioaddr);\n+\n+\tif (vec_cnt) {\n+\t\tif (vec_start + vec_cnt <= vec_start)\n+\t\t\treturn -EINVAL;\n+\n+\t\tctx = zmalloc(sizeof(*ctx) * vec_cnt);\n+\t\tif (!ctx)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tfor (i = 0; i < vec_cnt; i++) {\n+\t\t\tctx[i].eventfd = -1;\n+\t\t\tctx[i].idx = vec_start + i;\n+\t\t}\n+\t}\n+\n+\tfeature->ctx = ctx;\n+\tfeature->ctx_num = vec_cnt;\n+\tfeature->vfio_dev_fd = binfo->pci_data->vfio_dev_fd;\n+\n+\treturn ret;\n+}\n+\n+static int\n+create_feature_instance(struct build_feature_devs_info *binfo,\n+\t\t\tvoid __iomem *start, struct feature_info *finfo)\n+{\n+\tstruct feature_header *hdr = start;\n+\n+\tif (((u8 *)binfo->ioend - (u8 *)start) < finfo->resource_size)\n+\t\treturn -EINVAL;\n+\n+\tif (finfo->revision_id != SKIP_REVISION_CHECK &&\n+\t    hdr->revision > finfo->revision_id) {\n+\t\tdev_err(binfo, \"feature %s revision :default:%x, now at:%x, mis-match.\\n\",\n+\t\t\tfinfo->name, finfo->revision_id, hdr->revision);\n+\t}\n+\n+\treturn build_info_add_sub_feature(binfo, finfo, start);\n+}\n+\n+/*\n+ * UAFU GUID is dynamic as it can be changed after FME downloads different\n+ * Green Bitstream to 
the port, so we treat the unknown GUIDs which are\n+ * attached on port's feature list as UAFU.\n+ */\n+static bool feature_is_UAFU(struct build_feature_devs_info *binfo)\n+{\n+\tif (binfo->current_type != PORT_ID)\n+\t\treturn false;\n+\n+\treturn true;\n+}\n+\n+static int parse_feature_port_uafu(struct build_feature_devs_info *binfo,\n+\t\t\t\t   struct feature_header *hdr)\n+{\n+\tenum port_feature_id id = PORT_FEATURE_ID_UAFU;\n+\tstruct ifpga_afu_info *info;\n+\tvoid *start = (void *)hdr;\n+\tint ret;\n+\n+\tif (port_features[id].resource_size) {\n+\t\tret = create_feature_instance(binfo, hdr, &port_features[id]);\n+\t} else {\n+\t\tdev_err(binfo, \"the uafu feature header is mis-configured.\\n\");\n+\t\tret = -EINVAL;\n+\t}\n+\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* FIXME: need to figure out a better name */\n+\tinfo = malloc(sizeof(*info));\n+\tif (!info)\n+\t\treturn -ENOMEM;\n+\n+\tinfo->region[0].addr = start;\n+\tinfo->region[0].phys_addr = binfo->phys_addr +\n+\t\t\t(uint8_t *)start - (uint8_t *)binfo->ioaddr;\n+\tinfo->region[0].len = port_features[id].resource_size;\n+\tport_features[id].resource_size = 0;\n+\tinfo->num_regions = 1;\n+\n+\tbinfo->acc_info = info;\n+\n+\treturn ret;\n+}\n+\n+static int parse_feature_afus(struct build_feature_devs_info *binfo,\n+\t\t\t      struct feature_header *hdr)\n+{\n+\tint ret;\n+\tstruct feature_afu_header *afu_hdr, header;\n+\tu8 __iomem *start;\n+\tu8 __iomem *end = binfo->ioend;\n+\n+\tstart = (u8 __iomem *)hdr;\n+\tfor (; start < end; start += header.next_afu) {\n+\t\tif (end - start <\n+\t\t\t(unsigned int)(sizeof(*afu_hdr) + sizeof(*hdr)))\n+\t\t\treturn -EINVAL;\n+\n+\t\thdr = (struct feature_header *)start;\n+\t\tafu_hdr = (struct feature_afu_header *)(hdr + 1);\n+\t\theader.csr = readq(&afu_hdr->csr);\n+\n+\t\tif (feature_is_UAFU(binfo)) {\n+\t\t\tret = parse_feature_port_uafu(binfo, hdr);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t}\n+\n+\t\tif (!header.next_afu)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn 
0;\n+}\n+\n+/* create and register proper private data */\n+static int build_info_commit_dev(struct build_feature_devs_info *binfo)\n+{\n+\tstruct ifpga_afu_info *info = binfo->acc_info;\n+\tstruct ifpga_hw *hw = binfo->hw;\n+\tstruct opae_manager *mgr;\n+\tstruct opae_bridge *br;\n+\tstruct opae_accelerator *acc;\n+\n+\tif (!binfo->fiu)\n+\t\treturn 0;\n+\n+\tif (binfo->current_type == PORT_ID) {\n+\t\t/* return error if no valid acc info data structure */\n+\t\tif (!info)\n+\t\t\treturn -EFAULT;\n+\n+\t\tbr = opae_bridge_alloc(hw->adapter->name, &ifpga_br_ops,\n+\t\t\t\t       binfo->fiu);\n+\t\tif (!br)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tbr->id = binfo->current_port_id;\n+\n+\t\t/* update irq info */\n+\t\tinfo->num_irqs = port_features[PORT_FEATURE_ID_UINT].vec_cnt;\n+\n+\t\tacc = opae_accelerator_alloc(hw->adapter->name,\n+\t\t\t\t\t     &ifpga_acc_ops, info);\n+\t\tif (!acc) {\n+\t\t\topae_bridge_free(br);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tacc->br = br;\n+\t\tacc->index = br->id;\n+\n+\t\topae_adapter_add_acc(hw->adapter, acc);\n+\n+\t} else if (binfo->current_type == FME_ID) {\n+\t\tmgr = opae_manager_alloc(hw->adapter->name, &ifpga_mgr_ops,\n+\t\t\t\t\t binfo->fiu);\n+\t\tif (!mgr)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tmgr->adapter = hw->adapter;\n+\t\thw->adapter->mgr = mgr;\n+\t}\n+\n+\tbinfo->fiu = NULL;\n+\n+\treturn 0;\n+}\n+\n+static int\n+build_info_create_dev(struct build_feature_devs_info *binfo,\n+\t\t      enum fpga_id_type type, unsigned int index)\n+{\n+\tint ret;\n+\n+\tret = build_info_commit_dev(binfo);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tbinfo->current_type = type;\n+\n+\tif (type == FME_ID) {\n+\t\tbinfo->fiu = &binfo->hw->fme;\n+\t} else if (type == PORT_ID) {\n+\t\tbinfo->fiu = &binfo->hw->port[index];\n+\t\tbinfo->current_port_id = index;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int parse_feature_fme(struct build_feature_devs_info *binfo,\n+\t\t\t     struct feature_header *start)\n+{\n+\tstruct ifpga_hw *hw = binfo->hw;\n+\tstruct 
ifpga_fme_hw *fme = &hw->fme;\n+\tint ret;\n+\n+\tret = build_info_create_dev(binfo, FME_ID, 0);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Update FME states */\n+\tfme->state = IFPGA_FME_IMPLEMENTED;\n+\tfme->parent = hw;\n+\tspinlock_init(&fme->lock);\n+\n+\treturn create_feature_instance(binfo, start,\n+\t\t\t\t       &fme_features[FME_FEATURE_ID_HEADER]);\n+}\n+\n+static int parse_feature_port(struct build_feature_devs_info *binfo,\n+\t\t\t      void __iomem *start)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_capability capability;\n+\tstruct ifpga_hw *hw = binfo->hw;\n+\tstruct ifpga_port_hw *port;\n+\tunsigned int port_id;\n+\tint ret;\n+\n+\t/* Get current port's id */\n+\tport_hdr = (struct feature_port_header *)start;\n+\tcapability.csr = readq(&port_hdr->capability);\n+\tport_id = capability.port_number;\n+\n+\tret = build_info_create_dev(binfo, PORT_ID, port_id);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/*found a Port device*/\n+\tport = &hw->port[port_id];\n+\tport->port_id = binfo->current_port_id;\n+\tport->parent = hw;\n+\tport->state = IFPGA_PORT_ATTACHED;\n+\tspinlock_init(&port->lock);\n+\n+\treturn create_feature_instance(binfo, start,\n+\t\t\t\t      &port_features[PORT_FEATURE_ID_HEADER]);\n+}\n+\n+static void enable_port_uafu(struct build_feature_devs_info *binfo,\n+\t\t\t     void __iomem *start)\n+{\n+\tenum port_feature_id id = PORT_FEATURE_ID_UAFU;\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_capability capability;\n+\tstruct ifpga_port_hw *port = &binfo->hw->port[binfo->current_port_id];\n+\n+\tport_hdr = (struct feature_port_header *)start;\n+\tcapability.csr = readq(&port_hdr->capability);\n+\tport_features[id].resource_size = (capability.mmio_size << 10);\n+\n+\t/*\n+\t * From spec, to Enable UAFU, we should reset related port,\n+\t * or the whole mmio space in this UAFU will be invalid\n+\t */\n+\tif (port_features[id].resource_size)\n+\t\tfpga_port_reset(port);\n+}\n+\n+static int 
parse_feature_fiu(struct build_feature_devs_info *binfo,\n+\t\t\t     struct feature_header *hdr)\n+{\n+\tstruct feature_header header;\n+\tstruct feature_fiu_header *fiu_hdr, fiu_header;\n+\tu8 __iomem *start = (u8 __iomem *)hdr;\n+\tint ret;\n+\n+\theader.csr = readq(hdr);\n+\n+\tswitch (header.id) {\n+\tcase FEATURE_FIU_ID_FME:\n+\t\tret = parse_feature_fme(binfo, hdr);\n+\t\tbinfo->pfme_hdr = hdr;\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t\tbreak;\n+\tcase FEATURE_FIU_ID_PORT:\n+\t\tret = parse_feature_port(binfo, hdr);\n+\t\tenable_port_uafu(binfo, hdr);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\t/* Check Port FIU's next_afu pointer to User AFU DFH */\n+\t\tfiu_hdr = (struct feature_fiu_header *)(hdr + 1);\n+\t\tfiu_header.csr = readq(&fiu_hdr->csr);\n+\n+\t\tif (fiu_header.next_afu) {\n+\t\t\tstart += fiu_header.next_afu;\n+\t\t\tret = parse_feature_afus(binfo,\n+\t\t\t\t\t\t(struct feature_header *)start);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t} else {\n+\t\t\tdev_info(binfo, \"No AFUs detected on Port\\n\");\n+\t\t}\n+\n+\t\tbreak;\n+\tdefault:\n+\t\tdev_info(binfo, \"FIU TYPE %d is not supported yet.\\n\",\n+\t\t\t header.id);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void parse_feature_irqs(struct build_feature_devs_info *binfo,\n+\t\t\t       void __iomem *start, struct feature_info *finfo)\n+{\n+\tfinfo->vec_start = 0;\n+\tfinfo->vec_cnt = 0;\n+\n+\tUNUSED(binfo);\n+\n+\tif (!strcmp(finfo->name, PORT_FEATURE_UINT)) {\n+\t\tstruct feature_port_uint *port_uint = start;\n+\t\tstruct feature_port_uint_cap uint_cap;\n+\n+\t\tuint_cap.csr = readq(&port_uint->capability);\n+\t\tif (uint_cap.intr_num) {\n+\t\t\tfinfo->vec_start = uint_cap.first_vec_num;\n+\t\t\tfinfo->vec_cnt = uint_cap.intr_num;\n+\t\t} else {\n+\t\t\tdev_debug(binfo, \"UAFU doesn't support interrupt\\n\");\n+\t\t}\n+\t} else if (!strcmp(finfo->name, PORT_FEATURE_ERR)) {\n+\t\tstruct feature_port_error *port_err = start;\n+\t\tstruct feature_port_err_capability 
port_err_cap;\n+\n+\t\tport_err_cap.csr = readq(&port_err->error_capability);\n+\t\tif (port_err_cap.support_intr) {\n+\t\t\tfinfo->vec_start = port_err_cap.intr_vector_num;\n+\t\t\tfinfo->vec_cnt = 1;\n+\t\t} else {\n+\t\t\tdev_debug(&binfo, \"Port error doesn't support interrupt\\n\");\n+\t\t}\n+\n+\t} else if (!strcmp(finfo->name, FME_FEATURE_GLOBAL_ERR)) {\n+\t\tstruct feature_fme_err *fme_err = start;\n+\t\tstruct feature_fme_error_capability fme_err_cap;\n+\n+\t\tfme_err_cap.csr = readq(&fme_err->fme_err_capability);\n+\t\tif (fme_err_cap.support_intr) {\n+\t\t\tfinfo->vec_start = fme_err_cap.intr_vector_num;\n+\t\t\tfinfo->vec_cnt = 1;\n+\t\t} else {\n+\t\t\tdev_debug(&binfo, \"FME error doesn't support interrupt\\n\");\n+\t\t}\n+\t}\n+}\n+\n+static int parse_feature_fme_private(struct build_feature_devs_info *binfo,\n+\t\t\t\t     struct feature_header *hdr)\n+{\n+\tstruct feature_header header;\n+\n+\theader.csr = readq(hdr);\n+\n+\tif (header.id >= ARRAY_SIZE(fme_features)) {\n+\t\tdev_err(binfo, \"FME feature id %x is not supported yet.\\n\",\n+\t\t\theader.id);\n+\t\treturn 0;\n+\t}\n+\n+\tparse_feature_irqs(binfo, hdr, &fme_features[header.id]);\n+\n+\treturn create_feature_instance(binfo, hdr, &fme_features[header.id]);\n+}\n+\n+static int parse_feature_port_private(struct build_feature_devs_info *binfo,\n+\t\t\t\t      struct feature_header *hdr)\n+{\n+\tstruct feature_header header;\n+\tenum port_feature_id id;\n+\n+\theader.csr = readq(hdr);\n+\t/*\n+\t * the region of port feature id is [0x10, 0x13], + 1 to reserve 0\n+\t * which is dedicated for port-hdr.\n+\t */\n+\tid = (header.id & 0x000f) + 1;\n+\n+\tif (id >= ARRAY_SIZE(port_features)) {\n+\t\tdev_err(binfo, \"Port feature id %x is not supported yet.\\n\",\n+\t\t\theader.id);\n+\t\treturn 0;\n+\t}\n+\n+\tparse_feature_irqs(binfo, hdr, &port_features[id]);\n+\n+\treturn create_feature_instance(binfo, hdr, &port_features[id]);\n+}\n+\n+static int parse_feature_private(struct 
build_feature_devs_info *binfo,\n+\t\t\t\t struct feature_header *hdr)\n+{\n+\tstruct feature_header header;\n+\n+\theader.csr = readq(hdr);\n+\n+\tswitch (binfo->current_type) {\n+\tcase FME_ID:\n+\t\treturn parse_feature_fme_private(binfo, hdr);\n+\tcase PORT_ID:\n+\t\treturn parse_feature_port_private(binfo, hdr);\n+\tdefault:\n+\t\tdev_err(binfo, \"private feature %x belonging to AFU %d (unknown_type) is not supported yet.\\n\",\n+\t\t\theader.id, binfo->current_type);\n+\t}\n+\treturn 0;\n+}\n+\n+static int parse_feature(struct build_feature_devs_info *binfo,\n+\t\t\t struct feature_header *hdr)\n+{\n+\tstruct feature_header header;\n+\tint ret = 0;\n+\n+\theader.csr = readq(hdr);\n+\n+\tswitch (header.type) {\n+\tcase FEATURE_TYPE_AFU:\n+\t\tret = parse_feature_afus(binfo, hdr);\n+\t\tbreak;\n+\tcase FEATURE_TYPE_PRIVATE:\n+\t\tret = parse_feature_private(binfo, hdr);\n+\t\tbreak;\n+\tcase FEATURE_TYPE_FIU:\n+\t\tret = parse_feature_fiu(binfo, hdr);\n+\t\tbreak;\n+\tdefault:\n+\t\tdev_err(binfo, \"Feature Type %x is not supported.\\n\",\n+\t\t\thdr->type);\n+\t};\n+\n+\treturn ret;\n+}\n+\n+static int\n+parse_feature_list(struct build_feature_devs_info *binfo, u8 __iomem *start)\n+{\n+\tstruct feature_header *hdr, header;\n+\tu8 __iomem *end = (u8 __iomem *)binfo->ioend;\n+\tint ret = 0;\n+\n+\tfor (; start < end; start += header.next_header_offset) {\n+\t\tif (end - start < (unsigned int)sizeof(*hdr)) {\n+\t\t\tdev_err(binfo, \"The region is too small to contain a feature.\\n\");\n+\t\t\tret =  -EINVAL;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\thdr = (struct feature_header *)start;\n+\t\tret = parse_feature(binfo, hdr);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\theader.csr = readq(hdr);\n+\t\tif (!header.next_header_offset)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn build_info_commit_dev(binfo);\n+}\n+\n+/* switch the memory mapping to BAR# @bar */\n+static int parse_switch_to(struct build_feature_devs_info *binfo, int bar)\n+{\n+\tstruct opae_adapter_data_pci *pci_data = 
binfo->pci_data;\n+\n+\tif (!pci_data->region[bar].addr)\n+\t\treturn -ENOMEM;\n+\n+\tbinfo->ioaddr = pci_data->region[bar].addr;\n+\tbinfo->ioend = (u8 __iomem *)binfo->ioaddr + pci_data->region[bar].len;\n+\tbinfo->phys_addr = pci_data->region[bar].phys_addr;\n+\tbinfo->current_bar = bar;\n+\n+\treturn 0;\n+}\n+\n+static int parse_ports_from_fme(struct build_feature_devs_info *binfo)\n+{\n+\tstruct feature_fme_header *fme_hdr;\n+\tstruct feature_fme_port port;\n+\tint i = 0, ret = 0;\n+\n+\tif (!binfo->pfme_hdr) {\n+\t\tdev_info(binfo,  \"VF is detected.\\n\");\n+\t\treturn ret;\n+\t}\n+\n+\tfme_hdr = binfo->pfme_hdr;\n+\n+\tdo {\n+\t\tport.csr = readq(&fme_hdr->port[i]);\n+\t\tif (!port.port_implemented)\n+\t\t\tbreak;\n+\n+\t\t/* skip port which only could be accessed via VF */\n+\t\tif (port.afu_access_control == FME_AFU_ACCESS_VF)\n+\t\t\tcontinue;\n+\n+\t\tret = parse_switch_to(binfo, port.port_bar);\n+\t\tif (ret)\n+\t\t\tbreak;\n+\n+\t\tret = parse_feature_list(binfo,\n+\t\t\t\t\t (u8 __iomem *)binfo->ioaddr +\n+\t\t\t\t\t  port.port_offset);\n+\t\tif (ret)\n+\t\t\tbreak;\n+\t} while (++i < MAX_FPGA_PORT_NUM);\n+\n+\treturn ret;\n+}\n+\n+static struct build_feature_devs_info *\n+build_info_alloc_and_init(struct ifpga_hw *hw)\n+{\n+\tstruct build_feature_devs_info *binfo;\n+\n+\tbinfo = zmalloc(sizeof(*binfo));\n+\tif (!binfo)\n+\t\treturn binfo;\n+\n+\tbinfo->hw = hw;\n+\tbinfo->pci_data = hw->pci_data;\n+\n+\t/* fpga feature list starts from BAR 0 */\n+\tif (parse_switch_to(binfo, 0)) {\n+\t\tfree(binfo);\n+\t\treturn NULL;\n+\t}\n+\n+\treturn binfo;\n+}\n+\n+static void build_info_free(struct build_feature_devs_info *binfo)\n+{\n+\tfree(binfo);\n+}\n+\n+static void ifpga_print_device_feature_list(struct ifpga_hw *hw)\n+{\n+\tstruct ifpga_fme_hw *fme = &hw->fme;\n+\tstruct ifpga_port_hw *port;\n+\tstruct feature *feature;\n+\tint i, j;\n+\n+\tdev_info(hw, \"found fme_device, is in PF: %s\\n\",\n+\t\t is_ifpga_hw_pf(hw) ? 
\"yes\" : \"no\");\n+\n+\tfor (i = 0; i < FME_FEATURE_ID_MAX; i++) {\n+\t\tfeature = &fme->sub_feature[i];\n+\t\tif (feature->state != IFPGA_FEATURE_ATTACHED)\n+\t\t\tcontinue;\n+\n+\t\tdev_info(hw, \"%12s:\t0x%llx - 0x%llx  - paddr: 0x%lx\\n\",\n+\t\t\t feature->name, (unsigned long long)feature->addr,\n+\t\t\t (unsigned long long)feature->addr + feature->size - 1,\n+\t\t\t feature->phys_addr);\n+\t}\n+\n+\tfor (i = 0; i < MAX_FPGA_PORT_NUM; i++) {\n+\t\tport = &hw->port[i];\n+\n+\t\tif (port->state != IFPGA_PORT_ATTACHED)\n+\t\t\tcontinue;\n+\n+\t\tdev_info(hw, \"port device: %d\\n\", port->port_id);\n+\n+\t\tfor (j = 0; j < PORT_FEATURE_ID_MAX; j++) {\n+\t\t\tfeature = &port->sub_feature[j];\n+\t\t\tif (feature->state != IFPGA_FEATURE_ATTACHED)\n+\t\t\t\tcontinue;\n+\n+\t\t\tdev_info(hw, \"%12s:\t0x%llx - 0x%llx  - paddr:0x%lx\\n\",\n+\t\t\t\t feature->name,\n+\t\t\t\t (unsigned long long)feature->addr,\n+\t\t\t\t (unsigned long long)feature->addr +\n+\t\t\t\t feature->size - 1,\n+\t\t\t\t feature->phys_addr);\n+\t\t}\n+\t}\n+}\n+\n+int ifpga_bus_enumerate(struct ifpga_hw *hw)\n+{\n+\tstruct build_feature_devs_info *binfo;\n+\tint ret;\n+\n+\tbinfo = build_info_alloc_and_init(hw);\n+\tif (!binfo)\n+\t\treturn -ENOMEM;\n+\n+\tret = parse_feature_list(binfo, binfo->ioaddr);\n+\tif (ret)\n+\t\tgoto exit;\n+\n+\tret = parse_ports_from_fme(binfo);\n+\tif (ret)\n+\t\tgoto exit;\n+\n+\tifpga_print_device_feature_list(hw);\n+\n+exit:\n+\tbuild_info_free(binfo);\n+\treturn ret;\n+}\n+\n+int ifpga_bus_init(struct ifpga_hw *hw)\n+{\n+\tint i;\n+\n+\tfme_hw_init(&hw->fme);\n+\tfor (i = 0; i < MAX_FPGA_PORT_NUM; i++)\n+\t\tport_hw_init(&hw->port[i]);\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h\nnew file mode 100644\nindex 0000000..14131e3\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h\n@@ -0,0 +1,11 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 
2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _IFPGA_ENUMERATE_H_\n+#define _IFPGA_ENUMERATE_H_\n+\n+int ifpga_bus_init(struct ifpga_hw *hw);\n+int ifpga_bus_enumerate(struct ifpga_hw *hw);\n+\n+#endif /* _IFPGA_ENUMERATE_H_ */\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c\nnew file mode 100644\nindex 0000000..6d14523\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c\n@@ -0,0 +1,314 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include <linux/vfio.h>\n+#include <sys/ioctl.h>\n+\n+#include \"ifpga_feature_dev.h\"\n+\n+/*\n+ * Enable Port by clear the port soft reset bit, which is set by default.\n+ * The AFU is unable to respond to any MMIO access while in reset.\n+ * __fpga_port_enable function should only be used after __fpga_port_disable\n+ * function.\n+ */\n+void __fpga_port_enable(struct ifpga_port_hw *port)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_control control;\n+\n+\tWARN_ON(!port->disable_count);\n+\n+\tif (--port->disable_count != 0)\n+\t\treturn;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\tWARN_ON(!port_hdr);\n+\n+\tcontrol.csr = readq(&port_hdr->control);\n+\tcontrol.port_sftrst = 0x0;\n+\twriteq(control.csr, &port_hdr->control);\n+}\n+\n+int __fpga_port_disable(struct ifpga_port_hw *port)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_control control;\n+\n+\tif (port->disable_count++ != 0)\n+\t\treturn 0;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\tWARN_ON(!port_hdr);\n+\n+\t/* Set port soft reset */\n+\tcontrol.csr = readq(&port_hdr->control);\n+\tcontrol.port_sftrst = 0x1;\n+\twriteq(control.csr, &port_hdr->control);\n+\n+\t/*\n+\t * HW sets ack bit to 1 when all outstanding requests have been drained\n+\t * on this port and 
minimum soft reset pulse width has elapsed.\n+\t * Driver polls port_soft_reset_ack to determine if reset done by HW.\n+\t */\n+\tcontrol.port_sftrst_ack = 1;\n+\n+\tif (fpga_wait_register_field(port_sftrst_ack, control,\n+\t\t\t\t     &port_hdr->control, RST_POLL_TIMEOUT,\n+\t\t\t\t     RST_POLL_INVL)) {\n+\t\tdev_err(port, \"timeout, fail to reset device\\n\");\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tu64 guidl, guidh;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);\n+\n+\tspinlock_lock(&port->lock);\n+\tguidl = readq(&port_hdr->afu_header.guid.b[0]);\n+\tguidh = readq(&port_hdr->afu_header.guid.b[8]);\n+\tspinlock_unlock(&port->lock);\n+\n+\tprintf(\"%s: guidl=0x%lx, guidh=0x%lx\\n\", __func__, guidl, guidh);\n+\n+\tmemcpy(uuid->b, &guidl, sizeof(u64));\n+\tmemcpy(uuid->b + 8, &guidh, sizeof(u64));\n+\n+\treturn 0;\n+}\n+\n+/* Mask / Unmask Port Errors by the Error Mask register. */\n+void port_err_mask(struct ifpga_port_hw *port, bool mask)\n+{\n+\tstruct feature_port_error *port_err;\n+\tstruct feature_port_err_key err_mask;\n+\n+\tport_err = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_ERROR);\n+\n+\tif (mask)\n+\t\terr_mask.csr = PORT_ERR_MASK;\n+\telse\n+\t\terr_mask.csr = 0;\n+\n+\twriteq(err_mask.csr, &port_err->error_mask);\n+}\n+\n+/* Clear All Port Errors. 
*/\n+int port_err_clear(struct ifpga_port_hw *port, u64 err)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_error *port_err;\n+\tstruct feature_port_err_key mask;\n+\tstruct feature_port_first_err_key first;\n+\tstruct feature_port_status status;\n+\tint ret = 0;\n+\n+\tport_err = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_ERROR);\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\t/*\n+\t * Clear All Port Errors\n+\t *\n+\t * - Check for AP6 State\n+\t * - Halt Port by keeping Port in reset\n+\t * - Set PORT Error mask to all 1 to mask errors\n+\t * - Clear all errors\n+\t * - Set Port mask to all 0 to enable errors\n+\t * - All errors start capturing new errors\n+\t * - Enable Port by pulling the port out of reset\n+\t */\n+\n+\t/* If device is still in AP6 state, can not clear any error.*/\n+\tstatus.csr = readq(&port_hdr->status);\n+\tif (status.power_state == PORT_POWER_STATE_AP6) {\n+\t\tdev_err(dev, \"Could not clear errors, device in AP6 state.\\n\");\n+\t\treturn -EBUSY;\n+\t}\n+\n+\t/* Halt Port by keeping Port in reset */\n+\tret = __fpga_port_disable(port);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t/* Mask all errors */\n+\tport_err_mask(port, true);\n+\n+\t/* Clear errors if err input matches with current port errors.*/\n+\tmask.csr = readq(&port_err->port_error);\n+\n+\tif (mask.csr == err) {\n+\t\twriteq(mask.csr, &port_err->port_error);\n+\n+\t\tfirst.csr = readq(&port_err->port_first_error);\n+\t\twriteq(first.csr, &port_err->port_first_error);\n+\t} else {\n+\t\tret = -EBUSY;\n+\t}\n+\n+\t/* Clear mask */\n+\tport_err_mask(port, false);\n+\n+\t/* Enable the Port by clear the reset */\n+\t__fpga_port_enable(port);\n+\n+\treturn ret;\n+}\n+\n+int port_clear_error(struct ifpga_port_hw *port)\n+{\n+\tstruct feature_port_error *port_err;\n+\tstruct feature_port_err_key error;\n+\n+\tport_err = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    
PORT_FEATURE_ID_ERROR);\n+\terror.csr = readq(&port_err->port_error);\n+\n+\tdev_info(port, \"read port error: 0x%lx\\n\", error.csr);\n+\n+\treturn port_err_clear(port, error.csr);\n+}\n+\n+void fme_hw_uinit(struct ifpga_fme_hw *fme)\n+{\n+\tstruct feature *feature;\n+\tint i;\n+\n+\tif (fme->state != IFPGA_FME_IMPLEMENTED)\n+\t\treturn;\n+\n+\tfor (i = 0; i < FME_FEATURE_ID_MAX; i++) {\n+\t\tfeature = &fme->sub_feature[i];\n+\t\tif (feature->state == IFPGA_FEATURE_ATTACHED &&\n+\t\t    feature->ops && feature->ops->uinit)\n+\t\t\tfeature->ops->uinit(feature);\n+\t}\n+}\n+\n+int fme_hw_init(struct ifpga_fme_hw *fme)\n+{\n+\tstruct feature *feature;\n+\tint i, ret;\n+\n+\tif (fme->state != IFPGA_FME_IMPLEMENTED)\n+\t\treturn -EINVAL;\n+\n+\tfor (i = 0; i < FME_FEATURE_ID_MAX; i++) {\n+\t\tfeature = &fme->sub_feature[i];\n+\t\tif (feature->state == IFPGA_FEATURE_ATTACHED &&\n+\t\t    feature->ops && feature->ops->init) {\n+\t\t\tret = feature->ops->init(feature);\n+\t\t\tif (ret) {\n+\t\t\t\tfme_hw_uinit(fme);\n+\t\t\t\treturn ret;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+void port_hw_uinit(struct ifpga_port_hw *port)\n+{\n+\tstruct feature *feature;\n+\tint i;\n+\n+\tfor (i = 0; i < PORT_FEATURE_ID_MAX; i++) {\n+\t\tfeature = &port->sub_feature[i];\n+\t\tif (feature->state == IFPGA_FEATURE_ATTACHED &&\n+\t\t    feature->ops && feature->ops->uinit)\n+\t\t\tfeature->ops->uinit(feature);\n+\t}\n+}\n+\n+int port_hw_init(struct ifpga_port_hw *port)\n+{\n+\tstruct feature *feature;\n+\tint i, ret;\n+\n+\tif (port->state == IFPGA_PORT_UNUSED)\n+\t\treturn 0;\n+\n+\tfor (i = 0; i < PORT_FEATURE_ID_MAX; i++) {\n+\t\tfeature = &port->sub_feature[i];\n+\t\tif (feature->ops && feature->ops->init) {\n+\t\t\tret = feature->ops->init(feature);\n+\t\t\tif (ret) {\n+\t\t\t\tport_hw_uinit(port);\n+\t\t\t\treturn ret;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * FIXME: we should get msix vec count during pci enumeration instead of\n+ * below hardcode value.\n+ 
*/\n+#define FPGA_MSIX_VEC_COUNT\t20\n+/* irq set buffer length for interrupt */\n+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \\\n+\t\t\t\tsizeof(int) * FPGA_MSIX_VEC_COUNT)\n+\n+/* only support msix for now*/\n+static int vfio_msix_enable_block(s32 vfio_dev_fd, unsigned int vec_start,\n+\t\t\t\t  unsigned int count, s32 *fds)\n+{\n+\tchar irq_set_buf[MSIX_IRQ_SET_BUF_LEN];\n+\tstruct vfio_irq_set *irq_set;\n+\tint len, ret;\n+\tint *fd_ptr;\n+\n+\tlen = sizeof(irq_set_buf);\n+\n+\tirq_set = (struct vfio_irq_set *)irq_set_buf;\n+\tirq_set->argsz = len;\n+\tirq_set->count = count;\n+\tirq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |\n+\t\t\t\tVFIO_IRQ_SET_ACTION_TRIGGER;\n+\tirq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;\n+\tirq_set->start = vec_start;\n+\n+\tfd_ptr = (int *)&irq_set->data;\n+\tmemcpy(fd_ptr, fds, sizeof(int) * count);\n+\n+\tret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);\n+\tif (ret)\n+\t\tprintf(\"Error enabling MSI-X interrupts\\n\");\n+\n+\treturn ret;\n+}\n+\n+int fpga_msix_set_block(struct feature *feature, unsigned int start,\n+\t\t\tunsigned int count, s32 *fds)\n+{\n+\tstruct feature_irq_ctx *ctx = feature->ctx;\n+\tunsigned int i;\n+\tint ret;\n+\n+\tif (start >= feature->ctx_num || start + count > feature->ctx_num)\n+\t\treturn -EINVAL;\n+\n+\t/* assume that each feature has continuous vector space in msix*/\n+\tret = vfio_msix_enable_block(feature->vfio_dev_fd,\n+\t\t\t\t     ctx[start].idx, count, fds);\n+\tif (!ret) {\n+\t\tfor (i = 0; i < count; i++)\n+\t\t\tctx[i].eventfd = fds[i];\n+\t}\n+\n+\treturn ret;\n+}\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h\nnew file mode 100644\nindex 0000000..cd114fb\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h\n@@ -0,0 +1,164 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _IFPGA_FEATURE_DEV_H_\n+#define 
_IFPGA_FEATURE_DEV_H_\n+\n+#include \"ifpga_hw.h\"\n+\n+static inline struct ifpga_port_hw *\n+get_port(struct ifpga_hw *hw, u32 port_id)\n+{\n+\tif (!is_valid_port_id(hw, port_id))\n+\t\treturn NULL;\n+\n+\treturn &hw->port[port_id];\n+}\n+\n+#define ifpga_for_each_feature(hw, feature)\t\t\\\n+\tfor ((feature) = (hw)->sub_feature;\t\t\t\\\n+\t   (feature) < (hw)->sub_feature + (FME_FEATURE_ID_MAX); (feature)++)\n+\n+static inline struct feature *\n+get_fme_feature_by_id(struct ifpga_fme_hw *fme, u64 id)\n+{\n+\tstruct feature *feature;\n+\n+\tifpga_for_each_feature(fme, feature) {\n+\t\tif (feature->id == id)\n+\t\t\treturn feature;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static inline struct feature *\n+get_port_feature_by_id(struct ifpga_port_hw *port, u64 id)\n+{\n+\tstruct feature *feature;\n+\n+\tifpga_for_each_feature(port, feature) {\n+\t\tif (feature->id == id)\n+\t\t\treturn feature;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static inline void  *\n+get_fme_feature_ioaddr_by_index(struct ifpga_fme_hw *fme, int index)\n+{\n+\treturn fme->sub_feature[index].addr;\n+}\n+\n+static inline void  *\n+get_port_feature_ioaddr_by_index(struct ifpga_port_hw *port, int index)\n+{\n+\treturn port->sub_feature[index].addr;\n+}\n+\n+static inline bool\n+is_fme_feature_present(struct ifpga_fme_hw *fme, int index)\n+{\n+\treturn !!get_fme_feature_ioaddr_by_index(fme, index);\n+}\n+\n+static inline bool\n+is_port_feature_present(struct ifpga_port_hw *port, int index)\n+{\n+\treturn !!get_port_feature_ioaddr_by_index(port, index);\n+}\n+\n+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid);\n+\n+int __fpga_port_disable(struct ifpga_port_hw *port);\n+void __fpga_port_enable(struct ifpga_port_hw *port);\n+\n+static inline int fpga_port_disable(struct ifpga_port_hw *port)\n+{\n+\tint ret;\n+\n+\tspinlock_lock(&port->lock);\n+\tret = __fpga_port_disable(port);\n+\tspinlock_unlock(&port->lock);\n+\treturn ret;\n+}\n+\n+static inline int fpga_port_enable(struct ifpga_port_hw 
*port)\n+{\n+\tspinlock_lock(&port->lock);\n+\t__fpga_port_enable(port);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static inline int __fpga_port_reset(struct ifpga_port_hw *port)\n+{\n+\tint ret;\n+\n+\tret = __fpga_port_disable(port);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\t__fpga_port_enable(port);\n+\n+\treturn 0;\n+}\n+\n+static inline int fpga_port_reset(struct ifpga_port_hw *port)\n+{\n+\tint ret;\n+\n+\tspinlock_lock(&port->lock);\n+\tret = __fpga_port_reset(port);\n+\tspinlock_unlock(&port->lock);\n+\treturn ret;\n+}\n+\n+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,\n+\t  u64 *status);\n+\n+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);\n+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);\n+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set);\n+\n+int fme_hw_init(struct ifpga_fme_hw *fme);\n+void fme_hw_uinit(struct ifpga_fme_hw *fme);\n+void port_hw_uinit(struct ifpga_port_hw *port);\n+int port_hw_init(struct ifpga_port_hw *port);\n+int port_clear_error(struct ifpga_port_hw *port);\n+void port_err_mask(struct ifpga_port_hw *port, bool mask);\n+int port_err_clear(struct ifpga_port_hw *port, u64 err);\n+\n+extern struct feature_ops fme_hdr_ops;\n+extern struct feature_ops fme_thermal_mgmt_ops;\n+extern struct feature_ops fme_power_mgmt_ops;\n+extern struct feature_ops fme_global_err_ops;\n+extern struct feature_ops fme_pr_mgmt_ops;\n+extern struct feature_ops fme_global_iperf_ops;\n+extern struct feature_ops fme_global_dperf_ops;\n+\n+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop);\n+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop);\n+\n+/* This struct is used when parsing uafu irq_set */\n+struct fpga_uafu_irq_set {\n+\tu32 start;\n+\tu32 count;\n+\ts32 *evtfds;\n+};\n+\n+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set);\n+\n+extern struct feature_ops port_hdr_ops;\n+extern 
struct feature_ops port_error_ops;\n+extern struct feature_ops port_stp_ops;\n+extern struct feature_ops port_uint_ops;\n+\n+/* help functions for feature ops */\n+int fpga_msix_set_block(struct feature *feature, unsigned int start,\n+\t\t\tunsigned int count, s32 *fds);\n+\n+#endif /* _IFPGA_FEATURE_DEV_H_ */\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme.c\nnew file mode 100644\nindex 0000000..4be60c0\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme.c\n@@ -0,0 +1,734 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include \"ifpga_feature_dev.h\"\n+\n+#define PWR_THRESHOLD_MAX       0x7F\n+\n+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)\n+{\n+\tstruct feature *feature;\n+\n+\tif (!fme)\n+\t\treturn -ENOENT;\n+\n+\tfeature = get_fme_feature_by_id(fme, prop->feature_id);\n+\n+\tif (feature && feature->ops && feature->ops->get_prop)\n+\t\treturn feature->ops->get_prop(feature, prop);\n+\n+\treturn -ENOENT;\n+}\n+\n+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)\n+{\n+\tstruct feature *feature;\n+\n+\tif (!fme)\n+\t\treturn -ENOENT;\n+\n+\tfeature = get_fme_feature_by_id(fme, prop->feature_id);\n+\n+\tif (feature && feature->ops && feature->ops->set_prop)\n+\t\treturn feature->ops->set_prop(feature, prop);\n+\n+\treturn -ENOENT;\n+}\n+\n+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)\n+{\n+\tstruct feature *feature;\n+\n+\tif (!fme)\n+\t\treturn -ENOENT;\n+\n+\tfeature = get_fme_feature_by_id(fme, feature_id);\n+\n+\tif (feature && feature->ops && feature->ops->set_irq)\n+\t\treturn feature->ops->set_irq(feature, irq_set);\n+\n+\treturn -ENOENT;\n+}\n+\n+/* fme private feature head */\n+static int fme_hdr_init(struct feature *feature)\n+{\n+\tstruct feature_fme_header *fme_hdr;\n+\n+\tfme_hdr = (struct feature_fme_header *)feature->addr;\n+\n+\tdev_info(NULL, \"FME HDR 
Init.\\n\");\n+\tdev_info(NULL, \"FME cap %llx.\\n\",\n+\t\t (unsigned long long)fme_hdr->capability.csr);\n+\n+\treturn 0;\n+}\n+\n+static void fme_hdr_uinit(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"FME HDR UInit.\\n\");\n+}\n+\n+static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)\n+{\n+\tstruct feature_fme_header *fme_hdr\n+\t\t= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);\n+\tstruct feature_header header;\n+\n+\theader.csr = readq(&fme_hdr->header);\n+\t*revision = header.revision;\n+\n+\treturn 0;\n+}\n+\n+static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)\n+{\n+\tstruct feature_fme_header *fme_hdr\n+\t\t= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);\n+\tstruct feature_fme_capability fme_capability;\n+\n+\tfme_capability.csr = readq(&fme_hdr->capability);\n+\t*ports_num = fme_capability.num_ports;\n+\n+\treturn 0;\n+}\n+\n+static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)\n+{\n+\tstruct feature_fme_header *fme_hdr\n+\t\t= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);\n+\tstruct feature_fme_capability fme_capability;\n+\n+\tfme_capability.csr = readq(&fme_hdr->capability);\n+\t*cache_size = fme_capability.cache_size;\n+\n+\treturn 0;\n+}\n+\n+static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)\n+{\n+\tstruct feature_fme_header *fme_hdr\n+\t\t= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);\n+\tstruct feature_fme_capability fme_capability;\n+\n+\tfme_capability.csr = readq(&fme_hdr->capability);\n+\t*version = fme_capability.fabric_verid;\n+\n+\treturn 0;\n+}\n+\n+static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)\n+{\n+\tstruct feature_fme_header *fme_hdr\n+\t\t= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);\n+\tstruct feature_fme_capability fme_capability;\n+\n+\tfme_capability.csr = readq(&fme_hdr->capability);\n+\t*socket_id = 
fme_capability.socket_id;\n+\n+\treturn 0;\n+}\n+\n+static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,\n+\t\t\t\t    u64 *bitstream_id)\n+{\n+\tstruct feature_fme_header *fme_hdr\n+\t\t= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);\n+\n+\t*bitstream_id = readq(&fme_hdr->bitstream_id);\n+\n+\treturn 0;\n+}\n+\n+static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t  u64 *bitstream_metadata)\n+{\n+\tstruct feature_fme_header *fme_hdr\n+\t\t= get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);\n+\n+\t*bitstream_metadata = readq(&fme_hdr->bitstream_md);\n+\n+\treturn 0;\n+}\n+\n+static int\n+fme_hdr_get_prop(struct feature *feature, struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\n+\tswitch (prop->prop_id) {\n+\tcase FME_HDR_PROP_REVISION:\n+\t\treturn fme_hdr_get_revision(fme, &prop->data);\n+\tcase FME_HDR_PROP_PORTS_NUM:\n+\t\treturn fme_hdr_get_ports_num(fme, &prop->data);\n+\tcase FME_HDR_PROP_CACHE_SIZE:\n+\t\treturn fme_hdr_get_cache_size(fme, &prop->data);\n+\tcase FME_HDR_PROP_VERSION:\n+\t\treturn fme_hdr_get_version(fme, &prop->data);\n+\tcase FME_HDR_PROP_SOCKET_ID:\n+\t\treturn fme_hdr_get_socket_id(fme, &prop->data);\n+\tcase FME_HDR_PROP_BITSTREAM_ID:\n+\t\treturn fme_hdr_get_bitstream_id(fme, &prop->data);\n+\tcase FME_HDR_PROP_BITSTREAM_METADATA:\n+\t\treturn fme_hdr_get_bitstream_metadata(fme, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+struct feature_ops fme_hdr_ops = {\n+\t.init = fme_hdr_init,\n+\t.uinit = fme_hdr_uinit,\n+\t.get_prop = fme_hdr_get_prop,\n+};\n+\n+/* thermal management */\n+static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)\n+{\n+\tstruct feature_fme_thermal *thermal;\n+\tstruct feature_fme_tmp_threshold temp_threshold;\n+\n+\tthermal = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\n+\ttemp_threshold.csr = readq(&thermal->threshold);\n+\t*thres1 = 
temp_threshold.tmp_thshold1;\n+\n+\treturn 0;\n+}\n+\n+static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)\n+{\n+\tstruct feature_fme_thermal *thermal;\n+\tstruct feature_fme_header *fme_hdr;\n+\tstruct feature_fme_tmp_threshold tmp_threshold;\n+\tstruct feature_fme_capability fme_capability;\n+\n+\tthermal = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\tfme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&fme->lock);\n+\ttmp_threshold.csr = readq(&thermal->threshold);\n+\tfme_capability.csr = readq(&fme_hdr->capability);\n+\n+\tif (fme_capability.lock_bit == 1) {\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -EBUSY;\n+\t} else if (thres1 > 100) {\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -EINVAL;\n+\t} else if (thres1 == 0) {\n+\t\ttmp_threshold.tmp_thshold1_enable = 0;\n+\t\ttmp_threshold.tmp_thshold1 = thres1;\n+\t} else {\n+\t\ttmp_threshold.tmp_thshold1_enable = 1;\n+\t\ttmp_threshold.tmp_thshold1 = thres1;\n+\t}\n+\n+\twriteq(tmp_threshold.csr, &thermal->threshold);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)\n+{\n+\tstruct feature_fme_thermal *thermal;\n+\tstruct feature_fme_tmp_threshold temp_threshold;\n+\n+\tthermal = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\n+\ttemp_threshold.csr = readq(&thermal->threshold);\n+\t*thres2 = temp_threshold.tmp_thshold2;\n+\n+\treturn 0;\n+}\n+\n+static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)\n+{\n+\tstruct feature_fme_thermal *thermal;\n+\tstruct feature_fme_header *fme_hdr;\n+\tstruct feature_fme_tmp_threshold tmp_threshold;\n+\tstruct feature_fme_capability fme_capability;\n+\n+\tthermal = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\tfme_hdr = get_fme_feature_ioaddr_by_index(fme, 
FME_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&fme->lock);\n+\ttmp_threshold.csr = readq(&thermal->threshold);\n+\tfme_capability.csr = readq(&fme_hdr->capability);\n+\n+\tif (fme_capability.lock_bit == 1) {\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -EBUSY;\n+\t} else if (thres2 > 100) {\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -EINVAL;\n+\t} else if (thres2 == 0) {\n+\t\ttmp_threshold.tmp_thshold2_enable = 0;\n+\t\ttmp_threshold.tmp_thshold2 = thres2;\n+\t} else {\n+\t\ttmp_threshold.tmp_thshold2_enable = 1;\n+\t\ttmp_threshold.tmp_thshold2 = thres2;\n+\t}\n+\n+\twriteq(tmp_threshold.csr, &thermal->threshold);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t  u64 *thres_trip)\n+{\n+\tstruct feature_fme_thermal *thermal;\n+\tstruct feature_fme_tmp_threshold temp_threshold;\n+\n+\tthermal = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\n+\ttemp_threshold.csr = readq(&thermal->threshold);\n+\t*thres_trip = temp_threshold.therm_trip_thshold;\n+\n+\treturn 0;\n+}\n+\n+static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t      u64 *thres1_reached)\n+{\n+\tstruct feature_fme_thermal *thermal;\n+\tstruct feature_fme_tmp_threshold temp_threshold;\n+\n+\tthermal = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\n+\ttemp_threshold.csr = readq(&thermal->threshold);\n+\t*thres1_reached = temp_threshold.thshold1_status;\n+\n+\treturn 0;\n+}\n+\n+static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t      u64 *thres1_reached)\n+{\n+\tstruct feature_fme_thermal *thermal;\n+\tstruct feature_fme_tmp_threshold temp_threshold;\n+\n+\tthermal = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\n+\ttemp_threshold.csr = readq(&thermal->threshold);\n+\t*thres1_reached = temp_threshold.thshold2_status;\n+\n+\treturn 
0;\n+}\n+\n+static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t     u64 *thres1_policy)\n+{\n+\tstruct feature_fme_thermal *thermal;\n+\tstruct feature_fme_tmp_threshold temp_threshold;\n+\n+\tthermal = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\n+\ttemp_threshold.csr = readq(&thermal->threshold);\n+\t*thres1_policy = temp_threshold.thshold_policy;\n+\n+\treturn 0;\n+}\n+\n+static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t     u64 thres1_policy)\n+{\n+\tstruct feature_fme_thermal *thermal;\n+\tstruct feature_fme_tmp_threshold tmp_threshold;\n+\n+\tthermal = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\n+\tspinlock_lock(&fme->lock);\n+\ttmp_threshold.csr = readq(&thermal->threshold);\n+\n+\tif (thres1_policy == 0) {\n+\t\ttmp_threshold.thshold_policy = 0;\n+\t} else if (thres1_policy == 1) {\n+\t\ttmp_threshold.thshold_policy = 1;\n+\t} else {\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\twriteq(tmp_threshold.csr, &thermal->threshold);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)\n+{\n+\tstruct feature_fme_thermal *thermal;\n+\tstruct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;\n+\n+\tthermal = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\n+\ttemp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);\n+\t*temp = temp_rdsensor_fmt1.fpga_temp;\n+\n+\treturn 0;\n+}\n+\n+static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)\n+{\n+\tstruct feature_fme_thermal *fme_thermal\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_THERMAL_MGMT);\n+\tstruct feature_header header;\n+\n+\theader.csr = readq(&fme_thermal->header);\n+\t*revision = header.revision;\n+\n+\treturn 0;\n+}\n+\n+#define 
FME_THERMAL_CAP_NO_TMP_THRESHOLD\t0x1\n+\n+static int fme_thermal_mgmt_init(struct feature *feature)\n+{\n+\tstruct feature_fme_thermal *fme_thermal;\n+\tstruct feature_fme_tmp_threshold_cap thermal_cap;\n+\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"FME thermal mgmt Init.\\n\");\n+\n+\tfme_thermal = (struct feature_fme_thermal *)feature->addr;\n+\tthermal_cap.csr = readq(&fme_thermal->threshold_cap);\n+\n+\tdev_info(NULL, \"FME thermal cap %llx.\\n\",\n+\t\t (unsigned long long)fme_thermal->threshold_cap.csr);\n+\n+\tif (thermal_cap.tmp_thshold_disabled)\n+\t\tfeature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;\n+\n+\treturn 0;\n+}\n+\n+static void fme_thermal_mgmt_uinit(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"FME thermal mgmt UInit.\\n\");\n+}\n+\n+static int\n+fme_thermal_set_prop(struct feature *feature, struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\n+\tif (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)\n+\t\treturn -ENOENT;\n+\n+\tswitch (prop->prop_id) {\n+\tcase FME_THERMAL_PROP_THRESHOLD1:\n+\t\treturn fme_thermal_set_threshold1(fme, prop->data);\n+\tcase FME_THERMAL_PROP_THRESHOLD2:\n+\t\treturn fme_thermal_set_threshold2(fme, prop->data);\n+\tcase FME_THERMAL_PROP_THRESHOLD1_POLICY:\n+\t\treturn fme_thermal_set_threshold1_policy(fme, prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int\n+fme_thermal_get_prop(struct feature *feature, struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\n+\tif (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&\n+\t    prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&\n+\t    prop->prop_id != FME_THERMAL_PROP_REVISION)\n+\t\treturn -ENOENT;\n+\n+\tswitch (prop->prop_id) {\n+\tcase FME_THERMAL_PROP_THRESHOLD1:\n+\t\treturn fme_thermal_get_threshold1(fme, &prop->data);\n+\tcase FME_THERMAL_PROP_THRESHOLD2:\n+\t\treturn fme_thermal_get_threshold2(fme, &prop->data);\n+\tcase FME_THERMAL_PROP_THRESHOLD_TRIP:\n+\t\treturn 
fme_thermal_get_threshold_trip(fme, &prop->data);\n+\tcase FME_THERMAL_PROP_THRESHOLD1_REACHED:\n+\t\treturn fme_thermal_get_threshold1_reached(fme, &prop->data);\n+\tcase FME_THERMAL_PROP_THRESHOLD2_REACHED:\n+\t\treturn fme_thermal_get_threshold2_reached(fme, &prop->data);\n+\tcase FME_THERMAL_PROP_THRESHOLD1_POLICY:\n+\t\treturn fme_thermal_get_threshold1_policy(fme, &prop->data);\n+\tcase FME_THERMAL_PROP_TEMPERATURE:\n+\t\treturn fme_thermal_get_temperature(fme, &prop->data);\n+\tcase FME_THERMAL_PROP_REVISION:\n+\t\treturn fme_thermal_get_revision(fme, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+struct feature_ops fme_thermal_mgmt_ops = {\n+\t.init = fme_thermal_mgmt_init,\n+\t.uinit = fme_thermal_mgmt_uinit,\n+\t.get_prop = fme_thermal_get_prop,\n+\t.set_prop = fme_thermal_set_prop,\n+};\n+\n+static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\tFME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_fme_pm_status pm_status;\n+\n+\tpm_status.csr = readq(&fme_power->status);\n+\n+\t*consumed = pm_status.pwr_consumed;\n+\n+\treturn 0;\n+}\n+\n+static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\tFME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_fme_pm_ap_threshold pm_ap_threshold;\n+\n+\tpm_ap_threshold.csr = readq(&fme_power->threshold);\n+\n+\t*threshold = pm_ap_threshold.threshold1;\n+\n+\treturn 0;\n+}\n+\n+static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\tFME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_fme_pm_ap_threshold pm_ap_threshold;\n+\n+\tspinlock_lock(&fme->lock);\n+\tpm_ap_threshold.csr = readq(&fme_power->threshold);\n+\n+\tif (threshold <= PWR_THRESHOLD_MAX) {\n+\t\tpm_ap_threshold.threshold1 
= threshold;\n+\t} else {\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\twriteq(pm_ap_threshold.csr, &fme_power->threshold);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\tFME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_fme_pm_ap_threshold pm_ap_threshold;\n+\n+\tpm_ap_threshold.csr = readq(&fme_power->threshold);\n+\n+\t*threshold = pm_ap_threshold.threshold2;\n+\n+\treturn 0;\n+}\n+\n+static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\tFME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_fme_pm_ap_threshold pm_ap_threshold;\n+\n+\tspinlock_lock(&fme->lock);\n+\tpm_ap_threshold.csr = readq(&fme_power->threshold);\n+\n+\tif (threshold <= PWR_THRESHOLD_MAX) {\n+\t\tpm_ap_threshold.threshold2 = threshold;\n+\t} else {\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\twriteq(pm_ap_threshold.csr, &fme_power->threshold);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t u64 *threshold_status)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\tFME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_fme_pm_ap_threshold pm_ap_threshold;\n+\n+\tpm_ap_threshold.csr = readq(&fme_power->threshold);\n+\n+\t*threshold_status = pm_ap_threshold.threshold1_status;\n+\n+\treturn 0;\n+}\n+\n+static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t u64 *threshold_status)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\tFME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_fme_pm_ap_threshold pm_ap_threshold;\n+\n+\tpm_ap_threshold.csr = 
readq(&fme_power->threshold);\n+\n+\t*threshold_status = pm_ap_threshold.threshold2_status;\n+\n+\treturn 0;\n+}\n+\n+static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\tFME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_fme_pm_status pm_status;\n+\n+\tpm_status.csr = readq(&fme_power->status);\n+\n+\t*rtl = pm_status.fpga_latency_report;\n+\n+\treturn 0;\n+}\n+\n+static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\tFME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_fme_pm_xeon_limit xeon_limit;\n+\n+\txeon_limit.csr = readq(&fme_power->xeon_limit);\n+\n+\tif (!xeon_limit.enable)\n+\t\txeon_limit.pwr_limit = 0;\n+\n+\t*limit = xeon_limit.pwr_limit;\n+\n+\treturn 0;\n+}\n+\n+static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\tFME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_fme_pm_fpga_limit fpga_limit;\n+\n+\tfpga_limit.csr = readq(&fme_power->fpga_limit);\n+\n+\tif (!fpga_limit.enable)\n+\t\tfpga_limit.pwr_limit = 0;\n+\n+\t*limit = fpga_limit.pwr_limit;\n+\n+\treturn 0;\n+}\n+\n+static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)\n+{\n+\tstruct feature_fme_power *fme_power\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_POWER_MGMT);\n+\tstruct feature_header header;\n+\n+\theader.csr = readq(&fme_power->header);\n+\t*revision = header.revision;\n+\n+\treturn 0;\n+}\n+\n+static int fme_power_mgmt_init(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"FME power mgmt Init.\\n\");\n+\n+\treturn 0;\n+}\n+\n+static void fme_power_mgmt_uinit(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"FME power mgmt UInit.\\n\");\n+}\n+\n+static int 
fme_power_mgmt_get_prop(struct feature *feature,\n+\t\t\t\t   struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\n+\tswitch (prop->prop_id) {\n+\tcase FME_PWR_PROP_CONSUMED:\n+\t\treturn fme_pwr_get_consumed(fme, &prop->data);\n+\tcase FME_PWR_PROP_THRESHOLD1:\n+\t\treturn fme_pwr_get_threshold1(fme, &prop->data);\n+\tcase FME_PWR_PROP_THRESHOLD2:\n+\t\treturn fme_pwr_get_threshold2(fme, &prop->data);\n+\tcase FME_PWR_PROP_THRESHOLD1_STATUS:\n+\t\treturn fme_pwr_get_threshold1_status(fme, &prop->data);\n+\tcase FME_PWR_PROP_THRESHOLD2_STATUS:\n+\t\treturn fme_pwr_get_threshold2_status(fme, &prop->data);\n+\tcase FME_PWR_PROP_RTL:\n+\t\treturn fme_pwr_get_rtl(fme, &prop->data);\n+\tcase FME_PWR_PROP_XEON_LIMIT:\n+\t\treturn fme_pwr_get_xeon_limit(fme, &prop->data);\n+\tcase FME_PWR_PROP_FPGA_LIMIT:\n+\t\treturn fme_pwr_get_fpga_limit(fme, &prop->data);\n+\tcase FME_PWR_PROP_REVISION:\n+\t\treturn fme_pwr_get_revision(fme, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_power_mgmt_set_prop(struct feature *feature,\n+\t\t\t\t   struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\n+\tswitch (prop->prop_id) {\n+\tcase FME_PWR_PROP_THRESHOLD1:\n+\t\treturn fme_pwr_set_threshold1(fme, prop->data);\n+\tcase FME_PWR_PROP_THRESHOLD2:\n+\t\treturn fme_pwr_set_threshold2(fme, prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+struct feature_ops fme_power_mgmt_ops = {\n+\t.init = fme_power_mgmt_init,\n+\t.uinit = fme_power_mgmt_uinit,\n+\t.get_prop = fme_power_mgmt_get_prop,\n+\t.set_prop = fme_power_mgmt_set_prop,\n+};\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c\nnew file mode 100644\nindex 0000000..1773b87\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c\n@@ -0,0 +1,301 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include 
\"ifpga_feature_dev.h\"\n+\n+#define PERF_OBJ_ROOT_ID\t0xff\n+\n+static int fme_dperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)\n+{\n+\tstruct feature_fme_dperf *dperf;\n+\tstruct feature_fme_dfpmon_clk_ctr clk;\n+\n+\tdperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_DPERF);\n+\tclk.afu_interf_clock = readq(&dperf->clk);\n+\n+\t*clock = clk.afu_interf_clock;\n+\treturn 0;\n+}\n+\n+static int fme_dperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)\n+{\n+\tstruct feature_fme_dperf *dperf;\n+\tstruct feature_header header;\n+\n+\tdperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_DPERF);\n+\theader.csr = readq(&dperf->header);\n+\t*revision = header.revision;\n+\n+\treturn 0;\n+}\n+\n+#define DPERF_TIMEOUT\t30\n+\n+static bool fabric_pobj_is_enabled(int port_id,\n+\t\t\t\t   struct feature_fme_dperf *dperf)\n+{\n+\tstruct feature_fme_dfpmon_fab_ctl ctl;\n+\n+\tctl.csr = readq(&dperf->fab_ctl);\n+\n+\tif (ctl.port_filter == FAB_DISABLE_FILTER)\n+\t\treturn port_id == PERF_OBJ_ROOT_ID;\n+\n+\treturn port_id == ctl.port_id;\n+}\n+\n+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,\n+\t\t\t       enum dperf_fab_events fab_event)\n+{\n+\tstruct feature_fme_dfpmon_fab_ctl ctl;\n+\tstruct feature_fme_dfpmon_fab_ctr ctr;\n+\tstruct feature_fme_dperf *dperf;\n+\tu64 counter = 0;\n+\n+\tspinlock_lock(&fme->lock);\n+\tdperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_DPERF);\n+\n+\t/* if it is disabled, force the counter to return zero. 
*/\n+\tif (!fabric_pobj_is_enabled(port_id, dperf))\n+\t\tgoto exit;\n+\n+\tctl.csr = readq(&dperf->fab_ctl);\n+\tctl.fab_evtcode = fab_event;\n+\twriteq(ctl.csr, &dperf->fab_ctl);\n+\n+\tctr.event_code = fab_event;\n+\n+\tif (fpga_wait_register_field(event_code, ctr,\n+\t\t\t\t     &dperf->fab_ctr, DPERF_TIMEOUT, 1)) {\n+\t\tdev_err(fme, \"timeout, unmatched VTd event type in counter registers.\\n\");\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\tctr.csr = readq(&dperf->fab_ctr);\n+\tcounter = ctr.fab_cnt;\n+exit:\n+\tspinlock_unlock(&fme->lock);\n+\treturn counter;\n+}\n+\n+#define FAB_PORT_SHOW(name, event)\t\t\t\t\t\\\n+static int fme_dperf_get_fab_port_##name(struct ifpga_fme_hw *fme,\t\\\n+\t\t\t\t\t u8 port_id, u64 *counter)\t\\\n+{\t\t\t\t\t\t\t\t\t\\\n+\t*counter = read_fabric_counter(fme, port_id, event);\t\t\\\n+\treturn 0;\t\t\t\t\t\t\t\\\n+}\n+\n+FAB_PORT_SHOW(pcie0_read, DPERF_FAB_PCIE0_RD);\n+FAB_PORT_SHOW(pcie0_write, DPERF_FAB_PCIE0_WR);\n+FAB_PORT_SHOW(mmio_read, DPERF_FAB_MMIO_RD);\n+FAB_PORT_SHOW(mmio_write, DPERF_FAB_MMIO_WR);\n+\n+static int fme_dperf_get_fab_port_enable(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t u8 port_id, u64 *enable)\n+{\n+\tstruct feature_fme_dperf *dperf;\n+\tint status;\n+\n+\tdperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_DPERF);\n+\n+\tstatus = fabric_pobj_is_enabled(port_id, dperf);\n+\t*enable = (u64)status;\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * If enable one port or all port event counter in fabric, other\n+ * fabric event counter originally enabled will be disable automatically.\n+ */\n+static int fme_dperf_set_fab_port_enable(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t u8 port_id, u64 enable)\n+{\n+\tstruct feature_fme_dfpmon_fab_ctl ctl;\n+\tstruct feature_fme_dperf *dperf;\n+\tbool state;\n+\n+\tstate = !!enable;\n+\n+\tif (!state)\n+\t\treturn -EINVAL;\n+\n+\tdperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_DPERF);\n+\n+\t/* if it 
is already enabled. */\n+\tif (fabric_pobj_is_enabled(port_id, dperf))\n+\t\treturn 0;\n+\n+\tspinlock_lock(&fme->lock);\n+\tctl.csr = readq(&dperf->fab_ctl);\n+\tif (port_id == PERF_OBJ_ROOT_ID) {\n+\t\tctl.port_filter = FAB_DISABLE_FILTER;\n+\t} else {\n+\t\tctl.port_filter = FAB_ENABLE_FILTER;\n+\t\tctl.port_id = port_id;\n+\t}\n+\n+\twriteq(ctl.csr, &dperf->fab_ctl);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+static int fme_dperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)\n+{\n+\tstruct feature_fme_dperf *dperf;\n+\tstruct feature_fme_dfpmon_fab_ctl ctl;\n+\n+\tdperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_DPERF);\n+\tctl.csr = readq(&dperf->fab_ctl);\n+\t*freeze = (u64)ctl.freeze;\n+\n+\treturn 0;\n+}\n+\n+static int fme_dperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)\n+{\n+\tstruct feature_fme_dperf *dperf;\n+\tstruct feature_fme_dfpmon_fab_ctl ctl;\n+\tbool state;\n+\n+\tstate = !!freeze;\n+\n+\tspinlock_lock(&fme->lock);\n+\tdperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_DPERF);\n+\tctl.csr = readq(&dperf->fab_ctl);\n+\tctl.freeze = state;\n+\twriteq(ctl.csr, &dperf->fab_ctl);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+#define PERF_MAX_PORT_NUM\t1\n+\n+static int fme_global_dperf_init(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"FME global_dperf Init.\\n\");\n+\n+\treturn 0;\n+}\n+\n+static void fme_global_dperf_uinit(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"FME global_dperf UInit.\\n\");\n+}\n+\n+static int fme_dperf_fab_get_prop(struct feature *feature,\n+\t\t\t\t  struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tswitch (id) {\n+\tcase 0x1: /* FREEZE */\n+\t\treturn fme_dperf_get_fab_freeze(fme, &prop->data);\n+\tcase 0x2: /* PCIE0_READ 
*/\n+\t\treturn fme_dperf_get_fab_port_pcie0_read(fme, sub, &prop->data);\n+\tcase 0x3: /* PCIE0_WRITE */\n+\t\treturn fme_dperf_get_fab_port_pcie0_write(fme, sub,\n+\t\t\t\t\t\t\t  &prop->data);\n+\tcase 0x4: /* MMIO_READ */\n+\t\treturn fme_dperf_get_fab_port_mmio_read(fme, sub, &prop->data);\n+\tcase 0x5: /* MMIO_WRITE */\n+\t\treturn fme_dperf_get_fab_port_mmio_write(fme, sub, &prop->data);\n+\tcase 0x6: /* ENABLE */\n+\t\treturn fme_dperf_get_fab_port_enable(fme, sub, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_dperf_root_get_prop(struct feature *feature,\n+\t\t\t\t   struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tif (sub != PERF_PROP_SUB_UNUSED)\n+\t\treturn -ENOENT;\n+\n+\tswitch (id) {\n+\tcase 0x1: /* CLOCK */\n+\t\treturn fme_dperf_get_clock(fme, &prop->data);\n+\tcase 0x2: /* REVISION */\n+\t\treturn fme_dperf_get_revision(fme, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_global_dperf_get_prop(struct feature *feature,\n+\t\t\t\t     struct feature_prop *prop)\n+{\n+\tu8 top = GET_FIELD(PROP_TOP, prop->prop_id);\n+\n+\tswitch (top) {\n+\tcase PERF_PROP_TOP_FAB:\n+\t\treturn fme_dperf_fab_get_prop(feature, prop);\n+\tcase PERF_PROP_TOP_UNUSED:\n+\t\treturn fme_dperf_root_get_prop(feature, prop);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_dperf_fab_set_prop(struct feature *feature,\n+\t\t\t\t  struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tswitch (id) {\n+\tcase 0x1: /* FREEZE - fab root only prop */\n+\t\tif (sub != PERF_PROP_SUB_UNUSED)\n+\t\t\treturn -ENOENT;\n+\t\treturn fme_dperf_set_fab_freeze(fme, prop->data);\n+\tcase 0x6: /* ENABLE - fab both root and sub */\n+\t\treturn fme_dperf_set_fab_port_enable(fme, sub, 
prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_global_dperf_set_prop(struct feature *feature,\n+\t\t\t\t     struct feature_prop *prop)\n+{\n+\tu8 top = GET_FIELD(PROP_TOP, prop->prop_id);\n+\n+\tswitch (top) {\n+\tcase PERF_PROP_TOP_FAB:\n+\t\treturn fme_dperf_fab_set_prop(feature, prop);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+struct feature_ops fme_global_dperf_ops = {\n+\t.init = fme_global_dperf_init,\n+\t.uinit = fme_global_dperf_uinit,\n+\t.get_prop = fme_global_dperf_get_prop,\n+\t.set_prop = fme_global_dperf_set_prop,\n+\n+};\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c\nnew file mode 100644\nindex 0000000..33c241e\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c\n@@ -0,0 +1,403 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include \"ifpga_feature_dev.h\"\n+\n+static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_error0 fme_error0;\n+\n+\tfme_error0.csr = readq(&fme_err->fme_err);\n+\t*val = fme_error0.csr;\n+\n+\treturn 0;\n+}\n+\n+static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_first_error fme_first_err;\n+\n+\tfme_first_err.csr = readq(&fme_err->fme_first_err);\n+\t*val = fme_first_err.err_reg_status;\n+\n+\treturn 0;\n+}\n+\n+static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_next_error fme_next_err;\n+\n+\tfme_next_err.csr = readq(&fme_err->fme_next_err);\n+\t*val = 
fme_next_err.err_reg_status;\n+\n+\treturn 0;\n+}\n+\n+static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_error0 fme_error0;\n+\tstruct feature_fme_first_error fme_first_err;\n+\tstruct feature_fme_next_error fme_next_err;\n+\tint ret = 0;\n+\n+\tspinlock_lock(&fme->lock);\n+\twriteq(FME_ERROR0_MASK, &fme_err->fme_err_mask);\n+\n+\tfme_error0.csr = readq(&fme_err->fme_err);\n+\tif (val != fme_error0.csr) {\n+\t\tret = -EBUSY;\n+\t\tgoto exit;\n+\t}\n+\n+\tfme_first_err.csr = readq(&fme_err->fme_first_err);\n+\tfme_next_err.csr = readq(&fme_err->fme_next_err);\n+\n+\twriteq(fme_error0.csr & FME_ERROR0_MASK, &fme_err->fme_err);\n+\twriteq(fme_first_err.csr & FME_FIRST_ERROR_MASK,\n+\t       &fme_err->fme_first_err);\n+\twriteq(fme_next_err.csr & FME_NEXT_ERROR_MASK,\n+\t       &fme_err->fme_next_err);\n+\n+exit:\n+\twriteq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn ret;\n+}\n+\n+static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_header header;\n+\n+\theader.csr = readq(&fme_err->header);\n+\t*val = header.revision;\n+\n+\treturn 0;\n+}\n+\n+static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_pcie0_error pcie0_err;\n+\n+\tpcie0_err.csr = readq(&fme_err->pcie0_err);\n+\t*val = pcie0_err.csr;\n+\n+\treturn 0;\n+}\n+\n+static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct 
feature_fme_pcie0_error pcie0_err;\n+\tint ret = 0;\n+\n+\tspinlock_lock(&fme->lock);\n+\twriteq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);\n+\n+\tpcie0_err.csr = readq(&fme_err->pcie0_err);\n+\tif (val != pcie0_err.csr)\n+\t\tret = -EBUSY;\n+\telse\n+\t\twriteq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,\n+\t\t       &fme_err->pcie0_err);\n+\n+\twriteq(0UL, &fme_err->pcie0_err_mask);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn ret;\n+}\n+\n+static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_pcie1_error pcie1_err;\n+\n+\tpcie1_err.csr = readq(&fme_err->pcie1_err);\n+\t*val = pcie1_err.csr;\n+\n+\treturn 0;\n+}\n+\n+static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_pcie1_error pcie1_err;\n+\tint ret = 0;\n+\n+\tspinlock_lock(&fme->lock);\n+\twriteq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);\n+\n+\tpcie1_err.csr = readq(&fme_err->pcie1_err);\n+\tif (val != pcie1_err.csr)\n+\t\tret = -EBUSY;\n+\telse\n+\t\twriteq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,\n+\t\t       &fme_err->pcie1_err);\n+\n+\twriteq(0UL, &fme_err->pcie1_err_mask);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn ret;\n+}\n+\n+static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_ras_nonfaterror ras_nonfaterr;\n+\n+\tras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);\n+\t*val = ras_nonfaterr.csr;\n+\n+\treturn 0;\n+}\n+\n+static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= 
get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_ras_catfaterror ras_catfaterr;\n+\n+\tras_catfaterr.csr = readq(&fme_err->ras_catfaterr);\n+\t*val = ras_catfaterr.csr;\n+\n+\treturn 0;\n+}\n+\n+static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_ras_error_inj ras_error_inj;\n+\n+\tras_error_inj.csr = readq(&fme_err->ras_error_inj);\n+\t*val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;\n+\n+\treturn 0;\n+}\n+\n+static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t      FME_FEATURE_ID_GLOBAL_ERR);\n+\tstruct feature_fme_ras_error_inj ras_error_inj;\n+\n+\tspinlock_lock(&fme->lock);\n+\tras_error_inj.csr = readq(&fme_err->ras_error_inj);\n+\n+\tif (val <= FME_RAS_ERROR_INJ_MASK) {\n+\t\tras_error_inj.csr = val;\n+\t} else {\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\twriteq(ras_error_inj.csr, &fme_err->ras_error_inj);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+static void fme_error_enable(struct ifpga_fme_hw *fme)\n+{\n+\tstruct feature_fme_err *fme_err\n+\t\t= get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_GLOBAL_ERR);\n+\n+\twriteq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);\n+\twriteq(0UL, &fme_err->pcie0_err_mask);\n+\twriteq(0UL, &fme_err->pcie1_err_mask);\n+\twriteq(0UL, &fme_err->ras_nonfat_mask);\n+\twriteq(0UL, &fme_err->ras_catfat_mask);\n+}\n+\n+static int fme_global_error_init(struct feature *feature)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\n+\tfme_error_enable(fme);\n+\n+\tif (feature->ctx_num)\n+\t\tfme->capability |= FPGA_FME_CAP_ERR_IRQ;\n+\n+\treturn 0;\n+}\n+\n+static void fme_global_error_uinit(struct feature 
*feature)\n+{\n+\tUNUSED(feature);\n+}\n+\n+static int fme_err_fme_err_get_prop(struct feature *feature,\n+\t\t\t\t    struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tswitch (id) {\n+\tcase 0x1: /* ERRORS */\n+\t\treturn fme_err_get_errors(fme, &prop->data);\n+\tcase 0x2: /* FIRST_ERROR */\n+\t\treturn fme_err_get_first_error(fme, &prop->data);\n+\tcase 0x3: /* NEXT_ERROR */\n+\t\treturn fme_err_get_next_error(fme, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_err_root_get_prop(struct feature *feature,\n+\t\t\t\t struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tswitch (id) {\n+\tcase 0x5: /* REVISION */\n+\t\treturn fme_err_get_revision(fme, &prop->data);\n+\tcase 0x6: /* PCIE0_ERRORS */\n+\t\treturn fme_err_get_pcie0_errors(fme, &prop->data);\n+\tcase 0x7: /* PCIE1_ERRORS */\n+\t\treturn fme_err_get_pcie1_errors(fme, &prop->data);\n+\tcase 0x8: /* NONFATAL_ERRORS */\n+\t\treturn fme_err_get_nonfatal_errors(fme, &prop->data);\n+\tcase 0x9: /* CATFATAL_ERRORS */\n+\t\treturn fme_err_get_catfatal_errors(fme, &prop->data);\n+\tcase 0xa: /* INJECT_ERRORS */\n+\t\treturn fme_err_get_inject_errors(fme, &prop->data);\n+\tcase 0xb: /* REVISION*/\n+\t\treturn fme_err_get_revision(fme, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_global_error_get_prop(struct feature *feature,\n+\t\t\t\t     struct feature_prop *prop)\n+{\n+\tu8 top = GET_FIELD(PROP_TOP, prop->prop_id);\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\n+\t/* PROP_SUB is never used */\n+\tif (sub != PROP_SUB_UNUSED)\n+\t\treturn -ENOENT;\n+\n+\tswitch (top) {\n+\tcase ERR_PROP_TOP_FME_ERR:\n+\t\treturn fme_err_fme_err_get_prop(feature, prop);\n+\tcase ERR_PROP_TOP_UNUSED:\n+\t\treturn fme_err_root_get_prop(feature, prop);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int 
fme_err_fme_err_set_prop(struct feature *feature,\n+\t\t\t\t    struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tswitch (id) {\n+\tcase 0x4: /* CLEAR */\n+\t\treturn fme_err_set_clear(fme, prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_err_root_set_prop(struct feature *feature,\n+\t\t\t\t struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tswitch (id) {\n+\tcase 0x6: /* PCIE0_ERRORS */\n+\t\treturn fme_err_set_pcie0_errors(fme, prop->data);\n+\tcase 0x7: /* PCIE1_ERRORS */\n+\t\treturn fme_err_set_pcie1_errors(fme, prop->data);\n+\tcase 0xa: /* INJECT_ERRORS */\n+\t\treturn fme_err_set_inject_errors(fme, prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_global_error_set_prop(struct feature *feature,\n+\t\t\t\t     struct feature_prop *prop)\n+{\n+\tu8 top = GET_FIELD(PROP_TOP, prop->prop_id);\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\n+\t/* PROP_SUB is never used */\n+\tif (sub != PROP_SUB_UNUSED)\n+\t\treturn -ENOENT;\n+\n+\tswitch (top) {\n+\tcase ERR_PROP_TOP_FME_ERR:\n+\t\treturn fme_err_fme_err_set_prop(feature, prop);\n+\tcase ERR_PROP_TOP_UNUSED:\n+\t\treturn fme_err_root_set_prop(feature, prop);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_global_err_set_irq(struct feature *feature, void *irq_set)\n+{\n+\tstruct fpga_fme_err_irq_set *err_irq_set =\n+\t\t\t(struct fpga_fme_err_irq_set *)irq_set;\n+\tstruct ifpga_fme_hw *fme;\n+\tint ret;\n+\n+\tfme = (struct ifpga_fme_hw *)feature->parent;\n+\n+\tspinlock_lock(&fme->lock);\n+\tif (!(fme->capability & FPGA_FME_CAP_ERR_IRQ)) {\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn ret;\n+}\n+\n+struct feature_ops fme_global_err_ops = {\n+\t.init = 
fme_global_error_init,\n+\t.uinit = fme_global_error_uinit,\n+\t.get_prop = fme_global_error_get_prop,\n+\t.set_prop = fme_global_error_set_prop,\n+\t.set_irq = fme_global_err_set_irq,\n+};\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c\nnew file mode 100644\nindex 0000000..e6c40a1\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c\n@@ -0,0 +1,715 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include \"ifpga_feature_dev.h\"\n+\n+#define PERF_OBJ_ROOT_ID\t0xff\n+\n+static int fme_iperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)\n+{\n+\tstruct feature_fme_iperf *iperf;\n+\tstruct feature_fme_ifpmon_clk_ctr clk;\n+\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\tclk.afu_interf_clock = readq(&iperf->clk);\n+\n+\t*clock = clk.afu_interf_clock;\n+\treturn 0;\n+}\n+\n+static int fme_iperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)\n+{\n+\tstruct feature_fme_iperf *iperf;\n+\tstruct feature_header header;\n+\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\theader.csr = readq(&iperf->header);\n+\t*revision = header.revision;\n+\n+\treturn 0;\n+}\n+\n+static int fme_iperf_get_cache_freeze(struct ifpga_fme_hw *fme, u64 *freeze)\n+{\n+\tstruct feature_fme_iperf *iperf;\n+\tstruct feature_fme_ifpmon_ch_ctl ctl;\n+\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\tctl.csr = readq(&iperf->ch_ctl);\n+\t*freeze = (u64)ctl.freeze;\n+\treturn 0;\n+}\n+\n+static int fme_iperf_set_cache_freeze(struct ifpga_fme_hw *fme, u64 freeze)\n+{\n+\tstruct feature_fme_iperf *iperf;\n+\tstruct feature_fme_ifpmon_ch_ctl ctl;\n+\tbool state;\n+\n+\tstate = !!freeze;\n+\n+\tspinlock_lock(&fme->lock);\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\tctl.csr 
= readq(&iperf->ch_ctl);\n+\tctl.freeze = state;\n+\twriteq(ctl.csr, &iperf->ch_ctl);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+#define IPERF_TIMEOUT\t30\n+\n+static u64 read_cache_counter(struct ifpga_fme_hw *fme,\n+\t\t\t      u8 channel, enum iperf_cache_events event)\n+{\n+\tstruct feature_fme_iperf *iperf;\n+\tstruct feature_fme_ifpmon_ch_ctl ctl;\n+\tstruct feature_fme_ifpmon_ch_ctr ctr0, ctr1;\n+\tu64 counter;\n+\n+\tspinlock_lock(&fme->lock);\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\n+\t/* set channel access type and cache event code. */\n+\tctl.csr = readq(&iperf->ch_ctl);\n+\tctl.cci_chsel = channel;\n+\tctl.cache_event = event;\n+\twriteq(ctl.csr, &iperf->ch_ctl);\n+\n+\t/* check the event type in the counter registers */\n+\tctr0.event_code = event;\n+\n+\tif (fpga_wait_register_field(event_code, ctr0,\n+\t\t\t\t     &iperf->ch_ctr0, IPERF_TIMEOUT, 1)) {\n+\t\tdev_err(fme, \"timeout, unmatched cache event type in counter registers.\\n\");\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\tctr0.csr = readq(&iperf->ch_ctr0);\n+\tctr1.csr = readq(&iperf->ch_ctr1);\n+\tcounter = ctr0.cache_counter + ctr1.cache_counter;\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn counter;\n+}\n+\n+#define CACHE_SHOW(name, type, event)\t\t\t\t\t\\\n+static int fme_iperf_get_cache_##name(struct ifpga_fme_hw *fme,\t\t\\\n+\t\t\t\t\tu64 *counter)\t\t\t\\\n+{\t\t\t\t\t\t\t\t\t\\\n+\t*counter = read_cache_counter(fme, type, event);\t\t\\\n+\treturn 0;\t\t\t\t\t\t\t\\\n+}\n+\n+CACHE_SHOW(read_hit, CACHE_CHANNEL_RD, IPERF_CACHE_RD_HIT);\n+CACHE_SHOW(read_miss, CACHE_CHANNEL_RD, IPERF_CACHE_RD_MISS);\n+CACHE_SHOW(write_hit, CACHE_CHANNEL_WR, IPERF_CACHE_WR_HIT);\n+CACHE_SHOW(write_miss, CACHE_CHANNEL_WR, IPERF_CACHE_WR_MISS);\n+CACHE_SHOW(hold_request, CACHE_CHANNEL_RD, IPERF_CACHE_HOLD_REQ);\n+CACHE_SHOW(tx_req_stall, CACHE_CHANNEL_RD, 
IPERF_CACHE_TX_REQ_STALL);\n+CACHE_SHOW(rx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_RX_REQ_STALL);\n+CACHE_SHOW(rx_eviction, CACHE_CHANNEL_RD, IPERF_CACHE_EVICTIONS);\n+CACHE_SHOW(data_write_port_contention, CACHE_CHANNEL_WR,\n+\t   IPERF_CACHE_DATA_WR_PORT_CONTEN);\n+CACHE_SHOW(tag_write_port_contention, CACHE_CHANNEL_WR,\n+\t   IPERF_CACHE_TAG_WR_PORT_CONTEN);\n+\n+static int fme_iperf_get_vtd_freeze(struct ifpga_fme_hw *fme, u64 *freeze)\n+{\n+\tstruct feature_fme_ifpmon_vtd_ctl ctl;\n+\tstruct feature_fme_iperf *iperf;\n+\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\tctl.csr = readq(&iperf->vtd_ctl);\n+\t*freeze = (u64)ctl.freeze;\n+\n+\treturn 0;\n+}\n+\n+static int fme_iperf_set_vtd_freeze(struct ifpga_fme_hw *fme, u64 freeze)\n+{\n+\tstruct feature_fme_ifpmon_vtd_ctl ctl;\n+\tstruct feature_fme_iperf *iperf;\n+\tbool state;\n+\n+\tstate = !!freeze;\n+\n+\tspinlock_lock(&fme->lock);\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\tctl.csr = readq(&iperf->vtd_ctl);\n+\tctl.freeze = state;\n+\twriteq(ctl.csr, &iperf->vtd_ctl);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+static u64 read_iommu_sip_counter(struct ifpga_fme_hw *fme,\n+\t\t\t\t  enum iperf_vtd_sip_events event)\n+{\n+\tstruct feature_fme_ifpmon_vtd_sip_ctl sip_ctl;\n+\tstruct feature_fme_ifpmon_vtd_sip_ctr sip_ctr;\n+\tstruct feature_fme_iperf *iperf;\n+\tu64 counter;\n+\n+\tspinlock_lock(&fme->lock);\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\tsip_ctl.csr = readq(&iperf->vtd_sip_ctl);\n+\tsip_ctl.vtd_evtcode = event;\n+\twriteq(sip_ctl.csr, &iperf->vtd_sip_ctl);\n+\n+\tsip_ctr.event_code = event;\n+\n+\tif (fpga_wait_register_field(event_code, sip_ctr,\n+\t\t\t\t     &iperf->vtd_sip_ctr, IPERF_TIMEOUT, 1)) {\n+\t\tdev_err(fme, \"timeout, unmatched VTd SIP event type in counter 
registers\\n\");\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\tsip_ctr.csr = readq(&iperf->vtd_sip_ctr);\n+\tcounter = sip_ctr.vtd_counter;\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn counter;\n+}\n+\n+#define VTD_SIP_SHOW(name, event)\t\t\t\t\t\\\n+static int fme_iperf_get_vtd_sip_##name(struct ifpga_fme_hw *fme,\t\\\n+\t\t\t\t\t\tu64 *counter)\t\t\\\n+{\t\t\t\t\t\t\t\t\t\\\n+\t*counter = read_iommu_sip_counter(fme, event);\t\t\t\\\n+\treturn 0;\t\t\t\t\t\t\t\\\n+}\n+\n+VTD_SIP_SHOW(iotlb_4k_hit, IPERF_VTD_SIP_IOTLB_4K_HIT);\n+VTD_SIP_SHOW(iotlb_2m_hit, IPERF_VTD_SIP_IOTLB_2M_HIT);\n+VTD_SIP_SHOW(iotlb_1g_hit, IPERF_VTD_SIP_IOTLB_1G_HIT);\n+VTD_SIP_SHOW(slpwc_l3_hit, IPERF_VTD_SIP_SLPWC_L3_HIT);\n+VTD_SIP_SHOW(slpwc_l4_hit, IPERF_VTD_SIP_SLPWC_L4_HIT);\n+VTD_SIP_SHOW(rcc_hit, IPERF_VTD_SIP_RCC_HIT);\n+VTD_SIP_SHOW(iotlb_4k_miss, IPERF_VTD_SIP_IOTLB_4K_MISS);\n+VTD_SIP_SHOW(iotlb_2m_miss, IPERF_VTD_SIP_IOTLB_2M_MISS);\n+VTD_SIP_SHOW(iotlb_1g_miss, IPERF_VTD_SIP_IOTLB_1G_MISS);\n+VTD_SIP_SHOW(slpwc_l3_miss, IPERF_VTD_SIP_SLPWC_L3_MISS);\n+VTD_SIP_SHOW(slpwc_l4_miss, IPERF_VTD_SIP_SLPWC_L4_MISS);\n+VTD_SIP_SHOW(rcc_miss, IPERF_VTD_SIP_RCC_MISS);\n+\n+static u64 read_iommu_counter(struct ifpga_fme_hw *fme, u8 port_id,\n+\t\t\t      enum iperf_vtd_events base_event)\n+{\n+\tstruct feature_fme_ifpmon_vtd_ctl ctl;\n+\tstruct feature_fme_ifpmon_vtd_ctr ctr;\n+\tstruct feature_fme_iperf *iperf;\n+\tenum iperf_vtd_events event = base_event + port_id;\n+\tu64 counter;\n+\n+\tspinlock_lock(&fme->lock);\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\tctl.csr = readq(&iperf->vtd_ctl);\n+\tctl.vtd_evtcode = event;\n+\twriteq(ctl.csr, &iperf->vtd_ctl);\n+\n+\tctr.event_code = event;\n+\n+\tif (fpga_wait_register_field(event_code, ctr,\n+\t\t\t\t     &iperf->vtd_ctr, IPERF_TIMEOUT, 1)) {\n+\t\tdev_err(fme, \"timeout, unmatched VTd event type in counter 
registers.\\n\");\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\tctr.csr = readq(&iperf->vtd_ctr);\n+\tcounter = ctr.vtd_counter;\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn counter;\n+}\n+\n+#define VTD_PORT_SHOW(name, base_event)\t\t\t\t\t\\\n+static int fme_iperf_get_vtd_port_##name(struct ifpga_fme_hw *fme,\t\\\n+\t\t\t\tu8 port_id, u64 *counter)\t\t\\\n+{\t\t\t\t\t\t\t\t\t\\\n+\t*counter = read_iommu_counter(fme, port_id, base_event);\t\\\n+\treturn 0;\t\t\t\t\t\t\t\\\n+}\n+\n+VTD_PORT_SHOW(read_transaction, IPERF_VTD_AFU_MEM_RD_TRANS);\n+VTD_PORT_SHOW(write_transaction, IPERF_VTD_AFU_MEM_WR_TRANS);\n+VTD_PORT_SHOW(devtlb_read_hit, IPERF_VTD_AFU_DEVTLB_RD_HIT);\n+VTD_PORT_SHOW(devtlb_write_hit, IPERF_VTD_AFU_DEVTLB_WR_HIT);\n+VTD_PORT_SHOW(devtlb_4k_fill, IPERF_VTD_DEVTLB_4K_FILL);\n+VTD_PORT_SHOW(devtlb_2m_fill, IPERF_VTD_DEVTLB_2M_FILL);\n+VTD_PORT_SHOW(devtlb_1g_fill, IPERF_VTD_DEVTLB_1G_FILL);\n+\n+static bool fabric_pobj_is_enabled(u8 port_id, struct feature_fme_iperf *iperf)\n+{\n+\tstruct feature_fme_ifpmon_fab_ctl ctl;\n+\n+\tctl.csr = readq(&iperf->fab_ctl);\n+\n+\tif (ctl.port_filter == FAB_DISABLE_FILTER)\n+\t\treturn port_id == PERF_OBJ_ROOT_ID;\n+\n+\treturn port_id == ctl.port_id;\n+}\n+\n+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,\n+\t\t\t       enum iperf_fab_events fab_event)\n+{\n+\tstruct feature_fme_ifpmon_fab_ctl ctl;\n+\tstruct feature_fme_ifpmon_fab_ctr ctr;\n+\tstruct feature_fme_iperf *iperf;\n+\tu64 counter = 0;\n+\n+\tspinlock_lock(&fme->lock);\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\n+\t/* if it is disabled, force the counter to return zero. 
*/\n+\tif (!fabric_pobj_is_enabled(port_id, iperf))\n+\t\tgoto exit;\n+\n+\tctl.csr = readq(&iperf->fab_ctl);\n+\tctl.fab_evtcode = fab_event;\n+\twriteq(ctl.csr, &iperf->fab_ctl);\n+\n+\tctr.event_code = fab_event;\n+\n+\tif (fpga_wait_register_field(event_code, ctr,\n+\t\t\t\t     &iperf->fab_ctr, IPERF_TIMEOUT, 1)) {\n+\t\tdev_err(fme, \"timeout, unmatched VTd event type in counter registers.\\n\");\n+\t\tspinlock_unlock(&fme->lock);\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\tctr.csr = readq(&iperf->fab_ctr);\n+\tcounter = ctr.fab_cnt;\n+exit:\n+\tspinlock_unlock(&fme->lock);\n+\treturn counter;\n+}\n+\n+#define FAB_PORT_SHOW(name, event)\t\t\t\t\t\\\n+static int fme_iperf_get_fab_port_##name(struct ifpga_fme_hw *fme,\t\\\n+\t\t\t\tu8 port_id, u64 *counter)\t\t\\\n+{\t\t\t\t\t\t\t\t\t\\\n+\t*counter = read_fabric_counter(fme, port_id, event);\t\t\\\n+\treturn 0;\t\t\t\t\t\t\t\\\n+}\n+\n+FAB_PORT_SHOW(pcie0_read, IPERF_FAB_PCIE0_RD);\n+FAB_PORT_SHOW(pcie0_write, IPERF_FAB_PCIE0_WR);\n+FAB_PORT_SHOW(pcie1_read, IPERF_FAB_PCIE1_RD);\n+FAB_PORT_SHOW(pcie1_write, IPERF_FAB_PCIE1_WR);\n+FAB_PORT_SHOW(upi_read, IPERF_FAB_UPI_RD);\n+FAB_PORT_SHOW(upi_write, IPERF_FAB_UPI_WR);\n+FAB_PORT_SHOW(mmio_read, IPERF_FAB_MMIO_RD);\n+FAB_PORT_SHOW(mmio_write, IPERF_FAB_MMIO_WR);\n+\n+static int fme_iperf_get_fab_port_enable(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t u8 port_id, u64 *enable)\n+{\n+\tstruct feature_fme_iperf *iperf;\n+\tint status;\n+\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\n+\tstatus = fabric_pobj_is_enabled(port_id, iperf);\n+\t*enable = (u64)status;\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * If enable one port or all port event counter in fabric, other\n+ * fabric event counter originally enabled will be disable automatically.\n+ */\n+static int fme_iperf_set_fab_port_enable(struct ifpga_fme_hw *fme,\n+\t\t\t\t\t u8 port_id, u64 enable)\n+{\n+\tstruct feature_fme_ifpmon_fab_ctl ctl;\n+\tstruct feature_fme_iperf 
*iperf;\n+\tbool state;\n+\n+\tstate = !!enable;\n+\n+\tif (!state)\n+\t\treturn -EINVAL;\n+\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\n+\t/* if it is already enabled. */\n+\tif (fabric_pobj_is_enabled(port_id, iperf))\n+\t\treturn 0;\n+\n+\tspinlock_lock(&fme->lock);\n+\tctl.csr = readq(&iperf->fab_ctl);\n+\tif (port_id == PERF_OBJ_ROOT_ID) {\n+\t\tctl.port_filter = FAB_DISABLE_FILTER;\n+\t} else {\n+\t\tctl.port_filter = FAB_ENABLE_FILTER;\n+\t\tctl.port_id = port_id;\n+\t}\n+\n+\twriteq(ctl.csr, &iperf->fab_ctl);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+static int fme_iperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)\n+{\n+\tstruct feature_fme_iperf *iperf;\n+\tstruct feature_fme_ifpmon_fab_ctl ctl;\n+\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\tctl.csr = readq(&iperf->fab_ctl);\n+\t*freeze = (u64)ctl.freeze;\n+\n+\treturn 0;\n+}\n+\n+static int fme_iperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)\n+{\n+\tstruct feature_fme_iperf *iperf;\n+\tstruct feature_fme_ifpmon_fab_ctl ctl;\n+\tbool state;\n+\n+\tstate = !!freeze;\n+\n+\tspinlock_lock(&fme->lock);\n+\tiperf = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\tFME_FEATURE_ID_GLOBAL_IPERF);\n+\tctl.csr = readq(&iperf->fab_ctl);\n+\tctl.freeze = state;\n+\twriteq(ctl.csr, &iperf->fab_ctl);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+#define PERF_MAX_PORT_NUM\t1\n+#define FME_IPERF_CAP_IOMMU\t0x1\n+\n+static int fme_global_iperf_init(struct feature *feature)\n+{\n+\tstruct ifpga_fme_hw *fme;\n+\tstruct feature_fme_header *fme_hdr;\n+\tstruct feature_fme_capability fme_capability;\n+\n+\tdev_info(NULL, \"FME global_iperf Init.\\n\");\n+\n+\tfme = (struct ifpga_fme_hw *)feature->parent;\n+\tfme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);\n+\n+\t/* check if iommu is not supported on this device. 
*/\n+\tfme_capability.csr = readq(&fme_hdr->capability);\n+\tdev_info(NULL, \"FME HEAD fme_capability %llx.\\n\",\n+\t\t (unsigned long long)fme_hdr->capability.csr);\n+\n+\tif (fme_capability.iommu_support)\n+\t\tfeature->cap |= FME_IPERF_CAP_IOMMU;\n+\n+\treturn 0;\n+}\n+\n+static void fme_global_iperf_uinit(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"FME global_iperf UInit.\\n\");\n+}\n+\n+static int fme_iperf_root_get_prop(struct feature *feature,\n+\t\t\t\t   struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tif (sub != PERF_PROP_SUB_UNUSED)\n+\t\treturn -ENOENT;\n+\n+\tswitch (id) {\n+\tcase 0x1: /* CLOCK */\n+\t\treturn fme_iperf_get_clock(fme, &prop->data);\n+\tcase 0x2: /* REVISION */\n+\t\treturn fme_iperf_get_revision(fme, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_iperf_cache_get_prop(struct feature *feature,\n+\t\t\t\t    struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tif (sub != PERF_PROP_SUB_UNUSED)\n+\t\treturn -ENOENT;\n+\n+\tswitch (id) {\n+\tcase 0x1: /* FREEZE */\n+\t\treturn fme_iperf_get_cache_freeze(fme, &prop->data);\n+\tcase 0x2: /* READ_HIT */\n+\t\treturn fme_iperf_get_cache_read_hit(fme, &prop->data);\n+\tcase 0x3: /* READ_MISS */\n+\t\treturn fme_iperf_get_cache_read_miss(fme, &prop->data);\n+\tcase 0x4: /* WRITE_HIT */\n+\t\treturn fme_iperf_get_cache_write_hit(fme, &prop->data);\n+\tcase 0x5: /* WRITE_MISS */\n+\t\treturn fme_iperf_get_cache_write_miss(fme, &prop->data);\n+\tcase 0x6: /* HOLD_REQUEST */\n+\t\treturn fme_iperf_get_cache_hold_request(fme, &prop->data);\n+\tcase 0x7: /* TX_REQ_STALL */\n+\t\treturn fme_iperf_get_cache_tx_req_stall(fme, &prop->data);\n+\tcase 0x8: /* RX_REQ_STALL */\n+\t\treturn 
fme_iperf_get_cache_rx_req_stall(fme, &prop->data);\n+\tcase 0x9: /* RX_EVICTION */\n+\t\treturn fme_iperf_get_cache_rx_eviction(fme, &prop->data);\n+\tcase 0xa: /* DATA_WRITE_PORT_CONTENTION */\n+\t\treturn fme_iperf_get_cache_data_write_port_contention(fme,\n+\t\t\t\t\t\t\t&prop->data);\n+\tcase 0xb: /* TAG_WRITE_PORT_CONTENTION */\n+\t\treturn fme_iperf_get_cache_tag_write_port_contention(fme,\n+\t\t\t\t\t\t\t&prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_iperf_vtd_root_get_prop(struct feature *feature,\n+\t\t\t\t       struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tswitch (id) {\n+\tcase 0x1: /* FREEZE */\n+\t\treturn fme_iperf_get_vtd_freeze(fme, &prop->data);\n+\tcase 0x2: /* IOTLB_4K_HIT */\n+\t\treturn fme_iperf_get_vtd_sip_iotlb_4k_hit(fme, &prop->data);\n+\tcase 0x3: /* IOTLB_2M_HIT */\n+\t\treturn fme_iperf_get_vtd_sip_iotlb_2m_hit(fme, &prop->data);\n+\tcase 0x4: /* IOTLB_1G_HIT */\n+\t\treturn fme_iperf_get_vtd_sip_iotlb_1g_hit(fme, &prop->data);\n+\tcase 0x5: /* SLPWC_L3_HIT */\n+\t\treturn fme_iperf_get_vtd_sip_slpwc_l3_hit(fme, &prop->data);\n+\tcase 0x6: /* SLPWC_L4_HIT */\n+\t\treturn fme_iperf_get_vtd_sip_slpwc_l4_hit(fme, &prop->data);\n+\tcase 0x7: /* RCC_HIT */\n+\t\treturn fme_iperf_get_vtd_sip_rcc_hit(fme, &prop->data);\n+\tcase 0x8: /* IOTLB_4K_MISS */\n+\t\treturn fme_iperf_get_vtd_sip_iotlb_4k_miss(fme, &prop->data);\n+\tcase 0x9: /* IOTLB_2M_MISS */\n+\t\treturn fme_iperf_get_vtd_sip_iotlb_2m_miss(fme, &prop->data);\n+\tcase 0xa: /* IOTLB_1G_MISS */\n+\t\treturn fme_iperf_get_vtd_sip_iotlb_1g_miss(fme, &prop->data);\n+\tcase 0xb: /* SLPWC_L3_MISS */\n+\t\treturn fme_iperf_get_vtd_sip_slpwc_l3_miss(fme, &prop->data);\n+\tcase 0xc: /* SLPWC_L4_MISS */\n+\t\treturn fme_iperf_get_vtd_sip_slpwc_l4_miss(fme, &prop->data);\n+\tcase 0xd: /* RCC_MISS */\n+\t\treturn fme_iperf_get_vtd_sip_rcc_miss(fme, &prop->data);\n+\t}\n+\n+\treturn 
-ENOENT;\n+}\n+\n+static int fme_iperf_vtd_sub_get_prop(struct feature *feature,\n+\t\t\t\t      struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\n+\tif (sub > PERF_MAX_PORT_NUM)\n+\t\treturn -ENOENT;\n+\n+\tswitch (id) {\n+\tcase 0xe: /* READ_TRANSACTION */\n+\t\treturn fme_iperf_get_vtd_port_read_transaction(fme, sub,\n+\t\t\t\t\t\t\t       &prop->data);\n+\tcase 0xf: /* WRITE_TRANSACTION */\n+\t\treturn fme_iperf_get_vtd_port_write_transaction(fme, sub,\n+\t\t\t\t\t\t\t\t&prop->data);\n+\tcase 0x10: /* DEVTLB_READ_HIT */\n+\t\treturn fme_iperf_get_vtd_port_devtlb_read_hit(fme, sub,\n+\t\t\t\t\t\t\t      &prop->data);\n+\tcase 0x11: /* DEVTLB_WRITE_HIT */\n+\t\treturn fme_iperf_get_vtd_port_devtlb_write_hit(fme, sub,\n+\t\t\t\t\t\t\t       &prop->data);\n+\tcase 0x12: /* DEVTLB_4K_FILL */\n+\t\treturn fme_iperf_get_vtd_port_devtlb_4k_fill(fme, sub,\n+\t\t\t\t\t\t\t     &prop->data);\n+\tcase 0x13: /* DEVTLB_2M_FILL */\n+\t\treturn fme_iperf_get_vtd_port_devtlb_2m_fill(fme, sub,\n+\t\t\t\t\t\t\t     &prop->data);\n+\tcase 0x14: /* DEVTLB_1G_FILL */\n+\t\treturn fme_iperf_get_vtd_port_devtlb_1g_fill(fme, sub,\n+\t\t\t\t\t\t\t     &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_iperf_vtd_get_prop(struct feature *feature,\n+\t\t\t\t  struct feature_prop *prop)\n+{\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\n+\tif (sub == PERF_PROP_SUB_UNUSED)\n+\t\treturn fme_iperf_vtd_root_get_prop(feature, prop);\n+\n+\treturn fme_iperf_vtd_sub_get_prop(feature, prop);\n+}\n+\n+static int fme_iperf_fab_get_prop(struct feature *feature,\n+\t\t\t\t  struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\t/* Other properties are present for both top and sub levels */\n+\tswitch (id) {\n+\tcase 0x1: /* 
FREEZE */\n+\t\tif (sub != PERF_PROP_SUB_UNUSED)\n+\t\t\treturn -ENOENT;\n+\t\treturn fme_iperf_get_fab_freeze(fme, &prop->data);\n+\tcase 0x2: /* PCIE0_READ */\n+\t\treturn fme_iperf_get_fab_port_pcie0_read(fme, sub,\n+\t\t\t\t\t\t\t &prop->data);\n+\tcase 0x3: /* PCIE0_WRITE */\n+\t\treturn fme_iperf_get_fab_port_pcie0_write(fme, sub,\n+\t\t\t\t\t\t\t  &prop->data);\n+\tcase 0x4: /* PCIE1_READ */\n+\t\treturn fme_iperf_get_fab_port_pcie1_read(fme, sub,\n+\t\t\t\t\t\t\t &prop->data);\n+\tcase 0x5: /* PCIE1_WRITE */\n+\t\treturn fme_iperf_get_fab_port_pcie1_write(fme, sub,\n+\t\t\t\t\t\t\t  &prop->data);\n+\tcase 0x6: /* UPI_READ */\n+\t\treturn fme_iperf_get_fab_port_upi_read(fme, sub,\n+\t\t\t\t\t\t       &prop->data);\n+\tcase 0x7: /* UPI_WRITE */\n+\t\treturn fme_iperf_get_fab_port_upi_write(fme, sub,\n+\t\t\t\t\t\t\t&prop->data);\n+\tcase 0x8: /* MMIO_READ */\n+\t\treturn fme_iperf_get_fab_port_mmio_read(fme, sub,\n+\t\t\t\t\t\t\t&prop->data);\n+\tcase 0x9: /* MMIO_WRITE */\n+\t\treturn fme_iperf_get_fab_port_mmio_write(fme, sub,\n+\t\t\t\t\t\t\t &prop->data);\n+\tcase 0xa: /* ENABLE */\n+\t\treturn fme_iperf_get_fab_port_enable(fme, sub, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_global_iperf_get_prop(struct feature *feature,\n+\t\t\t\t     struct feature_prop *prop)\n+{\n+\tu8 top = GET_FIELD(PROP_TOP, prop->prop_id);\n+\n+\tswitch (top) {\n+\tcase PERF_PROP_TOP_CACHE:\n+\t\treturn fme_iperf_cache_get_prop(feature, prop);\n+\tcase PERF_PROP_TOP_VTD:\n+\t\treturn fme_iperf_vtd_get_prop(feature, prop);\n+\tcase PERF_PROP_TOP_FAB:\n+\t\treturn fme_iperf_fab_get_prop(feature, prop);\n+\tcase PERF_PROP_TOP_UNUSED:\n+\t\treturn fme_iperf_root_get_prop(feature, prop);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_iperf_cache_set_prop(struct feature *feature,\n+\t\t\t\t    struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\tu16 id = GET_FIELD(PROP_ID, 
prop->prop_id);\n+\n+\tif (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */\n+\t\treturn fme_iperf_set_cache_freeze(fme, prop->data);\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_iperf_vtd_set_prop(struct feature *feature,\n+\t\t\t\t  struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tif (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */\n+\t\treturn fme_iperf_set_vtd_freeze(fme, prop->data);\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_iperf_fab_set_prop(struct feature *feature,\n+\t\t\t\t  struct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme = feature->parent;\n+\tu8 sub = GET_FIELD(PROP_SUB, prop->prop_id);\n+\tu16 id = GET_FIELD(PROP_ID, prop->prop_id);\n+\n+\tswitch (id) {\n+\tcase 0x1: /* FREEZE */\n+\t\tif (sub != PERF_PROP_SUB_UNUSED)\n+\t\t\treturn -ENOENT;\n+\t\treturn fme_iperf_set_fab_freeze(fme, prop->data);\n+\tcase 0xa: /* ENABLE */\n+\t\treturn fme_iperf_set_fab_port_enable(fme, sub, prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int fme_global_iperf_set_prop(struct feature *feature,\n+\t\t\t\t     struct feature_prop *prop)\n+{\n+\tu8 top = GET_FIELD(PROP_TOP, prop->prop_id);\n+\n+\tswitch (top) {\n+\tcase PERF_PROP_TOP_CACHE:\n+\t\treturn fme_iperf_cache_set_prop(feature, prop);\n+\tcase PERF_PROP_TOP_VTD:\n+\t\treturn fme_iperf_vtd_set_prop(feature, prop);\n+\tcase PERF_PROP_TOP_FAB:\n+\t\treturn fme_iperf_fab_set_prop(feature, prop);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+struct feature_ops fme_global_iperf_ops = {\n+\t.init = fme_global_iperf_init,\n+\t.uinit = fme_global_iperf_uinit,\n+\t.get_prop = fme_global_iperf_get_prop,\n+\t.set_prop = fme_global_iperf_set_prop,\n+};\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c\nnew file mode 100644\nindex 0000000..6192fa7\n--- /dev/null\n+++ 
b/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c\n@@ -0,0 +1,372 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include \"ifpga_feature_dev.h\"\n+\n+#if defined(RTE_ARCH_X86_64)\n+static inline void copy512(const void *src, void *dst)\n+{\n+\tasm volatile(\"vmovdqu64 (%0), %%zmm0;\"\n+\t\t     \"vmovntdq %%zmm0, (%1);\"\n+\t\t     :\n+\t\t     : \"r\"(src), \"r\"(dst));\n+}\n+#else\n+static inline void copy512(const void *src, void *dst)\n+{\n+\tUNUSED(src);\n+\tUNUSED(dst);\n+\tWARN_ON(1);\n+}\n+#endif\n+\n+static u64\n+pr_err_handle(struct feature_fme_pr *fme_pr)\n+{\n+\tstruct feature_fme_pr_status fme_pr_status;\n+\tunsigned long err_code;\n+\tu64 fme_pr_error;\n+\tint i;\n+\n+\tfme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);\n+\tif (!fme_pr_status.pr_status)\n+\t\treturn 0;\n+\n+\terr_code = readq(&fme_pr->ccip_fme_pr_err);\n+\tfme_pr_error = err_code;\n+\n+\tfor (i = 0; i < PR_MAX_ERR_NUM; i++) {\n+\t\tif (err_code & (1 << i))\n+\t\t\tdev_info(NULL, \"%s\\n\", pr_err_msg[i]);\n+\t}\n+\n+\twriteq(fme_pr_error, &fme_pr->ccip_fme_pr_err);\n+\treturn fme_pr_error;\n+}\n+\n+static int fme_pr_write_init(struct ifpga_fme_hw *fme_dev,\n+\t\t\t     struct fpga_pr_info *info)\n+{\n+\tstruct feature_fme_pr *fme_pr;\n+\tstruct feature_fme_pr_ctl fme_pr_ctl;\n+\tstruct feature_fme_pr_status fme_pr_status;\n+\n+\tfme_pr = get_fme_feature_ioaddr_by_index(fme_dev,\n+\t\t\t\t\t\t FME_FEATURE_ID_PR_MGMT);\n+\tif (!fme_pr)\n+\t\treturn -EINVAL;\n+\n+\tif (info->flags != FPGA_MGR_PARTIAL_RECONFIG)\n+\t\treturn -EINVAL;\n+\n+\tdev_info(fme_dev, \"resetting PR before initiated PR\\n\");\n+\n+\tfme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);\n+\tfme_pr_ctl.pr_reset = 1;\n+\twriteq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);\n+\n+\tfme_pr_ctl.pr_reset_ack = 1;\n+\n+\tif (fpga_wait_register_field(pr_reset_ack, fme_pr_ctl,\n+\t\t\t\t     &fme_pr->ccip_fme_pr_control,\n+\t\t\t\t     PR_WAIT_TIMEOUT, 1)) 
{\n+\t\tdev_err(fme_dev, \"maximum PR timeout\\n\");\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\tfme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);\n+\tfme_pr_ctl.pr_reset = 0;\n+\twriteq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);\n+\n+\tdev_info(fme_dev, \"waiting for PR resource in HW to be initialized and ready\\n\");\n+\n+\tfme_pr_status.pr_host_status = PR_HOST_STATUS_IDLE;\n+\n+\tif (fpga_wait_register_field(pr_host_status, fme_pr_status,\n+\t\t\t\t     &fme_pr->ccip_fme_pr_status,\n+\t\t\t\t     PR_WAIT_TIMEOUT, 1)) {\n+\t\tdev_err(fme_dev, \"maximum PR timeout\\n\");\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\tdev_info(fme_dev, \"check if have any previous PR error\\n\");\n+\tpr_err_handle(fme_pr);\n+\treturn 0;\n+}\n+\n+static int fme_pr_write(struct ifpga_fme_hw *fme_dev,\n+\t\t\tint port_id, const char *buf, size_t count,\n+\t\t\tstruct fpga_pr_info *info)\n+{\n+\tstruct feature_fme_pr *fme_pr;\n+\tstruct feature_fme_pr_ctl fme_pr_ctl;\n+\tstruct feature_fme_pr_status fme_pr_status;\n+\tstruct feature_fme_pr_data fme_pr_data;\n+\tint delay, pr_credit;\n+\tint ret = 0;\n+\n+\tfme_pr = get_fme_feature_ioaddr_by_index(fme_dev,\n+\t\t\t\t\t\t FME_FEATURE_ID_PR_MGMT);\n+\tif (!fme_pr)\n+\t\treturn -EINVAL;\n+\n+\tdev_info(fme_dev, \"set PR port ID and start request\\n\");\n+\n+\tfme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);\n+\tfme_pr_ctl.pr_regionid = port_id;\n+\tfme_pr_ctl.pr_start_req = 1;\n+\twriteq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);\n+\n+\tdev_info(fme_dev, \"pushing data from bitstream to HW\\n\");\n+\n+\tfme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);\n+\tpr_credit = fme_pr_status.pr_credit;\n+\n+\twhile (count > 0) {\n+\t\tdelay = 0;\n+\t\twhile (pr_credit <= 1) {\n+\t\t\tif (delay++ > PR_WAIT_TIMEOUT) {\n+\t\t\t\tdev_err(fme_dev, \"maximum try\\n\");\n+\n+\t\t\t\tinfo->pr_err = pr_err_handle(fme_pr);\n+\t\t\t\treturn info->pr_err ? 
-EIO : -ETIMEDOUT;\n+\t\t\t}\n+\t\t\tudelay(1);\n+\n+\t\t\tfme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);\n+\t\t\tpr_credit = fme_pr_status.pr_credit;\n+\t\t};\n+\n+\t\tif (count >= fme_dev->pr_bandwidth) {\n+\t\t\tswitch (fme_dev->pr_bandwidth) {\n+\t\t\tcase 4:\n+\t\t\t\tfme_pr_data.rsvd = 0;\n+\t\t\t\tfme_pr_data.pr_data_raw = *((const u32 *)buf);\n+\t\t\t\twriteq(fme_pr_data.csr,\n+\t\t\t\t       &fme_pr->ccip_fme_pr_data);\n+\t\t\t\tbreak;\n+\t\t\tcase 64:\n+\t\t\t\tcopy512(buf, &fme_pr->fme_pr_data1);\n+\t\t\t\tbreak;\n+\t\t\tdefault:\n+\t\t\t\tret = -EFAULT;\n+\t\t\t\tgoto done;\n+\t\t\t}\n+\n+\t\t\tbuf += fme_dev->pr_bandwidth;\n+\t\t\tcount -= fme_dev->pr_bandwidth;\n+\t\t\tpr_credit--;\n+\t\t} else {\n+\t\t\tWARN_ON(1);\n+\t\t\treturn -EINVAL;\n+\t\t\tgoto done;\n+\t\t}\n+\t}\n+\n+done:\n+\treturn ret;\n+}\n+\n+static int fme_pr_write_complete(struct ifpga_fme_hw *fme_dev,\n+\t\t\t\t struct fpga_pr_info *info)\n+{\n+\tstruct feature_fme_pr *fme_pr;\n+\tstruct feature_fme_pr_ctl fme_pr_ctl;\n+\n+\tfme_pr = get_fme_feature_ioaddr_by_index(fme_dev,\n+\t\t\t\t\t\t FME_FEATURE_ID_PR_MGMT);\n+\n+\tfme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);\n+\tfme_pr_ctl.pr_push_complete = 1;\n+\twriteq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);\n+\n+\tdev_info(fme_dev, \"green bitstream push complete\\n\");\n+\tdev_info(fme_dev, \"waiting for HW to release PR resource\\n\");\n+\n+\tfme_pr_ctl.pr_start_req = 0;\n+\n+\tif (fpga_wait_register_field(pr_start_req, fme_pr_ctl,\n+\t\t\t\t     &fme_pr->ccip_fme_pr_control,\n+\t\t\t\t     PR_WAIT_TIMEOUT, 1)) {\n+\t\tprintf(\"maximum try.\\n\");\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\tdev_info(fme_dev, \"PR operation complete, checking status\\n\");\n+\tinfo->pr_err = pr_err_handle(fme_pr);\n+\tif (info->pr_err)\n+\t\treturn -EIO;\n+\n+\tdev_info(fme_dev, \"PR done successfully\\n\");\n+\treturn 0;\n+}\n+\n+static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev,\n+\t\t\t    struct fpga_pr_info *info, const char 
*buf,\n+\t\t\t    size_t count)\n+{\n+\tint ret;\n+\n+\tinfo->state = FPGA_PR_STATE_WRITE_INIT;\n+\tret = fme_pr_write_init(fme_dev, info);\n+\tif (ret) {\n+\t\tdev_err(fme_dev, \"Error preparing FPGA for writing\\n\");\n+\t\tinfo->state = FPGA_PR_STATE_WRITE_INIT_ERR;\n+\t\treturn ret;\n+\t}\n+\n+\t/*\n+\t * Write the FPGA image to the FPGA.\n+\t */\n+\tinfo->state = FPGA_PR_STATE_WRITE;\n+\tret = fme_pr_write(fme_dev, info->port_id, buf, count, info);\n+\tif (ret) {\n+\t\tdev_err(fme_dev, \"Error while writing image data to FPGA\\n\");\n+\t\tinfo->state = FPGA_PR_STATE_WRITE_ERR;\n+\t\treturn ret;\n+\t}\n+\n+\t/*\n+\t * After all the FPGA image has been written, do the device specific\n+\t * steps to finish and set the FPGA into operating mode.\n+\t */\n+\tinfo->state = FPGA_PR_STATE_WRITE_COMPLETE;\n+\tret = fme_pr_write_complete(fme_dev, info);\n+\tif (ret) {\n+\t\tdev_err(fme_dev, \"Error after writing image data to FPGA\\n\");\n+\t\tinfo->state = FPGA_PR_STATE_WRITE_COMPLETE_ERR;\n+\t\treturn ret;\n+\t}\n+\tinfo->state = FPGA_PR_STATE_DONE;\n+\n+\treturn 0;\n+}\n+\n+static int fme_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,\n+\t\t  u64 *status)\n+{\n+\tstruct feature_fme_header *fme_hdr;\n+\tstruct feature_fme_capability fme_capability;\n+\tstruct ifpga_fme_hw *fme = &hw->fme;\n+\tstruct fpga_pr_info info;\n+\tstruct ifpga_port_hw *port;\n+\tint ret = 0;\n+\n+\tif (!buffer || size == 0)\n+\t\treturn -EINVAL;\n+\tif (fme->state != IFPGA_FME_IMPLEMENTED)\n+\t\treturn -EINVAL;\n+\n+\t/*\n+\t * Padding extra zeros to align PR buffer with PR bandwidth, HW will\n+\t * ignore these zeros automatically.\n+\t */\n+\tsize = ALIGN(size, fme->pr_bandwidth);\n+\n+\t/* get fme header region */\n+\tfme_hdr = get_fme_feature_ioaddr_by_index(fme,\n+\t\t\t\t\t\t  FME_FEATURE_ID_HEADER);\n+\tif (!fme_hdr)\n+\t\treturn -EINVAL;\n+\n+\t/* check port id */\n+\tfme_capability.csr = readq(&fme_hdr->capability);\n+\tif (port_id >= fme_capability.num_ports) 
{\n+\t\tdev_err(fme,  \"port number more than maximum\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tmemset(&info, 0, sizeof(struct fpga_pr_info));\n+\tinfo.flags = FPGA_MGR_PARTIAL_RECONFIG;\n+\tinfo.port_id = port_id;\n+\n+\tspinlock_lock(&fme->lock);\n+\n+\t/* get port device by port_id */\n+\tport = &hw->port[port_id];\n+\n+\t/* Disable Port before PR */\n+\tfpga_port_disable(port);\n+\n+\tret = fpga_pr_buf_load(fme, &info, (void *)buffer, size);\n+\n+\t*status = info.pr_err;\n+\n+\t/* Re-enable Port after PR finished */\n+\tfpga_port_enable(port);\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn ret;\n+}\n+\n+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, u64 *status)\n+{\n+\tstruct bts_header *bts_hdr;\n+\tvoid *buf;\n+\tstruct ifpga_port_hw *port;\n+\tint ret;\n+\n+\tif (!buffer || size == 0) {\n+\t\tdev_err(hw, \"invalid parameter\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tbts_hdr = (struct bts_header *)buffer;\n+\n+\tif (is_valid_bts(bts_hdr)) {\n+\t\tdev_info(hw, \"this is a valid bitsteam..\\n\");\n+\t\tsize -= (sizeof(struct bts_header) +\n+\t\t\t\t     bts_hdr->metadata_len);\n+\t\tbuf = (u8 *)buffer + sizeof(struct bts_header) +\n+\t\t\t       bts_hdr->metadata_len;\n+\t} else {\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* clean port error before do PR */\n+\tport = &hw->port[port_id];\n+\tret = port_clear_error(port);\n+\tif (ret) {\n+\t\tdev_err(hw, \"port cannot clear error\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn fme_pr(hw, port_id, buf, size, status);\n+}\n+\n+static int fme_pr_mgmt_init(struct feature *feature)\n+{\n+\tstruct feature_fme_pr *fme_pr;\n+\tstruct feature_header fme_pr_header;\n+\tstruct ifpga_fme_hw *fme;\n+\n+\tdev_info(NULL, \"FME PR MGMT Init.\\n\");\n+\n+\tfme = (struct ifpga_fme_hw *)feature->parent;\n+\n+\tfme_pr = (struct feature_fme_pr *)feature->addr;\n+\n+\tfme_pr_header.csr = readq(&fme_pr->header);\n+\tif (fme_pr_header.revision == 2) {\n+\t\tdev_info(NULL, \"using 512-bit PR\\n\");\n+\t\tfme->pr_bandwidth 
= 64;\n+\t} else {\n+\t\tdev_info(NULL, \"using 32-bit PR\\n\");\n+\t\tfme->pr_bandwidth = 4;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void fme_pr_mgmt_uinit(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"FME PR MGMT UInit.\\n\");\n+}\n+\n+struct feature_ops fme_pr_mgmt_ops = {\n+\t.init = fme_pr_mgmt_init,\n+\t.uinit = fme_pr_mgmt_uinit,\n+};\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_hw.h b/drivers/raw/ifpga_rawdev/base/ifpga_hw.h\nnew file mode 100644\nindex 0000000..a20520c\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_hw.h\n@@ -0,0 +1,127 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _IFPGA_HW_H_\n+#define _IFPGA_HW_H_\n+\n+#include \"ifpga_defines.h\"\n+#include \"opae_ifpga_hw_api.h\"\n+\n+enum ifpga_feature_state {\n+\tIFPGA_FEATURE_UNUSED = 0,\n+\tIFPGA_FEATURE_ATTACHED,\n+};\n+\n+struct feature_irq_ctx {\n+\tint eventfd;\n+\tint idx;\n+};\n+\n+struct feature {\n+\tenum ifpga_feature_state state;\n+\tconst char *name;\n+\tu64 id;\n+\tu8 *addr;\n+\tuint64_t phys_addr;\n+\tu32 size;\n+\tint revision;\n+\tu64 cap;\n+\tint vfio_dev_fd;\n+\tstruct feature_irq_ctx *ctx;\n+\tunsigned int ctx_num;\n+\n+\tvoid *parent;\t\t/* to parent hw data structure */\n+\n+\tstruct feature_ops *ops;/* callback to this private feature */\n+};\n+\n+struct feature_ops {\n+\tint (*init)(struct feature *feature);\n+\tvoid (*uinit)(struct feature *feature);\n+\tint (*get_prop)(struct feature *feature, struct feature_prop *prop);\n+\tint (*set_prop)(struct feature *feature, struct feature_prop *prop);\n+\tint (*set_irq)(struct feature *feature, void *irq_set);\n+};\n+\n+enum ifpga_fme_state {\n+\tIFPGA_FME_UNUSED = 0,\n+\tIFPGA_FME_IMPLEMENTED,\n+};\n+\n+struct ifpga_fme_hw {\n+\tenum ifpga_fme_state state;\n+\n+\tstruct feature sub_feature[FME_FEATURE_ID_MAX];\n+\tspinlock_t lock;\t/* protect hardware access */\n+\n+\tvoid *parent;\t\t/* pointer to ifpga_hw */\n+\n+\t/* 
provied by HEADER feature */\n+\tu32 port_num;\n+\tstruct uuid bitstream_id;\n+\tu64 bitstream_md;\n+\tsize_t pr_bandwidth;\n+\tu32 socket_id;\n+\tu32 fabric_version_id;\n+\tu32 cache_size;\n+\n+\tu32 capability;\n+};\n+\n+enum ifpga_port_state {\n+\tIFPGA_PORT_UNUSED = 0,\n+\tIFPGA_PORT_ATTACHED,\n+\tIFPGA_PORT_DETACHED,\n+};\n+\n+struct ifpga_port_hw {\n+\tenum ifpga_port_state state;\n+\n+\tstruct feature sub_feature[PORT_FEATURE_ID_MAX];\n+\tspinlock_t lock;\t/* protect access to hw */\n+\n+\tvoid *parent;\t\t/* pointer to ifpga_hw */\n+\n+\tint port_id;\t\t/* provied by HEADER feature */\n+\tstruct uuid afu_id;\t/* provied by User AFU feature */\n+\n+\tunsigned int disable_count;\n+\n+\tu32 capability;\n+\tu32 num_umsgs;\t/* The number of allocated umsgs */\n+\tu32 num_uafu_irqs;\t/* The number of uafu interrupts */\n+\tu8 *stp_addr;\n+\tu32 stp_size;\n+};\n+\n+#define AFU_MAX_REGION 1\n+\n+struct ifpga_afu_info {\n+\tstruct opae_reg_region region[AFU_MAX_REGION];\n+\tunsigned int num_regions;\n+\tunsigned int num_irqs;\n+};\n+\n+struct ifpga_hw {\n+\tstruct opae_adapter *adapter;\n+\tstruct opae_adapter_data_pci *pci_data;\n+\n+\tstruct ifpga_fme_hw fme;\n+\tstruct ifpga_port_hw port[MAX_FPGA_PORT_NUM];\n+};\n+\n+static inline bool is_ifpga_hw_pf(struct ifpga_hw *hw)\n+{\n+\treturn hw->fme.state != IFPGA_FME_UNUSED;\n+}\n+\n+static inline bool is_valid_port_id(struct ifpga_hw *hw, u32 port_id)\n+{\n+\tif (port_id >= MAX_FPGA_PORT_NUM ||\n+\t    hw->port[port_id].state != IFPGA_PORT_ATTACHED)\n+\t\treturn false;\n+\n+\treturn true;\n+}\n+#endif /* _IFPGA_HW_H_ */\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port.c b/drivers/raw/ifpga_rawdev/base/ifpga_port.c\nnew file mode 100644\nindex 0000000..8ab1bdb\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port.c\n@@ -0,0 +1,408 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include \"ifpga_feature_dev.h\"\n+\n+int port_get_prop(struct 
ifpga_port_hw *port, struct feature_prop *prop)\n+{\n+\tstruct feature *feature;\n+\n+\tif (!port)\n+\t\treturn -ENOENT;\n+\n+\tfeature = get_port_feature_by_id(port, prop->feature_id);\n+\n+\tif (feature && feature->ops && feature->ops->get_prop)\n+\t\treturn feature->ops->get_prop(feature, prop);\n+\n+\treturn -ENOENT;\n+}\n+\n+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop)\n+{\n+\tstruct feature *feature;\n+\n+\tif (!port)\n+\t\treturn -ENOENT;\n+\n+\tfeature = get_port_feature_by_id(port, prop->feature_id);\n+\n+\tif (feature && feature->ops && feature->ops->set_prop)\n+\t\treturn feature->ops->set_prop(feature, prop);\n+\n+\treturn -ENOENT;\n+}\n+\n+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set)\n+{\n+\tstruct feature *feature;\n+\n+\tif (!port)\n+\t\treturn -ENOENT;\n+\n+\tfeature = get_port_feature_by_id(port, feature_id);\n+\n+\tif (feature && feature->ops && feature->ops->set_irq)\n+\t\treturn feature->ops->set_irq(feature, irq_set);\n+\n+\treturn -ENOENT;\n+}\n+\n+static int port_get_revision(struct ifpga_port_hw *port, u64 *revision)\n+{\n+\tstruct feature_port_header *port_hdr\n+\t\t= get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t   PORT_FEATURE_ID_HEADER);\n+\tstruct feature_header header;\n+\n+\theader.csr = readq(&port_hdr->header);\n+\n+\t*revision = header.revision;\n+\n+\treturn 0;\n+}\n+\n+static int port_get_portidx(struct ifpga_port_hw *port, u64 *idx)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_capability capability;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tcapability.csr = readq(&port_hdr->capability);\n+\t*idx = capability.port_number;\n+\n+\treturn 0;\n+}\n+\n+static int port_get_latency_tolerance(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_control control;\n+\n+\tport_hdr = 
get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tcontrol.csr = readq(&port_hdr->control);\n+\t*val = control.latency_tolerance;\n+\n+\treturn 0;\n+}\n+\n+static int port_get_ap1_event(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_status status;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\tstatus.csr = readq(&port_hdr->status);\n+\tspinlock_unlock(&port->lock);\n+\n+\t*val = status.ap1_event;\n+\n+\treturn 0;\n+}\n+\n+static int port_set_ap1_event(struct ifpga_port_hw *port, u64 val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_status status;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\tstatus.csr = readq(&port_hdr->status);\n+\tstatus.ap1_event = val;\n+\twriteq(status.csr, &port_hdr->status);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static int port_get_ap2_event(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_status status;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\tstatus.csr = readq(&port_hdr->status);\n+\tspinlock_unlock(&port->lock);\n+\n+\t*val = status.ap2_event;\n+\n+\treturn 0;\n+}\n+\n+static int port_set_ap2_event(struct ifpga_port_hw *port, u64 val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_status status;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\tstatus.csr = readq(&port_hdr->status);\n+\tstatus.ap2_event = val;\n+\twriteq(status.csr, &port_hdr->status);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static int 
port_get_power_state(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\tstruct feature_port_status status;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\tstatus.csr = readq(&port_hdr->status);\n+\tspinlock_unlock(&port->lock);\n+\n+\t*val = status.power_state;\n+\n+\treturn 0;\n+}\n+\n+static int port_get_userclk_freqcmd(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\t*val = readq(&port_hdr->user_clk_freq_cmd0);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static int port_set_userclk_freqcmd(struct ifpga_port_hw *port, u64 val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\twriteq(val, &port_hdr->user_clk_freq_cmd0);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static int port_get_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\t*val = readq(&port_hdr->user_clk_freq_cmd1);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static int port_set_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\twriteq(val, &port_hdr->user_clk_freq_cmd1);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static int port_get_userclk_freqsts(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_header 
*port_hdr;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\t*val = readq(&port_hdr->user_clk_freq_sts0);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static int port_get_userclk_freqcntrsts(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_header *port_hdr;\n+\n+\tport_hdr = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_HEADER);\n+\n+\tspinlock_lock(&port->lock);\n+\t*val = readq(&port_hdr->user_clk_freq_sts1);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static int port_hdr_init(struct feature *feature)\n+{\n+\tstruct ifpga_port_hw *port = feature->parent;\n+\n+\tdev_info(NULL, \"port hdr Init.\\n\");\n+\n+\tfpga_port_reset(port);\n+\n+\treturn 0;\n+}\n+\n+static void port_hdr_uinit(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"port hdr uinit.\\n\");\n+}\n+\n+static int port_hdr_get_prop(struct feature *feature, struct feature_prop *prop)\n+{\n+\tstruct ifpga_port_hw *port = feature->parent;\n+\n+\tswitch (prop->prop_id) {\n+\tcase PORT_HDR_PROP_REVISION:\n+\t\treturn port_get_revision(port, &prop->data);\n+\tcase PORT_HDR_PROP_PORTIDX:\n+\t\treturn port_get_portidx(port, &prop->data);\n+\tcase PORT_HDR_PROP_LATENCY_TOLERANCE:\n+\t\treturn port_get_latency_tolerance(port, &prop->data);\n+\tcase PORT_HDR_PROP_AP1_EVENT:\n+\t\treturn port_get_ap1_event(port, &prop->data);\n+\tcase PORT_HDR_PROP_AP2_EVENT:\n+\t\treturn port_get_ap2_event(port, &prop->data);\n+\tcase PORT_HDR_PROP_POWER_STATE:\n+\t\treturn port_get_power_state(port, &prop->data);\n+\tcase PORT_HDR_PROP_USERCLK_FREQCMD:\n+\t\treturn port_get_userclk_freqcmd(port, &prop->data);\n+\tcase PORT_HDR_PROP_USERCLK_FREQCNTRCMD:\n+\t\treturn port_get_userclk_freqcntrcmd(port, &prop->data);\n+\tcase PORT_HDR_PROP_USERCLK_FREQSTS:\n+\t\treturn port_get_userclk_freqsts(port, &prop->data);\n+\tcase 
PORT_HDR_PROP_USERCLK_CNTRSTS:\n+\t\treturn port_get_userclk_freqcntrsts(port, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int port_hdr_set_prop(struct feature *feature, struct feature_prop *prop)\n+{\n+\tstruct ifpga_port_hw *port = feature->parent;\n+\n+\tswitch (prop->prop_id) {\n+\tcase PORT_HDR_PROP_AP1_EVENT:\n+\t\treturn port_set_ap1_event(port, prop->data);\n+\tcase PORT_HDR_PROP_AP2_EVENT:\n+\t\treturn port_set_ap2_event(port, prop->data);\n+\tcase PORT_HDR_PROP_USERCLK_FREQCMD:\n+\t\treturn port_set_userclk_freqcmd(port, prop->data);\n+\tcase PORT_HDR_PROP_USERCLK_FREQCNTRCMD:\n+\t\treturn port_set_userclk_freqcntrcmd(port, prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+struct feature_ops port_hdr_ops = {\n+\t.init = port_hdr_init,\n+\t.uinit = port_hdr_uinit,\n+\t.get_prop = port_hdr_get_prop,\n+\t.set_prop = port_hdr_set_prop,\n+};\n+\n+static int port_stp_init(struct feature *feature)\n+{\n+\tstruct ifpga_port_hw *port = feature->parent;\n+\n+\tdev_info(NULL, \"port stp Init.\\n\");\n+\n+\tspinlock_lock(&port->lock);\n+\tport->stp_addr = feature->addr;\n+\tport->stp_size = feature->size;\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static void port_stp_uinit(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"port stp uinit.\\n\");\n+}\n+\n+struct feature_ops port_stp_ops = {\n+\t.init = port_stp_init,\n+\t.uinit = port_stp_uinit,\n+};\n+\n+static int port_uint_init(struct feature *feature)\n+{\n+\tstruct ifpga_port_hw *port = feature->parent;\n+\n+\tdev_info(NULL, \"PORT UINT Init.\\n\");\n+\n+\tspinlock_lock(&port->lock);\n+\tif (feature->ctx_num) {\n+\t\tport->capability |= FPGA_PORT_CAP_UAFU_IRQ;\n+\t\tport->num_uafu_irqs = feature->ctx_num;\n+\t}\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static void port_uint_uinit(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+\n+\tdev_info(NULL, \"PORT UINT UInit.\\n\");\n+}\n+\n+static int port_uint_set_irq(struct feature *feature, 
void *irq_set)\n+{\n+\tstruct fpga_uafu_irq_set *uafu_irq_set = irq_set;\n+\tstruct ifpga_port_hw *port = feature->parent;\n+\tint ret;\n+\n+\tspinlock_lock(&port->lock);\n+\tif (!(port->capability & FPGA_PORT_CAP_UAFU_IRQ)) {\n+\t\tspinlock_unlock(&port->lock);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tret = fpga_msix_set_block(feature, uafu_irq_set->start,\n+\t\t\t\t  uafu_irq_set->count, uafu_irq_set->evtfds);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn ret;\n+}\n+\n+struct feature_ops port_uint_ops = {\n+\t.init = port_uint_init,\n+\t.uinit = port_uint_uinit,\n+\t.set_irq = port_uint_set_irq,\n+};\ndiff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c\nnew file mode 100644\nindex 0000000..3be0f5e\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c\n@@ -0,0 +1,165 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include \"ifpga_feature_dev.h\"\n+\n+static int port_err_get_revision(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_error *port_err;\n+\tstruct feature_header header;\n+\n+\tport_err = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_ERROR);\n+\theader.csr = readq(&port_err->header);\n+\t*val = header.revision;\n+\n+\treturn 0;\n+}\n+\n+static int port_err_get_errors(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_error *port_err;\n+\tstruct feature_port_err_key error;\n+\n+\tport_err = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_ERROR);\n+\terror.csr = readq(&port_err->port_error);\n+\t*val = error.csr;\n+\n+\treturn 0;\n+}\n+\n+static int port_err_get_first_error(struct ifpga_port_hw *port, u64 *val)\n+{\n+\tstruct feature_port_error *port_err;\n+\tstruct feature_port_first_err_key first_error;\n+\n+\tport_err = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_ERROR);\n+\tfirst_error.csr = 
readq(&port_err->port_first_error);\n+\t*val = first_error.csr;\n+\n+\treturn 0;\n+}\n+\n+static int port_err_get_first_malformed_req_lsb(struct ifpga_port_hw *port,\n+\t\t\t\t\t\tu64 *val)\n+{\n+\tstruct feature_port_error *port_err;\n+\tstruct feature_port_malformed_req0 malreq0;\n+\n+\tport_err = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_ERROR);\n+\n+\tmalreq0.header_lsb = readq(&port_err->malreq0);\n+\t*val = malreq0.header_lsb;\n+\n+\treturn 0;\n+}\n+\n+static int port_err_get_first_malformed_req_msb(struct ifpga_port_hw *port,\n+\t\t\t\t\t\tu64 *val)\n+{\n+\tstruct feature_port_error *port_err;\n+\tstruct feature_port_malformed_req1 malreq1;\n+\n+\tport_err = get_port_feature_ioaddr_by_index(port,\n+\t\t\t\t\t\t    PORT_FEATURE_ID_ERROR);\n+\n+\tmalreq1.header_msb = readq(&port_err->malreq1);\n+\t*val = malreq1.header_msb;\n+\n+\treturn 0;\n+}\n+\n+static int port_err_set_clear(struct ifpga_port_hw *port, u64 val)\n+{\n+\tint ret;\n+\n+\tspinlock_lock(&port->lock);\n+\tret = port_err_clear(port, val);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn ret;\n+}\n+\n+static int port_error_init(struct feature *feature)\n+{\n+\tstruct ifpga_port_hw *port = feature->parent;\n+\n+\tdev_info(NULL, \"port error Init.\\n\");\n+\n+\tspinlock_lock(&port->lock);\n+\tport_err_mask(port, false);\n+\tif (feature->ctx_num)\n+\t\tport->capability |= FPGA_PORT_CAP_ERR_IRQ;\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+static void port_error_uinit(struct feature *feature)\n+{\n+\tUNUSED(feature);\n+}\n+\n+static int port_error_get_prop(struct feature *feature,\n+\t\t\t       struct feature_prop *prop)\n+{\n+\tstruct ifpga_port_hw *port = feature->parent;\n+\n+\tswitch (prop->prop_id) {\n+\tcase PORT_ERR_PROP_REVISION:\n+\t\treturn port_err_get_revision(port, &prop->data);\n+\tcase PORT_ERR_PROP_ERRORS:\n+\t\treturn port_err_get_errors(port, &prop->data);\n+\tcase PORT_ERR_PROP_FIRST_ERROR:\n+\t\treturn port_err_get_first_error(port, 
&prop->data);\n+\tcase PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB:\n+\t\treturn port_err_get_first_malformed_req_lsb(port, &prop->data);\n+\tcase PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB:\n+\t\treturn port_err_get_first_malformed_req_msb(port, &prop->data);\n+\t}\n+\n+\treturn -ENOENT;\n+}\n+\n+static int port_error_set_prop(struct feature *feature,\n+\t\t\t       struct feature_prop *prop)\n+{\n+\tstruct ifpga_port_hw *port = feature->parent;\n+\n+\tif (prop->prop_id == PORT_ERR_PROP_CLEAR)\n+\t\treturn port_err_set_clear(port, prop->data);\n+\n+\treturn -ENOENT;\n+}\n+\n+static int port_error_set_irq(struct feature *feature, void *irq_set)\n+{\n+\tstruct fpga_port_err_irq_set *err_irq_set = irq_set;\n+\tstruct ifpga_port_hw *port;\n+\tint ret;\n+\n+\tport = feature->parent;\n+\n+\tspinlock_lock(&port->lock);\n+\tif (!(port->capability & FPGA_PORT_CAP_ERR_IRQ)) {\n+\t\tspinlock_unlock(&port->lock);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tret = fpga_msix_set_block(feature, 0, 1, &err_irq_set->evtfd);\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn ret;\n+}\n+\n+struct feature_ops port_error_ops = {\n+\t.init = port_error_init,\n+\t.uinit = port_error_uinit,\n+\t.get_prop = port_error_get_prop,\n+\t.set_prop = port_error_set_prop,\n+\t.set_irq = port_error_set_irq,\n+};\ndiff --git a/drivers/raw/ifpga_rawdev/base/meson.build b/drivers/raw/ifpga_rawdev/base/meson.build\nnew file mode 100644\nindex 0000000..cb65535\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/meson.build\n@@ -0,0 +1,34 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2018 Intel Corporation\n+\n+sources = [\n+\t'ifpga_api.c',\n+\t'ifpga_enumerate.c',\n+\t'ifpga_feature_dev.c',\n+\t'ifpga_fme.c',\n+\t'ifpga_fme_iperf.c',\n+\t'ifpga_fme_dperf.c',\n+\t'ifpga_fme_error.c',\n+\t'ifpga_port.c',\n+\t'ifpga_port_error.c',\n+\t'ifpga_fme_pr.c',\n+\t'opae_hw_api.c',\n+\t'opae_ifpga_hw_api.c',\n+\t'opae_debug.c'\n+]\n+\n+error_cflags = ['-Wno-sign-compare', '-Wno-unused-value',\n+\t\t'-Wno-format', 
'-Wno-unused-but-set-variable',\n+\t\t'-Wno-strict-aliasing'\n+]\n+c_args = cflags\n+foreach flag: error_cflags\n+\tif cc.has_argument(flag)\n+\t\tc_args += flag\n+\tendif\n+endforeach\n+\n+base_lib = static_library('ifpga_rawdev_base', sources,\n+\tdependencies: static_rte_eal,\n+\tc_args: c_args)\n+base_objs = base_lib.extract_all_objects()\ndiff --git a/drivers/raw/ifpga_rawdev/base/opae_debug.c b/drivers/raw/ifpga_rawdev/base/opae_debug.c\nnew file mode 100644\nindex 0000000..024d7d2\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/opae_debug.c\n@@ -0,0 +1,99 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#define OPAE_HW_DEBUG\n+\n+#include \"opae_hw_api.h\"\n+#include \"opae_debug.h\"\n+\n+void opae_manager_dump(struct opae_manager *mgr)\n+{\n+\topae_log(\"=====%s=====\\n\", __func__);\n+\topae_log(\"OPAE Manger %s\\n\", mgr->name);\n+\topae_log(\"OPAE Manger OPs = %p\\n\", mgr->ops);\n+\topae_log(\"OPAE Manager Private Data = %p\\n\", mgr->data);\n+\topae_log(\"OPAE Adapter(parent) = %p\\n\", mgr->adapter);\n+\topae_log(\"==========================\\n\");\n+}\n+\n+void opae_bridge_dump(struct opae_bridge *br)\n+{\n+\topae_log(\"=====%s=====\\n\", __func__);\n+\topae_log(\"OPAE Bridge %s\\n\", br->name);\n+\topae_log(\"OPAE Bridge ID = %d\\n\", br->id);\n+\topae_log(\"OPAE Bridge OPs = %p\\n\", br->ops);\n+\topae_log(\"OPAE Bridge Private Data = %p\\n\", br->data);\n+\topae_log(\"OPAE Accelerator(under this bridge) = %p\\n\", br->acc);\n+\topae_log(\"==========================\\n\");\n+}\n+\n+void opae_accelerator_dump(struct opae_accelerator *acc)\n+{\n+\topae_log(\"=====%s=====\\n\", __func__);\n+\topae_log(\"OPAE Accelerator %s\\n\", acc->name);\n+\topae_log(\"OPAE Accelerator Index = %d\\n\", acc->index);\n+\topae_log(\"OPAE Accelerator OPs = %p\\n\", acc->ops);\n+\topae_log(\"OPAE Accelerator Private Data = %p\\n\", acc->data);\n+\topae_log(\"OPAE Bridge (upstream) = %p\\n\", 
acc->br);\n+\topae_log(\"OPAE Manager (upstream) = %p\\n\", acc->mgr);\n+\topae_log(\"==========================\\n\");\n+\n+\tif (acc->br)\n+\t\topae_bridge_dump(acc->br);\n+}\n+\n+static void opae_adapter_data_dump(void *data)\n+{\n+\tstruct opae_adapter_data *d = data;\n+\tstruct opae_adapter_data_pci *d_pci;\n+\tstruct opae_reg_region *r;\n+\tint i;\n+\n+\topae_log(\"=====%s=====\\n\", __func__);\n+\n+\tswitch (d->type) {\n+\tcase OPAE_FPGA_PCI:\n+\t\td_pci = (struct opae_adapter_data_pci *)d;\n+\n+\t\topae_log(\"OPAE Adapter Type = PCI\\n\");\n+\t\topae_log(\"PCI Device ID: 0x%04x\\n\", d_pci->device_id);\n+\t\topae_log(\"PCI Vendor ID: 0x%04x\\n\", d_pci->vendor_id);\n+\n+\t\tfor (i = 0; i < PCI_MAX_RESOURCE; i++) {\n+\t\t\tr = &d_pci->region[i];\n+\t\t\topae_log(\"PCI Bar %d: phy(%llx) len(%llx) addr(%p)\\n\",\n+\t\t\t\t i, (unsigned long long)r->phys_addr,\n+\t\t\t\t (unsigned long long)r->len, r->addr);\n+\t\t}\n+\t\tbreak;\n+\tcase OPAE_FPGA_NET:\n+\t\tbreak;\n+\t}\n+\n+\topae_log(\"==========================\\n\");\n+}\n+\n+void opae_adapter_dump(struct opae_adapter *adapter, int verbose)\n+{\n+\tstruct opae_accelerator *acc;\n+\n+\topae_log(\"=====%s=====\\n\", __func__);\n+\topae_log(\"OPAE Adapter %s\\n\", adapter->name);\n+\topae_log(\"OPAE Adapter OPs = %p\\n\", adapter->ops);\n+\topae_log(\"OPAE Adapter Private Data = %p\\n\", adapter->data);\n+\topae_log(\"OPAE Manager (downstream) = %p\\n\", adapter->mgr);\n+\n+\tif (verbose) {\n+\t\tif (adapter->mgr)\n+\t\t\topae_manager_dump(adapter->mgr);\n+\n+\t\topae_adapter_for_each_acc(adapter, acc)\n+\t\t\topae_accelerator_dump(acc);\n+\n+\t\tif (adapter->data)\n+\t\t\topae_adapter_data_dump(adapter->data);\n+\t}\n+\n+\topae_log(\"==========================\\n\");\n+}\ndiff --git a/drivers/raw/ifpga_rawdev/base/opae_debug.h b/drivers/raw/ifpga_rawdev/base/opae_debug.h\nnew file mode 100644\nindex 0000000..a03dff9\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/opae_debug.h\n@@ -0,0 +1,19 @@\n+/* 
SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _OPAE_DEBUG_H_\n+#define _OPAE_DEBUG_H_\n+\n+#ifdef OPAE_HW_DEBUG\n+#define opae_log(fmt, args...) printf(fmt, ## args)\n+#else\n+#define opae_log(fme, args...) do {} while (0)\n+#endif\n+\n+void opae_manager_dump(struct opae_manager *mgr);\n+void opae_bridge_dump(struct opae_bridge *br);\n+void opae_accelerator_dump(struct opae_accelerator *acc);\n+void opae_adapter_dump(struct opae_adapter *adapter, int verbose);\n+\n+#endif /* _OPAE_DEBUG_H_ */\ndiff --git a/drivers/raw/ifpga_rawdev/base/opae_hw_api.c b/drivers/raw/ifpga_rawdev/base/opae_hw_api.c\nnew file mode 100644\nindex 0000000..a533dfe\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/opae_hw_api.c\n@@ -0,0 +1,381 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include \"opae_hw_api.h\"\n+#include \"opae_debug.h\"\n+#include \"ifpga_api.h\"\n+\n+/* OPAE Bridge Functions */\n+\n+/**\n+ * opae_bridge_alloc - alloc opae_bridge data structure\n+ * @name: bridge name.\n+ * @ops: ops of this bridge.\n+ * @data: private data of this bridge.\n+ *\n+ * Return opae_bridge on success, otherwise NULL.\n+ */\n+struct opae_bridge *\n+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data)\n+{\n+\tstruct opae_bridge *br = opae_zmalloc(sizeof(*br));\n+\n+\tif (!br)\n+\t\treturn NULL;\n+\n+\tbr->name = name;\n+\tbr->ops = ops;\n+\tbr->data = data;\n+\n+\topae_log(\"%s %p\\n\", __func__, br);\n+\n+\treturn br;\n+}\n+\n+/**\n+ * opae_bridge_reset -  reset opae_bridge\n+ * @br: bridge to be reset.\n+ *\n+ * Return: 0 on success, otherwise error code.\n+ */\n+int opae_bridge_reset(struct opae_bridge *br)\n+{\n+\tif (!br)\n+\t\treturn -EINVAL;\n+\n+\tif (br->ops && br->ops->reset)\n+\t\treturn br->ops->reset(br);\n+\n+\topae_log(\"%s no ops\\n\", __func__);\n+\n+\treturn -ENOENT;\n+}\n+\n+/* Accelerator Functions */\n+\n+/**\n+ * 
opae_accelerator_alloc - alloc opae_accelerator data structure\n+ * @name: accelerator name.\n+ * @ops: ops of this accelerator.\n+ * @data: private data of this accelerator.\n+ *\n+ * Return: opae_accelerator on success, otherwise NULL.\n+ */\n+struct opae_accelerator *\n+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,\n+\t\t       void *data)\n+{\n+\tstruct opae_accelerator *acc = opae_zmalloc(sizeof(*acc));\n+\n+\tif (!acc)\n+\t\treturn NULL;\n+\n+\tacc->name = name;\n+\tacc->ops = ops;\n+\tacc->data = data;\n+\n+\topae_log(\"%s %p\\n\", __func__, acc);\n+\n+\treturn acc;\n+}\n+\n+/**\n+ * opae_acc_reg_read - read accelerator's register from its reg region.\n+ * @acc: accelerator to read.\n+ * @region_idx: reg region index.\n+ * @offset: reg offset.\n+ * @byte: read operation width, e.g 4 byte = 32bit read.\n+ * @data: data to store the value read from the register.\n+ *\n+ * Return: 0 on success, otherwise error code.\n+ */\n+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,\n+\t\t      u64 offset, unsigned int byte, void *data)\n+{\n+\tif (!acc || !data)\n+\t\treturn -EINVAL;\n+\n+\tif (acc->ops && acc->ops->read)\n+\t\treturn acc->ops->read(acc, region_idx, offset, byte, data);\n+\n+\treturn -ENOENT;\n+}\n+\n+/**\n+ * opae_acc_reg_write - write to accelerator's register from its reg region.\n+ * @acc: accelerator to write.\n+ * @region_idx: reg region index.\n+ * @offset: reg offset.\n+ * @byte: write operation width, e.g 4 byte = 32bit write.\n+ * @data: data stored the value to write to the register.\n+ *\n+ * Return: 0 on success, otherwise error code.\n+ */\n+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,\n+\t\t       u64 offset, unsigned int byte, void *data)\n+{\n+\tif (!acc || !data)\n+\t\treturn -EINVAL;\n+\n+\tif (acc->ops && acc->ops->write)\n+\t\treturn acc->ops->write(acc, region_idx, offset, byte, data);\n+\n+\treturn -ENOENT;\n+}\n+\n+/**\n+ * opae_acc_get_info 
- get information of an accelerator.\n+ * @acc: targeted accelerator\n+ * @info: accelerator info data structure to be filled.\n+ *\n+ * Return: 0 on success, otherwise error code.\n+ */\n+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info)\n+{\n+\tif (!acc || !info)\n+\t\treturn -EINVAL;\n+\n+\tif (acc->ops && acc->ops->get_info)\n+\t\treturn acc->ops->get_info(acc, info);\n+\n+\treturn -ENOENT;\n+}\n+\n+/**\n+ * opae_acc_get_region_info - get information of an accelerator register region.\n+ * @acc: targeted accelerator\n+ * @info: accelerator region info data structure to be filled.\n+ *\n+ * Return: 0 on success, otherwise error code.\n+ */\n+int opae_acc_get_region_info(struct opae_accelerator *acc,\n+\t\t\t     struct opae_acc_region_info *info)\n+{\n+\tif (!acc || !info)\n+\t\treturn -EINVAL;\n+\n+\tif (acc->ops && acc->ops->get_region_info)\n+\t\treturn acc->ops->get_region_info(acc, info);\n+\n+\treturn -ENOENT;\n+}\n+\n+/**\n+ * opae_acc_set_irq -  set an accelerator's irq.\n+ * @acc: targeted accelerator\n+ * @start: start vector number\n+ * @count: count of vectors to be set from the start vector\n+ * @evtfds: event fds to be notified when corresponding irqs happens\n+ *\n+ * Return: 0 on success, otherwise error code.\n+ */\n+int opae_acc_set_irq(struct opae_accelerator *acc,\n+\t\t     u32 start, u32 count, s32 evtfds[])\n+{\n+\tif (!acc || !acc->data)\n+\t\treturn -EINVAL;\n+\n+\tif (start + count <= start)\n+\t\treturn -EINVAL;\n+\n+\tif (acc->ops && acc->ops->set_irq)\n+\t\treturn acc->ops->set_irq(acc, start, count, evtfds);\n+\n+\treturn -ENOENT;\n+}\n+\n+/**\n+ * opae_acc_get_uuid -  get accelerator's UUID.\n+ * @acc: targeted accelerator\n+ * @uuid: a pointer to UUID\n+ *\n+ * Return: 0 on success, otherwise error code.\n+ */\n+int opae_acc_get_uuid(struct opae_accelerator *acc,\n+\t\t      struct uuid *uuid)\n+{\n+\tif (!acc || !uuid)\n+\t\treturn -EINVAL;\n+\n+\tif (acc->ops && acc->ops->get_uuid)\n+\t\treturn 
acc->ops->get_uuid(acc, uuid);\n+\n+\treturn -ENOENT;\n+}\n+\n+/* Manager Functions */\n+\n+/**\n+ * opae_manager_alloc - alloc opae_manager data structure\n+ * @name: manager name.\n+ * @ops: ops of this manager.\n+ * @data: private data of this manager.\n+ *\n+ * Return: opae_manager on success, otherwise NULL.\n+ */\n+struct opae_manager *\n+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data)\n+{\n+\tstruct opae_manager *mgr = opae_zmalloc(sizeof(*mgr));\n+\n+\tif (!mgr)\n+\t\treturn NULL;\n+\n+\tmgr->name = name;\n+\tmgr->ops = ops;\n+\tmgr->data = data;\n+\n+\topae_log(\"%s %p\\n\", __func__, mgr);\n+\n+\treturn mgr;\n+}\n+\n+/**\n+ * opae_manager_flash - flash a reconfiguration image via opae_manager\n+ * @mgr: opae_manager for flash.\n+ * @id: id of target region (accelerator).\n+ * @buf: image data buffer.\n+ * @size: buffer size.\n+ * @status: status to store flash result.\n+ *\n+ * Return: 0 on success, otherwise error code.\n+ */\n+int opae_manager_flash(struct opae_manager *mgr, int id, void *buf, u32 size,\n+\t\t       u64 *status)\n+{\n+\tif (!mgr)\n+\t\treturn -EINVAL;\n+\n+\tif (mgr && mgr->ops && mgr->ops->flash)\n+\t\treturn mgr->ops->flash(mgr, id, buf, size, status);\n+\n+\treturn -ENOENT;\n+}\n+\n+/* Adapter Functions */\n+\n+/**\n+ * opae_adapter_data_alloc - alloc opae_adapter_data data structure\n+ * @type: opae_adapter_type.\n+ *\n+ * Return: opae_adapter_data on success, otherwise NULL.\n+ */\n+void *opae_adapter_data_alloc(enum opae_adapter_type type)\n+{\n+\tstruct opae_adapter_data *data;\n+\tint size;\n+\n+\tswitch (type) {\n+\tcase OPAE_FPGA_PCI:\n+\t\tsize = sizeof(struct opae_adapter_data_pci);\n+\t\tbreak;\n+\tcase OPAE_FPGA_NET:\n+\t\tsize = sizeof(struct opae_adapter_data_net);\n+\t\tbreak;\n+\tdefault:\n+\t\tsize = sizeof(struct opae_adapter_data);\n+\t\tbreak;\n+\t}\n+\n+\tdata = opae_zmalloc(size);\n+\tif (!data)\n+\t\treturn NULL;\n+\n+\tdata->type = type;\n+\n+\treturn data;\n+}\n+\n+static struct 
opae_adapter_ops *match_ops(struct opae_adapter *adapter)\n+{\n+\tstruct opae_adapter_data *data;\n+\n+\tif (!adapter || !adapter->data)\n+\t\treturn NULL;\n+\n+\tdata = adapter->data;\n+\n+\tif (data->type == OPAE_FPGA_PCI)\n+\t\treturn &ifpga_adapter_ops;\n+\n+\treturn NULL;\n+}\n+\n+/**\n+ * opae_adapter_data_alloc - alloc opae_adapter_data data structure\n+ * @name: adapter name.\n+ * @data: private data of this adapter.\n+ *\n+ * Return: opae_adapter on success, otherwise NULL.\n+ */\n+struct opae_adapter *opae_adapter_alloc(const char *name, void *data)\n+{\n+\tstruct opae_adapter *adapter = opae_zmalloc(sizeof(*adapter));\n+\n+\tif (!adapter)\n+\t\treturn NULL;\n+\n+\tTAILQ_INIT(&adapter->acc_list);\n+\tadapter->data = data;\n+\tadapter->name = name;\n+\tadapter->ops = match_ops(adapter);\n+\n+\treturn adapter;\n+}\n+\n+/**\n+ * opae_adapter_enumerate - enumerate this adapter\n+ * @adapter: adapter to enumerate.\n+ *\n+ * Return: 0 on success, otherwise error code.\n+ */\n+int opae_adapter_enumerate(struct opae_adapter *adapter)\n+{\n+\tint ret = -ENOENT;\n+\n+\tif (!adapter)\n+\t\treturn -EINVAL;\n+\n+\tif (adapter->ops && adapter->ops->enumerate)\n+\t\tret = adapter->ops->enumerate(adapter);\n+\n+\tif (!ret)\n+\t\topae_adapter_dump(adapter, 1);\n+\n+\treturn ret;\n+}\n+\n+/**\n+ * opae_adapter_destroy - destroy this adapter\n+ * @adapter: adapter to destroy.\n+ *\n+ * destroy things allocated during adapter enumeration.\n+ */\n+void opae_adapter_destroy(struct opae_adapter *adapter)\n+{\n+\tif (adapter && adapter->ops && adapter->ops->destroy)\n+\t\tadapter->ops->destroy(adapter);\n+}\n+\n+/**\n+ * opae_adapter_get_acc - find and return accelerator with matched id\n+ * @adapter: adapter to find the accelerator.\n+ * @acc_id: id (index) of the accelerator.\n+ *\n+ * destroy things allocated during adapter enumeration.\n+ */\n+struct opae_accelerator *\n+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id)\n+{\n+\tstruct opae_accelerator *acc = 
NULL;\n+\n+\tif (!adapter)\n+\t\treturn NULL;\n+\n+\topae_adapter_for_each_acc(adapter, acc)\n+\t\tif (acc->index == acc_id)\n+\t\t\treturn acc;\n+\n+\treturn NULL;\n+}\ndiff --git a/drivers/raw/ifpga_rawdev/base/opae_hw_api.h b/drivers/raw/ifpga_rawdev/base/opae_hw_api.h\nnew file mode 100644\nindex 0000000..4bbc9df\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/opae_hw_api.h\n@@ -0,0 +1,253 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _OPAE_HW_API_H_\n+#define _OPAE_HW_API_H_\n+\n+#include <stdint.h>\n+#include <stdlib.h>\n+#include <stdio.h>\n+#include <sys/queue.h>\n+\n+#include \"opae_osdep.h\"\n+\n+#ifndef PCI_MAX_RESOURCE\n+#define PCI_MAX_RESOURCE 6\n+#endif\n+\n+struct opae_adapter;\n+\n+enum opae_adapter_type {\n+\tOPAE_FPGA_PCI,\n+\tOPAE_FPGA_NET,\n+};\n+\n+/* OPAE Manager Data Structure */\n+struct opae_manager_ops;\n+\n+/*\n+ * opae_manager has pointer to its parent adapter, as it could be able to manage\n+ * all components on this FPGA device (adapter). 
If not the case, don't set this\n+ * adapter, which limit opae_manager ops to manager itself.\n+ */\n+struct opae_manager {\n+\tconst char *name;\n+\tstruct opae_adapter *adapter;\n+\tstruct opae_manager_ops *ops;\n+\tvoid *data;\n+};\n+\n+/* FIXME: add more management ops, e.g power/thermal and etc */\n+struct opae_manager_ops {\n+\tint (*flash)(struct opae_manager *mgr, int id, void *buffer,\n+\t\t     u32 size, u64 *status);\n+};\n+\n+/* OPAE Manager APIs */\n+struct opae_manager *\n+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data);\n+#define opae_manager_free(mgr) opae_free(mgr)\n+int opae_manager_flash(struct opae_manager *mgr, int acc_id, void *buf,\n+\t\t       u32 size, u64 *status);\n+\n+/* OPAE Bridge Data Structure */\n+struct opae_bridge_ops;\n+\n+/*\n+ * opae_bridge only has pointer to its downstream accelerator.\n+ */\n+struct opae_bridge {\n+\tconst char *name;\n+\tint id;\n+\tstruct opae_accelerator *acc;\n+\tstruct opae_bridge_ops *ops;\n+\tvoid *data;\n+};\n+\n+struct opae_bridge_ops {\n+\tint (*reset)(struct opae_bridge *br);\n+};\n+\n+/* OPAE Bridge APIs */\n+struct opae_bridge *\n+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data);\n+int opae_bridge_reset(struct opae_bridge *br);\n+#define opae_bridge_free(br) opae_free(br)\n+\n+/* OPAE Acceleraotr Data Structure */\n+struct opae_accelerator_ops;\n+\n+/*\n+ * opae_accelerator has pointer to its upstream bridge(port).\n+ * In some cases, if we allow same user to do PR on its own accelerator, then\n+ * set the manager pointer during the enumeration. 
But in other cases, the PR\n+ * functions only could be done via manager in another module / thread / service\n+ * / application for better protection.\n+ */\n+struct opae_accelerator {\n+\tTAILQ_ENTRY(opae_accelerator) node;\n+\tconst char *name;\n+\tint index;\n+\tstruct opae_bridge *br;\n+\tstruct opae_manager *mgr;\n+\tstruct opae_accelerator_ops *ops;\n+\tvoid *data;\n+};\n+\n+struct opae_acc_info {\n+\tunsigned int num_regions;\n+\tunsigned int num_irqs;\n+};\n+\n+struct opae_acc_region_info {\n+\tu32 flags;\n+#define ACC_REGION_READ\t\t(1 << 0)\n+#define ACC_REGION_WRITE\t(1 << 1)\n+#define ACC_REGION_MMIO\t\t(1 << 2)\n+\tu32 index;\n+\tu64 phys_addr;\n+\tu64 len;\n+\tu8 *addr;\n+};\n+\n+struct opae_accelerator_ops {\n+\tint (*read)(struct opae_accelerator *acc, unsigned int region_idx,\n+\t\t    u64 offset, unsigned int byte, void *data);\n+\tint (*write)(struct opae_accelerator *acc, unsigned int region_idx,\n+\t\t     u64 offset, unsigned int byte, void *data);\n+\tint (*get_info)(struct opae_accelerator *acc,\n+\t\t\tstruct opae_acc_info *info);\n+\tint (*get_region_info)(struct opae_accelerator *acc,\n+\t\t\t       struct opae_acc_region_info *info);\n+\tint (*set_irq)(struct opae_accelerator *acc,\n+\t\t       u32 start, u32 count, s32 evtfds[]);\n+\tint (*get_uuid)(struct opae_accelerator *acc,\n+\t\t\tstruct uuid *uuid);\n+};\n+\n+/* OPAE accelerator APIs */\n+struct opae_accelerator *\n+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,\n+\t\t       void *data);\n+#define opae_accelerator_free(acc) opae_free(acc)\n+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info);\n+int opae_acc_get_region_info(struct opae_accelerator *acc,\n+\t\t\t     struct opae_acc_region_info *info);\n+int opae_acc_set_irq(struct opae_accelerator *acc,\n+\t\t     u32 start, u32 count, s32 evtfds[]);\n+int opae_acc_get_uuid(struct opae_accelerator *acc,\n+\t\t      struct uuid *uuid);\n+\n+static inline struct opae_bridge 
*\n+opae_acc_get_br(struct opae_accelerator *acc)\n+{\n+\treturn acc ? acc->br : NULL;\n+}\n+\n+static inline struct opae_manager *\n+opae_acc_get_mgr(struct opae_accelerator *acc)\n+{\n+\treturn acc ? acc->mgr : NULL;\n+}\n+\n+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,\n+\t\t      u64 offset, unsigned int byte, void *data);\n+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,\n+\t\t       u64 offset, unsigned int byte, void *data);\n+\n+#define opae_acc_reg_read64(acc, region, offset, data) \\\n+\topae_acc_reg_read(acc, region, offset, 8, data)\n+#define opae_acc_reg_write64(acc, region, offset, data) \\\n+\topae_acc_reg_write(acc, region, offset, 8, data)\n+#define opae_acc_reg_read32(acc, region, offset, data) \\\n+\topae_acc_reg_read(acc, region, offset, 4, data)\n+#define opae_acc_reg_write32(acc, region, offset, data) \\\n+\topae_acc_reg_write(acc, region, offset, 4, data)\n+#define opae_acc_reg_read16(acc, region, offset, data) \\\n+\topae_acc_reg_read(acc, region, offset, 2, data)\n+#define opae_acc_reg_write16(acc, region, offset, data) \\\n+\topae_acc_reg_write(acc, region, offset, 2, data)\n+#define opae_acc_reg_read8(acc, region, offset, data) \\\n+\topae_acc_reg_read(acc, region, offset, 1, data)\n+#define opae_acc_reg_write8(acc, region, offset, data) \\\n+\topae_acc_reg_write(acc, region, offset, 1, data)\n+\n+/*for data stream read/write*/\n+int opae_acc_data_read(struct opae_accelerator *acc, unsigned int flags,\n+\t\t       u64 offset, unsigned int byte, void *data);\n+int opae_acc_data_write(struct opae_accelerator *acc, unsigned int flags,\n+\t\t\tu64 offset, unsigned int byte, void *data);\n+\n+/* OPAE Adapter Data Structure */\n+struct opae_adapter_data {\n+\tenum opae_adapter_type type;\n+};\n+\n+struct opae_reg_region {\n+\tu64 phys_addr;\n+\tu64 len;\n+\tu8 *addr;\n+};\n+\n+struct opae_adapter_data_pci {\n+\tenum opae_adapter_type type;\n+\tu16 device_id;\n+\tu16 
vendor_id;\n+\tstruct opae_reg_region region[PCI_MAX_RESOURCE];\n+\tint vfio_dev_fd;  /* VFIO device file descriptor */\n+};\n+\n+/* FIXME: OPAE_FPGA_NET type */\n+struct opae_adapter_data_net {\n+\tenum opae_adapter_type type;\n+};\n+\n+struct opae_adapter_ops {\n+\tint (*enumerate)(struct opae_adapter *adapter);\n+\tvoid (*destroy)(struct opae_adapter *adapter);\n+};\n+\n+TAILQ_HEAD(opae_accelerator_list, opae_accelerator);\n+\n+#define opae_adapter_for_each_acc(adatper, acc) \\\n+\tTAILQ_FOREACH(acc, &adapter->acc_list, node)\n+\n+struct opae_adapter {\n+\tconst char *name;\n+\tstruct opae_manager *mgr;\n+\tstruct opae_accelerator_list acc_list;\n+\tstruct opae_adapter_ops *ops;\n+\tvoid *data;\n+};\n+\n+/* OPAE Adapter APIs */\n+void *opae_adapter_data_alloc(enum opae_adapter_type type);\n+#define opae_adapter_data_free(data) opae_free(data)\n+\n+struct opae_adapter *opae_adapter_alloc(const char *name, void *data);\n+#define opae_adapter_free(adapter) opae_free(adapter)\n+\n+int opae_adapter_enumerate(struct opae_adapter *adapter);\n+void opae_adapter_destroy(struct opae_adapter *adapter);\n+static inline struct opae_manager *\n+opae_adapter_get_mgr(struct opae_adapter *adapter)\n+{\n+\treturn adapter ? 
adapter->mgr : NULL;\n+}\n+\n+struct opae_accelerator *\n+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id);\n+\n+static inline void opae_adapter_add_acc(struct opae_adapter *adapter,\n+\t\t\t\t\tstruct opae_accelerator *acc)\n+{\n+\tTAILQ_INSERT_TAIL(&adapter->acc_list, acc, node);\n+}\n+\n+static inline void opae_adapter_remove_acc(struct opae_adapter *adapter,\n+\t\t\t\t\t   struct opae_accelerator *acc)\n+{\n+\tTAILQ_REMOVE(&adapter->acc_list, acc, node);\n+}\n+#endif /* _OPAE_HW_API_H_*/\ndiff --git a/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c\nnew file mode 100644\nindex 0000000..a40c8da\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c\n@@ -0,0 +1,145 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#include \"opae_ifpga_hw_api.h\"\n+#include \"ifpga_api.h\"\n+\n+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,\n+\t\t\t\tstruct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme;\n+\n+\tif (!mgr || !mgr->data)\n+\t\treturn -EINVAL;\n+\n+\tfme = mgr->data;\n+\n+\treturn ifpga_get_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);\n+}\n+\n+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,\n+\t\t\t\tstruct feature_prop *prop)\n+{\n+\tstruct ifpga_fme_hw *fme;\n+\n+\tif (!mgr || !mgr->data)\n+\t\treturn -EINVAL;\n+\n+\tfme = mgr->data;\n+\n+\treturn ifpga_set_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);\n+}\n+\n+int opae_manager_ifpga_get_info(struct opae_manager *mgr,\n+\t\t\t\tstruct fpga_fme_info *fme_info)\n+{\n+\tstruct ifpga_fme_hw *fme;\n+\n+\tif (!mgr || !mgr->data || !fme_info)\n+\t\treturn -EINVAL;\n+\n+\tfme = mgr->data;\n+\n+\tspinlock_lock(&fme->lock);\n+\tfme_info->capability = fme->capability;\n+\tspinlock_unlock(&fme->lock);\n+\n+\treturn 0;\n+}\n+\n+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,\n+\t\t\t\t   struct fpga_fme_err_irq_set *err_irq_set)\n+{\n+\tstruct 
ifpga_fme_hw *fme;\n+\n+\tif (!mgr || !mgr->data)\n+\t\treturn -EINVAL;\n+\n+\tfme = mgr->data;\n+\n+\treturn ifpga_set_irq(fme->parent, FEATURE_FIU_ID_FME, 0,\n+\t\t\t     IFPGA_FME_FEATURE_ID_GLOBAL_ERR, err_irq_set);\n+}\n+\n+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,\n+\t\t\t       struct feature_prop *prop)\n+{\n+\tstruct ifpga_port_hw *port;\n+\n+\tif (!br || !br->data)\n+\t\treturn -EINVAL;\n+\n+\tport = br->data;\n+\n+\treturn ifpga_get_prop(port->parent, FEATURE_FIU_ID_PORT,\n+\t\t\t      port->port_id, prop);\n+}\n+\n+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,\n+\t\t\t       struct feature_prop *prop)\n+{\n+\tstruct ifpga_port_hw *port;\n+\n+\tif (!br || !br->data)\n+\t\treturn -EINVAL;\n+\n+\tport = br->data;\n+\n+\treturn ifpga_set_prop(port->parent, FEATURE_FIU_ID_PORT,\n+\t\t\t      port->port_id, prop);\n+}\n+\n+int opae_bridge_ifpga_get_info(struct opae_bridge *br,\n+\t\t\t       struct fpga_port_info *port_info)\n+{\n+\tstruct ifpga_port_hw *port;\n+\n+\tif (!br || !br->data || !port_info)\n+\t\treturn -EINVAL;\n+\n+\tport = br->data;\n+\n+\tspinlock_lock(&port->lock);\n+\tport_info->capability = port->capability;\n+\tport_info->num_uafu_irqs = port->num_uafu_irqs;\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,\n+\t\t\t\t      struct fpga_port_region_info *info)\n+{\n+\tstruct ifpga_port_hw *port;\n+\n+\tif (!br || !br->data || !info)\n+\t\treturn -EINVAL;\n+\n+\t/* Only support STP region now */\n+\tif (info->index != PORT_REGION_INDEX_STP)\n+\t\treturn -EINVAL;\n+\n+\tport = br->data;\n+\n+\tspinlock_lock(&port->lock);\n+\tinfo->addr = (u64)port->stp_addr;\n+\tinfo->size = port->stp_size;\n+\tspinlock_unlock(&port->lock);\n+\n+\treturn 0;\n+}\n+\n+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,\n+\t\t\t\t  struct fpga_port_err_irq_set *err_irq_set)\n+{\n+\tstruct ifpga_port_hw *port;\n+\n+\tif (!br || !br->data)\n+\t\treturn 
-EINVAL;\n+\n+\tport = br->data;\n+\n+\treturn ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,\n+\t\t\t     IFPGA_PORT_FEATURE_ID_ERROR, err_irq_set);\n+}\ndiff --git a/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h\nnew file mode 100644\nindex 0000000..65086cf\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h\n@@ -0,0 +1,279 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _OPAE_IFPGA_HW_API_H_\n+#define _OPAE_IFPGA_HW_API_H_\n+\n+#include \"opae_hw_api.h\"\n+\n+/**\n+ * struct feature_prop - data structure for feature property\n+ * @feature_id: id of this feature.\n+ * @prop_id: id of this property under this feature.\n+ * @data: property value to set/get.\n+ */\n+struct feature_prop {\n+\tu64 feature_id;\n+\tu64 prop_id;\n+\tu64 data;\n+};\n+\n+#define IFPGA_FIU_ID_FME\t0x0\n+#define IFPGA_FIU_ID_PORT\t0x1\n+\n+#define IFPGA_FME_FEATURE_ID_HEADER\t\t0x0\n+#define IFPGA_FME_FEATURE_ID_THERMAL_MGMT\t0x1\n+#define IFPGA_FME_FEATURE_ID_POWER_MGMT\t\t0x2\n+#define IFPGA_FME_FEATURE_ID_GLOBAL_IPERF\t0x3\n+#define IFPGA_FME_FEATURE_ID_GLOBAL_ERR\t\t0x4\n+#define IFPGA_FME_FEATURE_ID_PR_MGMT\t\t0x5\n+#define IFPGA_FME_FEATURE_ID_HSSI\t\t0x6\n+#define IFPGA_FME_FEATURE_ID_GLOBAL_DPERF\t0x7\n+\n+#define IFPGA_PORT_FEATURE_ID_HEADER\t\t0x0\n+#define IFPGA_PORT_FEATURE_ID_AFU\t\t0xff\n+#define IFPGA_PORT_FEATURE_ID_ERROR\t\t0x10\n+#define IFPGA_PORT_FEATURE_ID_UMSG\t\t0x11\n+#define IFPGA_PORT_FEATURE_ID_UINT\t\t0x12\n+#define IFPGA_PORT_FEATURE_ID_STP\t\t0x13\n+\n+/*\n+ * PROP format (TOP + SUB + ID)\n+ *\n+ * (~0x0) means this field is unused.\n+ */\n+#define PROP_TOP\tGENMASK(31, 24)\n+#define PROP_TOP_UNUSED\t0xff\n+#define PROP_SUB\tGENMASK(23, 16)\n+#define PROP_SUB_UNUSED\t0xff\n+#define PROP_ID\t\tGENMASK(15, 0)\n+\n+#define PROP(_top, _sub, _id) \\\n+\t(SET_FIELD(PROP_TOP, _top) | SET_FIELD(PROP_SUB, _sub) 
|\\\n+\t SET_FIELD(PROP_ID, _id))\n+\n+/* FME head feature's properties*/\n+#define FME_HDR_PROP_REVISION\t\t0x1\t/* RDONLY */\n+#define FME_HDR_PROP_PORTS_NUM\t\t0x2\t/* RDONLY */\n+#define FME_HDR_PROP_CACHE_SIZE\t\t0x3\t/* RDONLY */\n+#define FME_HDR_PROP_VERSION\t\t\t0x4\t/* RDONLY */\n+#define FME_HDR_PROP_SOCKET_ID\t\t0x5\t/* RDONLY */\n+#define FME_HDR_PROP_BITSTREAM_ID\t\t0x6\t/* RDONLY */\n+#define FME_HDR_PROP_BITSTREAM_METADATA\t0x7\t/* RDONLY */\n+\n+/* FME error reporting feature's properties */\n+/* FME error reporting properties format */\n+#define ERR_PROP(_top, _id)\t\tPROP(_top, 0xff, _id)\n+#define ERR_PROP_TOP_UNUSED\t\tPROP_TOP_UNUSED\n+#define ERR_PROP_TOP_FME_ERR\t\t0x1\n+#define ERR_PROP_ROOT(_id)\t\tERR_PROP(0xff, _id)\n+#define ERR_PROP_FME_ERR(_id)\t\tERR_PROP(ERR_PROP_TOP_FME_ERR, _id)\n+\n+#define FME_ERR_PROP_ERRORS\t\tERR_PROP_FME_ERR(0x1)\n+#define FME_ERR_PROP_FIRST_ERROR\tERR_PROP_FME_ERR(0x2)\n+#define FME_ERR_PROP_NEXT_ERROR\t\tERR_PROP_FME_ERR(0x3)\n+#define FME_ERR_PROP_CLEAR\t\tERR_PROP_FME_ERR(0x4)\t/* WO */\n+#define FME_ERR_PROP_REVISION\t\tERR_PROP_ROOT(0x5)\n+#define FME_ERR_PROP_PCIE0_ERRORS\tERR_PROP_ROOT(0x6)\t/* RW */\n+#define FME_ERR_PROP_PCIE1_ERRORS\tERR_PROP_ROOT(0x7)\t/* RW */\n+#define FME_ERR_PROP_NONFATAL_ERRORS\tERR_PROP_ROOT(0x8)\n+#define FME_ERR_PROP_CATFATAL_ERRORS\tERR_PROP_ROOT(0x9)\n+#define FME_ERR_PROP_INJECT_ERRORS\tERR_PROP_ROOT(0xa)\t/* RW */\n+\n+/* FME thermal feature's properties */\n+#define FME_THERMAL_PROP_THRESHOLD1\t\t0x1\t/* RW */\n+#define FME_THERMAL_PROP_THRESHOLD2\t\t0x2\t/* RW */\n+#define FME_THERMAL_PROP_THRESHOLD_TRIP\t\t0x3\t/* RDONLY */\n+#define FME_THERMAL_PROP_THRESHOLD1_REACHED\t0x4\t/* RDONLY */\n+#define FME_THERMAL_PROP_THRESHOLD2_REACHED\t0x5\t/* RDONLY */\n+#define FME_THERMAL_PROP_THRESHOLD1_POLICY\t0x6\t/* RW */\n+#define FME_THERMAL_PROP_TEMPERATURE\t\t0x7\t/* RDONLY */\n+#define FME_THERMAL_PROP_REVISION\t\t0x8\t/* RDONLY */\n+\n+/* FME power feature's properties 
*/\n+#define FME_PWR_PROP_CONSUMED\t\t\t0x1\t/* RDONLY */\n+#define FME_PWR_PROP_THRESHOLD1\t\t\t0x2\t/* RW */\n+#define FME_PWR_PROP_THRESHOLD2\t\t\t0x3\t/* RW */\n+#define FME_PWR_PROP_THRESHOLD1_STATUS\t\t0x4\t/* RDONLY */\n+#define FME_PWR_PROP_THRESHOLD2_STATUS\t\t0x5\t/* RDONLY */\n+#define FME_PWR_PROP_RTL\t\t\t0x6\t/* RDONLY */\n+#define FME_PWR_PROP_XEON_LIMIT\t\t\t0x7\t/* RDONLY */\n+#define FME_PWR_PROP_FPGA_LIMIT\t\t\t0x8\t/* RDONLY */\n+#define FME_PWR_PROP_REVISION\t\t\t0x9\t/* RDONLY */\n+\n+/* FME iperf/dperf PROP format */\n+#define PERF_PROP_TOP_CACHE\t\t\t0x1\n+#define PERF_PROP_TOP_VTD\t\t\t0x2\n+#define PERF_PROP_TOP_FAB\t\t\t0x3\n+#define PERF_PROP_TOP_UNUSED\t\t\tPROP_TOP_UNUSED\n+#define PERF_PROP_SUB_UNUSED\t\t\tPROP_SUB_UNUSED\n+\n+#define PERF_PROP_ROOT(_id)\t\tPROP(0xff, 0xff, _id)\n+#define PERF_PROP_CACHE(_id)\t\tPROP(PERF_PROP_TOP_CACHE, 0xff, _id)\n+#define PERF_PROP_VTD(_sub, _id)\tPROP(PERF_PROP_TOP_VTD, _sub, _id)\n+#define PERF_PROP_VTD_ROOT(_id)\t\tPROP(PERF_PROP_TOP_VTD, 0xff, _id)\n+#define PERF_PROP_FAB(_sub, _id)\tPROP(PERF_PROP_TOP_FAB, _sub, _id)\n+#define PERF_PROP_FAB_ROOT(_id)\t\tPROP(PERF_PROP_TOP_FAB, 0xff, _id)\n+\n+/* FME iperf feature's properties */\n+#define FME_IPERF_PROP_CLOCK\t\t\tPERF_PROP_ROOT(0x1)\n+#define FME_IPERF_PROP_REVISION\t\t\tPERF_PROP_ROOT(0x2)\n+\n+/* iperf CACHE properties */\n+#define FME_IPERF_PROP_CACHE_FREEZE\t\tPERF_PROP_CACHE(0x1) /* RW */\n+#define FME_IPERF_PROP_CACHE_READ_HIT\t\tPERF_PROP_CACHE(0x2)\n+#define FME_IPERF_PROP_CACHE_READ_MISS\t\tPERF_PROP_CACHE(0x3)\n+#define FME_IPERF_PROP_CACHE_WRITE_HIT\t\tPERF_PROP_CACHE(0x4)\n+#define FME_IPERF_PROP_CACHE_WRITE_MISS\t\tPERF_PROP_CACHE(0x5)\n+#define FME_IPERF_PROP_CACHE_HOLD_REQUEST\tPERF_PROP_CACHE(0x6)\n+#define FME_IPERF_PROP_CACHE_TX_REQ_STALL\tPERF_PROP_CACHE(0x7)\n+#define FME_IPERF_PROP_CACHE_RX_REQ_STALL\tPERF_PROP_CACHE(0x8)\n+#define FME_IPERF_PROP_CACHE_RX_EVICTION\tPERF_PROP_CACHE(0x9)\n+#define 
FME_IPERF_PROP_CACHE_DATA_WRITE_PORT_CONTENTION\tPERF_PROP_CACHE(0xa)\n+#define FME_IPERF_PROP_CACHE_TAG_WRITE_PORT_CONTENTION\tPERF_PROP_CACHE(0xb)\n+/* iperf VTD properties */\n+#define FME_IPERF_PROP_VTD_FREEZE\t\tPERF_PROP_VTD_ROOT(0x1) /* RW */\n+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_HIT\tPERF_PROP_VTD_ROOT(0x2)\n+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_HIT\tPERF_PROP_VTD_ROOT(0x3)\n+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_HIT\tPERF_PROP_VTD_ROOT(0x4)\n+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_HIT\tPERF_PROP_VTD_ROOT(0x5)\n+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_HIT\tPERF_PROP_VTD_ROOT(0x6)\n+#define FME_IPERF_PROP_VTD_SIP_RCC_HIT\t\tPERF_PROP_VTD_ROOT(0x7)\n+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_MISS\tPERF_PROP_VTD_ROOT(0x8)\n+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_MISS\tPERF_PROP_VTD_ROOT(0x9)\n+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_MISS\tPERF_PROP_VTD_ROOT(0xa)\n+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_MISS\tPERF_PROP_VTD_ROOT(0xb)\n+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_MISS\tPERF_PROP_VTD_ROOT(0xc)\n+#define FME_IPERF_PROP_VTD_SIP_RCC_MISS\t\tPERF_PROP_VTD_ROOT(0xd)\n+#define FME_IPERF_PROP_VTD_PORT_READ_TRANSACTION(n)\tPERF_PROP_VTD(n, 0xe)\n+#define FME_IPERF_PROP_VTD_PORT_WRITE_TRANSACTION(n)\tPERF_PROP_VTD(n, 0xf)\n+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_READ_HIT(n)\tPERF_PROP_VTD(n, 0x10)\n+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_WRITE_HIT(n)\tPERF_PROP_VTD(n, 0x11)\n+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_4K_FILL(n)\tPERF_PROP_VTD(n, 0x12)\n+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_2M_FILL(n)\tPERF_PROP_VTD(n, 0x13)\n+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_1G_FILL(n)\tPERF_PROP_VTD(n, 0x14)\n+/* iperf FAB properties */\n+#define FME_IPERF_PROP_FAB_FREEZE\t\tPERF_PROP_FAB_ROOT(0x1) /* RW */\n+#define FME_IPERF_PROP_FAB_PCIE0_READ\t\tPERF_PROP_FAB_ROOT(0x2)\n+#define FME_IPERF_PROP_FAB_PORT_PCIE0_READ(n)\tPERF_PROP_FAB(n, 0x2)\n+#define FME_IPERF_PROP_FAB_PCIE0_WRITE\t\tPERF_PROP_FAB_ROOT(0x3)\n+#define 
FME_IPERF_PROP_FAB_PORT_PCIE0_WRITE(n)\tPERF_PROP_FAB(n, 0x3)\n+#define FME_IPERF_PROP_FAB_PCIE1_READ\t\tPERF_PROP_FAB_ROOT(0x4)\n+#define FME_IPERF_PROP_FAB_PORT_PCIE1_READ(n)\tPERF_PROP_FAB(n, 0x4)\n+#define FME_IPERF_PROP_FAB_PCIE1_WRITE\t\tPERF_PROP_FAB_ROOT(0x5)\n+#define FME_IPERF_PROP_FAB_PORT_PCIE1_WRITE(n)\tPERF_PROP_FAB(n, 0x5)\n+#define FME_IPERF_PROP_FAB_UPI_READ\t\tPERF_PROP_FAB_ROOT(0x6)\n+#define FME_IPERF_PROP_FAB_PORT_UPI_READ(n)\tPERF_PROP_FAB(n, 0x6)\n+#define FME_IPERF_PROP_FAB_UPI_WRITE\t\tPERF_PROP_FAB_ROOT(0x7)\n+#define FME_IPERF_PROP_FAB_PORT_UPI_WRITE(n)\tPERF_PROP_FAB(n, 0x7)\n+#define FME_IPERF_PROP_FAB_MMIO_READ\t\tPERF_PROP_FAB_ROOT(0x8)\n+#define FME_IPERF_PROP_FAB_PORT_MMIO_READ(n)\tPERF_PROP_FAB(n, 0x8)\n+#define FME_IPERF_PROP_FAB_MMIO_WRITE\t\tPERF_PROP_FAB_ROOT(0x9)\n+#define FME_IPERF_PROP_FAB_PORT_MMIO_WRITE(n)\tPERF_PROP_FAB(n, 0x9)\n+#define FME_IPERF_PROP_FAB_ENABLE\t\tPERF_PROP_FAB_ROOT(0xa) /* RW */\n+#define FME_IPERF_PROP_FAB_PORT_ENABLE(n)\tPERF_PROP_FAB(n, 0xa)   /* RW */\n+\n+/* FME dperf properties */\n+#define FME_DPERF_PROP_CLOCK\t\t\tPERF_PROP_ROOT(0x1)\n+#define FME_DPERF_PROP_REVISION\t\t\tPERF_PROP_ROOT(0x2)\n+\n+/* dperf FAB properties */\n+#define FME_DPERF_PROP_FAB_FREEZE\t\tPERF_PROP_FAB_ROOT(0x1) /* RW */\n+#define FME_DPERF_PROP_FAB_PCIE0_READ\t\tPERF_PROP_FAB_ROOT(0x2)\n+#define FME_DPERF_PROP_FAB_PORT_PCIE0_READ(n)\tPERF_PROP_FAB(n, 0x2)\n+#define FME_DPERF_PROP_FAB_PCIE0_WRITE\t\tPERF_PROP_FAB_ROOT(0x3)\n+#define FME_DPERF_PROP_FAB_PORT_PCIE0_WRITE(n)\tPERF_PROP_FAB(n, 0x3)\n+#define FME_DPERF_PROP_FAB_MMIO_READ\t\tPERF_PROP_FAB_ROOT(0x4)\n+#define FME_DPERF_PROP_FAB_PORT_MMIO_READ(n)\tPERF_PROP_FAB(n, 0x4)\n+#define FME_DPERF_PROP_FAB_MMIO_WRITE\t\tPERF_PROP_FAB_ROOT(0x5)\n+#define FME_DPERF_PROP_FAB_PORT_MMIO_WRITE(n)\tPERF_PROP_FAB(n, 0x5)\n+#define FME_DPERF_PROP_FAB_ENABLE\t\tPERF_PROP_FAB_ROOT(0x6) /* RW */\n+#define FME_DPERF_PROP_FAB_PORT_ENABLE(n)\tPERF_PROP_FAB(n, 0x6)   /* RW */\n+\n+/*PORT 
hdr feature's properties*/\n+#define PORT_HDR_PROP_REVISION\t\t\t0x1\t/* RDONLY */\n+#define PORT_HDR_PROP_PORTIDX\t\t\t0x2\t/* RDONLY */\n+#define PORT_HDR_PROP_LATENCY_TOLERANCE\t\t0x3\t/* RDONLY */\n+#define PORT_HDR_PROP_AP1_EVENT\t\t\t0x4\t/* RW */\n+#define PORT_HDR_PROP_AP2_EVENT\t\t\t0x5\t/* RW */\n+#define PORT_HDR_PROP_POWER_STATE\t\t0x6\t/* RDONLY */\n+#define PORT_HDR_PROP_USERCLK_FREQCMD\t\t0x7\t/* RW */\n+#define PORT_HDR_PROP_USERCLK_FREQCNTRCMD\t0x8\t/* RW */\n+#define PORT_HDR_PROP_USERCLK_FREQSTS\t\t0x9\t/* RDONLY */\n+#define PORT_HDR_PROP_USERCLK_CNTRSTS\t\t0xa\t/* RDONLY */\n+\n+/*PORT error feature's properties*/\n+#define PORT_ERR_PROP_REVISION\t\t\t0x1\t/* RDONLY */\n+#define PORT_ERR_PROP_ERRORS\t\t\t0x2\t/* RDONLY */\n+#define PORT_ERR_PROP_FIRST_ERROR\t\t0x3\t/* RDONLY */\n+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB\t0x4\t/* RDONLY */\n+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB\t0x5\t/* RDONLY */\n+#define PORT_ERR_PROP_CLEAR\t\t\t0x6\t/* WRONLY */\n+\n+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,\n+\t\t\t\tstruct feature_prop *prop);\n+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,\n+\t\t\t\tstruct feature_prop *prop);\n+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,\n+\t\t\t       struct feature_prop *prop);\n+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,\n+\t\t\t       struct feature_prop *prop);\n+\n+/*\n+ * Retrieve information about the fpga fme.\n+ * Driver fills the info in provided struct fpga_fme_info.\n+ */\n+struct fpga_fme_info {\n+\tu32 capability;\t\t/* The capability of FME device */\n+#define FPGA_FME_CAP_ERR_IRQ\t(1 << 0) /* Support fme error interrupt */\n+};\n+\n+int opae_manager_ifpga_get_info(struct opae_manager *mgr,\n+\t\t\t\tstruct fpga_fme_info *fme_info);\n+\n+/* Set eventfd information for ifpga FME error interrupt */\n+struct fpga_fme_err_irq_set {\n+\ts32 evtfd;\t\t/* Eventfd handler */\n+};\n+\n+int opae_manager_ifpga_set_err_irq(struct opae_manager 
*mgr,\n+\t\t\t\t   struct fpga_fme_err_irq_set *err_irq_set);\n+\n+/*\n+ * Retrieve information about the fpga port.\n+ * Driver fills the info in provided struct fpga_port_info.\n+ */\n+struct fpga_port_info {\n+\tu32 capability;\t/* The capability of port device */\n+#define FPGA_PORT_CAP_ERR_IRQ\t(1 << 0) /* Support port error interrupt */\n+#define FPGA_PORT_CAP_UAFU_IRQ\t(1 << 1) /* Support uafu error interrupt */\n+\tu32 num_umsgs;\t/* The number of allocated umsgs */\n+\tu32 num_uafu_irqs;\t/* The number of uafu interrupts */\n+};\n+\n+int opae_bridge_ifpga_get_info(struct opae_bridge *br,\n+\t\t\t       struct fpga_port_info *port_info);\n+/*\n+ * Retrieve region information about the fpga port.\n+ * Driver needs to fill the index of struct fpga_port_region_info.\n+ */\n+struct fpga_port_region_info {\n+\tu32 index;\n+#define PORT_REGION_INDEX_STP\t(1 << 1)\t/* Signal Tap Region */\n+\tu64 size;\t/* Region Size */\n+\tu64 addr;\t/* Base address of the region */\n+};\n+\n+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,\n+\t\t\t\t      struct fpga_port_region_info *info);\n+\n+/* Set eventfd information for ifpga port error interrupt */\n+struct fpga_port_err_irq_set {\n+\ts32 evtfd;\t\t/* Eventfd handler */\n+};\n+\n+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,\n+\t\t\t\t  struct fpga_port_err_irq_set *err_irq_set);\n+\n+#endif /* _OPAE_IFPGA_HW_API_H_ */\ndiff --git a/drivers/raw/ifpga_rawdev/base/opae_osdep.h b/drivers/raw/ifpga_rawdev/base/opae_osdep.h\nnew file mode 100644\nindex 0000000..e9da710\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/opae_osdep.h\n@@ -0,0 +1,81 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _OPAE_OSDEP_H\n+#define _OPAE_OSDEP_H\n+\n+#include <string.h>\n+#include <stdbool.h>\n+\n+#ifdef RTE_LIBRTE_EAL\n+#include \"osdep_rte/osdep_generic.h\"\n+#else\n+#include \"osdep_raw/osdep_generic.h\"\n+#endif\n+\n+#include 
<asm/types.h>\n+\n+#define __iomem\n+\n+typedef uint8_t\t\tu8;\n+typedef int8_t\t\ts8;\n+typedef uint16_t\tu16;\n+typedef uint32_t\tu32;\n+typedef int32_t\t\ts32;\n+typedef uint64_t\tu64;\n+typedef uint64_t\tdma_addr_t;\n+\n+struct uuid {\n+\tu8 b[16];\n+};\n+\n+#ifndef LINUX_MACROS\n+#ifndef BITS_PER_LONG\n+#define BITS_PER_LONG\t(__SIZEOF_LONG__ * 8)\n+#endif\n+#ifndef BIT\n+#define BIT(a) (1UL << (a))\n+#endif /* BIT */\n+#ifndef BIT_ULL\n+#define BIT_ULL(a) (1ULL << (a))\n+#endif /* BIT_ULL */\n+#ifndef GENMASK\n+#define GENMASK(h, l)\t(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))\n+#endif /* GENMASK */\n+#ifndef GENMASK_ULL\n+#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))\n+#endif /* GENMASK_ULL */\n+#endif /* LINUX_MACROS */\n+\n+#define SET_FIELD(m, v) (((v) << (__builtin_ffsll(m) - 1)) & (m))\n+#define GET_FIELD(m, v) (((v) & (m)) >> (__builtin_ffsll(m) - 1))\n+\n+#define dev_err(x, args...) dev_printf(ERR, args)\n+#define dev_info(x, args...) dev_printf(INFO, args)\n+#define dev_warn(x, args...) dev_printf(WARNING, args)\n+\n+#ifdef OPAE_DEBUG\n+#define dev_debug(x, args...) dev_printf(DEBUG, args)\n+#else\n+#define dev_debug(x, args...) do { } while (0)\n+#endif\n+\n+#define pr_err(y, args...) dev_err(0, y, ##args)\n+#define pr_warn(y, args...) dev_warn(0, y, ##args)\n+#define pr_info(y, args...) 
dev_info(0, y, ##args)\n+\n+#ifndef WARN_ON\n+#define WARN_ON(x) do { \\\n+\tint ret = !!(x); \\\n+\tif (unlikely(ret)) \\\n+\t\tpr_warn(\"WARN_ON: \\\"\" #x \"\\\" at %s:%d\\n\", __func__, __LINE__); \\\n+} while (0)\n+#endif\n+\n+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))\n+#define udelay(x) opae_udelay(x)\n+#define msleep(x) opae_udelay(1000 * (x))\n+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))\n+\n+#endif\ndiff --git a/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h b/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h\nnew file mode 100644\nindex 0000000..895a1d8\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h\n@@ -0,0 +1,75 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _OSDEP_RAW_GENERIC_H\n+#define _OSDEP_RAW_GENERIC_H\n+\n+#define\tcompiler_barrier() (asm volatile (\"\" : : : \"memory\"))\n+\n+#define io_wmb() compiler_barrier()\n+#define io_rmb() compiler_barrier()\n+\n+static inline uint8_t opae_readb(const volatile void *addr)\n+{\n+\tuint8_t val;\n+\n+\tval = *(const volatile uint8_t *)addr;\n+\tio_rmb();\n+\treturn val;\n+}\n+\n+static inline uint16_t opae_readw(const volatile void *addr)\n+{\n+\tuint16_t val;\n+\n+\tval = *(const volatile uint16_t *)addr;\n+\tio_rmb();\n+\treturn val;\n+}\n+\n+static inline uint32_t opae_readl(const volatile void *addr)\n+{\n+\tuint32_t val;\n+\n+\tval = *(const volatile uint32_t *)addr;\n+\tio_rmb();\n+\treturn val;\n+}\n+\n+static inline uint64_t opae_readq(const volatile void *addr)\n+{\n+\tuint64_t val;\n+\n+\tval = *(const volatile uint64_t *)addr;\n+\tio_rmb();\n+\treturn val;\n+}\n+\n+static inline void opae_writeb(uint8_t value, volatile void *addr)\n+{\n+\tio_wmb();\n+\t*(volatile uint8_t *)addr = value;\n+}\n+\n+static inline void opae_writew(uint16_t value, volatile void *addr)\n+{\n+\tio_wmb();\n+\t*(volatile uint16_t *)addr = value;\n+}\n+\n+static inline void 
opae_writel(uint32_t value, volatile void *addr)\n+{\n+\tio_wmb();\n+\t*(volatile uint32_t *)addr = value;\n+}\n+\n+static inline void opae_writeq(uint64_t value, volatile void *addr)\n+{\n+\tio_wmb();\n+\t*(volatile uint64_t *)addr = value;\n+}\n+\n+#define opae_free(addr) free(addr)\n+\n+#endif\ndiff --git a/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h b/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h\nnew file mode 100644\nindex 0000000..76902e2\n--- /dev/null\n+++ b/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h\n@@ -0,0 +1,45 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2018 Intel Corporation\n+ */\n+\n+#ifndef _OSDEP_RTE_GENERIC_H\n+#define _OSDEP_RTE_GENERIC_H\n+\n+#include <rte_common.h>\n+#include <rte_cycles.h>\n+#include <rte_spinlock.h>\n+#include <rte_log.h>\n+#include <rte_io.h>\n+#include <rte_malloc.h>\n+\n+#define dev_printf(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"osdep_rte: \" fmt, ## args)\n+\n+#define osdep_panic(...) rte_panic(...)\n+\n+#define opae_udelay(x) rte_delay_us(x)\n+\n+#define opae_readb(addr) rte_read8(addr)\n+#define opae_readw(addr) rte_read16(addr)\n+#define opae_readl(addr) rte_read32(addr)\n+#define opae_readq(addr) rte_read64(addr)\n+#define opae_writeb(value, addr) rte_write8(value, addr)\n+#define opae_writew(value, addr) rte_write16(value, addr)\n+#define opae_writel(value, addr) rte_write32(value, addr)\n+#define opae_writeq(value, addr) rte_write64(value, addr)\n+\n+#define opae_malloc(size) rte_malloc(NULL, size, 0)\n+#define opae_zmalloc(size) rte_zmalloc(NULL, size, 0)\n+#define opae_free(addr) rte_free(addr)\n+\n+#define ARRAY_SIZE(arr) RTE_DIM(arr)\n+\n+#define min(a, b) RTE_MIN(a, b)\n+#define max(a, b) RTE_MAX(a, b)\n+\n+#define spinlock_t rte_spinlock_t\n+#define spinlock_init(x) rte_spinlock_init(x)\n+#define spinlock_lock(x) rte_spinlock_lock(x)\n+#define spinlock_unlock(x) rte_spinlock_unlock(x)\n+\n+#endif\n",
    "prefixes": [
        "dpdk-dev",
        "v9",
        "2/4"
    ]
}