[v1,22/22] net/ixgbe/base: add support for E610 device

Message ID 9dc7b80e7c3542323ab1d0d22ab96a882abb7fff.1713964708.git.anatoly.burakov@intel.com (mailing list archive)
State Superseded
Delegated to: Bruce Richardson
Series: Update IXGBE base driver

Checks

Context Check Description
ci/loongarch-compilation success Compilation OK
ci/checkpatch warning coding style issues
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/intel-Functional success Functional PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/github-robot: build success github build: passed
ci/iol-sample-apps-testing success Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-abi-testing success Testing PASS
ci/iol-unit-amd64-testing success Testing PASS
ci/iol-compile-amd64-testing success Testing PASS
ci/iol-unit-arm64-testing success Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-compile-arm64-testing success Testing PASS

Commit Message

Anatoly Burakov April 24, 2024, 1:21 p.m. UTC
  From: Stefan Wegrzyn <stefan.wegrzyn@intel.com>

This patch adds support for the E610 device to the ixgbe driver. This is a
squashed commit from the internal development tree.

Signed-off-by: Andrii Staikov <andrii.staikov@intel.com>
Signed-off-by: Andrzej Komor <andrzejx.komor@intel.com>
Signed-off-by: Artur Tyminski <arturx.tyminski@intel.com>
Signed-off-by: Bartosz Jakub Rosadzinski <bartosz.jakub.rosadzinski@intel.com>
Signed-off-by: Brzezinski, Filip <filip.brzezinski@intel.com>
Signed-off-by: Carolyn Wyborny <carolyn.wyborny@intel.com>
Signed-off-by: Chinh Cao <chinh.t.cao@intel.com>
Signed-off-by: Dawid Zielinski <dawid.zielinski@intel.com>
Signed-off-by: Eryk Rybak <eryk.roch.rybak@intel.com>
Signed-off-by: Fabio Pricoco <fabio.pricoco@intel.com>
Signed-off-by: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
Signed-off-by: Julian Grajkowski <julianx.grajkowski@intel.com>
Signed-off-by: Krzysztof Galazka <krzysztof.galazka@intel.com>
Signed-off-by: Leszek Zygo <leszek.zygo@intel.com>
Signed-off-by: Mical MarekX <marekx.mical@intel.com>
Signed-off-by: Milosz Szymonek <milosz.szymonek@intel.com>
Signed-off-by: Pawel Malinowski <pawel.malinowski@intel.com>
Signed-off-by: Piotr Kubaj <piotrx.kubaj@intel.com>
Signed-off-by: Piotr Skajewski <piotrx.skajewski@intel.com>
Signed-off-by: Slawomir Mrozowicz <slawomirx.mrozowicz@intel.com>
Signed-off-by: Stefan Wegrzyn <stefan.wegrzyn@intel.com>
Signed-off-by: Yogesh Bhosale <yogesh.bhosale@intel.com>
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 drivers/net/ixgbe/base/README            |    6 +-
 drivers/net/ixgbe/base/ixgbe_82599.c     |    5 +-
 drivers/net/ixgbe/base/ixgbe_api.c       |   58 +-
 drivers/net/ixgbe/base/ixgbe_api.h       |    5 +-
 drivers/net/ixgbe/base/ixgbe_common.c    |   29 +-
 drivers/net/ixgbe/base/ixgbe_e610.c      | 4982 ++++++++++++++++++++++
 drivers/net/ixgbe/base/ixgbe_e610.h      |  163 +
 drivers/net/ixgbe/base/ixgbe_hv_vf.c     |    3 +-
 drivers/net/ixgbe/base/ixgbe_mbx.c       |    3 +
 drivers/net/ixgbe/base/ixgbe_osdep.c     |   43 +
 drivers/net/ixgbe/base/ixgbe_osdep.h     |   17 +
 drivers/net/ixgbe/base/ixgbe_phy.c       |    4 +-
 drivers/net/ixgbe/base/ixgbe_type.h      |   65 +-
 drivers/net/ixgbe/base/ixgbe_type_e610.h | 2181 ++++++++++
 drivers/net/ixgbe/base/meson.build       |    2 +
 15 files changed, 7547 insertions(+), 19 deletions(-)
 create mode 100644 drivers/net/ixgbe/base/ixgbe_e610.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_e610.h
 create mode 100644 drivers/net/ixgbe/base/ixgbe_osdep.c
 create mode 100644 drivers/net/ixgbe/base/ixgbe_type_e610.h
  

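The bulk of the new code is the E610 Admin Command Interface (ACI) in
ixgbe_e610.c, wired up through the new ixgbe_mac_E610 MAC type and the
ixgbe_init_ops_E610() entry point added to ixgbe_api.c. A minimal sketch of
how that path is expected to be exercised, using only functions introduced or
already exported by this series; the probe/teardown framing around them is
illustrative, not the driver's actual init code:

/*
 * Illustrative sketch (not part of the patch): device probe maps an E610
 * device ID to ixgbe_mac_E610 and installs the E610 ops, after which
 * firmware requests go through the ACI helpers from ixgbe_e610.c.
 */
#include "ixgbe_api.h"
#include "ixgbe_e610.h"

static s32 e610_bring_up_example(struct ixgbe_hw *hw)
{
	s32 status;

	/* Resolves hw->mac.type from hw->device_id (the E610 IDs added in
	 * this patch) and installs the E610 ops via ixgbe_init_ops_E610().
	 */
	status = ixgbe_init_shared_code(hw);
	if (status != IXGBE_SUCCESS)
		return status;

	/* The ACI lock must exist before any Admin Command is issued. */
	ixgbe_init_aci(hw);

	/* Example ACI use: query the firmware version (ACI opcode 0x0001);
	 * on success the fw_* fields of the hw struct are populated.
	 */
	status = ixgbe_aci_get_fw_ver(hw);

	ixgbe_shutdown_aci(hw);
	return status;
}
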
Patch

diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index 2c74693924..98353ba26f 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -1,12 +1,12 @@ 
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2020 Intel Corporation
+ * Copyright(c) 2010-2024 Intel Corporation
  */
 
 Intel® IXGBE driver
 ===================
 
 This directory contains source code of FreeBSD ixgbe driver of version
-not-released-cid-ixgbe.2020.06.09.tar.gz released by the team which develop
+not-released-cid-ixgbe.2024.04.24.tar.gz released by the team which develop
 basic drivers for any ixgbe NIC. The sub-directory of base/
 contains the original source package.
 This driver is valid for the product(s) listed below
@@ -24,6 +24,7 @@  This driver is valid for the product(s) listed below
 * Intel® Ethernet Server Adapter X520 Series
 * Intel® Ethernet Server Adapter X520-T2
 * Intel® Ethernet Controller X550 Series
+* Intel® Ethernet Controller E610 Series
 
 Updating the driver
 ===================
@@ -32,3 +33,4 @@  NOTE: The source code in this directory should not be modified apart from
 the following file(s):
 
     ixgbe_osdep.h
+    ixgbe_osdep.c
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index 562034b242..419fec689e 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -1389,7 +1389,8 @@  void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
 	fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
 	if ((hw->mac.type == ixgbe_mac_X550) ||
 	    (hw->mac.type == ixgbe_mac_X550EM_x) ||
-	    (hw->mac.type == ixgbe_mac_X550EM_a))
+	    (hw->mac.type == ixgbe_mac_X550EM_a) ||
+	    (hw->mac.type == ixgbe_mac_E610))
 		fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
@@ -1804,6 +1805,7 @@  s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 		case ixgbe_mac_X550:
 		case ixgbe_mac_X550EM_x:
 		case ixgbe_mac_X550EM_a:
+		case ixgbe_mac_E610:
 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
 			break;
 		default:
@@ -1827,6 +1829,7 @@  s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 		case ixgbe_mac_X550:
 		case ixgbe_mac_X550EM_x:
 		case ixgbe_mac_X550EM_a:
+		case ixgbe_mac_E610:
 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
 			break;
 		default:
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index d2b74cdffc..d348a81a88 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -89,6 +89,9 @@  s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
 	case ixgbe_mac_X550EM_a_vf:
 		status = ixgbe_init_ops_vf(hw);
 		break;
+	case ixgbe_mac_E610:
+		status = ixgbe_init_ops_E610(hw);
+		break;
 	default:
 		status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
 		break;
@@ -208,6 +211,14 @@  s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
 		hw->mvals = ixgbe_mvals_X550EM_a;
 		break;
+	case IXGBE_DEV_ID_E610_BACKPLANE:
+	case IXGBE_DEV_ID_E610_SFP:
+	case IXGBE_DEV_ID_E610_10G_T:
+	case IXGBE_DEV_ID_E610_2_5G_T:
+	case IXGBE_DEV_ID_E610_SGMII:
+		hw->mac.type = ixgbe_mac_E610;
+		hw->mvals = ixgbe_mvals_X550EM_a;
+		break;
 	default:
 		ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
@@ -445,7 +456,8 @@  s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
 {
-	return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
+	return ixgbe_call_func(hw, hw->eeprom.ops.read_pba_string, (hw, pba_num,
+			       pba_num_size), IXGBE_NOT_IMPLEMENTED);
 }
 
 /**
@@ -1141,6 +1153,19 @@  s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
 			       IXGBE_NOT_IMPLEMENTED);
 }
 
+/**
+ * ixgbe_get_fw_tsam_mode - Returns information whether TSAM is enabled
+ * @hw: pointer to hardware structure
+ *
+ * Checks Thermal Sensor Autonomous Mode by reading the value of the
+ * dedicated register.
+ * Returns True if TSAM is enabled, False if TSAM is disabled.
+ */
+bool ixgbe_get_fw_tsam_mode(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_fw_tsam_mode, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
 
 /**
  * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
@@ -1684,3 +1709,34 @@  void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
 	if (hw->mac.ops.set_rate_select_speed)
 		hw->mac.ops.set_rate_select_speed(hw, speed);
 }
+
+/**
+ * ixgbe_get_fw_version - get FW version
+ * @hw: pointer to hardware structure
+ *
+ * Get the current FW version.
+ *
+ * Return: the exit code of the operation or IXGBE_NOT_IMPLEMENTED
+ * if the function is not implemented.
+ */
+s32 ixgbe_get_fw_version(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_fw_version,
+			       (hw), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_get_nvm_ver - get NVM version
+ * @hw: pointer to hardware structure
+ * @nvm: pointer to NVM info structure
+ *
+ * Get the current NVM version.
+ *
+ * Return: the exit code of the operation or IXGBE_NOT_IMPLEMENTED
+ * if the function is not implemented.
+ */
+s32 ixgbe_get_nvm_ver(struct ixgbe_hw* hw, struct ixgbe_nvm_info *nvm)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.get_nvm_version,
+			       (hw, nvm), IXGBE_NOT_IMPLEMENTED);
+}
diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h
index 51decc5fae..cb572a337d 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.h
+++ b/drivers/net/ixgbe/base/ixgbe_api.h
@@ -18,6 +18,7 @@  extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
 
 s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
@@ -105,6 +106,7 @@  s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
 s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
 s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
 			 u8 ver, u16 len, char *driver_ver);
+bool ixgbe_get_fw_tsam_mode(struct ixgbe_hw *hw);
 s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
 s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
@@ -192,5 +194,6 @@  void ixgbe_disable_rx(struct ixgbe_hw *hw);
 void ixgbe_enable_rx(struct ixgbe_hw *hw);
 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
 			u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
-
+s32 ixgbe_get_fw_version(struct ixgbe_hw *hw);
+s32 ixgbe_get_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
 #endif /* _IXGBE_API_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index 73b5935d88..51fb4050c6 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -61,6 +61,7 @@  s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
 				      ixgbe_validate_eeprom_checksum_generic;
 	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
 	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
+	eeprom->ops.read_pba_string = ixgbe_read_pba_string_generic;
 
 	/* MAC */
 	mac->ops.init_hw = ixgbe_init_hw_generic;
@@ -146,6 +147,7 @@  bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
 		case IXGBE_DEV_ID_X550EM_A_SFP_N:
 		case IXGBE_DEV_ID_X550EM_A_QSFP:
 		case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+		case IXGBE_DEV_ID_E610_SFP:
 			supported = false;
 			break;
 		default:
@@ -177,6 +179,8 @@  bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
 		case IXGBE_DEV_ID_X550EM_A_10G_T:
 		case IXGBE_DEV_ID_X550EM_A_1G_T:
 		case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+		case IXGBE_DEV_ID_E610_10G_T:
+		case IXGBE_DEV_ID_E610_2_5G_T:
 			supported = true;
 			break;
 		default:
@@ -577,17 +581,11 @@  s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
 		}
 	}
 
-	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+	if (hw->mac.type == ixgbe_mac_X540 ||
+	    hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_E610) {
 		if (hw->phy.id == 0)
 			ixgbe_identify_phy(hw);
-		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
-				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
-		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
-				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
-		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
-				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
-		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
-				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
 	}
 
 	return IXGBE_SUCCESS;
@@ -998,6 +996,9 @@  void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
 	case IXGBE_PCI_LINK_SPEED_8000:
 		hw->bus.speed = ixgbe_bus_speed_8000;
 		break;
+	case IXGBE_PCI_LINK_SPEED_16000:
+		hw->bus.speed = ixgbe_bus_speed_16000;
+		break;
 	default:
 		hw->bus.speed = ixgbe_bus_speed_unknown;
 		break;
@@ -1020,7 +1021,9 @@  s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
 	DEBUGFUNC("ixgbe_get_bus_info_generic");
 
 	/* Get the negotiated link width and speed from PCI config space */
-	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+	link_status = IXGBE_READ_PCIE_WORD(hw, hw->mac.type == ixgbe_mac_E610 ?
+					   IXGBE_PCI_LINK_STATUS_E610 :
+					   IXGBE_PCI_LINK_STATUS);
 
 	ixgbe_set_pci_config_data_generic(hw, link_status);
 
@@ -3653,6 +3656,10 @@  u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
 		break;
+	case ixgbe_mac_E610:
+		pcie_offset = IXGBE_PCIE_MSIX_LKV_CAPS;
+		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+		break;
 	default:
 		return msix_count;
 	}
@@ -4228,7 +4235,7 @@  s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 		break;
 	case IXGBE_LINKS_SPEED_100_82599:
 		*speed = IXGBE_LINK_SPEED_100_FULL;
-		if (hw->mac.type == ixgbe_mac_X550) {
+		if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_E610) {
 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
 				*speed = IXGBE_LINK_SPEED_5GB_FULL;
 		}
diff --git a/drivers/net/ixgbe/base/ixgbe_e610.c b/drivers/net/ixgbe/base/ixgbe_e610.c
new file mode 100644
index 0000000000..37e0acf81c
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_e610.c
@@ -0,0 +1,4982 @@ 
+#include "ixgbe_type.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_api.h"
+
+/**
+ * ixgbe_init_aci - initialization routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the ACI lock.
+ */
+void ixgbe_init_aci(struct ixgbe_hw *hw)
+{
+	ixgbe_init_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_shutdown_aci - shutdown routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Destroy the ACI lock.
+ */
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw)
+{
+	ixgbe_destroy_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+STATIC bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+
+	switch (opcode) {
+	case ixgbe_aci_opc_disable_rxen:
+	case ixgbe_aci_opc_get_phy_caps:
+	case ixgbe_aci_opc_get_link_status:
+	case ixgbe_aci_opc_get_link_topo:
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Admin Command is sent using CSR by setting descriptor and buffer in specific
+ * registers.
+ *
+ * Return: the exit code of the operation.
+ * * - IXGBE_SUCCESS - success.
+ * * - IXGBE_ERR_ACI_DISABLED - CSR mechanism is not enabled.
+ * * - IXGBE_ERR_ACI_BUSY - CSR mechanism is busy.
+ * * - IXGBE_ERR_PARAM - buf_size is too big or
+ * invalid argument buf or buf_size.
+ * * - IXGBE_ERR_ACI_TIMEOUT - Admin Command X command timeout.
+ * * - IXGBE_ERR_ACI_ERROR - Admin Command X invalid state of HICR register or
+ * Admin Command failed because of bad opcode was returned or
+ * Admin Command failed with error Y.
+ */
+STATIC s32
+ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+			   void *buf, u16 buf_size)
+{
+	u32 hicr = 0, tmp_buf_size = 0, i = 0;
+	u32 *raw_desc = (u32 *)desc;
+	s32 status = IXGBE_SUCCESS;
+	bool valid_buf = false;
+	u32 *tmp_buf = NULL;
+	u16 opcode = 0;
+
+	do {
+		hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+		/* It's necessary to check if mechanism is enabled */
+		hicr = IXGBE_READ_REG(hw, PF_HICR);
+		if (!(hicr & PF_HICR_EN)) {
+			status = IXGBE_ERR_ACI_DISABLED;
+			break;
+		}
+		if (hicr & PF_HICR_C) {
+			hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+			status = IXGBE_ERR_ACI_BUSY;
+			break;
+		}
+		opcode = desc->opcode;
+
+		if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) {
+			status = IXGBE_ERR_PARAM;
+			break;
+		}
+
+		if (buf)
+			desc->flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF);
+
+		/* Check if buf and buf_size are proper params */
+		if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF)) {
+			if ((buf && buf_size == 0) ||
+			    (buf == NULL && buf_size)) {
+				status = IXGBE_ERR_PARAM;
+				break;
+			}
+			if (buf && buf_size)
+				valid_buf = true;
+		}
+
+		if (valid_buf == true) {
+			if (buf_size % 4 == 0)
+				tmp_buf_size = buf_size;
+			else
+				tmp_buf_size = (buf_size & (u16)(~0x03)) + 4;
+
+			tmp_buf = (u32*)ixgbe_malloc(hw, tmp_buf_size);
+			if (!tmp_buf)
+				return IXGBE_ERR_OUT_OF_MEM;
+
+			/* tmp_buf will be firstly filled with 0xFF and after
+			 * that the content of buf will be written into it.
+			 * This approach lets us use valid buf_size and
+			 * prevents us from reading past buf area
+			 * when buf_size mod 4 not equal to 0.
+			 */
+			memset(tmp_buf, 0xFF, tmp_buf_size);
+			memcpy(tmp_buf, buf, buf_size);
+
+			if (tmp_buf_size > IXGBE_ACI_LG_BUF)
+				desc->flags |=
+				IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);
+
+			desc->datalen = IXGBE_CPU_TO_LE16(buf_size);
+
+			if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD)) {
+				for (i = 0; i < tmp_buf_size / 4; i++) {
+					IXGBE_WRITE_REG(hw, PF_HIBA(i),
+						IXGBE_LE32_TO_CPU(tmp_buf[i]));
+				}
+			}
+		}
+
+		/* Descriptor is written to specific registers */
+		for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+			IXGBE_WRITE_REG(hw, PF_HIDA(i),
+					IXGBE_LE32_TO_CPU(raw_desc[i]));
+
+		/* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
+		 * PF_HICR_EV
+		 */
+		hicr = IXGBE_READ_REG(hw, PF_HICR);
+		hicr = (hicr | PF_HICR_C) & ~(PF_HICR_SV | PF_HICR_EV);
+		IXGBE_WRITE_REG(hw, PF_HICR, hicr);
+
+		/* Wait for sync Admin Command response */
+		for (i = 0; i < IXGBE_ACI_SYNC_RESPONSE_TIMEOUT; i += 1) {
+			hicr = IXGBE_READ_REG(hw, PF_HICR);
+			if ((hicr & PF_HICR_SV) || !(hicr & PF_HICR_C))
+				break;
+
+			msec_delay(1);
+		}
+
+		/* Wait for async Admin Command response */
+		if ((hicr & PF_HICR_SV) && (hicr & PF_HICR_C)) {
+			for (i = 0; i < IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT;
+			     i += 1) {
+				hicr = IXGBE_READ_REG(hw, PF_HICR);
+				if ((hicr & PF_HICR_EV) || !(hicr & PF_HICR_C))
+					break;
+
+				msec_delay(1);
+			}
+		}
+
+		/* Read sync Admin Command response */
+		if ((hicr & PF_HICR_SV)) {
+			for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+				raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA(i));
+				raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+			}
+		}
+
+		/* Read async Admin Command response */
+		if ((hicr & PF_HICR_EV) && !(hicr & PF_HICR_C)) {
+			for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+				raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA_2(i));
+				raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+			}
+		}
+
+		/* Handle timeout and invalid state of HICR register */
+		if (hicr & PF_HICR_C) {
+			status = IXGBE_ERR_ACI_TIMEOUT;
+			break;
+		} else if (!(hicr & PF_HICR_SV) && !(hicr & PF_HICR_EV)) {
+			status = IXGBE_ERR_ACI_ERROR;
+			break;
+		}
+
+		/* For every command other than 0x0014 treat opcode mismatch
+		 * as an error. Response to 0x0014 command read from HIDA_2
+		 * is a descriptor of an event which is expected to contain
+		 * different opcode than the command.
+		 */
+		if (desc->opcode != opcode &&
+		    opcode != IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+			status = IXGBE_ERR_ACI_ERROR;
+			break;
+		}
+
+		if (desc->retval != IXGBE_ACI_RC_OK) {
+			hw->aci.last_status = (enum ixgbe_aci_err)desc->retval;
+			status = IXGBE_ERR_ACI_ERROR;
+			break;
+		}
+
+		/* Write a response values to a buf */
+		if (valid_buf && (desc->flags &
+				  IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF))) {
+			for (i = 0; i < tmp_buf_size / 4; i++) {
+				tmp_buf[i] = IXGBE_READ_REG(hw, PF_HIBA(i));
+				tmp_buf[i] = IXGBE_CPU_TO_LE32(tmp_buf[i]);
+			}
+			memcpy(buf, tmp_buf, buf_size);
+		}
+	} while (0);
+
+	if (tmp_buf)
+		ixgbe_free(hw, tmp_buf);
+
+	return status;
+}
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+		       void *buf, u16 buf_size)
+{
+	struct ixgbe_aci_desc desc_cpy;
+	enum ixgbe_aci_err last_status;
+	bool is_cmd_for_retry;
+	u8 *buf_cpy = NULL;
+	s32 status;
+	u16 opcode;
+	u8 idx = 0;
+
+	opcode = IXGBE_LE16_TO_CPU(desc->opcode);
+	is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+	memset(&desc_cpy, 0, sizeof(desc_cpy));
+
+	if (is_cmd_for_retry) {
+		if (buf) {
+			buf_cpy = (u8 *)ixgbe_malloc(hw, buf_size);
+			if (!buf_cpy)
+				return IXGBE_ERR_OUT_OF_MEM;
+		}
+		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
+	}
+
+	do {
+		ixgbe_acquire_lock(&hw->aci.lock);
+		status = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+		last_status = hw->aci.last_status;
+		ixgbe_release_lock(&hw->aci.lock);
+
+		if (!is_cmd_for_retry || status == IXGBE_SUCCESS ||
+		    last_status != IXGBE_ACI_RC_EBUSY)
+			break;
+
+		if (buf)
+			memcpy(buf, buf_cpy, buf_size);
+		memcpy(desc, &desc_cpy, sizeof(desc_cpy));
+
+		msec_delay(IXGBE_ACI_SEND_DELAY_TIME_MS);
+	} while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE);
+
+	if (buf_cpy)
+		ixgbe_free(hw, buf_cpy);
+
+	return status;
+}
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+	u32 ep_bit_mask;
+	u32 fwsts;
+
+	ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+
+	/* Check state of Event Pending (EP) bit */
+	fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+	return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+			bool *pending)
+{
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	if (!e || (!e->msg_buf && e->buf_len) || (e->msg_buf && !e->buf_len))
+		return IXGBE_ERR_PARAM;
+
+	ixgbe_acquire_lock(&hw->aci.lock);
+
+	/* Check if there are any events pending */
+	if (!ixgbe_aci_check_event_pending(hw)) {
+		status = IXGBE_ERR_ACI_NO_EVENTS;
+		goto aci_get_event_exit;
+	}
+
+	/* Obtain pending event */
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+	status = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+	if (status)
+		goto aci_get_event_exit;
+
+	/* Returned 0x0014 opcode indicates that no event was obtained */
+	if (desc.opcode == IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+		status = IXGBE_ERR_ACI_NO_EVENTS;
+		goto aci_get_event_exit;
+	}
+
+	/* Determine size of event data */
+	e->msg_len = MIN_T(u16, IXGBE_LE16_TO_CPU(desc.datalen), e->buf_len);
+	/* Write event descriptor to event info structure */
+	memcpy(&e->desc, &desc, sizeof(e->desc));
+
+	/* Check if there are any further events pending */
+	if (pending) {
+		*pending = ixgbe_aci_check_event_pending(hw);
+	}
+
+aci_get_event_exit:
+	ixgbe_release_lock(&hw->aci.lock);
+
+	return status;
+}
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+	/* zero out the desc */
+	memset(desc, 0, sizeof(*desc));
+	desc->opcode = IXGBE_CPU_TO_LE16(opcode);
+	desc->flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_get_fw_ver - get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_ver *resp;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	resp = &desc.params.get_ver;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+	if (!status) {
+		hw->fw_branch = resp->fw_branch;
+		hw->fw_maj_ver = resp->fw_major;
+		hw->fw_min_ver = resp->fw_minor;
+		hw->fw_patch = resp->fw_patch;
+		hw->fw_build = IXGBE_LE32_TO_CPU(resp->fw_build);
+		hw->api_branch = resp->api_branch;
+		hw->api_maj_ver = resp->api_major;
+		hw->api_min_ver = resp->api_minor;
+		hw->api_patch = resp->api_patch;
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_aci_send_driver_ver - send the driver version to firmware
+ * @hw: pointer to the HW struct
+ * @dv: driver's major, minor version
+ *
+ * Send the driver version to the firmware
+ * using the ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * Returns IXGBE_ERR_PARAM, if dv is NULL.
+ */
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv)
+{
+	struct ixgbe_aci_cmd_driver_ver *cmd;
+	struct ixgbe_aci_desc desc;
+	u16 len;
+
+	cmd = &desc.params.driver_ver;
+
+	if (!dv)
+		return IXGBE_ERR_PARAM;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_driver_ver);
+
+	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+	cmd->major_ver = dv->major_ver;
+	cmd->minor_ver = dv->minor_ver;
+	cmd->build_ver = dv->build_ver;
+	cmd->subbuild_ver = dv->subbuild_ver;
+
+	len = 0;
+	while (len < sizeof(dv->driver_string) &&
+	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
+		len++;
+
+	return ixgbe_aci_send_cmd(hw, &desc, dv->driver_string, len);
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * a busy return value is returned and the timeout field value indicates the
+ * maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+		  enum ixgbe_aci_res_access_type access, u8 sdp_number,
+		  u32 *timeout)
+{
+	struct ixgbe_aci_cmd_req_res *cmd_resp;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	cmd_resp = &desc.params.res_owner;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+	cmd_resp->res_id = IXGBE_CPU_TO_LE16(res);
+	cmd_resp->access_type = IXGBE_CPU_TO_LE16(access);
+	cmd_resp->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+	cmd_resp->timeout = IXGBE_CPU_TO_LE32(*timeout);
+	*timeout = 0;
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+	/* The completion specifies the maximum time in ms that the driver
+	 * may hold the resource in the Timeout field.
+	 */
+
+	/* If the resource is held by some other driver, the command completes
+	 * with a busy return value and the timeout field indicates the maximum
+	 * time the current owner of the resource has to free it.
+	 */
+	if (!status || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+		*timeout = IXGBE_LE32_TO_CPU(cmd_resp->timeout);
+
+	return status;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+		      u8 sdp_number)
+{
+	struct ixgbe_aci_cmd_req_res *cmd;
+	struct ixgbe_aci_desc desc;
+
+	cmd = &desc.params.res_owner;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+	cmd->res_id = IXGBE_CPU_TO_LE16(res);
+	cmd->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+
+	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Make an attempt to acquire the ownership of a resource using
+ * the ixgbe_aci_req_res to utilize ACI.
+ * In case if some other driver has previously acquired the resource and
+ * performed any necessary updates, the IXGBE_ERR_ACI_NO_WORK is returned,
+ * and the caller does not obtain the resource and has no further work to do.
+ * If needed, the function will poll until the current lock owner timeouts.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+		      enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS	10
+	u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+	u32 res_timeout = timeout;
+	u32 retry_timeout = 0;
+	s32 status;
+
+	status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+	/* A return code of IXGBE_ERR_ACI_NO_WORK means that another driver has
+	 * previously acquired the resource and performed any necessary updates;
+	 * in this case the caller does not obtain the resource and has no
+	 * further work to do.
+	 */
+	if (status == IXGBE_ERR_ACI_NO_WORK)
+		goto ixgbe_acquire_res_exit;
+
+	/* If necessary, poll until the current lock owner timeouts.
+	 * Set retry_timeout to the timeout value reported by the FW in the
+	 * response to the "Request Resource Ownership" (0x0008) Admin Command
+	 * as it indicates the maximum time the current owner of the resource
+	 * is allowed to hold it.
+	 */
+	retry_timeout = res_timeout;
+	while (status && retry_timeout && res_timeout) {
+		msec_delay(delay);
+		retry_timeout = (retry_timeout > delay) ?
+			retry_timeout - delay : 0;
+		status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+		if (status == IXGBE_ERR_ACI_NO_WORK)
+			/* lock free, but no work to do */
+			break;
+
+		if (!status)
+			/* lock acquired */
+			break;
+	}
+
+ixgbe_acquire_res_exit:
+	return status;
+}
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+	u32 total_delay = 0;
+	s32 status;
+
+	status = ixgbe_aci_release_res(hw, res, 0);
+
+	/* There are some rare cases when trying to release the resource
+	 * results in an admin command timeout, so handle them correctly.
+	 */
+	while ((status == IXGBE_ERR_ACI_TIMEOUT) &&
+	       (total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT)) {
+		msec_delay(1);
+		status = ixgbe_aci_release_res(hw, res, 0);
+		total_delay++;
+	}
+}
+
+/**
+ * ixgbe_parse_common_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool
+ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
+			struct ixgbe_aci_cmd_list_caps_elem *elem,
+			const char *prefix)
+{
+	u32 logical_id = IXGBE_LE32_TO_CPU(elem->logical_id);
+	u32 phys_id = IXGBE_LE32_TO_CPU(elem->phys_id);
+	u32 number = IXGBE_LE32_TO_CPU(elem->number);
+	u16 cap = IXGBE_LE16_TO_CPU(elem->cap);
+	bool found = true;
+
+	UNREFERENCED_1PARAMETER(hw);
+
+	switch (cap) {
+	case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+		caps->valid_functions = number;
+		break;
+	case IXGBE_ACI_CAPS_VMDQ:
+		caps->vmdq = (number == 1);
+		break;
+	case IXGBE_ACI_CAPS_DCB:
+		caps->dcb = (number == 1);
+		caps->active_tc_bitmap = logical_id;
+		caps->maxtc = phys_id;
+		break;
+	case IXGBE_ACI_CAPS_RSS:
+		caps->rss_table_size = number;
+		caps->rss_table_entry_width = logical_id;
+		break;
+	case IXGBE_ACI_CAPS_RXQS:
+		caps->num_rxq = number;
+		caps->rxq_first_id = phys_id;
+		break;
+	case IXGBE_ACI_CAPS_TXQS:
+		caps->num_txq = number;
+		caps->txq_first_id = phys_id;
+		break;
+	case IXGBE_ACI_CAPS_MSIX:
+		caps->num_msix_vectors = number;
+		caps->msix_vector_first_id = phys_id;
+		break;
+	case IXGBE_ACI_CAPS_NVM_VER:
+		break;
+	case IXGBE_ACI_CAPS_NVM_MGMT:
+		caps->sec_rev_disabled =
+			(number & IXGBE_NVM_MGMT_SEC_REV_DISABLED) ?
+			true : false;
+		caps->update_disabled =
+			(number & IXGBE_NVM_MGMT_UPDATE_DISABLED) ?
+			true : false;
+		caps->nvm_unified_update =
+			(number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+			true : false;
+		caps->netlist_auth =
+			(number & IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
+			true : false;
+		break;
+	case IXGBE_ACI_CAPS_MAX_MTU:
+		caps->max_mtu = number;
+		break;
+	case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+		caps->pcie_reset_avoidance = (number > 0);
+		break;
+	case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+		caps->reset_restrict_support = (number == 1);
+		break;
+	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
+	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
+	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
+	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+	{
+		u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+
+		caps->ext_topo_dev_img_ver_high[index] = number;
+		caps->ext_topo_dev_img_ver_low[index] = logical_id;
+		caps->ext_topo_dev_img_part_num[index] =
+			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
+			IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S;
+		caps->ext_topo_dev_img_load_en[index] =
+			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+		caps->ext_topo_dev_img_prog_en[index] =
+			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+		break;
+	}
+
+	case IXGBE_ACI_CAPS_NEXT_CLUSTER_ID:
+		caps->next_cluster_id_support = (number == 1);
+		DEBUGOUT2("%s: next_cluster_id_support = %d\n",
+			  prefix, caps->next_cluster_id_support);
+		break;
+	default:
+		/* Not one of the recognized common capabilities */
+		found = false;
+	}
+
+	return found;
+}
+
+/**
+ * ixgbe_hweight8 - count set bits among the 8 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 8 lowest bits in the provided value.
+ */
+static u8 ixgbe_hweight8(u32 w)
+{
+	u8 hweight = 0, i;
+
+	for (i = 0; i < 8; i++)
+		if (w & (1 << i))
+			hweight++;
+
+	return hweight;
+}
+
+/**
+ * ixgbe_hweight32 - count set bits among the 32 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 32 lowest bits in the
+ * provided value.
+ */
+static u8 ixgbe_hweight32(u32 w)
+{
+	u32 bitMask = 0x1, i;
+	u8  bitCnt = 0;
+
+	for (i = 0; i < 32; i++)
+	{
+		if (w & bitMask)
+			bitCnt++;
+
+		bitMask = bitMask << 0x1;
+	}
+
+	return bitCnt;
+}
+
+/**
+ * ixgbe_func_id_to_logical_id - map from function id to logical pf id
+ * @active_function_bitmap: active function bitmap
+ * @pf_id: function number of device
+ *
+ * Return: the logical id of a function mapped by the provided pf_id.
+ */
+static int ixgbe_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id)
+{
+	u8 logical_id = 0;
+	u8 i;
+
+	for (i = 0; i < pf_id; i++)
+		if (active_function_bitmap & BIT(i))
+			logical_id++;
+
+	return logical_id;
+}
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+				struct ixgbe_hw_dev_caps *dev_p,
+				struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+	u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+	UNREFERENCED_1PARAMETER(hw);
+
+	dev_p->num_funcs = ixgbe_hweight32(number);
+
+	hw->logical_pf_id = ixgbe_func_id_to_logical_id(number, hw->pf_id);
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+				     struct ixgbe_hw_dev_caps *dev_p,
+				     struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+	u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+	UNREFERENCED_1PARAMETER(hw);
+
+	dev_p->num_vsi_allocd_to_host = number;
+}
+
+/**
+ * ixgbe_parse_1588_dev_caps - Parse IXGBE_ACI_CAPS_1588 device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_1588 for device capabilities.
+ */
+static void ixgbe_parse_1588_dev_caps(struct ixgbe_hw *hw,
+				      struct ixgbe_hw_dev_caps *dev_p,
+				      struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+	struct ixgbe_ts_dev_info *info = &dev_p->ts_dev_info;
+	u32 logical_id = IXGBE_LE32_TO_CPU(cap->logical_id);
+	u32 phys_id = IXGBE_LE32_TO_CPU(cap->phys_id);
+	u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+	UNREFERENCED_1PARAMETER(hw);
+
+	info->ena = ((number & IXGBE_TS_DEV_ENA_M) != 0);
+	dev_p->common_cap.ieee_1588 = info->ena;
+
+	info->tmr0_owner = number & IXGBE_TS_TMR0_OWNR_M;
+	info->tmr0_owned = ((number & IXGBE_TS_TMR0_OWND_M) != 0);
+	info->tmr0_ena = ((number & IXGBE_TS_TMR0_ENA_M) != 0);
+
+	info->tmr1_owner = (number & IXGBE_TS_TMR1_OWNR_M) >>
+			   IXGBE_TS_TMR1_OWNR_S;
+	info->tmr1_owned = ((number & IXGBE_TS_TMR1_OWND_M) != 0);
+	info->tmr1_ena = ((number & IXGBE_TS_TMR1_ENA_M) != 0);
+
+	info->ena_ports = logical_id;
+	info->tmr_own_map = phys_id;
+
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+				      struct ixgbe_hw_dev_caps *dev_p,
+				      struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+	u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+	UNREFERENCED_1PARAMETER(hw);
+
+	dev_p->num_flow_director_fltr = number;
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper device to parse device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structured.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+				 struct ixgbe_hw_dev_caps *dev_p,
+				 void *buf, u32 cap_count)
+{
+	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+	u32 i;
+
+	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+	memset(dev_p, 0, sizeof(*dev_p));
+
+	for (i = 0; i < cap_count; i++) {
+		u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+		bool found;
+
+		found = ixgbe_parse_common_caps(hw, &dev_p->common_cap,
+					      &cap_resp[i], "dev caps");
+
+		switch (cap) {
+		case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+			ixgbe_parse_valid_functions_cap(hw, dev_p,
+							&cap_resp[i]);
+			break;
+		case IXGBE_ACI_CAPS_VSI:
+			ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+			break;
+		case IXGBE_ACI_CAPS_1588:
+			ixgbe_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
+			break;
+		case  IXGBE_ACI_CAPS_FD:
+			ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+			break;
+		default:
+			/* Don't list common capabilities as unknown */
+			if (!found)
+			break;
+		}
+	}
+
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF or 0, if no PH are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+	u8 funcs;
+
+#define IXGBE_CAPS_VALID_FUNCS_M	0xFF
+	funcs = ixgbe_hweight8(hw->dev_caps.common_cap.valid_functions &
+			     IXGBE_CAPS_VALID_FUNCS_M);
+
+	if (!funcs)
+		return 0;
+
+	return max / funcs;
+}
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+				      struct ixgbe_hw_func_caps *func_p,
+				      struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+	func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_1588_func_caps - Parse IXGBE_ACI_CAPS_1588 function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_1588.
+ */
+static void ixgbe_parse_1588_func_caps(struct ixgbe_hw *hw,
+				       struct ixgbe_hw_func_caps *func_p,
+				       struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+	struct ixgbe_ts_func_info *info = &func_p->ts_func_info;
+	u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+	UNREFERENCED_1PARAMETER(hw);
+
+	info->ena = ((number & IXGBE_TS_FUNC_ENA_M) != 0);
+	func_p->common_cap.ieee_1588 = info->ena;
+
+	info->src_tmr_owned = ((number & IXGBE_TS_SRC_TMR_OWND_M) != 0);
+	info->tmr_ena = ((number & IXGBE_TS_TMR_ENA_M) != 0);
+	info->tmr_index_owned = ((number & IXGBE_TS_TMR_IDX_OWND_M) != 0);
+	info->tmr_index_assoc = ((number & IXGBE_TS_TMR_IDX_ASSOC_M) != 0);
+
+	info->clk_freq = (number & IXGBE_TS_CLK_FREQ_M) >> IXGBE_TS_CLK_FREQ_S;
+	info->clk_src = ((number & IXGBE_TS_CLK_SRC_M) != 0);
+
+	if (info->clk_freq < NUM_IXGBE_TIME_REF_FREQ) {
+		info->time_ref = (enum ixgbe_time_ref_freq)info->clk_freq;
+	} else {
+		/* Unknown clock frequency, so assume a (probably incorrect)
+		 * default to avoid out-of-bounds look ups of frequency
+		 * related information.
+		 */
+		info->time_ref = IXGBE_TIME_REF_FREQ_25_000;
+	}
+
+}
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structured.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+				  struct ixgbe_hw_func_caps *func_p,
+				  void *buf, u32 cap_count)
+{
+	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+	u32 i;
+
+	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+	memset(func_p, 0, sizeof(*func_p));
+
+	for (i = 0; i < cap_count; i++) {
+		u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+
+		ixgbe_parse_common_caps(hw, &func_p->common_cap,
+					&cap_resp[i], "func caps");
+
+		switch (cap) {
+		case IXGBE_ACI_CAPS_VSI:
+			ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+			break;
+		case IXGBE_ACI_CAPS_1588:
+			ixgbe_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
+			break;
+		default:
+			/* Don't list common capabilities as unknown */
+			break;
+		}
+	}
+
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return IXGBE_ERR_OUT_OF_MEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of IXGBE_ERR_OUT_OF_MEM means the buffer size is too small.
+ */
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+			u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+	struct ixgbe_aci_cmd_list_caps *cmd;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	cmd = &desc.params.get_cap;
+
+	if (opc != ixgbe_aci_opc_list_func_caps &&
+	    opc != ixgbe_aci_opc_list_dev_caps)
+		return IXGBE_ERR_PARAM;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+	status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+	if (cap_count)
+		*cap_count = IXGBE_LE32_TO_CPU(cmd->count);
+
+	return status;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+			    struct ixgbe_hw_dev_caps *dev_caps)
+{
+	u32 status, cap_count = 0;
+	u8 *cbuf = NULL;
+
+	cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+	if (!cbuf)
+		return IXGBE_ERR_OUT_OF_MEM;
+	/* Although the driver doesn't know the number of capabilities the
+	 * device will return, we can simply send a 4KB buffer, the maximum
+	 * possible size that firmware can return.
+	 */
+	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+	status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+				     &cap_count,
+				     ixgbe_aci_opc_list_dev_caps);
+	if (!status)
+		ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+	if (cbuf)
+		ixgbe_free(hw, cbuf);
+
+	return status;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+			     struct ixgbe_hw_func_caps *func_caps)
+{
+	u32 cap_count = 0;
+	u8 *cbuf = NULL;
+	s32 status;
+
+	cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+	if(!cbuf)
+		return IXGBE_ERR_OUT_OF_MEM;
+	/* Although the driver doesn't know the number of capabilities the
+	 * device will return, we can simply send a 4KB buffer, the maximum
+	 * possible size that firmware can return.
+	 */
+	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+	status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+				     &cap_count,
+				     ixgbe_aci_opc_list_func_caps);
+	if (!status)
+		ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+	if (cbuf)
+		ixgbe_free(hw, cbuf);
+
+	return status;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+	s32 status;
+
+	status = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+	if (status)
+		return status;
+
+	return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_disable_rxen *cmd;
+	struct ixgbe_aci_desc desc;
+
+	UNREFERENCED_1PARAMETER(hw);
+
+	cmd = &desc.params.disable_rxen;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+	cmd->lport_num = (u8)hw->bus.func;
+
+	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+			   struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+	struct ixgbe_aci_cmd_get_phy_caps *cmd;
+	u16 pcaps_size = sizeof(*pcaps);
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	cmd = &desc.params.get_phy;
+
+	if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+		return IXGBE_ERR_PARAM;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+	if (qual_mods)
+		cmd->param0 |= IXGBE_CPU_TO_LE16(IXGBE_ACI_GET_PHY_RQM);
+
+	cmd->param0 |= IXGBE_CPU_TO_LE16(report_mode);
+	status = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+
+	if (status == IXGBE_SUCCESS &&
+	    report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+		hw->phy.phy_type_low = IXGBE_LE64_TO_CPU(pcaps->phy_type_low);
+		hw->phy.phy_type_high = IXGBE_LE64_TO_CPU(pcaps->phy_type_high);
+		memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+			   sizeof(hw->link.link_info.module_type));
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_phy_caps_equals_cfg - check if capabilities match the PHY config
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if PHY capabilities match PHY
+ * configuration
+ *
+ * Return: true if PHY capabilities match PHY configuration.
+ */
+bool
+ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *phy_caps,
+			  struct ixgbe_aci_cmd_set_phy_cfg_data *phy_cfg)
+{
+	u8 caps_mask, cfg_mask;
+
+	if (!phy_caps || !phy_cfg)
+		return false;
+
+	/* These bits are not common between capabilities and configuration.
+	 * Do not use them to determine equality.
+	 */
+	caps_mask = IXGBE_ACI_PHY_CAPS_MASK & ~(IXGBE_ACI_PHY_AN_MODE |
+					      IXGBE_ACI_PHY_EN_MOD_QUAL);
+	cfg_mask = IXGBE_ACI_PHY_ENA_VALID_MASK &
+		   ~IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+	    phy_caps->eee_cap != phy_cfg->eee_cap ||
+	    phy_caps->eeer_value != phy_cfg->eeer_value ||
+	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+		return false;
+
+	return true;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+				struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+	if (!caps || !cfg)
+		return;
+
+	memset(cfg, 0, sizeof(*cfg));
+	cfg->phy_type_low = caps->phy_type_low;
+	cfg->phy_type_high = caps->phy_type_high;
+	cfg->caps = caps->caps;
+	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+	cfg->eee_cap = caps->eee_cap;
+	cfg->eeer_value = caps->eeer_value;
+	cfg->link_fec_opt = caps->link_fec_options;
+	cfg->module_compliance_enforcement =
+		caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+			  struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	if (!cfg)
+		return IXGBE_ERR_PARAM;
+
+	/* Ensure that only valid bits of cfg->caps can be turned on. */
+	if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK) {
+		cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+	}
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+	status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+
+	if (!status)
+		hw->phy.curr_user_phy_cfg = *cfg;
+
+	return status;
+}
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+	struct ixgbe_aci_cmd_restart_an *cmd;
+	struct ixgbe_aci_desc desc;
+
+	cmd = &desc.params.restart_an;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+	cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+	if (ena_link)
+		cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+	else
+		cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_is_media_cage_present - check if media cage is present
+ * @hw: pointer to the HW struct
+ *
+ * Identify presence of media cage using the ACI command (0x06E0).
+ *
+ * Return: true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_link_topo *cmd;
+	struct ixgbe_aci_desc desc;
+
+	cmd = &desc.params.get_link_topo;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+
+	cmd->addr.topo_params.node_type_ctx =
+		(IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT <<
+		 IXGBE_ACI_LINK_TOPO_NODE_CTX_S);
+
+	/* set node type */
+	cmd->addr.topo_params.node_type_ctx |=
+		(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M &
+		 IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
+
+	/* Node type cage can be used to determine if cage is present. If the
+	 * ACI returns an error (ENOENT), then no cage is present. If no cage
+	 * is present, then the connection type is backplane or BASE-T.
+	 */
+	return ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the phy type.
+ * If more than one media type is set, ixgbe_media_type_unknown is returned.
+ * phy_type_low is checked first, then phy_type_high.
+ * If neither identifies the media, ixgbe_media_type_unknown is returned.
+ *
+ * Return: type of a media based on phy type in form of enum.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+	struct ixgbe_link_status *hw_link_info;
+
+	if (!hw)
+		return ixgbe_media_type_unknown;
+
+	hw_link_info = &hw->link.link_info;
+	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+		/* If more than one media type is selected, report unknown */
+		return ixgbe_media_type_unknown;
+
+	if (hw_link_info->phy_type_low) {
+		/* 1G SGMII is a special case where some DA cable PHYs
+		 * may show this as an option when it really shouldn't
+		 * be since SGMII is meant to be between a MAC and a PHY
+		 * in a backplane. Try to detect this case and handle it
+		 */
+		if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+		    (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+		    hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+			return ixgbe_media_type_da;
+
+		switch (hw_link_info->phy_type_low) {
+		case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+		case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+		case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+		case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+		case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
+		case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
+			return ixgbe_media_type_fiber;
+		case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+		case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+			return ixgbe_media_type_fiber;
+		case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+		case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+		case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+		case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+		case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+		case IXGBE_PHY_TYPE_LOW_25GBASE_T:
+			return ixgbe_media_type_copper;
+		case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+		case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
+		case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
+		case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
+			return ixgbe_media_type_da;
+		case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
+			if (ixgbe_is_media_cage_present(hw))
+				return ixgbe_media_type_aui;
+			return ixgbe_media_type_backplane;
+		case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+		case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+		case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+		case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+		case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+		case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+		case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
+		case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
+		case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
+			return ixgbe_media_type_backplane;
+		}
+	} else {
+		switch (hw_link_info->phy_type_high) {
+		case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+			return ixgbe_media_type_copper;
+		}
+	}
+	return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+	struct ixgbe_link_status *li;
+	s32 status;
+
+	if (!hw)
+		return IXGBE_ERR_PARAM;
+
+	li = &hw->link.link_info;
+
+	status = ixgbe_aci_get_link_info(hw, true, NULL);
+	if (status)
+		return status;
+
+	if (li->link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+		pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+			ixgbe_malloc(hw, sizeof(*pcaps));
+		if (!pcaps)
+			return IXGBE_ERR_OUT_OF_MEM;
+
+		status = ixgbe_aci_get_phy_caps(hw, false,
+						IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+						pcaps);
+
+		if (status == IXGBE_SUCCESS)
+			memcpy(li->module_type, &pcaps->module_type,
+			       sizeof(li->module_type));
+
+		ixgbe_free(hw, pcaps);
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * The variable link_up is true if the link is up, false if it is down.
+ * The value of link_up is invalid if the returned status is non-zero.
+ * As a result of this call, link status reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+	s32 status = IXGBE_SUCCESS;
+
+	if (!hw || !link_up)
+		return IXGBE_ERR_PARAM;
+
+	if (hw->link.get_link_info) {
+		status = ixgbe_update_link_info(hw);
+		if (status)
+			return status;
+	}
+
+	*link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+	return status;
+}
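+
+/* A minimal usage sketch for a polling caller; DEBUGOUT() below only stands
+ * in for whatever the caller does with the result:
+ *
+ *	bool link_up = false;
+ *
+ *	if (!ixgbe_get_link_status(hw, &link_up) && link_up)
+ *		DEBUGOUT("link is up\n");
+ */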
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x607).
+ * The current link can be optionally provided to update
+ * the status.
+ *
+ * Return: the link status of the adapter.
+ */
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+			    struct ixgbe_link_status *link)
+{
+	struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+	struct ixgbe_aci_cmd_get_link_status *resp;
+	struct ixgbe_link_status *li_old, *li;
+	struct ixgbe_fc_info *hw_fc_info;
+	enum ixgbe_media_type *hw_media_type;
+	struct ixgbe_aci_desc desc;
+	bool tx_pause, rx_pause;
+	u8 cmd_flags;
+	s32 status;
+
+	if (!hw)
+		return IXGBE_ERR_PARAM;
+
+	li_old = &hw->link.link_info_old;
+	hw_media_type = &hw->phy.media_type;
+	li = &hw->link.link_info;
+	hw_fc_info = &hw->fc;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+	cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+	resp = &desc.params.get_link_status;
+	resp->cmd_flags = cmd_flags;
+
+	status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* save off old link status information */
+	*li_old = *li;
+
+	/* update current link status information */
+	li->link_speed = IXGBE_LE16_TO_CPU(link_data.link_speed);
+	li->phy_type_low = IXGBE_LE64_TO_CPU(link_data.phy_type_low);
+	li->phy_type_high = IXGBE_LE64_TO_CPU(link_data.phy_type_high);
+	*hw_media_type = ixgbe_get_media_type_from_phy_type(hw);
+	li->link_info = link_data.link_info;
+	li->link_cfg_err = link_data.link_cfg_err;
+	li->an_info = link_data.an_info;
+	li->ext_info = link_data.ext_info;
+	li->max_frame_size = IXGBE_LE16_TO_CPU(link_data.max_frame_size);
+	li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+	li->topo_media_conflict = link_data.topo_media_conflict;
+	li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+				      IXGBE_ACI_CFG_PACING_TYPE_M);
+
+	/* update fc info */
+	tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+	rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+	if (tx_pause && rx_pause)
+		hw_fc_info->current_mode = ixgbe_fc_full;
+	else if (tx_pause)
+		hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+	else if (rx_pause)
+		hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+	else
+		hw_fc_info->current_mode = ixgbe_fc_none;
+
+	li->lse_ena = !!(resp->cmd_flags & IXGBE_ACI_LSE_IS_ENABLED);
+
+	/* save link status information */
+	if (link)
+		*link = *li;
+
+	/* flag cleared so calling functions don't call ACI again */
+	hw->link.get_link_info = false;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+	struct ixgbe_aci_cmd_set_event_mask *cmd;
+	struct ixgbe_aci_desc desc;
+
+	cmd = &desc.params.set_event_mask;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+	cmd->event_mask = IXGBE_CPU_TO_LE16(mask);
+	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: bool value deciding if lse should be enabled or disabled
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+	s32 rc;
+
+	rc = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+	if (rc)
+		return rc;
+
+	/* Enable link status events generation by FW */
+	rc = ixgbe_aci_get_link_info(hw, activate, NULL);
+	if (rc)
+		return rc;
+	return IXGBE_SUCCESS;
+}
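+
+/* A minimal usage sketch: enable link status events with no events masked
+ * out (a set bit in the mask would deactivate the corresponding event):
+ *
+ *	s32 status = ixgbe_configure_lse(hw, true, 0);
+ */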
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+			       struct ixgbe_aci_cmd_get_link_topo *cmd,
+			       u8 *node_part_number, u16 *node_handle)
+{
+	struct ixgbe_aci_desc desc;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+	desc.params.get_link_topo = *cmd;
+
+	if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+		return IXGBE_ERR_NOT_SUPPORTED;
+
+	if (node_handle)
+		*node_handle =
+			IXGBE_LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
+	if (node_part_number)
+		*node_part_number = desc.params.get_link_topo.node_part_num;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_get_netlist_node_pin - get a node pin handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo_pin AQ structure
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node pin and assign it to
+ * the provided handle using ACI command (0x06E1).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_netlist_node_pin(struct ixgbe_hw *hw,
+				   struct ixgbe_aci_cmd_get_link_topo_pin *cmd,
+				   u16 *node_handle)
+{
+	struct ixgbe_aci_desc desc;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo_pin);
+	desc.params.get_link_topo_pin = *cmd;
+
+	if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+		return IXGBE_ERR_NOT_SUPPORTED;
+
+	if (node_handle)
+		*node_handle =
+			IXGBE_LE16_TO_CPU(desc.params.get_link_topo_pin.addr.handle);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_find_netlist_node - find a node handle
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Find and return the node handle for a given node type and part number in the
+ * netlist. When found, IXGBE_SUCCESS is returned, IXGBE_ERR_NOT_SUPPORTED
+ * otherwise. If @node_handle is provided, it is set to the found node handle.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+			    u8 node_part_number, u16 *node_handle)
+{
+	struct ixgbe_aci_cmd_get_link_topo cmd;
+	u8 rec_node_part_number;
+	u16 rec_node_handle;
+	s32 status;
+	u8 idx;
+
+	for (idx = 0; idx < IXGBE_MAX_NETLIST_SIZE; idx++) {
+		memset(&cmd, 0, sizeof(cmd));
+
+		cmd.addr.topo_params.node_type_ctx =
+			(node_type_ctx << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S);
+		cmd.addr.topo_params.index = idx;
+
+		status = ixgbe_aci_get_netlist_node(hw, &cmd,
+						    &rec_node_part_number,
+						    &rec_node_handle);
+		if (status)
+			return status;
+
+		if (rec_node_part_number == node_part_number) {
+			if (node_handle)
+				*node_handle = rec_node_handle;
+			return IXGBE_SUCCESS;
+		}
+	}
+
+	return IXGBE_ERR_NOT_SUPPORTED;
+}
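+
+/* A minimal usage sketch, assuming the caller knows the node part number it
+ * is looking for; cage_part_number below is a placeholder, not a value
+ * defined by this patch:
+ *
+ *	u16 cage_handle;
+ *	s32 status;
+ *
+ *	status = ixgbe_find_netlist_node(hw,
+ *					 IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE,
+ *					 cage_part_number, &cage_handle);
+ */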
+
+/**
+ * ixgbe_aci_read_i2c - read I2C register value
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [7] - Repeated start,
+ *	    bits [6:5] - data offset size,
+ *	    bit [4] - I2C address type,
+ *	    bits [3:0] - data size to read (0-16 bytes)
+ * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
+ *
+ * Read the value of the I2C pin register using ACI command (0x06E2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+		       struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+		       u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+	struct ixgbe_aci_desc desc = { 0 };
+	struct ixgbe_aci_cmd_i2c *cmd;
+	u8 data_size;
+	s32 status;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_i2c);
+	cmd = &desc.params.read_write_i2c;
+
+	if (!data)
+		return IXGBE_ERR_PARAM;
+
+	data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+		    IXGBE_ACI_I2C_DATA_SIZE_S;
+
+	cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+	cmd->topo_addr = topo_addr;
+	cmd->i2c_params = params;
+	cmd->i2c_addr = addr;
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+	if (!status) {
+		struct ixgbe_aci_cmd_read_i2c_resp *resp;
+		u8 i;
+
+		resp = &desc.params.read_i2c_resp;
+		for (i = 0; i < data_size; i++) {
+			*data = resp->i2c_data[i];
+			data++;
+		}
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_aci_write_i2c - write a value to I2C register
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
+ *				      to write (0-7 bytes)
+ * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
+ *
+ * Write a value to the I2C pin register using ACI command (0x06E3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+			struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+			u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+	struct ixgbe_aci_desc desc = { 0 };
+	struct ixgbe_aci_cmd_i2c *cmd;
+	u8 i, data_size;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_i2c);
+	cmd = &desc.params.read_write_i2c;
+
+	data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+		    IXGBE_ACI_I2C_DATA_SIZE_S;
+
+	/* data_size limited to 4 */
+	if (data_size > 4)
+		return IXGBE_ERR_PARAM;
+
+	cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+	cmd->topo_addr = topo_addr;
+	cmd->i2c_params = params;
+	cmd->i2c_addr = addr;
+
+	for (i = 0; i < data_size; i++) {
+		cmd->i2c_data[i] = *data;
+		data++;
+	}
+
+	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_set_gpio - set GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO that needs to be set
+ * @value: SW provided IO value to set in the LSB
+ *
+ * Set the GPIO pin state that is a part of the topology
+ * using ACI command (0x06EC).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+		       bool value)
+{
+	struct ixgbe_aci_cmd_gpio *cmd;
+	struct ixgbe_aci_desc desc;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_gpio);
+	cmd = &desc.params.read_write_gpio;
+	cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+	cmd->gpio_num = pin_idx;
+	cmd->gpio_val = value ? 1 : 0;
+
+	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_gpio - get GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO whose state is read
+ * @value: IO value read
+ *
+ * Get the value of a GPIO signal which is part of the topology
+ * using ACI command (0x06ED).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+		       bool *value)
+{
+	struct ixgbe_aci_cmd_gpio *cmd;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_gpio);
+	cmd = &desc.params.read_write_gpio;
+	cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+	cmd->gpio_num = pin_idx;
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+	if (status)
+		return status;
+
+	*value = !!cmd->gpio_val;
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_sff_eeprom - read/write SFF EEPROM
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
+ * @page: QSFP page
+ * @page_bank_ctrl: configuration of SFF/CMIS paging and banking control
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: 1-16 for read, 1 for write.
+ * @write: 0 for read, 1 for write.
+ *
+ * Read/write SFF EEPROM using ACI command (0x06EE).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+			 u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+			 u8 length, bool write)
+{
+	struct ixgbe_aci_cmd_sff_eeprom *cmd;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	if (!data || (mem_addr & 0xff00))
+		return IXGBE_ERR_PARAM;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_sff_eeprom);
+	cmd = &desc.params.read_write_sff_param;
+	desc.flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+	cmd->lport_num = (u8)(lport & 0xff);
+	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
+	cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(((bus_addr >> 1) &
+					 IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
+					((page_bank_ctrl <<
+					  IXGBE_ACI_SFF_PAGE_BANK_CTRL_S) &
+					 IXGBE_ACI_SFF_PAGE_BANK_CTRL_M));
+	cmd->i2c_offset = IXGBE_CPU_TO_LE16(mem_addr & 0xff);
+	cmd->module_page = page;
+	if (write)
+		cmd->i2c_bus_addr |= IXGBE_CPU_TO_LE16(IXGBE_ACI_SFF_IS_WRITE);
+
+	status = ixgbe_aci_send_cmd(hw, &desc, data, length);
+	return status;
+}
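+
+/* A minimal usage sketch: read the first identification bytes of an SFP
+ * module on the local port at the default 0xA0 address, page 0, no banking:
+ *
+ *	u8 id[8];
+ *	s32 status;
+ *
+ *	status = ixgbe_aci_sff_eeprom(hw, 0, 0xA0, 0x0, 0x0, 0,
+ *				      id, sizeof(id), false);
+ */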
+
+/**
+ * ixgbe_aci_prog_topo_dev_nvm - program Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ *
+ * Program Topology Device NVM using ACI command (0x06F2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+			struct ixgbe_aci_cmd_link_topo_params *topo_params)
+{
+	struct ixgbe_aci_cmd_prog_topo_dev_nvm *cmd;
+	struct ixgbe_aci_desc desc;
+
+	cmd = &desc.params.prog_topo_dev_nvm;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_prog_topo_dev_nvm);
+
+	memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+
+	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_read_topo_dev_nvm - read Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ * @start_address: byte offset in the topology device NVM
+ * @data: pointer to data buffer
+ * @data_size: number of bytes to be read from the topology device NVM
+ *
+ * Read Topology Device NVM using ACI command (0x06F3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+			struct ixgbe_aci_cmd_link_topo_params *topo_params,
+			u32 start_address, u8 *data, u8 data_size)
+{
+	struct ixgbe_aci_cmd_read_topo_dev_nvm *cmd;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	if (!data || data_size == 0 ||
+	    data_size > IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
+		return IXGBE_ERR_PARAM;
+
+	cmd = &desc.params.read_topo_dev_nvm;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_topo_dev_nvm);
+
+	desc.datalen = IXGBE_CPU_TO_LE16(data_size);
+	memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+	cmd->start_address = IXGBE_CPU_TO_LE32(start_address);
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+	if (status)
+		return status;
+
+	memcpy(data, cmd->data_read, data_size);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+		      enum ixgbe_aci_res_access_type access)
+{
+	u32 fla;
+
+	/* Skip if we are in blank NVM programming mode */
+	fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+	if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+		return IXGBE_SUCCESS;
+
+	return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+				 IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+	u32 fla;
+
+	/* Skip if we are in blank NVM programming mode */
+	fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+	if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+		return;
+
+	ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
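+
+/* The acquire/read/release pattern used by the EEPROM wrappers; a minimal
+ * sketch assuming the caller handles the returned status:
+ *
+ *	u16 word;
+ *	s32 status;
+ *
+ *	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ *	if (!status) {
+ *		status = ixgbe_read_sr_word_aci(hw, E610_SR_NVM_CTRL_WORD,
+ *						&word);
+ *		ixgbe_release_nvm(hw);
+ *	}
+ */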
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+		       u16 length, void *data, bool last_command,
+		       bool read_shadow_ram)
+{
+	struct ixgbe_aci_desc desc;
+	struct ixgbe_aci_cmd_nvm *cmd;
+
+	cmd = &desc.params.nvm;
+
+	if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+		return IXGBE_ERR_PARAM;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+	if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+		cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+	/* If this is the last command in a series, set the proper flag. */
+	if (last_command)
+		cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+	cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+	cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+	cmd->offset_high = (offset >> 16) & 0xFF;
+	cmd->length = IXGBE_CPU_TO_LE16(length);
+
+	return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_nvm_checksum *cmd;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (status)
+		return status;
+
+	cmd = &desc.params.nvm_checksum;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+	ixgbe_release_nvm(hw);
+
+	if (!status)
+		if (IXGBE_LE16_TO_CPU(cmd->checksum) !=
+		    IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
+			ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+				      "Invalid Shadow Ram checksum");
+			status = IXGBE_ERR_NVM_CHECKSUM;
+		}
+
+	return status;
+}
+
+/**
+ * ixgbe_nvm_recalculate_checksum - recalculate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Recalculate NVM PFA checksum using ACI command (0x0706).
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_nvm_checksum *cmd;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (status)
+		return status;
+
+	cmd = &desc.params.nvm_checksum;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+	ixgbe_release_nvm(hw);
+
+	return status;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, lookup the module offset from the beginning of the
+ * flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static u32 ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+				       enum ixgbe_bank_select bank,
+				       u16 module)
+{
+	struct ixgbe_bank_info *banks = &hw->flash.banks;
+	enum ixgbe_flash_bank active_bank;
+	bool second_bank_active;
+	u32 offset, size;
+
+	switch (module) {
+	case E610_SR_1ST_NVM_BANK_PTR:
+		offset = banks->nvm_ptr;
+		size = banks->nvm_size;
+		active_bank = banks->nvm_bank;
+		break;
+	case E610_SR_1ST_OROM_BANK_PTR:
+		offset = banks->orom_ptr;
+		size = banks->orom_size;
+		active_bank = banks->orom_bank;
+		break;
+	case E610_SR_NETLIST_BANK_PTR:
+		offset = banks->netlist_ptr;
+		size = banks->netlist_size;
+		active_bank = banks->netlist_bank;
+		break;
+	default:
+		return 0;
+	}
+
+	switch (active_bank) {
+	case IXGBE_1ST_FLASH_BANK:
+		second_bank_active = false;
+		break;
+	case IXGBE_2ND_FLASH_BANK:
+		second_bank_active = true;
+		break;
+	default:
+		return 0;
+	}
+
+	/* The second flash bank is stored immediately following the first
+	 * bank. Based on whether the 1st or 2nd bank is active, and whether
+	 * we want the active or inactive bank, calculate the desired offset.
+	 */
+	switch (bank) {
+	case IXGBE_ACTIVE_FLASH_BANK:
+		return offset + (second_bank_active ? size : 0);
+	case IXGBE_INACTIVE_FLASH_BANK:
+		return offset + (second_bank_active ? 0 : size);
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_read_flash_module - Read data from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the data read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active or the inactive bank of that module.
+ *
+ * The data is read using flat NVM access, and relies on the
+ * hw->flash.banks data being set up by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_flash_module(struct ixgbe_hw *hw,
+				   enum ixgbe_bank_select bank,
+				   u16 module, u32 offset, u8 *data, u32 length)
+{
+	s32 status;
+	u32 start;
+
+	start = ixgbe_get_flash_bank_offset(hw, bank, module);
+	if (!start)
+		return IXGBE_ERR_PARAM;
+
+	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (status)
+		return status;
+
+	status = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+	ixgbe_release_nvm(hw);
+
+	return status;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+				 enum ixgbe_bank_select bank,
+				 u32 offset, u16 *data)
+{
+	__le16 data_local;
+	s32 status;
+
+	status = ixgbe_read_flash_module(hw, bank, E610_SR_1ST_NVM_BANK_PTR,
+					 offset * sizeof(u16),
+					 (u8 *)&data_local,
+					 sizeof(u16));
+	if (!status)
+		*data = IXGBE_LE16_TO_CPU(data_local);
+
+	return status;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length from the
+ * NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the
+ * Authentication header size, and then convert to words.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+				     enum ixgbe_bank_select bank,
+				     u32 *hdr_len)
+{
+	u16 hdr_len_l, hdr_len_h;
+	u32 hdr_len_dword;
+	s32 status;
+
+	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+				       &hdr_len_l);
+	if (status)
+		return status;
+
+	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+				       &hdr_len_h);
+	if (status)
+		return status;
+
+	/* CSS header length is in DWORD, so convert to words and add
+	 * authentication header size
+	 */
+	hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+	*hdr_len = (hdr_len_dword * 2) + IXGBE_NVM_AUTH_HEADER_LEN;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+				  enum ixgbe_bank_select bank,
+				  u32 offset, u16 *data)
+{
+	u32 hdr_len;
+	s32 status;
+
+	status = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+	if (status)
+		return status;
+
+	hdr_len = ROUND_UP(hdr_len, 32);
+
+	return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_minsrevs - Get the minsrevs values from flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: structure to store NVM and OROM minsrev values
+ *
+ * Read the Minimum Security Revision TLV and extract
+ * the revision values from the flash image
+ * into a readable structure for processing.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw,
+			   struct ixgbe_minsrev_info *minsrevs)
+{
+	struct ixgbe_aci_cmd_nvm_minsrev data;
+	s32 status;
+	u16 valid;
+
+	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (status)
+		return status;
+
+	status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID,
+				    0, sizeof(data), &data,
+				    true, false);
+
+	ixgbe_release_nvm(hw);
+
+	if (status)
+		return status;
+
+	valid = IXGBE_LE16_TO_CPU(data.validity);
+
+	/* Extract NVM minimum security revision */
+	if (valid & IXGBE_ACI_NVM_MINSREV_NVM_VALID) {
+		u16 minsrev_l = IXGBE_LE16_TO_CPU(data.nvm_minsrev_l);
+		u16 minsrev_h = IXGBE_LE16_TO_CPU(data.nvm_minsrev_h);
+
+		minsrevs->nvm = minsrev_h << 16 | minsrev_l;
+		minsrevs->nvm_valid = true;
+	}
+
+	/* Extract the OROM minimum security revision */
+	if (valid & IXGBE_ACI_NVM_MINSREV_OROM_VALID) {
+		u16 minsrev_l = IXGBE_LE16_TO_CPU(data.orom_minsrev_l);
+		u16 minsrev_h = IXGBE_LE16_TO_CPU(data.orom_minsrev_h);
+
+		minsrevs->orom = minsrev_h << 16 | minsrev_l;
+		minsrevs->orom_valid = true;
+	}
+
+	return IXGBE_SUCCESS;
+}
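+
+/* A minimal usage sketch: read the minimum security revisions and only use
+ * a field when its validity bit was reported:
+ *
+ *	struct ixgbe_minsrev_info minsrevs = { 0 };
+ *
+ *	if (!ixgbe_get_nvm_minsrevs(hw, &minsrevs) && minsrevs.nvm_valid)
+ *		DEBUGOUT1("NVM minsrev: 0x%08x\n", minsrevs.nvm);
+ */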
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the requested NVM
+ * module bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+			      enum ixgbe_bank_select bank, u32 *srev)
+{
+	u16 srev_l, srev_h;
+	s32 status;
+
+	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+	if (status)
+		return status;
+
+	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+	if (status)
+		return status;
+
+	*srev = srev_h << 16 | srev_l;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+				  enum ixgbe_bank_select bank,
+				  struct ixgbe_nvm_info *nvm)
+{
+	u16 eetrack_lo, eetrack_hi, ver;
+	s32 status;
+
+	status = ixgbe_read_nvm_sr_copy(hw, bank,
+					E610_SR_NVM_DEV_STARTER_VER, &ver);
+	if (status)
+		return status;
+
+	nvm->major = (ver & E610_NVM_VER_HI_MASK) >> E610_NVM_VER_HI_SHIFT;
+	nvm->minor = (ver & E610_NVM_VER_LO_MASK) >> E610_NVM_VER_LO_SHIFT;
+
+	status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_LO,
+					&eetrack_lo);
+	if (status)
+		return status;
+
+	status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_HI,
+					&eetrack_hi);
+	if (status)
+		return status;
+
+	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+	status = ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+	return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+	return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
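+
+/* A minimal usage sketch: report the running NVM map version of the active
+ * bank:
+ *
+ *	struct ixgbe_nvm_info nvm;
+ *
+ *	if (!ixgbe_get_active_nvm_ver(hw, &nvm))
+ *		DEBUGOUT2("NVM map version %x.%02x\n", nvm.major, nvm.minor);
+ */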
+
+/**
+ * ixgbe_read_sr_pointer - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word specifies whether it is stored in word size or 4KB
+ * sector size by using the highest bit. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_pointer(struct ixgbe_hw *hw, u16 offset, u32 *pointer)
+{
+	s32 status;
+	u16 value;
+
+	status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+	if (status)
+		return status;
+
+	/* Determine if the pointer is in 4KB or word units */
+	if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+		*pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
+	else
+		*pointer = value * 2;
+
+	return IXGBE_SUCCESS;
+}
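+
+/* Worked example of the conversion above, assuming the 4KB-units flag is the
+ * top bit of the pointer word: a Shadow RAM value of 0x8005 resolves to
+ * 5 * 4096 = 20480 bytes, while a value of 0x0100 is in word units and
+ * resolves to 0x100 * 2 = 512 bytes.
+ */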
+
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+	s32 status;
+	u16 value;
+
+	status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+	if (status)
+		return status;
+
+	/* Area sizes are always specified in 4KB units */
+	*size = value * 4 * 1024;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_discover_flash_size - Discover the available flash size.
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+	u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+	s32 status;
+
+	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (status)
+		return status;
+
+	while ((max_size - min_size) > 1) {
+		u32 offset = (max_size + min_size) / 2;
+		u32 len = 1;
+		u8 data;
+
+		status = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
+		if (status == IXGBE_ERR_ACI_ERROR &&
+		    hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
+			status = IXGBE_SUCCESS;
+			max_size = offset;
+		} else if (!status) {
+			min_size = offset;
+		} else {
+			/* an unexpected error occurred */
+			goto err_read_flat_nvm;
+		}
+	}
+
+	hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+	ixgbe_release_nvm(hw);
+
+	return status;
+}
+
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+	struct ixgbe_bank_info *banks = &hw->flash.banks;
+	u16 ctrl_word;
+	s32 status;
+
+	status = ixgbe_read_ee_aci_E610(hw, E610_SR_NVM_CTRL_WORD, &ctrl_word);
+	if (status)
+		return status;
+
+	/* Check that the control word indicates validity */
+	if ((ctrl_word & IXGBE_SR_CTRL_WORD_1_M) >> IXGBE_SR_CTRL_WORD_1_S !=
+	    IXGBE_SR_CTRL_WORD_VALID) {
+		return IXGBE_ERR_CONFIG;
+	}
+
+	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+		banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+	else
+		banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+		banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+	else
+		banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+		banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+	else
+		banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+	status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_NVM_BANK_PTR,
+				       &banks->nvm_ptr);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_area_size(hw, E610_SR_NVM_BANK_SIZE,
+					 &banks->nvm_size);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_OROM_BANK_PTR,
+				       &banks->orom_ptr);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_area_size(hw, E610_SR_OROM_BANK_SIZE,
+					 &banks->orom_size);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_pointer(hw, E610_SR_NETLIST_BANK_PTR,
+				       &banks->netlist_ptr);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_area_size(hw, E610_SR_NETLIST_BANK_SIZE,
+					 &banks->netlist_size);
+	if (status)
+		return status;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_nvm - initializes NVM setting
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate NVM settings such as Shadow RAM size,
+ * max_timeout, and blank_nvm_mode
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw)
+{
+	struct ixgbe_flash_info *flash = &hw->flash;
+	u32 fla, gens_stat;
+	s32 status;
+	u8 sr_size;
+
+	/* The SR size is stored regardless of the NVM programming mode
+	 * as the blank mode may be used in the factory line.
+	 */
+	gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+	sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+
+	/* Switching to words (sr_size contains power of 2) */
+	flash->sr_words = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+	/* Check if we are in the normal or blank NVM programming mode */
+	fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+	if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
+		flash->blank_nvm_mode = false;
+	} else {
+		/* Blank programming mode */
+		flash->blank_nvm_mode = true;
+		return IXGBE_ERR_NVM_BLANK_MODE;
+	}
+
+	status = ixgbe_discover_flash_size(hw);
+	if (status)
+		return status;
+
+	status = ixgbe_determine_active_flash_banks(hw);
+	if (status)
+		return status;
+
+	status = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+					&flash->nvm);
+	if (status)
+		return status;
+
+	return IXGBE_SUCCESS;
+}
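+
+/* A minimal usage sketch of the NVM init flow; blank programming mode is
+ * reported as a distinct error so callers can choose to continue:
+ *
+ *	s32 status = ixgbe_init_nvm(hw);
+ *
+ *	if (status == IXGBE_ERR_NVM_BLANK_MODE)
+ *		DEBUGOUT("NVM is blank; flash layout not discovered\n");
+ *	else if (status)
+ *		return status;
+ */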
+
+/**
+ * ixgbe_sanitize_operate - Clear the user data
+ * @hw: pointer to the HW struct
+ *
+ * Clear user data from NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw)
+{
+	u8 cmd_flags = IXGBE_ACI_SANITIZE_REQ_OPERATE |
+		       IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR;
+	s32 status;
+	u8 values;
+
+	status = ixgbe_sanitize_nvm(hw, cmd_flags, &values);
+	if (status)
+		return status;
+	if ((!(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+	     !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
+	    ((values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+	     !(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
+	    ((values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
+	     !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
+		return IXGBE_ERR_ACI_ERROR;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_sanitize_nvm - Sanitize NVM
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flag to the ACI command
+ * @values: values returned from the command
+ *
+ * Sanitize NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values)
+{
+	struct ixgbe_aci_desc desc;
+	struct ixgbe_aci_cmd_nvm_sanitization *cmd;
+	s32 status;
+
+	cmd = &desc.params.nvm_sanitization;
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_sanitization);
+	cmd->cmd_flags = cmd_flags;
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+	if (values)
+		*values = cmd->values;
+
+	return status;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+	u32 bytes = sizeof(u16);
+	__le16 data_local;
+	s32 status;
+
+	status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+				     (u8 *)&data_local, true);
+	if (status)
+		return status;
+
+	*data = IXGBE_LE16_TO_CPU(data_local);
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_sr_buf_aci - Reads Shadow RAM buf via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
+ * taken before reading the buffer and later released.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+			  u16 *data)
+{
+	u32 bytes = *words * 2, i;
+	s32 status;
+
+	status = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+
+	*words = bytes / 2;
+
+	for (i = 0; i < *words; i++)
+		data[i] = IXGBE_LE16_TO_CPU(((__le16 *)data)[i]);
+
+	return status;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single ACI command.
+ *
+ * Returns a status code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+			u8 *data, bool read_shadow_ram)
+{
+	u32 inlen = *length;
+	u32 bytes_read = 0;
+	bool last_cmd;
+	s32 status;
+
+	*length = 0;
+
+	/* Verify the length of the read if this is for the Shadow RAM */
+	if (read_shadow_ram && ((offset + inlen) >
+				(hw->eeprom.word_size * 2u))) {
+		return IXGBE_ERR_PARAM;
+	}
+
+	do {
+		u32 read_size, sector_offset;
+
+		/* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+		 * Additionally, a read from the Shadow RAM may not cross over
+		 * a sector boundary. Conveniently, the sector size is also 4KB.
+		 */
+		sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+		read_size = MIN_T(u32,
+				  IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+				  inlen - bytes_read);
+
+		last_cmd = !(bytes_read + read_size < inlen);
+
+		/* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+		 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+		 * maximum size guarantees that it will fit within the 2 bytes.
+		 */
+		status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+					    offset, (u16)read_size,
+					    data + bytes_read, last_cmd,
+					    read_shadow_ram);
+		if (status)
+			break;
+
+		bytes_read += read_size;
+		offset += read_size;
+	} while (!last_cmd);
+
+	*length = bytes_read;
+	return status;
+}
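+
+/* A minimal usage sketch: read the first 8KB of flash as a flat address
+ * space, taking and releasing NVM ownership around the read as the other
+ * flash helpers in this file do:
+ *
+ *	u32 len = 8 * 1024;
+ *	u8 *buf = (u8 *)ixgbe_malloc(hw, len);
+ *	s32 status;
+ *
+ *	if (!buf)
+ *		return IXGBE_ERR_OUT_OF_MEM;
+ *
+ *	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ *	if (!status) {
+ *		status = ixgbe_read_flat_nvm(hw, 0, &len, buf, false);
+ *		ixgbe_release_nvm(hw);
+ *	}
+ *	ixgbe_free(hw, buf);
+ */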
+
+/**
+ * ixgbe_aci_alternate_write - write to alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure using ACI command (0x0900).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+			      u32 reg_val0, u32 reg_addr1, u32 reg_val1)
+{
+	struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	cmd = &desc.params.read_write_alt_direct;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_alt_direct);
+	cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+	cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+	cmd->dword0_value = IXGBE_CPU_TO_LE32(reg_val0);
+	cmd->dword1_value = IXGBE_CPU_TO_LE32(reg_val1);
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+	return status;
+}
+
+/**
+ * ixgbe_aci_alternate_read - read from alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure using ACI command (0x0902).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ * If 'reg_val1' pointer is not passed then only register at 'reg_addr0'
+ * is read.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+			     u32 *reg_val0, u32 reg_addr1, u32 *reg_val1)
+{
+	struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	cmd = &desc.params.read_write_alt_direct;
+
+	if (!reg_val0)
+		return IXGBE_ERR_PARAM;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_alt_direct);
+	cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+	cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+	if (status == IXGBE_SUCCESS) {
+		*reg_val0 = IXGBE_LE32_TO_CPU(cmd->dword0_value);
+
+		if (reg_val1)
+			*reg_val1 = IXGBE_LE32_TO_CPU(cmd->dword1_value);
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_aci_alternate_write_done - check if writing to alternate structure
+ * is done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+				   bool *reset_needed)
+{
+	struct ixgbe_aci_cmd_done_alt_write *cmd;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	cmd = &desc.params.done_alt_write;
+
+	if (!reset_needed)
+		return IXGBE_ERR_PARAM;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_done_alt_write);
+	cmd->flags = bios_mode;
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+	if (!status)
+		*reset_needed = (IXGBE_LE16_TO_CPU(cmd->flags) &
+				 IXGBE_ACI_RESP_RESET_NEEDED) != 0;
+
+	return status;
+}
+
+/**
+ * ixgbe_aci_alternate_clear - clear alternate structure
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc,
+					ixgbe_aci_opc_clear_port_alt_write);
+
+	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+	return status;
+}
+
+/**
+ * ixgbe_aci_get_internal_data - get internal FW/HW data
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table ID within cluster
+ * @start: index of line in the block to read
+ * @buf: dump buffer
+ * @buf_size: dump buffer size
+ * @ret_buf_size: return buffer size (returned by FW)
+ * @ret_next_cluster: next cluster to read (returned by FW)
+ * @ret_next_table: next block to read (returned by FW)
+ * @ret_next_index: next index to read (returned by FW)
+ *
+ * Get internal FW/HW data using ACI command (0xFF08) for debug purposes.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+				u16 table_id, u32 start, void *buf,
+				u16 buf_size, u16 *ret_buf_size,
+				u16 *ret_next_cluster, u16 *ret_next_table,
+				u32 *ret_next_index)
+{
+	struct ixgbe_aci_cmd_debug_dump_internals *cmd;
+	struct ixgbe_aci_desc desc;
+	s32 status;
+
+	cmd = &desc.params.debug_dump;
+
+	if (buf_size == 0 || !buf)
+		return IXGBE_ERR_PARAM;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc,
+					ixgbe_aci_opc_debug_dump_internals);
+
+	cmd->cluster_id = IXGBE_CPU_TO_LE16(cluster_id);
+	cmd->table_id = IXGBE_CPU_TO_LE16(table_id);
+	cmd->idx = IXGBE_CPU_TO_LE32(start);
+
+	status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+	if (!status) {
+		if (ret_buf_size)
+			*ret_buf_size = IXGBE_LE16_TO_CPU(desc.datalen);
+		if (ret_next_cluster)
+			*ret_next_cluster = IXGBE_LE16_TO_CPU(cmd->cluster_id);
+		if (ret_next_table)
+			*ret_next_table = IXGBE_LE16_TO_CPU(cmd->table_id);
+		if (ret_next_index)
+			*ret_next_index = IXGBE_LE32_TO_CPU(cmd->idx);
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_validate_nvm_rw_reg - Check that an NVM access request is valid
+ * @cmd: NVM access command structure
+ *
+ * Validates that an NVM access request targets a valid register offset.
+ * First validates that the module and flags are correct, and
+ * then ensures that the register offset is one of the accepted registers.
+ *
+ * Return: 0 if the register access is valid, out of range error code otherwise.
+ */
+static s32
+ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd *cmd)
+{
+	u16 i;
+
+	switch (cmd->offset) {
+	case GL_HICR:
+	case GL_HICR_EN: /* Note, this register is read only */
+	case GL_FWSTS:
+	case GL_MNG_FWSM:
+	case GLNVM_GENS:
+	case GLNVM_FLA:
+	case GL_FWRESETCNT:
+		return 0;
+	default:
+		break;
+	}
+
+	for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
+		if (cmd->offset == (u32)GL_HIDA(i))
+			return 0;
+
+	for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
+		if (cmd->offset == (u32)GL_HIBA(i))
+			return 0;
+
+	/* All other register offsets are not valid */
+	return IXGBE_ERR_OUT_OF_RANGE;
+}
+
+/**
+ * ixgbe_nvm_access_read - Handle an NVM read request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: storage for the register value read
+ *
+ * Process an NVM access request to read a register.
+ *
+ * Return: 0 if the register read is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_read(struct ixgbe_hw *hw,
+			struct ixgbe_nvm_access_cmd *cmd,
+			struct ixgbe_nvm_access_data *data)
+{
+	s32 status;
+
+	/* Always initialize the output data, even on failure */
+	memset(&data->regval, 0, cmd->data_size);
+
+	/* Make sure this is a valid read/write access request */
+	status = ixgbe_validate_nvm_rw_reg(cmd);
+	if (status)
+		return status;
+
+	DEBUGOUT1("NVM access: reading register %08x\n", cmd->offset);
+
+	/* Read the register and store the contents in the data field */
+	data->regval = IXGBE_READ_REG(hw, cmd->offset);
+
+	return 0;
+}
+
+/**
+ * ixgbe_nvm_access_write - Handle an NVM write request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: NVM access data to write
+ *
+ * Process an NVM access request to write a register.
+ *
+ * Return: 0 if the register write is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_write(struct ixgbe_hw *hw,
+			struct ixgbe_nvm_access_cmd *cmd,
+			struct ixgbe_nvm_access_data *data)
+{
+	s32 status;
+
+	/* Make sure this is a valid read/write access request */
+	status = ixgbe_validate_nvm_rw_reg(cmd);
+	if (status)
+		return status;
+
+	/* Reject requests to write to read-only registers */
+	switch (cmd->offset) {
+	case GL_HICR_EN:
+		return IXGBE_ERR_OUT_OF_RANGE;
+	default:
+		break;
+	}
+
+	DEBUGOUT2("NVM access: writing register %08x with value %08x\n",
+		cmd->offset, data->regval);
+
+	/* Write the data field to the specified register */
+	IXGBE_WRITE_REG(hw, cmd->offset, data->regval);
+
+	return 0;
+}
+
+/**
+ * ixgbe_handle_nvm_access - Handle an NVM access request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command info
+ * @data: pointer to read or return data
+ *
+ * Process an NVM access request. Read the command structure information and
+ * determine if it is valid. If not, report an error indicating the command
+ * was invalid.
+ *
+ * For valid commands, perform the necessary function, copying the data into
+ * the provided data buffer.
+ *
+ * Return: 0 if the nvm access request is valid and successful,
+ * error code otherwise.
+ */
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+			struct ixgbe_nvm_access_cmd *cmd,
+			struct ixgbe_nvm_access_data *data)
+{
+	switch (cmd->command) {
+	case IXGBE_NVM_CMD_READ:
+		return ixgbe_nvm_access_read(hw, cmd, data);
+	case IXGBE_NVM_CMD_WRITE:
+		return ixgbe_nvm_access_write(hw, cmd, data);
+	default:
+		return IXGBE_ERR_PARAM;
+	}
+}
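
For reference, a minimal sketch of how a caller could drive this handler to read one of the whitelisted registers; it assumes a valid struct ixgbe_hw *hw and uses the cmd/data fields exactly as they appear above:

	struct ixgbe_nvm_access_cmd cmd = { 0 };
	struct ixgbe_nvm_access_data data = { 0 };
	s32 err;

	cmd.command = IXGBE_NVM_CMD_READ;
	cmd.offset = GL_FWSTS;			/* must pass ixgbe_validate_nvm_rw_reg() */
	cmd.data_size = sizeof(data.regval);

	err = ixgbe_handle_nvm_access(hw, &cmd, &data);
	if (!err)
		DEBUGOUT1("GL_FWSTS = %08x\n", data.regval);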
+
+/**
+ * ixgbe_init_ops_E610 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for E610.
+ * Does not touch the hardware.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+	s32 ret_val;
+
+	ret_val = ixgbe_init_ops_X550(hw);
+	/* TODO Additional ops overrides for e610 to go here */
+
+	/* MAC */
+	mac->ops.reset_hw = ixgbe_reset_hw_E610;
+	mac->ops.start_hw = ixgbe_start_hw_E610;
+	mac->ops.get_media_type = ixgbe_get_media_type_E610;
+	mac->ops.get_supported_physical_layer =
+		ixgbe_get_supported_physical_layer_E610;
+	mac->ops.get_san_mac_addr = NULL;
+	mac->ops.set_san_mac_addr = NULL;
+	mac->ops.get_wwn_prefix = NULL;
+	mac->ops.setup_link = ixgbe_setup_link_E610;
+	mac->ops.check_link = ixgbe_check_link_E610;
+	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_E610;
+	mac->ops.setup_fc = ixgbe_setup_fc_E610;
+	mac->ops.fc_autoneg = ixgbe_fc_autoneg_E610;
+	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_E610;
+	mac->ops.disable_rx = ixgbe_disable_rx_E610;
+	mac->ops.setup_eee = ixgbe_setup_eee_E610;
+	mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_E610;
+	mac->ops.get_fw_tsam_mode = ixgbe_get_fw_tsam_mode_E610;
+	mac->ops.get_fw_version = ixgbe_aci_get_fw_ver;
+	mac->ops.get_nvm_version = ixgbe_get_active_nvm_ver;
+
+	/* PHY */
+	phy->ops.init = ixgbe_init_phy_ops_E610;
+	phy->ops.identify = ixgbe_identify_phy_E610;
+	phy->eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+				    IXGBE_LINK_SPEED_100_FULL |
+				    IXGBE_LINK_SPEED_1GB_FULL;
+	phy->eee_speeds_advertised = phy->eee_speeds_supported;
+
+	/* Additional ops overrides for e610 to go here */
+	eeprom->ops.init_params = ixgbe_init_eeprom_params_E610;
+	eeprom->ops.read = ixgbe_read_ee_aci_E610;
+	eeprom->ops.read_buffer = ixgbe_read_ee_aci_buffer_E610;
+	eeprom->ops.write = NULL;
+	eeprom->ops.write_buffer = NULL;
+	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_E610;
+	eeprom->ops.update_checksum = NULL;
+	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_E610;
+	eeprom->ops.read_pba_string = ixgbe_read_pba_string_E610;
+
+	/* Initialize bus function number */
+	hw->mac.ops.set_lan_id(hw);
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_reset_hw_E610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw)
+{
+	u32 swfw_mask = hw->phy.phy_semaphore_mask;
+	u32 ctrl, i;
+	s32 status;
+
+	DEBUGFUNC("ixgbe_reset_hw_E610");
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	status = hw->mac.ops.stop_adapter(hw);
+	if (status != IXGBE_SUCCESS)
+		goto reset_hw_out;
+
+	/* flush pending Tx transactions */
+	ixgbe_clear_tx_pending(hw);
+
+	status = hw->phy.ops.init(hw);
+	if (status != IXGBE_SUCCESS)
+		DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+			  status);
+mac_reset_top:
+	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+	if (status != IXGBE_SUCCESS) {
+		ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+			      "semaphore failed with %d", status);
+		return IXGBE_ERR_SWFW_SYNC;
+	}
+	ctrl = IXGBE_CTRL_RST;
+	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+	IXGBE_WRITE_FLUSH(hw);
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	/* Poll for reset bit to self-clear indicating reset is complete */
+	for (i = 0; i < 10; i++) {
+		usec_delay(1);
+		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+		if (!(ctrl & IXGBE_CTRL_RST_MASK))
+			break;
+	}
+
+	if (ctrl & IXGBE_CTRL_RST_MASK) {
+		status = IXGBE_ERR_RESET_FAILED;
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			      "Reset polling failed to complete.\n");
+	}
+	msec_delay(100);
+
+	/*
+	 * Double resets are required for recovery from certain error
+	 * conditions.  Between resets, it is necessary to stall to allow time
+	 * for any pending HW events to complete.
+	 */
+	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+		goto mac_reset_top;
+	}
+
+	/* Set the Rx packet buffer size. */
+	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+	/* Store the permanent mac address */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+	/*
+	 * Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table.  Also reset num_rar_entries to 128,
+	 * since we modify this value when programming the SAN MAC address.
+	 */
+	hw->mac.num_rar_entries = 128;
+	hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+	return status;
+}
+
+/**
+ * ixgbe_fw_ver_check - Check the reported FW API version
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the driver should load on a given FW API version.
+ *
+ * Return: 'true' if the driver should attempt to load. 'false' otherwise.
+ */
+static bool ixgbe_fw_ver_check(struct ixgbe_hw *hw)
+{
+	if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+		return false;
+	} else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR) {
+		if (hw->api_min_ver >
+		    (IXGBE_FW_API_VER_MINOR + IXGBE_FW_API_VER_DIFF_ALLOWED)) {
+			ERROR_REPORT1(IXGBE_ERROR_CAUTION, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+		} else if ((hw->api_min_ver + IXGBE_FW_API_VER_DIFF_ALLOWED) <
+			   IXGBE_FW_API_VER_MINOR) {
+			ERROR_REPORT1(IXGBE_ERROR_CAUTION, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+		}
+	} else {
+		ERROR_REPORT1(IXGBE_ERROR_CAUTION, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+	}
+	return true;
+}
+
+/**
+ * ixgbe_start_hw_E610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Gets the firmware version and, if the API version is supported,
+ * starts the hardware using the generic start_hw function followed by
+ * the gen2 start_hw function.
+ * Then performs revision-specific operations, if any.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+
+	ret_val = hw->mac.ops.get_fw_version(hw);
+	if (ret_val)
+		goto out;
+
+	if (!ixgbe_fw_ver_check(hw)) {
+		ret_val = IXGBE_ERR_FW_API_VER;
+		goto out;
+	}
+	ret_val = ixgbe_start_hw_generic(hw);
+	if (ret_val != IXGBE_SUCCESS)
+		goto out;
+
+	ixgbe_start_hw_gen2(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * ixgbe_get_media_type_E610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * In order to get the media type, the function gets the PHY
+ * capabilities and then uses them to identify the PHY type by
+ * checking phy_type_high and phy_type_low.
+ *
+ * Return: the type of media in form of ixgbe_media_type enum
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+	u64 phy_mask = 0;
+	s32 rc;
+	u8 i;
+
+	rc = ixgbe_update_link_info(hw);
+	if (rc)
+		return ixgbe_media_type_unknown;
+
+	/* If there is no link but PHY (dongle) is available SW should use
+	 * Get PHY Caps admin command instead of Get Link Status, find most
+	 * significant bit that is set in PHY types reported by the command
+	 * and use it to discover media type.
+	 */
+	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+	    (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+		/* Get PHY Capabilities */
+		rc = ixgbe_aci_get_phy_caps(hw, false,
+					    IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+					    &pcaps);
+		if (rc)
+			return ixgbe_media_type_unknown;
+
+		/* Check if there is some bit set in phy_type_high */
+		for (i = 64; i > 0; i--) {
+			phy_mask = (u64)((u64)1 << (i - 1));
+			if ((pcaps.phy_type_high & phy_mask) != 0) {
+				/* If any bit is set treat it as PHY type */
+				hw->link.link_info.phy_type_high = phy_mask;
+				hw->link.link_info.phy_type_low = 0;
+				break;
+			}
+			phy_mask = 0;
+		}
+
+		/* If nothing found in phy_type_high search in phy_type_low */
+		if (phy_mask == 0) {
+			for (i = 64; i > 0; i--) {
+				phy_mask = (u64)((u64)1 << (i - 1));
+				if ((pcaps.phy_type_low & phy_mask) != 0) {
+					/* If any bit is set treat it as PHY type */
+					hw->link.link_info.phy_type_high = 0;
+					hw->link.link_info.phy_type_low = phy_mask;
+					break;
+				}
+			}
+		}
+
+		/* Based on search above try to discover media type */
+		hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+	}
+
+	return hw->phy.media_type;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_E610 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ *
+ * Return: the bitmap of supported physical layer types.
+ */
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw)
+{
+	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+	u64 phy_type;
+	s32 rc;
+
+	rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+				    &pcaps);
+	if (rc)
+		return IXGBE_PHYSICAL_LAYER_UNKNOWN;
+
+	phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_low);
+	if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_LR)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_SR)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_SX)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_KX)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_T)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_5GBASE_T)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_5000BASE_T;
+
+	phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_high);
+	if (phy_type & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+
+	return physical_layer;
+}
+
+/**
+ * ixgbe_setup_link_E610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			  bool autoneg_wait)
+{
+	/* Simply request FW to perform proper PHY setup */
+	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_E610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+			  bool *link_up, bool link_up_wait_to_complete)
+{
+	s32 rc;
+	u32 i;
+
+	if (!speed || !link_up)
+		return IXGBE_ERR_PARAM;
+
+	/* Set get_link_info flag to ensure that fresh
+	 * link information will be obtained from FW
+	 * by sending Get Link Status admin command. */
+	hw->link.get_link_info = true;
+
+	/* Update link information in adapter context. */
+	rc = ixgbe_get_link_status(hw, link_up);
+	if (rc)
+		return rc;
+
+	/* Wait for link up if it was requested. */
+	if (link_up_wait_to_complete && *link_up == false) {
+		for (i = 0; i < hw->mac.max_link_up_time; i++) {
+			msec_delay(100);
+			hw->link.get_link_info = true;
+			rc = ixgbe_get_link_status(hw, link_up);
+			if (rc)
+				return rc;
+			if (*link_up)
+				break;
+		}
+	}
+
+	/* Use link information in adapter context updated by the call
+	 * to ixgbe_get_link_status() to determine current link speed.
+	 * Link speed information is valid only when link up was
+	 * reported by FW. */
+	if (*link_up) {
+		switch (hw->link.link_info.link_speed) {
+		case IXGBE_ACI_LINK_SPEED_10MB:
+			*speed = IXGBE_LINK_SPEED_10_FULL;
+			break;
+		case IXGBE_ACI_LINK_SPEED_100MB:
+			*speed = IXGBE_LINK_SPEED_100_FULL;
+			break;
+		case IXGBE_ACI_LINK_SPEED_1000MB:
+			*speed = IXGBE_LINK_SPEED_1GB_FULL;
+			break;
+		case IXGBE_ACI_LINK_SPEED_2500MB:
+			*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+			break;
+		case IXGBE_ACI_LINK_SPEED_5GB:
+			*speed = IXGBE_LINK_SPEED_5GB_FULL;
+			break;
+		case IXGBE_ACI_LINK_SPEED_10GB:
+			*speed = IXGBE_LINK_SPEED_10GB_FULL;
+			break;
+		default:
+			*speed = IXGBE_LINK_SPEED_UNKNOWN;
+			break;
+		}
+	} else {
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
+	}
+
+	return IXGBE_SUCCESS;
+}
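
A minimal caller sketch, assuming hw was set up through ixgbe_init_ops_E610() so the ops table points at the E610 implementation:

	ixgbe_link_speed speed;
	bool link_up;
	s32 err;

	/* true = wait for link up before reporting */
	err = hw->mac.ops.check_link(hw, &speed, &link_up, true);
	if (!err && link_up)
		DEBUGOUT1("link is up, speed mask 0x%x\n", speed);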
+
+/**
+ * ixgbe_get_link_capabilities_E610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+				     ixgbe_link_speed *speed,
+				     bool *autoneg)
+{
+	if (!speed || !autoneg)
+		return IXGBE_ERR_PARAM;
+
+	*autoneg = true;
+	*speed = hw->phy.speeds_supported;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+		     struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+		     enum ixgbe_fc_mode req_mode)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data *pcaps = NULL;
+	s32 status = IXGBE_SUCCESS;
+	u8 pause_mask = 0x0;
+
+	if (!cfg)
+		return IXGBE_ERR_PARAM;
+
+	switch (req_mode) {
+	case ixgbe_fc_auto:
+	{
+		pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+			ixgbe_malloc(hw, sizeof(*pcaps));
+		if (!pcaps) {
+			status = IXGBE_ERR_OUT_OF_MEM;
+			goto out;
+		}
+
+		/* Query the value of FC that both the NIC and the attached
+		 * media can do. */
+		status = ixgbe_aci_get_phy_caps(hw, false,
+			IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, pcaps);
+		if (status)
+			goto out;
+
+		pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+		pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+
+		break;
+	}
+	case ixgbe_fc_full:
+		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+		break;
+	case ixgbe_fc_rx_pause:
+		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+		break;
+	case ixgbe_fc_tx_pause:
+		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+		break;
+	default:
+		break;
+	}
+
+	/* clear the old pause settings */
+	cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+		IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+	/* set the new capabilities */
+	cfg->caps |= pause_mask;
+
+out:
+	if (pcaps)
+		ixgbe_free(hw, pcaps);
+	return status;
+}
+
+/**
+ * ixgbe_setup_fc_E610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
+	struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
+	s32 status;
+
+	/* Get the current PHY config */
+	status = ixgbe_aci_get_phy_caps(hw, false,
+		IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+	if (status)
+		return status;
+
+	ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+	/* Configure the set PHY data */
+	status = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+	if (status)
+		return status;
+
+	/* If the capabilities have changed, then set the new config */
+	if (cfg.caps != pcaps.caps) {
+		cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+		status = ixgbe_aci_set_phy_cfg(hw, &cfg);
+		if (status)
+			return status;
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_fc_autoneg_E610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw)
+{
+	s32 status;
+
+	/* Get current link status.
+	 * Current FC mode will be stored in the hw context. */
+	status = ixgbe_aci_get_link_info(hw, false, NULL);
+	if (status)
+		goto out;
+
+	/* Check if the link is up */
+	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) {
+		status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+		goto out;
+	}
+
+	/* Check if auto-negotiation has completed */
+	if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) {
+		status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+		goto out;
+	}
+
+out:
+	if (status == IXGBE_SUCCESS) {
+		hw->fc.fc_was_autonegged = true;
+	} else {
+		hw->fc.fc_was_autonegged = false;
+		hw->fc.current_mode = hw->fc.requested_mode;
+	}
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_E610 - Send driver version to FW
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @minor: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Send driver version number to Firmware using ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * IXGBE_SUCCESS - OK
+ * IXGBE_ERR_PARAM - incorrect parameters were given
+ * IXGBE_ERR_ACI_ERROR - encountered an error during sending the command
+ * IXGBE_ERR_ACI_TIMEOUT - a timeout occurred
+ * IXGBE_ERR_OUT_OF_MEM - ran out of memory
+ */
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
+			      u8 sub, u16 len, const char *driver_ver)
+{
+	size_t limited_len = min(len, (u16)IXGBE_DRV_VER_STR_LEN_E610);
+	struct ixgbe_driver_ver dv;
+
+	DEBUGFUNC("ixgbe_set_fw_drv_ver_E610");
+
+	if (!len || !driver_ver)
+		return IXGBE_ERR_PARAM;
+
+	dv.major_ver = maj;
+	dv.minor_ver = minor;
+	dv.build_ver = build;
+	dv.subbuild_ver = sub;
+
+	memset(dv.driver_string, 0, IXGBE_DRV_VER_STR_LEN_E610);
+	memcpy(dv.driver_string, driver_ver, limited_len);
+
+	return ixgbe_aci_send_driver_ver(hw, &dv);
+}
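
A hedged usage sketch; the version numbers and the string below are purely illustrative:

	static const char drv_str[] = "dpdk-ixgbe";
	s32 err;

	err = ixgbe_set_fw_drv_ver_E610(hw, 1, 0, 0, 0,
					sizeof(drv_str), drv_str);
	if (err)
		DEBUGOUT1("sending driver version failed, err = %d\n", err);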
+
+/**
+ * ixgbe_disable_rx_E610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable the RX DMA unit on E610 using ACI command (0x000C).
+ */
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw)
+{
+	u32 rxctrl;
+
+	DEBUGFUNC("ixgbe_disable_rx_E610");
+
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	if (rxctrl & IXGBE_RXCTRL_RXEN) {
+		u32 pfdtxgswc;
+		s32 status;
+
+		pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+		if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+			pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+			hw->mac.set_lben = true;
+		} else {
+			hw->mac.set_lben = false;
+		}
+
+		status = ixgbe_aci_disable_rxen(hw);
+
+		/* If we fail - disable RX using register write */
+		if (status) {
+			rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+			if (rxctrl & IXGBE_RXCTRL_RXEN) {
+				rxctrl &= ~IXGBE_RXCTRL_RXEN;
+				IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+			}
+		}
+	}
+}
+
+/**
+ * ixgbe_setup_eee_E610 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enables/disables EEE based on the enable_eee flag.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+	u16 eee_cap = 0;
+	s32 status;
+
+	status = ixgbe_aci_get_phy_caps(hw, false,
+		IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+	if (enable_eee) {
+		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+			eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
+		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+			eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
+		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+			eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_KX;
+		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+			eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
+		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+			eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_KR;
+		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_25GBASE_KR   ||
+		    phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_25GBASE_KR_S ||
+		    phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_25GBASE_KR1)
+			eee_cap |= IXGBE_ACI_PHY_EEE_EN_25GBASE_KR;
+
+		if (phy_caps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+			eee_cap |= IXGBE_ACI_PHY_EEE_EN_10BASE_T;
+	}
+
+	/* Set EEE capability for particular PHY types */
+	phy_cfg.eee_cap = IXGBE_CPU_TO_LE16(eee_cap);
+
+	status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+	return status;
+}
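
Callers are expected to reach this through the ops table wired up in ixgbe_init_ops_E610(); a one-line sketch, assuming an already-initialized hw:

	s32 err = hw->mac.ops.setup_eee(hw, true);	/* advertise EEE where supported */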
+
+/**
+ * ixgbe_fw_recovery_mode_E610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM recovery mode by
+ * reading the value of the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw)
+{
+	u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+	return !!(fwsm & GL_MNG_FWSM_FW_MODES_RECOVERY_M);
+}
+
+/**
+ * ixgbe_get_fw_tsam_mode_E610 - Check FW NVM Thermal Sensor Autonomous Mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks Thermal Sensor Autonomous Mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in TSAM, otherwise false.
+ */
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw)
+{
+	u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_X550EM_a);
+
+	return !!(fwsm & IXGBE_FWSM_TS_ENABLED);
+}
+
+/**
+ * ixgbe_init_phy_ops_E610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+	s32 ret_val;
+
+	phy->ops.identify_sfp = ixgbe_identify_module_E610;
+	phy->ops.read_reg = NULL; /* PHY reg access is not required */
+	phy->ops.write_reg = NULL;
+	phy->ops.read_reg_mdi = NULL;
+	phy->ops.write_reg_mdi = NULL;
+	phy->ops.setup_link = ixgbe_setup_phy_link_E610;
+	phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_E610;
+	phy->ops.read_i2c_byte = NULL; /* disabled for E610 */
+	phy->ops.write_i2c_byte = NULL; /* disabled for E610 */
+	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_E610;
+	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_E610;
+	phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_E610;
+	phy->ops.i2c_bus_clear = NULL; /* do not use generic implementation */
+	phy->ops.check_overtemp = ixgbe_check_overtemp_E610;
+	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+		phy->ops.set_phy_power = ixgbe_set_phy_power_E610;
+	else
+		phy->ops.set_phy_power = NULL;
+	phy->ops.enter_lplu = ixgbe_enter_lplu_E610;
+	phy->ops.handle_lasi = NULL; /* no implementation for E610 */
+	phy->ops.read_i2c_byte_unlocked = NULL; /* disabled for E610 */
+	phy->ops.write_i2c_byte_unlocked = NULL; /* disabled for E610 */
+
+	/* TODO: Set functions pointers based on device ID */
+
+	/* Identify the PHY */
+	ret_val = phy->ops.identify(hw);
+	if (ret_val != IXGBE_SUCCESS)
+		return ret_val;
+
+	/* TODO: Set functions pointers based on PHY type */
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_E610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+	s32 rc;
+
+	/* Set PHY type */
+	hw->phy.type = ixgbe_phy_fw;
+
+	rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+				    &pcaps);
+	if (rc)
+		return rc;
+
+	if (!(pcaps.module_compliance_enforcement &
+	      IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+		/* Handle lenient mode */
+		rc = ixgbe_aci_get_phy_caps(hw, false,
+					    IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+					    &pcaps);
+		if (rc)
+			return rc;
+	}
+
+	/* Determine supported speeds */
+	hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+
+	if (pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_T  ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1G_SGMII    ||
+	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_T   ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_X   ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_KX  ||
+	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_5GBASE_T  ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_T       ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_DA      ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_SR      ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_LR      ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1  ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C     ||
+	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+	/* Initialize autoneg speeds */
+	if (!hw->phy.autoneg_advertised)
+		hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+	/* Set PHY ID */
+	memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_module_E610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw)
+{
+	bool media_available;
+	u8 module_type;
+	s32 rc;
+
+	rc = ixgbe_update_link_info(hw);
+	if (rc)
+		goto err;
+
+	media_available =
+		!!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
+
+	if (media_available) {
+		hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+		/* Get module type from hw context updated by ixgbe_update_link_info() */
+		module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+		if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+		    (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+		} else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+			hw->phy.sfp_type = ixgbe_sfp_type_sr;
+		} else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+			   (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+			hw->phy.sfp_type = ixgbe_sfp_type_lr;
+		}
+		rc = IXGBE_SUCCESS;
+	} else {
+		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		rc = IXGBE_ERR_SFP_NOT_PRESENT;
+	}
+err:
+	return rc;
+}
+
+/**
+ * ixgbe_setup_phy_link_E610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+	struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+	u8 rmode = IXGBE_ACI_REPORT_ACTIVE_CFG;
+	s32 rc;
+
+	rc = ixgbe_aci_get_link_info(hw, false, NULL);
+	if (rc)
+		goto err;
+
+	/* If media is not available get default config */
+	if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+		rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+	rc = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+	if (rc)
+		goto err;
+
+	ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+	/* Set default PHY types for a given speed */
+	pcfg.phy_type_low = 0;
+	pcfg.phy_type_high = 0;
+
+	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+	}
+	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+	}
+	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+	}
+	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+	}
+	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+	}
+	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+	}
+
+	/* Mask the set values to avoid requesting unsupported link types */
+	pcfg.phy_type_low &= pcaps.phy_type_low;
+	pcfg.phy_type_high &= pcaps.phy_type_high;
+
+	if (pcfg.phy_type_high != pcaps.phy_type_high ||
+	    pcfg.phy_type_low != pcaps.phy_type_low ||
+	    pcfg.caps != pcaps.caps) {
+		pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+		pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+		rc = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+	}
+
+err:
+	return rc;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_E610 - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ *
+ * Determines PHY FW version based on response to Get PHY Capabilities
+ * admin command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+					u16 *firmware_version)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+	s32 status;
+
+	if (!firmware_version)
+		return IXGBE_ERR_PARAM;
+
+	status = ixgbe_aci_get_phy_caps(hw, false,
+					IXGBE_ACI_REPORT_ACTIVE_CFG,
+					&pcaps);
+	if (status)
+		return status;
+
+	/* TODO: determine which bytes of the 8-byte phy_fw_ver
+	 * field should be written to the 2-byte firmware_version
+	 * output argument. */
+	memcpy(firmware_version, pcaps.phy_fw_ver, sizeof(u16));
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_E610 - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation from SFP module's SFF-8472 data over I2C.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 *sff8472_data)
+{
+	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR2,
+				    byte_offset, 0,
+				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+				    sff8472_data, 1, false);
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_E610 - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation from SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+			       u8 *eeprom_data)
+{
+	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+				    byte_offset, 0,
+				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+				    eeprom_data, 1, false);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom_E610 - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 eeprom_data)
+{
+	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+				    byte_offset, 0,
+				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+				    &eeprom_data, 1, true);
+}
+
+/**
+ * ixgbe_check_overtemp_E610 - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ *
+ * Get the link status and check if a PHY temperature alarm was detected.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+	struct ixgbe_aci_cmd_get_link_status *resp;
+	struct ixgbe_aci_desc desc;
+	s32 status = IXGBE_SUCCESS;
+
+	if (!hw)
+		return IXGBE_ERR_PARAM;
+
+	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+	resp = &desc.params.get_link_status;
+	resp->cmd_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_LSE_NOP);
+
+	status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	if (link_data.ext_info & IXGBE_ACI_LINK_PHY_TEMP_ALARM) {
+		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+			      "PHY Temperature Alarm detected");
+		status = IXGBE_ERR_OVERTEMP;
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_set_phy_power_E610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Set the power on/off of the PHY
+ * by getting its capabilities and setting the appropriate
+ * configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+	s32 status;
+
+	status = ixgbe_aci_get_phy_caps(hw, false,
+		IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+	if (on)
+		phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+	else
+		phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+
+	/* PHY is already in requested power mode */
+	if (phy_caps.caps == phy_cfg.caps)
+		return IXGBE_SUCCESS;
+
+	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+	status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+	return status;
+}
+
+/**
+ * ixgbe_enter_lplu_E610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+	s32 status;
+
+	status = ixgbe_aci_get_phy_caps(hw, false,
+		IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+	phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+	status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+	return status;
+}
+
+/**
+ * ixgbe_init_eeprom_params_E610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	u32 gens_stat;
+	u8 sr_size;
+
+	if (eeprom->type == ixgbe_eeprom_uninitialized) {
+		eeprom->type = ixgbe_flash;
+
+		gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+		sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >>
+			  GLNVM_GENS_SR_SIZE_S;
+
+		/* Switching to words (sr_size contains power of 2) */
+		eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+		DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+			  eeprom->type, eeprom->word_size);
+	}
+
+	return IXGBE_SUCCESS;
+}
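
For example, if GLNVM_GENS reports sr_size = 6, the shadow RAM is 2^6 = 64 kB; assuming IXGBE_SR_WORDS_IN_1KB is 512 (1 kB expressed as 16-bit words), this gives word_size = 64 * 512 = 32768 words, which matches the 64 kB shadow RAM the checksum routine below iterates over.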
+
+/**
+ * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of the word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+	s32 status;
+
+	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+		status = ixgbe_init_eeprom_params(hw);
+		if (status)
+			return status;
+	}
+
+	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_word_aci(hw, offset, data);
+	ixgbe_release_nvm(hw);
+
+	return status;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_E610 - Read EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of the first word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+				  u16 words, u16 *data)
+{
+	s32 status;
+
+	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+		status = ixgbe_init_eeprom_params(hw);
+		if (status)
+			return status;
+	}
+
+	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+	ixgbe_release_nvm(hw);
+
+	return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_E610 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Calculate the SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of
+ * the VPD area are customer specific and unknown, so this function skips the
+ * maximum possible size of the VPD area (1kB).
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the negative error code on error, or the 16-bit checksum
+ */
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+	bool nvm_acquired = false;
+	u16 pcie_alt_module = 0;
+	u16 checksum_local = 0;
+	u16 checksum = 0;
+	u16 vpd_module;
+	void *vmem;
+	s32 status;
+	u16 *data;
+	u16 i;
+
+	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+		status = ixgbe_init_eeprom_params(hw);
+		if (status)
+			return status;
+	}
+
+	vmem = ixgbe_calloc(hw, IXGBE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
+	if (!vmem)
+		return IXGBE_ERR_OUT_OF_MEM;
+	data = (u16 *)vmem;
+	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+	if (status)
+		goto ixgbe_calc_sr_checksum_exit;
+	nvm_acquired = true;
+
+	/* read pointer to VPD area */
+	status = ixgbe_read_sr_word_aci(hw, E610_SR_VPD_PTR, &vpd_module);
+	if (status)
+		goto ixgbe_calc_sr_checksum_exit;
+
+	/* read pointer to PCIe Alt Auto-load module */
+	status = ixgbe_read_sr_word_aci(hw, E610_SR_PCIE_ALT_AUTO_LOAD_PTR,
+					&pcie_alt_module);
+	if (status)
+		goto ixgbe_calc_sr_checksum_exit;
+
+	/* Calculate SW checksum that covers the whole 64kB shadow RAM
+	 * except the VPD and PCIe ALT Auto-load modules
+	 */
+	for (i = 0; i < hw->eeprom.word_size; i++) {
+		/* Read SR page */
+		if ((i % IXGBE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+			u16 words = IXGBE_SR_SECTOR_SIZE_IN_WORDS;
+
+			status = ixgbe_read_sr_buf_aci(hw, i, &words, data);
+			if (status != IXGBE_SUCCESS)
+				goto ixgbe_calc_sr_checksum_exit;
+		}
+
+		/* Skip Checksum word */
+		if (i == E610_SR_SW_CHECKSUM_WORD)
+			continue;
+		/* Skip VPD module (convert byte size to word count) */
+		if (i >= (u32)vpd_module &&
+		    i < ((u32)vpd_module + E610_SR_VPD_SIZE_WORDS))
+			continue;
+		/* Skip PCIe ALT module (convert byte size to word count) */
+		if (i >= (u32)pcie_alt_module &&
+		    i < ((u32)pcie_alt_module + E610_SR_PCIE_ALT_SIZE_WORDS))
+			continue;
+
+		checksum_local += data[i % IXGBE_SR_SECTOR_SIZE_IN_WORDS];
+	}
+
+	checksum = (u16)IXGBE_SR_SW_CHECKSUM_BASE - checksum_local;
+
+ixgbe_calc_sr_checksum_exit:
+	if (nvm_acquired)
+		ixgbe_release_nvm(hw);
+	ixgbe_free(hw, vmem);
+
+	if (status)
+		return status;
+
+	return (s32)checksum;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+	s32 status;
+
+	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+		status = ixgbe_init_eeprom_params(hw);
+		if (status)
+			return status;
+	}
+
+	status = ixgbe_nvm_validate_checksum(hw);
+
+	if (status)
+		return status;
+
+	if (checksum_val) {
+		u16 tmp_checksum;
+
+		status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+		if (status)
+			return status;
+
+		status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+						&tmp_checksum);
+		ixgbe_release_nvm(hw);
+
+		if (!status)
+			*checksum_val = tmp_checksum;
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+STATIC s32 ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+				    u16 *module_tlv_len, u16 module_type)
+{
+	u16 pfa_len, pfa_ptr, pfa_end_ptr;
+	u16 next_tlv;
+	s32 status;
+
+	status = ixgbe_read_ee_aci_E610(hw, E610_SR_PFA_PTR, &pfa_ptr);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	status = ixgbe_read_ee_aci_E610(hw, pfa_ptr, &pfa_len);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* Starting with first TLV after PFA length, iterate through the list
+	 * of TLVs to find the requested one.
+	 */
+	next_tlv = pfa_ptr + 1;
+	pfa_end_ptr = pfa_ptr + pfa_len;
+	while (next_tlv < pfa_end_ptr) {
+		u16 tlv_sub_module_type, tlv_len;
+
+		/* Read TLV type */
+		status = ixgbe_read_ee_aci_E610(hw, next_tlv,
+						&tlv_sub_module_type);
+		if (status != IXGBE_SUCCESS)
+			break;
+
+		/* Read TLV length */
+		status = ixgbe_read_ee_aci_E610(hw, next_tlv + 1, &tlv_len);
+		if (status != IXGBE_SUCCESS)
+			break;
+
+		if (tlv_sub_module_type == module_type) {
+			if (tlv_len) {
+				*module_tlv = next_tlv;
+				*module_tlv_len = tlv_len;
+				return IXGBE_SUCCESS;
+			}
+			return IXGBE_ERR_INVAL_SIZE;
+		}
+		/* Check next TLV, i.e. current TLV pointer + length + 2 words
+		 * (for current TLV's type and length)
+		 */
+		next_tlv = next_tlv + tlv_len + 2;
+	}
+	/* Module does not exist */
+	return IXGBE_ERR_DOES_NOT_EXIST;
+}
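
As a worked illustration of the walk above: pfa_ptr points at the PFA length word, so the first TLV's type word is at pfa_ptr + 1 and its length word at pfa_ptr + 2, with the value following; a TLV whose length word reads N is therefore followed by the next TLV's type word N + 2 words further on, until pfa_ptr + pfa_len is reached.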
+
+/**
+ * ixgbe_read_pba_string_E610 - Reads part number string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num,
+			       u32 pba_num_size)
+{
+	u16 pba_tlv, pba_tlv_len;
+	u16 pba_word, pba_size;
+	s32 status;
+	u16 i;
+
+	status = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+					E610_SR_PBA_BLOCK_PTR);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	/* pba_size is the next word */
+	status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2), &pba_size);
+	if (status != IXGBE_SUCCESS)
+		return status;
+
+	if (pba_tlv_len < pba_size)
+		return IXGBE_ERR_INVAL_SIZE;
+
+	/* Subtract one to get PBA word count (PBA Size word is included in
+	 * total size)
+	 */
+	pba_size--;
+	if (pba_num_size < (((u32)pba_size * 2) + 1))
+		return IXGBE_ERR_PARAM;
+
+	for (i = 0; i < pba_size; i++) {
+		status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2 + 1) + i,
+						&pba_word);
+		if (status != IXGBE_SUCCESS)
+			return status;
+
+		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+		pba_num[(i * 2) + 1] = pba_word & 0xFF;
+	}
+	pba_num[(pba_size * 2)] = '\0';
+
+	return status;
+}
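
A minimal caller sketch; the 32-byte buffer below is an arbitrary illustrative size and only has to satisfy the pba_num_size >= 2 * pba_size + 1 check above:

	u8 pba[32];
	s32 err;

	err = ixgbe_read_pba_string_E610(hw, pba, sizeof(pba));
	if (!err)
		DEBUGOUT1("PBA: %s\n", pba);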
diff --git a/drivers/net/ixgbe/base/ixgbe_e610.h b/drivers/net/ixgbe/base/ixgbe_e610.h
new file mode 100644
index 0000000000..f241955ada
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_e610.h
@@ -0,0 +1,163 @@ 
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+void ixgbe_init_aci(struct ixgbe_hw *hw);
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+		       void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+			bool *pending);
+
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
+
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv);
+s32 ixgbe_aci_set_pf_context(struct ixgbe_hw *hw, u8 pf_id);
+
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+		      enum ixgbe_aci_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+			u32 *cap_count, enum ixgbe_aci_opc opc);
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+			    struct ixgbe_hw_dev_caps *dev_caps);
+s32 ixgbe_discover_func_caps(struct ixgbe_hw* hw,
+			     struct ixgbe_hw_func_caps* func_caps);
+s32 ixgbe_get_caps(struct ixgbe_hw *hw);
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+			   struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+bool ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+			       struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+				struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+			  struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+			    struct ixgbe_link_status *link);
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+			       struct ixgbe_aci_cmd_get_link_topo *cmd,
+			       u8 *node_part_number, u16 *node_handle);
+s32 ixgbe_aci_get_netlist_node_pin(struct ixgbe_hw *hw,
+				   struct ixgbe_aci_cmd_get_link_topo_pin *cmd,
+				   u16 *node_handle);
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+			    u8 node_part_number, u16 *node_handle);
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+		       struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+		       u16 bus_addr, __le16 addr, u8 params, u8 *data);
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+			struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+			u16 bus_addr, __le16 addr, u8 params, u8 *data);
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+		       bool value);
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+		       bool *value);
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+			 u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+			 u8 length, bool write);
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+			struct ixgbe_aci_cmd_link_topo_params *topo_params);
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+			struct ixgbe_aci_cmd_link_topo_params *topo_params,
+			u32 start_address, u8 *data, u8 data_size);
+
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+		      enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+		       u16 length, void *data, bool last_command,
+		       bool read_shadow_ram);
+
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw);
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values);
+
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw  *hw, u16 offset, u16 *data);
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, u16 *data);
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw  *hw, u32 offset, u32 *length,
+			u8 *data, bool read_shadow_ram);
+
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+			      u32 reg_val0, u32 reg_addr1, u32 reg_val1);
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+			     u32 *reg_val0, u32 reg_addr1, u32 *reg_val1);
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+				   bool *reset_needed);
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+				u16 table_id, u32 start, void *buf,
+				u16 buf_size, u16 *ret_buf_size,
+				u16 *ret_next_cluster, u16 *ret_next_table,
+				u32 *ret_next_index);
+
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+				struct ixgbe_nvm_access_cmd *cmd,
+				struct ixgbe_nvm_access_data *data);
+
+/* E610 operations */
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			  bool autoneg_wait);
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+			  bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+				     ixgbe_link_speed *speed,
+				     bool *autoneg);
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+		     struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+		     enum ixgbe_fc_mode req_mode);
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+			      u8 sub, u16 len, const char *driver_ver);
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee);
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+					u16 *firmware_version);
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 *sff8472_data);
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+			       u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 eeprom_data);
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on);
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw);
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+				  u16 words, u16 *data);
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_hv_vf.c b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
index 1279659926..246dafcdf1 100644
--- a/drivers/net/ixgbe/base/ixgbe_hv_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
@@ -136,7 +136,8 @@  static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
 		break;
 	case IXGBE_LINKS_SPEED_100_82599:
 		*speed = IXGBE_LINK_SPEED_100_FULL;
-		if (hw->mac.type == ixgbe_mac_X550) {
+		if (hw->mac.type == ixgbe_mac_X550 ||
+		    hw->mac.type == ixgbe_mac_E610) {
 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
 				*speed = IXGBE_LINK_SPEED_5GB_FULL;
 		}
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.c b/drivers/net/ixgbe/base/ixgbe_mbx.c
index 2dab347396..9ad45fbfcc 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.c
@@ -777,6 +777,7 @@  STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_id)
 	case ixgbe_mac_X550EM_x:
 	case ixgbe_mac_X550EM_a:
 	case ixgbe_mac_X540:
+	case ixgbe_mac_E610:
 		vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLREC(index));
 		break;
 	default:
@@ -1061,6 +1062,7 @@  void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
 	    hw->mac.type != ixgbe_mac_X550 &&
 	    hw->mac.type != ixgbe_mac_X550EM_x &&
 	    hw->mac.type != ixgbe_mac_X550EM_a &&
+	    hw->mac.type != ixgbe_mac_E610 &&
 	    hw->mac.type != ixgbe_mac_X540)
 		return;
 
@@ -1103,6 +1105,7 @@  void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, u16 vf_id)
 	    hw->mac.type != ixgbe_mac_X550 &&
 	    hw->mac.type != ixgbe_mac_X550EM_x &&
 	    hw->mac.type != ixgbe_mac_X550EM_a &&
+	    hw->mac.type != ixgbe_mac_E610 &&
 	    hw->mac.type != ixgbe_mac_X540)
 		return;
 
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.c b/drivers/net/ixgbe/base/ixgbe_osdep.c
new file mode 100644
index 0000000000..fe4db60a8d
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.c
@@ -0,0 +1,43 @@ 
+#include <stdlib.h>
+
+#include <rte_common.h>
+
+#include "ixgbe_osdep.h"
+
+void *
+ixgbe_calloc(struct ixgbe_hw __rte_unused *hw, size_t count, size_t size)
+{
+	return calloc(count, size);
+}
+
+void *
+ixgbe_malloc(struct ixgbe_hw __rte_unused *hw, size_t size)
+{
+	return malloc(size);
+}
+
+void
+ixgbe_free(struct ixgbe_hw __rte_unused *hw, void *addr)
+{
+	free(addr);
+}
+
+void ixgbe_init_lock(struct ixgbe_lock *lock)
+{
+	pthread_mutex_init(&lock->mutex, NULL);
+}
+
+void ixgbe_destroy_lock(struct ixgbe_lock *lock)
+{
+	pthread_mutex_destroy(&lock->mutex);
+}
+
+void ixgbe_acquire_lock(struct ixgbe_lock *lock)
+{
+	pthread_mutex_lock(&lock->mutex);
+}
+
+void ixgbe_release_lock(struct ixgbe_lock *lock)
+{
+	pthread_mutex_unlock(&lock->mutex);
+}
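The lock helpers above are thin wrappers around a pthread mutex. A minimal usage sketch, assuming the lock has already been set up with ixgbe_init_lock(); the enclosing structure below is hypothetical and only for illustration:

/* Hypothetical context embedding an ixgbe_lock; illustration only. */
struct example_ctx {
	struct ixgbe_lock lock;		/* initialized via ixgbe_init_lock() */
	u32 counter;
};

static void example_update(struct example_ctx *ctx)
{
	ixgbe_acquire_lock(&ctx->lock);		/* pthread_mutex_lock */
	ctx->counter++;				/* critical section */
	ixgbe_release_lock(&ctx->lock);		/* pthread_mutex_unlock */
}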
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 6c25f608b1..721043fb2e 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -5,6 +5,7 @@ 
 #ifndef _IXGBE_OS_H_
 #define _IXGBE_OS_H_
 
+#include <pthread.h>
 #include <string.h>
 #include <stdint.h>
 #include <stdio.h>
@@ -79,7 +80,9 @@  enum {
 #define IXGBE_NTOHS(_i)	rte_be_to_cpu_16(_i)
 #define IXGBE_CPU_TO_LE16(_i)  rte_cpu_to_le_16(_i)
 #define IXGBE_CPU_TO_LE32(_i)  rte_cpu_to_le_32(_i)
+#define IXGBE_LE16_TO_CPU(_i)  rte_le_to_cpu_16(_i)
 #define IXGBE_LE32_TO_CPU(_i)  rte_le_to_cpu_32(_i)
+#define IXGBE_LE64_TO_CPU(_i)  rte_le_to_cpu_64(_i)
 #define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
 #define IXGBE_CPU_TO_BE16(_i)  rte_cpu_to_be_16(_i)
 #define IXGBE_CPU_TO_BE32(_i)  rte_cpu_to_be_32(_i)
@@ -152,4 +155,18 @@  do {									\
 		rte_delay_ms(1);					\
 } while (0)
 
+struct ixgbe_hw;
+struct ixgbe_lock {
+	pthread_mutex_t mutex;
+};
+
+void *ixgbe_calloc(struct ixgbe_hw *hw, size_t count, size_t size);
+void *ixgbe_malloc(struct ixgbe_hw *hw, size_t size);
+void ixgbe_free(struct ixgbe_hw *hw, void *addr);
+
+void ixgbe_init_lock(struct ixgbe_lock *lock);
+void ixgbe_destroy_lock(struct ixgbe_lock *lock);
+void ixgbe_acquire_lock(struct ixgbe_lock *lock);
+void ixgbe_release_lock(struct ixgbe_lock *lock);
+
 #endif /* _IXGBE_OS_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index 8a7712c8f2..47dab74b3c 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -800,7 +800,8 @@  s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
 			     &autoneg_reg);
 
-	if (hw->mac.type == ixgbe_mac_X550) {
+	if ((hw->mac.type == ixgbe_mac_X550) ||
+	    (hw->mac.type == ixgbe_mac_E610)) {
 		/* Set or unset auto-negotiation 5G advertisement */
 		autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
 		if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
@@ -915,6 +916,7 @@  static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
 
 	switch (hw->mac.type) {
 	case ixgbe_mac_X550:
+	case ixgbe_mac_E610:
 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
 		break;
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
index 5db9e03b4d..1070c281a2 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -44,6 +44,7 @@ 
  */
 
 #include "ixgbe_osdep.h"
+#include "ixgbe_type_e610.h"
 
 /* Override this by setting IOMEM in your ixgbe_osdep.h header */
 
@@ -124,6 +125,11 @@ 
 #define IXGBE_DEV_ID_X550EM_A_VF_HV		0x15B4
 #define IXGBE_DEV_ID_X550EM_X_VF		0x15A8
 #define IXGBE_DEV_ID_X550EM_X_VF_HV		0x15A9
+#define IXGBE_DEV_ID_E610_BACKPLANE		0x57AE
+#define IXGBE_DEV_ID_E610_SFP			0x57AF
+#define IXGBE_DEV_ID_E610_10G_T			0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T		0x57B1
+#define IXGBE_DEV_ID_E610_SGMII			0x57B2
 
 #define IXGBE_CAT(r, m) IXGBE_##r##m
 
@@ -1887,6 +1893,7 @@  enum {
 #define IXGBE_EICR_MAILBOX	0x00080000 /* VF to PF Mailbox Interrupt */
 #define IXGBE_EICR_LSC		0x00100000 /* Link Status Change */
 #define IXGBE_EICR_LINKSEC	0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT	0x00200000 /* Async FW event */
 #define IXGBE_EICR_MNG		0x00400000 /* Manageability Event Interrupt */
 #define IXGBE_EICR_TS		0x00800000 /* Thermal Sensor Event */
 #define IXGBE_EICR_TIMESYNC	0x01000000 /* Timesync Event */
@@ -1922,6 +1929,7 @@  enum {
 #define IXGBE_EICS_PCI		IXGBE_EICR_PCI /* PCI Exception */
 #define IXGBE_EICS_MAILBOX	IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
 #define IXGBE_EICS_LSC		IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT	IXGBE_EICR_FW_EVENT /* Async FW event */
 #define IXGBE_EICS_MNG		IXGBE_EICR_MNG /* MNG Event Interrupt */
 #define IXGBE_EICS_TIMESYNC	IXGBE_EICR_TIMESYNC /* Timesync Event */
 #define IXGBE_EICS_GPI_SDP0	IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -1943,6 +1951,7 @@  enum {
 #define IXGBE_EIMS_PCI		IXGBE_EICR_PCI /* PCI Exception */
 #define IXGBE_EIMS_MAILBOX	IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
 #define IXGBE_EIMS_LSC		IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT	IXGBE_EICR_FW_EVENT /* Async FW event */
 #define IXGBE_EIMS_MNG		IXGBE_EICR_MNG /* MNG Event Interrupt */
 #define IXGBE_EIMS_TS		IXGBE_EICR_TS /* Thermal Sensor Event */
 #define IXGBE_EIMS_TIMESYNC	IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -1965,6 +1974,7 @@  enum {
 #define IXGBE_EIMC_PCI		IXGBE_EICR_PCI /* PCI Exception */
 #define IXGBE_EIMC_MAILBOX	IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
 #define IXGBE_EIMC_LSC		IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT	IXGBE_EICR_FW_EVENT /* Async FW event */
 #define IXGBE_EIMC_MNG		IXGBE_EICR_MNG /* MNG Event Interrupt */
 #define IXGBE_EIMC_TIMESYNC	IXGBE_EICR_TIMESYNC /* Timesync Event */
 #define IXGBE_EIMC_GPI_SDP0	IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -2372,6 +2382,7 @@  enum {
 #define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR	0x11
 #define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR	0x04
 
+#define IXGBE_PCIE_MSIX_LKV_CAPS		0xB2
 #define IXGBE_PCIE_MSIX_82599_CAPS	0x72
 #define IXGBE_MAX_MSIX_VECTORS_82599	0x40
 #define IXGBE_PCIE_MSIX_82598_CAPS	0x62
@@ -2489,6 +2500,7 @@  enum {
 #define IXGBE_PCI_DEVICE_STATUS		0xAA
 #define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING	0x0020
 #define IXGBE_PCI_LINK_STATUS		0xB2
+#define IXGBE_PCI_LINK_STATUS_E610	0x82
 #define IXGBE_PCI_DEVICE_CONTROL2	0xC8
 #define IXGBE_PCI_LINK_WIDTH		0x3F0
 #define IXGBE_PCI_LINK_WIDTH_1		0x10
@@ -2499,6 +2511,7 @@  enum {
 #define IXGBE_PCI_LINK_SPEED_2500	0x1
 #define IXGBE_PCI_LINK_SPEED_5000	0x2
 #define IXGBE_PCI_LINK_SPEED_8000	0x3
+#define IXGBE_PCI_LINK_SPEED_16000	0x4
 #define IXGBE_PCI_HEADER_TYPE_REGISTER	0x0E
 #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC	0x80
 #define IXGBE_PCI_DEVICE_CONTROL2_16ms	0x0005
@@ -2626,6 +2639,7 @@  enum {
 #define IXGBE_RXMTRL_V2_MGMT_MSG	0x0D00
 
 #define IXGBE_FCTRL_SBP		0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_TPE		0x00000080 /* Tag Promiscuous Ena*/
 #define IXGBE_FCTRL_MPE		0x00000100 /* Multicast Promiscuous Ena*/
 #define IXGBE_FCTRL_UPE		0x00000200 /* Unicast Promiscuous Ena */
 #define IXGBE_FCTRL_BAM		0x00000400 /* Broadcast Accept Mode */
@@ -2693,6 +2707,7 @@  enum {
 /* Multiple Transmit Queue Command Register */
 #define IXGBE_MTQC_RT_ENA	0x1 /* DCB Enable */
 #define IXGBE_MTQC_VT_ENA	0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_NUM_TC_OR_Q  0xC /* Number of TCs or TxQs per pool */
 #define IXGBE_MTQC_64Q_1PB	0x0 /* 64 queues 1 pack buffer */
 #define IXGBE_MTQC_32VF		0x8 /* 4 TX Queues per pool w/32VF's */
 #define IXGBE_MTQC_64VF		0x4 /* 2 TX Queues per pool w/64VF's */
@@ -3660,6 +3675,7 @@  enum ixgbe_mac_type {
 	ixgbe_mac_X550_vf,
 	ixgbe_mac_X550EM_x_vf,
 	ixgbe_mac_X550EM_a_vf,
+	ixgbe_mac_E610,
 	ixgbe_num_macs
 };
 
@@ -3738,7 +3754,9 @@  enum ixgbe_media_type {
 	ixgbe_media_type_copper,
 	ixgbe_media_type_backplane,
 	ixgbe_media_type_cx4,
-	ixgbe_media_type_virtual
+	ixgbe_media_type_virtual,
+	ixgbe_media_type_da,
+	ixgbe_media_type_aui
 };
 
 /* Flow Control Settings */
@@ -3747,6 +3765,8 @@  enum ixgbe_fc_mode {
 	ixgbe_fc_rx_pause,
 	ixgbe_fc_tx_pause,
 	ixgbe_fc_full,
+	ixgbe_fc_auto,
+	ixgbe_fc_pfc,
 	ixgbe_fc_default
 };
 
@@ -3779,6 +3799,7 @@  enum ixgbe_bus_speed {
 	ixgbe_bus_speed_2500	= 2500,
 	ixgbe_bus_speed_5000	= 5000,
 	ixgbe_bus_speed_8000	= 8000,
+	ixgbe_bus_speed_16000   = 16000,
 	ixgbe_bus_speed_reserved
 };
 
@@ -3923,6 +3944,7 @@  struct ixgbe_eeprom_operations {
 	s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
 	s32 (*update_checksum)(struct ixgbe_hw *);
 	s32 (*calc_checksum)(struct ixgbe_hw *);
+	s32 (*read_pba_string)(struct ixgbe_hw *, u8 *, u32);
 };
 
 struct ixgbe_mac_operations {
@@ -4029,6 +4051,9 @@  struct ixgbe_mac_operations {
 	void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
 	void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
 	bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+	bool (*get_fw_tsam_mode)(struct ixgbe_hw *hw);
+	s32 (*get_fw_version)(struct ixgbe_hw *hw);
+	s32 (*get_nvm_version)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
 };
 
 struct ixgbe_phy_operations {
@@ -4073,6 +4098,9 @@  struct ixgbe_link_operations {
 struct ixgbe_link_info {
 	struct ixgbe_link_operations ops;
 	u8 addr;
+	struct ixgbe_link_status link_info;
+	struct ixgbe_link_status link_info_old;
+	u8 get_link_info;
 };
 
 struct ixgbe_eeprom_info {
@@ -4144,6 +4172,9 @@  struct ixgbe_phy_info {
 	bool reset_if_overtemp;
 	bool qsfp_shared_i2c_bus;
 	u32 nw_mng_if_sel;
+	u64 phy_type_low;
+	u64 phy_type_high;
+	struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
 };
 
 #include "ixgbe_mbx.h"
@@ -4165,6 +4196,8 @@  struct ixgbe_hw {
 	u16 subsystem_device_id;
 	u16 subsystem_vendor_id;
 	u8 revision_id;
+	u8 pf_id;
+	u8 logical_pf_id;
 	bool adapter_stopped;
 	int api_version;
 	bool force_full_reset;
@@ -4172,6 +4205,19 @@  struct ixgbe_hw {
 	bool wol_enabled;
 	bool need_crosstalk_fix;
 	u32 fw_rst_cnt;
+	u8 api_branch;
+	u8 api_maj_ver;
+	u8 api_min_ver;
+	u8 api_patch;
+	u8 fw_branch;
+	u8 fw_maj_ver;
+	u8 fw_min_ver;
+	u8 fw_patch;
+	u32 fw_build;
+	struct ixgbe_aci_info aci;
+	struct ixgbe_flash_info flash;
+	struct ixgbe_hw_dev_caps dev_caps;
+	struct ixgbe_hw_func_caps func_caps;
 };
 
 #define ixgbe_call_func(hw, func, params, error) \
@@ -4221,6 +4267,23 @@  struct ixgbe_hw {
 #define IXGBE_ERR_MBX				-41
 #define IXGBE_ERR_MBX_NOMSG			-42
 #define IXGBE_ERR_TIMEOUT			-43
+#define IXGBE_ERR_NOT_SUPPORTED			-45
+#define IXGBE_ERR_OUT_OF_RANGE			-46
+
+#define IXGBE_ERR_NVM				-50
+#define IXGBE_ERR_NVM_CHECKSUM			-51
+#define IXGBE_ERR_BUF_TOO_SHORT			-52
+#define IXGBE_ERR_NVM_BLANK_MODE		-53
+#define IXGBE_ERR_INVAL_SIZE			-54
+#define IXGBE_ERR_DOES_NOT_EXIST		-55
+
+#define IXGBE_ERR_ACI_ERROR			-100
+#define IXGBE_ERR_ACI_DISABLED			-101
+#define IXGBE_ERR_ACI_TIMEOUT			-102
+#define IXGBE_ERR_ACI_BUSY			-103
+#define IXGBE_ERR_ACI_NO_WORK			-104
+#define IXGBE_ERR_ACI_NO_EVENTS			-105
+#define IXGBE_ERR_FW_API_VER			-106
 
 #define IXGBE_NOT_IMPLEMENTED			0x7FFFFFFF
 
diff --git a/drivers/net/ixgbe/base/ixgbe_type_e610.h b/drivers/net/ixgbe/base/ixgbe_type_e610.h
new file mode 100644
index 0000000000..0b734d5af7
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_type_e610.h
@@ -0,0 +1,2181 @@ 
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+/* Little Endian defines */
+#ifndef __le16
+#define __le16  u16
+#endif
+#ifndef __le32
+#define __le32  u32
+#endif
+#ifndef __le64
+#define __le64  u64
+#endif
+
+/* Generic defines */
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* !BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* !BIT_ULL */
+#ifndef BITS_PER_BYTE
+#define BITS_PER_BYTE	8
+#endif /* !BITS_PER_BYTE */
+#ifndef DIVIDE_AND_ROUND_UP
+#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
+#endif /* !DIVIDE_AND_ROUND_UP */
+
+#ifndef ROUND_UP
+/**
+ * ROUND_UP - round up to next arbitrary multiple (not a power of 2)
+ * @a: value to round up
+ * @b: arbitrary multiple
+ *
+ * Round up to the next multiple of the arbitrary b.
+ */
+#define ROUND_UP(a, b)	((b) * DIVIDE_AND_ROUND_UP((a), (b)))
+#endif /* !ROUND_UP */
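A quick worked example of the two rounding helpers (values chosen purely for illustration):

/*
 * DIVIDE_AND_ROUND_UP(10, 4) = (10 + 4 - 1) / 4 = 3
 * ROUND_UP(10, 4)            = 4 * 3            = 12
 * ROUND_UP(12, 4)            = 4 * 3            = 12  (already a multiple)
 */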
+
+#define MAKEMASK(mask, shift) ((mask) << (shift))
+
+#define BYTES_PER_WORD	2
+#define BYTES_PER_DWORD	4
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG		64
+#endif /* !BITS_PER_LONG */
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG	64
+#endif /* !BITS_PER_LONG_LONG */
+#undef GENMASK
+#define GENMASK(h, l) \
+	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+/* Data type manipulation macros. */
+#define HI_DWORD(x)	((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define LO_DWORD(x)	((u32)((x) & 0xFFFFFFFF))
+#define HI_WORD(x)	((u16)(((x) >> 16) & 0xFFFF))
+#define LO_WORD(x)	((u16)((x) & 0xFFFF))
+#define HI_BYTE(x)	((u8)(((x) >> 8) & 0xFF))
+#define LO_BYTE(x)	((u8)((x) & 0xFF))
+
+#define MIN_T(_t, _a, _b)	min((_t)(_a), (_t)(_b))
+
+#define IS_ASCII(_ch)	((_ch) < 0x80)
+
+#define STRUCT_HACK_VAR_LEN
+/**
+ * ixgbe_struct_size - size of struct with C99 flexible array member
+ * @ptr: pointer to structure
+ * @field: flexible array member (last member of the structure)
+ * @num: number of elements of that flexible array member
+ */
+#define ixgbe_struct_size(ptr, field, num) \
+	(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
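A minimal sketch of how ixgbe_struct_size() is meant to be used with a flexible array member, assuming a struct ixgbe_hw pointer hw is in scope; the structure and the 16-byte payload below are illustrative, not part of the driver:

struct example_tlv {
	u16 type;
	u16 len;
	u8 payload[];	/* C99 flexible array member, must be last */
};

/* Allocate room for the header plus 16 payload bytes:
 * sizeof(struct example_tlv) + 16 * sizeof(u8).
 */
struct example_tlv *tlv = ixgbe_malloc(hw, ixgbe_struct_size(tlv, payload, 16));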
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI			768
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define E610_SR_VPD_SIZE_WORDS		512
+#define E610_SR_PCIE_ALT_SIZE_WORDS	512
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_DEV_STARTER_VER		0x18
+#define E610_NVM_VER_LO_SHIFT			0
+#define E610_NVM_VER_LO_MASK			(0xff << E610_NVM_VER_LO_SHIFT)
+#define E610_NVM_VER_HI_SHIFT			12
+#define E610_NVM_VER_HI_MASK			(0xf << E610_NVM_VER_HI_SHIFT)
+#define E610_SR_NVM_MAP_VER			0x29
+#define E610_SR_NVM_EETRACK_LO			0x2D
+#define E610_SR_NVM_EETRACK_HI			0x2E
+#define E610_SR_VPD_PTR				0x2F
+#define E610_SR_PCIE_ALT_AUTO_LOAD_PTR		0x3E
+#define E610_SR_SW_CHECKSUM_WORD		0x3F
+#define E610_SR_PFA_PTR				0x40
+#define E610_SR_1ST_NVM_BANK_PTR		0x42
+#define E610_SR_NVM_BANK_SIZE			0x43
+#define E610_SR_1ST_OROM_BANK_PTR		0x44
+#define E610_SR_OROM_BANK_SIZE			0x45
+#define E610_SR_NETLIST_BANK_PTR		0x46
+#define E610_SR_NETLIST_BANK_SIZE		0x47
+#define E610_SR_POINTER_TYPE_BIT		BIT(15)
+#define E610_SR_POINTER_MASK			0x7fff
+#define E610_SR_HALF_4KB_SECTOR_UNITS		2048
+#define E610_GET_PFA_POINTER_IN_WORDS(offset)				     \
+	((((offset) & E610_SR_POINTER_TYPE_BIT) == E610_SR_POINTER_TYPE_BIT) ? \
+	 (((offset) & E610_SR_POINTER_MASK) * E610_SR_HALF_4KB_SECTOR_UNITS) : \
+	 ((offset) & E610_SR_POINTER_MASK))
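Worked example of the pointer decoding above: bit 15 selects the unit of the remaining 15 bits.

/*
 * E610_GET_PFA_POINTER_IN_WORDS(0x0005) = 5 words
 *     (bit 15 clear: the value is already a word offset)
 * E610_GET_PFA_POINTER_IN_WORDS(0x8005) = 5 * 2048 = 10240 words
 *     (bit 15 set: the value counts 4KB sectors, i.e. 2048 words each)
 */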
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_CTRL_WORD		0x00
+#define E610_SR_PBA_BLOCK_PTR		0x16
+
+/* The Orom version topology */
+#define IXGBE_OROM_VER_PATCH_SHIFT	0
+#define IXGBE_OROM_VER_PATCH_MASK	(0xff << IXGBE_OROM_VER_PATCH_SHIFT)
+#define IXGBE_OROM_VER_BUILD_SHIFT	8
+#define IXGBE_OROM_VER_BUILD_MASK	(0xffff << IXGBE_OROM_VER_BUILD_SHIFT)
+#define IXGBE_OROM_VER_SHIFT		24
+#define IXGBE_OROM_VER_MASK		(0xff << IXGBE_OROM_VER_SHIFT)
+
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L			0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H			0x03
+#define IXGBE_NVM_CSS_SREV_L			0x14
+#define IXGBE_NVM_CSS_SREV_H			0x15
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN		0x08
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE		0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n)		IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW	0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH	0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW	0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH	0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW		0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH		0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW		0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH		0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n)	(0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER		0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID		0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET		0x0000
+#define IXGBE_NETLIST_LEN_OFFSET		0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n)	((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN	IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT	IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M	MAKEMASK(0x3FF, 0)
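Worked example of the 2-word TLV adjustment:

/*
 * IXGBE_LINK_TOPO_NODE_COUNT = IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001) = 0x0003,
 * i.e. the node count is the fourth word of the module, after the TLV type
 * (word 0), the TLV length (word 1) and the module length (word 2).
 */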
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_S		0x06
+#define IXGBE_SR_CTRL_WORD_1_M		(0x03 << IXGBE_SR_CTRL_WORD_1_S)
+#define IXGBE_SR_CTRL_WORD_VALID	0x1
+#define IXGBE_SR_CTRL_WORD_OROM_BANK	BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK	BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK	BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS	BIT(15)
+
+/* These macros extract a particular part of the NVM version (major version,
+ * minor version, image ID) from the NVM Image Revision word. */
+#define E610_NVM_MAJOR_VER(x)	(((x) & 0xF000) >> 12)
+#define E610_NVM_MINOR_VER(x)	((x) & 0x00FF)
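For example, an NVM Image Revision word of 0x3042 decodes as:

/*
 * E610_NVM_MAJOR_VER(0x3042) = (0x3042 & 0xF000) >> 12 = 0x3
 * E610_NVM_MINOR_VER(0x3042) =  0x3042 & 0x00FF        = 0x42
 */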
+
+/* Minimal Security Revision */
+
+/* Shadow RAM related */
+#define IXGBE_SR_SECTOR_SIZE_IN_WORDS		0x800
+#define IXGBE_SR_WORDS_IN_1KB			512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define IXGBE_SR_SW_CHECKSUM_BASE		0xBABA
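A minimal sketch of that checksum rule, assuming a hypothetical read_sr_word() helper and a caller-supplied Shadow RAM size in words; this is not the driver's actual implementation:

static u16 example_sw_checksum(struct ixgbe_hw *hw, u16 sr_size_words)
{
	u16 sum = 0;
	u16 word;
	u16 i;

	for (i = 0; i < sr_size_words; i++) {
		if (i == E610_SR_SW_CHECKSUM_WORD)
			continue;	/* the checksum word itself is not summed */
		read_sr_word(hw, i, &word);	/* hypothetical SR read helper */
		sum += word;
	}

	/* Value to store so that the total, checksum included, is 0xBABA. */
	return (u16)(IXGBE_SR_SW_CHECKSUM_BASE - sum);
}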
+
+/* Netlist */
+#define IXGBE_MAX_NETLIST_SIZE			10
+
+/* General registers */
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS				0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_FWS0B_S			0
+#define GL_FWSTS_FWS0B_M			MAKEMASK(0xFF, 0)
+#define GL_FWSTS_FWROWD_S			8
+#define GL_FWSTS_FWROWD_M			BIT(8)
+#define GL_FWSTS_FWRI_S				9
+#define GL_FWSTS_FWRI_M				BIT(9)
+#define GL_FWSTS_FWS1B_S			16
+#define GL_FWSTS_FWS1B_M			MAKEMASK(0xFF, 16)
+#define GL_FWSTS_EP_PF0				BIT(24)
+#define GL_FWSTS_EP_PF1				BIT(25)
+
+/* Recovery mode values of Firmware Status 1 Byte (FWS1B) bitfield */
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER_LEGACY  0x0B
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR_LEGACY  0x0C
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER         0x30
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR         0x31
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_TRANSITION    0x32
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_NVM           0x33
+
+/* Firmware Status (GL_MNG_FWSM) */
+#define GL_MNG_FWSM				0x000B6134 /* Reset Source: POR */
+#define GL_MNG_FWSM_FW_MODES_S			0
+#define GL_MNG_FWSM_FW_MODES_M			MAKEMASK(0x7, 0)
+#define GL_MNG_FWSM_RSV0_S			2
+#define GL_MNG_FWSM_RSV0_M			MAKEMASK(0xFF, 2)
+#define GL_MNG_FWSM_EEP_RELOAD_IND_S		10
+#define GL_MNG_FWSM_EEP_RELOAD_IND_M		BIT(10)
+#define GL_MNG_FWSM_RSV1_S			11
+#define GL_MNG_FWSM_RSV1_M			MAKEMASK(0xF, 11)
+#define GL_MNG_FWSM_RSV2_S			15
+#define GL_MNG_FWSM_RSV2_M			BIT(15)
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_S		16
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_M		BIT(16)
+#define GL_MNG_FWSM_POR_AL_FAILURE_S		17
+#define GL_MNG_FWSM_POR_AL_FAILURE_M		BIT(17)
+#define GL_MNG_FWSM_RSV3_S			18
+#define GL_MNG_FWSM_RSV3_M			BIT(18)
+#define GL_MNG_FWSM_EXT_ERR_IND_S		19
+#define GL_MNG_FWSM_EXT_ERR_IND_M		MAKEMASK(0x3F, 19)
+#define GL_MNG_FWSM_RSV4_S			25
+#define GL_MNG_FWSM_RSV4_M			BIT(25)
+#define GL_MNG_FWSM_RESERVED_11_S		26
+#define GL_MNG_FWSM_RESERVED_11_M		MAKEMASK(0xF, 26)
+#define GL_MNG_FWSM_RSV5_S			30
+#define GL_MNG_FWSM_RSV5_M			MAKEMASK(0x3, 30)
+
+/* FW mode indications */
+#define GL_MNG_FWSM_FW_MODES_DEBUG_M           BIT(0)
+#define GL_MNG_FWSM_FW_MODES_RECOVERY_M        BIT(1)
+#define GL_MNG_FWSM_FW_MODES_ROLLBACK_M        BIT(2)
+
+/* PF - Manageability  Registers  */
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS				0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_NVM_PRES_S			0
+#define GLNVM_GENS_NVM_PRES_M			BIT(0)
+#define GLNVM_GENS_SR_SIZE_S			5
+#define GLNVM_GENS_SR_SIZE_M			MAKEMASK(0x7, 5)
+#define GLNVM_GENS_BANK1VAL_S			8
+#define GLNVM_GENS_BANK1VAL_M			BIT(8)
+#define GLNVM_GENS_ALT_PRST_S			23
+#define GLNVM_GENS_ALT_PRST_M			BIT(23)
+#define GLNVM_GENS_FL_AUTO_RD_S			25
+#define GLNVM_GENS_FL_AUTO_RD_M			BIT(25)
+
+/* Flash Access Register */
+#define GLNVM_FLA				0x000B6108 /* Reset Source: POR */
+#define GLNVM_FLA_LOCKED_S			6
+#define GLNVM_FLA_LOCKED_M			BIT(6)
+
+/* Bit Bang registers */
+#define RDASB_MSGCTL				0x000B6820
+#define RDASB_MSGCTL_HDR_DWS_S			0
+#define RDASB_MSGCTL_EXP_RDW_S			8
+#define RDASB_MSGCTL_CMDV_M			BIT(31)
+#define RDASB_RSPCTL				0x000B6824
+#define RDASB_RSPCTL_BAD_LENGTH_M		BIT(30)
+#define RDASB_RSPCTL_NOT_SUCCESS_M		BIT(31)
+#define RDASB_WHDR0				0x000B68F4
+#define RDASB_WHDR1				0x000B68F8
+#define RDASB_WHDR2				0x000B68FC
+#define RDASB_WHDR3				0x000B6900
+#define RDASB_WHDR4				0x000B6904
+#define RDASB_RHDR0				0x000B6AFC
+#define RDASB_RHDR0_RESPONSE_S			27
+#define RDASB_RHDR0_RESPONSE_M			MAKEMASK(0x7, 27)
+#define RDASB_RDATA0				0x000B6B00
+#define RDASB_RDATA1				0x000B6B04
+
+/* SPI Registers */
+#define SPISB_MSGCTL				0x000B7020
+#define SPISB_MSGCTL_HDR_DWS_S			0
+#define SPISB_MSGCTL_EXP_RDW_S			8
+#define SPISB_MSGCTL_MSG_MODE_S			26
+#define SPISB_MSGCTL_TOKEN_MODE_S		28
+#define SPISB_MSGCTL_BARCLR_S			30
+#define SPISB_MSGCTL_CMDV_S			31
+#define SPISB_MSGCTL_CMDV_M			BIT(31)
+#define SPISB_RSPCTL				0x000B7024
+#define SPISB_RSPCTL_BAD_LENGTH_M		BIT(30)
+#define SPISB_RSPCTL_NOT_SUCCESS_M		BIT(31)
+#define SPISB_WHDR0				0x000B70F4
+#define SPISB_WHDR0_DEST_SEL_S			12
+#define SPISB_WHDR0_OPCODE_SEL_S		16
+#define SPISB_WHDR0_TAG_S			24
+#define SPISB_WHDR1				0x000B70F8
+#define SPISB_WHDR2				0x000B70FC
+#define SPISB_RDATA				0x000B7300
+#define SPISB_WDATA				0x000B7100
+
+/* Firmware Reset Count register */
+#define GL_FWRESETCNT				0x00083100 /* Reset Source: POR */
+#define GL_FWRESETCNT_FWRESETCNT_S		0
+#define GL_FWRESETCNT_FWRESETCNT_M		MAKEMASK(0xFFFFFFFF, 0)
+
+/* Admin Command Interface (ACI) registers */
+#define PF_HIDA(_i)			(0x00085000 + ((_i) * 4))
+#define PF_HIDA_2(_i)			(0x00085020 + ((_i) * 4))
+#define PF_HIBA(_i)			(0x00084000 + ((_i) * 4))
+#define PF_HICR				0x00082048
+
+#define PF_HIDA_MAX_INDEX		15
+#define PF_HIBA_MAX_INDEX		1023
+
+#define PF_HICR_EN			BIT(0)
+#define PF_HICR_C			BIT(1)
+#define PF_HICR_SV			BIT(2)
+#define PF_HICR_EV			BIT(3)
+
+#define GL_HIDA(_i)			(0x00082000 + ((_i) * 4))
+#define GL_HIDA_2(_i)			(0x00082020 + ((_i) * 4))
+#define GL_HIBA(_i)			(0x00081000 + ((_i) * 4))
+#define GL_HICR				0x00082040
+
+#define GL_HIDA_MAX_INDEX		15
+#define GL_HIBA_MAX_INDEX		1023
+
+#define GL_HICR_C			BIT(1)
+#define GL_HICR_SV			BIT(2)
+#define GL_HICR_EV			BIT(3)
+
+#define GL_HICR_EN			0x00082044
+
+#define GL_HICR_EN_CHECK		BIT(0)
+
+/* Admin Command Interface (ACI) defines */
+/* Defines that help manage the driver vs FW API checks.
+ */
+#define IXGBE_FW_API_VER_BRANCH		0x00
+#define IXGBE_FW_API_VER_MAJOR		0x01
+#define IXGBE_FW_API_VER_MINOR		0x05
+#define IXGBE_FW_API_VER_DIFF_ALLOWED	0x02
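A sketch of how these defines can gate driver/firmware compatibility; the exact policy the driver enforces is an assumption here (same major API version, minor version within IXGBE_FW_API_VER_DIFF_ALLOWED):

static bool example_fw_api_compatible(u8 fw_api_major, u8 fw_api_minor)
{
	if (fw_api_major != IXGBE_FW_API_VER_MAJOR)
		return false;

	/* Tolerate a bounded minor-version mismatch in either direction. */
	if (fw_api_minor > IXGBE_FW_API_VER_MINOR + IXGBE_FW_API_VER_DIFF_ALLOWED ||
	    fw_api_minor + IXGBE_FW_API_VER_DIFF_ALLOWED < IXGBE_FW_API_VER_MINOR)
		return false;

	return true;
}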
+
+#define IXGBE_ACI_DESC_SIZE		32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS	(IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE		4096    /* Size in bytes */
+#define IXGBE_ACI_DESC_COOKIE_L_DWORD_OFFSET	3
+#define IXGBE_ACI_SEND_DELAY_TIME_MS		10
+#define IXGBE_ACI_SEND_MAX_EXECUTE		3
+/* [ms] timeout of waiting for sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT		100000
+/* [ms] timeout of waiting for async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT	150000
+/* [ms] timeout of waiting for resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT		10000
+
+/* Timestamp spacing for Tools ACI: queue is active if spacing is within the range [LO..HI] */
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_LO      0
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_HI      200
+
+/* Timestamp spacing for Tools ACI: queue is expired if spacing is outside the range [LO..HI] */
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_LO     -5
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_HI     205
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define IXGBE_ACI_LG_BUF		512
+
+/* Flags sub-structure
+ * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * *  RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define IXGBE_ACI_FLAG_DD_S	0
+#define IXGBE_ACI_FLAG_CMP_S	1
+#define IXGBE_ACI_FLAG_ERR_S	2
+#define IXGBE_ACI_FLAG_VFE_S	3
+#define IXGBE_ACI_FLAG_LB_S	9
+#define IXGBE_ACI_FLAG_RD_S	10
+#define IXGBE_ACI_FLAG_VFC_S	11
+#define IXGBE_ACI_FLAG_BUF_S	12
+#define IXGBE_ACI_FLAG_SI_S	13
+#define IXGBE_ACI_FLAG_EI_S	14
+#define IXGBE_ACI_FLAG_FE_S	15
+
+#define IXGBE_ACI_FLAG_DD		BIT(IXGBE_ACI_FLAG_DD_S)  /* 0x1    */
+#define IXGBE_ACI_FLAG_CMP		BIT(IXGBE_ACI_FLAG_CMP_S) /* 0x2    */
+#define IXGBE_ACI_FLAG_ERR		BIT(IXGBE_ACI_FLAG_ERR_S) /* 0x4    */
+#define IXGBE_ACI_FLAG_VFE		BIT(IXGBE_ACI_FLAG_VFE_S) /* 0x8    */
+#define IXGBE_ACI_FLAG_LB		BIT(IXGBE_ACI_FLAG_LB_S)  /* 0x200  */
+#define IXGBE_ACI_FLAG_RD		BIT(IXGBE_ACI_FLAG_RD_S)  /* 0x400  */
+#define IXGBE_ACI_FLAG_VFC		BIT(IXGBE_ACI_FLAG_VFC_S) /* 0x800  */
+#define IXGBE_ACI_FLAG_BUF		BIT(IXGBE_ACI_FLAG_BUF_S) /* 0x1000 */
+#define IXGBE_ACI_FLAG_SI		BIT(IXGBE_ACI_FLAG_SI_S)  /* 0x2000 */
+#define IXGBE_ACI_FLAG_EI		BIT(IXGBE_ACI_FLAG_EI_S)  /* 0x4000 */
+#define IXGBE_ACI_FLAG_FE		BIT(IXGBE_ACI_FLAG_FE_S)  /* 0x8000 */
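As an illustration of how the flag bits combine, an indirect command that attaches a buffer sets BUF (plus RD when the buffer carries command data rather than room for a response), and LB when the buffer exceeds 512 bytes. The local variables below exist only for the sketch:

u16 buf_size = IXGBE_ACI_MAX_BUFFER_SIZE;	/* size of the attached buffer */
__le16 desc_flags;

desc_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF | IXGBE_ACI_FLAG_RD);
if (buf_size > IXGBE_ACI_LG_BUF)		/* "large buffer" above 512 bytes */
	desc_flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);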
+
+/* Admin Command Interface (ACI) error codes */
+enum ixgbe_aci_err {
+	IXGBE_ACI_RC_OK			= 0,  /* Success */
+	IXGBE_ACI_RC_EPERM		= 1,  /* Operation not permitted */
+	IXGBE_ACI_RC_ENOENT		= 2,  /* No such element */
+	IXGBE_ACI_RC_ESRCH		= 3,  /* Bad opcode */
+	IXGBE_ACI_RC_EINTR		= 4,  /* Operation interrupted */
+	IXGBE_ACI_RC_EIO		= 5,  /* I/O error */
+	IXGBE_ACI_RC_ENXIO		= 6,  /* No such resource */
+	IXGBE_ACI_RC_E2BIG		= 7,  /* Arg too long */
+	IXGBE_ACI_RC_EAGAIN		= 8,  /* Try again */
+	IXGBE_ACI_RC_ENOMEM		= 9,  /* Out of memory */
+	IXGBE_ACI_RC_EACCES		= 10, /* Permission denied */
+	IXGBE_ACI_RC_EFAULT		= 11, /* Bad address */
+	IXGBE_ACI_RC_EBUSY		= 12, /* Device or resource busy */
+	IXGBE_ACI_RC_EEXIST		= 13, /* Object already exists */
+	IXGBE_ACI_RC_EINVAL		= 14, /* Invalid argument */
+	IXGBE_ACI_RC_ENOTTY		= 15, /* Not a typewriter */
+	IXGBE_ACI_RC_ENOSPC		= 16, /* No space left or allocation failure */
+	IXGBE_ACI_RC_ENOSYS		= 17, /* Function not implemented */
+	IXGBE_ACI_RC_ERANGE		= 18, /* Parameter out of range */
+	IXGBE_ACI_RC_EFLUSHED		= 19, /* Cmd flushed due to prev cmd error */
+	IXGBE_ACI_RC_BAD_ADDR		= 20, /* Descriptor contains a bad pointer */
+	IXGBE_ACI_RC_EMODE		= 21, /* Op not allowed in current dev mode */
+	IXGBE_ACI_RC_EFBIG		= 22, /* File too big */
+	IXGBE_ACI_RC_ESBCOMP		= 23, /* SB-IOSF completion unsuccessful */
+	IXGBE_ACI_RC_ENOSEC		= 24, /* Missing security manifest */
+	IXGBE_ACI_RC_EBADSIG		= 25, /* Bad RSA signature */
+	IXGBE_ACI_RC_ESVN		= 26, /* SVN number prohibits this package */
+	IXGBE_ACI_RC_EBADMAN		= 27, /* Manifest hash mismatch */
+	IXGBE_ACI_RC_EBADBUF		= 28, /* Buffer hash mismatches manifest */
+	IXGBE_ACI_RC_EACCES_BMCU	= 29, /* BMC Update in progress */
+};
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+	ixgbe_aci_opc_get_ver				= 0x0001,
+	ixgbe_aci_opc_driver_ver			= 0x0002,
+	ixgbe_aci_opc_get_exp_err			= 0x0005,
+
+	/* resource ownership */
+	ixgbe_aci_opc_req_res				= 0x0008,
+	ixgbe_aci_opc_release_res			= 0x0009,
+
+	/* device/function capabilities */
+	ixgbe_aci_opc_list_func_caps			= 0x000A,
+	ixgbe_aci_opc_list_dev_caps			= 0x000B,
+
+	/* safe disable of RXEN */
+	ixgbe_aci_opc_disable_rxen			= 0x000C,
+
+	/* FW events */
+	ixgbe_aci_opc_get_fw_event			= 0x0014,
+
+	/* PHY commands */
+	ixgbe_aci_opc_get_phy_caps			= 0x0600,
+	ixgbe_aci_opc_set_phy_cfg			= 0x0601,
+	ixgbe_aci_opc_restart_an			= 0x0605,
+	ixgbe_aci_opc_get_link_status			= 0x0607,
+	ixgbe_aci_opc_set_event_mask			= 0x0613,
+	ixgbe_aci_opc_get_link_topo			= 0x06E0,
+	ixgbe_aci_opc_get_link_topo_pin			= 0x06E1,
+	ixgbe_aci_opc_read_i2c				= 0x06E2,
+	ixgbe_aci_opc_write_i2c				= 0x06E3,
+	ixgbe_aci_opc_read_mdio				= 0x06E4,
+	ixgbe_aci_opc_write_mdio			= 0x06E5,
+	ixgbe_aci_opc_set_gpio_by_func			= 0x06E6,
+	ixgbe_aci_opc_get_gpio_by_func			= 0x06E7,
+	ixgbe_aci_opc_set_gpio				= 0x06EC,
+	ixgbe_aci_opc_get_gpio				= 0x06ED,
+	ixgbe_aci_opc_sff_eeprom			= 0x06EE,
+	ixgbe_aci_opc_prog_topo_dev_nvm			= 0x06F2,
+	ixgbe_aci_opc_read_topo_dev_nvm			= 0x06F3,
+
+	/* NVM commands */
+	ixgbe_aci_opc_nvm_read				= 0x0701,
+	ixgbe_aci_opc_nvm_erase				= 0x0702,
+	ixgbe_aci_opc_nvm_write				= 0x0703,
+	ixgbe_aci_opc_nvm_cfg_read			= 0x0704,
+	ixgbe_aci_opc_nvm_cfg_write			= 0x0705,
+	ixgbe_aci_opc_nvm_checksum			= 0x0706,
+	ixgbe_aci_opc_nvm_write_activate		= 0x0707,
+	ixgbe_aci_opc_nvm_sr_dump			= 0x0707,
+	ixgbe_aci_opc_nvm_save_factory_settings		= 0x0708,
+	ixgbe_aci_opc_nvm_update_empr			= 0x0709,
+	ixgbe_aci_opc_nvm_pkg_data			= 0x070A,
+	ixgbe_aci_opc_nvm_pass_component_tbl		= 0x070B,
+	ixgbe_aci_opc_nvm_sanitization			= 0x070C,
+
+	/* Alternate Structure Commands */
+	ixgbe_aci_opc_write_alt_direct			= 0x0900,
+	ixgbe_aci_opc_write_alt_indirect		= 0x0901,
+	ixgbe_aci_opc_read_alt_direct			= 0x0902,
+	ixgbe_aci_opc_read_alt_indirect			= 0x0903,
+	ixgbe_aci_opc_done_alt_write			= 0x0904,
+	ixgbe_aci_opc_clear_port_alt_write		= 0x0906,
+
+	/* debug commands */
+	ixgbe_aci_opc_debug_dump_internals		= 0xFF08,
+
+	/* SystemDiagnostic commands */
+	ixgbe_aci_opc_set_health_status_config		= 0xFF20,
+	ixgbe_aci_opc_get_supported_health_status_codes	= 0xFF21,
+	ixgbe_aci_opc_get_health_status			= 0xFF22,
+	ixgbe_aci_opc_clear_health_status		= 0xFF23,
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define IXGBE_CHECK_STRUCT_LEN(n, X) enum ixgbe_static_assert_enum_##X \
+	{ ixgbe_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used to generate a compilation error if a variable-length
+ * structure is not exactly the correct length assuming a single element of
+ * the variable-length object as the last element of the structure. It gives
+ * a divide by zero error if the structure is not of the correct size,
+ * otherwise it creates an enum that is never used.
+ */
+#define IXGBE_CHECK_VAR_LEN_STRUCT_LEN(n, X, T) enum ixgbe_static_assert_enum_##X \
+	{ ixgbe_static_assert_##X = (n) / \
+	  (((sizeof(struct X) + sizeof(T)) == (n)) ? 1 : 0) }
+
+/* This macro is used to ensure that parameter structures (i.e. structures
+ * in the params union member of struct ixgbe_aci_desc) are 16 bytes in length.
+ *
+ * NOT intended to be used to check the size of an indirect command/response
+ * additional data buffer (e.g. struct foo) which should just happen to be 16
+ * bytes (instead, use IXGBE_CHECK_STRUCT_LEN(16, foo) for that).
+ */
+#define IXGBE_CHECK_PARAM_LEN(X)	IXGBE_CHECK_STRUCT_LEN(16, X)
+
+struct ixgbe_aci_cmd_generic {
+	__le32 param0;
+	__le32 param1;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_generic);
+
+/* Get version (direct 0x0001) */
+struct ixgbe_aci_cmd_get_ver {
+	__le32 rom_ver;
+	__le32 fw_build;
+	u8 fw_branch;
+	u8 fw_major;
+	u8 fw_minor;
+	u8 fw_patch;
+	u8 api_branch;
+	u8 api_major;
+	u8 api_minor;
+	u8 api_patch;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_ver);
+
+#define IXGBE_DRV_VER_STR_LEN_E610	32
+
+struct ixgbe_driver_ver {
+	u8 major_ver;
+	u8 minor_ver;
+	u8 build_ver;
+	u8 subbuild_ver;
+	u8 driver_string[IXGBE_DRV_VER_STR_LEN_E610];
+};
+
+/* Send driver version (indirect 0x0002) */
+struct ixgbe_aci_cmd_driver_ver {
+	u8 major_ver;
+	u8 minor_ver;
+	u8 build_ver;
+	u8 subbuild_ver;
+	u8 reserved[4];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_driver_ver);
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+	__le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED	0xFFFFFFFF
+	__le32 identifier;
+	u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_exp_err);
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT		180000
+#define IXGBE_CHANGE_LOCK_TIMEOUT	1000
+#define IXGBE_GLOBAL_CFG_LOCK_TIMEOUT	3000
+
+enum ixgbe_aci_res_access_type {
+	IXGBE_RES_READ = 1,
+	IXGBE_RES_WRITE
+};
+
+enum ixgbe_aci_res_ids {
+	IXGBE_NVM_RES_ID = 1,
+	IXGBE_SPD_RES_ID,
+	IXGBE_CHANGE_LOCK_RES_ID,
+	IXGBE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ixgbe_aci_cmd_req_res {
+	__le16 res_id;
+#define IXGBE_ACI_RES_ID_NVM		1
+#define IXGBE_ACI_RES_ID_SDP		2
+#define IXGBE_ACI_RES_ID_CHNG_LOCK	3
+#define IXGBE_ACI_RES_ID_GLBL_LOCK	4
+	__le16 access_type;
+#define IXGBE_ACI_RES_ACCESS_READ	1
+#define IXGBE_ACI_RES_ACCESS_WRITE	2
+
+	/* Upon successful completion, FW writes this value and driver is
+	 * expected to release resource before timeout. This value is provided
+	 * in milliseconds.
+	 */
+	__le32 timeout;
+#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS	3000
+#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS	180000
+#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS	1000
+#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS	3000
+	/* For SDP: pin ID of the SDP */
+	__le32 res_number;
+	/* Status is only used for IXGBE_ACI_RES_ID_GLBL_LOCK */
+	__le16 status;
+#define IXGBE_ACI_RES_GLBL_SUCCESS		0
+#define IXGBE_ACI_RES_GLBL_IN_PROG		1
+#define IXGBE_ACI_RES_GLBL_DONE			2
+	u8 reserved[2];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_req_res);
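A sketch of the intended ownership flow using this structure, with example_aci_send() standing in for the real ACI send path (an assumption, not an API in this patch); on success FW fills the timeout field with the time, in milliseconds, within which the resource must be released:

struct ixgbe_aci_cmd_req_res res = { 0 };

res.res_id = IXGBE_CPU_TO_LE16(IXGBE_ACI_RES_ID_NVM);
res.access_type = IXGBE_CPU_TO_LE16(IXGBE_ACI_RES_ACCESS_READ);

if (!example_aci_send(hw, ixgbe_aci_opc_req_res, &res)) {
	/* ... access the NVM before res.timeout (ms) elapses ... */
	example_aci_send(hw, ixgbe_aci_opc_release_res, &res);
}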
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ixgbe_aci_cmd_list_caps {
+	u8 cmd_flags;
+	u8 pf_index;
+	u8 reserved[2];
+	__le32 count;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_list_caps);
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ixgbe_aci_cmd_list_caps_elem {
+	__le16 cap;
+#define IXGBE_ACI_CAPS_VALID_FUNCTIONS			0x0005
+#define IXGBE_ACI_MAX_VALID_FUNCTIONS			0x8
+#define IXGBE_ACI_CAPS_VMDQ				0x0014
+#define IXGBE_ACI_CAPS_VSI				0x0017
+#define IXGBE_ACI_CAPS_DCB				0x0018
+#define IXGBE_ACI_CAPS_RSS				0x0040
+#define IXGBE_ACI_CAPS_RXQS				0x0041
+#define IXGBE_ACI_CAPS_TXQS				0x0042
+#define IXGBE_ACI_CAPS_MSIX				0x0043
+#define IXGBE_ACI_CAPS_FD				0x0045
+#define IXGBE_ACI_CAPS_1588				0x0046
+#define IXGBE_ACI_CAPS_MAX_MTU				0x0047
+#define IXGBE_ACI_CAPS_NVM_VER				0x0048
+#define IXGBE_ACI_CAPS_OROM_VER				0x004A
+#define IXGBE_ACI_CAPS_INLINE_IPSEC			0x0070
+#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS		0x0072
+#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE		0x0076
+#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT	0x0077
+#define IXGBE_ACI_CAPS_NVM_MGMT				0x0080
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0		0x0081
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1		0x0082
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2		0x0083
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3		0x0084
+#define IXGBE_ACI_CAPS_NEXT_CLUSTER_ID			0x0096
+	u8 major_ver;
+	u8 minor_ver;
+	/* Number of resources described by this capability */
+	__le32 number;
+	/* Only meaningful for some types of resources */
+	__le32 logical_id;
+	/* Only meaningful for some types of resources */
+	__le32 phys_id;
+	__le64 rsvd1;
+	__le64 rsvd2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(32, ixgbe_aci_cmd_list_caps_elem);
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+	u8 lport_num;
+	u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_disable_rxen);
+
+/* Get FW Event (indirect 0x0014) */
+struct ixgbe_aci_cmd_get_fw_event {
+	__le16 fw_buf_status;
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_OBTAINED	BIT(0)
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_PENDING	BIT(1)
+	u8 rsvd[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_fw_event);
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+	u8 lport_num;
+	u8 reserved;
+	__le16 param0;
+	/* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM		BIT(0)
+	/* 18.1 - 18.3 : Report mode
+	 * 000b - Report topology capabilities, without media
+	 * 001b - Report topology capabilities, with media
+	 * 010b - Report Active configuration
+	 * 011b - Report PHY Type and FEC mode capabilities
+	 * 100b - Report Default capabilities
+	 */
+#define IXGBE_ACI_REPORT_MODE_S			1
+#define IXGBE_ACI_REPORT_MODE_M			(7 << IXGBE_ACI_REPORT_MODE_S)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA	0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA		BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG		BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG		BIT(3)
+	__le32 reserved1;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_phy_caps);
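For instance, requesting the active-configuration report sets the 3-bit report mode field (bits 18.1-18.3) to 010b; a minimal sketch of filling the command parameters:

struct ixgbe_aci_cmd_get_phy_caps cmd = { 0 };

/* IXGBE_ACI_REPORT_ACTIVE_CFG places 010b in the report mode field. */
cmd.param0 = IXGBE_CPU_TO_LE16(IXGBE_ACI_REPORT_ACTIVE_CFG);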
+
+/* This is #define of PHY type (Extended):
+ * The first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX		BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII		BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T		BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX		BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX		BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX		BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII		BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T		BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X		BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX		BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T		BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR		BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T		BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA		BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR		BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR		BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1	BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC	BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C		BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_T		BIT_ULL(19)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR		BIT_ULL(20)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR_S		BIT_ULL(21)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR1		BIT_ULL(22)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_SR		BIT_ULL(23)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_LR		BIT_ULL(24)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR		BIT_ULL(25)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR_S		BIT_ULL(26)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR1		BIT_ULL(27)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC	BIT_ULL(28)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_C2C		BIT_ULL(29)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX		29
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T		BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII		BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII		BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII	BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII		BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII	BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII		BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII		BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX		61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+	__le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+	__le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+	u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE			BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE			BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE			BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK				BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE				BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL			BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM				BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC			BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK				MAKEMASK(0xff, 0)
+	u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG	BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28			BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73			BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37			BIT(3)
+	__le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX			BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T			BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T			BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX		BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR			BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR			BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T			BIT(11)
+	__le16 eeer_value;
+	u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+	u8 phy_fw_ver[8];
+	u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN		BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ		BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ		BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ			BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ		BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN		BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN		BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK				MAKEMASK(0xdf, 0)
+	u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE		BIT(0)
+	u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE		3
+	u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS		0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS		0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT			1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE	BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE	BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR		BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR		BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM		BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER		BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS		0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS		0x86
+	u8 qualified_module_count;
+	u8 rsvd2[7];	/* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX			16
+	struct {
+		u8 v_oui[3];
+		u8 rsvd3;
+		u8 v_part[16];
+		__le32 v_rev;
+		__le64 rsvd4;
+	} qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+IXGBE_CHECK_STRUCT_LEN(560, ixgbe_aci_cmd_get_phy_caps_data);
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+	u8 reserved[8];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_phy_cfg);
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+	__le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+	__le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+	u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK		MAKEMASK(0xef, 0)
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY	BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY	BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER		BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK			BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT	BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM			BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC		BIT(7)
+	u8 low_power_ctrl_an;
+	__le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+	__le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+	u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+	u8 module_compliance_enforcement;
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_set_phy_cfg_data);
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+	u8 reserved[2];
+	u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART	BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE	BIT(2)
+	u8 reserved2[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_restart_an);
+
+#pragma pack(1)
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+	u8 reserved[2];
+	u8 cmd_flags;
+#define IXGBE_ACI_LSE_M				0x3
+#define IXGBE_ACI_LSE_NOP			0x0
+#define IXGBE_ACI_LSE_DIS			0x2
+#define IXGBE_ACI_LSE_ENA			0x3
+	/* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED		0x1
+	u8 reserved2[5];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_status);
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+	u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT		BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT		BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT		BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT		BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT	BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA	BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA	BIT(7)
+	u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR				BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED			BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL		BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL	BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR		BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED		BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE	BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT		BIT(7)
+	u8 link_info;
+#define IXGBE_ACI_LINK_UP		BIT(0)	/* Link Status */
+#define IXGBE_ACI_LINK_FAULT		BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX		BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX		BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE	BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT		BIT(5)	/* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE	BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT		BIT(7)
+	u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED		BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY		BIT(1)
+#define IXGBE_ACI_PD_FAULT		BIT(2)	/* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN		BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER		BIT(4)	/* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX		BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX		BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE	BIT(7)
+	u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM	BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS	BIT(1)	/* Excessive Link Errors */
+	/* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_S		2
+#define IXGBE_ACI_LINK_TX_M		(0x03 << IXGBE_ACI_LINK_TX_S)
+#define IXGBE_ACI_LINK_TX_ACTIVE	0
+#define IXGBE_ACI_LINK_TX_DRAINED	1
+#define IXGBE_ACI_LINK_TX_FLUSHED	3
+	u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL	BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT	BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL	BIT(2)
+#define IXGBE_ACI_LINK_LB_PHY_IDX_S	3
+#define IXGBE_ACI_LINK_LB_PHY_IDX_M	(0x7 << IXGBE_ACI_LINK_LB_PHY_IDX_S)
+	__le16 max_frame_size;
+	u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN		BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN	BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN	BIT(2)
+#define IXGBE_ACI_FEC_MASK			MAKEMASK(0x7, 0)
+	/* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_S		3
+#define IXGBE_ACI_CFG_PACING_M		(0xF << IXGBE_ACI_CFG_PACING_S)
+#define IXGBE_ACI_CFG_PACING_TYPE_M	BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG	0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED	IXGBE_ACI_CFG_PACING_TYPE_M
+	/* External Device Power Ability */
+	u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M			0x3F
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH	0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH		1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1		0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2		1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3		2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4		3
+	__le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M			0x7FF
+#define IXGBE_ACI_LINK_SPEED_10MB		BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB		BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB		BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB		BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB		BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB		BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB		BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB		BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB		BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB		BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB		BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB		BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN		BIT(15)
+	__le16 reserved3; /* Aligns next field to 8-byte boundary */
+	u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN	BIT(0) /* RS 272 FEC enabled */
+	u8 reserved4;
+	__le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+	__le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+	/* Get link status version 2 link partner data */
+	__le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+	__le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+	u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP	BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP	BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP	BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP	BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP	BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP	BIT(5)
+	u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ	BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ	BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ	BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ	BIT(3)
+	u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV		BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV		BIT(1)
+	u8 reserved5[5];
+};
+#pragma pack()
+
+IXGBE_CHECK_STRUCT_LEN(56, ixgbe_aci_cmd_get_link_status_data);
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+	u8	reserved[8];
+	__le16	event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN		BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA		BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT		BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM	BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS	BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT	BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED	BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL	BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED	BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT	BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT	BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL	BIT(12)
+	u8	reserved1[6];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_event_mask);
+
+struct ixgbe_aci_cmd_link_topo_params {
+	u8 lport_num;
+	u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID	BIT(0)
+	u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_S		0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M		(0xF << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY	0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL	1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL	2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL	3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED	4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL	5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE	6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ	7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM	8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS	11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S		4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M		\
+				(0xF << IXGBE_ACI_LINK_TOPO_NODE_CTX_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL			0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD			1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT			2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE			3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE		4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS		5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS	6
+	u8 index;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_link_topo_params);
+
+struct ixgbe_aci_cmd_link_topo_addr {
+	struct ixgbe_aci_cmd_link_topo_params topo_params;
+	__le16 handle;
+#define IXGBE_ACI_LINK_TOPO_HANDLE_S	0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_M	(0x3FF << IXGBE_ACI_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M		BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM		BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ	0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S		0
+/* In case of a Mezzanine type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_NODE_M	\
+				(0x3F << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S	6
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_M	\
+				(0x7 << IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_LOM_NODE_M	\
+				(0x1FF << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_link_topo_addr);
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+	struct ixgbe_aci_cmd_link_topo_addr addr;
+	u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575		0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS		0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC	0x49
+	u8 rsvd[9];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_topo);
+
+/* Get Link Topology Pin (direct, 0x06E1) */
+struct ixgbe_aci_cmd_get_link_topo_pin {
+	struct ixgbe_aci_cmd_link_topo_addr addr;
+	u8 input_io_params;
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_FUNC_S	0
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_FUNC_M	\
+				(0x1F << IXGBE_ACI_LINK_TOPO_INPUT_IO_FUNC_S)
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GPIO	0
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RESET_N	1
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_INT_N	2
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_PRESENT_N	3
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_DIS	4
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_MODSEL_N	5
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LPMODE	6
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_FAULT	7
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RX_LOSS	8
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS0		9
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS1		10
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_EEPROM_WP	11
+/* 12 repeats intentionally due to two different uses depending on context */
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LED		12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RED_LED	12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GREEN_LED	13
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_BLUE_LED	14
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_S	5
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_M	\
+			(0x7 << IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_S)
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_GPIO	3
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+	u8 output_io_params;
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_IO_FUNC_S	0
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_IO_FUNC_M	\
+			(0x1F << IXGBE_ACI_LINK_TOPO_INPUT_IO_FUNC_S)
+/* Use IXGBE_ACI_LINK_TOPO_IO_FUNC_* for the non-numerical options */
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_IO_TYPE_S	5
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_IO_TYPE_M	\
+			(0x7 << IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_S)
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+	u8 output_io_flags;
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_SPEED_S	0
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_SPEED_M	\
+			(0x7 << IXGBE_ACI_LINK_TOPO_OUTPUT_SPEED_S)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_INT_S	3
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_INT_M	\
+			(0x3 << IXGBE_ACI_LINK_TOPO_OUTPUT_INT_S)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_POLARITY	BIT(5)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_VALUE	BIT(6)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_DRIVEN	BIT(7)
+	u8 rsvd[7];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_topo_pin);
+
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
+struct ixgbe_aci_cmd_i2c {
+	struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+	__le16 i2c_addr;
+	u8 i2c_params;
+#define IXGBE_ACI_I2C_DATA_SIZE_S		0
+#define IXGBE_ACI_I2C_DATA_SIZE_M		(0xF << IXGBE_ACI_I2C_DATA_SIZE_S)
+#define IXGBE_ACI_I2C_ADDR_TYPE_M		BIT(4)
+#define IXGBE_ACI_I2C_ADDR_TYPE_7BIT		0
+#define IXGBE_ACI_I2C_ADDR_TYPE_10BIT		IXGBE_ACI_I2C_ADDR_TYPE_M
+#define IXGBE_ACI_I2C_DATA_OFFSET_S		5
+#define IXGBE_ACI_I2C_DATA_OFFSET_M		(0x3 << IXGBE_ACI_I2C_DATA_OFFSET_S)
+#define IXGBE_ACI_I2C_USE_REPEATED_START	BIT(7)
+	u8 rsvd;
+	__le16 i2c_bus_addr;
+#define IXGBE_ACI_I2C_ADDR_7BIT_MASK		0x7F
+#define IXGBE_ACI_I2C_ADDR_10BIT_MASK		0x3FF
+	u8 i2c_data[4]; /* Used only by write command, reserved in read. */
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_i2c);
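The i2c_params byte packs the transfer size, address width and repeated-start behaviour. A hedged sketch of encoding a 2-byte read at a 7-bit address (field values and helper name are illustrative only):

/* Illustrative sketch: i2c_params for a 2-byte transfer, 7-bit addressing. */
static inline u8 example_i2c_params(void)
{
	u8 params = 0;

	params |= (2 << IXGBE_ACI_I2C_DATA_SIZE_S) & IXGBE_ACI_I2C_DATA_SIZE_M;
	params |= IXGBE_ACI_I2C_ADDR_TYPE_7BIT;	/* evaluates to 0 */
	params |= IXGBE_ACI_I2C_USE_REPEATED_START;
	return params;
}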
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ixgbe_aci_cmd_read_i2c_resp {
+	u8 i2c_data[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_i2c_resp);
+
+/* Read/Write MDIO (direct, 0x06E4/0x06E5) */
+struct ixgbe_aci_cmd_mdio {
+	struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+	u8 mdio_device_addr;
+#define IXGBE_ACI_MDIO_DEV_S		0
+#define IXGBE_ACI_MDIO_DEV_M		(0x1F << IXGBE_ACI_MDIO_DEV_S)
+#define IXGBE_ACI_MDIO_CLAUSE_22	BIT(5)
+#define IXGBE_ACI_MDIO_CLAUSE_45	BIT(6)
+	u8 mdio_bus_address;
+#define IXGBE_ACI_MDIO_BUS_ADDR_S 0
+#define IXGBE_ACI_MDIO_BUS_ADDR_M (0x1F << IXGBE_ACI_MDIO_BUS_ADDR_S)
+	__le16 offset;
+	__le16 data; /* Input in write cmd, output in read cmd. */
+	u8 rsvd1[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_mdio);
+
+/* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */
+struct ixgbe_aci_cmd_gpio_by_func {
+	struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+	u8 io_func_num;
+#define IXGBE_ACI_GPIO_FUNC_S	0
+#define IXGBE_ACI_GPIO_FUNC_M	(0x1F << IXGBE_ACI_GPIO_FUNC_S)
+	u8 io_value; /* Input in write cmd, output in read cmd. */
+#define IXGBE_ACI_GPIO_ON	BIT(0)
+#define IXGBE_ACI_GPIO_OFF	0
+	u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio_by_func);
+
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ixgbe_aci_cmd_gpio {
+	__le16 gpio_ctrl_handle;
+#define IXGBE_ACI_GPIO_HANDLE_S	0
+#define IXGBE_ACI_GPIO_HANDLE_M	(0x3FF << IXGBE_ACI_GPIO_HANDLE_S)
+	u8 gpio_num;
+	u8 gpio_val;
+	u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio);
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+	u8 lport_num;
+	u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID		BIT(0)
+	__le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M		0x7F
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M		0x3FF
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M		BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT		0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT		IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_S		11
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_M		(0x3 << IXGBE_ACI_SFF_PAGE_BANK_CTRL_S)
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE	0
+#define IXGBE_ACI_SFF_UPDATE_PAGE		1
+#define IXGBE_ACI_SFF_UPDATE_BANK		2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK		3
+#define IXGBE_ACI_SFF_IS_WRITE			BIT(15)
+	__le16 i2c_offset;
+	u8 module_bank;
+	u8 module_page;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_sff_eeprom);
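The i2c_bus_addr word combines the module address with the page/bank update policy and the read/write direction. A sketch under the assumption of a CPU_TO_LE16 osdep helper and a 7-bit 0x50 module address (both illustrative):

/* Illustrative sketch: SFF write to 7-bit address 0x50 with a page update. */
static inline __le16 example_sff_bus_addr(void)
{
	u16 v = 0x50 & IXGBE_ACI_SFF_I2CBUS_7BIT_M;

	v |= IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT;
	v |= IXGBE_ACI_SFF_UPDATE_PAGE << IXGBE_ACI_SFF_PAGE_BANK_CTRL_S;
	v |= IXGBE_ACI_SFF_IS_WRITE;
	return CPU_TO_LE16(v);	/* CPU_TO_LE16 assumed from the osdep layer */
}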
+
+/* Program Topology Device NVM (direct, 0x06F2) */
+struct ixgbe_aci_cmd_prog_topo_dev_nvm {
+	struct ixgbe_aci_cmd_link_topo_params topo_params;
+	u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_prog_topo_dev_nvm);
+
+/* Read Topology Device NVM (direct, 0x06F3) */
+struct ixgbe_aci_cmd_read_topo_dev_nvm {
+	struct ixgbe_aci_cmd_link_topo_params topo_params;
+	__le32 start_address;
+#define IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE 8
+	u8 data_read[IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_topo_dev_nvm);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET	0xFFFFFF
+	__le16 offset_low;
+	u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+	u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD		BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ		BIT(0)	/* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVATION_S	1 /* Used by NVM Write Activate only */
+#define IXGBE_ACI_NVM_PRESERVATION_M	(3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_NO_PRESERVATION	(0 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_ALL	BIT(1)
+#define IXGBE_ACI_NVM_FACTORY_DEFAULT	(2 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED	(3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM	BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM	BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST	BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE	BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV	BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_MASK	MAKEMASK(0x7, 3)
+#define IXGBE_ACI_NVM_FLASH_ONLY		BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M		MAKEMASK(0x3, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG		0
+#define IXGBE_ACI_NVM_PERST_FLAG	1
+#define IXGBE_ACI_NVM_EMPR_FLAG		2
+#define IXGBE_ACI_NVM_EMPR_ENA		BIT(0) /* Write Activate reply only */
+	/* For Write Activate, several flags are sent as part of a separate
+	 * flags2 field using a separate byte. For simplicity of the software
+	 * interface, we pass the flags as a 16 bit value so these flags are
+	 * all offset by 8 bits
+	 */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR	BIT(8) /* NVM Write Activate only */
+	__le16 module_typeid;
+	__le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN	0xFFFF
+	__le32 addr_high;
+	__le32 addr_low;
+};
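The in-struct comment above notes that Write Activate's second flags byte reuses offset_high, with the software interface treating both bytes as one 16-bit value. A minimal sketch of that split (the helper is illustrative, not the driver's actual routine):

/* Illustrative sketch: split 16-bit Write Activate flags across both bytes. */
static inline void example_nvm_write_activate_flags(struct ixgbe_aci_cmd_nvm *cmd,
						    u16 flags)
{
	cmd->cmd_flags = (u8)(flags & 0xFF);
	/* Bits 8..15, e.g. IXGBE_ACI_NVM_ACTIV_REQ_EMPR, land in flags2. */
	cmd->offset_high = (u8)(flags >> 8);
}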
+
+/* NVM Module_Type IDs and the offset/read_len values needed for
+ * struct ixgbe_aci_cmd_nvm.
+ */
+#define IXGBE_ACI_NVM_SECTOR_UNIT		4096 /* In Bytes */
+#define IXGBE_ACI_NVM_WORD_UNIT			2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_START_POINT		0
+#define IXGBE_ACI_NVM_EMP_SR_PTR_OFFSET		0x90
+#define IXGBE_ACI_NVM_EMP_SR_PTR_RD_LEN		2 /* In Bytes */
+#define IXGBE_ACI_NVM_EMP_SR_PTR_M		MAKEMASK(0x7FFF, 0)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_S		15
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_M		BIT(15)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_SECTOR	1
+
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_OFFSET	0x46
+#define IXGBE_ACI_NVM_LLDP_CFG_HEADER_LEN	2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_RD_LEN	2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_LLDP_PRESERVED_MOD_ID		0x129
+#define IXGBE_ACI_NVM_CUR_LLDP_PERSIST_RD_OFFSET	2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_STATUS_M			MAKEMASK(0xF, 0)
+#define IXGBE_ACI_NVM_LLDP_STATUS_M_LEN			4 /* In Bits */
+#define IXGBE_ACI_NVM_LLDP_STATUS_RD_LEN		4 /* In Bytes */
+
+#define IXGBE_ACI_NVM_MINSREV_MOD_ID		0x130
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm);
+
+/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
+ * type field is excluded from the section when reading and writing from
+ * a module using the module_typeid field with these ACI commands.
+ */
+struct ixgbe_aci_cmd_nvm_minsrev {
+	__le16 length;
+	__le16 validity;
+#define IXGBE_ACI_NVM_MINSREV_NVM_VALID		BIT(0)
+#define IXGBE_ACI_NVM_MINSREV_OROM_VALID	BIT(1)
+	__le16 nvm_minsrev_l;
+	__le16 nvm_minsrev_h;
+	__le16 orom_minsrev_l;
+	__le16 orom_minsrev_h;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_nvm_minsrev);
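Reading the structure above therefore means issuing an 0x0701 NVM Read with module_typeid set to the MinSRev module. A hedged sketch of the descriptor fields involved (the CPU_TO_LE16 helper and the exact offset/length choices are assumptions for illustration):

/* Illustrative sketch: parameters for an NVM Read of the MinSRev module. */
static inline void example_prep_minsrev_read(struct ixgbe_aci_cmd_nvm *cmd)
{
	cmd->module_typeid = CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_MOD_ID);
	cmd->offset_low = 0;
	cmd->offset_high = 0;
	cmd->length = CPU_TO_LE16(sizeof(struct ixgbe_aci_cmd_nvm_minsrev));
	cmd->cmd_flags = IXGBE_ACI_NVM_LAST_CMD;
	/* addr_high/addr_low receive the response buffer address. */
}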
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+struct ixgbe_aci_cmd_nvm_cfg {
+	u8	cmd_flags;
+#define IXGBE_ACI_ANVM_MULTIPLE_ELEMS	BIT(0)
+#define IXGBE_ACI_ANVM_IMMEDIATE_FIELD	BIT(1)
+#define IXGBE_ACI_ANVM_NEW_CFG		BIT(2)
+	u8	reserved;
+	__le16 count;
+	__le16 id;
+	u8 reserved1[2];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_cfg);
+
+struct ixgbe_aci_cmd_nvm_cfg_data {
+	__le16 field_id;
+	__le16 field_options;
+	__le16 field_value;
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_nvm_cfg_data);
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+	u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY	BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC	BIT(1)
+	u8 rsvd;
+	__le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT	0xBABA
+	u8 rsvd2[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_checksum);
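When the verify flag is set, the response's checksum field is expected to read back as 0xBABA. A sketch of that check, assuming an LE16_TO_CPU osdep helper:

/* Illustrative sketch: evaluate a Verify NVM Checksum response. */
static inline bool
example_nvm_checksum_ok(const struct ixgbe_aci_cmd_nvm_checksum *resp)
{
	return LE16_TO_CPU(resp->checksum) == IXGBE_ACI_NVM_CHECKSUM_CORRECT;
}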
+
+/* Used for NVM Sanitization command - 0x070C */
+struct ixgbe_aci_cmd_nvm_sanitization {
+	u8 cmd_flags;
+#define IXGBE_ACI_SANITIZE_REQ_READ			0
+#define IXGBE_ACI_SANITIZE_REQ_OPERATE			BIT(0)
+
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_BITS	0
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_STATE	BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR	0
+	u8 values;
+#define IXGBE_ACI_SANITIZE_NVM_BITS_HOST_CLEAN_SUPPORT	BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_BITS_BMC_CLEAN_SUPPORT	BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_DONE	BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_SUCCESS	BIT(1)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_DONE	BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_SUCCESS	BIT(3)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE	BIT(0)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS	BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE	BIT(2)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS	BIT(3)
+	u8 reserved[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_sanitization);
+
+/* Write/Read Alternate - Direct (direct 0x0900/0x0902) */
+struct ixgbe_aci_cmd_read_write_alt_direct {
+	__le32 dword0_addr;
+	__le32 dword0_value;
+	__le32 dword1_addr;
+	__le32 dword1_value;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_direct);
+
+/* Write/Read Alternate - Indirect (indirect 0x0901/0x0903) */
+struct ixgbe_aci_cmd_read_write_alt_indirect {
+	__le32 base_dword_addr;
+	__le32 num_dwords;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_indirect);
+
+/* Done Alternate Write (direct 0x0904) */
+struct ixgbe_aci_cmd_done_alt_write {
+	u8 flags;
+#define IXGBE_ACI_CMD_UEFI_BIOS_MODE	BIT(0)
+#define IXGBE_ACI_RESP_RESET_NEEDED	BIT(1)
+	u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_done_alt_write);
+
+/* Clear Port Alternate Write (direct 0x0906) */
+struct ixgbe_aci_cmd_clear_port_alt_write {
+	u8 reserved[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_port_alt_write);
+
+/* Get CGU abilities command response data structure (indirect 0x0C61) */
+struct ixgbe_aci_cmd_get_cgu_abilities {
+	u8 num_inputs;
+	u8 num_outputs;
+	u8 pps_dpll_idx;
+	u8 synce_dpll_idx;
+	__le32 max_in_freq;
+	__le32 max_in_phase_adj;
+	__le32 max_out_freq;
+	__le32 max_out_phase_adj;
+	u8 cgu_part_num;
+	u8 rsvd[3];
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_get_cgu_abilities);
+
+#define IXGBE_ACI_NODE_HANDLE_VALID	BIT(10)
+#define IXGBE_ACI_NODE_HANDLE		MAKEMASK(0x3FF, 0)
+#define IXGBE_ACI_DRIVING_CLK_NUM_SHIFT	10
+#define IXGBE_ACI_DRIVING_CLK_NUM	MAKEMASK(0x3F, IXGBE_ACI_DRIVING_CLK_NUM_SHIFT)
+
+/* Set CGU input config (direct 0x0C62) */
+struct ixgbe_aci_cmd_set_cgu_input_config {
+	u8 input_idx;
+	u8 flags1;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ	BIT(6)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY	BIT(7)
+	u8 flags2;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_INPUT_EN		BIT(5)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_ESYNC_EN		BIT(6)
+	u8 rsvd;
+	__le32 freq;
+	__le32 phase_delay;
+	u8 rsvd2[2];
+	__le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_input_config);
+
+/* Get CGU input config response descriptor structure (direct 0x0C63) */
+struct ixgbe_aci_cmd_get_cgu_input_config {
+	u8 input_idx;
+	u8 status;
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_LOS		BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_SCM_FAIL	BIT(1)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_CFM_FAIL	BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_GST_FAIL	BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_PFM_FAIL	BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL	BIT(6)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_CAP	BIT(7)
+	u8 type;
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_READ_ONLY		BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_GPS		BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_EXTERNAL		BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_PHY		BIT(6)
+	u8 flags1;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP	BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_1PPS_SUPP		BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP	BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_ANYFREQ		BIT(7)
+	__le32 freq;
+	__le32 phase_delay;
+	u8 flags2;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_INPUT_EN		BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_ESYNC_EN		BIT(6)
+	u8 rsvd[1];
+	__le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_input_config);
+
+/* Set CGU output config (direct 0x0C64) */
+struct ixgbe_aci_cmd_set_cgu_output_config {
+	u8 output_idx;
+	u8 flags;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_OUT_EN		BIT(0)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_ESYNC_EN		BIT(1)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_FREQ		BIT(2)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_PHASE		BIT(3)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_SRC_SEL	BIT(4)
+	u8 src_sel;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_DPLL_SRC_SEL		MAKEMASK(0x1F, 0)
+	u8 rsvd;
+	__le32 freq;
+	__le32 phase_delay;
+	u8 rsvd2[2];
+	__le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_output_config);
+
+/* Get CGU output config (direct 0x0C65) */
+struct ixgbe_aci_cmd_get_cgu_output_config {
+	u8 output_idx;
+	u8 flags;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_OUT_EN		BIT(0)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_EN		BIT(1)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_ABILITY		BIT(2)
+	u8 src_sel;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT	0
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL \
+	MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT	5
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE \
+	MAKEMASK(0x7, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT)
+	u8 rsvd;
+	__le32 freq;
+	__le32 src_freq;
+	u8 rsvd2[2];
+	__le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_output_config);
+
+/* Get CGU DPLL status (direct 0x0C66) */
+struct ixgbe_aci_cmd_get_cgu_dpll_status {
+	u8 dpll_num;
+	u8 ref_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_LOS		BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_SCM		BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_CFM		BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_GST		BIT(3)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_PFM		BIT(4)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_FAST_LOCK_EN		BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_ESYNC		BIT(6)
+	__le16 dpll_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_LOCK		BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO			BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO_READY		BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_FLHIT		BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_PSLHIT		BIT(7)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT	8
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL		\
+	MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT		13
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE		\
+	MAKEMASK(0x7, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT)
+	__le32 phase_offset_h;
+	__le32 phase_offset_l;
+	u8 eec_mode;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_1		0xA
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_2		0xB
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN		0xF
+	u8 rsvd[1];
+	__le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_dpll_status);
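dpll_state packs lock and holdover flags together with the selected clock reference and the DPLL mode. A sketch of pulling out the selected reference index, again assuming an LE16_TO_CPU osdep helper:

/* Illustrative sketch: selected clock reference from a DPLL status response. */
static inline u8
example_dpll_selected_ref(const struct ixgbe_aci_cmd_get_cgu_dpll_status *resp)
{
	u16 state = LE16_TO_CPU(resp->dpll_state);

	return (u8)((state & IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL) >>
		    IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT);
}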
+
+/* Set CGU DPLL config (direct 0x0C67) */
+struct ixgbe_aci_cmd_set_cgu_dpll_config {
+	u8 dpll_num;
+	u8 ref_state;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_LOS	BIT(0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_SCM	BIT(1)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_CFM	BIT(2)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_GST	BIT(3)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_PFM	BIT(4)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN	BIT(5)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC	BIT(6)
+	u8 rsvd;
+	u8 config;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_CLK_REF_SEL	MAKEMASK(0x1F, 0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_MODE		MAKEMASK(0x7, 5)
+	u8 rsvd2[8];
+	u8 eec_mode;
+	u8 rsvd3[1];
+	__le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_dpll_config);
+
+/* Set CGU reference priority (direct 0x0C68) */
+struct ixgbe_aci_cmd_set_cgu_ref_prio {
+	u8 dpll_num;
+	u8 ref_idx;
+	u8 ref_priority;
+	u8 rsvd[11];
+	__le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_ref_prio);
+
+/* Get CGU reference priority (direct 0x0C69) */
+struct ixgbe_aci_cmd_get_cgu_ref_prio {
+	u8 dpll_num;
+	u8 ref_idx;
+	u8 ref_priority; /* Valid only in response */
+	u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_ref_prio);
+
+/* Get CGU info (direct 0x0C6A) */
+struct ixgbe_aci_cmd_get_cgu_info {
+	__le32 cgu_id;
+	__le32 cgu_cfg_ver;
+	__le32 cgu_fw_ver;
+	u8 node_part_num;
+	u8 dev_rev;
+	__le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_info);
+
+/* Debug Dump Internal Data (indirect 0xFF08) */
+struct ixgbe_aci_cmd_debug_dump_internals {
+	__le16 cluster_id; /* Expresses next cluster ID in response */
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_LINK		0
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE	1
+	__le16 table_id; /* Used only for non-memory clusters */
+	__le32 idx; /* In table entries for tables, in bytes for memory */
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_debug_dump_internals);
+
+/* Set Health Status (direct 0xFF20) */
+struct ixgbe_aci_cmd_set_health_status_config {
+	u8 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_SET_PF_SPECIFIC_MASK	BIT(0)
+#define IXGBE_ACI_HEALTH_STATUS_SET_ALL_PF_MASK		BIT(1)
+#define IXGBE_ACI_HEALTH_STATUS_SET_GLOBAL_MASK		BIT(2)
+	u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_health_status_config);
+
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT		0x101
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_TYPE			0x102
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_QUAL			0x103
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_COMM			0x104
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_CONFLICT		0x105
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_NOT_PRESENT		0x106
+#define IXGBE_ACI_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED		0x107
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT		0x108
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE	0x109
+#define IXGBE_ACI_HEALTH_STATUS_ERR_INVALID_LINK_CFG		0x10B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_ACCESS			0x10C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_UNREACHABLE		0x10D
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED	0x10F
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PARALLEL_FAULT		0x110
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED	0x111
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST_TOPO		0x112
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST			0x113
+#define IXGBE_ACI_HEALTH_STATUS_ERR_TOPO_CONFLICT		0x114
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_HW_ACCESS		0x115
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_RUNTIME		0x116
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DNL_INIT			0x117
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_NVM_PROG		0x120
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_FW_LOAD			0x121
+#define IXGBE_ACI_HEALTH_STATUS_INFO_RECOVERY			0x500
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FLASH_ACCESS		0x501
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_AUTH			0x502
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_AUTH			0x503
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DDP_AUTH			0x504
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_COMPAT			0x505
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_COMPAT			0x506
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION		0x507
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION		0x508
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DCB_MIB			0x509
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MNG_TIMEOUT			0x50A
+#define IXGBE_ACI_HEALTH_STATUS_ERR_BMC_RESET			0x50B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_MNG_FAIL		0x50C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL		0x50D
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_LOOP			0x1000
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_PFR_FAIL			0x1001
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_FAIL_AQ		0x1002
+
+/* Get Health Status codes (indirect 0xFF21) */
+struct ixgbe_aci_cmd_get_supported_health_status_codes {
+	__le16 health_code_count;
+	u8 reserved[6];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_supported_health_status_codes);
+
+/* Get Health Status (indirect 0xFF22) */
+struct ixgbe_aci_cmd_get_health_status {
+	__le16 health_status_count;
+	u8 reserved[6];
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_health_status);
+
+/* Get Health Status event buffer entry, (0xFF22)
+ * repeated per reported health status
+ */
+struct ixgbe_aci_cmd_health_status_elem {
+	__le16 health_status_code;
+	__le16 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_PF		(0x1)
+#define IXGBE_ACI_HEALTH_STATUS_PORT		(0x2)
+#define IXGBE_ACI_HEALTH_STATUS_GLOBAL		(0x3)
+	__le32 internal_data1;
+#define IXGBE_ACI_HEALTH_STATUS_UNDEFINED_DATA	(0xDEADBEEF)
+	__le32 internal_data2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_health_status_elem);
+
+/* Clear Health Status (direct 0xFF23) */
+struct ixgbe_aci_cmd_clear_health_status {
+	__le32 reserved[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_health_status);
+
+/**
+ * struct ixgbe_aci_desc - Admin Command Interface (ACI) descriptor
+ * @flags: IXGBE_ACI_FLAG_* flags
+ * @opcode: Admin command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts via the Admin Command
+ * Interface (ACI). The firmware writes back onto the command descriptor and
+ * returns the result of the command. Asynchronous events that are not an
+ * immediate result of the command are written to the Admin Command Interface
+ * (ACI) using the same descriptor format. Descriptors are in little-endian
+ * notation with 32-bit words.
+ */
+struct ixgbe_aci_desc {
+	__le16 flags;
+	__le16 opcode;
+	__le16 datalen;
+	__le16 retval;
+	__le32 cookie_high;
+	__le32 cookie_low;
+	union {
+		u8 raw[16];
+		struct ixgbe_aci_cmd_generic generic;
+		struct ixgbe_aci_cmd_get_ver get_ver;
+		struct ixgbe_aci_cmd_driver_ver driver_ver;
+		struct ixgbe_aci_cmd_get_exp_err exp_err;
+		struct ixgbe_aci_cmd_req_res res_owner;
+		struct ixgbe_aci_cmd_list_caps get_cap;
+		struct ixgbe_aci_cmd_disable_rxen disable_rxen;
+		struct ixgbe_aci_cmd_get_fw_event get_fw_event;
+		struct ixgbe_aci_cmd_get_phy_caps get_phy;
+		struct ixgbe_aci_cmd_set_phy_cfg set_phy;
+		struct ixgbe_aci_cmd_restart_an restart_an;
+		struct ixgbe_aci_cmd_get_link_status get_link_status;
+		struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+		struct ixgbe_aci_cmd_get_link_topo get_link_topo;
+		struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin;
+		struct ixgbe_aci_cmd_i2c read_write_i2c;
+		struct ixgbe_aci_cmd_read_i2c_resp read_i2c_resp;
+		struct ixgbe_aci_cmd_mdio read_write_mdio;
+		struct ixgbe_aci_cmd_mdio read_mdio;
+		struct ixgbe_aci_cmd_mdio write_mdio;
+		struct ixgbe_aci_cmd_gpio_by_func read_write_gpio_by_func;
+		struct ixgbe_aci_cmd_gpio read_write_gpio;
+		struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
+		struct ixgbe_aci_cmd_prog_topo_dev_nvm prog_topo_dev_nvm;
+		struct ixgbe_aci_cmd_read_topo_dev_nvm read_topo_dev_nvm;
+		struct ixgbe_aci_cmd_nvm nvm;
+		struct ixgbe_aci_cmd_nvm_cfg nvm_cfg;
+		struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+		struct ixgbe_aci_cmd_read_write_alt_direct read_write_alt_direct;
+		struct ixgbe_aci_cmd_read_write_alt_indirect read_write_alt_indirect;
+		struct ixgbe_aci_cmd_done_alt_write done_alt_write;
+		struct ixgbe_aci_cmd_clear_port_alt_write clear_port_alt_write;
+		struct ixgbe_aci_cmd_debug_dump_internals debug_dump;
+		struct ixgbe_aci_cmd_set_health_status_config
+			set_health_status_config;
+		struct ixgbe_aci_cmd_get_supported_health_status_codes
+			get_supported_health_status_codes;
+		struct ixgbe_aci_cmd_get_health_status get_health_status;
+		struct ixgbe_aci_cmd_clear_health_status clear_health_status;
+		struct ixgbe_aci_cmd_nvm_sanitization nvm_sanitization;
+	} params;
+};
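The descriptor is a fixed 32-byte layout: a 16-byte header followed by the 16-byte params union. A minimal sketch of preparing a direct command, i.e. one with no external buffer attached (the helper name is illustrative, the driver presumably has its own descriptor-preparation routine, and CPU_TO_LE16/memset are assumed from the osdep layer):

/* Illustrative sketch: zero a descriptor and set the opcode of a direct cmd. */
static inline void example_fill_direct_desc(struct ixgbe_aci_desc *desc, u16 opcode)
{
	memset(desc, 0, sizeof(*desc));	/* memset assumed available via osdep */
	desc->opcode = CPU_TO_LE16(opcode);
	desc->datalen = 0;	/* direct command: no indirect buffer attached */
	/* flags and the cookie words are filled in by the send path. */
}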
+
+/* LKV-specific adapter context structures */
+
+struct ixgbe_link_status {
+	/* Refer to ixgbe_aci_phy_type for bits definition */
+	u64 phy_type_low;
+	u64 phy_type_high;
+	u8 topo_media_conflict;
+	u16 max_frame_size;
+	u16 link_speed;
+	u16 req_speeds;
+	u8 link_cfg_err;
+	u8 lse_ena;	/* Link Status Event notification */
+	u8 link_info;
+	u8 an_info;
+	u8 ext_info;
+	u8 fec_info;
+	u8 pacing;
+	/* Refer to the module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE] #defines
+	 * in struct ixgbe_aci_cmd_get_phy_caps.
+	 */
+	u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_common_caps {
+	/* Write CSR protection */
+	u64 wr_csr_prot;
+	u32 switching_mode;
+	/* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB		0x0
+
+	/* Manageability mode & supported protocols over MCTP */
+	u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M	0xF
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M		0xF0
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M	0xF00
+
+	u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD	BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM	BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM	BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI	BIT(3)
+
+	u32 os2bmc;
+	u32 valid_functions;
+	/* DCB capabilities */
+	u32 active_tc_bitmap;
+	u32 maxtc;
+
+	/* RSS related capabilities */
+	u32 rss_table_size;		/* 512 for PFs and 64 for VFs */
+	u32 rss_table_entry_width;	/* RSS Entry width in bits */
+
+	/* Tx/Rx queues */
+	u32 num_rxq;			/* Number/Total Rx queues */
+	u32 rxq_first_id;		/* First queue ID for Rx queues */
+	u32 num_txq;			/* Number/Total Tx queues */
+	u32 txq_first_id;		/* First queue ID for Tx queues */
+
+	/* MSI-X vectors */
+	u32 num_msix_vectors;
+	u32 msix_vector_first_id;
+
+	/* Max MTU for function or device */
+	u32 max_mtu;
+
+	/* WOL related */
+	u32 num_wol_proxy_fltr;
+	u32 wol_proxy_vsi_seid;
+
+	/* LED/SDP pin count */
+	u32 led_pin_num;
+	u32 sdp_pin_num;
+
+	/* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED	12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP	8
+	u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+	u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+	/* VMDQ */
+	u8 vmdq;			/* VMDQ supported */
+
+	/* EVB capabilities */
+	u8 evb_802_1_qbg;		/* Edge Virtual Bridging */
+	u8 evb_802_1_qbh;		/* Bridge Port Extension */
+
+	u8 dcb;
+	u8 iscsi;
+	u8 ieee_1588;
+	u8 mgmt_cem;
+
+	/* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M		BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M		BIT(1)
+#define IXGBE_PROXY_SUPPORT_M		BIT(2)
+	u8 apm_wol_support;
+	u8 acpi_prog_mthd;
+	u8 proxy_support;
+	bool sec_rev_disabled;
+	bool update_disabled;
+	bool nvm_unified_update;
+	bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED		BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED		BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT	BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT	BIT(5)
+	bool no_drop_policy_support;
+	/* PCIe reset avoidance */
+	bool pcie_reset_avoidance; /* false: not supported, true: supported */
+	/* Post update reset restriction */
+	bool reset_restrict_support; /* false: not supported, true: supported */
+
+	/* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT	4
+	u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+	u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+	u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S	8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M	\
+		MAKEMASK(0xFF, IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S)
+	bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN	BIT(0)
+	bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN	BIT(1)
+	bool next_cluster_id_support;
+};
+
+/* IEEE 1588 TIME_SYNC specific info */
+/* Function specific definitions */
+#define IXGBE_TS_FUNC_ENA_M		BIT(0)
+#define IXGBE_TS_SRC_TMR_OWND_M		BIT(1)
+#define IXGBE_TS_TMR_ENA_M		BIT(2)
+#define IXGBE_TS_TMR_IDX_OWND_S		4
+#define IXGBE_TS_TMR_IDX_OWND_M		BIT(4)
+#define IXGBE_TS_CLK_FREQ_S		16
+#define IXGBE_TS_CLK_FREQ_M		MAKEMASK(0x7, IXGBE_TS_CLK_FREQ_S)
+#define IXGBE_TS_CLK_SRC_S		20
+#define IXGBE_TS_CLK_SRC_M		BIT(20)
+#define IXGBE_TS_TMR_IDX_ASSOC_S	24
+#define IXGBE_TS_TMR_IDX_ASSOC_M	BIT(24)
+
+/* TIME_REF clock rate specification */
+enum ixgbe_time_ref_freq {
+	IXGBE_TIME_REF_FREQ_25_000	= 0,
+	IXGBE_TIME_REF_FREQ_122_880	= 1,
+	IXGBE_TIME_REF_FREQ_125_000	= 2,
+	IXGBE_TIME_REF_FREQ_153_600	= 3,
+	IXGBE_TIME_REF_FREQ_156_250	= 4,
+	IXGBE_TIME_REF_FREQ_245_760	= 5,
+
+	NUM_IXGBE_TIME_REF_FREQ
+};
+
+struct ixgbe_ts_func_info {
+	/* Function specific info */
+	enum ixgbe_time_ref_freq time_ref;
+	u8 clk_freq;
+	u8 clk_src;
+	u8 tmr_index_assoc;
+	u8 ena;
+	u8 tmr_index_owned;
+	u8 src_tmr_owned;
+	u8 tmr_ena;
+};
+
+/* Device specific definitions */
+#define IXGBE_TS_TMR0_OWNR_M		0x7
+#define IXGBE_TS_TMR0_OWND_M		BIT(3)
+#define IXGBE_TS_TMR1_OWNR_S		4
+#define IXGBE_TS_TMR1_OWNR_M		MAKEMASK(0x7, IXGBE_TS_TMR1_OWNR_S)
+#define IXGBE_TS_TMR1_OWND_M		BIT(7)
+#define IXGBE_TS_DEV_ENA_M		BIT(24)
+#define IXGBE_TS_TMR0_ENA_M		BIT(25)
+#define IXGBE_TS_TMR1_ENA_M		BIT(26)
+
+struct ixgbe_ts_dev_info {
+	/* Device specific info */
+	u32 ena_ports;
+	u32 tmr_own_map;
+	u32 tmr0_owner;
+	u32 tmr1_owner;
+	u8 tmr0_owned;
+	u8 tmr1_owned;
+	u8 ena;
+	u8 tmr0_ena;
+	u8 tmr1_ena;
+};
+
+#pragma pack(1)
+struct ixgbe_orom_civd_info {
+	u8 signature[4];	/* Must match ASCII '$CIV' characters */
+	u8 checksum;		/* Simple modulo 256 sum of all structure bytes must equal 0 */
+	__le32 combo_ver;	/* Combo Image Version number */
+	u8 combo_name_len;	/* Length of the unicode combo image version string, max of 32 */
+	__le16 combo_name[32];	/* Unicode string representing the Combo Image version */
+};
+#pragma pack()
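Per the checksum comment above, the $CIV record is valid when the byte-wise modulo-256 sum over the whole structure is zero. A sketch of that validation (helper name illustrative):

/* Illustrative sketch: validate the Combo Image Version record checksum. */
static inline bool example_civd_checksum_ok(const struct ixgbe_orom_civd_info *civd)
{
	const u8 *p = (const u8 *)civd;
	u8 sum = 0;
	u32 i;

	for (i = 0; i < sizeof(*civd); i++)
		sum += p[i];
	return sum == 0;
}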
+
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+	struct ixgbe_hw_common_caps common_cap;
+	u32 guar_num_vsi;
+	struct ixgbe_ts_func_info ts_func_info;
+	bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+	struct ixgbe_hw_common_caps common_cap;
+	u32 num_vsi_allocd_to_host;	/* Excluding EMP VSI */
+	u32 num_flow_director_fltr;	/* Number of FD filters available */
+	struct ixgbe_ts_dev_info ts_dev_info;
+	u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+	struct ixgbe_aci_desc desc;
+	u16 msg_len;
+	u16 buf_len;
+	u8 *msg_buf;
+};
+
+struct ixgbe_aci_info {
+	enum ixgbe_aci_err last_status;	/* last status of sent admin command */
+	struct ixgbe_lock lock;		/* admin command interface lock */
+};
+
+/* Minimum Security Revision information */
+struct ixgbe_minsrev_info {
+	u32 nvm;
+	u32 orom;
+	u8 nvm_valid : 1;
+	u8 orom_valid : 1;
+};
+
+/* Enumeration of which flash bank is desired to read from, either the active
+ * bank or the inactive bank. Used to abstract 1st and 2nd bank notion from
+ * code which just wants to read the active or inactive flash bank.
+ */
+enum ixgbe_bank_select {
+	IXGBE_ACTIVE_FLASH_BANK,
+	IXGBE_INACTIVE_FLASH_BANK,
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+	u8 major;			/* Major version of OROM */
+	u8 patch;			/* Patch version of OROM */
+	u16 build;			/* Build version of OROM */
+	u32 srev;			/* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+	u32 eetrack;
+	u32 srev;
+	u8 major;
+	u8 minor;
+};
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+	IXGBE_INVALID_FLASH_BANK,
+	IXGBE_1ST_FLASH_BANK,
+	IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+	u32 nvm_ptr;				/* Pointer to 1st NVM bank */
+	u32 nvm_size;				/* Size of NVM bank */
+	u32 orom_ptr;				/* Pointer to 1st OROM bank */
+	u32 orom_size;				/* Size of OROM bank */
+	u32 netlist_ptr;			/* Pointer to 1st Netlist bank */
+	u32 netlist_size;			/* Size of Netlist bank */
+	enum ixgbe_flash_bank nvm_bank;		/* Active NVM bank */
+	enum ixgbe_flash_bank orom_bank;	/* Active OROM bank */
+	enum ixgbe_flash_bank netlist_bank;	/* Active Netlist bank */
+};
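Combined with enum ixgbe_bank_select above, this lets flash readers ask for the active or inactive copy of a module without caring which physical bank that is. A sketch of the mapping for the NVM module (helper name illustrative):

/* Illustrative sketch: resolve active/inactive to a physical NVM bank. */
static inline enum ixgbe_flash_bank
example_resolve_nvm_bank(const struct ixgbe_bank_info *banks,
			 enum ixgbe_bank_select sel)
{
	if (sel == IXGBE_ACTIVE_FLASH_BANK)
		return banks->nvm_bank;
	/* The inactive copy is whichever physical bank is not active. */
	return banks->nvm_bank == IXGBE_1ST_FLASH_BANK ?
			IXGBE_2ND_FLASH_BANK : IXGBE_1ST_FLASH_BANK;
}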
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+	struct ixgbe_orom_info orom;		/* Option ROM version info */
+	struct ixgbe_nvm_info nvm;		/* NVM version information */
+	struct ixgbe_bank_info banks;		/* Flash Bank information */
+	u16 sr_words;				/* Shadow RAM size in words */
+	u32 flash_size;				/* Size of available flash in bytes */
+	u8 blank_nvm_mode;			/* is NVM empty (no FW present) */
+};
+
+#define IXGBE_NVM_CMD_READ		0x0000000B
+#define IXGBE_NVM_CMD_WRITE		0x0000000C
+
+/* NVM Access command */
+struct ixgbe_nvm_access_cmd {
+	u32 command;		/* NVM command: READ or WRITE */
+	u32 offset;			/* Offset to read/write, in bytes */
+	u32 data_size;		/* Size of data field, in bytes */
+};
+
+/* NVM Access data */
+struct ixgbe_nvm_access_data {
+	u32 regval;			/* Storage for register value */
+};
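The two structures above describe a register-style access: a command selecting READ or WRITE, a byte offset, and the size of the accompanying data block. A sketch of filling a read request (helper name illustrative):

/* Illustrative sketch: describe a 4-byte read through the NVM access API. */
static inline void example_nvm_access_read(struct ixgbe_nvm_access_cmd *cmd,
					   u32 offset)
{
	cmd->command = IXGBE_NVM_CMD_READ;
	cmd->offset = offset;
	cmd->data_size = sizeof(struct ixgbe_nvm_access_data);
}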
+
+#endif /* _IXGBE_TYPE_E610_H_ */
diff --git a/drivers/net/ixgbe/base/meson.build b/drivers/net/ixgbe/base/meson.build
index f6497014da..6d72c11504 100644
--- a/drivers/net/ixgbe/base/meson.build
+++ b/drivers/net/ixgbe/base/meson.build
@@ -9,8 +9,10 @@  sources = [
         'ixgbe_dcb_82598.c',
         'ixgbe_dcb_82599.c',
         'ixgbe_dcb.c',
+        'ixgbe_e610.c',
         'ixgbe_hv_vf.c',
         'ixgbe_mbx.c',
+        'ixgbe_osdep.c',
         'ixgbe_phy.c',
         'ixgbe_vf.c',
         'ixgbe_x540.c',