[v3,09/25] net/nfp: change the parameter of APIs

Message ID 20231026064324.177531-10-chaoyong.he@corigine.com (mailing list archive)
State Changes Requested, archived
Delegated to: Ferruh Yigit
Series: add the NFP vDPA PMD

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Chaoyong He Oct. 26, 2023, 6:43 a.m. UTC
  Change the parameter of some APIs from 'struct nfp_net_hw' to the
super class 'struct nfp_hw', in preparation for the upcoming common
library.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower.c           |  24 ++--
 .../net/nfp/flower/nfp_flower_representor.c   |  10 +-
 drivers/net/nfp/nfd3/nfp_nfd3_dp.c            |   4 +-
 drivers/net/nfp/nfdk/nfp_nfdk_dp.c            |   4 +-
 drivers/net/nfp/nfp_ethdev.c                  |   8 +-
 drivers/net/nfp/nfp_ethdev_vf.c               |  12 +-
 drivers/net/nfp/nfp_ipsec.c                   |   4 +-
 drivers/net/nfp/nfp_net_common.c              | 129 +++++++++---------
 drivers/net/nfp/nfp_net_common.h              |  32 ++---
 drivers/net/nfp/nfp_rxtx.c                    |   4 +-
 10 files changed, 116 insertions(+), 115 deletions(-)
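
Editor's note: for readers skimming the diff, a minimal before/after sketch of
the accessor change, taken from the nn_cfg_readl() hunk in nfp_net_common.h
below (the other nn_cfg_* accessors follow the same pattern; struct definitions
and unrelated code are omitted):

  /* Before: the accessor takes the subclass and reaches through 'super'. */
  static inline uint32_t
  nn_cfg_readl(struct nfp_net_hw *hw, uint32_t off)
  {
          return rte_le_to_cpu_32(nn_readl(hw->super.ctrl_bar + off));
  }

  /* Caller (e.g. nfp_net_common_init()) passes the subclass pointer. */
  hw->super.cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);

  /* After: the accessor takes the super class 'struct nfp_hw' directly,
   * so a caller holding a 'struct nfp_net_hw *hw' passes '&hw->super'.
   */
  static inline uint32_t
  nn_cfg_readl(struct nfp_hw *hw, uint32_t off)
  {
          return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
  }

  hw->super.cap = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP);
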
  

Patch

diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 8bb5914888..ccb579541b 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -36,7 +36,7 @@  nfp_pf_repr_enable_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		enabled_queues |= (1 << i);
 
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
 
 	enabled_queues = 0;
 
@@ -44,7 +44,7 @@  nfp_pf_repr_enable_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		enabled_queues |= (1 << i);
 
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
 }
 
 static void
@@ -58,8 +58,8 @@  nfp_pf_repr_disable_queues(struct rte_eth_dev *dev)
 	repr = dev->data->dev_private;
 	hw = repr->app_fw_flower->pf_hw;
 
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_TXRS_ENABLE, 0);
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_RXRS_ENABLE, 0);
 
 	new_ctrl = hw->super.ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
 	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
@@ -114,7 +114,7 @@  nfp_flower_pf_start(struct rte_eth_dev *dev)
 	if ((hw->super.cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
 		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
 
-	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_CTRL, new_ctrl);
 
 	/* If an error when reconfig we avoid to change hw state */
 	ret = nfp_net_reconfig(hw, new_ctrl, update);
@@ -219,7 +219,7 @@  nfp_flower_pf_close(struct rte_eth_dev *dev)
 	/* Cancel possible impending LSC work here before releasing the port */
 	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
 
-	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);
 
 	/* Now it is safe to free all PF resources */
 	PMD_DRV_LOG(INFO, "Freeing PF resources");
@@ -356,9 +356,9 @@  nfp_flower_init_vnic_common(struct nfp_net_hw *hw,
 		return err;
 
 	/* Work out where in the BAR the queues start */
-	start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
+	start_q = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_TXQ);
 	tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
-	start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
+	start_q = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_RXQ);
 	rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
 
 	hw->tx_bar = pf_dev->qc_bar + tx_bar_off;
@@ -543,8 +543,8 @@  nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		 * Telling the HW about the physical address of the RX ring and number
 		 * of descriptors in log2 format.
 		 */
-		nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(i), rxq->dma);
-		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC));
+		nn_cfg_writeq(&hw->super, NFP_NET_CFG_RXR_ADDR(i), rxq->dma);
+		nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC));
 	}
 
 	snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name);
@@ -608,8 +608,8 @@  nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		 * Telling the HW about the physical address of the TX ring and number
 		 * of descriptors in log2 format.
 		 */
-		nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(i), txq->dma);
-		nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC));
+		nn_cfg_writeq(&hw->super, NFP_NET_CFG_TXR_ADDR(i), txq->dma);
+		nn_cfg_writeb(&hw->super, NFP_NET_CFG_TXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC));
 	}
 
 	return 0;
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 650f09a475..b52c6f514a 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -97,8 +97,8 @@  nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,
 	 * Telling the HW about the physical address of the RX ring and number
 	 * of descriptors in log2 format.
 	 */
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
-	nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
 	return 0;
 }
@@ -181,8 +181,8 @@  nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,
 	 * Telling the HW about the physical address of the TX ring and number
 	 * of descriptors in log2 format.
 	 */
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
-	nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
 	return 0;
 }
@@ -228,7 +228,7 @@  nfp_flower_repr_link_update(struct rte_eth_dev *dev,
 				}
 			}
 		} else {
-			nn_link_status = nn_cfg_readw(pf_hw, NFP_NET_CFG_STS);
+			nn_link_status = nn_cfg_readw(&pf_hw->super, NFP_NET_CFG_STS);
 			nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
 					NFP_NET_CFG_STS_LINK_RATE_MASK;
 
diff --git a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
index 3045533857..c85fadc80d 100644
--- a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
+++ b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
@@ -465,8 +465,8 @@  nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
 	 * Telling the HW about the physical address of the TX ring and number
 	 * of descriptors in log2 format.
 	 */
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
-	nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));
 
 	return 0;
 }
diff --git a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
index 63421ba796..3f8d25aa29 100644
--- a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
+++ b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
@@ -542,8 +542,8 @@  nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
 	 * Telling the HW about the physical address of the TX ring and number
 	 * of descriptors in log2 format.
 	 */
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
-	nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));
 
 	return 0;
 }
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 0f6f97be8e..a773a81e55 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -286,7 +286,7 @@  nfp_net_close(struct rte_eth_dev *dev)
 
 	/* Only free PF resources after all physical ports have been closed */
 	/* Mark this port as unused and free device priv resources */
-	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);
 	app_fw_nic->ports[hw->idx] = NULL;
 
 	for (i = 0; i < app_fw_nic->total_phyports; i++) {
@@ -567,8 +567,8 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Work out where in the BAR the queues start. */
-	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
-	rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
+	tx_base = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_TXQ);
+	rx_base = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_RXQ);
 
 	hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
 	hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
@@ -625,7 +625,7 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 	rte_intr_callback_register(pci_dev->intr_handle,
 			nfp_net_dev_interrupt_handler, (void *)eth_dev);
 	/* Telling the firmware about the LSC interrupt entry */
-	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
 	/* Recording current stats counters values */
 	nfp_net_stats_reset(eth_dev);
 
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index 684968903c..7fb2a3d378 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -20,10 +20,10 @@  nfp_netvf_read_mac(struct nfp_net_hw *hw)
 {
 	uint32_t tmp;
 
-	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
+	tmp = rte_be_to_cpu_32(nn_cfg_readl(&hw->super, NFP_NET_CFG_MACADDR));
 	memcpy(&hw->mac_addr.addr_bytes[0], &tmp, 4);
 
-	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
+	tmp = rte_be_to_cpu_32(nn_cfg_readl(&hw->super, NFP_NET_CFG_MACADDR + 4));
 	memcpy(&hw->mac_addr.addr_bytes[4], &tmp, 2);
 }
 
@@ -97,7 +97,7 @@  nfp_netvf_start(struct rte_eth_dev *dev)
 	if ((hw->super.cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
 		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
 
-	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_CTRL, new_ctrl);
 	if (nfp_net_reconfig(hw, new_ctrl, update) != 0)
 		return -EIO;
 
@@ -299,9 +299,9 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 	}
 
 	/* Work out where in the BAR the queues start. */
-	start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
+	start_q = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_TXQ);
 	tx_bar_off = nfp_qcp_queue_offset(dev_info, start_q);
-	start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
+	start_q = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_RXQ);
 	rx_bar_off = nfp_qcp_queue_offset(dev_info, start_q);
 
 	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
@@ -357,7 +357,7 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 		rte_intr_callback_register(pci_dev->intr_handle,
 				nfp_net_dev_interrupt_handler, (void *)eth_dev);
 		/* Telling the firmware about the LSC interrupt entry */
-		nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+		nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
 		/* Recording current stats counters values */
 		nfp_net_stats_reset(eth_dev);
 	}
diff --git a/drivers/net/nfp/nfp_ipsec.c b/drivers/net/nfp/nfp_ipsec.c
index e080e71db2..0da5c2a3d2 100644
--- a/drivers/net/nfp/nfp_ipsec.c
+++ b/drivers/net/nfp/nfp_ipsec.c
@@ -445,7 +445,7 @@  nfp_ipsec_cfg_cmd_issue(struct nfp_net_hw *hw,
 	msg->rsp = NFP_IPSEC_CFG_MSG_OK;
 
 	for (i = 0; i < msg_size; i++)
-		nn_cfg_writel(hw, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);
+		nn_cfg_writel(&hw->super, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);
 
 	ret = nfp_net_mbox_reconfig(hw, NFP_NET_CFG_MBOX_CMD_IPSEC);
 	if (ret < 0) {
@@ -459,7 +459,7 @@  nfp_ipsec_cfg_cmd_issue(struct nfp_net_hw *hw,
 	 * response. One example where the data is needed is for statistics.
 	 */
 	for (i = 0; i < msg_size; i++)
-		msg->raw[i] = nn_cfg_readl(hw, NFP_NET_CFG_MBOX_VAL + 4 * i);
+		msg->raw[i] = nn_cfg_readl(&hw->super, NFP_NET_CFG_MBOX_VAL + 4 * i);
 
 	switch (msg->rsp) {
 	case NFP_IPSEC_CFG_MSG_OK:
diff --git a/drivers/net/nfp/nfp_net_common.c b/drivers/net/nfp/nfp_net_common.c
index 058260bda3..2ab8d8fadd 100644
--- a/drivers/net/nfp/nfp_net_common.c
+++ b/drivers/net/nfp/nfp_net_common.c
@@ -182,7 +182,8 @@  nfp_net_notify_port_speed(struct nfp_net_hw *hw,
 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
 	 */
 	if (link->link_status == RTE_ETH_LINK_DOWN) {
-		nn_cfg_writew(hw, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
+		nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
+				NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
 		return;
 	}
 
@@ -190,7 +191,7 @@  nfp_net_notify_port_speed(struct nfp_net_hw *hw,
 	 * Link is up so write the link speed from the eth_table to
 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
 	 */
-	nn_cfg_writew(hw, NFP_NET_CFG_STS_NSP_LINK_RATE,
+	nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
 			nfp_net_link_speed_rte2nfp(link->link_speed));
 }
 
@@ -222,7 +223,7 @@  __nfp_net_reconfig(struct nfp_net_hw *hw,
 
 	/* Poll update field, waiting for NFP to ack the config */
 	for (cnt = 0; ; cnt++) {
-		new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
+		new = nn_cfg_readl(&hw->super, NFP_NET_CFG_UPDATE);
 		if (new == 0)
 			break;
 
@@ -270,8 +271,8 @@  nfp_net_reconfig(struct nfp_net_hw *hw,
 
 	rte_spinlock_lock(&hw->reconfig_lock);
 
-	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
-	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_CTRL, ctrl);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_UPDATE, update);
 
 	rte_wmb();
 
@@ -314,8 +315,8 @@  nfp_net_ext_reconfig(struct nfp_net_hw *hw,
 
 	rte_spinlock_lock(&hw->reconfig_lock);
 
-	nn_cfg_writel(hw, NFP_NET_CFG_CTRL_WORD1, ctrl_ext);
-	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_CTRL_WORD1, ctrl_ext);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_UPDATE, update);
 
 	rte_wmb();
 
@@ -355,8 +356,8 @@  nfp_net_mbox_reconfig(struct nfp_net_hw *hw,
 
 	rte_spinlock_lock(&hw->reconfig_lock);
 
-	nn_cfg_writeq(hw, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
-	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX);
+	nn_cfg_writeq(&hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX);
 
 	rte_wmb();
 
@@ -370,7 +371,7 @@  nfp_net_mbox_reconfig(struct nfp_net_hw *hw,
 		return -EIO;
 	}
 
-	return nn_cfg_readl(hw, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
+	return nn_cfg_readl(&hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
 }
 
 /*
@@ -478,14 +479,14 @@  nfp_net_enable_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		enabled_queues |= (1 << i);
 
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
 
 	/* Enabling the required RX queues in the device */
 	enabled_queues = 0;
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		enabled_queues |= (1 << i);
 
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
 }
 
 void
@@ -497,8 +498,8 @@  nfp_net_disable_queues(struct rte_eth_dev *dev)
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_TXRS_ENABLE, 0);
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_RXRS_ENABLE, 0);
 
 	new_ctrl = hw->super.ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
 	update = NFP_NET_CFG_UPDATE_GEN |
@@ -518,8 +519,8 @@  nfp_net_disable_queues(struct rte_eth_dev *dev)
 void
 nfp_net_params_setup(struct nfp_net_hw *hw)
 {
-	nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
-	nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, hw->mtu);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
 }
 
 void
@@ -596,7 +597,7 @@  nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO");
 		/* UIO just supports one queue and no LSC */
-		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
+		nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(0), 0);
 		if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)
 			return -1;
 	} else {
@@ -606,7 +607,7 @@  nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 			 * The first msix vector is reserved for non
 			 * efd interrupts.
 			 */
-			nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
+			nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(i), i + 1);
 			if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)
 				return -1;
 		}
@@ -771,7 +772,7 @@  nfp_net_link_update(struct rte_eth_dev *dev,
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
 	/* Read link status */
-	nn_link_status = nn_cfg_readw(hw, NFP_NET_CFG_STS);
+	nn_link_status = nn_cfg_readw(&hw->super, NFP_NET_CFG_STS);
 	if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
 		link.link_status = RTE_ETH_LINK_UP;
 
@@ -842,12 +843,12 @@  nfp_net_stats_get(struct rte_eth_dev *dev,
 			break;
 
 		nfp_dev_stats.q_ipackets[i] =
-				nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
 		nfp_dev_stats.q_ipackets[i] -=
 				hw->eth_stats_base.q_ipackets[i];
 
 		nfp_dev_stats.q_ibytes[i] =
-				nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
 		nfp_dev_stats.q_ibytes[i] -=
 				hw->eth_stats_base.q_ibytes[i];
 	}
@@ -858,42 +859,42 @@  nfp_net_stats_get(struct rte_eth_dev *dev,
 			break;
 
 		nfp_dev_stats.q_opackets[i] =
-				nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
 		nfp_dev_stats.q_opackets[i] -= hw->eth_stats_base.q_opackets[i];
 
 		nfp_dev_stats.q_obytes[i] =
-				nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
 		nfp_dev_stats.q_obytes[i] -= hw->eth_stats_base.q_obytes[i];
 	}
 
-	nfp_dev_stats.ipackets = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+	nfp_dev_stats.ipackets = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
 	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
 
-	nfp_dev_stats.ibytes = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+	nfp_dev_stats.ibytes = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
 	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
 
 	nfp_dev_stats.opackets =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
 	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
 
 	nfp_dev_stats.obytes =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
 
 	/* Reading general device stats */
 	nfp_dev_stats.ierrors =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
 	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
 
 	nfp_dev_stats.oerrors =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
 
 	/* RX ring mbuf allocation failures */
 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 
 	nfp_dev_stats.imissed =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
 
 	memcpy(stats, &nfp_dev_stats, sizeof(*stats));
@@ -918,10 +919,10 @@  nfp_net_stats_reset(struct rte_eth_dev *dev)
 			break;
 
 		hw->eth_stats_base.q_ipackets[i] =
-				nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
 
 		hw->eth_stats_base.q_ibytes[i] =
-				nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
 	}
 
 	/* Reading per TX ring stats */
@@ -930,36 +931,36 @@  nfp_net_stats_reset(struct rte_eth_dev *dev)
 			break;
 
 		hw->eth_stats_base.q_opackets[i] =
-				nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
 
 		hw->eth_stats_base.q_obytes[i] =
-				nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
 	}
 
 	hw->eth_stats_base.ipackets =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
 
 	hw->eth_stats_base.ibytes =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
 
 	hw->eth_stats_base.opackets =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
 
 	hw->eth_stats_base.obytes =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
 
 	/* Reading general device stats */
 	hw->eth_stats_base.ierrors =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
 
 	hw->eth_stats_base.oerrors =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
 
 	/* RX ring mbuf allocation failures */
 	dev->data->rx_mbuf_alloc_failed = 0;
 
 	hw->eth_stats_base.imissed =
-			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
 
 	return 0;
 }
@@ -1012,7 +1013,7 @@  nfp_net_xstats_value(const struct rte_eth_dev *dev,
 	if (xstat.group == NFP_XSTAT_GROUP_MAC)
 		value = nn_readq(hw->mac_stats + xstat.offset);
 	else
-		value = nn_cfg_readq(hw, xstat.offset);
+		value = nn_cfg_readq(&hw->super, xstat.offset);
 
 	if (raw)
 		return value;
@@ -1320,8 +1321,8 @@  nfp_net_common_init(struct rte_pci_device *pci_dev,
 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
 
-	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
-	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
+	hw->max_rx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_RXRINGS);
+	hw->max_tx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_TXRINGS);
 	if (hw->max_rx_queues == 0 || hw->max_tx_queues == 0) {
 		PMD_INIT_LOG(ERR, "Device %s can not be used, there are no valid queue "
 				"pairs for use", pci_dev->name);
@@ -1336,9 +1337,9 @@  nfp_net_common_init(struct rte_pci_device *pci_dev,
 		return -ENODEV;
 
 	/* Get some of the read-only fields from the config BAR */
-	hw->super.cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
-	hw->super.cap_ext = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
-	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
+	hw->super.cap = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP);
+	hw->super.cap_ext = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP_WORD1);
+	hw->max_mtu = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_MTU);
 	hw->flbufsz = DEFAULT_FLBUF_SIZE;
 
 	nfp_net_init_metadata_format(hw);
@@ -1347,7 +1348,7 @@  nfp_net_common_init(struct rte_pci_device *pci_dev,
 	if (hw->ver.major < 2)
 		hw->rx_offset = NFP_NET_RX_OFFSET;
 	else
-		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
+		hw->rx_offset = nn_cfg_readl(&hw->super, NFP_NET_CFG_RX_OFFSET_ADDR);
 
 	hw->super.ctrl = 0;
 	hw->stride_rx = stride;
@@ -1389,7 +1390,7 @@  nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
 	rte_wmb();
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id),
 			NFP_NET_CFG_ICR_UNMASKED);
 	return 0;
 }
@@ -1410,7 +1411,7 @@  nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
 	rte_wmb();
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), NFP_NET_CFG_ICR_RXTX);
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id), NFP_NET_CFG_ICR_RXTX);
 
 	return 0;
 }
@@ -1457,7 +1458,7 @@  nfp_net_irq_unmask(struct rte_eth_dev *dev)
 		/* If MSI-X auto-masking is used, clear the entry */
 		rte_intr_ack(pci_dev->intr_handle);
 	} else {
-		nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
+		nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
 				NFP_NET_CFG_ICR_UNMASKED);
 	}
 }
@@ -1539,7 +1540,7 @@  nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
 	}
 
 	/* Writing to configuration space */
-	nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, mtu);
 
 	hw->mtu = mtu;
 
@@ -1630,7 +1631,7 @@  nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 
 		/* If all 4 entries were set, don't need read RETA register */
 		if (mask != 0xF)
-			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
+			reta = nn_cfg_readl(&hw->super, NFP_NET_CFG_RSS_ITBL + i);
 
 		for (j = 0; j < 4; j++) {
 			if ((mask & (0x1 << j)) == 0)
@@ -1643,7 +1644,7 @@  nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
 		}
 
-		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
+		nn_cfg_writel(&hw->super, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
 	}
 
 	return 0;
@@ -1713,7 +1714,7 @@  nfp_net_reta_query(struct rte_eth_dev *dev,
 		if (mask == 0)
 			continue;
 
-		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
+		reta = nn_cfg_readl(&hw->super, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
 		for (j = 0; j < 4; j++) {
 			if ((mask & (0x1 << j)) == 0)
 				continue;
@@ -1741,7 +1742,7 @@  nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 	/* Writing the key byte by byte */
 	for (i = 0; i < rss_conf->rss_key_len; i++) {
 		memcpy(&key, &rss_conf->rss_key[i], 1);
-		nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
+		nn_cfg_writeb(&hw->super, NFP_NET_CFG_RSS_KEY + i, key);
 	}
 
 	rss_hf = rss_conf->rss_hf;
@@ -1774,10 +1775,10 @@  nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
 
 	/* Configuring where to apply the RSS hash */
-	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
 
 	/* Writing the key size */
-	nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
 
 	return 0;
 }
@@ -1835,7 +1836,7 @@  nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	rss_hf = rss_conf->rss_hf;
-	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
+	cfg_rss_ctrl = nn_cfg_readl(&hw->super, NFP_NET_CFG_RSS_CTRL);
 
 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) != 0)
 		rss_hf |= RTE_ETH_RSS_IPV4;
@@ -1865,11 +1866,11 @@  nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
 	rss_conf->rss_hf = rss_hf;
 
 	/* Reading the key size */
-	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
+	rss_conf->rss_key_len = nn_cfg_readl(&hw->super, NFP_NET_CFG_RSS_KEY_SZ);
 
 	/* Reading the key byte a byte */
 	for (i = 0; i < rss_conf->rss_key_len; i++) {
-		key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
+		key = nn_cfg_readb(&hw->super, NFP_NET_CFG_RSS_KEY + i);
 		memcpy(&rss_conf->rss_key[i], &key, 1);
 	}
 
@@ -1983,13 +1984,13 @@  nfp_net_set_vxlan_port(struct nfp_net_hw *hw,
 	hw->vxlan_ports[idx] = port;
 
 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
-		nn_cfg_writel(hw, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
+		nn_cfg_writel(&hw->super, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
 				(hw->vxlan_ports[i + 1] << 16) | hw->vxlan_ports[i]);
 	}
 
 	rte_spinlock_lock(&hw->reconfig_lock);
 
-	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VXLAN);
+	nn_cfg_writel(&hw->super, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VXLAN);
 	rte_wmb();
 
 	ret = __nfp_net_reconfig(hw, NFP_NET_CFG_UPDATE_VXLAN);
@@ -2048,7 +2049,7 @@  nfp_net_cfg_read_version(struct nfp_net_hw *hw)
 		struct nfp_net_fw_ver split;
 	} version;
 
-	version.whole = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
+	version.whole = nn_cfg_readl(&hw->super, NFP_NET_CFG_VERSION);
 	hw->ver = version.split;
 }
 
diff --git a/drivers/net/nfp/nfp_net_common.h b/drivers/net/nfp/nfp_net_common.h
index 3fb3b34613..3fe3e96107 100644
--- a/drivers/net/nfp/nfp_net_common.h
+++ b/drivers/net/nfp/nfp_net_common.h
@@ -247,63 +247,63 @@  nn_writeq(uint64_t val,
 }
 
 static inline uint8_t
-nn_cfg_readb(struct nfp_net_hw *hw,
+nn_cfg_readb(struct nfp_hw *hw,
 		uint32_t off)
 {
-	return nn_readb(hw->super.ctrl_bar + off);
+	return nn_readb(hw->ctrl_bar + off);
 }
 
 static inline void
-nn_cfg_writeb(struct nfp_net_hw *hw,
+nn_cfg_writeb(struct nfp_hw *hw,
 		uint32_t off,
 		uint8_t val)
 {
-	nn_writeb(val, hw->super.ctrl_bar + off);
+	nn_writeb(val, hw->ctrl_bar + off);
 }
 
 static inline uint16_t
-nn_cfg_readw(struct nfp_net_hw *hw,
+nn_cfg_readw(struct nfp_hw *hw,
 		uint32_t off)
 {
-	return rte_le_to_cpu_16(nn_readw(hw->super.ctrl_bar + off));
+	return rte_le_to_cpu_16(nn_readw(hw->ctrl_bar + off));
 }
 
 static inline void
-nn_cfg_writew(struct nfp_net_hw *hw,
+nn_cfg_writew(struct nfp_hw *hw,
 		uint32_t off,
 		uint16_t val)
 {
-	nn_writew(rte_cpu_to_le_16(val), hw->super.ctrl_bar + off);
+	nn_writew(rte_cpu_to_le_16(val), hw->ctrl_bar + off);
 }
 
 static inline uint32_t
-nn_cfg_readl(struct nfp_net_hw *hw,
+nn_cfg_readl(struct nfp_hw *hw,
 		uint32_t off)
 {
-	return rte_le_to_cpu_32(nn_readl(hw->super.ctrl_bar + off));
+	return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
 }
 
 static inline void
-nn_cfg_writel(struct nfp_net_hw *hw,
+nn_cfg_writel(struct nfp_hw *hw,
 		uint32_t off,
 		uint32_t val)
 {
-	nn_writel(rte_cpu_to_le_32(val), hw->super.ctrl_bar + off);
+	nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
 }
 
 static inline uint64_t
-nn_cfg_readq(struct nfp_net_hw *hw,
+nn_cfg_readq(struct nfp_hw *hw,
 		uint32_t off)
 {
-	return rte_le_to_cpu_64(nn_readq(hw->super.ctrl_bar + off));
+	return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
 }
 
 static inline void
-nn_cfg_writeq(struct nfp_net_hw *hw,
+nn_cfg_writeq(struct nfp_hw *hw,
 		uint32_t off,
 		uint64_t val)
 {
-	nn_writeq(rte_cpu_to_le_64(val), hw->super.ctrl_bar + off);
+	nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
 }
 
 /**
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index a9dd464a6a..f17cc13cc1 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -925,8 +925,8 @@  nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	 * Telling the HW about the physical address of the RX ring and number
 	 * of descriptors in log2 format.
 	 */
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
-	nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
+	nn_cfg_writeq(&hw->super, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
+	nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
 	return 0;
 }