[v2,02/11] net/nfp: unify the indent coding style

Message ID 20231012012704.483828-3-chaoyong.he@corigine.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series Unify the PMD coding style

Checks

Context        Check      Description
ci/checkpatch  warning    coding style issues

Commit Message

Chaoyong He Oct. 12, 2023, 1:26 a.m. UTC
  Each parameter of a function should occupy one line, indented by two
TAB characters.
All statements that span multiple lines should be indented by two TAB
characters.
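
For example, a hypothetical declaration and a multi-line assignment
formatted to this convention (an illustrative sketch only, not taken
from the diff below; nfp_example_reconfig is a made-up name, while the
struct and NFP_NET_CFG_UPDATE_* flags appear in the driver):

    /* Each parameter after the first occupies its own line,
     * indented two TAB characters. */
    static int
    nfp_example_reconfig(struct nfp_net_hw *hw,
            uint32_t ctrl,
            uint32_t update);

    /* A statement spanning multiple lines is likewise
     * indented two TAB characters. */
    update = NFP_NET_CFG_UPDATE_GEN |
            NFP_NET_CFG_UPDATE_RING |
            NFP_NET_CFG_UPDATE_MSIX;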

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower.c           |   5 +-
 drivers/net/nfp/flower/nfp_flower_ctrl.c      |   7 +-
 .../net/nfp/flower/nfp_flower_representor.c   |   2 +-
 drivers/net/nfp/nfdk/nfp_nfdk.h               |   2 +-
 drivers/net/nfp/nfdk/nfp_nfdk_dp.c            |   4 +-
 drivers/net/nfp/nfp_common.c                  | 250 +++++++++---------
 drivers/net/nfp/nfp_common.h                  |  81 ++++--
 drivers/net/nfp/nfp_cpp_bridge.c              |  56 ++--
 drivers/net/nfp/nfp_ethdev.c                  |  82 +++---
 drivers/net/nfp/nfp_ethdev_vf.c               |  66 +++--
 drivers/net/nfp/nfp_flow.c                    |  36 +--
 drivers/net/nfp/nfp_rxtx.c                    |  86 +++---
 drivers/net/nfp/nfp_rxtx.h                    |  10 +-
 13 files changed, 358 insertions(+), 329 deletions(-)

Patch

diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 3ddaf0f28d..3352693d71 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -63,7 +63,7 @@  nfp_pf_repr_disable_queues(struct rte_eth_dev *dev)
 
 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
 	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
-		 NFP_NET_CFG_UPDATE_MSIX;
+			NFP_NET_CFG_UPDATE_MSIX;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
 		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
@@ -330,7 +330,8 @@  nfp_flower_pf_xmit_pkts(void *tx_queue,
 }
 
 static int
-nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type)
+nfp_flower_init_vnic_common(struct nfp_net_hw *hw,
+		const char *vnic_type)
 {
 	int err;
 	uint32_t start_q;
diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c
index b564e7cd73..4967cc2375 100644
--- a/drivers/net/nfp/flower/nfp_flower_ctrl.c
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c
@@ -64,9 +64,8 @@  nfp_flower_ctrl_vnic_recv(void *rx_queue,
 		 */
 		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
 		if (unlikely(new_mb == NULL)) {
-			PMD_RX_LOG(ERR,
-				"RX mbuf alloc failed port_id=%u queue_id=%hu",
-				rxq->port_id, rxq->qidx);
+			PMD_RX_LOG(ERR, "RX mbuf alloc failed port_id=%u queue_id=%hu",
+					rxq->port_id, rxq->qidx);
 			nfp_net_mbuf_alloc_failed(rxq);
 			break;
 		}
@@ -141,7 +140,7 @@  nfp_flower_ctrl_vnic_recv(void *rx_queue,
 	rte_wmb();
 	if (nb_hold >= rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port=%hu queue=%hu nb_hold=%hu avail=%hu",
-			rxq->port_id, rxq->qidx, nb_hold, avail);
+				rxq->port_id, rxq->qidx, nb_hold, avail);
 		nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
 		nb_hold = 0;
 	}
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 55ca3e6db0..01c2c5a517 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -826,7 +826,7 @@  nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
 		snprintf(flower_repr.name, sizeof(flower_repr.name),
 				"%s_repr_vf%d", pci_name, i);
 
-		 /* This will also allocate private memory for the device*/
+		/* This will also allocate private memory for the device*/
 		ret = rte_eth_dev_create(eth_dev->device, flower_repr.name,
 				sizeof(struct nfp_flower_representor),
 				NULL, NULL, nfp_flower_repr_init, &flower_repr);
diff --git a/drivers/net/nfp/nfdk/nfp_nfdk.h b/drivers/net/nfp/nfdk/nfp_nfdk.h
index 75ecb361ee..99675b6bd7 100644
--- a/drivers/net/nfp/nfdk/nfp_nfdk.h
+++ b/drivers/net/nfp/nfdk/nfp_nfdk.h
@@ -143,7 +143,7 @@  nfp_net_nfdk_free_tx_desc(struct nfp_net_txq *txq)
 		free_desc = txq->rd_p - txq->wr_p;
 
 	return (free_desc > NFDK_TX_DESC_STOP_CNT) ?
-		(free_desc - NFDK_TX_DESC_STOP_CNT) : 0;
+			(free_desc - NFDK_TX_DESC_STOP_CNT) : 0;
 }
 
 /*
diff --git a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
index d4bd5edb0a..2426ffb261 100644
--- a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
+++ b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
@@ -101,9 +101,7 @@  static inline uint16_t
 nfp_net_nfdk_headlen_to_segs(uint16_t headlen)
 {
 	/* First descriptor fits less data, so adjust for that */
-	return DIV_ROUND_UP(headlen +
-			NFDK_TX_MAX_DATA_PER_DESC -
-			NFDK_TX_MAX_DATA_PER_HEAD,
+	return DIV_ROUND_UP(headlen + NFDK_TX_MAX_DATA_PER_DESC - NFDK_TX_MAX_DATA_PER_HEAD,
 			NFDK_TX_MAX_DATA_PER_DESC);
 }
 
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 36752583dd..9719a9212b 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -172,7 +172,8 @@  nfp_net_link_speed_rte2nfp(uint16_t speed)
 }
 
 static void
-nfp_net_notify_port_speed(struct nfp_net_hw *hw, struct rte_eth_link *link)
+nfp_net_notify_port_speed(struct nfp_net_hw *hw,
+		struct rte_eth_link *link)
 {
 	/**
 	 * Read the link status from NFP_NET_CFG_STS. If the link is down
@@ -188,21 +189,22 @@  nfp_net_notify_port_speed(struct nfp_net_hw *hw, struct rte_eth_link *link)
 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
 	 */
 	nn_cfg_writew(hw, NFP_NET_CFG_STS_NSP_LINK_RATE,
-		      nfp_net_link_speed_rte2nfp(link->link_speed));
+			nfp_net_link_speed_rte2nfp(link->link_speed));
 }
 
 /* The length of firmware version string */
 #define FW_VER_LEN        32
 
 static int
-__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
+__nfp_net_reconfig(struct nfp_net_hw *hw,
+		uint32_t update)
 {
 	int cnt;
 	uint32_t new;
 	struct timespec wait;
 
 	PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
-		    hw->qcp_cfg);
+			hw->qcp_cfg);
 
 	if (hw->qcp_cfg == NULL) {
 		PMD_INIT_LOG(ERR, "Bad configuration queue pointer");
@@ -227,7 +229,7 @@  __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
 		}
 		if (cnt >= NFP_NET_POLL_TIMEOUT) {
 			PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
-					  " %dms", update, cnt);
+					" %dms", update, cnt);
 			return -EIO;
 		}
 		nanosleep(&wait, 0); /* waiting for a 1ms */
@@ -254,7 +256,9 @@  __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
  *   - (EIO) if I/O err and fail to reconfigure the device.
  */
 int
-nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
+nfp_net_reconfig(struct nfp_net_hw *hw,
+		uint32_t ctrl,
+		uint32_t update)
 {
 	int ret;
 
@@ -296,7 +300,9 @@  nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
  *   - (EIO) if I/O err and fail to reconfigure the device.
  */
 int
-nfp_net_ext_reconfig(struct nfp_net_hw *hw, uint32_t ctrl_ext, uint32_t update)
+nfp_net_ext_reconfig(struct nfp_net_hw *hw,
+		uint32_t ctrl_ext,
+		uint32_t update)
 {
 	int ret;
 
@@ -401,7 +407,7 @@  nfp_net_configure(struct rte_eth_dev *dev)
 
 	/* Checking RX mode */
 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 &&
-	    (hw->cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
+			(hw->cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
 		PMD_INIT_LOG(INFO, "RSS not supported");
 		return -EINVAL;
 	}
@@ -409,7 +415,7 @@  nfp_net_configure(struct rte_eth_dev *dev)
 	/* Checking MTU set */
 	if (rxmode->mtu > NFP_FRAME_SIZE_MAX) {
 		PMD_INIT_LOG(ERR, "MTU (%u) larger than NFP_FRAME_SIZE_MAX (%u) not supported",
-				    rxmode->mtu, NFP_FRAME_SIZE_MAX);
+				rxmode->mtu, NFP_FRAME_SIZE_MAX);
 		return -ERANGE;
 	}
 
@@ -446,7 +452,8 @@  nfp_net_log_device_information(const struct nfp_net_hw *hw)
 }
 
 static inline void
-nfp_net_enbable_rxvlan_cap(struct nfp_net_hw *hw, uint32_t *ctrl)
+nfp_net_enbable_rxvlan_cap(struct nfp_net_hw *hw,
+		uint32_t *ctrl)
 {
 	if ((hw->cap & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0)
 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN_V2;
@@ -490,8 +497,9 @@  nfp_net_disable_queues(struct rte_eth_dev *dev)
 	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
 
 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
-	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
-		 NFP_NET_CFG_UPDATE_MSIX;
+	update = NFP_NET_CFG_UPDATE_GEN |
+			NFP_NET_CFG_UPDATE_RING |
+			NFP_NET_CFG_UPDATE_MSIX;
 
 	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
 		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
@@ -517,7 +525,8 @@  nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
 }
 
 void
-nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
+nfp_net_write_mac(struct nfp_net_hw *hw,
+		uint8_t *mac)
 {
 	uint32_t mac0 = *(uint32_t *)mac;
 	uint16_t mac1;
@@ -527,20 +536,21 @@  nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
 	mac += 4;
 	mac1 = *(uint16_t *)mac;
 	nn_writew(rte_cpu_to_be_16(mac1),
-		  hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
+			hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
 }
 
 int
-nfp_net_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
+nfp_net_set_mac_addr(struct rte_eth_dev *dev,
+		struct rte_ether_addr *mac_addr)
 {
 	struct nfp_net_hw *hw;
 	uint32_t update, ctrl;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
-	    (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
+			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
 		PMD_INIT_LOG(INFO, "MAC address unable to change when"
-				  " port enabled");
+				" port enabled");
 		return -EBUSY;
 	}
 
@@ -551,7 +561,7 @@  nfp_net_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
 	update = NFP_NET_CFG_UPDATE_MACADDR;
 	ctrl = hw->ctrl;
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
-	    (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
+			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
 		ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
 	if (nfp_net_reconfig(hw, ctrl, update) != 0) {
 		PMD_INIT_LOG(INFO, "MAC address update failed");
@@ -562,15 +572,15 @@  nfp_net_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
 
 int
 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
-			   struct rte_intr_handle *intr_handle)
+		struct rte_intr_handle *intr_handle)
 {
 	struct nfp_net_hw *hw;
 	int i;
 
 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
-				    dev->data->nb_rx_queues) != 0) {
+				dev->data->nb_rx_queues) != 0) {
 		PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-			     " intr_vec", dev->data->nb_rx_queues);
+				" intr_vec", dev->data->nb_rx_queues);
 		return -ENOMEM;
 	}
 
@@ -590,12 +600,10 @@  nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 			 * efd interrupts
 			*/
 			nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
-			if (rte_intr_vec_list_index_set(intr_handle, i,
-							       i + 1) != 0)
+			if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)
 				return -1;
 			PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
-				rte_intr_vec_list_index_get(intr_handle,
-								   i));
+					rte_intr_vec_list_index_get(intr_handle, i));
 		}
 	}
 
@@ -651,13 +659,13 @@  nfp_check_offloads(struct rte_eth_dev *dev)
 
 	/* TX checksum offload */
 	if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 ||
-	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
-	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
+			(txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
+			(txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 
 	/* LSO offload */
 	if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
-	    (txmode->offloads & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
+			(txmode->offloads & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
 		if ((hw->cap & NFP_NET_CFG_CTRL_LSO) != 0)
 			ctrl |= NFP_NET_CFG_CTRL_LSO;
 		else
@@ -751,7 +759,8 @@  nfp_net_promisc_disable(struct rte_eth_dev *dev)
  * status.
  */
 int
-nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
+nfp_net_link_update(struct rte_eth_dev *dev,
+		__rte_unused int wait_to_complete)
 {
 	int ret;
 	uint32_t i;
@@ -820,7 +829,8 @@  nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
 }
 
 int
-nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+nfp_net_stats_get(struct rte_eth_dev *dev,
+		struct rte_eth_stats *stats)
 {
 	int i;
 	struct nfp_net_hw *hw;
@@ -838,16 +848,16 @@  nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			break;
 
 		nfp_dev_stats.q_ipackets[i] =
-			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+				nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
 
 		nfp_dev_stats.q_ipackets[i] -=
-			hw->eth_stats_base.q_ipackets[i];
+				hw->eth_stats_base.q_ipackets[i];
 
 		nfp_dev_stats.q_ibytes[i] =
-			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+				nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
 
 		nfp_dev_stats.q_ibytes[i] -=
-			hw->eth_stats_base.q_ibytes[i];
+				hw->eth_stats_base.q_ibytes[i];
 	}
 
 	/* reading per TX ring stats */
@@ -856,46 +866,42 @@  nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			break;
 
 		nfp_dev_stats.q_opackets[i] =
-			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+				nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
 
-		nfp_dev_stats.q_opackets[i] -=
-			hw->eth_stats_base.q_opackets[i];
+		nfp_dev_stats.q_opackets[i] -= hw->eth_stats_base.q_opackets[i];
 
 		nfp_dev_stats.q_obytes[i] =
-			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+				nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
 
-		nfp_dev_stats.q_obytes[i] -=
-			hw->eth_stats_base.q_obytes[i];
+		nfp_dev_stats.q_obytes[i] -= hw->eth_stats_base.q_obytes[i];
 	}
 
-	nfp_dev_stats.ipackets =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+	nfp_dev_stats.ipackets = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
 
 	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
 
-	nfp_dev_stats.ibytes =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+	nfp_dev_stats.ibytes = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
 
 	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
 
 	nfp_dev_stats.opackets =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
 
 	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
 
 	nfp_dev_stats.obytes =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
 
 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
 
 	/* reading general device stats */
 	nfp_dev_stats.ierrors =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
 
 	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
 
 	nfp_dev_stats.oerrors =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
 
 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
 
@@ -903,7 +909,7 @@  nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 
 	nfp_dev_stats.imissed =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
 
 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
 
@@ -933,10 +939,10 @@  nfp_net_stats_reset(struct rte_eth_dev *dev)
 			break;
 
 		hw->eth_stats_base.q_ipackets[i] =
-			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
+				nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
 
 		hw->eth_stats_base.q_ibytes[i] =
-			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
+				nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
 	}
 
 	/* reading per TX ring stats */
@@ -945,36 +951,36 @@  nfp_net_stats_reset(struct rte_eth_dev *dev)
 			break;
 
 		hw->eth_stats_base.q_opackets[i] =
-			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
+				nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
 
 		hw->eth_stats_base.q_obytes[i] =
-			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
+				nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
 	}
 
 	hw->eth_stats_base.ipackets =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
 
 	hw->eth_stats_base.ibytes =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
 
 	hw->eth_stats_base.opackets =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
 
 	hw->eth_stats_base.obytes =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
 
 	/* reading general device stats */
 	hw->eth_stats_base.ierrors =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
 
 	hw->eth_stats_base.oerrors =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
 
 	/* RX ring mbuf allocation failures */
 	dev->data->rx_mbuf_alloc_failed = 0;
 
 	hw->eth_stats_base.imissed =
-		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
+			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
 
 	return 0;
 }
@@ -1237,16 +1243,16 @@  nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	if ((hw->cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
-					     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
-					     RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
+				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
 	if ((hw->cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) != 0)
 		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
 
 	if ((hw->cap & NFP_NET_CFG_CTRL_TXCSUM) != 0)
 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
-					     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
-					     RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
+				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+				RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
 	if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) {
 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
@@ -1301,21 +1307,24 @@  nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 		dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
-						   RTE_ETH_RSS_NONFRAG_IPV4_TCP |
-						   RTE_ETH_RSS_NONFRAG_IPV4_UDP |
-						   RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
-						   RTE_ETH_RSS_IPV6 |
-						   RTE_ETH_RSS_NONFRAG_IPV6_TCP |
-						   RTE_ETH_RSS_NONFRAG_IPV6_UDP |
-						   RTE_ETH_RSS_NONFRAG_IPV6_SCTP;
+				RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+				RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+				RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+				RTE_ETH_RSS_IPV6 |
+				RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+				RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+				RTE_ETH_RSS_NONFRAG_IPV6_SCTP;
 
 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 	}
 
-	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
-			       RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
-			       RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+			RTE_ETH_LINK_SPEED_10G |
+			RTE_ETH_LINK_SPEED_25G |
+			RTE_ETH_LINK_SPEED_40G |
+			RTE_ETH_LINK_SPEED_50G |
+			RTE_ETH_LINK_SPEED_100G;
 
 	return 0;
 }
@@ -1384,7 +1393,8 @@  nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
 }
 
 int
-nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
+		uint16_t queue_id)
 {
 	struct rte_pci_device *pci_dev;
 	struct nfp_net_hw *hw;
@@ -1393,19 +1403,19 @@  nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
-	if (rte_intr_type_get(pci_dev->intr_handle) !=
-							RTE_INTR_HANDLE_UIO)
+	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
 		base = 1;
 
 	/* Make sure all updates are written before un-masking */
 	rte_wmb();
 	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
-		      NFP_NET_CFG_ICR_UNMASKED);
+			NFP_NET_CFG_ICR_UNMASKED);
 	return 0;
 }
 
 int
-nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
+		uint16_t queue_id)
 {
 	struct rte_pci_device *pci_dev;
 	struct nfp_net_hw *hw;
@@ -1414,8 +1424,7 @@  nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
-	if (rte_intr_type_get(pci_dev->intr_handle) !=
-							RTE_INTR_HANDLE_UIO)
+	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
 		base = 1;
 
 	/* Make sure all updates are written before un-masking */
@@ -1433,16 +1442,15 @@  nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 	rte_eth_linkstatus_get(dev, &link);
 	if (link.link_status != 0)
 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
-			    dev->data->port_id, link.link_speed,
-			    link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX
-			    ? "full-duplex" : "half-duplex");
+				dev->data->port_id, link.link_speed,
+				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
+				"full-duplex" : "half-duplex");
 	else
-		PMD_DRV_LOG(INFO, " Port %d: Link Down",
-			    dev->data->port_id);
+		PMD_DRV_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
 
 	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
-		    pci_dev->addr.domain, pci_dev->addr.bus,
-		    pci_dev->addr.devid, pci_dev->addr.function);
+			pci_dev->addr.domain, pci_dev->addr.bus,
+			pci_dev->addr.devid, pci_dev->addr.function);
 }
 
 /* Interrupt configuration and handling */
@@ -1470,7 +1478,7 @@  nfp_net_irq_unmask(struct rte_eth_dev *dev)
 		/* Make sure all updates are written before un-masking */
 		rte_wmb();
 		nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
-			      NFP_NET_CFG_ICR_UNMASKED);
+				NFP_NET_CFG_ICR_UNMASKED);
 	}
 }
 
@@ -1523,8 +1531,8 @@  nfp_net_dev_interrupt_handler(void *param)
 	}
 
 	if (rte_eal_alarm_set(timeout * 1000,
-			      nfp_net_dev_interrupt_delayed_handler,
-			      (void *)dev) != 0) {
+			nfp_net_dev_interrupt_delayed_handler,
+			(void *)dev) != 0) {
 		PMD_INIT_LOG(ERR, "Error setting alarm");
 		/* Unmasking */
 		nfp_net_irq_unmask(dev);
@@ -1532,7 +1540,8 @@  nfp_net_dev_interrupt_handler(void *param)
 }
 
 int
-nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
+		uint16_t mtu)
 {
 	struct nfp_net_hw *hw;
 
@@ -1541,14 +1550,14 @@  nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	/* mtu setting is forbidden if port is started */
 	if (dev->data->dev_started) {
 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
-			    dev->data->port_id);
+				dev->data->port_id);
 		return -EBUSY;
 	}
 
 	/* MTU larger than current mbufsize not supported */
 	if (mtu > hw->flbufsz) {
 		PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported",
-			    mtu, hw->flbufsz);
+				mtu, hw->flbufsz);
 		return -ERANGE;
 	}
 
@@ -1561,7 +1570,8 @@  nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 }
 
 int
-nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
+		int mask)
 {
 	uint32_t new_ctrl, update;
 	struct nfp_net_hw *hw;
@@ -1606,8 +1616,8 @@  nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
 static int
 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
-		    struct rte_eth_rss_reta_entry64 *reta_conf,
-		    uint16_t reta_size)
+		struct rte_eth_rss_reta_entry64 *reta_conf,
+		uint16_t reta_size)
 {
 	uint32_t reta, mask;
 	int i, j;
@@ -1617,8 +1627,8 @@  nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 
 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
-			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
+				"(%d) doesn't match the number hardware can supported "
+				"(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
 		return -EINVAL;
 	}
 
@@ -1648,8 +1658,7 @@  nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 				reta &= ~(0xFF << (8 * j));
 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
 		}
-		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
-			      reta);
+		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
 	}
 	return 0;
 }
@@ -1657,8 +1666,8 @@  nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
 int
 nfp_net_reta_update(struct rte_eth_dev *dev,
-		    struct rte_eth_rss_reta_entry64 *reta_conf,
-		    uint16_t reta_size)
+		struct rte_eth_rss_reta_entry64 *reta_conf,
+		uint16_t reta_size)
 {
 	struct nfp_net_hw *hw =
 		NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1683,8 +1692,8 @@  nfp_net_reta_update(struct rte_eth_dev *dev,
  /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
 int
 nfp_net_reta_query(struct rte_eth_dev *dev,
-		   struct rte_eth_rss_reta_entry64 *reta_conf,
-		   uint16_t reta_size)
+		struct rte_eth_rss_reta_entry64 *reta_conf,
+		uint16_t reta_size)
 {
 	uint8_t i, j, mask;
 	int idx, shift;
@@ -1698,8 +1707,8 @@  nfp_net_reta_query(struct rte_eth_dev *dev,
 
 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
-			"(%d) doesn't match the number hardware can supported "
-			"(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
+				"(%d) doesn't match the number hardware can supported "
+				"(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
 		return -EINVAL;
 	}
 
@@ -1716,13 +1725,12 @@  nfp_net_reta_query(struct rte_eth_dev *dev,
 		if (mask == 0)
 			continue;
 
-		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
-				    shift);
+		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
 		for (j = 0; j < 4; j++) {
 			if ((mask & (0x1 << j)) == 0)
 				continue;
 			reta_conf[idx].reta[shift + j] =
-				(uint8_t)((reta >> (8 * j)) & 0xF);
+					(uint8_t)((reta >> (8 * j)) & 0xF);
 		}
 	}
 	return 0;
@@ -1730,7 +1738,7 @@  nfp_net_reta_query(struct rte_eth_dev *dev,
 
 static int
 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
-			struct rte_eth_rss_conf *rss_conf)
+		struct rte_eth_rss_conf *rss_conf)
 {
 	struct nfp_net_hw *hw;
 	uint64_t rss_hf;
@@ -1786,7 +1794,7 @@  nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 
 int
 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
-			struct rte_eth_rss_conf *rss_conf)
+		struct rte_eth_rss_conf *rss_conf)
 {
 	uint32_t update;
 	uint64_t rss_hf;
@@ -1822,7 +1830,7 @@  nfp_net_rss_hash_update(struct rte_eth_dev *dev,
 
 int
 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
-			  struct rte_eth_rss_conf *rss_conf)
+		struct rte_eth_rss_conf *rss_conf)
 {
 	uint64_t rss_hf;
 	uint32_t cfg_rss_ctrl;
@@ -1888,7 +1896,7 @@  nfp_net_rss_config_default(struct rte_eth_dev *dev)
 	int i, j, ret;
 
 	PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
-		rx_queues);
+			rx_queues);
 
 	nfp_reta_conf[0].mask = ~0x0;
 	nfp_reta_conf[1].mask = ~0x0;
@@ -1984,7 +1992,7 @@  nfp_net_set_vxlan_port(struct nfp_net_hw *hw,
 
 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
 		nn_cfg_writel(hw, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
-			(hw->vxlan_ports[i + 1] << 16) | hw->vxlan_ports[i]);
+				(hw->vxlan_ports[i + 1] << 16) | hw->vxlan_ports[i]);
 	}
 
 	rte_spinlock_lock(&hw->reconfig_lock);
@@ -2004,7 +2012,8 @@  nfp_net_set_vxlan_port(struct nfp_net_hw *hw,
  * than 40 bits
  */
 int
-nfp_net_check_dma_mask(struct nfp_net_hw *hw, char *name)
+nfp_net_check_dma_mask(struct nfp_net_hw *hw,
+		char *name)
 {
 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 &&
 			rte_mem_check_dma_mask(40) != 0) {
@@ -2052,7 +2061,8 @@  nfp_net_cfg_read_version(struct nfp_net_hw *hw)
 }
 
 static void
-nfp_net_get_nsp_info(struct nfp_net_hw *hw, char *nsp_version)
+nfp_net_get_nsp_info(struct nfp_net_hw *hw,
+		char *nsp_version)
 {
 	struct nfp_nsp *nsp;
 
@@ -2068,7 +2078,8 @@  nfp_net_get_nsp_info(struct nfp_net_hw *hw, char *nsp_version)
 }
 
 static void
-nfp_net_get_mip_name(struct nfp_net_hw *hw, char *mip_name)
+nfp_net_get_mip_name(struct nfp_net_hw *hw,
+		char *mip_name)
 {
 	struct nfp_mip *mip;
 
@@ -2082,7 +2093,8 @@  nfp_net_get_mip_name(struct nfp_net_hw *hw, char *mip_name)
 }
 
 static void
-nfp_net_get_app_name(struct nfp_net_hw *hw, char *app_name)
+nfp_net_get_app_name(struct nfp_net_hw *hw,
+		char *app_name)
 {
 	switch (hw->pf_dev->app_fw_id) {
 	case NFP_APP_FW_CORE_NIC:
diff --git a/drivers/net/nfp/nfp_common.h b/drivers/net/nfp/nfp_common.h
index bc3a948231..e4fd394868 100644
--- a/drivers/net/nfp/nfp_common.h
+++ b/drivers/net/nfp/nfp_common.h
@@ -180,37 +180,47 @@  struct nfp_net_adapter {
 	struct nfp_net_hw hw;
 };
 
-static inline uint8_t nn_readb(volatile const void *addr)
+static inline uint8_t
+nn_readb(volatile const void *addr)
 {
 	return rte_read8(addr);
 }
 
-static inline void nn_writeb(uint8_t val, volatile void *addr)
+static inline void
+nn_writeb(uint8_t val,
+		volatile void *addr)
 {
 	rte_write8(val, addr);
 }
 
-static inline uint32_t nn_readl(volatile const void *addr)
+static inline uint32_t
+nn_readl(volatile const void *addr)
 {
 	return rte_read32(addr);
 }
 
-static inline void nn_writel(uint32_t val, volatile void *addr)
+static inline void
+nn_writel(uint32_t val,
+		volatile void *addr)
 {
 	rte_write32(val, addr);
 }
 
-static inline uint16_t nn_readw(volatile const void *addr)
+static inline uint16_t
+nn_readw(volatile const void *addr)
 {
 	return rte_read16(addr);
 }
 
-static inline void nn_writew(uint16_t val, volatile void *addr)
+static inline void
+nn_writew(uint16_t val,
+		volatile void *addr)
 {
 	rte_write16(val, addr);
 }
 
-static inline uint64_t nn_readq(volatile void *addr)
+static inline uint64_t
+nn_readq(volatile void *addr)
 {
 	const volatile uint32_t *p = addr;
 	uint32_t low, high;
@@ -221,7 +231,9 @@  static inline uint64_t nn_readq(volatile void *addr)
 	return low + ((uint64_t)high << 32);
 }
 
-static inline void nn_writeq(uint64_t val, volatile void *addr)
+static inline void
+nn_writeq(uint64_t val,
+		volatile void *addr)
 {
 	nn_writel(val >> 32, (volatile char *)addr + 4);
 	nn_writel(val, addr);
@@ -232,49 +244,61 @@  static inline void nn_writeq(uint64_t val, volatile void *addr)
  * Performs any endian conversion necessary.
  */
 static inline uint8_t
-nn_cfg_readb(struct nfp_net_hw *hw, int off)
+nn_cfg_readb(struct nfp_net_hw *hw,
+		int off)
 {
 	return nn_readb(hw->ctrl_bar + off);
 }
 
 static inline void
-nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
+nn_cfg_writeb(struct nfp_net_hw *hw,
+		int off,
+		uint8_t val)
 {
 	nn_writeb(val, hw->ctrl_bar + off);
 }
 
 static inline uint16_t
-nn_cfg_readw(struct nfp_net_hw *hw, int off)
+nn_cfg_readw(struct nfp_net_hw *hw,
+		int off)
 {
 	return rte_le_to_cpu_16(nn_readw(hw->ctrl_bar + off));
 }
 
 static inline void
-nn_cfg_writew(struct nfp_net_hw *hw, int off, uint16_t val)
+nn_cfg_writew(struct nfp_net_hw *hw,
+		int off,
+		uint16_t val)
 {
 	nn_writew(rte_cpu_to_le_16(val), hw->ctrl_bar + off);
 }
 
 static inline uint32_t
-nn_cfg_readl(struct nfp_net_hw *hw, int off)
+nn_cfg_readl(struct nfp_net_hw *hw,
+		int off)
 {
 	return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
 }
 
 static inline void
-nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
+nn_cfg_writel(struct nfp_net_hw *hw,
+		int off,
+		uint32_t val)
 {
 	nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
 }
 
 static inline uint64_t
-nn_cfg_readq(struct nfp_net_hw *hw, int off)
+nn_cfg_readq(struct nfp_net_hw *hw,
+		int off)
 {
 	return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
 }
 
 static inline void
-nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
+nn_cfg_writeq(struct nfp_net_hw *hw,
+		int off,
+		uint64_t val)
 {
 	nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
 }
@@ -286,7 +310,9 @@  nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
  * @val: Value to add to the queue pointer
  */
 static inline void
-nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
+nfp_qcp_ptr_add(uint8_t *q,
+		enum nfp_qcp_ptr ptr,
+		uint32_t val)
 {
 	uint32_t off;
 
@@ -304,7 +330,8 @@  nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
  * @ptr: Read or Write pointer
  */
 static inline uint32_t
-nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
+nfp_qcp_read(uint8_t *q,
+		enum nfp_qcp_ptr ptr)
 {
 	uint32_t off;
 	uint32_t val;
@@ -343,12 +370,12 @@  void nfp_net_params_setup(struct nfp_net_hw *hw);
 void nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac);
 int nfp_net_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
 int nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
-			       struct rte_intr_handle *intr_handle);
+		struct rte_intr_handle *intr_handle);
 uint32_t nfp_check_offloads(struct rte_eth_dev *dev);
 int nfp_net_promisc_enable(struct rte_eth_dev *dev);
 int nfp_net_promisc_disable(struct rte_eth_dev *dev);
 int nfp_net_link_update(struct rte_eth_dev *dev,
-			__rte_unused int wait_to_complete);
+		__rte_unused int wait_to_complete);
 int nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
 int nfp_net_stats_reset(struct rte_eth_dev *dev);
 uint32_t nfp_net_xstats_size(const struct rte_eth_dev *dev);
@@ -368,7 +395,7 @@  int nfp_net_xstats_get_by_id(struct rte_eth_dev *dev,
 		unsigned int n);
 int nfp_net_xstats_reset(struct rte_eth_dev *dev);
 int nfp_net_infos_get(struct rte_eth_dev *dev,
-		      struct rte_eth_dev_info *dev_info);
+		struct rte_eth_dev_info *dev_info);
 const uint32_t *nfp_net_supported_ptypes_get(struct rte_eth_dev *dev);
 int nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
 int nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
@@ -379,15 +406,15 @@  void nfp_net_dev_interrupt_delayed_handler(void *param);
 int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 int nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 int nfp_net_reta_update(struct rte_eth_dev *dev,
-			struct rte_eth_rss_reta_entry64 *reta_conf,
-			uint16_t reta_size);
+		struct rte_eth_rss_reta_entry64 *reta_conf,
+		uint16_t reta_size);
 int nfp_net_reta_query(struct rte_eth_dev *dev,
-		       struct rte_eth_rss_reta_entry64 *reta_conf,
-		       uint16_t reta_size);
+		struct rte_eth_rss_reta_entry64 *reta_conf,
+		uint16_t reta_size);
 int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
-			    struct rte_eth_rss_conf *rss_conf);
+		struct rte_eth_rss_conf *rss_conf);
 int nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
-			      struct rte_eth_rss_conf *rss_conf);
+		struct rte_eth_rss_conf *rss_conf);
 int nfp_net_rss_config_default(struct rte_eth_dev *dev);
 void nfp_net_stop_rx_queue(struct rte_eth_dev *dev);
 void nfp_net_close_rx_queue(struct rte_eth_dev *dev);
diff --git a/drivers/net/nfp/nfp_cpp_bridge.c b/drivers/net/nfp/nfp_cpp_bridge.c
index 34764a8a32..85a8bf9235 100644
--- a/drivers/net/nfp/nfp_cpp_bridge.c
+++ b/drivers/net/nfp/nfp_cpp_bridge.c
@@ -116,7 +116,8 @@  nfp_enable_cpp_service(struct nfp_pf_dev *pf_dev)
  * of CPP interface handler configured by the PMD setup.
  */
 static int
-nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
+nfp_cpp_bridge_serve_write(int sockfd,
+		struct nfp_cpp *cpp)
 {
 	struct nfp_cpp_area *area;
 	off_t offset, nfp_offset;
@@ -126,7 +127,7 @@  nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
 	int err = 0;
 
 	PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
-		sizeof(off_t), sizeof(size_t));
+			sizeof(off_t), sizeof(size_t));
 
 	/* Reading the count param */
 	err = recv(sockfd, &count, sizeof(off_t), 0);
@@ -145,21 +146,21 @@  nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
 	nfp_offset = offset & ((1ull << 40) - 1);
 
 	PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
-		offset);
+			offset);
 	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
-		cpp_id, nfp_offset);
+			cpp_id, nfp_offset);
 
 	/* Adjust length if not aligned */
 	if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
-	    (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
+			(nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
 		curlen = NFP_CPP_MEMIO_BOUNDARY -
-			(nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
+				(nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
 	}
 
 	while (count > 0) {
 		/* configure a CPP PCIe2CPP BAR for mapping the CPP target */
 		area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
-						    nfp_offset, curlen);
+				nfp_offset, curlen);
 		if (area == NULL) {
 			PMD_CPP_LOG(ERR, "area alloc fail");
 			return -EIO;
@@ -179,12 +180,11 @@  nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
 				len = sizeof(tmpbuf);
 
 			PMD_CPP_LOG(DEBUG, "%s: Receive %u of %zu\n", __func__,
-					   len, count);
+					len, count);
 			err = recv(sockfd, tmpbuf, len, MSG_WAITALL);
 			if (err != (int)len) {
-				PMD_CPP_LOG(ERR,
-					"error when receiving, %d of %zu",
-					err, count);
+				PMD_CPP_LOG(ERR, "error when receiving, %d of %zu",
+						err, count);
 				nfp_cpp_area_release(area);
 				nfp_cpp_area_free(area);
 				return -EIO;
@@ -204,7 +204,7 @@  nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
 
 		count -= pos;
 		curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
-			 NFP_CPP_MEMIO_BOUNDARY : count;
+				NFP_CPP_MEMIO_BOUNDARY : count;
 	}
 
 	return 0;
@@ -217,7 +217,8 @@  nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
  * data is sent to the requester using the same socket.
  */
 static int
-nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
+nfp_cpp_bridge_serve_read(int sockfd,
+		struct nfp_cpp *cpp)
 {
 	struct nfp_cpp_area *area;
 	off_t offset, nfp_offset;
@@ -227,7 +228,7 @@  nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
 	int err = 0;
 
 	PMD_CPP_LOG(DEBUG, "%s: offset size %zu, count_size: %zu\n", __func__,
-		sizeof(off_t), sizeof(size_t));
+			sizeof(off_t), sizeof(size_t));
 
 	/* Reading the count param */
 	err = recv(sockfd, &count, sizeof(off_t), 0);
@@ -246,20 +247,20 @@  nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
 	nfp_offset = offset & ((1ull << 40) - 1);
 
 	PMD_CPP_LOG(DEBUG, "%s: count %zu and offset %jd\n", __func__, count,
-			   offset);
+			offset);
 	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %jd\n", __func__,
-			   cpp_id, nfp_offset);
+			cpp_id, nfp_offset);
 
 	/* Adjust length if not aligned */
 	if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
-	    (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
+			(nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
 		curlen = NFP_CPP_MEMIO_BOUNDARY -
-			(nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
+				(nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
 	}
 
 	while (count > 0) {
 		area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
-						    nfp_offset, curlen);
+				nfp_offset, curlen);
 		if (area == NULL) {
 			PMD_CPP_LOG(ERR, "area alloc failed");
 			return -EIO;
@@ -285,13 +286,12 @@  nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
 				return -EIO;
 			}
 			PMD_CPP_LOG(DEBUG, "%s: sending %u of %zu\n", __func__,
-					   len, count);
+					len, count);
 
 			err = send(sockfd, tmpbuf, len, 0);
 			if (err != (int)len) {
-				PMD_CPP_LOG(ERR,
-					"error when sending: %d of %zu",
-					err, count);
+				PMD_CPP_LOG(ERR, "error when sending: %d of %zu",
+						err, count);
 				nfp_cpp_area_release(area);
 				nfp_cpp_area_free(area);
 				return -EIO;
@@ -304,7 +304,7 @@  nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
 
 		count -= pos;
 		curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
-			NFP_CPP_MEMIO_BOUNDARY : count;
+				NFP_CPP_MEMIO_BOUNDARY : count;
 	}
 	return 0;
 }
@@ -316,7 +316,8 @@  nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
  * does not require any CPP access at all.
  */
 static int
-nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp)
+nfp_cpp_bridge_serve_ioctl(int sockfd,
+		struct nfp_cpp *cpp)
 {
 	uint32_t cmd, ident_size, tmp;
 	int err;
@@ -395,7 +396,7 @@  nfp_cpp_bridge_service_func(void *args)
 	strcpy(address.sa_data, "/tmp/nfp_cpp");
 
 	ret = bind(sockfd, (const struct sockaddr *)&address,
-		   sizeof(struct sockaddr));
+			sizeof(struct sockaddr));
 	if (ret < 0) {
 		PMD_CPP_LOG(ERR, "bind error (%d). Service failed", errno);
 		close(sockfd);
@@ -426,8 +427,7 @@  nfp_cpp_bridge_service_func(void *args)
 		while (1) {
 			ret = recv(datafd, &op, 4, 0);
 			if (ret <= 0) {
-				PMD_CPP_LOG(DEBUG, "%s: socket close\n",
-						   __func__);
+				PMD_CPP_LOG(DEBUG, "%s: socket close\n", __func__);
 				break;
 			}
 
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 12feec8eb4..65473d87e8 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -22,7 +22,8 @@ 
 #include "nfp_logs.h"
 
 static int
-nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, int port)
+nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
+		int port)
 {
 	struct nfp_eth_table *nfp_eth_table;
 	struct nfp_net_hw *hw = NULL;
@@ -70,21 +71,20 @@  nfp_net_start(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.intr_conf.rxq != 0) {
 		if (app_fw_nic->multiport) {
 			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
-					  "with NFP multiport PF");
+					"with NFP multiport PF");
 				return -EINVAL;
 		}
-		if (rte_intr_type_get(intr_handle) ==
-						RTE_INTR_HANDLE_UIO) {
+		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
 			/*
 			 * Better not to share LSC with RX interrupts.
 			 * Unregistering LSC interrupt handler
 			 */
 			rte_intr_callback_unregister(pci_dev->intr_handle,
-				nfp_net_dev_interrupt_handler, (void *)dev);
+					nfp_net_dev_interrupt_handler, (void *)dev);
 
 			if (dev->data->nb_rx_queues > 1) {
 				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
-					     "supports 1 queue with UIO");
+						"supports 1 queue with UIO");
 				return -EIO;
 			}
 		}
@@ -162,8 +162,7 @@  nfp_net_start(struct rte_eth_dev *dev)
 		/* Configure the physical port up */
 		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
 	else
-		nfp_eth_set_configured(dev->process_private,
-				       hw->nfp_idx, 1);
+		nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1);
 
 	hw->ctrl = new_ctrl;
 
@@ -209,8 +208,7 @@  nfp_net_stop(struct rte_eth_dev *dev)
 		/* Configure the physical port down */
 		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
 	else
-		nfp_eth_set_configured(dev->process_private,
-				       hw->nfp_idx, 0);
+		nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);
 
 	return 0;
 }
@@ -229,8 +227,7 @@  nfp_net_set_link_up(struct rte_eth_dev *dev)
 		/* Configure the physical port down */
 		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
 	else
-		return nfp_eth_set_configured(dev->process_private,
-					      hw->nfp_idx, 1);
+		return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1);
 }
 
 /* Set the link down. */
@@ -247,8 +244,7 @@  nfp_net_set_link_down(struct rte_eth_dev *dev)
 		/* Configure the physical port down */
 		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
 	else
-		return nfp_eth_set_configured(dev->process_private,
-					      hw->nfp_idx, 0);
+		return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);
 }
 
 /* Reset and stop device. The device can not be restarted. */
@@ -287,8 +283,7 @@  nfp_net_close(struct rte_eth_dev *dev)
 	nfp_ipsec_uninit(dev);
 
 	/* Cancel possible impending LSC work here before releasing the port*/
-	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
-			     (void *)dev);
+	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
 
 	/* Only free PF resources after all physical ports have been closed */
 	/* Mark this port as unused and free device priv resources*/
@@ -525,8 +520,7 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 
 	hw->ctrl_bar = pci_dev->mem_resource[0].addr;
 	if (hw->ctrl_bar == NULL) {
-		PMD_DRV_LOG(ERR,
-			"hw->ctrl_bar is NULL. BAR0 not configured");
+		PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured");
 		return -ENODEV;
 	}
 
@@ -592,7 +586,7 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 	eth_dev->data->dev_private = hw;
 
 	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
-		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
+			hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
 
 	nfp_net_cfg_queue_setup(hw);
 	hw->mtu = RTE_ETHER_MTU;
@@ -607,8 +601,7 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 	rte_spinlock_init(&hw->reconfig_lock);
 
 	/* Allocating memory for mac addr */
-	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
-					       RTE_ETHER_ADDR_LEN, 0);
+	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
 		return -ENOMEM;
@@ -634,10 +627,10 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
 	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
-		     "mac=" RTE_ETHER_ADDR_PRT_FMT,
-		     eth_dev->data->port_id, pci_dev->id.vendor_id,
-		     pci_dev->id.device_id,
-		     RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
+			"mac=" RTE_ETHER_ADDR_PRT_FMT,
+			eth_dev->data->port_id, pci_dev->id.vendor_id,
+			pci_dev->id.device_id,
+			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
 
 	/* Registering LSC interrupt handler */
 	rte_intr_callback_register(pci_dev->intr_handle,
@@ -653,7 +646,9 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
 
 static int
-nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
+nfp_fw_upload(struct rte_pci_device *dev,
+		struct nfp_nsp *nsp,
+		char *card)
 {
 	struct nfp_cpp *cpp = nfp_nsp_cpp(nsp);
 	void *fw_buf;
@@ -675,11 +670,10 @@  nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
 	/* First try to find a firmware image specific for this device */
 	snprintf(serial, sizeof(serial),
 			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
-		cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
-		cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
+			cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
+			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
 
-	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
-			serial);
+	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, serial);
 
 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
 	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
@@ -703,7 +697,7 @@  nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
 
 load_fw:
 	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
-		fw_name, fsize);
+			fw_name, fsize);
 	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
 	nfp_nsp_load_fw(nsp, fw_buf, fsize);
 	PMD_DRV_LOG(INFO, "Done");
@@ -737,7 +731,7 @@  nfp_fw_setup(struct rte_pci_device *dev,
 
 	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
 		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
-			nfp_eth_table->count);
+				nfp_eth_table->count);
 		return -EIO;
 	}
 
@@ -829,7 +823,7 @@  nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
 	numa_node = rte_socket_id();
 	for (i = 0; i < app_fw_nic->total_phyports; i++) {
 		snprintf(port_name, sizeof(port_name), "%s_port%d",
-			 pf_dev->pci_dev->device.name, i);
+				pf_dev->pci_dev->device.name, i);
 
 		/* Allocate a eth_dev for this phyport */
 		eth_dev = rte_eth_dev_allocate(port_name);
@@ -839,8 +833,8 @@  nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
 		}
 
 		/* Allocate memory for this phyport */
-		eth_dev->data->dev_private =
-			rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
+		eth_dev->data->dev_private = rte_zmalloc_socket(port_name,
+				sizeof(struct nfp_net_hw),
 				RTE_CACHE_LINE_SIZE, numa_node);
 		if (eth_dev->data->dev_private == NULL) {
 			ret = -ENOMEM;
@@ -961,8 +955,7 @@  nfp_pf_init(struct rte_pci_device *pci_dev)
 	/* Now the symbol table should be there */
 	sym_tbl = nfp_rtsym_table_read(cpp);
 	if (sym_tbl == NULL) {
-		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
-				" symbol table");
+		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
 		ret = -EIO;
 		goto eth_table_cleanup;
 	}
@@ -1144,8 +1137,7 @@  nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 	 */
 	sym_tbl = nfp_rtsym_table_read(cpp);
 	if (sym_tbl == NULL) {
-		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
-				" symbol table");
+		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
 		return -EIO;
 	}
 
@@ -1198,27 +1190,27 @@  nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
-			       PCI_DEVICE_ID_NFP3800_PF_NIC)
+				PCI_DEVICE_ID_NFP3800_PF_NIC)
 	},
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
-			       PCI_DEVICE_ID_NFP4000_PF_NIC)
+				PCI_DEVICE_ID_NFP4000_PF_NIC)
 	},
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
-			       PCI_DEVICE_ID_NFP6000_PF_NIC)
+				PCI_DEVICE_ID_NFP6000_PF_NIC)
 	},
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
-			       PCI_DEVICE_ID_NFP3800_PF_NIC)
+				PCI_DEVICE_ID_NFP3800_PF_NIC)
 	},
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
-			       PCI_DEVICE_ID_NFP4000_PF_NIC)
+				PCI_DEVICE_ID_NFP4000_PF_NIC)
 	},
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
-			       PCI_DEVICE_ID_NFP6000_PF_NIC)
+				PCI_DEVICE_ID_NFP6000_PF_NIC)
 	},
 	{
 		.vendor_id = 0,
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index c8d6b0461b..ac6a10685d 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -50,18 +50,17 @@  nfp_netvf_start(struct rte_eth_dev *dev)
 
 	/* check and configure queue intr-vector mapping */
 	if (dev->data->dev_conf.intr_conf.rxq != 0) {
-		if (rte_intr_type_get(intr_handle) ==
-						RTE_INTR_HANDLE_UIO) {
+		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
 			/*
 			 * Better not to share LSC with RX interrupts.
 			 * Unregistering LSC interrupt handler
 			 */
 			rte_intr_callback_unregister(pci_dev->intr_handle,
-				nfp_net_dev_interrupt_handler, (void *)dev);
+					nfp_net_dev_interrupt_handler, (void *)dev);
 
 			if (dev->data->nb_rx_queues > 1) {
 				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
-					     "supports 1 queue with UIO");
+						"supports 1 queue with UIO");
 				return -EIO;
 			}
 		}
@@ -190,12 +189,10 @@  nfp_netvf_close(struct rte_eth_dev *dev)
 
 	/* unregister callback func from eal lib */
 	rte_intr_callback_unregister(pci_dev->intr_handle,
-				     nfp_net_dev_interrupt_handler,
-				     (void *)dev);
+			nfp_net_dev_interrupt_handler, (void *)dev);
 
 	/* Cancel possible impending LSC work here before releasing the port*/
-	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
-			     (void *)dev);
+	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
 
 	/*
 	 * The ixgbe PMD disables the pcie master on the
@@ -282,8 +279,7 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 
 	hw->ctrl_bar = pci_dev->mem_resource[0].addr;
 	if (hw->ctrl_bar == NULL) {
-		PMD_DRV_LOG(ERR,
-			"hw->ctrl_bar is NULL. BAR0 not configured");
+		PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured");
 		return -ENODEV;
 	}
 
@@ -301,8 +297,8 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
-	hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
-			nfp_net_xstats_size(eth_dev), 0);
+	hw->eth_xstats_base = rte_malloc("rte_eth_xstat",
+			sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0);
 	if (hw->eth_xstats_base == NULL) {
 		PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!",
 				pci_dev->device.name);
@@ -318,13 +314,11 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
 	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
 
-	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
-		     tx_bar_off;
-	hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
-		     rx_bar_off;
+	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
+	hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
 
 	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
-		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
+			hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
 
 	nfp_net_cfg_queue_setup(hw);
 	hw->mtu = RTE_ETHER_MTU;
@@ -339,8 +333,7 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 	rte_spinlock_init(&hw->reconfig_lock);
 
 	/* Allocating memory for mac addr */
-	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
-					       RTE_ETHER_ADDR_LEN, 0);
+	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
 		err = -ENOMEM;
@@ -351,8 +344,7 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 
 	tmp_ether_addr = &hw->mac_addr;
 	if (rte_is_valid_assigned_ether_addr(tmp_ether_addr) == 0) {
-		PMD_INIT_LOG(INFO, "Using random mac address for port %d",
-				   port);
+		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
 		/* Using random mac addresses for VFs */
 		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
 		nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
@@ -367,16 +359,15 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
 	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
-		     "mac=" RTE_ETHER_ADDR_PRT_FMT,
-		     eth_dev->data->port_id, pci_dev->id.vendor_id,
-		     pci_dev->id.device_id,
-		     RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
+			"mac=" RTE_ETHER_ADDR_PRT_FMT,
+			eth_dev->data->port_id, pci_dev->id.vendor_id,
+			pci_dev->id.device_id,
+			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		/* Registering LSC interrupt handler */
 		rte_intr_callback_register(pci_dev->intr_handle,
-					   nfp_net_dev_interrupt_handler,
-					   (void *)eth_dev);
+				nfp_net_dev_interrupt_handler, (void *)eth_dev);
 		/* Telling the firmware about the LSC interrupt entry */
 		nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
 		/* Recording current stats counters values */
@@ -394,39 +385,42 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
-			       PCI_DEVICE_ID_NFP3800_VF_NIC)
+				PCI_DEVICE_ID_NFP3800_VF_NIC)
 	},
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
-			       PCI_DEVICE_ID_NFP6000_VF_NIC)
+				PCI_DEVICE_ID_NFP6000_VF_NIC)
 	},
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
-			       PCI_DEVICE_ID_NFP3800_VF_NIC)
+				PCI_DEVICE_ID_NFP3800_VF_NIC)
 	},
 	{
 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
-			       PCI_DEVICE_ID_NFP6000_VF_NIC)
+				PCI_DEVICE_ID_NFP6000_VF_NIC)
 	},
 	{
 		.vendor_id = 0,
 	},
 };
 
-static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
+static int
+nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
 {
 	/* VF cleanup, just free private port data */
 	return nfp_netvf_close(eth_dev);
 }
 
-static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-	struct rte_pci_device *pci_dev)
+static int
+eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		struct rte_pci_device *pci_dev)
 {
 	return rte_eth_dev_pci_generic_probe(pci_dev,
-		sizeof(struct nfp_net_adapter), nfp_netvf_init);
+			sizeof(struct nfp_net_adapter), nfp_netvf_init);
 }
 
-static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
+static int
+eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
 {
 	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
 }
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 3ea6813d9a..6d9a1c249f 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -156,7 +156,8 @@  nfp_flow_dev_to_priv(struct rte_eth_dev *dev)
 }
 
 static int
-nfp_mask_id_alloc(struct nfp_flow_priv *priv, uint8_t *mask_id)
+nfp_mask_id_alloc(struct nfp_flow_priv *priv,
+		uint8_t *mask_id)
 {
 	uint8_t temp_id;
 	uint8_t freed_id;
@@ -188,7 +189,8 @@  nfp_mask_id_alloc(struct nfp_flow_priv *priv, uint8_t *mask_id)
 }
 
 static int
-nfp_mask_id_free(struct nfp_flow_priv *priv, uint8_t mask_id)
+nfp_mask_id_free(struct nfp_flow_priv *priv,
+		uint8_t mask_id)
 {
 	struct circ_buf *ring;
 
@@ -703,7 +705,8 @@  nfp_tun_check_ip_off_del(struct nfp_flower_representor *repr,
 }
 
 static void
-nfp_flower_compile_meta_tci(char *mbuf_off, struct nfp_fl_key_ls *key_layer)
+nfp_flower_compile_meta_tci(char *mbuf_off,
+		struct nfp_fl_key_ls *key_layer)
 {
 	struct nfp_flower_meta_tci *tci_meta;
 
@@ -714,7 +717,8 @@  nfp_flower_compile_meta_tci(char *mbuf_off, struct nfp_fl_key_ls *key_layer)
 }
 
 static void
-nfp_flower_update_meta_tci(char *exact, uint8_t mask_id)
+nfp_flower_update_meta_tci(char *exact,
+		uint8_t mask_id)
 {
 	struct nfp_flower_meta_tci *meta_tci;
 
@@ -723,7 +727,8 @@  nfp_flower_update_meta_tci(char *exact, uint8_t mask_id)
 }
 
 static void
-nfp_flower_compile_ext_meta(char *mbuf_off, struct nfp_fl_key_ls *key_layer)
+nfp_flower_compile_ext_meta(char *mbuf_off,
+		struct nfp_fl_key_ls *key_layer)
 {
 	struct nfp_flower_ext_meta *ext_meta;
 
@@ -1436,14 +1441,14 @@  nfp_flow_merge_tcp(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
 	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
 	if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0) {
 		ipv4  = (struct nfp_flower_ipv4 *)
-			(*mbuf_off - sizeof(struct nfp_flower_ipv4));
+				(*mbuf_off - sizeof(struct nfp_flower_ipv4));
 		ports = (struct nfp_flower_tp_ports *)
-			((char *)ipv4 - sizeof(struct nfp_flower_tp_ports));
+				((char *)ipv4 - sizeof(struct nfp_flower_tp_ports));
 	} else { /* IPv6 */
 		ipv6  = (struct nfp_flower_ipv6 *)
-			(*mbuf_off - sizeof(struct nfp_flower_ipv6));
+				(*mbuf_off - sizeof(struct nfp_flower_ipv6));
 		ports = (struct nfp_flower_tp_ports *)
-			((char *)ipv6 - sizeof(struct nfp_flower_tp_ports));
+				((char *)ipv6 - sizeof(struct nfp_flower_tp_ports));
 	}
 
 	mask = item->mask ? item->mask : proc->mask_default;
@@ -1514,10 +1519,10 @@  nfp_flow_merge_udp(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
 	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
 	if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0) {
 		ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv4) -
-			sizeof(struct nfp_flower_tp_ports);
+				sizeof(struct nfp_flower_tp_ports);
 	} else {/* IPv6 */
 		ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv6) -
-			sizeof(struct nfp_flower_tp_ports);
+				sizeof(struct nfp_flower_tp_ports);
 	}
 	ports = (struct nfp_flower_tp_ports *)ports_off;
 
@@ -1557,10 +1562,10 @@  nfp_flow_merge_sctp(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
 	meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
 	if ((meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV4) != 0) {
 		ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv4) -
-			sizeof(struct nfp_flower_tp_ports);
+				sizeof(struct nfp_flower_tp_ports);
 	} else { /* IPv6 */
 		ports_off = *mbuf_off - sizeof(struct nfp_flower_ipv6) -
-			sizeof(struct nfp_flower_tp_ports);
+				sizeof(struct nfp_flower_tp_ports);
 	}
 	ports = (struct nfp_flower_tp_ports *)ports_off;
 
@@ -1951,9 +1956,8 @@  nfp_flow_item_check(const struct rte_flow_item *item,
 		return 0;
 	}
 
-	mask = item->mask ?
-		(const uint8_t *)item->mask :
-		(const uint8_t *)proc->mask_default;
+	mask = item->mask ? (const uint8_t *)item->mask :
+			(const uint8_t *)proc->mask_default;
 
 	/*
 	 * Single-pass check to make sure that:
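
[Editor's note: the nfp_flow.c changes are the statement-level counterpart: ternaries, casts, and pointer arithmetic that spill onto a second line indent the continuation by two tabs instead of aligning under the first operand. A small self-contained sketch; the struct and helper names are illustrative only, not taken from the driver:]

	#include <stddef.h>
	#include <stdint.h>

	struct example_ipv4 {
		uint32_t addr;
	};

	/* A wrapped ternary continues at a two-tab indent. */
	static const uint8_t *
	example_pick_mask(const uint8_t *mask,
			const uint8_t *mask_default)
	{
		return mask != NULL ? (const uint8_t *)mask :
				(const uint8_t *)mask_default;
	}

	/* So does a cast plus pointer adjustment split across lines. */
	static struct example_ipv4 *
	example_step_back(char *mbuf_off)
	{
		return (struct example_ipv4 *)
				(mbuf_off - sizeof(struct example_ipv4));
	}
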
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 4528417559..7885166753 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -158,8 +158,9 @@  struct nfp_ptype_parsed {
 
 /* set mbuf checksum flags based on RX descriptor flags */
 void
-nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
-		 struct rte_mbuf *mb)
+nfp_net_rx_cksum(struct nfp_net_rxq *rxq,
+		struct nfp_net_rx_desc *rxd,
+		struct rte_mbuf *mb)
 {
 	struct nfp_net_hw *hw = rxq->hw;
 
@@ -192,7 +193,7 @@  nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
 	unsigned int i;
 
 	PMD_RX_LOG(DEBUG, "Fill Rx Freelist for %u descriptors",
-		   rxq->rx_count);
+			rxq->rx_count);
 
 	for (i = 0; i < rxq->rx_count; i++) {
 		struct nfp_net_rx_desc *rxd;
@@ -218,8 +219,7 @@  nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
 	rte_wmb();
 
 	/* Not advertising the whole ring as the firmware gets confused if so */
-	PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
-		   rxq->rx_count - 1);
+	PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u", rxq->rx_count - 1);
 
 	nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
 
@@ -521,7 +521,8 @@  nfp_net_parse_meta(struct nfp_net_rx_desc *rxds,
  *   Mbuf to set the packet type.
  */
 static void
-nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype, struct rte_mbuf *mb)
+nfp_net_set_ptype(const struct nfp_ptype_parsed *nfp_ptype,
+		struct rte_mbuf *mb)
 {
 	uint32_t mbuf_ptype = RTE_PTYPE_L2_ETHER;
 	uint8_t nfp_tunnel_ptype = nfp_ptype->tunnel_ptype;
@@ -678,7 +679,9 @@  nfp_net_parse_ptype(struct nfp_net_rx_desc *rxds,
  */
 
 uint16_t
-nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+nfp_net_recv_pkts(void *rx_queue,
+		struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
 {
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_rx_desc *rxds;
@@ -728,8 +731,7 @@  nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		 */
 		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
 		if (unlikely(new_mb == NULL)) {
-			PMD_RX_LOG(DEBUG,
-			"RX mbuf alloc failed port_id=%u queue_id=%hu",
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%hu",
 					rxq->port_id, rxq->qidx);
 			nfp_net_mbuf_alloc_failed(rxq);
 			break;
@@ -743,29 +745,28 @@  nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxb->mbuf = new_mb;
 
 		PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
-			   rxds->rxd.data_len, rxq->mbuf_size);
+				rxds->rxd.data_len, rxq->mbuf_size);
 
 		/* Size of this segment */
 		mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
 		/* Size of the whole packet. We just support 1 segment */
 		mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
 
-		if (unlikely((mb->data_len + hw->rx_offset) >
-			     rxq->mbuf_size)) {
+		if (unlikely((mb->data_len + hw->rx_offset) > rxq->mbuf_size)) {
 			/*
 			 * This should not happen and the user has the
 			 * responsibility of avoiding it. But we have
 			 * to give some info about the error
 			 */
 			PMD_RX_LOG(ERR,
-				"mbuf overflow likely due to the RX offset.\n"
-				"\t\tYour mbuf size should have extra space for"
-				" RX offset=%u bytes.\n"
-				"\t\tCurrently you just have %u bytes available"
-				" but the received packet is %u bytes long",
-				hw->rx_offset,
-				rxq->mbuf_size - hw->rx_offset,
-				mb->data_len);
+					"mbuf overflow likely due to the RX offset.\n"
+					"\t\tYour mbuf size should have extra space for"
+					" RX offset=%u bytes.\n"
+					"\t\tCurrently you just have %u bytes available"
+					" but the received packet is %u bytes long",
+					hw->rx_offset,
+					rxq->mbuf_size - hw->rx_offset,
+					mb->data_len);
 			rte_pktmbuf_free(mb);
 			break;
 		}
@@ -774,8 +775,7 @@  nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (hw->rx_offset != 0)
 			mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
 		else
-			mb->data_off = RTE_PKTMBUF_HEADROOM +
-				       NFP_DESC_META_LEN(rxds);
+			mb->data_off = RTE_PKTMBUF_HEADROOM + NFP_DESC_META_LEN(rxds);
 
 		/* No scatter mode supported */
 		mb->nb_segs = 1;
@@ -817,7 +817,7 @@  nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		return nb_hold;
 
 	PMD_RX_LOG(DEBUG, "RX  port_id=%hu queue_id=%hu, %hu packets received",
-		   rxq->port_id, rxq->qidx, avail);
+			rxq->port_id, rxq->qidx, avail);
 
 	nb_hold += rxq->nb_rx_hold;
 
@@ -828,7 +828,7 @@  nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	rte_wmb();
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port=%hu queue=%hu nb_hold=%hu avail=%hu",
-			   rxq->port_id, rxq->qidx, nb_hold, avail);
+				rxq->port_id, rxq->qidx, nb_hold, avail);
 		nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
 		nb_hold = 0;
 	}
@@ -854,7 +854,8 @@  nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
 }
 
 void
-nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
+nfp_net_rx_queue_release(struct rte_eth_dev *dev,
+		uint16_t queue_idx)
 {
 	struct nfp_net_rxq *rxq = dev->data->rx_queues[queue_idx];
 
@@ -876,10 +877,11 @@  nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
 
 int
 nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
-		       uint16_t queue_idx, uint16_t nb_desc,
-		       unsigned int socket_id,
-		       const struct rte_eth_rxconf *rx_conf,
-		       struct rte_mempool *mp)
+		uint16_t queue_idx,
+		uint16_t nb_desc,
+		unsigned int socket_id,
+		const struct rte_eth_rxconf *rx_conf,
+		struct rte_mempool *mp)
 {
 	uint16_t min_rx_desc;
 	uint16_t max_rx_desc;
@@ -897,7 +899,7 @@  nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	/* Validating number of descriptors */
 	rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc);
 	if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
-	    nb_desc > max_rx_desc || nb_desc < min_rx_desc) {
+			nb_desc > max_rx_desc || nb_desc < min_rx_desc) {
 		PMD_DRV_LOG(ERR, "Wrong nb_desc value");
 		return -EINVAL;
 	}
@@ -913,7 +915,7 @@  nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 
 	/* Allocating rx queue data structure */
 	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
-				 RTE_CACHE_LINE_SIZE, socket_id);
+			RTE_CACHE_LINE_SIZE, socket_id);
 	if (rxq == NULL)
 		return -ENOMEM;
 
@@ -943,9 +945,8 @@  nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	 * resizing in later calls to the queue setup function.
 	 */
 	tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
-				   sizeof(struct nfp_net_rx_desc) *
-				   max_rx_desc, NFP_MEMZONE_ALIGN,
-				   socket_id);
+			sizeof(struct nfp_net_rx_desc) * max_rx_desc,
+			NFP_MEMZONE_ALIGN, socket_id);
 
 	if (tz == NULL) {
 		PMD_DRV_LOG(ERR, "Error allocating rx dma");
@@ -960,8 +961,8 @@  nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 
 	/* mbuf pointers array for referencing mbufs linked to RX descriptors */
 	rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
-					 sizeof(*rxq->rxbufs) * nb_desc,
-					 RTE_CACHE_LINE_SIZE, socket_id);
+			sizeof(*rxq->rxbufs) * nb_desc, RTE_CACHE_LINE_SIZE,
+			socket_id);
 	if (rxq->rxbufs == NULL) {
 		nfp_net_rx_queue_release(dev, queue_idx);
 		dev->data->rx_queues[queue_idx] = NULL;
@@ -969,7 +970,7 @@  nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
-		   rxq->rxbufs, rxq->rxds, (unsigned long)rxq->dma);
+			rxq->rxbufs, rxq->rxds, (unsigned long)rxq->dma);
 
 	nfp_net_reset_rx_queue(rxq);
 
@@ -998,15 +999,15 @@  nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
 	int todo;
 
 	PMD_TX_LOG(DEBUG, "queue %hu. Check for descriptor with a complete"
-		   " status", txq->qidx);
+			" status", txq->qidx);
 
 	/* Work out how many packets have been sent */
 	qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
 
 	if (qcp_rd_p == txq->rd_p) {
 		PMD_TX_LOG(DEBUG, "queue %hu: It seems harrier is not sending "
-			   "packets (%u, %u)", txq->qidx,
-			   qcp_rd_p, txq->rd_p);
+				"packets (%u, %u)", txq->qidx,
+				qcp_rd_p, txq->rd_p);
 		return 0;
 	}
 
@@ -1016,7 +1017,7 @@  nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
 		todo = qcp_rd_p + txq->tx_count - txq->rd_p;
 
 	PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u",
-		   qcp_rd_p, txq->rd_p, txq->rd_p);
+			qcp_rd_p, txq->rd_p, txq->rd_p);
 
 	if (todo == 0)
 		return todo;
@@ -1045,7 +1046,8 @@  nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
 }
 
 void
-nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
+nfp_net_tx_queue_release(struct rte_eth_dev *dev,
+		uint16_t queue_idx)
 {
 	struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];
 
diff --git a/drivers/net/nfp/nfp_rxtx.h b/drivers/net/nfp/nfp_rxtx.h
index 3c7138f7d6..9a30ebd89e 100644
--- a/drivers/net/nfp/nfp_rxtx.h
+++ b/drivers/net/nfp/nfp_rxtx.h
@@ -234,17 +234,17 @@  nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
 }
 
 void nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
-		 struct rte_mbuf *mb);
+		struct rte_mbuf *mb);
 int nfp_net_rx_freelist_setup(struct rte_eth_dev *dev);
 uint32_t nfp_net_rx_queue_count(void *rx_queue);
 uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-				  uint16_t nb_pkts);
+		uint16_t nb_pkts);
 void nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 void nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq);
 int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-				  uint16_t nb_desc, unsigned int socket_id,
-				  const struct rte_eth_rxconf *rx_conf,
-				  struct rte_mempool *mp);
+		uint16_t nb_desc, unsigned int socket_id,
+		const struct rte_eth_rxconf *rx_conf,
+		struct rte_mempool *mp);
 void nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 void nfp_net_reset_tx_queue(struct nfp_net_txq *txq);
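
[Editor's note: the nfp_rxtx.h hunks show how the same indent applies to prototypes: a declaration keeps the return type and name on the first line, and overflow parameters wrap to a two-tab indent rather than aligning with the opening parenthesis. A hypothetical declaration in that shape:]

	#include <stdint.h>

	struct example_rxq;
	struct example_mempool;

	/* Hypothetical prototype: overflow parameters wrap by two tabs. */
	int example_rx_queue_setup(struct example_rxq *rxq, uint16_t queue_idx,
			uint16_t nb_desc, unsigned int socket_id,
			struct example_mempool *mp);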