[v3,07/11] net/nfp: standardize the blank character

Message ID 20231013060653.1006410-8-chaoyong.he@corigine.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Headers
Series Unify the PMD coding style |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Chaoyong He Oct. 13, 2023, 6:06 a.m. UTC
  Use space characters for alignment instead of TAB characters.
There should be exactly one blank line separating each block of logic, no more, no less.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
---
 drivers/net/nfp/nfp_common.c           | 39 +++++++++--------
 drivers/net/nfp/nfp_common.h           |  6 +--
 drivers/net/nfp/nfp_cpp_bridge.c       |  5 +++
 drivers/net/nfp/nfp_ctrl.h             |  6 +--
 drivers/net/nfp/nfp_ethdev.c           | 58 +++++++++++++-------------
 drivers/net/nfp/nfp_ethdev_vf.c        | 49 +++++++++++-----------
 drivers/net/nfp/nfp_flow.c             | 27 +++++++-----
 drivers/net/nfp/nfp_flow.h             |  7 ++++
 drivers/net/nfp/nfp_rxtx.c             |  7 ++--
 drivers/net/nfp/nfpcore/nfp_resource.h |  2 +-
 10 files changed, 114 insertions(+), 92 deletions(-)
  

Patch

diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index 130f004b4d..a102c6f272 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -36,6 +36,7 @@  enum nfp_xstat_group {
 	NFP_XSTAT_GROUP_NET,
 	NFP_XSTAT_GROUP_MAC
 };
+
 struct nfp_xstat {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
 	int offset;
@@ -184,6 +185,7 @@  nfp_net_notify_port_speed(struct nfp_net_hw *hw,
 		nn_cfg_writew(hw, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
 		return;
 	}
+
 	/*
 	 * Link is up so write the link speed from the eth_table to
 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
@@ -223,17 +225,21 @@  __nfp_net_reconfig(struct nfp_net_hw *hw,
 		new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
 		if (new == 0)
 			break;
+
 		if ((new & NFP_NET_CFG_UPDATE_ERR) != 0) {
 			PMD_DRV_LOG(ERR, "Reconfig error: %#08x", new);
 			return -1;
 		}
+
 		if (cnt >= NFP_NET_POLL_TIMEOUT) {
 			PMD_DRV_LOG(ERR, "Reconfig timeout for %#08x after %u ms",
 					update, cnt);
 			return -EIO;
 		}
+
 		nanosleep(&wait, 0); /* Waiting for a 1ms */
 	}
+
 	PMD_DRV_LOG(DEBUG, "Ack DONE");
 	return 0;
 }
@@ -387,7 +393,6 @@  nfp_net_configure(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
@@ -560,11 +565,13 @@  nfp_net_set_mac_addr(struct rte_eth_dev *dev,
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
 		ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
+
 	/* Signal the NIC about the change */
 	if (nfp_net_reconfig(hw, ctrl, update) != 0) {
 		PMD_DRV_LOG(ERR, "MAC address update failed");
 		return -EIO;
 	}
+
 	return 0;
 }
 
@@ -832,13 +839,11 @@  nfp_net_stats_get(struct rte_eth_dev *dev,
 
 		nfp_dev_stats.q_ipackets[i] =
 				nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
-
 		nfp_dev_stats.q_ipackets[i] -=
 				hw->eth_stats_base.q_ipackets[i];
 
 		nfp_dev_stats.q_ibytes[i] =
 				nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
-
 		nfp_dev_stats.q_ibytes[i] -=
 				hw->eth_stats_base.q_ibytes[i];
 	}
@@ -850,42 +855,34 @@  nfp_net_stats_get(struct rte_eth_dev *dev,
 
 		nfp_dev_stats.q_opackets[i] =
 				nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
-
 		nfp_dev_stats.q_opackets[i] -= hw->eth_stats_base.q_opackets[i];
 
 		nfp_dev_stats.q_obytes[i] =
 				nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
-
 		nfp_dev_stats.q_obytes[i] -= hw->eth_stats_base.q_obytes[i];
 	}
 
 	nfp_dev_stats.ipackets = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
-
 	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
 
 	nfp_dev_stats.ibytes = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
-
 	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
 
 	nfp_dev_stats.opackets =
 			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
-
 	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
 
 	nfp_dev_stats.obytes =
 			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
-
 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
 
 	/* Reading general device stats */
 	nfp_dev_stats.ierrors =
 			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
-
 	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
 
 	nfp_dev_stats.oerrors =
 			nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
-
 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
 
 	/* RX ring mbuf allocation failures */
@@ -893,7 +890,6 @@  nfp_net_stats_get(struct rte_eth_dev *dev,
 
 	nfp_dev_stats.imissed =
 			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
-
 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
 
 	if (stats != NULL) {
@@ -981,6 +977,7 @@  nfp_net_xstats_size(const struct rte_eth_dev *dev)
 			if (nfp_net_xstats[count].group == NFP_XSTAT_GROUP_MAC)
 				break;
 		}
+
 		return count;
 	}
 
@@ -1154,6 +1151,7 @@  nfp_net_xstats_reset(struct rte_eth_dev *dev)
 		hw->eth_xstats_base[id].id = id;
 		hw->eth_xstats_base[id].value = nfp_net_xstats_value(dev, id, true);
 	}
+
 	/* Successfully reset xstats, now call function to reset basic stats. */
 	return nfp_net_stats_reset(dev);
 }
@@ -1201,6 +1199,7 @@  nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
+
 	/**
 	 * The maximum rx packet length (max_rx_pktlen) is set to the
 	 * maximum supported frame size that the NFP can handle. This
@@ -1368,6 +1367,7 @@  nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
 
 	if (dev->rx_pkt_burst == nfp_net_recv_pkts)
 		return ptypes;
+
 	return NULL;
 }
 
@@ -1381,7 +1381,6 @@  nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
 		base = 1;
 
@@ -1402,7 +1401,6 @@  nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
 		base = 1;
 
@@ -1619,11 +1617,11 @@  nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
-
 		if (mask == 0)
 			continue;
 
 		reta = 0;
+
 		/* If all 4 entries were set, don't need read RETA register */
 		if (mask != 0xF)
 			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
@@ -1631,13 +1629,17 @@  nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 		for (j = 0; j < 4; j++) {
 			if ((mask & (0x1 << j)) == 0)
 				continue;
+
 			/* Clearing the entry bits */
 			if (mask != 0xF)
 				reta &= ~(0xFF << (8 * j));
+
 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
 		}
+
 		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
 	}
+
 	return 0;
 }
 
@@ -1682,7 +1684,6 @@  nfp_net_reta_query(struct rte_eth_dev *dev,
 	struct nfp_net_hw *hw;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
 		return -EINVAL;
 
@@ -1710,10 +1711,12 @@  nfp_net_reta_query(struct rte_eth_dev *dev,
 		for (j = 0; j < 4; j++) {
 			if ((mask & (0x1 << j)) == 0)
 				continue;
+
 			reta_conf[idx].reta[shift + j] =
 					(uint8_t)((reta >> (8 * j)) & 0xF);
 		}
 	}
+
 	return 0;
 }
 
@@ -1791,6 +1794,7 @@  nfp_net_rss_hash_update(struct rte_eth_dev *dev,
 			PMD_DRV_LOG(ERR, "RSS unsupported");
 			return -EINVAL;
 		}
+
 		return 0; /* Nothing to do */
 	}
 
@@ -1888,6 +1892,7 @@  nfp_net_rss_config_default(struct rte_eth_dev *dev)
 			queue %= rx_queues;
 		}
 	}
+
 	ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
 	if (ret != 0)
 		return ret;
@@ -1897,8 +1902,8 @@  nfp_net_rss_config_default(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(ERR, "Wrong rss conf");
 		return -EINVAL;
 	}
-	rss_conf = dev_conf->rx_adv_conf.rss_conf;
 
+	rss_conf = dev_conf->rx_adv_conf.rss_conf;
 	ret = nfp_net_rss_hash_write(dev, &rss_conf);
 
 	return ret;
diff --git a/drivers/net/nfp/nfp_common.h b/drivers/net/nfp/nfp_common.h
index 6a36e2b04c..5439865c5e 100644
--- a/drivers/net/nfp/nfp_common.h
+++ b/drivers/net/nfp/nfp_common.h
@@ -32,7 +32,7 @@ 
 #define DEFAULT_RX_HTHRESH      8
 #define DEFAULT_RX_WTHRESH      0
 
-#define DEFAULT_TX_RS_THRESH	32
+#define DEFAULT_TX_RS_THRESH    32
 #define DEFAULT_TX_FREE_THRESH  32
 #define DEFAULT_TX_PTHRESH      32
 #define DEFAULT_TX_HTHRESH      0
@@ -40,12 +40,12 @@ 
 #define DEFAULT_TX_RSBIT_THRESH 32
 
 /* Alignment for dma zones */
-#define NFP_MEMZONE_ALIGN	128
+#define NFP_MEMZONE_ALIGN       128
 
 #define NFP_QCP_QUEUE_ADDR_SZ   (0x800)
 
 /* Number of supported physical ports */
-#define NFP_MAX_PHYPORTS	12
+#define NFP_MAX_PHYPORTS        12
 
 /* Firmware application ID's */
 enum nfp_app_fw_id {
diff --git a/drivers/net/nfp/nfp_cpp_bridge.c b/drivers/net/nfp/nfp_cpp_bridge.c
index 8f5271cde9..bb2a6fdcda 100644
--- a/drivers/net/nfp/nfp_cpp_bridge.c
+++ b/drivers/net/nfp/nfp_cpp_bridge.c
@@ -191,6 +191,7 @@  nfp_cpp_bridge_serve_write(int sockfd,
 				nfp_cpp_area_free(area);
 				return -EIO;
 			}
+
 			err = nfp_cpp_area_write(area, pos, tmpbuf, len);
 			if (err < 0) {
 				PMD_CPP_LOG(ERR, "nfp_cpp_area_write error");
@@ -312,6 +313,7 @@  nfp_cpp_bridge_serve_read(int sockfd,
 		curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
 				NFP_CPP_MEMIO_BOUNDARY : count;
 	}
+
 	return 0;
 }
 
@@ -393,6 +395,7 @@  nfp_cpp_bridge_service_func(void *args)
 	struct timeval timeout = {1, 0};
 
 	unlink("/tmp/nfp_cpp");
+
 	sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
 	if (sockfd < 0) {
 		PMD_CPP_LOG(ERR, "socket creation error. Service failed");
@@ -456,8 +459,10 @@  nfp_cpp_bridge_service_func(void *args)
 			if (op == 0)
 				break;
 		}
+
 		close(datafd);
 	}
+
 	close(sockfd);
 
 	return 0;
diff --git a/drivers/net/nfp/nfp_ctrl.h b/drivers/net/nfp/nfp_ctrl.h
index cd0a2f92a8..5cc83ff3e6 100644
--- a/drivers/net/nfp/nfp_ctrl.h
+++ b/drivers/net/nfp/nfp_ctrl.h
@@ -208,8 +208,8 @@  struct nfp_net_fw_ver {
 /*
  * NFP6000/NFP4000 - Prepend configuration
  */
-#define NFP_NET_CFG_RX_OFFSET		0x0050
-#define NFP_NET_CFG_RX_OFFSET_DYNAMIC		0	/* Prepend mode */
+#define NFP_NET_CFG_RX_OFFSET           0x0050
+#define NFP_NET_CFG_RX_OFFSET_DYNAMIC          0    /* Prepend mode */
 
 /* Start anchor of the TLV area */
 #define NFP_NET_CFG_TLV_BASE            0x0058
@@ -442,7 +442,7 @@  struct nfp_net_fw_ver {
 #define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6    (NFP_MAC_STATS_BASE + 0x1f0)
 #define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7    (NFP_MAC_STATS_BASE + 0x1f8)
 
-#define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)
+#define NFP_PF_CSR_SLICE_SIZE    (32 * 1024)
 
 /*
  * General use mailbox area (0x1800 - 0x19ff)
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 1651ac2455..b65c2c1fe0 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -36,6 +36,7 @@  nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
 	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->mac_addr);
 
 	free(nfp_eth_table);
+
 	return 0;
 }
 
@@ -73,6 +74,7 @@  nfp_net_start(struct rte_eth_dev *dev)
 					"with NFP multiport PF");
 				return -EINVAL;
 		}
+
 		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
 			/*
 			 * Better not to share LSC with RX interrupts.
@@ -87,6 +89,7 @@  nfp_net_start(struct rte_eth_dev *dev)
 				return -EIO;
 			}
 		}
+
 		intr_vector = dev->data->nb_rx_queues;
 		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
 			return -1;
@@ -198,7 +201,6 @@  nfp_net_stop(struct rte_eth_dev *dev)
 
 	/* Clear queues */
 	nfp_net_stop_tx_queue(dev);
-
 	nfp_net_stop_rx_queue(dev);
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
@@ -262,12 +264,10 @@  nfp_net_close(struct rte_eth_dev *dev)
 	 * We assume that the DPDK application is stopping all the
 	 * threads/queues before calling the device close function.
 	 */
-
 	nfp_net_disable_queues(dev);
 
 	/* Clear queues */
 	nfp_net_close_tx_queue(dev);
-
 	nfp_net_close_rx_queue(dev);
 
 	/* Clear ipsec */
@@ -413,35 +413,35 @@  nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
 
 /* Initialise and register driver with DPDK Application */
 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
-	.dev_configure		= nfp_net_configure,
-	.dev_start		= nfp_net_start,
-	.dev_stop		= nfp_net_stop,
-	.dev_set_link_up	= nfp_net_set_link_up,
-	.dev_set_link_down	= nfp_net_set_link_down,
-	.dev_close		= nfp_net_close,
-	.promiscuous_enable	= nfp_net_promisc_enable,
-	.promiscuous_disable	= nfp_net_promisc_disable,
-	.link_update		= nfp_net_link_update,
-	.stats_get		= nfp_net_stats_get,
-	.stats_reset		= nfp_net_stats_reset,
+	.dev_configure          = nfp_net_configure,
+	.dev_start              = nfp_net_start,
+	.dev_stop               = nfp_net_stop,
+	.dev_set_link_up        = nfp_net_set_link_up,
+	.dev_set_link_down      = nfp_net_set_link_down,
+	.dev_close              = nfp_net_close,
+	.promiscuous_enable     = nfp_net_promisc_enable,
+	.promiscuous_disable    = nfp_net_promisc_disable,
+	.link_update            = nfp_net_link_update,
+	.stats_get              = nfp_net_stats_get,
+	.stats_reset            = nfp_net_stats_reset,
 	.xstats_get             = nfp_net_xstats_get,
 	.xstats_reset           = nfp_net_xstats_reset,
 	.xstats_get_names       = nfp_net_xstats_get_names,
 	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
 	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
-	.dev_infos_get		= nfp_net_infos_get,
+	.dev_infos_get          = nfp_net_infos_get,
 	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
-	.mtu_set		= nfp_net_dev_mtu_set,
-	.mac_addr_set		= nfp_net_set_mac_addr,
-	.vlan_offload_set	= nfp_net_vlan_offload_set,
-	.reta_update		= nfp_net_reta_update,
-	.reta_query		= nfp_net_reta_query,
-	.rss_hash_update	= nfp_net_rss_hash_update,
-	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
-	.rx_queue_setup		= nfp_net_rx_queue_setup,
-	.rx_queue_release	= nfp_net_rx_queue_release,
-	.tx_queue_setup		= nfp_net_tx_queue_setup,
-	.tx_queue_release	= nfp_net_tx_queue_release,
+	.mtu_set                = nfp_net_dev_mtu_set,
+	.mac_addr_set           = nfp_net_set_mac_addr,
+	.vlan_offload_set       = nfp_net_vlan_offload_set,
+	.reta_update            = nfp_net_reta_update,
+	.reta_query             = nfp_net_reta_query,
+	.rss_hash_update        = nfp_net_rss_hash_update,
+	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
+	.rx_queue_setup         = nfp_net_rx_queue_setup,
+	.rx_queue_release       = nfp_net_rx_queue_release,
+	.tx_queue_setup         = nfp_net_tx_queue_setup,
+	.tx_queue_release       = nfp_net_tx_queue_release,
 	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
 	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
 	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
@@ -501,7 +501,6 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
-
 	hw->ctrl_bar = pci_dev->mem_resource[0].addr;
 	if (hw->ctrl_bar == NULL) {
 		PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured");
@@ -519,10 +518,12 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 			PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar");
 			return -EIO;
 		}
+
 		hw->mac_stats = hw->mac_stats_bar;
 	} else {
 		if (pf_dev->ctrl_bar == NULL)
 			return -ENODEV;
+
 		/* Use port offset in pf ctrl_bar for this ports control bar */
 		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_PF_CSR_SLICE_SIZE);
 		hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + (port * NFP_MAC_STATS_SIZE);
@@ -557,7 +558,6 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 		return -ENOMEM;
 	}
 
-
 	/* Work out where in the BAR the queues start. */
 	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
 	rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
@@ -653,12 +653,12 @@  nfp_fw_upload(struct rte_pci_device *dev,
 			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
 			cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
 			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
-
 	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, serial);
 
 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
 	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
 		goto load_fw;
+
 	/* Then try the PCI name */
 	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
 			dev->name);
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index c9e72dd953..7096695de6 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -63,6 +63,7 @@  nfp_netvf_start(struct rte_eth_dev *dev)
 				return -EIO;
 			}
 		}
+
 		intr_vector = dev->data->nb_rx_queues;
 		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
 			return -1;
@@ -172,12 +173,10 @@  nfp_netvf_close(struct rte_eth_dev *dev)
 	 * We assume that the DPDK application is stopping all the
 	 * threads/queues before calling the device close function.
 	 */
-
 	nfp_net_disable_queues(dev);
 
 	/* Clear queues */
 	nfp_net_close_tx_queue(dev);
-
 	nfp_net_close_rx_queue(dev);
 
 	rte_intr_disable(pci_dev->intr_handle);
@@ -194,35 +193,35 @@  nfp_netvf_close(struct rte_eth_dev *dev)
 
 /* Initialise and register VF driver with DPDK Application */
 static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
-	.dev_configure		= nfp_net_configure,
-	.dev_start		= nfp_netvf_start,
-	.dev_stop		= nfp_netvf_stop,
-	.dev_set_link_up	= nfp_netvf_set_link_up,
-	.dev_set_link_down	= nfp_netvf_set_link_down,
-	.dev_close		= nfp_netvf_close,
-	.promiscuous_enable	= nfp_net_promisc_enable,
-	.promiscuous_disable	= nfp_net_promisc_disable,
-	.link_update		= nfp_net_link_update,
-	.stats_get		= nfp_net_stats_get,
-	.stats_reset		= nfp_net_stats_reset,
+	.dev_configure          = nfp_net_configure,
+	.dev_start              = nfp_netvf_start,
+	.dev_stop               = nfp_netvf_stop,
+	.dev_set_link_up        = nfp_netvf_set_link_up,
+	.dev_set_link_down      = nfp_netvf_set_link_down,
+	.dev_close              = nfp_netvf_close,
+	.promiscuous_enable     = nfp_net_promisc_enable,
+	.promiscuous_disable    = nfp_net_promisc_disable,
+	.link_update            = nfp_net_link_update,
+	.stats_get              = nfp_net_stats_get,
+	.stats_reset            = nfp_net_stats_reset,
 	.xstats_get             = nfp_net_xstats_get,
 	.xstats_reset           = nfp_net_xstats_reset,
 	.xstats_get_names       = nfp_net_xstats_get_names,
 	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
 	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
-	.dev_infos_get		= nfp_net_infos_get,
+	.dev_infos_get          = nfp_net_infos_get,
 	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
-	.mtu_set		= nfp_net_dev_mtu_set,
-	.mac_addr_set		= nfp_net_set_mac_addr,
-	.vlan_offload_set	= nfp_net_vlan_offload_set,
-	.reta_update		= nfp_net_reta_update,
-	.reta_query		= nfp_net_reta_query,
-	.rss_hash_update	= nfp_net_rss_hash_update,
-	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
-	.rx_queue_setup		= nfp_net_rx_queue_setup,
-	.rx_queue_release	= nfp_net_rx_queue_release,
-	.tx_queue_setup		= nfp_net_tx_queue_setup,
-	.tx_queue_release	= nfp_net_tx_queue_release,
+	.mtu_set                = nfp_net_dev_mtu_set,
+	.mac_addr_set           = nfp_net_set_mac_addr,
+	.vlan_offload_set       = nfp_net_vlan_offload_set,
+	.reta_update            = nfp_net_reta_update,
+	.reta_query             = nfp_net_reta_query,
+	.rss_hash_update        = nfp_net_rss_hash_update,
+	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
+	.rx_queue_setup         = nfp_net_rx_queue_setup,
+	.rx_queue_release       = nfp_net_rx_queue_release,
+	.tx_queue_setup         = nfp_net_tx_queue_setup,
+	.tx_queue_release       = nfp_net_tx_queue_release,
 	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
 	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
 };
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index fbcdb3d19e..1bf31146fc 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -496,6 +496,7 @@  nfp_stats_id_alloc(struct nfp_flow_priv *priv, uint32_t *ctx)
 			priv->stats_ids.init_unallocated--;
 			priv->active_mem_unit = 0;
 		}
+
 		return 0;
 	}
 
@@ -622,6 +623,7 @@  nfp_tun_add_ipv6_off(struct nfp_app_fw_flower *app_fw_flower,
 		PMD_DRV_LOG(ERR, "Mem error when offloading IP6 address.");
 		return -ENOMEM;
 	}
+
 	memcpy(tmp_entry->ipv6_addr, ipv6, sizeof(tmp_entry->ipv6_addr));
 	tmp_entry->ref_count = 1;
 
@@ -1796,7 +1798,7 @@  static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
 			RTE_FLOW_ITEM_TYPE_IPV4,
 			RTE_FLOW_ITEM_TYPE_IPV6),
-		.mask_support = &(const struct rte_flow_item_eth){
+		.mask_support = &(const struct rte_flow_item_eth) {
 			.hdr = {
 				.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 				.src_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
@@ -1811,7 +1813,7 @@  static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 	[RTE_FLOW_ITEM_TYPE_VLAN] = {
 		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4,
 			RTE_FLOW_ITEM_TYPE_IPV6),
-		.mask_support = &(const struct rte_flow_item_vlan){
+		.mask_support = &(const struct rte_flow_item_vlan) {
 			.hdr = {
 				.vlan_tci  = RTE_BE16(0xefff),
 				.eth_proto = RTE_BE16(0xffff),
@@ -1827,7 +1829,7 @@  static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 			RTE_FLOW_ITEM_TYPE_UDP,
 			RTE_FLOW_ITEM_TYPE_SCTP,
 			RTE_FLOW_ITEM_TYPE_GRE),
-		.mask_support = &(const struct rte_flow_item_ipv4){
+		.mask_support = &(const struct rte_flow_item_ipv4) {
 			.hdr = {
 				.type_of_service = 0xff,
 				.fragment_offset = RTE_BE16(0xffff),
@@ -1846,7 +1848,7 @@  static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 			RTE_FLOW_ITEM_TYPE_UDP,
 			RTE_FLOW_ITEM_TYPE_SCTP,
 			RTE_FLOW_ITEM_TYPE_GRE),
-		.mask_support = &(const struct rte_flow_item_ipv6){
+		.mask_support = &(const struct rte_flow_item_ipv6) {
 			.hdr = {
 				.vtc_flow   = RTE_BE32(0x0ff00000),
 				.proto      = 0xff,
@@ -1863,7 +1865,7 @@  static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 		.merge = nfp_flow_merge_ipv6,
 	},
 	[RTE_FLOW_ITEM_TYPE_TCP] = {
-		.mask_support = &(const struct rte_flow_item_tcp){
+		.mask_support = &(const struct rte_flow_item_tcp) {
 			.hdr = {
 				.tcp_flags = 0xff,
 				.src_port  = RTE_BE16(0xffff),
@@ -1877,7 +1879,7 @@  static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 	[RTE_FLOW_ITEM_TYPE_UDP] = {
 		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VXLAN,
 			RTE_FLOW_ITEM_TYPE_GENEVE),
-		.mask_support = &(const struct rte_flow_item_udp){
+		.mask_support = &(const struct rte_flow_item_udp) {
 			.hdr = {
 				.src_port = RTE_BE16(0xffff),
 				.dst_port = RTE_BE16(0xffff),
@@ -1888,7 +1890,7 @@  static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 		.merge = nfp_flow_merge_udp,
 	},
 	[RTE_FLOW_ITEM_TYPE_SCTP] = {
-		.mask_support = &(const struct rte_flow_item_sctp){
+		.mask_support = &(const struct rte_flow_item_sctp) {
 			.hdr = {
 				.src_port  = RTE_BE16(0xffff),
 				.dst_port  = RTE_BE16(0xffff),
@@ -1900,7 +1902,7 @@  static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 	},
 	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
 		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
-		.mask_support = &(const struct rte_flow_item_vxlan){
+		.mask_support = &(const struct rte_flow_item_vxlan) {
 			.hdr = {
 				.vx_vni = RTE_BE32(0xffffff00),
 			},
@@ -1911,7 +1913,7 @@  static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 	},
 	[RTE_FLOW_ITEM_TYPE_GENEVE] = {
 		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
-		.mask_support = &(const struct rte_flow_item_geneve){
+		.mask_support = &(const struct rte_flow_item_geneve) {
 			.vni = "\xff\xff\xff",
 		},
 		.mask_default = &rte_flow_item_geneve_mask,
@@ -1920,7 +1922,7 @@  static const struct nfp_flow_item_proc nfp_flow_item_proc_list[] = {
 	},
 	[RTE_FLOW_ITEM_TYPE_GRE] = {
 		.next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_GRE_KEY),
-		.mask_support = &(const struct rte_flow_item_gre){
+		.mask_support = &(const struct rte_flow_item_gre) {
 			.c_rsvd0_ver = RTE_BE16(0xa000),
 			.protocol = RTE_BE16(0xffff),
 		},
@@ -1952,6 +1954,7 @@  nfp_flow_item_check(const struct rte_flow_item *item,
 					" without a corresponding 'spec'.");
 			return -EINVAL;
 		}
+
 		/* No spec, no mask, no problem. */
 		return 0;
 	}
@@ -3031,6 +3034,7 @@  nfp_pre_tun_table_check_add(struct nfp_flower_representor *repr,
 	for (i = 1; i < NFP_TUN_PRE_TUN_RULE_LIMIT; i++) {
 		if (priv->pre_tun_bitmap[i] == 0)
 			continue;
+
 		entry->mac_index = i;
 		find_entry = nfp_pre_tun_table_search(priv, (char *)entry, entry_size);
 		if (find_entry != NULL) {
@@ -3057,6 +3061,7 @@  nfp_pre_tun_table_check_add(struct nfp_flower_representor *repr,
 
 	*index = entry->mac_index;
 	priv->pre_tun_cnt++;
+
 	return 0;
 }
 
@@ -3091,12 +3096,14 @@  nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr,
 	for (i = 1; i < NFP_TUN_PRE_TUN_RULE_LIMIT; i++) {
 		if (priv->pre_tun_bitmap[i] == 0)
 			continue;
+
 		entry->mac_index = i;
 		find_entry = nfp_pre_tun_table_search(priv, (char *)entry, entry_size);
 		if (find_entry != NULL) {
 			find_entry->ref_cnt--;
 			if (find_entry->ref_cnt != 0)
 				goto free_entry;
+
 			priv->pre_tun_bitmap[i] = 0;
 			break;
 		}
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index ab38dbe1f4..991629e6ed 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -126,11 +126,14 @@  struct nfp_ipv6_addr_entry {
 struct nfp_flow_priv {
 	uint32_t hash_seed; /**< Hash seed for hash tables in this structure. */
 	uint64_t flower_version; /**< Flow version, always increase. */
+
 	/* Mask hash table */
 	struct nfp_fl_mask_id mask_ids; /**< Entry for mask hash table */
 	struct rte_hash *mask_table; /**< Hash table to store mask ids. */
+
 	/* Flow hash table */
 	struct rte_hash *flow_table; /**< Hash table to store flow rules. */
+
 	/* Flow stats */
 	uint32_t active_mem_unit; /**< The size of active mem units. */
 	uint32_t total_mem_units; /**< The size of total mem units. */
@@ -138,16 +141,20 @@  struct nfp_flow_priv {
 	struct nfp_fl_stats_id stats_ids; /**< The stats id ring. */
 	struct nfp_fl_stats *stats; /**< Store stats of flow. */
 	rte_spinlock_t stats_lock; /** < Lock the update of 'stats' field. */
+
 	/* Pre tunnel rule */
 	uint16_t pre_tun_cnt; /**< The size of pre tunnel rule */
 	uint8_t pre_tun_bitmap[NFP_TUN_PRE_TUN_RULE_LIMIT]; /**< Bitmap of pre tunnel rule */
 	struct rte_hash *pre_tun_table; /**< Hash table to store pre tunnel rule */
+
 	/* IPv4 off */
 	LIST_HEAD(, nfp_ipv4_addr_entry) ipv4_off_list; /**< Store ipv4 off */
 	rte_spinlock_t ipv4_off_lock; /**< Lock the ipv4 off list */
+
 	/* IPv6 off */
 	LIST_HEAD(, nfp_ipv6_addr_entry) ipv6_off_list; /**< Store ipv6 off */
 	rte_spinlock_t ipv6_off_lock; /**< Lock the ipv6 off list */
+
 	/* Neighbor next */
 	LIST_HEAD(, nfp_fl_tun)nn_list; /**< Store nn entry */
 	/* Conntrack */
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 9e08e38955..74599747e8 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -190,6 +190,7 @@  nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
 		rxd->fld.dd = 0;
 		rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xffff;
 		rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
+
 		rxe[i].mbuf = mbuf;
 	}
 
@@ -213,6 +214,7 @@  nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
 		if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) != 0)
 			return -1;
 	}
+
 	return 0;
 }
 
@@ -225,7 +227,6 @@  nfp_net_rx_queue_count(void *rx_queue)
 	struct nfp_net_rx_desc *rxds;
 
 	rxq = rx_queue;
-
 	idx = rxq->rd_p;
 
 	/*
@@ -235,7 +236,6 @@  nfp_net_rx_queue_count(void *rx_queue)
 	 * performance. But ideally that should be done in descriptors
 	 * chunks belonging to the same cache line.
 	 */
-
 	while (count < rxq->rx_count) {
 		rxds = &rxq->rxds[idx];
 		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
@@ -394,6 +394,7 @@  nfp_net_parse_meta_qinq(const struct nfp_meta_parsed *meta,
 
 	if (meta->vlan[0].offload == 0)
 		mb->vlan_tci = rte_cpu_to_le_16(meta->vlan[0].tci);
+
 	mb->vlan_tci_outer = rte_cpu_to_le_16(meta->vlan[1].tci);
 	PMD_RX_LOG(DEBUG, "Received outer vlan TCI is %u inner vlan TCI is %u",
 			mb->vlan_tci_outer, mb->vlan_tci);
@@ -638,7 +639,6 @@  nfp_net_parse_ptype(struct nfp_net_rx_desc *rxds,
  * so looking at the implications of this type of allocation should be studied
  * deeply.
  */
-
 uint16_t
 nfp_net_recv_pkts(void *rx_queue,
 		struct rte_mbuf **rx_pkts,
@@ -896,7 +896,6 @@  nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 			sizeof(struct nfp_net_rx_desc) * max_rx_desc,
 			NFP_MEMZONE_ALIGN, socket_id);
-
 	if (tz == NULL) {
 		PMD_DRV_LOG(ERR, "Error allocating rx dma");
 		nfp_net_rx_queue_release(dev, queue_idx);
diff --git a/drivers/net/nfp/nfpcore/nfp_resource.h b/drivers/net/nfp/nfpcore/nfp_resource.h
index 18196d273c..f49c99e462 100644
--- a/drivers/net/nfp/nfpcore/nfp_resource.h
+++ b/drivers/net/nfp/nfpcore/nfp_resource.h
@@ -15,7 +15,7 @@ 
 #define NFP_RESOURCE_NFP_HWINFO         "nfp.info"
 
 /* Service Processor */
-#define NFP_RESOURCE_NSP		"nfp.sp"
+#define NFP_RESOURCE_NSP                "nfp.sp"
 
 /* Opaque handle to a NFP Resource */
 struct nfp_resource;