[v3,10/11] net/nfp: adjust logic to make it more readable

Message ID 20231013060653.1006410-11-chaoyong.he@corigine.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series: Unify the PMD coding style

Checks

Context       | Check   | Description
ci/checkpatch | success | coding style OK

Commit Message

Chaoyong He Oct. 13, 2023, 6:06 a.m. UTC
  Adjust some logic to make it easier to understand.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
---
 drivers/net/nfp/nfp_common.c     | 87 +++++++++++++++++---------------
 drivers/net/nfp/nfp_cpp_bridge.c |  5 +-
 drivers/net/nfp/nfp_ctrl.h       |  2 -
 drivers/net/nfp/nfp_ethdev.c     | 23 ++++-----
 drivers/net/nfp/nfp_ethdev_vf.c  | 15 +++---
 drivers/net/nfp/nfp_rxtx.c       |  2 +-
 6 files changed, 63 insertions(+), 71 deletions(-)
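
The recurring readability pattern in this patch is easiest to see in nfp_net_vlan_offload_set(): the reconfigure result is checked with an early return instead of being funneled through a shared exit, and the new control word is only committed on success. The stand-alone C sketch below illustrates that shape; reconfig(), apply_ctrl() and struct ctx are placeholders invented for illustration and are not part of the driver.

#include <stdio.h>

struct ctx {
	unsigned int ctrl;
};

/* Placeholder standing in for nfp_net_reconfig(): always succeeds here. */
static int
reconfig(struct ctx *hw, unsigned int new_ctrl)
{
	(void)hw;
	(void)new_ctrl;
	return 0;
}

/* Early-return style: handle the error first, commit the new state last. */
static int
apply_ctrl(struct ctx *hw, unsigned int new_ctrl)
{
	int ret;

	ret = reconfig(hw, new_ctrl);
	if (ret != 0)
		return ret;

	hw->ctrl = new_ctrl;

	return 0;
}

int
main(void)
{
	struct ctx hw = { .ctrl = 0 };

	if (apply_ctrl(&hw, 0x1) == 0)
		printf("ctrl updated to %#x\n", hw.ctrl);

	return 0;
}

The early return keeps the success path at the left margin and avoids carrying a status value through to the end of the function, which is the same motivation behind the other hunks in nfp_common.c and nfp_ethdev.c below.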

Patch

diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index a102c6f272..2d834b29d9 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -453,7 +453,7 @@  nfp_net_log_device_information(const struct nfp_net_hw *hw)
 }
 
 static inline void
-nfp_net_enbable_rxvlan_cap(struct nfp_net_hw *hw,
+nfp_net_enable_rxvlan_cap(struct nfp_net_hw *hw,
 		uint32_t *ctrl)
 {
 	if ((hw->cap & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0)
@@ -467,19 +467,19 @@  nfp_net_enable_queues(struct rte_eth_dev *dev)
 {
 	uint16_t i;
 	struct nfp_net_hw *hw;
-	uint64_t enabled_queues = 0;
+	uint64_t enabled_queues;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/* Enabling the required TX queues in the device */
+	enabled_queues = 0;
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		enabled_queues |= (1 << i);
 
 	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
 
-	enabled_queues = 0;
-
 	/* Enabling the required RX queues in the device */
+	enabled_queues = 0;
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		enabled_queues |= (1 << i);
 
@@ -619,33 +619,33 @@  uint32_t
 nfp_check_offloads(struct rte_eth_dev *dev)
 {
 	uint32_t ctrl = 0;
+	uint64_t rx_offload;
+	uint64_t tx_offload;
 	struct nfp_net_hw *hw;
 	struct rte_eth_conf *dev_conf;
-	struct rte_eth_rxmode *rxmode;
-	struct rte_eth_txmode *txmode;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	dev_conf = &dev->data->dev_conf;
-	rxmode = &dev_conf->rxmode;
-	txmode = &dev_conf->txmode;
+	rx_offload = dev_conf->rxmode.offloads;
+	tx_offload = dev_conf->txmode.offloads;
 
-	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) {
+	if ((rx_offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) {
 		if ((hw->cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
 	}
 
-	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
-		nfp_net_enbable_rxvlan_cap(hw, &ctrl);
+	if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
+		nfp_net_enable_rxvlan_cap(hw, &ctrl);
 
-	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) {
+	if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) {
 		if ((hw->cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
 			ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
 	}
 
 	hw->mtu = dev->data->mtu;
 
-	if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) {
+	if ((tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) {
 		if ((hw->cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0)
 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN_V2;
 		else if ((hw->cap & NFP_NET_CFG_CTRL_TXVLAN) != 0)
@@ -661,14 +661,14 @@  nfp_check_offloads(struct rte_eth_dev *dev)
 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
 	/* TX checksum offload */
-	if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 ||
-			(txmode->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
-			(txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
+	if ((tx_offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 ||
+			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
+			(tx_offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
 
 	/* LSO offload */
-	if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
-			(txmode->offloads & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
+	if ((tx_offload & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
+			(tx_offload & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
 		if ((hw->cap & NFP_NET_CFG_CTRL_LSO) != 0)
 			ctrl |= NFP_NET_CFG_CTRL_LSO;
 		else
@@ -676,7 +676,7 @@  nfp_check_offloads(struct rte_eth_dev *dev)
 	}
 
 	/* RX gather */
-	if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
+	if ((tx_offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
 
 	return ctrl;
@@ -766,11 +766,10 @@  nfp_net_link_update(struct rte_eth_dev *dev,
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	/* Read link status */
-	nn_link_status = nn_cfg_readw(hw, NFP_NET_CFG_STS);
-
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
+	/* Read link status */
+	nn_link_status = nn_cfg_readw(hw, NFP_NET_CFG_STS);
 	if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
 		link.link_status = RTE_ETH_LINK_UP;
 
@@ -828,6 +827,9 @@  nfp_net_stats_get(struct rte_eth_dev *dev,
 	struct nfp_net_hw *hw;
 	struct rte_eth_stats nfp_dev_stats;
 
+	if (stats == NULL)
+		return -EINVAL;
+
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
@@ -892,11 +894,8 @@  nfp_net_stats_get(struct rte_eth_dev *dev,
 			nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
 
-	if (stats != NULL) {
-		memcpy(stats, &nfp_dev_stats, sizeof(*stats));
-		return 0;
-	}
-	return -EINVAL;
+	memcpy(stats, &nfp_dev_stats, sizeof(*stats));
+	return 0;
 }
 
 /*
@@ -1379,13 +1378,14 @@  nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
 	struct nfp_net_hw *hw;
 	struct rte_pci_device *pci_dev;
 
-	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
 		base = 1;
 
 	/* Make sure all updates are written before un-masking */
 	rte_wmb();
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
 			NFP_NET_CFG_ICR_UNMASKED);
 	return 0;
@@ -1399,14 +1399,16 @@  nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
 	struct nfp_net_hw *hw;
 	struct rte_pci_device *pci_dev;
 
-	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
 		base = 1;
 
 	/* Make sure all updates are written before un-masking */
 	rte_wmb();
-	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), NFP_NET_CFG_ICR_RXTX);
+
 	return 0;
 }
 
@@ -1445,13 +1447,13 @@  nfp_net_irq_unmask(struct rte_eth_dev *dev)
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
+	/* Make sure all updates are written before un-masking */
+	rte_wmb();
+
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) != 0) {
 		/* If MSI-X auto-masking is used, clear the entry */
-		rte_wmb();
 		rte_intr_ack(pci_dev->intr_handle);
 	} else {
-		/* Make sure all updates are written before un-masking */
-		rte_wmb();
 		nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
 				NFP_NET_CFG_ICR_UNMASKED);
 	}
@@ -1548,19 +1550,18 @@  nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
 	int ret;
 	uint32_t update;
 	uint32_t new_ctrl;
+	uint64_t rx_offload;
 	struct nfp_net_hw *hw;
 	uint32_t rxvlan_ctrl = 0;
-	struct rte_eth_conf *dev_conf;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	dev_conf = &dev->data->dev_conf;
+	rx_offload = dev->data->dev_conf.rxmode.offloads;
 	new_ctrl = hw->ctrl;
 
-	nfp_net_enbable_rxvlan_cap(hw, &rxvlan_ctrl);
-
 	/* VLAN stripping setting */
 	if ((mask & RTE_ETH_VLAN_STRIP_MASK) != 0) {
-		if ((dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
+		nfp_net_enable_rxvlan_cap(hw, &rxvlan_ctrl);
+		if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
 			new_ctrl |= rxvlan_ctrl;
 		else
 			new_ctrl &= ~rxvlan_ctrl;
@@ -1568,7 +1569,7 @@  nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
 
 	/* QinQ stripping setting */
 	if ((mask & RTE_ETH_QINQ_STRIP_MASK) != 0) {
-		if ((dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0)
+		if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0)
 			new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
 		else
 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
@@ -1580,10 +1581,12 @@  nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
 	update = NFP_NET_CFG_UPDATE_GEN;
 
 	ret = nfp_net_reconfig(hw, new_ctrl, update);
-	if (ret == 0)
-		hw->ctrl = new_ctrl;
+	if (ret != 0)
+		return ret;
 
-	return ret;
+	hw->ctrl = new_ctrl;
+
+	return 0;
 }
 
 static int
diff --git a/drivers/net/nfp/nfp_cpp_bridge.c b/drivers/net/nfp/nfp_cpp_bridge.c
index bb2a6fdcda..36dcdca9de 100644
--- a/drivers/net/nfp/nfp_cpp_bridge.c
+++ b/drivers/net/nfp/nfp_cpp_bridge.c
@@ -22,9 +22,6 @@ 
 #define NFP_IOCTL_CPP_IDENTIFICATION _IOW(NFP_IOCTL, 0x8f, uint32_t)
 
 /* Prototypes */
-static int nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp);
-static int nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp);
-static int nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp);
 static int nfp_cpp_bridge_service_func(void *args);
 
 int
@@ -438,7 +435,7 @@  nfp_cpp_bridge_service_func(void *args)
 			return -EIO;
 		}
 
-		while (1) {
+		for (;;) {
 			ret = recv(datafd, &op, 4, 0);
 			if (ret <= 0) {
 				PMD_CPP_LOG(DEBUG, "%s: socket close", __func__);
diff --git a/drivers/net/nfp/nfp_ctrl.h b/drivers/net/nfp/nfp_ctrl.h
index 5c2065a537..9ec51e0a25 100644
--- a/drivers/net/nfp/nfp_ctrl.h
+++ b/drivers/net/nfp/nfp_ctrl.h
@@ -442,8 +442,6 @@  struct nfp_net_fw_ver {
 #define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6    (NFP_MAC_STATS_BASE + 0x1f0)
 #define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7    (NFP_MAC_STATS_BASE + 0x1f8)
 
-#define NFP_PF_CSR_SLICE_SIZE    (32 * 1024)
-
 /*
  * General use mailbox area (0x1800 - 0x19ff)
  * 4B used for update command and 4B return code followed by
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index b65c2c1fe0..c550c12e01 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -80,7 +80,7 @@  nfp_net_start(struct rte_eth_dev *dev)
 			 * Better not to share LSC with RX interrupts.
 			 * Unregistering LSC interrupt handler.
 			 */
-			rte_intr_callback_unregister(pci_dev->intr_handle,
+			rte_intr_callback_unregister(intr_handle,
 					nfp_net_dev_interrupt_handler, (void *)dev);
 
 			if (dev->data->nb_rx_queues > 1) {
@@ -525,7 +525,7 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 			return -ENODEV;
 
 		/* Use port offset in pf ctrl_bar for this ports control bar */
-		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_PF_CSR_SLICE_SIZE);
+		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ);
 		hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + (port * NFP_MAC_STATS_SIZE);
 	}
 
@@ -743,8 +743,7 @@  nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
 		const struct nfp_dev_info *dev_info)
 {
 	uint8_t i;
-	int ret;
-	int err = 0;
+	int ret = 0;
 	uint32_t total_vnics;
 	struct nfp_net_hw *hw;
 	unsigned int numa_node;
@@ -765,8 +764,8 @@  nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
 	pf_dev->app_fw_priv = app_fw_nic;
 
 	/* Read the number of vNIC's created for the PF */
-	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &err);
-	if (err != 0 || total_vnics == 0 || total_vnics > 8) {
+	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &ret);
+	if (ret != 0 || total_vnics == 0 || total_vnics > 8) {
 		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
 		ret = -ENODEV;
 		goto app_cleanup;
@@ -874,8 +873,7 @@  nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
 static int
 nfp_pf_init(struct rte_pci_device *pci_dev)
 {
-	int ret;
-	int err = 0;
+	int ret = 0;
 	uint64_t addr;
 	uint32_t cpp_id;
 	struct nfp_cpp *cpp;
@@ -943,8 +941,8 @@  nfp_pf_init(struct rte_pci_device *pci_dev)
 	}
 
 	/* Read the app ID of the firmware loaded */
-	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
-	if (err != 0) {
+	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &ret);
+	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
 		ret = -EIO;
 		goto sym_tbl_cleanup;
@@ -1080,7 +1078,6 @@  nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
 static int
 nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 {
-	int err = 0;
 	int ret = 0;
 	struct nfp_cpp *cpp;
 	enum nfp_app_fw_id app_fw_id;
@@ -1124,8 +1121,8 @@  nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 	}
 
 	/* Read the app ID of the firmware loaded */
-	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
-	if (err != 0) {
+	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &ret);
+	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
 		goto sym_tbl_cleanup;
 	}
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index 7fb7b3efc5..ac6e67efc6 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -39,8 +39,6 @@  nfp_netvf_start(struct rte_eth_dev *dev)
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 
-	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
 	/* Disabling queues just in case... */
 	nfp_net_disable_queues(dev);
 
@@ -54,7 +52,7 @@  nfp_netvf_start(struct rte_eth_dev *dev)
 			 * Better not to share LSC with RX interrupts.
 			 * Unregistering LSC interrupt handler.
 			 */
-			rte_intr_callback_unregister(pci_dev->intr_handle,
+			rte_intr_callback_unregister(intr_handle,
 					nfp_net_dev_interrupt_handler, (void *)dev);
 
 			if (dev->data->nb_rx_queues > 1) {
@@ -77,6 +75,7 @@  nfp_netvf_start(struct rte_eth_dev *dev)
 	new_ctrl = nfp_check_offloads(dev);
 
 	/* Writing configuration parameters in the device */
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	nfp_net_params_setup(hw);
 
 	dev_conf = &dev->data->dev_conf;
@@ -244,15 +243,15 @@  static int
 nfp_netvf_init(struct rte_eth_dev *eth_dev)
 {
 	int err;
+	uint16_t port;
 	uint32_t start_q;
-	uint16_t port = 0;
 	struct nfp_net_hw *hw;
 	uint64_t tx_bar_off = 0;
 	uint64_t rx_bar_off = 0;
 	struct rte_pci_device *pci_dev;
 	const struct nfp_dev_info *dev_info;
-	struct rte_ether_addr *tmp_ether_addr;
 
+	port = eth_dev->data->port_id;
 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
@@ -325,9 +324,7 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 	}
 
 	nfp_netvf_read_mac(hw);
-
-	tmp_ether_addr = &hw->mac_addr;
-	if (rte_is_valid_assigned_ether_addr(tmp_ether_addr) == 0) {
+	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
 		PMD_INIT_LOG(INFO, "Using random mac address for port %hu", port);
 		/* Using random mac addresses for VFs */
 		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
@@ -344,7 +341,7 @@  nfp_netvf_init(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_LOG(INFO, "port %hu VendorID=%#x DeviceID=%#x "
 			"mac=" RTE_ETHER_ADDR_PRT_FMT,
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
+			port, pci_dev->id.vendor_id,
 			pci_dev->id.device_id,
 			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
 
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 74599747e8..efdca7fccf 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -284,7 +284,7 @@  nfp_net_parse_chained_meta(uint8_t *meta_base,
 			meta->vlan[meta->vlan_layer].tci =
 					vlan_info & NFP_NET_META_VLAN_MASK;
 			meta->vlan[meta->vlan_layer].tpid = NFP_NET_META_TPID(vlan_info);
-			++meta->vlan_layer;
+			meta->vlan_layer++;
 			break;
 		case NFP_NET_META_IPSEC:
 			meta->sa_idx = rte_be_to_cpu_32(*(rte_be32_t *)meta_offset);