@@ -26,7 +26,7 @@ nfp_pf_repr_enable_queues(struct rte_eth_dev *dev)
{
struct nfp_net_hw *hw;
uint64_t enabled_queues = 0;
- int i;
+ uint16_t i;
struct nfp_flower_representor *repr;
repr = dev->data->dev_private;
@@ -64,10 +64,10 @@ nfp_flower_cmsg_mac_repr_init(struct rte_mbuf *mbuf,
static void
nfp_flower_cmsg_mac_repr_fill(struct rte_mbuf *m,
- unsigned int idx,
- unsigned int nbi,
- unsigned int nbi_port,
- unsigned int phys_port)
+ uint8_t idx,
+ uint32_t nbi,
+ uint32_t nbi_port,
+ uint32_t phys_port)
{
struct nfp_flower_cmsg_mac_repr *msg;
@@ -81,11 +81,11 @@ nfp_flower_cmsg_mac_repr_fill(struct rte_mbuf *m,
int
nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower)
{
- int i;
+ uint8_t i;
uint16_t cnt;
- unsigned int nbi;
- unsigned int nbi_port;
- unsigned int phys_port;
+ uint32_t nbi;
+ uint32_t nbi_port;
+ uint32_t phys_port;
struct rte_mbuf *mbuf;
struct nfp_eth_table *nfp_eth_table;
@@ -227,9 +227,9 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue,
uint16_t nb_pkts,
bool repr_flag)
{
- int i;
- int pkt_size;
- int dma_size;
+ uint16_t i;
+ uint32_t pkt_size;
+ uint16_t dma_size;
uint8_t offset;
uint64_t dma_addr;
uint16_t free_descs;
@@ -199,7 +199,7 @@ static int
__nfp_net_reconfig(struct nfp_net_hw *hw,
uint32_t update)
{
- int cnt;
+ uint32_t cnt;
uint32_t new;
struct timespec wait;
@@ -229,7 +229,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw,
}
if (cnt >= NFP_NET_POLL_TIMEOUT) {
PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
- " %dms", update, cnt);
+ " %ums", update, cnt);
return -EIO;
}
nanosleep(&wait, 0); /* waiting for 1ms */
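A side note on the log fixup in this hunk: once cnt is unsigned, %u is the matching conversion specifier, while %d would be a format/argument mismatch that -Wformat flags. A minimal standalone sketch (plain printf stands in for PMD_INIT_LOG, and POLL_TIMEOUT is a placeholder for NFP_NET_POLL_TIMEOUT); PRIu32 from <inttypes.h> is the strictly portable spelling for a uint32_t:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define POLL_TIMEOUT 5000 /* stand-in for NFP_NET_POLL_TIMEOUT */

int main(void)
{
	uint32_t cnt = POLL_TIMEOUT;

	/* %u matches the unsigned counter; %d would mismatch */
	printf("Reconfig timeout after %ums\n", cnt);
	/* strictly portable form for a uint32_t */
	printf("Reconfig timeout after %" PRIu32 "ms\n", cnt);
	return 0;
}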
@@ -466,7 +466,7 @@ nfp_net_enable_queues(struct rte_eth_dev *dev)
{
struct nfp_net_hw *hw;
uint64_t enabled_queues = 0;
- int i;
+ uint16_t i;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -575,7 +575,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
struct rte_intr_handle *intr_handle)
{
struct nfp_net_hw *hw;
- int i;
+ uint16_t i;
if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
dev->data->nb_rx_queues) != 0) {
@@ -832,7 +832,7 @@ int
nfp_net_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
{
- int i;
+ uint16_t i;
struct nfp_net_hw *hw;
struct rte_eth_stats nfp_dev_stats;
@@ -923,7 +923,7 @@ nfp_net_stats_get(struct rte_eth_dev *dev,
int
nfp_net_stats_reset(struct rte_eth_dev *dev)
{
- int i;
+ uint16_t i;
struct nfp_net_hw *hw;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1398,7 +1398,7 @@ nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
{
struct rte_pci_device *pci_dev;
struct nfp_net_hw *hw;
- int base = 0;
+ uint16_t base = 0;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -1419,7 +1419,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
{
struct rte_pci_device *pci_dev;
struct nfp_net_hw *hw;
- int base = 0;
+ uint16_t base = 0;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -1619,9 +1619,10 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- uint32_t reta, mask;
- int i, j;
- int idx, shift;
+ uint8_t mask;
+ uint32_t reta;
+ uint16_t i, j;
+ uint16_t idx, shift;
struct nfp_net_hw *hw =
NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1695,8 +1696,9 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- uint8_t i, j, mask;
- int idx, shift;
+ uint16_t i, j;
+ uint8_t mask;
+ uint16_t idx, shift;
uint32_t reta;
struct nfp_net_hw *hw;
@@ -1720,7 +1722,7 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
/* Handling 4 RSS entries per loop */
idx = i / RTE_ETH_RETA_GROUP_SIZE;
shift = i % RTE_ETH_RETA_GROUP_SIZE;
- mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
+ mask = (reta_conf[idx].mask >> shift) & 0xF;
if (mask == 0)
continue;
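For context on the indexing here: rte_eth_rss_reta_entry64 packs 64 redirection-table entries behind a 64-bit validity mask, so entry i lives in group i / RTE_ETH_RETA_GROUP_SIZE at bit i % RTE_ETH_RETA_GROUP_SIZE, and the loop peels off 4 mask bits per iteration. A small self-contained sketch of that arithmetic (the sample mask value is illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define RETA_GROUP_SIZE 64 /* mirrors RTE_ETH_RETA_GROUP_SIZE */

int main(void)
{
	uint64_t group_mask = 0xF0; /* entries 4..7 of group 0 selected */
	uint16_t i;

	/* handle 4 RSS entries per loop, as above */
	for (i = 0; i < 8; i += 4) {
		uint16_t idx = i / RETA_GROUP_SIZE;
		uint16_t shift = i % RETA_GROUP_SIZE;
		uint8_t mask = (group_mask >> shift) & 0xF;

		printf("i=%u: group %u, shift %u, mask 0x%x\n",
				(unsigned int)i, (unsigned int)idx,
				(unsigned int)shift, (unsigned int)mask);
	}
	return 0;
}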
@@ -1744,7 +1746,7 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
uint64_t rss_hf;
uint32_t cfg_rss_ctrl = 0;
uint8_t key;
- int i;
+ uint8_t i;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1835,7 +1837,7 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
uint64_t rss_hf;
uint32_t cfg_rss_ctrl;
uint8_t key;
- int i;
+ uint8_t i;
struct nfp_net_hw *hw;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1893,7 +1895,8 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev)
struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
uint16_t rx_queues = dev->data->nb_rx_queues;
uint16_t queue;
- int i, j, ret;
+ uint8_t i, j;
+ int ret;
PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
rx_queues);
@@ -245,14 +245,14 @@ nn_writeq(uint64_t val,
*/
static inline uint8_t
nn_cfg_readb(struct nfp_net_hw *hw,
- int off)
+ uint32_t off)
{
return nn_readb(hw->ctrl_bar + off);
}
static inline void
nn_cfg_writeb(struct nfp_net_hw *hw,
- int off,
+ uint32_t off,
uint8_t val)
{
nn_writeb(val, hw->ctrl_bar + off);
@@ -260,14 +260,14 @@ nn_cfg_writeb(struct nfp_net_hw *hw,
static inline uint16_t
nn_cfg_readw(struct nfp_net_hw *hw,
- int off)
+ uint32_t off)
{
return rte_le_to_cpu_16(nn_readw(hw->ctrl_bar + off));
}
static inline void
nn_cfg_writew(struct nfp_net_hw *hw,
- int off,
+ uint32_t off,
uint16_t val)
{
nn_writew(rte_cpu_to_le_16(val), hw->ctrl_bar + off);
@@ -275,14 +275,14 @@ nn_cfg_writew(struct nfp_net_hw *hw,
static inline uint32_t
nn_cfg_readl(struct nfp_net_hw *hw,
- int off)
+ uint32_t off)
{
return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
}
static inline void
nn_cfg_writel(struct nfp_net_hw *hw,
- int off,
+ uint32_t off,
uint32_t val)
{
nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
@@ -290,14 +290,14 @@ nn_cfg_writel(struct nfp_net_hw *hw,
static inline uint64_t
nn_cfg_readq(struct nfp_net_hw *hw,
- int off)
+ uint32_t off)
{
return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
}
static inline void
nn_cfg_writeq(struct nfp_net_hw *hw,
- int off,
+ uint32_t off,
uint64_t val)
{
nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
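The pattern behind these accessors: each helper reads or writes a fixed-width register at a byte offset into the memory-mapped control BAR, converting between little-endian wire format and host order; a register offset is never negative, hence the uint32_t parameter. A rough sketch under simplifying assumptions (a byte array stands in for the BAR, the rte_le_to_cpu_*() step is elided on the assumption of a little-endian host, and fake_hw/cfg_readl are made-up names):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_hw {
	uint8_t ctrl_bar[64];
};

static inline uint32_t
cfg_readl(struct fake_hw *hw, uint32_t off)
{
	uint32_t val;

	/* memcpy sidesteps the alignment traps a pointer cast could hit */
	memcpy(&val, hw->ctrl_bar + off, sizeof(val));
	return val;
}

int main(void)
{
	struct fake_hw hw = { .ctrl_bar = { [8] = 0x2a } };

	printf("0x%x\n", cfg_readl(&hw, 8)); /* 0x2a on little-endian */
	return 0;
}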
@@ -23,7 +23,7 @@
static int
nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
- int port)
+ uint16_t port)
{
struct nfp_eth_table *nfp_eth_table;
struct nfp_net_hw *hw = NULL;
@@ -255,7 +255,7 @@ nfp_net_close(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev;
struct nfp_pf_dev *pf_dev;
struct nfp_app_fw_nic *app_fw_nic;
- int i;
+ uint8_t i;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
@@ -487,7 +487,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
struct rte_ether_addr *tmp_ether_addr;
uint64_t rx_base;
uint64_t tx_base;
- int port = 0;
+ uint16_t port = 0;
int err;
PMD_INIT_FUNC_TRACE();
@@ -501,7 +501,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
- if (port < 0 || port > 7) {
+ if (port > 7) {
PMD_DRV_LOG(ERR, "Port value is wrong");
return -ENODEV;
}
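The dropped lower-bound test follows directly from the type change: for an unsigned port, port < 0 is vacuously false (and typically draws -Wtype-limits), so the upper bound alone enforces the 8-port limit. A tiny sketch of the surviving check (check_port is a hypothetical stand-in, not a driver function):

#include <stdint.h>
#include <stdio.h>

static int
check_port(uint16_t port)
{
	/* "port < 0" can never be true for an unsigned type, so only
	 * the upper bound is needed; 7 mirrors the 8-vNIC limit. */
	if (port > 7)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_port(3)); /* 0: in range */
	printf("%d\n", check_port(9)); /* -1: out of range */
	return 0;
}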
@@ -761,10 +761,10 @@ static int
nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
const struct nfp_dev_info *dev_info)
{
- int i;
+ uint8_t i;
int ret;
int err = 0;
- int total_vnics;
+ uint32_t total_vnics;
struct nfp_net_hw *hw;
unsigned int numa_node;
struct rte_eth_dev *eth_dev;
@@ -785,7 +785,7 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
/* Read the number of vNICs created for the PF */
total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &err);
- if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
+ if (err != 0 || total_vnics == 0 || total_vnics > 8) {
PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
ret = -ENODEV;
goto app_cleanup;
@@ -795,7 +795,7 @@ nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
* For coreNIC the number of vNICs exposed should be the same as the
* number of physical ports
*/
- if (total_vnics != (int)nfp_eth_table->count) {
+ if (total_vnics != nfp_eth_table->count) {
PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
ret = -ENODEV;
goto app_cleanup;
@@ -1053,15 +1053,15 @@ nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
struct nfp_rtsym_table *sym_tbl,
struct nfp_cpp *cpp)
{
- int i;
+ uint32_t i;
int err = 0;
int ret = 0;
- int total_vnics;
+ uint32_t total_vnics;
struct nfp_net_hw *hw;
/* Read the number of vNICs created for the PF */
total_vnics = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
- if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
+ if (err != 0 || total_vnics == 0 || total_vnics > 8) {
PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
return -ENODEV;
}
@@ -1069,7 +1069,7 @@ nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
for (i = 0; i < total_vnics; i++) {
struct rte_eth_dev *eth_dev;
char port_name[RTE_ETH_NAME_MAX_LEN];
- snprintf(port_name, sizeof(port_name), "%s_port%d",
+ snprintf(port_name, sizeof(port_name), "%s_port%u",
pci_dev->device.name, i);
PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
@@ -260,7 +260,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
uint64_t tx_bar_off = 0, rx_bar_off = 0;
uint32_t start_q;
- int port = 0;
+ uint16_t port = 0;
int err;
const struct nfp_dev_info *dev_info;
@@ -121,7 +121,7 @@ struct nfp_flow_item_proc {
/* Bit-mask to use when @p item->mask is not provided. */
const void *mask_default;
/* Size in bytes for @p mask_support and @p mask_default. */
- const unsigned int mask_sz;
+ const size_t mask_sz;
/* Merge a pattern item into a flow rule handle. */
int (*merge)(struct nfp_app_fw_flower *app_fw_flower,
struct rte_flow *nfp_flow,
@@ -1941,8 +1941,8 @@ static int
nfp_flow_item_check(const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc)
{
+ size_t i;
int ret = 0;
- unsigned int i;
const uint8_t *mask;
/* item->last and item->mask cannot exist without item->spec. */
@@ -2037,7 +2037,7 @@ nfp_flow_compile_item_proc(struct nfp_flower_representor *repr,
char **mbuf_off_mask,
bool is_outer_layer)
{
- int i;
+ uint32_t i;
int ret = 0;
bool continue_flag = true;
const struct rte_flow_item *item;
@@ -2271,7 +2271,7 @@ nfp_flow_action_set_ipv6(char *act_data,
const struct rte_flow_action *action,
bool ip_src_flag)
{
- int i;
+ uint32_t i;
rte_be32_t tmp;
size_t act_size;
struct nfp_fl_act_set_ipv6_addr *set_ip;
@@ -190,7 +190,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
{
struct nfp_net_dp_buf *rxe = rxq->rxbufs;
uint64_t dma_addr;
- unsigned int i;
+ uint16_t i;
PMD_RX_LOG(DEBUG, "Fill Rx Freelist for %u descriptors",
rxq->rx_count);
@@ -229,7 +229,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
int
nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
{
- int i;
+ uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) != 0)
@@ -840,7 +840,7 @@ nfp_net_recv_pkts(void *rx_queue,
static void
nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
{
- unsigned int i;
+ uint16_t i;
if (rxq->rxbufs == NULL)
return;
@@ -992,11 +992,11 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
* @txq: TX queue to work with
* Returns number of descriptors freed
*/
-int
+uint32_t
nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
{
uint32_t qcp_rd_p;
- int todo;
+ uint32_t todo;
PMD_TX_LOG(DEBUG, "queue %hu. Check for descriptor with a complete"
" status", txq->qidx);
@@ -1032,7 +1032,7 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
static void
nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
{
- unsigned int i;
+ uint32_t i;
if (txq->txbufs == NULL)
return;
@@ -253,7 +253,7 @@ int nfp_net_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
+uint32_t nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
void nfp_net_set_meta_vlan(struct nfp_net_meta_raw *meta_data,
struct rte_mbuf *pkt,
uint8_t layer);