@@ -407,13 +407,13 @@ eth_ark_rx_queue_drain(struct ark_rx_queue *queue)
}
}
-uint32_t
+int
eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct ark_rx_queue *queue;
queue = dev->data->rx_queues[queue_id];
- return (queue->prod_index - queue->cons_index); /* mod arith */
+ return (int)(queue->prod_index - queue->cons_index); /* mod arith */
}
/* ************************************************************************* */
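The /* mod arith */ note above is doing real work: prod_index and
cons_index are free-running unsigned counters, so their difference,
taken modulo 2^32, is the number of in-flight descriptors even after
either counter wraps. A standalone sketch of the invariant (names are
illustrative, not the ark driver's actual fields):

#include <assert.h>
#include <stdint.h>

/* Free-running 32-bit producer/consumer indices: unsigned
 * subtraction is performed modulo 2^32, so the depth stays correct
 * across index wraparound. */
static int queue_depth(uint32_t prod, uint32_t cons)
{
	return (int)(prod - cons);
}

int main(void)
{
	assert(queue_depth(10, 4) == 6);
	/* Producer wrapped past UINT32_MAX: 3 entries outstanding. */
	assert(queue_depth(2, UINT32_MAX) == 3);
	return 0;
}

The enic hunk further down handles the same wrap problem differently:
its cq_tail/cq_idx are bounded ring positions rather than free-running
counters, hence the explicit cq_tail += cq->ring.desc_count correction
before the subtraction.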
@@ -47,8 +47,7 @@ int eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
-uint32_t eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+int eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id);
int eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id);
uint16_t eth_ark_recv_pkts_noop(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1836,13 +1836,13 @@ avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
}
/* Get the number of used descriptors of a rx queue */
-uint32_t
+int
avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
{
#define AVF_RXQ_SCAN_INTERVAL 4
volatile union avf_rx_desc *rxdp;
struct avf_rx_queue *rxq;
- uint16_t desc = 0;
+ int desc = 0;
rxq = dev->data->rx_queues[queue_id];
rxdp = &rxq->rx_ring[rxq->rx_tail];
@@ -185,7 +185,7 @@ void avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
-uint32_t avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
+int avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
int avf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
int avf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
@@ -1619,15 +1619,16 @@ bnxt_dev_led_off_op(struct rte_eth_dev *dev)
return bnxt_hwrm_port_led_cfg(bp, false);
}
-static uint32_t
+static int
bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- uint32_t desc = 0, raw_cons = 0, cons;
struct bnxt_cp_ring_info *cpr;
+ uint32_t raw_cons = 0, cons;
struct bnxt_rx_queue *rxq;
struct rx_pkt_cmpl *rxcmp;
uint16_t cmp_type;
uint8_t cmp = 1;
+ int desc = 0;
bool valid;
rxq = dev->data->rx_queues[rx_queue_id];
@@ -738,7 +738,7 @@ static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
PMD_INIT_FUNC_TRACE();
}
-static uint32_t
+static int
dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
@@ -751,7 +751,7 @@ dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
rx_queue_id, frm_cnt);
}
- return frm_cnt;
+ return (int)frm_cnt;
}
static int dpaa_link_down(struct rte_eth_dev *dev)
@@ -620,7 +620,7 @@ dpaa2_dev_tx_queue_release(void *q __rte_unused)
PMD_INIT_FUNC_TRACE();
}
-static uint32_t
+static int
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
int32_t ret;
@@ -628,7 +628,7 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct dpaa2_queue *dpaa2_q;
struct qbman_swp *swp;
struct qbman_fq_query_np_rslt state;
- uint32_t frame_cnt = 0;
+ int frame_cnt = 0;
PMD_INIT_FUNC_TRACE();
@@ -644,7 +644,7 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
- frame_cnt = qbman_fq_state_frame_count(&state);
+ frame_cnt = (int)qbman_fq_state_frame_count(&state);
- DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
+ DPAA2_PMD_DEBUG("RX frame count for q(%d) is %d",
rx_queue_id, frame_cnt);
}
@@ -370,8 +370,7 @@ int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
-uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+int eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
@@ -447,8 +446,7 @@ int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
-uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+int eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
@@ -1528,7 +1528,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-uint32_t
+int
eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define EM_RXQ_SCAN_INTERVAL 4
@@ -1548,7 +1548,7 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
desc - rxq->nb_rx_desc]);
}
- return desc;
+ return (int)desc;
}
int
@@ -1808,7 +1808,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-uint32_t
+int
eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define IGB_RXQ_SCAN_INTERVAL 4
@@ -1828,7 +1828,7 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
desc - rxq->nb_rx_desc]);
}
- return desc;
+ return (int)desc;
}
int
@@ -269,11 +269,10 @@ static void enicpmd_dev_rx_queue_release(void *rxq)
enic_free_rq(rxq);
}
-static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id)
+static int enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id)
{
struct enic *enic = pmd_priv(dev);
- uint32_t queue_count = 0;
struct vnic_cq *cq;
uint32_t cq_tail;
uint16_t cq_idx;
@@ -288,9 +287,7 @@ static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
if (cq_tail < cq_idx)
cq_tail += cq->ring.desc_count;
- queue_count = cq_tail - cq_idx;
-
- return queue_count;
+ return (int)(cq_tail - cq_idx);
}
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
@@ -1961,7 +1961,7 @@ i40e_dev_rx_queue_release(void *rxq)
rte_free(q);
}
-uint32_t
+int
i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define I40E_RXQ_SCAN_INTERVAL 4
@@ -1987,7 +1987,7 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
desc - rxq->nb_rx_desc]);
}
- return desc;
+ return (int)desc;
}
int
@@ -205,8 +205,7 @@ void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
-uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+int i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
@@ -566,8 +566,7 @@ int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+int ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
@@ -3118,7 +3118,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-uint32_t
+int
ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define IXGBE_RXQ_SCAN_INTERVAL 4
@@ -3139,7 +3139,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
desc - rxq->nb_rx_desc]);
}
- return desc;
+ return (int)desc;
}
int
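The em/igb/i40e/ixgbe counts above all share one shape: walk the
descriptor ring from rx_tail in SCAN_INTERVAL strides while hardware
has set the "descriptor done" (DD) status bit, wrapping the probe
pointer via desc - rxq->nb_rx_desc once it runs past the ring end. A
condensed sketch of that loop (field and macro names are illustrative,
not the drivers' exact definitions):

#include <stdint.h>

#define RXQ_SCAN_INTERVAL 4     /* probe every 4th descriptor */
#define RX_DESC_STAT_DD   0x01  /* "descriptor done" status bit */

struct rx_desc { volatile uint32_t status; };

struct rx_queue {
	struct rx_desc *rx_ring; /* descriptor ring */
	uint16_t nb_rx_desc;     /* ring size */
	uint16_t rx_tail;        /* next descriptor to be read */
};

static int rx_queue_count(struct rx_queue *rxq)
{
	struct rx_desc *rxdp = &rxq->rx_ring[rxq->rx_tail];
	uint32_t desc = 0;

	/* Step by RXQ_SCAN_INTERVAL rather than one descriptor at a
	 * time: the result is a cheap approximation, not exact. */
	while (desc < rxq->nb_rx_desc &&
	       (rxdp->status & RX_DESC_STAT_DD)) {
		desc += RXQ_SCAN_INTERVAL;
		rxdp += RXQ_SCAN_INTERVAL;
		/* Wrap back to the ring start when the probe passes
		 * the last descriptor. */
		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
			rxdp = &rxq->rx_ring[rxq->rx_tail + desc -
					     rxq->nb_rx_desc];
	}
	return (int)desc;
}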
@@ -79,8 +79,7 @@ static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
-static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
- uint16_t queue_idx);
+static int nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
static void nfp_net_rx_queue_release(void *rxq);
@@ -1371,13 +1370,13 @@ nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
-static uint32_t
+static int
nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
struct nfp_net_rxq *rxq;
struct nfp_net_rx_desc *rxds;
uint32_t idx;
- uint32_t count;
+ int count;
rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
@@ -1402,7 +1401,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
idx++;
/* Wrapping? */
- if ((idx) == rxq->rx_count)
+ if (idx == rxq->rx_count)
idx = 0;
}
@@ -1113,14 +1113,14 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
sfc_adapter_unlock(sa);
}
-static uint32_t
+static int
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct sfc_adapter *sa = dev->data->dev_private;
sfc_log_init(sa, "RxQ=%u", rx_queue_id);
- return sfc_rx_qdesc_npending(sa, rx_queue_id);
+ return (int)sfc_rx_qdesc_npending(sa, rx_queue_id);
}
static int
@@ -1054,7 +1054,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
static inline void
nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
{
- uint32_t rxq_cnt;
+ int rxq_cnt;
uint32_t nb_pkts, released_pkts = 0;
uint32_t refill_cnt = 0;
struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
@@ -535,13 +535,13 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
return to_process;
}
-uint32_t
+int
nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
struct nicvf_rxq *rxq;
rxq = dev->data->rx_queues[queue_idx];
- return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
+ return (int)(nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK);
}
uint32_t
@@ -83,7 +83,7 @@ nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
*(uint64_t *)(&pkt->rearm_data) = init.value;
}
-uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
+int nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
@@ -1163,7 +1163,7 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
return 0;
}
-static uint32_t
+static int
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct vhost_queue *vq;
@@ -1172,7 +1172,7 @@ eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (vq == NULL)
return 0;
- return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
+ return (int)rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}
static const struct eth_dev_ops ops = {
@@ -722,7 +722,7 @@ power_freq_scaleup_heuristic(unsigned lcore_id,
uint16_t port_id,
uint16_t queue_id)
{
- uint32_t rxq_count = rte_eth_rx_queue_count(port_id, queue_id);
+ int rxq_count = rte_eth_rx_queue_count(port_id, queue_id);
/**
* HW Rx queue size is 128 by default, Rx burst read at maximum 32 entries
* per iteration
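Given that context (128-entry Rx ring, 32-packet bursts), the
heuristic's job is to raise the core frequency before the backlog
outgrows what a single burst can drain. A hedged sketch of that kind
of gear selection, now that the count can also signal failure (the
thresholds are illustrative, not l3fwd-power's exact constants):

#define MAX_PKT_BURST 32

enum freq_gear { FREQ_LOWER, FREQ_HIGHER, FREQ_HIGHEST };

static enum freq_gear pick_gear(int rxq_count)
{
	if (rxq_count < 0)                  /* query failed: stay put */
		return FREQ_LOWER;
	if (rxq_count >= 3 * MAX_PKT_BURST) /* several bursts queued */
		return FREQ_HIGHEST;
	if (rxq_count >= MAX_PKT_BURST)     /* one full burst queued */
		return FREQ_HIGHER;
	return FREQ_LOWER;
}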
@@ -144,8 +144,8 @@ typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
typedef void (*eth_queue_release_t)(void *queue);
/**< @internal Release memory resources allocated by given RX/TX queue. */
-typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
- uint16_t rx_queue_id);
+typedef int (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
/**< @internal Get number of used descriptors on a receive queue. */
typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
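The net effect of the series: an eth_rx_queue_count_t implementation
can now report failure (unsupported queue, detached port) as a
negative errno value instead of an ambiguous zero or garbage count,
so callers are expected to test the sign before trusting the value. A
minimal hedged caller sketch under that assumption:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: a negative return is an errno-style failure; anything
 * non-negative is the number of used Rx descriptors. */
static void log_rx_backlog(uint16_t port_id, uint16_t queue_id)
{
	int cnt = rte_eth_rx_queue_count(port_id, queue_id);

	if (cnt < 0)
		printf("port %d queue %d: count failed (%d)\n",
		       port_id, queue_id, cnt);
	else
		printf("port %d queue %d: %d used descriptors\n",
		       port_id, queue_id, cnt);
}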