As the variable mbuf_alloc_failed is operated on by more than one thread,
change it to type rte_atomic64_t and operate on it with the rte_atomic64_xx()
functions; this will avoid multithreading issues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
drivers/common/idpf/idpf_common_rxtx.c | 10 ++++++----
drivers/common/idpf/idpf_common_rxtx.h | 2 +-
drivers/common/idpf/idpf_common_rxtx_avx512.c | 12 ++++++++----
drivers/net/idpf/idpf_ethdev.c | 5 +++--
4 files changed, 18 insertions(+), 11 deletions(-)
@@ -592,7 +592,8 @@ idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
next_avail = 0;
rx_bufq->nb_rx_hold -= delta;
} else {
- rx_bufq->rx_stats.mbuf_alloc_failed += nb_desc - next_avail;
+ rte_atomic64_add(&(rx_bufq->rx_stats.mbuf_alloc_failed),
+ nb_desc - next_avail);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
return;
@@ -611,7 +612,8 @@ idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
next_avail += nb_refill;
rx_bufq->nb_rx_hold -= nb_refill;
} else {
- rx_bufq->rx_stats.mbuf_alloc_failed += nb_desc - next_avail;
+ rte_atomic64_add(&(rx_bufq->rx_stats.mbuf_alloc_failed),
+ nb_desc - next_avail);
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
rx_bufq->port_id, rx_bufq->queue_id);
}
@@ -1088,7 +1090,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(nmb == NULL)) {
- rxq->rx_stats.mbuf_alloc_failed++;
+ rte_atomic64_inc(&(rxq->rx_stats.mbuf_alloc_failed));
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
@@ -1197,7 +1199,7 @@ idpf_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- rxq->rx_stats.mbuf_alloc_failed++;
+ rte_atomic64_inc(&(rxq->rx_stats.mbuf_alloc_failed));
RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", rxq->port_id, rxq->queue_id);
break;
@@ -91,7 +91,7 @@
#define PF_GLTSYN_SHTIME_H_5 (PF_TIMESYNC_BAR4_BASE + 0x13C)
struct idpf_rx_stats {
- uint64_t mbuf_alloc_failed;
+ rte_atomic64_t mbuf_alloc_failed;
};
struct idpf_rx_queue {
@@ -38,7 +38,8 @@ idpf_singleq_rearm_common(struct idpf_rx_queue *rxq)
dma_addr0);
}
}
- rxq->rx_stats.mbuf_alloc_failed += IDPF_RXQ_REARM_THRESH;
+ rte_atomic64_add(&(rxq->rx_stats.mbuf_alloc_failed),
+ IDPF_RXQ_REARM_THRESH);
return;
}
struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
@@ -167,7 +168,8 @@ idpf_singleq_rearm(struct idpf_rx_queue *rxq)
dma_addr0);
}
}
- rxq->rx_stats.mbuf_alloc_failed += IDPF_RXQ_REARM_THRESH;
+ rte_atomic64_add(&(rxq->rx_stats.mbuf_alloc_failed),
+ IDPF_RXQ_REARM_THRESH);
return;
}
}
@@ -562,7 +564,8 @@ idpf_splitq_rearm_common(struct idpf_rx_queue *rx_bufq)
dma_addr0);
}
}
- rx_bufq->rx_stats.mbuf_alloc_failed += IDPF_RXQ_REARM_THRESH;
+ rte_atomic64_add(&(rx_bufq->rx_stats.mbuf_alloc_failed),
+ IDPF_RXQ_REARM_THRESH);
return;
}
@@ -635,7 +638,8 @@ idpf_splitq_rearm(struct idpf_rx_queue *rx_bufq)
dma_addr0);
}
}
- rx_bufq->rx_stats.mbuf_alloc_failed += IDPF_RXQ_REARM_THRESH;
+ rte_atomic64_add(&(rx_bufq->rx_stats.mbuf_alloc_failed),
+ IDPF_RXQ_REARM_THRESH);
return;
}
}
@@ -256,7 +256,8 @@ idpf_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += rxq->rx_stats.mbuf_alloc_failed;
+ mbuf_alloc_failed +=
+ rte_atomic64_read(&(rxq->rx_stats.mbuf_alloc_failed));
}
return mbuf_alloc_failed;
@@ -303,7 +304,7 @@ idpf_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- rxq->rx_stats.mbuf_alloc_failed = 0;
+ rte_atomic64_set(&(rxq->rx_stats.mbuf_alloc_failed), 0);
}
}