@@ -490,7 +490,7 @@ bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_sent;
}
-int __attribute__((cold))
+int __rte_cold
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
uintptr_t p;
@@ -2958,7 +2958,7 @@ fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-static void __attribute__((cold))
+static void __rte_cold
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
struct fm10k_tx_queue *txq;
@@ -3007,7 +3007,7 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
}
}
-static void __attribute__((cold))
+static void __rte_cold
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
struct fm10k_dev_info *dev_info =
@@ -203,7 +203,7 @@ fm10k_desc_to_pktype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0)
#endif
-int __attribute__((cold))
+int __rte_cold
fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
@@ -231,7 +231,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
#endif
}
-int __attribute__((cold))
+int __rte_cold
fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
{
uintptr_t p;
@@ -349,7 +349,7 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
}
-void __attribute__((cold))
+void __rte_cold
fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
{
const unsigned mask = rxq->nb_desc - 1;
@@ -695,13 +695,13 @@ static const struct fm10k_txq_ops vec_txq_ops = {
.reset = fm10k_reset_tx_queue,
};
-void __attribute__((cold))
+void __rte_cold
fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
{
txq->ops = &vec_txq_ops;
}
-int __attribute__((cold))
+int __rte_cold
fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
{
/* Vector TX can't offload any features yet */
@@ -864,7 +864,7 @@ fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-static void __attribute__((cold))
+static void __rte_cold
fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
{
static const struct fm10k_tx_desc zeroed_desc = {0};
@@ -3093,7 +3093,7 @@ i40e_get_recommend_rx_vec(bool scatter)
i40e_recv_pkts_vec;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_rx_function(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
@@ -3208,7 +3208,7 @@ i40e_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
return ret;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
{
struct i40e_adapter *ad =
@@ -3259,7 +3259,7 @@ i40e_get_recommend_tx_vec(void)
return i40e_xmit_pkts_vec;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_tx_function(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
@@ -3337,7 +3337,7 @@ i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
return ret;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_default_ptype_table(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
@@ -3348,7 +3348,7 @@ i40e_set_default_ptype_table(struct rte_eth_dev *dev)
ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_default_pctype_table(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
@@ -591,25 +591,25 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-void __attribute__((cold))
+void __rte_cold
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
_i40e_rx_queue_release_mbufs_vec(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
return i40e_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
{
return 0;
}
-int __attribute__((cold))
+int __rte_cold
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
return i40e_rx_vec_dev_conf_condition_check_default(dev);
@@ -569,25 +569,25 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-void __attribute__((cold))
+void __rte_cold
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
_i40e_rx_queue_release_mbufs_vec(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
return i40e_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return 0;
}
-int __attribute__((cold))
+int __rte_cold
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
return i40e_rx_vec_dev_conf_condition_check_default(dev);
@@ -738,25 +738,25 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-void __attribute__((cold))
+void __rte_cold
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
_i40e_rx_queue_release_mbufs_vec(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
return i40e_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return 0;
}
-int __attribute__((cold))
+int __rte_cold
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
return i40e_rx_vec_dev_conf_condition_check_default(dev);
@@ -641,13 +641,13 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-static void __attribute__((cold))
+static void __rte_cold
iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
{
_iavf_rx_queue_release_mbufs_vec(rxq);
}
-static void __attribute__((cold))
+static void __rte_cold
iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
@@ -661,27 +661,27 @@ static const struct iavf_txq_ops sse_vec_txq_ops = {
.release_mbufs = iavf_tx_queue_release_mbufs_sse,
};
-int __attribute__((cold))
+int __rte_cold
iavf_txq_vec_setup(struct iavf_tx_queue *txq)
{
txq->ops = &sse_vec_txq_ops;
return 0;
}
-int __attribute__((cold))
+int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
rxq->ops = &sse_vec_rxq_ops;
return iavf_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
iavf_rx_vec_dev_check(struct rte_eth_dev *dev)
{
return iavf_rx_vec_dev_check_default(dev);
}
-int __attribute__((cold))
+int __rte_cold
iavf_tx_vec_dev_check(struct rte_eth_dev *dev)
{
return iavf_tx_vec_dev_check_default(dev);
@@ -2944,7 +2944,7 @@ ice_xmit_pkts_simple(void *tx_queue,
return nb_tx;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_rx_function(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
@@ -3054,7 +3054,7 @@ ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
return ret;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
{
struct ice_adapter *ad =
@@ -3123,7 +3123,7 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_tx_function(struct rte_eth_dev *dev)
{
struct ice_adapter *ad =
@@ -3752,7 +3752,7 @@ ice_get_default_pkt_type(uint16_t ptype)
return type_table[ptype];
}
-void __attribute__((cold))
+void __rte_cold
ice_set_default_ptype_table(struct rte_eth_dev *dev)
{
struct ice_adapter *ad =
@@ -609,7 +609,7 @@ ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-int __attribute__((cold))
+int __rte_cold
ice_rxq_vec_setup(struct ice_rx_queue *rxq)
{
if (!rxq)
@@ -619,7 +619,7 @@ ice_rxq_vec_setup(struct ice_rx_queue *rxq)
return ice_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
ice_txq_vec_setup(struct ice_tx_queue __rte_unused *txq)
{
if (!txq)
@@ -629,13 +629,13 @@ ice_txq_vec_setup(struct ice_tx_queue __rte_unused *txq)
return 0;
}
-int __attribute__((cold))
+int __rte_cold
ice_rx_vec_dev_check(struct rte_eth_dev *dev)
{
return ice_rx_vec_dev_check_default(dev);
}
-int __attribute__((cold))
+int __rte_cold
ice_tx_vec_dev_check(struct rte_eth_dev *dev)
{
return ice_tx_vec_dev_check_default(dev);
@@ -67,7 +67,7 @@ ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_deferred_start = txq->deferred_start;
}
-static inline void __attribute__((cold))
+static inline void __rte_cold
ionic_tx_flush(struct ionic_cq *cq)
{
struct ionic_queue *q = cq->bound_q;
@@ -118,7 +118,7 @@ ionic_tx_flush(struct ionic_cq *cq)
}
}
-void __attribute__((cold))
+void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
{
struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
@@ -128,7 +128,7 @@ ionic_dev_tx_queue_release(void *tx_queue)
ionic_qcq_free(txq);
}
-int __attribute__((cold))
+int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
struct ionic_qcq *txq;
@@ -154,7 +154,7 @@ ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
return 0;
}
-int __attribute__((cold))
+int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
uint16_t nb_desc, uint32_t socket_id __rte_unused,
const struct rte_eth_txconf *tx_conf)
@@ -208,7 +208,7 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
/*
* Start Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
struct ionic_qcq *txq;
@@ -609,7 +609,7 @@ ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.offloads = rxq->offloads;
}
-static void __attribute__((cold))
+static void __rte_cold
ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
@@ -625,7 +625,7 @@ ionic_rx_empty(struct ionic_queue *q)
}
}
-void __attribute__((cold))
+void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
{
struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
@@ -637,7 +637,7 @@ ionic_dev_rx_queue_release(void *rx_queue)
ionic_qcq_free(rxq);
}
-int __attribute__((cold))
+int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t rx_queue_id,
uint16_t nb_desc,
@@ -873,7 +873,7 @@ ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
ionic_q_post(q, true, ionic_rx_clean, mbuf);
}
-static int __attribute__((cold))
+static int __rte_cold
ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
{
struct ionic_queue *q = &rxq->q;
@@ -950,7 +950,7 @@ ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
/*
* Start Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -983,7 +983,7 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
return 0;
}
-static inline void __attribute__((cold))
+static inline void __rte_cold
ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
void *service_cb_arg)
{
@@ -1038,7 +1038,7 @@ ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
/*
* Stop Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
struct ionic_qcq *rxq;
@@ -2284,7 +2284,7 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
*
**********************************************************************/
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
@@ -2409,7 +2409,7 @@ ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
return ixgbe_tx_done_cleanup_full(txq, free_cnt);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
@@ -2417,7 +2417,7 @@ ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
rte_free(txq->sw_ring);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
@@ -2427,14 +2427,14 @@ ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_tx_queue_release(void *txq)
{
ixgbe_tx_queue_release(txq);
}
/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
@@ -2484,7 +2484,7 @@ static const struct ixgbe_txq_ops def_txq_ops = {
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
@@ -2555,7 +2555,7 @@ ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
return tx_offload_capa;
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
@@ -2770,7 +2770,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
*
* @m scattered cluster head
*/
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_free_sc_cluster(struct rte_mbuf *m)
{
uint16_t i, nb_segs = m->nb_segs;
@@ -2783,7 +2783,7 @@ ixgbe_free_sc_cluster(struct rte_mbuf *m)
}
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
@@ -2820,7 +2820,7 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
}
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
@@ -2831,7 +2831,7 @@ ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_rx_queue_release(void *rxq)
{
ixgbe_rx_queue_release(rxq);
@@ -2845,7 +2845,7 @@ ixgbe_dev_rx_queue_release(void *rxq)
* -EINVAL: the preconditions are NOT satisfied and the default Rx burst
* function must be used.
*/
-static inline int __attribute__((cold))
+static inline int __rte_cold
check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
{
int ret = 0;
@@ -2882,7 +2882,7 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
}
/* Reset dynamic ixgbe_rx_queue fields back to defaults */
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
@@ -3007,7 +3007,7 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
return offloads;
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
@@ -3288,7 +3288,7 @@ ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
/*
* Set up link loopback for X540/X550 mode Tx->Rx.
*/
-static inline void __attribute__((cold))
+static inline void __rte_cold
ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
{
uint32_t macc;
@@ -3316,7 +3316,7 @@ ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
@@ -4424,7 +4424,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
}
-static int __attribute__((cold))
+static int __rte_cold
ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
struct ixgbe_rx_entry *rxe = rxq->sw_ring;
@@ -4724,7 +4724,7 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_set_rx_function(struct rte_eth_dev *dev)
{
uint16_t i, rx_using_sse;
@@ -4977,7 +4977,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/*
* Initializes Receive Unit.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
@@ -5154,7 +5154,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Initializes Transmit Unit.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
@@ -5243,7 +5243,7 @@ ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
/*
* Set up link for 82599 loopback mode Tx->Rx.
*/
-static inline void __attribute__((cold))
+static inline void __rte_cold
ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
{
PMD_INIT_FUNC_TRACE();
@@ -5271,7 +5271,7 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
/*
* Start Transmit and Receive Units.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
@@ -5359,7 +5359,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
/*
* Start Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
@@ -5401,7 +5401,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
/*
* Stop Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
@@ -5441,7 +5441,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
/*
* Start Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
@@ -5480,7 +5480,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
/*
* Stop Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
@@ -5577,7 +5577,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
/*
* [VF] Initializes Receive Unit.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
@@ -5701,7 +5701,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
/*
* [VF] Initializes Transmit Unit.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
@@ -5742,7 +5742,7 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
/*
* [VF] Start Transmit and Receive Units.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
@@ -531,25 +531,25 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
_ixgbe_tx_queue_release_mbufs_vec(txq);
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
_ixgbe_rx_queue_release_mbufs_vec(rxq);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
_ixgbe_tx_free_swring_vec(txq);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
_ixgbe_reset_tx_queue_vec(txq);
@@ -561,19 +561,19 @@ static const struct ixgbe_txq_ops vec_txq_ops = {
.reset = ixgbe_reset_tx_queue,
};
-int __attribute__((cold))
+int __rte_cold
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
return ixgbe_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
@@ -702,25 +702,25 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_pkts;
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
_ixgbe_tx_queue_release_mbufs_vec(txq);
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
_ixgbe_rx_queue_release_mbufs_vec(rxq);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
_ixgbe_tx_free_swring_vec(txq);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
_ixgbe_reset_tx_queue_vec(txq);
@@ -732,19 +732,19 @@ static const struct ixgbe_txq_ops vec_txq_ops = {
.reset = ixgbe_reset_tx_queue,
};
-int __attribute__((cold))
+int __rte_cold
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
return ixgbe_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
@@ -121,7 +121,7 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
* @return
* 1 if supported, negative errno value if not.
*/
-int __attribute__((cold))
+int __rte_cold
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
struct mlx5_rxq_ctrl *ctrl =
@@ -145,7 +145,7 @@ mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
* @return
* 1 if supported, negative errno value if not.
*/
-int __attribute__((cold))
+int __rte_cold
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -27,7 +27,7 @@
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
-int __attribute__((cold))
+int __rte_cold
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
uintptr_t p;
@@ -167,6 +167,11 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void)
*/
#define __rte_hot __attribute__((hot))
+/**
+ * Hint that a function is rarely executed (cold path); mirrors __rte_hot.
+ */
+#define __rte_cold __attribute__((cold))
+
/*********** Macros for pointer arithmetic ********/
/**
@@ -69,7 +69,7 @@ void rte_dump_registers(void);
void __rte_panic(const char *funcname , const char *format, ...)
#ifdef __GNUC__
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
- __attribute__((cold))
+ __rte_cold
#endif
#endif
__attribute__((noreturn))
@@ -279,7 +279,7 @@ void rte_log_dump(FILE *f);
int rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
#ifdef __GNUC__
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
- __attribute__((cold))
+ __rte_cold
#endif
#endif
__attribute__((format(printf, 3, 4)));