Move __rte_aligned(a) to the new conventional location. The new
placement, between the {struct,union} keyword and the tag, allows the
desired alignment to be imparted on the type regardless of the
toolchain being used, for both C and C++. Additionally, it avoids
confusing Doxygen when generating documentation.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
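Notes: a minimal sketch of the two placements, using a hypothetical
struct for illustration (the identifier "example" is not from this
patch). Per the commit message, the new placement between the struct
keyword and the tag imparts the alignment regardless of toolchain, for
both C and C++, whereas the old postfix placement does not:

    /* old placement: attribute follows the closing brace */
    struct example {
            uint32_t value;
    } __rte_cache_aligned;

    /* new placement: attribute between the struct keyword and the tag */
    struct __rte_cache_aligned example {
            uint32_t value;
    };

For individual fields and variables, the patch uses standard C11
alignas() instead of a postfix attribute, e.g. (taken from the
mlx5_hws_cnt_pool hunk below):

    /* cache-line aligned field inside a larger structure */
    alignas(RTE_CACHE_LINE_SIZE) uint32_t query_gen;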
drivers/net/mlx5/hws/mlx5dr_send.h | 4 ++--
drivers/net/mlx5/mlx5.h | 6 +++---
drivers/net/mlx5/mlx5_flow.h | 4 ++--
drivers/net/mlx5/mlx5_hws_cnt.h | 14 +++++++-------
drivers/net/mlx5/mlx5_rx.h | 4 ++--
drivers/net/mlx5/mlx5_rxtx.c | 6 +++---
drivers/net/mlx5/mlx5_tx.h | 10 +++++-----
drivers/net/mlx5/mlx5_utils.h | 2 +-
8 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/drivers/net/mlx5/hws/mlx5dr_send.h b/drivers/net/mlx5/hws/mlx5dr_send.h
--- a/drivers/net/mlx5/hws/mlx5dr_send.h
+++ b/drivers/net/mlx5/hws/mlx5dr_send.h
@@ -144,7 +144,7 @@ struct mlx5dr_completed_poll {
uint16_t mask;
};
-struct mlx5dr_send_engine {
+struct __rte_cache_aligned mlx5dr_send_engine {
struct mlx5dr_send_ring send_ring[MLX5DR_NUM_SEND_RINGS]; /* For now 1:1 mapping */
struct mlx5dv_devx_uar *uar; /* Uar is shared between rings of a queue */
struct mlx5dr_completed_poll completed;
@@ -153,7 +153,7 @@ struct mlx5dr_send_engine {
uint16_t rings;
uint16_t num_entries;
bool err;
-} __rte_cache_aligned;
+};
struct mlx5dr_send_engine_post_ctrl {
struct mlx5dr_send_engine *queue;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -415,7 +415,7 @@ struct mlx5_hw_q_job {
};
/* HW steering job descriptor LIFO pool. */
-struct mlx5_hw_q {
+struct __rte_cache_aligned mlx5_hw_q {
uint32_t job_idx; /* Free job index. */
uint32_t size; /* Job LIFO queue size. */
uint32_t ongoing_flow_ops; /* Number of ongoing flow operations. */
@@ -424,7 +424,7 @@ struct mlx5_hw_q {
struct rte_ring *indir_iq; /* Indirect action SW in progress queue. */
struct rte_ring *flow_transfer_pending;
struct rte_ring *flow_transfer_completed;
-} __rte_cache_aligned;
+};
#define MLX5_COUNTER_POOLS_MAX_NUM (1 << 15)
@@ -1405,7 +1405,7 @@ struct mlx5_hws_cnt_svc_mng {
uint32_t query_interval;
rte_thread_t service_thread;
uint8_t svc_running;
- struct mlx5_hws_aso_mng aso_mng __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_aso_mng aso_mng;
};
#define MLX5_FLOW_HW_TAGS_MAX 12
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1654,9 +1654,9 @@ struct mlx5_matcher_info {
RTE_ATOMIC(uint32_t) refcnt;
};
-struct mlx5_dr_rule_action_container {
+struct __rte_cache_aligned mlx5_dr_rule_action_container {
struct mlx5dr_rule_action acts[MLX5_HW_MAX_ACTS];
-} __rte_cache_aligned;
+};
struct rte_flow_template_table {
LIST_ENTRY(rte_flow_template_table) next;
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -97,11 +97,11 @@ struct mlx5_hws_cnt_pool_caches {
struct rte_ring *qcache[];
};
-struct mlx5_hws_cnt_pool {
+struct __rte_cache_aligned mlx5_hws_cnt_pool {
LIST_ENTRY(mlx5_hws_cnt_pool) next;
- struct mlx5_hws_cnt_pool_cfg cfg __rte_cache_aligned;
- struct mlx5_hws_cnt_dcs_mng dcs_mng __rte_cache_aligned;
- uint32_t query_gen __rte_cache_aligned;
+ alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_pool_cfg cfg;
+ alignas(RTE_CACHE_LINE_SIZE) struct mlx5_hws_cnt_dcs_mng dcs_mng;
+ alignas(RTE_CACHE_LINE_SIZE) uint32_t query_gen;
struct mlx5_hws_cnt *pool;
struct mlx5_hws_cnt_raw_data_mng *raw_mng;
struct rte_ring *reuse_list;
@@ -110,7 +110,7 @@ struct mlx5_hws_cnt_pool {
struct mlx5_hws_cnt_pool_caches *cache;
uint64_t time_of_last_age_check;
struct mlx5_priv *priv;
-} __rte_cache_aligned;
+};
/* HWS AGE status. */
enum {
@@ -133,7 +133,7 @@ enum {
};
/* HWS counter age parameter. */
-struct mlx5_hws_age_param {
+struct __rte_cache_aligned mlx5_hws_age_param {
uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
uint32_t sec_since_last_hit;
/* Time in seconds since last hit (atomically accessed). */
@@ -149,7 +149,7 @@ struct mlx5_hws_age_param {
cnt_id_t own_cnt_index;
/* Counter action created specifically for this AGE action. */
void *context; /* Flow AGE context. */
-} __rte_packed __rte_cache_aligned;
+} __rte_packed;
/**
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -79,7 +79,7 @@ struct mlx5_eth_rxseg {
};
/* RX queue descriptor. */
-struct mlx5_rxq_data {
+struct __rte_cache_aligned mlx5_rxq_data {
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int hw_timestamp:1; /* Enable HW timestamp. */
unsigned int rt_timestamp:1; /* Realtime timestamp format. */
@@ -146,7 +146,7 @@ struct mlx5_rxq_data {
uint32_t rxseg_n; /* Number of split segment descriptions. */
struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
/* Buffer split segment descriptions - sizes, offsets, pools. */
-} __rte_cache_aligned;
+};
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -77,12 +77,12 @@
static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
"invalid WQE size");
-uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
+alignas(RTE_CACHE_LINE_SIZE) uint32_t mlx5_ptype_table[] = {
[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};
-uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
-uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
+alignas(RTE_CACHE_LINE_SIZE) uint8_t mlx5_cksum_table[1 << 10];
+alignas(RTE_CACHE_LINE_SIZE) uint8_t mlx5_swp_types_table[1 << 10];
uint64_t rte_net_mlx5_dynf_inline_mask;
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -83,9 +83,9 @@ enum mlx5_txcmp_code {
extern uint64_t rte_net_mlx5_dynf_inline_mask;
#define RTE_MBUF_F_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
-extern uint32_t mlx5_ptype_table[] __rte_cache_aligned;
-extern uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
-extern uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
+extern alignas(RTE_CACHE_LINE_SIZE) uint32_t mlx5_ptype_table[];
+extern alignas(RTE_CACHE_LINE_SIZE) uint8_t mlx5_cksum_table[1 << 10];
+extern alignas(RTE_CACHE_LINE_SIZE) uint8_t mlx5_swp_types_table[1 << 10];
struct mlx5_txq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
@@ -112,7 +112,7 @@ struct mlx5_txq_local {
/* TX queue descriptor. */
__extension__
-struct mlx5_txq_data {
+struct __rte_cache_aligned mlx5_txq_data {
uint16_t elts_head; /* Current counter in (*elts)[]. */
uint16_t elts_tail; /* Counter of first element awaiting completion. */
uint16_t elts_comp; /* elts index since last completion request. */
@@ -173,7 +173,7 @@ struct mlx5_txq_data {
struct mlx5_uar_data uar_data;
struct rte_mbuf *elts[];
/* Storage for queued packets, must be the last field. */
-} __rte_cache_aligned;
+};
/* TX queue control descriptor. */
__extension__
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -235,7 +235,7 @@ struct mlx5_indexed_trunk {
uint32_t next; /* Next free trunk in free list. */
uint32_t free; /* Free entries available */
struct rte_bitmap *bmp;
- uint8_t data[] __rte_cache_aligned; /* Entry data start. */
+ alignas(RTE_CACHE_LINE_SIZE) uint8_t data[]; /* Entry data start. */
};
struct mlx5_indexed_cache {