* Move __rte_aligned from the end of {struct,union} definitions to
be between the {struct,union} keyword and the tag.
Placing the attribute between {struct,union} and the tag imparts the
desired alignment on the type regardless of toolchain, for all of the
GCC, LLVM and MSVC compilers, when building either C or C++.
* Replace use of __rte_aligned(a) on variables/fields with alignas(a),
as sketched in the example below.
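For illustration, a minimal sketch of both changes, using a
hypothetical struct "foo" with field "x" (not part of this patch):

  /* old convention: attribute after the closing brace, and
   * __rte_aligned(a) applied to an individual field.
   */
  struct foo {
          uint32_t x __rte_aligned(RTE_CACHE_LINE_SIZE);
  } __rte_cache_aligned;

  /* new convention: attribute between the struct keyword and the
   * tag, and C11 alignas(a) (from <stdalign.h>) on the field.
   */
  struct __rte_cache_aligned foo {
          alignas(RTE_CACHE_LINE_SIZE) uint32_t x;
  };

Note that the file using alignas() also gains an #include
<stdalign.h>, as seen in the rte_event_timer_adapter.c hunk below.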
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/eventdev/event_timer_adapter_pmd.h  |  4 ++--
 lib/eventdev/eventdev_pmd.h             |  8 ++++----
 lib/eventdev/rte_event_crypto_adapter.c | 16 ++++++++--------
 lib/eventdev/rte_event_dma_adapter.c    | 16 ++++++++--------
 lib/eventdev/rte_event_eth_rx_adapter.c |  8 ++++----
 lib/eventdev/rte_event_eth_tx_adapter.c |  4 ++--
 lib/eventdev/rte_event_timer_adapter.c  |  9 +++++----
 lib/eventdev/rte_event_timer_adapter.h  |  8 ++++----
 lib/eventdev/rte_eventdev.h             |  8 ++++----
 lib/eventdev/rte_eventdev_core.h        |  4 ++--
 10 files changed, 43 insertions(+), 42 deletions(-)
@@ -86,7 +86,7 @@ struct event_timer_adapter_ops {
* @internal Adapter data; structure to be placed in shared memory to be
* accessible by various processes in a multi-process configuration.
*/
-struct rte_event_timer_adapter_data {
+struct __rte_cache_aligned rte_event_timer_adapter_data {
uint8_t id;
/**< Event timer adapter ID */
uint8_t event_dev_id;
@@ -110,7 +110,7 @@ struct rte_event_timer_adapter_data {
uint8_t started : 1;
/**< Flag to indicate adapter started. */
-} __rte_cache_aligned;
+};
#ifdef __cplusplus
}
@@ -107,7 +107,7 @@ struct rte_eventdev_global {
* This structure is safe to place in shared memory to be common among
* different processes in a multi-process configuration.
*/
-struct rte_eventdev_data {
+struct __rte_cache_aligned rte_eventdev_data {
int socket_id;
/**< Socket ID where memory is allocated */
uint8_t dev_id;
@@ -146,10 +146,10 @@ struct rte_eventdev_data {
uint64_t reserved_64s[4]; /**< Reserved for future fields */
void *reserved_ptrs[4]; /**< Reserved for future fields */
-} __rte_cache_aligned;
+};
/** @internal The data structure associated with each event device. */
-struct rte_eventdev {
+struct __rte_cache_aligned rte_eventdev {
struct rte_eventdev_data *data;
/**< Pointer to device data */
struct eventdev_ops *dev_ops;
@@ -189,7 +189,7 @@ struct rte_eventdev {
uint64_t reserved_64s[3]; /**< Reserved for future fields */
void *reserved_ptrs[3]; /**< Reserved for future fields */
-} __rte_cache_aligned;
+};
extern struct rte_eventdev *rte_eventdevs;
/** @internal The pool of rte_eventdev structures. */
@@ -42,7 +42,7 @@
#define ECA_ADAPTER_ARRAY "crypto_adapter_array"
-struct crypto_ops_circular_buffer {
+struct __rte_cache_aligned crypto_ops_circular_buffer {
/* index of head element in circular buffer */
uint16_t head;
/* index of tail element in circular buffer */
@@ -53,9 +53,9 @@ struct crypto_ops_circular_buffer {
uint16_t size;
/* Pointer to hold rte_crypto_ops for batching */
struct rte_crypto_op **op_buffer;
-} __rte_cache_aligned;
+};
-struct event_crypto_adapter {
+struct __rte_cache_aligned event_crypto_adapter {
/* Event device identifier */
uint8_t eventdev_id;
/* Event port identifier */
@@ -98,10 +98,10 @@ struct event_crypto_adapter {
uint16_t nb_qps;
/* Adapter mode */
enum rte_event_crypto_adapter_mode mode;
-} __rte_cache_aligned;
+};
/* Per crypto device information */
-struct crypto_device_info {
+struct __rte_cache_aligned crypto_device_info {
/* Pointer to cryptodev */
struct rte_cryptodev *dev;
/* Pointer to queue pair info */
@@ -118,15 +118,15 @@ struct crypto_device_info {
* be invoked if not already invoked
*/
uint16_t num_qpairs;
-} __rte_cache_aligned;
+};
/* Per queue pair information */
-struct crypto_queue_pair_info {
+struct __rte_cache_aligned crypto_queue_pair_info {
/* Set to indicate queue pair is enabled */
bool qp_enabled;
/* Circular buffer for batching crypto ops to cdev */
struct crypto_ops_circular_buffer cbuf;
-} __rte_cache_aligned;
+};
static struct event_crypto_adapter **event_crypto_adapter;
@@ -26,7 +26,7 @@
} while (0)
/* DMA ops circular buffer */
-struct dma_ops_circular_buffer {
+struct __rte_cache_aligned dma_ops_circular_buffer {
/* Index of head element */
uint16_t head;
@@ -41,19 +41,19 @@ struct dma_ops_circular_buffer {
/* Pointer to hold rte_event_dma_adapter_op for processing */
struct rte_event_dma_adapter_op **op_buffer;
-} __rte_cache_aligned;
+};
/* Vchan information */
-struct dma_vchan_info {
+struct __rte_cache_aligned dma_vchan_info {
/* Set to indicate vchan queue is enabled */
bool vq_enabled;
/* Circular buffer for batching DMA ops to dma_dev */
struct dma_ops_circular_buffer dma_buf;
-} __rte_cache_aligned;
+};
/* DMA device information */
-struct dma_device_info {
+struct __rte_cache_aligned dma_device_info {
/* Pointer to vchan queue info */
struct dma_vchan_info *vchanq;
@@ -81,9 +81,9 @@ struct dma_device_info {
* transfer uses a hardware mechanism
*/
uint8_t internal_event_port;
-} __rte_cache_aligned;
+};
-struct event_dma_adapter {
+struct __rte_cache_aligned event_dma_adapter {
/* Event device identifier */
uint8_t eventdev_id;
@@ -145,7 +145,7 @@ struct event_dma_adapter {
/* Per instance stats structure */
struct rte_event_dma_adapter_stats dma_stats;
-} __rte_cache_aligned;
+};
static struct event_dma_adapter **event_dma_adapter;
@@ -72,7 +72,7 @@ struct eth_rx_poll_entry {
uint16_t eth_rx_qid;
};
-struct eth_rx_vector_data {
+struct __rte_cache_aligned eth_rx_vector_data {
TAILQ_ENTRY(eth_rx_vector_data) next;
uint16_t port;
uint16_t queue;
@@ -82,7 +82,7 @@ struct eth_rx_vector_data {
uint64_t vector_timeout_ticks;
struct rte_mempool *vector_pool;
struct rte_event_vector *vector_ev;
-} __rte_cache_aligned;
+};
TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
@@ -103,7 +103,7 @@ struct eth_event_enqueue_buffer {
uint16_t last_mask;
};
-struct event_eth_rx_adapter {
+struct __rte_cache_aligned event_eth_rx_adapter {
/* RSS key */
uint8_t rss_key_be[RSS_KEY_SIZE];
/* Event device identifier */
@@ -188,7 +188,7 @@ struct event_eth_rx_adapter {
uint8_t rxa_started;
/* Adapter ID */
uint8_t id;
-} __rte_cache_aligned;
+};
/* Per eth device */
struct eth_device_info {
@@ -109,7 +109,7 @@ struct txa_service_queue_info {
};
/* PMD private structure */
-struct txa_service_data {
+struct __rte_cache_aligned txa_service_data {
/* Max mbufs processed in any service function invocation */
uint32_t max_nb_tx;
/* Number of Tx queues in adapter */
@@ -144,7 +144,7 @@ struct txa_service_data {
int64_t service_id;
/* Memory allocation name */
char mem_name[TXA_MEM_NAME_LEN];
-} __rte_cache_aligned;
+};
/* Per eth device structure */
struct txa_service_ethdev {
@@ -4,6 +4,7 @@
*/
#include <ctype.h>
+#include <stdalign.h>
#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
@@ -512,11 +513,11 @@ struct rte_event_timer_adapter *
#define EXP_TIM_BUF_SZ 128
-struct event_buffer {
+struct __rte_cache_aligned event_buffer {
size_t head;
size_t tail;
struct rte_event events[EVENT_BUFFER_SZ];
-} __rte_cache_aligned;
+};
static inline bool
event_buffer_full(struct event_buffer *bufp)
@@ -632,9 +633,9 @@ struct swtim {
/* Identifier of timer data instance */
uint32_t timer_data_id;
/* Track which cores have actually armed a timer */
- struct {
+ alignas(RTE_CACHE_LINE_SIZE) struct {
RTE_ATOMIC(uint16_t) v;
- } __rte_cache_aligned in_use[RTE_MAX_LCORE];
+ } in_use[RTE_MAX_LCORE];
/* Track which cores' timer lists should be polled */
RTE_ATOMIC(unsigned int) poll_lcores[RTE_MAX_LCORE];
/* The number of lists that should be polled */
@@ -473,7 +473,7 @@ enum rte_event_timer_state {
* The generic *rte_event_timer* structure to hold the event timer attributes
* for arm and cancel operations.
*/
-struct rte_event_timer {
+struct __rte_cache_aligned rte_event_timer {
struct rte_event ev;
/**<
* Expiry event attributes. On successful event timer timeout,
@@ -504,7 +504,7 @@ struct rte_event_timer {
/**< Memory to store user specific metadata.
* The event timer adapter implementation should not modify this area.
*/
-} __rte_cache_aligned;
+};
typedef uint16_t (*rte_event_timer_arm_burst_t)(
const struct rte_event_timer_adapter *adapter,
@@ -526,7 +526,7 @@ typedef uint16_t (*rte_event_timer_cancel_burst_t)(
/**
* @internal Data structure associated with each event timer adapter.
*/
-struct rte_event_timer_adapter {
+struct __rte_cache_aligned rte_event_timer_adapter {
rte_event_timer_arm_burst_t arm_burst;
/**< Pointer to driver arm_burst function. */
rte_event_timer_arm_tmo_tick_burst_t arm_tmo_tick_burst;
@@ -540,7 +540,7 @@ struct rte_event_timer_adapter {
uint8_t allocated : 1;
/**< Flag to indicate that this adapter has been allocated */
-} __rte_cache_aligned;
+};
#define ADAPTER_VALID_OR_ERR_RET(adapter, retval) do { \
if (adapter == NULL || !adapter->allocated) \
@@ -1078,7 +1078,7 @@ int rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
/**
* Event vector structure.
*/
-struct rte_event_vector {
+struct __rte_aligned(16) rte_event_vector {
uint16_t nb_elem;
/**< Number of elements valid in this event vector. */
uint16_t elem_offset : 12;
@@ -1118,19 +1118,19 @@ struct rte_event_vector {
* value to share between dequeue and enqueue operation.
* The application should not modify this field.
*/
- union {
+ union __rte_aligned(16) {
#endif
struct rte_mbuf *mbufs[0];
void *ptrs[0];
uint64_t u64s[0];
#ifndef __cplusplus
- } __rte_aligned(16);
+ };
#endif
/**< Start of the vector array union. Depending upon the event type the
* vector array can be an array of mbufs or pointers or opaque u64
* values.
*/
-} __rte_aligned(16);
+};
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0
@@ -49,7 +49,7 @@ typedef uint16_t (*event_dma_adapter_enqueue_t)(void *port, struct rte_event ev[
typedef int (*event_profile_switch_t)(void *port, uint8_t profile);
/**< @internal Switch active link profile on the event port. */
-struct rte_event_fp_ops {
+struct __rte_cache_aligned rte_event_fp_ops {
void **data;
/**< points to array of internal port data pointers */
event_enqueue_t enqueue;
@@ -77,7 +77,7 @@ struct rte_event_fp_ops {
event_profile_switch_t profile_switch;
/**< PMD Event switch profile function. */
uintptr_t reserved[4];
-} __rte_cache_aligned;
+};
extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];