@@ -156,7 +156,7 @@
#define SDP_VF_R_OUT_INT_LEVELS_TIMET (32)
/* SDP Instruction Header */
-struct sdp_instr_ih {
+__plt_packed_begin struct sdp_instr_ih {
/* Data Len */
uint64_t tlen : 16;
@@ -177,6 +177,6 @@ struct sdp_instr_ih {
/* Reserved2 */
uint64_t rsvd2 : 1;
-} __plt_packed;
+} __plt_packed_end;
#endif /* __SDP_HW_H_ */
@@ -97,10 +97,10 @@ struct roc_npc_flow_item_eth {
uint32_t reserved : 31; /**< Reserved, must be zero. */
};
-struct roc_vlan_hdr {
+__plt_packed_begin struct roc_vlan_hdr {
uint16_t vlan_tci; /**< Priority (3) + CFI (1) + Identifier Code (12) */
uint16_t eth_proto; /**< Ethernet type of encapsulated frame. */
-} __plt_packed;
+} __plt_packed_end;
struct roc_npc_flow_item_vlan {
union {
@@ -115,23 +115,23 @@ struct roc_npc_flow_item_vlan {
uint32_t reserved : 31; /**< Reserved, must be zero. */
};
-struct roc_ipv6_hdr {
+__plt_packed_begin struct roc_ipv6_hdr {
uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
uint16_t payload_len; /**< IP payload size, including ext. headers */
uint8_t proto; /**< Protocol, next header. */
uint8_t hop_limits; /**< Hop limits. */
uint8_t src_addr[16]; /**< IP address of source host. */
uint8_t dst_addr[16]; /**< IP address of destination host(s). */
-} __plt_packed;
+} __plt_packed_end;
-struct roc_ipv6_fragment_ext {
+__plt_packed_begin struct roc_ipv6_fragment_ext {
uint8_t next_header; /**< Next header type */
uint8_t reserved; /**< Reserved */
uint16_t frag_data; /**< All fragmentation data */
uint32_t id; /**< Packet ID */
-} __plt_packed;
+} __plt_packed_end;
-struct roc_ipv6_routing_ext {
+__plt_packed_begin struct roc_ipv6_routing_ext {
uint8_t next_hdr; /**< Protocol, next header. */
uint8_t hdr_len; /**< Header length. */
uint8_t type; /**< Extension header type. */
@@ -145,7 +145,7 @@ struct roc_ipv6_routing_ext {
};
};
/* Next are 128-bit IPv6 address fields to describe segments. */
-} __plt_packed;
+} __plt_packed_end;
struct roc_flow_item_ipv6_ext {
uint8_t next_hdr; /**< Next header. */
@@ -35,7 +35,7 @@
#define NIX_TX_VTAGACT_VTAG1_OP_MASK GENMASK(45, 44)
#define NIX_TX_VTAGACT_VTAG1_DEF_MASK GENMASK(57, 48)
-struct npc_rx_parse_nibble_s {
+__plt_packed_begin struct npc_rx_parse_nibble_s {
uint16_t chan : 3;
uint16_t errlev : 1;
uint16_t errcode : 2;
@@ -56,7 +56,7 @@ struct npc_rx_parse_nibble_s {
uint16_t lgtype : 1;
uint16_t lhflags : 2;
uint16_t lhtype : 1;
-} __plt_packed;
+} __plt_packed_end;
static const char *const intf_str[] = {
"NIX-RX",
@@ -97,7 +97,8 @@
#define __plt_cache_aligned __rte_cache_aligned
#define __plt_always_inline __rte_always_inline
-#define __plt_packed __rte_packed
+#define __plt_packed_begin __rte_packed_begin
+#define __plt_packed_end __rte_packed_end
#define __plt_unused __rte_unused
#define __roc_api __rte_internal
#define plt_iova_t rte_iova_t
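
The begin/end split exists because MSVC has no __attribute__((packed)) and instead packs structures with #pragma pack(push/pop), which a single trailing macro cannot express. A plausible sketch of the underlying rte_common.h definitions, assuming RTE_TOOLCHAIN_MSVC is the MSVC detection macro (the exact upstream implementation may differ):

/* Sketch only: the real rte_common.h definitions may differ in detail. */
#ifdef RTE_TOOLCHAIN_MSVC
#define __rte_packed_begin __pragma(pack(push, 1))
#define __rte_packed_end   __pragma(pack(pop))
#else
#define __rte_packed_begin
#define __rte_packed_end   __attribute__((__packed__))
#endif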
@@ -53,9 +53,6 @@
#ifndef __always_unused
#define __always_unused __rte_unused
#endif
-#ifndef __packed
-#define __packed __rte_packed
-#endif
#ifndef noinline
#define noinline __rte_noinline
#endif
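
The kernel-compat __packed fallback above is dropped rather than remapped: a single trailing token cannot expand to a push/pop pragma pair, so any remaining __packed users are presumably converted to explicit begin/end markers instead.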
@@ -158,17 +158,17 @@ do { \
} while (0)
/* memory allocation tracking */
-struct iavf_dma_mem {
+__rte_packed_begin struct iavf_dma_mem {
void *va;
u64 pa;
u32 size;
const void *zone;
-} __rte_packed;
+} __rte_packed_end;
-struct iavf_virt_mem {
+__rte_packed_begin struct iavf_virt_mem {
void *va;
u32 size;
-} __rte_packed;
+} __rte_packed_end;
#define iavf_allocate_dma_mem(h, m, unused, s, a) \
iavf_allocate_dma_mem_d(h, m, s, a)
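
Since the conversion is meant to be layout-neutral, equivalence can be checked at compile time. A hypothetical stand-alone sketch (not part of this patch; dma_mem_old and dma_mem_new are invented names) comparing the old trailing attribute against a pack(1) pragma pair:

#include <assert.h>
#include <stdint.h>

/* Old form: trailing GCC/Clang packed attribute. */
struct dma_mem_old {
	void *va;
	uint64_t pa;
	uint32_t size;
	const void *zone;
} __attribute__((__packed__));

/* New form: pragma pair, as __rte_packed_begin/_end expand under MSVC. */
#pragma pack(push, 1)
struct dma_mem_new {
	void *va;
	uint64_t pa;
	uint32_t size;
	const void *zone;
};
#pragma pack(pop)

/* Both forms must yield the identical packed layout (28 bytes on LP64). */
static_assert(sizeof(struct dma_mem_old) == sizeof(struct dma_mem_new),
	      "begin/end conversion must not change packed layout");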
@@ -109,7 +109,7 @@ enum inline_ipsec_ops {
};
/* Not all fields are valid; if a certain field is invalid, all its bits are set to 1 */
-struct virtchnl_algo_cap {
+__rte_packed_begin struct virtchnl_algo_cap {
u32 algo_type;
u16 block_size;
@@ -129,20 +129,20 @@ struct virtchnl_algo_cap {
u16 min_aad_size;
u16 max_aad_size;
u16 inc_aad_size;
-} __rte_packed;
+} __rte_packed_end;
/* VF records the crypto capabilities received from the PF via virtchnl */
-struct virtchnl_sym_crypto_cap {
+__rte_packed_begin struct virtchnl_sym_crypto_cap {
u8 crypto_type;
u8 algo_cap_num;
struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
-} __rte_packed;
+} __rte_packed_end;
/* VIRTCHNL_OP_GET_IPSEC_CAP
* VF passes virtchnl_ipsec_cap to PF,
* and PF returns the IPsec capabilities via virtchnl.
*/
-struct virtchnl_ipsec_cap {
+__rte_packed_begin struct virtchnl_ipsec_cap {
/* max number of SA per VF */
u16 max_sa_num;
@@ -169,10 +169,10 @@ struct virtchnl_ipsec_cap {
/* crypto capabilities */
struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
-} __rte_packed;
+} __rte_packed_end;
/* configuration of crypto function */
-struct virtchnl_ipsec_crypto_cfg_item {
+__rte_packed_begin struct virtchnl_ipsec_crypto_cfg_item {
u8 crypto_type;
u32 algo_type;
@@ -191,7 +191,7 @@ struct virtchnl_ipsec_crypto_cfg_item {
/* key data buffer */
u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
-} __rte_packed;
+} __rte_packed_end;
struct virtchnl_ipsec_sym_crypto_cfg {
struct virtchnl_ipsec_crypto_cfg_item
@@ -203,7 +203,7 @@ struct virtchnl_ipsec_sym_crypto_cfg {
* PF creates the SA from this configuration, and the PF driver returns
* a unique index (sa_idx) for the created SA.
*/
-struct virtchnl_ipsec_sa_cfg {
+__rte_packed_begin struct virtchnl_ipsec_sa_cfg {
/* IPsec SA Protocol - AH/ESP */
u8 virtchnl_protocol_type;
@@ -292,17 +292,17 @@ struct virtchnl_ipsec_sa_cfg {
/* crypto configuration */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
-} __rte_packed;
+} __rte_packed_end;
/* VIRTCHNL_OP_IPSEC_SA_UPDATE
* VF sends the index and configuration of an SA to PF;
* PF updates the SA according to the configuration.
*/
-struct virtchnl_ipsec_sa_update {
+__rte_packed_begin struct virtchnl_ipsec_sa_update {
u32 sa_index; /* SA to update */
u32 esn_hi; /* high 32 bits of esn */
u32 esn_low; /* low 32 bits of esn */
-} __rte_packed;
+} __rte_packed_end;
/* VIRTCHNL_OP_IPSEC_SA_DESTROY
* VF sends the SA index configuration to PF;
@@ -310,7 +310,7 @@ struct virtchnl_ipsec_sa_update {
* the flag bitmap indicates whether all SAs or only the
* selected SAs will be destroyed
*/
-struct virtchnl_ipsec_sa_destroy {
+__rte_packed_begin struct virtchnl_ipsec_sa_destroy {
/* An all-zero bitmap indicates that all SAs will be destroyed.
* A non-zero bitmap indicates that the selected SAs in
* the sa_index array will be destroyed.
@@ -319,13 +319,13 @@ struct virtchnl_ipsec_sa_destroy {
/* selected SA index */
u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
-} __rte_packed;
+} __rte_packed_end;
/* VIRTCHNL_OP_IPSEC_SA_READ
* VF sends this SA configuration to PF using virtchnl;
* PF reads the SA and returns the configuration of the created SA.
*/
-struct virtchnl_ipsec_sa_read {
+__rte_packed_begin struct virtchnl_ipsec_sa_read {
/* SA valid - invalid/valid */
u8 valid;
@@ -424,14 +424,14 @@ struct virtchnl_ipsec_sa_read {
/* crypto configuration. Salt and keys are set to 0 */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
-} __rte_packed;
+} __rte_packed_end;
#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 (0)
#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6 (1)
/* Add allowlist entry in IES */
-struct virtchnl_ipsec_sp_cfg {
+__rte_packed_begin struct virtchnl_ipsec_sp_cfg {
u32 spi;
u32 dip[4];
@@ -455,15 +455,15 @@ struct virtchnl_ipsec_sp_cfg {
/* NAT-T UDP port number. Only valid when NAT-T is supported */
u16 udp_port;
-} __rte_packed;
+} __rte_packed_end;
/* Delete allowlist entry in IES */
-struct virtchnl_ipsec_sp_destroy {
+__rte_packed_begin struct virtchnl_ipsec_sp_destroy {
/* 0 for IPv4 table, 1 for IPv6 table. */
u8 table_id;
u32 rule_id;
-} __rte_packed;
+} __rte_packed_end;
/* Response from IES to allowlist operations */
struct virtchnl_ipsec_sp_cfg_resp {
@@ -494,7 +494,7 @@ struct virtchnl_ipsec_resp {
};
/* Internal message descriptor for VF <-> IPsec communication */
-struct inline_ipsec_msg {
+__rte_packed_begin struct inline_ipsec_msg {
u16 ipsec_opcode;
u16 req_id;
@@ -520,7 +520,7 @@ struct inline_ipsec_msg {
/* Reserved */
struct virtchnl_ipsec_sa_read sa_read[0];
} ipsec_data;
-} __rte_packed;
+} __rte_packed_end;
static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
{
@@ -180,17 +180,17 @@ static inline uint64_t idpf_read_addr64(volatile void *addr)
#define BITS_PER_BYTE 8
/* memory allocation tracking */
-struct idpf_dma_mem {
+__rte_packed_begin struct idpf_dma_mem {
void *va;
u64 pa;
u32 size;
const void *zone;
-} __rte_packed;
+} __rte_packed_end;
-struct idpf_virt_mem {
+__rte_packed_begin struct idpf_virt_mem {
void *va;
u32 size;
-} __rte_packed;
+} __rte_packed_end;
#define idpf_malloc(h, s) rte_zmalloc(NULL, s, 0)
#define idpf_calloc(h, c, s) rte_zmalloc(NULL, (c) * (s), 0)
@@ -49,22 +49,25 @@ struct mlx5_mr {
};
/* Cache entry for Memory Region. */
+__rte_packed_begin
struct mr_cache_entry {
uintptr_t start; /* Start address of MR. */
uintptr_t end; /* End address of MR. */
uint32_t lkey; /* rte_cpu_to_be_32(lkey). */
-} __rte_packed;
+} __rte_packed_end;
/* MR cache table for binary search. */
+__rte_packed_begin
struct mlx5_mr_btree {
uint32_t len; /* Number of entries. */
uint32_t size; /* Total number of entries. */
struct mr_cache_entry (*table)[];
-} __rte_packed;
+} __rte_packed_end;
struct mlx5_common_device;
/* Per-queue MR control descriptor. */
+__rte_packed_begin
struct mlx5_mr_ctrl {
uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
uint32_t cur_gen; /* Generation number saved to flush caches. */
@@ -72,12 +75,13 @@ struct mlx5_mr_ctrl {
uint16_t head; /* Index of the oldest entry in top-half cache. */
struct mr_cache_entry cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
-} __rte_packed;
+} __rte_packed_end;
LIST_HEAD(mlx5_mr_list, mlx5_mr);
LIST_HEAD(mlx5_mempool_reg_list, mlx5_mempool_reg);
/* Global per-device MR cache. */
+__rte_packed_begin
struct mlx5_mr_share_cache {
uint32_t dev_gen; /* Generation number to flush local caches. */
rte_rwlock_t rwlock; /* MR cache Lock. */
@@ -88,7 +92,7 @@ struct mlx5_mr_share_cache {
struct mlx5_mempool_reg_list mempool_reg_list; /* Mempool database. */
mlx5_reg_mr_t reg_mr_cb; /* Callback to reg_mr func */
mlx5_dereg_mr_t dereg_mr_cb; /* Callback to dereg_mr func */
-} __rte_packed;
+} __rte_packed_end;
/* Multi-Packet RQ buffer header. */
struct __rte_cache_aligned mlx5_mprq_buf {
@@ -27,6 +27,7 @@ struct mlx5_list;
* Structure of an entry in the mlx5 list. Users should define their own
* struct that contains this one in order to store the data.
*/
+__rte_packed_begin
struct mlx5_list_entry {
LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
alignas(8) RTE_ATOMIC(uint32_t) ref_cnt; /* 0 means, entry is invalid. */
@@ -35,7 +36,7 @@ struct mlx5_list_entry {
struct mlx5_list_entry *gentry;
uint32_t bucket_idx;
};
-} __rte_packed;
+} __rte_packed_end;
struct __rte_cache_aligned mlx5_list_cache {
LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
@@ -323,12 +323,13 @@ enum mlx5_mpw_mode {
};
/* WQE Control segment. */
+__rte_packed_begin
struct __rte_aligned(MLX5_WSEG_SIZE) mlx5_wqe_cseg {
uint32_t opcode;
uint32_t sq_ds;
uint32_t flags;
uint32_t misc;
-} __rte_packed;
+} __rte_packed_end;
/*
* WQE CSEG opcode field size is 32 bits, divided:
@@ -340,20 +341,24 @@ struct __rte_aligned(MLX5_WSEG_SIZE) mlx5_wqe_cseg {
#define WQE_CSEG_WQE_INDEX_OFFSET 8
/* Header of a data segment; also the minimal-size data segment. */
+__rte_packed_begin
struct mlx5_wqe_dseg {
uint32_t bcount;
union {
uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
+ __rte_packed_begin
struct {
uint32_t lkey;
uint64_t pbuf;
- } __rte_packed;
+ } __rte_packed_end;
};
-} __rte_packed;
+} __rte_packed_end;
/* Subset of struct WQE Ethernet Segment. */
+__rte_packed_begin
struct mlx5_wqe_eseg {
union {
+ __rte_packed_begin
struct {
uint32_t swp_offs;
uint8_t cs_flags;
@@ -365,23 +370,26 @@ struct mlx5_wqe_eseg {
uint16_t inline_data;
uint16_t vlan_tag;
};
- } __rte_packed;
+ } __rte_packed_end;
+ __rte_packed_begin
struct {
uint32_t offsets;
uint32_t flags;
uint32_t flow_metadata;
uint32_t inline_hdr;
- } __rte_packed;
+ } __rte_packed_end;
};
-} __rte_packed;
+} __rte_packed_end;
+__rte_packed_begin
struct mlx5_wqe_qseg {
uint32_t reserved0;
uint32_t reserved1;
uint32_t max_index;
uint32_t qpn_cqn;
-} __rte_packed;
+} __rte_packed_end;
+__rte_packed_begin
struct mlx5_wqe_wseg {
uint32_t operation;
uint32_t lkey;
@@ -389,9 +397,10 @@ struct mlx5_wqe_wseg {
uint32_t va_low;
uint64_t value;
uint64_t mask;
-} __rte_packed;
+} __rte_packed_end;
/* The title WQEBB, header of WQE. */
+__rte_packed_begin
struct mlx5_wqe {
union {
struct mlx5_wqe_cseg cseg;
@@ -402,7 +411,7 @@ struct mlx5_wqe {
struct mlx5_wqe_dseg dseg[2];
uint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];
};
-} __rte_packed;
+} __rte_packed_end;
/* WQE for Multi-Packet RQ. */
struct mlx5_wqe_mprq {
@@ -464,10 +473,11 @@ struct mlx5_cqe {
uint8_t lro_num_seg;
union {
uint8_t user_index_bytes[3];
+ __rte_packed_begin
struct {
uint8_t user_index_hi;
uint16_t user_index_low;
- } __rte_packed;
+ } __rte_packed_end;
};
uint32_t flow_table_metadata;
uint8_t rsvd4[4];
@@ -487,11 +497,12 @@ struct mlx5_cqe_ts {
uint8_t op_own;
};
+__rte_packed_begin
struct mlx5_wqe_rseg {
uint64_t raddr;
uint32_t rkey;
uint32_t reserved;
-} __rte_packed;
+} __rte_packed_end;
#define MLX5_UMRC_IF_OFFSET 31u
#define MLX5_UMRC_KO_OFFSET 16u
@@ -506,13 +517,15 @@ struct mlx5_wqe_rseg {
#define MLX5_UMR_KLM_NUM_ALIGN \
(MLX5_UMR_KLM_PTR_ALIGN / sizeof(struct mlx5_klm))
+__rte_packed_begin
struct mlx5_wqe_umr_cseg {
uint32_t if_cf_toe_cq_res;
uint32_t ko_to_bs;
uint64_t mkey_mask;
uint32_t rsvd1[8];
-} __rte_packed;
+} __rte_packed_end;
+__rte_packed_begin
struct mlx5_wqe_mkey_cseg {
uint32_t fr_res_af_sf;
uint32_t qpn_mkey;
@@ -525,7 +538,7 @@ struct mlx5_wqe_mkey_cseg {
uint32_t translations_octword_size;
uint32_t res4_lps;
uint32_t reserved;
-} __rte_packed;
+} __rte_packed_end;
enum {
MLX5_BSF_SIZE_16B = 0x0,
@@ -576,6 +589,7 @@ enum {
#define MLX5_CRYPTO_MMO_TYPE_OFFSET 24
#define MLX5_CRYPTO_MMO_OP_OFFSET 20
+__rte_packed_begin
struct mlx5_wqe_umr_bsf_seg {
/*
* bs_bpt_eo_es contains:
@@ -603,12 +617,13 @@ struct mlx5_wqe_umr_bsf_seg {
uint32_t reserved1;
uint64_t keytag;
uint32_t reserved2[4];
-} __rte_packed;
+} __rte_packed_end;
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
+__rte_packed_begin
struct mlx5_umr_wqe {
struct mlx5_wqe_cseg ctr;
struct mlx5_wqe_umr_cseg ucseg;
@@ -617,24 +632,27 @@ struct mlx5_umr_wqe {
struct mlx5_wqe_dseg kseg[0];
struct mlx5_wqe_umr_bsf_seg bsf[0];
};
-} __rte_packed;
+} __rte_packed_end;
+__rte_packed_begin
struct mlx5_rdma_write_wqe {
struct mlx5_wqe_cseg ctr;
struct mlx5_wqe_rseg rseg;
struct mlx5_wqe_dseg dseg[];
-} __rte_packed;
+} __rte_packed_end;
+__rte_packed_begin
struct mlx5_wqe_send_en_seg {
uint32_t reserve[2];
uint32_t sqnpc;
uint32_t qpn;
-} __rte_packed;
+} __rte_packed_end;
+__rte_packed_begin
struct mlx5_wqe_send_en_wqe {
struct mlx5_wqe_cseg ctr;
struct mlx5_wqe_send_en_seg sseg;
-} __rte_packed;
+} __rte_packed_end;
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
@@ -677,6 +695,7 @@ struct mlx5_wqe_metadata_seg {
uint64_t addr;
};
+__rte_packed_begin
struct mlx5_gga_wqe {
uint32_t opcode;
uint32_t sq_ds;
@@ -687,40 +706,45 @@ struct mlx5_gga_wqe {
uint64_t opaque_vaddr;
struct mlx5_wqe_dseg gather;
struct mlx5_wqe_dseg scatter;
-} __rte_packed;
+} __rte_packed_end;
union mlx5_gga_compress_opaque {
+ __rte_packed_begin
struct {
uint32_t syndrome;
uint32_t reserved0;
uint32_t scattered_length;
union {
+ __rte_packed_begin
struct {
uint32_t reserved1[5];
uint32_t crc32;
uint32_t adler32;
- } v1 __rte_packed;
+ } v1 __rte_packed_end;
+ __rte_packed_begin
struct {
uint32_t crc32;
uint32_t adler32;
uint32_t crc32c;
uint32_t xxh32;
- } v2 __rte_packed;
+ } v2 __rte_packed_end;
};
- } __rte_packed;
+ } __rte_packed_end;
uint32_t data[64];
};
union mlx5_gga_crypto_opaque {
+ __rte_packed_begin
struct {
uint32_t syndrome;
uint32_t reserved0[2];
+ __rte_packed_begin
struct {
uint32_t iv[3];
uint32_t tag_size;
uint32_t aad_size;
- } cp __rte_packed;
- } __rte_packed;
+ } cp __rte_packed_end;
+ } __rte_packed_end;
uint8_t data[64];
};
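
As the two opaque unions above show, the trailing attribute was applied per aggregate, so the paired markers likewise wrap each nested struct or union individually rather than only the outermost one. A minimal sketch of the resulting shape (hypothetical names, assuming the rte_common.h macros are in scope):

#include <stdint.h>
#include <rte_common.h> /* assumed to provide __rte_packed_begin/_end */

__rte_packed_begin
struct outer_example {
	uint32_t a;
	__rte_packed_begin
	struct {
		uint32_t b;
		uint64_t c;
	} inner __rte_packed_end; /* each nested aggregate keeps its own pair */
} __rte_packed_end;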
@@ -931,6 +955,7 @@ mlx5_regc_value(uint8_t regc_ix)
/* Modification sub command. */
struct mlx5_modification_cmd {
+ __rte_packed_begin
union {
uint32_t data0;
struct {
@@ -941,7 +966,8 @@ struct mlx5_modification_cmd {
unsigned int field:12;
unsigned int action_type:4;
};
- } __rte_packed;
+ } __rte_packed_end;
+ __rte_packed_begin
union {
uint32_t data1;
uint8_t data[4];
@@ -952,7 +978,7 @@ struct mlx5_modification_cmd {
unsigned int dst_field:12;
unsigned int rsvd4:4;
};
- } __rte_packed;
+ } __rte_packed_end;
};
typedef uint64_t u64;
@@ -4191,6 +4217,7 @@ enum mlx5_aso_op {
#define MLX5_ASO_CSEG_READ_ENABLE 1
/* ASO WQE CTRL segment. */
+__rte_packed_begin
struct mlx5_aso_cseg {
uint32_t va_h;
uint32_t va_l_r;
@@ -4202,11 +4229,12 @@ struct mlx5_aso_cseg {
uint32_t condition_1_mask;
uint64_t bitwise_data;
uint64_t data_mask;
-} __rte_packed;
+} __rte_packed_end;
#define MLX5_MTR_MAX_TOKEN_VALUE INT32_MAX
/* A meter data segment - 2 per ASO WQE. */
+__rte_packed_begin
struct mlx5_aso_mtr_dseg {
uint32_t v_bo_sc_bbog_mm;
/*
@@ -4227,7 +4255,7 @@ struct mlx5_aso_mtr_dseg {
*/
uint32_t e_tokens;
uint64_t timestamp;
-} __rte_packed;
+} __rte_packed_end;
#define ASO_DSEG_VALID_OFFSET 31
#define ASO_DSEG_BO_OFFSET 30
@@ -4248,19 +4276,21 @@ struct mlx5_aso_mtr_dseg {
#define MLX5_ASO_MTRS_PER_POOL 128
/* ASO WQE data segment. */
+__rte_packed_begin
struct mlx5_aso_dseg {
union {
uint8_t data[MLX5_ASO_WQE_DSEG_SIZE];
struct mlx5_aso_mtr_dseg mtrs[MLX5_ASO_METERS_PER_WQE];
};
-} __rte_packed;
+} __rte_packed_end;
/* ASO WQE. */
+__rte_packed_begin
struct mlx5_aso_wqe {
struct mlx5_wqe_cseg general_cseg;
struct mlx5_aso_cseg aso_cseg;
struct mlx5_aso_dseg aso_dseg;
-} __rte_packed;
+} __rte_packed_end;
enum {
MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
@@ -357,7 +357,7 @@ struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET 24
#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
-struct icp_qat_fw_la_cipher_req_params {
+__rte_packed_begin struct icp_qat_fw_la_cipher_req_params {
uint32_t cipher_offset;
uint32_t cipher_length;
union {
@@ -372,9 +372,9 @@ struct icp_qat_fw_la_cipher_req_params {
uint16_t spc_aad_sz;
uint8_t reserved;
uint8_t spc_auth_res_sz;
-} __rte_packed;
+} __rte_packed_end;
-struct icp_qat_fw_la_auth_req_params {
+__rte_packed_begin struct icp_qat_fw_la_auth_req_params {
uint32_t auth_off;
uint32_t auth_len;
union {
@@ -389,7 +389,7 @@ struct icp_qat_fw_la_auth_req_params {
uint8_t resrvd1;
uint8_t hash_state_sz;
uint8_t auth_res_sz;
-} __rte_packed;
+} __rte_packed_end;
struct icp_qat_fw_la_auth_req_params_resrvd_flds {
uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
@@ -57,11 +57,11 @@ enum qat_svc_list {
};
/**< Common struct for scatter-gather list operations */
-struct qat_flat_buf {
+__rte_packed_begin struct qat_flat_buf {
uint32_t len;
uint32_t resrvd;
uint64_t addr;
-} __rte_packed;
+} __rte_packed_end;
#define qat_sgl_hdr struct { \
uint64_t resrvd; \
@@ -70,11 +70,11 @@ struct qat_flat_buf {
}
__extension__
-struct __rte_cache_aligned qat_sgl {
+__rte_packed_begin struct __rte_cache_aligned qat_sgl {
qat_sgl_hdr;
/* flexible array of flat buffers */
struct qat_flat_buf buffers[0];
-} __rte_packed;
+} __rte_packed_end;
/** Common, i.e. not service-specific, statistics */
struct qat_common_stats {