[v3,1/1] common/octeontx2: fix unaligned mbox memory accesses
diff mbox series

Message ID 20190802065716.20654-1-vattunuru@marvell.com
State Accepted, archived
Delegated to: Jerin Jacob
Headers show
Series
  • [v3,1/1] common/octeontx2: fix unaligned mbox memory accesses
Related show

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-Compile-Testing success Compile Testing PASS
ci/intel-Performance-Testing success Performance Testing PASS
ci/Intel-compilation success Compilation OK
ci/mellanox-Performance-Testing success Performance Testing PASS

Commit Message

Vamsi Krishna Attunuru Aug. 2, 2019, 6:57 a.m. UTC
From: Vamsi Attunuru <vattunuru@marvell.com>

Octeontx2 PMD's mailbox client uses device memory to send messages
to mailbox server in the admin function Linux kernel driver.
The device memory used for the mailbox communication needs to
be qualified as volatile memory type to avoid unaligned device
memory accesses because of compiler's memory access coalescing.

This patch modifies the mailbox request and responses as volatile
type which were non-volatile earlier and accessed from unaligned
memory addresses which resulted in bus errors on Fedora 30 with
gcc 9.1.1.

Fixes: 2b71657c8660 ("common/octeontx2: add mbox request and response definition")

Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
V3 Changes:
* Updated the distro information on which the bus error was reported, and added
 the 'Fixes' reference in the commit message.

V2 Changes:
* Moved __otx2_io to individual members to align with other mbox elements.
* Updated commit message.

 drivers/common/octeontx2/otx2_mbox.h           | 42 +++++++++++++-------------
 drivers/mempool/octeontx2/otx2_mempool_debug.c |  4 +--
 drivers/mempool/octeontx2/otx2_mempool_ops.c   |  6 ++--
 drivers/net/octeontx2/otx2_ethdev_debug.c      |  6 ++--
 4 files changed, 29 insertions(+), 29 deletions(-)

Comments

Jerin Jacob Kollanukkaran Aug. 2, 2019, 9:55 a.m. UTC | #1
> -----Original Message-----
> From: vattunuru@marvell.com <vattunuru@marvell.com>
> Sent: Friday, August 2, 2019 12:27 PM
> To: dev@dpdk.org
> Cc: thomas@monjalon.net; Jerin Jacob Kollanukkaran <jerinj@marvell.com>;
> Vamsi Krishna Attunuru <vattunuru@marvell.com>; Nithin Kumar
> Dabilpuram <ndabilpuram@marvell.com>
> Subject: [dpdk-dev] [PATCH v3 1/1] common/octeontx2: fix unaligned mbox
> memory accesses
> 
> From: Vamsi Attunuru <vattunuru@marvell.com>
> 
> Octeontx2 PMD's mailbox client uses device memory to send messages to
> mailbox server in the admin function Linux kernel driver.
> The device memory used for the mailbox communication needs to be
> qualified as volatile memory type to avoid unaligned device memory
> accesses because of compiler's memory access coalescing.
> 
> This patch modifies the mailbox request and responses as volatile type which
> were non-volatile earlier and accessed from unaligned memory addresses
> which resulted in bus errors on Fedora 30 with gcc 9.1.1.
> 
> Fixes: 2b71657c8660 ("common/octeontx2: add mbox request and response
> definition")
> 
> Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>

Acked-by: Jerin Jacob <jerinj@marvell.com>

Thomas,

Could you merge this patch for rc4. It fixes the bus error on Fedora 30 with gcc 9.1.1.
Thomas Monjalon Aug. 5, 2019, 9:22 p.m. UTC | #2
02/08/2019 11:55, Jerin Jacob Kollanukkaran:
> > From: Vamsi Attunuru <vattunuru@marvell.com>
> > 
> > Octeontx2 PMD's mailbox client uses device memory to send messages to
> > mailbox server in the admin function Linux kernel driver.
> > The device memory used for the mailbox communication needs to be
> > qualified as volatile memory type to avoid unaligned device memory
> > accesses because of compiler's memory access coalescing.
> > 
> > This patch modifies the mailbox request and responses as volatile type which
> > were non-volatile earlier and accessed from unaligned memory addresses
> > which resulted in bus errors on Fedora 30 with gcc 9.1.1.
> > 
> > Fixes: 2b71657c8660 ("common/octeontx2: add mbox request and response
> > definition")
> > 
> > Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
> > Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
> 
> Acked-by: Jerin Jacob <jerinj@marvell.com>
> 
> Thomas,
> 
> Could you merge this patch for rc4. It fixes the bus error on Fedora 30 with gcc 9.1.1.

Applied, thanks

Patch
diff mbox series

diff --git a/drivers/common/octeontx2/otx2_mbox.h b/drivers/common/octeontx2/otx2_mbox.h
index b2c59c8..1bdaa91 100644
--- a/drivers/common/octeontx2/otx2_mbox.h
+++ b/drivers/common/octeontx2/otx2_mbox.h
@@ -552,16 +552,16 @@  struct npa_aq_enq_req {
 		 * LF fills the pool_id in aura.pool_addr. AF will translate
 		 * the pool_id to pool context pointer.
 		 */
-		struct npa_aura_s aura;
+		__otx2_io struct npa_aura_s aura;
 		/* Valid when op == WRITE/INIT and ctype == POOL */
-		struct npa_pool_s pool;
+		__otx2_io struct npa_pool_s pool;
 	};
 	/* Mask data when op == WRITE (1=write, 0=don't write) */
 	union {
 		/* Valid when op == WRITE and ctype == AURA */
-		struct npa_aura_s aura_mask;
+		__otx2_io struct npa_aura_s aura_mask;
 		/* Valid when op == WRITE and ctype == POOL */
-		struct npa_pool_s pool_mask;
+		__otx2_io struct npa_pool_s pool_mask;
 	};
 };
 
@@ -569,9 +569,9 @@  struct npa_aq_enq_rsp {
 	struct mbox_msghdr hdr;
 	union {
 		/* Valid when op == READ and ctype == AURA */
-		struct npa_aura_s aura;
+		__otx2_io struct npa_aura_s aura;
 		/* Valid when op == READ and ctype == POOL */
-		struct npa_pool_s pool;
+		__otx2_io struct npa_pool_s pool;
 	};
 };
 
@@ -655,39 +655,39 @@  struct nix_aq_enq_req {
 	uint8_t __otx2_io op;
 	union {
 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RQ */
-		struct nix_rq_ctx_s rq;
+		__otx2_io struct nix_rq_ctx_s rq;
 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_SQ */
-		struct nix_sq_ctx_s sq;
+		__otx2_io struct nix_sq_ctx_s sq;
 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_CQ */
-		struct nix_cq_ctx_s cq;
+		__otx2_io struct nix_cq_ctx_s cq;
 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_RSS */
-		struct nix_rsse_s rss;
+		__otx2_io struct nix_rsse_s rss;
 		/* Valid when op == WRITE/INIT and ctype == NIX_AQ_CTYPE_MCE */
-		struct nix_rx_mce_s mce;
+		__otx2_io struct nix_rx_mce_s mce;
 	};
 	/* Mask data when op == WRITE (1=write, 0=don't write) */
 	union {
 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RQ */
-		struct nix_rq_ctx_s rq_mask;
+		__otx2_io struct nix_rq_ctx_s rq_mask;
 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_SQ */
-		struct nix_sq_ctx_s sq_mask;
+		__otx2_io struct nix_sq_ctx_s sq_mask;
 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_CQ */
-		struct nix_cq_ctx_s cq_mask;
+		__otx2_io struct nix_cq_ctx_s cq_mask;
 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_RSS */
-		struct nix_rsse_s rss_mask;
+		__otx2_io struct nix_rsse_s rss_mask;
 		/* Valid when op == WRITE and ctype == NIX_AQ_CTYPE_MCE */
-		struct nix_rx_mce_s mce_mask;
+		__otx2_io struct nix_rx_mce_s mce_mask;
 	};
 };
 
 struct nix_aq_enq_rsp {
 	struct mbox_msghdr hdr;
 	union {
-		struct nix_rq_ctx_s rq;
-		struct nix_sq_ctx_s sq;
-		struct nix_cq_ctx_s cq;
-		struct nix_rsse_s   rss;
-		struct nix_rx_mce_s mce;
+		__otx2_io struct nix_rq_ctx_s rq;
+		__otx2_io struct nix_sq_ctx_s sq;
+		__otx2_io struct nix_cq_ctx_s cq;
+		__otx2_io struct nix_rsse_s   rss;
+		__otx2_io struct nix_rx_mce_s mce;
 	};
 };
 
diff --git a/drivers/mempool/octeontx2/otx2_mempool_debug.c b/drivers/mempool/octeontx2/otx2_mempool_debug.c
index eef61ef..4d40fde 100644
--- a/drivers/mempool/octeontx2/otx2_mempool_debug.c
+++ b/drivers/mempool/octeontx2/otx2_mempool_debug.c
@@ -7,7 +7,7 @@ 
 #define npa_dump(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__)
 
 static inline void
-npa_lf_pool_dump(struct npa_pool_s *pool)
+npa_lf_pool_dump(__otx2_io struct npa_pool_s *pool)
 {
 	npa_dump("W0: Stack base\t\t0x%"PRIx64"", pool->stack_base);
 	npa_dump("W1: ena \t\t%d\nW1: nat_align \t\t%d\nW1: stack_caching \t%d",
@@ -45,7 +45,7 @@  npa_lf_pool_dump(struct npa_pool_s *pool)
 }
 
 static inline void
-npa_lf_aura_dump(struct npa_aura_s *aura)
+npa_lf_aura_dump(__otx2_io struct npa_aura_s *aura)
 {
 	npa_dump("W0: Pool addr\t\t0x%"PRIx64"\n", aura->pool_addr);
 
diff --git a/drivers/mempool/octeontx2/otx2_mempool_ops.c b/drivers/mempool/octeontx2/otx2_mempool_ops.c
index ff63be5..f5a4fe3 100644
--- a/drivers/mempool/octeontx2/otx2_mempool_ops.c
+++ b/drivers/mempool/octeontx2/otx2_mempool_ops.c
@@ -355,14 +355,14 @@  npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
 	aura_init_req->aura_id = aura_id;
 	aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
 	aura_init_req->op = NPA_AQ_INSTOP_INIT;
-	memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+	otx2_mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
 
 	pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
 
 	pool_init_req->aura_id = aura_id;
 	pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
 	pool_init_req->op = NPA_AQ_INSTOP_INIT;
-	memcpy(&pool_init_req->pool, pool, sizeof(*pool));
+	otx2_mbox_memcpy(&pool_init_req->pool, pool, sizeof(*pool));
 
 	otx2_mbox_msg_send(mbox, 0);
 	rc = otx2_mbox_wait_for_rsp(mbox, 0);
@@ -605,9 +605,9 @@  npa_lf_aura_range_update_check(uint64_t aura_handle)
 	uint64_t aura_id = npa_lf_aura_handle_to_aura(aura_handle);
 	struct otx2_npa_lf *lf = otx2_npa_lf_obj_get();
 	struct npa_aura_lim *lim = lf->aura_lim;
+	__otx2_io struct npa_pool_s *pool;
 	struct npa_aq_enq_req *req;
 	struct npa_aq_enq_rsp *rsp;
-	struct npa_pool_s *pool;
 	int rc;
 
 	req  = otx2_mbox_alloc_msg_npa_aq_enq(lf->mbox);
diff --git a/drivers/net/octeontx2/otx2_ethdev_debug.c b/drivers/net/octeontx2/otx2_ethdev_debug.c
index 9f06e55..c8b4cd5 100644
--- a/drivers/net/octeontx2/otx2_ethdev_debug.c
+++ b/drivers/net/octeontx2/otx2_ethdev_debug.c
@@ -235,7 +235,7 @@  otx2_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
 }
 
 static inline void
-nix_lf_sq_dump(struct  nix_sq_ctx_s *ctx)
+nix_lf_sq_dump(__otx2_io struct nix_sq_ctx_s *ctx)
 {
 	nix_dump("W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
 		 ctx->sqe_way_mask, ctx->cq);
@@ -295,7 +295,7 @@  nix_lf_sq_dump(struct  nix_sq_ctx_s *ctx)
 }
 
 static inline void
-nix_lf_rq_dump(struct  nix_rq_ctx_s *ctx)
+nix_lf_rq_dump(__otx2_io struct nix_rq_ctx_s *ctx)
 {
 	nix_dump("W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x",
 		 ctx->wqe_aura, ctx->substream);
@@ -355,7 +355,7 @@  nix_lf_rq_dump(struct  nix_rq_ctx_s *ctx)
 }
 
 static inline void
-nix_lf_cq_dump(struct nix_cq_ctx_s *ctx)
+nix_lf_cq_dump(__otx2_io struct nix_cq_ctx_s *ctx)
 {
 	nix_dump("W0: base \t\t\t0x%" PRIx64 "\n", ctx->base);