diff mbox

[dpdk-dev] xenvirt: fix compilation issues

Message ID 1449760558-63146-1-git-send-email-huawei.xie@intel.com (mailing list archive)
State Accepted, archived
Headers show

Commit Message

Huawei Xie <huawei.xie@intel.com> Dec. 10, 2015, 3:15 p.m. UTC
Reported-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
---
 drivers/net/xenvirt/rte_mempool_gntalloc.c | 6 +++---
 drivers/net/xenvirt/rte_xen_lib.c          | 4 ++--
 drivers/net/xenvirt/virtqueue.h            | 4 ++--
 3 files changed, 7 insertions(+), 7 deletions(-)

Comments

Jianfeng Tan Dec. 12, 2015, 7:15 a.m. UTC | #1
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Huawei Xie
> Sent: Thursday, December 10, 2015 11:16 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH] xenvirt: fix compilation issues
> 
> Reported-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
> Signed-off-by: Huawei Xie <huawei.xie@intel.com>
> ---
>  drivers/net/xenvirt/rte_mempool_gntalloc.c | 6 +++---
>  drivers/net/xenvirt/rte_xen_lib.c          | 4 ++--
>  drivers/net/xenvirt/virtqueue.h            | 4 ++--
>  3 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/net/xenvirt/rte_mempool_gntalloc.c
> b/drivers/net/xenvirt/rte_mempool_gntalloc.c
> index 3a650e8..0585f08 100644
> --- a/drivers/net/xenvirt/rte_mempool_gntalloc.c
> +++ b/drivers/net/xenvirt/rte_mempool_gntalloc.c
> @@ -184,10 +184,10 @@ _create_mempool(const char *name, unsigned
> elt_num, unsigned elt_size,
>  				rv = ioctl(gntalloc_fd,
> IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
>  				if (rv) {
>  					/* shouldn't fail here */
> -					RTE_LOG(ERR, PMD, "va=%p pa=%p
> index=%p %s\n",
> +					RTE_LOG(ERR, PMD, "va=%p
> pa=%"PRIu64"x index=%"PRIu64" %s\n",
>  						gnt_arr[i].va,
> -						(void *)gnt_arr[i].pa,
> -						(void *)arg.index,
> strerror(errno));
> +						gnt_arr[i].pa,
> +						arg.index, strerror(errno));
>  					rte_panic("gntdealloc failed when
> freeing pages\n");
>  				}
>  			}
> diff --git a/drivers/net/xenvirt/rte_xen_lib.c
> b/drivers/net/xenvirt/rte_xen_lib.c
> index 3e97c1a..de63cd3 100644
> --- a/drivers/net/xenvirt/rte_xen_lib.c
> +++ b/drivers/net/xenvirt/rte_xen_lib.c
> @@ -115,8 +115,8 @@ get_phys_map(void *va, phys_addr_t pa[], uint32_t
> pg_num, uint32_t pg_sz)
>  			(rc = pread(fd, pa, nb, ofs)) < 0 ||
>  			(rc -= nb) != 0) {
>  		RTE_LOG(ERR, PMD, "%s: failed read of %u bytes from \'%s\'
> "
> -			"at offset %zu, error code: %d\n",
> -			__func__, nb, PAGEMAP_FNAME, ofs, errno);
> +			"at offset %lu, error code: %d\n",
> +			__func__, nb, PAGEMAP_FNAME, (unsigned
> long)ofs, errno);
>  		rc = ENOENT;
>  	}
> 
> diff --git a/drivers/net/xenvirt/virtqueue.h
> b/drivers/net/xenvirt/virtqueue.h
> index 6dcb0ef..5312347 100644
> --- a/drivers/net/xenvirt/virtqueue.h
> +++ b/drivers/net/xenvirt/virtqueue.h
> @@ -55,7 +55,7 @@ struct rte_mbuf;
>   * rather than gpa<->hva in virito spec.
>   */
>  #define RTE_MBUF_DATA_DMA_ADDR(mb) \
> -	rte_pktmbuf_mtod(mb, uint64_t)
> +	((uint64_t)(uintptr_t)rte_pktmbuf_mtod(mb, void *))
> 
>  enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
> 
> @@ -198,7 +198,7 @@ virtqueue_enqueue_recv_refill(struct virtqueue
> *rxvq, struct rte_mbuf *cookie)
>  	dxp->ndescs = needed;
> 
>  	start_dp[head_idx].addr  =
> -		(uint64_t) ((uint64_t)cookie->buf_addr +
> RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
> +		(uint64_t) ((uintptr_t)cookie->buf_addr +
> RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
>  	start_dp[head_idx].len   = cookie->buf_len -
> RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
>  	start_dp[head_idx].flags = VRING_DESC_F_WRITE;
>  	rxvq->vq_desc_head_idx   = start_dp[head_idx].next;
> --
> 1.8.1.4


Looks good to me. Just one thing: shall we unify on printing either errno or strerror(errno)?

Acked-by: Jianfeng Tan <jianfeng.tan@intel.com>

Thanks,
Jianfeng
Thomas Monjalon Dec. 12, 2015, 9:19 p.m. UTC | #2
> > Reported-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
> > Signed-off-by: Huawei Xie <huawei.xie@intel.com>
> 
> Looks good to me. Just one thing, shall we keep unified on printing out either errno, or strerror(errno)?
> 
> Acked-by: Jianfeng Tan <jianfeng.tan@intel.com>

Applied, thanks
diff mbox

Patch

diff --git a/drivers/net/xenvirt/rte_mempool_gntalloc.c b/drivers/net/xenvirt/rte_mempool_gntalloc.c
index 3a650e8..0585f08 100644
--- a/drivers/net/xenvirt/rte_mempool_gntalloc.c
+++ b/drivers/net/xenvirt/rte_mempool_gntalloc.c
@@ -184,10 +184,10 @@  _create_mempool(const char *name, unsigned elt_num, unsigned elt_size,
 				rv = ioctl(gntalloc_fd, IOCTL_GNTALLOC_DEALLOC_GREF, &arg);
 				if (rv) {
 					/* shouldn't fail here */
-					RTE_LOG(ERR, PMD, "va=%p pa=%p index=%p %s\n",
+					RTE_LOG(ERR, PMD, "va=%p pa=%"PRIu64"x index=%"PRIu64" %s\n",
 						gnt_arr[i].va,
-						(void *)gnt_arr[i].pa,
-						(void *)arg.index, strerror(errno));
+						gnt_arr[i].pa,
+						arg.index, strerror(errno));
 					rte_panic("gntdealloc failed when freeing pages\n");
 				}
 			}
diff --git a/drivers/net/xenvirt/rte_xen_lib.c b/drivers/net/xenvirt/rte_xen_lib.c
index 3e97c1a..de63cd3 100644
--- a/drivers/net/xenvirt/rte_xen_lib.c
+++ b/drivers/net/xenvirt/rte_xen_lib.c
@@ -115,8 +115,8 @@  get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz)
 			(rc = pread(fd, pa, nb, ofs)) < 0 ||
 			(rc -= nb) != 0) {
 		RTE_LOG(ERR, PMD, "%s: failed read of %u bytes from \'%s\' "
-			"at offset %zu, error code: %d\n",
-			__func__, nb, PAGEMAP_FNAME, ofs, errno);
+			"at offset %lu, error code: %d\n",
+			__func__, nb, PAGEMAP_FNAME, (unsigned long)ofs, errno);
 		rc = ENOENT;
 	}
 
diff --git a/drivers/net/xenvirt/virtqueue.h b/drivers/net/xenvirt/virtqueue.h
index 6dcb0ef..5312347 100644
--- a/drivers/net/xenvirt/virtqueue.h
+++ b/drivers/net/xenvirt/virtqueue.h
@@ -55,7 +55,7 @@  struct rte_mbuf;
  * rather than gpa<->hva in virito spec.
  */
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	rte_pktmbuf_mtod(mb, uint64_t)
+	((uint64_t)(uintptr_t)rte_pktmbuf_mtod(mb, void *))
 
 enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
 
@@ -198,7 +198,7 @@  virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
 	dxp->ndescs = needed;
 
 	start_dp[head_idx].addr  =
-		(uint64_t) ((uint64_t)cookie->buf_addr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
+		(uint64_t) ((uintptr_t)cookie->buf_addr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
 	start_dp[head_idx].len   = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
 	start_dp[head_idx].flags = VRING_DESC_F_WRITE;
 	rxvq->vq_desc_head_idx   = start_dp[head_idx].next;