[v4,1/5] vhost: fix async vhost ops return type

Message ID 20210716025923.27164-2-cheng1.jiang@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series vhost: handle memory hotplug for async vhost

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Jiang, Cheng1 July 16, 2021, 2:59 a.m. UTC
  The async vhost ops callback should return -1 when there is something
wrong in the callback, so the return type should be changed to
int32_t. The issue in the vhost example is also fixed in this patch.

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
 examples/vhost/ioat.c       |  4 +--
 examples/vhost/ioat.h       |  4 +--
 lib/vhost/rte_vhost_async.h |  4 +--
 lib/vhost/virtio_net.c      | 58 ++++++++++++++++++++++++++++++++-----
 4 files changed, 56 insertions(+), 14 deletions(-)
  

Comments

Chenbo Xia July 16, 2021, 5:36 a.m. UTC | #1
Hi Cheng,

> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Friday, July 16, 2021 10:59 AM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>
> Subject: [PATCH v4 1/5] vhost: fix async vhost ops return type
> 
> The async vhost ops callback should return -1 when there is something

Ops callback -> callback ops

Since the return value is redefined, let's update the ops description of struct
rte_vhost_async_channel_ops. And I suggest returning a negative value on error,
rather than only -1.
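
For example, the @return lines could read something like this (just a sketch,
exact wording up to you):

	 * @return
	 *  number of descs processed, negative value means error
	 */
	int32_t (*transfer_data)(int vid, uint16_t queue_id,
		struct rte_vhost_async_desc *descs,
		struct rte_vhost_async_status *opaque_data,
		uint16_t count);

and similarly 'number of async descs completed, negative value means error'
for check_completed_copies.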
 
> wrong in the callback, so the return type should be changed to
> int32_t. The issue in the vhost example is also fixed in this patch.
> 
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
>  examples/vhost/ioat.c       |  4 +--
>  examples/vhost/ioat.h       |  4 +--
>  lib/vhost/rte_vhost_async.h |  4 +--
>  lib/vhost/virtio_net.c      | 58 ++++++++++++++++++++++++++++++++-----
>  4 files changed, 56 insertions(+), 14 deletions(-)
> 
> diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
> index 2a2c2d7202..457f8171f0 100644
> --- a/examples/vhost/ioat.c
> +++ b/examples/vhost/ioat.c
> @@ -122,7 +122,7 @@ open_ioat(const char *value)
>  	return ret;
>  }
> 
> -uint32_t
> +int32_t
>  ioat_transfer_data_cb(int vid, uint16_t queue_id,
>  		struct rte_vhost_async_desc *descs,
>  		struct rte_vhost_async_status *opaque_data, uint16_t count)
> @@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
>  	return i_desc;
>  }
> 
> -uint32_t
> +int32_t
>  ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
>  		struct rte_vhost_async_status *opaque_data,
>  		uint16_t max_packets)
> diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
> index 1aa28ed6a3..b57b5645b0 100644
> --- a/examples/vhost/ioat.h
> +++ b/examples/vhost/ioat.h
> @@ -27,12 +27,12 @@ struct dma_for_vhost {
>  #ifdef RTE_RAW_IOAT
>  int open_ioat(const char *value);
> 
> -uint32_t
> +int32_t
>  ioat_transfer_data_cb(int vid, uint16_t queue_id,
>  		struct rte_vhost_async_desc *descs,
>  		struct rte_vhost_async_status *opaque_data, uint16_t count);
> 
> -uint32_t
> +int32_t
>  ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
>  		struct rte_vhost_async_status *opaque_data,
>  		uint16_t max_packets);
> diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> index 6faa31f5ad..bc81cd0caa 100644
> --- a/lib/vhost/rte_vhost_async.h
> +++ b/lib/vhost/rte_vhost_async.h
> @@ -61,7 +61,7 @@ struct rte_vhost_async_channel_ops {
>  	 * @return
>  	 *  number of descs processed
>  	 */
> -	uint32_t (*transfer_data)(int vid, uint16_t queue_id,
> +	int32_t (*transfer_data)(int vid, uint16_t queue_id,
>  		struct rte_vhost_async_desc *descs,
>  		struct rte_vhost_async_status *opaque_data,
>  		uint16_t count);
> @@ -78,7 +78,7 @@ struct rte_vhost_async_channel_ops {
>  	 * @return
>  	 *  number of async descs completed
>  	 */
> -	uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
> +	int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
>  		struct rte_vhost_async_status *opaque_data,
>  		uint16_t max_packets);
>  };
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index b93482587c..8156796a46 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  	struct async_inflight_info *pkts_info = vq->async_pkts_info;
>  	uint32_t n_pkts = 0, pkt_err = 0;
>  	uint32_t num_async_pkts = 0, num_done_pkts = 0;
> +	int32_t n_enq;
>  	struct {
>  		uint16_t pkt_idx;
>  		uint16_t last_avail_idx;
> @@ -1608,8 +1609,16 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
>  			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
>  			BUF_VECTOR_MAX))) {
> -			n_pkts = vq->async_ops.transfer_data(dev->vid,
> +			n_enq = vq->async_ops.transfer_data(dev->vid,
>  					queue_id, tdes, 0, pkt_burst_idx);
> +			if (n_enq >= 0) {
> +				n_pkts = n_enq;
> +			} else {
> +				VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",

You can't assume the error is caused by wrong opaque data, because different
implementations of the callback may fail for different reasons.

It's better to replace 'n_enq' with 'n_xfer', as we use the name 'transfer' in
the callback definition.

If you agree with the above, please also change the other funcs below.
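
I.e. something like this at each call site (untested sketch; the exact log
wording below is only an example):

	n_xfer = vq->async_ops.transfer_data(dev->vid,
			queue_id, tdes, 0, pkt_burst_idx);
	if (n_xfer >= 0) {
		n_pkts = n_xfer;
	} else {
		VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
			dev->vid, __func__, queue_id);
		n_pkts = 0;
	}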

> +					dev->vid, __func__, queue_id);
> +				n_pkts = 0;
> +			}
> +
>  			iovec_idx = 0;
>  			it_idx = 0;
> 
> @@ -1632,8 +1641,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  	}
> 
>  	if (pkt_burst_idx) {
> -		n_pkts = vq->async_ops.transfer_data(dev->vid,
> -				queue_id, tdes, 0, pkt_burst_idx);
> +		n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
> +		if (n_enq >= 0) {
> +			n_pkts = n_enq;
> +		} else {
> +			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
> +				dev->vid, __func__, queue_id);
> +			n_pkts = 0;
> +		}
> +
>  		vq->async_pkts_inflight_n += n_pkts;
> 
>  		if (unlikely(n_pkts < pkt_burst_idx))
> @@ -1903,6 +1919,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
>  	uint16_t async_descs_idx = 0;
>  	uint16_t num_buffers;
>  	uint16_t num_descs;
> +	int32_t n_enq;
> 
>  	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
>  	struct iovec *vec_pool = vq->vec_pool;
> @@ -1983,8 +2000,16 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
>  		 */
>  		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
> 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
> -			n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
> -				tdes, 0, pkt_burst_idx);
> +			n_enq = vq->async_ops.transfer_data(dev->vid,
> +				queue_id, tdes, 0, pkt_burst_idx);
> +			if (n_enq >= 0) {
> +				n_pkts = n_enq;
> +			} else {
> +				VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
> +					dev->vid, __func__, queue_id);
> +				n_pkts = 0;
> +			}
> +
>  			iovec_idx = 0;
>  			it_idx = 0;
>  			segs_await = 0;
> @@ -2006,7 +2031,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
>  	} while (pkt_idx < count);
> 
>  	if (pkt_burst_idx) {
> -		n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
> +		n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
> +		if (n_enq >= 0) {
> +			n_pkts = n_enq;
> +		} else {
> +			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
> +				dev->vid, __func__, queue_id);
> +			n_pkts = 0;
> +		}
> +
>  		vq->async_pkts_inflight_n += n_pkts;
> 
>  		if (unlikely(n_pkts < pkt_burst_idx))
> @@ -2091,6 +2124,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  	uint16_t start_idx, pkts_idx, vq_size;
>  	struct async_inflight_info *pkts_info;
>  	uint16_t from, i;
> +	int32_t n_poll;
> 
>  	if (!dev)
>  		return 0;
> @@ -2118,9 +2152,17 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
>  		vq_size, vq->async_pkts_inflight_n);
> 
> -	if (count > vq->async_last_pkts_n)
> -		n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
> +	if (count > vq->async_last_pkts_n) {
> +		n_poll = vq->async_ops.check_completed_copies(vid,
>  			queue_id, 0, count - vq->async_last_pkts_n);

The name 'n_poll' is not related to the callback name. Maybe 'n_cpl'?

> +		if (n_poll >= 0) {
> +			n_pkts_cpl = n_poll;
> +		} else {
> +			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",

I suggest using different log messages for submit and check-complete so that
it's easier for users to know what's wrong.
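
For example (sketch only, message wording is yours to choose):

	/* in the transfer_data paths */
	VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
		dev->vid, __func__, queue_id);

	/* in rte_vhost_poll_enqueue_completed() */
	VHOST_LOG_DATA(ERR, "(%d) %s: failed to check completed copies for queue id %d.\n",
		dev->vid, __func__, queue_id);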

Thanks,
Chenbo

> +				dev->vid, __func__, queue_id);
> +			n_pkts_cpl = 0;
> +		}
> +	}
>  	n_pkts_cpl += vq->async_last_pkts_n;
> 
>  	n_pkts_put = RTE_MIN(count, n_pkts_cpl);
> --
> 2.29.2
  
Jiang, Cheng1 July 16, 2021, 5:58 a.m. UTC | #2
Hi Chenbo,

> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia@intel.com>
> Sent: Friday, July 16, 2021 1:37 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; maxime.coquelin@redhat.com
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>
> Subject: RE: [PATCH v4 1/5] vhost: fix async vhost ops return type
> 
> Hi Cheng,
> 
> > -----Original Message-----
> > From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > Sent: Friday, July 16, 2021 10:59 AM
> > To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> > Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> > <yvonnex.yang@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>
> > Subject: [PATCH v4 1/5] vhost: fix async vhost ops return type
> >
> > The async vhost ops callback should return -1 when there is something
> 
> Ops callback -> callback ops
> 
> Since the return value is redefined, let's update the ops description of
> struct rte_vhost_async_channel_ops. And I suggest returning a negative
> value on error, rather than only -1.
> 
Sure, agreed.

> > wrong in the callback, so the return type should be changed to
> > int32_t. The issue in the vhost example is also fixed in this patch.
> > [...]
> 
> You can't assume the error is caused by wrong opaque data, because different
> implementations of the callback may fail for different reasons.
> 
> It's better to replace 'n_enq' with 'n_xfer', as we use the name 'transfer' in
> the callback definition.
> 
> If you agree with the above, please also change the other funcs below.

Sure, agreed. It will be fixed in the next version.

> 
> > +					dev->vid, __func__, queue_id);
> > +				n_pkts = 0;
> > +			}
> > +
> >  			iovec_idx = 0;
> >  			it_idx = 0;
> >
> > @@ -1632,8 +1641,15 @@ virtio_dev_rx_async_submit_split(struct
> virtio_net *dev,
> >  	}
> >
> >  	if (pkt_burst_idx) {
> > -		n_pkts = vq->async_ops.transfer_data(dev->vid,
> > -				queue_id, tdes, 0, pkt_burst_idx);
> > +		n_enq = vq->async_ops.transfer_data(dev->vid, queue_id,
> tdes, 0,
> > pkt_burst_idx);
> > +		if (n_enq >= 0) {
> > +			n_pkts = n_enq;
> > +		} else {
> > +			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque
> data for queue
> > id %d.\n",
> > +				dev->vid, __func__, queue_id);
> > +			n_pkts = 0;
> > +		}
> > +
> >  		vq->async_pkts_inflight_n += n_pkts;
> >
> >  		if (unlikely(n_pkts < pkt_burst_idx)) @@ -1903,6 +1919,7
> @@
> > virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
> >  	uint16_t async_descs_idx = 0;
> >  	uint16_t num_buffers;
> >  	uint16_t num_descs;
> > +	int32_t n_enq;
> >
> >  	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
> >  	struct iovec *vec_pool = vq->vec_pool; @@ -1983,8 +2000,16 @@
> > virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
> >  		 */
> >  		if (unlikely(pkt_burst_idx >=
> VHOST_ASYNC_BATCH_THRESHOLD ||
> >  			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
> BUF_VECTOR_MAX))) {
> > -			n_pkts = vq->async_ops.transfer_data(dev->vid,
> queue_id,
> > -				tdes, 0, pkt_burst_idx);
> > +			n_enq = vq->async_ops.transfer_data(dev->vid,
> > +				queue_id, tdes, 0, pkt_burst_idx);
> > +			if (n_enq >= 0) {
> > +				n_pkts = n_enq;
> > +			} else {
> > +				VHOST_LOG_DATA(ERR, "(%d) %s: wrong
> opaque data for
> > queue id %d.\n",
> > +					dev->vid, __func__, queue_id);
> > +				n_pkts = 0;
> > +			}
> > +
> >  			iovec_idx = 0;
> >  			it_idx = 0;
> >  			segs_await = 0;
> > @@ -2006,7 +2031,15 @@ virtio_dev_rx_async_submit_packed(struct
> > virtio_net *dev,
> >  	} while (pkt_idx < count);
> >
> >  	if (pkt_burst_idx) {
> > -		n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
> tdes, 0,
> > pkt_burst_idx);
> > +		n_enq = vq->async_ops.transfer_data(dev->vid, queue_id,
> tdes, 0,
> > pkt_burst_idx);
> > +		if (n_enq >= 0) {
> > +			n_pkts = n_enq;
> > +		} else {
> > +			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque
> data for queue
> > id %d.\n",
> > +				dev->vid, __func__, queue_id);
> > +			n_pkts = 0;
> > +		}
> > +
> >  		vq->async_pkts_inflight_n += n_pkts;
> >
> >  		if (unlikely(n_pkts < pkt_burst_idx)) @@ -2091,6 +2124,7
> @@
> > uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> >  	uint16_t start_idx, pkts_idx, vq_size;
> >  	struct async_inflight_info *pkts_info;
> >  	uint16_t from, i;
> > +	int32_t n_poll;
> >
> >  	if (!dev)
> >  		return 0;
> > @@ -2118,9 +2152,17 @@ uint16_t
> rte_vhost_poll_enqueue_completed(int
> > vid, uint16_t queue_id,
> >  	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
> >  		vq_size, vq->async_pkts_inflight_n);
> >
> > -	if (count > vq->async_last_pkts_n)
> > -		n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
> > +	if (count > vq->async_last_pkts_n) {
> > +		n_poll = vq->async_ops.check_completed_copies(vid,
> >  			queue_id, 0, count - vq->async_last_pkts_n);
> 
> The name 'n_poll' is not related to the callback name. Maybe 'n_cpl'?
> 
> > +		if (n_poll >= 0) {
> > +			n_pkts_cpl = n_poll;
> > +		} else {
> > +			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
> 
> I suggest using different log messages for submit and check-complete so that
> it's easier for users to know what's wrong.

Agreed, it will be fixed in the next version.

Thanks,
Cheng

  

Patch

diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@  open_ioat(const char *value)
 	return ret;
 }
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@  ioat_transfer_data_cb(int vid, uint16_t queue_id,
 	return i_desc;
 }
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@  struct dma_for_vhost {
 #ifdef RTE_RAW_IOAT
 int open_ioat(const char *value);
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count);
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..bc81cd0caa 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -61,7 +61,7 @@  struct rte_vhost_async_channel_ops {
 	 * @return
 	 *  number of descs processed
 	 */
-	uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+	int32_t (*transfer_data)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t count);
@@ -78,7 +78,7 @@  struct rte_vhost_async_channel_ops {
 	 * @return
 	 *  number of async descs completed
 	 */
-	uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+	int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
 };
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b93482587c..8156796a46 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1528,6 +1528,7 @@  virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct async_inflight_info *pkts_info = vq->async_pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	int32_t n_enq;
 	struct {
 		uint16_t pkt_idx;
 		uint16_t last_avail_idx;
@@ -1608,8 +1609,16 @@  virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
 			BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid,
+			n_enq = vq->async_ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_enq >= 0) {
+				n_pkts = n_enq;
+			} else {
+				VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 
@@ -1632,8 +1641,15 @@  virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	}
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid,
-				queue_id, tdes, 0, pkt_burst_idx);
+		n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_enq >= 0) {
+			n_pkts = n_enq;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -1903,6 +1919,7 @@  virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	uint16_t async_descs_idx = 0;
 	uint16_t num_buffers;
 	uint16_t num_descs;
+	int32_t n_enq;
 
 	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
 	struct iovec *vec_pool = vq->vec_pool;
@@ -1983,8 +2000,16 @@  virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		 */
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
-				tdes, 0, pkt_burst_idx);
+			n_enq = vq->async_ops.transfer_data(dev->vid,
+				queue_id, tdes, 0, pkt_burst_idx);
+			if (n_enq >= 0) {
+				n_pkts = n_enq;
+			} else {
+				VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 			segs_await = 0;
@@ -2006,7 +2031,15 @@  virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	} while (pkt_idx < count);
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_enq >= 0) {
+			n_pkts = n_enq;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -2091,6 +2124,7 @@  uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
+	int32_t n_poll;
 
 	if (!dev)
 		return 0;
@@ -2118,9 +2152,17 @@  uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
 		vq_size, vq->async_pkts_inflight_n);
 
-	if (count > vq->async_last_pkts_n)
-		n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+	if (count > vq->async_last_pkts_n) {
+		n_poll = vq->async_ops.check_completed_copies(vid,
 			queue_id, 0, count - vq->async_last_pkts_n);
+		if (n_poll >= 0) {
+			n_pkts_cpl = n_poll;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts_cpl = 0;
+		}
+	}
 	n_pkts_cpl += vq->async_last_pkts_n;
 
 	n_pkts_put = RTE_MIN(count, n_pkts_cpl);