Hi,
> -----Original Message-----
> From: Ma, WenwuX <wenwux.ma@intel.com>
> Sent: Tuesday, September 7, 2021 4:49 AM
> To: dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>; Jiang,
> Cheng1 <cheng1.jiang@intel.com>; Hu, Jiayu <jiayu.hu@intel.com>; Pai G, Sunil
> <sunil.pai.g@intel.com>; Yang, YvonneX <yvonnex.yang@intel.com>; Ma, WenwuX
> <wenwux.ma@intel.com>
> Subject: [PATCH 2/4] examples/vhost: refactor vhost enqueue and dequeue
> datapaths
>
> Previously, the data path checked flags to decide which
> enqueue/dequeue functions to call.
>
> Now, an ops table is initialized when the vhost device is created,
> so the data path can call the ops directly without any further
> flag checks.
>
> Signed-off-by: Wenwu Ma <wenwux.ma@intel.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> examples/vhost/main.c | 100 +++++++++++++++++++++---------------
> examples/vhost/main.h | 31 +++++++++--
> examples/vhost/virtio_net.c | 16 +++++-
> 3 files changed, 101 insertions(+), 46 deletions(-)
>
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index a4a8214e05..e246b640ea 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -106,6 +106,8 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
> static char *socket_files;
[...]
>
> @@ -87,7 +100,19 @@ void vs_vhost_net_remove(struct vhost_dev *dev);
> uint16_t vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> struct rte_mbuf **pkts, uint32_t count);
>
> -uint16_t vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> - struct rte_mempool *mbuf_pool,
> - struct rte_mbuf **pkts, uint16_t count);
> +uint16_t builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint32_t count);
> +uint16_t builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> + struct rte_mempool *mbuf_pool,
> + struct rte_mbuf **pkts, uint16_t count);
> +uint16_t sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint32_t count);
> +uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> + struct rte_mempool *mbuf_pool,
> + struct rte_mbuf **pkts, uint16_t count);
> +uint16_t async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint32_t count);
> +uint16_t async_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> + struct rte_mempool *mbuf_pool,
> + struct rte_mbuf **pkts, uint16_t count);
async_dequeue_pkts() is defined in the 4th patch of this series, so please remove its declaration here.
Thanks,
Chenbo
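
For reference, here is a minimal, self-contained sketch (not part of the patch) of the dispatch scheme the commit message describes: the enqueue callback is chosen once per device in new_device() and the data path calls through the ops table without per-burst flag checks. The stand-in vhost_dev struct, the stub callbacks and the MAX_VHOST_DEVICE value below are simplifications for illustration; only the struct and field names mirror the patch.

/*
 * Sketch: per-device function-pointer ops, selected once at device
 * creation, called directly in the data path.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct vhost_dev { int vid; };          /* stand-in for the real struct */

typedef uint16_t (*enqueue_burst_t)(struct vhost_dev *dev, uint16_t queue_id,
				    void **pkts, uint32_t count);

struct vhost_queue_ops {
	enqueue_burst_t enqueue_pkt_burst;
};

#define MAX_VHOST_DEVICE 2              /* illustrative only */
static struct vhost_queue_ops vdev_queue_ops[MAX_VHOST_DEVICE];

static uint16_t
sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		  void **pkts, uint32_t count)
{
	(void)pkts;
	printf("sync enqueue: vid %d queue %u, %u pkts\n",
	       dev->vid, (unsigned)queue_id, (unsigned)count);
	return (uint16_t)count;
}

static uint16_t
async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		   void **pkts, uint32_t count)
{
	(void)pkts;
	printf("async enqueue: vid %d queue %u, %u pkts\n",
	       dev->vid, (unsigned)queue_id, (unsigned)count);
	return (uint16_t)count;
}

/* Choose the callback once, when the device is created. */
static void
new_device(int vid, bool async_vhost_driver)
{
	vdev_queue_ops[vid].enqueue_pkt_burst =
		async_vhost_driver ? async_enqueue_pkts : sync_enqueue_pkts;
}

/* Data path: call through the ops table, no flag checks per burst. */
static void
drain_eth_rx(struct vhost_dev *vdev, void **pkts, uint32_t rx_count)
{
	vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, 0, pkts, rx_count);
}

int main(void)
{
	struct vhost_dev d0 = { .vid = 0 }, d1 = { .vid = 1 };
	void *burst[4] = { 0 };

	new_device(0, false);   /* sync path */
	new_device(1, true);    /* async path */
	drain_eth_rx(&d0, burst, 4);
	drain_eth_rx(&d1, burst, 4);
	return 0;
}
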
@@ -106,6 +106,8 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
static char *socket_files;
static int nb_sockets;
+static struct vhost_queue_ops vdev_queue_ops[MAX_VHOST_DEVICE];
+
/* empty vmdq configuration structure. Filled in programatically */
static struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
@@ -888,22 +890,8 @@ drain_vhost(struct vhost_dev *vdev)
uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
- if (builtin_net_driver) {
- ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
- } else if (async_vhost_driver) {
- uint16_t enqueue_fail = 0;
-
- complete_async_pkts(vdev);
- ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit);
- __atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
-
- enqueue_fail = nr_xmit - ret;
- if (enqueue_fail)
- free_pkts(&m[ret], nr_xmit - ret);
- } else {
- ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
- m, nr_xmit);
- }
+ ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
+ VIRTIO_RXQ, m, nr_xmit);
if (enable_stats) {
__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
@@ -1182,6 +1170,33 @@ drain_mbuf_table(struct mbuf_table *tx_q)
}
}
+uint16_t
+async_enqueue_pkts(struct vhost_dev *vdev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t rx_count)
+{
+ uint16_t enqueue_count;
+ uint16_t enqueue_fail = 0;
+
+ complete_async_pkts(vdev);
+ enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
+ queue_id, pkts, rx_count);
+ __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count,
+ __ATOMIC_SEQ_CST);
+
+ enqueue_fail = rx_count - enqueue_count;
+ if (enqueue_fail)
+ free_pkts(&pkts[enqueue_count], enqueue_fail);
+
+ return enqueue_count;
+}
+
+uint16_t
+sync_enqueue_pkts(struct vhost_dev *vdev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t rx_count)
+{
+ return rte_vhost_enqueue_burst(vdev->vid, queue_id, pkts, rx_count);
+}
+
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
@@ -1212,25 +1227,8 @@ drain_eth_rx(struct vhost_dev *vdev)
}
}
- if (builtin_net_driver) {
- enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
- pkts, rx_count);
- } else if (async_vhost_driver) {
- uint16_t enqueue_fail = 0;
-
- complete_async_pkts(vdev);
- enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
- VIRTIO_RXQ, pkts, rx_count);
- __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
-
- enqueue_fail = rx_count - enqueue_count;
- if (enqueue_fail)
- free_pkts(&pkts[enqueue_count], enqueue_fail);
-
- } else {
- enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
- pkts, rx_count);
- }
+ enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
+ VIRTIO_RXQ, pkts, rx_count);
if (enable_stats) {
__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
@@ -1243,6 +1241,14 @@ drain_eth_rx(struct vhost_dev *vdev)
free_pkts(pkts, rx_count);
}
+uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ return rte_vhost_dequeue_burst(dev->vid, queue_id,
+ mbuf_pool, pkts, count);
+}
+
static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
@@ -1250,13 +1256,8 @@ drain_virtio_tx(struct vhost_dev *vdev)
uint16_t count;
uint16_t i;
- if (builtin_net_driver) {
- count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
- pkts, MAX_PKT_BURST);
- } else {
- count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
- mbuf_pool, pkts, MAX_PKT_BURST);
- }
+ count = vdev_queue_ops[vdev->vid].dequeue_pkt_burst(vdev,
+ VIRTIO_TXQ, mbuf_pool, pkts, MAX_PKT_BURST);
/* setup VMDq for the first packet */
if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
@@ -1441,6 +1442,21 @@ new_device(int vid)
}
}
+ if (builtin_net_driver) {
+ vdev_queue_ops[vid].enqueue_pkt_burst = builtin_enqueue_pkts;
+ vdev_queue_ops[vid].dequeue_pkt_burst = builtin_dequeue_pkts;
+ } else {
+ if (async_vhost_driver) {
+ vdev_queue_ops[vid].enqueue_pkt_burst =
+ async_enqueue_pkts;
+ } else {
+ vdev_queue_ops[vid].enqueue_pkt_burst =
+ sync_enqueue_pkts;
+ }
+
+ vdev_queue_ops[vid].dequeue_pkt_burst = sync_dequeue_pkts;
+ }
+
if (builtin_net_driver)
vs_vhost_net_setup(vdev);
@@ -61,6 +61,19 @@ struct vhost_dev {
struct vhost_queue queues[MAX_QUEUE_PAIRS * 2];
} __rte_cache_aligned;
+typedef uint16_t (*vhost_enqueue_burst_t)(struct vhost_dev *dev,
+ uint16_t queue_id, struct rte_mbuf **pkts,
+ uint32_t count);
+
+typedef uint16_t (*vhost_dequeue_burst_t)(struct vhost_dev *dev,
+ uint16_t queue_id, struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count);
+
+struct vhost_queue_ops {
+ vhost_enqueue_burst_t enqueue_pkt_burst;
+ vhost_dequeue_burst_t dequeue_pkt_burst;
+};
+
TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
@@ -87,7 +100,19 @@ void vs_vhost_net_remove(struct vhost_dev *dev);
uint16_t vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count);
-uint16_t vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
- struct rte_mempool *mbuf_pool,
- struct rte_mbuf **pkts, uint16_t count);
+uint16_t builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count);
+uint16_t builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count);
+uint16_t sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count);
+uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count);
+uint16_t async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count);
+uint16_t async_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count);
#endif /* _MAIN_H_ */
@@ -238,6 +238,13 @@ vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
return count;
}
+uint16_t
+builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ return vs_enqueue_pkts(dev, queue_id, pkts, count);
+}
+
static __rte_always_inline int
dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
struct rte_mbuf *m, uint16_t desc_idx,
@@ -363,7 +370,7 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
return 0;
}
-uint16_t
+static uint16_t
vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
@@ -440,3 +447,10 @@ vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
return i;
}
+
+uint16_t
+builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+{
+ return vs_dequeue_pkts(dev, queue_id, mbuf_pool, pkts, count);
+}