@@ -38,7 +38,7 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
}

static __rte_always_inline void
-do_flush_shadow_used_ring_split(struct virtio_net *dev,
+do_flush_shadow_split(struct virtio_net *dev,
struct vhost_virtqueue *vq,
uint16_t to, uint16_t from, uint16_t size)
{
@@ -51,22 +51,22 @@ do_flush_shadow_used_ring_split(struct virtio_net *dev,
}

static __rte_always_inline void
-flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
+flush_shadow_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

if (used_idx + vq->shadow_used_idx <= vq->size) {
- do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
+ do_flush_shadow_split(dev, vq, used_idx, 0,
vq->shadow_used_idx);
} else {
uint16_t size;

/* update used ring interval [used_idx, vq->size] */
size = vq->size - used_idx;
- do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
+ do_flush_shadow_split(dev, vq, used_idx, 0, size);

/* update the left half used ring interval [0, left_size] */
- do_flush_shadow_used_ring_split(dev, vq, 0, size,
+ do_flush_shadow_split(dev, vq, 0, size,
vq->shadow_used_idx - size);
}
vq->last_used_idx += vq->shadow_used_idx;
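
For context: flush_shadow_split() publishes the flat shadow array into the circular used ring with at most two copies, depending on whether the batch crosses the ring end (note the power-of-two masking of last_used_idx above). A minimal standalone sketch of that interval arithmetic, using an illustrative element type rather than the real vring structures:

#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the used-ring element. */
struct used_elem { uint32_t id; uint32_t len; };

static void
flush_used_sketch(struct used_elem *ring, uint16_t ring_size,
		const struct used_elem *shadow, uint16_t n, uint16_t used_idx)
{
	if (used_idx + n <= ring_size) {
		/* No wrap: publish every shadow entry in one copy. */
		memcpy(&ring[used_idx], shadow, (size_t)n * sizeof(*ring));
	} else {
		/* Wrap: fill [used_idx, ring_size), then restart at 0. */
		uint16_t first = ring_size - used_idx;

		memcpy(&ring[used_idx], shadow,
				(size_t)first * sizeof(*ring));
		memcpy(ring, shadow + first,
				(size_t)(n - first) * sizeof(*ring));
	}
}
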
@@ -82,7 +82,7 @@ flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
}

static __rte_always_inline void
-update_shadow_used_ring_split(struct vhost_virtqueue *vq,
+update_shadow_split(struct vhost_virtqueue *vq,
uint16_t desc_idx, uint32_t len)
{
uint16_t i = vq->shadow_used_idx++;
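
The update side is only an append into the flat shadow array; nothing becomes guest-visible until the flush above runs. A sketch, reusing the used_elem stand-in from the previous snippet:

static inline void
shadow_append_sketch(struct used_elem *shadow, uint16_t *shadow_cnt,
		uint16_t desc_idx, uint32_t len)
{
	uint16_t i = (*shadow_cnt)++;

	/* Record the chain's head index and the bytes written
	 * (len is 0 on the dequeue path). */
	shadow[i].id = desc_idx;
	shadow[i].len = len;
}
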
@@ -92,8 +92,7 @@ update_shadow_used_ring_split(struct vhost_virtqueue *vq,
}

static __rte_always_inline void
-flush_shadow_used_ring_packed(struct virtio_net *dev,
- struct vhost_virtqueue *vq)
+flush_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
int i;
uint16_t used_idx = vq->last_used_idx;
@@ -159,7 +158,7 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
}

static __rte_always_inline void
-update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
+update_shadow_packed(struct vhost_virtqueue *vq,
uint16_t desc_idx, uint32_t len, uint16_t count)
{
uint16_t i = vq->shadow_used_idx++;
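
Unlike the split variant, the packed update also records how many descriptors the buffer consumed: a packed used element retires a whole descriptor chain, and the flush needs the count to advance last_used_idx and toggle the wrap counter correctly. Roughly, a shadow entry amounts to the following (field names illustrative, not the actual vhost definitions):

struct shadow_packed_sketch {
	uint16_t id;    /* buffer id of the descriptor chain */
	uint32_t len;   /* bytes written; 0 on the dequeue path */
	uint16_t count; /* descriptors consumed by this buffer */
};
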
@@ -421,7 +420,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
VHOST_ACCESS_RW) < 0))
return -1;
len = RTE_MIN(len, size);
- update_shadow_used_ring_split(vq, head_idx, len);
+ update_shadow_split(vq, head_idx, len);
size -= len;

cur_idx++;
@@ -597,7 +596,7 @@ reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
return -1;

len = RTE_MIN(len, size);
- update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
+ update_shadow_packed(vq, buf_id, len, desc_count);
size -= len;

avail_idx += desc_count;
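
Both reserve_avail_buf_split() and reserve_avail_buf_packed() follow the loop shape visible in these two hunks: claim guest buffers until the packet fits, staging one shadow entry per buffer. A self-contained sketch of that shape, where next_buf_capacity() and stage_shadow_entry() are hypothetical stand-ins for the fill_vec_buf_*() and update_shadow_*() calls:

#include <stdint.h>

uint32_t next_buf_capacity(void);      /* hypothetical: map next chain */
void stage_shadow_entry(uint32_t len); /* hypothetical: record used len */

static int
reserve_sketch(uint32_t pkt_len)
{
	uint32_t size = pkt_len;

	while (size > 0) {
		uint32_t len = next_buf_capacity();

		if (len == 0)
			return -1;	/* ring exhausted */
		if (len > size)
			len = size;	/* i.e. RTE_MIN(len, size) */
		stage_shadow_entry(len);
		size -= len;
	}
	return 0;
}
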
@@ -889,7 +888,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
do_data_copy_enqueue(dev, vq);

if (likely(vq->shadow_used_idx)) {
- flush_shadow_used_ring_split(dev, vq);
+ flush_shadow_split(dev, vq);
vhost_vring_call_split(dev, vq);
}

@@ -1069,7 +1068,7 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
do_data_copy_enqueue(dev, vq);

if (likely(vq->shadow_used_idx)) {
- flush_shadow_used_ring_packed(dev, vq);
+ flush_shadow_packed(dev, vq);
vhost_vring_call_packed(dev, vq);
}

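Both enqueue paths end with the same tail sequence: complete the batched payload copies, publish the shadow entries, and only then notify the guest, so the kick can never arrive while the used ring is inconsistent. Deliberately schematic, with the undefined helpers standing in for the split/packed variants:

struct ctx;                       /* stand-in for dev/vq state   */
void do_copies(struct ctx *c);    /* drain staged payload copies */
int have_shadow(struct ctx *c);
void flush_shadow(struct ctx *c); /* barrier + used ring update  */
void kick_guest(struct ctx *c);   /* eventfd kick, if enabled    */

static void
enqueue_tail_sketch(struct ctx *c)
{
	do_copies(c);
	if (have_shadow(c)) {
		flush_shadow(c);
		kick_guest(c);
	}
}
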
@@ -1498,8 +1497,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
next = TAILQ_NEXT(zmbuf, next);

if (mbuf_is_consumed(zmbuf->mbuf)) {
- update_shadow_used_ring_split(vq,
- zmbuf->desc_idx, 0);
+ update_shadow_split(vq, zmbuf->desc_idx, 0);
TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
restore_mbuf(zmbuf->mbuf);
rte_pktmbuf_free(zmbuf->mbuf);
@@ -1509,7 +1507,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
}

if (likely(vq->shadow_used_idx)) {
- flush_shadow_used_ring_split(dev, vq);
+ flush_shadow_split(dev, vq);
vhost_vring_call_split(dev, vq);
}
}
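
In the zero-copy dequeue path a descriptor cannot be returned while its mbuf is still in flight downstream, so completed zmbufs are reclaimed lazily at the top of the next dequeue, as above. The loop is the usual safe-removal walk over a sys/queue.h TAILQ; a self-contained sketch with illustrative types:

#include <sys/queue.h>

struct zbuf {
	TAILQ_ENTRY(zbuf) next;
	int consumed;
};
TAILQ_HEAD(zlist, zbuf);

static void
reclaim_sketch(struct zlist *list)
{
	struct zbuf *z, *nxt;

	for (z = TAILQ_FIRST(list); z != NULL; z = nxt) {
		nxt = TAILQ_NEXT(z, next); /* fetch before any removal */
		if (z->consumed)
			TAILQ_REMOVE(list, z, next);
	}
}
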
@@ -1549,7 +1547,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
break;

if (likely(dev->dequeue_zero_copy == 0))
- update_shadow_used_ring_split(vq, head_idx, 0);
+ update_shadow_split(vq, head_idx, 0);

pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(pkts[i] == NULL)) {
@@ -1595,7 +1593,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (unlikely(i < count))
vq->shadow_used_idx = i;
if (likely(vq->shadow_used_idx)) {
- flush_shadow_used_ring_split(dev, vq);
+ flush_shadow_split(dev, vq);
vhost_vring_call_split(dev, vq);
}
}
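
Note the rollback just above: if rte_pktmbuf_alloc() fails mid-burst, only the first i buffers were actually delivered, and truncating vq->shadow_used_idx to i drops the shadow entries staged for the unfinished tail before the flush makes anything guest-visible. The packed path at the end of this patch does the same.
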
@@ -1817,10 +1815,8 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
next = TAILQ_NEXT(zmbuf, next);

if (mbuf_is_consumed(zmbuf->mbuf)) {
- update_shadow_used_ring_packed(vq,
- zmbuf->desc_idx,
- 0,
- zmbuf->desc_count);
+ update_shadow_packed(vq, zmbuf->desc_idx, 0,
+ zmbuf->desc_count);

TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
restore_mbuf(zmbuf->mbuf);
@@ -1831,7 +1827,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
}

if (likely(vq->shadow_used_idx)) {
- flush_shadow_used_ring_packed(dev, vq);
+ flush_shadow_packed(dev, vq);
vhost_vring_call_packed(dev, vq);
}
}
@@ -1857,8 +1853,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
break;

if (likely(dev->dequeue_zero_copy == 0))
- update_shadow_used_ring_packed(vq, buf_id, 0,
- desc_count);
+ update_shadow_packed(vq, buf_id, 0, desc_count);

pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(pkts[i] == NULL)) {
@@ -1910,7 +1905,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (unlikely(i < count))
vq->shadow_used_idx = i;
if (likely(vq->shadow_used_idx)) {
- flush_shadow_used_ring_packed(dev, vq);
+ flush_shadow_packed(dev, vq);
vhost_vring_call_packed(dev, vq);
}
}