@@ -31,6 +31,12 @@ rxvq_is_mergeable(struct virtio_net *dev)
return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}
+/*
+ * Return true when the device negotiated VIRTIO_F_IN_ORDER, i.e. the
+ * driver commits to using descriptors in ring order, which lets the
+ * used-ring update paths take the cheaper in-order variants.
+ */
+static __rte_always_inline bool
+virtio_net_is_inorder(struct virtio_net *dev)
+{
+ return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
+}
+
static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
@@ -213,6 +219,30 @@ flush_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
}
+/*
+ * Record a dequeued burst of PACKED_DESCS_BURST descriptors for an
+ * in-order device. Only one shadow used entry is kept: because the
+ * driver consumes descriptors in order, writing back the id of the
+ * last descriptor implicitly covers the whole run, so the id slot is
+ * overwritten unconditionally on every call and the rest of the entry
+ * is initialized only once (when the shadow is empty).
+ *
+ * NOTE(review): relies on a flush path elsewhere to write
+ * shadow_used_packed[0] back to the used ring — not visible in this
+ * hunk, confirm against the flush/commit helpers.
+ */
+static __rte_always_inline void
+update_dequeue_burst_packed_inorder(struct vhost_virtqueue *vq, uint16_t id)
+{
+ /* Always track the most recent id; earlier ids are subsumed. */
+ vq->shadow_used_packed[0].id = id;
+
+ if (!vq->shadow_used_idx) {
+ /* First entry since last flush: remember where it started. */
+ vq->dequeue_shadow_head = vq->last_used_idx;
+ vq->shadow_used_packed[0].len = 0;
+ vq->shadow_used_packed[0].count = 1;
+ vq->shadow_used_packed[0].used_idx = vq->last_used_idx;
+ vq->shadow_used_packed[0].used_wrap_counter =
+ vq->used_wrap_counter;
+
+ vq->shadow_used_idx = 1;
+
+ }
+
+ /* Advance past the burst, toggling the wrap counter on ring wrap. */
+ vq->last_used_idx += PACKED_DESCS_BURST;
+ if (vq->last_used_idx >= vq->size) {
+ vq->used_wrap_counter ^= 1;
+ vq->last_used_idx -= vq->size;
+ }
+}
+
static __rte_always_inline void
update_dequeue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t *ids)
@@ -315,7 +345,6 @@ update_dequeue_shadow_packed(struct vhost_virtqueue *vq, uint16_t buf_id,
else
vq->desc_packed[vq->last_used_idx].flags =
VIRTIO_TX_USED_WRAP_FLAG;
-
}
vq->last_used_idx += count;
@@ -326,6 +355,31 @@ update_dequeue_shadow_packed(struct vhost_virtqueue *vq, uint16_t buf_id,
}
}
+/*
+ * Record a single dequeued descriptor chain of 'count' descriptors for
+ * an in-order device. Mirrors update_dequeue_burst_packed_inorder():
+ * a single shadow used entry suffices, its id is refreshed on every
+ * call (in-order guarantees the last id covers all prior ones), and
+ * the remaining fields are set only when the shadow was empty.
+ *
+ * NOTE(review): assumes the shadow entry is flushed to the used ring
+ * by a commit path outside this hunk — verify against the caller.
+ */
+static __rte_always_inline void
+update_dequeue_shadow_packed_inorder(struct vhost_virtqueue *vq,
+ uint16_t buf_id, uint16_t count)
+{
+ /* Most recent buffer id wins; previous ids are implied by order. */
+ vq->shadow_used_packed[0].id = buf_id;
+
+ if (!vq->shadow_used_idx) {
+ /* First entry since last flush: capture the starting slot. */
+ vq->dequeue_shadow_head = vq->last_used_idx;
+
+ vq->shadow_used_packed[0].len = 0;
+ vq->shadow_used_packed[0].count = count;
+ vq->shadow_used_packed[0].used_idx = vq->last_used_idx;
+ vq->shadow_used_packed[0].used_wrap_counter =
+ vq->used_wrap_counter;
+
+ vq->shadow_used_idx = 1;
+ }
+
+ /* Consume 'count' descriptors, handling ring wrap-around. */
+ vq->last_used_idx += count;
+
+ if (vq->last_used_idx >= vq->size) {
+ vq->used_wrap_counter ^= 1;
+ vq->last_used_idx -= vq->size;
+ }
+}
static inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
@@ -1834,7 +1888,12 @@ virtio_dev_tx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
pkts[i]->pkt_len);
}
- update_dequeue_burst_packed(dev, vq, ids);
+ if (virtio_net_is_inorder(dev))
+ update_dequeue_burst_packed_inorder(vq,
+ ids[PACKED_BURST_MASK]);
+ else
+ update_dequeue_burst_packed(dev, vq, ids);
+
if (virtio_net_with_host_offload(dev)) {
UNROLL_PRAGMA(PRAGMA_PARAM)
for (i = 0; i < PACKED_DESCS_BURST; i++) {
@@ -1897,7 +1956,10 @@ virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
&desc_count))
return -1;
- update_dequeue_shadow_packed(vq, buf_id, desc_count);
+ if (virtio_net_is_inorder(dev))
+ update_dequeue_shadow_packed_inorder(vq, buf_id, desc_count);
+ else
+ update_dequeue_shadow_packed(vq, buf_id, desc_count);
vq->last_avail_idx += desc_count;
if (vq->last_avail_idx >= vq->size) {