@@ -147,7 +147,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
{
struct virtqueue *vq = cvq->vq;
int head;
- struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+ struct vring_packed_desc *desc = vq->packed.ring.desc;
struct virtio_pmd_ctrl *result;
uint16_t flags;
int sum = 0;
@@ -160,45 +160,45 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
* At least one TX packet per argument;
* One RX packet for ACK.
*/
- head = vq->vq_avail_idx;
- flags = vq->vq_packed.cached_flags;
+ head = vq->avail_idx;
+ flags = vq->packed.cached_flags;
desc[head].addr = cvq->virtio_net_hdr_mem;
desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
- vq->vq_free_cnt--;
+ vq->free_cnt--;
nb_descs++;
- if (++vq->vq_avail_idx >= vq->vq_nentries) {
- vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
+ if (++vq->avail_idx >= vq->nentries) {
+ vq->avail_idx -= vq->nentries;
+ vq->packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
for (k = 0; k < pkt_num; k++) {
- desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ desc[vq->avail_idx].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
- desc[vq->vq_avail_idx].len = dlen[k];
- desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
- vq->vq_packed.cached_flags;
+ desc[vq->avail_idx].len = dlen[k];
+ desc[vq->avail_idx].flags = VRING_DESC_F_NEXT |
+ vq->packed.cached_flags;
sum += dlen[k];
- vq->vq_free_cnt--;
+ vq->free_cnt--;
nb_descs++;
- if (++vq->vq_avail_idx >= vq->vq_nentries) {
- vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
+ if (++vq->avail_idx >= vq->nentries) {
+ vq->avail_idx -= vq->nentries;
+ vq->packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
}
- desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ desc[vq->avail_idx].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
- desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
- desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
- vq->vq_packed.cached_flags;
- vq->vq_free_cnt--;
+ desc[vq->avail_idx].len = sizeof(ctrl->status);
+ desc[vq->avail_idx].flags = VRING_DESC_F_WRITE |
+ vq->packed.cached_flags;
+ vq->free_cnt--;
nb_descs++;
- if (++vq->vq_avail_idx >= vq->vq_nentries) {
- vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
+ if (++vq->avail_idx >= vq->nentries) {
+ vq->avail_idx -= vq->nentries;
+ vq->packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
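The increment-wrap-toggle sequence above repeats verbatim for every descriptor the control path writes. Factored out it is only a few lines; the following is a sketch built on the renamed fields, not a helper this patch adds:

/* Sketch: advance the packed-ring avail index one slot; on wrap,
 * invert the cached AVAIL/USED bits so descriptors written on the
 * next lap carry the flipped wrap state. */
static inline void
vq_avail_inc_sketch(struct virtqueue *vq)
{
	if (++vq->avail_idx >= vq->nentries) {
		vq->avail_idx -= vq->nentries;
		vq->packed.cached_flags ^=
			VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
	}
}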
@@ -215,23 +215,23 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
virtio_rmb(vq->hw->weak_barriers);
/* now get used descriptors */
- vq->vq_free_cnt += nb_descs;
- vq->vq_used_cons_idx += nb_descs;
- if (vq->vq_used_cons_idx >= vq->vq_nentries) {
- vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->vq_packed.used_wrap_counter ^= 1;
- }
-
- PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
- "vq->vq_avail_idx=%d\n"
- "vq->vq_used_cons_idx=%d\n"
- "vq->vq_packed.cached_flags=0x%x\n"
- "vq->vq_packed.used_wrap_counter=%d\n",
- vq->vq_free_cnt,
- vq->vq_avail_idx,
- vq->vq_used_cons_idx,
- vq->vq_packed.cached_flags,
- vq->vq_packed.used_wrap_counter);
+ vq->free_cnt += nb_descs;
+ vq->used_cons_idx += nb_descs;
+ if (vq->used_cons_idx >= vq->nentries) {
+ vq->used_cons_idx -= vq->nentries;
+ vq->packed.used_wrap_counter ^= 1;
+ }
+
+ PMD_INIT_LOG(DEBUG, "vq->free_cnt=%d\n"
+ "vq->avail_idx=%d\n"
+ "vq->used_cons_idx=%d\n"
+ "vq->packed.cached_flags=0x%x\n"
+ "vq->packed.used_wrap_counter=%d\n",
+ vq->free_cnt,
+ vq->avail_idx,
+ vq->used_cons_idx,
+ vq->packed.cached_flags,
+ vq->packed.used_wrap_counter);
result = cvq->virtio_net_hdr_mz->addr;
return result;
@@ -247,7 +247,7 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
uint32_t head, i;
int k, sum = 0;
- head = vq->vq_desc_head_idx;
+ head = vq->desc_head_idx;
/*
* Format is enforced in qemu code:
@@ -255,35 +255,35 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
* At least one TX packet per argument;
* One RX packet for ACK.
*/
- vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
- vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
- vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
- vq->vq_free_cnt--;
- i = vq->vq_split.ring.desc[head].next;
+ vq->split.ring.desc[head].flags = VRING_DESC_F_NEXT;
+ vq->split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+ vq->split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->free_cnt--;
+ i = vq->split.ring.desc[head].next;
for (k = 0; k < pkt_num; k++) {
- vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
- vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ vq->split.ring.desc[i].flags = VRING_DESC_F_NEXT;
+ vq->split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
- vq->vq_split.ring.desc[i].len = dlen[k];
+ vq->split.ring.desc[i].len = dlen[k];
sum += dlen[k];
- vq->vq_free_cnt--;
- i = vq->vq_split.ring.desc[i].next;
+ vq->free_cnt--;
+ i = vq->split.ring.desc[i].next;
}
- vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
- vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ vq->split.ring.desc[i].flags = VRING_DESC_F_WRITE;
+ vq->split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
- vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
- vq->vq_free_cnt--;
+ vq->split.ring.desc[i].len = sizeof(ctrl->status);
+ vq->free_cnt--;
- vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
+ vq->desc_head_idx = vq->split.ring.desc[i].next;
vq_update_avail_ring(vq, head);
vq_update_avail_idx(vq);
- PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
+ PMD_INIT_LOG(DEBUG, "vq->queue_index = %d", vq->queue_index);
virtqueue_notify(vq);
@@ -297,27 +297,26 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
uint32_t idx, desc_idx, used_idx;
struct vring_used_elem *uep;
- used_idx = (uint32_t)(vq->vq_used_cons_idx
- & (vq->vq_nentries - 1));
- uep = &vq->vq_split.ring.used->ring[used_idx];
+ used_idx = (uint32_t)(vq->used_cons_idx & (vq->nentries - 1));
+ uep = &vq->split.ring.used->ring[used_idx];
idx = (uint32_t) uep->id;
desc_idx = idx;
- while (vq->vq_split.ring.desc[desc_idx].flags &
+ while (vq->split.ring.desc[desc_idx].flags &
VRING_DESC_F_NEXT) {
- desc_idx = vq->vq_split.ring.desc[desc_idx].next;
- vq->vq_free_cnt++;
+ desc_idx = vq->split.ring.desc[desc_idx].next;
+ vq->free_cnt++;
}
- vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
- vq->vq_desc_head_idx = idx;
+ vq->split.ring.desc[desc_idx].next = vq->desc_head_idx;
+ vq->desc_head_idx = idx;
- vq->vq_used_cons_idx++;
- vq->vq_free_cnt++;
+ vq->used_cons_idx++;
+ vq->free_cnt++;
}
- PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
- vq->vq_free_cnt, vq->vq_desc_head_idx);
+ PMD_INIT_LOG(DEBUG, "vq->free_cnt=%d\nvq->desc_head_idx=%d",
+ vq->free_cnt, vq->desc_head_idx);
result = cvq->virtio_net_hdr_mz->addr;
return result;
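The reclaim loop above is the canonical split-ring pattern: the used ring returns only the head index of a chain, and the chain's length is recovered by following desc[].next while VRING_DESC_F_NEXT is set. A minimal sketch with the renamed fields:

/* Sketch: count the descriptors in the chain starting at head. */
static inline uint16_t
chain_len_sketch(struct virtqueue *vq, uint16_t head)
{
	struct vring_desc *desc = vq->split.ring.desc;
	uint16_t n = 1;

	while (desc[head].flags & VRING_DESC_F_NEXT) {
		head = desc[head].next;
		n++;
	}
	return n;
}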
@@ -341,11 +340,11 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
rte_spinlock_lock(&cvq->lock);
vq = cvq->vq;
- PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
+ PMD_INIT_LOG(DEBUG, "vq->desc_head_idx = %d, status = %d, "
"vq->hw->cvq = %p vq = %p",
- vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+ vq->desc_head_idx, status, vq->hw->cvq, vq);
- if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+ if (vq->free_cnt < pkt_num + 2 || pkt_num < 1) {
rte_spinlock_unlock(&cvq->lock);
return -1;
}
@@ -406,25 +405,25 @@ virtio_get_nr_vq(struct virtio_hw *hw)
static void
virtio_init_vring(struct virtqueue *vq)
{
- int size = vq->vq_nentries;
- uint8_t *ring_mem = vq->vq_ring_virt_mem;
+ int size = vq->nentries;
+ uint8_t *ring_mem = vq->ring_virt_mem;
PMD_INIT_FUNC_TRACE();
- memset(ring_mem, 0, vq->vq_ring_size);
+ memset(ring_mem, 0, vq->ring_size);
- vq->vq_used_cons_idx = 0;
- vq->vq_desc_head_idx = 0;
- vq->vq_avail_idx = 0;
- vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
- vq->vq_free_cnt = vq->vq_nentries;
- memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
+ vq->used_cons_idx = 0;
+ vq->desc_head_idx = 0;
+ vq->avail_idx = 0;
+ vq->desc_tail_idx = (uint16_t)(vq->nentries - 1);
+ vq->free_cnt = vq->nentries;
+ memset(vq->descx, 0, sizeof(struct vq_desc_extra) * vq->nentries);
if (vtpci_packed_queue(vq->hw)) {
- vring_init_packed(&vq->vq_packed.ring, ring_mem,
+ vring_init_packed(&vq->packed.ring, ring_mem,
VIRTIO_PCI_VRING_ALIGN, size);
vring_desc_init_packed(vq, size);
} else {
- struct vring *vr = &vq->vq_split.ring;
+ struct vring *vr = &vq->split.ring;
vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
vring_desc_init_split(vr->desc, size);
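vring_init_split() carves the three split-ring structures out of the single memzone reserved per queue. The layout arithmetic, which the modern_setup_queue() hunk further down repeats when programming the device, is roughly the following sketch (ring_mem and size come from the function above):

/* Split-ring layout inside the ring memory: descriptor table first,
 * avail ring immediately after, used ring aligned up from there. */
uintptr_t desc_addr  = (uintptr_t)ring_mem;
uintptr_t avail_addr = desc_addr + size * sizeof(struct vring_desc);
uintptr_t used_addr  = RTE_ALIGN_CEIL(avail_addr +
			offsetof(struct vring_avail, ring[size]),
			VIRTIO_PCI_VRING_ALIGN);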
@@ -498,25 +497,25 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
hw->vqs[vtpci_queue_idx] = vq;
vq->hw = hw;
- vq->vq_queue_index = vtpci_queue_idx;
- vq->vq_nentries = vq_size;
+ vq->queue_index = vtpci_queue_idx;
+ vq->nentries = vq_size;
if (vtpci_packed_queue(hw)) {
- vq->vq_packed.used_wrap_counter = 1;
- vq->vq_packed.cached_flags = VRING_DESC_F_AVAIL(1);
- vq->vq_packed.event_flags_shadow = 0;
+ vq->packed.used_wrap_counter = 1;
+ vq->packed.cached_flags = VRING_DESC_F_AVAIL(1);
+ vq->packed.event_flags_shadow = 0;
if (queue_type == VTNET_RQ)
- vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+ vq->packed.cached_flags |= VRING_DESC_F_WRITE;
}
/*
* Reserve a memzone for vring elements
*/
size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
- vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ vq->ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
- size, vq->vq_ring_size);
+ size, vq->ring_size);
- mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ mz = rte_memzone_reserve_aligned(vq_name, vq->ring_size,
numa_node, RTE_MEMZONE_IOVA_CONTIG,
VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
@@ -530,11 +529,11 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
memset(mz->addr, 0, mz->len);
- vq->vq_ring_mem = mz->iova;
- vq->vq_ring_virt_mem = mz->addr;
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
+ vq->ring_mem = mz->iova;
+ vq->ring_virt_mem = mz->addr;
+ PMD_INIT_LOG(DEBUG, "vq->ring_mem: 0x%" PRIx64,
(uint64_t)mz->iova);
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
+ PMD_INIT_LOG(DEBUG, "vq->ring_virt_mem: 0x%" PRIx64,
(uint64_t)(uintptr_t)mz->addr);
virtio_init_vring(vq);
@@ -597,7 +596,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
if (!hw->virtio_user_dev)
vq->offset = offsetof(struct rte_mbuf, buf_iova);
else {
- vq->vq_ring_mem = (uintptr_t)mz->addr;
+ vq->ring_mem = (uintptr_t)mz->addr;
vq->offset = offsetof(struct rte_mbuf, buf_addr);
if (queue_type == VTNET_TQ)
txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
@@ -38,7 +38,7 @@ check_vq_phys_addr_ok(struct virtqueue *vq)
* and only accepts a 32-bit page frame number.
* Check if the allocated physical memory exceeds 16TB.
*/
- if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
+ if ((vq->ring_mem + vq->ring_size - 1) >>
(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
return 0;
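The 16TB limit follows directly from the register width: the legacy PFN register is 32 bits wide and takes a page frame number, so with a page shift of VIRTIO_PCI_QUEUE_ADDR_SHIFT (assumed here to be 12) the last addressable ring byte is 2^(32+12) - 1. The test expanded:

/* Any bits at or above 2^44 in the ring's last byte fail the check. */
uint64_t last = vq->ring_mem + vq->ring_size - 1;
int fits = (last >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) == 0;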
@@ -191,7 +191,7 @@ legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
uint16_t dst;
- rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->queue_index, 2,
VIRTIO_PCI_QUEUE_SEL);
rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
@@ -216,9 +216,9 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
if (!check_vq_phys_addr_ok(vq))
return -1;
- rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->queue_index, 2,
VIRTIO_PCI_QUEUE_SEL);
- src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
+ src = vq->ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
return 0;
@@ -229,7 +229,7 @@ legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
uint32_t src = 0;
- rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->queue_index, 2,
VIRTIO_PCI_QUEUE_SEL);
rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}
@@ -237,7 +237,7 @@ legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
- rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->queue_index, 2,
VIRTIO_PCI_QUEUE_NOTIFY);
}
@@ -348,7 +348,7 @@ modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
- rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vq->queue_index, &hw->common_cfg->queue_select);
rte_write16(vec, &hw->common_cfg->queue_msix_vector);
return rte_read16(&hw->common_cfg->queue_msix_vector);
}
@@ -369,13 +369,13 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
if (!check_vq_phys_addr_ok(vq))
return -1;
- desc_addr = vq->vq_ring_mem;
- avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ desc_addr = vq->ring_mem;
+ avail_addr = desc_addr + vq->nentries * sizeof(struct vring_desc);
used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
- ring[vq->vq_nentries]),
+ ring[vq->nentries]),
VIRTIO_PCI_VRING_ALIGN);
- rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vq->queue_index, &hw->common_cfg->queue_select);
io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
&hw->common_cfg->queue_desc_hi);
@@ -390,7 +390,7 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
rte_write16(1, &hw->common_cfg->queue_enable);
- PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
+ PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->queue_index);
PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
PMD_INIT_LOG(DEBUG, "\t aval_addr: %" PRIx64, avail_addr);
PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
@@ -403,7 +403,7 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
- rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vq->queue_index, &hw->common_cfg->queue_select);
io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
&hw->common_cfg->queue_desc_hi);
@@ -418,7 +418,7 @@ modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
- rte_write16(vq->vq_queue_index, vq->notify_addr);
+ rte_write16(vq->queue_index, vq->notify_addr);
}
const struct virtio_pci_ops modern_ops = {
@@ -51,8 +51,8 @@ virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
void
vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
{
- vq->vq_free_cnt += num;
- vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
+ vq->free_cnt += num;
+ vq->desc_tail_idx = desc_idx & (vq->nentries - 1);
}
void
@@ -62,13 +62,13 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
struct vq_desc_extra *dxp;
uint16_t desc_idx_last = desc_idx;
- dp = &vq->vq_split.ring.desc[desc_idx];
- dxp = &vq->vq_descx[desc_idx];
- vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ dp = &vq->split.ring.desc[desc_idx];
+ dxp = &vq->descx[desc_idx];
+ vq->free_cnt = (uint16_t)(vq->free_cnt + dxp->ndescs);
if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
while (dp->flags & VRING_DESC_F_NEXT) {
desc_idx_last = dp->next;
- dp = &vq->vq_split.ring.desc[dp->next];
+ dp = &vq->split.ring.desc[dp->next];
}
}
dxp->ndescs = 0;
@@ -78,14 +78,14 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
* newly freed chain. If the virtqueue was completely used, then
* head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
*/
- if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
- vq->vq_desc_head_idx = desc_idx;
+ if (vq->desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->desc_head_idx = desc_idx;
} else {
- dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
+ dp_tail = &vq->split.ring.desc[vq->desc_tail_idx];
dp_tail->next = desc_idx;
}
- vq->vq_desc_tail_idx = desc_idx_last;
+ vq->desc_tail_idx = desc_idx_last;
dp->next = VQ_RING_DESC_CHAIN_END;
}
@@ -94,15 +94,15 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
{
struct vq_desc_extra *dxp;
- dxp = &vq->vq_descx[id];
- vq->vq_free_cnt += dxp->ndescs;
+ dxp = &vq->descx[id];
+ vq->free_cnt += dxp->ndescs;
- if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_head_idx = id;
+ if (vq->desc_tail_idx == VQ_RING_DESC_CHAIN_END)
+ vq->desc_head_idx = id;
else
- vq->vq_descx[vq->vq_desc_tail_idx].next = id;
+ vq->descx[vq->desc_tail_idx].next = id;
- vq->vq_desc_tail_idx = id;
+ vq->desc_tail_idx = id;
dxp->next = VQ_RING_DESC_CHAIN_END;
}
@@ -118,30 +118,30 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
struct vring_packed_desc *desc;
uint16_t i;
- desc = vq->vq_packed.ring.desc;
+ desc = vq->packed.ring.desc;
for (i = 0; i < num; i++) {
- used_idx = vq->vq_used_cons_idx;
+ used_idx = vq->used_cons_idx;
if (!desc_is_used(&desc[used_idx], vq))
return i;
virtio_rmb(vq->hw->weak_barriers);
len[i] = desc[used_idx].len;
id = desc[used_idx].id;
- cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
+ cookie = (struct rte_mbuf *)vq->descx[id].cookie;
if (unlikely(cookie == NULL)) {
PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
- vq->vq_used_cons_idx);
+ vq->used_cons_idx);
break;
}
rte_prefetch0(cookie);
rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
rx_pkts[i] = cookie;
- vq->vq_free_cnt++;
- vq->vq_used_cons_idx++;
- if (vq->vq_used_cons_idx >= vq->vq_nentries) {
- vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->vq_packed.used_wrap_counter ^= 1;
+ vq->free_cnt++;
+ vq->used_cons_idx++;
+ if (vq->used_cons_idx >= vq->nentries) {
+ vq->used_cons_idx -= vq->nentries;
+ vq->packed.used_wrap_counter ^= 1;
}
}
@@ -159,24 +159,24 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
/* Caller does the check */
for (i = 0; i < num ; i++) {
- used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- uep = &vq->vq_split.ring.used->ring[used_idx];
+ used_idx = (uint16_t)(vq->used_cons_idx & (vq->nentries - 1));
+ uep = &vq->split.ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
len[i] = uep->len;
- cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
+ cookie = (struct rte_mbuf *)vq->descx[desc_idx].cookie;
if (unlikely(cookie == NULL)) {
PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
- vq->vq_used_cons_idx);
+ vq->used_cons_idx);
break;
}
rte_prefetch0(cookie);
rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
rx_pkts[i] = cookie;
- vq->vq_used_cons_idx++;
+ vq->used_cons_idx++;
vq_ring_free_chain(vq, desc_idx);
- vq->vq_descx[desc_idx].cookie = NULL;
+ vq->descx[desc_idx].cookie = NULL;
}
return i;
@@ -197,23 +197,23 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
return 0;
for (i = 0; i < num; i++) {
- used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ used_idx = vq->used_cons_idx & (vq->nentries - 1);
/* Desc idx same as used idx */
- uep = &vq->vq_split.ring.used->ring[used_idx];
+ uep = &vq->split.ring.used->ring[used_idx];
len[i] = uep->len;
- cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
+ cookie = (struct rte_mbuf *)vq->descx[used_idx].cookie;
if (unlikely(cookie == NULL)) {
PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
- vq->vq_used_cons_idx);
+ vq->used_cons_idx);
break;
}
rte_prefetch0(cookie);
rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
rx_pkts[i] = cookie;
- vq->vq_used_cons_idx++;
- vq->vq_descx[used_idx].cookie = NULL;
+ vq->used_cons_idx++;
+ vq->descx[used_idx].cookie = NULL;
}
vq_ring_free_inorder(vq, used_idx, i);
@@ -228,23 +228,23 @@ static void
virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
{
uint16_t used_idx, id, curr_id, free_cnt = 0;
- uint16_t size = vq->vq_nentries;
- struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+ uint16_t size = vq->nentries;
+ struct vring_packed_desc *desc = vq->packed.ring.desc;
struct vq_desc_extra *dxp;
- used_idx = vq->vq_used_cons_idx;
+ used_idx = vq->used_cons_idx;
while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
virtio_rmb(vq->hw->weak_barriers);
id = desc[used_idx].id;
do {
curr_id = used_idx;
- dxp = &vq->vq_descx[used_idx];
+ dxp = &vq->descx[used_idx];
used_idx += dxp->ndescs;
free_cnt += dxp->ndescs;
num -= dxp->ndescs;
if (used_idx >= size) {
used_idx -= size;
- vq->vq_packed.used_wrap_counter ^= 1;
+ vq->packed.used_wrap_counter ^= 1;
}
if (dxp->cookie != NULL) {
rte_pktmbuf_free(dxp->cookie);
@@ -252,34 +252,34 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
}
} while (curr_id != id);
}
- vq->vq_used_cons_idx = used_idx;
- vq->vq_free_cnt += free_cnt;
+ vq->used_cons_idx = used_idx;
+ vq->free_cnt += free_cnt;
}
static void
virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
{
uint16_t used_idx, id;
- uint16_t size = vq->vq_nentries;
- struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+ uint16_t size = vq->nentries;
+ struct vring_packed_desc *desc = vq->packed.ring.desc;
struct vq_desc_extra *dxp;
- used_idx = vq->vq_used_cons_idx;
+ used_idx = vq->used_cons_idx;
while (num-- && desc_is_used(&desc[used_idx], vq)) {
virtio_rmb(vq->hw->weak_barriers);
id = desc[used_idx].id;
- dxp = &vq->vq_descx[id];
- vq->vq_used_cons_idx += dxp->ndescs;
- if (vq->vq_used_cons_idx >= size) {
- vq->vq_used_cons_idx -= size;
- vq->vq_packed.used_wrap_counter ^= 1;
+ dxp = &vq->descx[id];
+ vq->used_cons_idx += dxp->ndescs;
+ if (vq->used_cons_idx >= size) {
+ vq->used_cons_idx -= size;
+ vq->packed.used_wrap_counter ^= 1;
}
vq_ring_free_id_packed(vq, id);
if (dxp->cookie != NULL) {
rte_pktmbuf_free(dxp->cookie);
dxp->cookie = NULL;
}
- used_idx = vq->vq_used_cons_idx;
+ used_idx = vq->used_cons_idx;
}
}
@@ -301,12 +301,12 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
struct vring_used_elem *uep;
struct vq_desc_extra *dxp;
- used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- uep = &vq->vq_split.ring.used->ring[used_idx];
+ used_idx = (uint16_t)(vq->used_cons_idx & (vq->nentries - 1));
+ uep = &vq->split.ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
- dxp = &vq->vq_descx[desc_idx];
- vq->vq_used_cons_idx++;
+ dxp = &vq->descx[desc_idx];
+ vq->used_cons_idx++;
vq_ring_free_chain(vq, desc_idx);
if (dxp->cookie != NULL) {
@@ -320,7 +320,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
static void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
- uint16_t i, idx = vq->vq_used_cons_idx;
+ uint16_t i, idx = vq->used_cons_idx;
int16_t free_cnt = 0;
struct vq_desc_extra *dxp = NULL;
@@ -328,7 +328,7 @@ virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
return;
for (i = 0; i < num; i++) {
- dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
+ dxp = &vq->descx[idx++ & (vq->nentries - 1)];
free_cnt += dxp->ndescs;
if (dxp->cookie != NULL) {
rte_pktmbuf_free(dxp->cookie);
@@ -336,8 +336,8 @@ virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
}
}
- vq->vq_free_cnt += free_cnt;
- vq->vq_used_cons_idx = idx;
+ vq->free_cnt += free_cnt;
+ vq->used_cons_idx = idx;
}
static inline int
@@ -350,17 +350,17 @@ virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
struct vring_desc *start_dp;
uint16_t head_idx, idx, i = 0;
- if (unlikely(vq->vq_free_cnt == 0))
+ if (unlikely(vq->free_cnt == 0))
return -ENOSPC;
- if (unlikely(vq->vq_free_cnt < num))
+ if (unlikely(vq->free_cnt < num))
return -EMSGSIZE;
- head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
- start_dp = vq->vq_split.ring.desc;
+ head_idx = vq->desc_head_idx & (vq->nentries - 1);
+ start_dp = vq->split.ring.desc;
while (i < num) {
- idx = head_idx & (vq->vq_nentries - 1);
- dxp = &vq->vq_descx[idx];
+ idx = head_idx & (vq->nentries - 1);
+ dxp = &vq->descx[idx];
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
@@ -378,8 +378,8 @@ virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
i++;
}
- vq->vq_desc_head_idx += num;
- vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ vq->desc_head_idx += num;
+ vq->free_cnt = (uint16_t)(vq->free_cnt - num);
return 0;
}
@@ -389,20 +389,20 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
{
struct vq_desc_extra *dxp;
struct virtio_hw *hw = vq->hw;
- struct vring_desc *start_dp = vq->vq_split.ring.desc;
+ struct vring_desc *start_dp = vq->split.ring.desc;
uint16_t idx, i;
- if (unlikely(vq->vq_free_cnt == 0))
+ if (unlikely(vq->free_cnt == 0))
return -ENOSPC;
- if (unlikely(vq->vq_free_cnt < num))
+ if (unlikely(vq->free_cnt < num))
return -EMSGSIZE;
- if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
+ if (unlikely(vq->desc_head_idx >= vq->nentries))
return -EFAULT;
for (i = 0; i < num; i++) {
- idx = vq->vq_desc_head_idx;
- dxp = &vq->vq_descx[idx];
+ idx = vq->desc_head_idx;
+ dxp = &vq->descx[idx];
dxp->cookie = (void *)cookie[i];
dxp->ndescs = 1;
@@ -413,15 +413,15 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
hw->vtnet_hdr_size;
start_dp[idx].flags = VRING_DESC_F_WRITE;
- vq->vq_desc_head_idx = start_dp[idx].next;
+ vq->desc_head_idx = start_dp[idx].next;
vq_update_avail_ring(vq, idx);
- if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
- vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+ if (vq->desc_head_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->desc_tail_idx = vq->desc_head_idx;
break;
}
}
- vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ vq->free_cnt = (uint16_t)(vq->free_cnt - num);
return 0;
}
@@ -430,21 +430,21 @@ static inline int
virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
struct rte_mbuf **cookie, uint16_t num)
{
- struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
- uint16_t flags = vq->vq_packed.cached_flags;
+ struct vring_packed_desc *start_dp = vq->packed.ring.desc;
+ uint16_t flags = vq->packed.cached_flags;
struct virtio_hw *hw = vq->hw;
struct vq_desc_extra *dxp;
uint16_t idx;
int i;
- if (unlikely(vq->vq_free_cnt == 0))
+ if (unlikely(vq->free_cnt == 0))
return -ENOSPC;
- if (unlikely(vq->vq_free_cnt < num))
+ if (unlikely(vq->free_cnt < num))
return -EMSGSIZE;
for (i = 0; i < num; i++) {
- idx = vq->vq_avail_idx;
- dxp = &vq->vq_descx[idx];
+ idx = vq->avail_idx;
+ dxp = &vq->descx[idx];
dxp->cookie = (void *)cookie[i];
dxp->ndescs = 1;
@@ -453,19 +453,19 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
+ hw->vtnet_hdr_size;
- vq->vq_desc_head_idx = dxp->next;
- if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+ vq->desc_head_idx = dxp->next;
+ if (vq->desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->desc_tail_idx = vq->desc_head_idx;
virtio_wmb(hw->weak_barriers);
start_dp[idx].flags = flags;
- if (++vq->vq_avail_idx >= vq->vq_nentries) {
- vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
+ if (++vq->avail_idx >= vq->nentries) {
+ vq->avail_idx -= vq->nentries;
+ vq->packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
- flags = vq->vq_packed.cached_flags;
+ flags = vq->packed.cached_flags;
}
}
- vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ vq->free_cnt = (uint16_t)(vq->free_cnt - num);
return 0;
}
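The refill path above also pins down the publication order for packed descriptors: body first (addr/len), then virtio_wmb(), then the flags that hand the slot to the device. Condensed, with iova and len as placeholders:

/* The device may poll flags at any time; the barrier keeps it from
 * seeing a slot whose body has not yet been written. */
start_dp[idx].addr = iova;
start_dp[idx].len  = len;
virtio_wmb(hw->weak_barriers);
start_dp[idx].flags = flags;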
@@ -588,12 +588,12 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
uint16_t head_size = vq->hw->vtnet_hdr_size;
uint16_t i = 0;
- idx = vq->vq_desc_head_idx;
- start_dp = vq->vq_split.ring.desc;
+ idx = vq->desc_head_idx;
+ start_dp = vq->split.ring.desc;
while (i < num) {
- idx = idx & (vq->vq_nentries - 1);
- dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
+ idx = idx & (vq->nentries - 1);
+ dxp = &vq->descx[vq->avail_idx & (vq->nentries - 1)];
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
@@ -617,8 +617,8 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
i++;
};
- vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
- vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
+ vq->free_cnt = (uint16_t)(vq->free_cnt - num);
+ vq->desc_head_idx = idx & (vq->nentries - 1);
}
static inline void
@@ -633,15 +633,15 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
uint16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
- id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
- idx = vq->vq_avail_idx;
- dp = &vq->vq_packed.ring.desc[idx];
+ id = in_order ? vq->avail_idx : vq->desc_head_idx;
+ idx = vq->avail_idx;
+ dp = &vq->packed.ring.desc[idx];
- dxp = &vq->vq_descx[id];
+ dxp = &vq->descx[id];
dxp->ndescs = 1;
dxp->cookie = cookie;
- flags = vq->vq_packed.cached_flags;
+ flags = vq->packed.cached_flags;
/* prepend cannot fail, checked by caller */
hdr = (struct virtio_net_hdr *)
@@ -658,18 +658,18 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
dp->len = cookie->data_len;
dp->id = id;
- if (++vq->vq_avail_idx >= vq->vq_nentries) {
- vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
+ if (++vq->avail_idx >= vq->nentries) {
+ vq->avail_idx -= vq->nentries;
+ vq->packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
- vq->vq_free_cnt--;
+ vq->free_cnt--;
if (!in_order) {
- vq->vq_desc_head_idx = dxp->next;
- if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+ vq->desc_head_idx = dxp->next;
+ if (vq->desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->desc_tail_idx = VQ_RING_DESC_CHAIN_END;
}
virtio_wmb(vq->hw->weak_barriers);
@@ -689,20 +689,20 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
struct virtio_net_hdr *hdr;
uint16_t prev;
- id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
+ id = in_order ? vq->avail_idx : vq->desc_head_idx;
- dxp = &vq->vq_descx[id];
+ dxp = &vq->descx[id];
dxp->ndescs = needed;
dxp->cookie = cookie;
- head_idx = vq->vq_avail_idx;
+ head_idx = vq->avail_idx;
idx = head_idx;
prev = head_idx;
- start_dp = vq->vq_packed.ring.desc;
+ start_dp = vq->packed.ring.desc;
- head_dp = &vq->vq_packed.ring.desc[idx];
+ head_dp = &vq->packed.ring.desc[idx];
head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
- head_flags |= vq->vq_packed.cached_flags;
+ head_flags |= vq->packed.cached_flags;
if (can_push) {
/* prepend cannot fail, checked by caller */
@@ -725,9 +725,9 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
start_dp[idx].len = vq->hw->vtnet_hdr_size;
hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
idx++;
- if (idx >= vq->vq_nentries) {
- idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
+ if (idx >= vq->nentries) {
+ idx -= vq->nentries;
+ vq->packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
}
@@ -741,27 +741,27 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
start_dp[idx].len = cookie->data_len;
if (likely(idx != head_idx)) {
flags = cookie->next ? VRING_DESC_F_NEXT : 0;
- flags |= vq->vq_packed.cached_flags;
+ flags |= vq->packed.cached_flags;
start_dp[idx].flags = flags;
}
prev = idx;
idx++;
- if (idx >= vq->vq_nentries) {
- idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
+ if (idx >= vq->nentries) {
+ idx -= vq->nentries;
+ vq->packed.cached_flags ^=
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
}
} while ((cookie = cookie->next) != NULL);
start_dp[prev].id = id;
- vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
- vq->vq_avail_idx = idx;
+ vq->free_cnt = (uint16_t)(vq->free_cnt - needed);
+ vq->avail_idx = idx;
if (!in_order) {
- vq->vq_desc_head_idx = dxp->next;
- if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+ vq->desc_head_idx = dxp->next;
+ if (vq->desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->desc_tail_idx = VQ_RING_DESC_CHAIN_END;
}
virtio_wmb(vq->hw->weak_barriers);
@@ -782,16 +782,16 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
- head_idx = vq->vq_desc_head_idx;
+ head_idx = vq->desc_head_idx;
idx = head_idx;
if (in_order)
- dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
+ dxp = &vq->descx[vq->avail_idx & (vq->nentries - 1)];
else
- dxp = &vq->vq_descx[idx];
+ dxp = &vq->descx[idx];
dxp->cookie = (void *)cookie;
dxp->ndescs = needed;
- start_dp = vq->vq_split.ring.desc;
+ start_dp = vq->split.ring.desc;
if (can_push) {
/* prepend cannot fail, checked by caller */
@@ -844,16 +844,16 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
} while ((cookie = cookie->next) != NULL);
if (use_indirect)
- idx = vq->vq_split.ring.desc[head_idx].next;
+ idx = vq->split.ring.desc[head_idx].next;
- vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+ vq->free_cnt = (uint16_t)(vq->free_cnt - needed);
- vq->vq_desc_head_idx = idx;
+ vq->desc_head_idx = idx;
vq_update_avail_ring(vq, head_idx);
if (!in_order) {
- if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_tail_idx = idx;
+ if (vq->desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->desc_tail_idx = idx;
}
}
@@ -883,9 +883,9 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- if (nb_desc == 0 || nb_desc > vq->vq_nentries)
- nb_desc = vq->vq_nentries;
- vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+ if (nb_desc == 0 || nb_desc > vq->nentries)
+ nb_desc = vq->nentries;
+ vq->free_cnt = RTE_MIN(vq->free_cnt, nb_desc);
rxvq = &vq->rxq;
rxvq->queue_id = queue_idx;
@@ -917,10 +917,10 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
nbufs = 0;
if (hw->use_simple_rx) {
- for (desc_idx = 0; desc_idx < vq->vq_nentries;
+ for (desc_idx = 0; desc_idx < vq->nentries;
desc_idx++) {
- vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
- vq->vq_split.ring.desc[desc_idx].flags =
+ vq->split.ring.avail->ring[desc_idx] = desc_idx;
+ vq->split.ring.desc[desc_idx].flags =
VRING_DESC_F_WRITE;
}
@@ -930,18 +930,18 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
desc_idx++) {
- vq->sw_ring[vq->vq_nentries + desc_idx] =
+ vq->sw_ring[vq->nentries + desc_idx] =
&rxvq->fake_mbuf;
}
if (hw->use_simple_rx) {
- while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ while (vq->free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxvq);
nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
}
} else if (hw->use_inorder_rx) {
if ((!virtqueue_full(vq))) {
- uint16_t free_cnt = vq->vq_free_cnt;
+ uint16_t free_cnt = vq->free_cnt;
struct rte_mbuf *pkts[free_cnt];
if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
@@ -1011,9 +1011,9 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- if (nb_desc == 0 || nb_desc > vq->vq_nentries)
- nb_desc = vq->vq_nentries;
- vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+ if (nb_desc == 0 || nb_desc > vq->nentries)
+ nb_desc = vq->nentries;
+ vq->free_cnt = RTE_MIN(vq->free_cnt, nb_desc);
txvq = &vq->txq;
txvq->queue_id = queue_idx;
@@ -1021,18 +1021,18 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
tx_free_thresh = tx_conf->tx_free_thresh;
if (tx_free_thresh == 0)
tx_free_thresh =
- RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
+ RTE_MIN(vq->nentries / 4, DEFAULT_TX_FREE_THRESH);
- if (tx_free_thresh >= (vq->vq_nentries - 3)) {
+ if (tx_free_thresh >= (vq->nentries - 3)) {
RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
"number of TX entries minus 3 (%u)."
" (tx_free_thresh=%u port=%u queue=%u)\n",
- vq->vq_nentries - 3,
+ vq->nentries - 3,
tx_free_thresh, dev->data->port_id, queue_idx);
return -EINVAL;
}
- vq->vq_free_thresh = tx_free_thresh;
+ vq->free_thresh = tx_free_thresh;
dev->data->tx_queues[queue_idx] = txvq;
return 0;
@@ -1050,7 +1050,7 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
if (!vtpci_packed_queue(hw)) {
if (hw->use_inorder_tx)
- vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
+ vq->split.ring.desc[vq->nentries - 1].next = 0;
}
VIRTQUEUE_DUMP(vq);
@@ -1232,7 +1232,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
num = VIRTIO_MBUF_BURST_SZ;
if (likely(num > DESC_PER_CACHELINE))
- num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+ num = num - ((vq->used_cons_idx + num) % DESC_PER_CACHELINE);
num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
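The DESC_PER_CACHELINE trim above stops a burst from ending mid cache line in the used ring, so the next burst starts on a fresh line. Worked through, assuming four descriptors per cache line (64-byte lines, 16-byte descriptors):

uint16_t cons = 6, num = 9;          /* hypothetical values        */
num = num - ((cons + num) % 4);      /* (6 + 9) % 4 == 3, num == 6 */
/* the burst now ends at cons + num == 12, a cache-line boundary */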
@@ -1282,7 +1282,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
/* Allocate new mbuf for the used descriptor */
if (likely(!virtqueue_full(vq))) {
- uint16_t free_cnt = vq->vq_free_cnt;
+ uint16_t free_cnt = vq->free_cnt;
struct rte_mbuf *new_pkts[free_cnt];
if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
@@ -1335,7 +1335,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
if (likely(num > DESC_PER_CACHELINE))
- num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+ num = num - ((vq->used_cons_idx + num) % DESC_PER_CACHELINE);
num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "dequeue:%d", num);
@@ -1385,7 +1385,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Allocate new mbuf for the used descriptor */
if (likely(!virtqueue_full(vq))) {
- uint16_t free_cnt = vq->vq_free_cnt;
+ uint16_t free_cnt = vq->free_cnt;
struct rte_mbuf *new_pkts[free_cnt];
if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
@@ -1577,7 +1577,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
if (likely(!virtqueue_full(vq))) {
/* free_cnt may include mrg descs */
- uint16_t free_cnt = vq->vq_free_cnt;
+ uint16_t free_cnt = vq->free_cnt;
struct rte_mbuf *new_pkts[free_cnt];
if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
@@ -1640,7 +1640,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
num = VIRTIO_MBUF_BURST_SZ;
if (likely(num > DESC_PER_CACHELINE))
- num = num - ((vq->vq_used_cons_idx + num) %
+ num = num - ((vq->used_cons_idx + num) %
DESC_PER_CACHELINE);
@@ -1763,7 +1763,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
/* Allocate new mbuf for the used descriptor */
if (likely(!virtqueue_full(vq))) {
/* free_cnt may include mrg descs */
- uint16_t free_cnt = vq->vq_free_cnt;
+ uint16_t free_cnt = vq->free_cnt;
struct rte_mbuf *new_pkts[free_cnt];
if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
@@ -1821,7 +1821,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
num = VIRTIO_MBUF_BURST_SZ;
if (likely(num > DESC_PER_CACHELINE))
- num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+ num = num - ((vq->used_cons_idx + num) % DESC_PER_CACHELINE);
num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
@@ -1900,7 +1900,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
while (seg_res != 0) {
uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
VIRTIO_MBUF_BURST_SZ);
- if (likely(vq->vq_free_cnt >= rcv_cnt)) {
+ if (likely(vq->free_cnt >= rcv_cnt)) {
num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
len, rcv_cnt);
uint16_t extra_idx = 0;
@@ -1941,7 +1941,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
/* Allocate new mbuf for the used descriptor */
if (likely(!virtqueue_full(vq))) {
/* free_cnt may include mrg descs */
- uint16_t free_cnt = vq->vq_free_cnt;
+ uint16_t free_cnt = vq->free_cnt;
struct rte_mbuf *new_pkts[free_cnt];
if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
@@ -1989,8 +1989,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
- if (nb_pkts > vq->vq_free_cnt)
- virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
+ if (nb_pkts > vq->free_cnt)
+ virtio_xmit_cleanup_packed(vq, nb_pkts - vq->free_cnt,
in_order);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
@@ -2022,12 +2022,12 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
* default => number of segments + 1
*/
slots = txm->nb_segs + !can_push;
- need = slots - vq->vq_free_cnt;
+ need = slots - vq->free_cnt;
/* Positive value indicates it needs free vring descriptors */
if (unlikely(need > 0)) {
virtio_xmit_cleanup_packed(vq, need, in_order);
- need = slots - vq->vq_free_cnt;
+ need = slots - vq->free_cnt;
if (unlikely(need > 0)) {
PMD_TX_LOG(ERR,
"No free tx descriptors to transmit");
@@ -2077,7 +2077,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb(hw->weak_barriers);
- if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
+ if (likely(nb_used > vq->nentries - vq->free_thresh))
virtio_xmit_cleanup(vq, nb_used);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
@@ -2113,7 +2113,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* default => number of segments + 1
*/
slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
- need = slots - vq->vq_free_cnt;
+ need = slots - vq->free_cnt;
/* Positive value indicates it needs free vring descriptors */
if (unlikely(need > 0)) {
@@ -2122,7 +2122,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
need = RTE_MIN(need, (int)nb_used);
virtio_xmit_cleanup(vq, need);
- need = slots - vq->vq_free_cnt;
+ need = slots - vq->free_cnt;
if (unlikely(need > 0)) {
PMD_TX_LOG(ERR,
"No free tx descriptors to transmit");
@@ -2175,13 +2175,13 @@ virtio_xmit_pkts_inorder(void *tx_queue,
nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb(hw->weak_barriers);
- if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
+ if (likely(nb_used > vq->nentries - vq->free_thresh))
virtio_xmit_cleanup_inorder(vq, nb_used);
- if (unlikely(!vq->vq_free_cnt))
+ if (unlikely(!vq->free_cnt))
virtio_xmit_cleanup_inorder(vq, nb_used);
- nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
+ nb_avail = RTE_MIN(vq->free_cnt, nb_pkts);
for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
struct rte_mbuf *txm = tx_pkts[nb_tx];
@@ -2219,7 +2219,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
}
slots = txm->nb_segs + 1;
- need = slots - vq->vq_free_cnt;
+ need = slots - vq->free_cnt;
if (unlikely(need > 0)) {
nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb(hw->weak_barriers);
@@ -2227,7 +2227,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
virtio_xmit_cleanup_inorder(vq, need);
- need = slots - vq->vq_free_cnt;
+ need = slots - vq->free_cnt;
if (unlikely(need > 0)) {
PMD_TX_LOG(ERR,
@@ -25,9 +25,9 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
int ret;
struct virtqueue *vq = rxvq->vq;
- desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
+ desc_idx = vq->avail_idx & (vq->nentries - 1);
sw_ring = &vq->sw_ring[desc_idx];
- start_dp = &vq->vq_split.ring.desc[desc_idx];
+ start_dp = &vq->split.ring.desc[desc_idx];
ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
RTE_VIRTIO_VPMD_RX_REARM_THRESH);
@@ -50,8 +50,8 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
}
- vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
- vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ vq->avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ vq->free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
vq_update_avail_idx(vq);
}
@@ -92,14 +92,14 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
nb_used = RTE_MIN(nb_used, nb_pkts);
- desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- rused = &vq->vq_split.ring.used->ring[desc_idx];
+ desc_idx = (uint16_t)(vq->used_cons_idx & (vq->nentries - 1));
+ rused = &vq->split.ring.used->ring[desc_idx];
sw_ring = &vq->sw_ring[desc_idx];
- sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+ sw_ring_end = &vq->sw_ring[vq->nentries];
rte_prefetch_non_temporal(rused);
- if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ if (vq->free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxvq);
if (unlikely(virtqueue_kick_prepare(vq)))
virtqueue_notify(vq);
@@ -201,8 +201,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
}
}
- vq->vq_used_cons_idx += nb_pkts_received;
- vq->vq_free_cnt += nb_pkts_received;
+ vq->used_cons_idx += nb_pkts_received;
+ vq->free_cnt += nb_pkts_received;
rxvq->stats.packets += nb_pkts_received;
return nb_pkts_received;
}
@@ -94,14 +94,14 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
nb_used = RTE_MIN(nb_used, nb_pkts);
- desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- rused = &vq->vq_split.ring.used->ring[desc_idx];
+ desc_idx = (uint16_t)(vq->used_cons_idx & (vq->nentries - 1));
+ rused = &vq->split.ring.used->ring[desc_idx];
sw_ring = &vq->sw_ring[desc_idx];
- sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+ sw_ring_end = &vq->sw_ring[vq->nentries];
rte_prefetch0(rused);
- if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ if (vq->free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxvq);
if (unlikely(virtqueue_kick_prepare(vq)))
virtqueue_notify(vq);
@@ -187,8 +187,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
}
}
- vq->vq_used_cons_idx += nb_pkts_received;
- vq->vq_free_cnt += nb_pkts_received;
+ vq->used_cons_idx += nb_pkts_received;
+ vq->free_cnt += nb_pkts_received;
rxvq->stats.packets += nb_pkts_received;
return nb_pkts_received;
}
@@ -275,7 +275,7 @@ static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
struct virtio_user_dev *dev)
{
- uint16_t queue_idx = vq->vq_queue_index;
+ uint16_t queue_idx = vq->queue_index;
struct vring_packed *vring;
uint64_t desc_addr;
uint64_t avail_addr;
@@ -283,13 +283,13 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
uint16_t i;
vring = &dev->packed_vrings[queue_idx];
- desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
- avail_addr = desc_addr + vq->vq_nentries *
+ desc_addr = (uintptr_t)vq->ring_virt_mem;
+ avail_addr = desc_addr + vq->nentries *
sizeof(struct vring_packed_desc);
used_addr = RTE_ALIGN_CEIL(avail_addr +
sizeof(struct vring_packed_desc_event),
VIRTIO_PCI_VRING_ALIGN);
- vring->num = vq->vq_nentries;
+ vring->num = vq->nentries;
vring->desc = (void *)(uintptr_t)desc_addr;
vring->driver = (void *)(uintptr_t)avail_addr;
vring->device = (void *)(uintptr_t)used_addr;
@@ -303,16 +303,16 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
static void
virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
- uint16_t queue_idx = vq->vq_queue_index;
+ uint16_t queue_idx = vq->queue_index;
uint64_t desc_addr, avail_addr, used_addr;
- desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
- avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ desc_addr = (uintptr_t)vq->ring_virt_mem;
+ avail_addr = desc_addr + vq->nentries * sizeof(struct vring_desc);
used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
- ring[vq->vq_nentries]),
+ ring[vq->nentries]),
VIRTIO_PCI_VRING_ALIGN);
- dev->vrings[queue_idx].num = vq->vq_nentries;
+ dev->vrings[queue_idx].num = vq->nentries;
dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
@@ -345,8 +345,8 @@ virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
*/
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
- close(dev->callfds[vq->vq_queue_index]);
- close(dev->kickfds[vq->vq_queue_index]);
+ close(dev->callfds[vq->queue_index]);
+ close(dev->kickfds[vq->queue_index]);
}
static void
@@ -357,13 +357,13 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
if (hw->cvq && (hw->cvq->vq == vq)) {
if (vtpci_packed_queue(vq->hw))
- virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
+ virtio_user_handle_cq_packed(dev, vq->queue_index);
else
- virtio_user_handle_cq(dev, vq->vq_queue_index);
+ virtio_user_handle_cq(dev, vq->queue_index);
return;
}
- if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
+ if (write(dev->kickfds[vq->queue_index], &buf, sizeof(buf)) < 0)
PMD_DRV_LOG(ERR, "failed to kick backend: %s",
strerror(errno));
}
@@ -27,11 +27,11 @@ virtqueue_detach_unused(struct virtqueue *vq)
return NULL;
hw = vq->hw;
- type = virtio_get_queue_type(hw, vq->vq_queue_index);
- start = vq->vq_avail_idx & (vq->vq_nentries - 1);
- end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
+ type = virtio_get_queue_type(hw, vq->queue_index);
+ start = vq->avail_idx & (vq->nentries - 1);
+ end = (vq->avail_idx + vq->free_cnt) & (vq->nentries - 1);
- for (idx = 0; idx < vq->vq_nentries; idx++) {
+ for (idx = 0; idx < vq->nentries; idx++) {
if (hw->use_simple_rx && type == VTNET_RQ) {
if (start <= end && idx >= start && idx < end)
continue;
@@ -43,9 +43,9 @@ virtqueue_detach_unused(struct virtqueue *vq)
return cookie;
}
} else {
- cookie = vq->vq_descx[idx].cookie;
+ cookie = vq->descx[idx].cookie;
if (cookie != NULL) {
- vq->vq_descx[idx].cookie = NULL;
+ vq->descx[idx].cookie = NULL;
return cookie;
}
}
@@ -61,23 +61,23 @@ virtqueue_rxvq_flush_packed(struct virtqueue *vq)
struct vq_desc_extra *dxp;
uint16_t i;
- struct vring_packed_desc *descs = vq->vq_packed.ring.desc;
+ struct vring_packed_desc *descs = vq->packed.ring.desc;
int cnt = 0;
- i = vq->vq_used_cons_idx;
- while (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
- dxp = &vq->vq_descx[descs[i].id];
+ i = vq->used_cons_idx;
+ while (desc_is_used(&descs[i], vq) && cnt++ < vq->nentries) {
+ dxp = &vq->descx[descs[i].id];
if (dxp->cookie != NULL) {
rte_pktmbuf_free(dxp->cookie);
dxp->cookie = NULL;
}
- vq->vq_free_cnt++;
- vq->vq_used_cons_idx++;
- if (vq->vq_used_cons_idx >= vq->vq_nentries) {
- vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->vq_packed.used_wrap_counter ^= 1;
+ vq->free_cnt++;
+ vq->used_cons_idx++;
+ if (vq->used_cons_idx >= vq->nentries) {
+ vq->used_cons_idx -= vq->nentries;
+ vq->packed.used_wrap_counter ^= 1;
}
- i = vq->vq_used_cons_idx;
+ i = vq->used_cons_idx;
}
}
@@ -95,15 +95,15 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
nb_used = VIRTQUEUE_NUSED(vq);
for (i = 0; i < nb_used; i++) {
- used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
- uep = &vq->vq_split.ring.used->ring[used_idx];
+ used_idx = vq->used_cons_idx & (vq->nentries - 1);
+ uep = &vq->split.ring.used->ring[used_idx];
if (hw->use_simple_rx) {
desc_idx = used_idx;
rte_pktmbuf_free(vq->sw_ring[desc_idx]);
- vq->vq_free_cnt++;
+ vq->free_cnt++;
} else if (hw->use_inorder_rx) {
desc_idx = (uint16_t)uep->id;
- dxp = &vq->vq_descx[desc_idx];
+ dxp = &vq->descx[desc_idx];
if (dxp->cookie != NULL) {
rte_pktmbuf_free(dxp->cookie);
dxp->cookie = NULL;
@@ -111,18 +111,18 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
vq_ring_free_inorder(vq, desc_idx, 1);
} else {
desc_idx = (uint16_t)uep->id;
- dxp = &vq->vq_descx[desc_idx];
+ dxp = &vq->descx[desc_idx];
if (dxp->cookie != NULL) {
rte_pktmbuf_free(dxp->cookie);
dxp->cookie = NULL;
}
vq_ring_free_chain(vq, desc_idx);
}
- vq->vq_used_cons_idx++;
+ vq->used_cons_idx++;
}
if (hw->use_simple_rx) {
- while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ while (vq->free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxq);
if (virtqueue_kick_prepare(vq))
virtqueue_notify(vq);
@@ -94,7 +94,7 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
* The maximum virtqueue size is 2^15. Use that value as the end of
* descriptor chain terminator since it will never be a valid index
* in the descriptor table. This is used to verify we are correctly
- * handling vq_free_cnt.
+ * handling free_cnt.
*/
#define VQ_RING_DESC_CHAIN_END 32768
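Because 2^15 is the largest legal ring size, 32768 can never index a real descriptor. A compile-time statement of that relationship would be (not something this patch adds):

_Static_assert(VQ_RING_DESC_CHAIN_END == (1 << 15),
	       "chain terminator must lie past every valid index");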
@@ -195,7 +195,7 @@ struct virtqueue {
struct {
/**< vring keeping desc, used and avail */
struct vring ring;
- } vq_split;
+ } split;
struct {
/**< vring keeping descs and events */
@@ -203,17 +203,17 @@ struct virtqueue {
bool used_wrap_counter;
uint16_t cached_flags; /**< cached flags for descs */
uint16_t event_flags_shadow;
- } vq_packed;
+ } packed;
};
- uint16_t vq_used_cons_idx; /**< last consumed descriptor */
- uint16_t vq_nentries; /**< vring desc numbers */
- uint16_t vq_free_cnt; /**< num of desc available */
- uint16_t vq_avail_idx; /**< sync until needed */
- uint16_t vq_free_thresh; /**< free threshold */
+ uint16_t used_cons_idx; /**< last consumed descriptor */
+ uint16_t nentries; /**< vring desc numbers */
+ uint16_t free_cnt; /**< num of desc available */
+ uint16_t avail_idx; /**< sync until needed */
+ uint16_t free_thresh; /**< free threshold */
- void *vq_ring_virt_mem; /**< linear address of vring*/
- unsigned int vq_ring_size;
+ void *ring_virt_mem; /**< linear address of vring*/
+ unsigned int ring_size;
union {
struct virtnet_rx rxq;
@@ -221,7 +221,7 @@ struct virtqueue {
struct virtnet_ctl cq;
};
- rte_iova_t vq_ring_mem; /**< physical address of vring,
+ rte_iova_t ring_mem; /**< physical address of vring,
* or virtual address for virtio_user. */
/**
@@ -229,13 +229,13 @@ struct virtqueue {
* there are no free descriptors, this will be set to
* VQ_RING_DESC_CHAIN_END.
*/
- uint16_t vq_desc_head_idx;
- uint16_t vq_desc_tail_idx;
- uint16_t vq_queue_index; /**< PCI queue index */
+ uint16_t desc_head_idx;
+ uint16_t desc_tail_idx;
+ uint16_t queue_index; /**< PCI queue index */
uint16_t offset; /**< relative offset to obtain addr in mbuf */
uint16_t *notify_addr;
struct rte_mbuf **sw_ring; /**< RX software ring. */
- struct vq_desc_extra vq_descx[0];
+ struct vq_desc_extra descx[0];
};
/* If multiqueue is provided by host, then we support it. */
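The descx[0] member at the tail of the struct above is the zero-length-array form of a flexible array: the per-descriptor bookkeeping lives in the same allocation as the virtqueue itself. A sizing sketch (the real init code also reserves headers and the sw_ring; vq_size as in virtio_init_queue() earlier):

size_t sz = sizeof(struct virtqueue) +
	    (size_t)vq_size * sizeof(struct vq_desc_extra);
struct virtqueue *vq = rte_zmalloc("vq_sketch", sz, RTE_CACHE_LINE_SIZE);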
@@ -290,7 +290,7 @@ desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
used = !!(flags & VRING_DESC_F_USED(1));
avail = !!(flags & VRING_DESC_F_AVAIL(1));
- return avail == used && used == vq->vq_packed.used_wrap_counter;
+ return avail == used && used == vq->packed.used_wrap_counter;
}
static inline void
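desc_is_used() above is the primitive every packed-ring consume loop in this patch rests on: a slot is finished once its AVAIL and USED bits agree with each other and with the driver's wrap counter. The dequeue and cleanup paths earlier all reduce to this shape:

/* Sketch of a packed-ring consume loop with the renamed fields. */
while (desc_is_used(&vq->packed.ring.desc[vq->used_cons_idx], vq)) {
	/* ... reclaim vq->descx[id], vq->free_cnt++, etc. ... */
	if (++vq->used_cons_idx >= vq->nentries) {
		vq->used_cons_idx -= vq->nentries;
		vq->packed.used_wrap_counter ^= 1;
	}
}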
@@ -298,11 +298,11 @@ vring_desc_init_packed(struct virtqueue *vq, int n)
{
int i;
for (i = 0; i < n - 1; i++) {
- vq->vq_packed.ring.desc[i].id = i;
- vq->vq_descx[i].next = i + 1;
+ vq->packed.ring.desc[i].id = i;
+ vq->descx[i].next = i + 1;
}
- vq->vq_packed.ring.desc[i].id = i;
- vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
+ vq->packed.ring.desc[i].id = i;
+ vq->descx[i].next = VQ_RING_DESC_CHAIN_END;
}
/* Chain all the descriptors in the ring with an END */
@@ -322,10 +322,10 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n)
static inline void
virtqueue_disable_intr_packed(struct virtqueue *vq)
{
- if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
- vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
- vq->vq_packed.ring.driver->desc_event_flags =
- vq->vq_packed.event_flags_shadow;
+ if (vq->packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
+ vq->packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
+ vq->packed.ring.driver->desc_event_flags =
+ vq->packed.event_flags_shadow;
}
}
@@ -335,7 +335,7 @@ virtqueue_disable_intr_packed(struct virtqueue *vq)
static inline void
virtqueue_disable_intr_split(struct virtqueue *vq)
{
- vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ vq->split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
/**
@@ -356,10 +356,10 @@ virtqueue_disable_intr(struct virtqueue *vq)
static inline void
virtqueue_enable_intr_packed(struct virtqueue *vq)
{
- if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
- vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
- vq->vq_packed.ring.driver->desc_event_flags =
- vq->vq_packed.event_flags_shadow;
+ if (vq->packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
+ vq->packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
+ vq->packed.ring.driver->desc_event_flags =
+ vq->packed.event_flags_shadow;
}
}
@@ -369,7 +369,7 @@ virtqueue_enable_intr_packed(struct virtqueue *vq)
static inline void
virtqueue_enable_intr_split(struct virtqueue *vq)
{
- vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+ vq->split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
}
/**
@@ -399,7 +399,7 @@ void virtqueue_rxvq_flush(struct virtqueue *vq);
static inline int
virtqueue_full(const struct virtqueue *vq)
{
- return vq->vq_free_cnt == 0;
+ return vq->free_cnt == 0;
}
static inline int
@@ -413,8 +413,8 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
return VTNET_TQ;
}
-#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_split.ring.used->idx - \
- (vq)->vq_used_cons_idx))
+#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->split.ring.used->idx - \
+ (vq)->used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
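Both indexes in the VIRTQUEUE_NUSED macro above are free-running 16-bit counters, so the subtraction survives wraparound as long as a ring never exceeds 2^15 entries. For instance:

uint16_t used_idx = 2, cons_idx = 65534;          /* idx has wrapped */
uint16_t nused = (uint16_t)(used_idx - cons_idx); /* == 4 pending    */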
@@ -425,7 +425,7 @@ static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
virtio_wmb(vq->hw->weak_barriers);
- vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+ vq->split.ring.avail->idx = vq->avail_idx;
}
static inline void
@@ -439,10 +439,10 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
* currently running on another CPU, we can keep it processing the new
* descriptor.
*/
- avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
- if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
- vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
- vq->vq_avail_idx++;
+ avail_idx = (uint16_t)(vq->avail_idx & (vq->nentries - 1));
+ if (unlikely(vq->split.ring.avail->ring[avail_idx] != desc_idx))
+ vq->split.ring.avail->ring[avail_idx] = desc_idx;
+ vq->avail_idx++;
}
static inline int
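Masking with (nentries - 1) in vq_update_avail_ring() above, and throughout the split-ring paths, relies on ring sizes being powers of two, making the AND a cheap modulo over a counter that is left free-running:

uint16_t avail_idx = 257;               /* hypothetical 256-entry ring */
uint16_t slot = avail_idx & (256 - 1);  /* == 1, same as 257 % 256     */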
@@ -453,7 +453,7 @@ virtqueue_kick_prepare(struct virtqueue *vq)
* the used->flags.
*/
virtio_mb(vq->hw->weak_barriers);
- return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
+ return !(vq->split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
static inline int
@@ -465,7 +465,7 @@ virtqueue_kick_prepare_packed(struct virtqueue *vq)
* Ensure updated data is visible to vhost before reading the flags.
*/
virtio_mb(vq->hw->weak_barriers);
- flags = vq->vq_packed.ring.device->desc_event_flags;
+ flags = vq->packed.ring.device->desc_event_flags;
return flags != RING_EVENT_FLAGS_DISABLE;
}
@@ -483,25 +483,25 @@ virtqueue_notify(struct virtqueue *vq)
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
- used_idx = (vq)->vq_split.ring.used->idx; \
- nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ used_idx = (vq)->split.ring.used->idx; \
+ nused = (uint16_t)(used_idx - (vq)->used_cons_idx); \
if (vtpci_packed_queue((vq)->hw)) { \
PMD_INIT_LOG(DEBUG, \
"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
" cached_flags=0x%x; used_wrap_counter=%d", \
- (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
- (vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
- (vq)->vq_packed.used_wrap_counter); \
+ (vq)->nentries, (vq)->free_cnt, (vq)->used_cons_idx, \
+ (vq)->avail_idx, (vq)->packed.cached_flags, \
+ (vq)->packed.used_wrap_counter); \
break; \
} \
PMD_INIT_LOG(DEBUG, \
"VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
" avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
" avail.flags=0x%x; used.flags=0x%x", \
- (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
- (vq)->vq_desc_head_idx, (vq)->vq_split.ring.avail->idx, \
- (vq)->vq_used_cons_idx, (vq)->vq_split.ring.used->idx, \
- (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
+ (vq)->nentries, (vq)->free_cnt, nused, \
+ (vq)->desc_head_idx, (vq)->split.ring.avail->idx, \
+ (vq)->used_cons_idx, (vq)->split.ring.used->idx, \
+ (vq)->split.ring.avail->flags, (vq)->split.ring.used->flags); \
} while (0)
#else
#define VIRTQUEUE_DUMP(vq) do { } while (0)