[v9,6/8] net/virtio: implement receive path for packed queues
Checks
Commit Message
Implement the receive part.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 18 +-
drivers/net/virtio/virtio_ethdev.h | 2 +
drivers/net/virtio/virtio_rxtx.c | 280 ++++++++++++++++++++++++++---
drivers/net/virtio/virtqueue.c | 22 +++
drivers/net/virtio/virtqueue.h | 2 +-
5 files changed, 297 insertions(+), 27 deletions(-)
Comments
On Wed, Oct 24, 2018 at 04:32:34PM +0200, Jens Freimann wrote:
> Implement the receive part.
>
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
> drivers/net/virtio/virtio_ethdev.c | 18 +-
> drivers/net/virtio/virtio_ethdev.h | 2 +
> drivers/net/virtio/virtio_rxtx.c | 280 ++++++++++++++++++++++++++---
> drivers/net/virtio/virtqueue.c | 22 +++
> drivers/net/virtio/virtqueue.h | 2 +-
> 5 files changed, 297 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index d2118e6a9..7f81d24aa 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -384,8 +384,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
> vq->hw = hw;
> vq->vq_queue_index = vtpci_queue_idx;
> vq->vq_nentries = vq_size;
> - if (vtpci_packed_queue(hw))
> + if (vtpci_packed_queue(hw)) {
> vq->vq_ring.avail_wrap_counter = 1;
> + vq->vq_ring.used_wrap_counter = 1;
Why is used_wrap_counter only being initialized now, in the receive path patch?
> + }
>
> /*
> * Reserve a memzone for vring elements
> @@ -1326,7 +1328,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
> {
> struct virtio_hw *hw = eth_dev->data->dev_private;
>
> - if (hw->use_simple_rx) {
> + if (vtpci_packed_queue(hw)) {
> + if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
> + eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
> + } else {
> + eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
The indentation is wrong.
> + }
> + } else if (hw->use_simple_rx) {
Please make the structure more clear, e.g.
if (vtpci_packed_queue(hw)) {
// packed ring
} else {
// split ring
}
> PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
> eth_dev->data->port_id);
> eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
> @@ -1490,7 +1498,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>
> /* Setting up rx_header size for the device */
> if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
> + vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
> + vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
> else
> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
> @@ -1918,7 +1927,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>
> rte_spinlock_init(&hw->state_lock);
>
> - hw->use_simple_rx = 1;
> + if (!vtpci_packed_queue(hw))
> + hw->use_simple_rx = 1;
>
> if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
> hw->use_inorder_tx = 1;
> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> index 05d355180..6c9247639 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -73,6 +73,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>
> uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
> +uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts);
>
> uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 837457243..42702527c 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -31,6 +31,7 @@
> #include "virtqueue.h"
> #include "virtio_rxtx.h"
> #include "virtio_rxtx_simple.h"
> +#include "virtio_ring.h"
>
> #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
> #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
> @@ -89,7 +90,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
> }
>
> void
> -vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
> +vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, uint16_t used_idx)
> {
> struct vring_desc_packed *dp;
> struct vq_desc_extra *dxp = NULL, *dxp_tail = NULL;
> @@ -102,7 +103,6 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
> if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
> while (dp->flags & VRING_DESC_F_NEXT && i < dxp->ndescs) {
> desc_idx_last = dxp->next;
> - dp = &vq->vq_ring.desc_packed[dxp->next];
Why was the above line added before this patch?
> dxp = &vq->vq_descx[dxp->next];
> i++;
> }
> @@ -124,6 +124,47 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
> dxp->next = VQ_RING_DESC_CHAIN_END;
> }
>
> +static uint16_t
> +virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
> + struct rte_mbuf **rx_pkts,
> + uint32_t *len,
> + uint16_t num)
> +{
> + struct rte_mbuf *cookie;
> + uint16_t used_idx;
> + uint16_t id;
> + struct vring_desc_packed *desc;
> + uint16_t i;
> +
> + for (i = 0; i < num; i++) {
> + used_idx = vq->vq_used_cons_idx;
> + desc = (struct vring_desc_packed *) vq->vq_ring.desc_packed;
> + if (!desc_is_used(&desc[used_idx], &vq->vq_ring))
> + return i;
> + len[i] = desc[used_idx].len;
> + id = desc[used_idx].index;
> + cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
> +
> + if (unlikely(cookie == NULL)) {
> + PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
> + vq->vq_used_cons_idx);
> + break;
> + }
> + rte_prefetch0(cookie);
> + rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
> + rx_pkts[i] = cookie;
> + vq_ring_free_chain_packed(vq, id, used_idx);
> +
> + vq->vq_used_cons_idx += vq->vq_descx[id].ndescs;
> + if (vq->vq_used_cons_idx >= vq->vq_nentries) {
> + vq->vq_used_cons_idx -= vq->vq_nentries;
> + vq->vq_ring.used_wrap_counter ^= 1;
> + }
> + }
> +
> + return i;
> +}
> +
> static uint16_t
> virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
> uint32_t *len, uint16_t num)
> @@ -369,6 +410,55 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
> return 0;
> }
>
> +static inline int
> +virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq, struct rte_mbuf *cookie)
> +{
> + struct vq_desc_extra *dxp;
> + struct virtio_hw *hw = vq->hw;
> + struct vring_desc_packed *start_dp;
> + uint16_t needed = 1;
> + uint16_t head_idx, idx;
> + uint16_t flags;
> +
> + if (unlikely(vq->vq_free_cnt == 0))
> + return -ENOSPC;
> + if (unlikely(vq->vq_free_cnt < needed))
> + return -EMSGSIZE;
> +
> + head_idx = vq->vq_desc_head_idx;
> + if (unlikely(head_idx >= vq->vq_nentries))
> + return -EFAULT;
> +
> + idx = head_idx;
> + dxp = &vq->vq_descx[idx];
> + dxp->cookie = (void *)cookie;
> + dxp->ndescs = needed;
> +
> + start_dp = vq->vq_ring.desc_packed;
> + start_dp[idx].addr =
> + VIRTIO_MBUF_ADDR(cookie, vq) +
> + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> + start_dp[idx].len =
> + cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
> + flags = VRING_DESC_F_WRITE;
> + flags |= VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
> + VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
> + rte_smp_wmb();
> + start_dp[idx].flags = flags;
> + idx = dxp->next;
We should always use the descriptors in the packed ring in
ring order (we have to use the elements in vq_descx[] out
of order if the device processes descriptors out of order).
> + vq->vq_desc_head_idx = idx;
> + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
> + vq->vq_desc_tail_idx = idx;
> + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
> +
> + vq->vq_avail_idx += needed;
> + if (vq->vq_avail_idx >= vq->vq_nentries) {
> + vq->vq_avail_idx = 0;
> + vq->vq_ring.avail_wrap_counter ^= 1;
> + }
> +
> + return 0;
> +}
[...]
> uint16_t
> virtio_recv_mergeable_pkts_inorder(void *rx_queue,
> struct rte_mbuf **rx_pkts,
> @@ -1385,12 +1582,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
> uint16_t extra_idx;
> uint32_t seg_res;
> uint32_t hdr_size;
> + uint32_t rx_num = 0;
>
> nb_rx = 0;
> if (unlikely(hw->started == 0))
> return nb_rx;
>
> - nb_used = VIRTQUEUE_NUSED(vq);
> + if (vtpci_packed_queue(vq->hw)) {
> + if (!desc_is_used(&vq->vq_ring.desc_packed[vq->vq_used_cons_idx],
> + &vq->vq_ring))
> + return 0;
> + nb_used = VIRTIO_MBUF_BURST_SZ;
> + } else {
> + nb_used = VIRTQUEUE_NUSED(vq);
> + }
>
> virtio_rmb();
>
> @@ -1403,13 +1608,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
> seg_res = 0;
> hdr_size = hw->vtnet_hdr_size;
>
> +
No need to add the above empty line.
> while (i < nb_used) {
> struct virtio_net_hdr_mrg_rxbuf *header;
>
> if (nb_rx == nb_pkts)
> break;
>
> - num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
> + if (vtpci_packed_queue(vq->hw))
> + num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
> + len, 1);
> + else
> + num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
> + if (num == 0)
> + return nb_rx;
> if (num != 1)
> continue;
>
[...]
On Thu, Oct 25, 2018 at 05:39:09PM +0800, Tiwei Bie wrote:
>On Wed, Oct 24, 2018 at 04:32:34PM +0200, Jens Freimann wrote:
>> Implement the receive part.
>>
>> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
>> ---
>> drivers/net/virtio/virtio_ethdev.c | 18 +-
>> drivers/net/virtio/virtio_ethdev.h | 2 +
>> drivers/net/virtio/virtio_rxtx.c | 280 ++++++++++++++++++++++++++---
>> drivers/net/virtio/virtqueue.c | 22 +++
>> drivers/net/virtio/virtqueue.h | 2 +-
>> 5 files changed, 297 insertions(+), 27 deletions(-)
>>
>> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
>> index d2118e6a9..7f81d24aa 100644
>> --- a/drivers/net/virtio/virtio_ethdev.c
>> +++ b/drivers/net/virtio/virtio_ethdev.c
>> @@ -384,8 +384,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
>> vq->hw = hw;
>> vq->vq_queue_index = vtpci_queue_idx;
>> vq->vq_nentries = vq_size;
>> - if (vtpci_packed_queue(hw))
>> + if (vtpci_packed_queue(hw)) {
>> vq->vq_ring.avail_wrap_counter = 1;
>> + vq->vq_ring.used_wrap_counter = 1;
>
>Why just use used_wrap_counter in receive path?
You mean add it in a previous patch?
>
>> + }
>>
>> /*
>> * Reserve a memzone for vring elements
>> @@ -1326,7 +1328,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
>> {
>> struct virtio_hw *hw = eth_dev->data->dev_private;
>>
>> - if (hw->use_simple_rx) {
>> + if (vtpci_packed_queue(hw)) {
>> + if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>> + eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
>> + } else {
>> + eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
>
>The indent is wrong.
ok
>
>> + }
>> + } else if (hw->use_simple_rx) {
>
>Please make the structure more clear, e.g.
>
> if (vtpci_packed_queue(hw)) {
> // packed ring
> } else {
> // split ring
> }
ok
>
>> PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
>> eth_dev->data->port_id);
>> eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
>> @@ -1490,7 +1498,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>>
>> /* Setting up rx_header size for the device */
>> if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
>> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
>> + vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
>> + vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
>> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
>> else
>> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
>> @@ -1918,7 +1927,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>>
>> rte_spinlock_init(&hw->state_lock);
>>
>> - hw->use_simple_rx = 1;
>> + if (!vtpci_packed_queue(hw))
>> + hw->use_simple_rx = 1;
>>
>> if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
>> hw->use_inorder_tx = 1;
>> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
>> index 05d355180..6c9247639 100644
>> --- a/drivers/net/virtio/virtio_ethdev.h
>> +++ b/drivers/net/virtio/virtio_ethdev.h
>> @@ -73,6 +73,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>>
>> uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>> uint16_t nb_pkts);
>> +uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
>> + uint16_t nb_pkts);
>>
>> uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>> uint16_t nb_pkts);
>> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
>> index 837457243..42702527c 100644
>> --- a/drivers/net/virtio/virtio_rxtx.c
>> +++ b/drivers/net/virtio/virtio_rxtx.c
>> @@ -31,6 +31,7 @@
>> #include "virtqueue.h"
>> #include "virtio_rxtx.h"
>> #include "virtio_rxtx_simple.h"
>> +#include "virtio_ring.h"
>>
>> #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
>> #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
>> @@ -89,7 +90,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
>> }
>>
>> void
>> -vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
>> +vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, uint16_t used_idx)
>> {
>> struct vring_desc_packed *dp;
>> struct vq_desc_extra *dxp = NULL, *dxp_tail = NULL;
>> @@ -102,7 +103,6 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
>> if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
>> while (dp->flags & VRING_DESC_F_NEXT && i < dxp->ndescs) {
>> desc_idx_last = dxp->next;
>> - dp = &vq->vq_ring.desc_packed[dxp->next];
>
>Why above line was added before this patch?
rebase mistake, will fix.
>
>> dxp = &vq->vq_descx[dxp->next];
>> i++;
>> }
>> @@ -124,6 +124,47 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
>> dxp->next = VQ_RING_DESC_CHAIN_END;
>> }
>>
>> +static uint16_t
>> +virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
>> + struct rte_mbuf **rx_pkts,
>> + uint32_t *len,
>> + uint16_t num)
>> +{
>> + struct rte_mbuf *cookie;
>> + uint16_t used_idx;
>> + uint16_t id;
>> + struct vring_desc_packed *desc;
>> + uint16_t i;
>> +
>> + for (i = 0; i < num; i++) {
>> + used_idx = vq->vq_used_cons_idx;
>> + desc = (struct vring_desc_packed *) vq->vq_ring.desc_packed;
>> + if (!desc_is_used(&desc[used_idx], &vq->vq_ring))
>> + return i;
>> + len[i] = desc[used_idx].len;
>> + id = desc[used_idx].index;
>> + cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
>> +
>> + if (unlikely(cookie == NULL)) {
>> + PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
>> + vq->vq_used_cons_idx);
>> + break;
>> + }
>> + rte_prefetch0(cookie);
>> + rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
>> + rx_pkts[i] = cookie;
>> + vq_ring_free_chain_packed(vq, id, used_idx);
>> +
>> + vq->vq_used_cons_idx += vq->vq_descx[id].ndescs;
>> + if (vq->vq_used_cons_idx >= vq->vq_nentries) {
>> + vq->vq_used_cons_idx -= vq->vq_nentries;
>> + vq->vq_ring.used_wrap_counter ^= 1;
>> + }
>> + }
>> +
>> + return i;
>> +}
>> +
>> static uint16_t
>> virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
>> uint32_t *len, uint16_t num)
>> @@ -369,6 +410,55 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
>> return 0;
>> }
>>
>> +static inline int
>> +virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq, struct rte_mbuf *cookie)
>> +{
>> + struct vq_desc_extra *dxp;
>> + struct virtio_hw *hw = vq->hw;
>> + struct vring_desc_packed *start_dp;
>> + uint16_t needed = 1;
>> + uint16_t head_idx, idx;
>> + uint16_t flags;
>> +
>> + if (unlikely(vq->vq_free_cnt == 0))
>> + return -ENOSPC;
>> + if (unlikely(vq->vq_free_cnt < needed))
>> + return -EMSGSIZE;
>> +
>> + head_idx = vq->vq_desc_head_idx;
>> + if (unlikely(head_idx >= vq->vq_nentries))
>> + return -EFAULT;
>> +
>> + idx = head_idx;
>> + dxp = &vq->vq_descx[idx];
>> + dxp->cookie = (void *)cookie;
>> + dxp->ndescs = needed;
>> +
>> + start_dp = vq->vq_ring.desc_packed;
>> + start_dp[idx].addr =
>> + VIRTIO_MBUF_ADDR(cookie, vq) +
>> + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
>> + start_dp[idx].len =
>> + cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
>> + flags = VRING_DESC_F_WRITE;
>> + flags |= VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
>> + VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
>> + rte_smp_wmb();
>> + start_dp[idx].flags = flags;
>> + idx = dxp->next;
>
>We should always use the descriptors in packed ring in the
>ring order (we have to use the elements in vq_descx[] out
>of order if device processes descriptors out of order).
Yes, I think you're right. I'll try to fix it.
>
>> + vq->vq_desc_head_idx = idx;
>> + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
>> + vq->vq_desc_tail_idx = idx;
>> + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
>> +
>> + vq->vq_avail_idx += needed;
>> + if (vq->vq_avail_idx >= vq->vq_nentries) {
>> + vq->vq_avail_idx = 0;
>> + vq->vq_ring.avail_wrap_counter ^= 1;
>> + }
>> +
>> + return 0;
>> +}
>[...]
>> uint16_t
>> virtio_recv_mergeable_pkts_inorder(void *rx_queue,
>> struct rte_mbuf **rx_pkts,
>> @@ -1385,12 +1582,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>> uint16_t extra_idx;
>> uint32_t seg_res;
>> uint32_t hdr_size;
>> + uint32_t rx_num = 0;
>>
>> nb_rx = 0;
>> if (unlikely(hw->started == 0))
>> return nb_rx;
>>
>> - nb_used = VIRTQUEUE_NUSED(vq);
>> + if (vtpci_packed_queue(vq->hw)) {
>> + if (!desc_is_used(&vq->vq_ring.desc_packed[vq->vq_used_cons_idx],
>> + &vq->vq_ring))
>> + return 0;
>> + nb_used = VIRTIO_MBUF_BURST_SZ;
>> + } else {
>> + nb_used = VIRTQUEUE_NUSED(vq);
>> + }
>>
>> virtio_rmb();
>>
>> @@ -1403,13 +1608,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>> seg_res = 0;
>> hdr_size = hw->vtnet_hdr_size;
>>
>> +
>
>No need to add above empty line.
ok
thanks for the review!
regards,
Jens
>
>> while (i < nb_used) {
>> struct virtio_net_hdr_mrg_rxbuf *header;
>>
>> if (nb_rx == nb_pkts)
>> break;
>>
>> - num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
>> + if (vtpci_packed_queue(vq->hw))
>> + num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
>> + len, 1);
>> + else
>> + num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
>> + if (num == 0)
>> + return nb_rx;
>> if (num != 1)
>> continue;
>>
>[...]
On Thu, Oct 25, 2018 at 03:54:16PM +0200, Jens Freimann wrote:
> On Thu, Oct 25, 2018 at 05:39:09PM +0800, Tiwei Bie wrote:
> > On Wed, Oct 24, 2018 at 04:32:34PM +0200, Jens Freimann wrote:
> > > Implement the receive part.
> > >
> > > Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> > > ---
> > > drivers/net/virtio/virtio_ethdev.c | 18 +-
> > > drivers/net/virtio/virtio_ethdev.h | 2 +
> > > drivers/net/virtio/virtio_rxtx.c | 280 ++++++++++++++++++++++++++---
> > > drivers/net/virtio/virtqueue.c | 22 +++
> > > drivers/net/virtio/virtqueue.h | 2 +-
> > > 5 files changed, 297 insertions(+), 27 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> > > index d2118e6a9..7f81d24aa 100644
> > > --- a/drivers/net/virtio/virtio_ethdev.c
> > > +++ b/drivers/net/virtio/virtio_ethdev.c
> > > @@ -384,8 +384,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
> > > vq->hw = hw;
> > > vq->vq_queue_index = vtpci_queue_idx;
> > > vq->vq_nentries = vq_size;
> > > - if (vtpci_packed_queue(hw))
> > > + if (vtpci_packed_queue(hw)) {
> > > vq->vq_ring.avail_wrap_counter = 1;
> > > + vq->vq_ring.used_wrap_counter = 1;
> >
> > Why just use used_wrap_counter in receive path?
>
> You mean add it in a previous patch?
>
Hmm, the used_wrap_counter is already used in the previous
patch, but it isn't initialized until the receive path
is added. We should have a patch that introduces these packed
ring related generic fields (e.g. the wrap counters and
vring) in struct virtqueue and also does the corresponding
initializations in that same patch (maybe we can do this
in the current 1/8 patch).
On Fri, Oct 26, 2018 at 01:43:45PM +0800, Tiwei Bie wrote:
>On Thu, Oct 25, 2018 at 03:54:16PM +0200, Jens Freimann wrote:
>> On Thu, Oct 25, 2018 at 05:39:09PM +0800, Tiwei Bie wrote:
>> > On Wed, Oct 24, 2018 at 04:32:34PM +0200, Jens Freimann wrote:
>> > > Implement the receive part.
>> > >
>> > > Signed-off-by: Jens Freimann <jfreimann@redhat.com>
>> > > ---
>> > > drivers/net/virtio/virtio_ethdev.c | 18 +-
>> > > drivers/net/virtio/virtio_ethdev.h | 2 +
>> > > drivers/net/virtio/virtio_rxtx.c | 280 ++++++++++++++++++++++++++---
>> > > drivers/net/virtio/virtqueue.c | 22 +++
>> > > drivers/net/virtio/virtqueue.h | 2 +-
>> > > 5 files changed, 297 insertions(+), 27 deletions(-)
>> > >
>> > > diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
>> > > index d2118e6a9..7f81d24aa 100644
>> > > --- a/drivers/net/virtio/virtio_ethdev.c
>> > > +++ b/drivers/net/virtio/virtio_ethdev.c
>> > > @@ -384,8 +384,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
>> > > vq->hw = hw;
>> > > vq->vq_queue_index = vtpci_queue_idx;
>> > > vq->vq_nentries = vq_size;
>> > > - if (vtpci_packed_queue(hw))
>> > > + if (vtpci_packed_queue(hw)) {
>> > > vq->vq_ring.avail_wrap_counter = 1;
>> > > + vq->vq_ring.used_wrap_counter = 1;
>> >
>> > Why just use used_wrap_counter in receive path?
>>
>> You mean add it in a previous patch?
>>
>
>Hmm, the used_wrap_counter is already used in the previous
>patch, but it wasn't initialized until the receive path
>is added. We should have a patch to introduce these packed
>ring related generic fields (e.g. the wrap counters and
>vring) in struct virtqueue and also do the corresponding
>initializations in that patch too (maybe we can do this
>in the current 1/8 patch).
sounds good, will do it like this.
regards,
Jens
@@ -384,8 +384,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
vq->hw = hw;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
- if (vtpci_packed_queue(hw))
+ if (vtpci_packed_queue(hw)) {
vq->vq_ring.avail_wrap_counter = 1;
+ vq->vq_ring.used_wrap_counter = 1;
+ }
/*
* Reserve a memzone for vring elements
@@ -1326,7 +1328,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- if (hw->use_simple_rx) {
+ if (vtpci_packed_queue(hw)) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ } else {
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+ }
+ } else if (hw->use_simple_rx) {
PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
@@ -1490,7 +1498,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
@@ -1918,7 +1927,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
rte_spinlock_init(&hw->state_lock);
- hw->use_simple_rx = 1;
+ if (!vtpci_packed_queue(hw))
+ hw->use_simple_rx = 1;
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
hw->use_inorder_tx = 1;
@@ -73,6 +73,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
@@ -31,6 +31,7 @@
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
+#include "virtio_ring.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -89,7 +90,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
}
void
-vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
+vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, uint16_t used_idx)
{
struct vring_desc_packed *dp;
struct vq_desc_extra *dxp = NULL, *dxp_tail = NULL;
@@ -102,7 +103,6 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
while (dp->flags & VRING_DESC_F_NEXT && i < dxp->ndescs) {
desc_idx_last = dxp->next;
- dp = &vq->vq_ring.desc_packed[dxp->next];
dxp = &vq->vq_descx[dxp->next];
i++;
}
@@ -124,6 +124,47 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
dxp->next = VQ_RING_DESC_CHAIN_END;
}
+static uint16_t
+virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t *len,
+ uint16_t num)
+{
+ struct rte_mbuf *cookie;
+ uint16_t used_idx;
+ uint16_t id;
+ struct vring_desc_packed *desc;
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ used_idx = vq->vq_used_cons_idx;
+ desc = (struct vring_desc_packed *) vq->vq_ring.desc_packed;
+ if (!desc_is_used(&desc[used_idx], &vq->vq_ring))
+ return i;
+ len[i] = desc[used_idx].len;
+ id = desc[used_idx].index;
+ cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
+
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+ vq_ring_free_chain_packed(vq, id, used_idx);
+
+ vq->vq_used_cons_idx += vq->vq_descx[id].ndescs;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->vq_ring.used_wrap_counter ^= 1;
+ }
+ }
+
+ return i;
+}
+
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
uint32_t *len, uint16_t num)
@@ -369,6 +410,55 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
return 0;
}
+static inline int
+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq, struct rte_mbuf *cookie)
+{
+ struct vq_desc_extra *dxp;
+ struct virtio_hw *hw = vq->hw;
+ struct vring_desc_packed *start_dp;
+ uint16_t needed = 1;
+ uint16_t head_idx, idx;
+ uint16_t flags;
+
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+
+ head_idx = vq->vq_desc_head_idx;
+ if (unlikely(head_idx >= vq->vq_nentries))
+ return -EFAULT;
+
+ idx = head_idx;
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookie;
+ dxp->ndescs = needed;
+
+ start_dp = vq->vq_ring.desc_packed;
+ start_dp[idx].addr =
+ VIRTIO_MBUF_ADDR(cookie, vq) +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[idx].len =
+ cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+ flags = VRING_DESC_F_WRITE;
+ flags |= VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
+ rte_smp_wmb();
+ start_dp[idx].flags = flags;
+ idx = dxp->next;
+ vq->vq_desc_head_idx = idx;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = idx;
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+
+ vq->vq_avail_idx += needed;
+ if (vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx = 0;
+ vq->vq_ring.avail_wrap_counter ^= 1;
+ }
+
+ return 0;
+}
/* When doing TSO, the IP length is not included in the pseudo header
* checksum of the packet given to the PMD, but for virtio it is
* expected.
@@ -847,7 +937,10 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
break;
/* Enqueue allocated buffers */
- error = virtqueue_enqueue_recv_refill(vq, m);
+ if (vtpci_packed_queue(vq->hw))
+ error = virtqueue_enqueue_recv_refill_packed(vq, m);
+ else
+ error = virtqueue_enqueue_recv_refill(vq, m);
if (error) {
rte_pktmbuf_free(m);
break;
@@ -855,7 +948,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
nbufs++;
}
- vq_update_avail_idx(vq);
+ if (!vtpci_packed_queue(vq->hw))
+ vq_update_avail_idx(vq);
}
PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
@@ -1179,6 +1273,109 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return nb_rx;
}
+uint16_t
+virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm, *new_mbuf;
+ uint16_t nb_used, num, nb_rx;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ int error;
+ uint32_t i, nb_enqueued;
+ uint32_t hdr_size;
+ struct virtio_net_hdr *hdr;
+
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+ nb_used = VIRTIO_MBUF_BURST_SZ;
+
+ virtio_rmb();
+
+ num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
+ if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
+ num = VIRTIO_MBUF_BURST_SZ;
+ if (likely(num > DESC_PER_CACHELINE))
+ num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
+ PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
+
+ nb_enqueued = 0;
+ hdr_size = hw->vtnet_hdr_size;
+
+ for (i = 0; i < num ; i++) {
+ rxm = rcv_pkts[i];
+
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ rxm->port = rxvq->port_id;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
+ RTE_PKTMBUF_HEADROOM - hdr_size);
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rxm);
+
+ if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ virtio_rx_stats_updated(rxvq, rxm);
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+ while (likely(!virtqueue_full(vq))) {
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (unlikely(new_mbuf == NULL)) {
+ struct rte_eth_dev *dev
+ = &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ error = virtqueue_enqueue_recv_refill_packed(vq, new_mbuf);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(new_mbuf);
+ break;
+ }
+ nb_enqueued++;
+ }
+
+ if (likely(nb_enqueued)) {
+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
+
uint16_t
virtio_recv_mergeable_pkts_inorder(void *rx_queue,
struct rte_mbuf **rx_pkts,
@@ -1385,12 +1582,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t extra_idx;
uint32_t seg_res;
uint32_t hdr_size;
+ uint32_t rx_num = 0;
nb_rx = 0;
if (unlikely(hw->started == 0))
return nb_rx;
- nb_used = VIRTQUEUE_NUSED(vq);
+ if (vtpci_packed_queue(vq->hw)) {
+ if (!desc_is_used(&vq->vq_ring.desc_packed[vq->vq_used_cons_idx],
+ &vq->vq_ring))
+ return 0;
+ nb_used = VIRTIO_MBUF_BURST_SZ;
+ } else {
+ nb_used = VIRTQUEUE_NUSED(vq);
+ }
virtio_rmb();
@@ -1403,13 +1608,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
seg_res = 0;
hdr_size = hw->vtnet_hdr_size;
+
while (i < nb_used) {
struct virtio_net_hdr_mrg_rxbuf *header;
if (nb_rx == nb_pkts)
break;
- num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
+ if (vtpci_packed_queue(vq->hw))
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+ len, 1);
+ else
+ num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
+ if (num == 0)
+ return nb_rx;
if (num != 1)
continue;
@@ -1461,20 +1673,37 @@ virtio_recv_mergeable_pkts(void *rx_queue,
*/
uint16_t rcv_cnt =
RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
- if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
- uint32_t rx_num =
- virtqueue_dequeue_burst_rx(vq,
- rcv_pkts, len, rcv_cnt);
- i += rx_num;
- rcv_cnt = rx_num;
+ if (vtpci_packed_queue(vq->hw)) {
+ if (likely(vq->vq_free_cnt >= rcv_cnt)) {
+ if (!desc_is_used(&vq->vq_ring.desc_packed[vq->vq_used_cons_idx],
+ &vq->vq_ring))
+ rx_num = 0;
+ else
+ rx_num = virtqueue_dequeue_burst_rx_packed(vq,
+ rcv_pkts, len, rcv_cnt);
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ break;
+ }
} else {
- PMD_RX_LOG(ERR,
- "No enough segments for packet.");
- nb_enqueued++;
- virtio_discard_rxbuf(vq, rxm);
- rxvq->stats.errors++;
- break;
+ if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ rx_num = virtqueue_dequeue_burst_rx(vq,
+ rcv_pkts, len, rcv_cnt);
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ break;
+ }
}
+ i += rx_num;
+ rcv_cnt = rx_num;
extra_idx = 0;
@@ -1517,7 +1746,10 @@ virtio_recv_mergeable_pkts(void *rx_queue,
dev->data->rx_mbuf_alloc_failed++;
break;
}
- error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
+ if (vtpci_packed_queue(vq->hw))
+ error = virtqueue_enqueue_recv_refill_packed(vq, new_mbuf);
+ else
+ error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
if (unlikely(error)) {
rte_pktmbuf_free(new_mbuf);
break;
@@ -1526,9 +1758,13 @@ virtio_recv_mergeable_pkts(void *rx_queue,
}
if (likely(nb_enqueued)) {
- vq_update_avail_idx(vq);
-
- if (unlikely(virtqueue_kick_prepare(vq))) {
+ if (likely(!vtpci_packed_queue(vq->hw))) {
+ vq_update_avail_idx(vq);
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ } else if (virtqueue_kick_prepare_packed(vq)) {
virtqueue_notify(vq);
PMD_RX_LOG(DEBUG, "Notified");
}
@@ -58,12 +58,34 @@ virtqueue_detach_unused(struct virtqueue *vq)
void
virtqueue_rxvq_flush(struct virtqueue *vq)
{
+ struct vring_desc_packed *descs = vq->vq_ring.desc_packed;
struct virtnet_rx *rxq = &vq->rxq;
struct virtio_hw *hw = vq->hw;
struct vring_used_elem *uep;
struct vq_desc_extra *dxp;
uint16_t used_idx, desc_idx;
uint16_t nb_used, i;
+ uint16_t size = vq->vq_nentries;
+
+ if (vtpci_packed_queue(vq->hw)) {
+ i = vq->vq_used_cons_idx;
+ if (i > size) {
+ PMD_INIT_LOG(ERR, "vq_used_cons_idx out of range, %d", vq->vq_used_cons_idx);
+ return;
+ }
+ while (desc_is_used(&descs[i], &vq->vq_ring) &&
+ i < size) {
+ dxp = &vq->vq_descx[descs[i].index];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq_ring_free_chain_packed(vq, descs[i].index, i);
+ i = dxp->next;
+ vq->vq_used_cons_idx++;
+ }
+ return;
+ }
nb_used = VIRTQUEUE_NUSED(vq);
@@ -323,7 +323,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
-void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, uint16_t used_idx);
void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
uint16_t num);