vhost: fix packed ring dequeue offloading
Checks
Commit Message
When vhost is doing dequeue offloading, it will parse the ethernet and l3/l4
headers of the packet, and then set the corresponding values in mbuf
attributes. This means the offloading action should happen after the packet data copy.
Fixes: 75ed51697820 ("vhost: add packed ring batch dequeue")
Cc: stable@dpdk.org
Signed-off-by: Marvin Liu <yong.liu@intel.com>
Comments
On 2/5/21 8:47 AM, Marvin Liu wrote:
> When vhost doing dequeue offloading, will parse ethernet and l3/l4
> header of the packet. Then vhost will set corresponded value in mbuf
corresponding*
> attributes. Thus mean offloading action should after packet data copy.
It means*
>
> Fixes: 75ed51697820 ("vhost: add packed ring batch dequeue")
> Cc: stable@dpdk.org
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
>
Please check your git patch formatting config, diff stats are missing.
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 730b92e478..0a7d008a91 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -2267,7 +2267,6 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
> {
> bool wrap = vq->avail_wrap_counter;
> struct vring_packed_desc *descs = vq->desc_packed;
> - struct virtio_net_hdr *hdr;
> uint64_t lens[PACKED_BATCH_SIZE];
> uint64_t buf_lens[PACKED_BATCH_SIZE];
> uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
> @@ -2324,13 +2323,6 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
> ids[i] = descs[avail_idx + i].id;
> }
>
> - if (virtio_net_with_host_offload(dev)) {
> - vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> - hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
> - vhost_dequeue_offload(hdr, pkts[i]);
> - }
> - }
> -
> return 0;
>
> free_buf:
> @@ -2348,6 +2340,7 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
> {
> uint16_t avail_idx = vq->last_avail_idx;
> uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
> + struct virtio_net_hdr *hdr;
> uintptr_t desc_addrs[PACKED_BATCH_SIZE];
> uint16_t ids[PACKED_BATCH_SIZE];
> uint16_t i;
> @@ -2364,6 +2357,13 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
> (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
> pkts[i]->pkt_len);
>
> + if (virtio_net_with_host_offload(dev)) {
> + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> + hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
> + vhost_dequeue_offload(hdr, pkts[i]);
> + }
> + }
> +
> if (virtio_net_is_inorder(dev))
> vhost_shadow_dequeue_batch_packed_inorder(vq,
> ids[PACKED_BATCH_SIZE - 1]);
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
I'll fixup the commit message while applying.
Thanks,
Maxime
09/02/2021 15:52, Maxime Coquelin:
>
> On 2/5/21 8:47 AM, Marvin Liu wrote:
> > When vhost doing dequeue offloading, will parse ethernet and l3/l4
> > header of the packet. Then vhost will set corresponded value in mbuf
>
> corresponding*
>
> > attributes. Thus mean offloading action should after packet data copy.
>
> It means*
>
> >
> > Fixes: 75ed51697820 ("vhost: add packed ring batch dequeue")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Marvin Liu <yong.liu@intel.com>
>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>
> I'll fixup the commit message while applying.
Fixed english wording and applied, thanks.
@@ -2267,7 +2267,6 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
{
bool wrap = vq->avail_wrap_counter;
struct vring_packed_desc *descs = vq->desc_packed;
- struct virtio_net_hdr *hdr;
uint64_t lens[PACKED_BATCH_SIZE];
uint64_t buf_lens[PACKED_BATCH_SIZE];
uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
@@ -2324,13 +2323,6 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
ids[i] = descs[avail_idx + i].id;
}
- if (virtio_net_with_host_offload(dev)) {
- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
- hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
- vhost_dequeue_offload(hdr, pkts[i]);
- }
- }
-
return 0;
free_buf:
@@ -2348,6 +2340,7 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
{
uint16_t avail_idx = vq->last_avail_idx;
uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ struct virtio_net_hdr *hdr;
uintptr_t desc_addrs[PACKED_BATCH_SIZE];
uint16_t ids[PACKED_BATCH_SIZE];
uint16_t i;
@@ -2364,6 +2357,13 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
(void *)(uintptr_t)(desc_addrs[i] + buf_offset),
pkts[i]->pkt_len);
+ if (virtio_net_with_host_offload(dev)) {
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
+ vhost_dequeue_offload(hdr, pkts[i]);
+ }
+ }
+
if (virtio_net_is_inorder(dev))
vhost_shadow_dequeue_batch_packed_inorder(vq,
ids[PACKED_BATCH_SIZE - 1]);