[4/5] net/virtio: refactor Tx offload helper
Commit Message
Purely cosmetic but it is rather odd to have an "offload" helper that
checks if it actually must do something.
We already have the same checks in most callers, so move this branch
into them.
Signed-off-by: David Marchand <david.marchand@redhat.com>
---
drivers/net/virtio/virtio_rxtx.c | 7 +-
drivers/net/virtio/virtio_rxtx_packed_avx.h | 2 +-
drivers/net/virtio/virtio_rxtx_packed_neon.h | 2 +-
drivers/net/virtio/virtqueue.h | 83 +++++++++-----------
4 files changed, 44 insertions(+), 50 deletions(-)
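In short, the explicit "offload" flag argument is dropped and the branch moves to the call sites. As a minimal before/after sketch of the calling pattern (condensed from the diff below, using the same identifiers):

	/* Before: every caller passed the flag and the helper tested it. */
	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);

	/* After: the caller branches and the helper assumes Tx offloads are on. */
	if (vq->hw->has_tx_offload)
		virtqueue_xmit_offload(hdr, cookie);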
Comments
On Thu, Apr 01, 2021 at 11:52:42AM +0200, David Marchand wrote:
> Purely cosmetic but it is rather odd to have an "offload" helper that
> checks if it actually must do something.
> We already have the same checks in most callers, so move this branch
> into them.
>
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> ---
Reviewed-by: Flavio Leitner <fbl@sysclose.org>
> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Thursday, April 1, 2021 5:53 PM
> To: dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; olivier.matz@6wind.com; fbl@sysclose.org;
> i.maximets@ovn.org; Chenbo Xia <chenbo.xia@intel.com>;
> Bruce Richardson <bruce.richardson@intel.com>;
> Konstantin Ananyev <konstantin.ananyev@intel.com>; jerinj@marvell.com;
> Ruifeng Wang <Ruifeng.Wang@arm.com>
> Subject: [PATCH 4/5] net/virtio: refactor Tx offload helper
>
> Purely cosmetic but it is rather odd to have an "offload" helper that
> checks if it actually must do something.
> We already have the same checks in most callers, so move this branch
> into them.
>
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> ---
> [quoted patch trimmed; the full diff appears below]
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 40283001b0..a4e37ef379 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -448,7 +448,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
if (!vq->hw->has_tx_offload)
virtqueue_clear_net_hdr(hdr);
else
- virtqueue_xmit_offload(hdr, cookies[i], true);
+ virtqueue_xmit_offload(hdr, cookies[i]);

start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size;
start_dp[idx].len = cookies[i]->data_len + head_size;
@@ -495,7 +495,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
if (!vq->hw->has_tx_offload)
virtqueue_clear_net_hdr(hdr);
else
- virtqueue_xmit_offload(hdr, cookie, true);
+ virtqueue_xmit_offload(hdr, cookie);

dp->addr = rte_mbuf_data_iova(cookie) - head_size;
dp->len = cookie->data_len + head_size;
@@ -581,7 +581,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
idx = start_dp[idx].next;
}
- virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+ if (vq->hw->has_tx_offload)
+ virtqueue_xmit_offload(hdr, cookie);

do {
start_dp[idx].addr = rte_mbuf_data_iova(cookie);
diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.h b/drivers/net/virtio/virtio_rxtx_packed_avx.h
index 49e845d02a..33cac3244f 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_avx.h
+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.h
@@ -115,7 +115,7 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
struct virtio_net_hdr *, -head_size);
- virtqueue_xmit_offload(hdr, tx_pkts[i], true);
+ virtqueue_xmit_offload(hdr, tx_pkts[i]);
}
}
diff --git a/drivers/net/virtio/virtio_rxtx_packed_neon.h b/drivers/net/virtio/virtio_rxtx_packed_neon.h
index 851c81f312..1a49caf8af 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_neon.h
+++ b/drivers/net/virtio/virtio_rxtx_packed_neon.h
@@ -134,7 +134,7 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
struct virtio_net_hdr *, -head_size);
- virtqueue_xmit_offload(hdr, tx_pkts[i], true);
+ virtqueue_xmit_offload(hdr, tx_pkts[i]);
}
}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 2e8826bc28..41a9b82a5f 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -613,52 +613,44 @@ virtqueue_notify(struct virtqueue *vq)
} while (0)

static inline void
-virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
- struct rte_mbuf *cookie,
- uint8_t offload)
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
{
- if (offload) {
- uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
-
- if (cookie->ol_flags & PKT_TX_TCP_SEG)
- csum_l4 |= PKT_TX_TCP_CKSUM;
-
- switch (csum_l4) {
- case PKT_TX_UDP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct rte_udp_hdr,
- dgram_cksum);
- hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- break;
-
- case PKT_TX_TCP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
- hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- break;
-
- default:
- ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
- ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
- ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
- break;
- }
+ uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+
+ if (cookie->ol_flags & PKT_TX_TCP_SEG)
+ csum_l4 |= PKT_TX_TCP_CKSUM;
+
+ switch (csum_l4) {
+ case PKT_TX_UDP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ case PKT_TX_TCP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ default:
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ break;
+ }

- /* TCP Segmentation Offload */
- if (cookie->ol_flags & PKT_TX_TCP_SEG) {
- hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
- VIRTIO_NET_HDR_GSO_TCPV6 :
- VIRTIO_NET_HDR_GSO_TCPV4;
- hdr->gso_size = cookie->tso_segsz;
- hdr->hdr_len =
- cookie->l2_len +
- cookie->l3_len +
- cookie->l4_len;
- } else {
- ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
- ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
- ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
- }
+ /* TCP Segmentation Offload */
+ if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+ hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+ VIRTIO_NET_HDR_GSO_TCPV6 :
+ VIRTIO_NET_HDR_GSO_TCPV4;
+ hdr->gso_size = cookie->tso_segsz;
+ hdr->hdr_len = cookie->l2_len + cookie->l3_len + cookie->l4_len;
+ } else {
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
}
}
@@ -737,7 +729,8 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
}
}

- virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+ if (vq->hw->has_tx_offload)
+ virtqueue_xmit_offload(hdr, cookie);

do {
uint16_t flags;