From patchwork Thu Apr 18 08:20:21 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: David Marchand <david.marchand@redhat.com>
X-Patchwork-Id: 139487
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, ferruh.yigit@amd.com, Aman Singh, Yuying Zhang,
 Jie Hai, Yisen Zhuang
Subject: [PATCH v3 7/7] net: clear outer UDP checksum in Intel prepare helper
Date: Thu, 18 Apr 2024 10:20:21 +0200
Message-ID: <20240418082023.1767998-8-david.marchand@redhat.com>
In-Reply-To: <20240418082023.1767998-1-david.marchand@redhat.com>
References: <20240405125039.897933-1-david.marchand@redhat.com>
 <20240418082023.1767998-1-david.marchand@redhat.com>

If an inner offload (L3/L4 checksum or L4 segmentation) is requested
on a tunneled packet and the hardware does not support recomputing
the outer UDP checksum, automatically clear that checksum in the
common prepare helper.
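
As an illustrative sketch only (not part of this patch), here is
roughly how a driver tx_prepare path ends up exercising the helper;
the xxx_ name is hypothetical, while rte_net_intel_cksum_prepare()
and rte_errno are existing DPDK APIs:

    #include <rte_common.h>
    #include <rte_errno.h>
    #include <rte_mbuf.h>
    #include <rte_net.h>

    /* Hypothetical tx_prepare callback. rte_net_intel_cksum_prepare()
     * calls rte_net_intel_cksum_flags_prepare(m, m->ol_flags) which,
     * with this patch, zeroes the outer UDP checksum of a tunneled
     * packet whenever an inner offload is requested without
     * RTE_MBUF_F_TX_OUTER_UDP_CKSUM.
     */
    static uint16_t
    xxx_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
                  uint16_t nb_pkts)
    {
        uint16_t i;

        for (i = 0; i < nb_pkts; i++) {
            int ret = rte_net_intel_cksum_prepare(tx_pkts[i]);

            if (ret != 0) {
                /* Per tx_prepare convention, report the faulty
                 * packet index and expose the error in rte_errno.
                 */
                rte_errno = -ret;
                return i;
            }
        }
        return nb_pkts;
    }
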
Signed-off-by: David Marchand <david.marchand@redhat.com>
---
Changes since v2:
- fixed GRE tunneling,
- dropped documentation update,

---
 app/test-pmd/csumonly.c      | 10 ++------
 drivers/net/hns3/hns3_rxtx.c | 44 ------------------------------------
 lib/net/rte_net.h            | 22 +++++++++++++-----
 3 files changed, 18 insertions(+), 58 deletions(-)

diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 71add6ca47..2246c22e8e 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -612,19 +612,13 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
 		return ol_flags;
 	}
 
-	/* outer UDP checksum is done in software. In the other side, for
-	 * UDP tunneling, like VXLAN or Geneve, outer UDP checksum can be
-	 * set to zero.
+	/* Outer UDP checksum is done in software.
 	 *
 	 * If a packet will be TSOed into small packets by NIC, we cannot
 	 * set/calculate a non-zero checksum, because it will be a wrong
 	 * value after the packet be split into several small packets.
 	 */
-	if (tso_enabled)
-		udp_hdr->dgram_cksum = 0;
-
-	/* do not recalculate udp cksum if it was 0 */
-	if (udp_hdr->dgram_cksum != 0) {
+	if (!tso_enabled && udp_hdr->dgram_cksum != 0) {
 		udp_hdr->dgram_cksum = 0;
 		udp_hdr->dgram_cksum = get_udptcp_checksum(m, outer_l3_hdr,
 					info->outer_l2_len + info->outer_l3_len,
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 03fc919fd7..b5436c51e7 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -3616,47 +3616,6 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
 	return false;
 }
 
-static void
-hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
-{
-	uint64_t ol_flags = m->ol_flags;
-	uint32_t paylen, hdr_len, l4_proto;
-	struct rte_udp_hdr *udp_hdr;
-
-	if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) &&
-	    ((ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) ||
-	    !(ol_flags & RTE_MBUF_F_TX_TCP_SEG)))
-		return;
-
-	if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
-		struct rte_ipv4_hdr *ipv4_hdr;
-
-		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-						   m->outer_l2_len);
-		l4_proto = ipv4_hdr->next_proto_id;
-	} else {
-		struct rte_ipv6_hdr *ipv6_hdr;
-
-		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-						   m->outer_l2_len);
-		l4_proto = ipv6_hdr->proto;
-	}
-
-	if (l4_proto != IPPROTO_UDP)
-		return;
-
-	/* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
-	hdr_len = m->l2_len + m->l3_len + m->l4_len;
-	hdr_len += m->outer_l2_len + m->outer_l3_len;
-	paylen = m->pkt_len - hdr_len;
-	if (paylen <= m->tso_segsz)
-		return;
-	udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
-					  m->outer_l2_len +
-					  m->outer_l3_len);
-	udp_hdr->dgram_cksum = 0;
-}
-
 static int
 hns3_check_tso_pkt_valid(struct rte_mbuf *m)
 {
@@ -3834,7 +3793,6 @@ hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
 			 * checksum of packets that need TSO, so network driver
 			 * software not need to recalculate it.
 			 */
-			hns3_outer_header_cksum_prepare(m);
 			return 0;
 		}
 	}
@@ -3848,8 +3806,6 @@ hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
 	if (!hns3_validate_tunnel_cksum(tx_queue, m))
 		return 0;
 
-	hns3_outer_header_cksum_prepare(m);
-
 	return 0;
 }
 
diff --git a/lib/net/rte_net.h b/lib/net/rte_net.h
index efd9d5f5ee..cdc6cf956d 100644
--- a/lib/net/rte_net.h
+++ b/lib/net/rte_net.h
@@ -108,6 +108,10 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
 static inline int
 rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
 {
+	const uint64_t inner_requests = RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK |
+		RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG;
+	const uint64_t outer_requests = RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+		RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
 	/* Initialise ipv4_hdr to avoid false positive compiler warnings. */
 	struct rte_ipv4_hdr *ipv4_hdr = NULL;
 	struct rte_ipv6_hdr *ipv6_hdr;
@@ -120,9 +124,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
 	 * Mainly it is required to avoid fragmented headers check if
 	 * no offloads are requested.
 	 */
-	if (!(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG |
-			RTE_MBUF_F_TX_UDP_SEG | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
-			RTE_MBUF_F_TX_OUTER_UDP_CKSUM)))
+	if (!(ol_flags & (inner_requests | outer_requests)))
 		return 0;
 
 	if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) {
@@ -136,19 +138,27 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
 				struct rte_ipv4_hdr *, m->outer_l2_len);
 			ipv4_hdr->hdr_checksum = 0;
 		}
-		if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
+		if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM || ol_flags & inner_requests) {
 			if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
 				ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
 						m->outer_l2_len);
 				udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
 						m->outer_l3_len);
-				udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, m->ol_flags);
+				if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
+					udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
+							m->ol_flags);
+				else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
+					udp_hdr->dgram_cksum = 0;
 			} else {
 				ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
 						m->outer_l2_len);
 				udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
 						m->outer_l2_len + m->outer_l3_len);
-				udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, m->ol_flags);
+				if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
+					udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
+							m->ol_flags);
+				else if (ipv6_hdr->proto == IPPROTO_UDP)
+					udp_hdr->dgram_cksum = 0;
 			}
 		}
 	}