[14/15] net/ena: rework Rx checksum inspection

Message ID 20240702144626.14545-15-shaibran@amazon.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series: net/ena: driver release 2.10.0

Checks

Context       | Check   | Description
ci/checkpatch | success | coding style OK

Commit Message

Brandes, Shai July 2, 2024, 2:46 p.m. UTC
From: Shai Brandes <shaibran@amazon.com>

This restructure simplifies the Rx checksum inspection logic in
ena_rx_mbuf_prepare() in order to improve readability and
maintainability by consolidating conditions.

Signed-off-by: Shai Brandes <shaibran@amazon.com>
---
 doc/guides/rel_notes/release_24_07.rst |  2 +
 drivers/net/ena/ena_ethdev.c           | 66 +++++++++++++++-----------
 2 files changed, 39 insertions(+), 29 deletions(-)
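
For readers skimming the diff, the consolidated flow can be summarized by the
standalone sketch below. It is an illustration only: the enum, struct, and flag
names are simplified stand-ins for the real ena_com_rx_ctx fields and the
RTE_MBUF_F_RX_* offload flags, and the per-ring statistics updates
(l3_csum_bad, l4_csum_good/bad) are omitted.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the ena/DPDK types; the names here are
 * illustrative and do not match the real definitions. */
enum l3_proto { L3_UNKNOWN, L3_IPV4, L3_IPV6 };
enum l4_proto { L4_UNKNOWN, L4_TCP, L4_UDP };

struct rx_ctx {
	enum l3_proto l3_proto;
	enum l4_proto l4_proto;
	bool l3_csum_err;
	bool l4_csum_checked;
	bool l4_csum_err;
	bool frag;
	uint32_t hash;
};

#define F_IP_CKSUM_GOOD    (UINT64_C(1) << 0)
#define F_IP_CKSUM_BAD     (UINT64_C(1) << 1)
#define F_L4_CKSUM_GOOD    (UINT64_C(1) << 2)
#define F_L4_CKSUM_BAD     (UINT64_C(1) << 3)
#define F_L4_CKSUM_UNKNOWN (UINT64_C(1) << 4)
#define F_RSS_HASH         (UINT64_C(1) << 5)

static uint64_t rx_prepare_flags(const struct rx_ctx *ctx, bool rss_hash_enabled)
{
	uint64_t ol_flags = 0;

	/* L3: only IPv4 carries a checksum that the device validates. */
	if (ctx->l3_proto == L3_IPV4)
		ol_flags |= ctx->l3_csum_err ? F_IP_CKSUM_BAD : F_IP_CKSUM_GOOD;

	/* L4 checksum status and the RSS hash are reported only for
	 * non-fragmented TCP/UDP packets; everything else is "unknown". */
	if ((ctx->l4_proto == L4_TCP || ctx->l4_proto == L4_UDP) && !ctx->frag) {
		if (ctx->l4_csum_checked)
			ol_flags |= ctx->l4_csum_err ? F_L4_CKSUM_BAD : F_L4_CKSUM_GOOD;
		else
			ol_flags |= F_L4_CKSUM_UNKNOWN;

		if (rss_hash_enabled)
			ol_flags |= F_RSS_HASH; /* the hash value would also be copied */
	} else {
		ol_flags |= F_L4_CKSUM_UNKNOWN;
	}

	return ol_flags;
}

int main(void)
{
	struct rx_ctx ctx = {
		.l3_proto = L3_IPV4,
		.l4_proto = L4_TCP,
		.l4_csum_checked = true,
	};

	/* Expect IP_CKSUM_GOOD | L4_CKSUM_GOOD | RSS_HASH -> 0x25 */
	printf("ol_flags = 0x%" PRIx64 "\n", rx_prepare_flags(&ctx, true));
	return 0;
}

As in the patch below, the RSS-hash decision is taken from the ring's offload
configuration inside the helper itself, which is why the fill_hash argument and
the ENA_PTYPE_HAS_HASH macro are removed.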
  

Patch

diff --git a/doc/guides/rel_notes/release_24_07.rst b/doc/guides/rel_notes/release_24_07.rst
index ec960d93cc..d2253999fa 100644
--- a/doc/guides/rel_notes/release_24_07.rst
+++ b/doc/guides/rel_notes/release_24_07.rst
@@ -81,6 +81,8 @@  New Features
   * Removed an obsolete workaround for a false L4 bad Rx checksum indication.
   * Fixed an invalid return value check.
   * Fixed Rx chcecksum inspection to check only TCP/UDP packets.
+  * Reworked the Rx checksum inspection routine to improve
+    readability and maintainability.
 
 * **Update Tap PMD driver.**
 
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index a18c94df28..feb229c5ec 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -53,8 +53,6 @@ 
  */
 #define ENA_CLEANUP_BUF_THRESH	256
 
-#define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)
-
 struct ena_stats {
 	char name[ETH_GSTRING_LEN];
 	int stat_offset;
@@ -645,19 +643,14 @@  static inline void ena_trigger_reset(struct ena_adapter *adapter,
 
 static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
 				       struct rte_mbuf *mbuf,
-				       struct ena_com_rx_ctx *ena_rx_ctx,
-				       bool fill_hash)
+				       struct ena_com_rx_ctx *ena_rx_ctx)
 {
 	struct ena_stats_rx *rx_stats = &rx_ring->rx_stats;
 	uint64_t ol_flags = 0;
 	uint32_t packet_type = 0;
 
-	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
-		packet_type |= RTE_PTYPE_L4_TCP;
-	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
-		packet_type |= RTE_PTYPE_L4_UDP;
-
-	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
+	switch (ena_rx_ctx->l3_proto) {
+	case ENA_ETH_IO_L3_PROTO_IPV4:
 		packet_type |= RTE_PTYPE_L3_IPV4;
 		if (unlikely(ena_rx_ctx->l3_csum_err)) {
 			++rx_stats->l3_csum_bad;
@@ -665,27 +658,45 @@  static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
 		} else {
 			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 		}
-	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
+		break;
+	case ENA_ETH_IO_L3_PROTO_IPV6:
 		packet_type |= RTE_PTYPE_L3_IPV6;
+		break;
+	default:
+		break;
 	}
 
-	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag ||
-		!(packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP))) {
-		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
-	} else {
-		if (unlikely(ena_rx_ctx->l4_csum_err)) {
-			++rx_stats->l4_csum_bad;
-			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+	switch (ena_rx_ctx->l4_proto) {
+	case ENA_ETH_IO_L4_PROTO_TCP:
+		packet_type |= RTE_PTYPE_L4_TCP;
+		break;
+	case ENA_ETH_IO_L4_PROTO_UDP:
+		packet_type |= RTE_PTYPE_L4_UDP;
+		break;
+	default:
+		break;
+	}
+
+	/* L4 csum is relevant only for TCP/UDP packets */
+	if ((packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)) && !ena_rx_ctx->frag) {
+		if (ena_rx_ctx->l4_csum_checked) {
+			if (likely(!ena_rx_ctx->l4_csum_err)) {
+				++rx_stats->l4_csum_good;
+				ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+			} else {
+				++rx_stats->l4_csum_bad;
+				ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+			}
 		} else {
-			++rx_stats->l4_csum_good;
-			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
 		}
-	}
 
-	if (fill_hash &&
-	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
-		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
-		mbuf->hash.rss = ena_rx_ctx->hash;
+		if (rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
+			ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+			mbuf->hash.rss = ena_rx_ctx->hash;
+		}
+	} else {
+		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
 	}
 
 	mbuf->ol_flags = ol_flags;
@@ -2765,7 +2776,6 @@  static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t completed;
 	struct ena_com_rx_ctx ena_rx_ctx;
 	int i, rc = 0;
-	bool fill_hash;
 
 #ifdef RTE_ETHDEV_DEBUG_RX
 	/* Check adapter state */
@@ -2776,8 +2786,6 @@  static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}
 #endif
 
-	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
-
 	descs_in_use = rx_ring->ring_size -
 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
 	nb_pkts = RTE_MIN(descs_in_use, nb_pkts);
@@ -2823,7 +2831,7 @@  static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		}
 
 		/* fill mbuf attributes if any */
-		ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash);
+		ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx);
 
 		if (unlikely(mbuf->ol_flags &
 				(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)))