From patchwork Thu Mar 14 12:18:54 2024
X-Patchwork-Submitter: Rushil Gupta
X-Patchwork-Id: 138397
X-Patchwork-Delegate: ferruh.yigit@amd.com
Date: Thu, 14 Mar 2024 12:18:54 +0000
Message-ID: <20240314121854.2551690-1-rushilg@google.com>
Subject: [PATCH] net/gve: add IPv4 checksum offloading capability
From: Rushil Gupta
To: junfeng.guo@intel.com, jeroendb@google.com, joshwash@google.com,
 ferruh.yigit@amd.com
Cc: dev@dpdk.org, Rushil Gupta

The gVNIC DQO queue format supports IPv4 checksum offload. Update the Tx
and Rx paths to translate DPDK offload flags into descriptor fields on Tx,
and descriptor status into mbuf offload flags on Rx. Add ptype adminq
support so that these flags are only set for supported L3/L4 packet types.
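
For illustration only (not part of this patch), the sketch below shows how an
application would consume the new capability through the standard ethdev and
mbuf APIs; "port_id", "mp", and the queue sizes are placeholder assumptions.

/* Hypothetical application-side setup; not gve-specific. */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static int
enable_ipv4_cksum_offload(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { 0 };
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Request the offloads only if the PMD reports them (DQO on gVNIC). */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL, mp);
	if (ret != 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret != 0)
		return ret;
	return rte_eth_dev_start(port_id);
}

/* Per packet, IPv4 header checksum insertion is then requested via mbuf flags. */
static void
request_tx_ip_cksum(struct rte_mbuf *m, uint16_t l2_len, uint16_t l3_len)
{
	m->l2_len = l2_len;
	m->l3_len = l3_len;
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM;
}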
Signed-off-by: Rushil Gupta
Reviewed-by: Joshua Washington
---
 drivers/net/gve/gve_ethdev.c | 34 +++++++++++++++++++++++++++++++---
 drivers/net/gve/gve_ethdev.h |  5 +++++
 drivers/net/gve/gve_rx_dqo.c | 38 ++++++++++++++++++++++++++++++++++++--
 drivers/net/gve/gve_tx_dqo.c |  2 +-
 4 files changed, 73 insertions(+), 6 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 3b8ec58..475745b 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -434,8 +434,14 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM	|
 		RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
-	if (priv->queue_format == GVE_DQO_RDA_FORMAT)
-		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
+	if (!gve_is_gqi(priv)) {
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+		dev_info->rx_offload_capa |=
+			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+			RTE_ETH_RX_OFFLOAD_TCP_LRO;
+	}
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH,
@@ -938,6 +944,11 @@ gve_teardown_device_resources(struct gve_priv *priv)
 		if (err)
 			PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err);
 	}
+
+	if (!gve_is_gqi(priv)) {
+		rte_free(priv->ptype_lut_dqo);
+		priv->ptype_lut_dqo = NULL;
+	}
 	gve_free_counter_array(priv);
 	gve_free_irq_db(priv);
 	gve_clear_device_resources_ok(priv);
@@ -997,8 +1008,25 @@ gve_setup_device_resources(struct gve_priv *priv)
 		PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err);
 		goto free_irq_dbs;
 	}
-	return 0;
+	if (!gve_is_gqi(priv)) {
+		priv->ptype_lut_dqo = rte_zmalloc("gve_ptype_lut_dqo",
+						 sizeof(struct gve_ptype_lut), 0);
+		if (priv->ptype_lut_dqo == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to alloc ptype lut.");
+			err = -ENOMEM;
+			goto free_irq_dbs;
+		}
+		err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
+		if (unlikely(err)) {
+			PMD_DRV_LOG(ERR, "Failed to get ptype map: err=%d", err);
+			goto free_ptype_lut;
+		}
+	}
 
+	return 0;
+free_ptype_lut:
+	rte_free(priv->ptype_lut_dqo);
+	priv->ptype_lut_dqo = NULL;
 free_irq_dbs:
 	gve_free_irq_db(priv);
 free_cnt_array:
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index d713657..9b19fc5 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -36,6 +36,10 @@
 	RTE_MBUF_F_TX_L4_MASK |		\
 	RTE_MBUF_F_TX_TCP_SEG)
 
+#define GVE_TX_CKSUM_OFFLOAD_MASK_DQO (	\
+		GVE_TX_CKSUM_OFFLOAD_MASK |	\
+		RTE_MBUF_F_TX_IP_CKSUM)
+
 #define GVE_RTE_RSS_OFFLOAD_ALL (	\
 	RTE_ETH_RSS_IPV4 |		\
 	RTE_ETH_RSS_NONFRAG_IPV4_TCP |	\
@@ -295,6 +299,7 @@ struct gve_priv {
 	uint16_t stats_end_idx; /* end index of array of stats written by NIC */
 
 	struct gve_rss_config rss_config;
+	struct gve_ptype_lut *ptype_lut_dqo;
 };
 
 static inline bool
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index 7c7a8c4..1c37c54 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -75,6 +75,40 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
 	rxq->bufq_tail = next_avail;
 }
 
+static inline uint16_t
+gve_parse_csum_ol_flags(volatile struct gve_rx_compl_desc_dqo *rx_desc,
+			struct gve_priv *priv) {
+	uint64_t ol_flags = 0;
+	struct gve_ptype ptype =
+		priv->ptype_lut_dqo->ptypes[rx_desc->packet_type];
+
+	if (!rx_desc->l3_l4_processed)
+		return ol_flags;
+
+	if (ptype.l3_type == GVE_L3_TYPE_IPV4) {
+		if (rx_desc->csum_ip_err)
+			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+		else
+			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+	}
+
+	if (rx_desc->csum_l4_err) {
+		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+		return ol_flags;
+	}
+	switch (ptype.l4_type) {
+	case GVE_L4_TYPE_TCP:
+	case GVE_L4_TYPE_UDP:
+	case GVE_L4_TYPE_ICMP:
+	case GVE_L4_TYPE_SCTP:
+		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+		break;
+	default:
+		break;
+	}
+	return ol_flags;
+}
+
 uint16_t
 gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
@@ -125,8 +159,8 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm->data_len = pkt_len;
 		rxm->port = rxq->port_id;
 		rxm->ol_flags = 0;
-
-		rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+		rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH |
+				 gve_parse_csum_ol_flags(rx_desc, rxq->hw);
 		rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash);
 
 		rx_pkts[nb_rx++] = rxm;
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 30a1455..a65e6aa 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -138,7 +138,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		/* fill the last descriptor with End of Packet (EOP) bit */
 		txd->pkt.end_of_packet = 1;
 
-		if (ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK)
+		if (ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK_DQO)
 			txd->pkt.checksum_offload_enable = 1;
 
 		txq->nb_free -= nb_used;
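
As a further illustrative note (again not part of the patch), the Rx flags set
by gve_parse_csum_ol_flags() are the generic mbuf checksum bits, so a receiver
checks them with the standard masks; the counters below are placeholders.

/* Hypothetical consumer of the Rx checksum flags set by the DQO Rx path. */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
count_bad_cksums(uint16_t port_id, uint16_t queue_id,
		 uint64_t *ip_bad, uint64_t *l4_bad)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, i;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
	for (i = 0; i < nb_rx; i++) {
		uint64_t fl = pkts[i]->ol_flags;

		if ((fl & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
		    RTE_MBUF_F_RX_IP_CKSUM_BAD)
			(*ip_bad)++;
		if ((fl & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
		    RTE_MBUF_F_RX_L4_CKSUM_BAD)
			(*l4_bad)++;
		rte_pktmbuf_free(pkts[i]);
	}
}

When the descriptor's l3_l4_processed bit is clear, or the ptype is not one of
the recognized L4 types, the patch leaves the checksum bits unset, i.e. the
mbuf reports CKSUM_UNKNOWN and the application must verify in software.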