From patchwork Wed Jan 18 02:53:46 2023
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 122232
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, xiaoyun.li@intel.com, helin.zhang@intel.com,
 Junfeng Guo, Jordan Kimbrough, Rushil Gupta, Jeroen de Borst
Subject: [RFC 7/8] net/gve: support jumbo frame for GQI
Date: Wed, 18 Jan 2023 10:53:46 +0800
Message-Id: <20230118025347.1567078-8-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20230118025347.1567078-1-junfeng.guo@intel.com>
References: <20230118025347.1567078-1-junfeng.guo@intel.com>

Add multi-segment support to enable GQI Rx jumbo frames.
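As background for exercising this path (not part of the diff below): jumbo
frames only reach the application as chained mbufs when scattered Rx and a
large enough MTU are enabled on the port. A minimal sketch using only generic
ethdev calls follows; the MTU value, descriptor counts and helper name are
illustrative and not taken from this series.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative port setup: a jumbo MTU plus RTE_ETH_RX_OFFLOAD_SCATTER so
 * that one received frame may span several chained mbufs (nb_segs > 1). */
static int
setup_jumbo_rx_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mtu = 9000,                            /* example jumbo MTU */
			.offloads = RTE_ETH_RX_OFFLOAD_SCATTER, /* multi-segment Rx */
		},
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret != 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}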
Signed-off-by: Jordan Kimbrough
Signed-off-by: Rushil Gupta
Signed-off-by: Junfeng Guo
Signed-off-by: Jeroen de Borst
---
 drivers/net/gve/gve_ethdev.h |   8 +++
 drivers/net/gve/gve_rx.c     | 128 ++++++++++++++++++++++++++---------
 2 files changed, 105 insertions(+), 31 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 2e0f96499d..608a2f2fb4 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -138,6 +138,13 @@ struct gve_tx_queue {
 	uint8_t is_gqi_qpl;
 };
 
+struct gve_rx_ctx {
+	struct rte_mbuf *mbuf_head;
+	struct rte_mbuf *mbuf_tail;
+	uint16_t total_frags;
+	bool drop_pkt;
+};
+
 struct gve_rx_queue {
 	volatile struct gve_rx_desc *rx_desc_ring;
 	volatile union gve_rx_data_slot *rx_data_ring;
@@ -146,6 +153,7 @@ struct gve_rx_queue {
 	uint64_t rx_ring_phys_addr;
 	struct rte_mbuf **sw_ring;
 	struct rte_mempool *mpool;
+	struct gve_rx_ctx ctx;
 
 	uint16_t rx_tail;
 	uint16_t nb_rx_desc;
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 9ba975c9b4..2468fc70ee 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -5,6 +5,8 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+#define GVE_PKT_CONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
+
 static inline void
 gve_rx_refill(struct gve_rx_queue *rxq)
 {
@@ -80,40 +82,70 @@ gve_rx_refill(struct gve_rx_queue *rxq)
 	}
 }
 
-uint16_t
-gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+/*
+ * This method processes a single rte_mbuf and handles packet segmentation.
+ * In QPL mode it copies the packet data from the QPL buffer into the mbuf.
+ */
+static void
+gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
+	    uint16_t rx_id)
 {
-	volatile struct gve_rx_desc *rxr, *rxd;
-	struct gve_rx_queue *rxq = rx_queue;
-	uint16_t rx_id = rxq->rx_tail;
-	struct rte_mbuf *rxe;
-	uint16_t nb_rx, len;
+	uint16_t padding = 0;
 	uint64_t addr;
-	uint16_t i;
-
-	rxr = rxq->rx_desc_ring;
-	nb_rx = 0;
-
-	for (i = 0; i < nb_pkts; i++) {
-		rxd = &rxr[rx_id];
-		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
-			break;
-		if (rxd->flags_seq & GVE_RXF_ERR)
-			continue;
-
-		len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
-		rxe = rxq->sw_ring[rx_id];
-		if (rxq->is_gqi_qpl) {
-			addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + GVE_RX_PAD;
-			rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
-				   (void *)(size_t)addr, len);
-		}
+
+	rxe->data_len = len;
+	if (!rxq->ctx.mbuf_head) {
+		rxq->ctx.mbuf_head = rxe;
+		rxq->ctx.mbuf_tail = rxe;
+		rxe->nb_segs = 1;
 		rxe->pkt_len = len;
 		rxe->data_len = len;
 		rxe->port = rxq->port_id;
 		rxe->ol_flags = 0;
+		padding = GVE_RX_PAD;
+	} else {
+		rxq->ctx.mbuf_head->pkt_len += len;
+		rxq->ctx.mbuf_head->nb_segs += 1;
+		rxq->ctx.mbuf_tail->next = rxe;
+		rxq->ctx.mbuf_tail = rxe;
+	}
+	if (rxq->is_gqi_qpl) {
+		addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
+		rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
+			   (void *)(size_t)addr, len);
+	}
+}
+
+/*
+ * This method processes a single packet fragment associated with the
+ * passed packet descriptor.
+ * This method returns whether the fragment is the last fragment
+ * of a packet.
+ */
+static bool
+gve_rx(struct gve_rx_queue *rxq, volatile struct gve_rx_desc *rxd, uint16_t rx_id)
+{
+	bool is_last_frag = !GVE_PKT_CONT_BIT_IS_SET(rxd->flags_seq);
+	uint16_t frag_size = rte_be_to_cpu_16(rxd->len);
+	struct gve_rx_ctx *ctx = &rxq->ctx;
+	bool is_first_frag = ctx->total_frags == 0;
+	struct rte_mbuf *rxe;
+
+	if (ctx->drop_pkt)
+		goto finish_frag;
+	if (rxd->flags_seq & GVE_RXF_ERR) {
+		ctx->drop_pkt = true;
+		goto finish_frag;
+	}
+
+	if (is_first_frag)
+		frag_size -= GVE_RX_PAD;
+
+	rxe = rxq->sw_ring[rx_id];
+	gve_rx_mbuf(rxq, rxe, frag_size, rx_id);
+
+	if (is_first_frag) {
 		if (rxd->flags_seq & GVE_RXF_TCP)
 			rxe->packet_type |= RTE_PTYPE_L4_TCP;
 		if (rxd->flags_seq & GVE_RXF_UDP)
@@ -127,18 +159,52 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rxe->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 			rxe->hash.rss = rte_be_to_cpu_32(rxd->rss_hash);
 		}
+	}
 
-		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
+finish_frag:
+	ctx->total_frags++;
+	return is_last_frag;
+}
+
+static void
+gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+	ctx->mbuf_head = NULL;
+	ctx->mbuf_tail = NULL;
+	ctx->drop_pkt = false;
+	ctx->total_frags = 0;
+}
+
+uint16_t
+gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	volatile struct gve_rx_desc *rxr, *rxd;
+	struct gve_rx_queue *rxq = rx_queue;
+	struct gve_rx_ctx *ctx = &rxq->ctx;
+	uint16_t rx_id = rxq->rx_tail;
+	uint16_t nb_rx;
+
+	rxr = rxq->rx_desc_ring;
+	nb_rx = 0;
+
+	while (nb_rx < nb_pkts) {
+		rxd = &rxr[rx_id];
+		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
+			break;
+
+		if (gve_rx(rxq, rxd, rx_id)) {
+			if (!ctx->drop_pkt)
+				rx_pkts[nb_rx++] = ctx->mbuf_head;
+			rxq->nb_avail += ctx->total_frags;
+			gve_rx_ctx_clear(ctx);
+		}
 
 		rx_id++;
 		if (rx_id == rxq->nb_rx_desc)
 			rx_id = 0;
-
-		rx_pkts[nb_rx] = rxe;
-		nb_rx++;
+		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
 	}
 
-	rxq->nb_avail += nb_rx;
 	rxq->rx_tail = rx_id;
 
 	if (rxq->nb_avail > rxq->free_thresh)
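A note on consuming the result (illustrative, not part of the patch): with the
chaining done in gve_rx_mbuf() above, gve_rx_burst() returns the head mbuf of a
chain for a jumbo frame. pkt_len and nb_segs are only meaningful on the head,
each segment carries its own data_len, and segments are linked through m->next.
A small sketch of walking such a chain:

#include <rte_mbuf.h>

/* Illustrative helper: sum the per-segment lengths of a chained mbuf as
 * built by gve_rx_mbuf(); the result should match the head's pkt_len. */
static uint32_t
chained_rx_bytes(const struct rte_mbuf *head)
{
	const struct rte_mbuf *seg;
	uint32_t bytes = 0;

	for (seg = head; seg != NULL; seg = seg->next)
		bytes += seg->data_len;	/* length of this segment only */

	return bytes;			/* equals head->pkt_len */
}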