From patchwork Tue Dec 5 09:45:39 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Rakesh Kudurumalla X-Patchwork-Id: 134869 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0EA0C43676; Tue, 5 Dec 2023 10:46:00 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 28B3A42E17; Tue, 5 Dec 2023 10:45:53 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com [67.231.156.173]) by mails.dpdk.org (Postfix) with ESMTP id E9B9242E09 for ; Tue, 5 Dec 2023 10:45:50 +0100 (CET) Received: from pps.filterd (m0045851.ppops.net [127.0.0.1]) by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id 3B59TjHn007242 for ; Tue, 5 Dec 2023 01:45:50 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=3rEfE/zHOYl7Me0HDe46E9dZ3pIE2jUkhlRNmsiNouY=; b=JwKgmkc26k0KqUhrHgmLBg3zrPS8wGs8ww5FedDoLxKYTgYNju7suiUMGMl/IfV5+Hi/ X45wfcUbpSPeahEMUSJt0QLlKBMXocwPfmASG6oT/sN5MbgpPNytVTenbXuLpjZizOlL AQ6qLNLttCxcm/3TtIuPNrGsKVK6OFIT4zYl7pui65jUPrW6qvSdH/G7fIprWi7XhRig 0NlZNnyIEPxxN183CTiqihj49axgC+99bfNTDFjIurki29MOV0EQqBEamjVoCt1EoXG5 oYx4pVPtzePmG2xPf0RorsdJPD3ONZoeaMU7RXhLHNqqq5IrEZiiBW8do6Dg9KIn73DE AQ== Received: from dc5-exch02.marvell.com ([199.233.59.182]) by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3ut0e686va-2 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT) for ; Tue, 05 Dec 2023 01:45:50 -0800 Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48; Tue, 5 Dec 2023 
01:45:47 -0800 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend Transport; Tue, 5 Dec 2023 01:45:47 -0800 Received: from localhost.localdomain (unknown [10.28.36.154]) by maili.marvell.com (Postfix) with ESMTP id C371E3F70A6; Tue, 5 Dec 2023 01:45:45 -0800 (PST) From: Rakesh Kudurumalla To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao CC: , , Rakesh Kudurumalla Subject: [PATCH 2/2] net/cnxk: dump Rx descriptor info to file Date: Tue, 5 Dec 2023 15:15:39 +0530 Message-ID: <20231205094539.1377142-2-rkudurumalla@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20231205094539.1377142-1-rkudurumalla@marvell.com> References: <20231205094539.1377142-1-rkudurumalla@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: iv9Et-UcPPNn3-piUQAjdbGUTFIcYSrj X-Proofpoint-ORIG-GUID: iv9Et-UcPPNn3-piUQAjdbGUTFIcYSrj X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.997,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2023-12-05_04,2023-12-04_01,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add support for eth_rx_descriptor_dump for cn9k and cn10k. 
This patch dumps the contents of received packet descriptors from
the CQ to a file for debugging.

Signed-off-by: Rakesh Kudurumalla
---
 drivers/net/cnxk/cn10k_ethdev.c | 70 ++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_ethdev.c  | 55 +++++++++++++++++++++++++
 2 files changed, 125 insertions(+)

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 4a4e97287c..7eee9b1da8 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -656,6 +656,75 @@ cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+/* Get the number of pending CQEs on the Rx CQ, 0 on CQ status error */
+static int
+cn10k_nix_rx_avail_get(struct cn10k_eth_rxq *rxq)
+{
+	uint32_t qmask = rxq->qmask;
+	uint64_t reg, head, tail;
+	int available;
+
+	/* Use LDADDA version to avoid reorder */
+	reg = roc_atomic64_add_sync(rxq->wdata, rxq->cq_status);
+	/* CQ_OP_STATUS operation error */
+	if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
+	    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+		return 0;
+	tail = reg & 0xFFFFF;
+	head = (reg >> 20) & 0xFFFFF;
+	if (tail < head)
+		available = tail - head + qmask + 1;
+	else
+		available = tail - head;
+
+	return available;
+}
+
+/* Dump 'num' Rx descriptors starting at 'offset' from CQ head to 'file' */
+static int
+cn10k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid,
+			 uint16_t offset, uint16_t num, FILE *file)
+{
+	struct cn10k_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
+	const uint64_t data_off = rxq->data_off;
+	const uint32_t qmask = rxq->qmask;
+	const uintptr_t desc = rxq->desc;
+	struct cpt_parse_hdr_s *cpth;
+	uint32_t head = rxq->head;
+	struct nix_cqe_hdr_s *cq;
+	uint16_t count = 0;
+	int available_pkts;
+	uint64_t cq_w1;
+
+	available_pkts = cn10k_nix_rx_avail_get(rxq);
+
+	if ((offset + num - 1) >= available_pkts) {
+		plt_err("Invalid BD num=%u", num);
+		return -EINVAL;
+	}
+
+	while (count < num) {
+		/* Wrap the ring index before scaling it to a CQE offset */
+		cq = (struct nix_cqe_hdr_s *)
+			(desc + CQE_SZ((head + offset + count) & qmask));
+		cq_w1 = *((const uint64_t *)cq + 1);
+		if (cq_w1 & BIT(11)) {
+			/* CQE carries a CPT parse header, dump that instead */
+			rte_iova_t buff = *((rte_iova_t *)((uint64_t *)cq + 9));
+			struct rte_mbuf *mbuf =
+				(struct rte_mbuf *)(buff - data_off);
+			cpth = (struct cpt_parse_hdr_s *)
+				((uintptr_t)mbuf + (uint16_t)data_off);
+			roc_cpt_parse_hdr_dump(file, cpth);
+		} else {
+			roc_nix_cqe_dump(file, cq);
+		}
+
+		count++;
+	}
+	return 0;
+}
+
 static int
 cn10k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
 			   int mark_yellow, int mark_red,
@@ -794,6 +863,7 @@ nix_eth_dev_ops_override(void)
 		cn10k_nix_reassembly_capability_get;
 	cnxk_eth_dev_ops.ip_reassembly_conf_get = cn10k_nix_reassembly_conf_get;
 	cnxk_eth_dev_ops.ip_reassembly_conf_set = cn10k_nix_reassembly_conf_set;
+	cnxk_eth_dev_ops.eth_rx_descriptor_dump = cn10k_rx_descriptor_dump;
 }
 
 /* Update platform specific tm ops */
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index bae4dda5e2..e88631a02e 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -664,6 +664,60 @@ cn9k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
 	return rc;
 }
 
+/* Get the number of pending CQEs on the Rx CQ, 0 on CQ status error */
+static int
+cn9k_nix_rx_avail_get(struct cn9k_eth_rxq *rxq)
+{
+	uint32_t qmask = rxq->qmask;
+	uint64_t reg, head, tail;
+	int available;
+
+	/* Use LDADDA version to avoid reorder */
+	reg = roc_atomic64_add_sync(rxq->wdata, rxq->cq_status);
+	/* CQ_OP_STATUS operation error */
+	if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
+	    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+		return 0;
+	tail = reg & 0xFFFFF;
+	head = (reg >> 20) & 0xFFFFF;
+	if (tail < head)
+		available = tail - head + qmask + 1;
+	else
+		available = tail - head;
+
+	return available;
+}
+
+/* Dump 'num' Rx descriptors starting at 'offset' from CQ head to 'file' */
+static int
+cn9k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid,
+			uint16_t offset, uint16_t num, FILE *file)
+{
+	struct cn9k_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
+	const uint32_t qmask = rxq->qmask;
+	const uintptr_t desc = rxq->desc;
+	uint32_t head = rxq->head;
+	struct nix_cqe_hdr_s *cq;
+	uint16_t count = 0;
+	int available_pkts;
+
+	available_pkts = cn9k_nix_rx_avail_get(rxq);
+
+	if ((offset + num - 1) >= available_pkts) {
+		plt_err("Invalid BD num=%u", num);
+		return -EINVAL;
+	}
+
+	while (count < num) {
+		/* Wrap the ring index before scaling it to a CQE offset */
+		cq = (struct nix_cqe_hdr_s *)
+			(desc + CQE_SZ((head + offset + count) & qmask));
+		roc_nix_cqe_dump(file, cq);
+		count++;
+	}
+	return 0;
+}
+
 /* Update platform specific eth dev ops */
 static void
 nix_eth_dev_ops_override(void)
@@ -687,6 +741,7 @@ nix_eth_dev_ops_override(void)
 	cnxk_eth_dev_ops.mtr_ops_get = NULL;
 	cnxk_eth_dev_ops.timesync_read_tx_timestamp =
 		cn9k_nix_timesync_read_tx_timestamp;
+	cnxk_eth_dev_ops.eth_rx_descriptor_dump = cn9k_rx_descriptor_dump;
 }
 
 /* Update platform specific eth dev ops */