From patchwork Sun Mar 3 17:38:11 2024
X-Patchwork-Submitter: Harman Kalra
X-Patchwork-Id: 137815
X-Patchwork-Delegate: jerinj@marvell.com
From: Harman Kalra
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
 Harman Kalra
Subject: [PATCH v6 01/23] common/cnxk: add support for representors
Date: Sun, 3 Mar 2024 23:08:11 +0530
Message-ID: <20240303173833.100039-2-hkalra@marvell.com>
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com>
 <20240303173833.100039-1-hkalra@marvell.com>
List-Id: DPDK patches and discussions

Introduce a new mailbox message for registering the base device behind
all representors, and register debug log types for the representors and
the base device driver.
Signed-off-by: Harman Kalra
---
 doc/guides/nics/cnxk.rst            |  4 ++++
 drivers/common/cnxk/roc_constants.h |  1 +
 drivers/common/cnxk/roc_mbox.h      |  8 ++++++++
 drivers/common/cnxk/roc_nix.c       | 31 +++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix.h       |  3 +++
 drivers/common/cnxk/roc_platform.c  |  2 ++
 drivers/common/cnxk/roc_platform.h  |  4 ++++
 drivers/common/cnxk/version.map     |  3 +++
 8 files changed, 56 insertions(+)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 39660dba82..1ab8a0ca74 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -654,3 +654,7 @@ Debugging Options
    +---+------------+-------------------------------------------------------+
    | 2 | NPC        | --log-level='pmd\.net.cnxk\.flow,8'                   |
    +---+------------+-------------------------------------------------------+
+   | 3 | REP        | --log-level='pmd\.net.cnxk\.rep,8'                    |
+   +---+------------+-------------------------------------------------------+
+   | 4 | ESW        | --log-level='pmd\.net.cnxk\.esw,8'                    |
+   +---+------------+-------------------------------------------------------+
diff --git a/drivers/common/cnxk/roc_constants.h b/drivers/common/cnxk/roc_constants.h
index 291b6a4bc9..cb4edbea58 100644
--- a/drivers/common/cnxk/roc_constants.h
+++ b/drivers/common/cnxk/roc_constants.h
@@ -43,6 +43,7 @@
 #define PCI_DEVID_CNXK_RVU_NIX_INL_VF 0xA0F1
 #define PCI_DEVID_CNXK_RVU_REE_PF     0xA0f4
 #define PCI_DEVID_CNXK_RVU_REE_VF     0xA0f5
+#define PCI_DEVID_CNXK_RVU_ESWITCH_PF 0xA0E0

 #define PCI_DEVID_CN9K_CGX  0xA059
 #define PCI_DEVID_CN10K_RPM 0xA060
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index d8a8494ac4..54956a6a06 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -68,6 +68,7 @@ struct mbox_msghdr {
	M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp)               \
	M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req,        \
	  msg_rsp)                                                             \
+	M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp)           \
	/* CGX mbox IDs (range 0x200 - 0x3FF) */                               \
	M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp)             \
	M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp)               \
@@ -548,6 +549,13 @@ struct lmtst_tbl_setup_req {
	uint64_t __io rsvd[2]; /* Future use */
 };

+#define MAX_PFVF_REP 64
+struct get_rep_cnt_rsp {
+	struct mbox_msghdr hdr;
+	uint16_t __io rep_cnt;
+	uint16_t __io rep_pfvf_map[MAX_PFVF_REP];
+};
+
 /* CGX mbox message formats */
 /* CGX mailbox error codes
  * Range 1101 - 1200.
diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 90ccb260fb..e68d472f43 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -533,3 +533,34 @@ roc_nix_dev_fini(struct roc_nix *roc_nix)
	rc |= dev_fini(&nix->dev, nix->pci_dev);
	return rc;
 }
+
+int
+roc_nix_max_rep_count(struct roc_nix *roc_nix)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct dev *dev = &nix->dev;
+	struct mbox *mbox = mbox_get(dev->mbox);
+	struct get_rep_cnt_rsp *rsp;
+	struct msg_req *req;
+	int rc, i;
+
+	req = mbox_alloc_msg_get_rep_cnt(mbox);
+	if (!req) {
+		rc = -ENOSPC;
+		goto exit;
+	}
+
+	req->hdr.pcifunc = roc_nix_get_pf_func(roc_nix);
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	roc_nix->rep_cnt = rsp->rep_cnt;
+	for (i = 0; i < rsp->rep_cnt; i++)
+		roc_nix->rep_pfvf_map[i] = rsp->rep_pfvf_map[i];
+
+exit:
+	mbox_put(mbox);
+	return rc;
+}
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 4db71544f0..0289ce9820 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -482,6 +482,8 @@ struct roc_nix {
	uint32_t buf_sz;
	uint64_t meta_aura_handle;
	uintptr_t meta_mempool;
+	uint16_t rep_cnt;
+	uint16_t rep_pfvf_map[MAX_PFVF_REP];
	TAILQ_ENTRY(roc_nix) next;

 #define ROC_NIX_MEM_SZ (6 * 1070)
@@ -1014,4 +1016,5 @@ int __roc_api roc_nix_mcast_list_setup(struct mbox *mbox, uint8_t intf, int nb_e
				       uint16_t *pf_funcs, uint16_t *channels, uint32_t *rqs,
				       uint32_t *grp_index, uint32_t *start_index);
 int __roc_api roc_nix_mcast_list_free(struct mbox *mbox, uint32_t mcast_grp_index);
+int __roc_api roc_nix_max_rep_count(struct roc_nix *roc_nix);
 #endif /* _ROC_NIX_H_ */
diff --git a/drivers/common/cnxk/roc_platform.c b/drivers/common/cnxk/roc_platform.c
index 15cbb6d68f..181902a585 100644
--- a/drivers/common/cnxk/roc_platform.c
+++ b/drivers/common/cnxk/roc_platform.c
@@ -96,4 +96,6 @@ RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_sso, NOTICE);
 RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_tim, NOTICE);
 RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_tm, NOTICE);
 RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_dpi, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_rep, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_esw, NOTICE);
 RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_ree, NOTICE);
diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
index b7fe132093..4dc69765a8 100644
--- a/drivers/common/cnxk/roc_platform.h
+++ b/drivers/common/cnxk/roc_platform.h
@@ -264,6 +264,8 @@ extern int cnxk_logtype_tim;
 extern int cnxk_logtype_tm;
 extern int cnxk_logtype_ree;
 extern int cnxk_logtype_dpi;
+extern int cnxk_logtype_rep;
+extern int cnxk_logtype_esw;

 #define RTE_LOGTYPE_CNXK cnxk_logtype_base

@@ -295,6 +297,8 @@ extern int cnxk_logtype_dpi;
 #define plt_tm_dbg(fmt, ...)  plt_dbg(tm, fmt, ##__VA_ARGS__)
 #define plt_ree_dbg(fmt, ...) plt_dbg(ree, fmt, ##__VA_ARGS__)
 #define plt_dpi_dbg(fmt, ...) plt_dbg(dpi, fmt, ##__VA_ARGS__)
+#define plt_rep_dbg(fmt, ...) plt_dbg(rep, fmt, ##__VA_ARGS__)
+#define plt_esw_dbg(fmt, ...) plt_dbg(esw, fmt, ##__VA_ARGS__)

 /* Datapath logs */
 #define plt_dp_err(fmt, args...) \
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 76dbbe4666..9bea7af6f4 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -8,12 +8,14 @@ INTERNAL {
	cnxk_logtype_base;
	cnxk_logtype_cpt;
	cnxk_logtype_dpi;
+	cnxk_logtype_esw;
	cnxk_logtype_mbox;
	cnxk_logtype_ml;
	cnxk_logtype_nix;
	cnxk_logtype_npa;
	cnxk_logtype_npc;
	cnxk_logtype_ree;
+	cnxk_logtype_rep;
	cnxk_logtype_sso;
	cnxk_logtype_tim;
	cnxk_logtype_tm;
@@ -216,6 +218,7 @@ INTERNAL {
	roc_nix_get_base_chan;
	roc_nix_get_pf;
	roc_nix_get_pf_func;
+	roc_nix_max_rep_count;
	roc_nix_get_rx_chan_cnt;
	roc_nix_get_vf;
	roc_nix_get_vwqe_interval;
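For orientation, a minimal sketch of how the new ROC API above might be
consumed (a hypothetical helper, not part of the series; it assumes a
struct roc_nix that has already been through roc_nix_dev_init(), and uses
only symbols this patch introduces):

/* Sketch: fetch the representee map via the new GET_REP_CNT mailbox.
 * roc_nix_max_rep_count() fills roc_nix->rep_cnt and
 * roc_nix->rep_pfvf_map[] from the AF response.
 */
static int
fetch_representee_map(struct roc_nix *nix)
{
	int rc, i;

	rc = roc_nix_max_rep_count(nix);
	if (rc)
		return rc;

	for (i = 0; i < nix->rep_cnt; i++)
		plt_info("rep %d -> pcifunc 0x%x", i, nix->rep_pfvf_map[i]);

	return 0;
}

The rep_pfvf_map table is what later patches in this series match
devargs-specified PF/VF pairs against when deciding whether a representor
port can be created.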
From patchwork Sun Mar 3 17:38:12 2024
X-Patchwork-Submitter: Harman Kalra
X-Patchwork-Id: 137816
X-Patchwork-Delegate: jerinj@marvell.com
From: Harman Kalra
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
 Harman Kalra, Anatoly Burakov
Subject: [PATCH v6 02/23] net/cnxk: implementing eswitch device
Date: Sun, 3 Mar 2024 23:08:12 +0530
Message-ID: <20240303173833.100039-3-hkalra@marvell.com>
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com>
 <20240303173833.100039-1-hkalra@marvell.com>
List-Id: DPDK patches and discussions

The eswitch device is a parent or base device behind all the
representors, acting as the transport layer between representors and
representees.

Signed-off-by: Harman Kalra
---
 drivers/net/cnxk/cnxk_eswitch.c | 379 ++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_eswitch.h | 103 +++++++++
 drivers/net/cnxk/meson.build    |   1 +
 3 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cnxk/cnxk_eswitch.c
 create mode 100644 drivers/net/cnxk/cnxk_eswitch.h

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
new file mode 100644
index 0000000000..8f216d7c88
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include
+
+#define CNXK_NIX_DEF_SQ_COUNT 512
+
+static int
+cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
+{
+	struct cnxk_eswitch_dev *eswitch_dev;
+	int rc = 0;
+
+	PLT_SET_USED(pci_dev);
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	eswitch_dev = cnxk_eswitch_pmd_priv();
+	if (!eswitch_dev) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rte_free(eswitch_dev);
+exit:
+	return rc;
+}
+
+int
+cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev *eswitch_dev)
+{
+	int rc;
+
+	/* Enable Rx in NPC */
+	rc = roc_nix_npc_rx_ena_dis(&eswitch_dev->nix, true);
+	if (rc) {
+		plt_err("Failed to enable NPC rx %d", rc);
+		goto done;
+	}
+
+	rc = roc_npc_mcam_enable_all_entries(&eswitch_dev->npc, 1);
+	if (rc) {
+		plt_err("Failed to enable NPC entries %d", rc);
+		goto done;
+	}
+
+done:
+	return 0;
+}
+
+int
+cnxk_eswitch_txq_start(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+	struct roc_nix_sq *sq = &eswitch_dev->txq[qid].sqs;
+	int rc = -EINVAL;
+
+	if (eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STARTED)
+		return 0;
+
+	if (eswitch_dev->txq[qid].state != CNXK_ESWITCH_QUEUE_STATE_CONFIGURED) {
+		plt_err("Eswitch txq %d not configured yet", qid);
+		goto done;
+	}
+
+	rc = roc_nix_sq_ena_dis(sq, true);
+	if (rc) {
+		plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
+		goto done;
+	}
+
+	eswitch_dev->txq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STARTED;
+done:
+	return rc;
+}
+
+int
+cnxk_eswitch_txq_stop(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+	struct roc_nix_sq *sq = &eswitch_dev->txq[qid].sqs;
+	int rc = -EINVAL;
+
+	if (eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STOPPED ||
+	    eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_RELEASED)
+		return 0;
+
+	if (eswitch_dev->txq[qid].state != CNXK_ESWITCH_QUEUE_STATE_STARTED) {
+		plt_err("Eswitch txq %d not started", qid);
+		goto done;
+	}
+
+	rc = roc_nix_sq_ena_dis(sq, false);
+	if (rc) {
+		plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid, rc);
+		goto done;
+	}
+
+	eswitch_dev->txq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STOPPED;
+done:
+	return rc;
+}
+
+int
+cnxk_eswitch_rxq_start(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+	struct roc_nix_rq *rq = &eswitch_dev->rxq[qid].rqs;
+	int rc = -EINVAL;
+
+	if (eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STARTED)
+		return 0;
+
+	if (eswitch_dev->rxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_CONFIGURED) {
+		plt_err("Eswitch rxq %d not configured yet", qid);
+		goto done;
+	}
+
+	rc = roc_nix_rq_ena_dis(rq, true);
+	if (rc) {
+		plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
+		goto done;
+	}
+
+	eswitch_dev->rxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STARTED;
+done:
+	return rc;
+}
+
+int
+cnxk_eswitch_rxq_stop(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+	struct roc_nix_rq *rq = &eswitch_dev->rxq[qid].rqs;
+	int rc = -EINVAL;
+
+	if (eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STOPPED ||
+	    eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_RELEASED)
+		return 0;
+
+	if (eswitch_dev->rxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_STARTED) {
+		plt_err("Eswitch rxq %d not started", qid);
+		goto done;
+	}
+
+	rc = roc_nix_rq_ena_dis(rq, false);
+	if (rc) {
+		plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
+		goto done;
+	}
+
+	eswitch_dev->rxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STOPPED;
+done:
+	return rc;
+}
+
+int
+cnxk_eswitch_rxq_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+	struct roc_nix_rq *rq;
+	struct roc_nix_cq *cq;
+	int rc;
+
+	if (eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_RELEASED)
+		return 0;
+
+	/* Cleanup ROC SQ */
+	rq = &eswitch_dev->rxq[qid].rqs;
+	rc = roc_nix_rq_fini(rq);
+	if (rc) {
+		plt_err("Failed to cleanup sq, rc=%d", rc);
+		goto fail;
+	}
+
+	eswitch_dev->rxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_RELEASED;
+
+	/* Cleanup ROC CQ */
+	cq = &eswitch_dev->cxq[qid].cqs;
+	rc = roc_nix_cq_fini(cq);
+	if (rc) {
+		plt_err("Failed to cleanup cq, rc=%d", rc);
+		goto fail;
+	}
+
+	eswitch_dev->cxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_RELEASED;
+fail:
+	return rc;
+}
+
+int
+cnxk_eswitch_rxq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint16_t nb_desc,
+		       const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
+{
+	struct roc_nix *nix = &eswitch_dev->nix;
+	struct rte_mempool *lpb_pool = mp;
+	struct rte_mempool_ops *ops;
+	const char *platform_ops;
+	struct roc_nix_rq *rq;
+	struct roc_nix_cq *cq;
+	uint16_t first_skip;
+	int rc = -EINVAL;
+
+	if (eswitch_dev->rxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_RELEASED ||
+	    eswitch_dev->cxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_RELEASED) {
+		plt_err("Queue %d is in invalid state %d, cannot be setup", qid,
+			eswitch_dev->txq[qid].state);
+		goto fail;
+	}
+
+	RTE_SET_USED(rx_conf);
+	platform_ops = rte_mbuf_platform_mempool_ops();
+	/* This driver needs cnxk_npa mempool ops to work */
+	ops = rte_mempool_get_ops(lpb_pool->ops_index);
+	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
+		plt_err("mempool ops should be of cnxk_npa type");
+		goto fail;
+	}
+
+	if (lpb_pool->pool_id == 0) {
+		plt_err("Invalid pool_id");
+		goto fail;
+	}
+
+	/* Setup ROC CQ */
+	cq = &eswitch_dev->cxq[qid].cqs;
+	memset(cq, 0, sizeof(struct roc_nix_cq));
+	cq->qid = qid;
+	cq->nb_desc = nb_desc;
+	rc = roc_nix_cq_init(nix, cq);
+	if (rc) {
+		plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
+		goto fail;
+	}
+	eswitch_dev->cxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_CONFIGURED;
+
+	/* Setup ROC RQ */
+	rq = &eswitch_dev->rxq[qid].rqs;
+	memset(rq, 0, sizeof(struct roc_nix_rq));
+	rq->qid = qid;
+	rq->cqid = cq->qid;
+	rq->aura_handle = lpb_pool->pool_id;
+	rq->flow_tag_width = 32;
+	rq->sso_ena = false;
+
+	/* Calculate first mbuf skip */
+	first_skip = (sizeof(struct rte_mbuf));
+	first_skip += RTE_PKTMBUF_HEADROOM;
+	first_skip += rte_pktmbuf_priv_size(lpb_pool);
+	rq->first_skip = first_skip;
+	rq->later_skip = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(lpb_pool);
+	rq->lpb_size = lpb_pool->elt_size;
+	if (roc_errata_nix_no_meta_aura())
+		rq->lpb_drop_ena = true;
+
+	rc = roc_nix_rq_init(nix, rq, true);
+	if (rc) {
+		plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
+		goto cq_fini;
+	}
+	eswitch_dev->rxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_CONFIGURED;
+
+	return 0;
+cq_fini:
+	rc |= roc_nix_cq_fini(cq);
+fail:
+	return rc;
+}
+
+int
+cnxk_eswitch_txq_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
+{
+	struct roc_nix_sq *sq;
+	int rc = 0;
+
+	if (eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_RELEASED)
+		return 0;
+
+	/* Cleanup ROC SQ */
+	sq = &eswitch_dev->txq[qid].sqs;
+	rc = roc_nix_sq_fini(sq);
+	if (rc) {
+		plt_err("Failed to cleanup sq, rc=%d", rc);
+		goto fail;
+	}
+
+	eswitch_dev->txq[qid].state = CNXK_ESWITCH_QUEUE_STATE_RELEASED;
+fail:
+	return rc;
+}
+
+int
+cnxk_eswitch_txq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint16_t nb_desc,
+		       const struct rte_eth_txconf *tx_conf)
+{
+	struct roc_nix_sq *sq;
+	int rc = 0;
+
+	if (eswitch_dev->txq[qid].state != CNXK_ESWITCH_QUEUE_STATE_RELEASED) {
+		plt_err("Queue %d is in invalid state %d, cannot be setup", qid,
+			eswitch_dev->txq[qid].state);
+		rc = -EINVAL;
+		goto fail;
+	}
+	RTE_SET_USED(tx_conf);
+	/* Setup ROC SQ */
+	sq = &eswitch_dev->txq[qid].sqs;
+	memset(sq, 0, sizeof(struct roc_nix_sq));
+	sq->qid = qid;
+	sq->nb_desc = nb_desc;
+	sq->max_sqe_sz = NIX_MAXSQESZ_W8;
+	if (sq->nb_desc >= CNXK_NIX_DEF_SQ_COUNT)
+		sq->fc_hyst_bits = 0x1;
+
+	rc = roc_nix_sq_init(&eswitch_dev->nix, sq);
+	if (rc)
+		plt_err("Failed to init sq=%d, rc=%d", qid, rc);
+
+	eswitch_dev->txq[qid].state = CNXK_ESWITCH_QUEUE_STATE_CONFIGURED;
+
+fail:
+	return rc;
+}
+
+static int
+cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+	struct cnxk_eswitch_dev *eswitch_dev;
+	const struct rte_memzone *mz = NULL;
+	int rc = -ENOMEM;
+
+	RTE_SET_USED(pci_drv);
+
+	eswitch_dev = cnxk_eswitch_pmd_priv();
+	if (!eswitch_dev) {
+		rc = roc_plt_init();
+		if (rc) {
+			plt_err("Failed to initialize platform model, rc=%d", rc);
+			return rc;
+		}
+
+		if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+			return 0;
+
+		mz = rte_memzone_reserve_aligned(CNXK_REP_ESWITCH_DEV_MZ, sizeof(*eswitch_dev),
+						 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+		if (mz == NULL) {
+			plt_err("Failed to reserve a memzone");
+			goto fail;
+		}
+
+		eswitch_dev = mz->addr;
+		eswitch_dev->pci_dev = pci_dev;
+	}
+
+	/* Spinlock for synchronization between representors traffic and control
+	 * messages
+	 */
+	rte_spinlock_init(&eswitch_dev->rep_lock);
+
+	return rc;
+fail:
+	return rc;
+}
+
+static const struct rte_pci_id cnxk_eswitch_pci_map[] = {
+	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_ESWITCH_PF)},
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver cnxk_eswitch_pci = {
+	.id_table = cnxk_eswitch_pci_map,
+	.drv_flags =
+		RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA | RTE_PCI_DRV_PROBE_AGAIN,
+	.probe = cnxk_eswitch_dev_probe,
+	.remove = cnxk_eswitch_dev_remove,
+};
+
+RTE_PMD_REGISTER_PCI(cnxk_eswitch, cnxk_eswitch_pci);
+RTE_PMD_REGISTER_PCI_TABLE(cnxk_eswitch, cnxk_eswitch_pci_map);
+RTE_PMD_REGISTER_KMOD_DEP(cnxk_eswitch, "vfio-pci");
diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h
new file mode 100644
index 0000000000..d1b4fa8761
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_eswitch.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#ifndef __CNXK_ESWITCH_H__
+#define __CNXK_ESWITCH_H__
+
+#include
+#include
+
+#include
+
+#include "cn10k_tx.h"
+
+#define CNXK_ESWITCH_CTRL_MSG_SOCK_PATH "/tmp/cxk_rep_ctrl_msg_sock"
+#define CNXK_REP_ESWITCH_DEV_MZ		"cnxk_eswitch_dev"
+#define CNXK_ESWITCH_VLAN_TPID		0x8100
+#define CNXK_ESWITCH_MAX_TXQ		256
+#define CNXK_ESWITCH_MAX_RXQ		256
+#define CNXK_ESWITCH_LBK_CHAN		63
+#define CNXK_ESWITCH_VFPF_SHIFT		8
+
+#define CNXK_ESWITCH_QUEUE_STATE_RELEASED   0
+#define CNXK_ESWITCH_QUEUE_STATE_CONFIGURED 1
+#define CNXK_ESWITCH_QUEUE_STATE_STARTED    2
+#define CNXK_ESWITCH_QUEUE_STATE_STOPPED    3
+
+struct cnxk_rep_info {
+	struct rte_eth_dev *rep_eth_dev;
+};
+
+struct cnxk_eswitch_txq {
+	struct roc_nix_sq sqs;
+	uint8_t state;
+};
+
+struct cnxk_eswitch_rxq {
+	struct roc_nix_rq rqs;
+	uint8_t state;
+};
+
+struct cnxk_eswitch_cxq {
+	struct roc_nix_cq cqs;
+	uint8_t state;
+};
+
+TAILQ_HEAD(eswitch_flow_list, roc_npc_flow);
+struct cnxk_eswitch_dev {
+	/* Input parameters */
+	struct plt_pci_device *pci_dev;
+	/* ROC NIX */
+	struct roc_nix nix;
+
+	/* ROC NPC */
+	struct roc_npc npc;
+
+	/* ROC NPA */
+	struct rte_mempool *ctrl_chan_pool;
+	const struct plt_memzone *pktmem_mz;
+	uint64_t pkt_aura;
+
+	/* Eswitch RQs, SQs and CQs */
+	struct cnxk_eswitch_txq *txq;
+	struct cnxk_eswitch_rxq *rxq;
+	struct cnxk_eswitch_cxq *cxq;
+
+	/* Configured queue count */
+	uint16_t nb_rxq;
+	uint16_t nb_txq;
+	uint16_t rep_cnt;
+	uint8_t configured;
+
+	/* Port representor fields */
+	rte_spinlock_t rep_lock;
+	uint16_t switch_domain_id;
+	uint16_t eswitch_vdev;
+	struct cnxk_rep_info *rep_info;
+};
+
+static inline struct cnxk_eswitch_dev *
+cnxk_eswitch_pmd_priv(void)
+{
+	const struct rte_memzone *mz;
+
+	mz = rte_memzone_lookup(CNXK_REP_ESWITCH_DEV_MZ);
+	if (!mz)
+		return NULL;
+
+	return mz->addr;
+}
+
+int cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev *eswitch_dev);
+int cnxk_eswitch_txq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint16_t nb_desc,
+			   const struct rte_eth_txconf *tx_conf);
+int cnxk_eswitch_txq_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
+int cnxk_eswitch_rxq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint16_t nb_desc,
+			   const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);
+int cnxk_eswitch_rxq_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
+int cnxk_eswitch_rxq_start(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
+int cnxk_eswitch_rxq_stop(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
+int cnxk_eswitch_txq_start(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
+int cnxk_eswitch_txq_stop(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
+#endif /* __CNXK_ESWITCH_H__ */
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index e83f3c9050..012d098f80 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -28,6 +28,7 @@ sources = files(
         'cnxk_ethdev_sec.c',
         'cnxk_ethdev_telemetry.c',
         'cnxk_ethdev_sec_telemetry.c',
+        'cnxk_eswitch.c',
         'cnxk_link.c',
         'cnxk_lookup.c',
         'cnxk_ptp.c',
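The queue helpers above implement a small per-queue state machine:
RELEASED -> CONFIGURED (setup) -> STARTED -> STOPPED, and back to
RELEASED on release. A sketch of the expected call order for one queue
pair, using only the helpers declared in cnxk_eswitch.h (hypothetical
wrapper; eswitch_dev and a cnxk_npa-backed mempool mp are assumed to
exist):

/* Sketch: bring up eswitch queue 0 through the state machine. */
static int
eswitch_queue_bringup(struct cnxk_eswitch_dev *eswitch_dev, struct rte_mempool *mp)
{
	uint16_t qid = 0;
	int rc;

	/* RELEASED -> CONFIGURED */
	rc = cnxk_eswitch_rxq_setup(eswitch_dev, qid, 512, NULL, mp);
	if (rc)
		return rc;
	rc = cnxk_eswitch_txq_setup(eswitch_dev, qid, 512, NULL);
	if (rc)
		return rc;

	/* CONFIGURED -> STARTED */
	rc = cnxk_eswitch_rxq_start(eswitch_dev, qid);
	if (rc)
		return rc;
	return cnxk_eswitch_txq_start(eswitch_dev, qid);
}

Each helper rejects out-of-order transitions (for example, starting a
queue that was never configured fails with -EINVAL), which keeps
representor setup and teardown idempotent.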
From patchwork Sun Mar 3 17:38:13 2024
X-Patchwork-Submitter: Harman Kalra
X-Patchwork-Id: 137817
X-Patchwork-Delegate: jerinj@marvell.com
From: Harman Kalra
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
 Harman Kalra
Subject: [PATCH v6 03/23] net/cnxk: eswitch HW resource configuration
Date: Sun, 3 Mar 2024 23:08:13 +0530
Message-ID: <20240303173833.100039-4-hkalra@marvell.com>
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com>
 <20240303173833.100039-1-hkalra@marvell.com>
List-Id: DPDK patches and discussions

Configure the hardware resources used by the eswitch device.
Signed-off-by: Harman Kalra
---
 drivers/net/cnxk/cnxk_eswitch.c | 217 +++++++++++++++++++++++++++++++-
 1 file changed, 216 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 8f216d7c88..810e7c9c25 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -6,13 +6,53 @@

 #define CNXK_NIX_DEF_SQ_COUNT 512

+static int
+eswitch_hw_rsrc_cleanup(struct cnxk_eswitch_dev *eswitch_dev, struct rte_pci_device *pci_dev)
+{
+	struct roc_nix *nix;
+	int rc = 0;
+
+	nix = &eswitch_dev->nix;
+
+	roc_nix_unregister_queue_irqs(nix);
+	roc_nix_tm_fini(nix);
+	rc = roc_nix_lf_free(nix);
+	if (rc) {
+		plt_err("Failed to cleanup sq, rc %d", rc);
+		goto exit;
+	}
+
+	/* Check if this device is hosting common resource */
+	nix = roc_idev_npa_nix_get();
+	if (!nix || nix->pci_dev != pci_dev) {
+		rc = 0;
+		goto exit;
+	}
+
+	/* Try nix fini now */
+	rc = roc_nix_dev_fini(nix);
+	if (rc == -EAGAIN) {
+		plt_info("Common resource in use by other devices %s", pci_dev->name);
+		goto exit;
+	} else if (rc) {
+		plt_err("Failed in nix dev fini, rc=%d", rc);
+		goto exit;
+	}
+
+	rte_free(eswitch_dev->txq);
+	rte_free(eswitch_dev->rxq);
+	rte_free(eswitch_dev->cxq);
+
+exit:
+	return rc;
+}
+
 static int
 cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
 {
	struct cnxk_eswitch_dev *eswitch_dev;
	int rc = 0;

-	PLT_SET_USED(pci_dev);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

@@ -22,6 +62,9 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
		goto exit;
	}

+	/* Cleanup HW resources */
+	eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
+
	rte_free(eswitch_dev);
 exit:
	return rc;
@@ -318,6 +361,170 @@ cnxk_eswitch_txq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint1
	return rc;
 }

+static int
+nix_lf_setup(struct cnxk_eswitch_dev *eswitch_dev)
+{
+	uint16_t nb_rxq, nb_txq, nb_cq;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct roc_nix *nix;
+	uint64_t rx_cfg;
+	void *qs;
+	int rc;
+
+	/* Initialize base roc nix */
+	nix = &eswitch_dev->nix;
+	nix->pci_dev = eswitch_dev->pci_dev;
+	nix->hw_vlan_ins = true;
+	nix->reta_sz = ROC_NIX_RSS_RETA_SZ_256;
+	rc = roc_nix_dev_init(nix);
+	if (rc) {
+		plt_err("Failed to init nix eswitch device, rc=%d(%s)", rc, roc_error_msg_get(rc));
+		goto fail;
+	}
+
+	/* Get the representors count */
+	rc = roc_nix_max_rep_count(&eswitch_dev->nix);
+	if (rc) {
+		plt_err("Failed to get rep cnt, rc=%d(%s)", rc, roc_error_msg_get(rc));
+		goto free_cqs;
+	}
+
+	/* Allocating an NIX LF */
+	nb_rxq = CNXK_ESWITCH_MAX_RXQ;
+	nb_txq = CNXK_ESWITCH_MAX_TXQ;
+	nb_cq = CNXK_ESWITCH_MAX_RXQ;
+	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
+	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
+	if (rc) {
+		plt_err("lf alloc failed = %s(%d)", roc_error_msg_get(rc), rc);
+		goto dev_fini;
+	}
+
+	if (nb_rxq) {
+		/* Allocate memory for eswitch rq's and cq's */
+		qs = plt_zmalloc(sizeof(struct cnxk_eswitch_rxq) * nb_rxq, 0);
+		if (!qs) {
+			plt_err("Failed to alloc eswitch rxq");
+			goto lf_free;
+		}
+		eswitch_dev->rxq = qs;
+	}
+
+	if (nb_txq) {
+		/* Allocate memory for roc sq's */
+		qs = plt_zmalloc(sizeof(struct cnxk_eswitch_txq) * nb_txq, 0);
+		if (!qs) {
+			plt_err("Failed to alloc eswitch txq");
+			goto free_rqs;
+		}
+		eswitch_dev->txq = qs;
+	}
+
+	if (nb_cq) {
+		qs = plt_zmalloc(sizeof(struct cnxk_eswitch_cxq) * nb_cq, 0);
+		if (!qs) {
+			plt_err("Failed to alloc eswitch cxq");
+			goto free_sqs;
+		}
+		eswitch_dev->cxq = qs;
+	}
+
+	eswitch_dev->nb_rxq = nb_rxq;
+	eswitch_dev->nb_txq = nb_txq;
+
+	/* Re-enable NIX LF error interrupts */
+	roc_nix_err_intr_ena_dis(nix, true);
+	roc_nix_ras_intr_ena_dis(nix, true);
+
+	rc = roc_nix_lso_fmt_setup(nix);
+	if (rc) {
+		plt_err("lso setup failed = %s(%d)", roc_error_msg_get(rc), rc);
+		goto free_cqs;
+	}
+
+	rc = roc_nix_switch_hdr_set(nix, 0, 0, 0, 0);
+	if (rc) {
+		plt_err("switch hdr set failed = %s(%d)", roc_error_msg_get(rc), rc);
+		goto free_cqs;
+	}
+
+	rc = roc_nix_tm_init(nix);
+	if (rc) {
+		plt_err("tm failed = %s(%d)", roc_error_msg_get(rc), rc);
+		goto free_cqs;
+	}
+
+	/* Register queue IRQs */
+	rc = roc_nix_register_queue_irqs(nix);
+	if (rc) {
+		plt_err("Failed to register queue interrupts rc=%d", rc);
+		goto tm_fini;
+	}
+
+	/* Enable default tree */
+	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
+	if (rc) {
+		plt_err("tm default hierarchy enable failed = %s(%d)", roc_error_msg_get(rc), rc);
+		goto q_irq_fini;
+	}
+
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.rxchan_cfg.enable = false;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc) {
+		plt_err("Failed to setup flow control, rc=%d(%s)", rc, roc_error_msg_get(rc));
+		goto q_irq_fini;
+	}
+
+	roc_nix_fc_mode_get(nix);
+
+	return rc;
+q_irq_fini:
+	roc_nix_unregister_queue_irqs(nix);
+tm_fini:
+	roc_nix_tm_fini(nix);
+free_cqs:
+	rte_free(eswitch_dev->cxq);
+free_sqs:
+	rte_free(eswitch_dev->txq);
+free_rqs:
+	rte_free(eswitch_dev->rxq);
+lf_free:
+	roc_nix_lf_free(nix);
+dev_fini:
+	roc_nix_dev_fini(nix);
+fail:
+	return rc;
+}
+
+static int
+eswitch_hw_rsrc_setup(struct cnxk_eswitch_dev *eswitch_dev, struct rte_pci_device *pci_dev)
+{
+	struct roc_nix *nix;
+	int rc;
+
+	nix = &eswitch_dev->nix;
+	rc = nix_lf_setup(eswitch_dev);
+	if (rc) {
+		plt_err("Failed to setup hw rsrc, rc=%d(%s)", rc, roc_error_msg_get(rc));
+		goto fail;
+	}
+
+	/* Initialize roc npc */
+	eswitch_dev->npc.roc_nix = nix;
+	eswitch_dev->npc.flow_max_priority = 3;
+	eswitch_dev->npc.flow_prealloc_size = 1;
+	rc = roc_npc_init(&eswitch_dev->npc);
+	if (rc)
+		goto rsrc_cleanup;
+
+	return rc;
+rsrc_cleanup:
+	eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
+fail:
+	return rc;
+}
+
 static int
 cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 {
@@ -347,6 +554,12 @@ cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pc

		eswitch_dev = mz->addr;
		eswitch_dev->pci_dev = pci_dev;
+
+		rc = eswitch_hw_rsrc_setup(eswitch_dev, pci_dev);
+		if (rc) {
+			plt_err("Failed to setup hw rsrc, rc=%d(%s)", rc, roc_error_msg_get(rc));
+			goto free_mem;
+		}
	}

	/* Spinlock for synchronization between representors traffic and control
@@ -355,6 +568,8 @@ cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pc
	rte_spinlock_init(&eswitch_dev->rep_lock);

	return rc;
+free_mem:
+	rte_memzone_free(mz);
 fail:
	return rc;
 }
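A point worth noting in the patch above: nix_lf_setup() acquires
resources in a fixed order (device init, LF alloc, queue memory, TM,
queue IRQs) and its error labels unwind them in exactly the reverse
order, as does eswitch_hw_rsrc_cleanup() at remove time. A compressed
sketch of the release side (hypothetical helper; the calls are the ROC
APIs used above, and nix is assumed fully set up):

/* Sketch: release order mirrors nix_lf_setup()'s acquire order. */
static void
eswitch_nix_teardown(struct roc_nix *nix)
{
	roc_nix_unregister_queue_irqs(nix); /* undo roc_nix_register_queue_irqs() */
	roc_nix_tm_fini(nix);               /* undo roc_nix_tm_init() */
	roc_nix_lf_free(nix);               /* undo roc_nix_lf_alloc() */
	roc_nix_dev_fini(nix);              /* undo roc_nix_dev_init() */
}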
From patchwork Sun Mar 3 17:38:14 2024
X-Patchwork-Submitter: Harman Kalra
X-Patchwork-Id: 137818
X-Patchwork-Delegate: jerinj@marvell.com
From: Harman Kalra
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
 Harman Kalra
Subject: [PATCH v6 04/23] net/cnxk: eswitch devargs parsing
Date: Sun, 3 Mar 2024 23:08:14 +0530
Message-ID: <20240303173833.100039-5-hkalra@marvell.com>
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com>
 <20240303173833.100039-1-hkalra@marvell.com>
List-Id: DPDK patches and discussions

Implement the devargs parsing logic through which the representor
patterns are provided. These patterns define the representees for which
representors shall be created.
Signed-off-by: Harman Kalra
---
 drivers/net/cnxk/cnxk_eswitch.c         |  88 +++++++++++++++++
 drivers/net/cnxk/cnxk_eswitch.h         |  52 ++++++++++
 drivers/net/cnxk/cnxk_eswitch_devargs.c | 124 ++++++++++++++++++++++++
 drivers/net/cnxk/meson.build            |   1 +
 4 files changed, 265 insertions(+)
 create mode 100644 drivers/net/cnxk/cnxk_eswitch_devargs.c

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 810e7c9c25..687bb7d146 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -388,6 +388,7 @@ nix_lf_setup(struct cnxk_eswitch_dev *eswitch_dev)
		plt_err("Failed to get rep cnt, rc=%d(%s)", rc, roc_error_msg_get(rc));
		goto free_cqs;
	}
+	eswitch_dev->repr_cnt.max_repr = eswitch_dev->nix.rep_cnt;

	/* Allocating an NIX LF */
	nb_rxq = CNXK_ESWITCH_MAX_RXQ;
@@ -525,11 +526,73 @@ eswitch_hw_rsrc_setup(struct cnxk_eswitch_dev *eswitch_dev, struct rte_pci_devic
	return rc;
 }

+int
+cnxk_eswitch_representor_info_get(struct cnxk_eswitch_dev *eswitch_dev,
+				  struct rte_eth_representor_info *info)
+{
+	struct cnxk_eswitch_devargs *esw_da;
+	int rc = 0, n_entries, i, j = 0, k = 0;
+
+	for (i = 0; i < eswitch_dev->nb_esw_da; i++) {
+		for (j = 0; j < eswitch_dev->esw_da[i].nb_repr_ports; j++)
+			k++;
+	}
+	n_entries = k;
+
+	if (info == NULL)
+		goto out;
+
+	if ((uint32_t)n_entries > info->nb_ranges_alloc)
+		n_entries = info->nb_ranges_alloc;
+
+	k = 0;
+	info->controller = 0;
+	info->pf = 0;
+	for (i = 0; i < eswitch_dev->nb_esw_da; i++) {
+		esw_da = &eswitch_dev->esw_da[i];
+		info->ranges[k].type = esw_da->da.type;
+		switch (esw_da->da.type) {
+		case RTE_ETH_REPRESENTOR_PF:
+			info->ranges[k].controller = 0;
+			info->ranges[k].pf = esw_da->repr_hw_info[0].pfvf;
+			info->ranges[k].vf = 0;
+			info->ranges[k].id_base = info->ranges[i].pf;
+			info->ranges[k].id_end = info->ranges[i].pf;
+			snprintf(info->ranges[k].name, sizeof(info->ranges[k].name), "pf%d",
+				 info->ranges[k].pf);
+			k++;
+			break;
+		case RTE_ETH_REPRESENTOR_VF:
+			for (j = 0; j < esw_da->nb_repr_ports; j++) {
+				info->ranges[k].controller = 0;
+				info->ranges[k].pf = esw_da->da.ports[0];
+				info->ranges[k].vf = esw_da->repr_hw_info[j].pfvf;
+				info->ranges[k].id_base = esw_da->repr_hw_info[j].port_id;
+				info->ranges[k].id_end = esw_da->repr_hw_info[j].port_id;
+				snprintf(info->ranges[k].name, sizeof(info->ranges[k].name),
+					 "pf%dvf%d", info->ranges[k].pf, info->ranges[k].vf);
+				k++;
+			}
+			break;
+		default:
+			plt_err("Invalid type %d", esw_da->da.type);
+			rc = 0;
+			goto fail;
+		};
+	}
+	info->nb_ranges = k;
+fail:
+	return rc;
+out:
+	return n_entries;
+}
+
 static int
 cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 {
	struct cnxk_eswitch_dev *eswitch_dev;
	const struct rte_memzone *mz = NULL;
+	uint16_t num_reps;
	int rc = -ENOMEM;

	RTE_SET_USED(pci_drv);
@@ -562,12 +625,37 @@ cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pc
		}
	}

+	if (pci_dev->device.devargs) {
+		rc = cnxk_eswitch_repr_devargs(pci_dev, eswitch_dev);
+		if (rc)
+			goto rsrc_cleanup;
+	}
+
+	if (eswitch_dev->repr_cnt.nb_repr_created > eswitch_dev->repr_cnt.max_repr) {
+		plt_err("Representors to be created %d can be greater than max allowed %d",
+			eswitch_dev->repr_cnt.nb_repr_created, eswitch_dev->repr_cnt.max_repr);
+		rc = -EINVAL;
+		goto rsrc_cleanup;
+	}
+
+	num_reps = eswitch_dev->repr_cnt.nb_repr_created;
+	if (!num_reps) {
+		plt_err("No representors enabled");
+		goto fail;
+	}
+
+	plt_esw_dbg("Max no of reps %d reps to be created %d Eswtch pfunc %x",
+		    eswitch_dev->repr_cnt.max_repr, eswitch_dev->repr_cnt.nb_repr_created,
+		    roc_nix_get_pf_func(&eswitch_dev->nix));
+
	/* Spinlock for synchronization between representors traffic and control
	 * messages
	 */
	rte_spinlock_init(&eswitch_dev->rep_lock);

	return rc;
+rsrc_cleanup:
+	eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
 free_mem:
	rte_memzone_free(mz);
 fail:
diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h
index d1b4fa8761..6ff296399e 100644
--- a/drivers/net/cnxk/cnxk_eswitch.h
+++ b/drivers/net/cnxk/cnxk_eswitch.h
@@ -25,6 +25,47 @@
 #define CNXK_ESWITCH_QUEUE_STATE_STARTED    2
 #define CNXK_ESWITCH_QUEUE_STATE_STOPPED    3

+enum cnxk_esw_da_pattern_type {
+	CNXK_ESW_DA_TYPE_LIST = 0,
+	CNXK_ESW_DA_TYPE_PFVF,
+};
+
+struct cnxk_esw_repr_hw_info {
+	/* Representee pcifunc value */
+	uint16_t hw_func;
+	/* rep id in sync with kernel */
+	uint16_t rep_id;
+	/* pf or vf id */
+	uint16_t pfvf;
+	/* representor port id assigned to representee */
+	uint16_t port_id;
+};
+
+/* Structure representing per devarg information - this can be per representee
+ * or range of representee
+ */
+struct cnxk_eswitch_devargs {
+	/* Devargs populated */
+	struct rte_eth_devargs da;
+	/* HW info of representee */
+	struct cnxk_esw_repr_hw_info *repr_hw_info;
+	/* No of representor ports */
+	uint16_t nb_repr_ports;
+	/* Devargs pattern type */
+	enum cnxk_esw_da_pattern_type type;
+};
+
+struct cnxk_eswitch_repr_cnt {
+	/* Max possible representors */
+	uint16_t max_repr;
+	/* Representors to be created as per devargs passed */
+	uint16_t nb_repr_created;
+	/* Representors probed successfully */
+	uint16_t nb_repr_probed;
+	/* Representors started representing a representee */
+	uint16_t nb_repr_started;
+};
+
 struct cnxk_rep_info {
	struct rte_eth_dev *rep_eth_dev;
 };
@@ -70,6 +111,14 @@ struct cnxk_eswitch_dev {
	uint16_t rep_cnt;
	uint8_t configured;

+	/* Eswitch Representors Devargs */
+	uint16_t nb_esw_da;
+	uint16_t last_probed;
+	struct cnxk_eswitch_devargs esw_da[RTE_MAX_ETHPORTS];
+
+	/* No of representors */
+	struct cnxk_eswitch_repr_cnt repr_cnt;
+
	/* Port representor fields */
	rte_spinlock_t rep_lock;
	uint16_t switch_domain_id;
@@ -90,6 +139,9 @@ cnxk_eswitch_pmd_priv(void)
 }

 int cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev *eswitch_dev);
+int cnxk_eswitch_repr_devargs(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev);
+int cnxk_eswitch_representor_info_get(struct cnxk_eswitch_dev *eswitch_dev,
+				      struct rte_eth_representor_info *info);
 int cnxk_eswitch_txq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint16_t nb_desc,
			   const struct rte_eth_txconf *tx_conf);
 int cnxk_eswitch_txq_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
diff --git a/drivers/net/cnxk/cnxk_eswitch_devargs.c b/drivers/net/cnxk/cnxk_eswitch_devargs.c
new file mode 100644
index 0000000000..58383fb835
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_eswitch_devargs.c
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include
+
+#define PF_SHIFT 10
+static inline int
+get_hw_func(uint16_t pf, uint16_t vf)
+{
+	return (pf << PF_SHIFT) | vf;
+}
+
+static int
+populate_repr_hw_info(struct cnxk_eswitch_dev *eswitch_dev, struct rte_eth_devargs *eth_da,
+		      uint16_t idx)
+{
+	struct cnxk_eswitch_devargs *esw_da = &eswitch_dev->esw_da[idx];
+	uint16_t nb_repr_ports, hw_func;
+	int rc, i, j;
+
+	if (eth_da->type == RTE_ETH_REPRESENTOR_NONE) {
+		plt_err("No representor type found");
+		return -EINVAL;
+	}
+
+	if (eth_da->type != RTE_ETH_REPRESENTOR_VF && eth_da->type != RTE_ETH_REPRESENTOR_PF &&
+	    eth_da->type != RTE_ETH_REPRESENTOR_SF) {
+		plt_err("unsupported representor type %d\n", eth_da->type);
+		return -ENOTSUP;
+	}
+
+	nb_repr_ports = (eth_da->type == RTE_ETH_REPRESENTOR_PF) ? eth_da->nb_ports :
+								   eth_da->nb_representor_ports;
+	esw_da->nb_repr_ports = nb_repr_ports;
+	/* If plain list is provided as representor pattern */
+	if (eth_da->nb_ports == 0)
+		return 0;
+
+	esw_da->repr_hw_info = plt_zmalloc(nb_repr_ports * sizeof(struct cnxk_esw_repr_hw_info), 0);
+	if (!esw_da->repr_hw_info) {
+		plt_err("Failed to allocate memory");
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	plt_esw_dbg("Representor param %d has %d pfvf", idx, nb_repr_ports);
+	/* Check if representor can be created for PFVF and populating HW func list */
+	for (i = 0; i < nb_repr_ports; i++) {
+		if (eth_da->type == RTE_ETH_REPRESENTOR_PF)
+			hw_func = get_hw_func(eth_da->ports[i], 0);
+		else
+			hw_func = get_hw_func(eth_da->ports[0], eth_da->representor_ports[i] + 1);
+
+		for (j = 0; j < eswitch_dev->repr_cnt.max_repr; j++) {
+			if (eswitch_dev->nix.rep_pfvf_map[j] == hw_func)
+				break;
+		}
+
+		/* HW func which doesn not match the map table received from AF, no
+		 * representor port is assigned.
+		 */
+		if (j == eswitch_dev->repr_cnt.max_repr) {
+			plt_err("Representor port can't be created for PF%dVF%d", eth_da->ports[0],
+				eth_da->representor_ports[i]);
+			rc = -EINVAL;
+			goto fail;
+		}
+
+		esw_da->repr_hw_info[i].hw_func = hw_func;
+		esw_da->repr_hw_info[i].rep_id = j;
+		esw_da->repr_hw_info[i].pfvf = (eth_da->type == RTE_ETH_REPRESENTOR_PF) ?
+					       eth_da->ports[0] :
+					       eth_da->representor_ports[i];
+		plt_esw_dbg("  HW func %x index %d type %d", hw_func, j, eth_da->type);
+	}
+
+	esw_da->type = CNXK_ESW_DA_TYPE_PFVF;
+
+	return 0;
+fail:
+	return rc;
+}
+
+int
+cnxk_eswitch_repr_devargs(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev)
+{
+	struct rte_devargs *devargs = pci_dev->device.devargs;
+	struct rte_eth_devargs eth_da[RTE_MAX_ETHPORTS];
+	int rc, i, j, count;
+
+	if (devargs == NULL) {
+		plt_err("No devargs passed");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	/* Parse devargs passed to ESW device */
+	rc = rte_eth_devargs_parse(devargs->args, eth_da, RTE_MAX_ETHPORTS);
+	if (rc < 0) {
+		plt_err("Failed to parse devargs, err %d", rc);
+		goto fail;
+	}
+
+	count = rc;
+	j = eswitch_dev->nb_esw_da;
+	for (i = 0; i < count; i++) {
+		rc = populate_repr_hw_info(eswitch_dev, &eth_da[i], j);
+		if (rc) {
+			plt_err("Failed to populate representer hw funcs, err %d", rc);
+			goto fail;
+		}
+
+		rte_memcpy(&eswitch_dev->esw_da[j].da, &eth_da[i], sizeof(struct rte_eth_devargs));
+		/* No of representor ports to be created */
+		eswitch_dev->repr_cnt.nb_repr_created += eswitch_dev->esw_da[j].nb_repr_ports;
+		j++;
+	}
+	eswitch_dev->nb_esw_da += count;
+
+	return 0;
+fail:
+	return rc;
+}
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index 012d098f80..ea7e363e89 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -29,6 +29,7 @@ sources = files(
         'cnxk_ethdev_telemetry.c',
         'cnxk_ethdev_sec_telemetry.c',
         'cnxk_eswitch.c',
+        'cnxk_eswitch_devargs.c',
         'cnxk_link.c',
         'cnxk_lookup.c',
         'cnxk_ptp.c',
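A worked example of the mapping performed in populate_repr_hw_info()
above: the PF is packed into the upper bits of a pcifunc (PF_SHIFT is 10
in this patch) and, for VF representors, the devargs VF index is
incremented by one before packing, since pcifunc VF ids start at 1. A
standalone sketch reproducing the computation (illustrative only,
mirroring get_hw_func()):

#include <stdint.h>
#include <stdio.h>

#define PF_SHIFT 10

static uint16_t
hw_func(uint16_t pf, uint16_t vf)
{
	return (uint16_t)((pf << PF_SHIFT) | vf);
}

int
main(void)
{
	/* devargs pf1vf2: VF index 2 becomes pcifunc VF id 3 */
	printf("pf1vf2 -> hw_func 0x%x\n", hw_func(1, 2 + 1)); /* prints 0x403 */
	return 0;
}

The resulting value is looked up in the rep_pfvf_map table obtained from
the AF; if no slot matches, the devargs entry is rejected and no
representor port is created.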
From patchwork Sun Mar 3 17:38:15 2024
X-Patchwork-Submitter: Harman Kalra
X-Patchwork-Id: 137819
X-Patchwork-Delegate: jerinj@marvell.com
From: Harman Kalra
To: Thomas Monjalon, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
 Satha Rao, Harman Kalra, Anatoly Burakov
Subject: [PATCH v6 05/23] net/cnxk: probing representor ports
Date: Sun, 3 Mar 2024 23:08:15 +0530
Message-ID: <20240303173833.100039-6-hkalra@marvell.com>
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com>
 <20240303173833.100039-1-hkalra@marvell.com>
List-Id: DPDK patches and discussions

Basic skeleton for probing representor devices. If the PF device is
passed with "representor" devargs, representor ports get probed as
separate ethdev devices.

Signed-off-by: Harman Kalra
---
 MAINTAINERS                     |   1 +
 doc/guides/nics/cnxk.rst        |  35 +++++
 drivers/net/cnxk/cnxk_eswitch.c |  12 ++
 drivers/net/cnxk/cnxk_eswitch.h |   8 +-
 drivers/net/cnxk/cnxk_rep.c     | 256 ++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_rep.h     |  50 +++++++
 drivers/net/cnxk/cnxk_rep_ops.c | 129 ++++++++++++++++
 drivers/net/cnxk/meson.build    |   2 +
 8 files changed, 492 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cnxk/cnxk_rep.c
 create mode 100644 drivers/net/cnxk/cnxk_rep.h
 create mode 100644 drivers/net/cnxk/cnxk_rep_ops.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 962c359cdd..062812e7c3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -818,6 +818,7 @@ M: Nithin Dabilpuram
 M: Kiran Kumar K
 M: Sunil Kumar Kori
 M: Satha Rao
+M: Harman Kalra
 T: git://dpdk.org/next/dpdk-next-net-mrvl
 F: drivers/common/cnxk/
 F: drivers/net/cnxk/
diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 1ab8a0ca74..93d6db5cb0 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -37,6 +37,7 @@ Features of the CNXK Ethdev PMD are:
 - Inline IPsec processing support
 - Ingress meter support
 - Queue based priority flow control support
+- Port representors

 Prerequisites
 -------------
@@ -640,6 +641,40 @@ Runtime Config Options for inline device
    With the above configuration, driver would enable packet inject from ARM cores
    to crypto to process and send back in Rx path.

+Port Representors
+-----------------
+
+The CNXK driver supports port representor model by adding virtual ethernet
+ports providing a logical representation in DPDK for physical function(PF) or
+SR-IOV virtual function (VF) devices for control and monitoring.
+
+Base device or parent device underneath the representor ports is a eswitch
+device which is not a cnxk ethernet device but has NIC RX and TX capabilities.
+Each representor port is represented by a RQ and SQ pair of this eswitch
+device.
+
+Implementation supports representors for both physical function and virtual
+function.
+
+Port representor ethdev instances can be spawned on an as needed basis
+through configuration parameters passed to the driver of the underlying
+base device using devargs ``-a ,representor=pf*vf*``
+
+.. note::
+
+   Representor ports to be created for respective representees should be
+   defined via standard representor devargs patterns
+   Eg. To create a representor for representee PF1VF0, devargs to be passed
+   is ``-a ,representor=pf01vf0``
+
+   Implementation supports creation of multiple port representors with pattern:
+   ``-a ,representor=[pf0vf[1,2],pf1vf[2-5]]``
+
+Port representor PMD supports following operations:
+
+- Get PF/VF statistics
+- Flow operations - create, validate, destroy, query, flush, dump
+
 Debugging Options
 -----------------
diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 687bb7d146..599ed149ae 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -3,6 +3,7 @@
  */

 #include
+#include

 #define CNXK_NIX_DEF_SQ_COUNT 512

@@ -62,6 +63,10 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
		goto exit;
	}

+	/* Remove representor devices associated with PF */
+	if (eswitch_dev->repr_cnt.nb_repr_created)
+		cnxk_rep_dev_remove(eswitch_dev);
+
	/* Cleanup HW resources */
	eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);

@@ -648,6 +653,13 @@ cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pc
		    eswitch_dev->repr_cnt.max_repr, eswitch_dev->repr_cnt.nb_repr_created,
		    roc_nix_get_pf_func(&eswitch_dev->nix));

+	/* Probe representor ports */
+	rc = cnxk_rep_dev_probe(pci_dev, eswitch_dev);
+	if (rc) {
+		plt_err("Failed to probe representor ports");
+		goto rsrc_cleanup;
+	}
+
	/* Spinlock for synchronization between representors traffic and control
	 * messages
	 */
diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h
index 6ff296399e..dcd5add6d0 100644
--- a/drivers/net/cnxk/cnxk_eswitch.h
+++ b/drivers/net/cnxk/cnxk_eswitch.h
@@ -66,6 +66,11 @@ struct cnxk_eswitch_repr_cnt {
	uint16_t nb_repr_started;
 };

+struct cnxk_eswitch_switch_domain {
+	uint16_t switch_domain_id;
+	uint16_t pf;
+};
+
 struct cnxk_rep_info {
	struct rte_eth_dev *rep_eth_dev;
 };
@@ -121,7 +126,8 @@ struct cnxk_eswitch_dev {

	/* Port representor fields */
	rte_spinlock_t rep_lock;
-	uint16_t switch_domain_id;
+	uint16_t nb_switch_domain;
+	struct cnxk_eswitch_switch_domain sw_dom[RTE_MAX_ETHPORTS];
	uint16_t eswitch_vdev;
	struct cnxk_rep_info *rep_info;
 };
diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c
new file mode 100644
index 0000000000..55156f5b56
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_rep.c
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+#include
+
+#define PF_SHIFT 10
+#define PF_MASK	 0x3F
+
+static uint16_t
+get_pf(uint16_t hw_func)
+{
+	return (hw_func >> PF_SHIFT) & PF_MASK;
+}
+
+static uint16_t
+switch_domain_id_allocate(struct cnxk_eswitch_dev *eswitch_dev, uint16_t pf)
+{
+	int i = 0;
+
+	for (i = 0; i < eswitch_dev->nb_switch_domain; i++) {
+		if (eswitch_dev->sw_dom[i].pf == pf)
+			return eswitch_dev->sw_dom[i].switch_domain_id;
+	}
+
+	return RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+}
+
+int
+cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)
+{
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	plt_rep_dbg("Representor port:%d uninit", ethdev->data->port_id);
+	rte_free(ethdev->data->mac_addrs);
+	ethdev->data->mac_addrs = NULL;
+
+	return 0;
+}
+
+int
+cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < eswitch_dev->nb_switch_domain; i++) {
+		rc = rte_eth_switch_domain_free(eswitch_dev->sw_dom[i].switch_domain_id);
+		if (rc)
+			plt_err("Failed to free switch domain: %d", rc);
+	}
+
+	return rc;
+}
+
+static int
+cnxk_rep_parent_setup(struct cnxk_eswitch_dev *eswitch_dev)
+{
+	uint16_t pf, prev_pf = 0, switch_domain_id;
+	int rc, i, j = 0;
+
+	if (eswitch_dev->rep_info)
+		return 0;
+
+	eswitch_dev->rep_info =
+		plt_zmalloc(sizeof(eswitch_dev->rep_info[0]) * eswitch_dev->repr_cnt.max_repr, 0);
+	if (!eswitch_dev->rep_info) {
+		plt_err("Failed to alloc memory for rep info");
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	/* Allocate switch domain for all PFs (VFs will be under same domain as PF) */
+	for (i = 0; i < eswitch_dev->repr_cnt.max_repr; i++) {
+		pf = get_pf(eswitch_dev->nix.rep_pfvf_map[i]);
+		if (pf == prev_pf)
+			continue;
+
+		rc = rte_eth_switch_domain_alloc(&switch_domain_id);
+		if (rc) {
+			plt_err("Failed to alloc switch domain: %d", rc);
+			goto fail;
+		}
+		plt_rep_dbg("Allocated switch domain id %d for pf %d", switch_domain_id, pf);
+		eswitch_dev->sw_dom[j].switch_domain_id = switch_domain_id;
+		eswitch_dev->sw_dom[j].pf = pf;
+		prev_pf = pf;
+		j++;
+	}
+	eswitch_dev->nb_switch_domain = j;
+
+	return 0;
+fail:
+	return rc;
+}
+
+static uint16_t
+cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	PLT_SET_USED(tx_queue);
+	PLT_SET_USED(tx_pkts);
+	PLT_SET_USED(nb_pkts);
+
+	return 0;
+}
+
+static uint16_t
+cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	PLT_SET_USED(rx_queue);
+	PLT_SET_USED(rx_pkts);
+	PLT_SET_USED(nb_pkts);
+
+	return 0;
+}
+
+static int
+cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)
+{
+	struct cnxk_rep_dev *rep_params = (struct cnxk_rep_dev *)params;
+	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
+
+	rep_dev->port_id = rep_params->port_id;
+	rep_dev->switch_domain_id = rep_params->switch_domain_id;
+	rep_dev->parent_dev = rep_params->parent_dev;
+	rep_dev->hw_func = rep_params->hw_func;
+	rep_dev->rep_id = rep_params->rep_id;
+
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+	eth_dev->data->representor_id = rep_params->port_id;
+	eth_dev->data->backer_port_id = eth_dev->data->port_id;
+
+	eth_dev->data->mac_addrs = plt_zmalloc(RTE_ETHER_ADDR_LEN, 0);
+	if (!eth_dev->data->mac_addrs) {
+		plt_err("Failed to allocate memory for mac addr");
+		return -ENOMEM;
+	}
+
+	rte_eth_random_addr(rep_dev->mac_addr);
+	memcpy(eth_dev->data->mac_addrs, rep_dev->mac_addr, RTE_ETHER_ADDR_LEN);
+
+	/* Set the device operations */
+	eth_dev->dev_ops = &cnxk_rep_dev_ops;
+
+	/* Rx/Tx functions stubs to avoid crashing */
+	eth_dev->rx_pkt_burst = cnxk_rep_rx_burst;
+	eth_dev->tx_pkt_burst = cnxk_rep_tx_burst;
+
+	/* Only single queues for representor devices */
+	eth_dev->data->nb_rx_queues = 1;
+	eth_dev->data->nb_tx_queues = 1;
+
+	eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+	eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
+	eth_dev->data->dev_link.link_autoneg = RTE_ETH_LINK_FIXED;
+
+	return 0;
+}
+
+static int
+create_representor_ethdev(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev,
+			  struct cnxk_eswitch_devargs *esw_da, int idx)
+{
+	char name[RTE_ETH_NAME_MAX_LEN];
+	struct rte_eth_dev *rep_eth_dev;
+	uint16_t hw_func;
+	int rc = 0;
+
+	struct cnxk_rep_dev rep = {.port_id = eswitch_dev->repr_cnt.nb_repr_probed,
+				   .parent_dev = eswitch_dev};
+
+	if (esw_da->type == CNXK_ESW_DA_TYPE_PFVF) {
+		hw_func = esw_da->repr_hw_info[idx].hw_func;
+		rep.switch_domain_id = switch_domain_id_allocate(eswitch_dev, get_pf(hw_func));
+		if (rep.switch_domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+			plt_err("Failed to get a valid switch domain id");
+			rc = -EINVAL;
+			goto fail;
+		}
+
+		esw_da->repr_hw_info[idx].port_id = rep.port_id;
+		/* Representor port name: net_<pci bdf>_hw_<hw func>_representor_<port id> */
+		snprintf(name, sizeof(name), "net_%s_hw_%x_representor_%d", pci_dev->device.name,
+			 hw_func, rep.port_id);
+
+		rep.hw_func = hw_func;
+		rep.rep_id = esw_da->repr_hw_info[idx].rep_id;
+	} else {
+		snprintf(name, sizeof(name), "net_%s_representor_%d", pci_dev->device.name,
+			 rep.port_id);
+		rep.switch_domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+	}
+
+	rc = rte_eth_dev_create(&pci_dev->device, name, sizeof(struct cnxk_rep_dev), NULL, NULL,
+				cnxk_rep_dev_init, &rep);
+	if (rc) {
+		plt_err("Failed to create cnxk vf representor %s", name);
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	rep_eth_dev = rte_eth_dev_allocated(name);
+	if (!rep_eth_dev) {
+		plt_err("Failed to find the eth_dev for VF-Rep: %s.", name);
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	plt_rep_dbg("Representor portid %d (%s) type %d probe done", rep_eth_dev->data->port_id,
+		    name, esw_da->da.type);
+	eswitch_dev->rep_info[rep.port_id].rep_eth_dev = rep_eth_dev;
+	eswitch_dev->repr_cnt.nb_repr_probed++;
+
+	return 0;
+fail:
+	return rc;
+}
+
+int
+cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev)
+{
+	struct cnxk_eswitch_devargs *esw_da;
+	uint16_t num_rep;
+	int i, j, rc;
+
+	if (eswitch_dev->repr_cnt.nb_repr_created > RTE_MAX_ETHPORTS) {
+		plt_err("nb_representor_ports %d > %d MAX ETHPORTS",
+			eswitch_dev->repr_cnt.nb_repr_created, RTE_MAX_ETHPORTS);
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	/* Initialize the internals of representor ports */
+	rc = cnxk_rep_parent_setup(eswitch_dev);
+	if (rc) {
+		plt_err("Failed to setup the parent device, err %d", rc);
+		goto fail;
+	}
+
+	for (i = eswitch_dev->last_probed; i < eswitch_dev->nb_esw_da; i++) {
+		esw_da = &eswitch_dev->esw_da[i];
+		/* Check the representor devargs */
+		num_rep = esw_da->nb_repr_ports;
+		for (j = 0; j < num_rep; j++) {
+			rc = create_representor_ethdev(pci_dev, eswitch_dev, esw_da, j);
+			if (rc)
+				goto fail;
+		}
+	}
+	eswitch_dev->last_probed = i;
+
+	return 0;
+fail:
+	return rc;
+}
diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h
new file mode 100644
index 0000000000..b802c44b33
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_rep.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+#include
+#include
+
+#ifndef __CNXK_REP_H__
+#define __CNXK_REP_H__
+
+/* Common ethdev ops */
+extern struct eth_dev_ops cnxk_rep_dev_ops;
+
+struct cnxk_rep_dev {
+	uint16_t port_id;
+	uint16_t rep_id;
+	uint16_t switch_domain_id;
+	struct cnxk_eswitch_dev *parent_dev;
+	uint16_t hw_func;
+	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+};
+
+static inline struct cnxk_rep_dev *
+cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev)
+{
+	return eth_dev->data->dev_private;
+}
+
+int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev);
+int cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev);
+int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev);
+int cnxk_rep_dev_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info);
+int cnxk_rep_representor_info_get(struct rte_eth_dev *dev, struct rte_eth_representor_info *info);
+int cnxk_rep_dev_configure(struct rte_eth_dev *eth_dev);
+
+int cnxk_rep_link_update(struct rte_eth_dev *eth_dev, int wait_to_compl);
+int cnxk_rep_dev_start(struct rte_eth_dev *eth_dev);
+int cnxk_rep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, uint16_t nb_desc,
+			    unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
+			    struct rte_mempool *mp);
+int cnxk_rep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, uint16_t nb_desc,
+			    unsigned int socket_id, const struct rte_eth_txconf *tx_conf);
+void cnxk_rep_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+void cnxk_rep_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+int cnxk_rep_dev_stop(struct rte_eth_dev *eth_dev);
+int cnxk_rep_dev_close(struct rte_eth_dev *eth_dev);
+int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats);
+int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev);
+int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops);
+
+#endif /* __CNXK_REP_H__ */
diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c
new file mode 100644
index 0000000000..15448688ce
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_rep_ops.c
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include
+
+int
+cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(wait_to_complete);
+	return 0;
+}
+
+int
+cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *devinfo)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(devinfo);
+	return 0;
+}
+
+int
+cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)
+{
+	PLT_SET_USED(ethdev);
+	return 0;
+}
+
+int
+cnxk_rep_dev_start(struct rte_eth_dev *ethdev)
+{
+	PLT_SET_USED(ethdev);
+	return 0;
+}
+
+int
+cnxk_rep_dev_close(struct rte_eth_dev *ethdev)
+{
+	PLT_SET_USED(ethdev);
+	return 0;
+}
+
+int
+cnxk_rep_dev_stop(struct rte_eth_dev *ethdev)
+{
+	PLT_SET_USED(ethdev);
+	return 0;
+}
+
+int
+cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16_t nb_rx_desc,
+			unsigned int socket_id, const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mb_pool)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(rx_queue_id);
+	PLT_SET_USED(nb_rx_desc);
+	PLT_SET_USED(socket_id);
+	PLT_SET_USED(rx_conf);
+	PLT_SET_USED(mb_pool);
+	return 0;
+}
+
+void
+cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(queue_id);
+}
+
+int
+cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc,
+			unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(tx_queue_id);
+	PLT_SET_USED(nb_tx_desc);
+	PLT_SET_USED(socket_id);
+	PLT_SET_USED(tx_conf);
+	return 0;
+}
+
+void
+cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(queue_id);
+}
+
+int
+cnxk_rep_stats_get(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(stats);
+	return 0;
+}
+
+int
+cnxk_rep_stats_reset(struct rte_eth_dev *ethdev)
+{
+	PLT_SET_USED(ethdev);
+	return 0;
+}
+
+int
+cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops)
+{
+	PLT_SET_USED(ethdev);
+	PLT_SET_USED(ops);
+	return 0;
+}
+
+/* CNXK platform representor dev ops */
+struct eth_dev_ops cnxk_rep_dev_ops = {
+	.dev_infos_get = cnxk_rep_dev_info_get,
+	.dev_configure = cnxk_rep_dev_configure,
+	.dev_start = cnxk_rep_dev_start,
+	.rx_queue_setup = cnxk_rep_rx_queue_setup,
+	.rx_queue_release = cnxk_rep_rx_queue_release,
+	.tx_queue_setup = cnxk_rep_tx_queue_setup,
+	.tx_queue_release = cnxk_rep_tx_queue_release,
+	.link_update = cnxk_rep_link_update,
+	.dev_close = cnxk_rep_dev_close,
+	.dev_stop = cnxk_rep_dev_stop,
+	.stats_get = cnxk_rep_stats_get,
+	.stats_reset = cnxk_rep_stats_reset,
+	.flow_ops_get = cnxk_rep_flow_ops_get
+};
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index ea7e363e89..fcd5d3d569 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -34,6 +34,8 @@ sources = files(
         'cnxk_lookup.c',
         'cnxk_ptp.c',
         'cnxk_flow.c',
+        'cnxk_rep.c',
+        'cnxk_rep_ops.c',
         'cnxk_stats.c',
         'cnxk_tm.c',
 )
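[Illustration only, not part of the series: with the probing skeleton above, representor ports are spawned by passing representor devargs for the eswitch PF base device. Assuming a hypothetical eswitch PF PCI address of 0002:1c:00.0, an invocation could look like:

    dpdk-testpmd -c 0x3 -a '0002:1c:00.0,representor=pf0vf[0,1]' -- -i
    testpmd> show port summary all

Each matching representee then shows up as its own ethdev alongside the eswitch base device.]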
From patchwork Sun Mar 3 17:38:16 2024
X-Patchwork-Submitter: Harman Kalra
X-Patchwork-Id: 137820
X-Patchwork-Delegate: jerinj@marvell.com

From: Harman Kalra
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra
Subject: [PATCH v6 06/23] common/cnxk: common NPC changes for eswitch
Date: Sun, 3 Mar 2024 23:08:16 +0530
Message-ID: <20240303173833.100039-7-hkalra@marvell.com>
X-Mailer: git-send-email 2.18.0
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com>

Adding a new MCAM API for installing flows using the generic
npc_install_flow mailbox, along with other helper APIs. Also adding RSS
action configuration for the eswitch.
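[A minimal sketch of how the new helpers are meant to compose (illustration only, not from the patch; it assumes an initialized roc_npc and an MCAM entry already reserved in flow->mcam_id, and the function name is hypothetical):

	static int
	example_install_rep_rx_rule(struct roc_npc *npc, struct roc_npc_flow *flow,
				    uint16_t rep_pcifunc, uint16_t vlan_tci)
	{
		uint16_t ctr_id;
		int rc;

		/* Attach a counter so rule hits can be read back later */
		rc = roc_npc_mcam_alloc_counter(npc, &ctr_id);
		if (rc)
			return rc;
		flow->ctr_id = ctr_id;
		flow->use_ctr = true;

		/* Exact match on the representee's VLAN TCI */
		rc = roc_eswitch_npc_mcam_rx_rule(npc, flow, rep_pcifunc, vlan_tci, 0xFFFF);
		if (rc)
			roc_npc_mcam_free_counter(npc, ctr_id);
		return rc;
	}

Error handling beyond the counter cleanup is elided.]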
Signed-off-by: Harman Kalra
---
 drivers/common/cnxk/meson.build    |   1 +
 drivers/common/cnxk/roc_api.h      |   3 +
 drivers/common/cnxk/roc_eswitch.c  | 306 +++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_eswitch.h  |  22 +++
 drivers/common/cnxk/roc_mbox.h     |  33 ++++
 drivers/common/cnxk/roc_npc.c      |  26 ++-
 drivers/common/cnxk/roc_npc.h      |   5 +-
 drivers/common/cnxk/roc_npc_mcam.c |   2 +-
 drivers/common/cnxk/roc_npc_priv.h |   3 +-
 drivers/common/cnxk/version.map    |   6 +
 10 files changed, 398 insertions(+), 9 deletions(-)
 create mode 100644 drivers/common/cnxk/roc_eswitch.c
 create mode 100644 drivers/common/cnxk/roc_eswitch.h

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 56eea52909..e0e4600989 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -20,6 +20,7 @@ sources = files(
         'roc_cpt_debug.c',
         'roc_dev.c',
         'roc_dpi.c',
+        'roc_eswitch.c',
         'roc_hash.c',
         'roc_idev.c',
         'roc_irq.c',
diff --git a/drivers/common/cnxk/roc_api.h b/drivers/common/cnxk/roc_api.h
index f630853088..6a86863c57 100644
--- a/drivers/common/cnxk/roc_api.h
+++ b/drivers/common/cnxk/roc_api.h
@@ -117,4 +117,7 @@
 /* MACsec */
 #include "roc_mcs.h"

+/* Eswitch */
+#include "roc_eswitch.h"
+
 #endif /* _ROC_API_H_ */
diff --git a/drivers/common/cnxk/roc_eswitch.c b/drivers/common/cnxk/roc_eswitch.c
new file mode 100644
index 0000000000..e480ab1046
--- /dev/null
+++ b/drivers/common/cnxk/roc_eswitch.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+static int
+eswitch_vlan_rx_cfg(uint16_t pcifunc, struct mbox *mbox)
+{
+	struct nix_vtag_config *vtag_cfg;
+	int rc;
+
+	vtag_cfg = mbox_alloc_msg_nix_vtag_cfg(mbox_get(mbox));
+	if (!vtag_cfg) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* config strip, capture and size */
+	vtag_cfg->hdr.pcifunc = pcifunc;
+	vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+	vtag_cfg->cfg_type = VTAG_RX; /* rx vlan cfg */
+	vtag_cfg->rx.vtag_type = NIX_RX_VTAG_TYPE0;
+	vtag_cfg->rx.strip_vtag = true;
+	vtag_cfg->rx.capture_vtag = true;
+
+	rc = mbox_process(mbox);
+	if (rc)
+		goto exit;
+
+	rc = 0;
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+static int
+eswitch_vlan_tx_cfg(struct roc_npc_flow *flow, uint16_t pcifunc, struct mbox *mbox,
+		    uint16_t vlan_tci, uint16_t *vidx)
+{
+	struct nix_vtag_config *vtag_cfg;
+	struct nix_vtag_config_rsp *rsp;
+	int rc;
+
+	union {
+		uint64_t reg;
+		struct nix_tx_vtag_action_s act;
+	} tx_vtag_action;
+
+	vtag_cfg = mbox_alloc_msg_nix_vtag_cfg(mbox_get(mbox));
+	if (!vtag_cfg) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* Insert vlan tag */
+	vtag_cfg->hdr.pcifunc = pcifunc;
+	vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+	vtag_cfg->cfg_type = VTAG_TX; /* tx vlan cfg */
+	vtag_cfg->tx.cfg_vtag0 = true;
+	vtag_cfg->tx.vtag0 = (((uint32_t)ROC_ESWITCH_VLAN_TPID << 16) | vlan_tci);
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	if (rsp->vtag0_idx < 0) {
+		plt_err("Failed to config TX VTAG action");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	*vidx = rsp->vtag0_idx;
+	tx_vtag_action.reg = 0;
+	tx_vtag_action.act.vtag0_def = rsp->vtag0_idx;
+	tx_vtag_action.act.vtag0_lid = NPC_LID_LA;
+	tx_vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
+	tx_vtag_action.act.vtag0_relptr = NIX_TX_VTAGACTION_VTAG0_RELPTR;
+
+	flow->vtag_action = tx_vtag_action.reg;
+
+	rc = 0;
+exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_eswitch_npc_mcam_tx_rule(struct roc_npc *roc_npc, struct roc_npc_flow *flow, uint16_t pcifunc,
+			     uint32_t vlan_tci)
+{
+	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
+	struct npc_install_flow_req *req;
+	struct npc_install_flow_rsp *rsp;
+	struct mbox *mbox = npc->mbox;
+	uint16_t vidx = 0, lbkid;
+	int rc;
+
+	rc = eswitch_vlan_tx_cfg(flow, roc_npc->pf_func, mbox, vlan_tci, &vidx);
+	if (rc) {
+		plt_err("Failed to configure VLAN TX, err %d", rc);
+		goto fail;
+	}
+
+	req = mbox_alloc_msg_npc_install_flow(mbox_get(mbox));
+	if (!req) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	lbkid = 0;
+	req->hdr.pcifunc = roc_npc->pf_func; /* Eswitch PF is requester */
+	req->vf = pcifunc;
+	req->entry = flow->mcam_id;
+	req->intf = NPC_MCAM_TX;
+	req->op = NIX_TX_ACTIONOP_UCAST_CHAN;
+	req->index = (lbkid << 8) | ROC_ESWITCH_LBK_CHAN;
+	req->set_cntr = 1;
+	req->vtag0_def = vidx;
+	req->vtag0_op = 1;
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	flow->nix_intf = NIX_INTF_TX;
exit:
+	mbox_put(mbox);
fail:
+	return rc;
+}
+
+static int
+eswitch_vtag_cfg_delete(struct roc_npc *roc_npc, struct roc_npc_flow *flow)
+{
+	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
+	struct nix_vtag_config *vtag_cfg;
+	struct nix_vtag_config_rsp *rsp;
+	struct mbox *mbox = npc->mbox;
+	int rc = 0;
+
+	union {
+		uint64_t reg;
+		struct nix_tx_vtag_action_s act;
+	} tx_vtag_action;
+
+	tx_vtag_action.reg = flow->vtag_action;
+	vtag_cfg = mbox_alloc_msg_nix_vtag_cfg(mbox_get(mbox));
+
+	if (vtag_cfg == NULL) {
+		rc = -ENOSPC;
+		goto exit;
+	}
+
+	vtag_cfg->cfg_type = VTAG_TX;
+	vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+	vtag_cfg->tx.vtag0_idx = tx_vtag_action.act.vtag0_def;
+	vtag_cfg->tx.free_vtag0 = true;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	rc = rsp->hdr.rc;
exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_eswitch_npc_mcam_delete_rule(struct roc_npc *roc_npc, struct roc_npc_flow *flow,
+				 uint16_t pcifunc)
+{
+	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
+	struct npc_delete_flow_req *req;
+	struct msg_rsp *rsp;
+	struct mbox *mbox = npc->mbox;
+	int rc = 0;
+
+	/* Removing the VLAN TX config */
+	if (flow->nix_intf == NIX_INTF_TX) {
+		rc = eswitch_vtag_cfg_delete(roc_npc, flow);
+		if (rc)
+			plt_err("Failed to delete TX vtag config");
+	}
+
+	req = mbox_alloc_msg_npc_delete_flow(mbox_get(mbox));
+	if (!req) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	req->entry = flow->mcam_id;
+	req->vf = pcifunc;
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	rc = rsp->hdr.rc;
exit:
+	mbox_put(mbox);
+	return rc;
+}
+
+int
+roc_eswitch_npc_mcam_rx_rule(struct roc_npc *roc_npc, struct roc_npc_flow *flow, uint16_t pcifunc,
+			     uint16_t vlan_tci, uint16_t vlan_tci_mask)
+{
+	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
+	struct npc_install_flow_req *req;
+	struct npc_install_flow_rsp *rsp;
+	struct mbox *mbox = npc->mbox;
+	bool is_esw_dev;
+	int rc;
+
+	/* For ESW PF/VF */
+	is_esw_dev = (dev_get_pf(roc_npc->pf_func) == dev_get_pf(pcifunc));
+	/* VLAN Rx config */
+	if (is_esw_dev) {
+		rc = eswitch_vlan_rx_cfg(roc_npc->pf_func, mbox);
+		if (rc) {
+			plt_err("Failed to configure VLAN RX rule, err %d", rc);
+			goto fail;
+		}
+	}
+
+	req = mbox_alloc_msg_npc_install_flow(mbox_get(mbox));
+	if (!req) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	req->vf = pcifunc;
+	/* Action */
+	req->op = NIX_RX_ACTIONOP_DEFAULT;
+	req->index = 0;
+	req->entry = flow->mcam_id;
+	req->hdr.pcifunc = roc_npc->pf_func; /* Eswitch PF is requester */
+	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG);
+	req->vtag0_valid = true;
+	/* ESW PF/VF use the VLAN RX config programmed above, while other
+	 * representees use the standard vlan_type = 7, which strips the tag.
+	 */
+	req->vtag0_type = is_esw_dev ? NIX_RX_VTAG_TYPE0 : NIX_RX_VTAG_TYPE7;
+	req->packet.vlan_etype = ROC_ESWITCH_VLAN_TPID;
+	req->mask.vlan_etype = 0xFFFF;
+	req->packet.vlan_tci = ntohs(vlan_tci & 0xFFFF);
+	req->mask.vlan_tci = ntohs(vlan_tci_mask);
+
+	req->channel = ROC_ESWITCH_LBK_CHAN;
+	req->chan_mask = 0xffff;
+	req->intf = NPC_MCAM_RX;
+	req->set_cntr = 1;
+	req->cntr_val = flow->ctr_id;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		goto exit;
+
+	flow->nix_intf = NIX_INTF_RX;
exit:
+	mbox_put(mbox);
fail:
+	return rc;
+}
+
+int
+roc_eswitch_npc_rss_action_configure(struct roc_npc *roc_npc, struct roc_npc_flow *flow,
+				     uint32_t flowkey_cfg, uint16_t *reta_tbl)
+{
+	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
+	struct roc_nix *roc_nix = roc_npc->roc_nix;
+	uint32_t rss_grp_idx;
+	uint8_t flowkey_algx;
+	int rc;
+
+	rc = npc_rss_free_grp_get(npc, &rss_grp_idx);
+	/* RSS group :0 is not usable for flow rss action */
+	if (rc < 0 || rss_grp_idx == 0)
+		return -ENOSPC;
+
+	/* Populating reta table for the specific RSS group */
+	rc = roc_nix_rss_reta_set(roc_nix, rss_grp_idx, reta_tbl);
+	if (rc) {
+		plt_err("Failed to init rss table rc = %d", rc);
+		return rc;
+	}
+
+	rc = roc_nix_rss_flowkey_set(roc_nix, &flowkey_algx, flowkey_cfg, rss_grp_idx,
+				     flow->mcam_id);
+	if (rc) {
+		plt_err("Failed to set rss hash function rc = %d", rc);
+		return rc;
+	}
+
+	plt_bitmap_set(npc->rss_grp_entries, rss_grp_idx);
+
+	flow->npc_action &= (~(0xfULL));
+	flow->npc_action |= NIX_RX_ACTIONOP_RSS;
+	flow->npc_action |=
+		((uint64_t)(flowkey_algx & NPC_RSS_ACT_ALG_MASK) << NPC_RSS_ACT_ALG_OFFSET) |
+		((uint64_t)(rss_grp_idx & NPC_RSS_ACT_GRP_MASK) << NPC_RSS_ACT_GRP_OFFSET);
+	return 0;
+}
diff --git a/drivers/common/cnxk/roc_eswitch.h b/drivers/common/cnxk/roc_eswitch.h
new file mode 100644
index 0000000000..cdbe808a71
--- /dev/null
+++ b/drivers/common/cnxk/roc_eswitch.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#ifndef __ROC_ESWITCH_H__
+#define __ROC_ESWITCH_H__
+
+#define ROC_ESWITCH_VLAN_TPID 0x8100
+#define ROC_ESWITCH_LBK_CHAN  63
+
+/* NPC */
+int __roc_api roc_eswitch_npc_mcam_rx_rule(struct roc_npc *roc_npc, struct roc_npc_flow *flow,
+					   uint16_t pcifunc, uint16_t vlan_tci,
+					   uint16_t vlan_tci_mask);
+int __roc_api roc_eswitch_npc_mcam_tx_rule(struct roc_npc *roc_npc, struct roc_npc_flow *flow,
+					   uint16_t pcifunc, uint32_t vlan_tci);
+int __roc_api roc_eswitch_npc_mcam_delete_rule(struct roc_npc *roc_npc, struct roc_npc_flow *flow,
+					       uint16_t pcifunc);
+int __roc_api roc_eswitch_npc_rss_action_configure(struct roc_npc *roc_npc,
+						   struct roc_npc_flow *flow, uint32_t flowkey_cfg,
+						   uint16_t *reta_tbl);
+#endif /* __ROC_ESWITCH_H__ */
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 54956a6a06..b76e97e9f9 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -388,6 +388,18 @@ enum rvu_af_status {
 	RVU_INVALID_VF_ID = -256,
 };

+/* For NIX RX vtag action */
+enum nix_rx_vtag0_type {
+	NIX_RX_VTAG_TYPE0,
+	NIX_RX_VTAG_TYPE1,
+	NIX_RX_VTAG_TYPE2,
+	NIX_RX_VTAG_TYPE3,
+	NIX_RX_VTAG_TYPE4,
+	NIX_RX_VTAG_TYPE5,
+	NIX_RX_VTAG_TYPE6,
+	NIX_RX_VTAG_TYPE7,
+};
+
 struct ready_msg_rsp {
 	struct mbox_msghdr hdr;
 	uint16_t __io sclk_freq; /* SCLK frequency */
@@ -2446,6 +2458,8 @@ enum header_fields {
 	NPC_DMAC,
 	NPC_SMAC,
 	NPC_ETYPE,
+	NPC_VLAN_ETYPE_CTAG, /* 0x8100 */
+	NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
 	NPC_OUTER_VID,
 	NPC_TOS,
 	NPC_SIP_IPV4,
@@ -2474,12 +2488,27 @@ struct flow_msg {
 		uint32_t __io ip4dst;
 		uint32_t __io ip6dst[4];
 	};
+	union {
+		uint32_t spi;
+	};
 	uint8_t __io tos;
 	uint8_t __io ip_ver;
 	uint8_t __io ip_proto;
 	uint8_t __io tc;
 	uint16_t __io sport;
 	uint16_t __io dport;
+	union {
+		uint8_t __io ip_flag;
+		uint8_t __io next_header;
+	};
+	uint16_t __io vlan_itci;
+	uint8_t __io icmp_type;
+	uint8_t __io icmp_code;
+	uint16_t __io tcp_flags;
+	uint32_t __io gtpu_teid;
+	uint32_t __io gtpc_teid;
+	uint32_t __io mpls_lse[4];
+	uint16_t __io sq_id;
 };

 struct npc_install_flow_req {
@@ -2489,6 +2518,7 @@ struct npc_install_flow_req {
 	uint64_t __io features;
 	uint16_t __io entry;
 	uint16_t __io channel;
+	uint16_t __io chan_mask;
 	uint8_t __io intf;
 	uint8_t __io set_cntr;
 	uint8_t __io default_rule;
@@ -2511,6 +2541,8 @@ struct npc_install_flow_req {
 	uint8_t __io vtag0_op;
 	uint16_t __io vtag1_def;
 	uint8_t __io vtag1_op;
+	/* old counter value */
+	uint16_t __io cntr_val;
 };

 struct npc_install_flow_rsp {
@@ -2525,6 +2557,7 @@ struct npc_delete_flow_req {
 	uint16_t __io start; /*Disable range of entries */
 	uint16_t __io end;
 	uint8_t __io all; /* PF + VFs */
+	uint16_t __io vf; /* Requesting VF */
 };

 struct npc_mcam_read_entry_req {
diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c
index 9a0fe5f4e2..67a660a2bc 100644
--- a/drivers/common/cnxk/roc_npc.c
+++ b/drivers/common/cnxk/roc_npc.c
@@ -77,8 +77,23 @@ roc_npc_inl_mcam_clear_counter(uint32_t ctr_id)
 }

 int
-roc_npc_mcam_read_counter(struct roc_npc *roc_npc, uint32_t ctr_id,
-			  uint64_t *count)
+roc_npc_mcam_alloc_counter(struct roc_npc *roc_npc, uint16_t *ctr_id)
+{
+	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
+
+	return npc_mcam_alloc_counter(npc->mbox, ctr_id);
+}
+
+int
+roc_npc_get_free_mcam_entry(struct roc_npc *roc_npc, struct roc_npc_flow *flow)
+{
+	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
+
+	return npc_get_free_mcam_entry(npc->mbox, flow, npc);
+}
+
+int
+roc_npc_mcam_read_counter(struct roc_npc *roc_npc, uint32_t ctr_id, uint64_t *count)
 {
 	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
@@ -157,14 +172,13 @@ roc_npc_mcam_free_all_resources(struct roc_npc *roc_npc)
 }

 int
-roc_npc_mcam_alloc_entries(struct roc_npc *roc_npc, int ref_entry,
-			   int *alloc_entry, int req_count, int priority,
-			   int *resp_count)
+roc_npc_mcam_alloc_entries(struct roc_npc *roc_npc, int ref_entry, int *alloc_entry, int req_count,
+			   int priority, int *resp_count, bool is_conti)
 {
 	struct npc *npc = roc_npc_to_npc_priv(roc_npc);

 	return npc_mcam_alloc_entries(npc->mbox, ref_entry, alloc_entry, req_count, priority,
-				      resp_count, 0);
+				      resp_count, is_conti);
 }

 int
diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h
index e880a7fa67..349c7f9d22 100644
--- a/drivers/common/cnxk/roc_npc.h
+++ b/drivers/common/cnxk/roc_npc.h
@@ -431,7 +431,8 @@ int __roc_api roc_npc_mcam_enable_all_entries(struct roc_npc *roc_npc, bool enab
 int __roc_api roc_npc_mcam_alloc_entry(struct roc_npc *roc_npc, struct roc_npc_flow *mcam,
 				       struct roc_npc_flow *ref_mcam, int prio, int *resp_count);
 int __roc_api roc_npc_mcam_alloc_entries(struct roc_npc *roc_npc, int ref_entry, int *alloc_entry,
-					 int req_count, int priority, int *resp_count);
+					 int req_count, int priority, int *resp_count,
+					 bool is_conti);
 int __roc_api roc_npc_mcam_ena_dis_entry(struct roc_npc *roc_npc, struct roc_npc_flow *mcam,
 					 bool enable);
 int __roc_api roc_npc_mcam_write_entry(struct roc_npc *roc_npc, struct roc_npc_flow *mcam);
@@ -442,6 +443,8 @@ int __roc_api roc_npc_get_low_priority_mcam(struct roc_npc *roc_npc);
 int __roc_api roc_npc_mcam_free_counter(struct roc_npc *roc_npc, uint16_t ctr_id);
 int __roc_api roc_npc_mcam_read_counter(struct roc_npc *roc_npc, uint32_t ctr_id, uint64_t *count);
 int __roc_api roc_npc_mcam_clear_counter(struct roc_npc *roc_npc, uint32_t ctr_id);
+int __roc_api roc_npc_mcam_alloc_counter(struct roc_npc *roc_npc, uint16_t *ctr_id);
+int __roc_api roc_npc_get_free_mcam_entry(struct roc_npc *roc_npc, struct roc_npc_flow *flow);
 int __roc_api roc_npc_inl_mcam_read_counter(uint32_t ctr_id, uint64_t *count);
 int __roc_api roc_npc_inl_mcam_clear_counter(uint32_t ctr_id);
 int __roc_api roc_npc_mcam_free_all_resources(struct roc_npc *roc_npc);
diff --git a/drivers/common/cnxk/roc_npc_mcam.c b/drivers/common/cnxk/roc_npc_mcam.c
index 3ef189e184..2de988a44b 100644
--- a/drivers/common/cnxk/roc_npc_mcam.c
+++ b/drivers/common/cnxk/roc_npc_mcam.c
@@ -4,7 +4,7 @@
 #include "roc_api.h"
 #include "roc_priv.h"

-static int
+int
 npc_mcam_alloc_counter(struct mbox *mbox, uint16_t *ctr)
 {
 	struct npc_mcam_alloc_counter_req *req;
diff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h
index c0809407a6..50b62b1244 100644
--- a/drivers/common/cnxk/roc_npc_priv.h
+++ b/drivers/common/cnxk/roc_npc_priv.h
@@ -432,6 +432,7 @@ roc_npc_to_npc_priv(struct roc_npc *npc)
 	return (struct npc *)npc->reserved;
 }

+int npc_mcam_alloc_counter(struct mbox *mbox, uint16_t *ctr);
 int npc_mcam_free_counter(struct mbox *mbox, uint16_t ctr_id);
 int npc_mcam_read_counter(struct mbox *mbox, uint32_t ctr_id, uint64_t *count);
 int npc_mcam_clear_counter(struct mbox *mbox, uint32_t ctr_id);
@@ -480,7 +481,6 @@ uint64_t npc_get_kex_capability(struct npc *npc);
 int npc_process_ipv6_field_hash(const struct roc_npc_flow_item_ipv6 *ipv6_spec,
 				const struct roc_npc_flow_item_ipv6 *ipv6_mask,
 				struct npc_parse_state *pst, uint8_t type);
-int npc_rss_free_grp_get(struct npc *npc, uint32_t *grp);
 int npc_rss_action_configure(struct roc_npc *roc_npc, const struct roc_npc_action_rss *rss,
 			     uint8_t *alg_idx, uint32_t *rss_grp, uint32_t mcam_id);
 int npc_rss_action_program(struct roc_npc *roc_npc, const struct roc_npc_action actions[],
@@ -496,4 +496,5 @@ void npc_aged_flows_bitmap_free(struct roc_npc *roc_npc);
 int npc_aging_ctrl_thread_create(struct roc_npc *roc_npc, const struct roc_npc_action_age *age,
 				 struct roc_npc_flow *flow);
 void npc_aging_ctrl_thread_destroy(struct roc_npc *roc_npc);
+int npc_rss_free_grp_get(struct npc *npc, uint32_t *pos);
 #endif /* _ROC_NPC_PRIV_H_ */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 9bea7af6f4..b509c28b86 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -87,6 +87,10 @@ INTERNAL {
 	roc_dpi_disable;
 	roc_dpi_enable;
 	roc_error_msg_get;
+	roc_eswitch_npc_mcam_delete_rule;
+	roc_eswitch_npc_mcam_rx_rule;
+	roc_eswitch_npc_mcam_tx_rule;
+	roc_eswitch_npc_rss_action_configure;
 	roc_hash_md5_gen;
 	roc_hash_sha1_gen;
 	roc_hash_sha256_gen;
@@ -446,6 +450,7 @@ INTERNAL {
 	roc_npc_flow_dump;
 	roc_npc_flow_mcam_dump;
 	roc_npc_flow_parse;
+	roc_npc_get_free_mcam_entry;
 	roc_npc_get_low_priority_mcam;
 	roc_npc_init;
 	roc_npc_kex_capa_get;
@@ -453,6 +458,7 @@ INTERNAL {
 	roc_npc_mark_actions_sub_return;
 	roc_npc_vtag_actions_get;
 	roc_npc_vtag_actions_sub_return;
+	roc_npc_mcam_alloc_counter;
 	roc_npc_mcam_alloc_entries;
 	roc_npc_mcam_alloc_entry;
 	roc_npc_mcam_clear_counter;
From patchwork Sun Mar 3 17:38:17 2024
X-Patchwork-Submitter: Harman Kalra
X-Patchwork-Id: 137821
X-Patchwork-Delegate: jerinj@marvell.com
From: Harman Kalra
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra
Subject: [PATCH v6 07/23] common/cnxk: interface to update VLAN TPID
Date: Sun, 3 Mar 2024 23:08:17 +0530
Message-ID: <20240303173833.100039-8-hkalra@marvell.com>
X-Mailer: git-send-email 2.18.0
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com>

Introducing an eswitch variant of the set VLAN TPID API which can be
used for PF and VF.
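[A minimal usage sketch (illustration only, not from the patch; eswitch_dev is assumed to hold an initialized roc_nix owned by the eswitch PF):

	/* Outer TPID for the eswitch PF LF */
	rc = roc_eswitch_nix_vlan_tpid_set(&eswitch_dev->nix, ROC_NIX_VLAN_TYPE_OUTER,
					   ROC_ESWITCH_VLAN_TPID, false);
	/* Same TPID applied on behalf of the VF variant */
	rc = roc_eswitch_nix_vlan_tpid_set(&eswitch_dev->nix, ROC_NIX_VLAN_TYPE_OUTER,
					   ROC_ESWITCH_VLAN_TPID, true);
]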
Signed-off-by: Harman Kalra
---
 drivers/common/cnxk/roc_eswitch.c  | 15 +++++++++++++++
 drivers/common/cnxk/roc_eswitch.h  |  4 ++++
 drivers/common/cnxk/roc_nix_priv.h | 11 +++++------
 drivers/common/cnxk/roc_nix_vlan.c | 23 ++++++++++++++++++-----
 drivers/common/cnxk/version.map    |  1 +
 5 files changed, 43 insertions(+), 11 deletions(-)

diff --git a/drivers/common/cnxk/roc_eswitch.c b/drivers/common/cnxk/roc_eswitch.c
index e480ab1046..020a891a32 100644
--- a/drivers/common/cnxk/roc_eswitch.c
+++ b/drivers/common/cnxk/roc_eswitch.c
@@ -304,3 +304,18 @@ roc_eswitch_npc_rss_action_configure(struct roc_npc *roc_npc, struct roc_npc_flo
 		((uint64_t)(rss_grp_idx & NPC_RSS_ACT_GRP_MASK) << NPC_RSS_ACT_GRP_OFFSET);
 	return 0;
 }
+
+int
+roc_eswitch_nix_vlan_tpid_set(struct roc_nix *roc_nix, uint32_t type, uint16_t tpid, bool is_vf)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct dev *dev = &nix->dev;
+	int rc;
+
+	/* Configuring for PF/VF */
+	rc = nix_vlan_tpid_set(dev->mbox, dev->pf_func | is_vf, type, tpid);
+	if (rc)
+		plt_err("Failed to set tpid for PF, rc %d", rc);
+
+	return rc;
+}
diff --git a/drivers/common/cnxk/roc_eswitch.h b/drivers/common/cnxk/roc_eswitch.h
index cdbe808a71..34b75d10ac 100644
--- a/drivers/common/cnxk/roc_eswitch.h
+++ b/drivers/common/cnxk/roc_eswitch.h
@@ -19,4 +19,8 @@ int __roc_api roc_eswitch_npc_mcam_delete_rule(struct roc_npc *roc_npc, struct r
 int __roc_api roc_eswitch_npc_rss_action_configure(struct roc_npc *roc_npc,
 						   struct roc_npc_flow *flow, uint32_t flowkey_cfg,
 						   uint16_t *reta_tbl);
+
+/* NIX */
+int __roc_api roc_eswitch_nix_vlan_tpid_set(struct roc_nix *nix, uint32_t type, uint16_t tpid,
+					    bool is_vf);
 #endif /* __ROC_ESWITCH_H__ */
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index a582b9df33..3d99ade2b4 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -469,13 +469,12 @@ void nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile);
 uint64_t nix_get_blkaddr(struct dev *dev);
 void nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx, FILE *file);
 int nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data);
-int nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data,
-			 uint8_t lf_tx_stats, uint8_t lf_rx_stats);
-int nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
-			uint16_t cints);
-int nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid,
-		  __io void **ctx_p);
+int nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint8_t lf_tx_stats,
+			 uint8_t lf_rx_stats);
+int nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints, uint16_t cints);
+int nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid, __io void **ctx_p);
 uint8_t nix_tm_lbk_relchan_get(struct nix *nix);
+int nix_vlan_tpid_set(struct mbox *mbox, uint16_t pcifunc, uint32_t type, uint16_t tpid);

 /*
  * Telemetry
diff --git a/drivers/common/cnxk/roc_nix_vlan.c b/drivers/common/cnxk/roc_nix_vlan.c
index abd2eb0571..db218593ad 100644
--- a/drivers/common/cnxk/roc_nix_vlan.c
+++ b/drivers/common/cnxk/roc_nix_vlan.c
@@ -211,18 +211,17 @@ roc_nix_vlan_insert_ena_dis(struct roc_nix *roc_nix,
 }

 int
-roc_nix_vlan_tpid_set(struct roc_nix *roc_nix, uint32_t type, uint16_t tpid)
+nix_vlan_tpid_set(struct mbox *mbox, uint16_t pcifunc, uint32_t type, uint16_t tpid)
 {
-	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
-	struct dev *dev = &nix->dev;
-	struct mbox *mbox = mbox_get(dev->mbox);
 	struct nix_set_vlan_tpid *tpid_cfg;
 	int rc = -ENOSPC;

-	tpid_cfg = mbox_alloc_msg_nix_set_vlan_tpid(mbox);
+	/* Configuring TPID on behalf of the given PF/VF function */
+	tpid_cfg = mbox_alloc_msg_nix_set_vlan_tpid(mbox_get(mbox));
 	if (tpid_cfg == NULL)
 		goto exit;
 	tpid_cfg->tpid = tpid;
+	tpid_cfg->hdr.pcifunc = pcifunc;

 	if (type & ROC_NIX_VLAN_TYPE_OUTER)
 		tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
@@ -234,3 +233,17 @@ roc_nix_vlan_tpid_set(struct roc_nix *roc_nix, uint32_t type, uint16_t tpid)
 	mbox_put(mbox);
 	return rc;
 }
+
+int
+roc_nix_vlan_tpid_set(struct roc_nix *roc_nix, uint32_t type, uint16_t tpid)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct dev *dev = &nix->dev;
+	int rc;
+
+	rc = nix_vlan_tpid_set(dev->mbox, dev->pf_func, type, tpid);
+	if (rc)
+		plt_err("Failed to set tpid for PF, rc %d", rc);
+
+	return rc;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index b509c28b86..0331c0fb9d 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -87,6 +87,7 @@ INTERNAL {
 	roc_dpi_disable;
 	roc_dpi_enable;
 	roc_error_msg_get;
+	roc_eswitch_nix_vlan_tpid_set;
 	roc_eswitch_npc_mcam_delete_rule;
 	roc_eswitch_npc_mcam_rx_rule;
 	roc_eswitch_npc_mcam_tx_rule;
From patchwork Sun Mar 3 17:38:18 2024
X-Patchwork-Submitter: Harman Kalra
X-Patchwork-Id: 137822
X-Patchwork-Delegate: jerinj@marvell.com
From: Harman Kalra
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra
Subject: [PATCH v6 08/23] net/cnxk: eswitch flow configurations
Date: Sun, 3 Mar 2024 23:08:18 +0530
Message-ID: <20240303173833.100039-9-hkalra@marvell.com>
X-Mailer: git-send-email 2.18.0
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com>

Adding flow rules for the eswitch PF and VF, and implementing interfaces
to delete and shift flow rules.
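[Sketch of the call flow these interfaces enable (illustration only, not from the patch; the hw_func value here is hypothetical — in the driver it comes from the representee's rep_pfvf_map):

	uint16_t hw_func = 0x0400;	/* hypothetical representee PF/VF function */

	/* Install the per-representee RX/TX rules... */
	rc = cnxk_eswitch_flow_rules_install(eswitch_dev, hw_func);

	/* ...and tear them down when the representee goes away */
	rc = cnxk_eswitch_flow_rules_delete(eswitch_dev, hw_func);
]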
Signed-off-by: Harman Kalra
---
 drivers/net/cnxk/cnxk_eswitch.c         |  44 +++
 drivers/net/cnxk/cnxk_eswitch.h         |  25 +-
 drivers/net/cnxk/cnxk_eswitch_devargs.c |   1 +
 drivers/net/cnxk/cnxk_eswitch_flow.c    | 454 ++++++++++++++++++++++++
 drivers/net/cnxk/meson.build            |   1 +
 5 files changed, 522 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/cnxk/cnxk_eswitch_flow.c

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index 599ed149ae..25992fddc9 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -2,11 +2,33 @@
  * Copyright(C) 2024 Marvell.
  */

+#include
+
 #include
 #include

 #define CNXK_NIX_DEF_SQ_COUNT 512

+struct cnxk_esw_repr_hw_info *
+cnxk_eswitch_representor_hw_info(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func)
+{
+	struct cnxk_eswitch_devargs *esw_da;
+	int i, j;
+
+	if (!eswitch_dev)
+		return NULL;
+
+	/* Traverse the initialized representee lists */
+	for (i = 0; i < eswitch_dev->nb_esw_da; i++) {
+		esw_da = &eswitch_dev->esw_da[i];
+		for (j = 0; j < esw_da->nb_repr_ports; j++) {
+			if (esw_da->repr_hw_info[j].hw_func == hw_func)
+				return &esw_da->repr_hw_info[j];
+		}
+	}
+	return NULL;
+}
+
 static int
 eswitch_hw_rsrc_cleanup(struct cnxk_eswitch_dev *eswitch_dev, struct rte_pci_device *pci_dev)
 {
@@ -67,6 +89,10 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
 	if (eswitch_dev->repr_cnt.nb_repr_created)
 		cnxk_rep_dev_remove(eswitch_dev);

+	/* Cleanup NPC rxtx flow rules */
+	cnxk_eswitch_flow_rules_remove_list(eswitch_dev, &eswitch_dev->esw_flow_list,
+					    eswitch_dev->npc.pf_func);
+
 	/* Cleanup HW resources */
 	eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);

@@ -87,6 +113,21 @@ cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev *eswitch_dev)
 		goto done;
 	}

+	/* Install eswitch PF mcam rules */
+	rc = cnxk_eswitch_pfvf_flow_rules_install(eswitch_dev, false);
+	if (rc) {
+		plt_err("Failed to install rxtx rules, rc %d", rc);
+		goto done;
+	}
+
+	/* Configure TPID for Eswitch PF LFs */
+	rc = roc_eswitch_nix_vlan_tpid_set(&eswitch_dev->nix, ROC_NIX_VLAN_TYPE_OUTER,
+					   CNXK_ESWITCH_VLAN_TPID, false);
+	if (rc) {
+		plt_err("Failed to configure tpid, rc %d", rc);
+		goto done;
+	}
+
 	rc = roc_npc_mcam_enable_all_entries(&eswitch_dev->npc, 1);
 	if (rc) {
 		plt_err("Failed to enable NPC entries %d", rc);
@@ -524,6 +565,9 @@ eswitch_hw_rsrc_setup(struct cnxk_eswitch_dev *eswitch_dev, struct rte_pci_devic
 	if (rc)
 		goto rsrc_cleanup;

+	/* List for eswitch default flows */
+	TAILQ_INIT(&eswitch_dev->esw_flow_list);
+
 	return rc;
 rsrc_cleanup:
 	eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h
index dcd5add6d0..5b4e1b0a71 100644
--- a/drivers/net/cnxk/cnxk_eswitch.h
+++ b/drivers/net/cnxk/cnxk_eswitch.h
@@ -13,11 +13,10 @@
 #include "cn10k_tx.h"

 #define CNXK_ESWITCH_CTRL_MSG_SOCK_PATH "/tmp/cxk_rep_ctrl_msg_sock"
+#define CNXK_ESWITCH_VLAN_TPID		ROC_ESWITCH_VLAN_TPID
 #define CNXK_REP_ESWITCH_DEV_MZ		"cnxk_eswitch_dev"
-#define CNXK_ESWITCH_VLAN_TPID		0x8100
 #define CNXK_ESWITCH_MAX_TXQ		256
 #define CNXK_ESWITCH_MAX_RXQ		256
-#define CNXK_ESWITCH_LBK_CHAN		63
 #define CNXK_ESWITCH_VFPF_SHIFT		8

 #define CNXK_ESWITCH_QUEUE_STATE_RELEASED 0
@@ -25,6 +24,7 @@
 #define CNXK_ESWITCH_QUEUE_STATE_STARTED 2
 #define CNXK_ESWITCH_QUEUE_STATE_STOPPED 3

+TAILQ_HEAD(eswitch_flow_list, roc_npc_flow);
 enum cnxk_esw_da_pattern_type {
 	CNXK_ESW_DA_TYPE_LIST = 0,
 	CNXK_ESW_DA_TYPE_PFVF,
@@ -39,6 +39,9 @@ struct cnxk_esw_repr_hw_info {
 	uint16_t pfvf;
 	/* representor port id assigned to representee */
 	uint16_t port_id;
+	uint16_t num_flow_entries;
+
+	TAILQ_HEAD(flow_list, roc_npc_flow) repr_flow_list;
 };

 /* Structure representing per devarg information - this can be per representee
@@ -90,7 +93,6 @@ struct cnxk_eswitch_cxq {
 	uint8_t state;
 };

-TAILQ_HEAD(eswitch_flow_list, roc_npc_flow);
 struct cnxk_eswitch_dev {
 	/* Input parameters */
 	struct plt_pci_device *pci_dev;
@@ -116,6 +118,13 @@ struct cnxk_eswitch_dev {
 	uint16_t rep_cnt;
 	uint8_t configured;

+	/* NPC rxtx rules */
+	struct flow_list esw_flow_list;
+	uint16_t num_entries;
+	bool eswitch_vf_rules_setup;
+	uint16_t esw_pf_entry;
+	uint16_t esw_vf_entry;
+
 	/* Eswitch Representors Devargs */
 	uint16_t nb_esw_da;
 	uint16_t last_probed;
@@ -144,7 +153,10 @@ cnxk_eswitch_pmd_priv(void)
 	return mz->addr;
 }

+/* HW Resources */
 int cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev *eswitch_dev);
+struct cnxk_esw_repr_hw_info *cnxk_eswitch_representor_hw_info(struct cnxk_eswitch_dev *eswitch_dev,
+							       uint16_t hw_func);
 int cnxk_eswitch_repr_devargs(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev);
 int cnxk_eswitch_representor_info_get(struct cnxk_eswitch_dev *eswitch_dev,
 				      struct rte_eth_representor_info *info);
@@ -158,4 +170,11 @@ int cnxk_eswitch_rxq_start(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
 int cnxk_eswitch_rxq_stop(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
 int cnxk_eswitch_txq_start(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
 int cnxk_eswitch_txq_stop(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid);
+/* Flow Rules */
+int cnxk_eswitch_flow_rules_install(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func);
+int cnxk_eswitch_flow_rules_delete(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func);
+int cnxk_eswitch_pfvf_flow_rules_install(struct cnxk_eswitch_dev *eswitch_dev, bool is_vf);
+int cnxk_eswitch_flow_rule_shift(uint16_t hw_func, uint16_t *new_entry);
+int cnxk_eswitch_flow_rules_remove_list(struct cnxk_eswitch_dev *eswitch_dev,
+					struct flow_list *list, uint16_t hw_func);
 #endif /* __CNXK_ESWITCH_H__ */
diff --git a/drivers/net/cnxk/cnxk_eswitch_devargs.c b/drivers/net/cnxk/cnxk_eswitch_devargs.c
index 58383fb835..8167ce673a 100644
--- a/drivers/net/cnxk/cnxk_eswitch_devargs.c
+++ b/drivers/net/cnxk/cnxk_eswitch_devargs.c
@@ -72,6 +72,7 @@ populate_repr_hw_info(struct cnxk_eswitch_dev *eswitch_dev, struct rte_eth_devar
 		esw_da->repr_hw_info[i].pfvf = (eth_da->type == RTE_ETH_REPRESENTOR_PF) ?
 						       eth_da->ports[0] :
 						       eth_da->representor_ports[i];
+		TAILQ_INIT(&esw_da->repr_hw_info[i].repr_flow_list);

 		plt_esw_dbg("	HW func %x index %d type %d", hw_func, j, eth_da->type);
 	}
diff --git a/drivers/net/cnxk/cnxk_eswitch_flow.c b/drivers/net/cnxk/cnxk_eswitch_flow.c
new file mode 100644
index 0000000000..06077bfe92
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_eswitch_flow.c
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include
+
+#include
+
+const uint8_t eswitch_vlan_rss_key[ROC_NIX_RSS_KEY_LEN] = {
+	0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
+	0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
+	0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
+	0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
+	0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
+	0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE};
+
+int
+cnxk_eswitch_flow_rules_remove_list(struct cnxk_eswitch_dev *eswitch_dev, struct flow_list *list,
+				    uint16_t hw_func)
+{
+	struct roc_npc_flow *flow, *tvar;
+	int rc = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(flow, list, next, tvar) {
+		plt_esw_dbg("Removing flow %d", flow->mcam_id);
+		rc = roc_eswitch_npc_mcam_delete_rule(&eswitch_dev->npc, flow, hw_func);
+		if (rc)
+			plt_err("Failed to delete rule %d", flow->mcam_id);
+		rc = roc_npc_mcam_free(&eswitch_dev->npc, flow);
+		if (rc)
+			plt_err("Failed to free entry %d", flow->mcam_id);
+		TAILQ_REMOVE(list, flow, next);
+		rte_free(flow);
+	}
+
+	return rc;
+}
+
+static int
+eswitch_npc_vlan_rss_configure(struct roc_npc *roc_npc, struct roc_npc_flow *flow)
+{
+	struct roc_nix *roc_nix = roc_npc->roc_nix;
+	uint32_t qid, idx, hash, vlan_tci;
+	uint16_t *reta, reta_sz, id;
+	int rc = 0;
+
+	id = flow->mcam_id;
+	/* Setting up the key */
+	roc_nix_rss_key_set(roc_nix, eswitch_vlan_rss_key);
+
+	reta_sz = roc_nix->reta_sz;
+	reta = plt_zmalloc(reta_sz * sizeof(uint16_t), 0);
+	if (!reta) {
+		plt_err("Failed to allocate mem for reta table");
+		rc = -ENOMEM;
+		goto fail;
+	}
+	for (qid = 0; qid < reta_sz; qid++) {
+		vlan_tci = (1 << CNXK_ESWITCH_VFPF_SHIFT) | qid;
+		hash = rte_softrss(&vlan_tci, 1, eswitch_vlan_rss_key);
+		idx = hash & 0xFF;
+		reta[idx] = qid;
+	}
+	flow->mcam_id = id;
+	rc = roc_eswitch_npc_rss_action_configure(roc_npc, flow, FLOW_KEY_TYPE_VLAN, reta);
+	if (rc) {
+		plt_err("Failed to configure rss action, err %d", rc);
+		goto done;
+	}
+
done:
+	plt_free(reta);
fail:
+	return rc;
+}
+
+static int
+eswitch_pfvf_mcam_install_rules(struct cnxk_eswitch_dev *eswitch_dev, struct roc_npc_flow *flow,
+				bool is_vf)
+{
+	uint16_t vlan_tci = 0, hw_func;
+	int rc;
+
+	hw_func = eswitch_dev->npc.pf_func | is_vf;
+	if (!is_vf) {
+		/* Eswitch PF RX VLAN rule */
+		vlan_tci = 1ULL << CNXK_ESWITCH_VFPF_SHIFT;
+		rc = roc_eswitch_npc_mcam_rx_rule(&eswitch_dev->npc, flow, hw_func, vlan_tci,
+						  0xFF00);
+		if (rc) {
+			plt_err("Failed to install RX rule for ESW PF to ESW VF, rc %d", rc);
+			goto exit;
+		}
+		plt_esw_dbg("Installed eswitch PF RX rule %d", flow->mcam_id);
+		rc = eswitch_npc_vlan_rss_configure(&eswitch_dev->npc, flow);
+		if (rc)
+			goto exit;
+		flow->enable = true;
+	} else {
+		/* Eswitch VF RX VLAN rule */
+		rc = roc_eswitch_npc_mcam_rx_rule(&eswitch_dev->npc, flow, hw_func, vlan_tci,
+						  0xFF00);
+		if (rc) {
+			plt_err("Failed to install RX rule for ESW VF to ESW PF, rc %d", rc);
+			goto exit;
+		}
+		flow->enable = true;
+		plt_esw_dbg("Installed eswitch VF RX rule %d", flow->mcam_id);
+	}
+
+	return 0;
exit:
+	return rc;
+}
+
+static int
+eswitch_npc_get_counter(struct roc_npc *npc, struct roc_npc_flow *flow)
+{
+	uint16_t ctr_id;
+	int rc;
+
+	rc = roc_npc_mcam_alloc_counter(npc, &ctr_id);
+	if (rc < 0) {
+		plt_err("Failed to allocate counter, rc %d", rc);
+		goto fail;
+	}
+	flow->ctr_id = ctr_id;
+	flow->use_ctr = true;
+
+	rc = roc_npc_mcam_clear_counter(npc, flow->ctr_id);
+	if (rc < 0) {
+		plt_err("Failed to clear counter idx %d, rc %d", flow->ctr_id, rc);
+		goto free;
+	}
+	return 0;
free:
+	roc_npc_mcam_free_counter(npc, ctr_id);
fail:
+	return rc;
+}
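/* Illustration (not part of the patch): once eswitch_npc_get_counter() has
 * attached a counter to a flow, hits can be read back with the existing ROC
 * helper, e.g.:
 *
 *     uint64_t hits = 0;
 *     rc = roc_npc_mcam_read_counter(&eswitch_dev->npc, flow->ctr_id, &hits);
 *
 * roc_npc_mcam_read_counter() takes (struct roc_npc *, uint32_t ctr_id,
 * uint64_t *count); error handling is elided here.
 */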
+
+static int
+eswitch_npc_get_counter_entry_ref(struct roc_npc *npc, struct roc_npc_flow *flow,
+				  struct roc_npc_flow *ref_flow)
+{
+	int rc = 0, resp_count;
+
+	rc = eswitch_npc_get_counter(npc, flow);
+	if (rc)
+		goto free;
+
+	/* Allocate an entry with higher priority than the ref flow */
+	rc = roc_npc_mcam_alloc_entry(npc, flow, ref_flow, NPC_MCAM_HIGHER_PRIO, &resp_count);
+	if (rc) {
+		plt_err("Failed to allocate entry, err %d", rc);
+		goto free;
+	}
+	plt_esw_dbg("New entry %d ref entry %d resp_count %d", flow->mcam_id, ref_flow->mcam_id,
+		    resp_count);
+
+	return 0;
free:
+	roc_npc_mcam_free_counter(npc, flow->ctr_id);
+	return rc;
+}
+
+int
+cnxk_eswitch_flow_rule_shift(uint16_t hw_func, uint16_t *entry)
+{
+	struct cnxk_esw_repr_hw_info *repr_info;
+	struct cnxk_eswitch_dev *eswitch_dev;
+	struct roc_npc_flow *ref_flow, *flow;
+	uint16_t curr_entry, new_entry;
+	int rc = 0, resp_count;
+
+	eswitch_dev = cnxk_eswitch_pmd_priv();
+	if (!eswitch_dev) {
+		plt_err("Invalid eswitch_dev handle");
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	repr_info = cnxk_eswitch_representor_hw_info(eswitch_dev, hw_func);
+	if (!repr_info) {
+		plt_warn("Failed to get representor group for %x", hw_func);
+		rc = -ENOENT;
+		goto fail;
+	}
+
+	ref_flow = TAILQ_FIRST(&repr_info->repr_flow_list);
+	if (*entry > ref_flow->mcam_id) {
+		flow = plt_zmalloc(sizeof(struct roc_npc_flow), 0);
+		if (!flow) {
+			plt_err("Failed to allocate memory");
+			rc = -ENOMEM;
+			goto fail;
+		}
+
+		/* Allocate a higher priority flow rule */
+		rc = roc_npc_mcam_alloc_entry(&eswitch_dev->npc, flow, ref_flow,
+					      NPC_MCAM_HIGHER_PRIO, &resp_count);
+		if (rc < 0) {
+			plt_err("Failed to allocate a new mcam entry, rc %d", rc);
+			goto fail;
+		}
+
+		if (flow->mcam_id > ref_flow->mcam_id) {
+			plt_err("New flow %d is still at lower priority than ref_flow %d",
+				flow->mcam_id, ref_flow->mcam_id);
+			rc = -EINVAL;
+			goto free_entry;
+		}
+
+		plt_info("Before shift: HW_func %x curr_entry %d ref flow id %d new_entry %d",
+			 hw_func, *entry, ref_flow->mcam_id, flow->mcam_id);
+
+		curr_entry = *entry;
+		new_entry = flow->mcam_id;
+
+		rc = roc_npc_mcam_move(&eswitch_dev->npc, curr_entry, new_entry);
+		if (rc) {
+			plt_err("Failed to shift the new index %d to curr index %d, err %d", *entry,
+				curr_entry, rc);
+			goto free_entry;
+		}
+		*entry = flow->mcam_id;
+
+		/* Freeing the current entry */
+		rc = roc_npc_mcam_free_entry(&eswitch_dev->npc, curr_entry);
+		if (rc) {
+			plt_err("Failed to free the old entry, err %d", rc);
+			goto free_entry;
+		}
err %d", rc); + goto free_entry; + } + + plt_free(flow); + plt_info("After shift: HW_func %x old_entry %d new_entry %d", hw_func, curr_entry, + *entry); + } + + return 0; +free_entry: + +fail: + return rc; +} + +int +cnxk_eswitch_flow_rules_delete(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func) +{ + struct cnxk_esw_repr_hw_info *repr_info; + struct flow_list *list; + int rc = 0; + + repr_info = cnxk_eswitch_representor_hw_info(eswitch_dev, hw_func); + if (!repr_info) { + plt_warn("Failed to get representor group for %x", hw_func); + rc = -ENOENT; + goto fail; + } + list = &repr_info->repr_flow_list; + + plt_esw_dbg("Deleting flows for %x", hw_func); + rc = cnxk_eswitch_flow_rules_remove_list(eswitch_dev, list, hw_func); + if (rc) + plt_err("Failed to delete rules for hw func %x", hw_func); + +fail: + return rc; +} + +int +cnxk_eswitch_flow_rules_install(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func) +{ + struct roc_npc_flow *rx_flow, *tx_flow, *flow_iter, *esw_pf_flow = NULL; + struct cnxk_esw_repr_hw_info *repr_info; + struct flow_list *list; + uint16_t vlan_tci; + int rc = 0; + + repr_info = cnxk_eswitch_representor_hw_info(eswitch_dev, hw_func); + if (!repr_info) { + plt_err("Failed to get representor group for %x", hw_func); + rc = -EINVAL; + goto fail; + } + list = &repr_info->repr_flow_list; + + /* Taking ESW PF as reference entry for installing new rules */ + TAILQ_FOREACH(flow_iter, &eswitch_dev->esw_flow_list, next) { + if (flow_iter->mcam_id == eswitch_dev->esw_pf_entry) { + esw_pf_flow = flow_iter; + break; + } + } + + if (!esw_pf_flow) { + plt_err("Failed to get the ESW PF flow"); + rc = -EINVAL; + goto fail; + } + + /* Installing RX rule */ + rx_flow = plt_zmalloc(sizeof(struct roc_npc_flow), 0); + if (!rx_flow) { + plt_err("Failed to allocate memory"); + rc = -ENOMEM; + goto fail; + } + + rc = eswitch_npc_get_counter_entry_ref(&eswitch_dev->npc, rx_flow, esw_pf_flow); + if (rc) { + plt_err("Failed to get counter and mcam entry, rc %d", rc); + goto free_rx_flow; + } + + /* VLAN TCI value for this representee is the rep id from AF driver */ + vlan_tci = repr_info->rep_id; + rc = roc_eswitch_npc_mcam_rx_rule(&eswitch_dev->npc, rx_flow, hw_func, vlan_tci, 0xFFFF); + if (rc) { + plt_err("Failed to install RX rule for ESW PF to ESW VF, rc %d", rc); + goto free_rx_entry; + } + rx_flow->enable = true; + /* List in ascending order of mcam entries */ + TAILQ_FOREACH(flow_iter, list, next) { + if (flow_iter->mcam_id > rx_flow->mcam_id) { + TAILQ_INSERT_BEFORE(flow_iter, rx_flow, next); + goto done_rx; + } + } + TAILQ_INSERT_TAIL(list, rx_flow, next); +done_rx: + repr_info->num_flow_entries++; + plt_esw_dbg("Installed RX flow rule %d for representee %x with vlan tci %x MCAM id %d", + eswitch_dev->num_entries, hw_func, vlan_tci, rx_flow->mcam_id); + + /* Installing TX rule */ + tx_flow = plt_zmalloc(sizeof(struct roc_npc_flow), 0); + if (!tx_flow) { + plt_err("Failed to allocate memory"); + rc = -ENOMEM; + goto remove_rx_rule; + } + + rc = eswitch_npc_get_counter_entry_ref(&eswitch_dev->npc, tx_flow, esw_pf_flow); + if (rc) { + plt_err("Failed to get counter and mcam entry, rc %d", rc); + goto free_tx_flow; + } + + vlan_tci = (1ULL << CNXK_ESWITCH_VFPF_SHIFT) | repr_info->rep_id; + rc = roc_eswitch_npc_mcam_tx_rule(&eswitch_dev->npc, tx_flow, hw_func, vlan_tci); + if (rc) { + plt_err("Failed to install RX rule for ESW PF to ESW VF, rc %d", rc); + goto free_tx_entry; + } + tx_flow->enable = true; + /* List in ascending order of mcam entries */ + TAILQ_FOREACH(flow_iter, 
list, next) { + if (flow_iter->mcam_id > tx_flow->mcam_id) { + TAILQ_INSERT_BEFORE(flow_iter, tx_flow, next); + goto done_tx; + } + } + TAILQ_INSERT_TAIL(list, tx_flow, next); +done_tx: + repr_info->num_flow_entries++; + plt_esw_dbg("Installed TX flow rule %d for representee %x with vlan tci %x MCAM id %d", + repr_info->num_flow_entries, hw_func, vlan_tci, tx_flow->mcam_id); + + return 0; +free_tx_entry: + roc_npc_mcam_free(&eswitch_dev->npc, tx_flow); +free_tx_flow: + rte_free(tx_flow); +remove_rx_rule: + TAILQ_REMOVE(list, rx_flow, next); +free_rx_entry: + roc_npc_mcam_free(&eswitch_dev->npc, rx_flow); +free_rx_flow: + rte_free(rx_flow); +fail: + return rc; +} + +int +cnxk_eswitch_pfvf_flow_rules_install(struct cnxk_eswitch_dev *eswitch_dev, bool is_vf) +{ + struct roc_npc_flow *flow, *flow_iter; + struct flow_list *list; + int rc = 0; + + list = &eswitch_dev->esw_flow_list; + flow = plt_zmalloc(sizeof(struct roc_npc_flow), 0); + if (!flow) { + plt_err("Failed to allocate memory"); + rc = -ENOMEM; + goto fail; + } + + rc = eswitch_npc_get_counter(&eswitch_dev->npc, flow); + if (rc) { + plt_err("Failed to get counter and mcam entry, rc %d", rc); + goto free_flow; + } + if (!is_vf) { + /* Reserving an entry for esw VF but will not be installed */ + rc = roc_npc_get_free_mcam_entry(&eswitch_dev->npc, flow); + if (rc < 0) { + plt_err("Failed to allocate entry for vf, err %d", rc); + goto free_flow; + } + eswitch_dev->esw_vf_entry = flow->mcam_id; + /* Allocate an entry for esw PF */ + rc = eswitch_npc_get_counter_entry_ref(&eswitch_dev->npc, flow, flow); + if (rc) { + plt_err("Failed to allocate entry for pf, err %d", rc); + goto free_flow; + } + eswitch_dev->esw_pf_entry = flow->mcam_id; + plt_esw_dbg("Allocated entries for esw: PF %d and VF %d", eswitch_dev->esw_pf_entry, + eswitch_dev->esw_vf_entry); + } else { + flow->mcam_id = eswitch_dev->esw_vf_entry; + } + + rc = eswitch_pfvf_mcam_install_rules(eswitch_dev, flow, is_vf); + if (rc) { + plt_err("Failed to install entries, rc %d", rc); + goto free_flow; + } + + /* List in ascending order of mcam entries */ + TAILQ_FOREACH(flow_iter, list, next) { + if (flow_iter->mcam_id > flow->mcam_id) { + TAILQ_INSERT_BEFORE(flow_iter, flow, next); + goto done; + } + } + TAILQ_INSERT_TAIL(list, flow, next); +done: + eswitch_dev->num_entries++; + plt_esw_dbg("Installed new eswitch flow rule %d with MCAM id %d", eswitch_dev->num_entries, + flow->mcam_id); + + return 0; + +free_flow: + cnxk_eswitch_flow_rules_remove_list(eswitch_dev, &eswitch_dev->esw_flow_list, + eswitch_dev->npc.pf_func); +fail: + return rc; +} diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build index fcd5d3d569..488e89253d 100644 --- a/drivers/net/cnxk/meson.build +++ b/drivers/net/cnxk/meson.build @@ -30,6 +30,7 @@ sources = files( 'cnxk_ethdev_sec_telemetry.c', 'cnxk_eswitch.c', 'cnxk_eswitch_devargs.c', + 'cnxk_eswitch_flow.c', 'cnxk_link.c', 'cnxk_lookup.c', 'cnxk_ptp.c', From patchwork Sun Mar 3 17:38:19 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137823 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 3839243C3E; Sun, 3 Mar 2024 18:39:56 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 19A6943670; Sun, 3 Mar 
2024 18:39:09 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id 6B61143667 for ; Sun, 3 Mar 2024 18:39:07 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id 423HZW65022991 for ; Sun, 3 Mar 2024 09:39:06 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h= from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-type; s=pfpt0220; bh=+WlWTcfMGW/zWv758/Dky GbNh4XxjIjYFZlVqjzC7MA=; b=AwZi4VkYSogTAKkUYc8PNE73sKRmGaqqZ3YwR XJCxv+H/rwKtpI7z97bIaNPIRFQSX4MgfTXHfQy3wzHshqLb26cWuvAYt/rjdIlP c+1KRRmGGXRxMkqNVmGvzGOx1gzTtaSgaoTQyArvpJiMiTm3jx5fcq+hNg+5HqSw sv6Sq0FzlSbE2yYX2zuxyWbmSLLpV91NVq3+hmQg+/QEu6hMzGtEWXAsACdfKqf0 ewVjmAImY2EUP3vT7Wm0abZvThiu64SmfspGBn4bkr0jbXYQ12o3Lqg0rQHdAbHS S6/Xpyiwt3T/gRH2mLOtmEerEWOYw0wi0hDmGrrOHcY0br/hw== Received: from dc6wp-exch02.marvell.com ([4.21.29.225]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3wm2bptrtf-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT) for ; Sun, 03 Mar 2024 09:39:06 -0800 (PST) Received: from DC6WP-EXCH02.marvell.com (10.76.176.209) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Sun, 3 Mar 2024 09:39:05 -0800 Received: from maili.marvell.com (10.69.176.80) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server id 15.2.1258.12 via Frontend Transport; Sun, 3 Mar 2024 09:39:05 -0800 Received: from localhost.localdomain (unknown [10.29.52.211]) by maili.marvell.com (Postfix) with ESMTP id CC5B63F718E; Sun, 3 Mar 2024 09:39:02 -0800 (PST) From: Harman Kalra To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao , Harman Kalra CC: Subject: [PATCH v6 09/23] net/cnxk: eswitch fastpath routines Date: Sun, 3 Mar 2024 23:08:19 +0530 Message-ID: <20240303173833.100039-10-hkalra@marvell.com> X-Mailer: git-send-email 2.18.0 In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: -ln7VIsXa-lHPooHPIRLDABiP9NI2tJI X-Proofpoint-ORIG-GUID: -ln7VIsXa-lHPooHPIRLDABiP9NI2tJI X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.1011,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2024-03-03_08,2024-03-01_03,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Implementing fastpath RX and TX fast path routines which can be invoked from respective representors rx burst and tx burst Signed-off-by: Harman Kalra --- drivers/net/cnxk/cnxk_eswitch.h | 5 + drivers/net/cnxk/cnxk_eswitch_rxtx.c | 211 +++++++++++++++++++++++++++ drivers/net/cnxk/meson.build | 1 + 3 files changed, 217 insertions(+) create mode 100644 drivers/net/cnxk/cnxk_eswitch_rxtx.c diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h index 5b4e1b0a71..4edfa91bdc 100644 --- a/drivers/net/cnxk/cnxk_eswitch.h +++ b/drivers/net/cnxk/cnxk_eswitch.h @@ -177,4 +177,9 @@ int cnxk_eswitch_pfvf_flow_rules_install(struct cnxk_eswitch_dev *eswitch_dev, b int cnxk_eswitch_flow_rule_shift(uint16_t hw_func, uint16_t *new_entry); int cnxk_eswitch_flow_rules_remove_list(struct 
cnxk_eswitch_dev *eswitch_dev, struct flow_list *list, uint16_t hw_func); +/* RX TX fastpath routines */ +uint16_t cnxk_eswitch_dev_tx_burst(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, + struct rte_mbuf **pkts, uint16_t nb_tx, const uint16_t flags); +uint16_t cnxk_eswitch_dev_rx_burst(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, + struct rte_mbuf **pkts, uint16_t nb_pkts); #endif /* __CNXK_ESWITCH_H__ */ diff --git a/drivers/net/cnxk/cnxk_eswitch_rxtx.c b/drivers/net/cnxk/cnxk_eswitch_rxtx.c new file mode 100644 index 0000000000..d57e32b091 --- /dev/null +++ b/drivers/net/cnxk/cnxk_eswitch_rxtx.c @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. + */ + +#include + +static __rte_always_inline struct rte_mbuf * +eswitch_nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off) +{ + rte_iova_t buff; + + /* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */ + buff = *((rte_iova_t *)((uint64_t *)cq + 9)); + return (struct rte_mbuf *)(buff - data_off); +} + +static inline uint64_t +eswitch_nix_rx_nb_pkts(struct roc_nix_cq *cq, const uint64_t wdata, const uint32_t qmask) +{ + uint64_t reg, head, tail; + uint32_t available; + + /* Update the available count if cached value is not enough */ + + /* Use LDADDA version to avoid reorder */ + reg = roc_atomic64_add_sync(wdata, cq->status); + /* CQ_OP_STATUS operation error */ + if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) || reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)) + return 0; + + tail = reg & 0xFFFFF; + head = (reg >> 20) & 0xFFFFF; + if (tail < head) + available = tail - head + qmask + 1; + else + available = tail - head; + + return available; +} + +static inline void +nix_cn9k_xmit_one(uint64_t *cmd, void *lmt_addr, const plt_iova_t io_addr) +{ + uint64_t lmt_status; + + do { + roc_lmt_mov(lmt_addr, cmd, 0); + lmt_status = roc_lmt_submit_ldeor(io_addr); + } while (lmt_status == 0); +} + +uint16_t +cnxk_eswitch_dev_tx_burst(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, + struct rte_mbuf **pkts, uint16_t nb_xmit, const uint16_t flags) +{ + struct roc_nix_sq *sq = &eswitch_dev->txq[qid].sqs; + struct roc_nix_rq *rq = &eswitch_dev->rxq[qid].rqs; + uint64_t aura_handle, cmd[6], data = 0; + uint16_t lmt_id, pkt = 0, nb_tx = 0; + struct nix_send_ext_s *send_hdr_ext; + struct nix_send_hdr_s *send_hdr; + uint16_t vlan_tci = qid; + union nix_send_sg_s *sg; + uintptr_t lmt_base, pa; + int64_t fc_pkts, dw_m1; + rte_iova_t io_addr; + + if (unlikely(eswitch_dev->txq[qid].state != CNXK_ESWITCH_QUEUE_STATE_STARTED)) + return 0; + + lmt_base = sq->roc_nix->lmt_base; + io_addr = sq->io_addr; + aura_handle = rq->aura_handle; + /* Get LMT base address and LMT ID as per thread ID */ + lmt_id = roc_plt_control_lmt_id_get(); + lmt_base += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2); + /* Double word minus 1: LMTST size-1 in units of 128 bits */ + /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */ + dw_m1 = cn10k_nix_tx_ext_subs(flags) + 1; + + memset(cmd, 0, sizeof(cmd)); + send_hdr = (struct nix_send_hdr_s *)&cmd[0]; + send_hdr->w0.sizem1 = dw_m1; + send_hdr->w0.sq = sq->qid; + + if (dw_m1 >= 2) { + send_hdr_ext = (struct nix_send_ext_s *)&cmd[2]; + send_hdr_ext->w0.subdc = NIX_SUBDC_EXT; + if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) { + send_hdr_ext->w1.vlan0_ins_ena = true; + /* 2B before end of l2 header */ + send_hdr_ext->w1.vlan0_ins_ptr = 12; + send_hdr_ext->w1.vlan0_ins_tci = 0; + } + sg = (union nix_send_sg_s *)&cmd[4]; + } else { + sg = (union nix_send_sg_s *)&cmd[2]; + } + + sg->subdc = 
NIX_SUBDC_SG; + sg->segs = 1; + sg->ld_type = NIX_SENDLDTYPE_LDD; + + /* Tx */ + fc_pkts = ((int64_t)sq->nb_sqb_bufs_adj - *((uint64_t *)sq->fc)) << sq->sqes_per_sqb_log2; + + if (fc_pkts < 0) + nb_tx = 0; + else + nb_tx = PLT_MIN(nb_xmit, (uint64_t)fc_pkts); + + for (pkt = 0; pkt < nb_tx; pkt++) { + send_hdr->w0.total = pkts[pkt]->pkt_len; + if (pkts[pkt]->pool) { + aura_handle = pkts[pkt]->pool->pool_id; + send_hdr->w0.aura = roc_npa_aura_handle_to_aura(aura_handle); + } else { + send_hdr->w0.df = 1; + } + if (dw_m1 >= 2 && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) + send_hdr_ext->w1.vlan0_ins_tci = vlan_tci; + sg->seg1_size = pkts[pkt]->pkt_len; + *(plt_iova_t *)(sg + 1) = rte_mbuf_data_iova(pkts[pkt]); + + plt_esw_dbg("Transmitting pkt %d (%p) vlan tci %x on sq %d esw qid %d", pkt, + pkts[pkt], vlan_tci, sq->qid, qid); + if (roc_model_is_cn9k()) { + nix_cn9k_xmit_one(cmd, sq->lmt_addr, sq->io_addr); + } else { + cn10k_nix_xmit_mv_lmt_base(lmt_base, cmd, flags); + /* PA<6:4> = LMTST size-1 in units of 128 bits. Size of the first LMTST in + * burst. + */ + pa = io_addr | (dw_m1 << 4); + data &= ~0x7ULL; + /*<15:12> = CNTM1: Count minus one of LMTSTs in the burst */ + data = (0ULL << 12); + /* *<10:0> = LMT_ID: Identifies which LMT line is used for the first LMTST + */ + data |= (uint64_t)lmt_id; + + /* STEOR0 */ + roc_lmt_submit_steorl(data, pa); + rte_io_wmb(); + } + } + + return nb_tx; +} + +uint16_t +cnxk_eswitch_dev_rx_burst(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, + struct rte_mbuf **pkts, uint16_t nb_pkts) +{ + struct roc_nix_rq *rq = &eswitch_dev->rxq[qid].rqs; + struct roc_nix_cq *cq = &eswitch_dev->cxq[qid].cqs; + const union nix_rx_parse_u *rx; + struct nix_cqe_hdr_s *cqe; + uint64_t pkt = 0, nb_rx; + struct rte_mbuf *mbuf; + uint64_t wdata; + uint32_t qmask; + uintptr_t desc; + uint32_t head; + + if (unlikely(eswitch_dev->rxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_STARTED)) + return 0; + + wdata = cq->wdata; + qmask = cq->qmask; + desc = (uintptr_t)cq->desc_base; + nb_rx = eswitch_nix_rx_nb_pkts(cq, wdata, qmask); + nb_rx = RTE_MIN(nb_rx, nb_pkts); + head = cq->head; + + /* Nothing to receive */ + if (!nb_rx) + return 0; + + /* Rx */ + for (pkt = 0; pkt < nb_rx; pkt++) { + /* Prefetch N desc ahead */ + rte_prefetch_non_temporal((void *)(desc + (CQE_SZ((head + 2) & qmask)))); + cqe = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head)); + rx = (const union nix_rx_parse_u *)((const uint64_t *)cqe + 1); + + /* Skip QE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */ + mbuf = eswitch_nix_get_mbuf_from_cqe(cqe, rq->first_skip); + mbuf->pkt_len = rx->pkt_lenm1 + 1; + mbuf->data_len = rx->pkt_lenm1 + 1; + mbuf->data_off = 128; + /* Rx parse to capture vlan info */ + if (rx->vtag0_valid) + mbuf->vlan_tci = rx->vtag0_tci; + /* Populate RSS hash */ + mbuf->hash.rss = cqe->tag; + mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; + pkts[pkt] = mbuf; + roc_prefetch_store_keep(mbuf); + plt_esw_dbg("Packet %d rec on queue %d esw qid %d hash %x mbuf %p vlan tci %d", + (uint32_t)pkt, rq->qid, qid, mbuf->hash.rss, mbuf, mbuf->vlan_tci); + head++; + head &= qmask; + } + + /* Free all the CQs that we've processed */ + rte_write64_relaxed((wdata | nb_rx), (void *)cq->door); + cq->head = head; + + return nb_rx; +} diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build index 488e89253d..7121845dc6 100644 --- a/drivers/net/cnxk/meson.build +++ b/drivers/net/cnxk/meson.build @@ -31,6 +31,7 @@ sources = files( 'cnxk_eswitch.c', 'cnxk_eswitch_devargs.c', 'cnxk_eswitch_flow.c', + 
'cnxk_eswitch_rxtx.c', 'cnxk_link.c', 'cnxk_lookup.c', 'cnxk_ptp.c', From patchwork Sun Mar 3 17:38:20 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137824 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 99BB743C3E; Sun, 3 Mar 2024 18:40:02 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 4CC6843674; Sun, 3 Mar 2024 18:39:13 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id 865184365E for ; Sun, 3 Mar 2024 18:39:11 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id 423GWjXI019709 for ; Sun, 3 Mar 2024 09:39:10 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h= from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-type; s=pfpt0220; bh=inS0RrqpVa4yl58txW/yW T8tkmlX/Y5UwBYURztMJgA=; b=RigsKEVtXTJN5czKtOGkoe56u1ecnCLTaQ112 lG/XcbA0VKH7LgMkGraIBL7W0aGXVTg7P2OHD0yhO18pjVTh3OG8lim5/CmTrQVR 36ilyhv33tiuEhW/ERHjfDDc+rQcb+gMSAkO/bC2B8u/tGZ6DRCp4x1ES8LvSLAL FHbH/2LdGq/Spde+naUgohbdKPrfHBulNXNMKhHsXhFzQ5zR8yGgeqAstOhQDSov gzbEmS23XXsVP+JMKyM0qS3JA1zk46fdP6dBjLKMVWHcyxB4loG2KNeuedge0cAi 3wbmQhcAniw1hsa84hpKtBIfQEfFhlVYFwsiQFcmiFoeuEBmw== Received: from dc6wp-exch02.marvell.com ([4.21.29.225]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3wm2bptrtp-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT) for ; Sun, 03 Mar 2024 09:39:09 -0800 (PST) Received: from DC6WP-EXCH02.marvell.com (10.76.176.209) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Sun, 3 Mar 2024 09:39:08 -0800 Received: from maili.marvell.com (10.69.176.80) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server id 15.2.1258.12 via Frontend Transport; Sun, 3 Mar 2024 09:39:08 -0800 Received: from localhost.localdomain (unknown [10.29.52.211]) by maili.marvell.com (Postfix) with ESMTP id BDD2B3F718E; Sun, 3 Mar 2024 09:39:05 -0800 (PST) From: Harman Kalra To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao , Harman Kalra CC: Subject: [PATCH v6 10/23] net/cnxk: add representor control plane Date: Sun, 3 Mar 2024 23:08:20 +0530 Message-ID: <20240303173833.100039-11-hkalra@marvell.com> X-Mailer: git-send-email 2.18.0 In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: N3kyWEe6Iek1Ega4Y1aAR1opjZF80DF- X-Proofpoint-ORIG-GUID: N3kyWEe6Iek1Ega4Y1aAR1opjZF80DF- X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.1011,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2024-03-03_08,2024-03-01_03,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Implementing the control path for representor ports, where represented ports can be configured using TLV messaging. 
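Editor's note on the TLV messaging mentioned above: every element on the control channel is framed as a (type, length) pair followed by its payload, so a complete message is a header TLV, one or more command TLVs, and an end marker (see cnxk_rep_msg_populate_header() and cnxk_rep_msg_populate_command() in this patch). The sketch below illustrates that framing only; the demo_ names are invented and it is not driver code.

#include <stdint.h>
#include <string.h>

struct demo_tlv {
	uint32_t type;   /* e.g. CNXK_TYPE_HEADER or CNXK_TYPE_MSG */
	uint32_t length; /* payload bytes that follow */
};

/* Append one TLV at offset 'off' in 'buf' and return the new offset,
 * mirroring how the populate helpers keep a running length.
 */
static uint32_t
demo_tlv_put(uint8_t *buf, uint32_t off, uint32_t type,
	     const void *payload, uint32_t len)
{
	struct demo_tlv t = { .type = type, .length = len };

	memcpy(buf + off, &t, sizeof(t));
	off += sizeof(t);
	if (len != 0) {
		memcpy(buf + off, payload, len);
		off += len;
	}
	return off;
}

A sender chains header, command and end-marker TLVs into one buffer and writes it to the unix socket; the receiver walks the same offsets back, which is the shape of parse_validate_header() and message_data_extract() later in this patch.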
Signed-off-by: Harman Kalra --- drivers/net/cnxk/cnxk_eswitch.c | 70 ++- drivers/net/cnxk/cnxk_eswitch.h | 8 + drivers/net/cnxk/cnxk_rep.c | 52 ++ drivers/net/cnxk/cnxk_rep.h | 3 + drivers/net/cnxk/cnxk_rep_msg.c | 827 ++++++++++++++++++++++++++++++++ drivers/net/cnxk/cnxk_rep_msg.h | 95 ++++ drivers/net/cnxk/meson.build | 1 + 7 files changed, 1048 insertions(+), 8 deletions(-) create mode 100644 drivers/net/cnxk/cnxk_rep_msg.c create mode 100644 drivers/net/cnxk/cnxk_rep_msg.h diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c index 25992fddc9..14d0df8791 100644 --- a/drivers/net/cnxk/cnxk_eswitch.c +++ b/drivers/net/cnxk/cnxk_eswitch.c @@ -9,6 +9,27 @@ #define CNXK_NIX_DEF_SQ_COUNT 512 +int +cnxk_eswitch_representor_id(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, + uint16_t *rep_id) +{ + struct cnxk_esw_repr_hw_info *repr_info; + int rc = 0; + + repr_info = cnxk_eswitch_representor_hw_info(eswitch_dev, hw_func); + if (!repr_info) { + plt_warn("Failed to get representor group for %x", hw_func); + rc = -ENOENT; + goto fail; + } + + *rep_id = repr_info->rep_id; + + return 0; +fail: + return rc; +} + struct cnxk_esw_repr_hw_info * cnxk_eswitch_representor_hw_info(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func) { @@ -86,8 +107,41 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev) } /* Remove representor devices associated with PF */ - if (eswitch_dev->repr_cnt.nb_repr_created) + if (eswitch_dev->repr_cnt.nb_repr_created) { + /* Exiting the rep msg ctrl thread */ + if (eswitch_dev->start_ctrl_msg_thrd) { + uint32_t sunlen; + struct sockaddr_un sun = {0}; + int sock_fd = 0; + + eswitch_dev->start_ctrl_msg_thrd = false; + if (!eswitch_dev->client_connected) { + plt_esw_dbg("Establishing connection for teardown"); + sock_fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (sock_fd == -1) { + plt_err("Failed to open socket. 
err %d", -errno); + return -errno; + } + sun.sun_family = AF_UNIX; + sunlen = sizeof(struct sockaddr_un); + strncpy(sun.sun_path, CNXK_ESWITCH_CTRL_MSG_SOCK_PATH, + sizeof(sun.sun_path) - 1); + + if (connect(sock_fd, (struct sockaddr *)&sun, sunlen) < 0) { + plt_err("Failed to connect socket: %s, err %d", + CNXK_ESWITCH_CTRL_MSG_SOCK_PATH, errno); + close(sock_fd); + return -errno; + } + } + rte_thread_join(eswitch_dev->rep_ctrl_msg_thread, NULL); + if (!eswitch_dev->client_connected) + close(sock_fd); + } + + /* Remove representor devices associated with PF */ cnxk_rep_dev_remove(eswitch_dev); + } /* Cleanup NPC rxtx flow rules */ cnxk_eswitch_flow_rules_remove_list(eswitch_dev, &eswitch_dev->esw_flow_list, @@ -106,13 +160,6 @@ cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev *eswitch_dev) { int rc; - /* Enable Rx in NPC */ - rc = roc_nix_npc_rx_ena_dis(&eswitch_dev->nix, true); - if (rc) { - plt_err("Failed to enable NPC rx %d", rc); - goto done; - } - /* Install eswitch PF mcam rules */ rc = cnxk_eswitch_pfvf_flow_rules_install(eswitch_dev, false); if (rc) { @@ -128,6 +175,13 @@ cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev *eswitch_dev) goto done; } + /* Enable Rx in NPC */ + rc = roc_nix_npc_rx_ena_dis(&eswitch_dev->nix, true); + if (rc) { + plt_err("Failed to enable NPC rx %d", rc); + goto done; + } + rc = roc_npc_mcam_enable_all_entries(&eswitch_dev->npc, 1); if (rc) { plt_err("Failed to enable NPC entries %d", rc); diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h index 4edfa91bdc..ecf10a8e08 100644 --- a/drivers/net/cnxk/cnxk_eswitch.h +++ b/drivers/net/cnxk/cnxk_eswitch.h @@ -133,6 +133,12 @@ struct cnxk_eswitch_dev { /* No of representors */ struct cnxk_eswitch_repr_cnt repr_cnt; + /* Representor control channel field */ + bool start_ctrl_msg_thrd; + rte_thread_t rep_ctrl_msg_thread; + bool client_connected; + int sock_fd; + /* Port representor fields */ rte_spinlock_t rep_lock; uint16_t nb_switch_domain; @@ -155,6 +161,8 @@ cnxk_eswitch_pmd_priv(void) /* HW Resources */ int cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev *eswitch_dev); +int cnxk_eswitch_representor_id(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, + uint16_t *rep_id); struct cnxk_esw_repr_hw_info *cnxk_eswitch_representor_hw_info(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func); int cnxk_eswitch_repr_devargs(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev); diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c index 55156f5b56..5b619ebb9e 100644 --- a/drivers/net/cnxk/cnxk_rep.c +++ b/drivers/net/cnxk/cnxk_rep.c @@ -2,6 +2,7 @@ * Copyright(C) 2024 Marvell. */ #include +#include #define PF_SHIFT 10 #define PF_MASK 0x3F @@ -25,6 +26,48 @@ switch_domain_id_allocate(struct cnxk_eswitch_dev *eswitch_dev, uint16_t pf) return RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; } +int +cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t *rep_id) +{ + struct cnxk_rep_dev *rep_dev = NULL; + struct rte_eth_dev *rep_eth_dev; + int i, rc = 0; + + /* Delete the individual PFVF flows as common eswitch VF rule will be used. 
*/ + rc = cnxk_eswitch_flow_rules_delete(eswitch_dev, hw_func); + if (rc) { + if (rc != -ENOENT) { + plt_err("Failed to delete %x flow rules", hw_func); + goto fail; + } + } + /* Rep ID for respective HW func */ + rc = cnxk_eswitch_representor_id(eswitch_dev, hw_func, rep_id); + if (rc) { + if (rc != -ENOENT) { + plt_err("Failed to get rep info for %x", hw_func); + goto fail; + } + } + /* Update the state - representee is standalone or part of companian app */ + for (i = 0; i < eswitch_dev->repr_cnt.nb_repr_probed; i++) { + rep_eth_dev = eswitch_dev->rep_info[i].rep_eth_dev; + if (!rep_eth_dev) { + plt_err("Failed to get rep ethdev handle"); + rc = -EINVAL; + goto fail; + } + + rep_dev = cnxk_rep_pmd_priv(rep_eth_dev); + if (rep_dev->hw_func == hw_func && rep_dev->is_vf_active) + rep_dev->native_repte = false; + } + + return 0; +fail: + return rc; +} + int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev) { @@ -250,6 +293,15 @@ cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswi } eswitch_dev->last_probed = i; + /* Launch a thread to handle control messages */ + if (!eswitch_dev->start_ctrl_msg_thrd) { + rc = cnxk_rep_msg_control_thread_launch(eswitch_dev); + if (rc) { + plt_err("Failed to launch message ctrl thread"); + goto fail; + } + } + return 0; fail: return rc; diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h index b802c44b33..da298823a7 100644 --- a/drivers/net/cnxk/cnxk_rep.h +++ b/drivers/net/cnxk/cnxk_rep.h @@ -16,6 +16,8 @@ struct cnxk_rep_dev { uint16_t switch_domain_id; struct cnxk_eswitch_dev *parent_dev; uint16_t hw_func; + bool is_vf_active; + bool native_repte; uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; }; @@ -46,5 +48,6 @@ int cnxk_rep_dev_close(struct rte_eth_dev *eth_dev); int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats); int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev); int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops); +int cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t *rep_id); #endif /* __CNXK_REP_H__ */ diff --git a/drivers/net/cnxk/cnxk_rep_msg.c b/drivers/net/cnxk/cnxk_rep_msg.c new file mode 100644 index 0000000000..0af87f0169 --- /dev/null +++ b/drivers/net/cnxk/cnxk_rep_msg.c @@ -0,0 +1,827 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. 
+ */ + +#include +#include + +#define CTRL_MSG_RCV_TIMEOUT_MS 2000 +#define CTRL_MSG_READY_WAIT_US 2000 +#define CTRL_MSG_THRD_NAME_LEN 35 +#define CTRL_MSG_BUFFER_SZ 1500 +#define CTRL_MSG_SIGNATURE 0xcdacdeadbeefcadc + +static void +close_socket(int fd) +{ + close(fd); + unlink(CNXK_ESWITCH_CTRL_MSG_SOCK_PATH); +} + +static int +receive_control_message(int socketfd, void *data, uint32_t len) +{ + char ctl[CMSG_SPACE(sizeof(int)) + CMSG_SPACE(sizeof(struct ucred))] = {0}; + struct ucred *cr __rte_unused; + struct msghdr mh = {0}; + struct cmsghdr *cmsg; + static uint64_t rec; + struct iovec iov[1]; + ssize_t size; + int afd = -1; + + iov[0].iov_base = data; + iov[0].iov_len = len; + mh.msg_iov = iov; + mh.msg_iovlen = 1; + mh.msg_control = ctl; + mh.msg_controllen = sizeof(ctl); + + size = recvmsg(socketfd, &mh, MSG_DONTWAIT); + if (size < 0) { + if (errno == EAGAIN) + return 0; + plt_err("recvmsg err %d size %ld", errno, size); + return -errno; + } else if (size == 0) { + return 0; + } + + rec++; + plt_rep_dbg("Packet %" PRId64 " Received %" PRId64 " bytes over socketfd %d", + rec, size, socketfd); + + cr = 0; + cmsg = CMSG_FIRSTHDR(&mh); + while (cmsg) { + if (cmsg->cmsg_level == SOL_SOCKET) { + if (cmsg->cmsg_type == SCM_CREDENTIALS) { + cr = (struct ucred *)CMSG_DATA(cmsg); + } else if (cmsg->cmsg_type == SCM_RIGHTS) { + rte_memcpy(&afd, CMSG_DATA(cmsg), sizeof(int)); + plt_rep_dbg("afd %d", afd); + } + } + cmsg = CMSG_NXTHDR(&mh, cmsg); + } + return size; +} + +static int +send_message_on_socket(int socketfd, void *data, uint32_t len, int afd) +{ + char ctl[CMSG_SPACE(sizeof(int))]; + struct msghdr mh = {0}; + struct cmsghdr *cmsg; + static uint64_t sent; + struct iovec iov[1]; + int size; + + iov[0].iov_base = data; + iov[0].iov_len = len; + mh.msg_iov = iov; + mh.msg_iovlen = 1; + + if (afd > 0) { + memset(&ctl, 0, sizeof(ctl)); + mh.msg_control = ctl; + mh.msg_controllen = sizeof(ctl); + cmsg = CMSG_FIRSTHDR(&mh); + cmsg->cmsg_len = CMSG_LEN(sizeof(int)); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + rte_memcpy(CMSG_DATA(cmsg), &afd, sizeof(int)); + } + + size = sendmsg(socketfd, &mh, MSG_DONTWAIT); + if (size < 0) { + if (errno == EAGAIN) + return 0; + plt_err("Failed to send message, err %d", -errno); + return -errno; + } else if (size == 0) { + return 0; + } + sent++; + plt_rep_dbg("Sent %" PRId64 " packets of size %d on socketfd %d", sent, size, socketfd); + + return size; +} + +static int +open_socket_ctrl_channel(void) +{ + struct sockaddr_un un; + int sock_fd; + + sock_fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (sock_fd < 0) { + RTE_LOG(ERR, EAL, "failed to create unix socket\n"); + return -1; + } + + /* Set unix socket path and bind */ + memset(&un, 0, sizeof(un)); + un.sun_family = AF_UNIX; + + if (strlen(CNXK_ESWITCH_CTRL_MSG_SOCK_PATH) > sizeof(un.sun_path) - 1) { + plt_err("Server socket path too long: %s", CNXK_ESWITCH_CTRL_MSG_SOCK_PATH); + close(sock_fd); + return -E2BIG; + } + + if (remove(CNXK_ESWITCH_CTRL_MSG_SOCK_PATH) == -1 && errno != ENOENT) { + plt_err("remove-%s", CNXK_ESWITCH_CTRL_MSG_SOCK_PATH); + close(sock_fd); + return -errno; + } + + memset(&un, 0, sizeof(struct sockaddr_un)); + un.sun_family = AF_UNIX; + strncpy(un.sun_path, CNXK_ESWITCH_CTRL_MSG_SOCK_PATH, sizeof(un.sun_path) - 1); + + if (bind(sock_fd, (struct sockaddr *)&un, sizeof(un)) < 0) { + plt_err("Failed to bind %s: %s", un.sun_path, strerror(errno)); + close(sock_fd); + return -errno; + } + + if (listen(sock_fd, 1) < 0) { + plt_err("Failed to listen, err %s", 
strerror(errno)); + close(sock_fd); + return -errno; + } + + plt_rep_dbg("Unix socket path %s", un.sun_path); + return sock_fd; +} + +static int +send_control_message(struct cnxk_eswitch_dev *eswitch_dev, void *buffer, uint32_t len) +{ + int sz; + int rc = 0; + + sz = send_message_on_socket(eswitch_dev->sock_fd, buffer, len, 0); + if (sz < 0) { + plt_err("Error sending message, err %d", sz); + rc = sz; + goto done; + } + + /* Ensuring entire message has been processed */ + if (sz != (int)len) { + plt_err("Out of %d bytes only %d bytes sent", sz, len); + rc = -EFAULT; + goto done; + } + plt_rep_dbg("Sent %d bytes of buffer", sz); +done: + return rc; +} + +void +cnxk_rep_msg_populate_msg_end(void *buffer, uint32_t *length) +{ + cnxk_rep_msg_populate_command(buffer, length, CNXK_REP_MSG_END, 0); +} + +void +cnxk_rep_msg_populate_type(void *buffer, uint32_t *length, cnxk_type_t type, uint32_t sz) +{ + uint32_t len = *length; + cnxk_type_data_t data; + + memset(&data, 0, sizeof(cnxk_type_data_t)); + /* Prepare type data */ + data.type = type; + data.length = sz; + + /* Populate the type data */ + rte_memcpy(RTE_PTR_ADD(buffer, len), &data, sizeof(cnxk_type_data_t)); + len += sizeof(cnxk_type_data_t); + + *length = len; +} + +void +cnxk_rep_msg_populate_header(void *buffer, uint32_t *length) +{ + cnxk_header_t hdr; + int len; + + cnxk_rep_msg_populate_type(buffer, length, CNXK_TYPE_HEADER, sizeof(cnxk_header_t)); + + memset(&hdr, 0, sizeof(cnxk_header_t)); + len = *length; + /* Prepare header data */ + hdr.signature = CTRL_MSG_SIGNATURE; + + /* Populate header data */ + rte_memcpy(RTE_PTR_ADD(buffer, len), &hdr, sizeof(cnxk_header_t)); + len += sizeof(cnxk_header_t); + + *length = len; +} + +void +cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type, uint32_t size) +{ + cnxk_rep_msg_data_t msg_data; + uint32_t len; + uint16_t sz = sizeof(cnxk_rep_msg_data_t); + + memset(&msg_data, 0, sz); + cnxk_rep_msg_populate_type(buffer, length, CNXK_TYPE_MSG, sz); + + len = *length; + /* Prepare command data */ + msg_data.type = type; + msg_data.length = size; + + /* Populate the command */ + rte_memcpy(RTE_PTR_ADD(buffer, len), &msg_data, sz); + len += sz; + + *length = len; +} + +void +cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz, + cnxk_rep_msg_t msg) +{ + uint32_t len; + + cnxk_rep_msg_populate_command(buffer, length, msg, sz); + + len = *length; + /* Populate command data */ + rte_memcpy(RTE_PTR_ADD(buffer, len), msg_meta, sz); + len += sz; + + *length = len; +} + +static int +parse_validate_header(void *msg_buf, uint32_t *buf_trav_len) +{ + cnxk_type_data_t *tdata = NULL; + cnxk_header_t *hdr = NULL; + void *data = NULL; + uint16_t len = 0; + + /* Read first bytes of type data */ + data = msg_buf; + tdata = (cnxk_type_data_t *)data; + if (tdata->type != CNXK_TYPE_HEADER) { + plt_err("Invalid type %d, type header expected", tdata->type); + goto fail; + } + + /* Get the header value */ + data = RTE_PTR_ADD(msg_buf, sizeof(cnxk_type_data_t)); + len += sizeof(cnxk_type_data_t); + + /* Validate the header */ + hdr = (cnxk_header_t *)data; + if (hdr->signature != CTRL_MSG_SIGNATURE) { + plt_err("Invalid signature %" PRIu64 " detected", hdr->signature); + goto fail; + } + + /* Update length read till point */ + len += tdata->length; + + *buf_trav_len = len; + return 0; +fail: + return errno; +} + +static cnxk_rep_msg_data_t * +message_data_extract(void *msg_buf, uint32_t *buf_trav_len) +{ + cnxk_type_data_t *tdata = NULL; + 
cnxk_rep_msg_data_t *msg = NULL; + uint16_t len = *buf_trav_len; + void *data; + + tdata = (cnxk_type_data_t *)RTE_PTR_ADD(msg_buf, len); + if (tdata->type != CNXK_TYPE_MSG) { + plt_err("Invalid type %d, type MSG expected", tdata->type); + goto fail; + } + + /* Get the message type */ + len += sizeof(cnxk_type_data_t); + data = RTE_PTR_ADD(msg_buf, len); + msg = (cnxk_rep_msg_data_t *)data; + + /* Advance to actual message data */ + len += tdata->length; + *buf_trav_len = len; + + return msg; +fail: + return NULL; +} + +static void +process_ack_message(void *msg_buf, uint32_t *buf_trav_len, uint32_t msg_len, void *data) +{ + cnxk_rep_msg_ack_data_t *adata = (cnxk_rep_msg_ack_data_t *)data; + uint16_t len = *buf_trav_len; + void *buf; + + /* Get the message type data viz ack data */ + buf = RTE_PTR_ADD(msg_buf, len); + adata->u.data = rte_zmalloc("Ack data", msg_len, 0); + adata->size = msg_len; + if (adata->size == sizeof(uint64_t)) + rte_memcpy(&adata->u.data, buf, msg_len); + else + rte_memcpy(adata->u.data, buf, msg_len); + plt_rep_dbg("Address %p val 0x%" PRIu64 " sval %" PRId64 " msg_len %d", + adata->u.data, adata->u.val, adata->u.sval, msg_len); + + /* Advance length to nex message */ + len += msg_len; + *buf_trav_len = len; +} + +static int +notify_rep_dev_ready(cnxk_rep_msg_ready_data_t *rdata, void *data, + cnxk_rep_msg_ack_data1_t **padata) +{ + struct cnxk_eswitch_dev *eswitch_dev; + uint64_t rep_id_arr[RTE_MAX_ETHPORTS]; + cnxk_rep_msg_ack_data1_t *adata; + uint16_t rep_id, sz, total_sz; + int rc, i, j = 0; + + PLT_SET_USED(data); + eswitch_dev = cnxk_eswitch_pmd_priv(); + if (!eswitch_dev) { + plt_err("Failed to get PF ethdev handle"); + rc = -EINVAL; + goto fail; + } + + memset(rep_id_arr, 0, RTE_MAX_ETHPORTS * sizeof(uint64_t)); + /* For ready state */ + if ((rdata->nb_ports / 2) > eswitch_dev->repr_cnt.nb_repr_probed) { + rc = CNXK_REP_CTRL_MSG_NACK_INV_REP_CNT; + goto fail; + } + + for (i = 0; i < rdata->nb_ports / 2; i++) { + rep_id = UINT16_MAX; + rc = cnxk_rep_state_update(eswitch_dev, rdata->data[i], &rep_id); + if (rc) { + rc = CNXK_REP_CTRL_MSG_NACK_REP_STAT_UP_FAIL; + goto fail; + } + if (rep_id != UINT16_MAX) + rep_id_arr[j++] = rep_id; + } + + /* Send Rep Id array to companian app */ + sz = j * sizeof(uint64_t); + total_sz = sizeof(cnxk_rep_msg_ack_data1_t) + sz; + adata = plt_zmalloc(total_sz, 0); + rte_memcpy(adata->data, rep_id_arr, sz); + adata->size = sz; + *padata = adata; + + plt_rep_dbg("Installing NPC rules for Eswitch VF"); + /* Install RX VLAN rule for eswitch VF */ + if (!eswitch_dev->eswitch_vf_rules_setup) { + rc = cnxk_eswitch_pfvf_flow_rules_install(eswitch_dev, true); + if (rc) { + plt_err("Failed to install rxtx rules, rc %d", rc); + goto fail; + } + + /* Configure TPID for Eswitch PF LFs */ + rc = roc_eswitch_nix_vlan_tpid_set(&eswitch_dev->nix, ROC_NIX_VLAN_TYPE_OUTER, + CNXK_ESWITCH_VLAN_TPID, true); + if (rc) { + plt_err("Failed to configure tpid, rc %d", rc); + goto fail; + } + eswitch_dev->eswitch_vf_rules_setup = true; + } + + return 0; +fail: + sz = sizeof(cnxk_rep_msg_ack_data1_t) + sizeof(uint64_t); + adata = plt_zmalloc(sz, 0); + adata->data[0] = rc; + adata->size = sizeof(uint64_t); + *padata = adata; + + return rc; +} + +static int +process_ready_message(void *msg_buf, uint32_t *buf_trav_len, uint32_t msg_len, void *data, + cnxk_rep_msg_ack_data1_t **padata) +{ + cnxk_rep_msg_ready_data_t *rdata = NULL; + cnxk_rep_msg_ack_data1_t *adata; + uint16_t len = *buf_trav_len; + void *buf; + int rc = 0, sz; + + /* Get the message type data 
viz ready data */ + buf = RTE_PTR_ADD(msg_buf, len); + rdata = (cnxk_rep_msg_ready_data_t *)buf; + + plt_rep_dbg("Ready data received %d, nb_ports %d", rdata->val, rdata->nb_ports); + + /* Wait required to ensure other side ready for receiving the ack */ + usleep(CTRL_MSG_READY_WAIT_US); + + /* Update all representor about ready message */ + if (rdata->val) { + rc = notify_rep_dev_ready(rdata, data, padata); + } else { + sz = sizeof(cnxk_rep_msg_ack_data1_t) + sizeof(uint64_t); + adata = plt_zmalloc(sz, 0); + adata->data[0] = CNXK_REP_CTRL_MSG_NACK_INV_RDY_DATA; + adata->size = sizeof(uint64_t); + *padata = adata; + } + + /* Advance length to nex message */ + len += msg_len; + *buf_trav_len = len; + + return rc; +} + +static int +notify_rep_dev_exit(cnxk_rep_msg_exit_data_t *edata, void *data) +{ + struct cnxk_eswitch_dev *eswitch_dev; + struct cnxk_rep_dev *rep_dev = NULL; + struct rte_eth_dev *rep_eth_dev; + int i, rc = 0; + + PLT_SET_USED(data); + eswitch_dev = cnxk_eswitch_pmd_priv(); + if (!eswitch_dev) { + plt_err("Failed to get PF ethdev handle"); + rc = -EINVAL; + goto fail; + } + if ((edata->nb_ports / 2) > eswitch_dev->repr_cnt.nb_repr_probed) { + rc = CNXK_REP_CTRL_MSG_NACK_INV_REP_CNT; + goto fail; + } + + for (i = 0; i < eswitch_dev->repr_cnt.nb_repr_probed; i++) { + rep_eth_dev = eswitch_dev->rep_info[i].rep_eth_dev; + if (!rep_eth_dev) { + plt_err("Failed to get rep ethdev handle"); + rc = -EINVAL; + goto fail; + } + + rep_dev = cnxk_rep_pmd_priv(rep_eth_dev); + if (!rep_dev->native_repte) + rep_dev->is_vf_active = false; + } + /* For Exit message */ + eswitch_dev->client_connected = false; + return 0; +fail: + return rc; +} + +static void +process_exit_message(void *msg_buf, uint32_t *buf_trav_len, uint32_t msg_len, void *data) +{ + cnxk_rep_msg_exit_data_t *edata = NULL; + uint16_t len = *buf_trav_len; + void *buf; + + /* Get the message type data viz exit data */ + buf = RTE_PTR_ADD(msg_buf, len); + edata = (cnxk_rep_msg_exit_data_t *)buf; + + plt_rep_dbg("Exit data received %d", edata->val); + + /* Update all representor about ready/exit message */ + if (edata->val) + notify_rep_dev_exit(edata, data); + + /* Advance length to nex message */ + len += msg_len; + *buf_trav_len = len; +} + +static void +populate_ack_msg(void *buffer, uint32_t *length, cnxk_rep_msg_ack_data1_t *adata) +{ + uint32_t sz = sizeof(cnxk_rep_msg_ack_data1_t) + adata->size; + uint32_t len; + + cnxk_rep_msg_populate_command(buffer, length, CNXK_REP_MSG_ACK, sz); + + len = *length; + + /* Populate ACK message data */ + rte_memcpy(RTE_PTR_ADD(buffer, len), adata, sz); + + len += sz; + + *length = len; +} + +static int +send_ack_message(void *data, cnxk_rep_msg_ack_data1_t *adata) +{ + struct cnxk_eswitch_dev *eswitch_dev = (struct cnxk_eswitch_dev *)data; + uint32_t len = 0, size; + void *buffer; + int rc = 0; + + /* Allocate memory for preparing a message */ + size = CTRL_MSG_BUFFER_SZ; + buffer = rte_zmalloc("ACK msg", size, 0); + if (!buffer) { + plt_err("Failed to allocate mem"); + return -ENOMEM; + } + + /* Prepare the ACK message */ + cnxk_rep_msg_populate_header(buffer, &len); + populate_ack_msg(buffer, &len, adata); + cnxk_rep_msg_populate_msg_end(buffer, &len); + + /* Length check to avoid buffer overflow */ + if (len > CTRL_MSG_BUFFER_SZ) { + plt_err("Invalid length %d for max sized buffer %d", len, CTRL_MSG_BUFFER_SZ); + rc = -EFAULT; + goto done; + } + + /* Send it to the peer */ + rc = send_control_message(eswitch_dev, buffer, len); + if (rc) + plt_err("Failed send ack"); + +done: + return 
rc; +} + +static int +process_message(void *msg_buf, uint32_t *buf_trav_len, void *data) +{ + cnxk_rep_msg_data_t *msg = NULL; + cnxk_rep_msg_ack_data1_t *adata = NULL; + bool send_ack; + int rc = 0, sz; + + /* Get the message data */ + msg = message_data_extract(msg_buf, buf_trav_len); + if (!msg) { + plt_err("Failed to get message data"); + rc = -EINVAL; + goto fail; + } + + /* Different message type processing */ + while (msg->type != CNXK_REP_MSG_END) { + send_ack = true; + switch (msg->type) { + case CNXK_REP_MSG_ACK: + plt_rep_dbg("Received ack response"); + process_ack_message(msg_buf, buf_trav_len, msg->length, data); + send_ack = false; + break; + case CNXK_REP_MSG_READY: + plt_rep_dbg("Received ready message"); + process_ready_message(msg_buf, buf_trav_len, msg->length, data, &adata); + adata->type = CNXK_REP_MSG_READY; + break; + case CNXK_REP_MSG_EXIT: + plt_rep_dbg("Received exit message"); + process_exit_message(msg_buf, buf_trav_len, msg->length, data); + sz = sizeof(cnxk_rep_msg_ack_data1_t) + sizeof(uint64_t); + adata = plt_zmalloc(sz, 0); + adata->type = CNXK_REP_MSG_EXIT; + adata->data[0] = 0; + adata->size = sizeof(uint64_t); + break; + default: + send_ack = false; + plt_err("Invalid message type: %d", msg->type); + rc = -EINVAL; + }; + + /* Send ACK */ + if (send_ack) + send_ack_message(data, adata); + + /* Advance to next message */ + msg = message_data_extract(msg_buf, buf_trav_len); + if (!msg) { + plt_err("Failed to get message data"); + rc = -EINVAL; + goto fail; + } + } + + return 0; +fail: + return rc; +} + +static int +process_control_message(void *msg_buf, void *data, size_t sz) +{ + uint32_t buf_trav_len = 0; + int rc; + + /* Validate the validity of the received message */ + parse_validate_header(msg_buf, &buf_trav_len); + + /* Detect message and process */ + rc = process_message(msg_buf, &buf_trav_len, data); + if (rc) { + plt_err("Failed to process message"); + goto fail; + } + + /* Ensuring entire message has been processed */ + if (sz != buf_trav_len) { + plt_err("Out of %" PRId64 " bytes %d bytes of msg_buf processed", sz, buf_trav_len); + rc = -EFAULT; + goto fail; + } + + return 0; +fail: + return rc; +} + +static int +receive_control_msg_resp(struct cnxk_eswitch_dev *eswitch_dev, void *data) +{ + uint32_t wait_us = CTRL_MSG_RCV_TIMEOUT_MS * 1000; + uint32_t timeout = 0, sleep = 1; + int sz = 0; + int rc = -1; + uint32_t len = BUFSIZ; + void *msg_buf; + + msg_buf = plt_zmalloc(len, 0); + + do { + sz = receive_control_message(eswitch_dev->sock_fd, msg_buf, len); + if (sz != 0) + break; + + /* Timeout after CTRL_MSG_RCV_TIMEOUT_MS */ + if (timeout >= wait_us) { + plt_err("Control message wait timedout"); + return -ETIMEDOUT; + } + + plt_delay_us(sleep); + timeout += sleep; + } while ((sz == 0) || (timeout < wait_us)); + + if (sz > 0) { + plt_rep_dbg("Received %d sized response packet", sz); + rc = process_control_message(msg_buf, data, sz); + plt_free(msg_buf); + } + + return rc; +} + +int +cnxk_rep_msg_send_process(struct cnxk_rep_dev *rep_dev, void *buffer, uint32_t len, + cnxk_rep_msg_ack_data_t *adata) +{ + struct cnxk_eswitch_dev *eswitch_dev; + int rc = 0; + + eswitch_dev = rep_dev->parent_dev; + if (!eswitch_dev) { + plt_err("Failed to get parent eswitch handle"); + rc = -1; + goto fail; + } + + plt_spinlock_lock(&eswitch_dev->rep_lock); + rc = send_control_message(eswitch_dev, buffer, len); + if (rc) { + plt_err("Failed to send the message, err %d", rc); + goto free; + } + + /* Get response of the command sent */ + rc = 
receive_control_msg_resp(eswitch_dev, adata); + if (rc) { + plt_err("Failed to receive the response, err %d", rc); + goto free; + } + plt_spinlock_unlock(&eswitch_dev->rep_lock); + + return 0; +free: + plt_spinlock_unlock(&eswitch_dev->rep_lock); +fail: + return rc; +} + +static void +poll_for_control_msg(void *data) +{ + struct cnxk_eswitch_dev *eswitch_dev = (struct cnxk_eswitch_dev *)data; + uint32_t len = BUFSIZ; + int sz = 0; + void *msg_buf; + + while (eswitch_dev->client_connected) { + msg_buf = plt_zmalloc(len, 0); + do { + plt_spinlock_lock(&eswitch_dev->rep_lock); + sz = receive_control_message(eswitch_dev->sock_fd, msg_buf, len); + plt_spinlock_unlock(&eswitch_dev->rep_lock); + if (sz != 0) + break; + plt_delay_us(2000); + } while (sz == 0); + + if (sz > 0) { + plt_rep_dbg("Received new %d bytes control message", sz); + plt_spinlock_lock(&eswitch_dev->rep_lock); + process_control_message(msg_buf, data, sz); + plt_spinlock_unlock(&eswitch_dev->rep_lock); + plt_free(msg_buf); + } + } + plt_rep_dbg("Exiting poll for control message loop"); +} + +static uint32_t +rep_ctrl_msg_thread_main(void *arg) +{ + struct cnxk_eswitch_dev *eswitch_dev = (struct cnxk_eswitch_dev *)arg; + struct sockaddr_un client; + int addr_len; + int ssock_fd; + int sock_fd; + + ssock_fd = open_socket_ctrl_channel(); + if (ssock_fd < 0) { + plt_err("Failed to open socket for ctrl channel, err %d", ssock_fd); + return UINT32_MAX; + } + + addr_len = sizeof(client); + while (eswitch_dev->start_ctrl_msg_thrd) { + /* Accept client connection until the thread is running */ + sock_fd = accept(ssock_fd, (struct sockaddr *)&client, (socklen_t *)&addr_len); + if (sock_fd < 0) { + plt_err("Failed to accept connection request on socket fd %d", ssock_fd); + break; + } + + plt_rep_dbg("Client %s: Connection request accepted.", client.sun_path); + eswitch_dev->sock_fd = sock_fd; + if (eswitch_dev->start_ctrl_msg_thrd) { + eswitch_dev->client_connected = true; + poll_for_control_msg(eswitch_dev); + } + eswitch_dev->sock_fd = -1; + close(sock_fd); + } + + /* Closing the opened socket */ + close_socket(ssock_fd); + plt_rep_dbg("Exiting representor ctrl thread"); + + return 0; +} + +int +cnxk_rep_msg_control_thread_launch(struct cnxk_eswitch_dev *eswitch_dev) +{ + char name[CTRL_MSG_THRD_NAME_LEN]; + int rc = 0; + + rte_strscpy(name, "rep_ctrl_msg_hndlr", CTRL_MSG_THRD_NAME_LEN); + eswitch_dev->start_ctrl_msg_thrd = true; + rc = rte_thread_create_internal_control(&eswitch_dev->rep_ctrl_msg_thread, name, + rep_ctrl_msg_thread_main, eswitch_dev); + if (rc) + plt_err("Failed to create rep control message handling"); + + return rc; +} diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h new file mode 100644 index 0000000000..0543805148 --- /dev/null +++ b/drivers/net/cnxk/cnxk_rep_msg.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. 
+ */ + +#ifndef __CNXK_REP_MSG_H__ +#define __CNXK_REP_MSG_H__ + +#include + +#define CNXK_REP_MSG_MAX_BUFFER_SZ 1500 + +typedef enum CNXK_TYPE { + CNXK_TYPE_HEADER = 0, + CNXK_TYPE_MSG, +} cnxk_type_t; + +typedef enum CNXK_REP_MSG { + /* General sync messages */ + CNXK_REP_MSG_READY = 0, + CNXK_REP_MSG_ACK, + CNXK_REP_MSG_EXIT, + /* End of messaging sequence */ + CNXK_REP_MSG_END, +} cnxk_rep_msg_t; + +typedef enum CNXK_NACK_CODE { + CNXK_REP_CTRL_MSG_NACK_INV_RDY_DATA = 0x501, + CNXK_REP_CTRL_MSG_NACK_INV_REP_CNT = 0x502, + CNXK_REP_CTRL_MSG_NACK_REP_STAT_UP_FAIL = 0x503, +} cnxk_nack_code_t; + +/* Types */ +typedef struct cnxk_type_data { + cnxk_type_t type; + uint32_t length; + uint64_t data[]; +} __rte_packed cnxk_type_data_t; + +/* Header */ +typedef struct cnxk_header { + uint64_t signature; + uint16_t nb_hops; +} __rte_packed cnxk_header_t; + +/* Message meta */ +typedef struct cnxk_rep_msg_data { + cnxk_rep_msg_t type; + uint32_t length; + uint64_t data[]; +} __rte_packed cnxk_rep_msg_data_t; + +/* Ack msg */ +typedef struct cnxk_rep_msg_ack_data { + cnxk_rep_msg_t type; + uint32_t size; + union { + void *data; + uint64_t val; + int64_t sval; + } u; +} __rte_packed cnxk_rep_msg_ack_data_t; + +/* Ack msg */ +typedef struct cnxk_rep_msg_ack_data1 { + cnxk_rep_msg_t type; + uint32_t size; + uint64_t data[]; +} __rte_packed cnxk_rep_msg_ack_data1_t; + +/* Ready msg */ +typedef struct cnxk_rep_msg_ready_data { + uint8_t val; + uint16_t nb_ports; + uint16_t data[]; +} __rte_packed cnxk_rep_msg_ready_data_t; + +/* Exit msg */ +typedef struct cnxk_rep_msg_exit_data { + uint8_t val; + uint16_t nb_ports; + uint16_t data[]; +} __rte_packed cnxk_rep_msg_exit_data_t; + +void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type, + uint32_t size); +void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz, + cnxk_rep_msg_t msg); +void cnxk_rep_msg_populate_msg_end(void *buffer, uint32_t *length); +void cnxk_rep_msg_populate_type(void *buffer, uint32_t *length, cnxk_type_t type, uint32_t sz); +void cnxk_rep_msg_populate_header(void *buffer, uint32_t *length); +int cnxk_rep_msg_send_process(struct cnxk_rep_dev *rep_dev, void *buffer, uint32_t len, + cnxk_rep_msg_ack_data_t *adata); +int cnxk_rep_msg_control_thread_launch(struct cnxk_eswitch_dev *eswitch_dev); + +#endif /* __CNXK_REP_MSG_H__ */ diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build index 7121845dc6..9ca7732713 100644 --- a/drivers/net/cnxk/meson.build +++ b/drivers/net/cnxk/meson.build @@ -37,6 +37,7 @@ sources = files( 'cnxk_ptp.c', 'cnxk_flow.c', 'cnxk_rep.c', + 'cnxk_rep_msg.c', 'cnxk_rep_ops.c', 'cnxk_stats.c', 'cnxk_tm.c', From patchwork Sun Mar 3 17:38:21 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137825 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 68F3043C3E; Sun, 3 Mar 2024 18:40:11 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 46A644367F; Sun, 3 Mar 2024 18:39:15 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id 8CFF843676 for ; Sun, 3 Mar 2024 18:39:13 +0100 (CET) Received: from 
pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id 4236Qu5D005556 for ; Sun, 3 Mar 2024 09:39:12 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h= from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-type; s=pfpt0220; bh=YlTbKJK0n9d+Wigy0mpbF TXWW+PVVwbJctkKGgyfou8=; b=WQCnU+jnvhPL10+JUgkGPaYP1XPEzs53Fbz1X JZ9oBJcBEwrRKyxgk/ev2JnW7zUhLKK9U1fMQUWZo06NOt05bd+IalqGchoM50tr JAPRyPyI86GiAZwDSiMTQJjYuBS+M6RCZdrU5eovFSvsl49xupXA+7DDKfMP3s1M JDhpqabTmKTPaqrmZ4W4Gk6hODSivT4kWbj2dDG3vOTDMG4rxWbWsCY/IeJ7KdEu 8g6x4eKNJr8H3whzP7rfN4m9u7cJ+VPFiScPtD05UHM2YB2Y05iIYCMlvspTVopL gWEM9o0MNMPAFPE+W0ptWwbbRVvqwXSvOrq//7c9GltWOb+4Q== Received: from dc6wp-exch02.marvell.com ([4.21.29.225]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3wm2bptrtw-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT) for ; Sun, 03 Mar 2024 09:39:12 -0800 (PST) Received: from DC6WP-EXCH02.marvell.com (10.76.176.209) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Sun, 3 Mar 2024 09:39:11 -0800 Received: from maili.marvell.com (10.69.176.80) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server id 15.2.1258.12 via Frontend Transport; Sun, 3 Mar 2024 09:39:11 -0800 Received: from localhost.localdomain (unknown [10.29.52.211]) by maili.marvell.com (Postfix) with ESMTP id EFE103F719F; Sun, 3 Mar 2024 09:39:08 -0800 (PST) From: Harman Kalra To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao , Harman Kalra CC: Subject: [PATCH v6 11/23] common/cnxk: representee notification callback Date: Sun, 3 Mar 2024 23:08:21 +0530 Message-ID: <20240303173833.100039-12-hkalra@marvell.com> X-Mailer: git-send-email 2.18.0 In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: U3HCNq6ofCPDHY02pOmcXCmQGA7xOitI X-Proofpoint-ORIG-GUID: U3HCNq6ofCPDHY02pOmcXCmQGA7xOitI X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.1011,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2024-03-03_08,2024-03-01_03,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Setting up a callback which gets invoked every time a representee comes up or goes down. Later this callback gets handled by network counterpart. 
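As a concrete illustration of the callback contract described above, a hedged sketch of a consumer follows; the handler body is hypothetical, while the message types and the register/unregister API are the ones this patch adds to roc_eswitch.h.

/* Hypothetical handler invoked from the AF mbox UP-message context */
static int
demo_repte_notify(void *roc_nix, struct roc_eswitch_repte_notify_msg *msg)
{
	PLT_SET_USED(roc_nix);

	switch (msg->type) {
	case ROC_ESWITCH_REPTE_STATE:
		/* A representee came up or went down */
		plt_info("repte %x is %s", msg->state.hw_func,
			 msg->state.enable ? "up" : "down");
		break;
	case ROC_ESWITCH_REPTE_MTU:
		/* A representee requested an MTU update */
		plt_info("repte %x rep id %d mtu %u", msg->mtu.hw_func,
			 msg->mtu.rep_id, msg->mtu.mtu);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

Registration would happen once at eswitch init and be undone at teardown:

	roc_eswitch_nix_process_repte_notify_cb_register(&eswitch_dev->nix, demo_repte_notify);
	...
	roc_eswitch_nix_process_repte_notify_cb_unregister(&eswitch_dev->nix);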
Signed-off-by: Harman Kalra --- drivers/common/cnxk/roc_dev.c | 70 ++++++++++++++++++++++++++++++ drivers/common/cnxk/roc_dev_priv.h | 3 ++ drivers/common/cnxk/roc_eswitch.c | 23 ++++++++++ drivers/common/cnxk/roc_eswitch.h | 31 +++++++++++++ drivers/common/cnxk/roc_mbox.c | 2 + drivers/common/cnxk/roc_mbox.h | 19 +++++++- drivers/common/cnxk/version.map | 2 + 7 files changed, 149 insertions(+), 1 deletion(-) diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c index 14aff233d5..867f981423 100644 --- a/drivers/common/cnxk/roc_dev.c +++ b/drivers/common/cnxk/roc_dev.c @@ -539,6 +539,75 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg) } } +static int +mbox_up_handler_rep_repte_notify(struct dev *dev, struct rep_repte_req *req, struct msg_rsp *rsp) +{ + struct roc_eswitch_repte_notify_msg *notify_msg; + int rc = 0; + + plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", dev_get_pf(dev->pf_func), + dev_get_vf(dev->pf_func), req->hdr.id, mbox_id2name(req->hdr.id), + dev_get_pf(req->hdr.pcifunc), dev_get_vf(req->hdr.pcifunc)); + + plt_base_dbg("repte pcifunc %x, enable %d", req->repte_pcifunc, req->enable); + if (dev->ops && dev->ops->repte_notify) { + notify_msg = plt_zmalloc(sizeof(struct roc_eswitch_repte_notify_msg), 0); + if (!notify_msg) { + plt_err("Failed to allocate memory"); + rc = -ENOMEM; + goto fail; + } + notify_msg->type = ROC_ESWITCH_REPTE_STATE; + notify_msg->state.hw_func = req->repte_pcifunc; + notify_msg->state.enable = req->enable; + + rc = dev->ops->repte_notify(dev->roc_nix, (void *)notify_msg); + if (rc < 0) + plt_err("Failed to sent new representee %x notification to %s", + req->repte_pcifunc, (req->enable == true) ? "enable" : "disable"); + + plt_free(notify_msg); + } +fail: + rsp->hdr.rc = rc; + return rc; +} + +static int +mbox_up_handler_rep_set_mtu(struct dev *dev, struct rep_mtu *req, struct msg_rsp *rsp) +{ + struct roc_eswitch_repte_notify_msg *notify_msg; + int rc = 0; + + plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", dev_get_pf(dev->pf_func), + dev_get_vf(dev->pf_func), req->hdr.id, mbox_id2name(req->hdr.id), + dev_get_pf(req->hdr.pcifunc), dev_get_vf(req->hdr.pcifunc)); + + plt_base_dbg("rep pcifunc %x, rep id %d mtu %d", req->rep_pcifunc, req->rep_id, req->mtu); + if (dev->ops && dev->ops->repte_notify) { + notify_msg = plt_zmalloc(sizeof(struct roc_eswitch_repte_notify_msg), 0); + if (!notify_msg) { + plt_err("Failed to allocate memory"); + rc = -ENOMEM; + goto fail; + } + notify_msg->type = ROC_ESWITCH_REPTE_MTU; + notify_msg->mtu.hw_func = req->rep_pcifunc; + notify_msg->mtu.rep_id = req->rep_id; + notify_msg->mtu.mtu = req->mtu; + + rc = dev->ops->repte_notify(dev->roc_nix, (void *)notify_msg); + if (rc < 0) + plt_err("Failed to send new mtu notification for representee %x ", + req->rep_pcifunc); + + plt_free(notify_msg); + } +fail: + rsp->hdr.rc = rc; + return rc; +} + static int mbox_up_handler_mcs_intr_notify(struct dev *dev, struct mcs_intr_info *info, struct msg_rsp *rsp) { @@ -713,6 +782,7 @@ mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req) } MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES + MBOX_UP_REP_MESSAGES #undef M } diff --git a/drivers/common/cnxk/roc_dev_priv.h b/drivers/common/cnxk/roc_dev_priv.h index 5b2c5096f8..50e12cbf17 100644 --- a/drivers/common/cnxk/roc_dev_priv.h +++ b/drivers/common/cnxk/roc_dev_priv.h @@ -36,12 +36,15 @@ typedef void (*q_err_cb_t)(void *roc_nix, void *data); /* Link status get callback */ typedef void (*link_status_get_t)(void *roc_nix, struct 
cgx_link_user_info *link); +/* Representee notification callback */ +typedef int (*repte_notify_t)(void *roc_nix, void *notify_msg); struct dev_ops { link_info_t link_status_update; ptp_info_t ptp_info_update; link_status_get_t link_status_get; q_err_cb_t q_err_cb; + repte_notify_t repte_notify; }; #define dev_is_vf(dev) ((dev)->hwcap & DEV_HWCAP_F_VF) diff --git a/drivers/common/cnxk/roc_eswitch.c b/drivers/common/cnxk/roc_eswitch.c index 020a891a32..14819bad75 100644 --- a/drivers/common/cnxk/roc_eswitch.c +++ b/drivers/common/cnxk/roc_eswitch.c @@ -319,3 +319,26 @@ roc_eswitch_nix_vlan_tpid_set(struct roc_nix *roc_nix, uint32_t type, uint16_t t return rc; } + +int +roc_eswitch_nix_process_repte_notify_cb_register(struct roc_nix *roc_nix, + process_repte_notify_t proc_repte_nt) +{ + struct nix *nix = roc_nix_to_nix_priv(roc_nix); + struct dev *dev = &nix->dev; + + if (proc_repte_nt == NULL) + return NIX_ERR_PARAM; + + dev->ops->repte_notify = (repte_notify_t)proc_repte_nt; + return 0; +} + +void +roc_eswitch_nix_process_repte_notify_cb_unregister(struct roc_nix *roc_nix) +{ + struct nix *nix = roc_nix_to_nix_priv(roc_nix); + struct dev *dev = &nix->dev; + + dev->ops->repte_notify = NULL; +} diff --git a/drivers/common/cnxk/roc_eswitch.h b/drivers/common/cnxk/roc_eswitch.h index 34b75d10ac..e0df0038d4 100644 --- a/drivers/common/cnxk/roc_eswitch.h +++ b/drivers/common/cnxk/roc_eswitch.h @@ -8,6 +8,34 @@ #define ROC_ESWITCH_VLAN_TPID 0x8100 #define ROC_ESWITCH_LBK_CHAN 63 +typedef enum roc_eswitch_repte_notify_msg_type { + ROC_ESWITCH_REPTE_STATE = 0, + ROC_ESWITCH_REPTE_MTU, +} roc_eswitch_repte_notify_msg_type_t; + +struct roc_eswitch_repte_state { + bool enable; + uint16_t hw_func; +}; + +struct roc_eswitch_repte_mtu { + uint16_t mtu; + uint16_t rep_id; + uint16_t hw_func; +}; + +struct roc_eswitch_repte_notify_msg { + roc_eswitch_repte_notify_msg_type_t type; + union { + struct roc_eswitch_repte_state state; + struct roc_eswitch_repte_mtu mtu; + }; +}; + +/* Process representee notification callback */ +typedef int (*process_repte_notify_t)(void *roc_nix, + struct roc_eswitch_repte_notify_msg *notify_msg); + /* NPC */ int __roc_api roc_eswitch_npc_mcam_rx_rule(struct roc_npc *roc_npc, struct roc_npc_flow *flow, uint16_t pcifunc, uint16_t vlan_tci, @@ -23,4 +51,7 @@ int __roc_api roc_eswitch_npc_rss_action_configure(struct roc_npc *roc_npc, /* NIX */ int __roc_api roc_eswitch_nix_vlan_tpid_set(struct roc_nix *nix, uint32_t type, uint16_t tpid, bool is_vf); +int __roc_api roc_eswitch_nix_process_repte_notify_cb_register(struct roc_nix *roc_nix, + process_repte_notify_t proc_repte_nt); +void __roc_api roc_eswitch_nix_process_repte_notify_cb_unregister(struct roc_nix *roc_nix); #endif /* __ROC_ESWITCH_H__ */ diff --git a/drivers/common/cnxk/roc_mbox.c b/drivers/common/cnxk/roc_mbox.c index 7b734fcd24..10cdbc4d13 100644 --- a/drivers/common/cnxk/roc_mbox.c +++ b/drivers/common/cnxk/roc_mbox.c @@ -499,6 +499,7 @@ mbox_id2name(uint16_t id) return #_name; MBOX_MESSAGES MBOX_UP_CGX_MESSAGES + MBOX_UP_REP_MESSAGES #undef M } } @@ -514,6 +515,7 @@ mbox_id2size(uint16_t id) return sizeof(struct _req_type); MBOX_MESSAGES MBOX_UP_CGX_MESSAGES + MBOX_UP_REP_MESSAGES #undef M } } diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h index b76e97e9f9..d28e3ffd70 100644 --- a/drivers/common/cnxk/roc_mbox.h +++ b/drivers/common/cnxk/roc_mbox.h @@ -357,9 +357,13 @@ struct mbox_msghdr { #define MBOX_UP_MCS_MESSAGES M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp) 
+#define MBOX_UP_REP_MESSAGES \ +M(REP_REPTE_NOTIFY, 0xEF1, rep_repte_notify, rep_repte_req, msg_rsp) \ +M(REP_SET_MTU, 0xEF2, rep_set_mtu, rep_mtu, msg_rsp) + enum { #define M(_name, _id, _1, _2, _3) MBOX_MSG_##_name = _id, - MBOX_MESSAGES MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES + MBOX_MESSAGES MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES MBOX_UP_REP_MESSAGES #undef M }; @@ -2799,4 +2803,17 @@ struct nix_spi_to_sa_delete_req { uint16_t __io hash_index; uint8_t __io way; }; + +struct rep_repte_req { + struct mbox_msghdr hdr; + uint16_t __io repte_pcifunc; + bool __io enable; +}; + +struct rep_mtu { + struct mbox_msghdr hdr; + uint16_t __io rep_pcifunc; + uint16_t __io rep_id; + uint16_t __io mtu; +}; #endif /* __ROC_MBOX_H__ */ diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map index 0331c0fb9d..5dd42c8044 100644 --- a/drivers/common/cnxk/version.map +++ b/drivers/common/cnxk/version.map @@ -87,6 +87,8 @@ INTERNAL { roc_dpi_disable; roc_dpi_enable; roc_error_msg_get; + roc_eswitch_nix_process_repte_notify_cb_register; + roc_eswitch_nix_process_repte_notify_cb_unregister; roc_eswitch_nix_vlan_tpid_set; roc_eswitch_npc_mcam_delete_rule; roc_eswitch_npc_mcam_rx_rule; From patchwork Sun Mar 3 17:38:22 2024 X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137826 X-Patchwork-Delegate: jerinj@marvell.com From: Harman Kalra To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra Subject: [PATCH v6 12/23] net/cnxk: handling representee notification Date: Sun, 3 Mar 2024 23:08:22 +0530 Message-ID: <20240303173833.100039-13-hkalra@marvell.com> In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> Whenever a representee comes up or goes down, the kernel sends an mbox up-call which signals a thread to process these messages and enable/disable HW resources accordingly. Signed-off-by: Harman Kalra --- drivers/net/cnxk/cnxk_eswitch.c | 8 + drivers/net/cnxk/cnxk_eswitch.h | 19 ++ drivers/net/cnxk/cnxk_rep.c | 326 ++++++++++++++++++++++++++++++++ drivers/net/cnxk/cnxk_rep.h | 37 ++++ 4 files changed, 390 insertions(+) diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c index 14d0df8791..f420d01ef8 100644 --- a/drivers/net/cnxk/cnxk_eswitch.c +++ b/drivers/net/cnxk/cnxk_eswitch.c @@ -139,6 +139,14 @@ cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev) close(sock_fd); } + if (eswitch_dev->repte_msg_proc.start_thread) { + eswitch_dev->repte_msg_proc.start_thread = false; + pthread_cond_signal(&eswitch_dev->repte_msg_proc.repte_msg_cond); + rte_thread_join(eswitch_dev->repte_msg_proc.repte_msg_thread, NULL); + pthread_mutex_destroy(&eswitch_dev->repte_msg_proc.mutex); + pthread_cond_destroy(&eswitch_dev->repte_msg_proc.repte_msg_cond); + } + /* Remove representor devices associated with PF */ cnxk_rep_dev_remove(eswitch_dev); } diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h index ecf10a8e08..0275e760fb 100644 --- a/drivers/net/cnxk/cnxk_eswitch.h +++ b/drivers/net/cnxk/cnxk_eswitch.h @@ -30,6 +30,22 @@ enum cnxk_esw_da_pattern_type { CNXK_ESW_DA_TYPE_PFVF, }; +struct cnxk_esw_repte_msg { + struct roc_eswitch_repte_notify_msg *notify_msg; + + TAILQ_ENTRY(cnxk_esw_repte_msg) next; +}; + +struct cnxk_esw_repte_msg_proc { + bool start_thread; + uint8_t msg_avail; + rte_thread_t repte_msg_thread; + pthread_cond_t repte_msg_cond; + pthread_mutex_t mutex; + + TAILQ_HEAD(esw_repte_msg_list, cnxk_esw_repte_msg) msg_list; +}; + struct cnxk_esw_repr_hw_info { /* Representee pcifunc value */ uint16_t hw_func; @@ -139,6 +155,9 @@ struct cnxk_eswitch_dev { bool client_connected; int sock_fd; + /* Representee notification */ + struct cnxk_esw_repte_msg_proc repte_msg_proc; + /* Port representor fields */ rte_spinlock_t rep_lock; uint16_t nb_switch_domain; diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c index 5b619ebb9e..dc00cdecc1 100644 --- a/drivers/net/cnxk/cnxk_rep.c +++ b/drivers/net/cnxk/cnxk_rep.c @@ -4,6 +4,8 @@ #include #include +#define
REPTE_MSG_PROC_THRD_NAME_MAX_LEN 30 + #define PF_SHIFT 10 #define PF_MASK 0x3F @@ -86,6 +88,7 @@ cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev) { int i, rc = 0; + roc_eswitch_nix_process_repte_notify_cb_unregister(&eswitch_dev->nix); for (i = 0; i < eswitch_dev->nb_switch_domain; i++) { rc = rte_eth_switch_domain_free(eswitch_dev->sw_dom[i].switch_domain_id); if (rc) @@ -95,6 +98,299 @@ cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev) return rc; } +static int +cnxk_representee_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func) +{ + struct cnxk_rep_dev *rep_dev = NULL; + struct rte_eth_dev *rep_eth_dev; + int i, rc = 0; + + for (i = 0; i < eswitch_dev->repr_cnt.nb_repr_probed; i++) { + rep_eth_dev = eswitch_dev->rep_info[i].rep_eth_dev; + if (!rep_eth_dev) { + plt_err("Failed to get rep ethdev handle"); + rc = -EINVAL; + goto done; + } + + rep_dev = cnxk_rep_pmd_priv(rep_eth_dev); + if (rep_dev->hw_func == hw_func && + (!rep_dev->native_repte || rep_dev->is_vf_active)) { + rep_dev->is_vf_active = false; + rc = cnxk_rep_dev_stop(rep_eth_dev); + if (rc) { + plt_err("Failed to stop repr port %d, rep id %d", rep_dev->port_id, + rep_dev->rep_id); + goto done; + } + + cnxk_rep_rx_queue_release(rep_eth_dev, 0); + cnxk_rep_tx_queue_release(rep_eth_dev, 0); + plt_rep_dbg("Released representor ID %d representing %x", rep_dev->rep_id, + hw_func); + break; + } + } +done: + return rc; +} + +static int +cnxk_representee_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t rep_id) +{ + struct cnxk_rep_dev *rep_dev = NULL; + struct rte_eth_dev *rep_eth_dev; + int i, rc = 0; + + for (i = 0; i < eswitch_dev->repr_cnt.nb_repr_probed; i++) { + rep_eth_dev = eswitch_dev->rep_info[i].rep_eth_dev; + if (!rep_eth_dev) { + plt_err("Failed to get rep ethdev handle"); + rc = -EINVAL; + goto done; + } + + rep_dev = cnxk_rep_pmd_priv(rep_eth_dev); + if (rep_dev->hw_func == hw_func && !rep_dev->is_vf_active) { + rep_dev->is_vf_active = true; + rep_dev->native_repte = true; + if (rep_dev->rep_id != rep_id) { + plt_err("Rep ID assigned during init %d does not match %d", + rep_dev->rep_id, rep_id); + rc = -EINVAL; + goto done; + } + + rc = cnxk_rep_rx_queue_setup(rep_eth_dev, rep_dev->rxq->qid, + rep_dev->rxq->nb_desc, 0, + rep_dev->rxq->rx_conf, rep_dev->rxq->mpool); + if (rc) { + plt_err("Failed to setup rxq repr port %d, rep id %d", + rep_dev->port_id, rep_dev->rep_id); + goto done; + } + + rc = cnxk_rep_tx_queue_setup(rep_eth_dev, rep_dev->txq->qid, + rep_dev->txq->nb_desc, 0, + rep_dev->txq->tx_conf); + if (rc) { + plt_err("Failed to setup txq repr port %d, rep id %d", + rep_dev->port_id, rep_dev->rep_id); + goto done; + } + + rc = cnxk_rep_dev_start(rep_eth_dev); + if (rc) { + plt_err("Failed to start repr port %d, rep id %d", rep_dev->port_id, + rep_dev->rep_id); + goto done; + } + break; + } + } +done: + return rc; +} + +static int +cnxk_representee_state_msg_process(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, + bool enable) +{ + struct cnxk_eswitch_devargs *esw_da; + uint16_t rep_id = UINT16_MAX; + int rc = 0, i, j; + + /* Traversing the initialized represented list */ + for (i = 0; i < eswitch_dev->nb_esw_da; i++) { + esw_da = &eswitch_dev->esw_da[i]; + for (j = 0; j < esw_da->nb_repr_ports; j++) { + if (esw_da->repr_hw_info[j].hw_func == hw_func) { + rep_id = esw_da->repr_hw_info[j].rep_id; + break; + } + } + if (rep_id != UINT16_MAX) + break; + } + /* No action on PF func for which representor has not been created */ + if (rep_id == UINT16_MAX) + 
goto done; + + if (enable) { + rc = cnxk_representee_setup(eswitch_dev, hw_func, rep_id); + if (rc) { + plt_err("Failed to setup representee, err %d", rc); + goto fail; + } + plt_rep_dbg("Representor ID %d representing %x", rep_id, hw_func); + rc = cnxk_eswitch_flow_rules_install(eswitch_dev, hw_func); + if (rc) { + plt_err("Failed to install rxtx flow rules for %x", hw_func); + goto fail; + } + } else { + rc = cnxk_eswitch_flow_rules_delete(eswitch_dev, hw_func); + if (rc) { + plt_err("Failed to delete flow rules for %x", hw_func); + goto fail; + } + rc = cnxk_representee_release(eswitch_dev, hw_func); + if (rc) { + plt_err("Failed to release representee, err %d", rc); + goto fail; + } + } + +done: + return 0; +fail: + return rc; +} + +static int +cnxk_representee_mtu_msg_process(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, + uint16_t rep_id, uint16_t mtu) +{ + struct cnxk_rep_dev *rep_dev = NULL; + struct rte_eth_dev *rep_eth_dev; + int rc = 0; + int i; + + for (i = 0; i < eswitch_dev->repr_cnt.nb_repr_probed; i++) { + rep_eth_dev = eswitch_dev->rep_info[i].rep_eth_dev; + if (!rep_eth_dev) { + plt_err("Failed to get rep ethdev handle"); + rc = -EINVAL; + goto done; + } + + rep_dev = cnxk_rep_pmd_priv(rep_eth_dev); + if (rep_dev->rep_id == rep_id) { + plt_rep_dbg("Setting MTU as %d for hw_func %x rep_id %d", mtu, hw_func, + rep_id); + rep_dev->repte_mtu = mtu; + break; + } + } + +done: + return rc; +} + +static int +cnxk_representee_msg_process(struct cnxk_eswitch_dev *eswitch_dev, + struct roc_eswitch_repte_notify_msg *notify_msg) +{ + int rc = 0; + + switch (notify_msg->type) { + case ROC_ESWITCH_REPTE_STATE: + plt_rep_dbg("type %d: hw_func %x action %s", notify_msg->type, + notify_msg->state.hw_func, + notify_msg->state.enable ? "enable" : "disable"); + rc = cnxk_representee_state_msg_process(eswitch_dev, notify_msg->state.hw_func, + notify_msg->state.enable); + break; + case ROC_ESWITCH_REPTE_MTU: + plt_rep_dbg("type %d: hw_func %x rep_id %d mtu %d", notify_msg->type, + notify_msg->mtu.hw_func, notify_msg->mtu.rep_id, notify_msg->mtu.mtu); + rc = cnxk_representee_mtu_msg_process(eswitch_dev, notify_msg->mtu.hw_func, + notify_msg->mtu.rep_id, notify_msg->mtu.mtu); + break; + default: + plt_err("Invalid notification msg received %d", notify_msg->type); + break; + } + + return rc; +} + +static uint32_t +cnxk_representee_msg_thread_main(void *arg) +{ + struct cnxk_eswitch_dev *eswitch_dev = (struct cnxk_eswitch_dev *)arg; + struct cnxk_esw_repte_msg_proc *repte_msg_proc; + struct cnxk_esw_repte_msg *msg, *next_msg; + int count, rc; + + repte_msg_proc = &eswitch_dev->repte_msg_proc; + pthread_mutex_lock(&eswitch_dev->repte_msg_proc.mutex); + while (eswitch_dev->repte_msg_proc.start_thread) { + do { + rc = pthread_cond_wait(&eswitch_dev->repte_msg_proc.repte_msg_cond, + &eswitch_dev->repte_msg_proc.mutex); + } while (rc != 0); + + /* Go through list pushed from interrupt context and process each message */ + next_msg = TAILQ_FIRST(&repte_msg_proc->msg_list); + count = 0; + while (next_msg) { + msg = next_msg; + count++; + plt_rep_dbg("Processing msg %d", count); + /* Unlocking for interrupt thread to grab lock + * while this thread processes the message.
+ */ + pthread_mutex_unlock(&eswitch_dev->repte_msg_proc.mutex); + /* Processing the message */ + cnxk_representee_msg_process(eswitch_dev, msg->notify_msg); + /* Locking as cond wait will unlock before wait */ + pthread_mutex_lock(&eswitch_dev->repte_msg_proc.mutex); + next_msg = TAILQ_NEXT(msg, next); + TAILQ_REMOVE(&repte_msg_proc->msg_list, msg, next); + rte_free(msg->notify_msg); + rte_free(msg); + } + } + + pthread_mutex_unlock(&eswitch_dev->repte_msg_proc.mutex); + + return 0; +} + +static int +cnxk_representee_notification(void *roc_nix, struct roc_eswitch_repte_notify_msg *notify_msg) +{ + struct cnxk_esw_repte_msg_proc *repte_msg_proc; + struct cnxk_eswitch_dev *eswitch_dev; + struct cnxk_esw_repte_msg *msg; + int rc = 0; + + RTE_SET_USED(roc_nix); + eswitch_dev = cnxk_eswitch_pmd_priv(); + if (!eswitch_dev) { + plt_err("Failed to get PF ethdev handle"); + rc = -EINVAL; + goto done; + } + + repte_msg_proc = &eswitch_dev->repte_msg_proc; + msg = rte_zmalloc("msg", sizeof(struct cnxk_esw_repte_msg), 0); + if (!msg) { + plt_err("Failed to allocate memory for repte msg"); + rc = -ENOMEM; + goto done; + } + + msg->notify_msg = plt_zmalloc(sizeof(struct roc_eswitch_repte_notify_msg), 0); + if (!msg->notify_msg) { + plt_err("Failed to allocate memory"); + rte_free(msg); + rc = -ENOMEM; + goto done; + } + + rte_memcpy(msg->notify_msg, notify_msg, sizeof(struct roc_eswitch_repte_notify_msg)); + plt_rep_dbg("Pushing new notification: msg type %d", msg->notify_msg->type); + pthread_mutex_lock(&eswitch_dev->repte_msg_proc.mutex); + TAILQ_INSERT_TAIL(&repte_msg_proc->msg_list, msg, next); + /* Signal VF message handler thread */ + pthread_cond_signal(&eswitch_dev->repte_msg_proc.repte_msg_cond); + pthread_mutex_unlock(&eswitch_dev->repte_msg_proc.mutex); + +done: + return rc; +} + static int cnxk_rep_parent_setup(struct cnxk_eswitch_dev *eswitch_dev) { @@ -263,6 +559,7 @@ create_representor_ethdev(struct rte_pci_device *pci_dev, struct cnxk_eswitch_de int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev) { + char name[REPTE_MSG_PROC_THRD_NAME_MAX_LEN]; struct cnxk_eswitch_devargs *esw_da; uint16_t num_rep; int i, j, rc; @@ -302,7 +599,36 @@ cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswi } } + if (!eswitch_dev->repte_msg_proc.start_thread) { + /* Register callback for representee notification */ + if (roc_eswitch_nix_process_repte_notify_cb_register(&eswitch_dev->nix, + cnxk_representee_notification)) { + plt_err("Failed to register callback for representee notification"); + rc = -EINVAL; + goto fail; + } + + /* Create a thread for handling msgs from VFs */ + TAILQ_INIT(&eswitch_dev->repte_msg_proc.msg_list); + pthread_cond_init(&eswitch_dev->repte_msg_proc.repte_msg_cond, NULL); + pthread_mutex_init(&eswitch_dev->repte_msg_proc.mutex, NULL); + + rte_strscpy(name, "repte_msg_proc_thrd", REPTE_MSG_PROC_THRD_NAME_MAX_LEN); + eswitch_dev->repte_msg_proc.start_thread = true; + rc = + rte_thread_create_internal_control(&eswitch_dev->repte_msg_proc.repte_msg_thread, + name, cnxk_representee_msg_thread_main, + eswitch_dev); + if (rc != 0) { + plt_err("Failed to create thread for VF mbox handling"); + goto thread_fail; + } + } + return 0; +thread_fail: + pthread_mutex_destroy(&eswitch_dev->repte_msg_proc.mutex); + pthread_cond_destroy(&eswitch_dev->repte_msg_proc.repte_msg_cond); fail: return rc; } diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h index da298823a7..5a85d4376e 100644 --- a/drivers/net/cnxk/cnxk_rep.h +++
b/drivers/net/cnxk/cnxk_rep.h @@ -10,6 +10,40 @@ /* Common ethdev ops */ extern struct eth_dev_ops cnxk_rep_dev_ops; +struct cnxk_rep_queue_stats { + uint64_t pkts; + uint64_t bytes; +}; + +struct cnxk_rep_rxq { + /* Parent rep device */ + struct cnxk_rep_dev *rep_dev; + /* Queue ID */ + uint16_t qid; + /* No of desc */ + uint16_t nb_desc; + /* mempool handle */ + struct rte_mempool *mpool; + /* RX config parameters */ + const struct rte_eth_rxconf *rx_conf; + /* Per queue RX statistics */ + struct cnxk_rep_queue_stats stats; +}; + +struct cnxk_rep_txq { + /* Parent rep device */ + struct cnxk_rep_dev *rep_dev; + /* Queue ID */ + uint16_t qid; + /* No of desc */ + uint16_t nb_desc; + /* TX config parameters */ + const struct rte_eth_txconf *tx_conf; + /* Per queue TX statistics */ + struct cnxk_rep_queue_stats stats; +}; + +/* Representor port configurations */ struct cnxk_rep_dev { uint16_t port_id; uint16_t rep_id; @@ -18,7 +52,10 @@ struct cnxk_rep_dev { uint16_t hw_func; bool is_vf_active; bool native_repte; + struct cnxk_rep_rxq *rxq; + struct cnxk_rep_txq *txq; uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; + uint16_t repte_mtu; }; static inline struct cnxk_rep_dev * From patchwork Sun Mar 3 17:38:23 2024 X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137827 X-Patchwork-Delegate: jerinj@marvell.com From: Harman Kalra To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra Subject: [PATCH v6 13/23] net/cnxk: representor ethdev ops Date: Sun, 3 Mar 2024 23:08:23 +0530 Message-ID: <20240303173833.100039-14-hkalra@marvell.com> In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> Implementing Ethernet device operation callbacks for the port representor PMD. Signed-off-by: Harman Kalra --- drivers/net/cnxk/cnxk_rep.c | 28 +- drivers/net/cnxk/cnxk_rep.h | 35 +++ drivers/net/cnxk/cnxk_rep_msg.h | 8 + drivers/net/cnxk/cnxk_rep_ops.c | 495 ++++++++++++++++++++++++++++++-- 4 files changed, 523 insertions(+), 43 deletions(-) diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c index dc00cdecc1..ca0637bde5 100644 --- a/drivers/net/cnxk/cnxk_rep.c +++ b/drivers/net/cnxk/cnxk_rep.c @@ -73,6 +73,8 @@ cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, ui int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev) { + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; @@ -80,6 +82,8 @@ cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev) rte_free(ethdev->data->mac_addrs); ethdev->data->mac_addrs = NULL; + rep_dev->parent_dev->repr_cnt.nb_repr_probed--; + return 0; } @@ -432,26 +436,6 @@ cnxk_rep_parent_setup(struct cnxk_eswitch_dev *eswitch_dev) return rc; } -static uint16_t -cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) -{ - PLT_SET_USED(tx_queue); - PLT_SET_USED(tx_pkts); - PLT_SET_USED(nb_pkts); - - return 0; -} - -static uint16_t -cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) -{ - PLT_SET_USED(rx_queue); - PLT_SET_USED(rx_pkts); - PLT_SET_USED(nb_pkts); - - return 0; -} - static int cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params) { @@ -481,8 +465,8 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params) eth_dev->dev_ops = &cnxk_rep_dev_ops; /* Rx/Tx functions stubs to avoid crashing */ - eth_dev->rx_pkt_burst = cnxk_rep_rx_burst; - eth_dev->tx_pkt_burst = cnxk_rep_tx_burst; + eth_dev->rx_pkt_burst = cnxk_rep_rx_burst_dummy; + eth_dev->tx_pkt_burst = cnxk_rep_tx_burst_dummy; /* Only single queues for representor devices */ eth_dev->data->nb_rx_queues = 1; diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h index 5a85d4376e..6a43259980 100644 --- a/drivers/net/cnxk/cnxk_rep.h +++ b/drivers/net/cnxk/cnxk_rep.h @@ -7,6 +7,13 @@ #ifndef __CNXK_REP_H__ #define __CNXK_REP_H__ +#define CNXK_REP_TX_OFFLOAD_CAPA \ + (RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \ + RTE_ETH_TX_OFFLOAD_MULTI_SEGS) + +#define CNXK_REP_RX_OFFLOAD_CAPA \ + (RTE_ETH_RX_OFFLOAD_SCATTER | RTE_ETH_RX_OFFLOAD_RSS_HASH | RTE_ETH_RX_OFFLOAD_VLAN_STRIP) + /* Common ethdev ops */ extern struct eth_dev_ops cnxk_rep_dev_ops; @@ -58,12
+65,33 @@ struct cnxk_rep_dev { uint16_t repte_mtu; }; +/* Inline functions */ +static inline void +cnxk_rep_lock(struct cnxk_rep_dev *rep) +{ + rte_spinlock_lock(&rep->parent_dev->rep_lock); +} + +static inline void +cnxk_rep_unlock(struct cnxk_rep_dev *rep) +{ + rte_spinlock_unlock(&rep->parent_dev->rep_lock); +} + static inline struct cnxk_rep_dev * cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev) { return eth_dev->data->dev_private; } +static __rte_always_inline void +cnxk_rep_pool_buffer_stats(struct rte_mempool *pool) +{ + plt_rep_dbg(" pool %s size %d buffer count in use %d available %d\n", pool->name, + pool->size, rte_mempool_in_use_count(pool), rte_mempool_avail_count(pool)); +} + +/* Prototypes */ int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev); int cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev); int cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev); @@ -86,5 +114,12 @@ int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats) int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev); int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops); int cnxk_rep_state_update(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t *rep_id); +int cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev); +int cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev); +int cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr); +uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +void cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id); +void cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id); #endif /* __CNXK_REP_H__ */ diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h index 0543805148..63cfbe3f19 100644 --- a/drivers/net/cnxk/cnxk_rep_msg.h +++ b/drivers/net/cnxk/cnxk_rep_msg.h @@ -19,6 +19,8 @@ typedef enum CNXK_REP_MSG { CNXK_REP_MSG_READY = 0, CNXK_REP_MSG_ACK, CNXK_REP_MSG_EXIT, + /* Ethernet operation msgs */ + CNXK_REP_MSG_ETH_SET_MAC, /* End of messaging sequence */ CNXK_REP_MSG_END, } cnxk_rep_msg_t; @@ -81,6 +83,12 @@ typedef struct cnxk_rep_msg_exit_data { uint16_t data[]; } __rte_packed cnxk_rep_msg_exit_data_t; +/* Ethernet op - set mac */ +typedef struct cnxk_rep_msg_eth_mac_set_meta { + uint16_t portid; + uint8_t addr_bytes[RTE_ETHER_ADDR_LEN]; +} __rte_packed cnxk_rep_msg_eth_set_mac_meta_t; + void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type, uint32_t size); void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz, diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c index 15448688ce..97643a50f2 100644 --- a/drivers/net/cnxk/cnxk_rep_ops.c +++ b/drivers/net/cnxk/cnxk_rep_ops.c @@ -3,25 +3,221 @@ */ #include +#include + +#define MEMPOOL_CACHE_SIZE 256 +#define TX_DESC_PER_QUEUE 512 +#define RX_DESC_PER_QUEUE 256 +#define NB_REP_VDEV_MBUF 1024 + +static uint16_t +cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct cnxk_rep_txq *txq = tx_queue; + struct cnxk_rep_dev *rep_dev; + uint16_t n_tx; + + if (unlikely(!txq)) + return 0; + + rep_dev = txq->rep_dev; + plt_rep_dbg("Transmitting %d packets on eswitch queue %d", nb_pkts, txq->qid); + n_tx = cnxk_eswitch_dev_tx_burst(rep_dev->parent_dev, 
txq->qid, tx_pkts, nb_pkts, + NIX_TX_OFFLOAD_VLAN_QINQ_F); + return n_tx; +} + +static uint16_t +cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct cnxk_rep_rxq *rxq = rx_queue; + struct cnxk_rep_dev *rep_dev; + uint16_t n_rx; + + if (unlikely(!rxq)) + return 0; + + rep_dev = rxq->rep_dev; + n_rx = cnxk_eswitch_dev_rx_burst(rep_dev->parent_dev, rxq->qid, rx_pkts, nb_pkts); + if (n_rx == 0) + return 0; + + plt_rep_dbg("Received %d packets on eswitch queue %d", n_rx, rxq->qid); + return n_rx; +} + +uint16_t +cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + PLT_SET_USED(tx_queue); + PLT_SET_USED(tx_pkts); + PLT_SET_USED(nb_pkts); + + return 0; +} + +uint16_t +cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + PLT_SET_USED(rx_queue); + PLT_SET_USED(rx_pkts); + PLT_SET_USED(nb_pkts); + + return 0; +} int cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete) { - PLT_SET_USED(ethdev); + struct rte_eth_link link; PLT_SET_USED(wait_to_complete); + + memset(&link, 0, sizeof(link)); + if (ethdev->data->dev_started) + link.link_status = RTE_ETH_LINK_UP; + else + link.link_status = RTE_ETH_LINK_DOWN; + + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; + link.link_autoneg = RTE_ETH_LINK_FIXED; + link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; + + return rte_eth_linkstatus_set(ethdev, &link); +} + +int +cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *dev_info) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + uint32_t max_rx_pktlen; + + max_rx_pktlen = (roc_nix_max_pkt_len(&rep_dev->parent_dev->nix) + RTE_ETHER_CRC_LEN - + CNXK_NIX_MAX_VTAG_ACT_SIZE); + + dev_info->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN; + dev_info->max_rx_pktlen = max_rx_pktlen; + dev_info->max_mac_addrs = roc_nix_mac_max_entries_get(&rep_dev->parent_dev->nix); + + dev_info->rx_offload_capa = CNXK_REP_RX_OFFLOAD_CAPA; + dev_info->tx_offload_capa = CNXK_REP_TX_OFFLOAD_CAPA; + dev_info->rx_queue_offload_capa = 0; + dev_info->tx_queue_offload_capa = 0; + + /* For the sake of symmetry, max_rx_queues = max_tx_queues */ + dev_info->max_rx_queues = 1; + dev_info->max_tx_queues = 1; + + /* MTU specifics */ + dev_info->max_mtu = dev_info->max_rx_pktlen - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN); + dev_info->min_mtu = dev_info->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD; + + /* Switch info specific */ + dev_info->switch_info.name = ethdev->device->name; + dev_info->switch_info.domain_id = rep_dev->switch_domain_id; + dev_info->switch_info.port_id = rep_dev->port_id; + return 0; } int -cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *devinfo) +cnxk_rep_representor_info_get(struct rte_eth_dev *ethdev, struct rte_eth_representor_info *info) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + + return cnxk_eswitch_representor_info_get(rep_dev->parent_dev, info); +} + +static int +rep_eth_conf_chk(const struct rte_eth_conf *conf, uint16_t nb_rx_queues) +{ + const struct rte_eth_rss_conf *rss_conf; + int ret = 0; + + if (conf->link_speeds != 0) { + plt_err("specific link speeds not supported"); + ret = -EINVAL; + } + + switch (conf->rxmode.mq_mode) { + case RTE_ETH_MQ_RX_RSS: + if (nb_rx_queues != 1) { + plt_err("Rx RSS is not supported with %u queues", nb_rx_queues); + ret = -EINVAL; + break; + } + + rss_conf = &conf->rx_adv_conf.rss_conf; + if (rss_conf->rss_key != NULL || rss_conf->rss_key_len != 0 || + rss_conf->rss_hf != 0) { + 
plt_err("Rx RSS configuration is not supported"); + ret = -EINVAL; + } + break; + case RTE_ETH_MQ_RX_NONE: + break; + default: + plt_err("Rx mode MQ modes other than RSS not supported"); + ret = -EINVAL; + break; + } + + if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) { + plt_err("Tx mode MQ modes not supported"); + ret = -EINVAL; + } + + if (conf->lpbk_mode != 0) { + plt_err("loopback not supported"); + ret = -EINVAL; + } + + if (conf->dcb_capability_en != 0) { + plt_err("priority-based flow control not supported"); + ret = -EINVAL; + } + + if (conf->intr_conf.lsc != 0) { + plt_err("link status change interrupt not supported"); + ret = -EINVAL; + } + + if (conf->intr_conf.rxq != 0) { + plt_err("receive queue interrupt not supported"); + ret = -EINVAL; + } + + if (conf->intr_conf.rmv != 0) { + plt_err("remove interrupt not supported"); + ret = -EINVAL; + } + + return ret; +} + +int +cnxk_rep_dev_configure(struct rte_eth_dev *ethdev) +{ + struct rte_eth_dev_data *ethdev_data = ethdev->data; + int rc = -1; + + rc = rep_eth_conf_chk(ðdev_data->dev_conf, ethdev_data->nb_rx_queues); + if (rc) + goto fail; + + return 0; +fail: + return rc; +} + +int +cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev) { PLT_SET_USED(ethdev); - PLT_SET_USED(devinfo); return 0; } int -cnxk_rep_dev_configure(struct rte_eth_dev *ethdev) +cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev) { PLT_SET_USED(ethdev); return 0; @@ -30,21 +226,73 @@ cnxk_rep_dev_configure(struct rte_eth_dev *ethdev) int cnxk_rep_dev_start(struct rte_eth_dev *ethdev) { - PLT_SET_USED(ethdev); + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + int rc = 0, qid; + + ethdev->rx_pkt_burst = cnxk_rep_rx_burst; + ethdev->tx_pkt_burst = cnxk_rep_tx_burst; + + if (!rep_dev->is_vf_active) + return 0; + + if (!rep_dev->rxq || !rep_dev->txq) { + plt_err("Invalid rxq or txq for representor id %d", rep_dev->rep_id); + rc = -EINVAL; + goto fail; + } + + /* Start rx queues */ + qid = rep_dev->rxq->qid; + rc = cnxk_eswitch_rxq_start(rep_dev->parent_dev, qid); + if (rc) { + plt_err("Failed to start rxq %d, rc=%d", qid, rc); + goto fail; + } + + /* Start tx queues */ + qid = rep_dev->txq->qid; + rc = cnxk_eswitch_txq_start(rep_dev->parent_dev, qid); + if (rc) { + plt_err("Failed to start txq %d, rc=%d", qid, rc); + goto fail; + } + + /* Start rep_xport device only once after first representor gets active */ + if (!rep_dev->parent_dev->repr_cnt.nb_repr_started) { + rc = cnxk_eswitch_nix_rsrc_start(rep_dev->parent_dev); + if (rc) { + plt_err("Failed to start nix dev, rc %d", rc); + goto fail; + } + } + + ethdev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED; + ethdev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED; + + rep_dev->parent_dev->repr_cnt.nb_repr_started++; + return 0; +fail: + return rc; } int cnxk_rep_dev_close(struct rte_eth_dev *ethdev) { - PLT_SET_USED(ethdev); - return 0; + return cnxk_rep_dev_uninit(ethdev); } int cnxk_rep_dev_stop(struct rte_eth_dev *ethdev) { - PLT_SET_USED(ethdev); + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + + ethdev->rx_pkt_burst = cnxk_rep_rx_burst_dummy; + ethdev->tx_pkt_burst = cnxk_rep_tx_burst_dummy; + cnxk_rep_rx_queue_stop(ethdev, 0); + cnxk_rep_tx_queue_stop(ethdev, 0); + rep_dev->parent_dev->repr_cnt.nb_repr_started--; + return 0; } @@ -53,39 +301,189 @@ cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16 unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool) { - PLT_SET_USED(ethdev); - 
PLT_SET_USED(rx_queue_id); - PLT_SET_USED(nb_rx_desc); + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + struct cnxk_rep_rxq *rxq = NULL; + uint16_t qid = 0; + int rc; + PLT_SET_USED(socket_id); - PLT_SET_USED(rx_conf); - PLT_SET_USED(mb_pool); + /* If no representee assigned, store the respective rxq parameters */ + if (!rep_dev->is_vf_active && !rep_dev->rxq) { + rxq = plt_zmalloc(sizeof(*rxq), RTE_CACHE_LINE_SIZE); + if (!rxq) { + rc = -ENOMEM; + plt_err("Failed to alloc RxQ for rep id %d", rep_dev->rep_id); + goto fail; + } + + rxq->qid = qid; + rxq->nb_desc = nb_rx_desc; + rxq->rep_dev = rep_dev; + rxq->mpool = mb_pool; + rxq->rx_conf = rx_conf; + rep_dev->rxq = rxq; + ethdev->data->rx_queues[rx_queue_id] = NULL; + + return 0; + } + + qid = rep_dev->rep_id; + rc = cnxk_eswitch_rxq_setup(rep_dev->parent_dev, qid, nb_rx_desc, rx_conf, mb_pool); + if (rc) { + plt_err("failed to setup eswitch queue id %d", qid); + goto fail; + } + + rxq = rep_dev->rxq; + if (!rxq) { + plt_err("Invalid RXQ handle for representor port %d rep id %d", rep_dev->port_id, + rep_dev->rep_id); + goto free_queue; + } + + rxq->qid = qid; + ethdev->data->rx_queues[rx_queue_id] = rxq; + ethdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + plt_rep_dbg("representor id %d portid %d rxq id %d", rep_dev->port_id, + ethdev->data->port_id, rxq->qid); + return 0; +free_queue: + cnxk_eswitch_rxq_release(rep_dev->parent_dev, qid); +fail: + return rc; +} + +void +cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id) +{ + struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id]; + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + int rc; + + if (!rxq) + return; + + plt_rep_dbg("Stopping rxq %u", rxq->qid); + + rc = cnxk_eswitch_rxq_stop(rep_dev->parent_dev, rxq->qid); + if (rc) + plt_err("Failed to stop rxq %d, rc=%d", rxq->qid, rc); + + ethdev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; } void cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id) { - PLT_SET_USED(ethdev); - PLT_SET_USED(queue_id); + struct cnxk_rep_rxq *rxq = ethdev->data->rx_queues[queue_id]; + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + int rc; + + if (!rxq) { + plt_err("Invalid rxq retrieved for rep_id %d", rep_dev->rep_id); + return; + } + + plt_rep_dbg("Releasing rxq %u", rxq->qid); + + rc = cnxk_eswitch_rxq_release(rep_dev->parent_dev, rxq->qid); + if (rc) + plt_err("Failed to release rxq %d, rc=%d", rxq->qid, rc); } int cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { - PLT_SET_USED(ethdev); - PLT_SET_USED(tx_queue_id); - PLT_SET_USED(nb_tx_desc); + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + struct cnxk_rep_txq *txq = NULL; + int rc = 0, qid = 0; + PLT_SET_USED(socket_id); - PLT_SET_USED(tx_conf); + /* If no representee assigned, store the respective txq parameters */ + if (!rep_dev->is_vf_active && !rep_dev->txq) { + txq = plt_zmalloc(sizeof(*txq), RTE_CACHE_LINE_SIZE); + if (!txq) { + rc = -ENOMEM; + plt_err("failed to alloc txq for rep id %d", rep_dev->rep_id); + goto fail; + } + + txq->qid = qid; + txq->nb_desc = nb_tx_desc; + txq->tx_conf = tx_conf; + txq->rep_dev = rep_dev; + rep_dev->txq = txq; + + ethdev->data->tx_queues[tx_queue_id] = NULL; + + return 0; + } + + qid = rep_dev->rep_id; + rc = cnxk_eswitch_txq_setup(rep_dev->parent_dev, qid, nb_tx_desc, tx_conf); + if (rc) { + plt_err("failed to setup
eswitch queue id %d", qid); + goto fail; + } + + txq = rep_dev->txq; + if (!txq) { + plt_err("Invalid TXQ handle for representor port %d rep id %d", rep_dev->port_id, + rep_dev->rep_id); + goto free_queue; + } + + txq->qid = qid; + ethdev->data->tx_queues[tx_queue_id] = txq; + ethdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + plt_rep_dbg("representor id %d portid %d txq id %d", rep_dev->port_id, + ethdev->data->port_id, txq->qid); + return 0; +free_queue: + cnxk_eswitch_txq_release(rep_dev->parent_dev, qid); +fail: + return rc; +} + +void +cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id) +{ + struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id]; + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + int rc; + + if (!txq) + return; + + plt_rep_dbg("Releasing txq %u", txq->qid); + + rc = cnxk_eswitch_txq_stop(rep_dev->parent_dev, txq->qid); + if (rc) + plt_err("Failed to stop txq %d, rc=%d", rc, txq->qid); + + ethdev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; } void cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id) { - PLT_SET_USED(ethdev); - PLT_SET_USED(queue_id); + struct cnxk_rep_txq *txq = ethdev->data->tx_queues[queue_id]; + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + int rc; + + if (!txq) { + plt_err("Invalid txq retrieved for rep_id %d", rep_dev->rep_id); + return; + } + + plt_rep_dbg("Releasing txq %u", txq->qid); + + rc = cnxk_eswitch_txq_release(rep_dev->parent_dev, txq->qid); + if (rc) + plt_err("Failed to release txq %d, rc=%d", rc, txq->qid); } int @@ -111,15 +509,70 @@ cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **op return 0; } +int +cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + cnxk_rep_msg_eth_set_mac_meta_t msg_sm_meta; + cnxk_rep_msg_ack_data_t adata; + uint32_t len = 0, rc; + void *buffer; + size_t size; + + /* If representor not representing any VF, return 0 */ + if (!rep_dev->is_vf_active) + return 0; + + size = CNXK_REP_MSG_MAX_BUFFER_SZ; + buffer = plt_zmalloc(size, 0); + if (!buffer) { + plt_err("Failed to allocate mem"); + rc = -ENOMEM; + goto fail; + } + + cnxk_rep_msg_populate_header(buffer, &len); + + msg_sm_meta.portid = rep_dev->rep_id; + rte_memcpy(&msg_sm_meta.addr_bytes, addr->addr_bytes, RTE_ETHER_ADDR_LEN); + cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_sm_meta, + sizeof(cnxk_rep_msg_eth_set_mac_meta_t), + CNXK_REP_MSG_ETH_SET_MAC); + cnxk_rep_msg_populate_msg_end(buffer, &len); + + rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, &adata); + if (rc) { + plt_err("Failed to process the message, err %d", rc); + goto fail; + } + + if (adata.u.sval < 0) { + rc = adata.u.sval; + plt_err("Failed to set mac address, err %d", rc); + goto fail; + } + + rte_free(buffer); + + return 0; +fail: + rte_free(buffer); + return rc; +} + /* CNXK platform representor dev ops */ struct eth_dev_ops cnxk_rep_dev_ops = { .dev_infos_get = cnxk_rep_dev_info_get, + .representor_info_get = cnxk_rep_representor_info_get, .dev_configure = cnxk_rep_dev_configure, .dev_start = cnxk_rep_dev_start, .rx_queue_setup = cnxk_rep_rx_queue_setup, .rx_queue_release = cnxk_rep_rx_queue_release, .tx_queue_setup = cnxk_rep_tx_queue_setup, .tx_queue_release = cnxk_rep_tx_queue_release, + .promiscuous_enable = cnxk_rep_promiscuous_enable, + .promiscuous_disable = cnxk_rep_promiscuous_disable, + .mac_addr_set = cnxk_rep_mac_addr_set, 
.link_update = cnxk_rep_link_update, .dev_close = cnxk_rep_dev_close, .dev_stop = cnxk_rep_dev_stop, From patchwork Sun Mar 3 17:38:24 2024 X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137828 X-Patchwork-Delegate: jerinj@marvell.com From: Harman Kalra To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra Subject: [PATCH v6 14/23] common/cnxk: get representees ethernet stats Date: Sun, 3 Mar 2024 23:08:24 +0530 Message-ID: <20240303173833.100039-15-hkalra@marvell.com> In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> Implementing an mbox interface to fetch the representees' ethernet stats from the kernel.
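For reference, a caller could use the new API roughly as follows (a sketch only; the roc_nix_stats field names follow the hunks below, and the aggregation mirrors what the net/cnxk patch later in this series does in its stats callback):

struct roc_nix_stats nstats;
uint64_t rx_pkts, tx_pkts;
int rc;

/* pf_func identifies the representee whose NIX LF counters are wanted */
rc = roc_eswitch_nix_repte_stats(&eswitch_dev->nix, pf_func, &nstats);
if (rc)
	return rc;

/* Per-class counters can then be aggregated into packet totals */
rx_pkts = nstats.rx_ucast + nstats.rx_mcast + nstats.rx_bcast;
tx_pkts = nstats.tx_ucast + nstats.tx_mcast + nstats.tx_bcast;

Internally the helper issues the new NIX_GET_LF_STATS mbox message and copies the rx/tx blocks of nix_lf_stats_rsp into the roc_nix_stats fields.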
Signed-off-by: Harman Kalra --- drivers/common/cnxk/roc_eswitch.c | 45 +++++++++++++++++++++++++++++++ drivers/common/cnxk/roc_eswitch.h | 2 ++ drivers/common/cnxk/roc_mbox.h | 31 +++++++++++++++++++++ drivers/common/cnxk/version.map | 1 + 4 files changed, 79 insertions(+) diff --git a/drivers/common/cnxk/roc_eswitch.c b/drivers/common/cnxk/roc_eswitch.c index 14819bad75..c67b4090a5 100644 --- a/drivers/common/cnxk/roc_eswitch.c +++ b/drivers/common/cnxk/roc_eswitch.c @@ -342,3 +342,48 @@ roc_eswitch_nix_process_repte_notify_cb_unregister(struct roc_nix *roc_nix) dev->ops->repte_notify = NULL; } + +int +roc_eswitch_nix_repte_stats(struct roc_nix *roc_nix, uint16_t pf_func, struct roc_nix_stats *stats) +{ + struct nix *nix = roc_nix_to_nix_priv(roc_nix); + struct dev *dev = &nix->dev; + struct nix_get_lf_stats_req *req; + struct nix_lf_stats_rsp *rsp; + struct mbox *mbox; + int rc; + + mbox = mbox_get(dev->mbox); + req = mbox_alloc_msg_nix_get_lf_stats(mbox); + if (!req) { + rc = -ENOSPC; + goto exit; + } + + req->hdr.pcifunc = roc_nix_get_pf_func(roc_nix); + req->pcifunc = pf_func; + + rc = mbox_process_msg(mbox, (void *)&rsp); + if (rc) + goto exit; + + stats->rx_octs = rsp->rx.octs; + stats->rx_ucast = rsp->rx.ucast; + stats->rx_bcast = rsp->rx.bcast; + stats->rx_mcast = rsp->rx.mcast; + stats->rx_drop = rsp->rx.drop; + stats->rx_drop_octs = rsp->rx.drop_octs; + stats->rx_drop_bcast = rsp->rx.drop_bcast; + stats->rx_drop_mcast = rsp->rx.drop_mcast; + stats->rx_err = rsp->rx.err; + + stats->tx_ucast = rsp->tx.ucast; + stats->tx_bcast = rsp->tx.bcast; + stats->tx_mcast = rsp->tx.mcast; + stats->tx_drop = rsp->tx.drop; + stats->tx_octs = rsp->tx.octs; + +exit: + mbox_put(mbox); + return rc; +} diff --git a/drivers/common/cnxk/roc_eswitch.h b/drivers/common/cnxk/roc_eswitch.h index e0df0038d4..b701ea69ee 100644 --- a/drivers/common/cnxk/roc_eswitch.h +++ b/drivers/common/cnxk/roc_eswitch.h @@ -51,6 +51,8 @@ int __roc_api roc_eswitch_npc_rss_action_configure(struct roc_npc *roc_npc, /* NIX */ int __roc_api roc_eswitch_nix_vlan_tpid_set(struct roc_nix *nix, uint32_t type, uint16_t tpid, bool is_vf); +int __roc_api roc_eswitch_nix_repte_stats(struct roc_nix *roc_nix, uint16_t pf_func, + struct roc_nix_stats *stats); int __roc_api roc_eswitch_nix_process_repte_notify_cb_register(struct roc_nix *roc_nix, process_repte_notify_t proc_repte_nt); void __roc_api roc_eswitch_nix_process_repte_notify_cb_unregister(struct roc_nix *roc_nix); diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h index d28e3ffd70..f1a3371ef9 100644 --- a/drivers/common/cnxk/roc_mbox.h +++ b/drivers/common/cnxk/roc_mbox.h @@ -306,6 +306,7 @@ struct mbox_msghdr { M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_req, msg_rsp)\ M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, nix_mcast_grp_update_req, \ nix_mcast_grp_update_rsp) \ + M(NIX_GET_LF_STATS, 0x802e, nix_get_lf_stats, nix_get_lf_stats_req, nix_lf_stats_rsp) \ /* MCS mbox IDs (range 0xa000 - 0xbFFF) */ \ M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ mcs_alloc_rsrc_rsp) \ @@ -1850,6 +1851,36 @@ struct nix_mcast_grp_update_rsp { uint32_t __io mce_start_index; }; +struct nix_get_lf_stats_req { + struct mbox_msghdr hdr; + uint16_t __io pcifunc; + uint64_t __io rsvd; +}; + +struct nix_lf_stats_rsp { + struct mbox_msghdr hdr; + uint16_t __io pcifunc; + struct { + uint64_t __io octs; + uint64_t __io ucast; + uint64_t __io bcast; + uint64_t __io mcast; + uint64_t __io drop; + uint64_t __io 
drop_octs; + uint64_t __io drop_mcast; + uint64_t __io drop_bcast; + uint64_t __io err; + uint64_t __io rsvd[5]; + } rx; + struct { + uint64_t __io ucast; + uint64_t __io bcast; + uint64_t __io mcast; + uint64_t __io drop; + uint64_t __io octs; + } tx; +}; + /* Global NIX inline IPSec configuration */ struct nix_inline_ipsec_cfg { struct mbox_msghdr hdr; diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map index 5dd42c8044..18c2d9d632 100644 --- a/drivers/common/cnxk/version.map +++ b/drivers/common/cnxk/version.map @@ -89,6 +89,7 @@ INTERNAL { roc_error_msg_get; roc_eswitch_nix_process_repte_notify_cb_register; roc_eswitch_nix_process_repte_notify_cb_unregister; + roc_eswitch_nix_repte_stats; roc_eswitch_nix_vlan_tpid_set; roc_eswitch_npc_mcam_delete_rule; roc_eswitch_npc_mcam_rx_rule; From patchwork Sun Mar 3 17:38:25 2024 X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137829 X-Patchwork-Delegate: jerinj@marvell.com From: Harman Kalra To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra CC: Ankur Dwivedi Subject: [PATCH v6 15/23] net/cnxk: ethernet statistics for representor Date: Sun, 3 Mar 2024 23:08:25 +0530 Message-ID: <20240303173833.100039-16-hkalra@marvell.com> In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> Adding representor ethernet statistics support which can fetch stats for representees which are operating independently or as part of a companion app. Also adds the xstats callback for representor port statistics. Signed-off-by: Harman Kalra Signed-off-by: Ankur Dwivedi --- drivers/net/cnxk/cnxk_rep.h | 8 + drivers/net/cnxk/cnxk_rep_msg.h | 7 + drivers/net/cnxk/cnxk_rep_ops.c | 275 +++++++++++++++++++++++++++++++- 3 files changed, 285 insertions(+), 5 deletions(-) diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h index 6a43259980..51a2e97624 100644 --- a/drivers/net/cnxk/cnxk_rep.h +++ b/drivers/net/cnxk/cnxk_rep.h @@ -121,5 +121,13 @@ uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); void cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id); void cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id); +int cnxk_rep_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *stats, unsigned int n); +int cnxk_rep_xstats_reset(struct rte_eth_dev *eth_dev); +int cnxk_rep_xstats_get_names(struct rte_eth_dev *eth_dev, struct rte_eth_xstat_name *xstats_names, + unsigned int n); +int cnxk_rep_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids, uint64_t *values, + unsigned int n); +int cnxk_rep_xstats_get_names_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids, + struct rte_eth_xstat_name *xstats_names, unsigned int n); #endif /* __CNXK_REP_H__ */ diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h index 63cfbe3f19..277e25d92a 100644 --- a/drivers/net/cnxk/cnxk_rep_msg.h +++ b/drivers/net/cnxk/cnxk_rep_msg.h @@ -21,6 +21,8 @@ typedef enum CNXK_REP_MSG { CNXK_REP_MSG_EXIT, /* Ethernet operation msgs */ CNXK_REP_MSG_ETH_SET_MAC, + CNXK_REP_MSG_ETH_STATS_GET, + CNXK_REP_MSG_ETH_STATS_CLEAR, /* End of messaging sequence */ CNXK_REP_MSG_END, } cnxk_rep_msg_t; @@ -89,6 +91,11 @@ typedef struct cnxk_rep_msg_eth_mac_set_meta { uint8_t addr_bytes[RTE_ETHER_ADDR_LEN]; } __rte_packed cnxk_rep_msg_eth_set_mac_meta_t; +/* Ethernet op - get/clear stats */ +typedef struct cnxk_rep_msg_eth_stats_meta { + uint16_t portid; +} __rte_packed cnxk_rep_msg_eth_stats_meta_t; + void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type, uint32_t size); void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz, diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c index 97643a50f2..0ba4d55398 100644 --- a/drivers/net/cnxk/cnxk_rep_ops.c +++ b/drivers/net/cnxk/cnxk_rep_ops.c @@ -10,6 +10,11 @@ #define RX_DESC_PER_QUEUE 256
#define NB_REP_VDEV_MBUF 1024 +static const struct rte_eth_xstat_name cnxk_rep_xstats_string[] = { + {"rep_nb_rx"}, + {"rep_nb_tx"}, +}; + static uint16_t cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -24,6 +29,7 @@ cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) plt_rep_dbg("Transmitting %d packets on eswitch queue %d", nb_pkts, txq->qid); n_tx = cnxk_eswitch_dev_tx_burst(rep_dev->parent_dev, txq->qid, tx_pkts, nb_pkts, NIX_TX_OFFLOAD_VLAN_QINQ_F); + txq->stats.pkts += n_tx; return n_tx; } @@ -43,6 +49,7 @@ cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) return 0; plt_rep_dbg("Received %d packets on eswitch queue %d", n_rx, rxq->qid); + rxq->stats.pkts += n_rx; return n_rx; } @@ -486,19 +493,154 @@ cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id) plt_err("Failed to release txq %d, rc=%d", rc, txq->qid); } +static int +process_eth_stats(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t *adata, cnxk_rep_msg_t msg) +{ + cnxk_rep_msg_eth_stats_meta_t msg_st_meta; + uint32_t len = 0, rc; + void *buffer; + size_t size; + + size = CNXK_REP_MSG_MAX_BUFFER_SZ; + buffer = plt_zmalloc(size, 0); + if (!buffer) { + plt_err("Failed to allocate mem"); + rc = -ENOMEM; + goto fail; + } + + cnxk_rep_msg_populate_header(buffer, &len); + + msg_st_meta.portid = rep_dev->rep_id; + cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_st_meta, + sizeof(cnxk_rep_msg_eth_stats_meta_t), msg); + cnxk_rep_msg_populate_msg_end(buffer, &len); + + rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata); + if (rc) { + plt_err("Failed to process the message, err %d", rc); + goto fail; + } + + rte_free(buffer); + + return 0; +fail: + rte_free(buffer); + return rc; +} + +static int +native_repte_eth_stats(struct cnxk_rep_dev *rep_dev, struct rte_eth_stats *stats) +{ + struct roc_nix_stats nix_stats; + int rc = 0; + + rc = roc_eswitch_nix_repte_stats(&rep_dev->parent_dev->nix, rep_dev->hw_func, &nix_stats); + if (rc) { + plt_err("Failed to get stats for representee %x, err %d", rep_dev->hw_func, rc); + goto fail; + } + + memset(stats, 0, sizeof(struct rte_eth_stats)); + stats->opackets = nix_stats.tx_ucast; + stats->opackets += nix_stats.tx_mcast; + stats->opackets += nix_stats.tx_bcast; + stats->oerrors = nix_stats.tx_drop; + stats->obytes = nix_stats.tx_octs; + + stats->ipackets = nix_stats.rx_ucast; + stats->ipackets += nix_stats.rx_mcast; + stats->ipackets += nix_stats.rx_bcast; + stats->imissed = nix_stats.rx_drop; + stats->ibytes = nix_stats.rx_octs; + stats->ierrors = nix_stats.rx_err; + + return 0; +fail: + return rc; +} + int cnxk_rep_stats_get(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats) { - PLT_SET_USED(ethdev); - PLT_SET_USED(stats); + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + struct rte_eth_stats vf_stats; + cnxk_rep_msg_ack_data_t adata; + int rc; + + /* If representor not representing any active VF, return 0 */ + if (!rep_dev->is_vf_active) + return 0; + + if (rep_dev->native_repte) { + /* For representees which are independent */ + rc = native_repte_eth_stats(rep_dev, &vf_stats); + if (rc) { + plt_err("Failed to get stats for vf rep %x (hw_func %x), err %d", + rep_dev->port_id, rep_dev->hw_func, rc); + goto fail; + } + } else { + /* For representees which are part of companian app */ + rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_GET); + if (rc || adata.u.sval < 0) { + if (adata.u.sval < 0) + rc = adata.u.sval; + + plt_err("Failed 
to get stats for vf rep %x, err %d", rep_dev->port_id, rc); + } + + if (adata.size != sizeof(struct rte_eth_stats)) { + rc = -EINVAL; + plt_err("Incomplete stats received for vf rep %d", rep_dev->port_id); + goto fail; + } + + rte_memcpy(&vf_stats, adata.u.data, adata.size); + } + + stats->q_ipackets[0] = vf_stats.ipackets; + stats->q_ibytes[0] = vf_stats.ibytes; + stats->ipackets = vf_stats.ipackets; + stats->ibytes = vf_stats.ibytes; + + stats->q_opackets[0] = vf_stats.opackets; + stats->q_obytes[0] = vf_stats.obytes; + stats->opackets = vf_stats.opackets; + stats->obytes = vf_stats.obytes; + + plt_rep_dbg("Input packets %" PRId64 " Output packets %" PRId64 "", stats->ipackets, + stats->opackets); + return 0; +fail: + return rc; } int cnxk_rep_stats_reset(struct rte_eth_dev *ethdev) { - PLT_SET_USED(ethdev); - return 0; + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev); + cnxk_rep_msg_ack_data_t adata; + int rc = 0; + + /* If representor not representing any active VF, return 0 */ + if (!rep_dev->is_vf_active) + return 0; + + if (rep_dev->native_repte) + return -ENOTSUP; + + rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_CLEAR); + if (rc || adata.u.sval < 0) { + if (adata.u.sval < 0) + rc = adata.u.sval; + + plt_err("Failed to clear stats for vf rep %x, err %d", rep_dev->port_id, rc); + } + + return rc; } int @@ -560,6 +702,124 @@ cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr) return rc; } +int +cnxk_rep_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *stats, unsigned int n) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + unsigned int num = RTE_DIM(cnxk_rep_xstats_string); + int cnt = 0; + + if (!rep_dev) + return -EINVAL; + + if (n < num) + return num; + + stats[cnt].id = cnt; + stats[cnt].value = rep_dev->rxq->stats.pkts; + cnt++; + stats[cnt].id = cnt; + stats[cnt].value = rep_dev->txq->stats.pkts; + cnt++; + + return cnt; +} + +int +cnxk_rep_xstats_reset(struct rte_eth_dev *eth_dev) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + int rc; + + if (!rep_dev) + return -EINVAL; + + rc = cnxk_rep_stats_reset(eth_dev); + if (rc < 0 && rc != -ENOTSUP) + return rc; + + rep_dev->rxq->stats.pkts = 0; + rep_dev->txq->stats.pkts = 0; + + return 0; +} + +int +cnxk_rep_xstats_get_names(__rte_unused struct rte_eth_dev *eth_dev, + struct rte_eth_xstat_name *xstats_names, unsigned int n) +{ + unsigned int num = RTE_DIM(cnxk_rep_xstats_string); + unsigned int i; + + if (xstats_names == NULL) + return num; + + if (n < num) + return num; + + for (i = 0; i < num; i++) + rte_strscpy(xstats_names[i].name, cnxk_rep_xstats_string[i].name, + sizeof(xstats_names[i].name)); + + return num; +} + +int +cnxk_rep_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids, uint64_t *values, + unsigned int n) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + unsigned int num = RTE_DIM(cnxk_rep_xstats_string); + unsigned int i; + + if (!rep_dev) + return -EINVAL; + + if (n < num) + return num; + + if (n > num) + return -EINVAL; + + for (i = 0; i < n; i++) { + switch (ids[i]) { + case 0: + values[i] = rep_dev->rxq->stats.pkts; + break; + case 1: + values[i] = rep_dev->txq->stats.pkts; + break; + default: + return -EINVAL; + } + } + + return n; +} + +int +cnxk_rep_xstats_get_names_by_id(__rte_unused struct rte_eth_dev *eth_dev, const uint64_t *ids, + struct rte_eth_xstat_name *xstats_names, unsigned int n) +{ + unsigned int num = RTE_DIM(cnxk_rep_xstats_string); + unsigned int i; + + if (n < 
num) + return num; + + if (n > num) + return -EINVAL; + + for (i = 0; i < n; i++) { + if (ids[i] >= num) + return -EINVAL; + rte_strscpy(xstats_names[i].name, cnxk_rep_xstats_string[ids[i]].name, + sizeof(xstats_names[i].name)); + } + + return n; +} + /* CNXK platform representor dev ops */ struct eth_dev_ops cnxk_rep_dev_ops = { .dev_infos_get = cnxk_rep_dev_info_get, @@ -578,5 +838,10 @@ struct eth_dev_ops cnxk_rep_dev_ops = { .dev_stop = cnxk_rep_dev_stop, .stats_get = cnxk_rep_stats_get, .stats_reset = cnxk_rep_stats_reset, - .flow_ops_get = cnxk_rep_flow_ops_get + .flow_ops_get = cnxk_rep_flow_ops_get, + .xstats_get = cnxk_rep_xstats_get, + .xstats_reset = cnxk_rep_xstats_reset, + .xstats_get_names = cnxk_rep_xstats_get_names, + .xstats_get_by_id = cnxk_rep_xstats_get_by_id, + .xstats_get_names_by_id = cnxk_rep_xstats_get_names_by_id }; From patchwork Sun Mar 3 17:38:26 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137830 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 74CCD43C3E; Sun, 3 Mar 2024 18:40:44 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9CCE74368E; Sun, 3 Mar 2024 18:39:31 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id ADE1E4366F for ; Sun, 3 Mar 2024 18:39:28 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id 423DmDEM014917 for ; Sun, 3 Mar 2024 09:39:27 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h= from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-type; s=pfpt0220; bh=5SloPpWrYHaBWs31nZIG8 yNKn/l+uW/JDNhgHnkk8o4=; b=HFjVAi4OOE3FLzvci4dDapIbcmR8reklYu/kh zapRm7QhCTT6SWhrgWjJ43nhsCEE64vsMpMs1vGpxDYuoT6MHtZL8dF5RFB7xFsz mdjWzzqRdx+w0rTYwmLaI0pN2yBq7aOQQikKJZ6JvE6owdKlaH1jcHmEtuoneiug JA0tTWC0gXTONgnj2W5yflL8mI/6nepYAENjtaBhkgaTV2IVSuVu3hJypsGpNylC dNLFXrJPe8m4Rrknl0cSooA7k5nHCATBFud+8yChMSZPoSjoqEIA+GsFYEIPK9dz +/MrJRM58K7ZF9XpnXKO6QYZQkRaA12klWcqJdYpPnyN89XAw== Received: from dc6wp-exch02.marvell.com ([4.21.29.225]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3wm2bptrup-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT) for ; Sun, 03 Mar 2024 09:39:27 -0800 (PST) Received: from DC6WP-EXCH02.marvell.com (10.76.176.209) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Sun, 3 Mar 2024 09:39:26 -0800 Received: from maili.marvell.com (10.69.176.80) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server id 15.2.1258.12 via Frontend Transport; Sun, 3 Mar 2024 09:39:26 -0800 Received: from localhost.localdomain (unknown [10.29.52.211]) by maili.marvell.com (Postfix) with ESMTP id D9B013F71B0; Sun, 3 Mar 2024 09:39:23 -0800 (PST) From: Harman Kalra To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao , Harman Kalra CC: Subject: [PATCH v6 16/23] common/cnxk: base support for eswitch VF Date: Sun, 3 Mar 2024 23:08:26 +0530 Message-ID: <20240303173833.100039-17-hkalra@marvell.com> X-Mailer: git-send-email 2.18.0 In-Reply-To: 
<20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: GQx0YKdWVw_diylrfKCJhUzIzAxK1wzX X-Proofpoint-ORIG-GUID: GQx0YKdWVw_diylrfKCJhUzIzAxK1wzX X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.1011,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2024-03-03_08,2024-03-01_03,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Base ROC layer changes for supporting eswitch VF and NIX lbk changes for ESW Signed-off-by: Harman Kalra --- drivers/common/cnxk/roc_constants.h | 1 + drivers/common/cnxk/roc_dev.c | 1 + drivers/common/cnxk/roc_nix.c | 15 +++++++++++++-- drivers/common/cnxk/roc_nix.h | 1 + drivers/common/cnxk/roc_nix_priv.h | 1 + drivers/common/cnxk/version.map | 1 + 6 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/common/cnxk/roc_constants.h b/drivers/common/cnxk/roc_constants.h index cb4edbea58..21b3998cee 100644 --- a/drivers/common/cnxk/roc_constants.h +++ b/drivers/common/cnxk/roc_constants.h @@ -44,6 +44,7 @@ #define PCI_DEVID_CNXK_RVU_REE_PF 0xA0f4 #define PCI_DEVID_CNXK_RVU_REE_VF 0xA0f5 #define PCI_DEVID_CNXK_RVU_ESWITCH_PF 0xA0E0 +#define PCI_DEVID_CNXK_RVU_ESWITCH_VF 0xA0E1 #define PCI_DEVID_CN9K_CGX 0xA059 #define PCI_DEVID_CN10K_RPM 0xA060 diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c index 867f981423..daf7684d8e 100644 --- a/drivers/common/cnxk/roc_dev.c +++ b/drivers/common/cnxk/roc_dev.c @@ -1272,6 +1272,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev) case PCI_DEVID_CNXK_RVU_VF: case PCI_DEVID_CNXK_RVU_SDP_VF: case PCI_DEVID_CNXK_RVU_NIX_INL_VF: + case PCI_DEVID_CNXK_RVU_ESWITCH_VF: dev->hwcap |= DEV_HWCAP_F_VF; break; } diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c index e68d472f43..20202788b5 100644 --- a/drivers/common/cnxk/roc_nix.c +++ b/drivers/common/cnxk/roc_nix.c @@ -13,6 +13,14 @@ roc_nix_is_lbk(struct roc_nix *roc_nix) return nix->lbk_link; } +bool +roc_nix_is_esw(struct roc_nix *roc_nix) +{ + struct nix *nix = roc_nix_to_nix_priv(roc_nix); + + return nix->esw_link; +} + int roc_nix_get_base_chan(struct roc_nix *roc_nix) { @@ -156,7 +164,7 @@ roc_nix_max_pkt_len(struct roc_nix *roc_nix) if (roc_model_is_cn9k()) return NIX_CN9K_MAX_HW_FRS; - if (nix->lbk_link) + if (nix->lbk_link || nix->esw_link) return NIX_LBK_MAX_HW_FRS; return NIX_RPM_MAX_HW_FRS; @@ -351,7 +359,7 @@ roc_nix_get_hw_info(struct roc_nix *roc_nix) rc = mbox_process_msg(mbox, (void *)&hw_info); if (rc == 0) { nix->vwqe_interval = hw_info->vwqe_delay; - if (nix->lbk_link) + if (nix->lbk_link || nix->esw_link) roc_nix->dwrr_mtu = hw_info->lbk_dwrr_mtu; else if (nix->sdp_link) roc_nix->dwrr_mtu = hw_info->sdp_dwrr_mtu; @@ -368,6 +376,7 @@ sdp_lbk_id_update(struct plt_pci_device *pci_dev, struct nix *nix) { nix->sdp_link = false; nix->lbk_link = false; + nix->esw_link = false; /* Update SDP/LBK link based on PCI device id */ switch (pci_dev->id.device_id) { @@ -376,7 +385,9 @@ sdp_lbk_id_update(struct plt_pci_device *pci_dev, struct nix *nix) nix->sdp_link = true; break; case PCI_DEVID_CNXK_RVU_AF_VF: + case PCI_DEVID_CNXK_RVU_ESWITCH_VF: nix->lbk_link = true; + nix->esw_link = true; break; default: break; diff --git a/drivers/common/cnxk/roc_nix.h 
b/drivers/common/cnxk/roc_nix.h index 0289ce9820..bd3e540f45 100644 --- a/drivers/common/cnxk/roc_nix.h +++ b/drivers/common/cnxk/roc_nix.h @@ -526,6 +526,7 @@ int __roc_api roc_nix_dev_fini(struct roc_nix *roc_nix); /* Type */ bool __roc_api roc_nix_is_lbk(struct roc_nix *roc_nix); +bool __roc_api roc_nix_is_esw(struct roc_nix *roc_nix); bool __roc_api roc_nix_is_sdp(struct roc_nix *roc_nix); bool __roc_api roc_nix_is_pf(struct roc_nix *roc_nix); bool __roc_api roc_nix_is_vf_or_sdp(struct roc_nix *roc_nix); diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h index 3d99ade2b4..275ffc8ea3 100644 --- a/drivers/common/cnxk/roc_nix_priv.h +++ b/drivers/common/cnxk/roc_nix_priv.h @@ -170,6 +170,7 @@ struct nix { uintptr_t base; bool sdp_link; bool lbk_link; + bool esw_link; bool ptp_en; bool is_nix1; diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map index 18c2d9d632..424ad7f484 100644 --- a/drivers/common/cnxk/version.map +++ b/drivers/common/cnxk/version.map @@ -278,6 +278,7 @@ INTERNAL { roc_nix_inl_outb_cpt_lfs_dump; roc_nix_cpt_ctx_cache_sync; roc_nix_is_lbk; + roc_nix_is_esw; roc_nix_is_pf; roc_nix_is_sdp; roc_nix_is_vf_or_sdp; From patchwork Sun Mar 3 17:38:27 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137831 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1557243C3E; Sun, 3 Mar 2024 18:40:50 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id C770443695; Sun, 3 Mar 2024 18:39:32 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id 7589B42EDE for ; Sun, 3 Mar 2024 18:39:31 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id 423GWjXJ019709 for ; Sun, 3 Mar 2024 09:39:30 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h= from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-type; s=pfpt0220; bh=DhlvSxNGtpbR67YmZGPuK aElE7nde/xGHAFSMRpLynU=; b=ZlxFgN8rWYXtqpBj+586LS4rL/xmJqNegUqW0 4oSaW+i1dTY4S5M6fNyJqTPOIbzL3QPOtu8P0r3R7sy42ZszhbMwHUcCy6kvzomL 7flsbUEhBi+3XOsVNoC7Mkk1fS4RzATsQhlcuoWcpRx+2iIZG2f5c2MFV1TT+GIj dFEwjN7pkk3MOhgFto+SDFPGG8R9KesY4T4EJ6bE8Dup1Typ+KZqOpjW7GGED5ys 0Y02fhnISy4XcTvGL6JPdocacuP9vr7DKeF+QpWHjBQW07PawU/2xfW06K3K+m79 NiJPtMCZr6njDSy81N8HtmCfB2/f3v+I6GFaagfP3oj2UwnXg== Received: from dc6wp-exch02.marvell.com ([4.21.29.225]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3wm2bptruw-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT) for ; Sun, 03 Mar 2024 09:39:30 -0800 (PST) Received: from DC6WP-EXCH02.marvell.com (10.76.176.209) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Sun, 3 Mar 2024 09:39:29 -0800 Received: from maili.marvell.com (10.69.176.80) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server id 15.2.1258.12 via Frontend Transport; Sun, 3 Mar 2024 09:39:29 -0800 Received: from localhost.localdomain (unknown [10.29.52.211]) by maili.marvell.com (Postfix) with ESMTP id CE6013F71A8; Sun, 
3 Mar 2024 09:39:26 -0800 (PST) From: Harman Kalra To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao , Harman Kalra CC: Subject: [PATCH v6 17/23] net/cnxk: eswitch VF as ethernet device Date: Sun, 3 Mar 2024 23:08:27 +0530 Message-ID: <20240303173833.100039-18-hkalra@marvell.com> X-Mailer: git-send-email 2.18.0 In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: g_gxm3pRJaLIMp1O2I_TBl-XQLub_N0U X-Proofpoint-ORIG-GUID: g_gxm3pRJaLIMp1O2I_TBl-XQLub_N0U X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.1011,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2024-03-03_08,2024-03-01_03,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Adding support for eswitch VF to probe as normal cnxk ethernet device Signed-off-by: Harman Kalra --- drivers/net/cnxk/cn10k_ethdev.c | 3 +++ drivers/net/cnxk/cnxk_ethdev.c | 41 +++++++++++++++++++++--------- drivers/net/cnxk/cnxk_ethdev.h | 3 +++ drivers/net/cnxk/cnxk_ethdev_ops.c | 4 +++ drivers/net/cnxk/cnxk_link.c | 3 ++- 5 files changed, 41 insertions(+), 13 deletions(-) diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c index 05d6d3b53f..55ed54bb0f 100644 --- a/drivers/net/cnxk/cn10k_ethdev.c +++ b/drivers/net/cnxk/cn10k_ethdev.c @@ -973,6 +973,9 @@ static const struct rte_pci_id cn10k_pci_nix_map[] = { CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_PF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KB, PCI_DEVID_CNXK_RVU_PF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_VF), + CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_ESWITCH_VF), + CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_ESWITCH_VF), + CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_ESWITCH_VF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_VF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_VF), CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KB, PCI_DEVID_CNXK_RVU_VF), diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c index 7640910782..6b37bd877f 100644 --- a/drivers/net/cnxk/cnxk_ethdev.c +++ b/drivers/net/cnxk/cnxk_ethdev.c @@ -390,7 +390,7 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev) struct cnxk_fc_cfg *fc = &dev->fc_cfg; struct rte_eth_fc_conf fc_cfg = {0}; - if (roc_nix_is_sdp(&dev->nix)) + if (roc_nix_is_sdp(&dev->nix) || roc_nix_is_esw(&dev->nix)) return 0; /* Don't do anything if PFC is enabled */ @@ -1449,12 +1449,14 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) goto cq_fini; /* Init flow control configuration */ - fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG; - fc_cfg.rxchan_cfg.enable = true; - rc = roc_nix_fc_config_set(nix, &fc_cfg); - if (rc) { - plt_err("Failed to initialize flow control rc=%d", rc); - goto cq_fini; + if (!roc_nix_is_esw(nix)) { + fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG; + fc_cfg.rxchan_cfg.enable = true; + rc = roc_nix_fc_config_set(nix, &fc_cfg); + if (rc) { + plt_err("Failed to initialize flow control rc=%d", rc); + goto cq_fini; + } } /* Update flow control configuration to PMD */ @@ -1977,11 +1979,21 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev) TAILQ_INIT(&dev->mcs_list); } - plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64 - " rxoffload_capa=0x%" PRIx64 " 
txoffload_capa=0x%" PRIx64, - eth_dev->data->port_id, roc_nix_get_pf(nix), - roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap, - dev->rx_offload_capa, dev->tx_offload_capa); + /* Reserve a switch domain for eswitch device */ + if (pci_dev->id.device_id == PCI_DEVID_CNXK_RVU_ESWITCH_VF) { + eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + rc = rte_eth_switch_domain_alloc(&dev->switch_domain_id); + if (rc) { + plt_err("Failed to alloc switch domain: %d", rc); + goto free_mac_addrs; + } + } + + plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64 " rxoffload_capa=0x%" PRIx64 + " txoffload_capa=0x%" PRIx64, + eth_dev->data->port_id, roc_nix_get_pf(nix), roc_nix_get_vf(nix), + CNXK_ETH_DEV_PMD_VERSION, dev->hwcap, dev->rx_offload_capa, + dev->tx_offload_capa); return 0; free_mac_addrs: @@ -2047,6 +2059,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) } } + /* Free switch domain ID reserved for eswitch device */ + if ((eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) && + rte_eth_switch_domain_free(dev->switch_domain_id)) + plt_err("Failed to free switch domain"); + /* Disable and free rte_meter entries */ nix_meter_fini(dev); diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h index 80a9dc83a1..5d42e1306a 100644 --- a/drivers/net/cnxk/cnxk_ethdev.h +++ b/drivers/net/cnxk/cnxk_ethdev.h @@ -427,6 +427,9 @@ struct cnxk_eth_dev { /* Inject packets */ struct cnxk_ethdev_inj_cfg inj_cfg; + + /* Eswitch domain ID */ + uint16_t switch_domain_id; }; struct cnxk_eth_rxq_sp { diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c index 56049c5dd2..c8260fcb9c 100644 --- a/drivers/net/cnxk/cnxk_ethdev_ops.c +++ b/drivers/net/cnxk/cnxk_ethdev_ops.c @@ -70,6 +70,10 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo) RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; devinfo->max_rx_mempools = CNXK_NIX_NUM_POOLS_MAX; + if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) { + devinfo->switch_info.name = eth_dev->device->name; + devinfo->switch_info.domain_id = dev->switch_domain_id; + } return 0; } diff --git a/drivers/net/cnxk/cnxk_link.c b/drivers/net/cnxk/cnxk_link.c index 127c9e72e7..903b44de2c 100644 --- a/drivers/net/cnxk/cnxk_link.c +++ b/drivers/net/cnxk/cnxk_link.c @@ -13,7 +13,8 @@ cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set) dev->flags &= ~CNXK_LINK_CFG_IN_PROGRESS_F; /* Update link info for LBK */ - if (!set && (roc_nix_is_lbk(&dev->nix) || roc_nix_is_sdp(&dev->nix))) { + if (!set && + (roc_nix_is_lbk(&dev->nix) || roc_nix_is_sdp(&dev->nix) || roc_nix_is_esw(&dev->nix))) { struct rte_eth_link link; link.link_status = RTE_ETH_LINK_UP; From patchwork Sun Mar 3 17:38:28 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137832 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 8CBED43C3E; Sun, 3 Mar 2024 18:40:57 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 855344367B; Sun, 3 Mar 2024 18:39:36 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id C6A60436A0 for ; Sun, 3 Mar 2024 18:39:34 +0100 (CET) Received: from pps.filterd 
(m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id 423GWjXM019709 for ; Sun, 3 Mar 2024 09:39:34 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h= from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-type; s=pfpt0220; bh=YmgeU4BMCyBUpNGDVaqqN nOGyQiFAqMpGAPdnVU6K+k=; b=bGfhg5MdsK0F5LtUGxTcnfJ5d299sUYvnys/c mkiUV87kGAM8cJueVUX438lUeo8+sMCsvzFUO46jWWQw7E7qtLDwc1WSDUX3aVNT tpTfacEGbYvwP8me77Vfu20GugjnYN1paFpZXLJ75FCHyOajbydrn3Mg0/FDucxh keRdn+RTv6DnJb3ykd3AkoYYHAibo6iAjHK+PLdz+NKpU5fAcCcLx/X23NrqGc7P 9sNgnAwllC051h07mLOdD3isp5Vej/vqxwutFIeeA7QMEKshWvchRqMLHKYn6swN wYpfU6falkCBro4zoI8gW5nRnAMjiGVLn4vM3d9UavAl+Xycg== Received: from dc6wp-exch02.marvell.com ([4.21.29.225]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3wm2bptrv4-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT) for ; Sun, 03 Mar 2024 09:39:33 -0800 (PST) Received: from DC6WP-EXCH02.marvell.com (10.76.176.209) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Sun, 3 Mar 2024 09:39:32 -0800 Received: from maili.marvell.com (10.69.176.80) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server id 15.2.1258.12 via Frontend Transport; Sun, 3 Mar 2024 09:39:32 -0800 Received: from localhost.localdomain (unknown [10.29.52.211]) by maili.marvell.com (Postfix) with ESMTP id BB1973F71A8; Sun, 3 Mar 2024 09:39:29 -0800 (PST) From: Harman Kalra To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao , Harman Kalra CC: , Satheesh Paul Subject: [PATCH v6 18/23] common/cnxk: support port representor and represented port Date: Sun, 3 Mar 2024 23:08:28 +0530 Message-ID: <20240303173833.100039-19-hkalra@marvell.com> X-Mailer: git-send-email 2.18.0 In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: wO9VsmqYEFnfcNtaV_2S9tWa4rJum4ka X-Proofpoint-ORIG-GUID: wO9VsmqYEFnfcNtaV_2S9tWa4rJum4ka X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.1011,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2024-03-03_08,2024-03-01_03,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Kiran Kumar K Implementing the common infrastructural changes for supporting port representors and represented ports used as action and pattern in net layer. 
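For orientation, a minimal sketch of the kind of rule this infrastructure
enables once the net layer hooks it up in the following patches; the port id
is illustrative and error handling is elided:

    /* Drop transfer traffic coming from the port represented by ethdev 1. */
    struct rte_flow_attr attr = { .transfer = 1 };
    struct rte_flow_item_ethdev rep_spec = { .port_id = 1 };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT, .spec = &rep_spec },
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_DROP },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error err;
    struct rte_flow *flow = rte_flow_create(0, &attr, pattern, actions, &err);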
Signed-off-by: Kiran Kumar K Signed-off-by: Satheesh Paul Signed-off-by: Harman Kalra --- drivers/common/cnxk/roc_npc.c | 84 +++++++++++++++++++++++------ drivers/common/cnxk/roc_npc.h | 19 ++++++- drivers/common/cnxk/roc_npc_mcam.c | 65 +++++++++++----------- drivers/common/cnxk/roc_npc_parse.c | 28 +++++++++- drivers/common/cnxk/roc_npc_priv.h | 2 + drivers/net/cnxk/cnxk_flow.c | 2 +- 6 files changed, 150 insertions(+), 50 deletions(-) diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c index 67a660a2bc..d6ad3756bb 100644 --- a/drivers/common/cnxk/roc_npc.c +++ b/drivers/common/cnxk/roc_npc.c @@ -570,6 +570,8 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, flow->ctr_id = NPC_COUNTER_NONE; flow->mtr_id = ROC_NIX_MTR_ID_INVALID; pf_func = npc->pf_func; + if (flow->has_rep) + pf_func = flow->rep_pf_func; for (; actions->type != ROC_NPC_ACTION_TYPE_END; actions++) { switch (actions->type) { @@ -788,9 +790,12 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, if (req_act & ROC_NPC_ACTION_TYPE_DROP) { flow->npc_action = NIX_TX_ACTIONOP_DROP; - } else if ((req_act & ROC_NPC_ACTION_TYPE_COUNT) || - vlan_insert_action) { + } else if ((req_act & ROC_NPC_ACTION_TYPE_COUNT) || vlan_insert_action) { flow->npc_action = NIX_TX_ACTIONOP_UCAST_DEFAULT; + if (flow->rep_act_rep) { + flow->npc_action = NIX_TX_ACTIONOP_UCAST_CHAN; + flow->npc_action |= (uint64_t)0x3f << 12; + } } else { plt_err("Unsupported action for egress"); errcode = NPC_ERR_ACTION_NOTSUP; @@ -802,7 +807,9 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, flow->mcast_channels[1] = npc->channel; } - goto set_pf_func; + /* PF func who is sending the packet */ + flow->tx_pf_func = pf_func; + goto done; } else { if (vlan_insert_action) { errcode = NPC_ERR_ACTION_NOTSUP; @@ -881,10 +888,10 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, if (mark) flow->npc_action |= (uint64_t)mark << 40; -set_pf_func: /* Ideally AF must ensure that correct pf_func is set */ flow->npc_action |= (uint64_t)pf_func << 4; +done: return 0; err_exit: @@ -898,10 +905,14 @@ npc_parse_pattern(struct npc *npc, const struct roc_npc_item_info pattern[], struct roc_npc_flow *flow, struct npc_parse_state *pst) { npc_parse_stage_func_t parse_stage_funcs[] = { - npc_parse_meta_items, npc_parse_mark_item, npc_parse_pre_l2, npc_parse_cpt_hdr, - npc_parse_higig2_hdr, npc_parse_tx_queue, npc_parse_la, npc_parse_lb, - npc_parse_lc, npc_parse_ld, npc_parse_le, npc_parse_lf, - npc_parse_lg, npc_parse_lh, + npc_parse_meta_items, npc_parse_port_representor_id, + npc_parse_mark_item, npc_parse_pre_l2, + npc_parse_cpt_hdr, npc_parse_higig2_hdr, + npc_parse_tx_queue, npc_parse_la, + npc_parse_lb, npc_parse_lc, + npc_parse_ld, npc_parse_le, + npc_parse_lf, npc_parse_lg, + npc_parse_lh, }; uint8_t layer = 0; int key_offset; @@ -1140,15 +1151,20 @@ npc_rss_action_program(struct roc_npc *roc_npc, struct roc_npc_flow *flow) { const struct roc_npc_action_rss *rss; + struct roc_npc *npc = roc_npc; uint32_t rss_grp; uint8_t alg_idx; int rc; + if (flow->has_rep) { + npc = roc_npc->rep_npc; + npc->flowkey_cfg_state = roc_npc->flowkey_cfg_state; + } + for (; actions->type != ROC_NPC_ACTION_TYPE_END; actions++) { if (actions->type == ROC_NPC_ACTION_TYPE_RSS) { rss = (const struct roc_npc_action_rss *)actions->conf; - rc = npc_rss_action_configure(roc_npc, rss, &alg_idx, - &rss_grp, flow->mcam_id); + rc = npc_rss_action_configure(npc, rss, &alg_idx, &rss_grp, 
flow->mcam_id); if (rc) return rc; @@ -1171,7 +1187,7 @@ npc_vtag_cfg_delete(struct roc_npc *roc_npc, struct roc_npc_flow *flow) struct roc_nix *roc_nix = roc_npc->roc_nix; struct nix_vtag_config *vtag_cfg; struct nix_vtag_config_rsp *rsp; - struct mbox *mbox; + struct mbox *mbox, *ombox; struct nix *nix; int rc = 0; @@ -1181,7 +1197,10 @@ npc_vtag_cfg_delete(struct roc_npc *roc_npc, struct roc_npc_flow *flow) } tx_vtag_action; nix = roc_nix_to_nix_priv(roc_nix); - mbox = mbox_get((&nix->dev)->mbox); + ombox = (&nix->dev)->mbox; + if (flow->has_rep) + ombox = flow->rep_mbox; + mbox = mbox_get(ombox); tx_vtag_action.reg = flow->vtag_action; vtag_cfg = mbox_alloc_msg_nix_vtag_cfg(mbox); @@ -1400,6 +1419,7 @@ npc_vtag_strip_action_configure(struct mbox *mbox, rx_vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15); rx_vtag_action |= ((uint64_t)NPC_LID_LB << 8); + rx_vtag_action |= ((uint64_t)NIX_RX_VTAG_TYPE6 << 12); rx_vtag_action |= NIX_RX_VTAGACTION_VTAG0_RELPTR; if (*strip_cnt == 2) { @@ -1432,6 +1452,8 @@ npc_vtag_action_program(struct roc_npc *roc_npc, nix = roc_nix_to_nix_priv(roc_nix); mbox = (&nix->dev)->mbox; + if (flow->has_rep) + mbox = flow->rep_mbox; memset(vlan_info, 0, sizeof(vlan_info)); @@ -1448,6 +1470,7 @@ npc_vtag_action_program(struct roc_npc *roc_npc, if (rc) return rc; + plt_npc_dbg("VLAN strip action, strip_cnt %d", strip_cnt); if (strip_cnt == 2) actions++; @@ -1587,6 +1610,25 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, memset(flow, 0, sizeof(*flow)); memset(&parse_state, 0, sizeof(parse_state)); + flow->port_id = -1; + if (roc_npc->rep_npc) { + flow->rep_channel = + (roc_npc->rep_rx_channel == 0) ? + roc_nix_to_nix_priv(roc_npc->rep_npc->roc_nix)->rx_chan_base : + roc_npc->rep_rx_channel; + flow->rep_pf_func = roc_npc->rep_pf_func; + flow->rep_act_pf_func = roc_npc->rep_act_pf_func; + flow->rep_act_rep = roc_npc->rep_act_rep; + flow->rep_mbox = roc_npc_to_npc_priv(roc_npc->rep_npc)->mbox; + flow->has_rep = true; + flow->is_rep_vf = !roc_nix_is_pf(roc_npc->rep_npc->roc_nix); + flow->port_id = roc_npc->rep_port_id; + flow->rep_npc = roc_npc_to_npc_priv(roc_npc->rep_npc); + roc_npc->rep_act_rep = false; + roc_npc->rep_act_pf_func = 0; + roc_npc->rep_rx_channel = 0; + } + parse_state.dst_pf_func = dst_pf_func; rc = npc_parse_rule(roc_npc, attr, pattern, actions, flow, &parse_state); @@ -1629,6 +1671,7 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, *errcode = rc; goto set_rss_failed; } + roc_npc->rep_npc = NULL; if (flow->has_age_action) npc_age_flow_list_entry_add(roc_npc, flow); @@ -1641,6 +1684,7 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, TAILQ_FOREACH(flow_iter, list, next) { if (flow_iter->mcam_id > flow->mcam_id) { TAILQ_INSERT_BEFORE(flow_iter, flow, next); + roc_npc->rep_npc = NULL; return flow; } } @@ -1649,6 +1693,7 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, return flow; set_rss_failed: + roc_npc->rep_npc = NULL; if (flow->use_pre_alloc == 0) { rc = roc_npc_mcam_free_entry(roc_npc, flow->mcam_id); if (rc != 0) { @@ -1660,6 +1705,7 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, npc_inline_dev_ipsec_action_free(npc, flow); } err_exit: + roc_npc->rep_npc = NULL; plt_free(flow); return NULL; } @@ -1667,15 +1713,19 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, int npc_rss_group_free(struct npc *npc, struct roc_npc_flow *flow) { + struct npc *lnpc = npc; 
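	/*
	 * When this flow was installed via a representor (flow->has_rep),
	 * the RSS group was reserved in the represented device's NPC
	 * context, so the bitmap clear below must operate on
	 * flow->rep_npc ('lnpc') rather than on the local context; the
	 * group-range validation still uses the local npc limits.
	 */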
uint32_t rss_grp; + if (flow->has_rep) + lnpc = flow->rep_npc; + if ((flow->npc_action & 0xF) == NIX_RX_ACTIONOP_RSS) { rss_grp = (flow->npc_action >> NPC_RSS_ACT_GRP_OFFSET) & NPC_RSS_ACT_GRP_MASK; if (rss_grp == 0 || rss_grp >= npc->rss_grps) return -EINVAL; - plt_bitmap_clear(npc->rss_grp_entries, rss_grp); + plt_bitmap_clear(lnpc->rss_grp_entries, rss_grp); } return 0; @@ -1770,7 +1820,7 @@ roc_npc_flow_destroy(struct roc_npc *roc_npc, struct roc_npc_flow *flow) } void -roc_npc_flow_dump(FILE *file, struct roc_npc *roc_npc) +roc_npc_flow_dump(FILE *file, struct roc_npc *roc_npc, int rep_port_id) { struct npc *npc = roc_npc_to_npc_priv(roc_npc); struct roc_npc_flow *flow_iter; @@ -1784,12 +1834,14 @@ roc_npc_flow_dump(FILE *file, struct roc_npc *roc_npc) /* List in ascending order of mcam entries */ TAILQ_FOREACH(flow_iter, list, next) { - roc_npc_flow_mcam_dump(file, roc_npc, flow_iter); + if (rep_port_id == -1 || rep_port_id == flow_iter->port_id) + roc_npc_flow_mcam_dump(file, roc_npc, flow_iter); } } TAILQ_FOREACH(flow_iter, &npc->ipsec_list, next) { - roc_npc_flow_mcam_dump(file, roc_npc, flow_iter); + if (rep_port_id == -1 || rep_port_id == flow_iter->port_id) + roc_npc_flow_mcam_dump(file, roc_npc, flow_iter); } } diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h index 349c7f9d22..72aada84a1 100644 --- a/drivers/common/cnxk/roc_npc.h +++ b/drivers/common/cnxk/roc_npc.h @@ -42,6 +42,7 @@ enum roc_npc_item_type { ROC_NPC_ITEM_TYPE_MARK, ROC_NPC_ITEM_TYPE_TX_QUEUE, ROC_NPC_ITEM_TYPE_IPV6_ROUTING_EXT, + ROC_NPC_ITEM_TYPE_REPRESENTED_PORT, ROC_NPC_ITEM_TYPE_END, }; @@ -324,6 +325,7 @@ struct roc_npc_flow { struct roc_npc_flow_dump_data dump_data[ROC_NPC_MAX_FLOW_PATTERNS]; uint16_t num_patterns; struct roc_npc_spi_to_sa_action_info spi_to_sa_info; + uint16_t tx_pf_func; bool is_validate; uint16_t match_id; uint8_t is_inline_dev; @@ -332,6 +334,15 @@ struct roc_npc_flow { void *age_context; uint32_t timeout; bool has_age_action; + uint16_t rep_pf_func; + uint16_t rep_act_pf_func; + bool rep_act_rep; + uint16_t rep_channel; + struct mbox *rep_mbox; + bool has_rep; + bool is_rep_vf; + struct npc *rep_npc; + int port_id; bool is_sampling_rule; uint32_t recv_queue; uint32_t mcast_grp_index; @@ -407,6 +418,12 @@ struct roc_npc { uint16_t sdp_channel; uint16_t sdp_channel_mask; struct roc_npc_flow_age flow_age; + struct roc_npc *rep_npc; + uint16_t rep_pf_func; + uint16_t rep_rx_channel; + uint16_t rep_act_pf_func; + bool rep_act_rep; + int rep_port_id; #define ROC_NPC_MEM_SZ (6 * 1024) uint8_t reserved[ROC_NPC_MEM_SZ]; @@ -448,7 +465,7 @@ int __roc_api roc_npc_get_free_mcam_entry(struct roc_npc *roc_npc, struct roc_np int __roc_api roc_npc_inl_mcam_read_counter(uint32_t ctr_id, uint64_t *count); int __roc_api roc_npc_inl_mcam_clear_counter(uint32_t ctr_id); int __roc_api roc_npc_mcam_free_all_resources(struct roc_npc *roc_npc); -void __roc_api roc_npc_flow_dump(FILE *file, struct roc_npc *roc_npc); +void __roc_api roc_npc_flow_dump(FILE *file, struct roc_npc *roc_npc, int rep_port_id); void __roc_api roc_npc_flow_mcam_dump(FILE *file, struct roc_npc *roc_npc, struct roc_npc_flow *mcam); int __roc_api roc_npc_mark_actions_get(struct roc_npc *roc_npc); diff --git a/drivers/common/cnxk/roc_npc_mcam.c b/drivers/common/cnxk/roc_npc_mcam.c index 2de988a44b..cdb9db1383 100644 --- a/drivers/common/cnxk/roc_npc_mcam.c +++ b/drivers/common/cnxk/roc_npc_mcam.c @@ -143,8 +143,8 @@ npc_lid_lt_in_kex(struct npc *npc, uint8_t lid, uint8_t lt) } static void 
-npc_construct_ldata_mask(struct npc *npc, struct plt_bitmap *bmap, uint8_t lid, - uint8_t lt, uint8_t ld) +npc_construct_ldata_mask(struct npc *npc, struct plt_bitmap *bmap, uint8_t lid, uint8_t lt, + uint8_t ld) { struct npc_xtract_info *x_info, *infoflag; int hdr_off, keylen; @@ -197,8 +197,7 @@ npc_construct_ldata_mask(struct npc *npc, struct plt_bitmap *bmap, uint8_t lid, * @param len length of the match */ static bool -npc_is_kex_enabled(struct npc *npc, uint8_t lid, uint8_t lt, int offset, - int len) +npc_is_kex_enabled(struct npc *npc, uint8_t lid, uint8_t lt, int offset, int len) { struct plt_bitmap *bmap; uint32_t bmap_sz; @@ -349,8 +348,8 @@ npc_mcam_alloc_entries(struct mbox *mbox, int ref_mcam, int *alloc_entry, int re } int -npc_mcam_alloc_entry(struct npc *npc, struct roc_npc_flow *mcam, - struct roc_npc_flow *ref_mcam, int prio, int *resp_count) +npc_mcam_alloc_entry(struct npc *npc, struct roc_npc_flow *mcam, struct roc_npc_flow *ref_mcam, + int prio, int *resp_count) { struct npc_mcam_alloc_entry_req *req; struct npc_mcam_alloc_entry_rsp *rsp; @@ -450,22 +449,17 @@ npc_mcam_write_entry(struct mbox *mbox, struct roc_npc_flow *mcam) static void npc_mcam_process_mkex_cfg(struct npc *npc, struct npc_get_kex_cfg_rsp *kex_rsp) { - volatile uint64_t( - *q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD]; + volatile uint64_t(*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD]; struct npc_xtract_info *x_info = NULL; int lid, lt, ld, fl, ix; npc_dxcfg_t *p; uint64_t keyw; uint64_t val; - npc->keyx_supp_nmask[NPC_MCAM_RX] = - kex_rsp->rx_keyx_cfg & 0x7fffffffULL; - npc->keyx_supp_nmask[NPC_MCAM_TX] = - kex_rsp->tx_keyx_cfg & 0x7fffffffULL; - npc->keyx_len[NPC_MCAM_RX] = - npc_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]); - npc->keyx_len[NPC_MCAM_TX] = - npc_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]); + npc->keyx_supp_nmask[NPC_MCAM_RX] = kex_rsp->rx_keyx_cfg & 0x7fffffffULL; + npc->keyx_supp_nmask[NPC_MCAM_TX] = kex_rsp->tx_keyx_cfg & 0x7fffffffULL; + npc->keyx_len[NPC_MCAM_RX] = npc_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]); + npc->keyx_len[NPC_MCAM_TX] = npc_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]); keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL; npc->keyw[NPC_MCAM_RX] = keyw; @@ -485,8 +479,7 @@ npc_mcam_process_mkex_cfg(struct npc *npc, struct npc_get_kex_cfg_rsp *kex_rsp) /* Update LID, LT and LDATA cfg */ p = &npc->prx_dxcfg; - q = (volatile uint64_t(*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])( - &kex_rsp->intf_lid_lt_ld); + q = (volatile uint64_t(*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])(&kex_rsp->intf_lid_lt_ld); for (ix = 0; ix < NPC_MAX_INTF; ix++) { for (lid = 0; lid < NPC_MAX_LID; lid++) { for (lt = 0; lt < NPC_MAX_LT; lt++) { @@ -539,8 +532,7 @@ npc_mcam_fetch_kex_cfg(struct npc *npc) goto done; } - mbox_memcpy((char *)npc->profile_name, kex_rsp->mkex_pfl_name, - MKEX_NAME_LEN); + mbox_memcpy((char *)npc->profile_name, kex_rsp->mkex_pfl_name, MKEX_NAME_LEN); npc->exact_match_ena = (kex_rsp->rx_keyx_cfg >> 40) & 0xF; npc_mcam_process_mkex_cfg(npc, kex_rsp); @@ -551,9 +543,8 @@ npc_mcam_fetch_kex_cfg(struct npc *npc) } static void -npc_mcam_set_channel(struct roc_npc_flow *flow, - struct npc_mcam_write_entry_req *req, uint16_t channel, - uint16_t chan_mask, bool is_second_pass) +npc_mcam_set_channel(struct roc_npc_flow *flow, struct npc_mcam_write_entry_req *req, + uint16_t channel, uint16_t chan_mask, bool is_second_pass) { uint16_t chan = 0, mask = 0; @@ -681,7 +672,10 @@ npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow, 
struct npc_ } if (flow->nix_intf == NIX_INTF_TX) { - uint16_t pf_func = (flow->npc_action >> 4) & 0xffff; + uint16_t pf_func = flow->tx_pf_func; + + if (flow->has_rep) + pf_func = flow->rep_pf_func; pf_func = plt_cpu_to_be_16(pf_func); @@ -759,6 +753,15 @@ npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow, struct npc_ npc_mcam_set_channel(flow, req, inl_dev->channel, inl_dev->chan_mask, false); + } else if (flow->has_rep) { + pf_func = (flow->rep_act_pf_func == 0) ? flow->rep_pf_func : + flow->rep_act_pf_func; + req->entry_data.action &= ~(GENMASK(19, 4)); + req->entry_data.action |= (uint64_t)pf_func << 4; + flow->npc_action &= ~(GENMASK(19, 4)); + flow->npc_action |= (uint64_t)pf_func << 4; + npc_mcam_set_channel(flow, req, flow->rep_channel, (BIT_ULL(12) - 1), + false); } else if (npc->is_sdp_link) { npc_mcam_set_channel(flow, req, npc->sdp_channel, npc->sdp_channel_mask, pst->is_second_pass_rule); @@ -932,13 +935,11 @@ npc_program_mcam(struct npc *npc, struct npc_parse_state *pst, bool mcam_alloc) data_off = 0; index++; } - key_data[index] |= - ((uint64_t)data << data_off); + key_data[index] |= ((uint64_t)data << data_off); if (lt == 0) mask = 0; - key_mask[index] |= - ((uint64_t)mask << data_off); + key_mask[index] |= ((uint64_t)mask << data_off); data_off += 4; } } @@ -963,8 +964,12 @@ npc_program_mcam(struct npc *npc, struct npc_parse_state *pst, bool mcam_alloc) (pst->flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC)) skip_base_rule = true; - if (pst->is_vf && pst->flow->nix_intf == NIX_INTF_RX && !skip_base_rule) { - mbox = mbox_get(npc->mbox); + if ((pst->is_vf || pst->flow->is_rep_vf) && pst->flow->nix_intf == NIX_INTF_RX && + !skip_base_rule) { + if (pst->flow->has_rep) + mbox = mbox_get(pst->flow->rep_mbox); + else + mbox = mbox_get(npc->mbox); (void)mbox_alloc_msg_npc_read_base_steer_rule(mbox); rc = mbox_process_msg(mbox, (void *)&base_rule_rsp); if (rc) { diff --git a/drivers/common/cnxk/roc_npc_parse.c b/drivers/common/cnxk/roc_npc_parse.c index 571d6b8e5d..0aaf86c768 100644 --- a/drivers/common/cnxk/roc_npc_parse.c +++ b/drivers/common/cnxk/roc_npc_parse.c @@ -35,11 +35,35 @@ npc_parse_mark_item(struct npc_parse_state *pst) return 0; } +int +npc_parse_port_representor_id(struct npc_parse_state *pst) +{ + if (pst->pattern->type != ROC_NPC_ITEM_TYPE_REPRESENTED_PORT) + return 0; + + pst->pattern++; + + return 0; +} + +int +npc_parse_represented_port_id(struct npc_parse_state *pst) +{ + if (pst->pattern->type != ROC_NPC_ITEM_TYPE_REPRESENTED_PORT) + return 0; + + if (pst->flow->nix_intf != NIX_INTF_RX) + return -EINVAL; + + pst->pattern++; + + return 0; +} + static int npc_flow_raw_item_prepare(const struct roc_npc_flow_item_raw *raw_spec, const struct roc_npc_flow_item_raw *raw_mask, - struct npc_parse_item_info *info, uint8_t *spec_buf, - uint8_t *mask_buf) + struct npc_parse_item_info *info, uint8_t *spec_buf, uint8_t *mask_buf) { memset(spec_buf, 0, NPC_MAX_RAW_ITEM_LEN); diff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h index 50b62b1244..069c625911 100644 --- a/drivers/common/cnxk/roc_npc_priv.h +++ b/drivers/common/cnxk/roc_npc_priv.h @@ -457,6 +457,8 @@ int npc_mask_is_supported(const char *mask, const char *hw_mask, int len); int npc_parse_item_basic(const struct roc_npc_item_info *item, struct npc_parse_item_info *info); int npc_parse_meta_items(struct npc_parse_state *pst); int npc_parse_mark_item(struct npc_parse_state *pst); +int npc_parse_port_representor_id(struct npc_parse_state *pst); +int 
npc_parse_represented_port_id(struct npc_parse_state *pst); int npc_parse_pre_l2(struct npc_parse_state *pst); int npc_parse_higig2_hdr(struct npc_parse_state *pst); int npc_parse_cpt_hdr(struct npc_parse_state *pst); diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c index 4deccd1a67..b7e6ebc2e1 100644 --- a/drivers/net/cnxk/cnxk_flow.c +++ b/drivers/net/cnxk/cnxk_flow.c @@ -594,7 +594,7 @@ cnxk_flow_dev_dump(struct rte_eth_dev *eth_dev, struct rte_flow *flow, return -EINVAL; } - roc_npc_flow_dump(file, npc); + roc_npc_flow_dump(file, npc, -1); return 0; } From patchwork Sun Mar 3 17:38:29 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137833 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9E72543C3E; Sun, 3 Mar 2024 18:41:04 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id DFAFC4366A; Sun, 3 Mar 2024 18:39:38 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id E2104436A2 for ; Sun, 3 Mar 2024 18:39:37 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id 423EeIWs031240 for ; Sun, 3 Mar 2024 09:39:37 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h= from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-type; s=pfpt0220; bh=Xtc/4XQW2D4yxCrslcoDz fNuH9m7jvFAMzPa4cNaSwY=; b=PVtcwcUanYmwHoEbPI3AgQMP+y2f/y+7DUzpv JzD5XZzlFh7Vr23DTkKvwt1w/uXbQRulV/q4C4v2365WHZv+UPBsDV0uAc4xqlyE 8HpPOsn4giHc6r102jxgEpSluz/1D5iuQ1Pgoe5aeJmZ4RywnM1+KBaZYtIep2Gm CBaSI92+Yx1U9VtjcUGO2H86iPq2inCd9gsBF+ufmMpI4vTakc/JvllB4htuP2ic lYNMsWyoHqD2vvefcsagKAPsnOhS78dyx9D7pSnvlN7gfo87eraA0ci4lmnscOOc q0Xc80G+K6dCGL0wrom734svtYwMg3QwpLbt18NJI+1s33y5g== Received: from dc6wp-exch02.marvell.com ([4.21.29.225]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3wm2bptrv8-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT) for ; Sun, 03 Mar 2024 09:39:36 -0800 (PST) Received: from DC6WP-EXCH02.marvell.com (10.76.176.209) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.1258.12; Sun, 3 Mar 2024 09:39:35 -0800 Received: from maili.marvell.com (10.69.176.80) by DC6WP-EXCH02.marvell.com (10.76.176.209) with Microsoft SMTP Server id 15.2.1258.12 via Frontend Transport; Sun, 3 Mar 2024 09:39:35 -0800 Received: from localhost.localdomain (unknown [10.29.52.211]) by maili.marvell.com (Postfix) with ESMTP id EF91B3F71A8; Sun, 3 Mar 2024 09:39:32 -0800 (PST) From: Harman Kalra To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao , Harman Kalra CC: , Satheesh Paul Subject: [PATCH v6 19/23] net/cnxk: add represented port pattern and action Date: Sun, 3 Mar 2024 23:08:29 +0530 Message-ID: <20240303173833.100039-20-hkalra@marvell.com> X-Mailer: git-send-email 2.18.0 In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: E25_Lvcqrgf3jwwH6bAp8Ltnfns6S5PB X-Proofpoint-ORIG-GUID: 
E25_Lvcqrgf3jwwH6bAp8Ltnfns6S5PB X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.1011,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2024-03-03_08,2024-03-01_03,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Kiran Kumar K Adding support for represented_port item matching and action. Signed-off-by: Kiran Kumar K Signed-off-by: Satheesh Paul Signed-off-by: Harman Kalra --- doc/guides/nics/cnxk.rst | 1 + doc/guides/nics/features/cnxk.ini | 1 + doc/guides/nics/features/cnxk_vec.ini | 2 + doc/guides/nics/features/cnxk_vf.ini | 2 + drivers/net/cnxk/cnxk_flow.c | 107 ++++++++++++++------------ 5 files changed, 63 insertions(+), 50 deletions(-) diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst index 93d6db5cb0..e156340cf0 100644 --- a/doc/guides/nics/cnxk.rst +++ b/doc/guides/nics/cnxk.rst @@ -38,6 +38,7 @@ Features of the CNXK Ethdev PMD are: - Ingress meter support - Queue based priority flow control support - Port representors +- Represented port pattern matching and action Prerequisites ------------- diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini index 94e7a6ab8d..8957eba46b 100644 --- a/doc/guides/nics/features/cnxk.ini +++ b/doc/guides/nics/features/cnxk.ini @@ -73,6 +73,7 @@ mpls = Y nvgre = Y pppoes = Y raw = Y +represented_port = Y sctp = Y tcp = Y tx_queue = Y diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini index 6086b3d73f..193fd14fbb 100644 --- a/doc/guides/nics/features/cnxk_vec.ini +++ b/doc/guides/nics/features/cnxk_vec.ini @@ -67,6 +67,7 @@ mpls = Y nvgre = Y pppoes = Y raw = Y +represented_port = Y sctp = Y tcp = Y tx_queue = Y @@ -86,6 +87,7 @@ of_set_vlan_pcp = Y of_set_vlan_vid = Y pf = Y queue = Y +represented_port = Y rss = Y security = Y vf = Y diff --git a/doc/guides/nics/features/cnxk_vf.ini b/doc/guides/nics/features/cnxk_vf.ini index 53aa2a3d0c..7ae49b8107 100644 --- a/doc/guides/nics/features/cnxk_vf.ini +++ b/doc/guides/nics/features/cnxk_vf.ini @@ -64,6 +64,7 @@ mpls = Y nvgre = Y pppoes = Y raw = Y +represented_port = Y sctp = Y tcp = Y tx_queue = Y @@ -85,6 +86,7 @@ of_set_vlan_pcp = Y of_set_vlan_vid = Y pf = Y queue = Y +represented_port = Y rss = Y security = Y skip_cman = Y diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c index b7e6ebc2e1..7b684eb759 100644 --- a/drivers/net/cnxk/cnxk_flow.c +++ b/drivers/net/cnxk/cnxk_flow.c @@ -4,67 +4,48 @@ #include const struct cnxk_rte_flow_term_info term[] = { - [RTE_FLOW_ITEM_TYPE_ETH] = {ROC_NPC_ITEM_TYPE_ETH, - sizeof(struct rte_flow_item_eth)}, - [RTE_FLOW_ITEM_TYPE_VLAN] = {ROC_NPC_ITEM_TYPE_VLAN, - sizeof(struct rte_flow_item_vlan)}, - [RTE_FLOW_ITEM_TYPE_E_TAG] = {ROC_NPC_ITEM_TYPE_E_TAG, - sizeof(struct rte_flow_item_e_tag)}, - [RTE_FLOW_ITEM_TYPE_IPV4] = {ROC_NPC_ITEM_TYPE_IPV4, - sizeof(struct rte_flow_item_ipv4)}, - [RTE_FLOW_ITEM_TYPE_IPV6] = {ROC_NPC_ITEM_TYPE_IPV6, - sizeof(struct rte_flow_item_ipv6)}, - [RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT] = { - ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT, - sizeof(struct rte_flow_item_ipv6_frag_ext)}, - [RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4] = { - ROC_NPC_ITEM_TYPE_ARP_ETH_IPV4, - sizeof(struct rte_flow_item_arp_eth_ipv4)}, - [RTE_FLOW_ITEM_TYPE_MPLS] = {ROC_NPC_ITEM_TYPE_MPLS, - sizeof(struct rte_flow_item_mpls)}, - [RTE_FLOW_ITEM_TYPE_ICMP] = 
{ROC_NPC_ITEM_TYPE_ICMP, - sizeof(struct rte_flow_item_icmp)}, - [RTE_FLOW_ITEM_TYPE_UDP] = {ROC_NPC_ITEM_TYPE_UDP, - sizeof(struct rte_flow_item_udp)}, - [RTE_FLOW_ITEM_TYPE_TCP] = {ROC_NPC_ITEM_TYPE_TCP, - sizeof(struct rte_flow_item_tcp)}, - [RTE_FLOW_ITEM_TYPE_SCTP] = {ROC_NPC_ITEM_TYPE_SCTP, - sizeof(struct rte_flow_item_sctp)}, - [RTE_FLOW_ITEM_TYPE_ESP] = {ROC_NPC_ITEM_TYPE_ESP, - sizeof(struct rte_flow_item_esp)}, - [RTE_FLOW_ITEM_TYPE_GRE] = {ROC_NPC_ITEM_TYPE_GRE, - sizeof(struct rte_flow_item_gre)}, - [RTE_FLOW_ITEM_TYPE_NVGRE] = {ROC_NPC_ITEM_TYPE_NVGRE, - sizeof(struct rte_flow_item_nvgre)}, - [RTE_FLOW_ITEM_TYPE_VXLAN] = {ROC_NPC_ITEM_TYPE_VXLAN, - sizeof(struct rte_flow_item_vxlan)}, - [RTE_FLOW_ITEM_TYPE_GTPC] = {ROC_NPC_ITEM_TYPE_GTPC, - sizeof(struct rte_flow_item_gtp)}, - [RTE_FLOW_ITEM_TYPE_GTPU] = {ROC_NPC_ITEM_TYPE_GTPU, - sizeof(struct rte_flow_item_gtp)}, + [RTE_FLOW_ITEM_TYPE_ETH] = {ROC_NPC_ITEM_TYPE_ETH, sizeof(struct rte_flow_item_eth)}, + [RTE_FLOW_ITEM_TYPE_VLAN] = {ROC_NPC_ITEM_TYPE_VLAN, sizeof(struct rte_flow_item_vlan)}, + [RTE_FLOW_ITEM_TYPE_E_TAG] = {ROC_NPC_ITEM_TYPE_E_TAG, sizeof(struct rte_flow_item_e_tag)}, + [RTE_FLOW_ITEM_TYPE_IPV4] = {ROC_NPC_ITEM_TYPE_IPV4, sizeof(struct rte_flow_item_ipv4)}, + [RTE_FLOW_ITEM_TYPE_IPV6] = {ROC_NPC_ITEM_TYPE_IPV6, sizeof(struct rte_flow_item_ipv6)}, + [RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT] = {ROC_NPC_ITEM_TYPE_IPV6_FRAG_EXT, + sizeof(struct rte_flow_item_ipv6_frag_ext)}, + [RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4] = {ROC_NPC_ITEM_TYPE_ARP_ETH_IPV4, + sizeof(struct rte_flow_item_arp_eth_ipv4)}, + [RTE_FLOW_ITEM_TYPE_MPLS] = {ROC_NPC_ITEM_TYPE_MPLS, sizeof(struct rte_flow_item_mpls)}, + [RTE_FLOW_ITEM_TYPE_ICMP] = {ROC_NPC_ITEM_TYPE_ICMP, sizeof(struct rte_flow_item_icmp)}, + [RTE_FLOW_ITEM_TYPE_UDP] = {ROC_NPC_ITEM_TYPE_UDP, sizeof(struct rte_flow_item_udp)}, + [RTE_FLOW_ITEM_TYPE_TCP] = {ROC_NPC_ITEM_TYPE_TCP, sizeof(struct rte_flow_item_tcp)}, + [RTE_FLOW_ITEM_TYPE_SCTP] = {ROC_NPC_ITEM_TYPE_SCTP, sizeof(struct rte_flow_item_sctp)}, + [RTE_FLOW_ITEM_TYPE_ESP] = {ROC_NPC_ITEM_TYPE_ESP, sizeof(struct rte_flow_item_esp)}, + [RTE_FLOW_ITEM_TYPE_GRE] = {ROC_NPC_ITEM_TYPE_GRE, sizeof(struct rte_flow_item_gre)}, + [RTE_FLOW_ITEM_TYPE_NVGRE] = {ROC_NPC_ITEM_TYPE_NVGRE, sizeof(struct rte_flow_item_nvgre)}, + [RTE_FLOW_ITEM_TYPE_VXLAN] = {ROC_NPC_ITEM_TYPE_VXLAN, sizeof(struct rte_flow_item_vxlan)}, + [RTE_FLOW_ITEM_TYPE_GTPC] = {ROC_NPC_ITEM_TYPE_GTPC, sizeof(struct rte_flow_item_gtp)}, + [RTE_FLOW_ITEM_TYPE_GTPU] = {ROC_NPC_ITEM_TYPE_GTPU, sizeof(struct rte_flow_item_gtp)}, [RTE_FLOW_ITEM_TYPE_GENEVE] = {ROC_NPC_ITEM_TYPE_GENEVE, sizeof(struct rte_flow_item_geneve)}, - [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = { - ROC_NPC_ITEM_TYPE_VXLAN_GPE, - sizeof(struct rte_flow_item_vxlan_gpe)}, + [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = {ROC_NPC_ITEM_TYPE_VXLAN_GPE, + sizeof(struct rte_flow_item_vxlan_gpe)}, [RTE_FLOW_ITEM_TYPE_IPV6_EXT] = {ROC_NPC_ITEM_TYPE_IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)}, [RTE_FLOW_ITEM_TYPE_VOID] = {ROC_NPC_ITEM_TYPE_VOID, 0}, [RTE_FLOW_ITEM_TYPE_ANY] = {ROC_NPC_ITEM_TYPE_ANY, 0}, - [RTE_FLOW_ITEM_TYPE_GRE_KEY] = {ROC_NPC_ITEM_TYPE_GRE_KEY, - sizeof(uint32_t)}, + [RTE_FLOW_ITEM_TYPE_GRE_KEY] = {ROC_NPC_ITEM_TYPE_GRE_KEY, sizeof(uint32_t)}, [RTE_FLOW_ITEM_TYPE_HIGIG2] = {ROC_NPC_ITEM_TYPE_HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)}, - [RTE_FLOW_ITEM_TYPE_RAW] = {ROC_NPC_ITEM_TYPE_RAW, - sizeof(struct rte_flow_item_raw)}, - [RTE_FLOW_ITEM_TYPE_MARK] = {ROC_NPC_ITEM_TYPE_MARK, - sizeof(struct rte_flow_item_mark)}, + 
[RTE_FLOW_ITEM_TYPE_RAW] = {ROC_NPC_ITEM_TYPE_RAW, sizeof(struct rte_flow_item_raw)}, + [RTE_FLOW_ITEM_TYPE_MARK] = {ROC_NPC_ITEM_TYPE_MARK, sizeof(struct rte_flow_item_mark)}, [RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT] = {ROC_NPC_ITEM_TYPE_IPV6_ROUTING_EXT, - sizeof(struct rte_flow_item_ipv6_routing_ext)}, + sizeof(struct rte_flow_item_ipv6_routing_ext)}, [RTE_FLOW_ITEM_TYPE_TX_QUEUE] = {ROC_NPC_ITEM_TYPE_TX_QUEUE, - sizeof(struct rte_flow_item_tx_queue)}, + sizeof(struct rte_flow_item_tx_queue)}, + [RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT] = {ROC_NPC_ITEM_TYPE_REPRESENTED_PORT, + sizeof(struct rte_flow_item_ethdev)}, [RTE_FLOW_ITEM_TYPE_PPPOES] = {ROC_NPC_ITEM_TYPE_PPPOES, - sizeof(struct rte_flow_item_pppoe)}}; + sizeof(struct rte_flow_item_pppoe)} +}; static int npc_rss_action_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, @@ -372,6 +353,11 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg, uint16_t *dst_pf_func) { + struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); + const struct rte_flow_item_ethdev *rep_eth_dev; + struct rte_eth_dev *portid_eth_dev; + char if_name[RTE_ETH_NAME_MAX_LEN]; + struct cnxk_eth_dev *hw_dst; int i = 0; in_attr->priority = attr->priority; @@ -384,6 +370,27 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr in_pattern[i].mask = pattern->mask; in_pattern[i].type = term[pattern->type].item_type; in_pattern[i].size = term[pattern->type].item_size; + if (pattern->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) { + rep_eth_dev = (const struct rte_flow_item_ethdev *)pattern->spec; + if (rte_eth_dev_get_name_by_port(rep_eth_dev->port_id, if_name)) { + plt_err("Name not found for output port id"); + return -EINVAL; + } + portid_eth_dev = rte_eth_dev_allocated(if_name); + if (!portid_eth_dev) { + plt_err("eth_dev not found for output port id"); + return -EINVAL; + } + if (strcmp(portid_eth_dev->device->driver->name, + eth_dev->device->driver->name) != 0) { + plt_err("Output port not under same driver"); + return -EINVAL; + } + hw_dst = portid_eth_dev->data->dev_private; + dev->npc.rep_npc = &hw_dst->npc; + dev->npc.rep_port_id = rep_eth_dev->port_id; + dev->npc.rep_pf_func = hw_dst->npc.pf_func; + } pattern++; i++; } From patchwork Sun Mar 3 17:38:30 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137834 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D726B43C3E; Sun, 3 Mar 2024 18:41:13 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id AA18543668; Sun, 3 Mar 2024 18:39:42 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id C9A4243668 for ; Sun, 3 Mar 2024 18:39:40 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id 423GWjXN019709 for ; Sun, 3 Mar 2024 09:39:40 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h= from:to:cc:subject:date:message-id:in-reply-to:references :mime-version:content-type; s=pfpt0220; bh=pcrcv2I7Kbutgh0GRKAUS VZ72NHY2uAJW52EQf9viFo=; 
From: Harman Kalra
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra
Subject: [PATCH v6 20/23] net/cnxk: add representor port pattern and action
Date: Sun, 3 Mar 2024 23:08:30 +0530
Message-ID: <20240303173833.100039-21-hkalra@marvell.com>
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com>
List-Id: DPDK patches and discussions

Add support for the representor port as a flow pattern item and as a flow action.
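For context, a minimal application-level sketch of the new capability (illustrative only, not part of the patch; the port numbers and the assumption that ethdev port 1 is a representor are hypothetical):

	#include <rte_flow.h>

	/* Match packets arriving from the port represented by ethdev port 1
	 * and redirect them to that representor. Per the driver changes below,
	 * the PMD resolves port 1 to its eswitch base device and internally
	 * installs a MARK action carrying the tunnel-decap hint. */
	static struct rte_flow *
	redirect_to_representor(uint16_t cnxk_port, struct rte_flow_error *err)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item_ethdev src = { .port_id = 1 };
		struct rte_flow_action_ethdev dst = { .port_id = 1 };
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT, .spec = &src },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, .conf = &dst },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		return rte_flow_create(cnxk_port, &attr, pattern, actions, err);
	}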
Signed-off-by: Harman Kalra --- doc/guides/nics/cnxk.rst | 1 + doc/guides/nics/features/cnxk.ini | 2 + doc/guides/nics/features/cnxk_vec.ini | 2 + doc/guides/nics/features/cnxk_vf.ini | 2 + drivers/net/cnxk/cnxk_flow.c | 219 +++++++++++++++++++++++--- drivers/net/cnxk/cnxk_rep.h | 14 ++ 6 files changed, 214 insertions(+), 26 deletions(-) diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst index e156340cf0..e8c64ef34c 100644 --- a/doc/guides/nics/cnxk.rst +++ b/doc/guides/nics/cnxk.rst @@ -39,6 +39,7 @@ Features of the CNXK Ethdev PMD are: - Queue based priority flow control support - Port representors - Represented port pattern matching and action +- Port representor pattern matching and action Prerequisites ------------- diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini index 8957eba46b..9603d2566e 100644 --- a/doc/guides/nics/features/cnxk.ini +++ b/doc/guides/nics/features/cnxk.ini @@ -95,6 +95,7 @@ of_set_vlan_pcp = Y of_set_vlan_vid = Y pf = Y port_id = Y +port_representor = Y queue = Y represented_port = Y rss = Y @@ -102,3 +103,4 @@ sample = Y security = Y skip_cman = Y vf = Y +vxlan_decap = I diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini index 193fd14fbb..96289059ec 100644 --- a/doc/guides/nics/features/cnxk_vec.ini +++ b/doc/guides/nics/features/cnxk_vec.ini @@ -86,8 +86,10 @@ of_push_vlan = Y of_set_vlan_pcp = Y of_set_vlan_vid = Y pf = Y +port_representor = Y queue = Y represented_port = Y rss = Y security = Y vf = Y +vxlan_decap = I diff --git a/doc/guides/nics/features/cnxk_vf.ini b/doc/guides/nics/features/cnxk_vf.ini index 7ae49b8107..850c49c5be 100644 --- a/doc/guides/nics/features/cnxk_vf.ini +++ b/doc/guides/nics/features/cnxk_vf.ini @@ -85,9 +85,11 @@ of_push_vlan = Y of_set_vlan_pcp = Y of_set_vlan_vid = Y pf = Y +port_representor = Y queue = Y represented_port = Y rss = Y security = Y skip_cman = Y vf = Y +vxlan_decap = I diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c index 7b684eb759..2cd88f0334 100644 --- a/drivers/net/cnxk/cnxk_flow.c +++ b/drivers/net/cnxk/cnxk_flow.c @@ -2,6 +2,7 @@ * Copyright(C) 2021 Marvell. */ #include +#include const struct cnxk_rte_flow_term_info term[] = { [RTE_FLOW_ITEM_TYPE_ETH] = {ROC_NPC_ITEM_TYPE_ETH, sizeof(struct rte_flow_item_eth)}, @@ -185,11 +186,44 @@ roc_npc_parse_sample_subaction(struct rte_eth_dev *eth_dev, const struct rte_flo return 0; } +static int +representor_portid_action(struct roc_npc_action *in_actions, struct rte_eth_dev *portid_eth_dev, + uint16_t *dst_pf_func, uint8_t has_tunnel_pattern, int *act_cnt) +{ + struct rte_eth_dev *rep_eth_dev = portid_eth_dev; + struct rte_flow_action_mark *act_mark; + struct cnxk_rep_dev *rep_dev; + /* For inserting an action in the list */ + int i = *act_cnt; + + rep_dev = cnxk_rep_pmd_priv(rep_eth_dev); + *dst_pf_func = rep_dev->hw_func; + + /* Add Mark action */ + i++; + act_mark = plt_zmalloc(sizeof(struct rte_flow_action_mark), 0); + if (!act_mark) { + plt_err("Error allocation memory"); + return -ENOMEM; + } + + /* Mark ID format: (tunnel type - VxLAN, Geneve << 6) | Tunnel decap */ + act_mark->id = has_tunnel_pattern ? 
((has_tunnel_pattern << 6) | 5) : 1; + in_actions[i].type = ROC_NPC_ACTION_TYPE_MARK; + in_actions[i].conf = (struct rte_flow_action_mark *)act_mark; + + *act_cnt = i; + plt_rep_dbg("Rep port %d ID %d mark ID is %d rep_dev->hw_func 0x%x", rep_dev->port_id, + rep_dev->rep_id, act_mark->id, rep_dev->hw_func); + + return 0; +} + static int cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, const struct rte_flow_action actions[], struct roc_npc_action in_actions[], struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg, - uint16_t *dst_pf_func) + uint16_t *dst_pf_func, uint8_t has_tunnel_pattern) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); const struct rte_flow_action_queue *act_q = NULL; @@ -238,6 +272,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: + case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: case RTE_FLOW_ACTION_TYPE_PORT_ID: in_actions[i].type = ROC_NPC_ACTION_TYPE_PORT_ID; in_actions[i].conf = actions->conf; @@ -256,14 +291,27 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, plt_err("eth_dev not found for output port id"); goto err_exit; } - if (strcmp(portid_eth_dev->device->driver->name, - eth_dev->device->driver->name) != 0) { - plt_err("Output port not under same driver"); - goto err_exit; + + if (cnxk_ethdev_is_representor(if_name)) { + plt_rep_dbg("Representor port %d act port %d", port_act->id, + act_ethdev->port_id); + if (representor_portid_action(in_actions, portid_eth_dev, + dst_pf_func, has_tunnel_pattern, + &i)) { + plt_err("Representor port action set failed"); + goto err_exit; + } + } else { + if (strcmp(portid_eth_dev->device->driver->name, + eth_dev->device->driver->name) != 0) { + plt_err("Output port not under same driver"); + goto err_exit; + } + + hw_dst = portid_eth_dev->data->dev_private; + roc_npc_dst = &hw_dst->npc; + *dst_pf_func = roc_npc_dst->pf_func; } - hw_dst = portid_eth_dev->data->dev_private; - roc_npc_dst = &hw_dst->npc; - *dst_pf_func = roc_npc_dst->pf_func; break; case RTE_FLOW_ACTION_TYPE_QUEUE: @@ -324,6 +372,8 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, in_actions[i].type = ROC_NPC_ACTION_TYPE_SAMPLE; in_actions[i].conf = in_sample_actions; break; + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + continue; default: plt_npc_dbg("Action is not supported = %d", actions->type); goto err_exit; @@ -346,12 +396,8 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, } static int -cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct roc_npc_attr *in_attr, struct roc_npc_item_info in_pattern[], - struct roc_npc_action in_actions[], - struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg, - uint16_t *dst_pf_func) +cnxk_map_pattern(struct rte_eth_dev *eth_dev, const struct rte_flow_item pattern[], + struct roc_npc_item_info in_pattern[], uint8_t *has_tunnel_pattern) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); const struct rte_flow_item_ethdev *rep_eth_dev; @@ -360,10 +406,6 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr struct cnxk_eth_dev *hw_dst; int i = 0; - in_attr->priority = attr->priority; - in_attr->ingress = attr->ingress; - in_attr->egress = attr->egress; - while (pattern->type != RTE_FLOW_ITEM_TYPE_END) { in_pattern[i].spec = 
pattern->spec; in_pattern[i].last = pattern->last; @@ -374,30 +416,81 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr rep_eth_dev = (const struct rte_flow_item_ethdev *)pattern->spec; if (rte_eth_dev_get_name_by_port(rep_eth_dev->port_id, if_name)) { plt_err("Name not found for output port id"); - return -EINVAL; + goto fail; } portid_eth_dev = rte_eth_dev_allocated(if_name); if (!portid_eth_dev) { plt_err("eth_dev not found for output port id"); - return -EINVAL; + goto fail; } if (strcmp(portid_eth_dev->device->driver->name, eth_dev->device->driver->name) != 0) { plt_err("Output port not under same driver"); - return -EINVAL; + goto fail; + } + if (cnxk_ethdev_is_representor(if_name)) { + /* Case where represented port not part of same + * app and represented by a representor port. + */ + struct cnxk_rep_dev *rep_dev; + struct cnxk_eswitch_dev *eswitch_dev; + + rep_dev = cnxk_rep_pmd_priv(portid_eth_dev); + eswitch_dev = rep_dev->parent_dev; + dev->npc.rep_npc = &eswitch_dev->npc; + dev->npc.rep_port_id = rep_eth_dev->port_id; + dev->npc.rep_pf_func = rep_dev->hw_func; + plt_rep_dbg("Represented port %d act port %d rep_dev->hw_func 0x%x", + rep_eth_dev->port_id, eth_dev->data->port_id, + rep_dev->hw_func); + } else { + /* Case where represented port part of same app + * as PF. + */ + hw_dst = portid_eth_dev->data->dev_private; + dev->npc.rep_npc = &hw_dst->npc; + dev->npc.rep_port_id = rep_eth_dev->port_id; + dev->npc.rep_pf_func = hw_dst->npc.pf_func; } - hw_dst = portid_eth_dev->data->dev_private; - dev->npc.rep_npc = &hw_dst->npc; - dev->npc.rep_port_id = rep_eth_dev->port_id; - dev->npc.rep_pf_func = hw_dst->npc.pf_func; } + + if (pattern->type == RTE_FLOW_ITEM_TYPE_VXLAN || + pattern->type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || + pattern->type == RTE_FLOW_ITEM_TYPE_GRE) + *has_tunnel_pattern = pattern->type; + pattern++; i++; } in_pattern[i].type = ROC_NPC_ITEM_TYPE_END; + return 0; +fail: + return -EINVAL; +} + +static int +cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], const struct rte_flow_action actions[], + struct roc_npc_attr *in_attr, struct roc_npc_item_info in_pattern[], + struct roc_npc_action in_actions[], + struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg, + uint16_t *dst_pf_func) +{ + uint8_t has_tunnel_pattern = 0; + int rc; + + in_attr->priority = attr->priority; + in_attr->ingress = attr->ingress; + in_attr->egress = attr->egress; + + rc = cnxk_map_pattern(eth_dev, pattern, in_pattern, &has_tunnel_pattern); + if (rc) { + plt_err("Failed to map pattern list"); + return rc; + } return cnxk_map_actions(eth_dev, attr, actions, in_actions, in_sample_actions, flowkey_cfg, - dst_pf_func); + dst_pf_func, has_tunnel_pattern); } static int @@ -461,6 +554,7 @@ cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, int rc; memset(&in_sample_action, 0, sizeof(in_sample_action)); + memset(&in_attr, 0, sizeof(struct roc_npc_attr)); rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions, &in_sample_action, &npc->flowkey_cfg_state, &dst_pf_func); if (rc) { @@ -649,6 +743,75 @@ cnxk_flow_get_aged_flows(struct rte_eth_dev *eth_dev, void **context, return cnt; } +static int +cnxk_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev, struct rte_flow_tunnel *tunnel, + struct rte_flow_action **pmd_actions, uint32_t *num_of_actions, + __rte_unused struct rte_flow_error *err) +{ + struct 
rte_flow_action *nfp_action; + + nfp_action = rte_zmalloc("nfp_tun_action", sizeof(struct rte_flow_action), 0); + if (nfp_action == NULL) { + plt_err("Alloc memory for nfp tunnel action failed."); + return -ENOMEM; + } + + if (tunnel->is_ipv6) + nfp_action->conf = (void *)~0; + + switch (tunnel->type) { + case RTE_FLOW_ITEM_TYPE_VXLAN: + nfp_action->type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP; + *pmd_actions = nfp_action; + *num_of_actions = 1; + break; + default: + *pmd_actions = NULL; + *num_of_actions = 0; + rte_free(nfp_action); + break; + } + + return 0; +} + +static int +cnxk_flow_tunnel_action_decap_release(__rte_unused struct rte_eth_dev *dev, + struct rte_flow_action *pmd_actions, uint32_t num_of_actions, + __rte_unused struct rte_flow_error *err) +{ + uint32_t i; + struct rte_flow_action *nfp_action; + + for (i = 0; i < num_of_actions; i++) { + nfp_action = &pmd_actions[i]; + nfp_action->conf = NULL; + rte_free(nfp_action); + } + + return 0; +} + +static int +cnxk_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_flow_tunnel *tunnel, + __rte_unused struct rte_flow_item **pmd_items, uint32_t *num_of_items, + __rte_unused struct rte_flow_error *err) +{ + *num_of_items = 0; + + return 0; +} + +static int +cnxk_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev, + __rte_unused struct rte_flow_item *pmd_items, + __rte_unused uint32_t num_of_items, + __rte_unused struct rte_flow_error *err) +{ + return 0; +} + struct rte_flow_ops cnxk_flow_ops = { .validate = cnxk_flow_validate, .flush = cnxk_flow_flush, @@ -656,4 +819,8 @@ struct rte_flow_ops cnxk_flow_ops = { .isolate = cnxk_flow_isolate, .dev_dump = cnxk_flow_dev_dump, .get_aged_flows = cnxk_flow_get_aged_flows, + .tunnel_match = cnxk_flow_tunnel_match, + .tunnel_item_release = cnxk_flow_tunnel_item_release, + .tunnel_decap_set = cnxk_flow_tunnel_decap_set, + .tunnel_action_decap_release = cnxk_flow_tunnel_action_decap_release, }; diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h index 51a2e97624..ab8b3fb152 100644 --- a/drivers/net/cnxk/cnxk_rep.h +++ b/drivers/net/cnxk/cnxk_rep.h @@ -1,6 +1,9 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(C) 2024 Marvell. 
 */
+
+#include <regex.h>
+
 #include <cnxk_eswitch.h>
@@ -91,6 +94,20 @@ cnxk_rep_pool_buffer_stats(struct rte_mempool *pool)
 		    pool->size, rte_mempool_in_use_count(pool), rte_mempool_avail_count(pool));
 }
 
+/* Representor ethdev names are of the form "net_<device>_representor_<id>" */
+static inline int
+cnxk_ethdev_is_representor(const char *if_name)
+{
+	regex_t regex;
+	int val;
+
+	if (regcomp(&regex, "net_.*_representor_.*", 0) != 0)
+		return 0;
+	val = regexec(&regex, if_name, 0, NULL, 0);
+	regfree(&regex);
+	return (val == 0);
+}
+
 /* Prototypes */
 int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev);
 int cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev);

From patchwork Sun Mar 3 17:38:31 2024
X-Patchwork-Submitter: Harman Kalra
X-Patchwork-Id: 137835
X-Patchwork-Delegate: jerinj@marvell.com
From: Harman Kalra
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra
Subject: [PATCH v6 21/23] net/cnxk: generalise flow operation APIs
Date: Sun, 3 Mar 2024 23:08:31 +0530
Message-ID: <20240303173833.100039-22-hkalra@marvell.com>
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com>
MIME-Version: 1.0
X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.1011,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2024-03-03_08,2024-03-01_03,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Flow operations can be performed on cnxk ports as well as representor ports. Since representor ports are not cnxk ports but have eswitch as base device underneath, special handling is required to align with base infra. Introducing a flag to generic flow APIs to discriminate if the operation request made on normal or representor ports. Signed-off-by: Harman Kalra --- doc/guides/nics/features/cnxk.ini | 1 + doc/guides/nics/features/cnxk_vec.ini | 1 + doc/guides/nics/features/cnxk_vf.ini | 1 + drivers/net/cnxk/cnxk_flow.c | 556 ++++++++++++++++++++++---- drivers/net/cnxk/cnxk_flow.h | 18 + 5 files changed, 492 insertions(+), 85 deletions(-) diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini index 9603d2566e..7d85fb9f93 100644 --- a/doc/guides/nics/features/cnxk.ini +++ b/doc/guides/nics/features/cnxk.ini @@ -71,6 +71,7 @@ ipv6_routing_ext = Y mark = Y mpls = Y nvgre = Y +port_representor = Y pppoes = Y raw = Y represented_port = Y diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini index 96289059ec..d925933b34 100644 --- a/doc/guides/nics/features/cnxk_vec.ini +++ b/doc/guides/nics/features/cnxk_vec.ini @@ -65,6 +65,7 @@ ipv6_frag_ext = Y mark = Y mpls = Y nvgre = Y +port_representor = Y pppoes = Y raw = Y represented_port = Y diff --git a/doc/guides/nics/features/cnxk_vf.ini b/doc/guides/nics/features/cnxk_vf.ini index 850c49c5be..0da0106fa3 100644 --- a/doc/guides/nics/features/cnxk_vf.ini +++ b/doc/guides/nics/features/cnxk_vf.ini @@ -62,6 +62,7 @@ ipv6_routing_ext = Y mark = Y mpls = Y nvgre = Y +port_representor = Y pppoes = Y raw = Y represented_port = Y diff --git a/drivers/net/cnxk/cnxk_flow.c b/drivers/net/cnxk/cnxk_flow.c index 2cd88f0334..d3c20e8315 100644 --- a/drivers/net/cnxk/cnxk_flow.c +++ b/drivers/net/cnxk/cnxk_flow.c @@ -4,6 +4,7 @@ #include #include +#define IS_REP_BIT 7 const struct cnxk_rte_flow_term_info term[] = { [RTE_FLOW_ITEM_TYPE_ETH] = {ROC_NPC_ITEM_TYPE_ETH, sizeof(struct rte_flow_item_eth)}, [RTE_FLOW_ITEM_TYPE_VLAN] = {ROC_NPC_ITEM_TYPE_VLAN, sizeof(struct rte_flow_item_vlan)}, @@ -186,17 +187,162 @@ roc_npc_parse_sample_subaction(struct rte_eth_dev *eth_dev, const struct rte_flo return 0; } +static int +representor_rep_portid_action(struct roc_npc_action *in_actions, struct rte_eth_dev *eth_dev, + struct rte_eth_dev *portid_eth_dev, + enum rte_flow_action_type act_type, uint8_t rep_pattern, + uint16_t *dst_pf_func, bool is_rep, uint64_t *free_allocs, + int *act_cnt) +{ + struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); + struct rte_eth_dev *rep_eth_dev = portid_eth_dev; + struct rte_flow_action_of_set_vlan_vid *vlan_vid; + struct rte_flow_action_of_set_vlan_pcp *vlan_pcp; + struct rte_flow_action_of_push_vlan *push_vlan; + struct rte_flow_action_queue *act_q = NULL; + struct cnxk_rep_dev *rep_dev; + struct roc_npc *npc; + uint16_t vlan_tci; + int j = 0; + + /* For inserting an action in the list */ + int i = *act_cnt; + + rep_dev = cnxk_rep_pmd_priv(rep_eth_dev); + if (!is_rep) { + dev = cnxk_eth_pmd_priv(eth_dev); + npc = &dev->npc; + } else { + npc = &rep_dev->parent_dev->npc; + } + if (rep_pattern >> 
IS_REP_BIT) { /* Check for normal/representor port as action */ + if ((rep_pattern & 0x7f) == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) { + /* Case: Repr port pattern -> Default TX rule -> LBK -> + * Pattern RX LBK rule hit -> Action: send to new pf_func + */ + if (act_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR) { + /* New pf_func corresponds to ESW + queue corresponding to rep_id */ + act_q = plt_zmalloc(sizeof(struct rte_flow_action_queue), 0); + if (!act_q) { + plt_err("Error allocation memory"); + return -ENOMEM; + } + act_q->index = rep_dev->rep_id; + + while (free_allocs[j] != 0) + j++; + free_allocs[j] = (uint64_t)act_q; + in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE; + in_actions[i].conf = (struct rte_flow_action_queue *)act_q; + npc->rep_act_pf_func = rep_dev->parent_dev->npc.pf_func; + } else { + /* New pf_func corresponds to hw_func of representee */ + in_actions[i].type = ROC_NPC_ACTION_TYPE_PORT_ID; + npc->rep_act_pf_func = rep_dev->hw_func; + *dst_pf_func = rep_dev->hw_func; + } + /* Additional action to strip the VLAN from packets received by LBK */ + i++; + in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_STRIP; + goto done; + } + /* Case: Repd port pattern -> TX Rule with VLAN -> LBK -> Default RX LBK rule hit + * base on vlan, if packet goes to ESW or actual pf_func -> Action : + * act port_representor: send to ESW respective using 1<<8 | rep_id as tci value + * act represented_port: send to actual port using rep_id as tci value. + */ + /* Add RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN action */ + push_vlan = plt_zmalloc(sizeof(struct rte_flow_action_of_push_vlan), 0); + if (!push_vlan) { + plt_err("Error allocation memory"); + return -ENOMEM; + } + + while (free_allocs[j] != 0) + j++; + free_allocs[j] = (uint64_t)push_vlan; + push_vlan->ethertype = ntohs(ROC_ESWITCH_VLAN_TPID); + in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_ETHTYPE_INSERT; + in_actions[i].conf = (struct rte_flow_action_of_push_vlan *)push_vlan; + i++; + + /* Add RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP action */ + vlan_pcp = plt_zmalloc(sizeof(struct rte_flow_action_of_set_vlan_pcp), 0); + if (!vlan_pcp) { + plt_err("Error allocation memory"); + return -ENOMEM; + } + + free_allocs[j + 1] = (uint64_t)vlan_pcp; + vlan_pcp->vlan_pcp = 0; + in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_PCP_INSERT; + in_actions[i].conf = (struct rte_flow_action_of_set_vlan_pcp *)vlan_pcp; + i++; + + /* Add RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID action */ + vlan_vid = plt_zmalloc(sizeof(struct rte_flow_action_of_set_vlan_vid), 0); + if (!vlan_vid) { + plt_err("Error allocation memory"); + return -ENOMEM; + } + + free_allocs[j + 2] = (uint64_t)vlan_vid; + if (act_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR) + vlan_tci = rep_dev->rep_id | (1ULL << CNXK_ESWITCH_VFPF_SHIFT); + else + vlan_tci = rep_dev->rep_id; + vlan_vid->vlan_vid = ntohs(vlan_tci); + in_actions[i].type = ROC_NPC_ACTION_TYPE_VLAN_INSERT; + in_actions[i].conf = (struct rte_flow_action_of_set_vlan_vid *)vlan_vid; + + /* Change default channel to UCAST_CHAN (63) while sending */ + npc->rep_act_rep = true; + } else { + if (act_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR) { + /* Case: Pattern wire port -> Pattern RX rule-> + * Action: pf_func = ESW. 
queue = rep_id + */ + act_q = plt_zmalloc(sizeof(struct rte_flow_action_queue), 0); + if (!act_q) { + plt_err("Error allocation memory"); + return -ENOMEM; + } + while (free_allocs[j] != 0) + j++; + free_allocs[j] = (uint64_t)act_q; + act_q->index = rep_dev->rep_id; + + in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE; + in_actions[i].conf = (struct rte_flow_action_queue *)act_q; + npc->rep_act_pf_func = rep_dev->parent_dev->npc.pf_func; + } else { + /* Case: Pattern wire port -> Pattern RX rule-> + * Action: Receive at actual hw_func + */ + in_actions[i].type = ROC_NPC_ACTION_TYPE_PORT_ID; + npc->rep_act_pf_func = rep_dev->hw_func; + *dst_pf_func = rep_dev->hw_func; + } + } +done: + *act_cnt = i; + + return 0; +} + static int representor_portid_action(struct roc_npc_action *in_actions, struct rte_eth_dev *portid_eth_dev, - uint16_t *dst_pf_func, uint8_t has_tunnel_pattern, int *act_cnt) + uint16_t *dst_pf_func, uint8_t has_tunnel_pattern, uint64_t *free_allocs, + int *act_cnt) { struct rte_eth_dev *rep_eth_dev = portid_eth_dev; struct rte_flow_action_mark *act_mark; struct cnxk_rep_dev *rep_dev; /* For inserting an action in the list */ - int i = *act_cnt; + int i = *act_cnt, j = 0; rep_dev = cnxk_rep_pmd_priv(rep_eth_dev); + *dst_pf_func = rep_dev->hw_func; /* Add Mark action */ @@ -207,6 +353,9 @@ representor_portid_action(struct roc_npc_action *in_actions, struct rte_eth_dev return -ENOMEM; } + while (free_allocs[j] != 0) + j++; + free_allocs[j] = (uint64_t)act_mark; /* Mark ID format: (tunnel type - VxLAN, Geneve << 6) | Tunnel decap */ act_mark->id = has_tunnel_pattern ? ((has_tunnel_pattern << 6) | 5) : 1; in_actions[i].type = ROC_NPC_ACTION_TYPE_MARK; @@ -223,7 +372,8 @@ static int cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, const struct rte_flow_action actions[], struct roc_npc_action in_actions[], struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg, - uint16_t *dst_pf_func, uint8_t has_tunnel_pattern) + uint16_t *dst_pf_func, uint8_t has_tunnel_pattern, bool is_rep, + uint8_t rep_pattern, uint64_t *free_allocs) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); const struct rte_flow_action_queue *act_q = NULL; @@ -273,16 +423,48 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: + in_actions[i].conf = actions->conf; + act_ethdev = (const struct rte_flow_action_ethdev *)actions->conf; + if (rte_eth_dev_get_name_by_port(act_ethdev->port_id, if_name)) { + plt_err("Name not found for output port id"); + goto err_exit; + } + portid_eth_dev = rte_eth_dev_allocated(if_name); + if (!portid_eth_dev) { + plt_err("eth_dev not found for output port id"); + goto err_exit; + } + + plt_rep_dbg("Rule installed by port %d if_name %s act_ethdev->port_id %d", + eth_dev->data->port_id, if_name, act_ethdev->port_id); + if (cnxk_ethdev_is_representor(if_name)) { + if (representor_rep_portid_action(in_actions, eth_dev, + portid_eth_dev, actions->type, + rep_pattern, dst_pf_func, is_rep, + free_allocs, &i)) { + plt_err("Representor port action set failed"); + goto err_exit; + } + } else { + if (actions->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) + continue; + /* Normal port as represented_port as action not supported*/ + return -ENOTSUP; + } + break; case RTE_FLOW_ACTION_TYPE_PORT_ID: + /* No port ID action on representor ethdevs */ + if (is_rep) + continue; in_actions[i].type = ROC_NPC_ACTION_TYPE_PORT_ID; 
in_actions[i].conf = actions->conf; - act_ethdev = (const struct rte_flow_action_ethdev *) - actions->conf; - port_act = (const struct rte_flow_action_port_id *) - actions->conf; + act_ethdev = (const struct rte_flow_action_ethdev *)actions->conf; + port_act = (const struct rte_flow_action_port_id *)actions->conf; if (rte_eth_dev_get_name_by_port( - actions->type != RTE_FLOW_ACTION_TYPE_PORT_ID ? - act_ethdev->port_id : port_act->id, if_name)) { + actions->type != RTE_FLOW_ACTION_TYPE_PORT_ID ? + act_ethdev->port_id : + port_act->id, + if_name)) { plt_err("Name not found for output port id"); goto err_exit; } @@ -297,7 +479,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, act_ethdev->port_id); if (representor_portid_action(in_actions, portid_eth_dev, dst_pf_func, has_tunnel_pattern, - &i)) { + free_allocs, &i)) { plt_err("Representor port action set failed"); goto err_exit; } @@ -321,6 +503,9 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_RSS: + /* No RSS action on representor ethdevs */ + if (is_rep) + continue; rc = npc_rss_action_validate(eth_dev, attr, actions); if (rc) goto err_exit; @@ -397,14 +582,29 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, static int cnxk_map_pattern(struct rte_eth_dev *eth_dev, const struct rte_flow_item pattern[], - struct roc_npc_item_info in_pattern[], uint8_t *has_tunnel_pattern) + struct roc_npc_item_info in_pattern[], uint8_t *has_tunnel_pattern, bool is_rep, + uint8_t *rep_pattern, uint64_t *free_allocs) { - struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); const struct rte_flow_item_ethdev *rep_eth_dev; struct rte_eth_dev *portid_eth_dev; char if_name[RTE_ETH_NAME_MAX_LEN]; struct cnxk_eth_dev *hw_dst; - int i = 0; + struct cnxk_rep_dev *rdev; + struct cnxk_eth_dev *dev; + struct roc_npc *npc; + int i = 0, j = 0; + + if (!is_rep) { + dev = cnxk_eth_pmd_priv(eth_dev); + npc = &dev->npc; + } else { + rdev = cnxk_rep_pmd_priv(eth_dev); + npc = &rdev->parent_dev->npc; + + npc->rep_npc = npc; + npc->rep_port_id = rdev->port_id; + npc->rep_pf_func = rdev->hw_func; + } while (pattern->type != RTE_FLOW_ITEM_TYPE_END) { in_pattern[i].spec = pattern->spec; @@ -412,7 +612,8 @@ cnxk_map_pattern(struct rte_eth_dev *eth_dev, const struct rte_flow_item pattern in_pattern[i].mask = pattern->mask; in_pattern[i].type = term[pattern->type].item_type; in_pattern[i].size = term[pattern->type].item_size; - if (pattern->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) { + if (pattern->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT || + pattern->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) { rep_eth_dev = (const struct rte_flow_item_ethdev *)pattern->spec; if (rte_eth_dev_get_name_by_port(rep_eth_dev->port_id, if_name)) { plt_err("Name not found for output port id"); @@ -423,11 +624,7 @@ cnxk_map_pattern(struct rte_eth_dev *eth_dev, const struct rte_flow_item pattern plt_err("eth_dev not found for output port id"); goto fail; } - if (strcmp(portid_eth_dev->device->driver->name, - eth_dev->device->driver->name) != 0) { - plt_err("Output port not under same driver"); - goto fail; - } + *rep_pattern = pattern->type; if (cnxk_ethdev_is_representor(if_name)) { /* Case where represented port not part of same * app and represented by a representor port. 
@@ -437,20 +634,56 @@ cnxk_map_pattern(struct rte_eth_dev *eth_dev, const struct rte_flow_item pattern rep_dev = cnxk_rep_pmd_priv(portid_eth_dev); eswitch_dev = rep_dev->parent_dev; - dev->npc.rep_npc = &eswitch_dev->npc; - dev->npc.rep_port_id = rep_eth_dev->port_id; - dev->npc.rep_pf_func = rep_dev->hw_func; + npc->rep_npc = &eswitch_dev->npc; + npc->rep_port_id = rep_eth_dev->port_id; + npc->rep_pf_func = rep_dev->hw_func; + + if (pattern->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) { + struct rte_flow_item_vlan *vlan; + + npc->rep_pf_func = eswitch_dev->npc.pf_func; + /* Add VLAN pattern corresponding to rep_id */ + i++; + vlan = plt_zmalloc(sizeof(struct rte_flow_item_vlan), 0); + if (!vlan) { + plt_err("error allocation memory"); + return -ENOMEM; + } + + while (free_allocs[j] != 0) + j++; + free_allocs[j] = (uint64_t)vlan; + + npc->rep_rx_channel = ROC_ESWITCH_LBK_CHAN; + vlan->hdr.vlan_tci = RTE_BE16(rep_dev->rep_id); + in_pattern[i].spec = (struct rte_flow_item_vlan *)vlan; + in_pattern[i].last = NULL; + in_pattern[i].mask = &rte_flow_item_vlan_mask; + in_pattern[i].type = + term[RTE_FLOW_ITEM_TYPE_VLAN].item_type; + in_pattern[i].size = + term[RTE_FLOW_ITEM_TYPE_VLAN].item_size; + } + *rep_pattern |= 1 << IS_REP_BIT; plt_rep_dbg("Represented port %d act port %d rep_dev->hw_func 0x%x", rep_eth_dev->port_id, eth_dev->data->port_id, rep_dev->hw_func); } else { + if (strcmp(portid_eth_dev->device->driver->name, + eth_dev->device->driver->name) != 0) { + plt_err("Output port not under same driver"); + goto fail; + } + /* Normal port as port_representor pattern can't be supported */ + if (pattern->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) + return -ENOTSUP; /* Case where represented port part of same app * as PF. */ hw_dst = portid_eth_dev->data->dev_private; - dev->npc.rep_npc = &hw_dst->npc; - dev->npc.rep_port_id = rep_eth_dev->port_id; - dev->npc.rep_pf_func = hw_dst->npc.pf_func; + npc->rep_npc = &hw_dst->npc; + npc->rep_port_id = rep_eth_dev->port_id; + npc->rep_pf_func = hw_dst->npc.pf_func; } } @@ -474,56 +707,96 @@ cnxk_map_flow_data(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr struct roc_npc_attr *in_attr, struct roc_npc_item_info in_pattern[], struct roc_npc_action in_actions[], struct roc_npc_action_sample *in_sample_actions, uint32_t *flowkey_cfg, - uint16_t *dst_pf_func) + uint16_t *dst_pf_func, bool is_rep, uint64_t *free_allocs) { - uint8_t has_tunnel_pattern = 0; + uint8_t has_tunnel_pattern = 0, rep_pattern = 0; int rc; in_attr->priority = attr->priority; in_attr->ingress = attr->ingress; in_attr->egress = attr->egress; - rc = cnxk_map_pattern(eth_dev, pattern, in_pattern, &has_tunnel_pattern); + rc = cnxk_map_pattern(eth_dev, pattern, in_pattern, &has_tunnel_pattern, is_rep, + &rep_pattern, free_allocs); if (rc) { plt_err("Failed to map pattern list"); return rc; } + if (attr->transfer) { + /* rep_pattern is used to identify if RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT + * OR RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR is defined + if pattern's portid is + * normal port or representor port. + * For normal port_id, rep_pattern = pattern-> type + * For representor port, rep_pattern = pattern-> type | 1 << IS_REP_BIT + */ + if (is_rep || rep_pattern) { + if (rep_pattern == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT || + ((rep_pattern & 0x7f) == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR)) + /* If pattern is port_representor or pattern has normal port as + * represented port, install ingress rule. 
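+ * Otherwise install an egress rule (e.g. the represented port pattern targets a representor port, so the rule is hit on the TX path towards the representee).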
+ */ + in_attr->ingress = attr->transfer; + else + in_attr->egress = attr->transfer; + } else { + in_attr->ingress = attr->transfer; + } + } + return cnxk_map_actions(eth_dev, attr, actions, in_actions, in_sample_actions, flowkey_cfg, - dst_pf_func, has_tunnel_pattern); + dst_pf_func, has_tunnel_pattern, is_rep, rep_pattern, free_allocs); } -static int -cnxk_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], const struct rte_flow_action actions[], - struct rte_flow_error *error) +int +cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], struct rte_flow_error *error, + bool is_rep) { struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1]; struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT]; - struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); struct roc_npc_action_sample in_sample_action; - struct roc_npc *npc = &dev->npc; + struct cnxk_rep_dev *rep_dev; struct roc_npc_attr in_attr; + uint64_t *free_allocs, sz; + struct cnxk_eth_dev *dev; struct roc_npc_flow flow; uint32_t flowkey_cfg = 0; uint16_t dst_pf_func = 0; - int rc; - - /* Skip flow validation for MACsec. */ - if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY && - cnxk_eth_macsec_sess_get_by_sess(dev, actions[0].conf) != NULL) - return 0; + struct roc_npc *npc; + int rc, j; + + /* is_rep set for operation performed via representor ports */ + if (!is_rep) { + dev = cnxk_eth_pmd_priv(eth_dev); + npc = &dev->npc; + /* Skip flow validation for MACsec. */ + if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY && + cnxk_eth_macsec_sess_get_by_sess(dev, actions[0].conf) != NULL) + return 0; + } else { + rep_dev = cnxk_rep_pmd_priv(eth_dev); + npc = &rep_dev->parent_dev->npc; + } memset(&flow, 0, sizeof(flow)); memset(&in_sample_action, 0, sizeof(in_sample_action)); flow.is_validate = true; + sz = ROC_NPC_MAX_ACTION_COUNT + ROC_NPC_ITEM_TYPE_END + 1; + free_allocs = plt_zmalloc(sz * sizeof(uint64_t), 0); + if (!free_allocs) { + rte_flow_error_set(error, -ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, + "Failed to map flow data"); + return -ENOMEM; + } rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions, - &in_sample_action, &flowkey_cfg, &dst_pf_func); + &in_sample_action, &flowkey_cfg, &dst_pf_func, is_rep, free_allocs); if (rc) { rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, "Failed to map flow data"); - return rc; + goto clean; } rc = roc_npc_flow_parse(npc, &in_attr, in_pattern, in_actions, &flow); @@ -531,73 +804,147 @@ cnxk_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr if (rc) { rte_flow_error_set(error, 0, rc, NULL, "Flow validation failed"); - return rc; + goto clean; } - return 0; +clean: + /* Freeing the allocations done for additional patterns/actions */ + for (j = 0; (j < (int)sz) && free_allocs[j]; j++) + plt_free((void *)free_allocs[j]); + plt_free(free_allocs); + + return rc; +} + +static int +cnxk_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, false); } struct roc_npc_flow * -cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct 
rte_flow_action actions[], - struct rte_flow_error *error) +cnxk_flow_create_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], struct rte_flow_error *error, + bool is_rep) { - struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); struct roc_npc_item_info in_pattern[ROC_NPC_ITEM_TYPE_END + 1]; struct roc_npc_action in_actions[ROC_NPC_MAX_ACTION_COUNT]; struct roc_npc_action_sample in_sample_action; - struct roc_npc *npc = &dev->npc; + struct cnxk_rep_dev *rep_dev = NULL; + struct roc_npc_flow *flow = NULL; + struct cnxk_eth_dev *dev = NULL; struct roc_npc_attr in_attr; - struct roc_npc_flow *flow; + uint64_t *free_allocs, sz; uint16_t dst_pf_func = 0; + struct roc_npc *npc; int errcode = 0; - int rc; + int rc, j; + + /* is_rep set for operation performed via representor ports */ + if (!is_rep) { + dev = cnxk_eth_pmd_priv(eth_dev); + npc = &dev->npc; + } else { + rep_dev = cnxk_rep_pmd_priv(eth_dev); + npc = &rep_dev->parent_dev->npc; + } + sz = ROC_NPC_MAX_ACTION_COUNT + ROC_NPC_ITEM_TYPE_END + 1; + free_allocs = plt_zmalloc(sz * sizeof(uint64_t), 0); + if (!free_allocs) { + rte_flow_error_set(error, -ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, + "Failed to map flow data"); + return NULL; + } memset(&in_sample_action, 0, sizeof(in_sample_action)); memset(&in_attr, 0, sizeof(struct roc_npc_attr)); rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr, in_pattern, in_actions, - &in_sample_action, &npc->flowkey_cfg_state, &dst_pf_func); + &in_sample_action, &npc->flowkey_cfg_state, &dst_pf_func, is_rep, + free_allocs); if (rc) { - rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, "Failed to map flow data"); - return NULL; + goto clean; } flow = roc_npc_flow_create(npc, &in_attr, in_pattern, in_actions, dst_pf_func, &errcode); if (errcode != 0) { rte_flow_error_set(error, errcode, errcode, NULL, roc_error_msg_get(errcode)); - return NULL; + goto clean; } +clean: + /* Freeing the allocations done for additional patterns/actions */ + for (j = 0; (j < (int)sz) && free_allocs[j]; j++) + plt_free((void *)free_allocs[j]); + plt_free(free_allocs); + return flow; } +struct roc_npc_flow * +cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + return cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, false); +} + int -cnxk_flow_destroy(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow, - struct rte_flow_error *error) +cnxk_flow_destroy_common(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow, + struct rte_flow_error *error, bool is_rep) { - struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); - struct roc_npc *npc = &dev->npc; + struct cnxk_rep_dev *rep_dev; + struct cnxk_eth_dev *dev; + struct roc_npc *npc; int rc; + /* is_rep set for operation performed via representor ports */ + if (!is_rep) { + dev = cnxk_eth_pmd_priv(eth_dev); + npc = &dev->npc; + } else { + rep_dev = cnxk_rep_pmd_priv(eth_dev); + npc = &rep_dev->parent_dev->npc; + } + rc = roc_npc_flow_destroy(npc, flow); if (rc) - rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "Flow Destroy failed"); + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Flow Destroy failed"); return rc; } -static int -cnxk_flow_flush(struct rte_eth_dev 
*eth_dev, struct rte_flow_error *error) +int +cnxk_flow_destroy(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow, + struct rte_flow_error *error) { - struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); - struct roc_npc *npc = &dev->npc; + return cnxk_flow_destroy_common(eth_dev, flow, error, false); +} + +int +cnxk_flow_flush_common(struct rte_eth_dev *eth_dev, struct rte_flow_error *error, bool is_rep) +{ + struct cnxk_rep_dev *rep_dev; + struct cnxk_eth_dev *dev; + struct roc_npc *npc; int rc; + /* is_rep set for operation performed via representor ports */ + if (!is_rep) { + dev = cnxk_eth_pmd_priv(eth_dev); + npc = &dev->npc; + } else { + rep_dev = cnxk_rep_pmd_priv(eth_dev); + npc = &rep_dev->parent_dev->npc; + } + rc = roc_npc_mcam_free_all_resources(npc); if (rc) { - rte_flow_error_set(error, EIO, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "Failed to flush filter"); + rte_flow_error_set(error, EIO, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to flush filter"); return -rte_errno; } @@ -605,14 +952,21 @@ cnxk_flow_flush(struct rte_eth_dev *eth_dev, struct rte_flow_error *error) } static int -cnxk_flow_query(struct rte_eth_dev *eth_dev, struct rte_flow *flow, - const struct rte_flow_action *action, void *data, - struct rte_flow_error *error) +cnxk_flow_flush(struct rte_eth_dev *eth_dev, struct rte_flow_error *error) +{ + return cnxk_flow_flush_common(eth_dev, error, false); +} + +int +cnxk_flow_query_common(struct rte_eth_dev *eth_dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *data, + struct rte_flow_error *error, bool is_rep) { struct roc_npc_flow *in_flow = (struct roc_npc_flow *)flow; - struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); - struct roc_npc *npc = &dev->npc; struct rte_flow_query_count *query = data; + struct cnxk_rep_dev *rep_dev; + struct cnxk_eth_dev *dev; + struct roc_npc *npc; const char *errmsg = NULL; int errcode = ENOTSUP; int rc; @@ -627,6 +981,15 @@ cnxk_flow_query(struct rte_eth_dev *eth_dev, struct rte_flow *flow, goto err_exit; } + /* is_rep set for operation performed via representor ports */ + if (!is_rep) { + dev = cnxk_eth_pmd_priv(eth_dev); + npc = &dev->npc; + } else { + rep_dev = cnxk_rep_pmd_priv(eth_dev); + npc = &rep_dev->parent_dev->npc; + } + if (in_flow->use_pre_alloc) rc = roc_npc_inl_mcam_read_counter(in_flow->ctr_id, &query->hits); else @@ -660,8 +1023,15 @@ cnxk_flow_query(struct rte_eth_dev *eth_dev, struct rte_flow *flow, } static int -cnxk_flow_isolate(struct rte_eth_dev *eth_dev __rte_unused, - int enable __rte_unused, struct rte_flow_error *error) +cnxk_flow_query(struct rte_eth_dev *eth_dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *data, struct rte_flow_error *error) +{ + return cnxk_flow_query_common(eth_dev, flow, action, data, error, false); +} + +static int +cnxk_flow_isolate(struct rte_eth_dev *eth_dev __rte_unused, int enable __rte_unused, + struct rte_flow_error *error) { /* If we support, we need to un-install the default mcam * entry for this port. 
@@ -673,16 +1043,25 @@ cnxk_flow_isolate(struct rte_eth_dev *eth_dev __rte_unused, return -rte_errno; } -static int -cnxk_flow_dev_dump(struct rte_eth_dev *eth_dev, struct rte_flow *flow, - FILE *file, struct rte_flow_error *error) +int +cnxk_flow_dev_dump_common(struct rte_eth_dev *eth_dev, struct rte_flow *flow, FILE *file, + struct rte_flow_error *error, bool is_rep) { - struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); - struct roc_npc *npc = &dev->npc; + struct cnxk_rep_dev *rep_dev; + struct cnxk_eth_dev *dev; + struct roc_npc *npc; + + /* is_rep set for operation performed via representor ports */ + if (!is_rep) { + dev = cnxk_eth_pmd_priv(eth_dev); + npc = &dev->npc; + } else { + rep_dev = cnxk_rep_pmd_priv(eth_dev); + npc = &rep_dev->parent_dev->npc; + } if (file == NULL) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Invalid file"); return -rte_errno; } @@ -701,8 +1080,15 @@ cnxk_flow_dev_dump(struct rte_eth_dev *eth_dev, struct rte_flow *flow, } static int -cnxk_flow_get_aged_flows(struct rte_eth_dev *eth_dev, void **context, - uint32_t nb_contexts, struct rte_flow_error *err) +cnxk_flow_dev_dump(struct rte_eth_dev *eth_dev, struct rte_flow *flow, FILE *file, + struct rte_flow_error *error) +{ + return cnxk_flow_dev_dump_common(eth_dev, flow, file, error, false); +} + +static int +cnxk_flow_get_aged_flows(struct rte_eth_dev *eth_dev, void **context, uint32_t nb_contexts, + struct rte_flow_error *err) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); struct roc_npc *roc_npc = &dev->npc; diff --git a/drivers/net/cnxk/cnxk_flow.h b/drivers/net/cnxk/cnxk_flow.h index bb23629819..226694fbed 100644 --- a/drivers/net/cnxk/cnxk_flow.h +++ b/drivers/net/cnxk/cnxk_flow.h @@ -24,4 +24,22 @@ struct roc_npc_flow *cnxk_flow_create(struct rte_eth_dev *dev, int cnxk_flow_destroy(struct rte_eth_dev *dev, struct roc_npc_flow *flow, struct rte_flow_error *error); +struct roc_npc_flow *cnxk_flow_create_common(struct rte_eth_dev *eth_dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, bool is_rep); +int cnxk_flow_validate_common(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], struct rte_flow_error *error, + bool is_rep); +int cnxk_flow_destroy_common(struct rte_eth_dev *eth_dev, struct roc_npc_flow *flow, + struct rte_flow_error *error, bool is_rep); +int cnxk_flow_flush_common(struct rte_eth_dev *eth_dev, struct rte_flow_error *error, bool is_rep); +int cnxk_flow_query_common(struct rte_eth_dev *eth_dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *data, + struct rte_flow_error *error, bool is_rep); +int cnxk_flow_dev_dump_common(struct rte_eth_dev *eth_dev, struct rte_flow *flow, FILE *file, + struct rte_flow_error *error, bool is_rep); + #endif /* __CNXK_RTE_FLOW_H__ */ From patchwork Sun Mar 3 17:38:32 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137836 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id DB83D43C01; Sun, 3 Mar 2024 18:41:29 +0100 (CET) Received: from 
mails.dpdk.org; Sun, 3 Mar 2024 18:41:29 +0100 (CET)
From: Harman Kalra
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra
Subject: [PATCH v6 22/23] net/cnxk: flow create on representor ports
Date: Sun, 3 Mar 2024 23:08:32 +0530
Message-ID: <20240303173833.100039-23-hkalra@marvell.com>
In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com>
References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com>

Implement the base infrastructure for handling flow operations performed on representor ports, where these representor ports may represent native representees or be part of companion applications.
Also added support for handling flow create operation Signed-off-by: Harman Kalra --- doc/guides/nics/features/cnxk.ini | 1 + doc/guides/nics/features/cnxk_vec.ini | 1 + doc/guides/nics/features/cnxk_vf.ini | 1 + drivers/net/cnxk/cnxk_flow.h | 9 +- drivers/net/cnxk/cnxk_rep.h | 3 + drivers/net/cnxk/cnxk_rep_flow.c | 401 ++++++++++++++++++++++++++ drivers/net/cnxk/cnxk_rep_msg.h | 27 ++ drivers/net/cnxk/cnxk_rep_ops.c | 3 +- drivers/net/cnxk/meson.build | 1 + 9 files changed, 444 insertions(+), 3 deletions(-) create mode 100644 drivers/net/cnxk/cnxk_rep_flow.c diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini index 7d85fb9f93..b5d9f7e579 100644 --- a/doc/guides/nics/features/cnxk.ini +++ b/doc/guides/nics/features/cnxk.ini @@ -105,3 +105,4 @@ security = Y skip_cman = Y vf = Y vxlan_decap = I +vxlan_encap = I diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini index d925933b34..92a486664f 100644 --- a/doc/guides/nics/features/cnxk_vec.ini +++ b/doc/guides/nics/features/cnxk_vec.ini @@ -94,3 +94,4 @@ rss = Y security = Y vf = Y vxlan_decap = I +vxlan_encap = I diff --git a/doc/guides/nics/features/cnxk_vf.ini b/doc/guides/nics/features/cnxk_vf.ini index 0da0106fa3..a55f0e7ce5 100644 --- a/doc/guides/nics/features/cnxk_vf.ini +++ b/doc/guides/nics/features/cnxk_vf.ini @@ -94,3 +94,4 @@ security = Y skip_cman = Y vf = Y vxlan_decap = I +vxlan_encap = I diff --git a/drivers/net/cnxk/cnxk_flow.h b/drivers/net/cnxk/cnxk_flow.h index 226694fbed..e51d04b2c9 100644 --- a/drivers/net/cnxk/cnxk_flow.h +++ b/drivers/net/cnxk/cnxk_flow.h @@ -16,8 +16,13 @@ struct cnxk_rte_flow_term_info { uint16_t item_size; }; -struct roc_npc_flow *cnxk_flow_create(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, +struct cnxk_rte_flow_action_info { + uint16_t conf_size; +}; + +extern const struct cnxk_rte_flow_term_info term[]; + +struct roc_npc_flow *cnxk_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], struct rte_flow_error *error); diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h index ab8b3fb152..9bdea47bd4 100644 --- a/drivers/net/cnxk/cnxk_rep.h +++ b/drivers/net/cnxk/cnxk_rep.h @@ -20,6 +20,9 @@ /* Common ethdev ops */ extern struct eth_dev_ops cnxk_rep_dev_ops; +/* Flow ops for representor ports */ +extern struct rte_flow_ops cnxk_rep_flow_ops; + struct cnxk_rep_queue_stats { uint64_t pkts; uint64_t bytes; diff --git a/drivers/net/cnxk/cnxk_rep_flow.c b/drivers/net/cnxk/cnxk_rep_flow.c new file mode 100644 index 0000000000..2613be5b9e --- /dev/null +++ b/drivers/net/cnxk/cnxk_rep_flow.c @@ -0,0 +1,401 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2024 Marvell. 
+ */ + +#include +#include + +#include +#include +#include + +#define DEFAULT_DUMP_FILE_NAME "/tmp/fdump" +#define MAX_BUFFER_SIZE 1500 + +const struct cnxk_rte_flow_action_info action_info[] = { + [RTE_FLOW_ACTION_TYPE_MARK] = {sizeof(struct rte_flow_action_mark)}, + [RTE_FLOW_ACTION_TYPE_VF] = {sizeof(struct rte_flow_action_vf)}, + [RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = {sizeof(struct rte_flow_action_port_id)}, + [RTE_FLOW_ACTION_TYPE_PORT_ID] = {sizeof(struct rte_flow_action_port_id)}, + [RTE_FLOW_ACTION_TYPE_QUEUE] = {sizeof(struct rte_flow_action_queue)}, + [RTE_FLOW_ACTION_TYPE_RSS] = {sizeof(struct rte_flow_action_rss)}, + [RTE_FLOW_ACTION_TYPE_SECURITY] = {sizeof(struct rte_flow_action_security)}, + [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = {sizeof(struct rte_flow_action_of_set_vlan_vid)}, + [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = {sizeof(struct rte_flow_action_of_push_vlan)}, + [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = {sizeof(struct rte_flow_action_of_set_vlan_pcp)}, + [RTE_FLOW_ACTION_TYPE_METER] = {sizeof(struct rte_flow_action_meter)}, + [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = {sizeof(struct rte_flow_action_vxlan_encap)}, + [RTE_FLOW_ACTION_TYPE_COUNT] = {sizeof(struct rte_flow_action_count)}, +}; + +static void +cnxk_flow_params_count(const struct rte_flow_item pattern[], const struct rte_flow_action actions[], + uint16_t *n_pattern, uint16_t *n_action) +{ + int i = 0; + + for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) + i++; + + *n_pattern = ++i; + plt_rep_dbg("Total patterns is %d", *n_pattern); + + i = 0; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) + i++; + *n_action = ++i; + plt_rep_dbg("Total actions is %d", *n_action); +} + +static void +populate_attr_data(void *buffer, uint32_t *length, const struct rte_flow_attr *attr) +{ + uint32_t sz = sizeof(struct rte_flow_attr); + uint32_t len; + + cnxk_rep_msg_populate_type(buffer, length, CNXK_TYPE_ATTR, sz); + + len = *length; + /* Populate the attribute data */ + rte_memcpy(RTE_PTR_ADD(buffer, len), attr, sz); + len += sz; + + *length = len; +} + +static uint16_t +prepare_pattern_data(const struct rte_flow_item *pattern, uint16_t nb_pattern, + uint64_t *pattern_data) +{ + cnxk_pattern_hdr_t hdr; + uint16_t len = 0; + int i = 0; + + for (i = 0; i < nb_pattern; i++) { + /* Populate the pattern type hdr */ + memset(&hdr, 0, sizeof(cnxk_pattern_hdr_t)); + hdr.type = pattern->type; + if (pattern->spec) { + hdr.spec_sz = term[pattern->type].item_size; + hdr.last_sz = 0; + hdr.mask_sz = term[pattern->type].item_size; + } + + rte_memcpy(RTE_PTR_ADD(pattern_data, len), &hdr, sizeof(cnxk_pattern_hdr_t)); + len += sizeof(cnxk_pattern_hdr_t); + + /* Copy pattern spec data */ + if (pattern->spec) { + rte_memcpy(RTE_PTR_ADD(pattern_data, len), pattern->spec, + term[pattern->type].item_size); + len += term[pattern->type].item_size; + } + + /* Copy pattern last data */ + if (pattern->last) { + rte_memcpy(RTE_PTR_ADD(pattern_data, len), pattern->last, + term[pattern->type].item_size); + len += term[pattern->type].item_size; + } + + /* Copy pattern mask data */ + if (pattern->mask) { + rte_memcpy(RTE_PTR_ADD(pattern_data, len), pattern->mask, + term[pattern->type].item_size); + len += term[pattern->type].item_size; + } + pattern++; + } + + return len; +} + +static void +populate_pattern_data(void *buffer, uint32_t *length, const struct rte_flow_item *pattern, + uint16_t nb_pattern) +{ + uint64_t pattern_data[BUFSIZ]; + uint32_t len; + uint32_t sz; + + memset(pattern_data, 0, BUFSIZ * sizeof(uint64_t)); + /* Prepare 
pattern_data */ + sz = prepare_pattern_data(pattern, nb_pattern, pattern_data); + + cnxk_rep_msg_populate_type(buffer, length, CNXK_TYPE_PATTERN, sz); + + len = *length; + /* Populate the pattern data */ + rte_memcpy(RTE_PTR_ADD(buffer, len), pattern_data, sz); + len += sz; + + *length = len; +} + +static uint16_t +populate_rss_action_conf(const struct rte_flow_action_rss *conf, void *rss_action_conf) +{ + int len, sz; + + len = sizeof(struct rte_flow_action_rss) - sizeof(conf->key) - sizeof(conf->queue); + + if (rss_action_conf) + rte_memcpy(rss_action_conf, conf, len); + + if (conf->key) { + sz = conf->key_len; + if (rss_action_conf) + rte_memcpy(RTE_PTR_ADD(rss_action_conf, len), conf->key, sz); + len += sz; + } + + if (conf->queue) { + sz = conf->queue_num * sizeof(conf->queue); + if (rss_action_conf) + rte_memcpy(RTE_PTR_ADD(rss_action_conf, len), conf->queue, sz); + len += sz; + } + + return len; +} + +static uint16_t +populate_vxlan_encap_action_conf(const struct rte_flow_action_vxlan_encap *vxlan_conf, + void *vxlan_encap_action_data) +{ + const struct rte_flow_item *pattern; + uint64_t nb_patterns = 0; + uint16_t len, sz; + + pattern = vxlan_conf->definition; + for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) + nb_patterns++; + + len = sizeof(uint64_t); + rte_memcpy(vxlan_encap_action_data, &nb_patterns, len); + pattern = vxlan_conf->definition; + /* Prepare pattern_data */ + sz = prepare_pattern_data(pattern, nb_patterns, RTE_PTR_ADD(vxlan_encap_action_data, len)); + + len += sz; + if (len > BUFSIZ) { + plt_err("Incomplete item definition loaded, len %d", len); + return 0; + } + + return len; +} + +static uint16_t +prepare_action_data(const struct rte_flow_action *action, uint16_t nb_action, uint64_t *action_data) +{ + void *action_conf_data = NULL; + cnxk_action_hdr_t hdr; + uint16_t len = 0, sz = 0; + int i = 0; + + for (i = 0; i < nb_action; i++) { + if (action->conf) { + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_RSS: + sz = populate_rss_action_conf(action->conf, NULL); + action_conf_data = plt_zmalloc(sz, 0); + if (populate_rss_action_conf(action->conf, action_conf_data) != + sz) { + plt_err("Populating RSS action config failed"); + return 0; + } + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + action_conf_data = plt_zmalloc(BUFSIZ, 0); + sz = populate_vxlan_encap_action_conf(action->conf, + action_conf_data); + if (!sz) { + plt_err("Populating vxlan action config failed"); + return 0; + } + break; + default: + sz = action_info[action->type].conf_size; + action_conf_data = plt_zmalloc(sz, 0); + rte_memcpy(action_conf_data, action->conf, sz); + break; + }; + } + + /* Populate the action type hdr */ + memset(&hdr, 0, sizeof(cnxk_action_hdr_t)); + hdr.type = action->type; + hdr.conf_sz = sz; + + rte_memcpy(RTE_PTR_ADD(action_data, len), &hdr, sizeof(cnxk_action_hdr_t)); + len += sizeof(cnxk_action_hdr_t); + + /* Copy action conf data */ + if (action_conf_data) { + rte_memcpy(RTE_PTR_ADD(action_data, len), action_conf_data, sz); + len += sz; + plt_free(action_conf_data); + action_conf_data = NULL; + } + + action++; + } + + return len; +} + +static void +populate_action_data(void *buffer, uint32_t *length, const struct rte_flow_action *action, + uint16_t nb_action) +{ + uint64_t action_data[BUFSIZ]; + uint32_t len; + uint32_t sz; + + memset(action_data, 0, BUFSIZ * sizeof(uint64_t)); + /* Prepare action_data */ + sz = prepare_action_data(action, nb_action, action_data); + + cnxk_rep_msg_populate_type(buffer, length, CNXK_TYPE_ACTION, sz); + + len = *length; 
+ /* Populate the action data */ + rte_memcpy(RTE_PTR_ADD(buffer, len), action_data, sz); + len += sz; + + *length = len; +} + +static int +process_flow_rule(struct cnxk_rep_dev *rep_dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], const struct rte_flow_action actions[], + cnxk_rep_msg_ack_data_t *adata, cnxk_rep_msg_t msg) +{ + cnxk_rep_msg_flow_create_meta_t msg_fc_meta; + uint16_t n_pattern, n_action; + uint32_t len = 0, rc = 0; + void *buffer; + size_t size; + + size = MAX_BUFFER_SIZE; + buffer = plt_zmalloc(size, 0); + if (!buffer) { + plt_err("Failed to allocate mem"); + rc = -ENOMEM; + goto fail; + } + + /* Get no of actions and patterns */ + cnxk_flow_params_count(pattern, actions, &n_pattern, &n_action); + + /* Adding the header */ + cnxk_rep_msg_populate_header(buffer, &len); + + /* Representor port identified as rep_xport queue */ + msg_fc_meta.portid = rep_dev->rep_id; + msg_fc_meta.nb_pattern = n_pattern; + msg_fc_meta.nb_action = n_action; + + cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_fc_meta, + sizeof(cnxk_rep_msg_flow_create_meta_t), msg); + + /* Populate flow create parameters data */ + populate_attr_data(buffer, &len, attr); + populate_pattern_data(buffer, &len, pattern, n_pattern); + populate_action_data(buffer, &len, actions, n_action); + + cnxk_rep_msg_populate_msg_end(buffer, &len); + + rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata); + if (rc) { + plt_err("Failed to process the message, err %d", rc); + goto fail; + } + + return 0; +fail: + return rc; +} + +static struct rte_flow * +cnxk_rep_flow_create_native(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], struct rte_flow_error *error) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + struct roc_npc_flow *flow; + uint16_t new_entry; + int rc; + + flow = cnxk_flow_create_common(eth_dev, attr, pattern, actions, error, true); + if (!flow) { + plt_err("Fail to create flow"); + goto fail; + } + + /* Shifting the rules with higher priority than exception path rules */ + new_entry = (uint16_t)flow->mcam_id; + rc = cnxk_eswitch_flow_rule_shift(rep_dev->hw_func, &new_entry); + if (rc) { + plt_err("Failed to shift the flow rule entry, err %d", rc); + goto fail; + } + + flow->mcam_id = new_entry; + + return (struct rte_flow *)flow; +fail: + return NULL; +} + +static struct rte_flow * +cnxk_rep_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + struct rte_flow *flow = NULL; + cnxk_rep_msg_ack_data_t adata; + int rc = 0; + + /* If representor not representing any active VF, return 0 */ + if (!rep_dev->is_vf_active) { + rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Represented VF not active yet"); + return 0; + } + + if (rep_dev->native_repte) + return cnxk_rep_flow_create_native(eth_dev, attr, pattern, actions, error); + + rc = process_flow_rule(rep_dev, attr, pattern, actions, &adata, CNXK_REP_MSG_FLOW_CREATE); + if (!rc || adata.u.sval < 0) { + if (adata.u.sval < 0) { + rc = (int)adata.u.sval; + rte_flow_error_set(error, adata.u.sval, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to validate flow"); + goto fail; + } + + flow = adata.u.data; + if (!flow) { + rte_flow_error_set(error, adata.u.sval, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + 
NULL, "Failed to create flow"); + goto fail; + } + } else { + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to create flow"); + goto fail; + } + plt_rep_dbg("Flow %p created successfully", adata.u.data); + + return flow; +fail: + return NULL; +} + +struct rte_flow_ops cnxk_rep_flow_ops = { + .create = cnxk_rep_flow_create, +}; diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h index 277e25d92a..d27a234e48 100644 --- a/drivers/net/cnxk/cnxk_rep_msg.h +++ b/drivers/net/cnxk/cnxk_rep_msg.h @@ -12,6 +12,10 @@ typedef enum CNXK_TYPE { CNXK_TYPE_HEADER = 0, CNXK_TYPE_MSG, + CNXK_TYPE_ATTR, + CNXK_TYPE_PATTERN, + CNXK_TYPE_ACTION, + CNXK_TYPE_FLOW } cnxk_type_t; typedef enum CNXK_REP_MSG { @@ -23,6 +27,8 @@ typedef enum CNXK_REP_MSG { CNXK_REP_MSG_ETH_SET_MAC, CNXK_REP_MSG_ETH_STATS_GET, CNXK_REP_MSG_ETH_STATS_CLEAR, + /* Flow operation msgs */ + CNXK_REP_MSG_FLOW_CREATE, /* End of messaging sequence */ CNXK_REP_MSG_END, } cnxk_rep_msg_t; @@ -96,6 +102,27 @@ typedef struct cnxk_rep_msg_eth_stats_meta { uint16_t portid; } __rte_packed cnxk_rep_msg_eth_stats_meta_t; +/* Flow create msg meta */ +typedef struct cnxk_rep_msg_flow_create_meta { + uint16_t portid; + uint16_t nb_pattern; + uint16_t nb_action; +} __rte_packed cnxk_rep_msg_flow_create_meta_t; + +/* Type pattern meta */ +typedef struct cnxk_pattern_hdr { + uint16_t type; + uint16_t spec_sz; + uint16_t last_sz; + uint16_t mask_sz; +} __rte_packed cnxk_pattern_hdr_t; + +/* Type action meta */ +typedef struct cnxk_action_hdr { + uint16_t type; + uint16_t conf_sz; +} __rte_packed cnxk_action_hdr_t; + void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type, uint32_t size); void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz, diff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c index 0ba4d55398..8bcb689468 100644 --- a/drivers/net/cnxk/cnxk_rep_ops.c +++ b/drivers/net/cnxk/cnxk_rep_ops.c @@ -647,7 +647,8 @@ int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops) { PLT_SET_USED(ethdev); - PLT_SET_USED(ops); + *ops = &cnxk_rep_flow_ops; + return 0; } diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build index 9ca7732713..8cc06f4967 100644 --- a/drivers/net/cnxk/meson.build +++ b/drivers/net/cnxk/meson.build @@ -39,6 +39,7 @@ sources = files( 'cnxk_rep.c', 'cnxk_rep_msg.c', 'cnxk_rep_ops.c', + 'cnxk_rep_flow.c', 'cnxk_stats.c', 'cnxk_tm.c', ) From patchwork Sun Mar 3 17:38:33 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137837 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id F1D9143C01; Sun, 3 Mar 2024 18:41:35 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id DB2F3436BE; Sun, 3 Mar 2024 18:39:51 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id D991D406BA for ; Sun, 3 Mar 2024 18:39:48 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.17.1.24/8.17.1.24) with ESMTP id 423DOJuQ029127 for ; Sun, 3 Mar 2024 09:39:48 -0800 DKIM-Signature: v=1; 
From patchwork Sun Mar 3 17:38:33 2024 X-Patchwork-Submitter: Harman Kalra X-Patchwork-Id: 137837 X-Patchwork-Delegate: jerinj@marvell.com From: Harman Kalra To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao , Harman Kalra CC: Subject: [PATCH v6 23/23] net/cnxk: other flow operations Date: Sun, 3 Mar 2024 23:08:33 +0530 Message-ID: <20240303173833.100039-24-hkalra@marvell.com> In-Reply-To: <20240303173833.100039-1-hkalra@marvell.com> References: <20230811163419.165790-1-hkalra@marvell.com> <20240303173833.100039-1-hkalra@marvell.com> Implementing other flow operations - validate, destroy, query, flush, dump for representor ports Signed-off-by: Harman Kalra --- doc/guides/rel_notes/release_24_03.rst | 1 + drivers/net/cnxk/cnxk_rep_flow.c | 414 +++++++++++++++++++++++++ drivers/net/cnxk/cnxk_rep_msg.h | 32 ++ 3 files changed, 447 insertions(+) diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst index 39ffef11b0..2b160cfd0f 100644 --- a/doc/guides/rel_notes/release_24_03.rst +++ b/doc/guides/rel_notes/release_24_03.rst @@ -112,6 +112,7 @@ New Features * Added support for ``RTE_FLOW_ACTION_TYPE_SAMPLE`` flow item. * Added support for Rx inject. * Optimized SW external mbuf free for better performance and avoid SQ corruption. + * Added support for port representors. * **Updated Marvell OCTEON EP driver.**
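Of the callbacks added below, query is the most constrained: the representor implementation accepts only RTE_FLOW_ACTION_TYPE_COUNT. The following sketch shows how an application might read and reset such a counter on a representor port; the helper name and the minimal error handling are illustrative assumptions.

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_flow.h>

    /* Query the COUNT action of a previously created flow and reset it. */
    static int
    read_and_clear_counter(uint16_t rep_port_id, struct rte_flow *flow)
    {
            struct rte_flow_query_count count = { .reset = 1 };
            const struct rte_flow_action count_action[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error err;
            int rc;

            rc = rte_flow_query(rep_port_id, flow, count_action, &count, &err);
            if (rc == 0 && count.hits_set)
                    printf("flow hits: %" PRIu64 "\n", count.hits);

            return rc;
    }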
diff --git a/drivers/net/cnxk/cnxk_rep_flow.c b/drivers/net/cnxk/cnxk_rep_flow.c index 2613be5b9e..d26f5aa12c 100644 --- a/drivers/net/cnxk/cnxk_rep_flow.c +++ b/drivers/net/cnxk/cnxk_rep_flow.c @@ -267,6 +267,222 @@ populate_action_data(void *buffer, uint32_t *length, const struct rte_flow_actio *length = len; } +static int +process_flow_destroy(struct cnxk_rep_dev *rep_dev, void *flow, cnxk_rep_msg_ack_data_t *adata) +{ + cnxk_rep_msg_flow_destroy_meta_t msg_fd_meta; + uint32_t len = 0, rc; + void *buffer; + size_t size; + + /* If representor not representing any active VF, return 0 */ + if (!rep_dev->is_vf_active) + return 0; + + size = MAX_BUFFER_SIZE; + buffer = plt_zmalloc(size, 0); + if (!buffer) { + plt_err("Failed to allocate mem"); + rc = -ENOMEM; + goto fail; + } + + cnxk_rep_msg_populate_header(buffer, &len); + + msg_fd_meta.portid = rep_dev->rep_id; + msg_fd_meta.flow = (uint64_t)flow; + plt_rep_dbg("Flow Destroy: flow 0x%" PRIx64 ", portid %d", msg_fd_meta.flow, + msg_fd_meta.portid); + cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_fd_meta, + sizeof(cnxk_rep_msg_flow_destroy_meta_t), + CNXK_REP_MSG_FLOW_DESTROY); + cnxk_rep_msg_populate_msg_end(buffer, &len); + + rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata); + if (rc) { + plt_err("Failed to process the message, err %d", rc); + goto fail; + } + + return 0; +fail: + return rc; +} + +static int +copy_flow_dump_file(FILE *target) +{ + FILE *source = NULL; + int pos; + int ch; + + source = fopen(DEFAULT_DUMP_FILE_NAME, "r"); + if (source == NULL) { + plt_err("Failed to read default dump file: %s, err %d", DEFAULT_DUMP_FILE_NAME, + errno); + return errno; + } + + fseek(source, 0L, SEEK_END); + pos = ftell(source); + fseek(source, 0L, SEEK_SET); + while (pos--) { + ch = fgetc(source); + fputc(ch, target); + } + + fclose(source); + + /* Remove the default file after reading */ + remove(DEFAULT_DUMP_FILE_NAME); + + return 0; +} + +static int +process_flow_dump(struct cnxk_rep_dev *rep_dev, struct rte_flow *flow, FILE *file, + cnxk_rep_msg_ack_data_t *adata) +{ + cnxk_rep_msg_flow_dump_meta_t msg_fp_meta; + uint32_t len = 0, rc; + void *buffer; + size_t size; + + size = MAX_BUFFER_SIZE; + buffer = plt_zmalloc(size, 0); + if (!buffer) { + plt_err("Failed to allocate mem"); + rc = -ENOMEM; + goto fail; + } + + cnxk_rep_msg_populate_header(buffer, &len); + + msg_fp_meta.portid = rep_dev->rep_id; + msg_fp_meta.flow = (uint64_t)flow; + msg_fp_meta.is_stdout = (file == stdout) ? 1 : 0;
+ + plt_rep_dbg("Flow Dump: flow 0x%" PRIx64 ", portid %d stdout %d", msg_fp_meta.flow, + msg_fp_meta.portid, msg_fp_meta.is_stdout); + cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_fp_meta, + sizeof(cnxk_rep_msg_flow_dump_meta_t), + CNXK_REP_MSG_FLOW_DUMP); + cnxk_rep_msg_populate_msg_end(buffer, &len); + + rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata); + if (rc) { + plt_err("Failed to process the message, err %d", rc); + goto fail; + } + + /* Copy contents from default file to user file */ + if (file != stdout) + copy_flow_dump_file(file); + + return 0; +fail: + return rc; +} + +static int +process_flow_flush(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t *adata) +{ + cnxk_rep_msg_flow_flush_meta_t msg_ff_meta; + uint32_t len = 0, rc; + void *buffer; + size_t size; + + size = MAX_BUFFER_SIZE; + buffer = plt_zmalloc(size, 0); + if (!buffer) { + plt_err("Failed to allocate mem"); + rc = -ENOMEM; + goto fail; + } + + cnxk_rep_msg_populate_header(buffer, &len); + + msg_ff_meta.portid = rep_dev->rep_id; + plt_rep_dbg("Flow Flush: portid %d", msg_ff_meta.portid); + cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_ff_meta, + sizeof(cnxk_rep_msg_flow_flush_meta_t), + CNXK_REP_MSG_FLOW_FLUSH); + cnxk_rep_msg_populate_msg_end(buffer, &len); + + rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata); + if (rc) { + plt_err("Failed to process the message, err %d", rc); + goto fail; + } + + return 0; +fail: + return rc; +} + +static int +process_flow_query(struct cnxk_rep_dev *rep_dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *data, cnxk_rep_msg_ack_data_t *adata) +{ + cnxk_rep_msg_flow_query_meta_t *msg_fq_meta; + struct rte_flow_query_count *query = data; + uint32_t len = 0, rc, sz, total_sz; + uint64_t action_data[BUFSIZ]; + void *buffer; + size_t size; + + size = MAX_BUFFER_SIZE; + buffer = plt_zmalloc(size, 0); + if (!buffer) { + plt_err("Failed to allocate mem"); + rc = -ENOMEM; + goto fail; + } + + cnxk_rep_msg_populate_header(buffer, &len); + + memset(action_data, 0, BUFSIZ * sizeof(uint64_t)); + sz = prepare_action_data(action, 1, action_data); + total_sz = sz + sizeof(cnxk_rep_msg_flow_query_meta_t); + + msg_fq_meta = plt_zmalloc(total_sz, 0); + if (!msg_fq_meta) { + plt_err("Failed to allocate memory"); + rc = -ENOMEM; + goto fail; + } + + msg_fq_meta->portid = rep_dev->rep_id; + msg_fq_meta->reset = query->reset; + msg_fq_meta->flow = (uint64_t)flow; + /* Populate the action data */ + rte_memcpy(msg_fq_meta->action_data, action_data, sz); + msg_fq_meta->action_data_sz = sz; + + plt_rep_dbg("Flow query: flow 0x%" PRIx64 ", portid %d, action type %d total sz %d " + "action sz %d", msg_fq_meta->flow, msg_fq_meta->portid, action->type, total_sz, + sz); + cnxk_rep_msg_populate_command_meta(buffer, &len, msg_fq_meta, total_sz, + CNXK_REP_MSG_FLOW_QUERY); + cnxk_rep_msg_populate_msg_end(buffer, &len); + + rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata); + if (rc) { + plt_err("Failed to process the message, err %d", rc); + goto free; + } + + rte_free(msg_fq_meta); + + return 0; + +free: + rte_free(msg_fq_meta); +fail: + return rc; +} + static int process_flow_rule(struct cnxk_rep_dev *rep_dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], @@ -396,6 +612,204 @@ cnxk_rep_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *at return NULL; } +static int +cnxk_rep_flow_validate(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[], const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + cnxk_rep_msg_ack_data_t adata; + int rc = 0; + + /* If representor not representing any active VF, return 0 */ + if (!rep_dev->is_vf_active) { + rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Represented VF not active yet"); + return 0; + } + + if (rep_dev->native_repte) + return cnxk_flow_validate_common(eth_dev, attr, pattern, actions, error, true); + + rc = process_flow_rule(rep_dev, attr, pattern, actions, &adata, CNXK_REP_MSG_FLOW_VALIDATE); + if (!rc || adata.u.sval < 0) { + if (adata.u.sval < 0) { + rc = (int)adata.u.sval; + rte_flow_error_set(error, adata.u.sval, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to validate flow"); + goto fail; + } + } else { + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to validate flow"); + goto fail; + } + + plt_rep_dbg("Flow %p validated successfully", adata.u.data); + +fail: + return rc; +} + +static int +cnxk_rep_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + cnxk_rep_msg_ack_data_t adata; + int rc; + + /* If representor not representing any active VF, return 0 */ + if (!rep_dev->is_vf_active) { + rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Represented VF not active yet"); + return 0; + } + + if (rep_dev->native_repte) + return cnxk_flow_destroy_common(eth_dev, (struct roc_npc_flow *)flow, error, true); + + rc = process_flow_destroy(rep_dev, flow, &adata); + if (rc || adata.u.sval < 0) { + if (adata.u.sval < 0) + rc = adata.u.sval; + + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to destroy flow"); + goto fail; + } + + return 0; +fail: + return rc; +} + +static int +cnxk_rep_flow_query(struct rte_eth_dev *eth_dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *data, struct rte_flow_error *error) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + cnxk_rep_msg_ack_data_t adata; + int rc; + + /* If representor not representing any active VF, return 0 */ + if (!rep_dev->is_vf_active) { + rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Represented VF not active yet"); + return 0; + } + + if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) { + rc = -ENOTSUP; + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Only COUNT is supported in query"); + goto fail; + } + + if (rep_dev->native_repte) + return cnxk_flow_query_common(eth_dev, flow, action, data, error, true); + + rc = process_flow_query(rep_dev, flow, action, data, &adata); + if (rc || adata.u.sval < 0) { + if (adata.u.sval < 0) + rc = adata.u.sval; + + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to query the flow"); + goto fail; + } + + rte_memcpy(data, adata.u.data, adata.size); + + return 0; +fail: + return rc; +} + +static int +cnxk_rep_flow_flush(struct rte_eth_dev *eth_dev, struct rte_flow_error *error) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + cnxk_rep_msg_ack_data_t adata; + int rc; + + /* If representor not representing any active VF, return 0 */ + if (!rep_dev->is_vf_active) { + rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Represented VF not active yet"); + return 0; + }
+ + if (rep_dev->native_repte) + return cnxk_flow_flush_common(eth_dev, error, true); + + rc = process_flow_flush(rep_dev, &adata); + if (rc || adata.u.sval < 0) { + if (adata.u.sval < 0) + rc = adata.u.sval; + + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to flush flows"); + goto fail; + } + + return 0; +fail: + return rc; +} + +static int +cnxk_rep_flow_dev_dump(struct rte_eth_dev *eth_dev, struct rte_flow *flow, FILE *file, + struct rte_flow_error *error) +{ + struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev); + cnxk_rep_msg_ack_data_t adata; + int rc; + + /* If representor not representing any active VF, return 0 */ + if (!rep_dev->is_vf_active) { + rte_flow_error_set(error, -EAGAIN, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Represented VF not active yet"); + return 0; + } + + if (rep_dev->native_repte) + return cnxk_flow_dev_dump_common(eth_dev, flow, file, error, true); + + rc = process_flow_dump(rep_dev, flow, file, &adata); + if (rc || adata.u.sval < 0) { + if (adata.u.sval < 0) + rc = adata.u.sval; + + rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to dump flow"); + goto fail; + } + + return 0; +fail: + return rc; +} + +static int +cnxk_rep_flow_isolate(struct rte_eth_dev *eth_dev __rte_unused, int enable __rte_unused, + struct rte_flow_error *error) +{ + /* If supported, we would need to uninstall the default MCAM + * entry for this port. + */ + + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Flow isolation not supported"); + + return -rte_errno; +} + struct rte_flow_ops cnxk_rep_flow_ops = { + .validate = cnxk_rep_flow_validate, .create = cnxk_rep_flow_create, + .destroy = cnxk_rep_flow_destroy, + .query = cnxk_rep_flow_query, + .flush = cnxk_rep_flow_flush, + .isolate = cnxk_rep_flow_isolate, + .dev_dump = cnxk_rep_flow_dev_dump, }; diff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h index d27a234e48..bfd9ce9c7b 100644 --- a/drivers/net/cnxk/cnxk_rep_msg.h +++ b/drivers/net/cnxk/cnxk_rep_msg.h @@ -29,6 +29,11 @@ typedef enum CNXK_REP_MSG { CNXK_REP_MSG_ETH_STATS_CLEAR, /* Flow operation msgs */ CNXK_REP_MSG_FLOW_CREATE, + CNXK_REP_MSG_FLOW_DESTROY, + CNXK_REP_MSG_FLOW_VALIDATE, + CNXK_REP_MSG_FLOW_FLUSH, + CNXK_REP_MSG_FLOW_DUMP, + CNXK_REP_MSG_FLOW_QUERY, /* End of messaging sequence */ CNXK_REP_MSG_END, } cnxk_rep_msg_t; @@ -109,6 +114,33 @@ typedef struct cnxk_rep_msg_flow_create_meta { uint16_t nb_action; } __rte_packed cnxk_rep_msg_flow_create_meta_t; +/* Flow destroy msg meta */ +typedef struct cnxk_rep_msg_flow_destroy_meta { + uint64_t flow; + uint16_t portid; +} __rte_packed cnxk_rep_msg_flow_destroy_meta_t; + +/* Flow flush msg meta */ +typedef struct cnxk_rep_msg_flow_flush_meta { + uint16_t portid; +} __rte_packed cnxk_rep_msg_flow_flush_meta_t; + +/* Flow dump msg meta */ +typedef struct cnxk_rep_msg_flow_dump_meta { + uint64_t flow; + uint16_t portid; + uint8_t is_stdout; +} __rte_packed cnxk_rep_msg_flow_dump_meta_t; + +/* Flow query msg meta */ +typedef struct cnxk_rep_msg_flow_query_meta { + uint64_t flow; + uint16_t portid; + uint8_t reset; + uint32_t action_data_sz; + uint8_t action_data[]; +} __rte_packed cnxk_rep_msg_flow_query_meta_t; + /* Type pattern meta */ typedef struct cnxk_pattern_hdr { uint16_t type;