From patchwork Thu Jun 16 07:07:39 2022
X-Patchwork-Submitter: Nithin Dabilpuram
X-Patchwork-Id: 112843
X-Patchwork-Delegate: jerinj@marvell.com
From: Nithin Dabilpuram
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Subject: [PATCH 08/12] net/cnxk: remove restriction on VFs for PFC config
Date: Thu, 16 Jun 2022 12:37:39 +0530
Message-ID: <20220616070743.30658-8-ndabilpuram@marvell.com>
X-Mailer: git-send-email 2.8.4
In-Reply-To: <20220616070743.30658-1-ndabilpuram@marvell.com>
References: <20220616070743.30658-1-ndabilpuram@marvell.com>

From: Sunil Kumar Kori

Currently, PFC configuration is not allowed on VFs.
This patch enables PFC configuration on VFs.

Signed-off-by: Sunil Kumar Kori
---
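A usage sketch for reviewers (illustrative only, not part of the commit): with
this patch applied, a cnxk VF can be given per-queue PFC through the generic
ethdev calls. Port, queue, TC, and pause-time values below are assumptions,
and the per-queue PFC APIs are experimental in this DPDK release.

#include <rte_ethdev.h>

/* Sketch: enable PFC on TC 3 for Rx queue 0 and Tx queue 0 of a port
 * assumed to be a cnxk VF that is already configured and started.
 */
static int
setup_pfc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf = { .mode = RTE_ETH_FC_NONE };
	struct rte_eth_pfc_queue_conf pfc_conf = {
		.mode = RTE_ETH_FC_FULL,
		.rx_pause = { .tx_qid = 0, .tc = 3 },
		.tx_pause = { .rx_qid = 0, .tc = 3, .pause_time = 0x7ff },
	};
	int rc;

	/* The driver rejects PFC setup while 802.3x flow control is
	 * active ("Disable Flow Control before configuring PFC"), so
	 * turn link-level flow control off first.
	 */
	rc = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	if (rc)
		return rc;

	/* Map TC 3 onto the queue pair with pause in both directions */
	return rte_eth_dev_priority_flow_ctrl_queue_configure(port_id,
							      &pfc_conf);
}
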
 drivers/net/cnxk/cnxk_ethdev.c     |   9 +-
 drivers/net/cnxk/cnxk_ethdev.h     |  13 +--
 drivers/net/cnxk/cnxk_ethdev_ops.c | 219 +++++++++++++++++++++----------------
 3 files changed, 137 insertions(+), 104 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 09e5736..941b270 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -323,7 +323,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
 	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
 	int rc;
 
-	if (roc_nix_is_sdp(&dev->nix))
+	if (roc_nix_is_vf_or_sdp(&dev->nix))
 		return 0;
 
 	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
@@ -604,6 +604,9 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
 	rxq_sp->qconf.nb_desc = nb_desc;
 	rxq_sp->qconf.mp = mp;
+	rxq_sp->tc = 0;
+	rxq_sp->tx_pause = (dev->fc_cfg.mode == RTE_ETH_FC_FULL ||
+			    dev->fc_cfg.mode == RTE_ETH_FC_TX_PAUSE);
 
 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		/* Pass a tagmask used to handle error packets in inline device.
@@ -1795,7 +1798,6 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 		if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
 			pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
 			pfc_conf.rx_pause.tc = i;
-			pfc_conf.tx_pause.rx_qid = i;
 			pfc_conf.tx_pause.tc = i;
 			rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
 								      &pfc_conf);
@@ -1805,9 +1807,6 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 		}
 	}
 
-	fc_conf.mode = RTE_ETH_FC_FULL;
-	rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
-
 	/* Disable and free rte_meter entries */
 	nix_meter_fini(dev);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 0400d73..db2d849 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -156,13 +156,10 @@ struct cnxk_fc_cfg {
 };
 
 struct cnxk_pfc_cfg {
-	struct cnxk_fc_cfg fc_cfg;
 	uint16_t class_en;
 	uint16_t pause_time;
-	uint8_t rx_tc;
-	uint8_t rx_qid;
-	uint8_t tx_tc;
-	uint8_t tx_qid;
+	uint16_t rx_pause_en;
+	uint16_t tx_pause_en;
 };
 
 struct cnxk_eth_qconf {
@@ -669,8 +666,10 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
 				  uint32_t *prev_id, uint32_t *next_id,
 				  struct cnxk_mtr_policy_node *policy,
 				  int *tree_level);
-int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
-				     struct cnxk_pfc_cfg *conf);
+int nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+				   uint8_t tx_pause, uint8_t tc);
+int nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+				   uint8_t rx_pause, uint8_t tc);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 15d8e8e..caace9d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -225,15 +225,17 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
 	struct roc_nix *nix = &dev->nix;
 	struct roc_nix_fc_cfg fc_cfg;
 	struct roc_nix_cq *cq;
+	struct roc_nix_rq *rq;
 
 	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	rq = &dev->rqs[qid];
 	cq = &dev->cqs[qid];
-	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
-	fc_cfg.cq_cfg.enable = enable;
-	/* Map all CQs to last channel */
-	fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
-	fc_cfg.cq_cfg.rq = qid;
-	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+	fc_cfg.type = ROC_NIX_FC_RQ_CFG;
+	fc_cfg.rq_cfg.enable = enable;
+	fc_cfg.rq_cfg.tc = 0;
+	fc_cfg.rq_cfg.rq = qid;
+	fc_cfg.rq_cfg.pool = rq->aura_handle;
+	fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
 
 	return roc_nix_fc_config_set(nix, &fc_cfg);
 }
@@ -255,10 +257,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	uint8_t rx_pause, tx_pause;
 	int rc, i;
 
-	if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_lbk(nix)) {
-		plt_err("Flow control configuration is not allowed on VFs");
-		return -ENOTSUP;
-	}
+	if (roc_nix_is_sdp(nix))
+		return 0;
 
 	if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
 	    fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
@@ -266,14 +266,18 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (fc_conf->mode == fc->mode)
-		return 0;
 
 	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
 		   (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
 	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
 		   (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
 
+	if (fc_conf->mode == fc->mode) {
+		fc->rx_pause = rx_pause;
+		fc->tx_pause = tx_pause;
+		return 0;
+	}
+
 	/* Check if TX pause frame is already enabled or not */
 	if (fc->tx_pause ^ tx_pause) {
 		if (roc_model_is_cn96_ax() && data->dev_started) {
@@ -291,6 +295,7 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
 			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) - 1;
+			rxq->tx_pause = !!tx_pause;
 			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
 			if (rc)
 				return rc;
@@ -321,13 +326,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 	fc->rx_pause = rx_pause;
 	fc->tx_pause = tx_pause;
 	fc->mode = fc_conf->mode;
-
 	return rc;
 }
 
 int
 cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
-					   struct rte_eth_pfc_queue_info *pfc_info)
+					   struct rte_eth_pfc_queue_info *pfc_info)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 
@@ -338,25 +342,42 @@ cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
 
 int
 cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
-					 struct rte_eth_pfc_queue_conf *pfc_conf)
+					 struct rte_eth_pfc_queue_conf *pfc_conf)
 {
-	struct cnxk_pfc_cfg conf;
-	int rc;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
+	enum rte_eth_fc_mode mode;
+	uint8_t en, tc;
+	uint16_t qid;
+	int rc = 0;
 
-	memset(&conf, 0, sizeof(struct cnxk_pfc_cfg));
+	if (dev->fc_cfg.mode != RTE_ETH_FC_NONE) {
+		plt_err("Disable Flow Control before configuring PFC");
+		return -ENOTSUP;
+	}
 
-	conf.fc_cfg.mode = pfc_conf->mode;
+	if (roc_nix_is_sdp(nix)) {
+		plt_err("Prio flow ctrl config is not allowed on SDP");
+		return -ENOTSUP;
+	}
 
-	conf.pause_time = pfc_conf->tx_pause.pause_time;
-	conf.rx_tc = pfc_conf->tx_pause.tc;
-	conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+	mode = pfc_conf->mode;
 
-	conf.tx_tc = pfc_conf->rx_pause.tc;
-	conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+	/* Perform Tx pause configuration on RQ */
+	qid = pfc_conf->tx_pause.rx_qid;
+	if (qid < eth_dev->data->nb_rx_queues) {
+		en = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+		tc = pfc_conf->tx_pause.tc;
+		rc = nix_priority_flow_ctrl_rq_conf(eth_dev, qid, en, tc);
+	}
 
-	rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
-	if (rc)
-		return rc;
+	/* Perform Rx pause configuration on SQ */
+	qid = pfc_conf->rx_pause.tx_qid;
+	if (qid < eth_dev->data->nb_tx_queues) {
+		en = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+		tc = pfc_conf->rx_pause.tc;
+		rc |= nix_priority_flow_ctrl_sq_conf(eth_dev, qid, en, tc);
+	}
 
 	return rc;
 }
 
@@ -1026,11 +1047,9 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
 }
 
 int
-nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
-				 struct cnxk_pfc_cfg *conf)
+nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+			       uint8_t tx_pause, uint8_t tc)
 {
-	enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
-					   ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
@@ -1038,18 +1057,11 @@ nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
 	struct roc_nix_pfc_cfg pfc_cfg;
 	struct roc_nix_fc_cfg fc_cfg;
 	struct cnxk_eth_rxq_sp *rxq;
-	struct cnxk_eth_txq_sp *txq;
-	uint8_t rx_pause, tx_pause;
-	enum rte_eth_fc_mode mode;
+	enum roc_nix_fc_mode mode;
+	struct roc_nix_rq *rq;
 	struct roc_nix_cq *cq;
-	struct roc_nix_sq *sq;
 	int rc;
 
-	if (roc_nix_is_vf_or_sdp(nix)) {
-		plt_err("Prio flow ctrl config is not allowed on VF and SDP");
-		return -ENOTSUP;
-	}
-
 	if (roc_model_is_cn96_ax() && data->dev_started) {
 		/* On Ax, CQ should be in disabled state
 		 * while setting flow control configuration.
@@ -1059,39 +1071,83 @@ nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
 		return 0;
 	}
 
-	if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
-	    dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+	if (data->rx_queues == NULL)
+		return -EINVAL;
+
+	if (qid >= eth_dev->data->nb_rx_queues)
+		return -ENOTSUP;
+
+	/* Configure RQ */
+	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[qid]) - 1;
+	rq = &dev->rqs[qid];
+	cq = &dev->cqs[qid];
+
+	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+	fc_cfg.type = ROC_NIX_FC_RQ_CFG;
+	fc_cfg.rq_cfg.tc = tc;
+	fc_cfg.rq_cfg.enable = !!tx_pause;
+	fc_cfg.rq_cfg.rq = rq->qid;
+	fc_cfg.rq_cfg.pool = rxq->qconf.mp->pool_id;
+	fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
+	rc = roc_nix_fc_config_set(nix, &fc_cfg);
+	if (rc)
+		return rc;
+
+	if (rxq->tx_pause != tx_pause) {
+		if (tx_pause)
+			pfc->tx_pause_en++;
+		else
+			pfc->tx_pause_en--;
+	}
+
+	rxq->tx_pause = !!tx_pause;
+	rxq->tc = tc;
+
+	/* Skip if PFC already enabled in mac */
+	if (pfc->tx_pause_en > 1)
+		return 0;
+
+	/* Configure MAC block */
+	pfc->class_en = pfc->tx_pause_en ? 0xFF : 0x0;
+
+	if (pfc->rx_pause_en)
+		mode = pfc->tx_pause_en ? ROC_NIX_FC_FULL : ROC_NIX_FC_RX;
+	else
+		mode = pfc->tx_pause_en ? ROC_NIX_FC_TX : ROC_NIX_FC_NONE;
+
+	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+	pfc_cfg.mode = mode;
+	pfc_cfg.tc = pfc->class_en;
+	return roc_nix_pfc_mode_set(nix, &pfc_cfg);
+}
+
+int
+nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
+			       uint8_t rx_pause, uint8_t tc)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_eth_dev_data *data = eth_dev->data;
+	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_fc_cfg fc_cfg;
+	struct cnxk_eth_txq_sp *txq;
+	struct roc_nix_sq *sq;
+	int rc;
+
+	if (data->tx_queues == NULL)
+		return -EINVAL;
+
+	if (qid >= eth_dev->data->nb_tx_queues)
+		return -ENOTSUP;
+
+	if (dev->pfc_tc_sq_map[tc] != 0xFFFF &&
+	    dev->pfc_tc_sq_map[tc] != qid) {
 		plt_err("Same TC can not be configured on multiple SQs");
 		return -ENOTSUP;
 	}
 
-	mode = conf->fc_cfg.mode;
-	rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
-	tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
-
-	if (data->rx_queues == NULL || data->tx_queues == NULL) {
-		rc = 0;
-		goto exit;
-	}
-
-	/* Configure CQs */
-	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
-	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
-	cq = &dev->cqs[rxq->qid];
-	fc_cfg.type = ROC_NIX_FC_CQ_CFG;
-	fc_cfg.cq_cfg.tc = conf->rx_tc;
-	fc_cfg.cq_cfg.enable = !!tx_pause;
-	fc_cfg.cq_cfg.rq = cq->qid;
-	fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
-	rc = roc_nix_fc_config_set(nix, &fc_cfg);
-	if (rc)
-		goto exit;
-
 	/* Check if RX pause frame is enabled or not */
-	if (pfc->fc_cfg.rx_pause ^ rx_pause) {
-		if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
-			goto exit;
-
+	if (!pfc->rx_pause_en) {
 		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
 		    eth_dev->data->nb_tx_queues > 1) {
 			/*
@@ -1113,39 +1169,18 @@ nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
 		}
 	}
 
-	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[qid]) - 1;
 	sq = &dev->sqs[txq->qid];
 	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
 	fc_cfg.type = ROC_NIX_FC_TM_CFG;
 	fc_cfg.tm_cfg.sq = sq->qid;
-	fc_cfg.tm_cfg.tc = conf->tx_tc;
+	fc_cfg.tm_cfg.tc = tc;
 	fc_cfg.tm_cfg.enable = !!rx_pause;
 	rc = roc_nix_fc_config_set(nix, &fc_cfg);
 	if (rc)
 		return rc;
 
-	dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
-
-	/* Configure MAC block */
-	if (tx_pause)
-		pfc->class_en |= BIT(conf->rx_tc);
-	else
-		pfc->class_en &= ~BIT(conf->rx_tc);
-
-	if (pfc->class_en)
-		mode = RTE_ETH_FC_FULL;
-
-	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
-	pfc_cfg.mode = mode_map[mode];
-	pfc_cfg.tc = pfc->class_en;
-	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
-	if (rc)
-		return rc;
-
-	pfc->fc_cfg.rx_pause = rx_pause;
-	pfc->fc_cfg.tx_pause = tx_pause;
-	pfc->fc_cfg.mode = mode;
-
+	dev->pfc_tc_sq_map[tc] = sq->qid;
 exit:
 	return rc;
 }
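
For completeness, a similarly hedged sketch of the query side that pairs with
cnxk_nix_priority_flow_ctrl_queue_info_get(): an application can read back the
supported traffic-class count and pause modes before choosing TC-to-queue
mappings. The printed fields are illustrative.

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: print the per-queue PFC capabilities the PMD reports */
static void
show_pfc_caps(uint16_t port_id)
{
	struct rte_eth_pfc_queue_info info = { 0 };

	if (rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &info) == 0)
		printf("port %u: tc_max=%u, mode_capa=%d\n",
		       port_id, info.tc_max, (int)info.mode_capa);
}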