From patchwork Tue Jan 14 12:01:27 2020
X-Patchwork-Submitter: "Wang, Haiyue"
X-Patchwork-Id: 64650
From: Haiyue Wang
To: dev@dpdk.org, xiaolong.ye@intel.com, qi.z.zhang@intel.com, qiming.yang@intel.com
Cc: Haiyue Wang
Date: Tue, 14 Jan 2020 20:01:27 +0800
Message-Id: <20200114120130.29411-2-haiyue.wang@intel.com>
In-Reply-To: <20200114120130.29411-1-haiyue.wang@intel.com>
References: <20200114052258.78791-1-haiyue.wang@intel.com>
 <20200114120130.29411-1-haiyue.wang@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/4] net/iavf: unify the bool type value

Replaces the redefined TRUE and FALSE values with standard ones to match
the 'bool' type definition.
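A quick illustration of the change (this sketch is not part of the patch; the
structure and macro below are simplified stand-ins, not the real driver
definitions): with C99 <stdbool.h> in use, assigning the standard true/false
keeps the value source consistent with the 'bool' fields and return types it
feeds, while behaving identically, since true and false expand to the integer
constants 1 and 0.

/*
 * Minimal sketch, not part of the patch: the point of replacing the
 * redefined TRUE/FALSE with C99 true/false.  All names below are
 * illustrative stand-ins, not the real driver definitions.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a TRUE macro redefined by shared base-code headers. */
#ifndef TRUE
#define TRUE 1
#endif

/* Simplified stand-in for the VF state the patch touches. */
struct example_vf {
	bool promisc_unicast_enabled;
};

int main(void)
{
	struct example_vf vf = { .promisc_unicast_enabled = false };

	/* Standard keyword: matches the 'bool' field it is stored in. */
	vf.promisc_unicast_enabled = true;

	/* Same stored value as the old macro (both are 1 here), so the
	 * change is about readability and consistency, not behaviour.
	 */
	printf("true == TRUE: %d\n", true == TRUE);
	printf("promisc unicast enabled: %d\n", vf.promisc_unicast_enabled);

	return 0;
}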
Signed-off-by: Haiyue Wang
Acked-by: Qiming Yang
---
 drivers/net/iavf/iavf_ethdev.c | 31 ++++++++++++++++---------------
 drivers/net/iavf/iavf_rxtx.c   | 34 +++++++++++++++++-----------------
 2 files changed, 33 insertions(+), 32 deletions(-)

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index f69c50df5..34913f9c4 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -454,7 +454,7 @@ iavf_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* Set all mac addrs */
-	iavf_add_del_all_mac_addr(adapter, TRUE);
+	iavf_add_del_all_mac_addr(adapter, true);
 
 	if (iavf_start_queues(dev) != 0) {
 		PMD_DRV_LOG(ERR, "enable queues failed");
@@ -464,7 +464,7 @@ iavf_dev_start(struct rte_eth_dev *dev)
 	return 0;
 
 err_mac:
-	iavf_add_del_all_mac_addr(adapter, FALSE);
+	iavf_add_del_all_mac_addr(adapter, false);
 err_queue:
 err_rss:
 	return -1;
@@ -493,7 +493,7 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	}
 
 	/* remove all mac addrs */
-	iavf_add_del_all_mac_addr(adapter, FALSE);
+	iavf_add_del_all_mac_addr(adapter, false);
 	adapter->stopped = 1;
 }
 
@@ -648,9 +648,9 @@ iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	if (vf->promisc_unicast_enabled)
 		return 0;
 
-	ret = iavf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
+	ret = iavf_config_promisc(adapter, true, vf->promisc_multicast_enabled);
 	if (!ret)
-		vf->promisc_unicast_enabled = TRUE;
+		vf->promisc_unicast_enabled = true;
 	else
 		ret = -EAGAIN;
 
@@ -668,9 +668,10 @@ iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	if (!vf->promisc_unicast_enabled)
 		return 0;
 
-	ret = iavf_config_promisc(adapter, FALSE, vf->promisc_multicast_enabled);
+	ret = iavf_config_promisc(adapter, false,
+				  vf->promisc_multicast_enabled);
 	if (!ret)
-		vf->promisc_unicast_enabled = FALSE;
+		vf->promisc_unicast_enabled = false;
 	else
 		ret = -EAGAIN;
 
@@ -688,9 +689,9 @@ iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
 	if (vf->promisc_multicast_enabled)
 		return 0;
 
-	ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
+	ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, true);
 	if (!ret)
-		vf->promisc_multicast_enabled = TRUE;
+		vf->promisc_multicast_enabled = true;
 	else
 		ret = -EAGAIN;
 
@@ -708,9 +709,9 @@ iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
 	if (!vf->promisc_multicast_enabled)
 		return 0;
 
-	ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
+	ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, false);
 	if (!ret)
-		vf->promisc_multicast_enabled = FALSE;
+		vf->promisc_multicast_enabled = false;
 	else
 		ret = -EAGAIN;
 
@@ -732,7 +733,7 @@ iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
 		return -EINVAL;
 	}
 
-	err = iavf_add_del_eth_addr(adapter, addr, TRUE);
+	err = iavf_add_del_eth_addr(adapter, addr, true);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to add MAC address");
 		return -EIO;
@@ -754,7 +755,7 @@ iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 
 	addr = &dev->data->mac_addrs[index];
 
-	err = iavf_add_del_eth_addr(adapter, addr, FALSE);
+	err = iavf_add_del_eth_addr(adapter, addr, false);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to delete MAC address");
 
@@ -979,7 +980,7 @@ iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
 	if (rte_is_valid_assigned_ether_addr(perm_addr))
 		return -EPERM;
 
-	ret = iavf_add_del_eth_addr(adapter, old_addr, FALSE);
+	ret = iavf_add_del_eth_addr(adapter, old_addr, false);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
 			    " %02X:%02X:%02X:%02X:%02X:%02X",
@@ -990,7 +991,7 @@ iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
 			    old_addr->addr_bytes[4], old_addr->addr_bytes[5]);
 
-	ret = iavf_add_del_eth_addr(adapter, mac_addr, TRUE);
+	ret = iavf_add_del_eth_addr(adapter, mac_addr, true);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Fail to add new MAC:"
 			    " %02X:%02X:%02X:%02X:%02X:%02X",
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 067290db4..85d9a8e3b 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -95,11 +95,11 @@ check_rx_vec_allow(struct iavf_rx_queue *rxq)
 	if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
 	    rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
 		PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
-		return TRUE;
+		return true;
 	}
 
 	PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
-	return FALSE;
+	return false;
 }
 
 static inline bool
@@ -109,29 +109,29 @@ check_tx_vec_allow(struct iavf_tx_queue *txq)
 	    txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
 	    txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
 		PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
-		return TRUE;
+		return true;
 	}
 
 	PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
-	return FALSE;
+	return false;
 }
 
 static inline bool
 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
 {
-	int ret = TRUE;
+	int ret = true;
 
 	if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
 			     "rxq->rx_free_thresh=%d, "
 			     "IAVF_RX_MAX_BURST=%d",
 			     rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
-		ret = FALSE;
+		ret = false;
 	} else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
 			     "rxq->nb_rx_desc=%d, "
 			     "rxq->rx_free_thresh=%d",
 			     rxq->nb_rx_desc, rxq->rx_free_thresh);
-		ret = FALSE;
+		ret = false;
 	}
 
 	return ret;
 }
@@ -390,12 +390,12 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->mz = mz;
 	reset_rx_queue(rxq);
-	rxq->q_set = TRUE;
+	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
 	rxq->ops = &def_rxq_ops;
 
-	if (check_rx_bulk_allow(rxq) == TRUE) {
+	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 			     "satisfied. Rx Burst Bulk Alloc function will be "
 			     "used on port=%d, queue=%d.",
@@ -408,7 +408,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		ad->rx_bulk_alloc_allowed = false;
 	}
 
-	if (check_rx_vec_allow(rxq) == FALSE)
+	if (check_rx_vec_allow(rxq) == false)
 		ad->rx_vec_allowed = false;
 
 	return 0;
@@ -500,12 +500,12 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->mz = mz;
 	reset_tx_queue(txq);
-	txq->q_set = TRUE;
+	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
 	txq->ops = &def_txq_ops;
 
-	if (check_tx_vec_allow(txq) == FALSE) {
+	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
 			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 		ad->tx_vec_allowed = false;
@@ -543,7 +543,7 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	IAVF_WRITE_FLUSH(hw);
 
 	/* Ready to switch the queue on */
-	err = iavf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
+	err = iavf_switch_queue(adapter, rx_queue_id, true, true);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
 			    rx_queue_id);
@@ -575,7 +575,7 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	IAVF_WRITE_FLUSH(hw);
 
 	/* Ready to switch the queue on */
-	err = iavf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
+	err = iavf_switch_queue(adapter, tx_queue_id, false, true);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
@@ -600,7 +600,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (rx_queue_id >= dev->data->nb_rx_queues)
 		return -EINVAL;
 
-	err = iavf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
+	err = iavf_switch_queue(adapter, rx_queue_id, true, false);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
 			    rx_queue_id);
@@ -628,7 +628,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -EINVAL;
 
-	err = iavf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
+	err = iavf_switch_queue(adapter, tx_queue_id, false, false);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
 			    tx_queue_id);
@@ -1815,7 +1815,7 @@ iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->nb_desc = rxq->nb_rx_desc;
 
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
-	qinfo->conf.rx_drop_en = TRUE;
+	qinfo->conf.rx_drop_en = true;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
 }

From patchwork Tue Jan 14 12:01:28 2020
X-Patchwork-Submitter: "Wang, Haiyue"
X-Patchwork-Id: 64648
From: Haiyue Wang
To: dev@dpdk.org, xiaolong.ye@intel.com, qi.z.zhang@intel.com, qiming.yang@intel.com
Cc: Haiyue Wang
Date: Tue, 14 Jan 2020 20:01:28 +0800
Message-Id: <20200114120130.29411-3-haiyue.wang@intel.com>
In-Reply-To: <20200114120130.29411-1-haiyue.wang@intel.com>
References: <20200114052258.78791-1-haiyue.wang@intel.com>
 <20200114120130.29411-1-haiyue.wang@intel.com>
Subject: [dpdk-dev] [PATCH v2 2/4] net/ice: unify the bool type value

Replaces the redefined TRUE and FALSE values with standard ones to match
the 'bool' type definition.

Signed-off-by: Haiyue Wang
Acked-by: Qiming Yang
---
 drivers/net/ice/ice_ethdev.c | 22 +++++++++++-----------
 drivers/net/ice/ice_rxtx.c   | 16 ++++++++--------
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index f99eb4e1b..8e9369e0a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1720,7 +1720,7 @@ ice_pf_setup(struct ice_pf *pf)
 	uint16_t unused;
 
 	/* Clear all stats counters */
-	pf->offset_loaded = FALSE;
+	pf->offset_loaded = false;
 	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
 	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
 	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
@@ -2234,16 +2234,16 @@ ice_dev_init(struct rte_eth_dev *dev)
 	vsi = pf->main_vsi;
 
 	/* Disable double vlan by default */
-	ice_vsi_config_double_vlan(vsi, FALSE);
+	ice_vsi_config_double_vlan(vsi, false);
 
-	ret = ice_aq_stop_lldp(hw, TRUE, FALSE, NULL);
+	ret = ice_aq_stop_lldp(hw, true, false, NULL);
 	if (ret != ICE_SUCCESS)
 		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
-	ret = ice_init_dcb(hw, TRUE);
+	ret = ice_init_dcb(hw, true);
 	if (ret != ICE_SUCCESS)
 		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
 	/* Forward LLDP packets to default VSI */
-	ret = ice_vsi_config_sw_lldp(vsi, TRUE);
+	ret = ice_vsi_config_sw_lldp(vsi, true);
 	if (ret != ICE_SUCCESS)
 		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
 	/* register callback func to eal lib */
@@ -3449,23 +3449,23 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	rxmode = &dev->data->dev_conf.rxmode;
 	if (mask & ETH_VLAN_FILTER_MASK) {
 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-			ice_vsi_config_vlan_filter(vsi, TRUE);
+			ice_vsi_config_vlan_filter(vsi, true);
 		else
-			ice_vsi_config_vlan_filter(vsi, FALSE);
+			ice_vsi_config_vlan_filter(vsi, false);
 	}
 
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			ice_vsi_config_vlan_stripping(vsi, TRUE);
+			ice_vsi_config_vlan_stripping(vsi, true);
 		else
-			ice_vsi_config_vlan_stripping(vsi, FALSE);
+			ice_vsi_config_vlan_stripping(vsi, false);
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
-			ice_vsi_config_double_vlan(vsi, TRUE);
+			ice_vsi_config_double_vlan(vsi, true);
 		else
-			ice_vsi_config_double_vlan(vsi, FALSE);
+			ice_vsi_config_double_vlan(vsi, false);
 	}
 
 	return 0;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ce499af43..ad3cb9c46 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -424,7 +424,7 @@ ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	/* Init the RX tail register. */
 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
-	err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
 			    rx_queue_id);
@@ -450,7 +450,7 @@ ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (rx_queue_id < dev->data->nb_rx_queues) {
 		rxq = dev->data->rx_queues[rx_queue_id];
 
-		err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+		err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
 				    rx_queue_id);
@@ -630,7 +630,7 @@ ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	/* Init the RX tail register. */
 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
-	err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
 			    rx_queue_id);
@@ -816,7 +816,7 @@ ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxq = pf->fdir.rxq;
 
-	err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
 			    rx_queue_id);
@@ -973,7 +973,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	ice_reset_rx_queue(rxq);
-	rxq->q_set = TRUE;
+	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
@@ -1186,7 +1186,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	ice_reset_tx_queue(txq);
-	txq->q_set = TRUE;
+	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
 	ice_set_tx_function_flag(dev, txq);
@@ -2043,7 +2043,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
 	 * don't need to allocate software ring and reset for the fdir
 	 * program queue just set the queue has been configured.
 	 */
-	txq->q_set = TRUE;
+	txq->q_set = true;
 	pf->fdir.txq = txq;
 
 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
@@ -2104,7 +2104,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
 	 * Don't need to allocate software ring and reset for the fdir
 	 * rx queue, just set the queue has been configured.
 	 */
-	rxq->q_set = TRUE;
+	rxq->q_set = true;
 	pf->fdir.rxq = rxq;
 
 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
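A side note on the converted comparisons: helpers such as check_rx_vec_allow()
and check_rx_bulk_allow() already return bool, so their result can also be
tested directly in an if-condition; the series keeps the explicit '== true' /
'== false' form. The sketch below is only an illustration with made-up names
and a placeholder threshold, not code from the drivers:

/*
 * Illustrative sketch only -- not part of the series.  It shows the
 * direct-test idiom for a bool-returning precondition check; the patches
 * above keep the explicit '== true' / '== false' comparisons.
 */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's bulk-alloc precondition check;
 * 32 is just a placeholder threshold.
 */
static bool check_rx_bulk_allow_example(unsigned int rx_free_thresh,
					unsigned int nb_rx_desc)
{
	/* Bulk allocation needs a large enough free threshold and a
	 * descriptor count that is a multiple of it.
	 */
	return rx_free_thresh >= 32 && nb_rx_desc % rx_free_thresh == 0;
}

int main(void)
{
	unsigned int rx_free_thresh = 32, nb_rx_desc = 512;

	/* A bool result can be tested directly ... */
	if (check_rx_bulk_allow_example(rx_free_thresh, nb_rx_desc))
		printf("bulk alloc path allowed\n");

	/* ... which gives the same result as the explicit comparison
	 * style kept by the patches, e.g. check_rx_bulk_allow(rxq) == true.
	 */
	if (check_rx_bulk_allow_example(rx_free_thresh, nb_rx_desc) == true)
		printf("explicit comparison agrees\n");

	return 0;
}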