From patchwork Sat Oct 9 07:48:05 2021
X-Patchwork-Id: 100906
From: "Min Hu (Connor)"
Date: Sat, 9 Oct 2021 15:48:05 +0800
Message-ID: <20211009074805.15447-1-humin29@huawei.com>
Subject: [dpdk-dev] [PATCH] net/hns3: remove similar macro function definitions

From: Chengchang Tang

For different capabilities, we currently declare a separate macro function
for each capability to determine whether it is supported. This patch
declares a single unified macro function to check capabilities instead.
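For illustration only, here is a minimal, self-contained sketch of the
token-pasting pattern this patch adopts. Everything prefixed demo_ (the
demo_hw struct, the demo_get_bit() helper and the demo_dev_get_support()
macro) is a stand-in invented for this example and is not part of the
driver; the real macro is hns3_dev_get_support() in hns3_ethdev.h, shown
in the diff below.

#include <stdint.h>
#include <stdio.h>

/* Capability bit positions, mirroring the HNS3_DEV_SUPPORT_*_B naming. */
enum {
	DEMO_DEV_SUPPORT_DCB_B,
	DEMO_DEV_SUPPORT_COPPER_B,
	DEMO_DEV_SUPPORT_PTP_B,
};

/* Simplified stand-in for struct hns3_hw. */
struct demo_hw {
	uint64_t capability;
};

/* Simplified stand-in for hns3_get_bit(). */
static inline uint32_t
demo_get_bit(uint64_t value, uint32_t bit)
{
	return (uint32_t)((value >> bit) & 1UL);
}

/*
 * One unified check macro: the capability name is token-pasted into the
 * bit-position enumerator, so no per-capability wrapper macro is needed.
 */
#define demo_dev_get_support(hw, _name) \
	demo_get_bit((hw)->capability, DEMO_DEV_SUPPORT_##_name##_B)

int
main(void)
{
	struct demo_hw hw = { .capability = 1ULL << DEMO_DEV_SUPPORT_PTP_B };

	/* Expands to demo_get_bit((&hw)->capability, DEMO_DEV_SUPPORT_PTP_B). */
	printf("PTP supported: %u\n", demo_dev_get_support(&hw, PTP));
	printf("DCB supported: %u\n", demo_dev_get_support(&hw, DCB));

	return 0;
}

The pattern relies on callers passing the bare capability name (DCB,
COPPER, PTP, ...), which the preprocessor pastes into the existing
HNS3_DEV_SUPPORT_<name>_B enumerator, so adding a new capability no
longer requires adding a new wrapper macro.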
Signed-off-by: Chengchang Tang
Signed-off-by: Min Hu (Connor)
---
 drivers/net/hns3/hns3_cmd.c       |  6 ++---
 drivers/net/hns3/hns3_dcb.c       |  4 +--
 drivers/net/hns3/hns3_ethdev.c    | 24 +++++++++---------
 drivers/net/hns3/hns3_ethdev.h    | 41 ++-----------------------------
 drivers/net/hns3/hns3_ethdev_vf.c |  6 ++---
 drivers/net/hns3/hns3_flow.c      |  2 +-
 drivers/net/hns3/hns3_intr.c      |  2 +-
 drivers/net/hns3/hns3_ptp.c       | 18 +++++++-------
 drivers/net/hns3/hns3_rxtx.c      | 32 ++++++++++++------------
 drivers/net/hns3/hns3_rxtx_vec.c  |  4 +--
 drivers/net/hns3/hns3_tm.c        | 10 ++++----
 11 files changed, 56 insertions(+), 93 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 6a1e634684..50769c6226 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -617,7 +617,7 @@ hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result)
 static int
 hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
 {
-	if (result != 0 && hns3_dev_copper_supported(hw)) {
+	if (result != 0 && hns3_dev_get_support(hw, COPPER)) {
 		hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
 			 result);
 		return result;
@@ -656,7 +656,7 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
 	}
 	if (revision == PCI_REVISION_ID_HIP09_A) {
 		struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
-		if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) {
+		if (hns3_dev_get_support(hw, COPPER) == 0 || pf->is_tmp_phy) {
 			PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
 			pf->is_tmp_phy = true;
 			hns3_set_bit(hw->capability,
@@ -674,7 +674,7 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
 	if (is_init) {
 		hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
 		hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
-		if (hns3_dev_copper_supported(hw))
+		if (hns3_dev_get_support(hw, COPPER))
 			hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
 	}
 	req->compat = rte_cpu_to_le_32(compat);
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index b71e2e9ea4..8753c340e7 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -918,7 +918,7 @@ hns3_dcb_pri_dwrr_cfg(struct hns3_hw *hw)
 	if (ret)
 		return ret;
 
-	if (!hns3_dev_dcb_supported(hw))
+	if (!hns3_dev_get_support(hw, DCB))
 		return 0;
 
 	ret = hns3_dcb_ets_tc_dwrr_cfg(hw);
@@ -1368,7 +1368,7 @@ hns3_dcb_pause_setup_hw(struct hns3_hw *hw)
 	}
 
 	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
-	if (!hns3_dev_dcb_supported(hw))
+	if (!hns3_dev_get_support(hw, DCB))
 		return 0;
 
 	ret = hns3_pfc_setup_hw(hw);
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index cabf73ffbc..b98a46f73e 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2408,7 +2408,7 @@ hns3_setup_dcb(struct rte_eth_dev *dev)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	if (!hns3_dev_dcb_supported(hw)) {
+	if (!hns3_dev_get_support(hw, DCB)) {
 		hns3_err(hw, "this port does not support dcb configurations.");
 		return -EOPNOTSUPP;
 	}
@@ -2746,14 +2746,14 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
-	if (hns3_dev_outer_udp_cksum_supported(hw))
+	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
 		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
-	if (hns3_dev_indep_txrx_supported(hw))
+	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 
-	if (hns3_dev_ptp_supported(hw))
+	if (hns3_dev_get_support(hw, PTP))
 		info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -3421,7 +3421,7 @@ hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type)
 
 	switch (media_type) {
 	case HNS3_MEDIA_TYPE_COPPER:
-		if (!hns3_dev_copper_supported(hw)) {
+		if (!hns3_dev_get_support(hw, COPPER)) {
 			PMD_INIT_LOG(ERR,
 				     "Media type is copper, not supported.");
 			ret = -EOPNOTSUPP;
@@ -3489,7 +3489,7 @@ hns3_get_board_configuration(struct hns3_hw *hw)
 	}
 
 	/* Dev does not support DCB */
-	if (!hns3_dev_dcb_supported(hw)) {
+	if (!hns3_dev_get_support(hw, DCB)) {
 		pf->tc_max = 1;
 		pf->pfc_max = 0;
 	} else
@@ -3802,7 +3802,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
 	tc_num = hns3_get_tc_num(hw);
 	aligned_mps = roundup(pf->mps, HNS3_BUF_SIZE_UNIT);
 
-	if (hns3_dev_dcb_supported(hw))
+	if (hns3_dev_get_support(hw, DCB))
 		shared_buf_min = HNS3_BUF_MUL_BY * aligned_mps +
 					pf->dv_buf_size;
 	else
@@ -3819,7 +3819,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
 	shared_buf = rounddown(rx_all - rx_priv, HNS3_BUF_SIZE_UNIT);
 
 	buf_alloc->s_buf.buf_size = shared_buf;
-	if (hns3_dev_dcb_supported(hw)) {
+	if (hns3_dev_get_support(hw, DCB)) {
 		buf_alloc->s_buf.self.high = shared_buf - pf->dv_buf_size;
 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high -
 				roundup(aligned_mps / HNS3_BUF_DIV_BY,
@@ -3830,7 +3830,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
 		buf_alloc->s_buf.self.low = aligned_mps;
 	}
 
-	if (hns3_dev_dcb_supported(hw)) {
+	if (hns3_dev_get_support(hw, DCB)) {
 		hi_thrd = shared_buf - pf->dv_buf_size;
 
 		if (tc_num <= NEED_RESERVE_TC_NUM)
@@ -4036,7 +4036,7 @@ static int
 hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
 {
 	/* When DCB is not supported, rx private buffer is not allocated. */
-	if (!hns3_dev_dcb_supported(hw)) {
+	if (!hns3_dev_get_support(hw, DCB)) {
 		struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 		struct hns3_pf *pf = &hns->pf;
 		uint32_t rx_all = pf->pkt_buf_size;
@@ -4264,7 +4264,7 @@ hns3_buffer_alloc(struct hns3_hw *hw)
 		return ret;
 	}
 
-	if (hns3_dev_dcb_supported(hw)) {
+	if (hns3_dev_get_support(hw, DCB)) {
 		ret = hns3_rx_priv_wl_config(hw, &pkt_buf);
 		if (ret) {
 			PMD_INIT_LOG(ERR,
@@ -6233,7 +6233,7 @@ hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret;
 
-	if (!hns3_dev_dcb_supported(hw)) {
+	if (!hns3_dev_get_support(hw, DCB)) {
 		hns3_err(hw, "This port does not support dcb configurations.");
 		return -EOPNOTSUPP;
 	}
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 243a4046ae..e28056b1bd 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -883,45 +883,8 @@ enum {
 	HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B,
 };
 
-#define hns3_dev_dcb_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_DCB_B)
-
-/* Support copper media type */
-#define hns3_dev_copper_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_COPPER_B)
-
-/* Support the queue region action rule of flow directory */
-#define hns3_dev_fd_queue_region_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B)
-
-/* Support PTP timestamp offload */
-#define hns3_dev_ptp_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_PTP_B)
-
-/* Support to Independently enable/disable/reset Tx or Rx queues */
-#define hns3_dev_indep_txrx_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B)
-
-#define hns3_dev_stash_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_STASH_B)
-
-#define hns3_dev_rxd_adv_layout_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B)
-
-#define hns3_dev_outer_udp_cksum_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B)
-
-#define hns3_dev_ras_imp_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RAS_IMP_B)
-
-#define hns3_dev_tx_push_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TX_PUSH_B)
-
-#define hns3_dev_tm_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TM_B)
-
-#define hns3_dev_vf_vlan_flt_supported(hw) \
-	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B)
+#define hns3_dev_get_support(hw, _name) \
+	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_##_name##_B)
 
 #define HNS3_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct hns3_adapter *)adapter)->hw)
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 8d9b7979c8..f3cc190d1c 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -988,10 +988,10 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
-	if (hns3_dev_outer_udp_cksum_supported(hw))
+	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
 		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
-	if (hns3_dev_indep_txrx_supported(hw))
+	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
 				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
 
@@ -1623,7 +1623,7 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
 	uint8_t msg_data;
 	int ret;
 
-	if (!hns3_dev_vf_vlan_flt_supported(hw))
+	if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
 		return 0;
 
 	msg_data = enable ? 1 : 0;
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 841e0b9da3..6d6e693254 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -301,7 +301,7 @@ hns3_handle_action_queue_region(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = &hns->hw;
 	uint16_t idx;
 
-	if (!hns3_dev_fd_queue_region_supported(hw))
+	if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
 		return rte_flow_error_set(error, ENOTSUP,
 			RTE_FLOW_ERROR_TYPE_ACTION, action,
 			"Not support config queue region!");
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 0b307fdd1d..3484c76d23 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -2368,7 +2368,7 @@ hns3_handle_error(struct hns3_adapter *hns)
 {
 	struct hns3_hw *hw = &hns->hw;
 
-	if (hns3_dev_ras_imp_supported(hw)) {
+	if (hns3_dev_get_support(hw, RAS_IMP)) {
 		hns3_handle_hw_error_v2(hw);
 		hns3_schedule_reset(hns);
 	} else {
diff --git a/drivers/net/hns3/hns3_ptp.c b/drivers/net/hns3/hns3_ptp.c
index df84859046..5dfe68cc4d 100644
--- a/drivers/net/hns3/hns3_ptp.c
+++ b/drivers/net/hns3/hns3_ptp.c
@@ -61,7 +61,7 @@ hns3_ptp_init(struct hns3_hw *hw)
 {
 	int ret;
 
-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return 0;
 
 	ret = hns3_ptp_int_en(hw, true);
@@ -120,7 +120,7 @@ hns3_timesync_enable(struct rte_eth_dev *dev)
 	struct hns3_pf *pf = &hns->pf;
 	int ret;
 
-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
 	if (pf->ptp_enable)
@@ -140,7 +140,7 @@ hns3_timesync_disable(struct rte_eth_dev *dev)
 	struct hns3_pf *pf = &hns->pf;
 	int ret;
 
-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
 	if (!pf->ptp_enable)
@@ -164,7 +164,7 @@ hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
 	struct hns3_pf *pf = &hns->pf;
 	uint64_t ns, sec;
 
-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
 	ns = pf->rx_timestamp & TIME_RX_STAMP_NS_MASK;
@@ -190,7 +190,7 @@ hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
 	uint64_t ns;
 	int ts_cnt;
 
-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
 	ts_cnt = hns3_read_dev(hw, HNS3_TX_1588_BACK_TSP_CNT) &
@@ -219,7 +219,7 @@ hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint64_t ns, sec;
 
-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
 	sec = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_L);
@@ -240,7 +240,7 @@ hns3_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
 	uint64_t sec = ts->tv_sec;
 	uint64_t ns = ts->tv_nsec;
 
-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
 	/* Set the timecounters to a new value. */
@@ -261,7 +261,7 @@ hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
 	struct timespec cur_time;
 	uint64_t ns;
 
-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
 	(void)hns3_timesync_read_time(dev, &cur_time);
@@ -280,7 +280,7 @@ hns3_restore_ptp(struct hns3_adapter *hns)
 	bool en = pf->ptp_enable;
 	int ret;
 
-	if (!hns3_dev_ptp_supported(hw))
+	if (!hns3_dev_get_support(hw, PTP))
 		return 0;
 
 	ret = hns3_timesync_configure(hns, en);
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 6b77672aa1..70de0d2b58 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -393,7 +393,7 @@ hns3_enable_all_queues(struct hns3_hw *hw, bool en)
 	int i;
 
 	for (i = 0; i < hw->cfg_max_queues; i++) {
-		if (hns3_dev_indep_txrx_supported(hw)) {
+		if (hns3_dev_get_support(hw, INDEP_TXRX)) {
 			rxq = i < nb_rx_q ? hw->data->rx_queues[i] : NULL;
 			txq = i < nb_tx_q ? hw->data->tx_queues[i] : NULL;
 
@@ -438,7 +438,7 @@ hns3_enable_txq(struct hns3_tx_queue *txq, bool en)
 	struct hns3_hw *hw = &txq->hns->hw;
 	uint32_t reg;
 
-	if (hns3_dev_indep_txrx_supported(hw)) {
+	if (hns3_dev_get_support(hw, INDEP_TXRX)) {
 		reg = hns3_read_dev(txq, HNS3_RING_TX_EN_REG);
 		if (en)
 			reg |= BIT(HNS3_RING_EN_B);
@@ -455,7 +455,7 @@ hns3_enable_rxq(struct hns3_rx_queue *rxq, bool en)
 	struct hns3_hw *hw = &rxq->hns->hw;
 	uint32_t reg;
 
-	if (hns3_dev_indep_txrx_supported(hw)) {
+	if (hns3_dev_get_support(hw, INDEP_TXRX)) {
 		reg = hns3_read_dev(rxq, HNS3_RING_RX_EN_REG);
 		if (en)
 			reg |= BIT(HNS3_RING_EN_B);
@@ -1630,7 +1630,7 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
 	uint16_t q;
 	int ret;
 
-	if (hns3_dev_indep_txrx_supported(hw))
+	if (hns3_dev_get_support(hw, INDEP_TXRX))
 		return 0;
 
 	/* Setup new number of fake RX/TX queues and reconfigure device. */
@@ -1874,7 +1874,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 			   conf->rx_free_thresh : HNS3_DEFAULT_RX_FREE_THRESH;
 
 	rxq->rx_deferred_start = conf->rx_deferred_start;
-	if (rxq->rx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+	if (rxq->rx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
 		hns3_warn(hw, "deferred start is not supported.");
 		rxq->rx_deferred_start = false;
 	}
@@ -1910,7 +1910,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 					HNS3_PORT_BASE_VLAN_ENABLE;
 	else
 		rxq->pvid_sw_discard_en = false;
-	rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
+	rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false;
 	rxq->configured = true;
 	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
 				idx * HNS3_TQP_REG_SIZE);
@@ -2038,7 +2038,7 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	    dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
 	    dev->rx_pkt_burst == hns3_recv_pkts_vec ||
 	    dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
-		if (hns3_dev_rxd_adv_layout_supported(hw))
+		if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
 			return adv_layout_ptypes;
 		else
 			return ptypes;
@@ -2940,7 +2940,7 @@ hns3_tx_push_init(struct rte_eth_dev *dev)
 	volatile uint32_t *reg;
 	uint32_t val;
 
-	if (!hns3_dev_tx_push_supported(hw))
+	if (!hns3_dev_get_support(hw, TX_PUSH))
 		return;
 
 	reg = (volatile uint32_t *)hns3_tx_push_get_queue_tail_reg(dev, 0);
@@ -2961,7 +2961,7 @@ hns3_tx_push_queue_init(struct rte_eth_dev *dev,
 			struct hns3_tx_queue *txq)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	if (!hns3_dev_tx_push_supported(hw)) {
+	if (!hns3_dev_get_support(hw, TX_PUSH)) {
 		txq->tx_push_enable = false;
 		return;
 	}
@@ -3006,7 +3006,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	}
 
 	txq->tx_deferred_start = conf->tx_deferred_start;
-	if (txq->tx_deferred_start && !hns3_dev_indep_txrx_supported(hw)) {
+	if (txq->tx_deferred_start && !hns3_dev_get_support(hw, INDEP_TXRX)) {
 		hns3_warn(hw, "deferred start is not supported.");
 		txq->tx_deferred_start = false;
 	}
@@ -4288,7 +4288,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev)
 	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (hns3_dev_ptp_supported(hw))
+	if (hns3_dev_get_support(hw, PTP))
 		return false;
 
 	return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
@@ -4449,7 +4449,7 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
 	int ret;
 
-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
@@ -4495,7 +4495,7 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
 
-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
@@ -4517,7 +4517,7 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 	int ret;
 
-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
@@ -4543,7 +4543,7 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 
-	if (!hns3_dev_indep_txrx_supported(hw))
+	if (!hns3_dev_get_support(hw, INDEP_TXRX))
 		return -ENOTSUP;
 
 	rte_spinlock_lock(&hw->lock);
@@ -4716,7 +4716,7 @@ hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
 	 * If the hardware support rxd advanced layout, then driver enable it
 	 * default.
 	 */
-	if (hns3_dev_rxd_adv_layout_supported(hw))
+	if (hns3_dev_get_support(hw, RXD_ADV_LAYOUT))
 		hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
 }
diff --git a/drivers/net/hns3/hns3_rxtx_vec.c b/drivers/net/hns3/hns3_rxtx_vec.c
index 844512f6ce..ff434d2d33 100644
--- a/drivers/net/hns3/hns3_rxtx_vec.c
+++ b/drivers/net/hns3/hns3_rxtx_vec.c
@@ -19,7 +19,7 @@ hns3_tx_check_vec_support(struct rte_eth_dev *dev)
 	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (hns3_dev_ptp_supported(hw))
+	if (hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
 	/* Only support DEV_TX_OFFLOAD_MBUF_FAST_FREE */
@@ -234,7 +234,7 @@ hns3_rx_check_vec_support(struct rte_eth_dev *dev)
 				 DEV_RX_OFFLOAD_VLAN;
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (hns3_dev_ptp_supported(hw))
+	if (hns3_dev_get_support(hw, PTP))
 		return -ENOTSUP;
 
 	if (dev->data->scattered_rx)
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index db5ac786c7..44b607af7a 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -31,7 +31,7 @@ hns3_tm_conf_init(struct rte_eth_dev *dev)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t max_tx_queues = hns3_tm_max_tx_queues_get(dev);
 
-	if (!hns3_dev_tm_supported(hw))
+	if (!hns3_dev_get_support(hw, TM))
 		return;
 
 	pf->tm_conf.nb_leaf_nodes_max = max_tx_queues;
@@ -58,7 +58,7 @@ hns3_tm_conf_uninit(struct rte_eth_dev *dev)
 	struct hns3_tm_shaper_profile *shaper_profile;
 	struct hns3_tm_node *tm_node;
 
-	if (!hns3_dev_tm_supported(hw))
+	if (!hns3_dev_get_support(hw, TM))
 		return;
 
 	if (pf->tm_conf.nb_queue_node > 0) {
@@ -1233,7 +1233,7 @@ hns3_tm_ops_get(struct rte_eth_dev *dev, void *arg)
 	if (arg == NULL)
 		return -EINVAL;
 
-	if (!hns3_dev_tm_supported(hw))
+	if (!hns3_dev_get_support(hw, TM))
 		return -EOPNOTSUPP;
 
 	*(const void **)arg = &hns3_tm_ops;
@@ -1246,7 +1246,7 @@ hns3_tm_dev_start_proc(struct hns3_hw *hw)
 {
 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
 
-	if (!hns3_dev_tm_supported(hw))
+	if (!hns3_dev_get_support(hw, TM))
 		return;
 
 	if (pf->tm_conf.root && !pf->tm_conf.committed)
@@ -1295,7 +1295,7 @@ hns3_tm_conf_update(struct hns3_hw *hw)
 	struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);
 	struct rte_tm_error error;
 
-	if (!hns3_dev_tm_supported(hw))
+	if (!hns3_dev_get_support(hw, TM))
 		return 0;
 
 	if (pf->tm_conf.root == NULL || !pf->tm_conf.committed)