From patchwork Fri Nov 6 03:51:52 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 83789 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0A48FA0524; Fri, 6 Nov 2020 04:52:21 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id AA8CF2C55; Fri, 6 Nov 2020 04:51:40 +0100 (CET) Received: from szxga04-in.huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id 21423126B for ; Fri, 6 Nov 2020 04:51:36 +0100 (CET) Received: from DGGEMS407-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga04-in.huawei.com (SkyGuard) with ESMTP id 4CS5zN2lG2z15QRJ for ; Fri, 6 Nov 2020 11:51:28 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS407-HUB.china.huawei.com (10.3.19.207) with Microsoft SMTP Server id 14.3.487.0; Fri, 6 Nov 2020 11:51:26 +0800 From: Lijun Ou To: CC: , Date: Fri, 6 Nov 2020 11:51:52 +0800 Message-ID: <1604634716-43484-2-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1604634716-43484-1-git-send-email-oulijun@huawei.com> References: <1604586194-29523-1-git-send-email-oulijun@huawei.com> <1604634716-43484-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v2 1/5] net/hns3: use correct logging format symbol X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Hongbo Zheng In the current driver's log messages, some print format specifiers do not match the actual types of the variables being printed. Use the specifier that matches each variable's type (e.g. "%u" for unsigned values).
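To illustrate the class of problem being fixed, here is a minimal standalone sketch using plain printf instead of the driver's hns3_err()/PMD_INIT_LOG() macros (which take the same printf-style format strings); the variable name and value below are made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* an unsigned value above INT32_MAX, e.g. a rate limit */
            uint32_t rate = 3200000000u;

            printf("rate(%d)\n", rate); /* mismatched specifier: typically prints a negative number */
            printf("rate(%u)\n", rate); /* matching specifier: prints 3200000000 */
            return 0;
    }

Compilers flag the first call with -Wformat; the changes below apply the same "%d" to "%u" correction throughout the driver's log messages.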
Signed-off-by: Hongbo Zheng Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_dcb.c | 10 +++++----- drivers/net/hns3/hns3_ethdev.c | 32 ++++++++++++++++---------------- drivers/net/hns3/hns3_ethdev_vf.c | 16 ++++++++-------- drivers/net/hns3/hns3_fdir.c | 16 ++++++++-------- drivers/net/hns3/hns3_flow.c | 4 ++-- drivers/net/hns3/hns3_mbx.c | 16 ++++++++-------- drivers/net/hns3/hns3_mp.c | 2 +- drivers/net/hns3/hns3_rxtx.c | 22 +++++++++++----------- drivers/net/hns3/hns3_stats.c | 12 ++++++------ 9 files changed, 65 insertions(+), 65 deletions(-) diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c index 351eb87..ab02c87 100644 --- a/drivers/net/hns3/hns3_dcb.c +++ b/drivers/net/hns3/hns3_dcb.c @@ -50,13 +50,13 @@ hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level, /* Calc tick */ if (shaper_level >= HNS3_SHAPER_LVL_CNT) { hns3_err(hw, - "shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)", + "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)", shaper_level, HNS3_SHAPER_LVL_CNT); return -EINVAL; } if (ir > hw->max_tm_rate) { - hns3_err(hw, "rate(%d) exceeds the max rate(%d) driver " + hns3_err(hw, "rate(%u) exceeds the max rate(%u) driver " "supported.", ir, hw->max_tm_rate); return -EINVAL; } @@ -1138,7 +1138,7 @@ hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time) pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP) pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1; else { - hns3_warn(hw, "pause_time(%d) is adjusted to 4", pause_time); + hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time); pause_time = PAUSE_TIME_MIN_VALUE; pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1; } @@ -1449,13 +1449,13 @@ hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc) return -EINVAL; if (nb_rx_q < num_tc) { - hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).", + hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).", nb_rx_q, num_tc); return -EINVAL; } if (nb_tx_q < num_tc) { - hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).", + hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).", nb_tx_q, num_tc); return -EINVAL; } diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 84d8346..ba96724 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -1100,8 +1100,8 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev) hns3_warn(hw, "hw_vlan_reject_tagged or hw_vlan_reject_untagged " "configuration is not supported! Ignore these two " - "parameters: hw_vlan_reject_tagged(%d), " - "hw_vlan_reject_untagged(%d)", + "parameters: hw_vlan_reject_tagged(%u), " + "hw_vlan_reject_untagged(%u)", txmode->hw_vlan_reject_tagged, txmode->hw_vlan_reject_untagged); @@ -1125,7 +1125,7 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev) ret = hns3_vlan_pvid_set(dev, txmode->pvid, txmode->hw_vlan_insert_pvid); if (ret) - hns3_err(hw, "dev config vlan pvid(%d) failed, ret = %d", + hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d", txmode->pvid, ret); return ret; @@ -1890,7 +1890,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw, uint32_t j; if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { - hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) " + hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " "invalid. 
valid range: 0~%d", nb_mc_addr, HNS3_MC_MACADDR_NUM); return -EINVAL; @@ -2164,7 +2164,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev) for (i = 0; i < HNS3_MAX_USER_PRIO; i++) { if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) { - hns3_err(hw, "dcb_tc[%d] = %d in rx direction, " + hns3_err(hw, "dcb_tc[%d] = %u in rx direction, " "is not equal to one in tx direction.", i, dcb_rx_conf->dcb_tc[i]); return -EINVAL; @@ -2238,7 +2238,7 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap, op_str = mmap ? "Map" : "Unmap"; status = hns3_cmd_send(hw, &desc, 1); if (status) { - hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.", + hns3_err(hw, "%s TQP %u fail, vector_id is %u, status is %d.", op_str, queue_id, req->int_vector_id, status); return status; } @@ -2286,7 +2286,7 @@ hns3_init_ring_with_vector(struct hns3_hw *hw) HNS3_RING_TYPE_TX, i); if (ret) { PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with " - "vector: %d, ret=%d", i, vec, ret); + "vector: %u, ret=%d", i, vec, ret); return ret; } @@ -2294,7 +2294,7 @@ hns3_init_ring_with_vector(struct hns3_hw *hw) HNS3_RING_TYPE_RX, i); if (ret) { PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with " - "vector: %d, ret=%d", i, vec, ret); + "vector: %u, ret=%d", i, vec, ret); return ret; } } @@ -3083,7 +3083,7 @@ hns3_get_board_configuration(struct hns3_hw *hw) ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed); if (ret) { - PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d", + PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d", cfg.default_speed, ret); return ret; } @@ -3928,7 +3928,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) if (cmdq_resp) { PMD_INIT_LOG(ERR, - "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", + "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", cmdq_resp); return -EIO; } @@ -3949,7 +3949,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) break; default: PMD_INIT_LOG(ERR, - "add mac ethertype failed for undefined, code=%d.", + "add mac ethertype failed for undefined, code=%u.", resp_code); return_status = -EIO; break; @@ -4107,7 +4107,7 @@ hns3_promisc_init(struct hns3_hw *hw) hns3_promisc_param_init(¶m, false, false, false, func_id); ret = hns3_cmd_set_promisc_mode(hw, ¶m); if (ret) { - PMD_INIT_LOG(ERR, "failed to clear vf:%d promisc mode," + PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode," " ret = %d", func_id, ret); return ret; } @@ -4790,7 +4790,7 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev) rte_zmalloc("intr_vec", hw->used_rx_queues * sizeof(int), 0); if (intr_handle->intr_vec == NULL) { - hns3_err(hw, "Failed to allocate %d rx_queues" + hns3_err(hw, "Failed to allocate %u rx_queues" " intr_vec", hw->used_rx_queues); ret = -ENOMEM; goto alloc_intr_vec_error; @@ -5056,7 +5056,7 @@ hns3_dev_close(struct rte_eth_dev *eth_dev) rte_free(eth_dev->process_private); eth_dev->process_private = NULL; hns3_mp_uninit_primary(); - hns3_warn(hw, "Close port %d finished", hw->data->port_id); + hns3_warn(hw, "Close port %u finished", hw->data->port_id); return ret; } @@ -5134,7 +5134,7 @@ hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) return -EINVAL; } if (!fc_conf->pause_time) { - hns3_err(hw, "Invalid pause time %d setting.", + hns3_err(hw, "Invalid pause time %u setting.", fc_conf->pause_time); return -EINVAL; } @@ -5187,7 +5187,7 @@ hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev, return -EINVAL; } if (pfc_conf->fc.pause_time == 0) { - 
hns3_err(hw, "Invalid pause time %d setting.", + hns3_err(hw, "Invalid pause time %u setting.", pfc_conf->fc.pause_time); return -EINVAL; } diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index fb55d58..6f71cd6 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -425,7 +425,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw, uint32_t j; if (nb_mc_addr > HNS3_MC_MACADDR_NUM) { - hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) " + hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " "invalid. valid range: 0~%d", nb_mc_addr, HNS3_MC_MACADDR_NUM); return -EINVAL; @@ -702,7 +702,7 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg, sizeof(bind_msg), false, NULL, 0); if (ret) - hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.", + hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.", op_str, queue_id, bind_msg.vector_id, ret); return ret; @@ -748,7 +748,7 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw) HNS3_RING_TYPE_TX, i); if (ret) { PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with " - "vector: %d, ret=%d", i, vec, ret); + "vector: %u, ret=%d", i, vec, ret); return ret; } @@ -756,7 +756,7 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw) HNS3_RING_TYPE_RX, i); if (ret) { PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with " - "vector: %d, ret=%d", i, vec, ret); + "vector: %u, ret=%d", i, vec, ret); return ret; } } @@ -1414,13 +1414,13 @@ hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q, struct hns3_hw *hw = &hns->hw; if (nb_rx_q < hw->num_tc) { - hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).", + hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).", nb_rx_q, hw->num_tc); return -EINVAL; } if (nb_tx_q < hw->num_tc) { - hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).", + hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).", nb_tx_q, hw->num_tc); return -EINVAL; } @@ -1992,7 +1992,7 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev) rte_free(eth_dev->process_private); eth_dev->process_private = NULL; hns3_mp_uninit_primary(); - hns3_warn(hw, "Close port %d finished", hw->data->port_id); + hns3_warn(hw, "Close port %u finished", hw->data->port_id); return ret; } @@ -2107,7 +2107,7 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev) rte_zmalloc("intr_vec", hw->used_rx_queues * sizeof(int), 0); if (intr_handle->intr_vec == NULL) { - hns3_err(hw, "Failed to allocate %d rx_queues" + hns3_err(hw, "Failed to allocate %u rx_queues" " intr_vec", hw->used_rx_queues); ret = -ENOMEM; goto vf_alloc_intr_vec_error; diff --git a/drivers/net/hns3/hns3_fdir.c b/drivers/net/hns3/hns3_fdir.c index 62ccabc..857cc94 100644 --- a/drivers/net/hns3/hns3_fdir.c +++ b/drivers/net/hns3/hns3_fdir.c @@ -320,7 +320,7 @@ int hns3_init_fd_config(struct hns3_adapter *hns) hns3_warn(hw, "Unsupported tunnel filter in 4K*200Bit"); break; default: - hns3_err(hw, "Unsupported flow director mode %d", + hns3_err(hw, "Unsupported flow director mode %u", pf->fdir.fd_cfg.fd_mode); return -EOPNOTSUPP; } @@ -617,7 +617,7 @@ static bool hns3_fd_convert_tuple(struct hns3_hw *hw, key_conf->mask.ip_proto); break; default: - hns3_warn(hw, "not support tuple of (%d)", tuple); + hns3_warn(hw, "not support tuple of (%u)", tuple); break; } return true; @@ -744,14 +744,14 @@ static int hns3_config_key(struct hns3_adapter *hns, ret = hns3_fd_tcam_config(hw, false, rule->location, key_y, true); if (ret) 
{ - hns3_err(hw, "Config fd key_y fail, loc=%d, ret=%d", + hns3_err(hw, "Config fd key_y fail, loc=%u, ret=%d", rule->queue_id, ret); return ret; } ret = hns3_fd_tcam_config(hw, true, rule->location, key_x, true); if (ret) - hns3_err(hw, "Config fd key_x fail, loc=%d, ret=%d", + hns3_err(hw, "Config fd key_x fail, loc=%u, ret=%d", rule->queue_id, ret); return ret; } @@ -965,8 +965,8 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns, ret = hns3_fd_tcam_config(hw, true, rule->location, NULL, false); if (ret) - hns3_err(hw, "Failed to delete fdir: %d src_ip:%x " - "dst_ip:%x src_port:%d dst_port:%d ret = %d", + hns3_err(hw, "Failed to delete fdir: %u src_ip:%x " + "dst_ip:%x src_port:%u dst_port:%u ret = %d", rule->location, rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID], rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID], @@ -1006,8 +1006,8 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns, ret = hns3_config_key(hns, rule); rte_spinlock_unlock(&fdir_info->flows_lock); if (ret) { - hns3_err(hw, "Failed to config fdir: %d src_ip:%x dst_ip:%x " - "src_port:%d dst_port:%d ret = %d", + hns3_err(hw, "Failed to config fdir: %u src_ip:%x dst_ip:%x " + "src_port:%u dst_port:%u ret = %d", rule->location, rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID], rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID], diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index ff00dc1..2fff157 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -267,8 +267,8 @@ hns3_handle_action_queue(struct rte_eth_dev *dev, queue = (const struct rte_flow_action_queue *)action->conf; if (queue->index >= hw->used_rx_queues) { - hns3_err(hw, "queue ID(%d) is greater than number of " - "available queue (%d) in driver.", + hns3_err(hw, "queue ID(%u) is greater than number of " + "available queue (%u) in driver.", queue->index, hw->used_rx_queues); return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c index adb23cb..d2a5db8 100644 --- a/drivers/net/hns3/hns3_mbx.c +++ b/drivers/net/hns3/hns3_mbx.c @@ -74,7 +74,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1, uint64_t end; if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) { - hns3_err(hw, "VF mbx response len(=%d) exceeds maximum(=%d)", + hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)", resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE); return -EINVAL; } @@ -113,7 +113,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1, if (now >= end) { hw->mbx_resp.lost++; hns3_err(hw, - "VF could not get mbx(%d,%d) head(%d) tail(%d) lost(%d) from PF in_irq:%d", + "VF could not get mbx(%u,%u) head(%u) tail(%u) lost(%u) from PF in_irq:%d", code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail, hw->mbx_resp.lost, in_irq); return -ETIME; @@ -146,7 +146,7 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, /* first two bytes are reserved for code & subcode */ if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) { hns3_err(hw, - "VF send mbx msg fail, msg len %d exceeds max payload len %d", + "VF send mbx msg fail, msg len %u exceeds max payload len %d", msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET); return -EINVAL; } @@ -237,7 +237,7 @@ hns3_mbx_handler(struct hns3_hw *hw) hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw)); break; default: - hns3_err(hw, "Fetched unsupported(%d) message from arq", + hns3_err(hw, "Fetched unsupported(%u) message from arq", opcode); break; } @@ -266,13 +266,13 @@ 
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg) if (resp->lost) resp->lost--; hns3_warn(hw, "Received a mismatched response req_msg(%x) " - "resp_msg(%x) head(%d) tail(%d) lost(%d)", + "resp_msg(%x) head(%u) tail(%u) lost(%u)", resp->req_msg_data, resp_msg, resp->head, tail, resp->lost); } else if (tail + resp->lost > resp->head) { resp->lost--; hns3_warn(hw, "Received a new response again resp_msg(%x) " - "head(%d) tail(%d) lost(%d)", resp_msg, + "head(%u) tail(%u) lost(%u)", resp_msg, resp->head, tail, resp->lost); } rte_io_wmb(); @@ -377,7 +377,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { hns3_warn(hw, - "dropped invalid mailbox message, code = %d", + "dropped invalid mailbox message, code = %u", opcode); /* dropping/not processing this invalid message */ @@ -428,7 +428,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) break; default: hns3_err(hw, - "VF received unsupported(%d) mbx msg from PF", + "VF received unsupported(%u) mbx msg from PF", req->msg[0]); break; } diff --git a/drivers/net/hns3/hns3_mp.c b/drivers/net/hns3/hns3_mp.c index a5e98ff..ed2567a 100644 --- a/drivers/net/hns3/hns3_mp.c +++ b/drivers/net/hns3/hns3_mp.c @@ -78,7 +78,7 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) if (!rte_eth_dev_is_valid_port(param->port_id)) { rte_errno = ENODEV; - PMD_INIT_LOG(ERR, "port %u invalid port ID", param->port_id); + PMD_INIT_LOG(ERR, "port %d invalid port ID", param->port_id); return -rte_errno; } dev = &rte_eth_devices[param->port_id]; diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index bd3ccf6..85316ca 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -256,7 +256,7 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) for (i = 0; i < rxq->nb_rx_desc; i++) { mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); if (unlikely(mbuf == NULL)) { - hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!", + hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!", i); hns3_rx_queue_release_mbufs(rxq); return -ENOMEM; @@ -1192,7 +1192,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue), RTE_CACHE_LINE_SIZE, q_info->socket_id); if (rxq == NULL) { - hns3_err(hw, "Failed to allocate memory for No.%d rx ring!", + hns3_err(hw, "Failed to allocate memory for No.%u rx ring!", q_info->idx); return NULL; } @@ -1211,7 +1211,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, rx_desc, HNS3_RING_BASE_ALIGN, q_info->socket_id); if (rx_mz == NULL) { - hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!", + hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!", q_info->idx); hns3_rx_queue_release(rxq); return NULL; @@ -1220,7 +1220,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, rxq->rx_ring = (struct hns3_desc *)rx_mz->addr; rxq->rx_ring_phys_addr = rx_mz->iova; - hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx, + hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx, rxq->rx_ring_phys_addr); return rxq; @@ -1248,7 +1248,7 @@ hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, q_info.ring_name = "rx_fake_ring"; rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info); if (rxq == NULL) { - hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx); + hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx); return -ENOMEM; } @@ -1285,7 
+1285,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue), RTE_CACHE_LINE_SIZE, q_info->socket_id); if (txq == NULL) { - hns3_err(hw, "Failed to allocate memory for No.%d tx ring!", + hns3_err(hw, "Failed to allocate memory for No.%u tx ring!", q_info->idx); return NULL; } @@ -1298,7 +1298,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, tx_desc, HNS3_RING_BASE_ALIGN, q_info->socket_id); if (tx_mz == NULL) { - hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!", + hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!", q_info->idx); hns3_tx_queue_release(txq); return NULL; @@ -1307,7 +1307,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, txq->tx_ring = (struct hns3_desc *)tx_mz->addr; txq->tx_ring_phys_addr = tx_mz->iova; - hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx, + hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx, txq->tx_ring_phys_addr); /* Clear tx bd */ @@ -1342,7 +1342,7 @@ hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, q_info.ring_name = "tx_fake_ring"; txq = hns3_alloc_txq_and_dma_zone(dev, &q_info); if (txq == NULL) { - hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx); + hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx); return -ENOMEM; } @@ -2475,8 +2475,8 @@ hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf, if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh || rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP || free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) { - hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc " - "(%d) of tx descriptors for port=%d queue=%d check " + hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc " + "(%u) of tx descriptors for port=%u queue=%u check " "fail!", rs_thresh, free_thresh, nb_desc, hw->data->port_id, idx); diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c index 9bf6962..c590647 100644 --- a/drivers/net/hns3/hns3_stats.c +++ b/drivers/net/hns3/hns3_stats.c @@ -457,7 +457,7 @@ hns3_update_tqp_stats(struct hns3_hw *hw) desc.data[0] = rte_cpu_to_le_32((uint32_t)i); ret = hns3_cmd_send(hw, &desc, 1); if (ret) { - hns3_err(hw, "Failed to query RX No.%d queue stat: %d", + hns3_err(hw, "Failed to query RX No.%u queue stat: %d", i, ret); return ret; } @@ -471,7 +471,7 @@ hns3_update_tqp_stats(struct hns3_hw *hw) desc.data[0] = rte_cpu_to_le_32((uint32_t)i); ret = hns3_cmd_send(hw, &desc, 1); if (ret) { - hns3_err(hw, "Failed to query TX No.%d queue stat: %d", + hns3_err(hw, "Failed to query TX No.%u queue stat: %d", i, ret); return ret; } @@ -565,7 +565,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i); ret = hns3_cmd_send(hw, &desc_reset, 1); if (ret) { - hns3_err(hw, "Failed to reset RX No.%d queue stat: %d", + hns3_err(hw, "Failed to reset RX No.%u queue stat: %d", i, ret); return ret; } @@ -575,7 +575,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i); ret = hns3_cmd_send(hw, &desc_reset, 1); if (ret) { - hns3_err(hw, "Failed to reset TX No.%d queue stat: %d", + hns3_err(hw, "Failed to reset TX No.%u queue stat: %d", i, ret); return ret; } @@ -960,7 +960,7 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, for (i = 0; i < size; i++) { if (ids[i] >= cnt_stats) { - hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, " + hns3_err(hw, "ids[%u] (%" PRIx64 ") is 
invalid, " "should < %u", i, ids[i], cnt_stats); rte_free(values_copy); return -EINVAL; } @@ -1021,7 +1021,7 @@ hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, for (i = 0; i < size; i++) { if (ids[i] >= cnt_stats) { - hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, " + hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, " "should < %u", i, ids[i], cnt_stats); rte_free(names_copy); return -EINVAL; From patchwork Fri Nov 6 03:51:53 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 83788 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 58658A0524; Fri, 6 Nov 2020 04:51:57 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id AD9972C1A; Fri, 6 Nov 2020 04:51:38 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by dpdk.org (Postfix) with ESMTP id C7AA3F90 for ; Fri, 6 Nov 2020 04:51:35 +0100 (CET) Received: from DGGEMS407-HUB.china.huawei.com (unknown [172.30.72.60]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4CS5zQ5PtBzhg79 for ; Fri, 6 Nov 2020 11:51:30 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS407-HUB.china.huawei.com (10.3.19.207) with Microsoft SMTP Server id 14.3.487.0; Fri, 6 Nov 2020 11:51:27 +0800 From: Lijun Ou To: CC: , Date: Fri, 6 Nov 2020 11:51:53 +0800 Message-ID: <1604634716-43484-3-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1604634716-43484-1-git-send-email-oulijun@huawei.com> References: <1604586194-29523-1-git-send-email-oulijun@huawei.com> <1604634716-43484-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v2 2/5] net/hns3: use unsigned types for bit operator X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Hongbo Zheng According to the bit operator reliability coding rules, variables taking part in bit operations should be of unsigned type, so the signed variables involved are changed to unsigned here.
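To make the rule concrete, here is a minimal standalone sketch (not driver code; only the name bd_vld_num is borrowed from the patch) of why the operands are switched to unsigned: an unsigned index avoids signed/unsigned comparison warnings against an unsigned bound, and an unsigned left operand keeps the shift well defined for every bit position:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t bd_vld_num = 4; /* unsigned count, as in the Rx descriptor path */
            uint32_t mask = 0;
            uint32_t i;              /* unsigned loop index, matching the patch */

            for (i = 0; i < bd_vld_num; i++)
                    mask |= UINT32_C(1) << i; /* unsigned shift: defined for bits 0..31 */

            printf("mask=0x%x\n", mask);
            return 0;
    }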
Signed-off-by: Hongbo Zheng Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_ethdev_vf.c | 2 +- drivers/net/hns3/hns3_rxtx_vec_neon.h | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 6f71cd6..2e9bfda 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -1331,7 +1331,7 @@ hns3vf_get_tc_info(struct hns3_hw *hw) { uint8_t resp_msg; int ret; - int i; + uint32_t i; ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0, true, &resp_msg, sizeof(resp_msg)); diff --git a/drivers/net/hns3/hns3_rxtx_vec_neon.h b/drivers/net/hns3/hns3_rxtx_vec_neon.h index 8d7721b..fe525de 100644 --- a/drivers/net/hns3/hns3_rxtx_vec_neon.h +++ b/drivers/net/hns3/hns3_rxtx_vec_neon.h @@ -89,13 +89,12 @@ hns3_desc_parse_field(struct hns3_rx_queue *rxq, struct hns3_desc *rxdp, uint32_t bd_vld_num) { - uint32_t l234_info, ol_info, bd_base_info; + uint32_t l234_info, ol_info, bd_base_info, cksum_err, i; struct rte_mbuf *pkt; uint32_t retcode = 0; - uint32_t cksum_err; - int ret, i; + int ret; - for (i = 0; i < (int)bd_vld_num; i++) { + for (i = 0; i < bd_vld_num; i++) { pkt = sw_ring[i].mbuf; /* init rte_mbuf.rearm_data last 64-bit */ @@ -129,9 +128,9 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, uint16_t rx_id = rxq->next_to_use; struct hns3_entry *sw_ring = &rxq->sw_ring[rx_id]; struct hns3_desc *rxdp = &rxq->rx_ring[rx_id]; - uint32_t bd_valid_num, parse_retcode; + uint32_t bd_valid_num, parse_retcode, pos; uint16_t nb_rx = 0; - int pos, offset; + int offset; /* mask to shuffle from desc to mbuf's rx_descriptor_fields1 */ uint8x16_t shuf_desc_fields_msk = { From patchwork Fri Nov 6 03:51:54 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 83792 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 2465AA0524; Fri, 6 Nov 2020 04:53:20 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id AA0C14C96; Fri, 6 Nov 2020 04:52:05 +0100 (CET) Received: from szxga04-in.huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id AFE454C96 for ; Fri, 6 Nov 2020 04:51:41 +0100 (CET) Received: from DGGEMS407-HUB.china.huawei.com (unknown [172.30.72.58]) by szxga04-in.huawei.com (SkyGuard) with ESMTP id 4CS5zT3RnWz15QSP for ; Fri, 6 Nov 2020 11:51:33 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS407-HUB.china.huawei.com (10.3.19.207) with Microsoft SMTP Server id 14.3.487.0; Fri, 6 Nov 2020 11:51:27 +0800 From: Lijun Ou To: CC: , Date: Fri, 6 Nov 2020 11:51:54 +0800 Message-ID: <1604634716-43484-4-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1604634716-43484-1-git-send-email-oulijun@huawei.com> References: <1604586194-29523-1-git-send-email-oulijun@huawei.com> <1604634716-43484-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v2 3/5] net/hns3: adjust some code style X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org 
Sender: "dev" From: Hongbo Zheng Here adjusts some code style for making the lines more compact and removes some static check tool warnings. Signed-off-by: Hongbo Zheng Signed-off-by: Lijun Ou --- V1->V2: - fix checkpatch warning --- drivers/net/hns3/hns3_cmd.c | 1 - drivers/net/hns3/hns3_ethdev.c | 6 +- drivers/net/hns3/hns3_ethdev.h | 1 - drivers/net/hns3/hns3_flow.c | 37 +- drivers/net/hns3/hns3_intr.c | 873 +++++++++++++++++++++++++++-------------- drivers/net/hns3/hns3_rxtx.c | 1 - drivers/net/hns3/hns3_stats.c | 1 - 7 files changed, 597 insertions(+), 323 deletions(-) diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c index 4f52ed0..f58f4f7 100644 --- a/drivers/net/hns3/hns3_cmd.c +++ b/drivers/net/hns3/hns3_cmd.c @@ -198,7 +198,6 @@ hns3_cmd_csq_clean(struct hns3_hw *hw) int clean; head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG); - if (!is_valid_csq_clean_head(csq, head)) { hns3_err(hw, "wrong cmd head (%u, %u-%u)", head, csq->next_to_use, csq->next_to_clean); diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index ba96724..b27cf67 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -238,7 +238,6 @@ hns3_interrupt_handler(void *param) hns3_pf_disable_irq0(hw); event_cause = hns3_check_event_cause(hns, &clearval); - /* vector 0 interrupt is shared with reset and mailbox source events. */ if (event_cause == HNS3_VECTOR0_EVENT_ERR) { hns3_warn(hw, "Received err interrupt"); @@ -3556,9 +3555,7 @@ hns3_drop_pfc_buf_till_fit(struct hns3_hw *hw, for (i = HNS3_MAX_TC_NUM - 1; i >= 0; i--) { priv = &buf_alloc->priv_buf[i]; mask = BIT((uint8_t)i); - - if (hw->hw_tc_map & mask && - hw->dcb_info.hw_pfc_map & mask) { + if (hw->hw_tc_map & mask && hw->dcb_info.hw_pfc_map & mask) { /* Reduce the number of pfc TC with private buffer */ priv->wl.low = 0; priv->enable = 0; @@ -3612,7 +3609,6 @@ hns3_only_alloc_priv_buff(struct hns3_hw *hw, for (i = 0; i < HNS3_MAX_TC_NUM; i++) { priv = &buf_alloc->priv_buf[i]; - priv->enable = 0; priv->wl.low = 0; priv->wl.high = 0; diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h index 63e0c2f..7470de0 100644 --- a/drivers/net/hns3/hns3_ethdev.h +++ b/drivers/net/hns3/hns3_ethdev.h @@ -831,7 +831,6 @@ struct hns3_adapter { #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define rounddown(x, y) ((x) - ((x) % (y))) - #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) /* diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index 2fff157..ee6ec15 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -209,8 +209,7 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow, ret = hns3_get_count(&hns->hw, flow->counter_id, &value); if (ret) { - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, + rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Read counter fail."); return ret; } @@ -547,7 +546,6 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, if (item->mask) { ipv4_mask = item->mask; - if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id || ipv4_mask->hdr.fragment_offset || @@ -616,8 +614,8 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, if (item->mask) { ipv6_mask = item->mask; - if (ipv6_mask->hdr.vtc_flow || - ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) { + if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.hop_limits) { return rte_flow_error_set(error, 
EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, @@ -672,12 +670,10 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, if (item->mask) { tcp_mask = item->mask; - if (tcp_mask->hdr.sent_seq || - tcp_mask->hdr.recv_ack || - tcp_mask->hdr.data_off || - tcp_mask->hdr.tcp_flags || - tcp_mask->hdr.rx_win || - tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) { + if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, @@ -776,7 +772,6 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, "Only support src & dst port in SCTP"); - if (sctp_mask->hdr.src_port) { hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1); rule->key_conf.mask.src_port = @@ -1069,8 +1064,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, } static int -hns3_parse_normal(const struct rte_flow_item *item, - struct hns3_fdir_rule *rule, +hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, struct items_step_mngr *step_mngr, struct rte_flow_error *error) { @@ -1331,9 +1325,8 @@ hns3_rss_conf_copy(struct hns3_rss_conf *out, .key_len = in->key_len, .queue_num = in->queue_num, }; - out->conf.queue = - memcpy(out->queue, in->queue, - sizeof(*in->queue) * in->queue_num); + out->conf.queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num); if (in->key) out->conf.key = memcpy(out->key, in->key, in->key_len); @@ -1783,17 +1776,15 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0); if (flow == NULL) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to allocate flow memory"); + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to allocate flow memory"); return NULL; } flow_node = rte_zmalloc("hns3 flow node", sizeof(struct hns3_flow_mem), 0); if (flow_node == NULL) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to allocate flow list memory"); + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to allocate flow list memory"); rte_free(flow); return NULL; } diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c index bfd2ba8..02e221a 100644 --- a/drivers/net/hns3/hns3_intr.c +++ b/drivers/net/hns3/hns3_intr.c @@ -28,201 +28,281 @@ static const char *reset_string[HNS3_MAX_RESET] = { }; static const struct hns3_hw_error mac_afifo_tnl_int[] = { - { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err", + { .int_msk = BIT(0), + .msg = "egu_cge_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err", + { .int_msk = BIT(1), + .msg = "egu_cge_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err", + { .int_msk = BIT(2), + .msg = "egu_lge_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err", + { .int_msk = BIT(3), + .msg = "egu_lge_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err", + { .int_msk = BIT(4), + .msg = "cge_igu_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(5), .msg = 
"cge_igu_afifo_ecc_mbit_err", + { .int_msk = BIT(5), + .msg = "cge_igu_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err", + { .int_msk = BIT(6), + .msg = "lge_igu_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err", + { .int_msk = BIT(7), + .msg = "lge_igu_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err", + { .int_msk = BIT(8), + .msg = "cge_igu_afifo_overflow_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err", + { .int_msk = BIT(9), + .msg = "lge_igu_afifo_overflow_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err", + { .int_msk = BIT(10), + .msg = "egu_cge_afifo_underrun_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err", + { .int_msk = BIT(11), + .msg = "egu_lge_afifo_underrun_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err", + { .int_msk = BIT(12), + .msg = "egu_ge_afifo_underrun_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err", + { .int_msk = BIT(13), + .msg = "ge_igu_afifo_overflow_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ppu_mpf_abnormal_int_st1[] = { - { .int_msk = 0xFFFFFFFF, .msg = "rpu_rx_pkt_ecc_mbit_err", + { .int_msk = 0xFFFFFFFF, + .msg = "rpu_rx_pkt_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ppu_mpf_abnormal_int_st2_ras[] = { - { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", + { .int_msk = BIT(13), + .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", + { .int_msk = BIT(14), + .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", + { .int_msk = BIT(15), + .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", + { .int_msk = BIT(16), + .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err", + { .int_msk = BIT(17), + .msg = "rcb_tx_ring_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err", + { .int_msk = BIT(18), + .msg = "rcb_rx_ring_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err", + { .int_msk = BIT(19), + .msg = "rcb_tx_fbd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err", + { .int_msk = BIT(20), + .msg = "rcb_rx_ebd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err", + { .int_msk = BIT(21), + .msg = "rcb_tso_info_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err", + { .int_msk = BIT(22), + .msg = "rcb_tx_int_info_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err", 
+ { .int_msk = BIT(23), + .msg = "rcb_rx_int_info_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err", + { .int_msk = BIT(24), + .msg = "tpu_tx_pkt_0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err", + { .int_msk = BIT(25), + .msg = "tpu_tx_pkt_1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = "rd_bus_err", + { .int_msk = BIT(26), + .msg = "rd_bus_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "wr_bus_err", + { .int_msk = BIT(27), + .msg = "wr_bus_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect", + { .int_msk = BIT(30), + .msg = "ooo_ecc_err_detect", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl", + { .int_msk = BIT(31), + .msg = "ooo_ecc_err_multpl", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ppu_mpf_abnormal_int_st2_msix[] = { - { .int_msk = BIT(29), .msg = "rx_q_search_miss", + { .int_msk = BIT(29), + .msg = "rx_q_search_miss", .reset_level = HNS3_NONE_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ssu_port_based_pf_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", + { .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "low_water_line_err_port", + { .int_msk = BIT(9), + .msg = "low_water_line_err_port", .reset_level = HNS3_NONE_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ppp_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "tx_vlan_tag_err", + { .int_msk = BIT(0), + .msg = "tx_vlan_tag_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", + { .int_msk = BIT(1), + .msg = "rss_list_tc_unassigned_queue_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ppu_pf_abnormal_int_ras[] = { - { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison", + { .int_msk = BIT(3), + .msg = "tx_rd_fbd_poison", .reset_level = HNS3_FUNC_RESET }, - { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison", + { .int_msk = BIT(4), + .msg = "rx_rd_ebd_poison", .reset_level = HNS3_FUNC_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ppu_pf_abnormal_int_msix[] = { - { .int_msk = BIT(0), .msg = "over_8bd_no_fe", + { .int_msk = BIT(0), + .msg = "over_8bd_no_fe", .reset_level = HNS3_FUNC_RESET }, - { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err", + { .int_msk = BIT(1), + .msg = "tso_mss_cmp_min_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err", + { .int_msk = BIT(2), + .msg = "tso_mss_cmp_max_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "buf_wait_timeout", + { .int_msk = BIT(5), + .msg = "buf_wait_timeout", .reset_level = HNS3_NONE_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error imp_tcm_ecc_int[] = { - { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err", 
+ { .int_msk = BIT(1), + .msg = "imp_itcm0_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err", + { .int_msk = BIT(3), + .msg = "imp_itcm1_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err", + { .int_msk = BIT(5), + .msg = "imp_itcm2_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err", + { .int_msk = BIT(7), + .msg = "imp_itcm3_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err", + { .int_msk = BIT(9), + .msg = "imp_dtcm0_mem0_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err", + { .int_msk = BIT(11), + .msg = "imp_dtcm0_mem1_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err", + { .int_msk = BIT(13), + .msg = "imp_dtcm1_mem0_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err", + { .int_msk = BIT(15), + .msg = "imp_dtcm1_mem1_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err", + { .int_msk = BIT(17), + .msg = "imp_itcm4_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error cmdq_mem_ecc_int[] = { - { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err", + { .int_msk = BIT(1), + .msg = "cmdq_nic_rx_depth_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err", + { .int_msk = BIT(3), + .msg = "cmdq_nic_tx_depth_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err", + { .int_msk = BIT(5), + .msg = "cmdq_nic_rx_tail_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err", + { .int_msk = BIT(7), + .msg = "cmdq_nic_tx_tail_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err", + { .int_msk = BIT(9), + .msg = "cmdq_nic_rx_head_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err", + { .int_msk = BIT(11), + .msg = "cmdq_nic_tx_head_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err", + { .int_msk = BIT(13), + .msg = "cmdq_nic_rx_addr_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err", + { .int_msk = BIT(15), + .msg = "cmdq_nic_tx_addr_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error tqp_int_ecc_int[] = { - { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err", + { .int_msk = BIT(6), + .msg = "tqp_int_cfg_even_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err", + { .int_msk = BIT(7), + .msg = "tqp_int_cfg_odd_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err", + { .int_msk = BIT(8), + .msg = "tqp_int_ctrl_even_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err", + { .int_msk = BIT(9), + .msg = "tqp_int_ctrl_odd_ecc_mbit_err", 
.reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err", + { .int_msk = BIT(10), + .msg = "tx_que_scan_int_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err", + { .int_msk = BIT(11), + .msg = "rx_que_scan_int_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error imp_rd_poison_int[] = { - { .int_msk = BIT(0), .msg = "imp_rd_poison_int", + { .int_msk = BIT(0), + .msg = "imp_rd_poison_int", .reset_level = HNS3_NONE_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; #define HNS3_SSU_MEM_ECC_ERR(x) \ - { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \ + { .int_msk = BIT(x), \ + .msg = "ssu_mem" #x "_ecc_mbit_err", \ .reset_level = HNS3_GLOBAL_RESET } static const struct hns3_hw_error ssu_ecc_multi_bit_int_0[] = { @@ -258,515 +338,726 @@ static const struct hns3_hw_error ssu_ecc_multi_bit_int_0[] = { HNS3_SSU_MEM_ECC_ERR(29), HNS3_SSU_MEM_ECC_ERR(30), HNS3_SSU_MEM_ECC_ERR(31), - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ssu_ecc_multi_bit_int_1[] = { - { .int_msk = BIT(0), .msg = "ssu_mem32_ecc_mbit_err", + { .int_msk = BIT(0), + .msg = "ssu_mem32_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ssu_common_ecc_int[] = { - { .int_msk = BIT(0), .msg = "buf_sum_err", + { .int_msk = BIT(0), + .msg = "buf_sum_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "ppp_mb_num_err", + { .int_msk = BIT(1), + .msg = "ppp_mb_num_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = BIT(2), .msg = "ppp_mbid_err", + { .int_msk = BIT(2), + .msg = "ppp_mbid_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err", + { .int_msk = BIT(3), + .msg = "ppp_rlt_mac_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "ppp_rlt_host_err", + { .int_msk = BIT(4), + .msg = "ppp_rlt_host_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "cks_edit_position_err", + { .int_msk = BIT(5), + .msg = "cks_edit_position_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "cks_edit_condition_err", + { .int_msk = BIT(6), + .msg = "cks_edit_condition_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "vlan_edit_condition_err", + { .int_msk = BIT(7), + .msg = "vlan_edit_condition_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "vlan_num_ot_err", + { .int_msk = BIT(8), + .msg = "vlan_num_ot_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "vlan_num_in_err", + { .int_msk = BIT(9), + .msg = "vlan_num_in_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error igu_int[] = { - { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err", + { .int_msk = BIT(0), + .msg = "igu_rx_buf0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err", + { .int_msk = BIT(2), + .msg = "igu_rx_buf1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, 
.reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error msix_ecc_int[] = { - { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err", + { .int_msk = BIT(1), + .msg = "msix_nic_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ppp_mpf_abnormal_int_st1[] = { - { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err", + { .int_msk = BIT(0), + .msg = "vf_vlan_ad_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err", + { .int_msk = BIT(1), + .msg = "umv_mcast_group_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err", + { .int_msk = BIT(2), + .msg = "umv_key_mem0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err", + { .int_msk = BIT(3), + .msg = "umv_key_mem1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err", + { .int_msk = BIT(4), + .msg = "umv_key_mem2_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err", + { .int_msk = BIT(5), + .msg = "umv_key_mem3_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err", + { .int_msk = BIT(6), + .msg = "umv_ad_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err", + { .int_msk = BIT(7), + .msg = "rss_tc_mode_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err", + { .int_msk = BIT(8), + .msg = "rss_idt_mem0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err", + { .int_msk = BIT(9), + .msg = "rss_idt_mem1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err", + { .int_msk = BIT(10), + .msg = "rss_idt_mem2_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err", + { .int_msk = BIT(11), + .msg = "rss_idt_mem3_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err", + { .int_msk = BIT(12), + .msg = "rss_idt_mem4_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err", + { .int_msk = BIT(13), + .msg = "rss_idt_mem5_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err", + { .int_msk = BIT(14), + .msg = "rss_idt_mem6_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err", + { .int_msk = BIT(15), + .msg = "rss_idt_mem7_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err", + { .int_msk = BIT(16), + .msg = "rss_idt_mem8_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err", + { .int_msk = BIT(17), + .msg = "rss_idt_mem9_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err", + { .int_msk = BIT(18), + .msg = "rss_idt_mem10_ecc_m1bit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err", + { .int_msk = BIT(19), + .msg = "rss_idt_mem11_ecc_mbit_err", .reset_level = 
HNS3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err", + { .int_msk = BIT(20), + .msg = "rss_idt_mem12_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err", + { .int_msk = BIT(21), + .msg = "rss_idt_mem13_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err", + { .int_msk = BIT(22), + .msg = "rss_idt_mem14_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err", + { .int_msk = BIT(23), + .msg = "rss_idt_mem15_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err", + { .int_msk = BIT(24), + .msg = "port_vlan_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err", + { .int_msk = BIT(25), + .msg = "mcast_linear_table_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err", + { .int_msk = BIT(26), + .msg = "mcast_result_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err", + { .int_msk = BIT(27), + .msg = "flow_director_ad_mem0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err", + { .int_msk = BIT(28), + .msg = "flow_director_ad_mem1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err", + { .int_msk = BIT(29), + .msg = "rx_vlan_tag_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", + { .int_msk = BIT(30), + .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ppp_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err", + { .int_msk = BIT(0), + .msg = "hfs_fifo_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err", + { .int_msk = BIT(1), + .msg = "rslt_descr_fifo_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err", + { .int_msk = BIT(2), + .msg = "tx_vlan_tag_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err", + { .int_msk = BIT(3), + .msg = "FD_CN0_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err", + { .int_msk = BIT(4), + .msg = "FD_CN1_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err", + { .int_msk = BIT(5), + .msg = "GRO_AD_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ppu_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err", + { .int_msk = BIT(4), + .msg = "gro_bd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err", + { .int_msk = BIT(5), + .msg = "gro_context_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err", + { .int_msk = BIT(6), + .msg = 
"rx_stash_cfg_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err", + { .int_msk = BIT(7), + .msg = "axi_rd_fbd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error tm_sch_int[] = { - { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err", + { .int_msk = BIT(1), + .msg = "tm_sch_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err", + { .int_msk = BIT(2), + .msg = "tm_sch_port_shap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err", + { .int_msk = BIT(3), + .msg = "tm_sch_port_shap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", + { .int_msk = BIT(4), + .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", + { .int_msk = BIT(5), + .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", + { .int_msk = BIT(6), + .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", + { .int_msk = BIT(7), + .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", + { .int_msk = BIT(8), + .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", + { .int_msk = BIT(9), + .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", + { .int_msk = BIT(10), + .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", + { .int_msk = BIT(11), + .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err", + { .int_msk = BIT(12), + .msg = "tm_sch_port_shap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err", + { .int_msk = BIT(13), + .msg = "tm_sch_port_shap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", + { .int_msk = BIT(14), + .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", + { .int_msk = BIT(15), + .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", + { .int_msk = BIT(16), + .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", + { .int_msk = BIT(17), + .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", + { .int_msk = BIT(18), + .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", + { .int_msk = BIT(19), + .msg = 
"tm_sch_pri_pshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", + { .int_msk = BIT(20), + .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", + { .int_msk = BIT(21), + .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err", + { .int_msk = BIT(22), + .msg = "tm_sch_rq_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err", + { .int_msk = BIT(23), + .msg = "tm_sch_rq_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err", + { .int_msk = BIT(24), + .msg = "tm_sch_nq_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err", + { .int_msk = BIT(25), + .msg = "tm_sch_nq_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err", + { .int_msk = BIT(26), + .msg = "tm_sch_roce_up_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err", + { .int_msk = BIT(27), + .msg = "tm_sch_roce_up_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err", + { .int_msk = BIT(28), + .msg = "tm_sch_rcb_byte_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err", + { .int_msk = BIT(29), + .msg = "tm_sch_rcb_byte_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err", + { .int_msk = BIT(30), + .msg = "tm_sch_ssu_byte_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err", + { .int_msk = BIT(31), + .msg = "tm_sch_ssu_byte_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error qcn_fifo_int[] = { - { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err", + { .int_msk = BIT(0), + .msg = "qcn_shap_gp0_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err", + { .int_msk = BIT(1), + .msg = "qcn_shap_gp0_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err", + { .int_msk = BIT(2), + .msg = "qcn_shap_gp1_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err", + { .int_msk = BIT(3), + .msg = "qcn_shap_gp1_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err", + { .int_msk = BIT(4), + .msg = "qcn_shap_gp2_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err", + { .int_msk = BIT(5), + .msg = "qcn_shap_gp2_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err", + { .int_msk = BIT(6), + .msg = "qcn_shap_gp3_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err", + { .int_msk = BIT(7), + .msg = "qcn_shap_gp3_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err", + { .int_msk = BIT(8), + .msg = 
"qcn_shap_gp0_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err", + { .int_msk = BIT(9), + .msg = "qcn_shap_gp0_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err", + { .int_msk = BIT(10), + .msg = "qcn_shap_gp1_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err", + { .int_msk = BIT(11), + .msg = "qcn_shap_gp1_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err", + { .int_msk = BIT(12), + .msg = "qcn_shap_gp2_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err", + { .int_msk = BIT(13), + .msg = "qcn_shap_gp2_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err", + { .int_msk = BIT(14), + .msg = "qcn_shap_gp3_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err", + { .int_msk = BIT(15), + .msg = "qcn_shap_gp3_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err", + { .int_msk = BIT(16), + .msg = "qcn_byte_info_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err", + { .int_msk = BIT(17), + .msg = "qcn_byte_info_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error qcn_ecc_int[] = { - { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err", + { .int_msk = BIT(1), + .msg = "qcn_byte_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err", + { .int_msk = BIT(3), + .msg = "qcn_time_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err", + { .int_msk = BIT(5), + .msg = "qcn_fb_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err", + { .int_msk = BIT(7), + .msg = "qcn_link_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err", + { .int_msk = BIT(9), + .msg = "qcn_rate_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err", + { .int_msk = BIT(11), + .msg = "qcn_tmplt_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err", + { .int_msk = BIT(13), + .msg = "qcn_shap_cfg_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", + { .int_msk = BIT(15), + .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", + { .int_msk = BIT(17), + .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", + { .int_msk = BIT(19), + .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err", + { .int_msk = BIT(21), + .msg = "qcn_gp3_barral_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 
0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ncsi_ecc_int[] = { - { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err", + { .int_msk = BIT(1), + .msg = "ncsi_tx_ecc_mbit_err", .reset_level = HNS3_NONE_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ssu_fifo_overflow_int[] = { - { .int_msk = BIT(0), .msg = "ig_mac_inf_int", + { .int_msk = BIT(0), + .msg = "ig_mac_inf_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "ig_host_inf_int", + { .int_msk = BIT(1), + .msg = "ig_host_inf_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "ig_roc_buf_int", + { .int_msk = BIT(2), + .msg = "ig_roc_buf_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int", + { .int_msk = BIT(3), + .msg = "ig_host_data_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int", + { .int_msk = BIT(4), + .msg = "ig_host_key_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int", + { .int_msk = BIT(5), + .msg = "tx_qcn_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int", + { .int_msk = BIT(6), + .msg = "rx_qcn_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int", + { .int_msk = BIT(7), + .msg = "tx_pf_rd_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int", + { .int_msk = BIT(8), + .msg = "rx_pf_rd_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qm_eof_fifo_int", + { .int_msk = BIT(9), + .msg = "qm_eof_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int", + { .int_msk = BIT(10), + .msg = "mb_rlt_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int", + { .int_msk = BIT(11), + .msg = "dup_uncopy_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int", + { .int_msk = BIT(12), + .msg = "dup_cnt_rd_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int", + { .int_msk = BIT(13), + .msg = "dup_cnt_drop_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int", + { .int_msk = BIT(14), + .msg = "dup_cnt_wrb_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "host_cmd_fifo_int", + { .int_msk = BIT(15), + .msg = "host_cmd_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int", + { .int_msk = BIT(16), + .msg = "mac_cmd_fifo_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int", + { .int_msk = BIT(17), + .msg = "host_cmd_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int", + { .int_msk = BIT(18), + .msg = "mac_cmd_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int", + { .int_msk = BIT(19), + .msg = "dup_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int", + { .int_msk = BIT(20), + .msg = "out_queue_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int", + { .int_msk = BIT(21), + .msg = "bank2_bitmap_empty_int", 
.reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int", + { .int_msk = BIT(22), + .msg = "bank1_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int", + { .int_msk = BIT(23), + .msg = "bank0_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ssu_ets_tcg_int[] = { - { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg", + { .int_msk = BIT(0), + .msg = "ets_rd_int_rx_tcg", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg", + { .int_msk = BIT(1), + .msg = "ets_wr_int_rx_tcg", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg", + { .int_msk = BIT(2), + .msg = "ets_rd_int_tx_tcg", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg", + { .int_msk = BIT(3), + .msg = "ets_wr_int_tx_tcg", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error igu_egu_tnl_int[] = { - { .int_msk = BIT(0), .msg = "rx_buf_overflow", + { .int_msk = BIT(0), + .msg = "rx_buf_overflow", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", + { .int_msk = BIT(1), + .msg = "rx_stp_fifo_overflow", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow", + { .int_msk = BIT(2), + .msg = "rx_stp_fifo_underflow", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "tx_buf_overflow", + { .int_msk = BIT(3), + .msg = "tx_buf_overflow", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tx_buf_underrun", + { .int_msk = BIT(4), + .msg = "tx_buf_underrun", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow", + { .int_msk = BIT(5), + .msg = "rx_stp_buf_overflow", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error ssu_port_based_err_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", + { .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", .reset_level = HNS3_FUNC_RESET }, - { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port", + { .int_msk = BIT(1), + .msg = "tpu_pkt_without_key_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port", + { .int_msk = BIT(2), + .msg = "igu_pkt_without_key_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port", + { .int_msk = BIT(3), + .msg = "roc_eof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port", + { .int_msk = BIT(4), + .msg = "tpu_eof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port", + { .int_msk = BIT(5), + .msg = "igu_eof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port", + { .int_msk = BIT(6), + .msg = "roc_sof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port", + { .int_msk = BIT(7), + .msg = "tpu_sof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port", + { .int_msk = BIT(8), + .msg = "igu_sof_mis_match_port", .reset_level 
= HNS3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port", + { .int_msk = BIT(11), + .msg = "ets_rd_int_rx_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port", + { .int_msk = BIT(12), + .msg = "ets_wr_int_rx_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port", + { .int_msk = BIT(13), + .msg = "ets_rd_int_tx_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port", + { .int_msk = BIT(14), + .msg = "ets_wr_int_tx_port", .reset_level = HNS3_GLOBAL_RESET }, - { .int_msk = 0, .msg = NULL, + { .int_msk = 0, + .msg = NULL, .reset_level = HNS3_NONE_RESET} }; static const struct hns3_hw_error_desc mpf_ras_err_tbl[] = { - { .desc_offset = 0, .data_offset = 0, + { .desc_offset = 0, + .data_offset = 0, .msg = "IMP_TCM_ECC_INT_STS", .hw_err = imp_tcm_ecc_int }, - { .desc_offset = 0, .data_offset = 1, + { .desc_offset = 0, + .data_offset = 1, .msg = "CMDQ_MEM_ECC_INT_STS", .hw_err = cmdq_mem_ecc_int }, - { .desc_offset = 0, .data_offset = 2, + { .desc_offset = 0, + .data_offset = 2, .msg = "IMP_RD_POISON_INT_STS", .hw_err = imp_rd_poison_int }, - { .desc_offset = 0, .data_offset = 3, + { .desc_offset = 0, + .data_offset = 3, .msg = "TQP_INT_ECC_INT_STS", .hw_err = tqp_int_ecc_int }, - { .desc_offset = 0, .data_offset = 4, + { .desc_offset = 0, + .data_offset = 4, .msg = "MSIX_ECC_INT_STS", .hw_err = msix_ecc_int }, - { .desc_offset = 2, .data_offset = 2, + { .desc_offset = 2, + .data_offset = 2, .msg = "SSU_ECC_MULTI_BIT_INT_0", .hw_err = ssu_ecc_multi_bit_int_0 }, - { .desc_offset = 2, .data_offset = 3, + { .desc_offset = 2, + .data_offset = 3, .msg = "SSU_ECC_MULTI_BIT_INT_1", .hw_err = ssu_ecc_multi_bit_int_1 }, - { .desc_offset = 2, .data_offset = 4, + { .desc_offset = 2, + .data_offset = 4, .msg = "SSU_COMMON_ERR_INT", .hw_err = ssu_common_ecc_int }, - { .desc_offset = 3, .data_offset = 0, + { .desc_offset = 3, + .data_offset = 0, .msg = "IGU_INT_STS", .hw_err = igu_int }, - { .desc_offset = 4, .data_offset = 1, + { .desc_offset = 4, + .data_offset = 1, .msg = "PPP_MPF_ABNORMAL_INT_ST1", .hw_err = ppp_mpf_abnormal_int_st1 }, - { .desc_offset = 4, .data_offset = 3, + { .desc_offset = 4, + .data_offset = 3, .msg = "PPP_MPF_ABNORMAL_INT_ST3", .hw_err = ppp_mpf_abnormal_int_st3 }, - { .desc_offset = 5, .data_offset = 1, + { .desc_offset = 5, + .data_offset = 1, .msg = "PPU_MPF_ABNORMAL_INT_ST1", .hw_err = ppu_mpf_abnormal_int_st1 }, - { .desc_offset = 5, .data_offset = 2, + { .desc_offset = 5, + .data_offset = 2, .msg = "PPU_MPF_ABNORMAL_INT_ST2_RAS", .hw_err = ppu_mpf_abnormal_int_st2_ras }, - { .desc_offset = 5, .data_offset = 3, + { .desc_offset = 5, + .data_offset = 3, .msg = "PPU_MPF_ABNORMAL_INT_ST3", .hw_err = ppu_mpf_abnormal_int_st3 }, - { .desc_offset = 6, .data_offset = 0, + { .desc_offset = 6, + .data_offset = 0, .msg = "TM_SCH_RINT", .hw_err = tm_sch_int }, - { .desc_offset = 7, .data_offset = 0, + { .desc_offset = 7, + .data_offset = 0, .msg = "QCN_FIFO_RINT", .hw_err = qcn_fifo_int }, - { .desc_offset = 7, .data_offset = 1, + { .desc_offset = 7, + .data_offset = 1, .msg = "QCN_ECC_RINT", .hw_err = qcn_ecc_int }, - { .desc_offset = 9, .data_offset = 0, + { .desc_offset = 9, + .data_offset = 0, .msg = "NCSI_ECC_INT_RPT", .hw_err = ncsi_ecc_int }, - { .desc_offset = 0, .data_offset = 0, + { .desc_offset = 0, + .data_offset = 0, .msg = NULL, .hw_err = NULL } }; static const struct hns3_hw_error_desc pf_ras_err_tbl[] = { - { .desc_offset = 0, 
.data_offset = 0, + { .desc_offset = 0, + .data_offset = 0, .msg = "SSU_PORT_BASED_ERR_INT_RAS", .hw_err = ssu_port_based_err_int }, - { .desc_offset = 0, .data_offset = 1, + { .desc_offset = 0, + .data_offset = 1, .msg = "SSU_FIFO_OVERFLOW_INT", .hw_err = ssu_fifo_overflow_int }, - { .desc_offset = 0, .data_offset = 2, + { .desc_offset = 0, + .data_offset = 2, .msg = "SSU_ETS_TCG_INT", .hw_err = ssu_ets_tcg_int }, - { .desc_offset = 1, .data_offset = 0, + { .desc_offset = 1, + .data_offset = 0, .msg = "IGU_EGU_TNL_INT_STS", .hw_err = igu_egu_tnl_int }, - { .desc_offset = 3, .data_offset = 0, + { .desc_offset = 3, + .data_offset = 0, .msg = "PPU_PF_ABNORMAL_INT_ST_RAS", .hw_err = ppu_pf_abnormal_int_ras }, - { .desc_offset = 0, .data_offset = 0, + { .desc_offset = 0, + .data_offset = 0, .msg = NULL, .hw_err = NULL } }; static const struct hns3_hw_error_desc mpf_msix_err_tbl[] = { - { .desc_offset = 1, .data_offset = 0, + { .desc_offset = 1, + .data_offset = 0, .msg = "MAC_AFIFO_TNL_INT_R", .hw_err = mac_afifo_tnl_int }, - { .desc_offset = 5, .data_offset = 2, + { .desc_offset = 5, + .data_offset = 2, .msg = "PPU_MPF_ABNORMAL_INT_ST2_MSIX", .hw_err = ppu_mpf_abnormal_int_st2_msix }, - { .desc_offset = 0, .data_offset = 0, + { .desc_offset = 0, + .data_offset = 0, .msg = NULL, .hw_err = NULL } }; static const struct hns3_hw_error_desc pf_msix_err_tbl[] = { - { .desc_offset = 0, .data_offset = 0, + { .desc_offset = 0, + .data_offset = 0, .msg = "SSU_PORT_BASED_ERR_INT_MSIX", .hw_err = ssu_port_based_pf_int }, - { .desc_offset = 2, .data_offset = 0, + { .desc_offset = 2, + .data_offset = 0, .msg = "PPP_PF_ABNORMAL_INT_ST0", .hw_err = ppp_pf_abnormal_int }, - { .desc_offset = 3, .data_offset = 0, + { .desc_offset = 3, + .data_offset = 0, .msg = "PPU_PF_ABNORMAL_INT_ST_MSIX", .hw_err = ppu_pf_abnormal_int_msix }, - { .desc_offset = 0, .data_offset = 0, + { .desc_offset = 0, + .data_offset = 0, .msg = NULL, .hw_err = NULL } }; diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index 85316ca..285c06d 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -1564,7 +1564,6 @@ hns3_rx_buf_len_calc(struct rte_mempool *mp, uint16_t *rx_buf_len) vld_buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM); - if (vld_buf_size < HNS3_MIN_BD_BUF_SIZE) return -EINVAL; diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c index c590647..91168ac 100644 --- a/drivers/net/hns3/hns3_stats.c +++ b/drivers/net/hns3/hns3_stats.c @@ -679,7 +679,6 @@ hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, (*count)++; } } - } void From patchwork Fri Nov 6 03:51:55 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 83790 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 84B1FA0524; Fri, 6 Nov 2020 04:52:41 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 698323772; Fri, 6 Nov 2020 04:51:42 +0100 (CET) Received: from szxga04-in.huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id D7E882C2E for ; Fri, 6 Nov 2020 04:51:39 +0100 (CET) Received: from DGGEMS407-HUB.china.huawei.com (unknown [172.30.72.58]) by szxga04-in.huawei.com (SkyGuard) with ESMTP id 
4CS5zT3Jh0z15QRJ for ; Fri, 6 Nov 2020 11:51:33 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS407-HUB.china.huawei.com (10.3.19.207) with Microsoft SMTP Server id 14.3.487.0; Fri, 6 Nov 2020 11:51:27 +0800 From: Lijun Ou To: CC: , Date: Fri, 6 Nov 2020 11:51:55 +0800 Message-ID: <1604634716-43484-5-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1604634716-43484-1-git-send-email-oulijun@huawei.com> References: <1604586194-29523-1-git-send-email-oulijun@huawei.com> <1604634716-43484-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v2 4/5] net/hns3: check PCI config space writes X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Here adds a check for the return value when calling rte_pci_write_config. Coverity issue: 363714 Fixes: cea37e513329 ("net/hns3: fix FLR reset") Cc: stable@dpdk.org Signed-off-by: Lijun Ou --- V1->V2: - rte_pci_wirte_config -> rte_pci_write_config --- drivers/net/hns3/hns3_ethdev_vf.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 2e9bfda..2f6d91b 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -139,7 +139,7 @@ hns3vf_enable_msix(const struct rte_pci_device *device, bool op) ret = rte_pci_read_config(device, &control, sizeof(control), (pos + PCI_MSIX_FLAGS)); if (ret < 0) { - PMD_INIT_LOG(ERR, "Failed to read PCI offset 0x%x", + PMD_INIT_LOG(ERR, "failed to read PCI offset 0x%x", (pos + PCI_MSIX_FLAGS)); return -ENXIO; } @@ -148,8 +148,12 @@ hns3vf_enable_msix(const struct rte_pci_device *device, bool op) control |= PCI_MSIX_FLAGS_ENABLE; else control &= ~PCI_MSIX_FLAGS_ENABLE; - rte_pci_write_config(device, &control, sizeof(control), - (pos + PCI_MSIX_FLAGS)); + ret = rte_pci_write_config(device, &control, sizeof(control), + (pos + PCI_MSIX_FLAGS)); + if (ret < 0) { + PMD_INIT_LOG(ERR, "failed to write PCI offset 0x%x", + (pos + PCI_MSIX_FLAGS)); + } return 0; } return -ENXIO; From patchwork Fri Nov 6 03:51:56 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 83791 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 20E6DA0524; Fri, 6 Nov 2020 04:53:01 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id B504A558E; Fri, 6 Nov 2020 04:51:43 +0100 (CET) Received: from szxga04-in.huawei.com (szxga04-in.huawei.com [45.249.212.190]) by dpdk.org (Postfix) with ESMTP id F08A42C55 for ; Fri, 6 Nov 2020 04:51:39 +0100 (CET) Received: from DGGEMS407-HUB.china.huawei.com (unknown [172.30.72.58]) by szxga04-in.huawei.com (SkyGuard) with ESMTP id 4CS5zT3hp5z15QTk for ; Fri, 6 Nov 2020 11:51:33 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS407-HUB.china.huawei.com (10.3.19.207) with Microsoft SMTP Server id 14.3.487.0; Fri, 6 Nov 2020 11:51:27 +0800 From: Lijun Ou To: CC: , Date: Fri, 6 Nov 2020 11:51:56 +0800 Message-ID: 
<1604634716-43484-6-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1604634716-43484-1-git-send-email-oulijun@huawei.com> References: <1604586194-29523-1-git-send-email-oulijun@huawei.com> <1604634716-43484-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH v2 5/5] net/hns3: fix queue enabling status not store after FLR X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengchang Tang The FLR resets the queue enabling status. In the current code, the queue enabling status is not restored after the reset. Therefore, if upper layer users have called the queue start/stop functions before the reset, the behavior after the reset is not as expected. This patch fixes it by adding a queue enabling status restore function to the reset handler. Fixes: fa29fe45a7b4 ("net/hns3: support queue start and stop") Cc: stable@dpdk.org Signed-off-by: Chengchang Tang Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_ethdev.c | 5 +++++ drivers/net/hns3/hns3_ethdev_vf.c | 5 +++++ drivers/net/hns3/hns3_rxtx.c | 20 ++++++++++++++++++++ drivers/net/hns3/hns3_rxtx.h | 1 + 4 files changed, 31 insertions(+) diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index b27cf67..e177549 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -5567,6 +5567,11 @@ hns3_start_service(struct hns3_adapter *hns) /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); /* + * Enable state of each rxq and txq will be recovered after + * reset, so we need restore them before enable all tqps; + */ + hns3_restore_tqp_enable_state(hw); + /* * When finished the initialization, enable queues to receive * and transmit packets. */ diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 2f6d91b..511cd26 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -2424,6 +2424,11 @@ hns3vf_start_service(struct hns3_adapter *hns) /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); /* + * Enable state of each rxq and txq will be recovered after + * reset, so we need restore them before enable all tqps; + */ + hns3_restore_tqp_enable_state(hw); + /* * When finished the initialization, enable queues to receive * and transmit packets.
*/ diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index 285c06d..c76e635 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -503,6 +503,26 @@ hns3_start_all_rxqs(struct rte_eth_dev *dev) } void +hns3_restore_tqp_enable_state(struct hns3_hw *hw) +{ + struct hns3_rx_queue *rxq; + struct hns3_tx_queue *txq; + uint16_t i; + + for (i = 0; i < hw->data->nb_rx_queues; i++) { + rxq = hw->data->rx_queues[i]; + if (rxq != NULL) + hns3_enable_rxq(rxq, rxq->enabled); + } + + for (i = 0; i < hw->data->nb_tx_queues; i++) { + txq = hw->data->tx_queues[i]; + if (txq != NULL) + hns3_enable_txq(txq, txq->enabled); + } +} + +void hns3_stop_all_txqs(struct rte_eth_dev *dev) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h index 8b32abe..6538848 100644 --- a/drivers/net/hns3/hns3_rxtx.h +++ b/drivers/net/hns3/hns3_rxtx.h @@ -677,5 +677,6 @@ uint32_t hns3_get_tqp_reg_offset(uint16_t idx); int hns3_start_all_txqs(struct rte_eth_dev *dev); int hns3_start_all_rxqs(struct rte_eth_dev *dev); void hns3_stop_all_txqs(struct rte_eth_dev *dev); +void hns3_restore_tqp_enable_state(struct hns3_hw *hw); #endif /* _HNS3_RXTX_H_ */
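The user-visible behaviour that patch 5/5 preserves can be illustrated from the application side. The fragment below is an illustration only, not part of the series: the port and queue ids are hypothetical, and it simply uses the generic ethdev queue-stop API. With hns3_restore_tqp_enable_state() called from hns3_start_service()/hns3vf_start_service(), a queue stopped this way is expected to stay stopped once the driver recovers from an FLR instead of silently coming back up.

    #include <stdint.h>
    #include <rte_ethdev.h>

    /*
     * Illustration only: a user-initiated queue stop whose effect should
     * survive an FLR. Port and queue ids are hypothetical.
     */
    static int
    stop_one_rx_queue(uint16_t port_id, uint16_t queue_id)
    {
            int ret;

            /* Stop a single Rx queue at runtime via the generic ethdev API. */
            ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
            if (ret != 0)
                    return ret;

            /*
             * If an FLR happens after this point, the restore hook replays
             * the saved per-queue enable flags during start_service, so this
             * queue remains stopped rather than being re-enabled by the reset.
             */
            return 0;
    }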
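One remark on the design choice in patch 4/5 above: the accepted hunk logs a failed rte_pci_write_config(), but hns3vf_enable_msix() still returns 0, whereas a failed read returns -ENXIO. The fragment below is only a sketch of the stricter alternative, not what the patch does; PCI_MSIX_FLAGS is assumed to be the driver's local define for the MSI-X control register offset, and the surrounding capability lookup is assumed to match the driver.

    #include <errno.h>
    #include <stdint.h>
    #include <sys/types.h>
    #include <rte_bus_pci.h>

    /*
     * Sketch of a stricter variant: propagate a PCI config space write
     * failure the same way the read path already does.
     */
    static int
    msix_control_write(const struct rte_pci_device *device, off_t pos,
                       uint16_t control)
    {
            int ret;

            ret = rte_pci_write_config(device, &control, sizeof(control),
                                       pos + PCI_MSIX_FLAGS);
            if (ret < 0)
                    return -ENXIO; /* mirror the error handling of the read path */

            return 0;
    }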