From patchwork Wed Feb 3 07:46:06 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87656 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 13AFBA0A0E; Wed, 3 Feb 2021 08:47:33 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 44C8F24046B; Wed, 3 Feb 2021 08:46:59 +0100 (CET) Received: from szxga07-in.huawei.com (szxga07-in.huawei.com [45.249.212.35]) by mails.dpdk.org (Postfix) with ESMTP id 07FBB240445 for ; Wed, 3 Feb 2021 08:46:52 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.60]) by szxga07-in.huawei.com (SkyGuard) with ESMTP id 4DVtyN2bm3z7gP6; Wed, 3 Feb 2021 15:45:32 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:47 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:06 +0800 Message-ID: <1612338382-3253-2-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 01/17] net/hns3: support module EEPROM dump X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengchang Tang This patch adds support for dumping module EEPROM.
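
For context, a minimal usage sketch (not part of this patch): an application consumes the new dev ops through the generic ethdev API. The helper name below is invented for illustration and it assumes the port is an hns3 fiber port with a module plugged in.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: dump the module EEPROM of one port. */
static int
dump_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info eeprom;
	int ret;

	/* Resolved by hns3_get_module_info(): SFF-8472/8436/8636 type. */
	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	memset(&eeprom, 0, sizeof(eeprom));
	eeprom.length = modinfo.eeprom_len;
	eeprom.data = calloc(1, modinfo.eeprom_len);
	if (eeprom.data == NULL)
		return -ENOMEM;

	/* Served by hns3_get_module_eeprom() via HNS3_OPC_GET_SFP_EEPROM. */
	ret = rte_eth_dev_get_module_eeprom(port_id, &eeprom);
	if (ret == 0)
		printf("module type %u, EEPROM length %u bytes\n",
		       modinfo.type, modinfo.eeprom_len);

	free(eeprom.data);
	return ret;
}
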
Signed-off-by: Chengchang Tang Signed-off-by: Lijun Ou --- Depends-on: series-10000 ("update doc for hns3") --- doc/guides/nics/features/hns3.ini | 1 + doc/guides/rel_notes/release_21_02.rst | 1 + drivers/net/hns3/hns3_cmd.h | 16 ++++ drivers/net/hns3/hns3_ethdev.c | 159 +++++++++++++++++++++++++++++++++ 4 files changed, 177 insertions(+) diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini index 452a7b5..40febb0 100644 --- a/doc/guides/nics/features/hns3.ini +++ b/doc/guides/nics/features/hns3.ini @@ -38,6 +38,7 @@ Extended stats = Y Stats per queue = Y FW version = Y Registers dump = Y +Module EEPROM dump = Y Multiprocess aware = Y FEC = Y Linux = Y diff --git a/doc/guides/rel_notes/release_21_02.rst b/doc/guides/rel_notes/release_21_02.rst index b87db22..cfbc42f 100644 --- a/doc/guides/rel_notes/release_21_02.rst +++ b/doc/guides/rel_notes/release_21_02.rst @@ -95,6 +95,7 @@ New Features * **Updated hns3 driver.** * Added support for traffic management + * Added support for module EEPROM dumping * **Updated Intel ice driver.** diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h index dc97a1a..e5852dc 100644 --- a/drivers/net/hns3/hns3_cmd.h +++ b/drivers/net/hns3/hns3_cmd.h @@ -207,6 +207,8 @@ enum hns3_opcode_type { HNS3_OPC_CLEAR_HW_STATE = 0x700B, /* SFP command */ + HNS3_OPC_GET_SFP_EEPROM = 0x7100, + HNS3_OPC_GET_SFP_EXIST = 0x7101, HNS3_OPC_SFP_GET_SPEED = 0x7104, /* Interrupts commands */ @@ -698,6 +700,20 @@ struct hns3_config_auto_neg_cmd { #define HNS3_MAC_FEC_BASER 1 #define HNS3_MAC_FEC_RS 2 +#define HNS3_SFP_INFO_BD0_LEN 20UL +#define HNS3_SFP_INFO_BDX_LEN 24UL + +struct hns3_sfp_info_bd0_cmd { + uint16_t offset; + uint16_t read_len; + uint8_t data[HNS3_SFP_INFO_BD0_LEN]; +}; + +struct hns3_sfp_type { + uint8_t type; + uint8_t ext_type; +}; + struct hns3_sfp_speed_cmd { uint32_t sfp_speed; uint8_t query_type; /* 0: sfp speed, 1: active fec */ diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 221e008..31418b8 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -6105,6 +6105,163 @@ hns3_query_dev_fec_info(struct hns3_hw *hw) return ret; } +static bool +hns3_optical_module_existed(struct hns3_hw *hw) +{ + struct hns3_cmd_desc desc; + bool existed; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_EXIST, true); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, + "fail to get optical module exist state, ret = %d.\n", + ret); + return false; + } + existed = !!desc.data[0]; + + return existed; +} + +static int +hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, + uint32_t len, uint8_t *data) +{ +#define HNS3_SFP_INFO_CMD_NUM 6 +#define HNS3_SFP_INFO_MAX_LEN \ + (HNS3_SFP_INFO_BD0_LEN + \ + (HNS3_SFP_INFO_CMD_NUM - 1) * HNS3_SFP_INFO_BDX_LEN) + struct hns3_cmd_desc desc[HNS3_SFP_INFO_CMD_NUM]; + struct hns3_sfp_info_bd0_cmd *sfp_info_bd0; + uint16_t read_len; + uint16_t copy_len; + int ret; + int i; + + for (i = 0; i < HNS3_SFP_INFO_CMD_NUM; i++) { + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_GET_SFP_EEPROM, + true); + if (i < HNS3_SFP_INFO_CMD_NUM - 1) + desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + } + + sfp_info_bd0 = (struct hns3_sfp_info_bd0_cmd *)desc[0].data; + sfp_info_bd0->offset = rte_cpu_to_le_16((uint16_t)offset); + read_len = RTE_MIN(len, HNS3_SFP_INFO_MAX_LEN); + sfp_info_bd0->read_len = rte_cpu_to_le_16((uint16_t)read_len); + + ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); + if (ret) { + 
hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n", + ret); + return ret; + } + + /* The data format in BD0 is different with the others. */ + copy_len = RTE_MIN(len, HNS3_SFP_INFO_BD0_LEN); + memcpy(data, sfp_info_bd0->data, copy_len); + read_len = copy_len; + + for (i = 1; i < HNS3_SFP_INFO_CMD_NUM; i++) { + if (read_len >= len) + break; + + copy_len = RTE_MIN(len - read_len, HNS3_SFP_INFO_BDX_LEN); + memcpy(data + read_len, desc[i].data, copy_len); + read_len += copy_len; + } + + return (int)read_len; +} + +static int +hns3_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); + uint32_t offset = info->offset; + uint32_t len = info->length; + uint8_t *data = info->data; + uint32_t read_len = 0; + + if (hw->mac.media_type != HNS3_MEDIA_TYPE_FIBER) + return -ENOTSUP; + + if (!hns3_optical_module_existed(hw)) { + hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); + return -EIO; + } + + while (read_len < len) { + int ret; + ret = hns3_get_module_eeprom_data(hw, offset + read_len, + len - read_len, + data + read_len); + if (ret < 0) + return -EIO; + read_len += ret; + } + + return 0; +} + +static int +hns3_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ +#define HNS3_SFF8024_ID_SFP 0x03 +#define HNS3_SFF8024_ID_QSFP_8438 0x0c +#define HNS3_SFF8024_ID_QSFP_8436_8636 0x0d +#define HNS3_SFF8024_ID_QSFP28_8636 0x11 +#define HNS3_SFF_8636_V1_3 0x03 + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); + struct rte_dev_eeprom_info info; + struct hns3_sfp_type sfp_type; + int ret; + + memset(&sfp_type, 0, sizeof(sfp_type)); + memset(&info, 0, sizeof(info)); + info.data = (uint8_t *)&sfp_type; + info.length = sizeof(sfp_type); + ret = hns3_get_module_eeprom(dev, &info); + if (ret) + return ret; + + switch (sfp_type.type) { + case HNS3_SFF8024_ID_SFP: + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + break; + case HNS3_SFF8024_ID_QSFP_8438: + modinfo->type = RTE_ETH_MODULE_SFF_8436; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; + break; + case HNS3_SFF8024_ID_QSFP_8436_8636: + if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) { + modinfo->type = RTE_ETH_MODULE_SFF_8436; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN; + } else { + modinfo->type = RTE_ETH_MODULE_SFF_8636; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; + } + break; + case HNS3_SFF8024_ID_QSFP28_8636: + modinfo->type = RTE_ETH_MODULE_SFF_8636; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: + hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", + sfp_type.type, sfp_type.ext_type); + return -EINVAL; + } + + return 0; +} + static const struct eth_dev_ops hns3_eth_dev_ops = { .dev_configure = hns3_dev_configure, .dev_start = hns3_dev_start, @@ -6156,6 +6313,8 @@ static const struct eth_dev_ops hns3_eth_dev_ops = { .vlan_offload_set = hns3_vlan_offload_set, .vlan_pvid_set = hns3_vlan_pvid_set, .get_reg = hns3_get_regs, + .get_module_info = hns3_get_module_info, + .get_module_eeprom = hns3_get_module_eeprom, .get_dcb_info = hns3_get_dcb_info, .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, .fec_get_capability = hns3_fec_get_capability, From patchwork Wed Feb 3 07:46:07 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit 
X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87654 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4E6D0A0A0E; Wed, 3 Feb 2021 08:47:11 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 6A3C5240458; Wed, 3 Feb 2021 08:46:56 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id F3A2224043F for ; Wed, 3 Feb 2021 08:46:52 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.60]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtyf5jgjzjGRq; Wed, 3 Feb 2021 15:45:46 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:47 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:07 +0800 Message-ID: <1612338382-3253-3-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 02/17] net/hns3: add more registers to dump X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengchang Tang This patch adds more registers to the dump_reg API output to help locate faults.
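
As background (not part of this patch), the additional DFX register banks are consumed through the standard two-call pattern of rte_eth_dev_get_reg_info(); a rough sketch, with the helper name dump_regs() invented for illustration:

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: fetch the full register dump of one port. */
static int
dump_regs(uint16_t port_id)
{
	struct rte_dev_reg_info info;
	int ret;

	/* First call with data == NULL only reports length and width. */
	memset(&info, 0, sizeof(info));
	ret = rte_eth_dev_get_reg_info(port_id, &info);
	if (ret != 0)
		return ret;

	info.data = calloc(info.length, info.width);
	if (info.data == NULL)
		return -ENOMEM;

	/* Second call fills the buffer; hns3_get_regs() appends the DFX banks. */
	ret = rte_eth_dev_get_reg_info(port_id, &info);
	free(info.data);
	return ret;
}
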
Signed-off-by: Chengchang Tang Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_cmd.h | 13 ++++ drivers/net/hns3/hns3_regs.c | 171 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 180 insertions(+), 4 deletions(-) diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h index e5852dc..e473a30 100644 --- a/drivers/net/hns3/hns3_cmd.h +++ b/drivers/net/hns3/hns3_cmd.h @@ -94,6 +94,19 @@ enum hns3_opcode_type { HNS3_OPC_QUERY_REG_NUM = 0x0040, HNS3_OPC_QUERY_32_BIT_REG = 0x0041, HNS3_OPC_QUERY_64_BIT_REG = 0x0042, + HNS3_OPC_DFX_BD_NUM = 0x0043, + HNS3_OPC_DFX_BIOS_COMMON_REG = 0x0044, + HNS3_OPC_DFX_SSU_REG_0 = 0x0045, + HNS3_OPC_DFX_SSU_REG_1 = 0x0046, + HNS3_OPC_DFX_IGU_EGU_REG = 0x0047, + HNS3_OPC_DFX_RPU_REG_0 = 0x0048, + HNS3_OPC_DFX_RPU_REG_1 = 0x0049, + HNS3_OPC_DFX_NCSI_REG = 0x004A, + HNS3_OPC_DFX_RTC_REG = 0x004B, + HNS3_OPC_DFX_PPP_REG = 0x004C, + HNS3_OPC_DFX_RCB_REG = 0x004D, + HNS3_OPC_DFX_TQP_REG = 0x004E, + HNS3_OPC_DFX_SSU_REG_2 = 0x004F, HNS3_OPC_QUERY_DEV_SPECS = 0x0050, diff --git a/drivers/net/hns3/hns3_regs.c b/drivers/net/hns3/hns3_regs.c index 84f3157..4022bb9 100644 --- a/drivers/net/hns3/hns3_regs.c +++ b/drivers/net/hns3/hns3_regs.c @@ -15,6 +15,8 @@ #define REG_NUM_PER_LINE 4 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(uint32_t)) +static int hns3_get_dfx_reg_line(struct hns3_hw *hw, uint32_t *length); + static const uint32_t cmdq_reg_addrs[] = {HNS3_CMDQ_TX_ADDR_L_REG, HNS3_CMDQ_TX_ADDR_H_REG, HNS3_CMDQ_TX_DEPTH_REG, @@ -77,6 +79,21 @@ static const uint32_t tqp_intr_reg_addrs[] = {HNS3_TQP_INTR_CTRL_REG, HNS3_TQP_INTR_GL2_REG, HNS3_TQP_INTR_RL_REG}; +static const uint32_t hns3_dfx_reg_opcode_list[] = { + HNS3_OPC_DFX_BIOS_COMMON_REG, + HNS3_OPC_DFX_SSU_REG_0, + HNS3_OPC_DFX_SSU_REG_1, + HNS3_OPC_DFX_IGU_EGU_REG, + HNS3_OPC_DFX_RPU_REG_0, + HNS3_OPC_DFX_RPU_REG_1, + HNS3_OPC_DFX_NCSI_REG, + HNS3_OPC_DFX_RTC_REG, + HNS3_OPC_DFX_PPP_REG, + HNS3_OPC_DFX_RCB_REG, + HNS3_OPC_DFX_TQP_REG, + HNS3_OPC_DFX_SSU_REG_2 +}; + static int hns3_get_regs_num(struct hns3_hw *hw, uint32_t *regs_num_32_bit, uint32_t *regs_num_64_bit) @@ -123,14 +140,21 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length) if (!hns->is_vf) { ret = hns3_get_regs_num(hw, ®s_num_32_bit, ®s_num_64_bit); if (ret) { - hns3_err(hw, "Get register number failed, ret = %d.", - ret); - return -ENOTSUP; + hns3_err(hw, "fail to get the number of registers, " + "ret = %d.", ret); + return ret; } dfx_reg_lines = regs_num_32_bit * sizeof(uint32_t) / REG_LEN_PER_LINE + 1; dfx_reg_lines += regs_num_64_bit * sizeof(uint64_t) / REG_LEN_PER_LINE + 1; + + ret = hns3_get_dfx_reg_line(hw, &dfx_reg_lines); + if (ret) { + hns3_err(hw, "fail to get the number of dfx registers, " + "ret = %d.", ret); + return ret; + } len += dfx_reg_lines * REG_NUM_PER_LINE; } @@ -310,6 +334,144 @@ hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data) return data - origin_data_ptr; } +static int +hns3_get_dfx_reg_bd_num(struct hns3_hw *hw, uint32_t *bd_num_list, + uint32_t list_size) +{ +#define HNS3_GET_DFX_REG_BD_NUM_SIZE 4 + struct hns3_cmd_desc desc[HNS3_GET_DFX_REG_BD_NUM_SIZE]; + uint32_t index, desc_index; + uint32_t bd_num; + uint32_t i; + int ret; + + for (i = 0; i < HNS3_GET_DFX_REG_BD_NUM_SIZE - 1; i++) { + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_DFX_BD_NUM, true); + desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + } + /* The last BD does not need a next flag */ + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_DFX_BD_NUM, true); + + ret = hns3_cmd_send(hw, desc, 
HNS3_GET_DFX_REG_BD_NUM_SIZE); + if (ret) { + hns3_err(hw, "fail to get dfx bd num, ret = %d.\n", ret); + return ret; + } + + /* The first data in the first BD is a reserved field */ + for (i = 1; i <= list_size; i++) { + desc_index = i / HNS3_CMD_DESC_DATA_NUM; + index = i % HNS3_CMD_DESC_DATA_NUM; + bd_num = rte_le_to_cpu_32(desc[desc_index].data[index]); + bd_num_list[i - 1] = bd_num; + } + + return 0; +} + +static int +hns3_dfx_reg_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, + int bd_num, uint32_t opcode) +{ + int ret; + int i; + + for (i = 0; i < bd_num - 1; i++) { + hns3_cmd_setup_basic_desc(&desc[i], opcode, true); + desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + } + /* The last BD does not need a next flag */ + hns3_cmd_setup_basic_desc(&desc[i], opcode, true); + + ret = hns3_cmd_send(hw, desc, bd_num); + if (ret) { + hns3_err(hw, "fail to query dfx registers, opcode = 0x%04X, " + "ret = %d.\n", opcode, ret); + } + + return ret; +} + +static int +hns3_dfx_reg_fetch_data(struct hns3_cmd_desc *desc, int bd_num, uint32_t *reg) +{ + int desc_index; + int reg_num; + int index; + int i; + + reg_num = bd_num * HNS3_CMD_DESC_DATA_NUM; + for (i = 0; i < reg_num; i++) { + desc_index = i / HNS3_CMD_DESC_DATA_NUM; + index = i % HNS3_CMD_DESC_DATA_NUM; + *reg++ = desc[desc_index].data[index]; + } + reg_num += hns3_insert_reg_separator(reg_num, reg); + + return reg_num; +} + +static int +hns3_get_dfx_reg_line(struct hns3_hw *hw, uint32_t *lines) +{ + int opcode_num = RTE_DIM(hns3_dfx_reg_opcode_list); + uint32_t bd_num_list[opcode_num]; + uint32_t bd_num, data_len; + int ret; + int i; + + ret = hns3_get_dfx_reg_bd_num(hw, bd_num_list, opcode_num); + if (ret) + return ret; + + for (i = 0; i < opcode_num; i++) { + bd_num = bd_num_list[i]; + data_len = bd_num * HNS3_CMD_DESC_DATA_NUM * sizeof(uint32_t); + *lines += data_len / REG_LEN_PER_LINE + 1; + } + + return 0; +} + +static int +hns3_get_dfx_regs(struct hns3_hw *hw, void **data) +{ + int opcode_num = RTE_DIM(hns3_dfx_reg_opcode_list); + uint32_t max_bd_num, bd_num, opcode; + uint32_t bd_num_list[opcode_num]; + struct hns3_cmd_desc *cmd_descs; + uint32_t *reg_val = (uint32_t *)*data; + int ret; + int i; + + ret = hns3_get_dfx_reg_bd_num(hw, bd_num_list, opcode_num); + if (ret) + return ret; + + max_bd_num = 0; + for (i = 0; i < opcode_num; i++) + max_bd_num = RTE_MAX(bd_num_list[i], max_bd_num); + + cmd_descs = rte_zmalloc(NULL, sizeof(*cmd_descs) * max_bd_num, 0); + if (cmd_descs == NULL) + return -ENOMEM; + + for (i = 0; i < opcode_num; i++) { + opcode = hns3_dfx_reg_opcode_list[i]; + bd_num = bd_num_list[i]; + if (bd_num == 0) + continue; + ret = hns3_dfx_reg_cmd_send(hw, cmd_descs, bd_num, opcode); + if (ret) + break; + reg_val += hns3_dfx_reg_fetch_data(cmd_descs, bd_num, reg_val); + } + rte_free(cmd_descs); + *data = (void *)reg_val; + + return ret; +} + int hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs) { @@ -371,5 +533,6 @@ hns3_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs) data += regs_num_64_bit * HNS3_64_BIT_REG_SIZE; data += hns3_insert_reg_separator(regs_num_64_bit * HNS3_64_BIT_REG_SIZE, data); - return ret; + + return hns3_get_dfx_regs(hw, (void **)&data); } From patchwork Wed Feb 3 07:46:08 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87653 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: 
patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6C2F8A0A0E; Wed, 3 Feb 2021 08:47:02 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 38E05240449; Wed, 3 Feb 2021 08:46:55 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id F131E24043E for ; Wed, 3 Feb 2021 08:46:52 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.60]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtyf5Vv2zjGRW; Wed, 3 Feb 2021 15:45:46 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:48 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:08 +0800 Message-ID: <1612338382-3253-4-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 03/17] net/hns3: implement cleanup for Tx done X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengwen Feng This patch add support tx_done_cleanup ops, which could support for the API rte_eth_tx_done_cleanup to free consumed mbufs on Tx ring. Signed-off-by: Chengwen Feng Signed-off-by: Lijun Ou --- Depends-on: series-10000 ("update doc for hns3") --- doc/guides/nics/features/hns3.ini | 1 + doc/guides/nics/features/hns3_vf.ini | 1 + drivers/net/hns3/hns3_ethdev.c | 1 + drivers/net/hns3/hns3_ethdev_vf.c | 1 + drivers/net/hns3/hns3_rxtx.c | 59 ++++++++++++++++++++++++++++++++++++ drivers/net/hns3/hns3_rxtx.h | 1 + 6 files changed, 64 insertions(+) diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini index 40febb0..aa4cba2 100644 --- a/doc/guides/nics/features/hns3.ini +++ b/doc/guides/nics/features/hns3.ini @@ -10,6 +10,7 @@ Queue start/stop = Y Runtime Rx queue setup = Y Runtime Tx queue setup = Y Burst mode info = Y +Free Tx mbuf on demand = Y MTU update = Y Jumbo frame = Y Scattered Rx = Y diff --git a/doc/guides/nics/features/hns3_vf.ini b/doc/guides/nics/features/hns3_vf.ini index 99a0bf0..c796cd5 100644 --- a/doc/guides/nics/features/hns3_vf.ini +++ b/doc/guides/nics/features/hns3_vf.ini @@ -10,6 +10,7 @@ Queue start/stop = Y Runtime Rx queue setup = Y Runtime Tx queue setup = Y Burst mode info = Y +Free Tx mbuf on demand = Y MTU update = Y Jumbo frame = Y Scattered Rx = Y diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 31418b8..f85149d 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -6321,6 +6321,7 @@ static const struct eth_dev_ops hns3_eth_dev_ops = { .fec_get = hns3_fec_get, .fec_set = hns3_fec_set, .tm_ops_get = hns3_tm_ops_get, + .tx_done_cleanup = hns3_tx_done_cleanup, }; static const struct hns3_reset_ops hns3_reset_ops = { diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index faf7e01..11cab37 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -2738,6 +2738,7 @@ static const struct eth_dev_ops 
hns3vf_eth_dev_ops = { .vlan_offload_set = hns3vf_vlan_offload_set, .get_reg = hns3_get_regs, .dev_supported_ptypes_get = hns3_dev_supported_ptypes_get, + .tx_done_cleanup = hns3_tx_done_cleanup, }; static const struct hns3_reset_ops hns3vf_reset_ops = { diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index 222cf8a..5e79177 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -3913,6 +3913,65 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) return 0; } +static int +hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt) +{ + uint16_t next_to_clean = txq->next_to_clean; + uint16_t next_to_use = txq->next_to_use; + uint16_t tx_bd_ready = txq->tx_bd_ready; + struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean]; + struct hns3_desc *desc = &txq->tx_ring[next_to_clean]; + uint32_t idx; + + if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) + free_cnt = txq->nb_tx_desc; + + for (idx = 0; idx < free_cnt; idx++) { + if (next_to_clean == next_to_use) + break; + + if (desc->tx.tp_fe_sc_vld_ra_ri & + rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B))) + break; + + if (tx_pkt->mbuf != NULL) { + rte_pktmbuf_free_seg(tx_pkt->mbuf); + tx_pkt->mbuf = NULL; + } + + next_to_clean++; + tx_bd_ready++; + tx_pkt++; + desc++; + if (next_to_clean == txq->nb_tx_desc) { + tx_pkt = txq->sw_ring; + desc = txq->tx_ring; + next_to_clean = 0; + } + } + + if (idx > 0) { + txq->next_to_clean = next_to_clean; + txq->tx_bd_ready = tx_bd_ready; + } + + return (int)idx; +} + +int +hns3_tx_done_cleanup(void *txq, uint32_t free_cnt) +{ + struct hns3_tx_queue *q = (struct hns3_tx_queue *)txq; + struct rte_eth_dev *dev = &rte_eth_devices[q->port_id]; + + if (dev->tx_pkt_burst == hns3_xmit_pkts) + return hns3_tx_done_cleanup_full(q, free_cnt); + else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst) + return 0; + else + return -ENOTSUP; +} + uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) { diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h index 8f5ae5c..7118bd4 100644 --- a/drivers/net/hns3/hns3_rxtx.h +++ b/drivers/net/hns3/hns3_rxtx.h @@ -706,5 +706,6 @@ int hns3_start_all_txqs(struct rte_eth_dev *dev); int hns3_start_all_rxqs(struct rte_eth_dev *dev); void hns3_stop_all_txqs(struct rte_eth_dev *dev); void hns3_restore_tqp_enable_state(struct hns3_hw *hw); +int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt); #endif /* _HNS3_RXTX_H_ */ From patchwork Wed Feb 3 07:46:09 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87655 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 107C6A0A0E; Wed, 3 Feb 2021 08:47:20 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 9AC54240460; Wed, 3 Feb 2021 08:46:57 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 03C33240444 for ; Wed, 3 Feb 2021 08:46:52 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.60]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtyf6DrYzjH0G; Wed, 3 Feb 2021 15:45:46 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with 
Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:48 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:09 +0800 Message-ID: <1612338382-3253-5-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 04/17] net/hns3: add enhance stats function X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: "Min Hu (Connor)" In current HNS3 PMD, Rx/Tx bytes and imissed from packet stats are not implemented. This patch implemented Rx/Tx bytes using soft counters. Rx/Tx bytes stats will be enabled if the macro RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS is defined. Also, Rx imissed stats was in implemented in this patch. Signed-off-by: Min Hu (Connor) Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_cmd.h | 7 ++ drivers/net/hns3/hns3_ethdev.c | 7 ++ drivers/net/hns3/hns3_ethdev.h | 1 + drivers/net/hns3/hns3_rxtx.c | 24 ++++++ drivers/net/hns3/hns3_rxtx_vec_neon.h | 15 ++++ drivers/net/hns3/hns3_rxtx_vec_sve.c | 11 +++ drivers/net/hns3/hns3_stats.c | 137 +++++++++++++++++++++++++++++++--- drivers/net/hns3/hns3_stats.h | 8 ++ 8 files changed, 201 insertions(+), 9 deletions(-) diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h index e473a30..6b1ce22 100644 --- a/drivers/net/hns3/hns3_cmd.h +++ b/drivers/net/hns3/hns3_cmd.h @@ -889,6 +889,13 @@ struct hns3_dev_specs_0_cmd { uint32_t max_tm_rate; }; +struct hns3_query_rpu_cmd { + uint32_t tc_queue_num; + uint32_t rsv1[2]; + uint32_t rpu_rx_pkt_drop_cnt; + uint32_t rsv2[2]; +}; + #define HNS3_MAX_TQP_NUM_HIP08_PF 64 #define HNS3_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ #define HNS3_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index f85149d..bd998f6 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -4679,6 +4679,13 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_cmd_init; } + /* Hardware statistics of imissed registers cleared. 
*/ + ret = hns3_update_imissed_stats(hw, true); + if (ret) { + hns3_err(hw, "clear imissed stats failed, ret = %d", ret); + return ret; + } + hns3_config_all_msix_error(hw, true); ret = rte_intr_callback_register(&pci_dev->intr_handle, diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h index e72f3e1..520af20 100644 --- a/drivers/net/hns3/hns3_ethdev.h +++ b/drivers/net/hns3/hns3_ethdev.h @@ -434,6 +434,7 @@ struct hns3_hw { struct hns3_tqp_stats tqp_stats; /* Include Mac stats | Rx stats | Tx stats */ struct hns3_mac_stats mac_stats; + struct hns3_rx_missed_stats imissed_stats; uint32_t fw_version; uint16_t num_msi; diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index 5e79177..a8bd2cc 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -2181,6 +2181,10 @@ hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) cksum_err); hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd); +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Increment bytes counter */ + rxq->basic_stats.bytes += rxm->pkt_len; +#endif rx_pkts[nb_rx++] = rxm; continue; pkt_err: @@ -2401,6 +2405,10 @@ hns3_recv_scattered_pkts(void *rx_queue, cksum_err); hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd); +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Increment bytes counter */ + rxq->basic_stats.bytes += first_seg->pkt_len; +#endif rx_pkts[nb_rx++] = first_seg; first_seg = NULL; continue; @@ -3516,6 +3524,13 @@ hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq, for (i = 0; i < mainpart; i += PER_LOOP_NUM) { hns3_tx_backup_4mbuf(tx_entry + i, pkts + i); hns3_tx_setup_4bd(txdp + i, pkts + i); + +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Increment bytes counter */ + uint32_t j; + for (j = 0; j < PER_LOOP_NUM; j++) + txq->basic_stats.bytes += pkts[i + j]->pkt_len; +#endif } if (unlikely(leftover > 0)) { for (i = 0; i < leftover; i++) { @@ -3523,6 +3538,11 @@ hns3_tx_fill_hw_ring(struct hns3_tx_queue *txq, pkts + mainpart + i); hns3_tx_setup_1bd(txdp + mainpart + i, pkts + mainpart + i); + +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Increment bytes counter */ + txq->basic_stats.bytes += pkts[mainpart + i]->pkt_len; +#endif } } } @@ -3661,6 +3681,10 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_FE_B)); +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Increment bytes counter */ + txq->basic_stats.bytes += tx_pkt->pkt_len; +#endif nb_hold += i; txq->next_to_use = tx_next_use; txq->tx_bd_ready -= i; diff --git a/drivers/net/hns3/hns3_rxtx_vec_neon.h b/drivers/net/hns3/hns3_rxtx_vec_neon.h index a693b4b..54d358d 100644 --- a/drivers/net/hns3/hns3_rxtx_vec_neon.h +++ b/drivers/net/hns3/hns3_rxtx_vec_neon.h @@ -61,6 +61,11 @@ hns3_xmit_fixed_burst_vec(void *__restrict tx_queue, for (i = 0; i < n; i++, tx_pkts++, tx_desc++) { hns3_vec_tx(tx_desc, *tx_pkts); tx_entry[i].mbuf = *tx_pkts; + +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Increment bytes counter */ + txq->basic_stats.bytes += (*tx_pkts)->pkt_len; +#endif } nb_commit -= n; @@ -72,6 +77,11 @@ hns3_xmit_fixed_burst_vec(void *__restrict tx_queue, for (i = 0; i < nb_commit; i++, tx_pkts++, tx_desc++) { hns3_vec_tx(tx_desc, *tx_pkts); tx_entry[i].mbuf = *tx_pkts; + +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Increment bytes counter */ + txq->basic_stats.bytes += (*tx_pkts)->pkt_len; +#endif } next_to_use += nb_commit; @@ -116,6 +126,11 @@ hns3_desc_parse_field(struct hns3_rx_queue *rxq, if 
(likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) hns3_rx_set_cksum_flag(pkt, pkt->packet_type, cksum_err); + +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Increment bytes counter */ + rxq->basic_stats.bytes += pkt->pkt_len; +#endif } return retcode; diff --git a/drivers/net/hns3/hns3_rxtx_vec_sve.c b/drivers/net/hns3/hns3_rxtx_vec_sve.c index b02bae7..8b9172a 100644 --- a/drivers/net/hns3/hns3_rxtx_vec_sve.c +++ b/drivers/net/hns3/hns3_rxtx_vec_sve.c @@ -58,6 +58,11 @@ hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq, if (likely(key->bd_base_info[i] & BIT(HNS3_RXD_L3L4P_B))) hns3_rx_set_cksum_flag(rx_pkts[i], rx_pkts[i]->packet_type, cksum_err); + +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Increment bytes counter */ + rxq->basic_stats.bytes += rx_pkts[i]->pkt_len; +#endif } return retcode; @@ -408,6 +413,12 @@ hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq, svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.paylen, offsets, svdup_n_u64(valid_bit)); +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Increment bytes counter */ + uint32_t idx; + for (idx = 0; idx < svcntd(); idx++) + txq->basic_stats.bytes += pkts[idx]->pkt_len; +#endif /* update index for next loop */ i += svcntd(); pkts += svcntd(); diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c index 3ba09e2..e58ebea 100644 --- a/drivers/net/hns3/hns3_stats.c +++ b/drivers/net/hns3/hns3_stats.c @@ -324,6 +324,12 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = { {"TX_QUEUE_FBD", HNS3_RING_TX_FBDNUM_REG} }; +/* The statistic of imissed packet */ +static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = { + {"RPU_DROP_CNT", + HNS3_IMISSED_STATS_FIELD_OFFSET(rpu_rx_drop_cnt)}, +}; + #define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \ sizeof(hns3_mac_strings[0])) @@ -354,10 +360,14 @@ static const struct hns3_xstats_name_offset hns3_tx_queue_strings[] = { #define HNS3_NUM_TXQ_BASIC_STATS (sizeof(hns3_txq_basic_stats_strings) / \ sizeof(hns3_txq_basic_stats_strings[0])) +#define HNS3_NUM_IMISSED_XSTATS (sizeof(hns3_imissed_stats_strings) / \ + sizeof(hns3_imissed_stats_strings[0])) + #define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \ - HNS3_NUM_RESET_XSTATS) + HNS3_NUM_RESET_XSTATS + HNS3_NUM_IMISSED_XSTATS) static void hns3_tqp_stats_clear(struct hns3_hw *hw); +static void hns3_tqp_basic_stats_clear(struct rte_eth_dev *dev); /* * Query all the MAC statistics data of Network ICL command ,opcode id: 0x0034. 
@@ -514,6 +524,52 @@ hns3_update_tqp_stats(struct hns3_hw *hw) return 0; } +static int +hns3_update_rpu_drop_stats(struct hns3_hw *hw) +{ + struct hns3_rx_missed_stats *stats = &hw->imissed_stats; + struct hns3_query_rpu_cmd *req; + struct hns3_cmd_desc desc; + uint64_t cnt; + uint32_t tc_num; + int ret; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_DFX_RPU_REG_0, true); + req = (struct hns3_query_rpu_cmd *)desc.data; + + /* + * tc_num is 0, means rpu stats of all TC channels will be + * get from firmware + */ + tc_num = 0; + req->tc_queue_num = rte_cpu_to_le_32(tc_num); + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, "failed to query RPU stats: %d", ret); + return ret; + } + + cnt = rte_le_to_cpu_32(req->rpu_rx_pkt_drop_cnt); + stats->rpu_rx_drop_cnt += cnt; + + return 0; +} + +int +hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear) +{ + int ret; + + ret = hns3_update_rpu_drop_stats(hw); + if (ret) + return ret; + + if (is_clear) + memset(&hw->imissed_stats, 0, sizeof(hw->imissed_stats)); + + return 0; +} + /* * Query tqp tx queue statistics ,opcode id: 0x0B03. * Query tqp rx queue statistics ,opcode id: 0x0B13. @@ -530,6 +586,7 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) { struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; + struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats; struct hns3_tqp_stats *stats = &hw->tqp_stats; struct hns3_rx_queue *rxq; uint64_t cnt; @@ -539,22 +596,48 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) /* Update tqp stats by read register */ ret = hns3_update_tqp_stats(hw); if (ret) { - hns3_err(hw, "Update tqp stats fail : %d", ret); + hns3_err(hw, "update tqp stats fail : %d", ret); return ret; } - /* Get the error stats of received packets */ + if (!hns->is_vf) { + /* Update imissed stats */ + ret = hns3_update_imissed_stats(hw, false); + if (ret) { + hns3_err(hw, "update imissed stats failed, ret = %d", + ret); + return ret; + } + + rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt; + } + + /* Get the error stats and bytes of received packets */ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { rxq = eth_dev->data->rx_queues[i]; if (rxq) { cnt = rxq->err_stats.l2_errors + rxq->err_stats.pkt_len_errors; rte_stats->ierrors += cnt; + +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + rte_stats->ibytes += rxq->basic_stats.bytes; +#endif } } +#ifdef RTE_LIBRTE_HNS3_PMD_SOFT_COUNTERS + /* Get the bytes of received packets */ + struct hns3_tx_queue *txq; + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + txq = eth_dev->data->tx_queues[i]; + if (txq) + rte_stats->obytes += txq->basic_stats.bytes; + } +#endif + rte_stats->oerrors = 0; - rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd - + rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors; rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd - rte_stats->oerrors; @@ -583,7 +666,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i); ret = hns3_cmd_send(hw, &desc_reset, 1); if (ret) { - hns3_err(hw, "Failed to reset RX No.%u queue stat: %d", + hns3_err(hw, "failed to reset RX No.%u queue stat: %d", i, ret); return ret; } @@ -593,12 +676,25 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i); ret = hns3_cmd_send(hw, &desc_reset, 1); if (ret) { - hns3_err(hw, "Failed to reset TX No.%u queue stat: %d", + hns3_err(hw, "failed to reset TX No.%u queue stat: %d", i, 
ret); return ret; } } + if (!hns->is_vf) { + /* + * Note: Reading hardware statistics of imissed registers will + * clear them. + */ + ret = hns3_update_imissed_stats(hw, true); + if (ret) { + hns3_err(hw, "clear imissed stats failed, ret = %d", + ret); + return ret; + } + } + /* * Clear soft stats of rx error packet which will be dropped * in driver. @@ -617,6 +713,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) * their source. */ hns3_tqp_stats_clear(hw); + hns3_tqp_basic_stats_clear(eth_dev); return 0; } @@ -794,7 +891,7 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, rxq->err_stats.pkt_len_errors; rxq_stats->packets = stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors; - rxq_stats->bytes = 0; + for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) { val = (char *)rxq_stats + hns3_rxq_basic_stats_strings[j].offset; @@ -823,7 +920,7 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, txq_stats = &txq->basic_stats; txq_stats->packets = stats->rcb_tx_ring_pktnum[i]; - txq_stats->bytes = 0; + for (j = 0; j < HNS3_NUM_TXQ_BASIC_STATS; j++) { val = (char *)txq_stats + hns3_txq_basic_stats_strings[j].offset; @@ -904,6 +1001,7 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, struct hns3_adapter *hns = dev->data->dev_private; struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; + struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats; struct hns3_mac_stats *mac_stats = &hw->mac_stats; struct hns3_reset_stats *reset_stats = &hw->reset.stats; struct hns3_rx_bd_errors_stats *rx_err_stats; @@ -942,6 +1040,21 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, count++; } + ret = hns3_update_imissed_stats(hw, false); + if (ret) { + hns3_err(hw, "update imissed stats failed, ret = %d", + ret); + return ret; + } + + for (i = 0; i < HNS3_NUM_IMISSED_XSTATS; i++) { + addr = (char *)imissed_stats + + hns3_imissed_stats_strings[i].offset; + xstats[count].value = *(uint64_t *)addr; + xstats[count].id = count; + count++; + } + for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) { addr = (char *)&pf->abn_int_stats + hns3_error_int_stats_strings[i].offset; @@ -1084,6 +1197,13 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev, count++; } + for (i = 0; i < HNS3_NUM_IMISSED_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", hns3_imissed_stats_strings[i].name); + count++; + } + for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) { snprintf(xstats_names[count].name, sizeof(xstats_names[count].name), @@ -1315,7 +1435,6 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev) if (ret) return ret; - hns3_tqp_basic_stats_clear(dev); hns3_tqp_dfx_stats_clear(dev); /* Clear reset stats */ diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h index d213be5..01b4f36 100644 --- a/drivers/net/hns3/hns3_stats.h +++ b/drivers/net/hns3/hns3_stats.h @@ -110,6 +110,10 @@ struct hns3_mac_stats { uint64_t mac_rx_ctrl_pkt_num; }; +struct hns3_rx_missed_stats { + uint64_t rpu_rx_drop_cnt; +}; + /* store statistics names and its offset in stats structure */ struct hns3_xstats_name_offset { char name[RTE_ETH_XSTATS_NAME_SIZE]; @@ -141,6 +145,9 @@ struct hns3_reset_stats; #define HNS3_TXQ_BASIC_STATS_FIELD_OFFSET(f) \ (offsetof(struct hns3_tx_basic_stats, f)) +#define HNS3_IMISSED_STATS_FIELD_OFFSET(f) \ + (offsetof(struct hns3_rx_missed_stats, f)) + int hns3_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats); int 
hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n); @@ -160,5 +167,6 @@ int hns3_stats_reset(struct rte_eth_dev *dev); void hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err); int hns3_tqp_stats_init(struct hns3_hw *hw); void hns3_tqp_stats_uninit(struct hns3_hw *hw); +int hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear); #endif /* _HNS3_STATS_H_ */ From patchwork Wed Feb 3 07:46:10 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87668 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 857BFA0A0E; Wed, 3 Feb 2021 08:49:26 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E5EEB2404BB; Wed, 3 Feb 2021 08:47:13 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id AE4C4240462 for ; Wed, 3 Feb 2021 08:46:57 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtym1SwFzjGXV; Wed, 3 Feb 2021 15:45:52 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:49 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:10 +0800 Message-ID: <1612338382-3253-6-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 05/17] net/hns3: fix query order of link status and link info X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Huisong Li When link information is updated in the firmware, the link information is updated first and then the link status is updated. In a 1s periodic task, PF driver queries the link information and then obtains link status. It may lead to a 1s time difference for obtaining valid link information when the port is up. Therefore, the query order of driver should be reversed to the order of firmware. 
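
To make the reasoning concrete, an illustration only (it uses the driver-internal helpers touched by this patch, so it is meaningful only inside hns3_ethdev.c): the firmware writes link info before link status, so reading status first guarantees that the info read afterwards is at least as fresh as the status.

/* Corrected polling order, as now used by hns3_dev_link_update(). */
static void
poll_link_example(struct rte_eth_dev *eth_dev, struct hns3_hw *hw)
{
	/* 1. Status first: if it reads "up", the info was written earlier. */
	hns3_update_link_status(hw);
	/* 2. Then speed/duplex: it cannot be staler than the status read. */
	hns3_update_link_info(eth_dev);
}
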
Fixes: 109e4dd1bd7a ("net/hns3: get link state change through mailbox") Fixes: 59fad0f32135 ("net/hns3: support link update operation") Cc: stable@dpdk.org Signed-off-by: Huisong Li Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_ethdev.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index bd998f6..13f78f2 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -93,7 +93,7 @@ static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns, static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid, int on); -static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev); +static int hns3_update_link_info(struct rte_eth_dev *eth_dev); static bool hns3_update_link_status(struct hns3_hw *hw); static int hns3_add_mc_addr(struct hns3_hw *hw, @@ -2642,8 +2642,8 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, struct rte_eth_link new_link; if (!hns3_is_reset_pending(hns)) { - hns3_update_speed_duplex(eth_dev); hns3_update_link_status(hw); + hns3_update_link_info(eth_dev); } memset(&new_link, 0, sizeof(new_link)); @@ -4368,11 +4368,9 @@ hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) } static int -hns3_update_speed_duplex(struct rte_eth_dev *eth_dev) +hns3_update_fiber_link_info(struct hns3_hw *hw) { - struct hns3_adapter *hns = eth_dev->data->dev_private; - struct hns3_hw *hw = &hns->hw; - struct hns3_pf *pf = &hns->pf; + struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw); uint32_t speed; int ret; @@ -4395,6 +4393,21 @@ hns3_update_speed_duplex(struct rte_eth_dev *eth_dev) } static int +hns3_update_link_info(struct rte_eth_dev *eth_dev) +{ + struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + int ret = 0; + + if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) + return 0; + else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) + ret = hns3_update_fiber_link_info(hw); + + return ret; +} + +static int hns3_cfg_mac_mode(struct hns3_hw *hw, bool enable) { struct hns3_config_mac_mode_cmd *req; @@ -4509,8 +4522,8 @@ hns3_service_handler(void *param) struct hns3_hw *hw = &hns->hw; if (!hns3_is_reset_pending(hns)) { - hns3_update_speed_duplex(eth_dev); hns3_update_link_status_and_event(hw); + hns3_update_link_info(eth_dev); } else { hns3_warn(hw, "Cancel the query when reset is pending"); } From patchwork Wed Feb 3 07:46:11 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87662 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id E4C4BA0A0E; Wed, 3 Feb 2021 08:48:29 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id B67AC240496; Wed, 3 Feb 2021 08:47:06 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 336F1240453 for ; Wed, 3 Feb 2021 08:46:57 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtym106lzjHbR; Wed, 3 Feb 2021 15:45:52 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by 
DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:49 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:11 +0800 Message-ID: <1612338382-3253-7-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 06/17] net/hns3: fix link status change from firmware X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Huisong Li When the hardware link status changes, the firmware proactively reports the link status change message, and then driver update link status. This feature is lack of a switch to control in pf driver. Otherwise, this feature does not take effect when the kernel PF driver that supports the feature is not loaded. Fixes: 109e4dd1bd7a ("net/hns3: get link state change through mailbox") Cc: stable@dpdk.org Signed-off-by: Huisong Li Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_cmd.h | 10 ++++++++++ drivers/net/hns3/hns3_ethdev.c | 31 +++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h index 6b1ce22..5ebeff0 100644 --- a/drivers/net/hns3/hns3_cmd.h +++ b/drivers/net/hns3/hns3_cmd.h @@ -219,6 +219,9 @@ enum hns3_opcode_type { /* Clear hardware state command */ HNS3_OPC_CLEAR_HW_STATE = 0x700B, + /* Firmware stats command */ + HNS3_OPC_FIRMWARE_COMPAT_CFG = 0x701A, + /* SFP command */ HNS3_OPC_GET_SFP_EEPROM = 0x7100, HNS3_OPC_GET_SFP_EXIST = 0x7101, @@ -648,6 +651,13 @@ enum hns3_promisc_type { HNS3_BROADCAST = 3, }; +#define HNS3_LINK_EVENT_REPORT_EN_B 0 +#define HNS3_NCSI_ERROR_REPORT_EN_B 1 +struct hns3_firmware_compat_cmd { + uint32_t compat; + uint8_t rsv[20]; +}; + #define HNS3_MAC_TX_EN_B 6 #define HNS3_MAC_RX_EN_B 7 #define HNS3_MAC_PAD_TX_B 11 diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 13f78f2..3706e05 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -3919,6 +3919,26 @@ hns3_buffer_alloc(struct hns3_hw *hw) } static int +hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) +{ + struct hns3_firmware_compat_cmd *req; + struct hns3_cmd_desc desc; + uint32_t compat = 0; + + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false); + req = (struct hns3_firmware_compat_cmd *)desc.data; + + if (is_init) { + hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1); + hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0); + } + + req->compat = rte_cpu_to_le_32(compat); + + return hns3_cmd_send(hw, &desc, 1); +} + +static int hns3_mac_init(struct hns3_hw *hw) { struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); @@ -4610,6 +4630,15 @@ hns3_init_hardware(struct hns3_adapter *hns) goto err_mac_init; } + /* + * Requiring firmware to enable some features, driver can + * still work without it. 
+ */ + ret = hns3_firmware_compat_config(hw, true); + if (ret) + PMD_INIT_LOG(WARNING, "firmware compatible features not " + "supported, ret = %d.", ret); + return 0; err_mac_init: @@ -4753,6 +4782,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) err_enable_intr: hns3_fdir_filter_uninit(hns); err_fdir: + (void)hns3_firmware_compat_config(hw, false); hns3_uninit_umv_space(hw); err_init_hw: hns3_tqp_stats_uninit(hw); @@ -4787,6 +4817,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) (void)hns3_config_gro(hw, false); hns3_promisc_uninit(hw); hns3_fdir_filter_uninit(hns); + (void)hns3_firmware_compat_config(hw, false); hns3_uninit_umv_space(hw); hns3_tqp_stats_uninit(hw); hns3_pf_disable_irq0(hw); From patchwork Wed Feb 3 07:46:12 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87664 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 28786A0A0E; Wed, 3 Feb 2021 08:48:48 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 187D92404A3; Wed, 3 Feb 2021 08:47:09 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 4F5E5240455 for ; Wed, 3 Feb 2021 08:46:57 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtym0ZwhzjHbP; Wed, 3 Feb 2021 15:45:52 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:49 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:12 +0800 Message-ID: <1612338382-3253-8-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 07/17] net/hns3: encapsulate a port shaping interface X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Huisong Li When rate of port changes, the rate limit of the port needs to be updated. So it is necessary to encapsulate an interface that configures the rate limit based on the rate. 
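
For clarity, a sketch of the intended call pattern for the new helper (hypothetical caller, driver context assumed): reconfigure the port shaper first and only commit the new speed to the MAC state once the firmware has accepted it, which is what hns3_cfg_mac_speed_dup() does after this change.

static int
example_apply_new_speed(struct hns3_hw *hw, uint32_t new_speed)
{
	int ret;

	if (new_speed == hw->mac.link_speed)
		return 0;

	/* Update the port rate limit before touching the cached speed. */
	ret = hns3_port_shaper_update(hw, new_speed);
	if (ret)
		return ret;	/* cached speed is still the valid one */

	hw->mac.link_speed = new_speed;
	return 0;
}
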
Signed-off-by: Huisong Li Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_dcb.c | 22 +++++++++++++++++----- drivers/net/hns3/hns3_dcb.h | 2 +- drivers/net/hns3/hns3_ethdev.c | 10 +++------- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c index 5aa374c..4fe5956 100644 --- a/drivers/net/hns3/hns3_dcb.c +++ b/drivers/net/hns3/hns3_dcb.c @@ -330,8 +330,8 @@ hns3_dcb_get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s, return shapping_para; } -int -hns3_dcb_port_shaper_cfg(struct hns3_hw *hw) +static int +hns3_dcb_port_shaper_cfg(struct hns3_hw *hw, uint32_t speed) { struct hns3_port_shapping_cmd *shap_cfg_cmd; struct hns3_shaper_parameter shaper_parameter; @@ -340,7 +340,7 @@ hns3_dcb_port_shaper_cfg(struct hns3_hw *hw) struct hns3_cmd_desc desc; int ret; - ret = hns3_shaper_para_calc(hw, hw->mac.link_speed, + ret = hns3_shaper_para_calc(hw, speed, HNS3_SHAPER_LVL_PORT, &shaper_parameter); if (ret) { hns3_err(hw, "calculate shaper parameter failed: %d", ret); @@ -366,12 +366,24 @@ hns3_dcb_port_shaper_cfg(struct hns3_hw *hw) * depends on the firmware version. But driver still needs to * calculate it and configure to firmware for better compatibility. */ - shap_cfg_cmd->port_rate = rte_cpu_to_le_32(hw->mac.link_speed); + shap_cfg_cmd->port_rate = rte_cpu_to_le_32(speed); hns3_set_bit(shap_cfg_cmd->flag, HNS3_TM_RATE_VLD_B, 1); return hns3_cmd_send(hw, &desc, 1); } +int +hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed) +{ + int ret; + + ret = hns3_dcb_port_shaper_cfg(hw, speed); + if (ret) + hns3_err(hw, "configure port shappering failed: ret = %d", ret); + + return ret; +} + static int hns3_dcb_pg_shapping_cfg(struct hns3_hw *hw, enum hns3_shap_bucket bucket, uint8_t pg_id, uint32_t shapping_para, uint32_t rate) @@ -961,7 +973,7 @@ hns3_dcb_shaper_cfg(struct hns3_hw *hw) { int ret; - ret = hns3_dcb_port_shaper_cfg(hw); + ret = hns3_dcb_port_shaper_cfg(hw, hw->mac.link_speed); if (ret) { hns3_err(hw, "config port shaper failed: %d", ret); return ret; diff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h index 8248434..0d25d3b 100644 --- a/drivers/net/hns3/hns3_dcb.h +++ b/drivers/net/hns3/hns3_dcb.h @@ -208,7 +208,7 @@ int hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q, uint16_t nb_tx_q); int hns3_dcb_cfg_update(struct hns3_adapter *hns); -int hns3_dcb_port_shaper_cfg(struct hns3_hw *hw); +int hns3_port_shaper_update(struct hns3_hw *hw, uint32_t speed); int hns3_pg_shaper_rate_cfg(struct hns3_hw *hw, uint8_t pg_id, uint32_t rate); int hns3_pri_shaper_rate_cfg(struct hns3_hw *hw, uint8_t tc_no, uint32_t rate); uint8_t hns3_txq_mapped_tc_get(struct hns3_hw *hw, uint16_t txq_no); diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 3706e05..cf7d0aa 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -4363,7 +4363,6 @@ static int hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) { struct hns3_mac *mac = &hw->mac; - uint32_t cur_speed = mac->link_speed; int ret; duplex = hns3_check_speed_dup(duplex, speed); @@ -4374,14 +4373,11 @@ hns3_cfg_mac_speed_dup(struct hns3_hw *hw, uint32_t speed, uint8_t duplex) if (ret) return ret; - mac->link_speed = speed; - ret = hns3_dcb_port_shaper_cfg(hw); - if (ret) { - hns3_err(hw, "failed to configure port shaper, ret = %d.", ret); - mac->link_speed = cur_speed; + ret = hns3_port_shaper_update(hw, speed); + if (ret) return ret; - } + mac->link_speed = 
speed; mac->link_duplex = duplex; return 0; From patchwork Wed Feb 3 07:46:13 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87665 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 78A5DA0A0E; Wed, 3 Feb 2021 08:48:57 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 4A6AB2404A9; Wed, 3 Feb 2021 08:47:10 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 6140E24045C for ; Wed, 3 Feb 2021 08:46:57 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtym0M13zjHbN; Wed, 3 Feb 2021 15:45:52 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:50 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:13 +0800 Message-ID: <1612338382-3253-9-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 08/17] net/hns3: support PF on electrical net device X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Huisong Li The normal operation of electrical interface devices depends on the initialization and configuration of the PHY chip. In some firmware versions, the task of driving the PHY chip is implemented in firmware. If the firmware supports the PHY driver, it reports a capability flag to the driver during probing. The driver determines whether electrical devices are supported based on this capability bit. If they are supported, the driver sets a flag indicating that the firmware takes over the PHY, and the firmware then initializes the PHY. This patch supports the query of link status and link info, as well as the existing basic features, for electrical devices.
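
A small sketch (driver context assumed; the helper name is invented) of how the PHY capability bitmaps filled in by hns3_parse_phy_params() can be interpreted, using the bit definitions this patch adds to hns3_cmd.h:

#include <stdio.h>

/* Hypothetical debug helper: print a few of the parsed PHY capabilities. */
static void
print_phy_caps_example(const struct hns3_mac *mac)
{
	printf("autoneg: %s\n",
	       mac->support_autoneg ? "supported" : "not supported");
	printf("100M full duplex: %s\n",
	       (mac->supported_capa & HNS3_PHY_LINK_SPEED_100M_BIT) ?
	       "yes" : "no");
	printf("10M full duplex: %s\n",
	       (mac->supported_capa & HNS3_PHY_LINK_SPEED_10M_BIT) ?
	       "yes" : "no");
	printf("link partner pause: %s\n",
	       (mac->lp_advertising & HNS3_PHY_LINK_MODE_PAUSE_BIT) ?
	       "yes" : "no");
}
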
Signed-off-by: Huisong Li Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_cmd.h | 37 +++++++++++++ drivers/net/hns3/hns3_ethdev.c | 115 ++++++++++++++++++++++++++++++++++++++--- drivers/net/hns3/hns3_ethdev.h | 5 ++ 3 files changed, 151 insertions(+), 6 deletions(-) diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h index 5ebeff0..e5ca74e 100644 --- a/drivers/net/hns3/hns3_cmd.h +++ b/drivers/net/hns3/hns3_cmd.h @@ -221,6 +221,8 @@ enum hns3_opcode_type { /* Firmware stats command */ HNS3_OPC_FIRMWARE_COMPAT_CFG = 0x701A, + /* Firmware control phy command */ + HNS3_OPC_PHY_PARAM_CFG = 0x7025, /* SFP command */ HNS3_OPC_GET_SFP_EEPROM = 0x7100, @@ -653,11 +655,46 @@ enum hns3_promisc_type { #define HNS3_LINK_EVENT_REPORT_EN_B 0 #define HNS3_NCSI_ERROR_REPORT_EN_B 1 +#define HNS3_FIRMWARE_PHY_DRIVER_EN_B 2 struct hns3_firmware_compat_cmd { uint32_t compat; uint8_t rsv[20]; }; +/* Bitmap flags in supported, advertising and lp_advertising */ +#define HNS3_PHY_LINK_SPEED_10M_HD_BIT BIT(0) +#define HNS3_PHY_LINK_SPEED_10M_BIT BIT(1) +#define HNS3_PHY_LINK_SPEED_100M_HD_BIT BIT(2) +#define HNS3_PHY_LINK_SPEED_100M_BIT BIT(3) +#define HNS3_PHY_LINK_MODE_AUTONEG_BIT BIT(6) +#define HNS3_PHY_LINK_MODE_PAUSE_BIT BIT(13) +#define HNS3_PHY_LINK_MODE_ASYM_PAUSE_BIT BIT(14) + +#define HNS3_PHY_PARAM_CFG_BD_NUM 2 +struct hns3_phy_params_bd0_cmd { + uint32_t speed; +#define HNS3_PHY_DUPLEX_CFG_B 0 + uint8_t duplex; +#define HNS3_PHY_AUTONEG_CFG_B 0 + uint8_t autoneg; + uint8_t eth_tp_mdix; + uint8_t eth_tp_mdix_ctrl; + uint8_t port; + uint8_t transceiver; + uint8_t phy_address; + uint8_t rsv; + uint32_t supported; + uint32_t advertising; + uint32_t lp_advertising; +}; + +struct hns3_phy_params_bd1_cmd { + uint8_t master_slave_cfg; + uint8_t master_slave_state; + uint8_t rsv1[2]; + uint32_t rsv2[5]; +}; + #define HNS3_MAC_TX_EN_B 6 #define HNS3_MAC_RX_EN_B 7 #define HNS3_MAC_PAD_TX_B 11 diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index cf7d0aa..a92399f 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -3069,6 +3069,37 @@ hns3_get_capability(struct hns3_hw *hw) } static int +hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) +{ + int ret; + + switch (media_type) { + case HNS3_MEDIA_TYPE_COPPER: + if (!hns3_dev_copper_supported(hw)) { + PMD_INIT_LOG(ERR, + "Media type is copper, not supported."); + ret = -EOPNOTSUPP; + } else { + ret = 0; + } + break; + case HNS3_MEDIA_TYPE_FIBER: + ret = 0; + break; + case HNS3_MEDIA_TYPE_BACKPLANE: + PMD_INIT_LOG(ERR, "Media type is Backplane, not supported."); + ret = -EOPNOTSUPP; + break; + default: + PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); + ret = -EINVAL; + break; + } + + return ret; +} + +static int hns3_get_board_configuration(struct hns3_hw *hw) { struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); @@ -3082,11 +3113,9 @@ hns3_get_board_configuration(struct hns3_hw *hw) return ret; } - if (cfg.media_type == HNS3_MEDIA_TYPE_COPPER && - !hns3_dev_copper_supported(hw)) { - PMD_INIT_LOG(ERR, "media type is copper, not supported."); - return -EOPNOTSUPP; - } + ret = hns3_check_media_type(hw, cfg.media_type); + if (ret) + return ret; hw->mac.media_type = cfg.media_type; hw->rss_size_max = cfg.rss_size_max; @@ -3931,6 +3960,8 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) if (is_init) { hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1); hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0); + if (hw->mac.media_type == 
HNS3_MEDIA_TYPE_COPPER) + hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1); } req->compat = rte_cpu_to_le_32(compat); @@ -4408,6 +4439,78 @@ hns3_update_fiber_link_info(struct hns3_hw *hw) return hns3_cfg_mac_speed_dup(hw, speed, ETH_LINK_FULL_DUPLEX); } +static void +hns3_parse_phy_params(struct hns3_cmd_desc *desc, struct hns3_mac *mac) +{ + struct hns3_phy_params_bd0_cmd *req; + + req = (struct hns3_phy_params_bd0_cmd *)desc[0].data; + mac->link_speed = rte_le_to_cpu_32(req->speed); + mac->link_duplex = hns3_get_bit(req->duplex, + HNS3_PHY_DUPLEX_CFG_B); + mac->link_autoneg = hns3_get_bit(req->autoneg, + HNS3_PHY_AUTONEG_CFG_B); + mac->supported_capa = rte_le_to_cpu_32(req->supported); + mac->advertising = rte_le_to_cpu_32(req->advertising); + mac->lp_advertising = rte_le_to_cpu_32(req->lp_advertising); + mac->support_autoneg = !!(mac->supported_capa & + HNS3_PHY_LINK_MODE_AUTONEG_BIT); +} + +static int +hns3_get_phy_params(struct hns3_hw *hw, struct hns3_mac *mac) +{ + struct hns3_cmd_desc desc[HNS3_PHY_PARAM_CFG_BD_NUM]; + uint16_t i; + int ret; + + for (i = 0; i < HNS3_PHY_PARAM_CFG_BD_NUM - 1; i++) { + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, + true); + desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); + } + hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_PHY_PARAM_CFG, true); + + ret = hns3_cmd_send(hw, desc, HNS3_PHY_PARAM_CFG_BD_NUM); + if (ret) { + hns3_err(hw, "get phy parameters failed, ret = %d.", ret); + return ret; + } + + hns3_parse_phy_params(desc, mac); + + return 0; +} + +static int +hns3_update_phy_link_info(struct hns3_hw *hw) +{ + struct hns3_mac *mac = &hw->mac; + struct hns3_mac mac_info; + int ret; + + memset(&mac_info, 0, sizeof(struct hns3_mac)); + ret = hns3_get_phy_params(hw, &mac_info); + if (ret) + return ret; + + if (mac_info.link_speed != mac->link_speed) { + ret = hns3_port_shaper_update(hw, mac_info.link_speed); + if (ret) + return ret; + } + + mac->link_speed = mac_info.link_speed; + mac->link_duplex = mac_info.link_duplex; + mac->link_autoneg = mac_info.link_autoneg; + mac->supported_capa = mac_info.supported_capa; + mac->advertising = mac_info.advertising; + mac->lp_advertising = mac_info.lp_advertising; + mac->support_autoneg = mac_info.support_autoneg; + + return 0; +} + static int hns3_update_link_info(struct rte_eth_dev *eth_dev) { @@ -4416,7 +4519,7 @@ hns3_update_link_info(struct rte_eth_dev *eth_dev) int ret = 0; if (hw->mac.media_type == HNS3_MEDIA_TYPE_COPPER) - return 0; + ret = hns3_update_phy_link_info(hw); else if (hw->mac.media_type == HNS3_MEDIA_TYPE_FIBER) ret = hns3_update_fiber_link_info(hw); diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h index 520af20..26cc122 100644 --- a/drivers/net/hns3/hns3_ethdev.h +++ b/drivers/net/hns3/hns3_ethdev.h @@ -180,6 +180,11 @@ struct hns3_mac { uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */ uint8_t link_status : 1; /* ETH_LINK_[DOWN/UP] */ uint32_t link_speed; /* ETH_SPEED_NUM_ */ + uint32_t supported_capa; /* supported capability for current media */ + uint32_t advertising; /* advertised capability in the local part */ + /* advertised capability in the link partner */ + uint32_t lp_advertising; + uint8_t support_autoneg; }; struct hns3_fake_queue_data { From patchwork Wed Feb 3 07:46:14 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87663 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org 
Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1B8F9A0A0E; Wed, 3 Feb 2021 08:48:39 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id D933924049C; Wed, 3 Feb 2021 08:47:07 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 45E76240454 for ; Wed, 3 Feb 2021 08:46:57 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtym0mbdzjHbQ; Wed, 3 Feb 2021 15:45:52 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:50 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:14 +0800 Message-ID: <1612338382-3253-10-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 09/17] net/hns3: fix RSS indirection table size X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" the driver should not use the fixed value as the validity check of RSS indirection table size with HW supported. As a result, it will cause misjudgment when the RSS reta size with HW supported have changed. Fixes: c37ca66f2b27 ("net/hns3: support RSS") Cc: stable@dpdk.org Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_cmd.c | 11 +++++++++++ drivers/net/hns3/hns3_cmd.h | 7 ++++++- drivers/net/hns3/hns3_dcb.c | 2 +- drivers/net/hns3/hns3_ethdev.c | 18 ++++++++++++++++-- drivers/net/hns3/hns3_ethdev_vf.c | 18 ++++++++++++++++-- drivers/net/hns3/hns3_flow.c | 6 +++--- drivers/net/hns3/hns3_rss.c | 28 ++++++++++++++-------------- drivers/net/hns3/hns3_rss.h | 5 ++--- 8 files changed, 69 insertions(+), 26 deletions(-) diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c index 9ed8161..b750022 100644 --- a/drivers/net/hns3/hns3_cmd.c +++ b/drivers/net/hns3/hns3_cmd.c @@ -430,6 +430,16 @@ static void hns3_parse_capability(struct hns3_hw *hw, hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1); } +static uint32_t +hns3_build_api_caps(void) +{ + uint32_t api_caps = 0; + + hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1); + + return rte_cpu_to_le_32(api_caps); +} + static enum hns3_cmd_status hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) { @@ -439,6 +449,7 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1); resp = (struct hns3_query_version_cmd *)desc.data; + resp->api_caps = hns3_build_api_caps(); /* Initialize the cmd function */ ret = hns3_cmd_send(hw, &desc, 1); diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h index e5ca74e..c6c4093 100644 --- a/drivers/net/hns3/hns3_cmd.h +++ b/drivers/net/hns3/hns3_cmd.h @@ -312,11 +312,16 @@ enum HNS3_CAPS_BITS { HNS3_CAPS_HW_PAD_B, HNS3_CAPS_STASH_B, }; + +enum HNS3_API_CAP_BITS { + HNS3_API_CAP_FLEX_RSS_TBL_B, +}; + #define HNS3_QUERY_CAP_LENGTH 3 struct hns3_query_version_cmd { uint32_t firmware; 
uint32_t hardware; - uint32_t rsv; + uint32_t api_caps; uint32_t caps[HNS3_QUERY_CAP_LENGTH]; /* capabilities of device */ }; diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c index 4fe5956..ebfc240 100644 --- a/drivers/net/hns3/hns3_dcb.c +++ b/drivers/net/hns3/hns3_dcb.c @@ -656,7 +656,7 @@ hns3_set_rss_size(struct hns3_hw *hw, uint16_t nb_rx_q) * stage of the reset process. */ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { - for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++) + for (i = 0; i < hw->rss_ind_tbl_size; i++) rss_cfg->rss_indirection_tbl[i] = i % hw->alloc_rss_size; } diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index a92399f..9d2e4d2 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -2593,7 +2593,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) info->vmdq_queue_num = 0; - info->reta_size = HNS3_RSS_IND_TBL_SIZE; + info->reta_size = hw->rss_ind_tbl_size; info->hash_key_size = HNS3_RSS_KEY_SIZE; info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; @@ -2984,6 +2984,20 @@ hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) } static int +hns3_check_dev_specifications(struct hns3_hw *hw) +{ + if (hw->rss_ind_tbl_size == 0 || + hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { + hns3_err(hw, "the size of hash lookup table configured (%u)" + " exceeds the maximum(%u)", hw->rss_ind_tbl_size, + HNS3_RSS_IND_TBL_SIZE_MAX); + return -EINVAL; + } + + return 0; +} + +static int hns3_query_dev_specifications(struct hns3_hw *hw) { struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; @@ -3003,7 +3017,7 @@ hns3_query_dev_specifications(struct hns3_hw *hw) hns3_parse_dev_specifications(hw, desc); - return 0; + return hns3_check_dev_specifications(hw); } static int diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 11cab37..b3dd40a 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -1016,7 +1016,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) info->vmdq_queue_num = 0; - info->reta_size = HNS3_RSS_IND_TBL_SIZE; + info->reta_size = hw->rss_ind_tbl_size; info->hash_key_size = HNS3_RSS_KEY_SIZE; info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC; @@ -1149,6 +1149,20 @@ hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) } static int +hns3vf_check_dev_specifications(struct hns3_hw *hw) +{ + if (hw->rss_ind_tbl_size == 0 || + hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { + hns3_warn(hw, "the size of hash lookup table configured (%u)" + " exceeds the maximum(%u)", hw->rss_ind_tbl_size, + HNS3_RSS_IND_TBL_SIZE_MAX); + return -EINVAL; + } + + return 0; +} + +static int hns3vf_query_dev_specifications(struct hns3_hw *hw) { struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; @@ -1168,7 +1182,7 @@ hns3vf_query_dev_specifications(struct hns3_hw *hw) hns3vf_parse_dev_specifications(hw, desc); - return 0; + return hns3vf_check_dev_specifications(hw); } static int diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index 3e387ac..a601124 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -1489,14 +1489,14 @@ hns3_update_indir_table(struct rte_eth_dev *dev, { struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; - uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE]; + 
uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; uint16_t j; uint32_t i; /* Fill in redirection table */ memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl, sizeof(hw->rss_info.rss_indirection_tbl)); - for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) { + for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) { j %= num; if (conf->queue[j] >= hw->alloc_rss_size) { hns3_err(hw, "queue id(%u) set to redirection table " @@ -1507,7 +1507,7 @@ hns3_update_indir_table(struct rte_eth_dev *dev, indir_tbl[i] = conf->queue[j]; } - return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE); + return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size); } static int diff --git a/drivers/net/hns3/hns3_rss.c b/drivers/net/hns3/hns3_rss.c index 7d1a297..858e31a 100644 --- a/drivers/net/hns3/hns3_rss.c +++ b/drivers/net/hns3/hns3_rss.c @@ -312,7 +312,7 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) /* Update redirection table of hw */ memcpy(hw->rss_info.rss_indirection_tbl, indir, - sizeof(hw->rss_info.rss_indirection_tbl)); + sizeof(uint16_t) * size); return 0; } @@ -324,13 +324,13 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw) int ret; lut = rte_zmalloc("hns3_rss_lut", - HNS3_RSS_IND_TBL_SIZE * sizeof(uint16_t), 0); + hw->rss_ind_tbl_size * sizeof(uint16_t), 0); if (lut == NULL) { hns3_err(hw, "No hns3_rss_lut memory can be allocated"); return -ENOMEM; } - ret = hns3_set_rss_indir_table(hw, lut, HNS3_RSS_IND_TBL_SIZE); + ret = hns3_set_rss_indir_table(hw, lut, hw->rss_ind_tbl_size); if (ret) hns3_err(hw, "RSS uninit indir table failed: %d", ret); rte_free(lut); @@ -428,7 +428,7 @@ hns3_dev_rss_hash_update(struct rte_eth_dev *dev, } else if (rss_hf && rss_cfg->conf.types == 0) { /* Enable RSS, restore indirection table by hw's config */ ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl, - HNS3_RSS_IND_TBL_SIZE); + hw->rss_ind_tbl_size); if (ret) goto conf_err; } @@ -505,15 +505,15 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; struct hns3_rss_conf *rss_cfg = &hw->rss_info; - uint16_t i, indir_size = HNS3_RSS_IND_TBL_SIZE; /* Table size is 512 */ - uint16_t indirection_tbl[HNS3_RSS_IND_TBL_SIZE]; + uint16_t indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; uint16_t idx, shift; + uint16_t i; int ret; - if (reta_size != indir_size || reta_size > ETH_RSS_RETA_SIZE_512) { + if (reta_size != hw->rss_ind_tbl_size) { hns3_err(hw, "The size of hash lookup table configured (%u)" "doesn't match the number hardware can supported" - "(%u)", reta_size, indir_size); + "(%u)", reta_size, hw->rss_ind_tbl_size); return -EINVAL; } rte_spinlock_lock(&hw->lock); @@ -536,7 +536,7 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, } ret = hns3_set_rss_indir_table(hw, indirection_tbl, - HNS3_RSS_IND_TBL_SIZE); + hw->rss_ind_tbl_size); rte_spinlock_unlock(&hw->lock); return ret; @@ -561,13 +561,13 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; struct hns3_rss_conf *rss_cfg = &hw->rss_info; - uint16_t i, indir_size = HNS3_RSS_IND_TBL_SIZE; /* Table size is 512 */ uint16_t idx, shift; + uint16_t i; - if (reta_size != indir_size || reta_size > ETH_RSS_RETA_SIZE_512) { + if (reta_size != hw->rss_ind_tbl_size) { hns3_err(hw, "The size of hash lookup table configured (%u)" " doesn't match the number hardware can supported" - "(%u)", reta_size, indir_size); + "(%u)", reta_size, 
hw->rss_ind_tbl_size); return -EINVAL; } rte_spinlock_lock(&hw->lock); @@ -662,7 +662,7 @@ hns3_rss_set_default_args(struct hns3_hw *hw) memcpy(rss_cfg->key, hns3_hash_key, HNS3_RSS_KEY_SIZE); /* Initialize RSS indirection table */ - for (i = 0; i < HNS3_RSS_IND_TBL_SIZE; i++) + for (i = 0; i < hw->rss_ind_tbl_size; i++) rss_cfg->rss_indirection_tbl[i] = i % queue_num; } @@ -711,7 +711,7 @@ hns3_config_rss(struct hns3_adapter *hns) */ if (((uint32_t)mq_mode & ETH_MQ_RX_RSS_FLAG)) { ret = hns3_set_rss_indir_table(hw, rss_cfg->rss_indirection_tbl, - HNS3_RSS_IND_TBL_SIZE); + hw->rss_ind_tbl_size); if (ret) goto rss_tuple_uninit; } diff --git a/drivers/net/hns3/hns3_rss.h b/drivers/net/hns3/hns3_rss.h index 05d5c26..94668ed 100644 --- a/drivers/net/hns3/hns3_rss.h +++ b/drivers/net/hns3/hns3_rss.h @@ -24,9 +24,8 @@ ETH_RSS_L4_DST_ONLY) #define HNS3_RSS_IND_TBL_SIZE 512 /* The size of hash lookup table */ +#define HNS3_RSS_IND_TBL_SIZE_MAX 2048 #define HNS3_RSS_KEY_SIZE 40 -#define HNS3_RSS_CFG_TBL_NUM \ - (HNS3_RSS_IND_TBL_SIZE / HNS3_RSS_CFG_TBL_SIZE) #define HNS3_RSS_SET_BITMAP_MSK 0xffff #define HNS3_RSS_HASH_ALGO_TOEPLITZ 0 @@ -45,7 +44,7 @@ struct hns3_rss_conf { uint8_t hash_algo; /* hash function type definited by hardware */ uint8_t key[HNS3_RSS_KEY_SIZE]; /* Hash key */ struct hns3_rss_tuple_cfg rss_tuple_sets; - uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE]; /* Shadow table */ + uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */ bool valid; /* check if RSS rule is valid */ /* From patchwork Wed Feb 3 07:46:15 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87657 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 67B25A0A0E; Wed, 3 Feb 2021 08:47:45 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id D4DA5240476; Wed, 3 Feb 2021 08:47:00 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 1712B240452 for ; Wed, 3 Feb 2021 08:46:56 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtyl6fDHzjGbS; Wed, 3 Feb 2021 15:45:51 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:50 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:15 +0800 Message-ID: <1612338382-3253-11-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 10/17] net/hns3: constraint TM peak rate X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengwen Feng User could config Port or TC's peak rate by TM ops, but hardware does not support peak rate which lower than 1Mbps. 
So the TM peak rate is constrained to be at least 1 Mbps. Fixes: c09c7847d892 ("net/hns3: support traffic management") Signed-off-by: Chengwen Feng Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_tm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c index d1639d4..bcae57a 100644 --- a/drivers/net/hns3/hns3_tm.c +++ b/drivers/net/hns3/hns3_tm.c @@ -200,6 +200,12 @@ hns3_tm_shaper_profile_param_check(struct rte_eth_dev *dev, return -EINVAL; } + if (profile->peak.rate < hns3_tm_rate_convert_firmware2tm(1)) { + error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE; + error->message = "peak rate must be at least 1Mbps"; + return -EINVAL; + } + if (profile->peak.size) { error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE; error->message = "peak bucket size not supported"; From patchwork Wed Feb 3 07:46:16 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87661 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 7762EA0A0E; Wed, 3 Feb 2021 08:48:21 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 8CC09240492; Wed, 3 Feb 2021 08:47:05 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 6462F24045D for ; Wed, 3 Feb 2021 08:46:57 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtym1HTDzjHbV; Wed, 3 Feb 2021 15:45:52 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:51 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:16 +0800 Message-ID: <1612338382-3253-12-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 11/17] net/hns3: remove MPLS type from supported flow items X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengwen Feng The Kunpeng920 and Kunpeng930 do not support parsing MPLS packets, so remove this type from the supported flow items.
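For illustration only, a small standalone sketch of the kind of supported-item lookup this change affects. The enum and table below are invented for the example and are not the driver's actual rte_flow tables; the point is simply that once MPLS is absent from the supported list, validation of an MPLS item fails.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for rte_flow item types, not the real enum values. */
enum flow_item_type { ITEM_ETH, ITEM_IPV4, ITEM_VXLAN, ITEM_GENEVE, ITEM_MPLS };

/* Supported tunnel items after the change: MPLS is intentionally absent. */
static const enum flow_item_type supported_tunnel_items[] = {
	ITEM_VXLAN, ITEM_GENEVE,
};

static bool item_supported(enum flow_item_type type)
{
	size_t i;

	for (i = 0; i < sizeof(supported_tunnel_items) /
			sizeof(supported_tunnel_items[0]); i++)
		if (supported_tunnel_items[i] == type)
			return true;
	return false;
}

int main(void)
{
	/* A VXLAN item still validates; an MPLS item is now rejected. */
	printf("VXLAN supported: %d\n", item_supported(ITEM_VXLAN));
	printf("MPLS supported:  %d\n", item_supported(ITEM_MPLS));
	return 0;
}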
Fixes: fcba820d9b9e ("net/hns3: support flow director") Cc: stable@dpdk.org Signed-off-by: Chengwen Feng Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_flow.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index a601124..c484114 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -44,8 +44,7 @@ static enum rte_flow_item_type first_items[] = { RTE_FLOW_ITEM_TYPE_NVGRE, RTE_FLOW_ITEM_TYPE_VXLAN, RTE_FLOW_ITEM_TYPE_GENEVE, - RTE_FLOW_ITEM_TYPE_VXLAN_GPE, - RTE_FLOW_ITEM_TYPE_MPLS + RTE_FLOW_ITEM_TYPE_VXLAN_GPE }; static enum rte_flow_item_type L2_next_items[] = { @@ -65,8 +64,7 @@ static enum rte_flow_item_type L3_next_items[] = { static enum rte_flow_item_type L4_next_items[] = { RTE_FLOW_ITEM_TYPE_VXLAN, RTE_FLOW_ITEM_TYPE_GENEVE, - RTE_FLOW_ITEM_TYPE_VXLAN_GPE, - RTE_FLOW_ITEM_TYPE_MPLS + RTE_FLOW_ITEM_TYPE_VXLAN_GPE }; static enum rte_flow_item_type tunnel_next_items[] = { @@ -1118,8 +1116,7 @@ is_tunnel_packet(enum rte_flow_item_type type) if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || type == RTE_FLOW_ITEM_TYPE_VXLAN || type == RTE_FLOW_ITEM_TYPE_NVGRE || - type == RTE_FLOW_ITEM_TYPE_GENEVE || - type == RTE_FLOW_ITEM_TYPE_MPLS) + type == RTE_FLOW_ITEM_TYPE_GENEVE) return true; return false; } From patchwork Wed Feb 3 07:46:17 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87658 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 76F0CA0A0E; Wed, 3 Feb 2021 08:47:54 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1BBB8240482; Wed, 3 Feb 2021 08:47:02 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 3DE9A240454 for ; Wed, 3 Feb 2021 08:46:56 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtym0BDgzjHbL; Wed, 3 Feb 2021 15:45:52 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:51 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:17 +0800 Message-ID: <1612338382-3253-13-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 12/17] net/hns3: fix stats flip overflow X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengchang Tang Currently, statistics may overflow in some scenarios. For example, if HW statistics are reset by stats reset operation, but there are still a lot of residual packets exist in the HW queues and these packets are error packets, flip may occurred because the ipacket is obtained by substracting the number of software error packets from the number of HW received packets. 
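As a standalone illustration of that wrap-around (the variable names below are invented for the example, not the driver's actual fields):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* HW counters were just reset, but software error counters still
	 * reflect residual error packets that arrived before the reset. */
	uint64_t hw_rx_pkts = 5;    /* packets counted by HW after reset */
	uint64_t sw_errors  = 8;    /* error packets counted by software */

	/* Unchecked unsigned subtraction wraps around to a huge bogus value. */
	uint64_t naive = hw_rx_pkts - sw_errors;

	/* Guarded subtraction reports 0 instead of the wrapped value. */
	uint64_t fixed = hw_rx_pkts > sw_errors ? hw_rx_pkts - sw_errors : 0;

	printf("naive ipackets: %" PRIu64 "\n", naive); /* 18446744073709551613 */
	printf("fixed ipackets: %" PRIu64 "\n", fixed); /* 0 */
	return 0;
}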
This patch verifies the calculation and returns 0 when overflow may occur. Fixes: 8839c5e202f3 ("net/hns3: support device stats") Cc: stable@dpdk.org Signed-off-by: Chengchang Tang Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_stats.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c index e58ebea..58cce35 100644 --- a/drivers/net/hns3/hns3_stats.c +++ b/drivers/net/hns3/hns3_stats.c @@ -637,8 +637,14 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) #endif rte_stats->oerrors = 0; - rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd - - rte_stats->ierrors; + /* + * If HW statistics are reset by stats_reset, but a lot of residual + * packets exist in the hardware queue and these packets are error + * packets, flip overflow may occurred. So return 0 in this case. + */ + rte_stats->ipackets = + stats->rcb_rx_ring_pktnum_rcd > rte_stats->ierrors ? + stats->rcb_rx_ring_pktnum_rcd - rte_stats->ierrors : 0; rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd - rte_stats->oerrors; rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed; @@ -889,8 +895,15 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, rxq_stats = &rxq->basic_stats; rxq_stats->errors = rxq->err_stats.l2_errors + rxq->err_stats.pkt_len_errors; - rxq_stats->packets = stats->rcb_rx_ring_pktnum[i] - - rxq_stats->errors; + /* + * If HW statistics are reset by stats_reset, but a lot of + * residual packets exist in the hardware queue and these + * packets are error packets, flip overflow may occurred. + * So return 0 in this case. + */ + rxq_stats->packets = + stats->rcb_rx_ring_pktnum[i] > rxq_stats->errors ? + stats->rcb_rx_ring_pktnum[i] - rxq_stats->errors : 0; for (j = 0; j < HNS3_NUM_RXQ_BASIC_STATS; j++) { val = (char *)rxq_stats + From patchwork Wed Feb 3 07:46:18 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87660 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 09639A0A0E; Wed, 3 Feb 2021 08:48:12 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 6811124048D; Wed, 3 Feb 2021 08:47:04 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 29163240453 for ; Wed, 3 Feb 2021 08:46:56 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtyl6pH2zjHbH; Wed, 3 Feb 2021 15:45:51 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:51 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:18 +0800 Message-ID: <1612338382-3253-14-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 13/17] net/hns3: replace all atomic type with C11 atomic builtins X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 
Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengchang Tang Replace all the atomic type with C11 atomic builtins in hns3 PMD. Signed-off-by: Chengchang Tang Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_cmd.c | 13 +++++++------ drivers/net/hns3/hns3_ethdev.c | 21 ++++++++++++--------- drivers/net/hns3/hns3_ethdev.h | 4 ++-- drivers/net/hns3/hns3_ethdev_vf.c | 19 +++++++++++-------- drivers/net/hns3/hns3_intr.c | 22 ++++++++++++++-------- drivers/net/hns3/hns3_mbx.c | 4 ++-- 6 files changed, 48 insertions(+), 35 deletions(-) diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c index b750022..3d6ffc0 100644 --- a/drivers/net/hns3/hns3_cmd.c +++ b/drivers/net/hns3/hns3_cmd.c @@ -202,7 +202,8 @@ hns3_cmd_csq_clean(struct hns3_hw *hw) hns3_err(hw, "wrong cmd head (%u, %u-%u)", head, csq->next_to_use, csq->next_to_clean); if (rte_eal_process_type() == RTE_PROC_PRIMARY) { - rte_atomic16_set(&hw->reset.disable_cmd, 1); + __atomic_store_n(&hw->reset.disable_cmd, 1, + __ATOMIC_RELAXED); hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw)); } @@ -311,7 +312,7 @@ static int hns3_cmd_poll_reply(struct hns3_hw *hw) if (hns3_cmd_csq_done(hw)) return 0; - if (rte_atomic16_read(&hw->reset.disable_cmd)) { + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) { hns3_err(hw, "Don't wait for reply because of disable_cmd"); return -EBUSY; @@ -358,7 +359,7 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num) int retval; uint32_t ntc; - if (rte_atomic16_read(&hw->reset.disable_cmd)) + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) return -EBUSY; rte_spinlock_lock(&hw->cmq.csq.lock); @@ -535,7 +536,7 @@ hns3_cmd_init(struct hns3_hw *hw) ret = -EBUSY; goto err_cmd_init; } - rte_atomic16_clear(&hw->reset.disable_cmd); + __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED); ret = hns3_cmd_query_firmware_version_and_capability(hw); if (ret) { @@ -557,7 +558,7 @@ hns3_cmd_init(struct hns3_hw *hw) return 0; err_cmd_init: - rte_atomic16_set(&hw->reset.disable_cmd, 1); + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); return ret; } @@ -583,7 +584,7 @@ hns3_cmd_uninit(struct hns3_hw *hw) { rte_spinlock_lock(&hw->cmq.csq.lock); rte_spinlock_lock(&hw->cmq.crq.lock); - rte_atomic16_set(&hw->reset.disable_cmd, 1); + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); hns3_cmd_clear_regs(hw); rte_spinlock_unlock(&hw->cmq.crq.lock); rte_spinlock_unlock(&hw->cmq.csq.lock); diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index 9d2e4d2..a3c1340 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -130,7 +130,7 @@ hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay, { struct hns3_hw *hw = &hns->hw; - rte_atomic16_set(&hw->reset.disable_cmd, 1); + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B); if (!is_delay) { @@ -150,7 +150,7 @@ hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay, { struct hns3_hw *hw = &hns->hw; - rte_atomic16_set(&hw->reset.disable_cmd, 1); + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending); *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); if (!is_delay) { @@ -5176,7 +5176,7 @@ hns3_do_stop(struct 
hns3_adapter *hns) return ret; hw->mac.link_status = ETH_LINK_DOWN; - if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) { + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { hns3_configure_all_mac_addr(hns, true); ret = hns3_reset_all_tqps(hns); if (ret) { @@ -5719,7 +5719,7 @@ hns3_prepare_reset(struct hns3_adapter *hns) * any mailbox handling or command to firmware is only valid * after hns3_cmd_init is called. */ - rte_atomic16_set(&hw->reset.disable_cmd, 1); + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); hw->reset.stats.request_cnt++; break; case HNS3_IMP_RESET: @@ -5779,7 +5779,7 @@ hns3_stop_service(struct hns3_adapter *hns) * from table space. Hence, for function reset software intervention is * required to delete the entries */ - if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) hns3_configure_all_mc_mac_addr(hns, true); rte_spinlock_unlock(&hw->lock); @@ -5901,8 +5901,10 @@ hns3_reset_service(void *param) * The interrupt may have been lost. It is necessary to handle * the interrupt to recover from the error. */ - if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) { - rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED); + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_DEFERRED) { + __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, + __ATOMIC_RELAXED); hns3_err(hw, "Handling interrupts in delayed tasks"); hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]); reset_level = hns3_get_reset_level(hns, &hw->reset.pending); @@ -5911,7 +5913,7 @@ hns3_reset_service(void *param) hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); } } - rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE); + __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED); /* * Check if there is any ongoing reset in the hardware. 
This status can @@ -6591,7 +6593,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev) hw->adapter_state = HNS3_NIC_INITIALIZED; - if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) { + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_PENDING) { hns3_err(hw, "Reschedule reset service after dev_init"); hns3_schedule_reset(hns); } else { diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h index 26cc122..f1bbc69 100644 --- a/drivers/net/hns3/hns3_ethdev.h +++ b/drivers/net/hns3/hns3_ethdev.h @@ -357,11 +357,11 @@ enum hns3_schedule { struct hns3_reset_data { enum hns3_reset_stage stage; - rte_atomic16_t schedule; + uint16_t schedule; /* Reset flag, covering the entire reset process */ uint16_t resetting; /* Used to disable sending cmds during reset */ - rte_atomic16_t disable_cmd; + uint16_t disable_cmd; /* The reset level being processed */ enum hns3_reset_level level; /* Reset level set, each bit represents a reset level */ diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index b3dd40a..44c94e7 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -1059,7 +1059,7 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING); hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg); hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending); - rte_atomic16_set(&hw->reset.disable_cmd, 1); + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); val = hns3_read_dev(hw, HNS3_VF_RST_ING); hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT); val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); @@ -1934,7 +1934,7 @@ hns3vf_do_stop(struct hns3_adapter *hns) hw->mac.link_status = ETH_LINK_DOWN; - if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) { + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) { hns3vf_configure_mac_addr(hns, true); ret = hns3_reset_all_tqps(hns); if (ret) { @@ -2410,7 +2410,7 @@ hns3vf_prepare_reset(struct hns3_adapter *hns) ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, 0, true, NULL, 0); } - rte_atomic16_set(&hw->reset.disable_cmd, 1); + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); return ret; } @@ -2449,7 +2449,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) * from table space. Hence, for function reset software intervention is * required to delete the entries. */ - if (rte_atomic16_read(&hw->reset.disable_cmd) == 0) + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) hns3vf_configure_all_mc_mac_addr(hns, true); rte_spinlock_unlock(&hw->lock); @@ -2621,8 +2621,10 @@ hns3vf_reset_service(void *param) * The interrupt may have been lost. It is necessary to handle * the interrupt to recover from the error. 
*/ - if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) { - rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED); + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_DEFERRED) { + __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, + __ATOMIC_RELAXED); hns3_err(hw, "Handling interrupts in delayed tasks"); hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]); reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending); @@ -2631,7 +2633,7 @@ hns3vf_reset_service(void *param) hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending); } } - rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE); + __atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED); /* * Hardware reset has been notified, we now have to poll & check if @@ -2855,7 +2857,8 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev) hw->adapter_state = HNS3_NIC_INITIALIZED; - if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_PENDING) { + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_PENDING) { hns3_err(hw, "Reschedule reset service after dev_init"); hns3_schedule_reset(hns); } else { diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c index 51f19b4..88ce4c6 100644 --- a/drivers/net/hns3/hns3_intr.c +++ b/drivers/net/hns3/hns3_intr.c @@ -1762,7 +1762,7 @@ hns3_reset_init(struct hns3_hw *hw) hw->reset.request = 0; hw->reset.pending = 0; hw->reset.resetting = 0; - rte_atomic16_init(&hw->reset.disable_cmd); + __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED); hw->reset.wait_data = rte_zmalloc("wait_data", sizeof(struct hns3_wait_data), 0); if (!hw->reset.wait_data) { @@ -1779,7 +1779,8 @@ hns3_schedule_reset(struct hns3_adapter *hns) /* Reschedule the reset process after successful initialization */ if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) { - rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_PENDING); + __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING, + __ATOMIC_RELAXED); return; } @@ -1787,11 +1788,14 @@ hns3_schedule_reset(struct hns3_adapter *hns) return; /* Schedule restart alarm if it is not scheduled yet */ - if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_REQUESTED) + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_REQUESTED) return; - if (rte_atomic16_read(&hns->hw.reset.schedule) == SCHEDULE_DEFERRED) + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_DEFERRED) rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns); - rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED); + __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, + __ATOMIC_RELAXED); rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns); } @@ -1808,9 +1812,11 @@ hns3_schedule_delayed_reset(struct hns3_adapter *hns) return; } - if (rte_atomic16_read(&hns->hw.reset.schedule) != SCHEDULE_NONE) + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) != + SCHEDULE_NONE) return; - rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_DEFERRED); + __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED, + __ATOMIC_RELAXED); rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns); } @@ -1983,7 +1989,7 @@ hns3_reset_err_handle(struct hns3_adapter *hns) * Regardless of whether the execution is successful or not, the * flow after execution must be continued. 
*/ - if (rte_atomic16_read(&hw->reset.disable_cmd)) + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) (void)hns3_cmd_init(hw); reset_fail: hw->reset.attempts = 0; diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c index 925cfca..61d1584 100644 --- a/drivers/net/hns3/hns3_mbx.c +++ b/drivers/net/hns3/hns3_mbx.c @@ -83,7 +83,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1, end = now + HNS3_MAX_RETRY_MS; while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) && (now < end)) { - if (rte_atomic16_read(&hw->reset.disable_cmd)) { + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) { hns3_err(hw, "Don't wait for mbx respone because of " "disable_cmd"); return -EBUSY; @@ -369,7 +369,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) int i; while (!hns3_cmd_crq_empty(hw)) { - if (rte_atomic16_read(&hw->reset.disable_cmd)) + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) return; desc = &crq->desc[crq->next_to_use]; From patchwork Wed Feb 3 07:46:19 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87667 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id EA83BA0A0E; Wed, 3 Feb 2021 08:49:16 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id B15FF2404B6; Wed, 3 Feb 2021 08:47:12 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id B4761240463 for ; Wed, 3 Feb 2021 08:46:57 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtym01YbzjHbJ; Wed, 3 Feb 2021 15:45:52 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:51 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:19 +0800 Message-ID: <1612338382-3253-15-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 14/17] net/hns3: fix FD rule residue in hardware when malloc fail X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengwen Feng After FD rule config success, driver will malloc fdir_fule to hold the rule info, if malloc fail the FD rule in hardware was not cleanup. 
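For illustration, a minimal standalone sketch of the allocate-before-program ordering that avoids leaving a rule behind in hardware when allocation fails. All names and types here are invented for the example; they are not the driver's actual helpers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fdir_rule { int id; };
struct fdir_rule_entry { struct fdir_rule conf; };

/* Stand-in for programming the rule into hardware; always succeeds here. */
static int program_rule_in_hw(const struct fdir_rule *rule)
{
	(void)rule;
	return 0;
}

/*
 * Allocate the bookkeeping entry before touching hardware: if the
 * allocation fails we bail out early and no rule is left behind in HW.
 */
static int create_rule(const struct fdir_rule *rule)
{
	struct fdir_rule_entry *entry = calloc(1, sizeof(*entry));

	if (entry == NULL)
		return -1;                 /* nothing programmed yet */

	if (program_rule_in_hw(rule) != 0) {
		free(entry);               /* HW refused: drop bookkeeping */
		return -1;
	}

	memcpy(&entry->conf, rule, sizeof(*rule));
	/* ... entry would be linked into a rule list here ... */
	free(entry);                       /* keep the sketch leak-free */
	return 0;
}

int main(void)
{
	struct fdir_rule rule = { .id = 1 };

	printf("create_rule: %d\n", create_rule(&rule));
	return 0;
}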
Fixes: fcba820d9b9e ("net/hns3: support flow director") Cc: stable@dpdk.org Signed-off-by: Chengwen Feng Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_flow.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c index c484114..a016857 100644 --- a/drivers/net/hns3/hns3_flow.c +++ b/drivers/net/hns3/hns3_flow.c @@ -1806,17 +1806,18 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, flow->counter_id = fdir_rule.act_cnt.id; } + + fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", + sizeof(struct hns3_fdir_rule_ele), + 0); + if (fdir_rule_ptr == NULL) { + hns3_err(hw, "failed to allocate fdir_rule memory."); + ret = -ENOMEM; + goto err_fdir; + } + ret = hns3_fdir_filter_program(hns, &fdir_rule, false); if (!ret) { - fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", - sizeof(struct hns3_fdir_rule_ele), - 0); - if (fdir_rule_ptr == NULL) { - hns3_err(hw, "Failed to allocate fdir_rule memory"); - ret = -ENOMEM; - goto err_fdir; - } - memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule, sizeof(struct hns3_fdir_rule)); TAILQ_INSERT_TAIL(&process_list->fdir_list, @@ -1827,10 +1828,10 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return flow; } + rte_free(fdir_rule_ptr); err_fdir: if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) hns3_counter_release(dev, fdir_rule.act_cnt.id); - err: rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Failed to create flow"); From patchwork Wed Feb 3 07:46:20 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87669 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 29C2BA0A0E; Wed, 3 Feb 2021 08:49:36 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 0AE952404C2; Wed, 3 Feb 2021 08:47:15 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id E81FF240466 for ; Wed, 3 Feb 2021 08:46:58 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtyl6Sy1zjGW9; Wed, 3 Feb 2021 15:45:51 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:52 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:20 +0800 Message-ID: <1612338382-3253-16-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 15/17] net/hns3: fix cmdq cleared during firmware process X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengchang Tang There are two scenarios that command queue uninit performed concurrently with the firmware command: asynchronous command and timeout command. 
For asynchronous command, if a large number of functions send commands, these commands may need to be queued to wait for firmware processing. If a function is uninited suddenly, CMDQ clearing and firmware processing may be performed concurrently. For timeout command, if the command failed due to busy scheduling of firmware, this command will be processed in the next scheduling. And this may lead to concurrency. The preceding concurrency may lead to a firmware exceptions. This patch add a waiting time to ensure the firmware complete the processing of left over command when PMD uninit. Fixes: 737f30e1c3ab ("net/hns3: support command interface with firmware") Cc: stable@dpdk.org Signed-off-by: Chengchang Tang Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_cmd.c | 14 +++++++++++++- drivers/net/hns3/hns3_cmd.h | 1 + 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c index 3d6ffc0..32cd56b 100644 --- a/drivers/net/hns3/hns3_cmd.c +++ b/drivers/net/hns3/hns3_cmd.c @@ -582,9 +582,21 @@ hns3_cmd_destroy_queue(struct hns3_hw *hw) void hns3_cmd_uninit(struct hns3_hw *hw) { + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); + + /* + * A delay is added to ensure that the register cleanup operations + * will not be performed concurrently with the firmware command and + * ensure that all the reserved commands are executed. + * Concurrency may occur in two scenarios: asynchronous command and + * timeout command. If the command fails to be executed due to busy + * scheduling, the command will be processed in the next scheduling + * of the firmware. + */ + rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME); + rte_spinlock_lock(&hw->cmq.csq.lock); rte_spinlock_lock(&hw->cmq.crq.lock); - __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); hns3_cmd_clear_regs(hw); rte_spinlock_unlock(&hw->cmq.crq.lock); rte_spinlock_unlock(&hw->cmq.csq.lock); diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h index c6c4093..7f567cb 100644 --- a/drivers/net/hns3/hns3_cmd.h +++ b/drivers/net/hns3/hns3_cmd.h @@ -8,6 +8,7 @@ #include #define HNS3_CMDQ_TX_TIMEOUT 30000 +#define HNS3_CMDQ_CLEAR_WAIT_TIME 200 #define HNS3_CMDQ_RX_INVLD_B 0 #define HNS3_CMDQ_RX_OUTVLD_B 1 #define HNS3_CMD_DESC_ALIGNMENT 4096 From patchwork Wed Feb 3 07:46:21 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87659 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 7D881A0A0E; Wed, 3 Feb 2021 08:48:03 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 487CA240448; Wed, 3 Feb 2021 08:47:03 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 41E46240455 for ; Wed, 3 Feb 2021 08:46:56 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtyl65ZxzjCHs; Wed, 3 Feb 2021 15:45:51 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:52 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:21 +0800 Message-ID: 
<1612338382-3253-17-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 16/17] net/hns3: fix VF reset after MBX failed X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Chengchang Tang Currently, during a VF reset, the VF sends an MBX message to ask the PF to reset it, and the disable-command bit is set regardless of whether the MBX message succeeds. Generally, multiple reset attempts are made after a failure. However, because the command interface is disabled, all subsequent reset attempts fail. This patch disables the command interface only after the MBX message has been sent successfully. Fixes: 2790c6464725 ("net/hns3: support device reset") Cc: stable@dpdk.org Signed-off-by: Chengchang Tang Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_ethdev_vf.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index 44c94e7..c18e00f 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -2404,15 +2404,17 @@ static int hns3vf_prepare_reset(struct hns3_adapter *hns) { struct hns3_hw *hw = &hns->hw; - int ret = 0; + int ret; if (hw->reset.level == HNS3_VF_FUNC_RESET) { ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, 0, true, NULL, 0); + if (ret) + return ret; } __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); - return ret; + return 0; } static int From patchwork Wed Feb 3 07:46:22 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 87666 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 5BBDCA0A0E; Wed, 3 Feb 2021 08:49:07 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 7B6F22404AF; Wed, 3 Feb 2021 08:47:11 +0100 (CET) Received: from szxga06-in.huawei.com (szxga06-in.huawei.com [45.249.212.32]) by mails.dpdk.org (Postfix) with ESMTP id 579CC24045B for ; Wed, 3 Feb 2021 08:46:57 +0100 (CET) Received: from DGGEMS404-HUB.china.huawei.com (unknown [172.30.72.59]) by szxga06-in.huawei.com (SkyGuard) with ESMTP id 4DVtyl6GvHzjGRj; Wed, 3 Feb 2021 15:45:51 +0800 (CST) Received: from localhost.localdomain (10.69.192.56) by DGGEMS404-HUB.china.huawei.com (10.3.19.204) with Microsoft SMTP Server id 14.3.498.0; Wed, 3 Feb 2021 15:46:52 +0800 From: Lijun Ou To: CC: , Date: Wed, 3 Feb 2021 15:46:22 +0800 Message-ID: <1612338382-3253-18-git-send-email-oulijun@huawei.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1612338382-3253-1-git-send-email-oulijun@huawei.com> References: <1612338382-3253-1-git-send-email-oulijun@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.69.192.56] X-CFilter-Loop: Reflected Subject: [dpdk-dev] [PATCH 17/17] net/hns3: add check for max pkt length of Rx X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help:
List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Huisong Li When jumbo frame is enabled, the MTU size needs to be modified based on 'max_rx_pkt_len'. Driver needs to check the validity of 'max_rx_pkt_len'. And it should be in the range of HNS3_DEFAULT_FRAME_LEN and HNS3_MAX_FRAME_LEN. Otherwise, it may cause that the MTU size is inconsistent with jumbo frame offload. Fixes: 19a3ca4c99cf ("net/hns3: add start/stop and configure operations") Cc: stable@dpdk.org Signed-off-by: Huisong Li Signed-off-by: Lijun Ou --- drivers/net/hns3/hns3_ethdev.c | 19 +++++++++++++------ drivers/net/hns3/hns3_ethdev_vf.c | 19 +++++++++++++------ 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c index a3c1340..a97dee4 100644 --- a/drivers/net/hns3/hns3_ethdev.c +++ b/drivers/net/hns3/hns3_ethdev.c @@ -2343,6 +2343,7 @@ hns3_dev_configure(struct rte_eth_dev *dev) uint16_t nb_rx_q = dev->data->nb_rx_queues; uint16_t nb_tx_q = dev->data->nb_tx_queues; struct rte_eth_rss_conf rss_conf; + uint32_t max_rx_pkt_len; uint16_t mtu; bool gro_en; int ret; @@ -2396,12 +2397,18 @@ hns3_dev_configure(struct rte_eth_dev *dev) * according to the maximum RX packet length. */ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { - /* - * Security of max_rx_pkt_len is guaranteed in dpdk frame. - * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it - * can safely assign to "uint16_t" type variable. - */ - mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len); + max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; + if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || + max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { + hns3_err(hw, "maximum Rx packet length must be greater " + "than %u and less than %u when jumbo frame enabled.", + (uint16_t)HNS3_DEFAULT_FRAME_LEN, + (uint16_t)HNS3_MAX_FRAME_LEN); + ret = -EINVAL; + goto cfg_err; + } + + mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); ret = hns3_dev_mtu_set(dev, mtu); if (ret) goto cfg_err; diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c index c18e00f..1b8c029 100644 --- a/drivers/net/hns3/hns3_ethdev_vf.c +++ b/drivers/net/hns3/hns3_ethdev_vf.c @@ -778,6 +778,7 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) uint16_t nb_rx_q = dev->data->nb_rx_queues; uint16_t nb_tx_q = dev->data->nb_tx_queues; struct rte_eth_rss_conf rss_conf; + uint32_t max_rx_pkt_len; uint16_t mtu; bool gro_en; int ret; @@ -825,12 +826,18 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) * according to the maximum RX packet length. */ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { - /* - * Security of max_rx_pkt_len is guaranteed in dpdk frame. - * Maximum value of max_rx_pkt_len is HNS3_MAX_FRAME_LEN, so it - * can safely assign to "uint16_t" type variable. - */ - mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(conf->rxmode.max_rx_pkt_len); + max_rx_pkt_len = conf->rxmode.max_rx_pkt_len; + if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN || + max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) { + hns3_err(hw, "maximum Rx packet length must be greater " + "than %u and less than %u when jumbo frame enabled.", + (uint16_t)HNS3_DEFAULT_FRAME_LEN, + (uint16_t)HNS3_MAX_FRAME_LEN); + ret = -EINVAL; + goto cfg_err; + } + + mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len); ret = hns3vf_dev_mtu_set(dev, mtu); if (ret) goto cfg_err;