@@ -57,13 +57,13 @@ hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
/* Calc tick */
if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
hns3_err(hw,
- "shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)",
+ "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)",
shaper_level, HNS3_SHAPER_LVL_CNT);
return -EINVAL;
}
if (ir > hw->max_tm_rate) {
- hns3_err(hw, "rate(%d) exceeds the max rate(%d) driver "
+ hns3_err(hw, "rate(%u) exceeds the max rate(%u) driver "
"supported.", ir, hw->max_tm_rate);
return -EINVAL;
}
@@ -1145,7 +1145,7 @@ hns3_pause_param_setup_hw(struct hns3_hw *hw, uint16_t pause_time)
pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
else {
- hns3_warn(hw, "pause_time(%d) is adjusted to 4", pause_time);
+ hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time);
pause_time = PAUSE_TIME_MIN_VALUE;
pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
}
@@ -1456,13 +1456,13 @@ hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc)
return -EINVAL;
if (nb_rx_q < num_tc) {
- hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+ hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
nb_rx_q, num_tc);
return -EINVAL;
}
if (nb_tx_q < num_tc) {
- hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+ hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
nb_tx_q, num_tc);
return -EINVAL;
}
@@ -1115,8 +1115,8 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev)
hns3_warn(hw,
"hw_vlan_reject_tagged or hw_vlan_reject_untagged "
"configuration is not supported! Ignore these two "
- "parameters: hw_vlan_reject_tagged(%d), "
- "hw_vlan_reject_untagged(%d)",
+ "parameters: hw_vlan_reject_tagged(%u), "
+ "hw_vlan_reject_untagged(%u)",
txmode->hw_vlan_reject_tagged,
txmode->hw_vlan_reject_untagged);
@@ -1140,7 +1140,7 @@ hns3_dev_configure_vlan(struct rte_eth_dev *dev)
ret = hns3_vlan_pvid_set(dev, txmode->pvid,
txmode->hw_vlan_insert_pvid);
if (ret)
- hns3_err(hw, "dev config vlan pvid(%d) failed, ret = %d",
+ hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
txmode->pvid, ret);
return ret;
@@ -1905,7 +1905,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
uint32_t j;
if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
- hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) "
+ hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
"invalid. valid range: 0~%d",
nb_mc_addr, HNS3_MC_MACADDR_NUM);
return -EINVAL;
@@ -2179,7 +2179,7 @@ hns3_check_mq_mode(struct rte_eth_dev *dev)
for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
- hns3_err(hw, "dcb_tc[%d] = %d in rx direction, "
+ hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
"is not equal to one in tx direction.",
i, dcb_rx_conf->dcb_tc[i]);
return -EINVAL;
@@ -2253,7 +2253,7 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
op_str = mmap ? "Map" : "Unmap";
status = hns3_cmd_send(hw, &desc, 1);
if (status) {
- hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
+ hns3_err(hw, "%s TQP %u fail, vector_id is %u, status is %d.",
op_str, queue_id, req->int_vector_id, status);
return status;
}
@@ -2301,7 +2301,7 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)
HNS3_RING_TYPE_TX, i);
if (ret) {
PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
- "vector: %d, ret=%d", i, vec, ret);
+ "vector: %u, ret=%d", i, vec, ret);
return ret;
}
@@ -2309,7 +2309,7 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)
HNS3_RING_TYPE_RX, i);
if (ret) {
PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
- "vector: %d, ret=%d", i, vec, ret);
+ "vector: %u, ret=%d", i, vec, ret);
return ret;
}
}
@@ -3098,7 +3098,7 @@ hns3_get_board_configuration(struct hns3_hw *hw)
ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
if (ret) {
- PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d",
+ PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d",
cfg.default_speed, ret);
return ret;
}
@@ -3943,7 +3943,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
if (cmdq_resp) {
PMD_INIT_LOG(ERR,
- "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
cmdq_resp);
return -EIO;
}
@@ -3964,7 +3964,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code)
break;
default:
PMD_INIT_LOG(ERR,
- "add mac ethertype failed for undefined, code=%d.",
+ "add mac ethertype failed for undefined, code=%u.",
resp_code);
return_status = -EIO;
break;
@@ -4122,7 +4122,7 @@ hns3_promisc_init(struct hns3_hw *hw)
hns3_promisc_param_init(&param, false, false, false, func_id);
ret = hns3_cmd_set_promisc_mode(hw, &param);
if (ret) {
- PMD_INIT_LOG(ERR, "failed to clear vf:%d promisc mode,"
+ PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
" ret = %d", func_id, ret);
return ret;
}
@@ -4805,7 +4805,7 @@ hns3_map_rx_interrupt(struct rte_eth_dev *dev)
rte_zmalloc("intr_vec",
hw->used_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- hns3_err(hw, "Failed to allocate %d rx_queues"
+ hns3_err(hw, "Failed to allocate %u rx_queues"
" intr_vec", hw->used_rx_queues);
ret = -ENOMEM;
goto alloc_intr_vec_error;
@@ -5071,7 +5071,7 @@ hns3_dev_close(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->process_private);
eth_dev->process_private = NULL;
hns3_mp_uninit_primary();
- hns3_warn(hw, "Close port %d finished", hw->data->port_id);
+ hns3_warn(hw, "Close port %u finished", hw->data->port_id);
return ret;
}
@@ -5149,7 +5149,7 @@ hns3_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
return -EINVAL;
}
if (!fc_conf->pause_time) {
- hns3_err(hw, "Invalid pause time %d setting.",
+ hns3_err(hw, "Invalid pause time %u setting.",
fc_conf->pause_time);
return -EINVAL;
}
@@ -5202,7 +5202,7 @@ hns3_priority_flow_ctrl_set(struct rte_eth_dev *dev,
return -EINVAL;
}
if (pfc_conf->fc.pause_time == 0) {
- hns3_err(hw, "Invalid pause time %d setting.",
+ hns3_err(hw, "Invalid pause time %u setting.",
pfc_conf->fc.pause_time);
return -EINVAL;
}
@@ -444,7 +444,7 @@ hns3vf_set_mc_addr_chk_param(struct hns3_hw *hw,
uint32_t j;
if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
- hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) "
+ hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
"invalid. valid range: 0~%d",
nb_mc_addr, HNS3_MC_MACADDR_NUM);
return -EINVAL;
@@ -721,7 +721,7 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
sizeof(bind_msg), false, NULL, 0);
if (ret)
- hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
+ hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
op_str, queue_id, bind_msg.vector_id, ret);
return ret;
@@ -767,7 +767,7 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw)
HNS3_RING_TYPE_TX, i);
if (ret) {
PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
- "vector: %d, ret=%d", i, vec, ret);
+ "vector: %u, ret=%d", i, vec, ret);
return ret;
}
@@ -775,7 +775,7 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw)
HNS3_RING_TYPE_RX, i);
if (ret) {
PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
- "vector: %d, ret=%d", i, vec, ret);
+ "vector: %u, ret=%d", i, vec, ret);
return ret;
}
}
@@ -1349,8 +1349,8 @@ static int
hns3vf_get_tc_info(struct hns3_hw *hw)
{
uint8_t resp_msg;
+ uint32_t i;
int ret;
- int i;
ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_TCINFO, 0, NULL, 0,
true, &resp_msg, sizeof(resp_msg));
@@ -1433,13 +1433,13 @@ hns3vf_set_tc_queue_mapping(struct hns3_adapter *hns, uint16_t nb_rx_q,
struct hns3_hw *hw = &hns->hw;
if (nb_rx_q < hw->num_tc) {
- hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+ hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
nb_rx_q, hw->num_tc);
return -EINVAL;
}
if (nb_tx_q < hw->num_tc) {
- hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+ hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
nb_tx_q, hw->num_tc);
return -EINVAL;
}
@@ -2011,7 +2011,7 @@ hns3vf_dev_close(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->process_private);
eth_dev->process_private = NULL;
hns3_mp_uninit_primary();
- hns3_warn(hw, "Close port %d finished", hw->data->port_id);
+ hns3_warn(hw, "Close port %u finished", hw->data->port_id);
return ret;
}
@@ -2065,6 +2065,7 @@ hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
break;
default:
new_link.link_speed = ETH_SPEED_NUM_100M;
+ hns3_info(hw, "default link speed: %u", new_link.link_speed);
break;
}
@@ -2126,7 +2127,7 @@ hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
rte_zmalloc("intr_vec",
hw->used_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- hns3_err(hw, "Failed to allocate %d rx_queues"
+ hns3_err(hw, "Failed to allocate %u rx_queues"
" intr_vec", hw->used_rx_queues);
ret = -ENOMEM;
goto vf_alloc_intr_vec_error;
@@ -321,7 +321,7 @@ int hns3_init_fd_config(struct hns3_adapter *hns)
hns3_warn(hw, "Unsupported tunnel filter in 4K*200Bit");
break;
default:
- hns3_err(hw, "Unsupported flow director mode %d",
+ hns3_err(hw, "Unsupported flow director mode %u",
pf->fdir.fd_cfg.fd_mode);
return -EOPNOTSUPP;
}
@@ -618,7 +618,7 @@ static bool hns3_fd_convert_tuple(struct hns3_hw *hw,
key_conf->mask.ip_proto);
break;
default:
- hns3_warn(hw, "not support tuple of (%d)", tuple);
+ hns3_warn(hw, "not support tuple of (%u)", tuple);
break;
}
return true;
@@ -745,14 +745,14 @@ static int hns3_config_key(struct hns3_adapter *hns,
ret = hns3_fd_tcam_config(hw, false, rule->location, key_y, true);
if (ret) {
- hns3_err(hw, "Config fd key_y fail, loc=%d, ret=%d",
+ hns3_err(hw, "Config fd key_y fail, loc=%u, ret=%d",
rule->queue_id, ret);
return ret;
}
ret = hns3_fd_tcam_config(hw, true, rule->location, key_x, true);
if (ret)
- hns3_err(hw, "Config fd key_x fail, loc=%d, ret=%d",
+ hns3_err(hw, "Config fd key_x fail, loc=%u, ret=%d",
rule->queue_id, ret);
return ret;
}
@@ -966,8 +966,8 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns,
ret = hns3_fd_tcam_config(hw, true, rule->location, NULL,
false);
if (ret)
- hns3_err(hw, "Failed to delete fdir: %d src_ip:%x "
- "dst_ip:%x src_port:%d dst_port:%d ret = %d",
+ hns3_err(hw, "Failed to delete fdir: %u src_ip:%x "
+ "dst_ip:%x src_port:%u dst_port:%u ret = %d",
rule->location,
rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID],
rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID],
@@ -1007,8 +1007,8 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns,
ret = hns3_config_key(hns, rule);
rte_spinlock_unlock(&fdir_info->flows_lock);
if (ret) {
- hns3_err(hw, "Failed to config fdir: %d src_ip:%x dst_ip:%x "
- "src_port:%d dst_port:%d ret = %d",
+ hns3_err(hw, "Failed to config fdir: %u src_ip:%x dst_ip:%x "
+ "src_port:%u dst_port:%u ret = %d",
rule->location,
rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID],
rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID],
@@ -269,8 +269,8 @@ hns3_handle_action_queue(struct rte_eth_dev *dev,
queue = (const struct rte_flow_action_queue *)action->conf;
if (queue->index >= hw->used_rx_queues) {
- hns3_err(hw, "queue ID(%d) is greater than number of "
- "available queue (%d) in driver.",
+ hns3_err(hw, "queue ID(%u) is greater than number of "
+ "available queue (%u) in driver.",
queue->index, hw->used_rx_queues);
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -88,7 +88,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
uint64_t end;
if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
- hns3_err(hw, "VF mbx response len(=%d) exceeds maximum(=%d)",
+ hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
return -EINVAL;
}
@@ -127,7 +127,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
if (now >= end) {
hw->mbx_resp.lost++;
hns3_err(hw,
- "VF could not get mbx(%d,%d) head(%d) tail(%d) lost(%d) from PF in_irq:%d",
+ "VF could not get mbx(%u,%u) head(%u) tail(%u) lost(%u) from PF in_irq:%d",
code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
hw->mbx_resp.lost, in_irq);
return -ETIME;
@@ -160,7 +160,7 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
/* first two bytes are reserved for code & subcode */
if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
hns3_err(hw,
- "VF send mbx msg fail, msg len %d exceeds max payload len %d",
+ "VF send mbx msg fail, msg len %u exceeds max payload len %d",
msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
return -EINVAL;
}
@@ -251,7 +251,7 @@ hns3_mbx_handler(struct hns3_hw *hw)
hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
break;
default:
- hns3_err(hw, "Fetched unsupported(%d) message from arq",
+ hns3_err(hw, "Fetched unsupported(%u) message from arq",
opcode);
break;
}
@@ -280,13 +280,13 @@ hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
if (resp->lost)
resp->lost--;
hns3_warn(hw, "Received a mismatched response req_msg(%x) "
- "resp_msg(%x) head(%d) tail(%d) lost(%d)",
+ "resp_msg(%x) head(%u) tail(%u) lost(%u)",
resp->req_msg_data, resp_msg, resp->head, tail,
resp->lost);
} else if (tail + resp->lost > resp->head) {
resp->lost--;
hns3_warn(hw, "Received a new response again resp_msg(%x) "
- "head(%d) tail(%d) lost(%d)", resp_msg,
+ "head(%u) tail(%u) lost(%u)", resp_msg,
resp->head, tail, resp->lost);
}
rte_io_wmb();
@@ -391,7 +391,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
hns3_warn(hw,
- "dropped invalid mailbox message, code = %d",
+ "dropped invalid mailbox message, code = %u",
opcode);
/* dropping/not processing this invalid message */
@@ -442,7 +442,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
break;
default:
hns3_err(hw,
- "VF received unsupported(%d) mbx msg from PF",
+ "VF received unsupported(%u) mbx msg from PF",
req->msg[0]);
break;
}
@@ -80,7 +80,7 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
if (!rte_eth_dev_is_valid_port(param->port_id)) {
rte_errno = ENODEV;
- PMD_INIT_LOG(ERR, "port %d invalid port ID", param->port_id);
+ PMD_INIT_LOG(ERR, "port %u invalid port ID", param->port_id);
return -rte_errno;
}
dev = &rte_eth_devices[param->port_id];
@@ -269,7 +269,7 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
for (i = 0; i < rxq->nb_rx_desc; i++) {
mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (unlikely(mbuf == NULL)) {
- hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
+ hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
i);
hns3_rx_queue_release_mbufs(rxq);
return -ENOMEM;
@@ -1205,7 +1205,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
RTE_CACHE_LINE_SIZE, q_info->socket_id);
if (rxq == NULL) {
- hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+ hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
q_info->idx);
return NULL;
}
@@ -1224,7 +1224,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
rx_desc, HNS3_RING_BASE_ALIGN,
q_info->socket_id);
if (rx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+ hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
q_info->idx);
hns3_rx_queue_release(rxq);
return NULL;
@@ -1233,7 +1233,7 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev,
rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
rxq->rx_ring_phys_addr = rx_mz->iova;
- hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+ hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
rxq->rx_ring_phys_addr);
return rxq;
@@ -1261,7 +1261,7 @@ hns3_fake_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
q_info.ring_name = "rx_fake_ring";
rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
if (rxq == NULL) {
- hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+ hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
return -ENOMEM;
}
@@ -1298,7 +1298,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
RTE_CACHE_LINE_SIZE, q_info->socket_id);
if (txq == NULL) {
- hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+ hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
q_info->idx);
return NULL;
}
@@ -1311,7 +1311,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
tx_desc, HNS3_RING_BASE_ALIGN,
q_info->socket_id);
if (tx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+ hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
q_info->idx);
hns3_tx_queue_release(txq);
return NULL;
@@ -1320,7 +1320,7 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev,
txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
txq->tx_ring_phys_addr = tx_mz->iova;
- hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+ hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
txq->tx_ring_phys_addr);
/* Clear tx bd */
@@ -1355,7 +1355,7 @@ hns3_fake_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
q_info.ring_name = "tx_fake_ring";
txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
if (txq == NULL) {
- hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+ hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
return -ENOMEM;
}
@@ -2488,8 +2488,8 @@ hns3_tx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_txconf *conf,
if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
- hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc "
- "(%d) of tx descriptors for port=%d queue=%d check "
+ hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
+ "(%u) of tx descriptors for port=%u queue=%u check "
"fail!",
rs_thresh, free_thresh, nb_desc, hw->data->port_id,
idx);
@@ -89,13 +89,12 @@ hns3_desc_parse_field(struct hns3_rx_queue *rxq,
struct hns3_desc *rxdp,
uint32_t bd_vld_num)
{
- uint32_t l234_info, ol_info, bd_base_info;
+ uint32_t l234_info, ol_info, bd_base_info, cksum_err, i;
struct rte_mbuf *pkt;
uint32_t retcode = 0;
- uint32_t cksum_err;
- int ret, i;
+ int ret;
- for (i = 0; i < (int)bd_vld_num; i++) {
+ for (i = 0; i < bd_vld_num; i++) {
pkt = sw_ring[i].mbuf;
/* init rte_mbuf.rearm_data last 64-bit */
@@ -129,9 +128,9 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq,
uint16_t rx_id = rxq->next_to_use;
struct hns3_entry *sw_ring = &rxq->sw_ring[rx_id];
struct hns3_desc *rxdp = &rxq->rx_ring[rx_id];
- uint32_t bd_valid_num, parse_retcode;
+ uint32_t bd_valid_num, parse_retcode, pos;
uint16_t nb_rx = 0;
- int pos, offset;
+ int offset;
/* mask to shuffle from desc to mbuf's rx_descriptor_fields1 */
uint8x16_t shuf_desc_fields_msk = {
@@ -461,7 +461,7 @@ hns3_update_tqp_stats(struct hns3_hw *hw)
desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
- hns3_err(hw, "Failed to query RX No.%d queue stat: %d",
+ hns3_err(hw, "Failed to query RX No.%u queue stat: %d",
i, ret);
return ret;
}
@@ -475,7 +475,7 @@ hns3_update_tqp_stats(struct hns3_hw *hw)
desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
- hns3_err(hw, "Failed to query TX No.%d queue stat: %d",
+ hns3_err(hw, "Failed to query TX No.%u queue stat: %d",
i, ret);
return ret;
}
@@ -569,7 +569,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc_reset, 1);
if (ret) {
- hns3_err(hw, "Failed to reset RX No.%d queue stat: %d",
+ hns3_err(hw, "Failed to reset RX No.%u queue stat: %d",
i, ret);
return ret;
}
@@ -579,7 +579,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc_reset, 1);
if (ret) {
- hns3_err(hw, "Failed to reset TX No.%d queue stat: %d",
+ hns3_err(hw, "Failed to reset TX No.%u queue stat: %d",
i, ret);
return ret;
}
@@ -964,7 +964,7 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
for (i = 0; i < size; i++) {
if (ids[i] >= cnt_stats) {
- hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, "
+ hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
"should < %u", i, ids[i], cnt_stats);
rte_free(values_copy);
return -EINVAL;
@@ -1025,7 +1025,7 @@ hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
for (i = 0; i < size; i++) {
if (ids[i] >= cnt_stats) {
- hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, "
+ hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
"should < %u", i, ids[i], cnt_stats);
rte_free(names_copy);
return -EINVAL;