From patchwork Wed Sep 17 13:46:36 2014
X-Patchwork-Submitter: David Marchand <david.marchand@6wind.com>
X-Patchwork-Id: 401
From: David Marchand <david.marchand@6wind.com>
To: dev@dpdk.org
Date: Wed, 17 Sep 2014 15:46:36 +0200
Message-Id: <1410961612-8571-5-git-send-email-david.marchand@6wind.com>
In-Reply-To: <1410961612-8571-1-git-send-email-david.marchand@6wind.com>
References: <1410961612-8571-1-git-send-email-david.marchand@6wind.com>
Subject: [dpdk-dev] [PATCH v3 04/20] ixgbe: clean log messages

Clean log messages:
- remove leading \n from some messages,
- remove trailing \n from some messages,
- split multi-line messages.

Signed-off-by: David Marchand <david.marchand@6wind.com>
v2 Reviewed-by: Jay Rolette
v2 Acked-by: Bruce Richardson
---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c |   82 +++++++++++++++++------------------
 lib/librte_pmd_ixgbe/ixgbe_fdir.c   |    6 +--
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c   |   64 +++++++++++++--------------
 3 files changed, 76 insertions(+), 76 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 71b964a..18988be 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -547,13 +547,13 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 	if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
 		return -ENOSYS;
 
-	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
+	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
"RX" : "TX", queue_id, stat_idx); n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG); if (n >= IXGBE_NB_STAT_MAPPING_REGS) { - PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n"); + PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded"); return -EIO; } offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG); @@ -573,20 +573,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, else stat_mappings->rqsmr[n] |= qsmr_mask; - PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n" - "%s[%d] = 0x%08x\n", + PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d", (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", - queue_id, stat_idx, is_rx ? "RQSMR" : "TQSM", n, + queue_id, stat_idx); + PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); /* Now write the mapping in the appropriate register */ if (is_rx) { - PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n", + PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d", stat_mappings->rqsmr[n], n); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); } else { - PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n", + PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d", stat_mappings->tqsm[n], n); IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); } @@ -793,11 +793,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, if (diag == IXGBE_ERR_EEPROM_VERSION) { PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" "LOM. Please be aware there may be issues associated " - "with your hardware.\n If you are experiencing problems " + "with your hardware."); + PMD_INIT_LOG(ERR, "If you are experiencing problems " "please contact your Intel or hardware representative " - "who provided you with this hardware.\n"); + "who provided you with this hardware."); } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) - PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n"); + PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); if (diag) { PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); return -EIO; @@ -851,11 +852,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, IXGBE_WRITE_FLUSH(hw); if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) - PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %dmac.type, (int) hw->phy.type, (int) hw->phy.sfp_type); else - PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n", + PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d", (int) hw->mac.type, (int) hw->phy.type); PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", @@ -1038,7 +1039,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, return (-EIO); } - PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n", + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id, "ixgbe_mac_82599_vf"); @@ -1418,7 +1419,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) /* IXGBE devices don't support half duplex */ if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) && (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) { - PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n", + PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu", dev->data->dev_conf.link_duplex, dev->data->port_id); return -EINVAL; @@ -1444,7 +1445,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) /* This can fail when allocating mbufs for 
 	err = ixgbe_dev_rx_init(dev);
 	if (err) {
-		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
+		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
 		goto error;
 	}
@@ -1491,7 +1492,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		speed = IXGBE_LINK_SPEED_10GB_FULL;
 		break;
 	default:
-		PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
+		PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
 			     dev->data->dev_conf.link_speed,
 			     dev->data->port_id);
 		goto error;
@@ -1599,8 +1600,8 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
 #ifdef RTE_NIC_BYPASS
 	if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
 		/* Not suported in bypass mode */
-		PMD_INIT_LOG(ERR, "\nSet link up is not supported "
-			     "by device id 0x%x\n", hw->device_id);
+		PMD_INIT_LOG(ERR, "Set link up is not supported "
+			     "by device id 0x%x", hw->device_id);
 		return -ENOTSUP;
 	}
 #endif
@@ -1609,7 +1610,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR, "\nSet link up is not supported by device id 0x%x\n",
+	PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
 		     hw->device_id);
 	return -ENOTSUP;
 }
@@ -1626,8 +1627,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 #ifdef RTE_NIC_BYPASS
 	if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
 		/* Not suported in bypass mode */
-		PMD_INIT_LOG(ERR, "\nSet link down is not supported "
-			     "by device id 0x%x\n", hw->device_id);
+		PMD_INIT_LOG(ERR, "Set link down is not supported "
+			     "by device id 0x%x", hw->device_id);
 		return -ENOTSUP;
 	}
 #endif
@@ -1636,7 +1637,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR, "\nSet link down is not supported by device id 0x%x\n",
+	PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
 		     hw->device_id);
 	return -ENOTSUP;
 }
@@ -2175,7 +2176,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
 	struct rte_eth_link link;
 	int intr_enable_delay = false;
 
-	PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
 
 	if (intr->flags & IXGBE_FLAG_MAILBOX) {
 		ixgbe_pf_mbx_process(dev);
@@ -2252,7 +2253,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
 	}
 
-	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
+	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
 	ixgbe_enable_intr(dev);
 	rte_intr_enable(&(dev->pci_dev->intr_handle));
 }
@@ -2366,7 +2367,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
 		return -ENOTSUP;
 	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
-	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
 	/*
 	 * At least reserve one Ethernet frame for watermark
@@ -2375,8 +2376,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
 	if ((fc_conf->high_water > max_high_water) ||
 	    (fc_conf->high_water < fc_conf->low_water)) {
-		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-		PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water);
+		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
 		return (-EINVAL);
 	}
@@ -2408,7 +2409,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return 0;
 	}
 
PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err); + PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err); return -EIO; } @@ -2438,13 +2439,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) if (hw->fc.current_mode & ixgbe_fc_tx_pause) { /* High/Low water can not be 0 */ if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) { - PMD_INIT_LOG(ERR,"Invalid water mark configuration\n"); + PMD_INIT_LOG(ERR,"Invalid water mark configuration"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { - PMD_INIT_LOG(ERR,"Invalid water mark configuration\n"); + PMD_INIT_LOG(ERR, "Invalid water mark configuration"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } @@ -2588,7 +2589,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); tc_num = map[pfc_conf->priority]; rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); - PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); /* * At least reserve one Ethernet frame for watermark * high_water/low_water in kilo bytes for ixgbe @@ -2596,8 +2597,8 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; if ((pfc_conf->fc.high_water > max_high_water) || (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { - PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n"); - PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water); + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); + PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); return (-EINVAL); } @@ -2613,7 +2614,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) return 0; - PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err); + PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); return -EIO; } @@ -2773,7 +2774,7 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev) { struct rte_eth_conf* conf = &dev->data->dev_conf; - PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n", + PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", dev->data->port_id); /* @@ -2782,12 +2783,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev) */ #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC if (!conf->rxmode.hw_strip_crc) { - PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n"); + PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip"); conf->rxmode.hw_strip_crc = 1; } #else if (conf->rxmode.hw_strip_crc) { - PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n"); + PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip"); conf->rxmode.hw_strip_crc = 0; } #endif @@ -2814,8 +2815,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) /* This can fail when allocating mbufs for descriptor rings */ err = ixgbevf_dev_rx_init(dev); if (err) { - PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", - err); + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); ixgbe_dev_clear_queues(dev); return err; } @@ -2966,7 +2966,7 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) /* we only need to do this if VMDq is enabled */ reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { - PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n"); + PMD_INIT_LOG(ERR, "VMDq must be 
 		return (-1);
 	}
@@ -3095,7 +3095,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
 	if (hw->mac.type == ixgbe_mac_82598EB) {
 		PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
-			     " on 82599 hardware and newer\n");
+			     " on 82599 hardware and newer");
 		return (-ENOTSUP);
 	}
 	if (ixgbe_vmdq_mode_check(hw) < 0)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_fdir.c b/lib/librte_pmd_ixgbe/ixgbe_fdir.c
index 6c0a530..8819aac 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_fdir.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_fdir.c
@@ -112,7 +112,7 @@ static void fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
 	}
 
 	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
-		PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!\n");
+		PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!");
 }
 
 /*
@@ -381,7 +381,7 @@ fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 	fdirhashcmd |= fdirhash;
 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
 
-	PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+	PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x", queue, (u32)fdirhashcmd);
 }
 
 /*
@@ -614,7 +614,7 @@ fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
 		fdirm |= IXGBE_FDIRM_L4P;
 		if (input_mask->dst_port_mask || input_mask->src_port_mask) {
-			PMD_INIT_LOG(ERR, " Error on src/dst port mask\n");
+			PMD_INIT_LOG(ERR, " Error on src/dst port mask");
 			return -EINVAL;
 		}
 	}
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 8732051..9a3fd0d 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -615,7 +615,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			   " tx_first=%u tx_last=%u\n",
+			   " tx_first=%u tx_last=%u",
 			   (unsigned) txq->port_id,
 			   (unsigned) txq->queue_id,
 			   (unsigned) pkt_len,
@@ -1066,7 +1066,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		if (ixgbe_rx_alloc_bufs(rxq) != 0) {
 			int i, j;
 
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
@@ -1193,7 +1193,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * frames to its peer(s).
 		 */
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
-			   "ext_err_stat=0x%08x pkt_len=%u\n",
+			   "ext_err_stat=0x%08x pkt_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -1201,7 +1201,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1295,7 +1295,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1382,8 +1382,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x data_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x data_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -1391,7 +1391,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1559,7 +1559,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1871,30 +1871,30 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		ixgbe_tx_queue_release(txq);
 		return (-ENOMEM);
 	}
 
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
 	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
 	    (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
-		PMD_INIT_LOG(INFO, "Using simple tx code path\n");
+		PMD_INIT_LOG(INFO, "Using simple tx code path");
 #ifdef RTE_IXGBE_INC_VECTOR
 		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
 		    ixgbe_txq_vec_setup(txq, socket_id) == 0) {
-			PMD_INIT_LOG(INFO, "Vector tx enabled.\n");
+			PMD_INIT_LOG(INFO, "Vector tx enabled.");
 			dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
 		} else
 #endif
 		dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
 	} else {
-		PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
+		PMD_INIT_LOG(INFO, "Using full-featured tx code path");
 		PMD_INIT_LOG(INFO, " - txq_flags = %lx "
-			     "[IXGBE_SIMPLE_FLAGS=%lx]\n",
+			     "[IXGBE_SIMPLE_FLAGS=%lx]",
 			     (long unsigned)txq->txq_flags,
 			     (long unsigned)IXGBE_SIMPLE_FLAGS);
 		PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
%lu " - "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n", + "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]", (long unsigned)txq->tx_rs_thresh, (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST); dev->tx_pkt_burst = ixgbe_xmit_pkts; @@ -2156,7 +2156,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, ixgbe_rx_queue_release(rxq); return (-ENOMEM); } - PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n", + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); /* @@ -2170,13 +2170,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " "satisfied. Rx Burst Bulk Alloc function will be " - "used on port=%d, queue=%d.\n", + "used on port=%d, queue=%d.", rxq->port_id, rxq->queue_id); dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc; #ifdef RTE_IXGBE_INC_VECTOR if (!ixgbe_rx_vec_condition_check(dev)) { PMD_INIT_LOG(INFO, "Vector rx enabled, please make " - "sure RX burst size no less than 32.\n"); + "sure RX burst size no less than 32."); ixgbe_rxq_vec_setup(rxq, socket_id); dev->rx_pkt_burst = ixgbe_recv_pkts_vec; } @@ -2186,7 +2186,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions " "are not satisfied, Scattered Rx is requested, " "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not " - "enabled (port=%d, queue=%d).\n", + "enabled (port=%d, queue=%d).", rxq->port_id, rxq->queue_id); } dev->data->rx_queues[queue_idx] = rxq; @@ -2205,7 +2205,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) uint32_t desc = 0; if (rx_queue_id >= dev->data->nb_rx_queues) { - PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id); + PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id); return 0; } @@ -2921,7 +2921,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_dcb_rx_hw_config(hw, dcb_config); break; default: - PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n"); + PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration"); break; } switch (dev->data->dev_conf.txmode.mq_mode) { @@ -2943,7 +2943,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_dcb_tx_hw_config(hw, dcb_config); break; default: - PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n"); + PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration"); break; } @@ -3214,7 +3214,7 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq) volatile union ixgbe_adv_rx_desc *rxd; struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); if (mbuf == NULL) { - PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n", + PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u", (unsigned) rxq->queue_id); return (-ENOMEM); } @@ -3606,7 +3606,7 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw) if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) != IXGBE_SUCCESS) { - PMD_INIT_LOG(ERR, "Could not enable loopback mode\n"); + PMD_INIT_LOG(ERR, "Could not enable loopback mode"); /* ignore error */ return; } @@ -3701,7 +3701,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) /* Allocate buffers for descriptor rings */ if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) { - PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d\n", + PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d", rx_queue_id); return -1; } @@ -3716,7 +3716,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) rxdctl = IXGBE_READ_REG(hw, 
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d\n",
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
 				     rx_queue_id);
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
@@ -3755,7 +3755,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		} while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d\n",
+			PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
 				     rx_queue_id);
 
 		rte_delay_us(RTE_IXGBE_WAIT_100_US);
@@ -3799,7 +3799,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 			} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
 			if (!poll_ms)
 				PMD_INIT_LOG(ERR, "Could not enable "
-					     "Tx Queue %d\n", tx_queue_id);
+					     "Tx Queue %d", tx_queue_id);
 		}
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
@@ -3840,7 +3840,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 			} while (--poll_ms && (txtdh != txtdt));
 			if (!poll_ms)
 				PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
-					     "when stopping.\n", tx_queue_id);
+					     "when stopping.", tx_queue_id);
 		}
 
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
@@ -3857,7 +3857,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 			} while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
 			if (!poll_ms)
 				PMD_INIT_LOG(ERR, "Could not disable "
-					     "Tx Queue %d\n", tx_queue_id);
+					     "Tx Queue %d", tx_queue_id);
 		}
 
 		if (txq->ops != NULL) {
@@ -4073,7 +4073,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d\n", i);
+			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
 	}
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -4090,7 +4090,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d\n", i);
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
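
For readers wondering why the trailing \n can simply be dropped at every call site: the idea behind this cleanup is that the PMD_*_LOG wrappers append the newline once, centrally, which is also what makes splitting a multi-line message into one call per line safe. The standalone sketch below illustrates that pattern with a simplified fprintf()-based wrapper; the real DPDK macros route through rte_log() with a log type and level, so treat this as an illustration of the convention, not the actual DPDK definition.

/* log_sketch.c - illustration only, not DPDK code.
 * The wrapper appends "\n" itself, so every message is exactly one
 * line and call sites stay free of stray newlines. */
#include <stdio.h>

/* GNU-style named variadic macro, the style DPDK itself uses.
 * The "level" argument is kept for shape but unused in this sketch. */
#define PMD_INIT_LOG(level, fmt, args...) \
	fprintf(stderr, "PMD: %s(): " fmt "\n", __func__, ## args)

static int check_queue(int rx_queue_id, int nb_rx_queues)
{
	if (rx_queue_id >= nb_rx_queues) {
		/* No "\n" in the message: the macro adds it. */
		PMD_INIT_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
		return -1;
	}
	return 0;
}

int main(void)
{
	return check_queue(8, 4) ? 1 : 0;
}

With a wrapper like this in place, a message such as the two-sentence EEPROM warning above naturally becomes two independent PMD_INIT_LOG(ERR, ...) calls, each producing one complete, prefixed line.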