From patchwork Mon Sep 1 10:24:37 2014
X-Patchwork-Submitter: David Marchand
X-Patchwork-Id: 287
From: David Marchand
To: dev@dpdk.org
Date: Mon, 1 Sep 2014 12:24:37 +0200
Message-Id: <1409567080-27083-15-git-send-email-david.marchand@6wind.com>
X-Mailer: git-send-email 1.7.10.4
In-Reply-To: <1409567080-27083-1-git-send-email-david.marchand@6wind.com>
References: <1409567080-27083-1-git-send-email-david.marchand@6wind.com>
Subject: [dpdk-dev] [PATCH v2 14/17] e1000: clean log messages
List-Id: patches and discussions about DPDK

Clean log messages:
- remove leading \n in some messages,
- remove trailing \n in some messages,
- split multi lines messages,
- replace some PMD_INIT_LOG(DEBUG, "some_func") with PMD_INIT_FUNC_TRACE().

Signed-off-by: David Marchand
---
 lib/librte_pmd_e1000/e1000_logs.h |    4 +-
 lib/librte_pmd_e1000/em_ethdev.c  |   64 ++++++++++------------
 lib/librte_pmd_e1000/em_rxtx.c    |  109 ++++++++++++++++++-------------------
 lib/librte_pmd_e1000/igb_ethdev.c |   91 +++++++++++++++----------------
 lib/librte_pmd_e1000/igb_pf.c     |    4 +-
 lib/librte_pmd_e1000/igb_rxtx.c   |   45 +++++++--------
 6 files changed, 153 insertions(+), 164 deletions(-)

diff --git a/lib/librte_pmd_e1000/e1000_logs.h b/lib/librte_pmd_e1000/e1000_logs.h
index fe6e023..4dd7208 100644
--- a/lib/librte_pmd_e1000/e1000_logs.h
+++ b/lib/librte_pmd_e1000/e1000_logs.h
@@ -37,8 +37,10 @@
 #ifdef RTE_LIBRTE_E1000_DEBUG_INIT
 #define PMD_INIT_LOG(level, fmt, args...) \
 	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
 #else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
+#define PMD_INIT_LOG(level, fmt, args...) do { } while (0)
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
 #endif
 
 #ifdef RTE_LIBRTE_E1000_DEBUG_RX
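[Editor's note: for readers unfamiliar with the macros above, here is a minimal standalone
sketch of their behaviour. It assumes RTE_LIBRTE_E1000_DEBUG_INIT is defined; the fprintf()
stand-in for RTE_LOG and the em_hw_init_demo() caller are hypothetical, not DPDK code. The
wrapper already appends "\n" and prefixes __func__, which is why the patch drops trailing
newlines and hard-coded function names from the messages.]

	#include <stdio.h>

	/* Hypothetical stand-in for DPDK's RTE_LOG, for illustration only. */
	#define RTE_LOG(level, type, ...) fprintf(stderr, __VA_ARGS__)

	/* Same definitions as in e1000_logs.h above. */
	#define PMD_INIT_LOG(level, fmt, args...) \
		RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
	#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")

	static int em_hw_init_demo(void)
	{
		/* Prints: em_hw_init_demo():  >> */
		PMD_INIT_FUNC_TRACE();
		/* Prints: em_hw_init_demo(): NVM Initialization Error
		 * The macro supplies the newline, so a "\n" inside the
		 * message would produce a blank line in the log. */
		PMD_INIT_LOG(ERR, "NVM Initialization Error");
		return 0;
	}

	int main(void)
	{
		return em_hw_init_demo();
	}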
diff --git a/lib/librte_pmd_e1000/em_ethdev.c b/lib/librte_pmd_e1000/em_ethdev.c
index 4555294..fd36b37 100644
--- a/lib/librte_pmd_e1000/em_ethdev.c
+++ b/lib/librte_pmd_e1000/em_ethdev.c
@@ -249,9 +249,9 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
 			em_hw_init(hw) != 0) {
 		PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
-			"failed to init HW",
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
-			pci_dev->id.device_id);
+			     "failed to init HW",
+			     eth_dev->data->port_id, pci_dev->id.vendor_id,
+			     pci_dev->id.device_id);
 		return -(ENODEV);
 	}
@@ -260,8 +260,8 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 			hw->mac.rar_entry_count, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
-			"store MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+			     "store MAC addresses",
+			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
 		return -(ENOMEM);
 	}
@@ -272,9 +272,9 @@ eth_em_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	/* initialize the vfta */
 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
 
-	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
-		     eth_dev->data->port_id, pci_dev->id.vendor_id,
-		     pci_dev->id.device_id);
+	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id);
 
 	rte_intr_callback_register(&(pci_dev->intr_handle),
 			eth_em_interrupt_handler, (void *)eth_dev);
@@ -306,17 +306,17 @@ em_hw_init(struct e1000_hw *hw)
 	diag = hw->mac.ops.init_params(hw);
 	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "MAC Initialization Error\n");
+		PMD_INIT_LOG(ERR, "MAC Initialization Error");
 		return diag;
 	}
 	diag = hw->nvm.ops.init_params(hw);
 	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "NVM Initialization Error\n");
+		PMD_INIT_LOG(ERR, "NVM Initialization Error");
 		return diag;
 	}
 	diag = hw->phy.ops.init_params(hw);
 	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "PHY Initialization Error\n");
+		PMD_INIT_LOG(ERR, "PHY Initialization Error");
 		return diag;
 	}
 	(void) e1000_get_bus_info(hw);
@@ -375,7 +375,7 @@ em_hw_init(struct e1000_hw *hw)
 	diag = e1000_check_reset_block(hw);
 	if (diag < 0) {
 		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
-			"SOL/IDER session");
+			     "SOL/IDER session");
 	}
 
 	return (0);
@@ -390,11 +390,10 @@ eth_em_configure(struct rte_eth_dev *dev)
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-	PMD_INIT_LOG(DEBUG, ">>");
-
+	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+	PMD_INIT_FUNC_TRACE();
-
-	PMD_INIT_LOG(DEBUG, "<<");
 	return (0);
 }
@@ -453,7 +452,7 @@ eth_em_start(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret, mask;
 
-	PMD_INIT_LOG(DEBUG, ">>");
+	PMD_INIT_FUNC_TRACE();
 
 	eth_em_stop(dev);
@@ -573,9 +572,9 @@ eth_em_start(struct rte_eth_dev *dev)
 	return (0);
 
 error_invalid_config:
-	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port "
-		     "%u\n", dev->data->dev_conf.link_speed,
-		     dev->data->dev_conf.link_duplex, dev->data->port_id);
+	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
+		     dev->data->dev_conf.link_speed,
+		     dev->data->dev_conf.link_duplex, dev->data->port_id);
 	em_dev_clear_queues(dev);
 	return (-EINVAL);
 }
@@ -1296,20 +1295,17 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
 	memset(&link, 0, sizeof(link));
 	rte_em_dev_atomic_read_link_status(dev, &link);
 	if (link.link_status) {
-		PMD_INIT_LOG(INFO,
-			" Port %d: Link Up - speed %u Mbps - %s\n",
-			dev->data->port_id, (unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
-			"full-duplex" : "half-duplex");
+		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
+			     dev->data->port_id, (unsigned)link.link_speed,
+			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+			     "full-duplex" : "half-duplex");
 	} else {
-		PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
-			dev->data->port_id);
+		PMD_INIT_LOG(INFO, " Port %d: Link Down",
+			     dev->data->port_id);
 	}
 	PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-		     dev->pci_dev->addr.domain,
-		     dev->pci_dev->addr.bus,
-		     dev->pci_dev->addr.devid,
-		     dev->pci_dev->addr.function);
+		     dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
+		     dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
 	tctl = E1000_READ_REG(hw, E1000_TCTL);
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	if (link.link_status) {
@@ -1429,14 +1425,14 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	if (fc_conf->autoneg != hw->mac.autoneg)
 		return -ENOTSUP;
 	rx_buf_size = em_get_rx_buffer_size(hw);
-	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
 	/* At least reserve one Ethernet frame for watermark */
 	max_high_water = rx_buf_size - ETHER_MAX_LEN;
 	if ((fc_conf->high_water > max_high_water) ||
 			(fc_conf->high_water < fc_conf->low_water)) {
-		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n");
-		PMD_INIT_LOG(ERR, "high water must <= 0x%x \n", max_high_water);
+		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
+		PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
 		return (-EINVAL);
 	}
@@ -1466,7 +1462,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		return 0;
 	}
 
-	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err);
+	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
 	return (-EIO);
 }
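[Editor's note: many hunks in this patch, like the ones above and below, only re-wrap a log
format string across source lines. Adjacent C string literals are concatenated at compile
time, so the logged text is unchanged as long as a space is kept at the join. A hypothetical
standalone example, with made-up values, not taken from the driver:]

	#include <stdio.h>

	int main(void)
	{
		/* The two literals become one format string at compile time. */
		printf("Invalid link_speed/link_duplex (%u/%u) "
		       "for port %u\n", 10u, 1u, 0u);
		/* Output: Invalid link_speed/link_duplex (10/1) for port 0 */
		return 0;
	}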
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 01efa50..83ecb33 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -317,10 +317,8 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
 	if (! (txr[desc_to_clean_to].upper.fields.status &
 			E1000_TXD_STAT_DD)) {
-		PMD_TX_FREE_LOG(DEBUG,
-				"TX descriptor %4u is not done"
-				"(port=%d queue=%d)",
-				desc_to_clean_to,
+		PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done"
+				"(port=%d queue=%d)", desc_to_clean_to,
 				txq->port_id, txq->queue_id);
 		/* Failed to clean any descriptors, better luck next time */
 		return -(1);
@@ -334,11 +332,10 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 	nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
 						last_desc_cleaned);
 
-	PMD_TX_FREE_LOG(DEBUG,
-			"Cleaning %4u TX descriptors: %4u to %4u "
-			"(port=%d queue=%d)",
-			nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
-			txq->port_id, txq->queue_id);
+	PMD_TX_FREE_LOG(DEBUG, "Cleaning %4u TX descriptors: %4u to %4u "
+			"(port=%d queue=%d)", nb_tx_to_clean,
+			last_desc_cleaned, desc_to_clean_to, txq->port_id,
+			txq->queue_id);
 
 	/*
 	 * The last descriptor to clean is done, so that means all the
@@ -451,12 +448,12 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			   " tx_first=%u tx_last=%u\n",
-			   (unsigned) txq->port_id,
-			   (unsigned) txq->queue_id,
-			   (unsigned) tx_pkt->pkt.pkt_len,
-			   (unsigned) tx_id,
-			   (unsigned) tx_last);
+			   " tx_first=%u tx_last=%u",
+			   (unsigned) txq->port_id,
+			   (unsigned) txq->queue_id,
+			   (unsigned) tx_pkt->pkt.pkt_len,
+			   (unsigned) tx_id,
+			   (unsigned) tx_last);
 
 		/*
 		 * Make sure there are enough TX descriptors available to
@@ -464,8 +461,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * nb_used better be less than or equal to txq->tx_rs_thresh
 		 */
 		while (unlikely (nb_used > txq->nb_tx_free)) {
-			PMD_TX_FREE_LOG(DEBUG,
-					"Not enough free TX descriptors "
+			PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors "
 					"nb_used=%4u nb_free=%4u "
 					"(port=%d queue=%d)",
 					nb_used, txq->nb_tx_free,
@@ -588,9 +584,8 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Set RS bit only on threshold packets' last descriptor */
 		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
-			PMD_TX_FREE_LOG(DEBUG,
-					"Setting RS bit on TXD id="
-					"%4u (port=%d queue=%d)",
+			PMD_TX_FREE_LOG(DEBUG, "Setting RS bit on TXD id=%4u "
+					"(port=%d queue=%d)",
 					tx_last, txq->port_id, txq->queue_id);
 
 			cmd_type_len |= E1000_TXD_CMD_RS;
@@ -607,8 +602,8 @@ end_of_tx:
 	 * Set the Transmit Descriptor Tail (TDT)
 	 */
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		(unsigned) txq->port_id, (unsigned) txq->queue_id,
-		(unsigned) tx_id, (unsigned) nb_tx);
+		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
+		   (unsigned) tx_id, (unsigned) nb_tx);
 	E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
 	txq->tx_tail = tx_id;
@@ -712,19 +707,19 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "status=0x%x pkt_len=%u\n",
-			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			   (unsigned) rx_id, (unsigned) status,
-			   (unsigned) rte_le_to_cpu_16(rxd.length));
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "status=0x%x pkt_len=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) status,
+			   (unsigned) rte_le_to_cpu_16(rxd.length));
 
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n",
-				   (unsigned) rxq->port_id,
-				   (unsigned) rxq->queue_id);
-			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+				   "queue_id=%u",
+				   (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
+			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
 		}
@@ -806,10 +801,10 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
-			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			   (unsigned) rx_id, (unsigned) nb_hold,
-			   (unsigned) nb_rx);
+			   "nb_hold=%u nb_rx=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) nb_hold,
+			   (unsigned) nb_rx);
 		rx_id = (uint16_t) ((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
 		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
@@ -892,17 +887,17 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "status=0x%x data_len=%u\n",
-			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			   (unsigned) rx_id, (unsigned) status,
-			   (unsigned) rte_le_to_cpu_16(rxd.length));
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "status=0x%x data_len=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) status,
+			   (unsigned) rte_le_to_cpu_16(rxd.length));
 
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
-				   (unsigned) rxq->queue_id);
+				   "queue_id=%u", (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
 		}
@@ -1050,10 +1045,10 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
-			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			   (unsigned) rx_id, (unsigned) nb_hold,
-			   (unsigned) nb_rx);
+			   "nb_hold=%u nb_rx=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) nb_hold,
+			   (unsigned) nb_rx);
 		rx_id = (uint16_t) ((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
 		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
@@ -1213,7 +1208,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	if (tx_free_thresh >= (nb_desc - 3)) {
 		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
 			     "number of TX descriptors minus 3. "
" - "(tx_free_thresh=%u port=%d queue=%d)\n", + "(tx_free_thresh=%u port=%d queue=%d)", (unsigned int)tx_free_thresh, (int)dev->data->port_id, (int)queue_idx); return -(EINVAL); @@ -1221,7 +1216,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, if (tx_rs_thresh > tx_free_thresh) { PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to " "tx_free_thresh. (tx_free_thresh=%u " - "tx_rs_thresh=%u port=%d queue=%d)\n", + "tx_rs_thresh=%u port=%d queue=%d)", (unsigned int)tx_free_thresh, (unsigned int)tx_rs_thresh, (int)dev->data->port_id, @@ -1238,7 +1233,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) { PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if " "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u " - "port=%d queue=%d)\n", (unsigned int)tx_rs_thresh, + "port=%d queue=%d)", (unsigned int)tx_rs_thresh, (int)dev->data->port_id, (int)queue_idx); return -(EINVAL); } @@ -1289,8 +1284,8 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, #endif txq->tx_ring = (struct e1000_data_desc *) tz->addr; - PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n", - txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); em_reset_tx_queue(txq); @@ -1370,7 +1365,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, */ if (rx_conf->rx_drop_en) { PMD_INIT_LOG(ERR, "drop_en functionality not supported by " - "device\n"); + "device"); return (-EINVAL); } @@ -1419,8 +1414,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, #endif rxq->rx_ring = (struct e1000_rx_desc *) rz->addr; - PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n", - rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); dev->data->rx_queues[queue_idx] = rxq; em_reset_rx_queue(rxq); @@ -1437,7 +1432,7 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) uint32_t desc = 0; if (rx_queue_id >= dev->data->nb_rx_queues) { - PMD_RX_LOG(DEBUG,"Invalid RX queue_id=%d\n", rx_queue_id); + PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id); return 0; } @@ -1582,8 +1577,8 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq) struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); if (mbuf == NULL) { - PMD_INIT_LOG(ERR, "RX mbuf alloc failed " - "queue_id=%hu\n", rxq->queue_id); + PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%hu", + rxq->queue_id); return (-ENOMEM); } diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c index b45eb24..4dbf059 100644 --- a/lib/librte_pmd_e1000/igb_ethdev.c +++ b/lib/librte_pmd_e1000/igb_ethdev.c @@ -528,8 +528,8 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to " - "store MAC addresses", - ETHER_ADDR_LEN * hw->mac.rar_entry_count); + "store MAC addresses", + ETHER_ADDR_LEN * hw->mac.rar_entry_count); error = -ENOMEM; goto err_late; } @@ -553,7 +553,7 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, /* Indicate SOL/IDER usage */ if (e1000_check_reset_block(hw) < 0) { PMD_INIT_LOG(ERR, "PHY reset is blocked due to" - "SOL/IDER session"); + "SOL/IDER session"); } /* initialize PF if max_vfs not zero */ @@ -565,7 +565,7 @@ eth_igb_dev_init(__attribute__((unused)) 
diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
index b45eb24..4dbf059 100644
--- a/lib/librte_pmd_e1000/igb_ethdev.c
+++ b/lib/librte_pmd_e1000/igb_ethdev.c
@@ -528,8 +528,8 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
-			"store MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+			     "store MAC addresses",
+			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
 		error = -ENOMEM;
 		goto err_late;
 	}
@@ -553,7 +553,7 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	/* Indicate SOL/IDER usage */
 	if (e1000_check_reset_block(hw) < 0) {
 		PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
-			"SOL/IDER session");
+			     "SOL/IDER session");
 	}
 
 	/* initialize PF if max_vfs not zero */
@@ -565,7 +565,7 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
 	E1000_WRITE_FLUSH(hw);
 
-	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
+	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
 		     pci_dev->id.device_id);
@@ -598,7 +598,7 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	int diag;
 
-	PMD_INIT_LOG(DEBUG, "eth_igbvf_dev_init");
+	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &igbvf_eth_dev_ops;
 	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
@@ -623,7 +623,7 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	diag = e1000_setup_init_funcs(hw, TRUE);
 	if (diag != 0) {
 		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
-			diag);
+			     diag);
 		return -EIO;
 	}
@@ -639,10 +639,9 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	eth_dev->data->mac_addrs = rte_zmalloc("igbvf",
 		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
-		PMD_INIT_LOG(ERR,
-			"Failed to allocate %d bytes needed to store MAC "
-			"addresses",
-			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+			     "store MAC addresses",
+			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
 		return -ENOMEM;
 	}
@@ -650,11 +649,9 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
 			&eth_dev->data->mac_addrs[0]);
 
-	PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x "
-			"mac.type=%s\n",
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
-			pci_dev->id.device_id,
-			"igb_mac_82576_vf");
+	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id, "igb_mac_82576_vf");
 
 	return 0;
 }
@@ -720,11 +717,9 @@ eth_igb_configure(struct rte_eth_dev *dev)
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-	PMD_INIT_LOG(DEBUG, ">>");
-
+	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
-
-	PMD_INIT_LOG(DEBUG, "<<");
+	PMD_INIT_FUNC_TRACE();
 
 	return (0);
 }
@@ -737,7 +732,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 	int ret, i, mask;
 	uint32_t ctrl_ext;
 
-	PMD_INIT_LOG(DEBUG, ">>");
+	PMD_INIT_FUNC_TRACE();
 
 	/* Power up the phy. Needed to make the link go Up */
 	e1000_power_up_phy(hw);
@@ -888,9 +883,9 @@ eth_igb_start(struct rte_eth_dev *dev)
 	return (0);
 
 error_invalid_config:
-	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
-		     dev->data->dev_conf.link_speed,
-		     dev->data->dev_conf.link_duplex, dev->data->port_id);
+	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
+		     dev->data->dev_conf.link_speed,
+		     dev->data->dev_conf.link_duplex, dev->data->port_id);
 	igb_dev_clear_queues(dev);
 	return (-EINVAL);
 }
@@ -1789,20 +1784,20 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
 		memset(&link, 0, sizeof(link));
 		rte_igb_dev_atomic_read_link_status(dev, &link);
 		if (link.link_status) {
-			PMD_INIT_LOG(INFO,
-				" Port %d: Link Up - speed %u Mbps - %s\n",
-				dev->data->port_id, (unsigned)link.link_speed,
-				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
-				"full-duplex" : "half-duplex");
+			PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps "
+				     "- %s", dev->data->port_id,
+				     (unsigned)link.link_speed,
+				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex"); } else { - PMD_INIT_LOG(INFO, " Port %d: Link Down\n", - dev->data->port_id); + PMD_INIT_LOG(INFO, " Port %d: Link Down", + dev->data->port_id); } PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d", - dev->pci_dev->addr.domain, - dev->pci_dev->addr.bus, - dev->pci_dev->addr.devid, - dev->pci_dev->addr.function); + dev->pci_dev->addr.domain, + dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, + dev->pci_dev->addr.function); tctl = E1000_READ_REG(hw, E1000_TCTL); rctl = E1000_READ_REG(hw, E1000_RCTL); if (link.link_status) { @@ -1923,14 +1918,14 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) if (fc_conf->autoneg != hw->mac.autoneg) return -ENOTSUP; rx_buf_size = igb_get_rx_buffer_size(hw); - PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); /* At least reserve one Ethernet frame for watermark */ max_high_water = rx_buf_size - ETHER_MAX_LEN; if ((fc_conf->high_water > max_high_water) || (fc_conf->high_water < fc_conf->low_water)) { - PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n"); - PMD_INIT_LOG(ERR, "high water must <= 0x%x \n", max_high_water); + PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); + PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); return (-EINVAL); } @@ -1960,7 +1955,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) return 0; } - PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err); + PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); return (-EIO); } @@ -1995,7 +1990,7 @@ eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) static void igbvf_intr_disable(struct e1000_hw *hw) { - PMD_INIT_LOG(DEBUG, "igbvf_intr_disable"); + PMD_INIT_FUNC_TRACE(); /* Clear interrupt mask to stop from interrupts being generated */ E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF); @@ -2077,8 +2072,8 @@ igbvf_dev_configure(struct rte_eth_dev *dev) { struct rte_eth_conf* conf = &dev->data->dev_conf; - PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n", - dev->data->port_id); + PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", + dev->data->port_id); /* * VF has no ability to enable/disable HW CRC @@ -2086,12 +2081,12 @@ igbvf_dev_configure(struct rte_eth_dev *dev) */ #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC if (!conf->rxmode.hw_strip_crc) { - PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n"); + PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip"); conf->rxmode.hw_strip_crc = 1; } #else if (conf->rxmode.hw_strip_crc) { - PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n"); + PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip"); conf->rxmode.hw_strip_crc = 0; } #endif @@ -2106,7 +2101,7 @@ igbvf_dev_start(struct rte_eth_dev *dev) E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); int ret; - PMD_INIT_LOG(DEBUG, "igbvf_dev_start"); + PMD_INIT_FUNC_TRACE(); hw->mac.ops.reset_hw(hw); @@ -2129,7 +2124,7 @@ igbvf_dev_start(struct rte_eth_dev *dev) static void igbvf_dev_stop(struct rte_eth_dev *dev) { - PMD_INIT_LOG(DEBUG, "igbvf_dev_stop"); + PMD_INIT_FUNC_TRACE(); igbvf_stop_adapter(dev); @@ -2147,7 +2142,7 @@ igbvf_dev_close(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - PMD_INIT_LOG(DEBUG, "igbvf_dev_close"); + PMD_INIT_FUNC_TRACE(); e1000_reset_hw(hw); @@ -2203,7 +2198,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) uint32_t vid_bit = 0; int ret = 
-	PMD_INIT_LOG(DEBUG, "igbvf_vlan_filter_set");
+	PMD_INIT_FUNC_TRACE();
 
 	/*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/
 	ret = igbvf_set_vfta(hw, vlan_id, !!on);
@@ -2432,7 +2427,7 @@ eth_igb_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
 
 	if (filter->priority_en) {
 		PMD_INIT_LOG(ERR, "vlan and priority (%d) is not supported"
-			" in E1000.", filter->priority);
+			     " in E1000.", filter->priority);
 		return -EINVAL;
 	}
diff --git a/lib/librte_pmd_e1000/igb_pf.c b/lib/librte_pmd_e1000/igb_pf.c
index 76033ad..bc3816a 100644
--- a/lib/librte_pmd_e1000/igb_pf.c
+++ b/lib/librte_pmd_e1000/igb_pf.c
@@ -404,7 +404,7 @@ igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 
 	retval = e1000_read_mbx(hw, msgbuf, mbx_size, vf);
 	if (retval) {
-		PMD_INIT_LOG(ERR, "Error mbx recv msg from VF %d\n", vf);
+		PMD_INIT_LOG(ERR, "Error mbx recv msg from VF %d", vf);
 		return retval;
 	}
@@ -432,7 +432,7 @@ igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 		retval = igb_vf_set_vlan(dev, vf, msgbuf);
 		break;
 	default:
-		PMD_INIT_LOG(DEBUG, "Unhandled Msg %8.8x\n",
+		PMD_INIT_LOG(DEBUG, "Unhandled Msg %8.8x",
 			(unsigned) msgbuf[0]);
 		retval = E1000_ERR_MBX;
 		break;
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 3aa9609..5ca06c9 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -396,7 +396,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			   " tx_first=%u tx_last=%u\n",
+			   " tx_first=%u tx_last=%u",
 			   (unsigned) txq->port_id,
 			   (unsigned) txq->queue_id,
 			   (unsigned) pkt_len,
@@ -548,7 +548,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txd->read.cmd_type_len |=
 			rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
 	}
- end_of_tx:
+end_of_tx:
 	rte_wmb();
 
 	/*
@@ -697,8 +697,8 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x pkt_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x pkt_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -706,7 +706,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -794,7 +794,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -881,8 +881,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x data_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x data_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -890,7 +890,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1049,7 +1049,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1211,14 +1211,14 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	 */
 	if (tx_conf->tx_free_thresh != 0)
 		PMD_INIT_LOG(WARNING, "The tx_free_thresh parameter is not "
-			     "used for the 1G driver.\n");
+			     "used for the 1G driver.");
 	if (tx_conf->tx_rs_thresh != 0)
 		PMD_INIT_LOG(WARNING, "The tx_rs_thresh parameter is not "
-			     "used for the 1G driver.\n");
+			     "used for the 1G driver.");
 	if (tx_conf->tx_thresh.wthresh == 0)
 		PMD_INIT_LOG(WARNING, "To improve 1G driver performance, "
 			     "consider setting the TX WTHRESH value to 4, 8, "
-			     "or 16.\n");
+			     "or 16.");
 
 	/* Free memory prior to re-allocation if needed */
 	if (dev->data->tx_queues[queue_idx] != NULL) {
@@ -1271,7 +1271,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 		igb_tx_queue_release(txq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"",
 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	igb_reset_tx_queue(txq, dev);
@@ -1409,7 +1409,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 		igb_rx_queue_release(rxq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"",
		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1427,7 +1427,7 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint32_t desc = 0;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
+		PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
 		return 0;
 	}
@@ -1726,7 +1726,7 @@ igb_is_vmdq_supported(const struct rte_eth_dev *dev)
 	case e1000_i210:
 	case e1000_i211:
 	default:
-		PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
+		PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
 		return 0;
 	}
 }
@@ -1739,7 +1739,8 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	uint32_t mrqc, vt_ctl, vmolr, rctl;
 	int i;
 
-	PMD_INIT_LOG(DEBUG, ">>");
+	PMD_INIT_FUNC_TRACE();
+
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
@@ -1827,8 +1828,8 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
 
 		if (mbuf == NULL) {
-			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-				     "queue_id=%hu\n", rxq->queue_id);
+			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%hu",
+				     rxq->queue_id);
 			return (-ENOMEM);
 		}
 		dma_addr =
@@ -2273,7 +2274,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 			 * to avoid Write-Back not triggered sometimes
 			 */
 			rxdctl |= 0x10000;
-			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
+			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
 		} else
 			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
@@ -2341,7 +2342,7 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
 			 * to avoid Write-Back not triggered sometimes
 			 */
 			txdctl |= 0x10000;
-			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
+			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
 		} else
 			txdctl |= ((txq->wthresh & 0x1F) << 16);