List comments

List the comments posted on a patch, identified by the numeric patch ID in the URL. The response is a JSON array of comment objects.

GET /api/patches/276/comments/
HTTP 200 OK
Allow: GET, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

[
    {
        "id": 624,
        "web_url": "http://patches.dpdk.org/comment/624/",
        "msgid": "<CADNuJVq7F-KDCFAyes_Evoz=vEXzWq=raF+749U+i4usz1H8NA@mail.gmail.com>",
        "date": "2014-09-02T15:19:57",
        "subject": "Re: [dpdk-dev] [PATCH v2 03/17] ixgbe: clean log messages",
        "submitter": {
            "id": 61,
            "url": "http://patches.dpdk.org/api/people/61/",
            "name": "Jay Rolette",
            "email": "rolette@infiniteio.com"
        },
        "content": "Looks good\n\n\nOn Mon, Sep 1, 2014 at 5:24 AM, David Marchand <david.marchand@6wind.com>\nwrote:\n\n> Clean log messages:\n> - remove leading \\n in some messages,\n> - remove trailing \\n in some messages,\n> - split multi lines messages,\n> - replace some PMD_INIT_LOG(DEBUG, \"some_func\") with PMD_INIT_FUNC_TRACE().\n>\n> Signed-off-by: David Marchand <david.marchand@6wind.com>\n> ---\n>  lib/librte_pmd_ixgbe/ixgbe_ethdev.c |  150\n> +++++++++++++++++------------------\n>  lib/librte_pmd_ixgbe/ixgbe_fdir.c   |    6 +-\n>  lib/librte_pmd_ixgbe/ixgbe_rxtx.c   |   93 +++++++++++-----------\n>  3 files changed, 124 insertions(+), 125 deletions(-)\n>\n> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c\n> b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c\n> index a8a7ed6..1419494 100644\n> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c\n> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c\n> @@ -547,12 +547,12 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev\n> *eth_dev,\n>         if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type !=\n> ixgbe_mac_X540))\n>                 return -ENOSYS;\n>\n> -       PMD_INIT_LOG(INFO, \"Setting port %d, %s queue_id %d to stat index\n> %d\\n\",\n> +       PMD_INIT_LOG(INFO, \"Setting port %d, %s queue_id %d to stat index\n> %d\",\n>                      (int)(eth_dev->data->port_id), is_rx ? \"RX\" : \"TX\",\n> queue_id, stat_idx);\n>\n>         n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);\n>         if (n >= IXGBE_NB_STAT_MAPPING_REGS) {\n> -               PMD_INIT_LOG(ERR, \"Nb of stat mapping registers\n> exceeded\\n\");\n> +               PMD_INIT_LOG(ERR, \"Nb of stat mapping registers exceeded\");\n>                 return -EIO;\n>         }\n>         offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);\n> @@ -572,19 +572,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev\n> *eth_dev,\n>         else\n>                 stat_mappings->rqsmr[n] |= qsmr_mask;\n>\n> -       PMD_INIT_LOG(INFO, \"Set port %d, %s queue_id %d to stat index %d\\n\"\n> -                    \"%s[%d] = 0x%08x\\n\",\n> -                    (int)(eth_dev->data->port_id), is_rx ? \"RX\" : \"TX\",\n> queue_id, stat_idx,\n> -                    is_rx ? \"RQSMR\" : \"TQSM\",n, is_rx ?\n> stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);\n> +       PMD_INIT_LOG(INFO, \"Set port %d, %s queue_id %d to stat index %d\",\n> +                    (int)(eth_dev->data->port_id), is_rx ? \"RX\" : \"TX\",\n> +                    queue_id, stat_idx);\n> +       PMD_INIT_LOG(INFO, \"%s[%d] = 0x%08x\", is_rx ? \"RQSMR\" : \"TQSM\", n,\n> +                    is_rx ? 
stat_mappings->rqsmr[n] :\n> stat_mappings->tqsm[n]);\n>\n>         /* Now write the mapping in the appropriate register */\n>         if (is_rx) {\n> -               PMD_INIT_LOG(INFO, \"Write 0x%x to RX IXGBE stat mapping\n> reg:%d\\n\",\n> +               PMD_INIT_LOG(INFO, \"Write 0x%x to RX IXGBE stat mapping\n> reg:%d\",\n>                              stat_mappings->rqsmr[n], n);\n>                 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n),\n> stat_mappings->rqsmr[n]);\n>         }\n>         else {\n> -               PMD_INIT_LOG(INFO, \"Write 0x%x to TX IXGBE stat mapping\n> reg:%d\\n\",\n> +               PMD_INIT_LOG(INFO, \"Write 0x%x to TX IXGBE stat mapping\n> reg:%d\",\n>                              stat_mappings->tqsm[n], n);\n>                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);\n>         }\n> @@ -790,12 +791,13 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct\n> eth_driver *eth_drv,\n>\n>         if (diag == IXGBE_ERR_EEPROM_VERSION) {\n>                 PMD_INIT_LOG(ERR, \"This device is a pre-production\n> adapter/\"\n> -                   \"LOM.  Please be aware there may be issues associated \"\n> -                   \"with your hardware.\\n If you are experiencing\n> problems \"\n> +                   \"LOM. Please be aware there may be issues associated \"\n> +                   \"with your hardware.\");\n> +               PMD_INIT_LOG(ERR, \"If you are experiencing problems \"\n>                     \"please contact your Intel or hardware representative \"\n> -                   \"who provided you with this hardware.\\n\");\n> +                   \"who provided you with this hardware.\");\n>         } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)\n> -               PMD_INIT_LOG(ERR, \"Unsupported SFP+ Module\\n\");\n> +               PMD_INIT_LOG(ERR, \"Unsupported SFP+ Module\");\n>         if (diag) {\n>                 PMD_INIT_LOG(ERR, \"Hardware Initialization Failure: %d\",\n> diag);\n>                 return -EIO;\n> @@ -811,10 +813,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct\n> eth_driver *eth_drv,\n>         eth_dev->data->mac_addrs = rte_zmalloc(\"ixgbe\", ETHER_ADDR_LEN *\n>                         hw->mac.num_rar_entries, 0);\n>         if (eth_dev->data->mac_addrs == NULL) {\n> -               PMD_INIT_LOG(ERR,\n> -                       \"Failed to allocate %u bytes needed to store \"\n> -                       \"MAC addresses\",\n> -                       ETHER_ADDR_LEN * hw->mac.num_rar_entries);\n> +               PMD_INIT_LOG(ERR, \"Failed to allocate %u bytes needed to\n> store \"\n> +                            \"MAC addresses\",\n> +                            ETHER_ADDR_LEN * hw->mac.num_rar_entries);\n>                 return -ENOMEM;\n>         }\n>         /* Copy the permanent MAC address */\n> @@ -825,9 +826,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct\n> eth_driver *eth_drv,\n>         eth_dev->data->hash_mac_addrs = rte_zmalloc(\"ixgbe\",\n> ETHER_ADDR_LEN *\n>                         IXGBE_VMDQ_NUM_UC_MAC, 0);\n>         if (eth_dev->data->hash_mac_addrs == NULL) {\n> -               PMD_INIT_LOG(ERR,\n> -                       \"Failed to allocate %d bytes needed to store MAC\n> addresses\",\n> -                       ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);\n> +               PMD_INIT_LOG(ERR, \"Failed to allocate %d bytes needed to\n> store \"\n> +                            \"MAC addresses\",\n> +                            ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);\n>                 
return -ENOMEM;\n>         }\n>\n> @@ -849,12 +850,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct\n> eth_driver *eth_drv,\n>         IXGBE_WRITE_FLUSH(hw);\n>\n>         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type !=\n> ixgbe_sfp_type_not_present)\n> -               PMD_INIT_LOG(DEBUG,\n> -                            \"MAC: %d, PHY: %d, SFP+: %d<n\",\n> +               PMD_INIT_LOG(DEBUG, \"MAC: %d, PHY: %d, SFP+: %d\",\n>                              (int) hw->mac.type, (int) hw->phy.type,\n>                              (int) hw->phy.sfp_type);\n>         else\n> -               PMD_INIT_LOG(DEBUG, \"MAC: %d, PHY: %d\\n\",\n> +               PMD_INIT_LOG(DEBUG, \"MAC: %d, PHY: %d\",\n>                              (int) hw->mac.type, (int) hw->phy.type);\n>\n>         PMD_INIT_LOG(DEBUG, \"port %d vendorID=0x%x deviceID=0x%x\",\n> @@ -933,7 +933,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct\n> eth_driver *eth_drv,\n>\n> IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);\n>         struct ether_addr *perm_addr = (struct ether_addr *)\n> hw->mac.perm_addr;\n>\n> -       PMD_INIT_LOG(DEBUG, \"eth_ixgbevf_dev_init\");\n> +       PMD_INIT_FUNC_TRACE();\n>\n>         eth_dev->dev_ops = &ixgbevf_eth_dev_ops;\n>         eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;\n> @@ -963,7 +963,8 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct\n> eth_driver *eth_drv,\n>         /* Initialize the shared code (base driver) */\n>         diag = ixgbe_init_shared_code(hw);\n>         if (diag != IXGBE_SUCCESS) {\n> -               PMD_INIT_LOG(ERR, \"Shared code init failed for ixgbevf:\n> %d\", diag);\n> +               PMD_INIT_LOG(ERR, \"Shared code init failed for ixgbevf:\n> %d\",\n> +                            diag);\n>                 return -EIO;\n>         }\n>\n> @@ -996,10 +997,9 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct\n> eth_driver *eth_drv,\n>         eth_dev->data->mac_addrs = rte_zmalloc(\"ixgbevf\", ETHER_ADDR_LEN *\n>                         hw->mac.num_rar_entries, 0);\n>         if (eth_dev->data->mac_addrs == NULL) {\n> -               PMD_INIT_LOG(ERR,\n> -                       \"Failed to allocate %u bytes needed to store \"\n> -                       \"MAC addresses\",\n> -                       ETHER_ADDR_LEN * hw->mac.num_rar_entries);\n> +               PMD_INIT_LOG(ERR, \"Failed to allocate %u bytes needed to\n> store \"\n> +                            \"MAC addresses\",\n> +                            ETHER_ADDR_LEN * hw->mac.num_rar_entries);\n>                 return -ENOMEM;\n>         }\n>\n> @@ -1033,13 +1033,14 @@ eth_ixgbevf_dev_init(__attribute__((unused))\n> struct eth_driver *eth_drv,\n>                         break;\n>\n>                 default:\n> -                       PMD_INIT_LOG(ERR, \"VF Initialization Failure: %d\",\n> diag);\n> +                       PMD_INIT_LOG(ERR, \"VF Initialization Failure: %d\",\n> +                                    diag);\n>                         return (-EIO);\n>         }\n>\n> -       PMD_INIT_LOG(DEBUG, \"\\nport %d vendorID=0x%x deviceID=0x%x\n> mac.type=%s\\n\",\n> -                        eth_dev->data->port_id, pci_dev->id.vendor_id,\n> pci_dev->id.device_id,\n> -                        \"ixgbe_mac_82599_vf\");\n> +       PMD_INIT_LOG(DEBUG, \"port %d vendorID=0x%x deviceID=0x%x\n> mac.type=%s\",\n> +                    eth_dev->data->port_id, pci_dev->id.vendor_id,\n> +                    pci_dev->id.device_id, \"ixgbe_mac_82599_vf\");\n>\n>         return 0;\n>  }\n> @@ 
-1416,8 +1417,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)\n>\n>         /* IXGBE devices don't support half duplex */\n>         if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&\n> -                       (dev->data->dev_conf.link_duplex !=\n> ETH_LINK_FULL_DUPLEX)) {\n> -               PMD_INIT_LOG(ERR, \"Invalid link_duplex (%hu) for port\n> %hhu\\n\",\n> +           (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {\n> +               PMD_INIT_LOG(ERR, \"Invalid link_duplex (%hu) for port\n> %hhu\",\n>                                 dev->data->dev_conf.link_duplex,\n>                                 dev->data->port_id);\n>                 return -EINVAL;\n> @@ -1443,7 +1444,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)\n>         /* This can fail when allocating mbufs for descriptor rings */\n>         err = ixgbe_dev_rx_init(dev);\n>         if (err) {\n> -               PMD_INIT_LOG(ERR, \"Unable to initialize RX hardware\\n\");\n> +               PMD_INIT_LOG(ERR, \"Unable to initialize RX hardware\");\n>                 goto error;\n>         }\n>\n> @@ -1490,9 +1491,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)\n>                 speed = IXGBE_LINK_SPEED_10GB_FULL;\n>                 break;\n>         default:\n> -               PMD_INIT_LOG(ERR, \"Invalid link_speed (%hu) for port\n> %hhu\\n\",\n> -                               dev->data->dev_conf.link_speed,\n> -                               dev->data->port_id);\n> +               PMD_INIT_LOG(ERR, \"Invalid link_speed (%hu) for port %hhu\",\n> +                            dev->data->dev_conf.link_speed,\n> +                            dev->data->port_id);\n>                 goto error;\n>         }\n>\n> @@ -1598,10 +1599,8 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)\n>  #ifdef RTE_NIC_BYPASS\n>                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {\n>                         /* Not suported in bypass mode */\n> -                       PMD_INIT_LOG(ERR,\n> -                               \"\\nSet link up is not supported \"\n> -                               \"by device id 0x%x\\n\",\n> -                               hw->device_id);\n> +                       PMD_INIT_LOG(ERR, \"Set link up is not supported \"\n> +                                    \"by device id 0x%x\", hw->device_id);\n>                         return -ENOTSUP;\n>                 }\n>  #endif\n> @@ -1610,8 +1609,8 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)\n>                 return 0;\n>         }\n>\n> -       PMD_INIT_LOG(ERR, \"\\nSet link up is not supported by device id\n> 0x%x\\n\",\n> -               hw->device_id);\n> +       PMD_INIT_LOG(ERR, \"Set link up is not supported by device id 0x%x\",\n> +                    hw->device_id);\n>         return -ENOTSUP;\n>  }\n>\n> @@ -1627,10 +1626,8 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)\n>  #ifdef RTE_NIC_BYPASS\n>                 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {\n>                         /* Not suported in bypass mode */\n> -                       PMD_INIT_LOG(ERR,\n> -                               \"\\nSet link down is not supported \"\n> -                               \"by device id 0x%x\\n\",\n> -                                hw->device_id);\n> +                       PMD_INIT_LOG(ERR, \"Set link down is not supported \"\n> +                                    \"by device id 0x%x\", hw->device_id);\n>                         return -ENOTSUP;\n>                 }\n>  #endif\n> @@ -1639,9 +1636,8 @@ 
ixgbe_dev_set_link_down(struct rte_eth_dev *dev)\n>                 return 0;\n>         }\n>\n> -       PMD_INIT_LOG(ERR,\n> -               \"\\nSet link down is not supported by device id 0x%x\\n\",\n> -                hw->device_id);\n> +       PMD_INIT_LOG(ERR, \"Set link down is not supported by device id\n> 0x%x\",\n> +                    hw->device_id);\n>         return -ENOTSUP;\n>  }\n>\n> @@ -2179,7 +2175,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)\n>         struct rte_eth_link link;\n>         int intr_enable_delay = false;\n>\n> -       PMD_DRV_LOG(DEBUG, \"intr action type %d\\n\", intr->flags);\n> +       PMD_DRV_LOG(DEBUG, \"intr action type %d\", intr->flags);\n>\n>         if (intr->flags & IXGBE_FLAG_MAILBOX) {\n>                 ixgbe_pf_mbx_process(dev);\n> @@ -2209,7 +2205,8 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)\n>\n>         if (intr_enable_delay) {\n>                 if (rte_eal_alarm_set(timeout * 1000,\n> -                                     ixgbe_dev_interrupt_delayed_handler,\n> (void*)dev) < 0)\n> +                                     ixgbe_dev_interrupt_delayed_handler,\n> +                                     (void *)dev) < 0)\n>                         PMD_DRV_LOG(ERR, \"Error setting alarm\");\n>         } else {\n>                 PMD_DRV_LOG(DEBUG, \"enable intr immediately\");\n> @@ -2256,7 +2253,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)\n>                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);\n>         }\n>\n> -       PMD_DRV_LOG(DEBUG, \"enable intr in delayed handler S[%08x]\\n\",\n> eicr);\n> +       PMD_DRV_LOG(DEBUG, \"enable intr in delayed handler S[%08x]\", eicr);\n>         ixgbe_enable_intr(dev);\n>         rte_intr_enable(&(dev->pci_dev->intr_handle));\n>  }\n> @@ -2370,7 +2367,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct\n> rte_eth_fc_conf *fc_conf)\n>         if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)\n>                 return -ENOTSUP;\n>         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));\n> -       PMD_INIT_LOG(DEBUG, \"Rx packet buffer size = 0x%x \\n\",\n> rx_buf_size);\n> +       PMD_INIT_LOG(DEBUG, \"Rx packet buffer size = 0x%x\", rx_buf_size);\n>\n>         /*\n>          * At least reserve one Ethernet frame for watermark\n> @@ -2379,8 +2376,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct\n> rte_eth_fc_conf *fc_conf)\n>         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >>\n> IXGBE_RXPBSIZE_SHIFT;\n>         if ((fc_conf->high_water > max_high_water) ||\n>                 (fc_conf->high_water < fc_conf->low_water)) {\n> -               PMD_INIT_LOG(ERR, \"Invalid high/low water setup value in\n> KB\\n\");\n> -               PMD_INIT_LOG(ERR, \"High_water must <=  0x%x\\n\",\n> max_high_water);\n> +               PMD_INIT_LOG(ERR, \"Invalid high/low water setup value in\n> KB\");\n> +               PMD_INIT_LOG(ERR, \"High_water must <= 0x%x\",\n> max_high_water);\n>                 return (-EINVAL);\n>         }\n>\n> @@ -2412,7 +2409,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct\n> rte_eth_fc_conf *fc_conf)\n>                 return 0;\n>         }\n>\n> -       PMD_INIT_LOG(ERR, \"ixgbe_fc_enable = 0x%x \\n\", err);\n> +       PMD_INIT_LOG(ERR, \"ixgbe_fc_enable = 0x%x\", err);\n>         return -EIO;\n>  }\n>\n> @@ -2442,13 +2439,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw\n> *hw,uint8_t tc_num)\n>         if (hw->fc.current_mode & ixgbe_fc_tx_pause) {\n>                  /* High/Low water can not be 
0 */\n>                 if( (!hw->fc.high_water[tc_num])||\n> (!hw->fc.low_water[tc_num])) {\n> -                       PMD_INIT_LOG(ERR,\"Invalid water mark\n> configuration\\n\");\n> +                       PMD_INIT_LOG(ERR, \"Invalid water mark\n> configuration\");\n>                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;\n>                         goto out;\n>                 }\n>\n>                 if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {\n> -                       PMD_INIT_LOG(ERR,\"Invalid water mark\n> configuration\\n\");\n> +                       PMD_INIT_LOG(ERR, \"Invalid water mark\n> configuration\");\n>                         ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;\n>                         goto out;\n>                 }\n> @@ -2592,7 +2589,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev\n> *dev, struct rte_eth_pfc_conf *p\n>         ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);\n>         tc_num = map[pfc_conf->priority];\n>         rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));\n> -       PMD_INIT_LOG(DEBUG, \"Rx packet buffer size = 0x%x \\n\",\n> rx_buf_size);\n> +       PMD_INIT_LOG(DEBUG, \"Rx packet buffer size = 0x%x\", rx_buf_size);\n>         /*\n>          * At least reserve one Ethernet frame for watermark\n>          * high_water/low_water in kilo bytes for ixgbe\n> @@ -2600,8 +2597,8 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev\n> *dev, struct rte_eth_pfc_conf *p\n>         max_high_water = (rx_buf_size - ETHER_MAX_LEN) >>\n> IXGBE_RXPBSIZE_SHIFT;\n>         if ((pfc_conf->fc.high_water > max_high_water) ||\n>                 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {\n> -               PMD_INIT_LOG(ERR, \"Invalid high/low water setup value in\n> KB\\n\");\n> -               PMD_INIT_LOG(ERR, \"High_water must <=  0x%x\\n\",\n> max_high_water);\n> +               PMD_INIT_LOG(ERR, \"Invalid high/low water setup value in\n> KB\");\n> +               PMD_INIT_LOG(ERR, \"High_water must <= 0x%x\",\n> max_high_water);\n>                 return (-EINVAL);\n>         }\n>\n> @@ -2617,7 +2614,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev\n> *dev, struct rte_eth_pfc_conf *p\n>         if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))\n>                 return 0;\n>\n> -       PMD_INIT_LOG(ERR, \"ixgbe_dcb_pfc_enable = 0x%x \\n\", err);\n> +       PMD_INIT_LOG(ERR, \"ixgbe_dcb_pfc_enable = 0x%x\", err);\n>         return -EIO;\n>  }\n>\n> @@ -2764,7 +2761,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t\n> mtu)\n>  static void\n>  ixgbevf_intr_disable(struct ixgbe_hw *hw)\n>  {\n> -       PMD_INIT_LOG(DEBUG, \"ixgbevf_intr_disable\");\n> +       PMD_INIT_FUNC_TRACE();\n>\n>         /* Clear interrupt mask to stop from interrupts being generated */\n>         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);\n> @@ -2777,8 +2774,8 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)\n>  {\n>         struct rte_eth_conf* conf = &dev->data->dev_conf;\n>\n> -       PMD_INIT_LOG(DEBUG, \"\\nConfigured Virtual Function port id: %d\\n\",\n> -               dev->data->port_id);\n> +       PMD_INIT_LOG(DEBUG, \"Configured Virtual Function port id: %d\",\n> +                    dev->data->port_id);\n>\n>         /*\n>          * VF has no ability to enable/disable HW CRC\n> @@ -2786,12 +2783,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)\n>          */\n>  #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC\n>         if (!conf->rxmode.hw_strip_crc) {\n> 
-               PMD_INIT_LOG(INFO, \"VF can't disable HW CRC Strip\\n\");\n> +               PMD_INIT_LOG(INFO, \"VF can't disable HW CRC Strip\");\n>                 conf->rxmode.hw_strip_crc = 1;\n>         }\n>  #else\n>         if (conf->rxmode.hw_strip_crc) {\n> -               PMD_INIT_LOG(INFO, \"VF can't enable HW CRC Strip\\n\");\n> +               PMD_INIT_LOG(INFO, \"VF can't enable HW CRC Strip\");\n>                 conf->rxmode.hw_strip_crc = 0;\n>         }\n>  #endif\n> @@ -2806,7 +2803,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)\n>                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n>         int err, mask = 0;\n>\n> -       PMD_INIT_LOG(DEBUG, \"ixgbevf_dev_start\");\n> +       PMD_INIT_FUNC_TRACE();\n>\n>         hw->mac.ops.reset_hw(hw);\n>\n> @@ -2818,7 +2815,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)\n>         /* This can fail when allocating mbufs for descriptor rings */\n>         err = ixgbevf_dev_rx_init(dev);\n>         if (err) {\n> -               PMD_INIT_LOG(ERR, \"Unable to initialize RX hardware\n> (%d)\\n\", err);\n> +               PMD_INIT_LOG(ERR, \"Unable to initialize RX hardware (%d)\",\n> err);\n>                 ixgbe_dev_clear_queues(dev);\n>                 return err;\n>         }\n> @@ -2841,7 +2838,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)\n>  {\n>         struct ixgbe_hw *hw =\n> IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n>\n> -       PMD_INIT_LOG(DEBUG, \"ixgbevf_dev_stop\");\n> +       PMD_INIT_FUNC_TRACE();\n>\n>         hw->adapter_stopped = TRUE;\n>         ixgbe_stop_adapter(hw);\n> @@ -2860,7 +2857,7 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)\n>  {\n>         struct ixgbe_hw *hw =\n> IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n>\n> -       PMD_INIT_LOG(DEBUG, \"ixgbevf_dev_close\");\n> +       PMD_INIT_FUNC_TRACE();\n>\n>         ixgbe_reset_hw(hw);\n>\n> @@ -2969,7 +2966,7 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)\n>         /* we only need to do this if VMDq is enabled */\n>         reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);\n>         if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {\n> -               PMD_INIT_LOG(ERR, \"VMDq must be enabled for this\n> setting\\n\");\n> +               PMD_INIT_LOG(ERR, \"VMDq must be enabled for this setting\");\n>                 return (-1);\n>         }\n>\n> @@ -3098,7 +3095,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,\n> uint16_t pool,\n>\n>         if (hw->mac.type == ixgbe_mac_82598EB) {\n>                 PMD_INIT_LOG(ERR, \"setting VF receive mode set should be\n> done\"\n> -                       \" on 82599 hardware and newer\\n\");\n> +                            \" on 82599 hardware and newer\");\n>                 return (-ENOTSUP);\n>         }\n>         if (ixgbe_vmdq_mode_check(hw) < 0)\n> @@ -3513,8 +3510,7 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev,\n> uint32_t index)\n>                         continue;\n>                 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);\n>                 if (diag != 0)\n> -                       PMD_DRV_LOG(ERR,\n> -                                   \"Adding again MAC address \"\n> +                       PMD_DRV_LOG(ERR, \"Adding again MAC address \"\n>                                     \"%02x:%02x:%02x:%02x:%02x:%02x failed \"\n>                                     \"diag=%d\",\n>                                     mac_addr->addr_bytes[0],\n> diff --git a/lib/librte_pmd_ixgbe/ixgbe_fdir.c\n> b/lib/librte_pmd_ixgbe/ixgbe_fdir.c\n> index 6c0a530..8819aac 100644\n> --- 
a/lib/librte_pmd_ixgbe/ixgbe_fdir.c\n> +++ b/lib/librte_pmd_ixgbe/ixgbe_fdir.c\n> @@ -112,7 +112,7 @@ static void fdir_enable_82599(struct ixgbe_hw *hw, u32\n> fdirctrl)\n>         }\n>\n>         if (i >= IXGBE_FDIR_INIT_DONE_POLL)\n> -               PMD_INIT_LOG(WARNING, \"Flow Director poll time\n> exceeded!\\n\");\n> +               PMD_INIT_LOG(WARNING, \"Flow Director poll time exceeded!\");\n>  }\n>\n>  /*\n> @@ -381,7 +381,7 @@ fdir_add_signature_filter_82599(struct ixgbe_hw *hw,\n>         fdirhashcmd |= fdirhash;\n>         IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);\n>\n> -       PMD_INIT_LOG(DEBUG, \"Tx Queue=%x hash=%x\\n\", queue,\n> (u32)fdirhashcmd);\n> +       PMD_INIT_LOG(DEBUG, \"Tx Queue=%x hash=%x\", queue,\n> (u32)fdirhashcmd);\n>  }\n>\n>  /*\n> @@ -614,7 +614,7 @@ fdir_set_input_mask_82599(struct ixgbe_hw *hw,\n>                 /* use the L4 protocol mask for raw IPv4/IPv6 traffic */\n>                 fdirm |= IXGBE_FDIRM_L4P;\n>                 if (input_mask->dst_port_mask ||\n> input_mask->src_port_mask) {\n> -                       PMD_INIT_LOG(ERR, \" Error on src/dst port mask\\n\");\n> +                       PMD_INIT_LOG(ERR, \" Error on src/dst port mask\");\n>                         return -EINVAL;\n>                 }\n>         }\n> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\n> b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\n> index 46962bc..981df60 100644\n> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\n> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c\n> @@ -490,8 +490,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)\n>         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;\n>         if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))\n>         {\n> -               PMD_TX_FREE_LOG(DEBUG,\n> -                               \"TX descriptor %4u is not done\"\n> +               PMD_TX_FREE_LOG(DEBUG, \"TX descriptor %4u is not done\"\n>                                 \"(port=%d queue=%d)\",\n>                                 desc_to_clean_to,\n>                                 txq->port_id, txq->queue_id);\n> @@ -507,8 +506,7 @@ ixgbe_xmit_cleanup(struct igb_tx_queue *txq)\n>                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -\n>                                                 last_desc_cleaned);\n>\n> -       PMD_TX_FREE_LOG(DEBUG,\n> -                       \"Cleaning %4u TX descriptors: %4u to %4u \"\n> +       PMD_TX_FREE_LOG(DEBUG, \"Cleaning %4u TX descriptors: %4u to %4u \"\n>                         \"(port=%d queue=%d)\",\n>                         nb_tx_to_clean, last_desc_cleaned,\n> desc_to_clean_to,\n>                         txq->port_id, txq->queue_id);\n> @@ -614,7 +612,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf\n> **tx_pkts,\n>                         tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);\n>\n>                 PMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u pktlen=%u\"\n> -                          \" tx_first=%u tx_last=%u\\n\",\n> +                          \" tx_first=%u tx_last=%u\",\n>                            (unsigned) txq->port_id,\n>                            (unsigned) txq->queue_id,\n>                            (unsigned) pkt_len,\n> @@ -627,8 +625,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf\n> **tx_pkts,\n>                  * nb_used better be less than or equal to\n> txq->tx_rs_thresh\n>                  */\n>                 if (nb_used > txq->nb_tx_free) {\n> -                       PMD_TX_FREE_LOG(DEBUG,\n> -                                       \"Not enough free TX 
descriptors \"\n> +                       PMD_TX_FREE_LOG(DEBUG, \"Not enough free TX\n> descriptors \"\n>                                         \"nb_used=%4u nb_free=%4u \"\n>                                         \"(port=%d queue=%d)\",\n>                                         nb_used, txq->nb_tx_free,\n> @@ -1066,7 +1063,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf\n> **rx_pkts,\n>                 if (ixgbe_rx_alloc_bufs(rxq) != 0) {\n>                         int i, j;\n>                         PMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u\n> \"\n> -                                  \"queue_id=%u\\n\", (unsigned)\n> rxq->port_id,\n> +                                  \"queue_id=%u\", (unsigned) rxq->port_id,\n>                                    (unsigned) rxq->queue_id);\n>\n>\n> rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=\n> @@ -1193,7 +1190,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf\n> **rx_pkts,\n>                  * frames to its peer(s).\n>                  */\n>                 PMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_id=%u \"\n> -                          \"ext_err_stat=0x%08x pkt_len=%u\\n\",\n> +                          \"ext_err_stat=0x%08x pkt_len=%u\",\n>                            (unsigned) rxq->port_id, (unsigned)\n> rxq->queue_id,\n>                            (unsigned) rx_id, (unsigned) staterr,\n>                            (unsigned)\n> rte_le_to_cpu_16(rxd.wb.upper.length));\n> @@ -1201,7 +1198,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf\n> **rx_pkts,\n>                 nmb = rte_rxmbuf_alloc(rxq->mb_pool);\n>                 if (nmb == NULL) {\n>                         PMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u\n> \"\n> -                                  \"queue_id=%u\\n\", (unsigned)\n> rxq->port_id,\n> +                                  \"queue_id=%u\", (unsigned) rxq->port_id,\n>                                    (unsigned) rxq->queue_id);\n>\n> rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;\n>                         break;\n> @@ -1296,7 +1293,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf\n> **rx_pkts,\n>         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);\n>         if (nb_hold > rxq->rx_free_thresh) {\n>                 PMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_tail=%u \"\n> -                          \"nb_hold=%u nb_rx=%u\\n\",\n> +                          \"nb_hold=%u nb_rx=%u\",\n>                            (unsigned) rxq->port_id, (unsigned)\n> rxq->queue_id,\n>                            (unsigned) rx_id, (unsigned) nb_hold,\n>                            (unsigned) nb_rx);\n> @@ -1383,8 +1380,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct\n> rte_mbuf **rx_pkts,\n>                  * to happen by sending specific \"back-pressure\" flow\n> control\n>                  * frames to its peer(s).\n>                  */\n> -               PMD_RX_LOG(DEBUG, \"\\nport_id=%u queue_id=%u rx_id=%u \"\n> -                          \"staterr=0x%x data_len=%u\\n\",\n> +               PMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_id=%u \"\n> +                          \"staterr=0x%x data_len=%u\",\n>                            (unsigned) rxq->port_id, (unsigned)\n> rxq->queue_id,\n>                            (unsigned) rx_id, (unsigned) staterr,\n>                            (unsigned)\n> rte_le_to_cpu_16(rxd.wb.upper.length));\n> @@ -1392,7 +1389,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct\n> rte_mbuf **rx_pkts,\n>                 nmb = 
rte_rxmbuf_alloc(rxq->mb_pool);\n>                 if (nmb == NULL) {\n>                         PMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u\n> \"\n> -                                  \"queue_id=%u\\n\", (unsigned)\n> rxq->port_id,\n> +                                  \"queue_id=%u\", (unsigned) rxq->port_id,\n>                                    (unsigned) rxq->queue_id);\n>\n> rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;\n>                         break;\n> @@ -1561,7 +1558,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct\n> rte_mbuf **rx_pkts,\n>         nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);\n>         if (nb_hold > rxq->rx_free_thresh) {\n>                 PMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_tail=%u \"\n> -                          \"nb_hold=%u nb_rx=%u\\n\",\n> +                          \"nb_hold=%u nb_rx=%u\",\n>                            (unsigned) rxq->port_id, (unsigned)\n> rxq->queue_id,\n>                            (unsigned) rx_id, (unsigned) nb_hold,\n>                            (unsigned) nb_rx);\n> @@ -1767,7 +1764,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n>         if (tx_rs_thresh >= (nb_desc - 2)) {\n>                 PMD_INIT_LOG(ERR, \"tx_rs_thresh must be less than the\n> number \"\n>                              \"of TX descriptors minus 2. (tx_rs_thresh=%u \"\n> -                            \"port=%d queue=%d)\\n\", (unsigned\n> int)tx_rs_thresh,\n> +                            \"port=%d queue=%d)\", (unsigned\n> int)tx_rs_thresh,\n>                              (int)dev->data->port_id, (int)queue_idx);\n>                 return -(EINVAL);\n>         }\n> @@ -1775,7 +1772,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n>                 PMD_INIT_LOG(ERR, \"tx_rs_thresh must be less than the \"\n>                              \"tx_free_thresh must be less than the number\n> of \"\n>                              \"TX descriptors minus 3. (tx_free_thresh=%u \"\n> -                            \"port=%d queue=%d)\\n\",\n> +                            \"port=%d queue=%d)\",\n>                              (unsigned int)tx_free_thresh,\n>                              (int)dev->data->port_id, (int)queue_idx);\n>                 return -(EINVAL);\n> @@ -1783,7 +1780,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n>         if (tx_rs_thresh > tx_free_thresh) {\n>                 PMD_INIT_LOG(ERR, \"tx_rs_thresh must be less than or equal\n> to \"\n>                              \"tx_free_thresh. (tx_free_thresh=%u \"\n> -                            \"tx_rs_thresh=%u port=%d queue=%d)\\n\",\n> +                            \"tx_rs_thresh=%u port=%d queue=%d)\",\n>                              (unsigned int)tx_free_thresh,\n>                              (unsigned int)tx_rs_thresh,\n>                              (int)dev->data->port_id,\n> @@ -1793,7 +1790,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n>         if ((nb_desc % tx_rs_thresh) != 0) {\n>                 PMD_INIT_LOG(ERR, \"tx_rs_thresh must be a divisor of the \"\n>                              \"number of TX descriptors. 
(tx_rs_thresh=%u \"\n> -                            \"port=%d queue=%d)\\n\", (unsigned\n> int)tx_rs_thresh,\n> +                            \"port=%d queue=%d)\", (unsigned\n> int)tx_rs_thresh,\n>                              (int)dev->data->port_id, (int)queue_idx);\n>                 return -(EINVAL);\n>         }\n> @@ -1807,7 +1804,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n>         if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {\n>                 PMD_INIT_LOG(ERR, \"TX WTHRESH must be set to 0 if \"\n>                              \"tx_rs_thresh is greater than 1.\n> (tx_rs_thresh=%u \"\n> -                            \"port=%d queue=%d)\\n\", (unsigned\n> int)tx_rs_thresh,\n> +                            \"port=%d queue=%d)\", (unsigned\n> int)tx_rs_thresh,\n>                              (int)dev->data->port_id, (int)queue_idx);\n>                 return -(EINVAL);\n>         }\n> @@ -1873,26 +1870,32 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n>                 ixgbe_tx_queue_release(txq);\n>                 return (-ENOMEM);\n>         }\n> -       PMD_INIT_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\"PRIx64\"\\n\",\n> +       PMD_INIT_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\"PRIx64,\n>                      txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);\n>\n>         /* Use a simple Tx queue (no offloads, no multi segs) if possible\n> */\n>         if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)\n> &&\n>             (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {\n> -               PMD_INIT_LOG(INFO, \"Using simple tx code path\\n\");\n> +               PMD_INIT_LOG(INFO, \"Using simple tx code path\");\n>  #ifdef RTE_IXGBE_INC_VECTOR\n>                 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&\n>                     ixgbe_txq_vec_setup(txq, socket_id) == 0) {\n> -                       PMD_INIT_LOG(INFO, \"Vector tx enabled.\\n\");\n> +                       PMD_INIT_LOG(INFO, \"Vector tx enabled.\");\n>                         dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;\n>                 }\n>                 else\n>  #endif\n>                         dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;\n>         } else {\n> -               PMD_INIT_LOG(INFO, \"Using full-featured tx code path\\n\");\n> -               PMD_INIT_LOG(INFO, \" - txq_flags = %lx\n> [IXGBE_SIMPLE_FLAGS=%lx]\\n\", (long unsigned)txq->txq_flags, (long\n> unsigned)IXGBE_SIMPLE_FLAGS);\n> -               PMD_INIT_LOG(INFO, \" - tx_rs_thresh = %lu\n> [RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\\n\", (long unsigned)txq->tx_rs_thresh,\n> (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);\n> +               PMD_INIT_LOG(INFO, \"Using full-featured tx code path\");\n> +               PMD_INIT_LOG(INFO, \" - txq_flags = %lx \"\n> +                            \"[IXGBE_SIMPLE_FLAGS=%lx]\",\n> +                            (long unsigned)txq->txq_flags,\n> +                            (long unsigned)IXGBE_SIMPLE_FLAGS);\n> +               PMD_INIT_LOG(INFO, \" - tx_rs_thresh = %lu \"\n> +                            \"[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\",\n> +                            (long unsigned)txq->tx_rs_thresh,\n> +                            (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);\n>                 dev->tx_pkt_burst = ixgbe_xmit_pkts;\n>         }\n>\n> @@ -2152,7 +2155,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n>                 ixgbe_rx_queue_release(rxq);\n>                 return (-ENOMEM);\n>         }\n> -       
PMD_INIT_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\"PRIx64\"\\n\",\n> +       PMD_INIT_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\"PRIx64,\n>                      rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);\n>\n>         /*\n> @@ -2166,13 +2169,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n>  #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC\n>                 PMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions are\n> \"\n>                              \"satisfied. Rx Burst Bulk Alloc function will\n> be \"\n> -                            \"used on port=%d, queue=%d.\\n\",\n> +                            \"used on port=%d, queue=%d.\",\n>                              rxq->port_id, rxq->queue_id);\n>                 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;\n>  #ifdef RTE_IXGBE_INC_VECTOR\n>                 if (!ixgbe_rx_vec_condition_check(dev)) {\n>                         PMD_INIT_LOG(INFO, \"Vector rx enabled, please make\n> \"\n> -                                    \"sure RX burst size no less than\n> 32.\\n\");\n> +                                    \"sure RX burst size no less than\n> 32.\");\n>                         ixgbe_rxq_vec_setup(rxq, socket_id);\n>                         dev->rx_pkt_burst = ixgbe_recv_pkts_vec;\n>                 }\n> @@ -2182,7 +2185,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n>                 PMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions \"\n>                              \"are not satisfied, Scattered Rx is\n> requested, \"\n>                              \"or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is\n> not \"\n> -                            \"enabled (port=%d, queue=%d).\\n\",\n> +                            \"enabled (port=%d, queue=%d).\",\n>                              rxq->port_id, rxq->queue_id);\n>         }\n>         dev->data->rx_queues[queue_idx] = rxq;\n> @@ -2201,7 +2204,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,\n> uint16_t rx_queue_id)\n>         uint32_t desc = 0;\n>\n>         if (rx_queue_id >= dev->data->nb_rx_queues) {\n> -               PMD_RX_LOG(ERR, \"Invalid RX queue id=%d\\n\", rx_queue_id);\n> +               PMD_RX_LOG(ERR, \"Invalid RX queue id=%d\", rx_queue_id);\n>                 return 0;\n>         }\n>\n> @@ -2917,7 +2920,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,\n>                 ixgbe_dcb_rx_hw_config(hw, dcb_config);\n>                 break;\n>         default:\n> -               PMD_INIT_LOG(ERR, \"Incorrect DCB RX mode configuration\\n\");\n> +               PMD_INIT_LOG(ERR, \"Incorrect DCB RX mode configuration\");\n>                 break;\n>         }\n>         switch (dev->data->dev_conf.txmode.mq_mode) {\n> @@ -2939,7 +2942,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,\n>                 ixgbe_dcb_tx_hw_config(hw, dcb_config);\n>                 break;\n>         default:\n> -               PMD_INIT_LOG(ERR, \"Incorrect DCB TX mode configuration\\n\");\n> +               PMD_INIT_LOG(ERR, \"Incorrect DCB TX mode configuration\");\n>                 break;\n>         }\n>\n> @@ -3210,7 +3213,7 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)\n>                 volatile union ixgbe_adv_rx_desc *rxd;\n>                 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);\n>                 if (mbuf == NULL) {\n> -                       PMD_INIT_LOG(ERR, \"RX mbuf alloc failed\n> queue_id=%u\\n\",\n> +                       PMD_INIT_LOG(ERR, \"RX mbuf alloc failed\n> queue_id=%u\",\n>                            
          (unsigned) rxq->queue_id);\n>                         return (-ENOMEM);\n>                 }\n> @@ -3282,7 +3285,7 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)\n>                         IXGBE_WRITE_REG(hw, IXGBE_MRQC,\n> IXGBE_MRQC_VMDQRT8TCEN);\n>                         break;\n>                 default:\n> -                       PMD_INIT_LOG(ERR, \"invalid pool number in IOV\n> mode\\n\");\n> +                       PMD_INIT_LOG(ERR, \"invalid pool number in IOV\n> mode\");\n>                 }\n>         }\n>\n> @@ -3335,7 +3338,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)\n>                         break;\n>                 default:\n>                         mtqc = IXGBE_MTQC_64Q_1PB;\n> -                       PMD_INIT_LOG(ERR, \"invalid pool number in IOV\n> mode\\n\");\n> +                       PMD_INIT_LOG(ERR, \"invalid pool number in IOV\n> mode\");\n>                 }\n>                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);\n>         }\n> @@ -3603,7 +3606,7 @@ ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)\n>         if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {\n>                 if (hw->mac.ops.acquire_swfw_sync(hw,\n> IXGBE_GSSR_MAC_CSR_SM) !=\n>                                 IXGBE_SUCCESS) {\n> -                       PMD_INIT_LOG(ERR, \"Could not enable loopback\n> mode\\n\");\n> +                       PMD_INIT_LOG(ERR, \"Could not enable loopback\n> mode\");\n>                         /* ignore error */\n>                         return;\n>                 }\n> @@ -3699,7 +3702,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev,\n> uint16_t rx_queue_id)\n>                 /* Allocate buffers for descriptor rings */\n>                 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {\n>                         PMD_INIT_LOG(ERR,\n> -                               \"Could not alloc mbuf for queue:%d\\n\",\n> +                               \"Could not alloc mbuf for queue:%d\",\n>                                 rx_queue_id);\n>                         return -1;\n>                 }\n> @@ -3715,7 +3718,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev,\n> uint16_t rx_queue_id)\n>                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));\n>                 if (!poll_ms)\n>                         PMD_INIT_LOG(ERR, \"Could not enable \"\n> -                                    \"Rx Queue %d\\n\", rx_queue_id);\n> +                                    \"Rx Queue %d\", rx_queue_id);\n>                 rte_wmb();\n>                 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);\n>                 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx),\n> rxq->nb_rx_desc - 1);\n> @@ -3754,7 +3757,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev,\n> uint16_t rx_queue_id)\n>                 } while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));\n>                 if (!poll_ms)\n>                         PMD_INIT_LOG(ERR, \"Could not disable \"\n> -                                    \"Rx Queue %d\\n\", rx_queue_id);\n> +                                    \"Rx Queue %d\", rx_queue_id);\n>\n>                 rte_delay_us(RTE_IXGBE_WAIT_100_US);\n>\n> @@ -3797,7 +3800,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev,\n> uint16_t tx_queue_id)\n>                         } while (--poll_ms && !(txdctl &\n> IXGBE_TXDCTL_ENABLE));\n>                         if (!poll_ms)\n>                                 PMD_INIT_LOG(ERR, \"Could not enable \"\n> -                                            \"Tx Queue %d\\n\", tx_queue_id);\n> +  
                                          \"Tx Queue %d\", tx_queue_id);\n>                 }\n>                 rte_wmb();\n>                 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);\n> @@ -3838,7 +3841,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev,\n> uint16_t tx_queue_id)\n>                         } while (--poll_ms && (txtdh != txtdt));\n>                         if (!poll_ms)\n>                                 PMD_INIT_LOG(ERR,\n> -                               \"Tx Queue %d is not empty when\n> stopping.\\n\",\n> +                               \"Tx Queue %d is not empty when stopping.\",\n>                                 tx_queue_id);\n>                 }\n>\n> @@ -3856,7 +3859,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev,\n> uint16_t tx_queue_id)\n>                         } while (--poll_ms && (txdctl |\n> IXGBE_TXDCTL_ENABLE));\n>                         if (!poll_ms)\n>                                 PMD_INIT_LOG(ERR, \"Could not disable \"\n> -                                            \"Tx Queue %d\\n\", tx_queue_id);\n> +                                            \"Tx Queue %d\", tx_queue_id);\n>                 }\n>\n>                 if (txq->ops != NULL) {\n> @@ -4073,7 +4076,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)\n>                 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));\n>                 if (!poll_ms)\n>                         PMD_INIT_LOG(ERR, \"Could not enable \"\n> -                                        \"Tx Queue %d\\n\", i);\n> +                                        \"Tx Queue %d\", i);\n>         }\n>         for (i = 0; i < dev->data->nb_rx_queues; i++) {\n>\n> @@ -4091,7 +4094,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)\n>                 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));\n>                 if (!poll_ms)\n>                         PMD_INIT_LOG(ERR, \"Could not enable \"\n> -                                        \"Rx Queue %d\\n\", i);\n> +                                        \"Rx Queue %d\", i);\n>                 rte_wmb();\n>                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);\n>\n> --\n> 1.7.10.4\n>\n>\nReviewed-By: Jay Rolette<rolette@infiniteio.com>",
        "headers": {
            "Return-Path": "<rolette@infiniteio.com>",
            "MIME-Version": "1.0",
            "X-Mailman-Version": "2.1.15",
            "Date": "Tue, 2 Sep 2014 10:19:57 -0500",
            "X-List-Received-Date": "Tue, 02 Sep 2014 15:15:26 -0000",
            "References": "<1409567080-27083-1-git-send-email-david.marchand@6wind.com>\n\t<1409567080-27083-4-git-send-email-david.marchand@6wind.com>",
            "Content-Type": "text/plain; charset=UTF-8",
            "X-BeenThere": "dev@dpdk.org",
            "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20130820;\n\th=x-gm-message-state:mime-version:in-reply-to:references:date\n\t:message-id:subject:from:to:cc:content-type;\n\tbh=xfTCO9s53Z11GxcexhlrZNIMqngVvxabaAXWaD+PhDA=;\n\tb=MtwHelD4zT/2ahQfTuCjpi2bkGyTU+g4NDZkkfJld4NjxbR9BGQ1sFVedpxxHIVn7o\n\tdflFvLL6/O1CI9bktB32lb0TdeQcTz4imh/cQWTfFaCiQ5TEZPpa2evEY9Ek25cTlwW6\n\tsBHBHz1ylFl2YFOF3Z1YgCJxxcuu5QVfz2/77XA708CLdsn2Sz5kCcvQdg8XU0cleKFP\n\tRDJadmFSMURFUQW3+5LNzP4DzFFCCEmWETMGBID8juM2MnjIpS1iX8OU35x6odmZc01p\n\tZBL6j7N2yXsYeeMLCB4qSZ8s7j3xm+iyzTa1aAGpROo1Wr8Y5awPax+AylLikgnjejLN\n\trv3g==",
            "Received": [
                "from mail-yh0-f49.google.com (mail-yh0-f49.google.com\n\t[209.85.213.49]) by dpdk.org (Postfix) with ESMTP id C74935941\n\tfor <dev@dpdk.org>; Tue,  2 Sep 2014 17:15:25 +0200 (CEST)",
                "by mail-yh0-f49.google.com with SMTP id z6so4381709yhz.36\n\tfor <dev@dpdk.org>; Tue, 02 Sep 2014 08:19:57 -0700 (PDT)",
                "by 10.170.96.213 with HTTP; Tue, 2 Sep 2014 08:19:57 -0700 (PDT)"
            ],
            "Subject": "Re: [dpdk-dev] [PATCH v2 03/17] ixgbe: clean log messages",
            "X-Content-Filtered-By": "Mailman/MimeDel 2.1.15",
            "X-Received": "by 10.236.50.198 with SMTP id z46mr4256964yhb.115.1409671197624; \n\tTue, 02 Sep 2014 08:19:57 -0700 (PDT)",
            "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
            "Message-ID": "<CADNuJVq7F-KDCFAyes_Evoz=vEXzWq=raF+749U+i4usz1H8NA@mail.gmail.com>",
            "Precedence": "list",
            "X-Gm-Message-State": "ALoCoQn0bdlOk7KeHh1r0B+PGs1Y7LzXZPhc2QmBD846PClY0e2NWCLsbDi6EKL9xiyhAPLCMkPb",
            "From": "Jay Rolette <rolette@infiniteio.com>",
            "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
            "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
            "Cc": "\"dev@dpdk.org\" <dev@dpdk.org>",
            "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
            "In-Reply-To": "<1409567080-27083-4-git-send-email-david.marchand@6wind.com>",
            "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
            "List-Post": "<mailto:dev@dpdk.org>",
            "To": "David Marchand <david.marchand@6wind.com>"
        }
    }
]
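
A minimal sketch of retrieving this list programmatically, assuming the Python requests library and the patches.dpdk.org instance shown above; the patch ID (276) and the fields accessed (date, subject, submitter) are taken from the example response.

# Sketch only: list the comments on a patch via the endpoint documented above.
import requests

BASE_URL = "http://patches.dpdk.org/api"  # instance from the example; adjust for another Patchwork server
PATCH_ID = 276                            # patch ID from the example request

resp = requests.get(f"{BASE_URL}/patches/{PATCH_ID}/comments/")
resp.raise_for_status()

for comment in resp.json():
    # Each entry carries the comment metadata and the raw email body ("content").
    print(f'{comment["date"]}  {comment["submitter"]["name"]}: {comment["subject"]}')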