@@ -97,7 +97,7 @@ dev_add(const char *dev_name)
FOREACH_DEVICE_ON_PLATFORM_BUS(tmp) {
if (!strcmp(tmp->name, pdev->name)) {
- PLATFORM_LOG(INFO, "device %s already added\n", pdev->name);
+ PLATFORM_LOG_LINE(INFO, "device %s already added", pdev->name);
if (tmp->device.devargs != pdev->device.devargs)
rte_devargs_remove(pdev->device.devargs);
@@ -109,7 +109,7 @@ dev_add(const char *dev_name)
TAILQ_INSERT_HEAD(&platform_bus.device_list, pdev, next);
- PLATFORM_LOG(INFO, "adding device %s to the list\n", dev_name);
+ PLATFORM_LOG_LINE(INFO, "adding device %s to the list", dev_name);
return 0;
}
@@ -161,7 +161,7 @@ platform_bus_scan(void)
dp = opendir(PLATFORM_BUS_DEVICES_PATH);
if (dp == NULL) {
- PLATFORM_LOG(INFO, "failed to open %s\n", PLATFORM_BUS_DEVICES_PATH);
+ PLATFORM_LOG_LINE(INFO, "failed to open %s", PLATFORM_BUS_DEVICES_PATH);
return -errno;
}
@@ -195,7 +195,7 @@ device_map_resource_offset(struct rte_platform_device *pdev, struct rte_platform
if (res->mem.addr == MAP_FAILED)
return -errno;
- PLATFORM_LOG(DEBUG, "adding resource va = %p len = %"PRIu64" name = %s\n", res->mem.addr,
+ PLATFORM_LOG_LINE(DEBUG, "adding resource va = %p len = %"PRIu64" name = %s", res->mem.addr,
res->mem.len, res->name);
return 0;
@@ -271,7 +271,7 @@ device_map_resources(struct rte_platform_device *pdev, unsigned int num)
int ret;
if (num == 0) {
- PLATFORM_LOG(WARNING, "device %s has no resources\n", pdev->name);
+ PLATFORM_LOG_LINE(WARNING, "device %s has no resources", pdev->name);
return 0;
}
@@ -287,7 +287,7 @@ device_map_resources(struct rte_platform_device *pdev, unsigned int num)
ret = ioctl(pdev->dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
if (ret) {
- PLATFORM_LOG(ERR, "failed to get region info at %d\n", i);
+ PLATFORM_LOG_LINE(ERR, "failed to get region info at %d", i);
ret = -errno;
goto out;
}
@@ -297,7 +297,7 @@ device_map_resources(struct rte_platform_device *pdev, unsigned int num)
res->mem.len = reg_info.size;
ret = device_map_resource_offset(pdev, res, reg_info.offset);
if (ret) {
- PLATFORM_LOG(ERR, "failed to ioremap resource at %d\n", i);
+ PLATFORM_LOG_LINE(ERR, "failed to ioremap resource at %d", i);
goto out;
}
@@ -327,7 +327,7 @@ device_setup(struct rte_platform_device *pdev)
ret = rte_vfio_setup_device(PLATFORM_BUS_DEVICES_PATH, name, &pdev->dev_fd, &dev_info);
if (ret) {
- PLATFORM_LOG(ERR, "failed to setup %s\n", name);
+ PLATFORM_LOG_LINE(ERR, "failed to setup %s", name);
return -ENODEV;
}
@@ -342,7 +342,7 @@ device_setup(struct rte_platform_device *pdev)
*/
#ifdef VFIO_DEVICE_FLAGS_PLATFORM
if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
- PLATFORM_LOG(ERR, "device not backed by vfio-platform\n");
+ PLATFORM_LOG_LINE(ERR, "device not backed by vfio-platform");
ret = -ENOTSUP;
goto out;
}
@@ -350,7 +350,7 @@ device_setup(struct rte_platform_device *pdev)
ret = device_map_resources(pdev, dev_info.num_regions);
if (ret) {
- PLATFORM_LOG(ERR, "failed to setup platform resources\n");
+ PLATFORM_LOG_LINE(ERR, "failed to setup platform resources");
goto out;
}
@@ -389,7 +389,7 @@ driver_probe_device(struct rte_platform_driver *pdrv, struct rte_platform_device
iova_mode = rte_eal_iova_mode();
if (pdrv->drv_flags & RTE_PLATFORM_DRV_NEED_IOVA_AS_VA && iova_mode != RTE_IOVA_VA) {
- PLATFORM_LOG(ERR, "driver %s expects VA IOVA mode but current mode is PA\n",
+ PLATFORM_LOG_LINE(ERR, "driver %s expects VA IOVA mode but current mode is PA",
pdrv->driver.name);
return -EINVAL;
}
@@ -462,11 +462,11 @@ platform_bus_probe(void)
FOREACH_DEVICE_ON_PLATFORM_BUS(pdev) {
ret = device_attach(pdev);
if (ret == -EBUSY) {
- PLATFORM_LOG(DEBUG, "device %s already probed\n", pdev->name);
+ PLATFORM_LOG_LINE(DEBUG, "device %s already probed", pdev->name);
continue;
}
if (ret)
- PLATFORM_LOG(ERR, "failed to probe %s\n", pdev->name);
+ PLATFORM_LOG_LINE(ERR, "failed to probe %s", pdev->name);
}
return 0;
@@ -517,7 +517,7 @@ device_release_driver(struct rte_platform_device *pdev)
if (pdrv != NULL && pdrv->remove != NULL) {
ret = pdrv->remove(pdev);
if (ret)
- PLATFORM_LOG(WARNING, "failed to remove %s\n", pdev->name);
+ PLATFORM_LOG_LINE(WARNING, "failed to remove %s", pdev->name);
}
pdev->device.driver = NULL;
@@ -56,7 +56,7 @@ platform_bus_dev_iterate(const void *start, const char *str,
if (str != NULL) {
kvargs = rte_kvargs_parse(str, platform_params_keys);
if (!kvargs) {
- PLATFORM_LOG(ERR, "cannot parse argument list %s", str);
+ PLATFORM_LOG_LINE(ERR, "cannot parse argument list %s", str);
rte_errno = EINVAL;
return NULL;
}
@@ -15,7 +15,6 @@
#include "bus_platform_driver.h"
extern struct rte_platform_bus platform_bus;
-extern int platform_bus_logtype;
/* Platform bus iterators. */
#define FOREACH_DEVICE_ON_PLATFORM_BUS(p) \
@@ -33,10 +32,11 @@ struct rte_platform_bus {
RTE_TAILQ_HEAD(, rte_platform_driver) driver_list; /* List of bus drivers */
};
-#define PLATFORM_LOG(level, ...) \
- rte_log(RTE_LOG_ ## level, platform_bus_logtype, \
- RTE_FMT("platform bus: " RTE_FMT_HEAD(__VA_ARGS__,), \
- RTE_FMT_TAIL(__VA_ARGS__,)))
+extern int platform_bus_logtype;
+#define RTE_LOGTYPE_PLATFORM_BUS platform_bus_logtype
+#define PLATFORM_LOG_LINE(level, ...) \
+ RTE_LOG(level, PLATFORM_BUS, RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
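(Illustrative sketch, not part of the patch: under the new definition, a call such as
PLATFORM_LOG_LINE(INFO, "adding device %s to the list", dev_name) resolves through
RTE_LOG() and the RTE_LOGTYPE_PLATFORM_BUS alias to approximately

    rte_log(RTE_LOG_INFO, platform_bus_logtype,
            "PLATFORM_BUS: adding device %s to the list\n", dev_name);

RTE_FMT_HEAD() isolates the format string so "\n" can be string-pasted onto it at
compile time, and RTE_FMT_TAIL() forwards the remaining arguments — which is why
every call site in this patch drops its trailing "\n".)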
/*
* Iterate registered platform devices and find one that matches provided string.
@@ -190,7 +190,7 @@ zqmq_input_ring_disable(uint8_t *bar_addr, uint16_t ring)
}
if (zqmq_activity_stat.s.queue_active) {
- NITROX_LOG(ERR, "Failed to disable zqmq ring %d\n", ring);
+ NITROX_LOG_LINE(ERR, "Failed to disable zqmq ring %d", ring);
return -EBUSY;
}
@@ -271,7 +271,7 @@ setup_zqmq_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
}
if (!zqmq_en.s.queue_enable) {
- NITROX_LOG(ERR, "Failed to enable zqmq ring %d\n", ring);
+ NITROX_LOG_LINE(ERR, "Failed to enable zqmq ring %d", ring);
err = -EFAULT;
} else {
err = 0;
@@ -5,11 +5,9 @@
#ifndef _NITROX_LOGS_H_
#define _NITROX_LOGS_H_
-#define LOG_PREFIX "NITROX: "
-#define NITROX_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, nitrox_logtype, \
- LOG_PREFIX "%s:%d " fmt, __func__, __LINE__, ## args)
-
extern int nitrox_logtype;
+#define RTE_LOGTYPE_NITROX nitrox_logtype
+#define NITROX_LOG_LINE(level, fmt, args...) \
+ RTE_LOG(level, NITROX, "%s:%d " fmt "\n", __func__, __LINE__, ## args)
#endif /* _NITROX_LOGS_H_ */
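(Sketch, not part of the patch: NITROX_LOG_LINE(ERR, "queue %d not empty", qp_id)
now expands through RTE_LOG() to approximately

    rte_log(RTE_LOG_ERR, nitrox_logtype,
            "NITROX: %s:%d queue %d not empty\n", __func__, __LINE__, qp_id);

the "NITROX: " prefix comes from RTE_LOG() stringizing the logtype token, which
replaces the hand-rolled LOG_PREFIX removed above.)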
@@ -28,7 +28,7 @@ nitrox_setup_cmdq(struct nitrox_qp *qp, uint8_t *bar_addr,
RTE_MEMZONE_256MB,
CMDQ_PKT_IN_ALIGN);
if (!mz) {
- NITROX_LOG(ERR, "cmdq memzone reserve failed for %s queue\n",
+ NITROX_LOG_LINE(ERR, "cmdq memzone reserve failed for %s queue",
mz_name);
return -ENOMEM;
}
@@ -48,7 +48,7 @@ nitrox_setup_cmdq(struct nitrox_qp *qp, uint8_t *bar_addr,
mz->iova);
break;
default:
- NITROX_LOG(ERR, "Invalid queue type %d\n", qp->type);
+ NITROX_LOG_LINE(ERR, "Invalid queue type %d", qp->type);
err = -EINVAL;
break;
}
@@ -73,7 +73,7 @@ nitrox_setup_ridq(struct nitrox_qp *qp, int socket_id)
RTE_CACHE_LINE_SIZE,
socket_id);
if (!qp->ridq) {
- NITROX_LOG(ERR, "Failed to create rid queue\n");
+ NITROX_LOG_LINE(ERR, "Failed to create rid queue");
return -ENOMEM;
}
@@ -112,8 +112,8 @@ nitrox_qp_setup(struct nitrox_qp *qp, uint8_t *bar_addr, const char *dev_name,
count = rte_align32pow2(nb_descriptors);
if (count > MAX_CMD_QLEN) {
- NITROX_LOG(ERR, "%s: Number of descriptors too big %d,"
- " greater than max queue length %d\n",
+ NITROX_LOG_LINE(ERR, "%s: Number of descriptors too big %d,"
+ " greater than max queue length %d",
dev_name, count,
MAX_CMD_QLEN);
return -EINVAL;
@@ -53,14 +53,14 @@ static int nitrox_comp_dev_configure(struct rte_compressdev *dev,
char name[RTE_MEMPOOL_NAMESIZE];
if (config->nb_queue_pairs > ndev->nr_queues) {
- NITROX_LOG(ERR, "Invalid queue pairs, max supported %d\n",
+ NITROX_LOG_LINE(ERR, "Invalid queue pairs, max supported %d",
ndev->nr_queues);
return -EINVAL;
}
xform_cnt = config->max_nb_priv_xforms + config->max_nb_streams;
if (unlikely(xform_cnt == 0)) {
- NITROX_LOG(ERR, "Invalid configuration with 0 xforms\n");
+ NITROX_LOG_LINE(ERR, "Invalid configuration with 0 xforms");
return -EINVAL;
}
@@ -70,7 +70,7 @@ static int nitrox_comp_dev_configure(struct rte_compressdev *dev,
0, 0, NULL, NULL, NULL, NULL,
config->socket_id, 0);
if (comp_dev->xform_pool == NULL) {
- NITROX_LOG(ERR, "Failed to create xform pool, err %d\n",
+ NITROX_LOG_LINE(ERR, "Failed to create xform pool, err %d",
rte_errno);
return -rte_errno;
}
@@ -160,9 +160,9 @@ static int nitrox_comp_queue_pair_setup(struct rte_compressdev *dev,
struct nitrox_qp *qp = NULL;
int err;
- NITROX_LOG(DEBUG, "queue %d\n", qp_id);
+ NITROX_LOG_LINE(DEBUG, "queue %d", qp_id);
if (qp_id >= ndev->nr_queues) {
- NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
+ NITROX_LOG_LINE(ERR, "queue %u invalid, max queues supported %d",
qp_id, ndev->nr_queues);
return -EINVAL;
}
@@ -177,7 +177,7 @@ static int nitrox_comp_queue_pair_setup(struct rte_compressdev *dev,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!qp) {
- NITROX_LOG(ERR, "Failed to allocate nitrox qp\n");
+ NITROX_LOG_LINE(ERR, "Failed to allocate nitrox qp");
return -ENOMEM;
}
@@ -195,7 +195,7 @@ static int nitrox_comp_queue_pair_setup(struct rte_compressdev *dev,
goto req_pool_err;
dev->data->queue_pairs[qp_id] = qp;
- NITROX_LOG(DEBUG, "queue %d setup done\n", qp_id);
+ NITROX_LOG_LINE(DEBUG, "queue %d setup done", qp_id);
return 0;
req_pool_err:
@@ -213,21 +213,21 @@ static int nitrox_comp_queue_pair_release(struct rte_compressdev *dev,
struct nitrox_qp *qp;
int err;
- NITROX_LOG(DEBUG, "queue %d\n", qp_id);
+ NITROX_LOG_LINE(DEBUG, "queue %d", qp_id);
if (qp_id >= ndev->nr_queues) {
- NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
+ NITROX_LOG_LINE(ERR, "queue %u invalid, max queues supported %d",
qp_id, ndev->nr_queues);
return -EINVAL;
}
qp = dev->data->queue_pairs[qp_id];
if (!qp) {
- NITROX_LOG(DEBUG, "queue %u already freed\n", qp_id);
+ NITROX_LOG_LINE(DEBUG, "queue %u already freed", qp_id);
return 0;
}
if (!nitrox_qp_is_empty(qp)) {
- NITROX_LOG(ERR, "queue %d not empty\n", qp_id);
+ NITROX_LOG_LINE(ERR, "queue %d not empty", qp_id);
return -EAGAIN;
}
@@ -235,7 +235,7 @@ static int nitrox_comp_queue_pair_release(struct rte_compressdev *dev,
err = nitrox_qp_release(qp, ndev->bar_addr);
nitrox_comp_req_pool_free(qp->sr_mp);
rte_free(qp);
- NITROX_LOG(DEBUG, "queue %d release done\n", qp_id);
+ NITROX_LOG_LINE(DEBUG, "queue %d release done", qp_id);
return err;
}
@@ -249,12 +249,12 @@ static int nitrox_comp_private_xform_create(struct rte_compressdev *dev,
int ret;
if (unlikely(comp_dev->xform_pool == NULL)) {
- NITROX_LOG(ERR, "private xform pool not yet created\n");
+ NITROX_LOG_LINE(ERR, "private xform pool not yet created");
return -EINVAL;
}
if (rte_mempool_get(comp_dev->xform_pool, private_xform)) {
- NITROX_LOG(ERR, "Failed to get from private xform pool\n");
+ NITROX_LOG_LINE(ERR, "Failed to get from private xform pool");
return -ENOMEM;
}
@@ -266,7 +266,7 @@ static int nitrox_comp_private_xform_create(struct rte_compressdev *dev,
nxform->op = NITROX_COMP_OP_COMPRESS;
if (xform->compress.algo != RTE_COMP_ALGO_DEFLATE) {
- NITROX_LOG(ERR, "Only deflate is supported\n");
+ NITROX_LOG_LINE(ERR, "Only deflate is supported");
ret = -ENOTSUP;
goto err_exit;
}
@@ -279,7 +279,7 @@ static int nitrox_comp_private_xform_create(struct rte_compressdev *dev,
else if (algo == RTE_COMP_HUFFMAN_DYNAMIC)
nxform->algo = NITROX_COMP_ALGO_DEFLATE_DYNHUFF;
else {
- NITROX_LOG(ERR, "Invalid deflate algorithm %d\n", algo);
+ NITROX_LOG_LINE(ERR, "Invalid deflate algorithm %d", algo);
ret = -EINVAL;
goto err_exit;
}
@@ -300,7 +300,7 @@ static int nitrox_comp_private_xform_create(struct rte_compressdev *dev,
level <= NITROX_COMP_LEVEL_BEST_END) {
nxform->level = NITROX_COMP_LEVEL_BEST;
} else {
- NITROX_LOG(ERR, "Unsupported compression level %d\n",
+ NITROX_LOG_LINE(ERR, "Unsupported compression level %d",
xform->compress.level);
ret = -ENOTSUP;
goto err_exit;
@@ -310,7 +310,7 @@ static int nitrox_comp_private_xform_create(struct rte_compressdev *dev,
} else if (xform->type == RTE_COMP_DECOMPRESS) {
nxform->op = NITROX_COMP_OP_DECOMPRESS;
if (xform->decompress.algo != RTE_COMP_ALGO_DEFLATE) {
- NITROX_LOG(ERR, "Only deflate is supported\n");
+ NITROX_LOG_LINE(ERR, "Only deflate is supported");
ret = -ENOTSUP;
goto err_exit;
}
@@ -330,7 +330,7 @@ static int nitrox_comp_private_xform_create(struct rte_compressdev *dev,
else if (chksum_type == RTE_COMP_CHECKSUM_ADLER32)
nxform->chksum_type = NITROX_CHKSUM_TYPE_ADLER32;
else {
- NITROX_LOG(ERR, "Unsupported checksum type %d\n",
+ NITROX_LOG_LINE(ERR, "Unsupported checksum type %d",
chksum_type);
ret = -ENOTSUP;
goto err_exit;
@@ -397,7 +397,7 @@ static int nitrox_comp_stream_create(struct rte_compressdev *dev,
if (unlikely(window_size < NITROX_COMP_WINDOW_SIZE_MIN ||
window_size > NITROX_COMP_WINDOW_SIZE_MAX)) {
- NITROX_LOG(ERR, "Invalid window size %d\n",
+ NITROX_LOG_LINE(ERR, "Invalid window size %d",
window_size);
return -EINVAL;
}
@@ -569,7 +569,7 @@ nitrox_comp_pmd_create(struct nitrox_device *ndev)
sizeof(struct nitrox_comp_device),
&init_params);
if (!cdev) {
- NITROX_LOG(ERR, "Cryptodev '%s' creation failed\n", name);
+ NITROX_LOG_LINE(ERR, "Cryptodev '%s' creation failed", name);
return -ENODEV;
}
@@ -582,7 +582,7 @@ nitrox_comp_pmd_create(struct nitrox_device *ndev)
ndev->comp_dev->cdev = cdev;
ndev->comp_dev->ndev = ndev;
ndev->comp_dev->xform_pool = NULL;
- NITROX_LOG(DEBUG, "Created compressdev '%s', dev_id %d\n",
+ NITROX_LOG_LINE(DEBUG, "Created compressdev '%s', dev_id %d",
cdev->data->name, cdev->data->dev_id);
return 0;
}
@@ -337,7 +337,7 @@ static void nitrox_dump_databuf(const char *name, struct rte_mbuf *m,
rte_pktmbuf_mtod(m, char *), mlen);
}
- NITROX_LOG(DEBUG, "\n");
+ NITROX_LOG_LINE(DEBUG,);
}
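(Note, not part of the patch: the bare NITROX_LOG_LINE(DEBUG,); form used here and
throughout the dump routines below is valid. With an empty fmt the macro still
produces a complete format string, roughly

    rte_log(RTE_LOG_DEBUG, nitrox_logtype, "NITROX: %s:%d \n",
            __func__, __LINE__);

and the GNU "## args" extension swallows the trailing comma, so the call emits an
intentionally blank log line.)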
static void nitrox_dump_zip_instr(struct nitrox_zip_instr *instr,
@@ -348,108 +348,108 @@ static void nitrox_dump_zip_instr(struct nitrox_zip_instr *instr,
uint64_t value;
int i = 0;
- NITROX_LOG(DEBUG, "\nZIP instruction..(%p)\n", instr);
- NITROX_LOG(DEBUG, "\tWORD0 = 0x%016"PRIx64"\n", instr->w0.u64);
- NITROX_LOG(DEBUG, "\t\tTOL = %d\n", instr->w0.tol);
- NITROX_LOG(DEBUG, "\t\tEXNUM = %d\n", instr->w0.exn);
- NITROX_LOG(DEBUG, "\t\tEXBITS = %x\n", instr->w0.exbits);
- NITROX_LOG(DEBUG, "\t\tCA = %d\n", instr->w0.ca);
- NITROX_LOG(DEBUG, "\t\tSF = %d\n", instr->w0.sf);
- NITROX_LOG(DEBUG, "\t\tSS = %d\n", instr->w0.ss);
- NITROX_LOG(DEBUG, "\t\tCC = %d\n", instr->w0.cc);
- NITROX_LOG(DEBUG, "\t\tEF = %d\n", instr->w0.ef);
- NITROX_LOG(DEBUG, "\t\tBF = %d\n", instr->w0.bf);
- NITROX_LOG(DEBUG, "\t\tCO = %d\n", instr->w0.co);
- NITROX_LOG(DEBUG, "\t\tDS = %d\n", instr->w0.ds);
- NITROX_LOG(DEBUG, "\t\tDG = %d\n", instr->w0.dg);
- NITROX_LOG(DEBUG, "\t\tHG = %d\n", instr->w0.hg);
- NITROX_LOG(DEBUG, "\n");
-
- NITROX_LOG(DEBUG, "\tWORD1 = 0x%016"PRIx64"\n", instr->w1.u64);
- NITROX_LOG(DEBUG, "\t\tHL = %d\n", instr->w1.hl);
- NITROX_LOG(DEBUG, "\t\tADLERCRC32 = 0x%08x\n", instr->w1.adlercrc32);
- NITROX_LOG(DEBUG, "\n");
+ NITROX_LOG_LINE(DEBUG, "\nZIP instruction..(%p)", instr);
+ NITROX_LOG_LINE(DEBUG, "\tWORD0 = 0x%016"PRIx64, instr->w0.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\tTOL = %d", instr->w0.tol);
+ NITROX_LOG_LINE(DEBUG, "\t\tEXNUM = %d", instr->w0.exn);
+ NITROX_LOG_LINE(DEBUG, "\t\tEXBITS = %x", instr->w0.exbits);
+ NITROX_LOG_LINE(DEBUG, "\t\tCA = %d", instr->w0.ca);
+ NITROX_LOG_LINE(DEBUG, "\t\tSF = %d", instr->w0.sf);
+ NITROX_LOG_LINE(DEBUG, "\t\tSS = %d", instr->w0.ss);
+ NITROX_LOG_LINE(DEBUG, "\t\tCC = %d", instr->w0.cc);
+ NITROX_LOG_LINE(DEBUG, "\t\tEF = %d", instr->w0.ef);
+ NITROX_LOG_LINE(DEBUG, "\t\tBF = %d", instr->w0.bf);
+ NITROX_LOG_LINE(DEBUG, "\t\tCO = %d", instr->w0.co);
+ NITROX_LOG_LINE(DEBUG, "\t\tDS = %d", instr->w0.ds);
+ NITROX_LOG_LINE(DEBUG, "\t\tDG = %d", instr->w0.dg);
+ NITROX_LOG_LINE(DEBUG, "\t\tHG = %d", instr->w0.hg);
+ NITROX_LOG_LINE(DEBUG,);
+
+ NITROX_LOG_LINE(DEBUG, "\tWORD1 = 0x%016"PRIx64, instr->w1.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\tHL = %d", instr->w1.hl);
+ NITROX_LOG_LINE(DEBUG, "\t\tADLERCRC32 = 0x%08x", instr->w1.adlercrc32);
+ NITROX_LOG_LINE(DEBUG,);
value = instr->w2.cptr;
- NITROX_LOG(DEBUG, "\tWORD2 = 0x%016"PRIx64"\n", instr->w2.u64);
- NITROX_LOG(DEBUG, "\t\tCPTR = 0x%11"PRIx64"\n", value);
- NITROX_LOG(DEBUG, "\n");
+ NITROX_LOG_LINE(DEBUG, "\tWORD2 = 0x%016"PRIx64, instr->w2.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\tCPTR = 0x%11"PRIx64, value);
+ NITROX_LOG_LINE(DEBUG,);
value = instr->w3.hptr;
- NITROX_LOG(DEBUG, "\tWORD3 = 0x%016"PRIx64"\n", instr->w3.u64);
- NITROX_LOG(DEBUG, "\t\tHLEN = %d\n", instr->w3.hlen);
- NITROX_LOG(DEBUG, "\t\tHPTR = 0x%11"PRIx64"\n", value);
+ NITROX_LOG_LINE(DEBUG, "\tWORD3 = 0x%016"PRIx64, instr->w3.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\tHLEN = %d", instr->w3.hlen);
+ NITROX_LOG_LINE(DEBUG, "\t\tHPTR = 0x%11"PRIx64, value);
if (instr->w0.hg && hptr_arr) {
for (i = 0; i < instr->w3.hlen; i++) {
value = hptr_arr[i].s.addr;
- NITROX_LOG(DEBUG, "\t\t\tZPTR[%d] : Length = %d Addr = 0x%11"PRIx64"\n",
+ NITROX_LOG_LINE(DEBUG, "\t\t\tZPTR[%d] : Length = %d Addr = 0x%11"PRIx64,
i, hptr_arr[i].s.length, value);
}
}
- NITROX_LOG(DEBUG, "\n");
+ NITROX_LOG_LINE(DEBUG,);
value = instr->w4.iptr;
- NITROX_LOG(DEBUG, "\tWORD4 = 0x%016"PRIx64"\n", instr->w4.u64);
- NITROX_LOG(DEBUG, "\t\tILEN = %d\n", instr->w4.ilen);
- NITROX_LOG(DEBUG, "\t\tIPTR = 0x%11"PRIx64"\n", value);
+ NITROX_LOG_LINE(DEBUG, "\tWORD4 = 0x%016"PRIx64, instr->w4.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\tILEN = %d", instr->w4.ilen);
+ NITROX_LOG_LINE(DEBUG, "\t\tIPTR = 0x%11"PRIx64, value);
if (instr->w0.dg && iptr_arr) {
for (i = 0; i < instr->w4.ilen; i++) {
value = iptr_arr[i].s.addr;
- NITROX_LOG(DEBUG, "\t\t\tZPTR[%d] : Length = %d Addr = 0x%11"PRIx64"\n",
+ NITROX_LOG_LINE(DEBUG, "\t\t\tZPTR[%d] : Length = %d Addr = 0x%11"PRIx64,
i, iptr_arr[i].s.length, value);
}
}
- NITROX_LOG(DEBUG, "\n");
+ NITROX_LOG_LINE(DEBUG,);
value = instr->w5.optr;
- NITROX_LOG(DEBUG, "\tWORD5 = 0x%016"PRIx64"\n", instr->w5.u64);
- NITROX_LOG(DEBUG, "\t\t OLEN = %d\n", instr->w5.olen);
- NITROX_LOG(DEBUG, "\t\t OPTR = 0x%11"PRIx64"\n", value);
+ NITROX_LOG_LINE(DEBUG, "\tWORD5 = 0x%016"PRIx64, instr->w5.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\t OLEN = %d", instr->w5.olen);
+ NITROX_LOG_LINE(DEBUG, "\t\t OPTR = 0x%11"PRIx64, value);
if (instr->w0.ds && optr_arr) {
for (i = 0; i < instr->w5.olen; i++) {
value = optr_arr[i].s.addr;
- NITROX_LOG(DEBUG, "\t\t\tZPTR[%d] : Length = %d Addr = 0x%11"PRIx64"\n",
+ NITROX_LOG_LINE(DEBUG, "\t\t\tZPTR[%d] : Length = %d Addr = 0x%11"PRIx64,
i, optr_arr[i].s.length, value);
}
}
- NITROX_LOG(DEBUG, "\n");
+ NITROX_LOG_LINE(DEBUG,);
value = instr->w6.rptr;
- NITROX_LOG(DEBUG, "\tWORD6 = 0x%016"PRIx64"\n", instr->w6.u64);
- NITROX_LOG(DEBUG, "\t\tRPTR = 0x%11"PRIx64"\n", value);
- NITROX_LOG(DEBUG, "\n");
-
- NITROX_LOG(DEBUG, "\tWORD7 = 0x%016"PRIx64"\n", instr->w7.u64);
- NITROX_LOG(DEBUG, "\t\tGRP = %x\n", instr->w7.grp);
- NITROX_LOG(DEBUG, "\t\tADDR_MSB = 0x%5x\n", instr->w7.addr_msb);
- NITROX_LOG(DEBUG, "\n");
+ NITROX_LOG_LINE(DEBUG, "\tWORD6 = 0x%016"PRIx64, instr->w6.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\tRPTR = 0x%11"PRIx64, value);
+ NITROX_LOG_LINE(DEBUG,);
+
+ NITROX_LOG_LINE(DEBUG, "\tWORD7 = 0x%016"PRIx64, instr->w7.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\tGRP = %x", instr->w7.grp);
+ NITROX_LOG_LINE(DEBUG, "\t\tADDR_MSB = 0x%5x", instr->w7.addr_msb);
+ NITROX_LOG_LINE(DEBUG,);
}
static void nitrox_dump_zip_result(struct nitrox_zip_instr *instr,
struct nitrox_zip_result *result)
{
- NITROX_LOG(DEBUG, "ZIP result..(instr %p)\n", instr);
- NITROX_LOG(DEBUG, "\tWORD0 = 0x%016"PRIx64"\n", result->w0.u64);
- NITROX_LOG(DEBUG, "\t\tCRC32 = 0x%8x\n", result->w0.crc32);
- NITROX_LOG(DEBUG, "\t\tADLER32 = 0x%8x\n", result->w0.adler32);
- NITROX_LOG(DEBUG, "\n");
-
- NITROX_LOG(DEBUG, "\tWORD1 = 0x%016"PRIx64"\n", result->w1.u64);
- NITROX_LOG(DEBUG, "\t\tTBYTESWRITTEN = %u\n", result->w1.tbyteswritten);
- NITROX_LOG(DEBUG, "\t\tTBYTESREAD = %u\n", result->w1.tbytesread);
- NITROX_LOG(DEBUG, "\n");
-
- NITROX_LOG(DEBUG, "\tWORD2 = 0x%016"PRIx64"\n", result->w2.u64);
- NITROX_LOG(DEBUG, "\t\tTBITS = %u\n", result->w2.tbits);
- NITROX_LOG(DEBUG, "\t\tEXN = %d\n", result->w2.exn);
- NITROX_LOG(DEBUG, "\t\tEBITS = %x\n", result->w2.exbits);
- NITROX_LOG(DEBUG, "\t\tEF = %d\n", result->w2.ef);
- NITROX_LOG(DEBUG, "\t\tCOMPCODE = 0x%2x\n", result->w2.compcode);
- NITROX_LOG(DEBUG, "\n");
+ NITROX_LOG_LINE(DEBUG, "ZIP result..(instr %p)", instr);
+ NITROX_LOG_LINE(DEBUG, "\tWORD0 = 0x%016"PRIx64, result->w0.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\tCRC32 = 0x%8x", result->w0.crc32);
+ NITROX_LOG_LINE(DEBUG, "\t\tADLER32 = 0x%8x", result->w0.adler32);
+ NITROX_LOG_LINE(DEBUG,);
+
+ NITROX_LOG_LINE(DEBUG, "\tWORD1 = 0x%016"PRIx64, result->w1.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\tTBYTESWRITTEN = %u", result->w1.tbyteswritten);
+ NITROX_LOG_LINE(DEBUG, "\t\tTBYTESREAD = %u", result->w1.tbytesread);
+ NITROX_LOG_LINE(DEBUG,);
+
+ NITROX_LOG_LINE(DEBUG, "\tWORD2 = 0x%016"PRIx64, result->w2.u64);
+ NITROX_LOG_LINE(DEBUG, "\t\tTBITS = %u", result->w2.tbits);
+ NITROX_LOG_LINE(DEBUG, "\t\tEXN = %d", result->w2.exn);
+ NITROX_LOG_LINE(DEBUG, "\t\tEBITS = %x", result->w2.exbits);
+ NITROX_LOG_LINE(DEBUG, "\t\tEF = %d", result->w2.ef);
+ NITROX_LOG_LINE(DEBUG, "\t\tCOMPCODE = 0x%2x", result->w2.compcode);
+ NITROX_LOG_LINE(DEBUG,);
}
#else
#define nitrox_dump_databuf(name, m, off, datalen)
@@ -533,7 +533,7 @@ static int create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl,
uint32_t mlen;
if (unlikely(datalen > NITROX_ZIP_MAX_DATASIZE)) {
- NITROX_LOG(ERR, "Unsupported datalen %d, max supported %d\n",
+ NITROX_LOG_LINE(ERR, "Unsupported datalen %d, max supported %d",
datalen, NITROX_ZIP_MAX_DATASIZE);
return -ENOTSUP;
}
@@ -545,7 +545,7 @@ static int create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl,
}
if (unlikely(nb_segs > NITROX_ZIP_MAX_ZPTRS)) {
- NITROX_LOG(ERR, "Mbuf has more segments %d than supported\n",
+ NITROX_LOG_LINE(ERR, "Mbuf has more segments %d than supported",
nb_segs);
return -ENOTSUP;
}
@@ -553,13 +553,13 @@ static int create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl,
if (unlikely(nb_segs > sgtbl->nb_sgls)) {
union nitrox_zip_zptr *sgl;
- NITROX_LOG(INFO, "Mbuf has more segs %d than allocated %d\n",
+ NITROX_LOG_LINE(INFO, "Mbuf has more segs %d than allocated %d",
nb_segs, sgtbl->nb_sgls);
sgl = rte_realloc_socket(sgtbl->sgl,
sizeof(*sgtbl->sgl) * nb_segs,
8, socket_id);
if (unlikely(!sgl)) {
- NITROX_LOG(ERR, "Failed to expand sglist memory\n");
+ NITROX_LOG_LINE(ERR, "Failed to expand sglist memory");
return -ENOMEM;
}
@@ -591,7 +591,7 @@ static int create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl,
rte_pktmbuf_data_len(m) : datalen;
zip_addr.u64 = rte_pktmbuf_iova(m);
if (unlikely(zip_addr.zda.addr_msb != sgtbl->addr_msb)) {
- NITROX_LOG(ERR, "zip_ptrs have different msb addr\n");
+ NITROX_LOG_LINE(ERR, "zip_ptrs have different msb addr");
return -ENOTSUP;
}
@@ -682,7 +682,7 @@ static int process_zip_request(struct nitrox_softreq *sr)
xform = sr->op->private_xform;
if (unlikely(xform == NULL)) {
- NITROX_LOG(ERR, "Invalid stateless comp op\n");
+ NITROX_LOG_LINE(ERR, "Invalid stateless comp op");
return -EINVAL;
}
@@ -696,7 +696,7 @@ static int process_zip_request(struct nitrox_softreq *sr)
xform->op == NITROX_COMP_OP_COMPRESS &&
sr->op->flush_flag != RTE_COMP_FLUSH_FULL &&
sr->op->flush_flag != RTE_COMP_FLUSH_FINAL)) {
- NITROX_LOG(ERR, "Invalid flush flag %d in stateless op\n",
+ NITROX_LOG_LINE(ERR, "Invalid flush flag %d in stateless op",
sr->op->flush_flag);
return -EINVAL;
}
@@ -805,7 +805,7 @@ static int process_zip_request(struct nitrox_softreq *sr)
if (unlikely(iptr_msb != optr_msb || iptr_msb != rptr_msb ||
(xform->history_window && (iptr_msb != hptr_msb)) ||
(xform->context && (iptr_msb != cptr_msb)))) {
- NITROX_LOG(ERR, "addr_msb is not same for all addresses\n");
+ NITROX_LOG_LINE(ERR, "addr_msb is not same for all addresses");
return -ENOTSUP;
}
@@ -861,7 +861,7 @@ static int post_process_zip_stateless(struct nitrox_softreq *sr,
if (unlikely(zip_res->w2.compcode != NITROX_CC_SUCCESS)) {
struct rte_comp_op *op = sr->op;
- NITROX_LOG(ERR, "Dequeue error 0x%x\n",
+ NITROX_LOG_LINE(ERR, "Dequeue error 0x%x",
zip_res->w2.compcode);
if (zip_res->w2.compcode == NITROX_CC_STOP ||
zip_res->w2.compcode == NITROX_CC_DTRUNC)
@@ -877,7 +877,7 @@ static int post_process_zip_stateless(struct nitrox_softreq *sr,
output_unused_bytes = sr->dst.total_bytes - zip_res->w1.tbyteswritten;
if (unlikely(xform->op == NITROX_COMP_OP_DECOMPRESS &&
output_unused_bytes < NITROX_ZIP_MAX_ONFSIZE)) {
- NITROX_LOG(ERR, "TOL %d, Total bytes written %d\n",
+ NITROX_LOG_LINE(ERR, "TOL %d, Total bytes written %d",
sr->dst.total_bytes, zip_res->w1.tbyteswritten);
sr->op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
sr->op->consumed = 0;
@@ -908,7 +908,7 @@ static int update_history(struct rte_mbuf *mbuf, uint32_t off, uint16_t datalen,
off -= rte_pktmbuf_data_len(m);
if (unlikely(!m)) {
- NITROX_LOG(ERR, "Failed to update history. Invalid mbuf\n");
+ NITROX_LOG_LINE(ERR, "Failed to update history. Invalid mbuf");
return -EINVAL;
}
@@ -928,7 +928,7 @@ static int update_history(struct rte_mbuf *mbuf, uint32_t off, uint16_t datalen,
}
if (unlikely(datalen != 0)) {
- NITROX_LOG(ERR, "Failed to update history. Invalid datalen\n");
+ NITROX_LOG_LINE(ERR, "Failed to update history. Invalid datalen");
return -EINVAL;
}
@@ -955,11 +955,11 @@ static int post_process_zip_stateful(struct nitrox_softreq *sr,
sr->op->produced = 0;
xform->hlen = 0;
sr->op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
- NITROX_LOG(ERR, "Dequeue compress DTRUNC error\n");
+ NITROX_LOG_LINE(ERR, "Dequeue compress DTRUNC error");
return 0;
} else if (unlikely(zip_res->w2.compcode == NITROX_CC_STOP)) {
sr->op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
- NITROX_LOG(NOTICE, "Dequeue decompress dynamic STOP\n");
+ NITROX_LOG_LINE(NOTICE, "Dequeue decompress dynamic STOP");
} else if (zip_res->w2.compcode == NITROX_CC_SUCCESS) {
sr->op->status = RTE_COMP_OP_STATUS_SUCCESS;
} else {
@@ -968,14 +968,14 @@ static int post_process_zip_stateful(struct nitrox_softreq *sr,
xform->exbits = 0;
xform->bf = true;
sr->op->status = RTE_COMP_OP_STATUS_ERROR;
- NITROX_LOG(ERR, "Dequeue error 0x%x\n",
+ NITROX_LOG_LINE(ERR, "Dequeue error 0x%x",
zip_res->w2.compcode);
return -EFAULT;
}
if (xform->op == NITROX_COMP_OP_COMPRESS) {
if (zip_res->w1.tbytesread < xform->hlen) {
- NITROX_LOG(ERR, "Invalid bytesread\n");
+ NITROX_LOG_LINE(ERR, "Invalid bytesread");
reset_nitrox_xform(xform);
sr->op->status = RTE_COMP_OP_STATUS_ERROR;
return -EFAULT;
@@ -1068,7 +1068,7 @@ nitrox_check_comp_req(struct nitrox_softreq *sr, struct rte_comp_op **op)
zip_res = zip_result_to_cpu64(&sr->zip_res);
if (zip_res.w2.compcode == NITROX_CC_NOTDONE) {
if (rte_get_timer_cycles() >= sr->timeout) {
- NITROX_LOG(ERR, "Op timedout\n");
+ NITROX_LOG_LINE(ERR, "Op timedout");
sr->op->status = RTE_COMP_OP_STATUS_ERROR;
err = -ETIMEDOUT;
goto exit;
@@ -1166,7 +1166,7 @@ static void req_pool_obj_init(struct rte_mempool *mp, void *arg, void *obj,
sizeof(*sr->dst.sgl) * NITROX_ZIP_SGL_COUNT,
8, mp->socket_id);
if (sr->src.sgl == NULL || sr->dst.sgl == NULL) {
- NITROX_LOG(ERR, "Failed to allocate zip_sgl memory\n");
+ NITROX_LOG_LINE(ERR, "Failed to allocate zip_sgl memory");
*err = -ENOMEM;
}
@@ -1192,7 +1192,7 @@ nitrox_comp_req_pool_create(struct rte_compressdev *dev, uint32_t nobjs,
64, 0, NULL, NULL, req_pool_obj_init, &err,
socket_id, 0);
if (unlikely(!mp))
- NITROX_LOG(ERR, "Failed to create req pool, qid %d, err %d\n",
+ NITROX_LOG_LINE(ERR, "Failed to create req pool, qid %d, err %d",
qp_id, rte_errno);
if (unlikely(err)) {
@@ -79,7 +79,7 @@ nitrox_sym_dev_config(struct rte_cryptodev *cdev,
struct nitrox_device *ndev = sym_dev->ndev;
if (config->nb_queue_pairs > ndev->nr_queues) {
- NITROX_LOG(ERR, "Invalid queue pairs, max supported %d\n",
+ NITROX_LOG_LINE(ERR, "Invalid queue pairs, max supported %d",
ndev->nr_queues);
return -EINVAL;
}
@@ -177,9 +177,9 @@ nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
struct nitrox_qp *qp = NULL;
int err;
- NITROX_LOG(DEBUG, "queue %d\n", qp_id);
+ NITROX_LOG_LINE(DEBUG, "queue %d", qp_id);
if (qp_id >= ndev->nr_queues) {
- NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
+ NITROX_LOG_LINE(ERR, "queue %u invalid, max queues supported %d",
qp_id, ndev->nr_queues);
return -EINVAL;
}
@@ -194,7 +194,7 @@ nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!qp) {
- NITROX_LOG(ERR, "Failed to allocate nitrox qp\n");
+ NITROX_LOG_LINE(ERR, "Failed to allocate nitrox qp");
return -ENOMEM;
}
@@ -212,7 +212,7 @@ nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
goto req_pool_err;
cdev->data->queue_pairs[qp_id] = qp;
- NITROX_LOG(DEBUG, "queue %d setup done\n", qp_id);
+ NITROX_LOG_LINE(DEBUG, "queue %d setup done", qp_id);
return 0;
req_pool_err:
@@ -230,21 +230,21 @@ nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
struct nitrox_qp *qp;
int err;
- NITROX_LOG(DEBUG, "queue %d\n", qp_id);
+ NITROX_LOG_LINE(DEBUG, "queue %d", qp_id);
if (qp_id >= ndev->nr_queues) {
- NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
+ NITROX_LOG_LINE(ERR, "queue %u invalid, max queues supported %d",
qp_id, ndev->nr_queues);
return -EINVAL;
}
qp = cdev->data->queue_pairs[qp_id];
if (!qp) {
- NITROX_LOG(DEBUG, "queue %u already freed\n", qp_id);
+ NITROX_LOG_LINE(DEBUG, "queue %u already freed", qp_id);
return 0;
}
if (!nitrox_qp_is_empty(qp)) {
- NITROX_LOG(ERR, "queue %d not empty\n", qp_id);
+ NITROX_LOG_LINE(ERR, "queue %d not empty", qp_id);
return -EAGAIN;
}
@@ -252,7 +252,7 @@ nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
err = nitrox_qp_release(qp, ndev->bar_addr);
nitrox_sym_req_pool_free(qp->sr_mp);
rte_free(qp);
- NITROX_LOG(DEBUG, "queue %d release done\n", qp_id);
+ NITROX_LOG_LINE(DEBUG, "queue %d release done", qp_id);
return err;
}
@@ -280,7 +280,7 @@ get_crypto_chain_order(const struct rte_crypto_sym_xform *xform)
RTE_CRYPTO_CIPHER_OP_DECRYPT) {
res = NITROX_CHAIN_AUTH_CIPHER;
} else {
- NITROX_LOG(ERR, "auth op %d, cipher op %d\n",
+ NITROX_LOG_LINE(ERR, "auth op %d, cipher op %d",
xform->auth.op, xform->next->cipher.op);
}
}
@@ -294,7 +294,7 @@ get_crypto_chain_order(const struct rte_crypto_sym_xform *xform)
RTE_CRYPTO_AUTH_OP_GENERATE) {
res = NITROX_CHAIN_CIPHER_AUTH;
} else {
- NITROX_LOG(ERR, "cipher op %d, auth op %d\n",
+ NITROX_LOG_LINE(ERR, "cipher op %d, auth op %d",
xform->cipher.op, xform->next->auth.op);
}
}
@@ -325,7 +325,7 @@ get_flexi_cipher_type(enum rte_crypto_cipher_algorithm algo, bool *is_aes)
break;
default:
type = CIPHER_INVALID;
- NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
+ NITROX_LOG_LINE(ERR, "Algorithm not supported %d", algo);
break;
}
@@ -351,7 +351,7 @@ flexi_aes_keylen(size_t keylen, bool is_aes)
aes_keylen = 3;
break;
default:
- NITROX_LOG(ERR, "Invalid keylen %zu\n", keylen);
+ NITROX_LOG_LINE(ERR, "Invalid keylen %zu", keylen);
aes_keylen = -EINVAL;
break;
}
@@ -364,7 +364,7 @@ crypto_key_is_valid(struct rte_crypto_cipher_xform *xform,
struct flexi_crypto_context *fctx)
{
if (unlikely(xform->key.length > sizeof(fctx->crypto.key))) {
- NITROX_LOG(ERR, "Invalid crypto key length %d\n",
+ NITROX_LOG_LINE(ERR, "Invalid crypto key length %d",
xform->key.length);
return false;
}
@@ -427,7 +427,7 @@ get_flexi_auth_type(enum rte_crypto_auth_algorithm algo)
type = AUTH_SHA2_SHA256;
break;
default:
- NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
+ NITROX_LOG_LINE(ERR, "Algorithm not supported %d", algo);
type = AUTH_INVALID;
break;
}
@@ -440,12 +440,12 @@ auth_key_is_valid(const uint8_t *data, uint16_t length,
struct flexi_crypto_context *fctx)
{
if (unlikely(!data && length)) {
- NITROX_LOG(ERR, "Invalid auth key\n");
+ NITROX_LOG_LINE(ERR, "Invalid auth key");
return false;
}
if (unlikely(length > sizeof(fctx->auth.opad))) {
- NITROX_LOG(ERR, "Invalid auth key length %d\n",
+ NITROX_LOG_LINE(ERR, "Invalid auth key length %d",
length);
return false;
}
@@ -488,7 +488,7 @@ configure_aead_ctx(struct rte_crypto_aead_xform *xform,
struct flexi_crypto_context *fctx = &ctx->fctx;
if (unlikely(xform->aad_length > FLEXI_CRYPTO_MAX_AAD_LEN)) {
- NITROX_LOG(ERR, "AAD length %d not supported\n",
+ NITROX_LOG_LINE(ERR, "AAD length %d not supported",
xform->aad_length);
return -ENOTSUP;
}
@@ -515,14 +515,14 @@ configure_aead_ctx(struct rte_crypto_aead_xform *xform,
if (unlikely(xform->digest_length < 4 ||
xform->digest_length > 16 ||
(xform->digest_length & 1) == 1)) {
- NITROX_LOG(ERR, "Invalid digest length %d\n",
+ NITROX_LOG_LINE(ERR, "Invalid digest length %d",
xform->digest_length);
return -EINVAL;
}
L = 15 - xform->iv.length;
if (unlikely(L < 2 || L > 8)) {
- NITROX_LOG(ERR, "Invalid iv length %d\n",
+ NITROX_LOG_LINE(ERR, "Invalid iv length %d",
xform->iv.length);
return -EINVAL;
}
@@ -581,23 +581,23 @@ nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev __rte_unused,
aead_xform = &xform->aead;
break;
default:
- NITROX_LOG(ERR, "Crypto chain not supported\n");
+ NITROX_LOG_LINE(ERR, "Crypto chain not supported");
ret = -ENOTSUP;
goto err;
}
if (cipher_xform && unlikely(configure_cipher_ctx(cipher_xform, ctx))) {
- NITROX_LOG(ERR, "Failed to configure cipher ctx\n");
+ NITROX_LOG_LINE(ERR, "Failed to configure cipher ctx");
goto err;
}
if (auth_xform && unlikely(configure_auth_ctx(auth_xform, ctx))) {
- NITROX_LOG(ERR, "Failed to configure auth ctx\n");
+ NITROX_LOG_LINE(ERR, "Failed to configure auth ctx");
goto err;
}
if (aead_xform && unlikely(configure_aead_ctx(aead_xform, ctx))) {
- NITROX_LOG(ERR, "Failed to configure aead ctx\n");
+ NITROX_LOG_LINE(ERR, "Failed to configure aead ctx");
goto err;
}
@@ -763,7 +763,7 @@ nitrox_sym_pmd_create(struct nitrox_device *ndev)
cdev = rte_cryptodev_pmd_create(name, &ndev->rte_sym_dev,
&init_params);
if (!cdev) {
- NITROX_LOG(ERR, "Cryptodev '%s' creation failed\n", name);
+ NITROX_LOG_LINE(ERR, "Cryptodev '%s' creation failed", name);
return -ENODEV;
}
@@ -787,7 +787,7 @@ nitrox_sym_pmd_create(struct nitrox_device *ndev)
rte_cryptodev_pmd_probing_finish(cdev);
- NITROX_LOG(DEBUG, "Created cryptodev '%s', dev_id %d, drv_id %d\n",
+ NITROX_LOG_LINE(DEBUG, "Created cryptodev '%s', dev_id %d, drv_id %d",
cdev->data->name, cdev->data->dev_id, nitrox_sym_drv_id);
return 0;
}
@@ -466,7 +466,7 @@ create_cipher_auth_sglist(struct nitrox_softreq *sr,
if (unlikely(
op->sym->cipher.data.offset + op->sym->cipher.data.length !=
op->sym->auth.data.offset + op->sym->auth.data.length)) {
- NITROX_LOG(ERR, "Auth only data after cipher data not supported\n");
+ NITROX_LOG_LINE(ERR, "Auth only data after cipher data not supported");
return -ENOTSUP;
}
@@ -679,7 +679,7 @@ softreq_copy_salt(struct nitrox_softreq *sr)
uint8_t *addr;
if (unlikely(ctx->iv.length < AES_GCM_SALT_SIZE)) {
- NITROX_LOG(ERR, "Invalid IV length %d\n", ctx->iv.length);
+ NITROX_LOG_LINE(ERR, "Invalid IV length %d", ctx->iv.length);
return -EINVAL;
}
@@ -829,8 +829,8 @@ nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
if (unlikely(op->sym->m_src->nb_segs > MAX_SUPPORTED_MBUF_SEGS ||
(op->sym->m_dst &&
op->sym->m_dst->nb_segs > MAX_SUPPORTED_MBUF_SEGS))) {
- NITROX_LOG(ERR, "Mbuf segments not supported. "
- "Max supported %d\n", MAX_SUPPORTED_MBUF_SEGS);
+ NITROX_LOG_LINE(ERR, "Mbuf segments not supported. "
+ "Max supported %d", MAX_SUPPORTED_MBUF_SEGS);
return -ENOTSUP;
}
@@ -865,7 +865,7 @@ nitrox_check_se_req(struct nitrox_softreq *sr, struct rte_crypto_op **op)
return -EAGAIN;
if (unlikely(err))
- NITROX_LOG(ERR, "Request err 0x%x, orh 0x%"PRIx64"\n", err,
+ NITROX_LOG_LINE(ERR, "Request err 0x%x, orh 0x%"PRIx64, err,
sr->resp.orh);
*op = sr->op;
@@ -901,7 +901,7 @@ nitrox_sym_req_pool_create(struct rte_cryptodev *cdev, uint32_t nobjs,
64, 0, NULL, NULL, req_pool_obj_init, NULL,
socket_id, 0);
if (unlikely(!mp))
- NITROX_LOG(ERR, "Failed to create req pool, qid %d, err %d\n",
+ NITROX_LOG_LINE(ERR, "Failed to create req pool, qid %d, err %d",
qp_id, rte_errno);
return mp;
@@ -66,17 +66,9 @@ extern int odm_logtype;
#define odm_read64(addr) rte_read64_relaxed((volatile void *)(addr))
#define odm_write64(val, addr) rte_write64_relaxed((val), (volatile void *)(addr))
-#define odm_err(...) \
- rte_log(RTE_LOG_ERR, odm_logtype, \
- RTE_FMT("%s(): %u" RTE_FMT_HEAD(__VA_ARGS__, ), __func__, __LINE__, \
- RTE_FMT_TAIL(__VA_ARGS__, )))
-#define odm_info(...) \
- rte_log(RTE_LOG_INFO, odm_logtype, \
- RTE_FMT("%s(): %u" RTE_FMT_HEAD(__VA_ARGS__, ), __func__, __LINE__, \
- RTE_FMT_TAIL(__VA_ARGS__, )))
-#define odm_debug(...) \
- rte_log(RTE_LOG_DEBUG, odm_logtype, \
- RTE_FMT("%s(): %u" RTE_FMT_HEAD(__VA_ARGS__, ), __func__, __LINE__, \
+#define ODM_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, odm_logtype, \
+ RTE_FMT("%s(): %u" RTE_FMT_HEAD(__VA_ARGS__,) "\n", __func__, __LINE__, \
RTE_FMT_TAIL(__VA_ARGS__, )))
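(Sketch, not part of the patch: the three per-level odm_*() macros collapse into a
single macro parameterized by level, so for example

    odm_err("DMA device allocation failed for %s", name);

is rewritten below as

    ODM_LOG(ERR, "DMA device allocation failed for %s", name);

and both resolve to the same rte_log(RTE_LOG_ERR, odm_logtype, ...) call, the new
form with "\n" pasted onto the format via RTE_FMT_HEAD().)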
/*
@@ -494,10 +494,10 @@ odm_dmadev_completed_status(void *dev_private, uint16_t vchan, const uint16_t nb
}
#ifdef ODM_DEBUG
- odm_debug("cring_head: 0x%" PRIx16, cring_head);
- odm_debug("Submitted: 0x%" PRIx64, vq->stats.submitted);
- odm_debug("Completed: 0x%" PRIx64, vq->stats.completed);
- odm_debug("Hardware count: 0x%" PRIx64, odm_read64(odm->rbase + ODM_VDMA_CNT(vchan)));
+ ODM_LOG(DEBUG, "cring_head: 0x%" PRIx16, cring_head);
+ ODM_LOG(DEBUG, "Submitted: 0x%" PRIx64, vq->stats.submitted);
+ ODM_LOG(DEBUG, "Completed: 0x%" PRIx64, vq->stats.completed);
+ ODM_LOG(DEBUG, "Hardware count: 0x%" PRIx64, odm_read64(odm->rbase + ODM_VDMA_CNT(vchan)));
#endif
for (cnt = 0; cnt < nb_cpls; cnt++) {
@@ -651,11 +651,11 @@ odm_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_dev
dmadev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node, sizeof(*odm));
if (dmadev == NULL) {
- odm_err("DMA device allocation failed for %s", name);
+ ODM_LOG(ERR, "DMA device allocation failed for %s", name);
return -ENOMEM;
}
- odm_info("DMA device %s probed", name);
+ ODM_LOG(INFO, "DMA device %s probed", name);
odm = dmadev->data->dev_private;
dmadev->device = &pci_dev->device;
@@ -302,12 +302,12 @@ dsw_pmd_priv(const struct rte_eventdev *eventdev)
return eventdev->data->dev_private;
}
-#define DSW_LOG_DP(level, fmt, args...) \
- RTE_LOG_DP(level, EVENTDEV, "[%s] %s() line %u: " fmt, \
+#define DSW_LOG_DP_LINE(level, fmt, args...) \
+ RTE_LOG_DP(level, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
DSW_PMD_NAME, \
__func__, __LINE__, ## args)
-#define DSW_LOG_DP_PORT(level, port_id, fmt, args...) \
- DSW_LOG_DP(level, "<Port %d> " fmt, port_id, ## args)
+#define DSW_LOG_DP_PORT_LINE(level, port_id, fmt, args...) \
+ DSW_LOG_DP_LINE(level, "<Port %d> " fmt, port_id, ## args)
#endif
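(Sketch, not part of the patch: the port-scoped wrapper just layers a prefix onto
DSW_LOG_DP_LINE, so

    DSW_LOG_DP_PORT_LINE(DEBUG, port->id, "Dequeued %d events.", dequeued);

expands to

    RTE_LOG_DP(DEBUG, EVENTDEV,
               "[%s] %s() line %u: <Port %d> Dequeued %d events.\n",
               DSW_PMD_NAME, __func__, __LINE__, port->id, dequeued);

RTE_LOG_DP() remains the data-path variant that compiles out below
RTE_LOG_DP_LEVEL, exactly as before; only the "\n" placement moves into the macro.)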
@@ -59,7 +59,7 @@ dsw_port_acquire_credits(struct dsw_evdev *dsw, struct dsw_port *port,
return false;
}
- DSW_LOG_DP_PORT(DEBUG, port->id, "Acquired %d tokens from pool.\n",
+ DSW_LOG_DP_PORT_LINE(DEBUG, port->id, "Acquired %d tokens from pool.",
acquired_credits);
port->inflight_credits += acquired_credits;
@@ -85,8 +85,8 @@ dsw_port_return_credits(struct dsw_evdev *dsw, struct dsw_port *port,
return_credits,
rte_memory_order_relaxed);
- DSW_LOG_DP_PORT(DEBUG, port->id,
- "Returned %d tokens to pool.\n",
+ DSW_LOG_DP_PORT_LINE(DEBUG, port->id,
+ "Returned %d tokens to pool.",
return_credits);
}
}
@@ -264,8 +264,8 @@ dsw_port_add_paused_flows(struct dsw_port *port, struct dsw_queue_flow *qfs,
for (i = 0; i < qfs_len; i++) {
struct dsw_queue_flow *qf = &qfs[i];
- DSW_LOG_DP_PORT(DEBUG, port->id,
- "Pausing queue_id %d flow_hash %d.\n",
+ DSW_LOG_DP_PORT_LINE(DEBUG, port->id,
+ "Pausing queue_id %d flow_hash %d.",
qf->queue_id, qf->flow_hash);
port->paused_flows[port->paused_flows_len] = *qf;
@@ -290,8 +290,8 @@ dsw_port_remove_paused_flow(struct dsw_port *port,
port->paused_flows[last_idx];
port->paused_flows_len--;
- DSW_LOG_DP_PORT(DEBUG, port->id,
- "Unpausing queue_id %d flow_hash %d.\n",
+ DSW_LOG_DP_PORT_LINE(DEBUG, port->id,
+ "Unpausing queue_id %d flow_hash %d.",
target_qf->queue_id,
target_qf->flow_hash);
@@ -299,8 +299,8 @@ dsw_port_remove_paused_flow(struct dsw_port *port,
}
}
- DSW_LOG_DP_PORT(ERR, port->id,
- "Failed to unpause queue_id %d flow_hash %d.\n",
+ DSW_LOG_DP_PORT_LINE(ERR, port->id,
+ "Failed to unpause queue_id %d flow_hash %d.",
target_qf->queue_id, target_qf->flow_hash);
RTE_VERIFY(0);
}
@@ -519,9 +519,9 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
if (candidate_weight < 0)
return false;
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "Selected queue_id %d "
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "Selected queue_id %d "
"flow_hash %d (with flow load %d) for migration "
- "to port %d.\n", candidate_qf->queue_id,
+ "to port %d.", candidate_qf->queue_id,
candidate_qf->flow_hash,
DSW_LOAD_TO_PERCENT(candidate_flow_load),
candidate_port_id);
@@ -566,9 +566,9 @@ dsw_select_emigration_targets(struct dsw_evdev *dsw,
}
if (*targets_len == 0)
- DSW_LOG_DP_PORT(DEBUG, source_port->id,
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id,
"For the %d flows considered, no target port "
- "was found.\n", num_bursts);
+ "was found.", num_bursts);
}
static uint8_t
@@ -585,8 +585,8 @@ dsw_schedule(struct dsw_evdev *dsw, uint8_t queue_id, uint16_t flow_hash)
*/
port_id = rte_bsf64(queue->serving_ports);
- DSW_LOG_DP(DEBUG, "Event with queue_id %d flow_hash %d is scheduled "
- "to port %d.\n", queue_id, flow_hash, port_id);
+ DSW_LOG_DP_LINE(DEBUG, "Event with queue_id %d flow_hash %d is scheduled "
+ "to port %d.", queue_id, flow_hash, port_id);
return port_id;
}
@@ -774,8 +774,8 @@ dsw_port_end_emigration(struct dsw_evdev *dsw, struct dsw_port *port,
continue;
}
- DSW_LOG_DP_PORT(DEBUG, port->id, "Migration completed for "
- "queue_id %d flow_hash %d.\n", queue_id,
+ DSW_LOG_DP_PORT_LINE(DEBUG, port->id, "Migration completed for "
+ "queue_id %d flow_hash %d.", queue_id,
flow_hash);
}
@@ -844,27 +844,27 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
if (dsw->num_ports == 1)
return;
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering emigration.\n");
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "Considering emigration.");
/* For simplicity, postpone migration if there are still
* events to consume in the in_buffer (from the last
* emigration).
*/
if (source_port->in_buffer_len > 0) {
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "There are still "
- "events in the input buffer.\n");
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "There are still "
+ "events in the input buffer.");
return;
}
if (source_port->migration_state != DSW_MIGRATION_STATE_IDLE) {
- DSW_LOG_DP_PORT(DEBUG, source_port->id,
- "Emigration already in progress.\n");
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id,
+ "Emigration already in progress.");
return;
}
if (seen_events_len < DSW_MAX_EVENTS_RECORDED) {
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "Not enough events "
- "are recorded to allow for a migration.\n");
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "Not enough events "
+ "are recorded to allow for a migration.");
return;
}
@@ -873,8 +873,8 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
* leading to wasted CPU cycles (e.g., sorting queue flows).
*/
if (source_port->paused_events_len > 0) {
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "Paused events on "
- "port. Postponing any migrations.\n");
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "Paused events on "
+ "port. Postponing any migrations.");
return;
}
@@ -890,8 +890,8 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
rte_atomic_load_explicit(&source_port->load,
rte_memory_order_relaxed);
if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
- DSW_LOG_DP_PORT(DEBUG, source_port->id,
- "Load %d is below threshold level %d.\n",
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id,
+ "Load %d is below threshold level %d.",
DSW_LOAD_TO_PERCENT(source_port_load),
DSW_LOAD_TO_PERCENT(DSW_MIN_SOURCE_LOAD_FOR_MIGRATION));
return;
@@ -904,9 +904,9 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
dsw_retrieve_port_loads(dsw, port_loads,
DSW_MAX_TARGET_LOAD_FOR_MIGRATION);
if (!any_port_below_limit) {
- DSW_LOG_DP_PORT(DEBUG, source_port->id,
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id,
"Candidate target ports are all too highly "
- "loaded.\n");
+ "loaded.");
return;
}
@@ -917,8 +917,8 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
* only (known) flow.
*/
if (num_bursts < 2) {
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "Only a single flow "
- "queue_id %d flow_hash %d has been seen.\n",
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "Only a single flow "
+ "queue_id %d flow_hash %d has been seen.",
bursts[0].queue_flow.queue_id,
bursts[0].queue_flow.flow_hash);
return;
@@ -980,8 +980,8 @@ dsw_port_continue_emigration(struct dsw_evdev *dsw,
* unpause.
*/
if (source_port->paused_events_len > 0) {
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "There are events in "
- "the pause buffer. Aborting migration.\n");
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "There are events in "
+ "the pause buffer. Aborting migration.");
dsw_port_abort_migration(source_port);
return;
}
@@ -1339,8 +1339,8 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
bool enough_credits;
uint16_t i;
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "Attempting to enqueue %d "
- "events.\n", events_len);
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "Attempting to enqueue %d "
+ "events.", events_len);
dsw_port_bg_process(dsw, source_port);
@@ -1414,8 +1414,8 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
dsw_port_queue_enqueue_stats(source_port, event->queue_id);
}
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "%d non-release events "
- "accepted.\n", num_new + num_forward);
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "%d non-release events "
+ "accepted.", num_new + num_forward);
return (num_new + num_forward + num_release);
}
@@ -1581,7 +1581,7 @@ dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num,
dsw_port_note_op(source_port, dequeued);
if (dequeued > 0) {
- DSW_LOG_DP_PORT(DEBUG, source_port->id, "Dequeued %d events.\n",
+ DSW_LOG_DP_PORT_LINE(DEBUG, source_port->id, "Dequeued %d events.",
dequeued);
/* One potential optimization one might think of is to
@@ -63,10 +63,10 @@
#endif
RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);
+#define RTE_LOGTYPE_NET_AF_XDP af_xdp_logtype
-#define AF_XDP_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, af_xdp_logtype, \
- "%s(): " fmt, __func__, ##args)
+#define AF_XDP_LOG_LINE(level, fmt, args...) \
+ RTE_LOG(level, NET_AF_XDP, "%s(): " fmt "\n", __func__, ##args)
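(Sketch, not part of the patch: with RTE_LOGTYPE_NET_AF_XDP aliasing the registered
af_xdp_logtype, a call like

    AF_XDP_LOG_LINE(ERR, "bpf_xdp_query_id failed");

expands to approximately

    rte_log(RTE_LOG_ERR, af_xdp_logtype,
            "NET_AF_XDP: %s(): bpf_xdp_query_id failed\n", __func__);

so the driver keeps its per-function prefix while gaining the standard logtype
prefix and the appended newline.)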
#define ETH_AF_XDP_FRAME_SIZE 2048
#define ETH_AF_XDP_NUM_BUFFERS 4096
@@ -243,7 +243,7 @@ reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
for (i = 0; i < reserve_size; i++)
rte_pktmbuf_free(bufs[i]);
- AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
+ AF_XDP_LOG_LINE(DEBUG, "Failed to reserve enough fq descs.");
return -1;
}
@@ -273,12 +273,12 @@ reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
!= reserve_size) {
- AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
+ AF_XDP_LOG_LINE(DEBUG, "Failed to get enough buffers for fq.");
return -1;
}
if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
- AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
+ AF_XDP_LOG_LINE(DEBUG, "Failed to reserve enough fq descs.");
rte_ring_enqueue_bulk(umem->buf_ring, addrs,
reserve_size, NULL);
return -1;
@@ -342,8 +342,8 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* allocate bufs for fill queue replenishment after rx */
if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
- AF_XDP_LOG(DEBUG,
- "Failed to get enough buffers for fq.\n");
+ AF_XDP_LOG_LINE(DEBUG,
+ "Failed to get enough buffers for fq.");
/* rollback cached_cons which is added by
* xsk_ring_cons__peek
*/
@@ -888,7 +888,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS,
&xdp_stats, &optlen) : -1;
if (ret != 0) {
- AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
+ AF_XDP_LOG_LINE(ERR, "getsockopt() failed for XDP_STATISTICS.");
return -1;
}
stats->imissed += xdp_stats.rx_dropped - rxq->stats.imissed_offset;
@@ -919,7 +919,7 @@ eth_stats_reset(struct rte_eth_dev *dev)
ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS,
&xdp_stats, &optlen) : -1;
if (ret != 0) {
- AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
+ AF_XDP_LOG_LINE(ERR, "getsockopt() failed for XDP_STATISTICS.");
return -1;
}
internals->rx_queues[i].stats.imissed_offset = xdp_stats.rx_dropped;
@@ -944,14 +944,14 @@ remove_xdp_program(struct pmd_internals *internals)
ret = bpf_xdp_query_id(internals->if_index, XDP_FLAGS_UPDATE_IF_NOEXIST,
&curr_prog_id);
if (ret != 0) {
- AF_XDP_LOG(ERR, "bpf_xdp_query_id failed\n");
+ AF_XDP_LOG_LINE(ERR, "bpf_xdp_query_id failed");
return ret;
}
ret = bpf_xdp_detach(internals->if_index, XDP_FLAGS_UPDATE_IF_NOEXIST,
NULL);
if (ret != 0)
- AF_XDP_LOG(ERR, "bpf_xdp_detach failed\n");
+ AF_XDP_LOG_LINE(ERR, "bpf_xdp_detach failed");
return ret;
}
@@ -971,14 +971,14 @@ remove_xdp_program(struct pmd_internals *internals)
ret = bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
XDP_FLAGS_UPDATE_IF_NOEXIST);
if (ret != 0) {
- AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
+ AF_XDP_LOG_LINE(ERR, "bpf_get_link_xdp_id failed");
return ret;
}
ret = bpf_set_link_xdp_fd(internals->if_index, -1,
XDP_FLAGS_UPDATE_IF_NOEXIST);
if (ret != 0)
- AF_XDP_LOG(ERR, "bpf_set_link_xdp_fd failed\n");
+ AF_XDP_LOG_LINE(ERR, "bpf_set_link_xdp_fd failed");
return ret;
}
@@ -1013,7 +1013,7 @@ eth_dev_close(struct rte_eth_dev *dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
goto out;
- AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
+ AF_XDP_LOG_LINE(INFO, "Closing AF_XDP ethdev on numa socket %u",
rte_socket_id());
for (i = 0; i < internals->queue_cnt; i++) {
@@ -1038,7 +1038,7 @@ eth_dev_close(struct rte_eth_dev *dev)
dev->data->mac_addrs = NULL;
if (remove_xdp_program(internals) != 0)
- AF_XDP_LOG(ERR, "Error while removing XDP program.\n");
+ AF_XDP_LOG_LINE(ERR, "Error while removing XDP program.");
if (internals->shared_umem) {
struct internal_list *list;
@@ -1076,7 +1076,7 @@ ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
!strncmp(ifname, list_ifname, IFNAMSIZ)) {
- AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
+ AF_XDP_LOG_LINE(ERR, "ctx %s,%i already exists, cannot share umem",
ifname, rxq->xsk_queue_idx);
exists = true;
}
@@ -1149,7 +1149,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
if (umem != NULL &&
rte_atomic_load_explicit(&umem->refcnt, rte_memory_order_acquire) <
umem->max_xsks) {
- AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
+ AF_XDP_LOG_LINE(INFO, "%s,qid%i sharing UMEM",
internals->if_name, rxq->xsk_queue_idx);
rte_atomic_fetch_add_explicit(&umem->refcnt, 1, rte_memory_order_acquire);
}
@@ -1167,18 +1167,18 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
rte_socket_id());
if (umem == NULL) {
- AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to allocate umem info");
return NULL;
}
umem->mb_pool = mb_pool;
ret = rte_mempool_get_mem_range(mb_pool, &range);
if (ret < 0) {
- AF_XDP_LOG(ERR, "Failed(%d) to get range from mempool\n", ret);
+ AF_XDP_LOG_LINE(ERR, "Failed(%d) to get range from mempool", ret);
goto err;
}
if (!range.is_contiguous) {
- AF_XDP_LOG(ERR, "Can't mapped to umem as mempool is not contiguous\n");
+ AF_XDP_LOG_LINE(ERR, "Can't mapped to umem as mempool is not contiguous");
goto err;
}
/*
@@ -1190,7 +1190,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
ret = xsk_umem__create(&umem->umem, aligned_addr, umem_size,
&rxq->fq, &rxq->cq, &usr_config);
if (ret) {
- AF_XDP_LOG(ERR, "Failed to create umem [%d]: [%s]\n",
+ AF_XDP_LOG_LINE(ERR, "Failed to create umem [%d]: [%s]",
errno, strerror(errno));
goto err;
}
@@ -1199,7 +1199,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
if (internals->shared_umem) {
umem->max_xsks = mb_pool->populated_size /
ETH_AF_XDP_NUM_BUFFERS;
- AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
+ AF_XDP_LOG_LINE(INFO, "Max xsks for UMEM %s: %u",
mb_pool->name, umem->max_xsks);
}
@@ -1231,7 +1231,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
if (umem == NULL) {
- AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to allocate umem info");
return NULL;
}
@@ -1242,7 +1242,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
rte_socket_id(),
0x0);
if (umem->buf_ring == NULL) {
- AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to create rte_ring");
goto err;
}
@@ -1257,7 +1257,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
getpagesize());
if (mz == NULL) {
- AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to reserve memzone for af_xdp umem.");
goto err;
}
umem->mz = mz;
@@ -1268,7 +1268,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
&usr_config);
if (ret) {
- AF_XDP_LOG(ERR, "Failed to create umem\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to create umem");
goto err;
}
@@ -1285,11 +1285,11 @@ get_pinned_map(const char *dp_path, int *map_fd)
{
*map_fd = bpf_obj_get(dp_path);
if (!*map_fd) {
- AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", dp_path);
+ AF_XDP_LOG_LINE(ERR, "Failed to find xsks_map in %s", dp_path);
return -1;
}
- AF_XDP_LOG(INFO, "Successfully retrieved map %s with fd %d\n",
+ AF_XDP_LOG_LINE(INFO, "Successfully retrieved map %s with fd %d",
dp_path, *map_fd);
return 0;
@@ -1303,7 +1303,7 @@ load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
prog_fd = load_program(prog_path, &obj);
if (prog_fd < 0) {
- AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
+ AF_XDP_LOG_LINE(ERR, "Failed to load program %s", prog_path);
return -1;
}
@@ -1313,7 +1313,7 @@ load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
*/
*map = bpf_object__find_map_by_name(obj, "xsks_map");
if (!*map) {
- AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
+ AF_XDP_LOG_LINE(ERR, "Failed to find xsks_map in %s", prog_path);
return -1;
}
@@ -1321,12 +1321,12 @@ load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
ret = link_xdp_prog_with_dev(if_index, prog_fd,
XDP_FLAGS_UPDATE_IF_NOEXIST);
if (ret) {
- AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
+ AF_XDP_LOG_LINE(ERR, "Failed to set prog fd %d on interface",
prog_fd);
return -1;
}
- AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
+ AF_XDP_LOG_LINE(INFO, "Successfully loaded XDP program %s with fd %d",
prog_path, prog_fd);
return 0;
@@ -1343,7 +1343,7 @@ configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
(void *)&sock_opt, sizeof(sock_opt));
if (ret < 0) {
- AF_XDP_LOG(DEBUG, "Failed to set SO_PREFER_BUSY_POLL\n");
+ AF_XDP_LOG_LINE(DEBUG, "Failed to set SO_PREFER_BUSY_POLL");
goto err_prefer;
}
@@ -1351,7 +1351,7 @@ configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
sizeof(sock_opt));
if (ret < 0) {
- AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL\n");
+ AF_XDP_LOG_LINE(DEBUG, "Failed to set SO_BUSY_POLL");
goto err_timeout;
}
@@ -1359,9 +1359,9 @@ configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
(void *)&sock_opt, sizeof(sock_opt));
if (ret < 0) {
- AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL_BUDGET\n");
+ AF_XDP_LOG_LINE(DEBUG, "Failed to set SO_BUSY_POLL_BUDGET");
} else {
- AF_XDP_LOG(INFO, "Busy polling budget set to: %u\n",
+ AF_XDP_LOG_LINE(INFO, "Busy polling budget set to: %u",
rxq->busy_budget);
return 0;
}
@@ -1373,7 +1373,7 @@ configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
sizeof(sock_opt));
if (ret < 0) {
- AF_XDP_LOG(ERR, "Failed to unset SO_BUSY_POLL\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to unset SO_BUSY_POLL");
return -1;
}
@@ -1382,7 +1382,7 @@ configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
(void *)&sock_opt, sizeof(sock_opt));
if (ret < 0) {
- AF_XDP_LOG(ERR, "Failed to unset SO_PREFER_BUSY_POLL\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to unset SO_PREFER_BUSY_POLL");
return -1;
}
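The sequence above is three plain setsockopt() calls on the AF_XDP socket fd. A standalone sketch, assuming a kernel recent enough to provide the busy-poll options (the fallback defines mirror the Linux UAPI values):

#include <sys/socket.h>

#ifndef SO_PREFER_BUSY_POLL
#define SO_PREFER_BUSY_POLL 69
#endif
#ifndef SO_BUSY_POLL_BUDGET
#define SO_BUSY_POLL_BUDGET 70
#endif

static int busy_poll_sketch(int fd, int usecs, int budget)
{
        int one = 1;

        if (setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &one, sizeof(one)) < 0)
                return -1;
        /* busy-poll for up to 'usecs' microseconds per receive syscall */
        if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs)) < 0)
                return -1;
        /* cap the number of packets processed per busy-poll cycle */
        return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget));
}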
@@ -1398,7 +1398,7 @@ init_uds_sock(struct sockaddr_un *server, const char *dp_path)
sock = socket(AF_UNIX, SOCK_SEQPACKET, 0);
if (sock < 0) {
- AF_XDP_LOG(ERR, "Failed to opening stream socket\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to opening stream socket");
return -1;
}
@@ -1407,7 +1407,7 @@ init_uds_sock(struct sockaddr_un *server, const char *dp_path)
if (connect(sock, (struct sockaddr *)server, sizeof(struct sockaddr_un)) < 0) {
close(sock);
- AF_XDP_LOG(ERR, "Error connecting stream socket errno = [%d]: [%s]\n",
+ AF_XDP_LOG_LINE(ERR, "Error connecting stream socket errno = [%d]: [%s]",
errno, strerror(errno));
return -1;
}
@@ -1492,7 +1492,7 @@ read_msg(int sock, char *response, struct sockaddr_un *s, int *fd)
return 0;
if (msglen < 0) {
- AF_XDP_LOG(ERR, "recvmsg failed, %s\n", strerror(errno));
+ AF_XDP_LOG_LINE(ERR, "recvmsg failed, %s", strerror(errno));
return -1;
}
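read_msg() receives the xsk map fd as SCM_RIGHTS ancillary data alongside the text response. A minimal sketch of that receive pattern (the function name and buffer handling are illustrative):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t recv_with_fd_sketch(int sock, char *buf, size_t len, int *fd)
{
        char cbuf[CMSG_SPACE(sizeof(int))];
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg;
        ssize_t n = recvmsg(sock, &msg, 0);

        *fd = -1; /* no descriptor unless ancillary data says otherwise */
        if (n <= 0)
                return n;
        cmsg = CMSG_FIRSTHDR(&msg);
        if (cmsg && cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS)
                memcpy(fd, CMSG_DATA(cmsg), sizeof(int));
        return n;
}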
@@ -1516,7 +1516,7 @@ make_request_dp(int sock, struct sockaddr_un *server, char *request,
{
int rval;
- AF_XDP_LOG(DEBUG, "Request: [%s]\n", request);
+ AF_XDP_LOG_LINE(DEBUG, "Request: [%s]", request);
/* if no file descriptor to send then directly write to socket.
* else use sendmsg() to send the file descriptor.
@@ -1527,16 +1527,16 @@ make_request_dp(int sock, struct sockaddr_un *server, char *request,
rval = send_msg(sock, request, req_fd, dp_path);
if (rval < 0) {
- AF_XDP_LOG(ERR, "Write error %s\n", strerror(errno));
+ AF_XDP_LOG_LINE(ERR, "Write error %s", strerror(errno));
return -1;
}
rval = read_msg(sock, response, server, out_fd);
if (rval <= 0) {
- AF_XDP_LOG(ERR, "Read error %d\n", rval);
+ AF_XDP_LOG_LINE(ERR, "Read error %d", rval);
return -1;
}
- AF_XDP_LOG(DEBUG, "Response: [%s]\n", request);
+ AF_XDP_LOG_LINE(DEBUG, "Response: [%s]", request);
return 0;
}
@@ -1569,21 +1569,21 @@ uds_get_xskmap_fd(char *if_name, const char *dp_path)
snprintf(request, sizeof(request), "%s,%s", UDS_CONNECT_MSG, hostname);
memset(response, 0, sizeof(response));
if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) {
- AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request);
+ AF_XDP_LOG_LINE(ERR, "Error in processing cmd [%s]", request);
goto err_close;
}
/* Expect /host_ok */
strlcpy(exp_resp, UDS_HOST_OK_MSG, UDS_MAX_CMD_LEN);
if (check_response(response, exp_resp, strlen(exp_resp)) < 0) {
- AF_XDP_LOG(ERR, "Unexpected response [%s]\n", response);
+ AF_XDP_LOG_LINE(ERR, "Unexpected response [%s]", response);
goto err_close;
}
/* Request for "/version" */
strlcpy(request, UDS_VERSION_MSG, UDS_MAX_CMD_LEN);
memset(response, 0, sizeof(response));
if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) {
- AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request);
+ AF_XDP_LOG_LINE(ERR, "Error in processing cmd [%s]", request);
goto err_close;
}
@@ -1591,12 +1591,12 @@ uds_get_xskmap_fd(char *if_name, const char *dp_path)
snprintf(request, sizeof(request), "%s,%s", UDS_XSK_MAP_FD_MSG, if_name);
memset(response, 0, sizeof(response));
if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) {
- AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request);
+ AF_XDP_LOG_LINE(ERR, "Error in processing cmd [%s]", request);
goto err_close;
}
if (out_fd < 0) {
- AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request);
+ AF_XDP_LOG_LINE(ERR, "Error in processing cmd [%s]", request);
goto err_close;
}
@@ -1605,7 +1605,7 @@ uds_get_xskmap_fd(char *if_name, const char *dp_path)
/* Expect fd_ack with file descriptor */
strlcpy(exp_resp, UDS_FD_ACK_MSG, UDS_MAX_CMD_LEN);
if (check_response(response, exp_resp, strlen(exp_resp)) < 0) {
- AF_XDP_LOG(ERR, "Unexpected response [%s]\n", response);
+ AF_XDP_LOG_LINE(ERR, "Unexpected response [%s]", response);
goto err_close;
}
@@ -1613,14 +1613,14 @@ uds_get_xskmap_fd(char *if_name, const char *dp_path)
strlcpy(request, UDS_FIN_MSG, UDS_MAX_CMD_LEN);
memset(response, 0, sizeof(response));
if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) {
- AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request);
+ AF_XDP_LOG_LINE(ERR, "Error in processing cmd [%s]", request);
goto err_close;
}
/* Connection close */
strlcpy(exp_resp, UDS_FIN_ACK_MSG, UDS_MAX_CMD_LEN);
if (check_response(response, exp_resp, strlen(exp_resp)) < 0) {
- AF_XDP_LOG(ERR, "Unexpected response [%s]\n", response);
+ AF_XDP_LOG_LINE(ERR, "Unexpected response [%s]", response);
goto err_close;
}
close(sock);
@@ -1653,7 +1653,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
if (ret) {
- AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
+ AF_XDP_LOG_LINE(DEBUG, "Failed to get enough buffers for fq.");
goto out_umem;
}
#endif
@@ -1662,7 +1662,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
if (reserve_before) {
ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
if (ret) {
- AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to reserve fill queue.");
goto out_umem;
}
}
@@ -1691,7 +1691,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
internals->if_index,
&internals->map);
if (ret) {
- AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
+ AF_XDP_LOG_LINE(ERR, "Failed to load custom XDP program %s",
internals->prog_path);
goto out_umem;
}
@@ -1710,7 +1710,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
&txq->tx, &cfg);
if (ret) {
- AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to create xsk socket.");
goto out_umem;
}
@@ -1718,7 +1718,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
/* reserve fill queue of queues sharing UMEM */
ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
if (ret) {
- AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to reserve fill queue.");
goto out_xsk;
}
}
@@ -1731,7 +1731,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
err = bpf_map_update_elem(bpf_map__fd(internals->map),
&rxq->xsk_queue_idx, &fd, 0);
if (err) {
- AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to insert xsk in map.");
goto out_xsk;
}
}
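Inserting the socket into the custom program's xsks_map is an ordinary BPF map update keyed by queue index. A minimal fragment (variable names are illustrative):

__u32 idx = rxq_queue_idx;              /* ethtool channel index */
int xsk_fd = xsk_socket__fd(xsk);       /* fd of the AF_XDP socket */

/* flags 0 == BPF_ANY: create or replace the entry */
if (bpf_map_update_elem(bpf_map__fd(map), &idx, &xsk_fd, 0))
        /* handle error */;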
@@ -1743,28 +1743,28 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
/* get socket fd from AF_XDP Device Plugin */
map_fd = uds_get_xskmap_fd(internals->if_name, internals->dp_path);
if (map_fd < 0) {
- AF_XDP_LOG(ERR, "Failed to receive xskmap fd from AF_XDP Device Plugin\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to receive xskmap fd from AF_XDP Device Plugin");
goto out_xsk;
}
} else {
/* get socket fd from AF_XDP plugin */
err = get_pinned_map(internals->dp_path, &map_fd);
if (err < 0 || map_fd < 0) {
- AF_XDP_LOG(ERR, "Failed to retrieve pinned map fd\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to retrieve pinned map fd");
goto out_xsk;
}
}
err = update_xskmap(rxq->xsk, map_fd, rxq->xsk_queue_idx);
if (err) {
- AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to insert xsk in map.");
goto out_xsk;
}
} else if (rxq->busy_budget) {
ret = configure_preferred_busy_poll(rxq);
if (ret) {
- AF_XDP_LOG(ERR, "Failed configure busy polling.\n");
+ AF_XDP_LOG_LINE(ERR, "Failed configure busy polling.");
goto out_xsk;
}
}
@@ -1795,7 +1795,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
rxq = &internals->rx_queues[rx_queue_id];
- AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
+ AF_XDP_LOG_LINE(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d",
rx_queue_id, rxq->xsk_queue_idx);
#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
@@ -1807,7 +1807,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
data_size = ETH_AF_XDP_FRAME_SIZE;
if (data_size > buf_size) {
- AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
+ AF_XDP_LOG_LINE(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)",
dev->device->name, data_size, buf_size);
ret = -ENOMEM;
goto err;
@@ -1817,13 +1817,13 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
rxq->mb_pool = mb_pool;
if (xsk_configure(internals, rxq, nb_rx_desc)) {
- AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to configure xdp socket");
ret = -EINVAL;
goto err;
}
if (!rxq->busy_budget)
- AF_XDP_LOG(DEBUG, "Preferred busy polling not enabled\n");
+ AF_XDP_LOG_LINE(DEBUG, "Preferred busy polling not enabled");
rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
rxq->fds[0].events = POLLIN;
@@ -1965,7 +1965,7 @@ parse_budget_arg(const char *key __rte_unused,
*i = strtol(value, &end, 10);
if (*i < 0 || *i > UINT16_MAX) {
- AF_XDP_LOG(ERR, "Invalid busy_budget %i, must be >= 0 and <= %u\n",
+ AF_XDP_LOG_LINE(ERR, "Invalid busy_budget %i, must be >= 0 and <= %u",
*i, UINT16_MAX);
return -EINVAL;
}
@@ -1983,7 +1983,7 @@ parse_integer_arg(const char *key __rte_unused,
*i = strtol(value, &end, 10);
if (*i < 0) {
- AF_XDP_LOG(ERR, "Argument has to be positive.\n");
+ AF_XDP_LOG_LINE(ERR, "Argument has to be positive.");
return -EINVAL;
}
@@ -1998,7 +1998,7 @@ parse_name_arg(const char *key __rte_unused,
char *name = extra_args;
if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
- AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
+ AF_XDP_LOG_LINE(ERR, "Invalid name %s, should be less than %u bytes.",
value, IFNAMSIZ);
return -EINVAL;
}
@@ -2016,13 +2016,13 @@ parse_prog_arg(const char *key __rte_unused,
char *path = extra_args;
if (strnlen(value, PATH_MAX) == PATH_MAX) {
- AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
+ AF_XDP_LOG_LINE(ERR, "Invalid path %s, should be less than %u bytes.",
value, PATH_MAX);
return -EINVAL;
}
if (access(value, F_OK) != 0) {
- AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
+ AF_XDP_LOG_LINE(ERR, "Error accessing %s: %s",
value, strerror(errno));
return -EINVAL;
}
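Each parse_*_arg() above follows the rte_kvargs handler contract: it is invoked once per matching key with the raw string value and an opaque output pointer. A hedged sketch (the key name and handler are illustrative):

#include <stdlib.h>
#include <rte_kvargs.h>

static int
int_arg_sketch(const char *key __rte_unused, const char *value, void *extra_args)
{
        int *out = extra_args;

        *out = atoi(value); /* real handlers use strtol() and range-check */
        return 0;
}

/* kvlist comes from rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments) */
ret = rte_kvargs_process(kvlist, "queue_count", &int_arg_sketch, &xsk_queue_cnt);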
@@ -2194,8 +2194,8 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
#ifndef ETH_AF_XDP_SHARED_UMEM
if (shared_umem) {
- AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
- "Check kernel and libbpf version\n");
+ AF_XDP_LOG_LINE(ERR, "Shared UMEM feature not available. "
+ "Check kernel and libbpf version");
goto err_free_internals;
}
#endif
@@ -2207,13 +2207,13 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
&internals->combined_queue_cnt)) {
- AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
+ AF_XDP_LOG_LINE(ERR, "Failed to get channel info of interface: %s",
if_name);
goto err_free_internals;
}
if (queue_cnt > internals->combined_queue_cnt) {
- AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
+ AF_XDP_LOG_LINE(ERR, "Specified queue count %d is larger than combined queue count %d.",
queue_cnt, internals->combined_queue_cnt);
goto err_free_internals;
}
@@ -2222,7 +2222,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
sizeof(struct pkt_rx_queue) * queue_cnt,
0, numa_node);
if (internals->rx_queues == NULL) {
- AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to allocate memory for rx queues.");
goto err_free_internals;
}
@@ -2230,7 +2230,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
sizeof(struct pkt_tx_queue) * queue_cnt,
0, numa_node);
if (internals->tx_queues == NULL) {
- AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to allocate memory for tx queues.");
goto err_free_rx;
}
for (i = 0; i < queue_cnt; i++) {
@@ -2250,7 +2250,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
rte_zmalloc_socket(name, sizeof(struct pmd_process_private),
RTE_CACHE_LINE_SIZE, numa_node);
if (process_private == NULL) {
- AF_XDP_LOG(ERR, "Failed to alloc memory for process private\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to alloc memory for process private");
goto err_free_tx;
}
@@ -2275,7 +2275,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
process_private->rxq_xsk_fds[i] = -1;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
- AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
+ AF_XDP_LOG_LINE(INFO, "Zero copy between umem and mbuf enabled.");
#endif
return eth_dev;
@@ -2310,17 +2310,17 @@ afxdp_mp_request_fds(const char *name, struct rte_eth_dev *dev)
request.len_param = sizeof(*request_param);
/* Send the request and receive the reply */
- AF_XDP_LOG(DEBUG, "Sending multi-process IPC request for %s\n", name);
+ AF_XDP_LOG_LINE(DEBUG, "Sending multi-process IPC request for %s", name);
ret = rte_mp_request_sync(&request, &replies, &timeout);
if (ret < 0 || replies.nb_received != 1) {
- AF_XDP_LOG(ERR, "Failed to request fds from primary: %d\n",
+ AF_XDP_LOG_LINE(ERR, "Failed to request fds from primary: %d",
rte_errno);
return -1;
}
reply = replies.msgs;
- AF_XDP_LOG(DEBUG, "Received multi-process IPC reply for %s\n", name);
+ AF_XDP_LOG_LINE(DEBUG, "Received multi-process IPC reply for %s", name);
if (dev->data->nb_rx_queues != reply->num_fds) {
- AF_XDP_LOG(ERR, "Incorrect number of fds received: %d != %d\n",
+ AF_XDP_LOG_LINE(ERR, "Incorrect number of fds received: %d != %d",
reply->num_fds, dev->data->nb_rx_queues);
return -EINVAL;
}
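The request side uses the generic EAL multi-process API: build an rte_mp_msg keyed by a shared action name, send it synchronously, and read the fds out of the reply. A hedged sketch (the key string is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <rte_eal.h>

struct rte_mp_msg req = { 0 };
struct rte_mp_reply replies;
struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

snprintf(req.name, sizeof(req.name), "%s", "eth_af_xdp_mp"); /* shared key */
if (rte_mp_request_sync(&req, &replies, &ts) < 0 || replies.nb_received != 1)
        /* handle error */;
/* replies.msgs[0].fds[0..num_fds-1] then carry the per-queue xsk fds */
free(replies.msgs); /* the reply array is allocated for the caller */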
@@ -2346,13 +2346,13 @@ afxdp_mp_send_fds(const struct rte_mp_msg *request, const void *peer)
const char *request_name = request_param->port_name;
int i;
- AF_XDP_LOG(DEBUG, "Received multi-process IPC request for %s\n",
+ AF_XDP_LOG_LINE(DEBUG, "Received multi-process IPC request for %s",
request_name);
/* Find the requested port */
dev = rte_eth_dev_get_by_name(request_name);
if (!dev) {
- AF_XDP_LOG(ERR, "Failed to get port id for %s\n", request_name);
+ AF_XDP_LOG_LINE(ERR, "Failed to get port id for %s", request_name);
return -1;
}
process_private = dev->process_private;
@@ -2360,7 +2360,7 @@ afxdp_mp_send_fds(const struct rte_mp_msg *request, const void *peer)
/* Populate the reply with the xsk fd for each queue */
reply.num_fds = 0;
if (dev->data->nb_rx_queues > RTE_MP_MAX_FD_NUM) {
- AF_XDP_LOG(ERR, "Number of rx queues (%d) exceeds max number of fds (%d)\n",
+ AF_XDP_LOG_LINE(ERR, "Number of rx queues (%d) exceeds max number of fds (%d)",
dev->data->nb_rx_queues, RTE_MP_MAX_FD_NUM);
return -EINVAL;
}
@@ -2373,10 +2373,10 @@ afxdp_mp_send_fds(const struct rte_mp_msg *request, const void *peer)
strlcpy(reply_param->port_name, request_name,
sizeof(reply_param->port_name));
reply.len_param = sizeof(*reply_param);
- AF_XDP_LOG(DEBUG, "Sending multi-process IPC reply for %s\n",
+ AF_XDP_LOG_LINE(DEBUG, "Sending multi-process IPC reply for %s",
reply_param->port_name);
if (rte_mp_reply(&reply, peer) < 0) {
- AF_XDP_LOG(ERR, "Failed to reply to multi-process IPC request\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to reply to multi-process IPC request");
return -1;
}
return 0;
@@ -2399,12 +2399,12 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
struct rte_eth_dev *eth_dev = NULL;
const char *name = rte_vdev_device_name(dev);
- AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n", name);
+ AF_XDP_LOG_LINE(INFO, "Initializing pmd_af_xdp for %s", name);
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (eth_dev == NULL) {
- AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
+ AF_XDP_LOG_LINE(ERR, "Failed to probe %s", name);
return -EINVAL;
}
eth_dev->dev_ops = &ops;
@@ -2417,8 +2417,8 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
RTE_CACHE_LINE_SIZE,
eth_dev->device->numa_node);
if (eth_dev->process_private == NULL) {
- AF_XDP_LOG(ERR,
- "Failed to alloc memory for process private\n");
+ AF_XDP_LOG_LINE(ERR,
+ "Failed to alloc memory for process private");
return -ENOMEM;
}
@@ -2432,7 +2432,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL) {
- AF_XDP_LOG(ERR, "Invalid kvargs key\n");
+ AF_XDP_LOG_LINE(ERR, "Invalid kvargs key");
return -EINVAL;
}
@@ -2440,25 +2440,25 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
&xsk_queue_cnt, &shared_umem, prog_path,
&busy_budget, &force_copy, &use_cni, &use_pinned_map,
dp_path) < 0) {
- AF_XDP_LOG(ERR, "Invalid kvargs value\n");
+ AF_XDP_LOG_LINE(ERR, "Invalid kvargs value");
return -EINVAL;
}
if (use_cni && use_pinned_map) {
- AF_XDP_LOG(ERR, "When '%s' parameter is used, '%s' parameter is not valid\n",
+ AF_XDP_LOG_LINE(ERR, "When '%s' parameter is used, '%s' parameter is not valid",
ETH_AF_XDP_USE_CNI_ARG, ETH_AF_XDP_USE_PINNED_MAP_ARG);
return -EINVAL;
}
if ((use_cni || use_pinned_map) && busy_budget > 0) {
- AF_XDP_LOG(ERR, "When '%s' or '%s' parameter is used, '%s' parameter is not valid\n",
+ AF_XDP_LOG_LINE(ERR, "When '%s' or '%s' parameter is used, '%s' parameter is not valid",
ETH_AF_XDP_USE_CNI_ARG, ETH_AF_XDP_USE_PINNED_MAP_ARG,
ETH_AF_XDP_BUDGET_ARG);
return -EINVAL;
}
if ((use_cni || use_pinned_map) && strnlen(prog_path, PATH_MAX)) {
- AF_XDP_LOG(ERR, "When '%s' or '%s' parameter is used, '%s' parameter is not valid\n",
+ AF_XDP_LOG_LINE(ERR, "When '%s' or '%s' parameter is used, '%s' parameter is not valid",
ETH_AF_XDP_USE_CNI_ARG, ETH_AF_XDP_USE_PINNED_MAP_ARG,
ETH_AF_XDP_PROG_ARG);
return -EINVAL;
@@ -2466,25 +2466,25 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
if (use_cni && !strnlen(dp_path, PATH_MAX)) {
snprintf(dp_path, sizeof(dp_path), "%s/%s/%s", DP_BASE_PATH, if_name, DP_UDS_SOCK);
- AF_XDP_LOG(INFO, "'%s' parameter not provided, setting value to '%s'\n",
+ AF_XDP_LOG_LINE(INFO, "'%s' parameter not provided, setting value to '%s'",
ETH_AF_XDP_DP_PATH_ARG, dp_path);
}
if (use_pinned_map && !strnlen(dp_path, PATH_MAX)) {
snprintf(dp_path, sizeof(dp_path), "%s/%s/%s", DP_BASE_PATH, if_name, DP_XSK_MAP);
- AF_XDP_LOG(INFO, "'%s' parameter not provided, setting value to '%s'\n",
+ AF_XDP_LOG_LINE(INFO, "'%s' parameter not provided, setting value to '%s'",
ETH_AF_XDP_DP_PATH_ARG, dp_path);
}
if ((!use_cni && !use_pinned_map) && strnlen(dp_path, PATH_MAX)) {
- AF_XDP_LOG(ERR, "'%s' parameter is set, but '%s' or '%s' were not enabled\n",
+ AF_XDP_LOG_LINE(ERR, "'%s' parameter is set, but '%s' or '%s' were not enabled",
ETH_AF_XDP_DP_PATH_ARG, ETH_AF_XDP_USE_CNI_ARG,
ETH_AF_XDP_USE_PINNED_MAP_ARG);
return -EINVAL;
}
if (strlen(if_name) == 0) {
- AF_XDP_LOG(ERR, "Network interface must be specified\n");
+ AF_XDP_LOG_LINE(ERR, "Network interface must be specified");
return -EINVAL;
}
@@ -2509,7 +2509,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
busy_budget, force_copy, use_cni, use_pinned_map,
dp_path);
if (eth_dev == NULL) {
- AF_XDP_LOG(ERR, "Failed to init internals\n");
+ AF_XDP_LOG_LINE(ERR, "Failed to init internals");
return -1;
}
@@ -2517,7 +2517,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
if (!afxdp_dev_count) {
ret = rte_mp_action_register(ETH_AF_XDP_MP_KEY, afxdp_mp_send_fds);
if (ret < 0 && rte_errno != ENOTSUP) {
- AF_XDP_LOG(ERR, "%s: Failed to register multi-process IPC callback: %s\n",
+ AF_XDP_LOG_LINE(ERR, "%s: Failed to register multi-process IPC callback: %s",
name, strerror(rte_errno));
return -1;
}
@@ -2534,7 +2534,7 @@ rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
- AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
+ AF_XDP_LOG_LINE(INFO, "Removing AF_XDP ethdev on numa socket %u",
rte_socket_id());
if (dev == NULL)
@@ -240,18 +240,18 @@ avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
unsigned int count;
int ret;
- PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);
+ PMD_DRV_LOG_LINE(DEBUG, "Sending request %u to host", request->req_id);
request->result = -ENOTSUP;
/* Discard any stale responses before starting a new request */
while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
- PMD_DRV_LOG(DEBUG, "Discarding stale response\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Discarding stale response");
rte_memcpy(avp->sync_addr, request, sizeof(*request));
count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
if (count < 1) {
- PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
+ PMD_DRV_LOG_LINE(ERR, "Cannot send request %u to host",
request->req_id);
ret = -EBUSY;
goto done;
@@ -268,7 +268,7 @@ avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
}
if (retry == 0) {
- PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
+ PMD_DRV_LOG_LINE(ERR, "Timeout while waiting for a response for %u",
request->req_id);
ret = -ETIME;
goto done;
@@ -278,7 +278,7 @@ avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
/* retrieve the response */
count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
- PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
+ PMD_DRV_LOG_LINE(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p",
count, resp_addr, avp->host_sync_addr);
ret = -ENODATA;
goto done;
@@ -288,7 +288,7 @@ avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
rte_memcpy(request, avp->sync_addr, sizeof(*request));
ret = 0;
- PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Result %d received for request %u",
request->result, request->req_id);
done:
@@ -383,7 +383,7 @@ avp_dev_translate_address(struct rte_eth_dev *eth_dev,
offset += (host_phys_addr - map->phys_addr);
addr = RTE_PTR_ADD(addr, (uintptr_t)offset);
- PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p",
host_phys_addr, addr);
return addr;
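Once the memmap region containing the host physical address has been found, the translation is plain pointer arithmetic: the guest virtual address is the region's mapped base plus the offset of the address within the region. A minimal fragment (region_va/region_pa are illustrative names for the matched map entry):

#include <rte_common.h> /* RTE_PTR_ADD */

uint64_t off = host_phys_addr - region_pa;   /* offset inside the region */
void *va = RTE_PTR_ADD(region_va, (uintptr_t)off);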
@@ -425,7 +425,7 @@ avp_dev_check_regions(struct rte_eth_dev *eth_dev)
if ((resource->phys_addr == 0) || (resource->len == 0))
continue;
- PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
+ PMD_DRV_LOG_LINE(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p",
i, resource->phys_addr,
resource->len, resource->addr);
@@ -434,7 +434,7 @@ avp_dev_check_regions(struct rte_eth_dev *eth_dev)
memmap = (struct rte_avp_memmap_info *)resource->addr;
if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
(memmap->version != RTE_AVP_MEMMAP_VERSION)) {
- PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
+ PMD_DRV_LOG_LINE(ERR, "Invalid memmap magic 0x%08x and version %u",
memmap->magic, memmap->version);
return -EINVAL;
}
@@ -444,7 +444,7 @@ avp_dev_check_regions(struct rte_eth_dev *eth_dev)
info = (struct rte_avp_device_info *)resource->addr;
if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
avp_dev_version_check(info->version)) {
- PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
+ PMD_DRV_LOG_LINE(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x",
info->magic, info->version,
AVP_DPDK_DRIVER_VERSION);
return -EINVAL;
@@ -454,7 +454,7 @@ avp_dev_check_regions(struct rte_eth_dev *eth_dev)
case RTE_AVP_PCI_MEMORY_BAR:
case RTE_AVP_PCI_MMIO_BAR:
if (resource->addr == NULL) {
- PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
+ PMD_DRV_LOG_LINE(ERR, "Missing address space for BAR%u",
i);
return -EINVAL;
}
@@ -476,13 +476,13 @@ avp_dev_detach(struct rte_eth_dev *eth_dev)
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
int ret;
- PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "",
eth_dev->data->port_id, avp->device_id);
rte_spinlock_lock(&avp->lock);
if (avp->flags & AVP_F_DETACHED) {
- PMD_DRV_LOG(NOTICE, "port %u already detached\n",
+ PMD_DRV_LOG_LINE(NOTICE, "port %u already detached",
eth_dev->data->port_id);
ret = 0;
goto unlock;
@@ -491,7 +491,7 @@ avp_dev_detach(struct rte_eth_dev *eth_dev)
/* shutdown the device first so the host stops sending us packets. */
ret = avp_dev_ctrl_shutdown(eth_dev);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to send/recv shutdown to host, ret=%d",
ret);
avp->flags &= ~AVP_F_DETACHED;
goto unlock;
@@ -540,7 +540,7 @@ _avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
rxq->queue_limit = rxq->queue_base + queue_count - 1;
}
- PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
+ PMD_DRV_LOG_LINE(DEBUG, "rxq %u at %p base %u limit %u",
rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);
rxq->queue_id = rxq->queue_base;
@@ -574,7 +574,7 @@ _avp_set_queue_counts(struct rte_eth_dev *eth_dev)
avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
eth_dev->data->nb_rx_queues);
- PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Requesting %u Tx and %u Rx queues from host",
avp->num_tx_queues, avp->num_rx_queues);
}
@@ -586,13 +586,13 @@ avp_dev_attach(struct rte_eth_dev *eth_dev)
unsigned int i;
int ret;
- PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "",
eth_dev->data->port_id, avp->device_id);
rte_spinlock_lock(&avp->lock);
if (!(avp->flags & AVP_F_DETACHED)) {
- PMD_DRV_LOG(NOTICE, "port %u already attached\n",
+ PMD_DRV_LOG_LINE(NOTICE, "port %u already attached",
eth_dev->data->port_id);
ret = 0;
goto unlock;
@@ -611,7 +611,7 @@ avp_dev_attach(struct rte_eth_dev *eth_dev)
*/
ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to re-create AVP device, ret=%d",
ret);
goto unlock;
}
@@ -643,7 +643,7 @@ avp_dev_attach(struct rte_eth_dev *eth_dev)
ret = avp_dev_ctrl_set_config(eth_dev, &config);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Config request failed by host, ret=%d",
ret);
goto unlock;
}
@@ -692,7 +692,7 @@ avp_dev_interrupt_handler(void *data)
ret = avp_dev_attach(eth_dev);
break;
default:
- PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
+ PMD_DRV_LOG_LINE(ERR, "unexpected migration status, status=%u",
value);
ret = -EINVAL;
}
@@ -703,17 +703,17 @@ avp_dev_interrupt_handler(void *data)
RTE_PTR_ADD(registers,
RTE_AVP_MIGRATION_ACK_OFFSET));
- PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
+ PMD_DRV_LOG_LINE(NOTICE, "AVP migration interrupt handled");
}
if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
- PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
+ PMD_DRV_LOG_LINE(WARNING, "AVP unexpected interrupt, status=0x%08x",
status);
/* re-enable UIO interrupt handling */
ret = rte_intr_ack(pci_dev->intr_handle);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to re-enable UIO interrupts, ret=%d",
ret);
/* continue */
}
@@ -732,7 +732,7 @@ avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
/* enable UIO interrupt handling */
ret = rte_intr_enable(pci_dev->intr_handle);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to enable UIO interrupts, ret=%d",
ret);
return ret;
}
@@ -761,7 +761,7 @@ avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
/* enable UIO interrupt handling */
ret = rte_intr_disable(pci_dev->intr_handle);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to disable UIO interrupts, ret=%d",
ret);
return ret;
}
@@ -780,7 +780,7 @@ avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
avp_dev_interrupt_handler,
(void *)eth_dev);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to register UIO interrupt callback, ret=%d",
ret);
return ret;
}
@@ -826,7 +826,7 @@ avp_dev_create(struct rte_pci_device *pci_dev,
resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
if (resource->addr == NULL) {
- PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
+ PMD_DRV_LOG_LINE(ERR, "BAR%u is not mapped",
RTE_AVP_PCI_DEVICE_BAR);
return -EFAULT;
}
@@ -834,22 +834,22 @@ avp_dev_create(struct rte_pci_device *pci_dev,
if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
avp_dev_version_check(host_info->version)) {
- PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
+ PMD_DRV_LOG_LINE(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x",
host_info->magic, host_info->version,
AVP_DPDK_DRIVER_VERSION);
return -EINVAL;
}
- PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP host device is v%u.%u.%u",
RTE_AVP_GET_RELEASE_VERSION(host_info->version),
RTE_AVP_GET_MAJOR_VERSION(host_info->version),
RTE_AVP_GET_MINOR_VERSION(host_info->version));
- PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP host supports %u to %u TX queue(s)",
host_info->min_tx_queues, host_info->max_tx_queues);
- PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP host supports %u to %u RX queue(s)",
host_info->min_rx_queues, host_info->max_rx_queues);
- PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP host supports features 0x%08x",
host_info->features);
if (avp->magic != AVP_ETHDEV_MAGIC) {
@@ -876,7 +876,7 @@ avp_dev_create(struct rte_pci_device *pci_dev,
/* TODO... requires validation of host values */
if ((host_info->features & avp->features) != avp->features) {
- PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
+ PMD_DRV_LOG_LINE(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x",
avp->features, host_info->features);
/* this should not be possible; continue for now */
}
@@ -886,9 +886,9 @@ avp_dev_create(struct rte_pci_device *pci_dev,
avp->device_id = host_info->device_id;
/* translate incoming host addresses to guest address space */
- PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "",
host_info->tx_phys);
- PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "",
host_info->alloc_phys);
for (i = 0; i < avp->max_tx_queues; i++) {
avp->tx_q[i] = avp_dev_translate_address(eth_dev,
@@ -898,9 +898,9 @@ avp_dev_create(struct rte_pci_device *pci_dev,
host_info->alloc_phys + (i * host_info->alloc_size));
}
- PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "",
host_info->rx_phys);
- PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP first host free queue at 0x%" PRIx64 "",
host_info->free_phys);
for (i = 0; i < avp->max_rx_queues; i++) {
avp->rx_q[i] = avp_dev_translate_address(eth_dev,
@@ -909,13 +909,13 @@ avp_dev_create(struct rte_pci_device *pci_dev,
host_info->free_phys + (i * host_info->free_size));
}
- PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP host request queue at 0x%" PRIx64 "",
host_info->req_phys);
- PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP host response queue at 0x%" PRIx64 "",
host_info->resp_phys);
- PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP host sync address at 0x%" PRIx64 "",
host_info->sync_phys);
- PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "",
host_info->mbuf_phys);
avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
@@ -935,7 +935,7 @@ avp_dev_create(struct rte_pci_device *pci_dev,
* store the maximum packet length that is supported by the host.
*/
avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
- PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP host max receive packet length is %u",
host_info->max_rx_pkt_len);
return 0;
@@ -966,7 +966,7 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
* be valid.
*/
if (eth_dev->data->scattered_rx) {
- PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
+ PMD_DRV_LOG_LINE(NOTICE, "AVP device configured for chained mbufs");
eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
}
@@ -978,14 +978,14 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
/* Check current migration status */
if (avp_dev_migration_pending(eth_dev)) {
- PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
+ PMD_DRV_LOG_LINE(ERR, "VM live migration operation in progress");
return -EBUSY;
}
/* Check BAR resources */
ret = avp_dev_check_regions(eth_dev);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to validate BAR resources, ret=%d",
ret);
return ret;
}
@@ -993,14 +993,14 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
/* Enable interrupts */
ret = avp_dev_setup_interrupts(eth_dev);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
+ PMD_DRV_LOG_LINE(ERR, "Failed to enable interrupts, ret=%d", ret);
return ret;
}
/* Handle each subtype */
ret = avp_dev_create(pci_dev, eth_dev);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
+ PMD_DRV_LOG_LINE(ERR, "Failed to create device, ret=%d", ret);
return ret;
}
@@ -1008,7 +1008,7 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev",
RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to allocate %d bytes needed to store MAC addresses",
RTE_ETHER_ADDR_LEN);
return -ENOMEM;
}
@@ -1101,7 +1101,7 @@ avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
struct avp_queue *rxq;
if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
- PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
+ PMD_DRV_LOG_LINE(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u",
rx_queue_id, eth_dev->data->nb_rx_queues);
return -EINVAL;
}
@@ -1116,14 +1116,14 @@ avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
if (avp_dev_enable_scattered(eth_dev, avp)) {
if (!eth_dev->data->scattered_rx) {
- PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
+ PMD_DRV_LOG_LINE(NOTICE, "AVP device configured for chained mbufs");
eth_dev->data->scattered_rx = 1;
eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
}
}
- PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
+ PMD_DRV_LOG_LINE(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)",
avp->max_rx_pkt_len,
eth_dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN,
avp->host_mbuf_size,
@@ -1133,7 +1133,7 @@ avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to allocate new Rx queue object");
return -ENOMEM;
}
@@ -1145,7 +1145,7 @@ avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
/* setup the queue receive mapping for the current queue. */
_avp_set_rx_queue_mappings(eth_dev, rx_queue_id);
- PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);
+ PMD_DRV_LOG_LINE(DEBUG, "Rx queue %u setup at %p", rx_queue_id, rxq);
(void)nb_rx_desc;
(void)rx_conf;
@@ -1163,7 +1163,7 @@ avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
struct avp_queue *txq;
if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
- PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
+ PMD_DRV_LOG_LINE(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u",
tx_queue_id, eth_dev->data->nb_tx_queues);
return -EINVAL;
}
@@ -1172,7 +1172,7 @@ avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to allocate new Tx queue object");
return -ENOMEM;
}
@@ -1186,7 +1186,7 @@ avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
txq->dev_data = eth_dev->data;
eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;
- PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);
+ PMD_DRV_LOG_LINE(DEBUG, "Tx queue %u setup at %p", tx_queue_id, txq);
(void)nb_tx_desc;
(void)tx_conf;
@@ -1428,7 +1428,7 @@ avp_recv_scattered_pkts(void *rx_queue,
/* retrieve pending packets */
n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
- PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
+ PMD_RX_LOG_LINE(DEBUG, "Receiving %u packets from Rx queue at %p",
count, rx_q);
count = 0;
@@ -1525,7 +1525,7 @@ avp_recv_pkts(void *rx_queue,
/* retrieve pending packets */
n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
- PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
+ PMD_RX_LOG_LINE(DEBUG, "Receiving %u packets from Rx queue at %p",
count, rx_q);
count = 0;
@@ -1761,14 +1761,13 @@ avp_xmit_scattered_pkts(void *tx_queue,
return 0;
}
- PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
+ PMD_TX_LOG_LINE(DEBUG, "Sending %u packets on Tx queue at %p",
nb_pkts, tx_q);
/* retrieve sufficient send buffers */
n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
if (unlikely(n != segments)) {
- PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
- "n=%u, segments=%u, orig=%u\n",
+ PMD_TX_LOG_LINE(DEBUG, "Failed to allocate buffers n=%u, segments=%u, orig=%u",
n, segments, orig_nb_pkts);
txq->errors += orig_nb_pkts;
return 0;
@@ -1856,7 +1855,7 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return 0;
}
- PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
+ PMD_TX_LOG_LINE(DEBUG, "Sending %u packets on Tx queue at %p",
count, tx_q);
/* retrieve sufficient send buffers */
@@ -1987,7 +1986,7 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
rte_spinlock_lock(&avp->lock);
if (avp->flags & AVP_F_DETACHED) {
- PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ PMD_DRV_LOG_LINE(ERR, "Operation not supported during VM live migration");
ret = -ENOTSUP;
goto unlock;
}
@@ -2003,7 +2002,7 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
RTE_ETH_VLAN_EXTEND_MASK);
ret = avp_vlan_offload_set(eth_dev, mask);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "VLAN offload set failed by host, ret=%d",
ret);
goto unlock;
}
@@ -2019,7 +2018,7 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
ret = avp_dev_ctrl_set_config(eth_dev, &config);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Config request failed by host, ret=%d",
ret);
goto unlock;
}
@@ -2041,7 +2040,7 @@ avp_dev_start(struct rte_eth_dev *eth_dev)
rte_spinlock_lock(&avp->lock);
if (avp->flags & AVP_F_DETACHED) {
- PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ PMD_DRV_LOG_LINE(ERR, "Operation not supported during VM live migration");
ret = -ENOTSUP;
goto unlock;
}
@@ -2049,7 +2048,7 @@ avp_dev_start(struct rte_eth_dev *eth_dev)
/* update link state */
ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Link state change failed by host, ret=%d",
ret);
goto unlock;
}
@@ -2078,7 +2077,7 @@ avp_dev_stop(struct rte_eth_dev *eth_dev)
rte_spinlock_lock(&avp->lock);
if (avp->flags & AVP_F_DETACHED) {
- PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ PMD_DRV_LOG_LINE(ERR, "Operation not supported during VM live migration");
ret = -ENOTSUP;
goto unlock;
}
@@ -2089,7 +2088,7 @@ avp_dev_stop(struct rte_eth_dev *eth_dev)
/* update link state */
ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Link state change failed by host, ret=%d",
ret);
}
@@ -2114,7 +2113,7 @@ avp_dev_close(struct rte_eth_dev *eth_dev)
rte_spinlock_lock(&avp->lock);
if (avp->flags & AVP_F_DETACHED) {
- PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ PMD_DRV_LOG_LINE(ERR, "Operation not supported during VM live migration");
goto unlock;
}
@@ -2124,14 +2123,14 @@ avp_dev_close(struct rte_eth_dev *eth_dev)
ret = avp_dev_disable_interrupts(eth_dev);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to disable interrupts");
/* continue */
}
/* update device state */
ret = avp_dev_ctrl_shutdown(eth_dev);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Device shutdown failed by host, ret=%d",
ret);
/* continue */
}
@@ -2167,7 +2166,7 @@ avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
rte_spinlock_lock(&avp->lock);
if ((avp->flags & AVP_F_PROMISC) == 0) {
avp->flags |= AVP_F_PROMISC;
- PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Promiscuous mode enabled on %u",
eth_dev->data->port_id);
}
rte_spinlock_unlock(&avp->lock);
@@ -2183,7 +2182,7 @@ avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
rte_spinlock_lock(&avp->lock);
if ((avp->flags & AVP_F_PROMISC) != 0) {
avp->flags &= ~AVP_F_PROMISC;
- PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Promiscuous mode disabled on %u",
eth_dev->data->port_id);
}
rte_spinlock_unlock(&avp->lock);
@@ -2224,18 +2223,18 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
else
avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
} else {
- PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "VLAN strip offload not supported");
}
}
if (mask & RTE_ETH_VLAN_FILTER_MASK) {
if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
- PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "VLAN filter offload not supported");
}
if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
- PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "VLAN extend offload not supported");
}
return 0;
@@ -8,24 +8,23 @@
#include <rte_log.h>
#ifdef RTE_LIBRTE_AVP_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, AVP_DRIVER, "%s() rx: " fmt, __func__, ## args)
+#define PMD_RX_LOG_LINE(level, fmt, ...) \
+ RTE_LOG(level, AVP_DRIVER, "%s() rx: " fmt "\n", __func__, ## __VA_ARGS__)
#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#define PMD_RX_LOG_LINE(...) do { } while (0)
#endif
#ifdef RTE_LIBRTE_AVP_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, AVP_DRIVER, "%s() tx: " fmt, __func__, ## args)
+#define PMD_TX_LOG_LINE(level, fmt, ...) \
+ RTE_LOG(level, AVP_DRIVER, "%s() tx: " fmt "\n", __func__, ## __VA_ARGS__)
#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#define PMD_TX_LOG_LINE(...) do { } while (0)
#endif
extern int avp_logtype_driver;
#define RTE_LOGTYPE_AVP_DRIVER avp_logtype_driver
-#define PMD_DRV_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, avp_logtype_driver, \
- "%s(): " fmt, __func__, ## args)
+#define PMD_DRV_LOG_LINE(level, fmt, ...) \
+ RTE_LOG(level, AVP_DRIVER, "%s(): " fmt "\n", __func__, ## __VA_ARGS__)
#endif /* _AVP_LOGS_H_ */
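The whole point of the *_LOG_LINE conversion in this patch is that the newline moves into the macro, so call sites no longer carry a trailing \n. A standalone illustration of the pattern (not DPDK's exact macro):

#include <stdio.h>

#define LOG_LINE_SKETCH(fmt, ...) \
        fprintf(stderr, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)

/* usage: the macro supplies the newline */
LOG_LINE_SKETCH("added device %s", name);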
@@ -107,7 +107,7 @@ static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata,
return 0;
}
- PMD_DRV_LOG(ERR, "Mdio write operation timed out\n");
+ PMD_DRV_LOG_LINE(ERR, "Mdio write operation timed out");
return -ETIMEDOUT;
}
@@ -154,7 +154,7 @@ static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata,
goto success;
}
- PMD_DRV_LOG(ERR, "Mdio read operation timed out\n");
+ PMD_DRV_LOG_LINE(ERR, "Mdio read operation timed out");
return -ETIMEDOUT;
success:
@@ -272,7 +272,7 @@ static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
{
switch (pdata->vdata->xpcs_access) {
case AXGBE_XPCS_ACCESS_V1:
- PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "PHY_Version 1 is not supported");
return -1;
case AXGBE_XPCS_ACCESS_V2:
default:
@@ -285,7 +285,7 @@ static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
{
switch (pdata->vdata->xpcs_access) {
case AXGBE_XPCS_ACCESS_V1:
- PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "PHY_Version 1 is not supported");
return;
case AXGBE_XPCS_ACCESS_V2:
default:
@@ -369,7 +369,7 @@ static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
- PMD_DRV_LOG(DEBUG, "flow control %s for RXq%u\n",
+ PMD_DRV_LOG_LINE(DEBUG, "flow control %s for RXq%u",
ehfc ? "enabled" : "disabled", i);
}
@@ -608,8 +608,8 @@ static int axgbe_update_vlan_hash_table(struct axgbe_port *pdata)
vid_valid = pdata->active_vlans[vid_idx];
vid_valid = (unsigned long)vid_valid >> (vid - (64 * vid_idx));
if (vid_valid & 1)
- PMD_DRV_LOG(DEBUG,
- "vid:%d pdata->active_vlans[%ld]=0x%lx\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "vid:%d pdata->active_vlans[%ld]=0x%lx",
vid, vid_idx, pdata->active_vlans[vid_idx]);
else
continue;
@@ -617,13 +617,13 @@ static int axgbe_update_vlan_hash_table(struct axgbe_port *pdata)
vid_le = rte_cpu_to_le_16(vid);
crc = bitrev32(~axgbe_vid_crc32_le(vid_le)) >> 28;
vlan_hash_table |= (1 << crc);
- PMD_DRV_LOG(DEBUG, "crc = %d vlan_hash_table = 0x%x\n",
+ PMD_DRV_LOG_LINE(DEBUG, "crc = %d vlan_hash_table = 0x%x",
crc, vlan_hash_table);
}
/* Set the VLAN Hash Table filtering register */
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
reg = AXGMAC_IOREAD(pdata, MAC_VLANHTR);
- PMD_DRV_LOG(DEBUG, "vlan_hash_table reg val = 0x%x\n", reg);
+ PMD_DRV_LOG_LINE(DEBUG, "vlan_hash_table reg val = 0x%x", reg);
return 0;
}
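The table index computed above is the top four bits of a bit-reversed CRC-32 over the little-endian VID, which is the hash the MAC applies in hardware. A hedged fragment (crc32_le() and bitrev32() stand in for the driver's internal helpers):

uint16_t vid_le = rte_cpu_to_le_16(vid);
uint32_t crc = bitrev32(~crc32_le(vid_le)) >> 28; /* 4-bit table index */
vlan_hash_table |= 1U << crc;                     /* set the matching bit */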
@@ -927,7 +927,7 @@ static int axgbe_config_rss(struct axgbe_port *pdata)
i % pdata->eth_dev->data->nb_rx_queues);
axgbe_rss_options(pdata);
if (axgbe_enable_rss(pdata)) {
- PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
+ PMD_DRV_LOG_LINE(ERR, "Error in enabling RSS support");
return -1;
}
} else {
@@ -1012,7 +1012,7 @@ static int wrapper_rx_desc_init(struct axgbe_port *pdata)
for (j = 0; j < rxq->nb_desc; j++) {
mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (mbuf == NULL) {
- PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
+ PMD_DRV_LOG_LINE(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d",
(unsigned int)rxq->queue_id, j);
axgbe_dev_rx_queue_release(pdata->eth_dev, i);
return -ENOMEM;
@@ -1138,7 +1138,7 @@ static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
axgbe_calculate_flow_control_threshold(pdata);
axgbe_config_flow_control_threshold(pdata);
- PMD_DRV_LOG(DEBUG, "%d Rx hardware queues, %d byte fifo per queue\n",
+ PMD_DRV_LOG_LINE(DEBUG, "%d Rx hardware queues, %d byte fifo per queue",
pdata->rx_q_count, q_fifo_size);
}
@@ -1164,7 +1164,7 @@ static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
for (i = 0; i < pdata->tx_q_count; i++)
AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
- PMD_DRV_LOG(DEBUG, "%d Tx hardware queues, %d byte fifo per queue\n",
+ PMD_DRV_LOG_LINE(DEBUG, "%d Tx hardware queues, %d byte fifo per queue",
pdata->tx_q_count, q_fifo_size);
}
@@ -1181,12 +1181,12 @@ static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
for (j = 0; j < qptc; j++) {
- PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
+ PMD_DRV_LOG_LINE(DEBUG, "TXq%u mapped to TC%u", queue, i);
AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
}
if (i < qptc_extra) {
- PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
+ PMD_DRV_LOG_LINE(DEBUG, "TXq%u mapped to TC%u", queue, i);
AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
}
@@ -1254,7 +1254,7 @@ void axgbe_set_mac_hash_table(struct axgbe_port *pdata, u8 *addr, bool add)
pdata->uc_hash_table[htable_index] &= ~htable_bitmask;
pdata->uc_hash_mac_addr--;
}
- PMD_DRV_LOG(DEBUG, "%s MAC hash table Bit %d at Index %#x\n",
+ PMD_DRV_LOG_LINE(DEBUG, "%s MAC hash table Bit %d at Index %#x",
add ? "set" : "clear", (crc & 0x1f), htable_index);
AXGMAC_IOWRITE(pdata, MAC_HTR(htable_index),
@@ -1283,7 +1283,7 @@ void axgbe_set_mac_addn_addr(struct axgbe_port *pdata, u8 *addr, uint32_t index)
AXGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
}
- PMD_DRV_LOG(DEBUG, "%s mac address at %#x\n",
+ PMD_DRV_LOG_LINE(DEBUG, "%s mac address at %#x",
addr ? "set" : "clear", index);
AXGMAC_IOWRITE(pdata, MAC_MACAHR(index), mac_addr_hi);
@@ -319,14 +319,14 @@ axgbe_dev_interrupt_handler(void *param)
pdata->phy_if.an_isr(pdata);
/*DMA related interrupts*/
dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
- PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
+ PMD_DRV_LOG_LINE(DEBUG, "DMA_ISR=%#010x", dma_isr);
if (dma_isr) {
if (dma_isr & 1) {
dma_ch_isr =
AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
pdata->rx_queues[0],
DMA_CH_SR);
- PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
+ PMD_DRV_LOG_LINE(DEBUG, "DMA_CH0_ISR=%#010x", dma_ch_isr);
AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
pdata->rx_queues[0],
DMA_CH_SR, dma_ch_isr);
@@ -378,17 +378,17 @@ axgbe_dev_start(struct rte_eth_dev *dev)
/* Multiqueue RSS */
ret = axgbe_dev_rx_mq_config(dev);
if (ret) {
- PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
+ PMD_DRV_LOG_LINE(ERR, "Unable to config RX MQ");
return ret;
}
ret = axgbe_phy_reset(pdata);
if (ret) {
- PMD_DRV_LOG(ERR, "phy reset failed\n");
+ PMD_DRV_LOG_LINE(ERR, "phy reset failed");
return ret;
}
ret = pdata->hw_if.init(pdata);
if (ret) {
- PMD_DRV_LOG(ERR, "dev_init failed\n");
+ PMD_DRV_LOG_LINE(ERR, "dev_init failed");
return ret;
}
@@ -510,7 +510,7 @@ axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
if (index > hw_feat->addn_mac) {
- PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
+ PMD_DRV_LOG_LINE(ERR, "Invalid Index %d", index);
return -EINVAL;
}
axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
@@ -527,12 +527,12 @@ axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
int ret;
if (!pdata->rss_enable) {
- PMD_DRV_LOG(ERR, "RSS not enabled\n");
+ PMD_DRV_LOG_LINE(ERR, "RSS not enabled");
return -ENOTSUP;
}
if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
- PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
+ PMD_DRV_LOG_LINE(ERR, "reta_size %d is not supported", reta_size);
return -EINVAL;
}
@@ -558,12 +558,12 @@ axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
unsigned int i, idx, shift;
if (!pdata->rss_enable) {
- PMD_DRV_LOG(ERR, "RSS not enabled\n");
+ PMD_DRV_LOG_LINE(ERR, "RSS not enabled");
return -ENOTSUP;
}
if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
- PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
+ PMD_DRV_LOG_LINE(ERR, "reta_size %d is not supported", reta_size);
return -EINVAL;
}
@@ -585,12 +585,12 @@ axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
int ret;
if (!pdata->rss_enable) {
- PMD_DRV_LOG(ERR, "RSS not enabled\n");
+ PMD_DRV_LOG_LINE(ERR, "RSS not enabled");
return -ENOTSUP;
}
if (rss_conf == NULL) {
- PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
+ PMD_DRV_LOG_LINE(ERR, "rss_conf value isn't valid");
return -EINVAL;
}
@@ -628,12 +628,12 @@ axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct axgbe_port *pdata = dev->data->dev_private;
if (!pdata->rss_enable) {
- PMD_DRV_LOG(ERR, "RSS not enabled\n");
+ PMD_DRV_LOG_LINE(ERR, "RSS not enabled");
return -ENOTSUP;
}
if (rss_conf == NULL) {
- PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
+ PMD_DRV_LOG_LINE(ERR, "rss_conf value isn't valid");
return -EINVAL;
}
@@ -668,7 +668,7 @@ axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
if (index > hw_feat->addn_mac) {
- PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
+ PMD_DRV_LOG_LINE(ERR, "Invalid Index %d", index);
return;
}
axgbe_set_mac_addn_addr(pdata, NULL, index);
@@ -685,7 +685,7 @@ axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
uint32_t i;
if (nb_mc_addr > hw_feat->addn_mac) {
- PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
+ PMD_DRV_LOG_LINE(ERR, "Invalid Index %d", nb_mc_addr);
return -EINVAL;
}
@@ -711,7 +711,7 @@ axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
if (!hw_feat->hash_table_size) {
- PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "MAC Hash Table not supported");
return -ENOTSUP;
}
@@ -735,7 +735,7 @@ axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
uint32_t index;
if (!hw_feat->hash_table_size) {
- PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "MAC Hash Table not supported");
return -ENOTSUP;
}
@@ -745,7 +745,7 @@ axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
else
pdata->uc_hash_table[index] = 0;
- PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
+ PMD_DRV_LOG_LINE(DEBUG, "%s MAC hash table at Index %#x",
add ? "set" : "clear", index);
AXGMAC_IOWRITE(pdata, MAC_HTR(index),
@@ -784,7 +784,7 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
RTE_ETH_LINK_SPEED_FIXED);
ret = rte_eth_linkstatus_set(dev, &link);
if (ret == 0)
- PMD_DRV_LOG(ERR, "Link status changed\n");
+ PMD_DRV_LOG_LINE(ERR, "Link status changed");
return ret;
}
@@ -1084,7 +1084,7 @@ axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
for (i = 0; i < n; i++) {
if (ids[i] >= AXGBE_XSTATS_COUNT) {
- PMD_DRV_LOG(ERR, "id value isn't valid\n");
+ PMD_DRV_LOG_LINE(ERR, "id value isn't valid");
return -1;
}
values[i] = values_copy[ids[i]];
@@ -1108,7 +1108,7 @@ axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
for (i = 0; i < size; i++) {
if (ids[i] >= AXGBE_XSTATS_COUNT) {
- PMD_DRV_LOG(ERR, "id value isn't valid\n");
+ PMD_DRV_LOG_LINE(ERR, "id value isn't valid");
return -1;
}
strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
@@ -1157,7 +1157,7 @@ axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ rxq->rx_mbuf_alloc_failed;
stats->ierrors += rxq->errors;
} else {
- PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Rx queue not setup for port %d",
dev->data->port_id);
}
}
@@ -1171,7 +1171,7 @@ axgbe_dev_stats_get(struct rte_eth_dev *dev,
stats->obytes += txq->bytes;
stats->oerrors += txq->errors;
} else {
- PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Tx queue not setup for port %d",
dev->data->port_id);
}
}
@@ -1194,7 +1194,7 @@ axgbe_dev_stats_reset(struct rte_eth_dev *dev)
rxq->errors = 0;
rxq->rx_mbuf_alloc_failed = 0;
} else {
- PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Rx queue not setup for port %d",
dev->data->port_id);
}
}
@@ -1205,7 +1205,7 @@ axgbe_dev_stats_reset(struct rte_eth_dev *dev)
txq->bytes = 0;
txq->errors = 0;
} else {
- PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Tx queue not setup for port %d",
dev->data->port_id);
}
}
@@ -1500,7 +1500,7 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
- PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ PMD_DRV_LOG_LINE(ERR, "port %d must be stopped before configuration",
dev->data->port_id);
return -EBUSY;
}
@@ -1624,7 +1624,7 @@ axgbe_timesync_write_time(struct rte_eth_dev *dev,
while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
rte_delay_ms(1);
if (!count)
- PMD_DRV_LOG(ERR, "Timed out update timestamp\n");
+ PMD_DRV_LOG_LINE(ERR, "Timed out update timestamp");
return 0;
}
@@ -1641,7 +1641,7 @@ axgbe_update_tstamp_addend(struct axgbe_port *pdata,
while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
rte_delay_ms(1);
if (!count)
- PMD_DRV_LOG(ERR, "Timed out updating timestamp addend register\n");
+ PMD_DRV_LOG_LINE(ERR, "Timed out updating timestamp addend register");
}
static void
@@ -1661,7 +1661,7 @@ axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
rte_delay_ms(1);
if (!count)
- PMD_DRV_LOG(ERR, "Timed out initializing timestamp\n");
+ PMD_DRV_LOG_LINE(ERR, "Timed out initializing timestamp");
}
static int
@@ -1696,7 +1696,7 @@ axgbe_timesync_enable(struct rte_eth_dev *dev)
/* Exit if timestamping is not enabled */
if (!AXGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) {
- PMD_DRV_LOG(ERR, "Exiting as timestamp is not enabled\n");
+ PMD_DRV_LOG_LINE(ERR, "Exiting as timestamp is not enabled");
return 0;
}
@@ -1720,7 +1720,7 @@ axgbe_timesync_enable(struct rte_eth_dev *dev)
pdata->systime_tc.cc_shift = 0;
pdata->systime_tc.nsec_mask = 0;
- PMD_DRV_LOG(DEBUG, "Initializing system time counter with realtime\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Initializing system time counter with realtime");
/* Updating the counter once with clock real time */
clock_gettime(CLOCK_REALTIME, &timestamp);
@@ -1773,8 +1773,8 @@ axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
if (nsec != 0xffffffffffffffffULL) {
if (pmt == 0x01)
*timestamp = rte_ns_to_timespec(nsec);
- PMD_DRV_LOG(DEBUG,
- "flags = 0x%x nsec = %"PRIu64"\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "flags = 0x%x nsec = %"PRIu64,
flags, nsec);
}
}
@@ -1801,13 +1801,13 @@ axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
}
if (AXGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) {
- PMD_DRV_LOG(DEBUG, "Waiting for TXTSSTSMIS\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Waiting for TXTSSTSMIS");
return 0;
}
nsec = tx_ssr;
nsec *= NSEC_PER_SEC;
nsec += tx_snr;
- PMD_DRV_LOG(DEBUG, "nsec = %"PRIu64" tx_ssr = %d tx_snr = %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "nsec = %"PRIu64" tx_ssr = %d tx_snr = %d",
nsec, tx_ssr, tx_snr);
*timestamp = rte_ns_to_timespec(nsec);
return 0;
@@ -1823,11 +1823,11 @@ axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
vid_idx = VLAN_TABLE_IDX(vid);
if (on) {
- PMD_DRV_LOG(DEBUG, "Set VLAN vid=%d for device = %s\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Set VLAN vid=%d for device = %s",
vid, pdata->eth_dev->device->name);
pdata->active_vlans[vid_idx] |= vid_bit;
} else {
- PMD_DRV_LOG(DEBUG, "Reset VLAN vid=%d for device = %s\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Reset VLAN vid=%d for device = %s",
vid, pdata->eth_dev->device->name);
pdata->active_vlans[vid_idx] &= ~vid_bit;
}
@@ -1845,50 +1845,50 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
uint32_t qinq = 0;
qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
- PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
+ PMD_DRV_LOG_LINE(DEBUG, "EDVLP: qinq = 0x%x", qinq);
switch (vlan_type) {
case RTE_ETH_VLAN_TYPE_INNER:
- PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
+ PMD_DRV_LOG_LINE(DEBUG, "RTE_ETH_VLAN_TYPE_INNER");
if (qinq) {
if (tpid != 0x8100 && tpid != 0x88a8)
- PMD_DRV_LOG(ERR,
- "tag supported 0x8100/0x88A8\n");
- PMD_DRV_LOG(DEBUG, "qinq with inner tag\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "tag supported 0x8100/0x88A8");
+ PMD_DRV_LOG_LINE(DEBUG, "qinq with inner tag");
/*Enable Inner VLAN Tag */
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 1);
reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
- PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);
+ PMD_DRV_LOG_LINE(DEBUG, "bit ERIVLT = 0x%x", reg);
} else {
- PMD_DRV_LOG(ERR,
- "Inner type not supported in single tag\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Inner type not supported in single tag");
}
break;
case RTE_ETH_VLAN_TYPE_OUTER:
- PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
+ PMD_DRV_LOG_LINE(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER");
if (qinq) {
- PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
+ PMD_DRV_LOG_LINE(DEBUG, "double tagging is enabled");
/*Enable outer VLAN tag*/
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 0);
reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
- PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);
+ PMD_DRV_LOG_LINE(DEBUG, "bit ERIVLT = 0x%x", reg);
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 1);
reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANIR, CSVL);
- PMD_DRV_LOG(DEBUG, "bit CSVL = 0x%x\n", reg);
+ PMD_DRV_LOG_LINE(DEBUG, "bit CSVL = 0x%x", reg);
} else {
if (tpid != 0x8100 && tpid != 0x88a8)
- PMD_DRV_LOG(ERR,
- "tag supported 0x8100/0x88A8\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "tag supported 0x8100/0x88A8");
}
break;
case RTE_ETH_VLAN_TYPE_MAX:
- PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
+ PMD_DRV_LOG_LINE(ERR, "RTE_ETH_VLAN_TYPE_MAX");
break;
case RTE_ETH_VLAN_TYPE_UNKNOWN:
- PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
+ PMD_DRV_LOG_LINE(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN");
break;
}
return 0;
@@ -1900,7 +1900,7 @@ static void axgbe_vlan_extend_enable(struct axgbe_port *pdata)
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 1);
qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
- PMD_DRV_LOG(DEBUG, "vlan double tag enabled EDVLP:qinq=0x%x\n", qinq);
+ PMD_DRV_LOG_LINE(DEBUG, "vlan double tag enabled EDVLP:qinq=0x%x", qinq);
}
static void axgbe_vlan_extend_disable(struct axgbe_port *pdata)
@@ -1909,7 +1909,7 @@ static void axgbe_vlan_extend_disable(struct axgbe_port *pdata)
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 0);
qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
- PMD_DRV_LOG(DEBUG, "vlan double tag disable EDVLP:qinq=0x%x\n", qinq);
+ PMD_DRV_LOG_LINE(DEBUG, "vlan double tag disable EDVLP:qinq=0x%x", qinq);
}
static int
@@ -1924,29 +1924,29 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & RTE_ETH_VLAN_STRIP_MASK) {
if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
- PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Strip ON for device = %s",
pdata->eth_dev->device->name);
pdata->hw_if.enable_rx_vlan_stripping(pdata);
} else {
- PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Strip OFF for device = %s",
pdata->eth_dev->device->name);
pdata->hw_if.disable_rx_vlan_stripping(pdata);
}
}
if (mask & RTE_ETH_VLAN_FILTER_MASK) {
if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
- PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Filter ON for device = %s",
pdata->eth_dev->device->name);
pdata->hw_if.enable_rx_vlan_filtering(pdata);
} else {
- PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Filter OFF for device = %s",
pdata->eth_dev->device->name);
pdata->hw_if.disable_rx_vlan_filtering(pdata);
}
}
if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
- PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
+ PMD_DRV_LOG_LINE(DEBUG, "enabling vlan extended mode");
axgbe_vlan_extend_enable(pdata);
/* Set global registers with default ethertype*/
axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
@@ -1954,7 +1954,7 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
RTE_ETHER_TYPE_VLAN);
} else {
- PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
+ PMD_DRV_LOG_LINE(DEBUG, "disabling vlan extended mode");
axgbe_vlan_extend_disable(pdata);
}
}
@@ -2283,7 +2283,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
break;
}
if (unknown_cpu) {
- PMD_DRV_LOG(ERR, "Unknown CPU family, no supported axgbe device found\n");
+ PMD_DRV_LOG_LINE(ERR, "Unknown CPU family, no supported axgbe device found");
return -ENODEV;
}
}
@@ -2377,7 +2377,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Issue software reset to DMA */
ret = pdata->hw_if.exit(pdata);
if (ret)
- PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");
+ PMD_DRV_LOG_LINE(ERR, "hw_if->exit EBUSY error");
/* Set default configuration data */
axgbe_default_config(pdata);
@@ -162,13 +162,13 @@ static int axgbe_i2c_isr(struct axgbe_port *pdata)
isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
- PMD_DRV_LOG(DEBUG, "I2C interrupt received: status=%#010x\n", isr);
+ PMD_DRV_LOG_LINE(DEBUG, "I2C interrupt received: status=%#010x", isr);
axgbe_i2c_clear_isr_interrupts(pdata, isr);
if (isr & AXGBE_INTR_TX_ABRT) {
- PMD_DRV_LOG(DEBUG,
- "I2C TX_ABRT received (%#010x) for target %#04x\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "I2C TX_ABRT received (%#010x) for target %#04x",
state->tx_abort_source, state->op->target);
axgbe_i2c_disable_interrupts(pdata);
@@ -232,7 +232,7 @@ static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
pthread_mutex_lock(&pdata->i2c_mutex);
ret = axgbe_i2c_disable(pdata);
if (ret) {
- PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+ PMD_DRV_LOG_LINE(ERR, "failed to disable i2c master");
pthread_mutex_unlock(&pdata->i2c_mutex);
return ret;
}
@@ -249,7 +249,7 @@ static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
axgbe_i2c_clear_all_interrupts(pdata);
ret = axgbe_i2c_enable(pdata);
if (ret) {
- PMD_DRV_LOG(ERR, "failed to enable i2c master\n");
+ PMD_DRV_LOG_LINE(ERR, "failed to enable i2c master");
pthread_mutex_unlock(&pdata->i2c_mutex);
return ret;
}
@@ -268,7 +268,7 @@ static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
}
}
- PMD_DRV_LOG(ERR, "i2c operation timed out\n");
+ PMD_DRV_LOG_LINE(ERR, "i2c operation timed out");
axgbe_i2c_disable_interrupts(pdata);
axgbe_i2c_disable(pdata);
ret = -ETIMEDOUT;
@@ -293,7 +293,7 @@ static void axgbe_i2c_stop(struct axgbe_port *pdata)
if (!pdata->i2c.started)
return;
- PMD_DRV_LOG(DEBUG, "stopping I2C\n");
+ PMD_DRV_LOG_LINE(DEBUG, "stopping I2C");
pdata->i2c.started = 0;
axgbe_i2c_disable_interrupts(pdata);
@@ -306,7 +306,7 @@ static int axgbe_i2c_start(struct axgbe_port *pdata)
if (pdata->i2c.started)
return 0;
- PMD_DRV_LOG(DEBUG, "starting I2C\n");
+ PMD_DRV_LOG_LINE(DEBUG, "starting I2C");
pdata->i2c.started = 1;
@@ -321,7 +321,7 @@ static int axgbe_i2c_init(struct axgbe_port *pdata)
ret = axgbe_i2c_disable(pdata);
if (ret) {
- PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+ PMD_DRV_LOG_LINE(ERR, "failed to disable i2c master");
return ret;
}
@@ -19,8 +19,8 @@ extern int axgbe_logtype_init;
#endif
extern int axgbe_logtype_driver;
-#define PMD_DRV_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, axgbe_logtype_driver, "%s(): " fmt, \
- __func__, ## args)
+#define RTE_LOGTYPE_AXGBE_DRIVER axgbe_logtype_driver
+#define PMD_DRV_LOG_LINE(level, fmt, ...) \
+ RTE_LOG(level, AXGBE_DRIVER, "%s(): " fmt "\n", __func__, ## __VA_ARGS__)
#endif /* _AXGBE_LOGS_H_ */
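The axgbe_logs.h hunk above carries the mechanism the rest of the axgbe changes depend on: PMD_DRV_LOG() was a hand-rolled wrapper around rte_log(), so every call site had to supply its own trailing "\n"; PMD_DRV_LOG_LINE() instead routes through the generic RTE_LOG() macro and appends the newline once, inside the macro. RTE_LOG() token-pastes its second argument onto the RTE_LOGTYPE_ prefix to find the logtype variable, which is why the hunk also adds the RTE_LOGTYPE_AXGBE_DRIVER alias for the existing axgbe_logtype_driver. The minimal sketch below shows just that pasting mechanism; rte_log() here is a stand-in (the real one in rte_log.h also honors per-type log levels), and the constants are illustrative, not DPDK's:

#include <stdarg.h>
#include <stdio.h>

/* Stand-ins for what rte_log.h normally provides. */
#define RTE_LOG_ERR 4
static int axgbe_logtype_driver = 1;   /* registered at init in the real driver */
#define RTE_LOGTYPE_AXGBE_DRIVER axgbe_logtype_driver

static int rte_log(int level, int logtype, const char *fmt, ...);

/* Same shape as DPDK's RTE_LOG(): both names are built by token pasting. */
#define RTE_LOG(l, t, ...) \
	rte_log(RTE_LOG_ ## l, RTE_LOGTYPE_ ## t, __VA_ARGS__)

#define PMD_DRV_LOG_LINE(level, fmt, ...) \
	RTE_LOG(level, AXGBE_DRIVER, "%s(): " fmt "\n", __func__, ## __VA_ARGS__)

static int
rte_log(int level, int logtype, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = fprintf(stderr, "[lvl=%d type=%d] ", level, logtype);
	ret += vfprintf(stderr, fmt, ap);
	va_end(ap);
	return ret;
}

int
main(void)
{
	/* Expands to rte_log(RTE_LOG_ERR, axgbe_logtype_driver, ...). */
	PMD_DRV_LOG_LINE(ERR, "failed to disable i2c master");
	return 0;
}

Compiled with gcc, this prints "[lvl=4 type=1] main(): failed to disable i2c master" followed by a newline, which is exactly what the converted call sites throughout this patch now produce without spelling the "\n" themselves.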
@@ -203,7 +203,7 @@ static void axgbe_change_mode(struct axgbe_port *pdata,
case AXGBE_MODE_UNKNOWN:
break;
default:
- PMD_DRV_LOG(ERR, "invalid operation mode requested (%u)\n", mode);
+ PMD_DRV_LOG_LINE(ERR, "invalid operation mode requested (%u)", mode);
}
}
@@ -285,7 +285,7 @@ static void axgbe_an73_restart(struct axgbe_port *pdata)
axgbe_an73_enable_interrupts(pdata);
axgbe_an73_set(pdata, true, true);
- PMD_DRV_LOG(DEBUG, "CL73 AN enabled/restarted\n");
+ PMD_DRV_LOG_LINE(DEBUG, "CL73 AN enabled/restarted");
}
static void axgbe_an73_disable(struct axgbe_port *pdata)
@@ -294,7 +294,7 @@ static void axgbe_an73_disable(struct axgbe_port *pdata)
axgbe_an73_disable_interrupts(pdata);
pdata->an_start = 0;
- PMD_DRV_LOG(DEBUG, "CL73 AN disabled\n");
+ PMD_DRV_LOG_LINE(DEBUG, "CL73 AN disabled");
}
static void axgbe_an_restart(struct axgbe_port *pdata)
@@ -372,7 +372,7 @@ static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata,
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
pdata->kr_start_time = rte_get_timer_cycles();
- PMD_DRV_LOG(DEBUG, "KR training initiated\n");
+ PMD_DRV_LOG_LINE(DEBUG, "KR training initiated");
if (pdata->phy_if.phy_impl.kr_training_post)
pdata->phy_if.phy_impl.kr_training_post(pdata);
@@ -455,8 +455,8 @@ static enum axgbe_an axgbe_an73_page_received(struct axgbe_port *pdata)
pdata->an_start = rte_get_timer_cycles();
- PMD_DRV_LOG(NOTICE,
- "CL73 AN timed out, resetting state\n");
+ PMD_DRV_LOG_LINE(NOTICE,
+ "CL73 AN timed out, resetting state");
}
}
@@ -548,7 +548,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
pdata->an_state = AXGBE_AN_ERROR;
}
- PMD_DRV_LOG(DEBUG, "CL73 AN : %s\n",
+ PMD_DRV_LOG_LINE(DEBUG, "CL73 AN : %s",
axgbe_state_as_string(pdata->an_state));
again:
@@ -582,7 +582,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
pdata->eth_dev->data->dev_link.link_status =
RTE_ETH_LINK_DOWN;
} else if (pdata->an_state == AXGBE_AN_ERROR) {
- PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
+ PMD_DRV_LOG_LINE(ERR, "error during auto-negotiation, state=%u",
cur_state);
pdata->an_int = 0;
axgbe_an73_clear_interrupts(pdata);
@@ -597,7 +597,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
if (pdata->phy_if.phy_impl.an_post)
pdata->phy_if.phy_impl.an_post(pdata);
- PMD_DRV_LOG(DEBUG, "CL73 AN result: %s\n",
+ PMD_DRV_LOG_LINE(DEBUG, "CL73 AN result: %s",
axgbe_state_as_string(pdata->an_result));
}
@@ -641,7 +641,7 @@ static void axgbe_an37_state_machine(struct axgbe_port *pdata)
}
if (pdata->an_state == AXGBE_AN_ERROR) {
- PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
+ PMD_DRV_LOG_LINE(ERR, "error during auto-negotiation, state=%u",
cur_state);
pdata->an_int = 0;
axgbe_an37_clear_interrupts(pdata);
@@ -703,7 +703,7 @@ static void axgbe_an37_isr(struct axgbe_port *pdata)
static void axgbe_an_isr(struct axgbe_port *pdata)
{
- PMD_DRV_LOG(DEBUG, "AN interrupt received\n");
+ PMD_DRV_LOG_LINE(DEBUG, "AN interrupt received");
switch (pdata->an_mode) {
case AXGBE_AN_MODE_CL73:
@@ -813,7 +813,7 @@ static void axgbe_an73_init(struct axgbe_port *pdata)
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
- PMD_DRV_LOG(DEBUG, "CL73 AN initialized\n");
+ PMD_DRV_LOG_LINE(DEBUG, "CL73 AN initialized");
}
static void axgbe_an_init(struct axgbe_port *pdata)
@@ -865,7 +865,7 @@ static int axgbe_phy_config_fixed(struct axgbe_port *pdata)
{
enum axgbe_mode mode;
- PMD_DRV_LOG(DEBUG, "fixed PHY configuration\n");
+ PMD_DRV_LOG_LINE(DEBUG, "fixed PHY configuration");
/* Disable auto-negotiation */
axgbe_an_disable(pdata);
@@ -912,9 +912,9 @@ static int __axgbe_phy_config_aneg(struct axgbe_port *pdata, bool set_mode)
ret = axgbe_phy_config_fixed(pdata);
if (ret || !pdata->kr_redrv)
goto out;
- PMD_DRV_LOG(DEBUG, "AN redriver support\n");
+ PMD_DRV_LOG_LINE(DEBUG, "AN redriver support");
} else {
- PMD_DRV_LOG(DEBUG, "AN PHY configuration\n");
+ PMD_DRV_LOG_LINE(DEBUG, "AN PHY configuration");
}
/* Disable auto-negotiation interrupt */
@@ -1018,7 +1018,7 @@ static void axgbe_check_link_timeout(struct axgbe_port *pdata)
}
}
- PMD_DRV_LOG(NOTICE, "AN link timeout\n");
+ PMD_DRV_LOG_LINE(NOTICE, "AN link timeout");
axgbe_phy_config_aneg(pdata);
}
}
@@ -1161,7 +1161,7 @@ static void axgbe_phy_status(struct axgbe_port *pdata)
static void axgbe_phy_stop(struct axgbe_port *pdata)
{
- PMD_DRV_LOG(DEBUG, "stopping PHY\n");
+ PMD_DRV_LOG_LINE(DEBUG, "stopping PHY");
if (!pdata->phy_started)
return;
/* Indicate the PHY is down */
@@ -1177,7 +1177,7 @@ static int axgbe_phy_start(struct axgbe_port *pdata)
{
int ret;
- PMD_DRV_LOG(DEBUG, "starting PHY\n");
+ PMD_DRV_LOG_LINE(DEBUG, "starting PHY");
ret = pdata->phy_if.phy_impl.start(pdata);
if (ret)
@@ -312,7 +312,7 @@ static int axgbe_phy_redrv_write(struct axgbe_port *pdata, unsigned int reg,
}
if (redrv_data[0] != 0xff) {
- PMD_DRV_LOG(ERR, "Redriver write checksum error\n");
+ PMD_DRV_LOG_LINE(ERR, "Redriver write checksum error");
ret = -EIO;
}
@@ -437,7 +437,7 @@ static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
pthread_mutex_unlock(&pdata->phy_mutex);
- PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n");
+ PMD_DRV_LOG_LINE(ERR, "unable to obtain hardware mutexes");
return -ETIMEDOUT;
}
@@ -679,7 +679,7 @@ static int axgbe_phy_sfp_read_eeprom(struct axgbe_port *pdata)
ret = axgbe_phy_sfp_get_mux(pdata);
if (ret) {
- PMD_DRV_LOG(ERR, "I2C error setting SFP MUX\n");
+ PMD_DRV_LOG_LINE(ERR, "I2C error setting SFP MUX");
return ret;
}
@@ -689,7 +689,7 @@ static int axgbe_phy_sfp_read_eeprom(struct axgbe_port *pdata)
&eeprom_addr, sizeof(eeprom_addr),
&sfp_eeprom, sizeof(sfp_eeprom));
if (ret) {
- PMD_DRV_LOG(ERR, "I2C error reading SFP EEPROM\n");
+ PMD_DRV_LOG_LINE(ERR, "I2C error reading SFP EEPROM");
goto put;
}
@@ -735,7 +735,7 @@ static void axgbe_phy_sfp_signals(struct axgbe_port *pdata)
&gpio_reg, sizeof(gpio_reg),
gpio_ports, sizeof(gpio_ports));
if (ret) {
- PMD_DRV_LOG(ERR, "I2C error reading SFP GPIOs\n");
+ PMD_DRV_LOG_LINE(ERR, "I2C error reading SFP GPIOs");
return;
}
@@ -836,7 +836,7 @@ static void axgbe_phy_sfp_detect(struct axgbe_port *pdata)
axgbe_phy_sfp_parse_eeprom(pdata);
axgbe_phy_sfp_external_phy(pdata);
- PMD_DRV_LOG(DEBUG, "SFP Base: %s\n",
+ PMD_DRV_LOG_LINE(DEBUG, "SFP Base: %s",
axgbe_base_as_string(phy_data->sfp_base));
put:
@@ -1064,7 +1064,7 @@ static unsigned int axgbe_phy_an_advertising(struct axgbe_port *pdata)
advertising |= ADVERTISED_1000baseKX_Full;
break;
case AXGBE_PORT_MODE_10GBASE_T:
- PMD_DRV_LOG(ERR, "10GBASE_T mode is not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "10GBASE_T mode is not supported");
break;
case AXGBE_PORT_MODE_10GBASE_R:
advertising |= ADVERTISED_10000baseKR_Full;
@@ -1251,7 +1251,7 @@ static void axgbe_rx_adaptation(struct axgbe_port *pdata)
/* If the block lock is found, update the helpers
* and declare the link up
*/
- PMD_DRV_LOG(NOTICE, "Rx adaptation - Block_lock done\n");
+ PMD_DRV_LOG_LINE(NOTICE, "Rx adaptation - Block_lock done");
pdata->rx_adapt_done = true;
pdata->mode_set = false;
return;
@@ -1271,7 +1271,7 @@ static void axgbe_phy_rx_adaptation(struct axgbe_port *pdata)
/* step 1: Check for RX_VALID && LF_SIGDET */
if ((reg & XGBE_PMA_RX_VAL_SIG_MASK) != XGBE_PMA_RX_VAL_SIG_MASK) {
- PMD_DRV_LOG(NOTICE, "RX_VALID or LF_SIGDET is unset, issue rrc\n");
+ PMD_DRV_LOG_LINE(NOTICE, "RX_VALID or LF_SIGDET is unset, issue rrc");
axgbe_phy_rrc(pdata);
if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
pdata->rx_adapt_retries = 0;
@@ -1301,7 +1301,7 @@ static void axgbe_phy_rx_reset(struct axgbe_port *pdata)
XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF);
rte_delay_us(45);
- PMD_DRV_LOG(ERR, "firmware mailbox reset performed\n");
+ PMD_DRV_LOG_LINE(ERR, "firmware mailbox reset performed");
}
}
@@ -1331,7 +1331,7 @@ static void axgbe_phy_perform_ratechange(struct axgbe_port *pdata,
/* Log if a previous command did not complete */
if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
- PMD_DRV_LOG(NOTICE, "firmware mailbox not ready for command\n");
+ PMD_DRV_LOG_LINE(NOTICE, "firmware mailbox not ready for command");
axgbe_phy_rx_reset(pdata);
}
@@ -1351,7 +1351,7 @@ static void axgbe_phy_perform_ratechange(struct axgbe_port *pdata,
goto do_rx_adaptation;
rte_delay_us(1500);
}
- PMD_DRV_LOG(NOTICE, "firmware mailbox command did not complete\n");
+ PMD_DRV_LOG_LINE(NOTICE, "firmware mailbox command did not complete");
/* Reset on error */
axgbe_phy_rx_reset(pdata);
goto reenable_pll;
@@ -1360,7 +1360,7 @@ static void axgbe_phy_perform_ratechange(struct axgbe_port *pdata,
do_rx_adaptation:
if (pdata->en_rx_adap && sub_cmd == AXGBE_MB_SUBCMD_RX_ADAP &&
(cmd == AXGBE_MB_CMD_SET_10G_KR || cmd == AXGBE_MB_CMD_SET_10G_SFI)) {
- PMD_DRV_LOG(NOTICE, "Enabling RX adaptation\n");
+ PMD_DRV_LOG_LINE(NOTICE, "Enabling RX adaptation");
pdata->mode_set = true;
axgbe_phy_rx_adaptation(pdata);
/* return from here to avoid enabling PLL ctrl
@@ -1384,7 +1384,7 @@ static void axgbe_phy_rrc(struct axgbe_port *pdata)
/* Receiver Reset Cycle */
axgbe_phy_perform_ratechange(pdata, AXGBE_MB_CMD_RRC, AXGBE_MB_SUBCMD_NONE);
- PMD_DRV_LOG(DEBUG, "receiver reset complete\n");
+ PMD_DRV_LOG_LINE(DEBUG, "receiver reset complete");
}
static void axgbe_phy_power_off(struct axgbe_port *pdata)
@@ -1396,7 +1396,7 @@ static void axgbe_phy_power_off(struct axgbe_port *pdata)
phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
- PMD_DRV_LOG(DEBUG, "phy powered off\n");
+ PMD_DRV_LOG_LINE(DEBUG, "phy powered off");
}
static bool enable_rx_adap(struct axgbe_port *pdata, enum axgbe_mode mode)
@@ -1453,7 +1453,7 @@ static void axgbe_phy_sfi_mode(struct axgbe_port *pdata)
phy_data->cur_mode = AXGBE_MODE_SFI;
- PMD_DRV_LOG(DEBUG, "10GbE SFI mode set\n");
+ PMD_DRV_LOG_LINE(DEBUG, "10GbE SFI mode set");
}
static void axgbe_phy_kr_mode(struct axgbe_port *pdata)
@@ -1471,7 +1471,7 @@ static void axgbe_phy_kr_mode(struct axgbe_port *pdata)
AXGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = AXGBE_MODE_KR;
- PMD_DRV_LOG(DEBUG, "10GbE KR mode set\n");
+ PMD_DRV_LOG_LINE(DEBUG, "10GbE KR mode set");
}
static void axgbe_phy_kx_2500_mode(struct axgbe_port *pdata)
@@ -1987,7 +1987,7 @@ static int axgbe_phy_mdio_reset_setup(struct axgbe_port *pdata)
case AXGBE_MDIO_RESET_INT_GPIO:
break;
default:
- PMD_DRV_LOG(ERR, "unsupported MDIO reset (%#x)\n",
+ PMD_DRV_LOG_LINE(ERR, "unsupported MDIO reset (%#x)",
phy_data->mdio_reset);
return -EINVAL;
}
@@ -2270,7 +2270,7 @@ static int axgbe_phy_init(struct axgbe_port *pdata)
/* Check if enabled */
if (!axgbe_phy_port_enabled(pdata)) {
- PMD_DRV_LOG(ERR, "device is not enabled\n");
+ PMD_DRV_LOG_LINE(ERR, "device is not enabled");
return -ENODEV;
}
@@ -2281,7 +2281,7 @@ static int axgbe_phy_init(struct axgbe_port *pdata)
phy_data = rte_zmalloc("phy_data memory", sizeof(*phy_data), 0);
if (!phy_data) {
- PMD_DRV_LOG(ERR, "phy_data allocation failed\n");
+ PMD_DRV_LOG_LINE(ERR, "phy_data allocation failed");
return -ENOMEM;
}
pdata->phy_data = phy_data;
@@ -2300,14 +2300,14 @@ static int axgbe_phy_init(struct axgbe_port *pdata)
/* Validate the connection requested */
if (axgbe_phy_conn_type_mismatch(pdata)) {
- PMD_DRV_LOG(ERR, "phy mode/connection mismatch (%#x/%#x)\n",
+ PMD_DRV_LOG_LINE(ERR, "phy mode/connection mismatch (%#x/%#x)",
phy_data->port_mode, phy_data->conn_type);
return -EINVAL;
}
/* Validate the mode requested */
if (axgbe_phy_port_mode_mismatch(pdata)) {
- PMD_DRV_LOG(ERR, "phy mode/speed mismatch (%#x/%#x)\n",
+ PMD_DRV_LOG_LINE(ERR, "phy mode/speed mismatch (%#x/%#x)",
phy_data->port_mode, phy_data->port_speeds);
return -EINVAL;
}
@@ -2319,7 +2319,7 @@ static int axgbe_phy_init(struct axgbe_port *pdata)
/* Validate the re-driver information */
if (axgbe_phy_redrv_error(phy_data)) {
- PMD_DRV_LOG(ERR, "phy re-driver settings error\n");
+ PMD_DRV_LOG_LINE(ERR, "phy re-driver settings error");
return -EINVAL;
}
pdata->kr_redrv = phy_data->redrv;
@@ -2499,7 +2499,7 @@ static int axgbe_phy_init(struct axgbe_port *pdata)
ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
phy_data->phydev_mode);
if (ret) {
- PMD_DRV_LOG(ERR, "mdio port/clause not compatible (%d/%u)\n",
+ PMD_DRV_LOG_LINE(ERR, "mdio port/clause not compatible (%d/%u)",
phy_data->mdio_addr, phy_data->phydev_mode);
return -EINVAL;
}
@@ -2509,7 +2509,7 @@ static int axgbe_phy_init(struct axgbe_port *pdata)
ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
AXGBE_MDIO_MODE_CL22);
if (ret) {
- PMD_DRV_LOG(ERR, "redriver mdio port not compatible (%u)\n",
+ PMD_DRV_LOG_LINE(ERR, "redriver mdio port not compatible (%u)",
phy_data->redrv_addr);
return -EINVAL;
}
@@ -92,7 +92,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
socket_id);
if (!dma) {
- PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
+ PMD_DRV_LOG_LINE(ERR, "ring_dma_zone_reserve for rx_ring failed");
axgbe_rx_queue_release(rxq);
return -ENOMEM;
}
@@ -105,7 +105,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq->sw_ring) {
- PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
+ PMD_DRV_LOG_LINE(ERR, "rte_zmalloc for sw_ring failed");
axgbe_rx_queue_release(rxq);
return -ENOMEM;
}
@@ -139,8 +139,8 @@ static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
}
if (!time_before(rte_get_timer_cycles(), rx_timeout))
- PMD_DRV_LOG(ERR,
- "timed out waiting for Rx queue %u to empty\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "timed out waiting for Rx queue %u to empty",
queue);
}
@@ -224,8 +224,8 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
break;
tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (unlikely(!tmbuf)) {
- PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
- " queue_id = %u\n",
+ PMD_DRV_LOG_LINE(ERR, "RX mbuf alloc failed port_id = %u"
+ " queue_id = %u",
(unsigned int)rxq->port_id,
(unsigned int)rxq->queue_id);
rte_eth_devices[
@@ -359,8 +359,8 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (unlikely(!tmbuf)) {
- PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
- " queue_id = %u\n",
+ PMD_DRV_LOG_LINE(ERR, "RX mbuf alloc failed port_id = %u"
+ " queue_id = %u",
(unsigned int)rxq->port_id,
(unsigned int)rxq->queue_id);
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
@@ -677,8 +677,8 @@ static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
}
if (!time_before(rte_get_timer_cycles(), tx_timeout))
- PMD_DRV_LOG(ERR,
- "timed out waiting for Tx queue %u to empty\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "timed out waiting for Tx queue %u to empty",
queue);
}
@@ -721,8 +721,8 @@ static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
}
if (!time_before(rte_get_timer_cycles(), tx_timeout))
- PMD_DRV_LOG(ERR,
- "timed out waiting for Tx DMA channel %u to stop\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "timed out waiting for Tx DMA channel %u to stop",
queue);
}
@@ -1207,12 +1207,8 @@ extern const struct rte_flow_ops bnxt_flow_meter_ops;
extern int bnxt_logtype_driver;
#define RTE_LOGTYPE_BNXT bnxt_logtype_driver
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, "%s(): " fmt, \
- __func__, ## args)
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt, ## args)
+#define PMD_DRV_LOG_LINE(level, fmt, ...) \
+ RTE_LOG(level, BNXT, "%s(): " fmt "\n", __func__, ## __VA_ARGS__)
#define BNXT_LINK_SPEEDS_V2_OPTIONS(f) \
((f) & HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SPEEDS2_SUPPORTED)
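bnxt's header gets the same treatment, with one extra simplification: the PMD_DRV_LOG_RAW()/PMD_DRV_LOG() pair collapses into a single PMD_DRV_LOG_LINE(). Two details of the new macro are worth seeing in isolation: fmt and "\n" are adjacent string literals, so the compiler folds the newline in at no run-time cost, and ", ## __VA_ARGS__" (the GNU comma-swallowing extension DPDK already relied on) keeps zero-argument calls legal. A small self-contained demo, with printf() standing in for the logging back end and the level argument deliberately unused, so INFO/ERR below are inert placeholders rather than DPDK's RTE_LOG_* constants:

#include <stdio.h>

static int bnxt_logtype_driver = 7;   /* illustrative value, not DPDK's */
#define RTE_LOGTYPE_BNXT bnxt_logtype_driver

/* Demo shape of the new macro; the level parameter is dropped here. */
#define PMD_DRV_LOG_LINE(level, fmt, ...) \
	printf("[type=%d] %s(): " fmt "\n", RTE_LOGTYPE_BNXT, __func__, ## __VA_ARGS__)

int
main(void)
{
	/* ## __VA_ARGS__ deletes the trailing comma: no arguments needed. */
	PMD_DRV_LOG_LINE(INFO, "Port conn async event");
	/* fmt and "\n" concatenate into one literal before formatting. */
	PMD_DRV_LOG_LINE(ERR, "HWRM ring alloc failure rc: %x", 0x16);
	return 0;
}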
@@ -59,7 +59,7 @@ bnxt_process_default_vnic_change(struct bnxt *bp,
if (!BNXT_TRUFLOW_EN(bp))
return;
- PMD_DRV_LOG(INFO, "Default vnic change async event received\n");
+ PMD_DRV_LOG_LINE(INFO, "Default vnic change async event received");
event_data = rte_le_to_cpu_32(async_cmp->event_data1);
vnic_state = (event_data & BNXT_DEFAULT_VNIC_STATE_MASK) >>
@@ -72,7 +72,7 @@ bnxt_process_default_vnic_change(struct bnxt *bp,
vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT;
- PMD_DRV_LOG(INFO, "async event received vf_id 0x%x\n", vf_fid);
+ PMD_DRV_LOG_LINE(INFO, "async event received vf_id 0x%x", vf_fid);
for (vf_id = 0; vf_id < BNXT_MAX_VF_REPS(bp); vf_id++) {
eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
@@ -97,16 +97,16 @@ static void bnxt_handle_event_error_report(struct bnxt *bp,
{
switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
- PMD_DRV_LOG(WARNING, "Port:%d Pause Storm detected!\n",
+ PMD_DRV_LOG_LINE(WARNING, "Port:%d Pause Storm detected!",
bp->eth_dev->data->port_id);
break;
case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
- PMD_DRV_LOG(WARNING, "Port:%d Speed change not supported with dual rate transceivers on this board",
+ PMD_DRV_LOG_LINE(WARNING, "Port:%d Speed change not supported with dual rate transceivers on this board",
bp->eth_dev->data->port_id);
break;
default:
- PMD_DRV_LOG(INFO, "FW reported unknown error type data1 %d"
- " data2: %d\n", data1, data2);
+ PMD_DRV_LOG_LINE(INFO, "FW reported unknown error type data1 %d"
+ " data2: %d", data1, data2);
break;
}
}
@@ -121,13 +121,13 @@ void bnxt_handle_vf_cfg_change(void *arg)
if (eth_dev->data->dev_started) {
rc = bnxt_dev_stop_op(eth_dev);
if (rc != 0) {
- PMD_DRV_LOG(ERR, "Failed to stop Port:%u\n", eth_dev->data->port_id);
+ PMD_DRV_LOG_LINE(ERR, "Failed to stop Port:%u", eth_dev->data->port_id);
return;
}
rc = bnxt_dev_start_op(eth_dev);
if (rc != 0)
- PMD_DRV_LOG(ERR, "Failed to start Port:%u\n", eth_dev->data->port_id);
+ PMD_DRV_LOG_LINE(ERR, "Failed to start Port:%u", eth_dev->data->port_id);
}
}
@@ -144,7 +144,7 @@ bnxt_process_vf_flr(struct bnxt *bp, uint32_t data1)
vfid = (data1 & HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK) >>
HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT;
- PMD_DRV_LOG(INFO, "VF FLR async event received pfid: %u, vfid: %u\n",
+ PMD_DRV_LOG_LINE(INFO, "VF FLR async event received pfid: %u, vfid: %u",
pfid, vfid);
}
@@ -176,17 +176,17 @@ void bnxt_handle_async_event(struct bnxt *bp,
RTE_ETH_EVENT_INTR_LSC, NULL);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
- PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
+ PMD_DRV_LOG_LINE(INFO, "Async event: PF driver unloaded");
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
- PMD_DRV_LOG(INFO, "Port %u: VF config change async event\n", port_id);
- PMD_DRV_LOG(INFO, "event: data1 %#x data2 %#x\n", data1, data2);
+ PMD_DRV_LOG_LINE(INFO, "Port %u: VF config change async event", port_id);
+ PMD_DRV_LOG_LINE(INFO, "event: data1 %#x data2 %#x", data1, data2);
bnxt_hwrm_func_qcfg(bp, NULL);
if (BNXT_VF(bp))
rte_eal_alarm_set(1, bnxt_handle_vf_cfg_change, (void *)bp);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
- PMD_DRV_LOG(INFO, "Port conn async event\n");
+ PMD_DRV_LOG_LINE(INFO, "Port conn async event");
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
/*
@@ -216,13 +216,13 @@ void bnxt_handle_async_event(struct bnxt *bp,
BNXT_MIN_FW_READY_TIMEOUT;
if ((event_data & EVENT_DATA1_REASON_CODE_MASK) ==
EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) {
- PMD_DRV_LOG(INFO,
- "Port %u: Firmware fatal reset event received\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Port %u: Firmware fatal reset event received",
port_id);
bp->flags |= BNXT_FLAG_FATAL_ERROR;
} else {
- PMD_DRV_LOG(INFO,
- "Port %u: Firmware non-fatal reset event received\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Port %u: Firmware non-fatal reset event received",
port_id);
}
@@ -243,7 +243,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
info->flags |= BNXT_FLAG_RECOVERY_ENABLED;
} else {
info->flags &= ~BNXT_FLAG_RECOVERY_ENABLED;
- PMD_DRV_LOG(INFO, "Driver recovery watchdog is disabled\n");
+ PMD_DRV_LOG_LINE(INFO, "Driver recovery watchdog is disabled");
return;
}
@@ -253,8 +253,8 @@ void bnxt_handle_async_event(struct bnxt *bp,
info->flags &= ~BNXT_FLAG_PRIMARY_FUNC;
status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
- PMD_DRV_LOG(INFO,
- "Port: %u Driver recovery watchdog, role: %s, FW status: 0x%x (%s)\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Port: %u Driver recovery watchdog, role: %s, FW status: 0x%x (%s)",
port_id, bnxt_is_primary_func(bp) ? "primary" : "backup", status,
(status == BNXT_FW_STATUS_HEALTHY) ? "healthy" : "unhealthy");
@@ -269,15 +269,15 @@ void bnxt_handle_async_event(struct bnxt *bp,
bnxt_schedule_fw_health_check(bp);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
- PMD_DRV_LOG(INFO, "Port: %u DNC event: data1 %#x data2 %#x\n",
+ PMD_DRV_LOG_LINE(INFO, "Port: %u DNC event: data1 %#x data2 %#x",
port_id, data1, data2);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE:
bnxt_process_default_vnic_change(bp, async_cmp);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST:
- PMD_DRV_LOG(INFO,
- "Port %u: Received fw echo request: data1 %#x data2 %#x\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Port %u: Received fw echo request: data1 %#x data2 %#x",
port_id, data1, data2);
if (bp->recovery_info)
bnxt_hwrm_fw_echo_reply(bp, data1, data2);
@@ -289,7 +289,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
bnxt_process_vf_flr(bp, data1);
break;
default:
- PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
+ PMD_DRV_LOG_LINE(DEBUG, "handle_async_event id = 0x%x", event_id);
break;
}
}
@@ -305,7 +305,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
int rc;
if (bp->pf->active_vfs <= 0) {
- PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
+ PMD_DRV_LOG_LINE(ERR, "Forwarded VF with no active VFs");
return;
}
@@ -324,8 +324,8 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
if (fw_vf_id < bp->pf->first_vf_id ||
fw_vf_id >= bp->pf->first_vf_id + bp->pf->active_vfs) {
- PMD_DRV_LOG(ERR,
- "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)",
fw_vf_id, bp->pf->first_vf_id,
(bp->pf->first_vf_id) + bp->pf->active_vfs - 1,
bp->pf->first_vf_id, bp->pf->active_vfs);
@@ -363,8 +363,8 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
/* Forward */
rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to send FWD req VF 0x%x, type 0x%x.\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to send FWD req VF 0x%x, type 0x%x.",
fw_vf_id - bp->pf->first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
}
@@ -374,8 +374,8 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
reject:
rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to send REJECT req VF 0x%x, type 0x%x.\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to send REJECT req VF 0x%x, type 0x%x.",
fw_vf_id - bp->pf->first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
}
@@ -388,7 +388,7 @@ int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
bool evt = 0;
if (bp == NULL || cmp == NULL) {
- PMD_DRV_LOG(ERR, "invalid NULL argument\n");
+ PMD_DRV_LOG_LINE(ERR, "invalid NULL argument");
return evt;
}
@@ -408,7 +408,7 @@ int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
break;
default:
/* Ignore any other events */
- PMD_DRV_LOG(DEBUG, "Ignoring %02x completion\n", CMP_TYPE(cmp));
+ PMD_DRV_LOG_LINE(DEBUG, "Ignoring %02x completion", CMP_TYPE(cmp));
break;
}
@@ -432,7 +432,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
if (rc)
goto err_out;
- PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
+ PMD_DRV_LOG_LINE(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p",
vnic_id, vnic, vnic->fw_grp_ids);
/* populate the fw group table */
@@ -451,7 +451,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
* Cap max Rx rings to same value
*/
if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) {
- PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n",
+ PMD_DRV_LOG_LINE(ERR, "RxQ cnt %d > reta_size %d",
bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5);
goto err_out;
}
@@ -463,8 +463,8 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
break;
}
if (rc) {
- PMD_DRV_LOG(ERR,
- "HWRM vnic %d ctx %d alloc failure rc: %x\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "HWRM vnic %d ctx %d alloc failure rc: %x",
vnic_id, j, rc);
goto err_out;
}
@@ -492,15 +492,15 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
rxq = bp->eth_dev->data->rx_queues[j];
- PMD_DRV_LOG(DEBUG,
- "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p",
j, rxq->vnic, rxq->vnic->fw_grp_ids);
if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
}
- PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);
+ PMD_DRV_LOG_LINE(DEBUG, "vnic->rx_queue_cnt = %d", vnic->rx_queue_cnt);
rc = bnxt_vnic_rss_configure(bp, vnic);
if (rc)
@@ -516,7 +516,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
return 0;
err_out:
- PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
+ PMD_DRV_LOG_LINE(ERR, "HWRM vnic %d cfg failure rc: %x",
vnic_id, rc);
return rc;
}
@@ -530,9 +530,9 @@ static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
if (rc)
return rc;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG_LINE(DEBUG,
"rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
- " rx_fc_in_tbl.ctx_id = %d\n",
+ " rx_fc_in_tbl.ctx_id = %d",
bp->flow_stat->rx_fc_in_tbl.va,
(void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
bp->flow_stat->rx_fc_in_tbl.ctx_id);
@@ -542,9 +542,9 @@ static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
if (rc)
return rc;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG_LINE(DEBUG,
"rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
- " rx_fc_out_tbl.ctx_id = %d\n",
+ " rx_fc_out_tbl.ctx_id = %d",
bp->flow_stat->rx_fc_out_tbl.va,
(void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
bp->flow_stat->rx_fc_out_tbl.ctx_id);
@@ -554,9 +554,9 @@ static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
if (rc)
return rc;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG_LINE(DEBUG,
"tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
- " tx_fc_in_tbl.ctx_id = %d\n",
+ " tx_fc_in_tbl.ctx_id = %d",
bp->flow_stat->tx_fc_in_tbl.va,
(void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
bp->flow_stat->tx_fc_in_tbl.ctx_id);
@@ -566,9 +566,9 @@ static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
if (rc)
return rc;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG_LINE(DEBUG,
"tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
- " tx_fc_out_tbl.ctx_id = %d\n",
+ " tx_fc_out_tbl.ctx_id = %d",
bp->flow_stat->tx_fc_out_tbl.va,
(void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
bp->flow_stat->tx_fc_out_tbl.ctx_id);
@@ -706,7 +706,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to get link settings\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to get link settings");
return rc;
}
@@ -723,7 +723,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
(BNXT_LINK_SPEEDS_V2(bp) && dev_conf->link_speeds != curr_speed_bit)) {
rc = bnxt_set_hwrm_link_config(bp, true);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to update PHY settings");
return rc;
}
}
@@ -820,19 +820,19 @@ static int bnxt_start_nic(struct bnxt *bp)
rc = bnxt_vnic_queue_db_init(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "could not allocate vnic db\n");
+ PMD_DRV_LOG_LINE(ERR, "could not allocate vnic db");
goto err_out;
}
rc = bnxt_alloc_hwrm_rings(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "HWRM ring alloc failure rc: %x", rc);
goto err_out;
}
rc = bnxt_alloc_all_hwrm_ring_grps(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "HWRM ring grp alloc failure: %x", rc);
goto err_out;
}
@@ -844,8 +844,8 @@ static int bnxt_start_nic(struct bnxt *bp)
struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];
if (!vnic) {
- PMD_DRV_LOG(ERR,
- "Num pools more than FW profile\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Num pools more than FW profile");
rc = -EINVAL;
goto err_out;
}
@@ -857,7 +857,7 @@ static int bnxt_start_nic(struct bnxt *bp)
skip_cosq_cfg:
rc = bnxt_mq_rx_configure(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "MQ mode configure failure rc: %x", rc);
goto err_out;
}
@@ -893,8 +893,8 @@ static int bnxt_start_nic(struct bnxt *bp)
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
- PMD_DRV_LOG(ERR,
- "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "HWRM cfa l2 rx mask failure rc: %x", rc);
goto err_out;
}
@@ -903,9 +903,9 @@ static int bnxt_start_nic(struct bnxt *bp)
!RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = bp->eth_dev->data->nb_rx_queues;
- PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
+ PMD_DRV_LOG_LINE(DEBUG, "intr_vector = %d", intr_vector);
if (intr_vector > bp->rx_cp_nr_rings) {
- PMD_DRV_LOG(ERR, "At most %d intr queues supported",
+ PMD_DRV_LOG_LINE(ERR, "At most %d intr queues supported",
bp->rx_cp_nr_rings);
return -ENOTSUP;
}
@@ -917,13 +917,13 @@ static int bnxt_start_nic(struct bnxt *bp)
if (rte_intr_dp_is_en(intr_handle)) {
if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
bp->eth_dev->data->nb_rx_queues)) {
- PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
+ PMD_DRV_LOG_LINE(ERR, "Failed to allocate %d rx_queues"
" intr_vec", bp->eth_dev->data->nb_rx_queues);
rc = -ENOMEM;
goto err_out;
}
- PMD_DRV_LOG(DEBUG, "intr_handle->nb_efd = %d "
- "intr_handle->max_intr = %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "intr_handle->nb_efd = %d "
+ "intr_handle->max_intr = %d",
rte_intr_nb_efd_get(intr_handle),
rte_intr_max_intr_get(intr_handle));
for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
@@ -950,7 +950,7 @@ static int bnxt_start_nic(struct bnxt *bp)
bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
if (!bp->mark_table)
- PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");
+ PMD_DRV_LOG_LINE(ERR, "Allocation of mark table failed");
return 0;
@@ -1239,7 +1239,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
rc = bnxt_hwrm_check_vf_rings(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
+ PMD_DRV_LOG_LINE(ERR, "HWRM insufficient resources");
return -ENOSPC;
}
@@ -1258,7 +1258,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
if (rc) {
- PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "HWRM resource alloc fail:%x", rc);
pthread_mutex_unlock(&bp->def_cp_lock);
return -ENOSPC;
}
@@ -1302,7 +1302,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
/* application provides the hash key to program */
if (rss_conf->rss_key != NULL) {
if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE)
- PMD_DRV_LOG(WARNING, "port %u RSS key len must be %d bytes long",
+ PMD_DRV_LOG_LINE(WARNING, "port %u RSS key len must be %d bytes long",
eth_dev->data->port_id, HW_HASH_KEY_SIZE);
else
memcpy(bp->rss_conf.rss_key, rss_conf->rss_key, HW_HASH_KEY_SIZE);
@@ -1315,14 +1315,14 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
return 0;
resource_error:
- PMD_DRV_LOG(ERR,
- "Insufficient resources to support requested config\n");
- PMD_DRV_LOG(ERR,
- "Num Queues Requested: Tx %d, Rx %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Insufficient resources to support requested config");
+ PMD_DRV_LOG_LINE(ERR,
+ "Num Queues Requested: Tx %d, Rx %d",
eth_dev->data->nb_tx_queues,
eth_dev->data->nb_rx_queues);
- PMD_DRV_LOG(ERR,
- "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d",
bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
return -ENOSPC;
@@ -1333,13 +1333,13 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
struct rte_eth_link *link = &eth_dev->data->dev_link;
if (link->link_status)
- PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Port %d Link Up - speed %u Mbps - %s",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
- ("full-duplex") : ("half-duplex\n"));
+ ("full-duplex") : ("half-duplex"));
else
- PMD_DRV_LOG(INFO, "Port %d Link Down\n",
+ PMD_DRV_LOG_LINE(INFO, "Port %d Link Down",
eth_dev->data->port_id);
}
@@ -1411,8 +1411,8 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
#if defined(RTE_ARCH_X86)
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
- PMD_DRV_LOG(INFO,
- "Using AVX2 vector mode receive for port %d\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Using AVX2 vector mode receive for port %d",
eth_dev->data->port_id);
bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
if (bnxt_compressed_rx_cqe_mode_enabled(bp))
@@ -1421,8 +1421,8 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
}
#endif
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- PMD_DRV_LOG(INFO,
- "Using SSE vector mode receive for port %d\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Using SSE vector mode receive for port %d",
eth_dev->data->port_id);
bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
if (bnxt_compressed_rx_cqe_mode_enabled(bp)) {
@@ -1436,10 +1436,10 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
}
use_scalar_rx:
- PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
+ PMD_DRV_LOG_LINE(INFO, "Vector mode receive disabled for port %d",
eth_dev->data->port_id);
- PMD_DRV_LOG(INFO,
- "Port %d scatter: %d rx offload: %" PRIX64 "\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Port %d scatter: %d rx offload: %" PRIX64,
eth_dev->data->port_id,
eth_dev->data->scattered_rx,
eth_dev->data->dev_conf.rxmode.offloads);
@@ -1468,24 +1468,24 @@ bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
#if defined(RTE_ARCH_X86)
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
- PMD_DRV_LOG(INFO,
- "Using AVX2 vector mode transmit for port %d\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Using AVX2 vector mode transmit for port %d",
eth_dev->data->port_id);
return bnxt_xmit_pkts_vec_avx2;
}
#endif
if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- PMD_DRV_LOG(INFO,
- "Using SSE vector mode transmit for port %d\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Using SSE vector mode transmit for port %d",
eth_dev->data->port_id);
return bnxt_xmit_pkts_vec;
}
use_scalar_tx:
- PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
+ PMD_DRV_LOG_LINE(INFO, "Vector mode transmit disabled for port %d",
eth_dev->data->port_id);
- PMD_DRV_LOG(INFO,
- "Port %d scatter: %d tx offload: %" PRIX64 "\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Port %d scatter: %d tx offload: %" PRIX64,
eth_dev->data->port_id,
eth_dev->data->scattered_rx,
offloads);
@@ -1557,7 +1557,7 @@ static void bnxt_free_switch_domain(struct bnxt *bp)
rc = rte_eth_switch_domain_free(bp->switch_domain_id);
if (rc)
- PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n",
+ PMD_DRV_LOG_LINE(ERR, "free switch domain:%d fail: %d",
bp->switch_domain_id, rc);
}
@@ -1581,7 +1581,7 @@ static void bnxt_ptp_get_current_time(void *arg)
rte_spinlock_unlock(&ptp->ptp_lock);
rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
if (rc != 0) {
- PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to re-schedule PTP alarm");
bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
}
}
@@ -1625,7 +1625,7 @@ static int bnxt_ptp_start(struct bnxt *bp)
rc = bnxt_schedule_ptp_alarm(bp);
if (rc != 0) {
- PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to schedule PTP alarm");
} else {
bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED;
bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
@@ -1717,8 +1717,8 @@ int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
pthread_mutex_lock(&bp->err_recovery_lock);
if (bp->flags & BNXT_FLAG_FW_RESET) {
- PMD_DRV_LOG(ERR,
- "Adapter recovering from error..Please retry\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Adapter recovering from error..Please retry");
pthread_mutex_unlock(&bp->err_recovery_lock);
return -EAGAIN;
}
@@ -1735,8 +1735,8 @@ int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;
if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS)
- PMD_DRV_LOG(ERR,
- "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d",
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
do {
@@ -1844,8 +1844,8 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
pthread_mutex_lock(&bp->err_recovery_lock);
if (bp->flags & BNXT_FLAG_FW_RESET) {
- PMD_DRV_LOG(ERR,
- "Adapter recovering from error...Please retry\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Adapter recovering from error...Please retry");
pthread_mutex_unlock(&bp->err_recovery_lock);
return -EAGAIN;
}
@@ -1912,8 +1912,8 @@ static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
/* Attach requested MAC address to the new l2_filter */
STAILQ_FOREACH(filter, &vnic->filter, next) {
if (filter->mac_index == index) {
- PMD_DRV_LOG(DEBUG,
- "MAC addr already existed for pool %d\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "MAC addr already existed for pool %d",
pool);
return 0;
}
@@ -1921,7 +1921,7 @@ static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
filter = bnxt_alloc_filter(bp);
if (!filter) {
- PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
+ PMD_DRV_LOG_LINE(ERR, "L2 filter alloc failed");
return -ENODEV;
}
@@ -1960,12 +1960,12 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
return rc;
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
- PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
+ PMD_DRV_LOG_LINE(ERR, "Cannot add MAC address to a VF interface");
return -ENOTSUP;
}
if (!vnic) {
- PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
+ PMD_DRV_LOG_LINE(ERR, "VNIC not found for pool %d!", pool);
return -EINVAL;
}
@@ -2009,8 +2009,8 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
if (rc) {
new.link_speed = RTE_ETH_LINK_SPEED_100M;
new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
- PMD_DRV_LOG(ERR,
- "Failed to retrieve link rc = 0x%x!\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to retrieve link rc = 0x%x!", rc);
goto out;
}
@@ -2200,14 +2200,14 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
return -EINVAL;
if (reta_size != tbl_size) {
- PMD_DRV_LOG(ERR, "The configured hash table lookup size "
+ PMD_DRV_LOG_LINE(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
- "(%d)\n", reta_size, tbl_size);
+ "(%d)", reta_size, tbl_size);
return -EINVAL;
}
if (bnxt_vnic_reta_config_update(bp, vnic, reta_conf, reta_size)) {
- PMD_DRV_LOG(ERR, "Error in setting the reta config\n");
+ PMD_DRV_LOG_LINE(ERR, "Error in setting the reta config");
return -EINVAL;
}
for (i = 0; i < reta_size; i++) {
@@ -2254,9 +2254,9 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
return -EINVAL;
if (reta_size != tbl_size) {
- PMD_DRV_LOG(ERR, "The configured hash table lookup size "
+ PMD_DRV_LOG_LINE(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
- "(%d)\n", reta_size, tbl_size);
+ "(%d)", reta_size, tbl_size);
return -EINVAL;
}
@@ -2274,7 +2274,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
if (qid == INVALID_HW_RING_ID) {
- PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
+ PMD_DRV_LOG_LINE(ERR, "Inv. entry in rss table.");
return -EINVAL;
}
reta_conf[idx].reta[sft] = qid;
@@ -2302,7 +2302,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
*/
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
- PMD_DRV_LOG(ERR, "Hash type NONE\n");
+ PMD_DRV_LOG_LINE(ERR, "Hash type NONE");
} else {
if (rss_conf->rss_hf & bnxt_eth_rss_support(bp))
return -EINVAL;
@@ -2332,8 +2332,8 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
goto rss_config;
if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
- PMD_DRV_LOG(ERR,
- "Invalid hashkey length, should be %d bytes\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid hashkey length, should be %d bytes",
HW_HASH_KEY_SIZE);
return -EINVAL;
}
@@ -2421,8 +2421,8 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
return rc;
if (!BNXT_SINGLE_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Flow Control Settings cannot be modified on VF or on shared PF\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Flow Control Settings cannot be modified on VF or on shared PF");
return -ENOTSUP;
}
@@ -2486,10 +2486,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
- PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG_LINE(ERR, "Tunnel Port %d already programmed",
udp_tunnel->udp_port);
if (bp->vxlan_port != udp_tunnel->udp_port) {
- PMD_DRV_LOG(ERR, "Only one port allowed\n");
+ PMD_DRV_LOG_LINE(ERR, "Only one port allowed");
return -ENOSPC;
}
bp->vxlan_port_cnt++;
@@ -2500,10 +2500,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
break;
case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
- PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG_LINE(ERR, "Tunnel Port %d already programmed",
udp_tunnel->udp_port);
if (bp->geneve_port != udp_tunnel->udp_port) {
- PMD_DRV_LOG(ERR, "Only one port allowed\n");
+ PMD_DRV_LOG_LINE(ERR, "Only one port allowed");
return -ENOSPC;
}
bp->geneve_port_cnt++;
@@ -2514,10 +2514,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
break;
case RTE_ETH_TUNNEL_TYPE_ECPRI:
if (bp->ecpri_port_cnt) {
- PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG_LINE(ERR, "Tunnel Port %d already programmed",
udp_tunnel->udp_port);
if (bp->ecpri_port != udp_tunnel->udp_port) {
- PMD_DRV_LOG(ERR, "Only one port allowed\n");
+ PMD_DRV_LOG_LINE(ERR, "Only one port allowed");
return -ENOSPC;
}
bp->ecpri_port_cnt++;
@@ -2527,7 +2527,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI;
break;
default:
- PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "Tunnel type is not supported");
return -ENOTSUP;
}
rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
@@ -2567,11 +2567,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
- PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG_LINE(ERR, "No Tunnel port configured yet");
return -EINVAL;
}
if (bp->vxlan_port != udp_tunnel->udp_port) {
- PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG_LINE(ERR, "Req Port: %d. Configured port: %d",
udp_tunnel->udp_port, bp->vxlan_port);
return -EINVAL;
}
@@ -2584,11 +2584,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
break;
case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
- PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG_LINE(ERR, "No Tunnel port configured yet");
return -EINVAL;
}
if (bp->geneve_port != udp_tunnel->udp_port) {
- PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG_LINE(ERR, "Req Port: %d. Configured port: %d",
udp_tunnel->udp_port, bp->geneve_port);
return -EINVAL;
}
@@ -2601,11 +2601,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
break;
case RTE_ETH_TUNNEL_TYPE_ECPRI:
if (!bp->ecpri_port_cnt) {
- PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG_LINE(ERR, "No Tunnel port configured yet");
return -EINVAL;
}
if (bp->ecpri_port != udp_tunnel->udp_port) {
- PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG_LINE(ERR, "Req Port: %d. Configured port: %d",
udp_tunnel->udp_port, bp->ecpri_port);
return -EINVAL;
}
@@ -2617,7 +2617,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
port = bp->ecpri_fw_dst_port_id;
break;
default:
- PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "Tunnel type is not supported");
return -ENOTSUP;
}
@@ -2644,8 +2644,8 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
STAILQ_REMOVE(&vnic->filter, filter,
bnxt_filter_info, next);
bnxt_free_filter(bp, filter);
- PMD_DRV_LOG(INFO,
- "Deleted vlan filter for %d\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Deleted vlan filter for %d",
vlan_id);
return 0;
}
@@ -2686,8 +2686,8 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
*/
filter = bnxt_alloc_filter(bp);
if (!filter) {
- PMD_DRV_LOG(ERR,
- "MAC/VLAN filter alloc failed\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "MAC/VLAN filter alloc failed");
return -ENOMEM;
}
/* MAC + VLAN ID filter */
@@ -2718,8 +2718,8 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
else
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
- PMD_DRV_LOG(INFO,
- "Added Vlan filter for %d\n", vlan_id);
+ PMD_DRV_LOG_LINE(INFO,
+ "Added Vlan filter for %d", vlan_id);
return rc;
}
@@ -2734,7 +2734,7 @@ static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
return rc;
if (!eth_dev->data->dev_started) {
- PMD_DRV_LOG(ERR, "port must be started before setting vlan\n");
+ PMD_DRV_LOG_LINE(ERR, "port must be started before setting vlan");
return -EINVAL;
}
@@ -2796,7 +2796,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
/* This filter will allow only untagged packets */
bnxt_add_vlan_filter(bp, 0);
}
- PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "VLAN Filtering: %d",
!!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
return 0;
@@ -2868,7 +2868,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
if (rc)
return rc;
- PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "VLAN Strip Offload: %d",
!!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
return rc;
@@ -2905,9 +2905,9 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
- PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Extend VLAN supported");
else
- PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
+ PMD_DRV_LOG_LINE(INFO, "Extend VLAN unsupported");
}
return 0;
@@ -2923,14 +2923,14 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Unsupported vlan type.");
return -EINVAL;
}
if (!qinq) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"QinQ not enabled. Needs to be ON as we can "
- "accelerate only outer vlan\n");
+ "accelerate only outer vlan");
return -EINVAL;
}
@@ -2957,14 +2957,14 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
+ PMD_DRV_LOG_LINE(ERR, "Invalid TPID: %x", tpid);
return -EINVAL;
}
bp->outer_tpid_bd |= tpid;
- PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
+ PMD_DRV_LOG_LINE(INFO, "outer_tpid_bd = %x", bp->outer_tpid_bd);
} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
- PMD_DRV_LOG(ERR,
- "Can accelerate only outer vlan in QinQ\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Can accelerate only outer vlan in QinQ");
return -EINVAL;
}
@@ -3009,7 +3009,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
}
- PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Set MAC addr");
return rc;
}
@@ -3032,16 +3032,16 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
bp->nb_mc_addr = nb_mc_addr;
if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
- PMD_DRV_LOG(INFO, "Number of Mcast MACs added (%u) exceeded Max supported (%u)\n",
+ PMD_DRV_LOG_LINE(INFO, "Number of Mcast MACs added (%u) exceeded Max supported (%u)",
nb_mc_addr, BNXT_MAX_MC_ADDRS);
- PMD_DRV_LOG(INFO, "Turning on Mcast promiscuous mode\n");
+ PMD_DRV_LOG_LINE(INFO, "Turning on Mcast promiscuous mode");
vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
goto allmulti;
}
/* TODO Check for Duplicate mcast addresses */
if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
- PMD_DRV_LOG(INFO, "Turning off Mcast promiscuous mode\n");
+ PMD_DRV_LOG_LINE(INFO, "Turning off Mcast promiscuous mode");
vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
}
for (i = 0; i < nb_mc_addr; i++)
@@ -3213,7 +3213,7 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
/* Return if port is active */
if (eth_dev->data->dev_started) {
- PMD_DRV_LOG(ERR, "Stop port before changing MTU\n");
+ PMD_DRV_LOG_LINE(ERR, "Stop port before changing MTU");
return -EBUSY;
}
@@ -3232,14 +3232,14 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
rc = bnxt_vnic_mru_config(bp, new_mtu);
if (rc) {
- PMD_DRV_LOG(ERR, "failed to update mtu in vnic context\n");
+ PMD_DRV_LOG_LINE(ERR, "failed to update mtu in vnic context");
return rc;
}
if (bnxt_hwrm_config_host_mtu(bp))
- PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n");
+ PMD_DRV_LOG_LINE(WARNING, "Failed to configure host MTU");
- PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
+ PMD_DRV_LOG_LINE(INFO, "New MTU is %d", new_mtu);
return rc;
}
@@ -3256,7 +3256,7 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
return rc;
if (!BNXT_SINGLE_PF(bp)) {
- PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n");
+ PMD_DRV_LOG_LINE(ERR, "PVID cannot be modified on VF or on shared PF");
return -ENOTSUP;
}
bp->vlan = on ? pvid : 0;
@@ -3532,7 +3532,7 @@ bnxt_flow_ops_get_op(struct rte_eth_dev *dev,
bp = vfr->parent_dev->data->dev_private;
/* parent is deleted while children are still valid */
if (!bp) {
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n",
+ PMD_DRV_LOG_LINE(DEBUG, "BNXT Port:%d VFR Error",
dev->data->port_id);
return -EIO;
}
@@ -3910,7 +3910,7 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
if (rc)
return rc;
- PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
+ PMD_DRV_LOG_LINE(INFO, PCI_PRI_FMT,
bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);
@@ -3934,7 +3934,7 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev,
if (rc)
return rc;
- PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
+ PMD_DRV_LOG_LINE(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d",
bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function,
in_eeprom->offset, in_eeprom->length);
@@ -4010,13 +4010,13 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
if (rc)
return rc;
- PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
+ PMD_DRV_LOG_LINE(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d",
bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function,
in_eeprom->offset, in_eeprom->length);
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
+ PMD_DRV_LOG_LINE(ERR, "NVM write not supported from a VF");
return -EINVAL;
}
@@ -4061,14 +4061,14 @@ static int bnxt_get_module_info(struct rte_eth_dev *dev,
*/
if (bp->link_info->module_status >
HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) {
- PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n",
+ PMD_DRV_LOG_LINE(NOTICE, "Port %u : Module is not inserted or is powered down",
dev->data->port_id);
return -ENOTSUP;
}
/* This feature is not supported in older firmware versions */
if (bp->hwrm_spec_code < 0x10202) {
- PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n",
+ PMD_DRV_LOG_LINE(NOTICE, "Port %u : Feature is not supported in older firmware",
dev->data->port_id);
return -ENOTSUP;
}
@@ -4099,11 +4099,11 @@ static int bnxt_get_module_info(struct rte_eth_dev *dev,
modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN;
break;
default:
- PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id);
+ PMD_DRV_LOG_LINE(NOTICE, "Port %u : Unsupported module", dev->data->port_id);
return -ENOTSUP;
}
- PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n",
+ PMD_DRV_LOG_LINE(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d",
dev->data->port_id, modinfo->type, modinfo->eeprom_len);
return 0;
@@ -4327,7 +4327,7 @@ static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
case BNXT_FW_STATUS_REG_TYPE_CFG:
ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x",
+ PMD_DRV_LOG_LINE(ERR, "Failed to write %#x at PCI offset %#x",
val, offset);
return;
}
@@ -4365,7 +4365,8 @@ bnxt_check_fw_reset_done(struct bnxt *bp)
do {
rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET);
if (rc < 0) {
- PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET);
+ PMD_DRV_LOG_LINE(ERR, "Failed to read PCI offset 0x%x",
+ PCI_SUBSYSTEM_ID_OFFSET);
return rc;
}
if (val != 0xffff)
@@ -4374,7 +4375,7 @@ bnxt_check_fw_reset_done(struct bnxt *bp)
} while (timeout--);
if (val == 0xffff) {
- PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n");
+ PMD_DRV_LOG_LINE(ERR, "Firmware reset aborted, PCI config space invalid");
return -1;
}
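The reset-done check above spins on a PCI config-space read: a value of all-ones (0xffff) means the function has not yet reappeared on the bus after the firmware reset. A standalone sketch of that poll-until-present pattern; the poll interval is an assumption, and the offset is the standard PCI subsystem-ID location:

```c
/* Hedged sketch of the poll loop above, not the driver's exact code. */
#include <errno.h>
#include <stdint.h>
#include <rte_cycles.h>
#include <bus_pci_driver.h>  /* rte_pci_read_config(); header name varies by release */

#define PCI_SUBSYSTEM_ID_OFFSET 0x2e   /* standard PCI config offset */
#define RESET_POLL_INTERVAL_US  50000  /* hypothetical 50 ms between polls */

static int
poll_device_present(struct rte_pci_device *pdev, int max_polls)
{
	uint16_t val = 0xffff;

	do {
		/* All-ones means the function is still absent from the bus,
		 * which is expected while the firmware reset is in flight. */
		if (rte_pci_read_config(pdev, &val, sizeof(val),
					PCI_SUBSYSTEM_ID_OFFSET) < 0)
			return -EIO;
		if (val != 0xffff)
			return 0;
		rte_delay_us_block(RESET_POLL_INTERVAL_US);
	} while (max_polls-- > 0);

	return -ETIMEDOUT;
}
```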
@@ -4453,7 +4454,7 @@ static int bnxt_restore_mcast_mac_filters(struct bnxt *bp)
ret = bnxt_dev_set_mc_addr_list_op(bp->eth_dev, bp->mcast_addr_list,
bp->nb_mc_addr);
if (ret)
- PMD_DRV_LOG(ERR, "Failed to restore multicast MAC addreeses\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to restore multicast MAC addreeses");
return ret;
}
@@ -4504,7 +4505,7 @@ static int bnxt_check_fw_ready(struct bnxt *bp)
} while (rc && timeout > 0);
if (rc)
- PMD_DRV_LOG(ERR, "FW is not Ready after reset\n");
+ PMD_DRV_LOG_LINE(ERR, "FW is not Ready after reset");
return rc;
}
@@ -4524,7 +4525,7 @@ static void bnxt_dev_recover(void *arg)
/* Clear Error flag so that device re-init should happen */
bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
- PMD_DRV_LOG(INFO, "Port: %u Starting recovery...\n",
+ PMD_DRV_LOG_LINE(INFO, "Port: %u Starting recovery...",
bp->eth_dev->data->port_id);
rc = bnxt_check_fw_ready(bp);
@@ -4533,8 +4534,8 @@ static void bnxt_dev_recover(void *arg)
rc = bnxt_init_resources(bp, true);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to initialize resources after reset\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to initialize resources after reset");
goto err;
}
/* clear reset flag as the device is initialized now */
@@ -4542,7 +4543,7 @@ static void bnxt_dev_recover(void *arg)
rc = bnxt_dev_start_op(bp->eth_dev);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to start port after reset");
goto err_start;
}
@@ -4556,7 +4557,7 @@ static void bnxt_dev_recover(void *arg)
bp->eth_dev->tx_pkt_burst;
rte_mb();
- PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n",
+ PMD_DRV_LOG_LINE(INFO, "Port: %u Recovered from FW reset",
bp->eth_dev->data->port_id);
pthread_mutex_unlock(&bp->err_recovery_lock);
rte_eth_dev_callback_process(bp->eth_dev,
@@ -4576,7 +4577,7 @@ static void bnxt_dev_recover(void *arg)
RTE_ETH_EVENT_INTR_RMV,
NULL);
pthread_mutex_unlock(&bp->err_recovery_lock);
- PMD_DRV_LOG(ERR, "Port %u: Failed to recover from FW reset\n",
+ PMD_DRV_LOG_LINE(ERR, "Port %u: Failed to recover from FW reset",
bp->eth_dev->data->port_id);
}
@@ -4588,7 +4589,7 @@ void bnxt_dev_reset_and_resume(void *arg)
int rc;
bnxt_dev_cleanup(bp);
- PMD_DRV_LOG(INFO, "Port: %u Finished bnxt_dev_cleanup\n",
+ PMD_DRV_LOG_LINE(INFO, "Port: %u Finished bnxt_dev_cleanup",
bp->eth_dev->data->port_id);
bnxt_wait_for_device_shutdown(bp);
@@ -4602,7 +4603,8 @@ void bnxt_dev_reset_and_resume(void *arg)
if (bp->flags & BNXT_FLAG_FATAL_ERROR) {
rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET);
if (rc < 0) {
- PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET);
+ PMD_DRV_LOG_LINE(ERR, "Failed to read PCI offset 0x%x",
+ PCI_SUBSYSTEM_ID_OFFSET);
return;
}
if (val == 0xffff) {
@@ -4613,7 +4615,7 @@ void bnxt_dev_reset_and_resume(void *arg)
rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp);
if (rc)
- PMD_DRV_LOG(ERR, "Port %u: Error setting recovery alarm",
+ PMD_DRV_LOG_LINE(ERR, "Port %u: Error setting recovery alarm",
bp->eth_dev->data->port_id);
}
@@ -4631,7 +4633,7 @@ uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
case BNXT_FW_STATUS_REG_TYPE_CFG:
ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
if (ret < 0)
- PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x",
+ PMD_DRV_LOG_LINE(ERR, "Failed to read PCI offset %#x",
offset);
break;
case BNXT_FW_STATUS_REG_TYPE_GRC:
@@ -4662,7 +4664,7 @@ static int bnxt_fw_reset_all(struct bnxt *bp)
/* Reset with the help of Kong processor */
rc = bnxt_hwrm_fw_reset(bp);
if (rc)
- PMD_DRV_LOG(ERR, "Failed to reset FW\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to reset FW");
}
return rc;
@@ -4679,7 +4681,7 @@ static void bnxt_fw_reset_cb(void *arg)
bnxt_is_recovery_enabled(bp)) {
rc = bnxt_fw_reset_all(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
+ PMD_DRV_LOG_LINE(ERR, "Adapter recovery failed");
return;
}
}
@@ -4740,7 +4742,7 @@ static void bnxt_check_fw_health(void *arg)
bnxt_stop_rxtx(bp->eth_dev);
- PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
+ PMD_DRV_LOG_LINE(ERR, "Detected FW dead condition");
rte_eth_dev_callback_process(bp->eth_dev,
RTE_ETH_EVENT_ERR_RECOVERING,
@@ -4871,7 +4873,7 @@ static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev)
bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
if (!bp->bar0 || !bp->doorbell_base) {
- PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
+ PMD_DRV_LOG_LINE(ERR, "Unable to access Hardware");
return -ENODEV;
}
@@ -5134,8 +5136,8 @@ int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp)
ctxm->max_entries);
ctx_pg[i].entries = entries;
mem_size = ctxm->entry_size * entries;
- PMD_DRV_LOG(DEBUG,
- "Type:0x%x instance:%d entries:%d size:%d\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Type:0x%x instance:%d entries:%d size:%d",
ctxm->type, i, ctx_pg[i].entries, mem_size);
rc = bnxt_alloc_ctx_mem_blk(bp, &ctx_pg[i],
ctxm->init_value ? ctxm : NULL,
@@ -5186,7 +5188,7 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
rc = bnxt_hwrm_func_backing_store_qcaps(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
+ PMD_DRV_LOG_LINE(ERR, "Query context mem capability failed");
return rc;
}
@@ -5270,8 +5272,8 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
done:
if (rc)
- PMD_DRV_LOG(ERR,
- "Failed to configure context mem: rc = %d\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to configure context mem: rc = %d", rc);
else
ctx->flags |= BNXT_CTX_FLAG_INITED;
@@ -5375,14 +5377,14 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
int rc = 0;
if (bp->max_l2_ctx > RTE_ETH_NUM_RECEIVE_MAC_ADDR)
- PMD_DRV_LOG(INFO, "Max number of MAC addrs supported is %d, but will be limited to %d\n",
+ PMD_DRV_LOG_LINE(INFO, "Max number of MAC addrs supported is %d, but will be limited to %d",
bp->max_l2_ctx, RTE_ETH_NUM_RECEIVE_MAC_ADDR);
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
RTE_ETHER_ADDR_LEN * max_mac_addr,
0);
if (eth_dev->data->mac_addrs == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to alloc MAC addr tbl");
return -ENOMEM;
}
@@ -5391,10 +5393,10 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
return -EINVAL;
/* Generate a random MAC address, if none was assigned by PF */
- PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
+ PMD_DRV_LOG_LINE(INFO, "VF MAC address not assigned by Host PF");
bnxt_eth_hw_addr_random(bp->mac_addr);
- PMD_DRV_LOG(INFO,
- "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT,
bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
@@ -5414,12 +5416,12 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
sizeof(struct rte_ether_addr) *
BNXT_MAX_MC_ADDRS, 0);
if (bp->mcast_addr_list == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate multicast addr table\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to allocate multicast addr table");
return -ENOMEM;
}
bp->mc_list_dma_addr = rte_malloc_virt2iova(bp->mcast_addr_list);
if (bp->mc_list_dma_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR, "Fail to map mcast_addr_list to physical memory\n");
+ PMD_DRV_LOG_LINE(ERR, "Fail to map mcast_addr_list to physical memory");
return -ENOMEM;
}
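The two allocations above follow one pattern: grab zeroed, DMA-able memory with rte_zmalloc() and immediately resolve its IOVA so the firmware can DMA into it, failing fast when the mapping is unavailable. A hedged sketch of that pattern, with a hypothetical helper name and allocation tag:

```c
/* Hedged sketch of the zmalloc-plus-IOVA pattern used above. */
#include <rte_malloc.h>
#include <rte_memory.h>

static void *
alloc_dma_table(size_t len, rte_iova_t *iova)
{
	void *va = rte_zmalloc("dma_table", len, 0);  /* hypothetical tag */

	if (va == NULL)
		return NULL;

	*iova = rte_malloc_virt2iova(va);
	if (*iova == RTE_BAD_IOVA) {
		rte_free(va);  /* memory without an IOVA is useless for DMA */
		return NULL;
	}
	return va;
}
```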
@@ -5437,7 +5439,7 @@ static int bnxt_restore_dflt_mac(struct bnxt *bp)
/* Restore the old MAC configured */
rc = bnxt_hwrm_set_mac(bp);
if (rc)
- PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to restore MAC address");
return rc;
}
@@ -5488,7 +5490,7 @@ static void bnxt_check_fw_status(struct bnxt *bp)
fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
if (fw_status != BNXT_FW_STATUS_HEALTHY)
- PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n",
+ PMD_DRV_LOG_LINE(ERR, "Firmware not responding, status: %#x",
fw_status);
}
@@ -5619,25 +5621,25 @@ bnxt_init_locks(struct bnxt *bp)
err = pthread_mutex_init(&bp->flow_lock, NULL);
if (err) {
- PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
+ PMD_DRV_LOG_LINE(ERR, "Unable to initialize flow_lock");
return err;
}
err = pthread_mutex_init(&bp->def_cp_lock, NULL);
if (err) {
- PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
+ PMD_DRV_LOG_LINE(ERR, "Unable to initialize def_cp_lock");
return err;
}
err = pthread_mutex_init(&bp->health_check_lock, NULL);
if (err) {
- PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n");
+ PMD_DRV_LOG_LINE(ERR, "Unable to initialize health_check_lock");
return err;
}
err = pthread_mutex_init(&bp->err_recovery_lock, NULL);
if (err)
- PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n");
+ PMD_DRV_LOG_LINE(ERR, "Unable to initialize err_recovery_lock");
return err;
}
@@ -5650,11 +5652,11 @@ static int bnxt_alloc_switch_domain(struct bnxt *bp)
if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
if (rc)
- PMD_DRV_LOG(ERR,
- "Failed to alloc switch domain: %d\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to alloc switch domain: %d", rc);
else
- PMD_DRV_LOG(INFO,
- "Switch domain allocated %d\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Switch domain allocated %d",
bp->switch_domain_id);
}
@@ -5689,7 +5691,7 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to register driver");
+ PMD_DRV_LOG_LINE(ERR, "Failed to register driver");
return -EBUSY;
}
@@ -5697,13 +5699,13 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
if (bp->pdev->max_vfs) {
rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to allocate VFs");
return rc;
}
} else {
rc = bnxt_hwrm_allocate_pf_only(bp);
if (rc) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Failed to allocate PF resources");
return rc;
}
@@ -5714,7 +5716,7 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
bp->rss_conf.rss_key = rte_zmalloc("bnxt_rss_key",
HW_HASH_KEY_SIZE, 0);
if (bp->rss_conf.rss_key == NULL) {
- PMD_DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory",
+ PMD_DRV_LOG_LINE(ERR, "port %u cannot allocate RSS hash key memory",
bp->eth_dev->data->port_id);
return -ENOMEM;
}
@@ -5734,7 +5736,7 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
rc = bnxt_init_ctx_mem(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to init adv_flow_counters");
return rc;
}
@@ -5750,28 +5752,28 @@ bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
- "Invalid parameter passed to flow_xstat devarg.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid parameter passed to flow_xstat devarg.");
return -EINVAL;
}
flow_xstat = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(flow_xstat == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
- "Invalid parameter passed to flow_xstat devarg.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid parameter passed to flow_xstat devarg.");
return -EINVAL;
}
if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) {
- PMD_DRV_LOG(ERR,
- "Invalid value passed to flow_xstat devarg.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid value passed to flow_xstat devarg.");
return -EINVAL;
}
bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN;
if (BNXT_FLOW_XSTATS_EN(bp))
- PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
+ PMD_DRV_LOG_LINE(INFO, "flow_xstat feature enabled.");
return 0;
}
@@ -5785,28 +5787,28 @@ bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
- "Invalid parameter passed to max_num_kflows devarg.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid parameter passed to max_num_kflows devarg.");
return -EINVAL;
}
max_num_kflows = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(max_num_kflows == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
- "Invalid parameter passed to max_num_kflows devarg.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid parameter passed to max_num_kflows devarg.");
return -EINVAL;
}
if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) {
- PMD_DRV_LOG(ERR,
- "Invalid value passed to max_num_kflows devarg.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid value passed to max_num_kflows devarg.");
return -EINVAL;
}
bp->max_num_kflows = max_num_kflows;
if (bp->max_num_kflows)
- PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n",
+ PMD_DRV_LOG_LINE(INFO, "max_num_kflows set as %ldK.",
max_num_kflows);
return 0;
@@ -5821,30 +5823,30 @@ bnxt_parse_devarg_cqe_mode(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to cqe-mode "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
cqe_mode = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(cqe_mode == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to cqe-mode "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
if (BNXT_DEVARG_CQE_MODE_INVALID(cqe_mode)) {
- PMD_DRV_LOG(ERR, "Invalid cqe-mode(%d) devargs.\n",
+ PMD_DRV_LOG_LINE(ERR, "Invalid cqe-mode(%d) devargs.",
(uint16_t)cqe_mode);
return -EINVAL;
}
if (cqe_mode == 1)
bp->flags2 |= BNXT_FLAGS2_COMPRESSED_RX_CQE;
- PMD_DRV_LOG(INFO, "cqe-mode=%d feature enabled.\n", (uint8_t)cqe_mode);
+ PMD_DRV_LOG_LINE(INFO, "cqe-mode=%d feature enabled.", (uint8_t)cqe_mode);
return 0;
}
@@ -5858,29 +5860,29 @@ bnxt_parse_devarg_app_id(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to app-id "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
app_id = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(app_id == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to app_id "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
if (BNXT_DEVARG_APP_ID_INVALID(app_id)) {
- PMD_DRV_LOG(ERR, "Invalid app-id(%d) devargs.\n",
+ PMD_DRV_LOG_LINE(ERR, "Invalid app-id(%d) devargs.",
(uint16_t)app_id);
return -EINVAL;
}
bp->app_id = app_id;
- PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id);
+ PMD_DRV_LOG_LINE(INFO, "app-id=%d feature enabled.", (uint16_t)app_id);
return 0;
}
@@ -5894,29 +5896,29 @@ bnxt_parse_devarg_ieee_1588(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to ieee-1588 "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
ieee_1588 = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(ieee_1588 == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to ieee_1588 "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
if (BNXT_DEVARG_IEEE_1588_INVALID(ieee_1588)) {
- PMD_DRV_LOG(ERR, "Invalid ieee-1588(%d) devargs.\n",
+ PMD_DRV_LOG_LINE(ERR, "Invalid ieee-1588(%d) devargs.",
(uint16_t)ieee_1588);
return -EINVAL;
}
bp->ieee_1588 = ieee_1588;
- PMD_DRV_LOG(INFO, "ieee-1588=%d feature enabled.\n", (uint16_t)ieee_1588);
+ PMD_DRV_LOG_LINE(INFO, "ieee-1588=%d feature enabled.", (uint16_t)ieee_1588);
return 0;
}
@@ -5930,30 +5932,30 @@ bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
- "Invalid parameter passed to rep_is_pf devargs.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid parameter passed to rep_is_pf devargs.");
return -EINVAL;
}
rep_is_pf = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(rep_is_pf == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
- "Invalid parameter passed to rep_is_pf devargs.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid parameter passed to rep_is_pf devargs.");
return -EINVAL;
}
if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) {
- PMD_DRV_LOG(ERR,
- "Invalid value passed to rep_is_pf devargs.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid value passed to rep_is_pf devargs.");
return -EINVAL;
}
vfr_bp->flags |= rep_is_pf;
if (BNXT_REP_PF(vfr_bp))
- PMD_DRV_LOG(INFO, "PF representor\n");
+ PMD_DRV_LOG_LINE(INFO, "PF representor");
else
- PMD_DRV_LOG(INFO, "VF representor\n");
+ PMD_DRV_LOG_LINE(INFO, "VF representor");
return 0;
}
@@ -5967,31 +5969,31 @@ bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to rep_based_pf "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
rep_based_pf = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(rep_based_pf == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to rep_based_pf "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) {
- PMD_DRV_LOG(ERR,
- "Invalid value passed to rep_based_pf devargs.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid value passed to rep_based_pf devargs.");
return -EINVAL;
}
vfr_bp->rep_based_pf = rep_based_pf;
vfr_bp->flags |= BNXT_REP_BASED_PF_VALID;
- PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf);
+ PMD_DRV_LOG_LINE(INFO, "rep-based-pf = %d", vfr_bp->rep_based_pf);
return 0;
}
@@ -6005,30 +6007,30 @@ bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to rep_q_r2f "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
rep_q_r2f = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(rep_q_r2f == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to rep_q_r2f "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) {
- PMD_DRV_LOG(ERR,
- "Invalid value passed to rep_q_r2f devargs.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid value passed to rep_q_r2f devargs.");
return -EINVAL;
}
vfr_bp->rep_q_r2f = rep_q_r2f;
vfr_bp->flags |= BNXT_REP_Q_R2F_VALID;
- PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f);
+ PMD_DRV_LOG_LINE(INFO, "rep-q-r2f = %d", vfr_bp->rep_q_r2f);
return 0;
}
@@ -6042,30 +6044,30 @@ bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to rep_q_f2r "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
rep_q_f2r = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(rep_q_f2r == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to rep_q_f2r "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) {
- PMD_DRV_LOG(ERR,
- "Invalid value passed to rep_q_f2r devargs.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid value passed to rep_q_f2r devargs.");
return -EINVAL;
}
vfr_bp->rep_q_f2r = rep_q_f2r;
vfr_bp->flags |= BNXT_REP_Q_F2R_VALID;
- PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r);
+ PMD_DRV_LOG_LINE(INFO, "rep-q-f2r = %d", vfr_bp->rep_q_f2r);
return 0;
}
@@ -6079,30 +6081,30 @@ bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to rep_fc_r2f "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
rep_fc_r2f = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(rep_fc_r2f == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to rep_fc_r2f "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) {
- PMD_DRV_LOG(ERR,
- "Invalid value passed to rep_fc_r2f devargs.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid value passed to rep_fc_r2f devargs.");
return -EINVAL;
}
vfr_bp->flags |= BNXT_REP_FC_R2F_VALID;
vfr_bp->rep_fc_r2f = rep_fc_r2f;
- PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f);
+ PMD_DRV_LOG_LINE(INFO, "rep-fc-r2f = %lu", rep_fc_r2f);
return 0;
}
@@ -6116,30 +6118,30 @@ bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key,
char *end = NULL;
if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to rep_fc_f2r "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
rep_fc_f2r = strtoul(value, &end, 10);
if (end == NULL || *end != '\0' ||
(rep_fc_f2r == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"Invalid parameter passed to rep_fc_f2r "
- "devargs.\n");
+ "devargs.");
return -EINVAL;
}
if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) {
- PMD_DRV_LOG(ERR,
- "Invalid value passed to rep_fc_f2r devargs.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid value passed to rep_fc_f2r devargs.");
return -EINVAL;
}
vfr_bp->flags |= BNXT_REP_FC_F2R_VALID;
vfr_bp->rep_fc_f2r = rep_fc_f2r;
- PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r);
+ PMD_DRV_LOG_LINE(INFO, "rep-fc-f2r = %lu", rep_fc_f2r);
return 0;
}
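Every devarg parser in this stretch repeats one validation idiom: strtoul() with an end-pointer check, an ERANGE guard, and a per-argument bounds test before any flag is set. The driver open-codes it each time; a condensed, hypothetical helper showing just the shared idiom:

```c
/* Hypothetical condensed form of the strtoul validation repeated above. */
#include <errno.h>
#include <limits.h>
#include <stdlib.h>

static int
parse_bounded_ulong(const char *value, unsigned long max, unsigned long *out)
{
	char *end = NULL;
	unsigned long v;

	if (value == NULL || out == NULL)
		return -EINVAL;

	errno = 0;
	v = strtoul(value, &end, 10);
	/* Reject empty input, trailing junk, and out-of-range values. */
	if (end == value || *end != '\0' ||
	    (v == ULONG_MAX && errno == ERANGE) || v > max)
		return -EINVAL;

	*out = v;
	return 0;
}
```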
@@ -6229,8 +6231,8 @@ static int bnxt_drv_init(struct rte_eth_dev *eth_dev)
rc = bnxt_map_pci_bars(eth_dev);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to initialize board rc: %x\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to initialize board rc: %x", rc);
return rc;
}
@@ -6248,8 +6250,8 @@ static int bnxt_drv_init(struct rte_eth_dev *eth_dev)
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to allocate response buffer rc: %x\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to allocate response buffer rc: %x", rc);
return rc;
}
rc = bnxt_alloc_leds_info(bp);
@@ -6278,8 +6280,8 @@ static int bnxt_drv_init(struct rte_eth_dev *eth_dev)
bnxt_cfa_code_dynfield_offset =
rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc);
if (bnxt_cfa_code_dynfield_offset < 0) {
- PMD_DRV_LOG(ERR,
- "Failed to register mbuf field for TruFlow mark\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to register mbuf field for TruFlow mark");
return -rte_errno;
}
}
@@ -6296,7 +6298,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
int rc;
if (version_printed++ == 0)
- PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
+ PMD_DRV_LOG_LINE(INFO, "%s", bnxt_version);
eth_dev->dev_ops = &bnxt_dev_ops;
eth_dev->rx_queue_count = bnxt_rx_queue_count_op;
@@ -6338,8 +6340,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
if (rc)
goto error_free;
- PMD_DRV_LOG(INFO,
- "Found %s device at mem %" PRIX64 ", node addr %pM\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Found %s device at mem %" PRIX64 ", node addr %pM",
DRV_MODULE_NAME,
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);
@@ -6460,7 +6462,7 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EPERM;
- PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Calling Device uninit");
if (eth_dev->state != RTE_ETH_DEV_UNUSED)
bnxt_dev_close_op(eth_dev);
@@ -6481,11 +6483,11 @@ static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
if (!vf_rep_eth_dev)
continue;
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n",
+ PMD_DRV_LOG_LINE(DEBUG, "BNXT Port:%d VFR pci remove",
vf_rep_eth_dev->data->port_id);
rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit);
}
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n",
+ PMD_DRV_LOG_LINE(DEBUG, "BNXT Port:%d pci remove",
eth_dev->data->port_id);
ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);
@@ -6511,14 +6513,14 @@ static int bnxt_init_rep_info(struct bnxt *bp)
sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS(bp),
0);
if (!bp->rep_info) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to alloc memory for rep info");
return -ENOMEM;
}
bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
sizeof(*bp->cfa_code_map) *
BNXT_MAX_CFA_CODE, 0);
if (!bp->cfa_code_map) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to alloc memory for cfa_code_map");
bnxt_free_rep_info(bp);
return -ENOMEM;
}
@@ -6528,7 +6530,7 @@ static int bnxt_init_rep_info(struct bnxt *bp)
rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
if (rc) {
- PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
+ PMD_DRV_LOG_LINE(ERR, "Unable to initialize vfr_start_lock");
bnxt_free_rep_info(bp);
return rc;
}
@@ -6553,27 +6555,27 @@ static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
if (eth_da->type == RTE_ETH_REPRESENTOR_NONE)
return 0;
if (eth_da->type != RTE_ETH_REPRESENTOR_VF) {
- PMD_DRV_LOG(ERR, "unsupported representor type %d\n",
+ PMD_DRV_LOG_LINE(ERR, "unsupported representor type %d",
eth_da->type);
return -ENOTSUP;
}
num_rep = eth_da->nb_representor_ports;
if (num_rep > max_vf_reps) {
- PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
+ PMD_DRV_LOG_LINE(ERR, "nb_representor_ports = %d > %d MAX VF REPS",
num_rep, max_vf_reps);
return -EINVAL;
}
if (num_rep >= RTE_MAX_ETHPORTS) {
- PMD_DRV_LOG(ERR,
- "nb_representor_ports = %d > %d MAX ETHPORTS\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "nb_representor_ports = %d > %d MAX ETHPORTS",
num_rep, RTE_MAX_ETHPORTS);
return -EINVAL;
}
if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
- PMD_DRV_LOG(ERR,
- "Not a PF or trusted VF. No Representor support\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Not a PF or trusted VF. No Representor support");
/* Returning an error is not an option.
* Applications are not handling this correctly
*/
@@ -6591,7 +6593,7 @@ static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
};
if (representor.vf_id >= max_vf_reps) {
- PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
+ PMD_DRV_LOG_LINE(ERR, "VF-Rep id %d >= %d MAX VF ID",
representor.vf_id, max_vf_reps);
continue;
}
@@ -6683,20 +6685,20 @@ static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
bnxt_representor_init,
&representor);
if (ret) {
- PMD_DRV_LOG(ERR, "failed to create bnxt vf "
+ PMD_DRV_LOG_LINE(ERR, "failed to create bnxt vf "
"representor %s.", name);
goto err;
}
vf_rep_eth_dev = rte_eth_dev_allocated(name);
if (!vf_rep_eth_dev) {
- PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
+ PMD_DRV_LOG_LINE(ERR, "Failed to find the eth_dev"
" for VF-Rep: %s.", name);
ret = -ENODEV;
goto err;
}
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
+ PMD_DRV_LOG_LINE(DEBUG, "BNXT Port:%d VFR pci probe",
backing_eth_dev->data->port_id);
backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
vf_rep_eth_dev;
@@ -6735,7 +6737,7 @@ static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
}
num_rep = eth_da.nb_representor_ports;
- PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "nb_representor_ports = %d",
num_rep);
/* We could come here after first level of probe is already invoked
@@ -6754,7 +6756,7 @@ static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
}
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
+ PMD_DRV_LOG_LINE(DEBUG, "BNXT Port:%d pci probe",
backing_eth_dev->data->port_id);
if (!num_rep)
@@ -6779,7 +6781,7 @@ static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
* +ve value will at least help in proper cleanup
*/
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
+ PMD_DRV_LOG_LINE(DEBUG, "BNXT Port:%d pci remove", eth_dev->data->port_id);
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
if (rte_eth_dev_is_repr(eth_dev))
return rte_eth_dev_destroy(eth_dev,
@@ -28,7 +28,7 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
filter = bnxt_get_unused_filter(bp);
if (!filter) {
- PMD_DRV_LOG(ERR, "No more free filter resources\n");
+ PMD_DRV_LOG_LINE(ERR, "No more free filter resources");
return NULL;
}
@@ -49,7 +49,7 @@ struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
if (!filter) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to alloc memory for VF %hu filters",
vf);
return NULL;
}
@@ -126,20 +126,20 @@ void bnxt_free_filter_mem(struct bnxt *bp)
/* Call HWRM to try to free filter again */
rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
if (rc)
- PMD_DRV_LOG(ERR,
- "Cannot free ntuple filter: %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot free ntuple filter: %d",
rc);
}
filter->fw_ntuple_filter_id = UINT64_MAX;
if (filter->fw_l2_filter_id != ((uint64_t)-1) &&
filter->filter_type == HWRM_CFA_L2_FILTER) {
- PMD_DRV_LOG(DEBUG, "L2 filter is not free\n");
+ PMD_DRV_LOG_LINE(DEBUG, "L2 filter is not free");
/* Call HWRM to try to free filter again */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
- PMD_DRV_LOG(ERR,
- "Cannot free L2 filter: %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot free L2 filter: %d",
rc);
}
filter->fw_l2_filter_id = UINT64_MAX;
@@ -170,7 +170,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
max_filters * sizeof(struct bnxt_filter_info),
0);
if (filter_mem == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
+ PMD_DRV_LOG_LINE(ERR, "Failed to alloc memory for %d filters",
max_filters);
return -ENOMEM;
}
@@ -186,7 +186,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- PMD_DRV_LOG(ERR, "No more free filter resources\n");
+ PMD_DRV_LOG_LINE(ERR, "No more free filter resources");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
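bnxt_alloc_filter_mem() and bnxt_get_unused_filter() above implement a classic preallocated pool: one flat array of filters is threaded onto a STAILQ free list, and allocation is a pop from the head. A self-contained sketch of the same pattern with generic names:

```c
/* Hedged sketch of the STAILQ free-list pool pattern used for filters. */
#include <stddef.h>
#include <sys/queue.h>

struct entry {
	STAILQ_ENTRY(entry) next;
	/* payload fields would follow */
};

STAILQ_HEAD(free_list, entry);

static void
pool_init(struct free_list *fl, struct entry *arr, size_t n)
{
	size_t i;

	STAILQ_INIT(fl);
	for (i = 0; i < n; i++)
		STAILQ_INSERT_TAIL(fl, &arr[i], next);
}

static struct entry *
pool_get(struct free_list *fl)
{
	struct entry *e = STAILQ_FIRST(fl);

	if (e != NULL)
		STAILQ_REMOVE_HEAD(fl, next);  /* pop the first unused entry */
	return e;
}
```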
@@ -106,15 +106,15 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
use_ntuple |= 1;
break;
default:
- PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Unknown Flow type");
use_ntuple |= 0;
}
item++;
}
if (has_vlan && use_ntuple) {
- PMD_DRV_LOG(ERR,
- "VLAN flow cannot use NTUPLE filter\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "VLAN flow cannot use NTUPLE filter");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -158,7 +158,7 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
use_ntuple = bnxt_filter_type_check(pattern, error);
if (use_ntuple < 0)
return use_ntuple;
- PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
+ PMD_DRV_LOG_LINE(DEBUG, "Use NTUPLE %d", use_ntuple);
filter->filter_type = use_ntuple ?
HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
@@ -181,7 +181,7 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
inner =
((const struct rte_flow_item_any *)item->spec)->num > 3;
if (inner)
- PMD_DRV_LOG(DEBUG, "Parse inner header\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Parse inner header");
break;
case RTE_FLOW_ITEM_TYPE_ETH:
if (!item->spec)
@@ -229,8 +229,8 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"DMAC is invalid");
- PMD_DRV_LOG(ERR,
- "DMAC is invalid!\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "DMAC is invalid!");
return -rte_errno;
}
rte_memcpy(filter->dst_macaddr,
@@ -242,8 +242,8 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
BNXT_FLOW_L2_DST_VALID_FLAG;
filter->priority = attr->priority;
- PMD_DRV_LOG(DEBUG,
- "Creating a priority flow\n");
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Creating a priority flow");
}
if (rte_is_broadcast_ether_addr(&eth_mask->hdr.src_addr)) {
src = &eth_spec->hdr.src_addr;
@@ -253,8 +253,8 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"SMAC is invalid");
- PMD_DRV_LOG(ERR,
- "SMAC is invalid!\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "SMAC is invalid!");
return -rte_errno;
}
rte_memcpy(filter->src_macaddr,
@@ -267,7 +267,7 @@ bnxt_validate_and_parse_flow_type(const struct rte_flow_attr *attr,
BNXT_FLOW_L2_SRC_VALID_FLAG;
} /*
* else {
- * PMD_DRV_LOG(ERR, "Handle this condition\n");
+ * PMD_DRV_LOG_LINE(ERR, "Handle this condition");
* }
*/
if (eth_mask->hdr.ether_type) {
@@ -783,18 +783,18 @@ bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
filter1->flags |=
HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
- PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Create Outer filter");
}
if (nf->filter_type == HWRM_CFA_L2_FILTER &&
(nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
- PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Create L2 filter for SRC MAC");
filter1->flags |=
HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
} else {
- PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Create L2 filter for DST MAC");
memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
}
@@ -988,8 +988,8 @@ static int match_vnic_rss_cfg(struct bnxt *bp,
}
if (match != vnic->rx_queue_cnt) {
- PMD_DRV_LOG(ERR,
- "VNIC queue count %d vs queues matched %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "VNIC queue count %d vs queues matched %d",
match, vnic->rx_queue_cnt);
return -EINVAL;
}
@@ -1021,8 +1021,8 @@ bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
filter->l2_ref_cnt = filter1->l2_ref_cnt;
filter->flow_id = filter1->flow_id;
- PMD_DRV_LOG(DEBUG,
- "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u",
filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
}
@@ -1087,7 +1087,7 @@ bnxt_vnic_rss_cfg_update(struct bnxt *bp,
/* Validate Rx queues */
for (i = 0; i < rss->queue_num; i++) {
- PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", rss->queue[i]);
+ PMD_DRV_LOG_LINE(DEBUG, "RSS action Queue %d", rss->queue[i]);
if (rss->queue[i] >= bp->rx_nr_rings ||
!bp->rx_queues[rss->queue[i]]) {
@@ -1306,11 +1306,11 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
rc = -rte_errno;
goto ret;
}
- PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
+ PMD_DRV_LOG_LINE(DEBUG, "Queue index %d", act_q->index);
vnic_id = attr->group;
if (!vnic_id) {
- PMD_DRV_LOG(DEBUG, "Group id is 0\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Group id is 0");
vnic_id = act_q->index;
}
@@ -1319,8 +1319,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
vnic = &bp->vnic_info[vnic_id];
if (vnic->rx_queue_cnt) {
if (vnic->start_grp_id != act_q->index) {
- PMD_DRV_LOG(ERR,
- "VNIC already in use\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "VNIC already in use");
rte_flow_error_set(error,
EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1339,8 +1339,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto use_vnic;
if (!rxq) {
- PMD_DRV_LOG(ERR,
- "Queue invalid or used with other VNIC\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Queue invalid or used with other VNIC");
rte_flow_error_set(error,
EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1357,20 +1357,20 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
vnic->end_grp_id = act_q->index;
vnic->func_default = 0; //This is not a default VNIC.
- PMD_DRV_LOG(DEBUG, "VNIC found\n");
+ PMD_DRV_LOG_LINE(DEBUG, "VNIC found");
rc = bnxt_vnic_prep(bp, vnic, act, error);
if (rc)
goto ret;
- PMD_DRV_LOG(DEBUG,
- "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "vnic[%d] = %p vnic->fw_grp_ids = %p",
act_q->index, vnic, vnic->fw_grp_ids);
use_vnic:
vnic->ff_pool_idx = vnic_id;
- PMD_DRV_LOG(DEBUG,
- "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Setting vnic ff_idx %d", vnic->ff_pool_idx);
filter->dst_id = vnic->fw_vnic_id;
/* For ntuple filter, create the L2 filter with default VNIC.
@@ -1392,7 +1392,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
- PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d",
filter, filter1, filter1->l2_ref_cnt);
bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
break;
@@ -1528,8 +1528,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
if (vnic->rx_queue_cnt) {
rc = match_vnic_rss_cfg(bp, vnic, rss);
if (rc) {
- PMD_DRV_LOG(ERR,
- "VNIC and RSS config mismatch\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "VNIC and RSS config mismatch");
rte_flow_error_set(error,
EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1542,7 +1542,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
}
for (i = 0; i < rss->queue_num; i++) {
- PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "RSS action Queue %d",
rss->queue[i]);
if (!rss->queue[i] ||
@@ -1560,8 +1560,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
INVALID_HW_RING_ID) {
- PMD_DRV_LOG(ERR,
- "queue active with other VNIC\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "queue active with other VNIC");
rte_flow_error_set(error,
EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1584,13 +1584,13 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
if (rc)
goto ret;
- PMD_DRV_LOG(DEBUG,
- "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "vnic[%d] = %p vnic->fw_grp_ids = %p",
vnic_id, vnic, vnic->fw_grp_ids);
vnic->ff_pool_idx = vnic_id;
- PMD_DRV_LOG(DEBUG,
- "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Setting vnic ff_pool_idx %d", vnic->ff_pool_idx);
/* This can be done only after vnic_grp_alloc is done. */
for (i = 0; i < vnic->rx_queue_cnt; i++) {
@@ -1632,7 +1632,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
}
bnxt_hwrm_vnic_rss_cfg(bp, vnic);
} else {
- PMD_DRV_LOG(DEBUG, "No RSS config required\n");
+ PMD_DRV_LOG_LINE(DEBUG, "No RSS config required");
}
vnic_found:
@@ -1648,7 +1648,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
- PMD_DRV_LOG(DEBUG, "L2 filter created\n");
+ PMD_DRV_LOG_LINE(DEBUG, "L2 filter created");
bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
break;
case RTE_FLOW_ACTION_TYPE_MARK:
@@ -1663,8 +1663,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
}
if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
- PMD_DRV_LOG(DEBUG,
- "Disabling vector processing for mark\n");
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Disabling vector processing for mark");
bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts;
bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
}
@@ -1672,7 +1672,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
filter->mark = ((const struct rte_flow_action_mark *)
act->conf)->id;
- PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark);
+ PMD_DRV_LOG_LINE(DEBUG, "Mark the flow %d", filter->mark);
break;
default:
rte_flow_error_set(error,
@@ -1718,7 +1718,7 @@ struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
filter->dst_id == vnic->fw_vnic_id) {
- PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Found matching VNIC Id %d",
vnic->ff_pool_idx);
return vnic;
}
@@ -1764,7 +1764,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
if (STAILQ_EMPTY(&vnic->filter)) {
bnxt_vnic_cleanup(bp, vnic);
bp->nr_vnics--;
- PMD_DRV_LOG(DEBUG, "Free VNIC\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Free VNIC");
}
}
@@ -1891,7 +1891,7 @@ void bnxt_flow_cnt_alarm_cb(void *arg)
struct bnxt *bp = arg;
if (!bp->flow_stat->rx_fc_out_tbl.va) {
- PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n");
+ PMD_DRV_LOG_LINE(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?");
bnxt_cancel_fc_thread(bp);
return;
}
@@ -1908,7 +1908,7 @@ void bnxt_flow_cnt_alarm_cb(void *arg)
rc = bnxt_flow_stats_req(bp);
if (rc) {
- PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n");
+ PMD_DRV_LOG_LINE(ERR, "Flow stat alarm not rescheduled.");
return;
}
@@ -2020,7 +2020,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
bnxt_acquire_flow_lock(bp);
ret = bnxt_flow_args_validate(attr, pattern, actions, error);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "Not a validate flow.\n");
+ PMD_DRV_LOG_LINE(ERR, "Not a validate flow.");
goto free_flow;
}
@@ -2039,15 +2039,15 @@ bnxt_flow_create(struct rte_eth_dev *dev,
ret = bnxt_match_filter(bp, filter);
if (ret == -EEXIST) {
- PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Flow already exists.");
/* Clear the filter that was created as part of
* validate_and_parse_flow() above
*/
bnxt_hwrm_clear_l2_filter(bp, filter);
goto free_filter;
} else if (ret == -EXDEV) {
- PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
- PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Flow with same pattern exists");
+ PMD_DRV_LOG_LINE(DEBUG, "Updating with different destination");
update_flow = true;
}
@@ -2099,8 +2099,8 @@ bnxt_flow_create(struct rte_eth_dev *dev,
bnxt_hwrm_tunnel_redirect_free(bp,
filter->tunnel_type);
if (ret) {
- PMD_DRV_LOG(ERR,
- "Unable to free existing tunnel\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Unable to free existing tunnel");
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE,
NULL,
@@ -2155,8 +2155,8 @@ bnxt_flow_create(struct rte_eth_dev *dev,
}
if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
- PMD_DRV_LOG(DEBUG,
- "Mark action: mark id 0x%x, flow id 0x%x\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Mark action: mark id 0x%x, flow id 0x%x",
filter->mark, filter->flow_id);
/* TCAM and EM should be 16-bit only.
@@ -2182,7 +2182,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
bp->flow_stat->flow_count++;
bnxt_release_flow_lock(bp);
bnxt_setup_flow_counter(bp);
- PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Successfully created flow.");
return flow;
}
@@ -2232,15 +2232,15 @@ static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
"tunnel_redirect info cmd fail");
return ret;
}
- PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
+ PMD_DRV_LOG_LINE(INFO, "Pre-existing tunnel fid = %x vf->fid = %x",
tun_dst_fid + bp->first_vf_id, bp->fw_fid);
/* Tunnel doesn't belong to this VF, so don't send HWRM
* cmd, just delete the flow from driver
*/
if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id)) {
- PMD_DRV_LOG(ERR,
- "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free");
} else {
ret = bnxt_hwrm_tunnel_redirect_free(bp,
filter->tunnel_type);
@@ -2314,7 +2314,7 @@ _bnxt_flow_destroy(struct bnxt *bp,
ret = bnxt_match_filter(bp, filter);
if (ret == 0)
- PMD_DRV_LOG(ERR, "Could not find matching flow\n");
+ PMD_DRV_LOG_LINE(ERR, "Could not find matching flow");
if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
@@ -323,7 +323,7 @@ static int page_getenum(size_t size)
return 22;
if (size <= 1 << 30)
return 30;
- PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
+ PMD_DRV_LOG_LINE(ERR, "Page size %zu out of range", size);
return sizeof(int) * 8 - 1;
}
@@ -402,7 +402,7 @@ bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
done = bnxt_flush_rx_cmp(cpr);
if (done)
- PMD_DRV_LOG(DEBUG, "HWRM DONE for %s ring\n",
+ PMD_DRV_LOG_LINE(DEBUG, "HWRM DONE for %s ring",
rx ? "Rx" : "Tx");
/* We are about to timeout and still haven't seen the
@@ -410,7 +410,7 @@ bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
*/
if (!done && timeout) {
done = 1;
- PMD_DRV_LOG(DEBUG, "Timing out for %s ring\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Timing out for %s ring",
rx ? "Rx" : "Tx");
}
} else {
@@ -555,8 +555,8 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
return -ETIMEDOUT;
- PMD_DRV_LOG(ERR,
- "Error(timeout) sending msg 0x%04x, seq_id %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error(timeout) sending msg 0x%04x, seq_id %d",
req->req_type, req->seq_id);
bp->flags |= BNXT_FLAG_FW_TIMEDOUT;
return -ETIMEDOUT;
@@ -604,7 +604,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
#define HWRM_CHECK_RESULT() do {\
if (rc) { \
- PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
+ PMD_DRV_LOG_LINE(ERR, "failed rc:%d", rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
rc = -EACCES; \
@@ -625,15 +625,15 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
if (resp->resp_len >= 16) { \
struct hwrm_err_output *tmp_hwrm_err_op = \
(void *)resp; \
- PMD_DRV_LOG(ERR, \
- "error %d:%d:%08x:%04x\n", \
+ PMD_DRV_LOG_LINE(ERR, \
+ "error %d:%d:%08x:%04x", \
rc, tmp_hwrm_err_op->cmd_err, \
rte_le_to_cpu_32(\
tmp_hwrm_err_op->opaque_0), \
rte_le_to_cpu_16(\
tmp_hwrm_err_op->opaque_1)); \
} else { \
- PMD_DRV_LOG(ERR, "error %d\n", rc); \
+ PMD_DRV_LOG_LINE(ERR, "error %d", rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
@@ -804,7 +804,7 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
if (filter->matching_l2_fltr_ptr)
l2_filter = filter->matching_l2_fltr_ptr;
- PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d",
filter, l2_filter, l2_filter->l2_ref_cnt);
if (l2_filter->l2_ref_cnt == 0)
@@ -854,8 +854,8 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
- PMD_DRV_LOG(DEBUG,
- "Add vlan %u to vmdq pool %u\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Add vlan %u to vmdq pool %u",
conf->pool_map[j].vlan_id, j);
filter->l2_ivlan = conf->pool_map[j].vlan_id;
@@ -1052,7 +1052,7 @@ static int bnxt_alloc_vf_info(struct bnxt *bp, uint16_t max_vfs)
vf_info = rte_zmalloc("bnxt_vf_info", sizeof(*vf_info) * max_vfs, 0);
if (vf_info == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc vf info\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to alloc vf info");
return -ENOMEM;
}
@@ -1062,7 +1062,7 @@ static int bnxt_alloc_vf_info(struct bnxt *bp, uint16_t max_vfs)
vf_info[i].vlan_table = rte_zmalloc("VF VLAN table",
getpagesize(), getpagesize());
if (vf_info[i].vlan_table == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc VLAN table for VF %d\n", i);
+ PMD_DRV_LOG_LINE(ERR, "Failed to alloc VLAN table for VF %d", i);
goto err;
}
rte_mem_lock_page(vf_info[i].vlan_table);
@@ -1070,7 +1070,7 @@ static int bnxt_alloc_vf_info(struct bnxt *bp, uint16_t max_vfs)
vf_info[i].vlan_as_table = rte_zmalloc("VF VLAN AS table",
getpagesize(), getpagesize());
if (vf_info[i].vlan_as_table == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc VLAN AS table for VF %d\n", i);
+ PMD_DRV_LOG_LINE(ERR, "Failed to alloc VLAN AS table for VF %d", i);
goto err;
}
rte_mem_lock_page(vf_info[i].vlan_as_table);
@@ -1138,7 +1138,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->max_vnics = rte_le_to_cpu_16(BNXT_MAX_VNICS_COS_CLASSIFY);
else
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
- PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Max l2_cntxts is %d vnics is %d",
bp->max_l2_ctx, bp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters);
@@ -1152,7 +1152,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
if (BNXT_CHIP_P5(bp) || BNXT_PF(bp)) {
bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
- PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
+ PMD_DRV_LOG_LINE(DEBUG, "PTP SUPPORTED");
HWRM_UNLOCK();
bnxt_hwrm_ptp_qcfg(bp);
}
@@ -1163,7 +1163,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
- PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Adapter Error recovery SUPPORTED");
}
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
@@ -1176,18 +1176,18 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_BS_V2_SUPPORTED) {
- PMD_DRV_LOG(DEBUG, "Backing store v2 supported\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Backing store v2 supported");
if (BNXT_CHIP_P7(bp))
bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
}
if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) {
bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
- PMD_DRV_LOG(DEBUG, "VLAN acceleration for TX is enabled\n");
+ PMD_DRV_LOG_LINE(DEBUG, "VLAN acceleration for TX is enabled");
}
bp->tunnel_disable_flag = rte_le_to_cpu_16(resp->tunnel_disable_flag);
if (bp->tunnel_disable_flag)
- PMD_DRV_LOG(DEBUG, "Tunnel parsing capability is disabled, flags : %#x\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Tunnel parsing capability is disabled, flags : %#x",
bp->tunnel_disable_flag);
if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
@@ -1246,7 +1246,7 @@ int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
- PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
+ PMD_DRV_LOG_LINE(INFO, "CoS assignment capability enabled");
}
if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
@@ -1254,7 +1254,7 @@ int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP) {
bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS_TRUSTED_VF;
- PMD_DRV_LOG(DEBUG, "Trusted VF's outer RSS capability is enabled\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Trusted VF's outer RSS capability is enabled");
}
if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
@@ -1262,7 +1262,7 @@ int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP) {
bp->vnic_cap_flags |= BNXT_VNIC_CAP_VLAN_RX_STRIP;
- PMD_DRV_LOG(DEBUG, "Rx VLAN strip capability enabled\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Rx VLAN strip capability enabled");
}
if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RING_SELECT_MODE_XOR_CAP)
@@ -1549,7 +1549,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
else
HWRM_CHECK_RESULT();
- PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n",
+ PMD_DRV_LOG_LINE(INFO, "%d.%d.%d:%d.%d.%d.%d",
resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b,
@@ -1558,7 +1558,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
((uint32_t)resp->hwrm_fw_min_8b << 16) |
((uint32_t)resp->hwrm_fw_bld_8b << 8) |
resp->hwrm_fw_rsvd_8b;
- PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
+ PMD_DRV_LOG_LINE(INFO, "Driver HWRM version: %d.%d.%d",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
fw_version = resp->hwrm_intf_maj_8b << 16;
@@ -1574,13 +1574,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
- PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
+ PMD_DRV_LOG_LINE(ERR, "Unsupported firmware API version");
rc = -EINVAL;
goto error;
}
if (bp->max_req_len > resp->max_req_win_len) {
- PMD_DRV_LOG(ERR, "Unsupported request length\n");
+ PMD_DRV_LOG_LINE(ERR, "Unsupported request length");
rc = -EINVAL;
goto error;
}
@@ -1602,7 +1602,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
- PMD_DRV_LOG(DEBUG, "Short command supported\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Short command supported");
bp->flags |= BNXT_FLAG_SHORT_CMD;
}
@@ -1627,8 +1627,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
rte_free(bp->hwrm_short_cmd_req_addr);
- PMD_DRV_LOG(ERR,
- "Unable to map buffer to physical memory.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Unable to map buffer to physical memory.");
rc = -ENOMEM;
goto error;
}
@@ -1636,26 +1636,26 @@ int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
if (dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
bp->flags |= BNXT_FLAG_KONG_MB_EN;
- PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Kong mailbox channel enabled");
}
if (dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
- PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
+ PMD_DRV_LOG_LINE(DEBUG, "FW supports Trusted VFs");
if (dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
- PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
+ PMD_DRV_LOG_LINE(DEBUG, "FW supports advanced flow management");
}
if (dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
- PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
+ PMD_DRV_LOG_LINE(DEBUG, "FW supports advanced flow counters");
bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
}
if (dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED) {
- PMD_DRV_LOG(DEBUG, "Host-based truflow feature enabled.\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Host-based truflow feature enabled.");
bp->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN;
}
@@ -1680,7 +1680,7 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp)
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "Port %u: Unregistered with fw\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Port %u: Unregistered with fw",
bp->eth_dev->data->port_id);
return rc;
@@ -1699,7 +1699,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
if (bp->link_info->auto_mode && conf->link_speed) {
req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
- PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Disabling AutoNeg");
}
req.flags = rte_cpu_to_le_32(conf->phy_flags);
@@ -1760,7 +1760,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
} else {
req.flags =
rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
- PMD_DRV_LOG(INFO, "Force Link Down\n");
+ PMD_DRV_LOG_LINE(INFO, "Force Link Down");
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -1828,9 +1828,9 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
/* Display the captured P7 phy details */
if (BNXT_LINK_SPEEDS_V2(bp)) {
- PMD_DRV_LOG(DEBUG, "Phytype:%d, Media_type:%d, Status: %d, Link Signal:%d\n"
+ PMD_DRV_LOG_LINE(DEBUG, "Phytype:%d, Media_type:%d, Status: %d, Link Signal:%d\n"
"Active Fec: %d Support_speeds2:%x, Force_link_speedsv2:%x\n"
- "Auto_link_speedsv2:%x, Active_lanes:%d\n",
+ "Auto_link_speedsv2:%x, Active_lanes:%d",
link_info->phy_type,
link_info->media_type,
link_info->phy_link_status,
@@ -1850,8 +1850,8 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
desc = ((struct link_speeds2_tbl *)
bnxt_get_hwrm_to_rte_speeds2_entry(link_info->link_speed))->desc;
- PMD_DRV_LOG(INFO, "Link Speed: %s %s, Status: %s Signal-mode: %s\n"
- "Media type: %s, Xcvr type: %s, Active FEC: %s Lanes: %d\n",
+ PMD_DRV_LOG_LINE(INFO, "Link Speed: %s %s, Status: %s Signal-mode: %s\n"
+ "Media type: %s, Xcvr type: %s, Active FEC: %s Lanes: %d",
desc,
!(link_info->auto_mode) ? "Forced" : "AutoNegotiated",
link_status_str[link_info->phy_link_status % MAX_LINK_STR],
@@ -1865,11 +1865,11 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
return rc;
}
- PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x",
link_info->link_speed, link_info->auto_mode,
link_info->auto_link_speed, link_info->auto_link_speed_mask,
link_info->support_speeds, link_info->force_link_speed);
- PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x",
link_info->link_signal_mode,
link_info->auto_pam4_link_speed_mask,
link_info->support_pam4_speeds,
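In the two multi-line messages just above, only the terminating "\n" is
dropped; the interior separators are message content and still render as
line breaks. A quick check of that behavior against the hypothetical
wrapper sketched earlier:

    /* Prints two lines; the macro adds only the final terminator. */
    DRV_LOG_LINE(DEBUG, "first line\n"
                        "second line %d", 42);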
@@ -1958,7 +1958,7 @@ static bool bnxt_find_lossy_profile(struct bnxt *bp, bool use_prof_type)
int i;
for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
- PMD_DRV_LOG(DEBUG, "profile %d, profile_id %d, type %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "profile %d, profile_id %d, type %d",
bp->tx_cos_queue[i].profile,
bp->tx_cos_queue[i].id,
bp->tx_cos_queue[i].profile_type);
@@ -2069,7 +2069,7 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
}
}
- PMD_DRV_LOG(DEBUG, "Tx COS Queue ID %d\n", bp->tx_cosq_id[0]);
+ PMD_DRV_LOG_LINE(DEBUG, "Tx COS Queue ID %d", bp->tx_cosq_id[0]);
bp->max_tc = resp->max_configurable_queues;
bp->max_lltc = resp->max_configurable_lossless_queues;
@@ -2165,7 +2165,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
break;
default:
- PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
+ PMD_DRV_LOG_LINE(ERR, "hwrm alloc invalid ring type %d",
ring_type);
HWRM_UNLOCK();
return -EINVAL;
@@ -2179,33 +2179,33 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
- PMD_DRV_LOG(ERR,
- "hwrm_ring_alloc cp failed. rc:%d\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "hwrm_ring_alloc cp failed. rc:%d", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
- PMD_DRV_LOG(ERR,
- "hwrm_ring_alloc rx failed. rc:%d\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "hwrm_ring_alloc rx failed. rc:%d", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
- PMD_DRV_LOG(ERR,
- "hwrm_ring_alloc rx agg failed. rc:%d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "hwrm_ring_alloc rx agg failed. rc:%d",
rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
- PMD_DRV_LOG(ERR,
- "hwrm_ring_alloc tx failed. rc:%d\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "hwrm_ring_alloc tx failed. rc:%d", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
- PMD_DRV_LOG(ERR,
- "hwrm_ring_alloc nq failed. rc:%d\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "hwrm_ring_alloc nq failed. rc:%d", rc);
HWRM_UNLOCK();
return rc;
default:
- PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "Invalid ring. rc:%d", rc);
HWRM_UNLOCK();
return rc;
}
@@ -2243,27 +2243,27 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
+ PMD_DRV_LOG_LINE(ERR, "hwrm_ring_free cp failed. rc:%d",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
+ PMD_DRV_LOG_LINE(ERR, "hwrm_ring_free rx failed. rc:%d",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
+ PMD_DRV_LOG_LINE(ERR, "hwrm_ring_free tx failed. rc:%d",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
- PMD_DRV_LOG(ERR,
- "hwrm_ring_free nq failed. rc:%d\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "hwrm_ring_free nq failed. rc:%d", rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
- PMD_DRV_LOG(ERR,
- "hwrm_ring_free agg failed. rc:%d\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "hwrm_ring_free agg failed. rc:%d", rc);
return rc;
default:
- PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "Invalid ring, rc:%d", rc);
return rc;
}
}
@@ -2411,7 +2411,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG_LINE(DEBUG, "VNIC ID %x", vnic->fw_vnic_id);
return rc;
}
@@ -2452,7 +2452,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG_LINE(DEBUG, "VNIC ID %x", vnic->fw_vnic_id);
return rc;
}
@@ -2487,7 +2487,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
uint32_t enables = 0;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG_LINE(DEBUG, "VNIC ID %x", vnic->fw_vnic_id);
return rc;
}
@@ -2562,7 +2562,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
if (bnxt_compressed_rx_cqe_mode_enabled(bp)) {
req.l2_cqe_mode = HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_COMPRESSED;
enables |= HWRM_VNIC_CFG_INPUT_ENABLES_L2_CQE_MODE;
- PMD_DRV_LOG(DEBUG, "Enabling compressed Rx CQE\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Enabling compressed Rx CQE");
}
req.enables = rte_cpu_to_le_32(enables);
@@ -2602,7 +2602,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG_LINE(DEBUG, "VNIC QCFG ID %d", vnic->fw_vnic_id);
return rc;
}
HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
@@ -2670,7 +2670,7 @@ int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
bp->hwrm_cmd_resp_addr;
if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
- PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG_LINE(DEBUG, "VNIC RSS Rule %x", vnic->rss_rule);
return rc;
}
HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
@@ -2714,7 +2714,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG_LINE(DEBUG, "VNIC FREE ID %x", vnic->fw_vnic_id);
return rc;
}
@@ -2779,7 +2779,7 @@ bnxt_hwrm_vnic_rss_qcfg_p5(struct bnxt *bp)
BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "RSS QCFG: Hash level %d\n", resp->hash_mode_flags);
+ PMD_DRV_LOG_LINE(DEBUG, "RSS QCFG: Hash level %d", resp->hash_mode_flags);
return rc;
}
@@ -2826,7 +2826,7 @@ bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "RSS CFG: Hash level %d\n", req.hash_mode_flags);
+ PMD_DRV_LOG_LINE(DEBUG, "RSS CFG: Hash level %d", req.hash_mode_flags);
}
return rc;
@@ -2874,7 +2874,7 @@ bnxt_hwrm_vnic_rss_cfg_hash_mode_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic
req.vnic_id = rte_cpu_to_le_16(BNXT_DFLT_VNIC_ID_INVALID);
req.rss_ctx_idx = rte_cpu_to_le_16(BNXT_RSS_CTX_IDX_INVALID);
- PMD_DRV_LOG(DEBUG, "RSS CFG: Hash level %d\n", req.hash_mode_flags);
+ PMD_DRV_LOG_LINE(DEBUG, "RSS CFG: Hash level %d", req.hash_mode_flags);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
BNXT_USE_CHIMP_MB);
@@ -2949,7 +2949,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
uint16_t size;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG_LINE(DEBUG, "VNIC ID %x", vnic->fw_vnic_id);
return rc;
}
@@ -3013,18 +3013,18 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
return 0;
/* Return an error if enabling TPA w/ compressed Rx CQE. */
- PMD_DRV_LOG(ERR, "No HW support for LRO with compressed Rx\n");
+ PMD_DRV_LOG_LINE(ERR, "No HW support for LRO with compressed Rx");
return -ENOTSUP;
}
if ((BNXT_CHIP_P5(bp) || BNXT_CHIP_P7(bp)) && !bp->max_tpa_v2) {
if (enable)
- PMD_DRV_LOG(ERR, "No HW support for LRO\n");
+ PMD_DRV_LOG_LINE(ERR, "No HW support for LRO");
return -ENOTSUP;
}
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Invalid vNIC ID");
return 0;
}
@@ -3419,8 +3419,8 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map response address to physical memory\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "unable to map response address to physical memory");
return -ENOMEM;
}
rte_spinlock_init(&bp->hwrm_lock);
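Several hunks in this series repeat one DMA-buffer idiom: allocate from the
DPDK heap, resolve the IOVA with rte_malloc_virt2iova(), and treat
RTE_BAD_IOVA as a fatal mapping failure worth an ERR log and -ENOMEM. A
self-contained sketch of that pattern, with a hypothetical helper name
around the real rte_* calls:

    #include <rte_malloc.h>
    #include <rte_memory.h>

    /* Allocate a zeroed DMA-able buffer and report its IOVA; NULL on failure. */
    static void *
    alloc_dma_buf(size_t len, rte_iova_t *iova)
    {
        void *buf = rte_zmalloc("example_dma_buf", len, 0);

        if (buf == NULL)
            return NULL;
        *iova = rte_malloc_virt2iova(buf);
        if (*iova == RTE_BAD_IOVA) {
            rte_free(buf); /* caller logs and returns -ENOMEM */
            return NULL;
        }
        return buf;
    }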
@@ -3471,7 +3471,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
while (!STAILQ_EMPTY(&vnic->flow_list)) {
flow = STAILQ_FIRST(&vnic->flow_list);
filter = flow->filter;
- PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
+ PMD_DRV_LOG_LINE(DEBUG, "filter type %d", filter->filter_type);
rc = bnxt_clear_one_vnic_filter(bp, filter);
STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
@@ -3658,8 +3658,8 @@ static uint16_t bnxt_parse_eth_link_speed(struct bnxt *bp, uint32_t conf_link_sp
link_info->link_signal_mode = BNXT_SIG_MODE_PAM4;
break;
default:
- PMD_DRV_LOG(ERR,
- "Unsupported link speed %d; default to AUTO\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Unsupported link speed %d; default to AUTO",
conf_link_speed);
break;
}
@@ -3691,21 +3691,21 @@ static int bnxt_validate_link_speed(struct bnxt *bp)
one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
if (one_speed & (one_speed - 1)) {
- PMD_DRV_LOG(ERR,
- "Invalid advertised speeds (%u) for port %u\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid advertised speeds (%u) for port %u",
link_speed, port_id);
return -EINVAL;
}
if ((one_speed & link_speed_capa) != one_speed) {
- PMD_DRV_LOG(ERR,
- "Unsupported advertised speed (%u) for port %u\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Unsupported advertised speed (%u) for port %u",
link_speed, port_id);
return -EINVAL;
}
} else {
if (!(link_speed & link_speed_capa)) {
- PMD_DRV_LOG(ERR,
- "Unsupported advertised speeds (%u) for port %u\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Unsupported advertised speeds (%u) for port %u",
link_speed, port_id);
return -EINVAL;
}
@@ -3814,7 +3814,7 @@ static uint32_t bnxt_parse_hw_link_speed(struct bnxt *bp, uint16_t hw_link_speed
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
- PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
+ PMD_DRV_LOG_LINE(ERR, "HWRM link speed %d not defined",
hw_link_speed);
break;
}
@@ -3835,7 +3835,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
default:
- PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
+ PMD_DRV_LOG_LINE(ERR, "HWRM link duplex %d not defined",
hw_link_duplex);
break;
}
@@ -3849,11 +3849,11 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
rc = bnxt_hwrm_port_phy_qcaps(bp);
if (rc)
- PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "Get link config failed with rc %d", rc);
rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
if (rc) {
- PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "Get link config failed with rc %d", rc);
goto exit;
}
@@ -3882,14 +3882,14 @@ static int bnxt_hwrm_port_phy_cfg_v2(struct bnxt *bp, struct bnxt_link_info *con
if (!conf->link_up) {
req.flags =
rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
- PMD_DRV_LOG(ERR, "Force Link Down\n");
+ PMD_DRV_LOG_LINE(ERR, "Force Link Down");
goto link_down;
}
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
if (bp->link_info->auto_mode && conf->link_speed) {
req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
- PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Disabling AutoNeg");
}
req.flags = rte_cpu_to_le_32(conf->phy_flags);
if (!conf->link_speed) {
@@ -3955,7 +3955,7 @@ static int bnxt_set_hwrm_link_config_v2(struct bnxt *bp, bool link_up)
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
bp->link_info->media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
- PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
+ PMD_DRV_LOG_LINE(ERR, "10GBase-T devices must autoneg");
return -EINVAL;
}
@@ -3970,7 +3970,7 @@ static int bnxt_set_hwrm_link_config_v2(struct bnxt *bp, bool link_up)
port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg_v2(bp, &link_req);
if (rc)
- PMD_DRV_LOG(ERR, "Set link config failed with rc %d\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "Set link config failed with rc %d", rc);
return rc;
}
@@ -4005,7 +4005,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
* The speed should be forced and autoneg disabled
* to configure 40G speed.
*/
- PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
+ PMD_DRV_LOG_LINE(INFO, "Disabling autoneg for 40G");
autoneg = 0;
}
@@ -4014,7 +4014,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
bp->link_info->force_pam4_link_speed ==
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB) {
autoneg = 0;
- PMD_DRV_LOG(DEBUG, "Disabling autoneg for 200G\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Disabling autoneg for 200G");
}
speed = bnxt_parse_eth_link_speed(bp, dev_conf->link_speeds,
@@ -4037,7 +4037,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
bp->link_info->media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
- PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
+ PMD_DRV_LOG_LINE(ERR, "10GBase-T devices must autoneg");
return -EINVAL;
}
@@ -4072,8 +4072,8 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Set link config failed with rc %d\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "Set link config failed with rc %d", rc);
}
error:
@@ -4111,12 +4111,12 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
!BNXT_VF_IS_TRUSTED(bp) &&
(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
- PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
+ PMD_DRV_LOG_LINE(INFO, "Trusted VF cap enabled");
} else if (BNXT_VF(bp) &&
BNXT_VF_IS_TRUSTED(bp) &&
!(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
- PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
+ PMD_DRV_LOG_LINE(INFO, "Trusted VF cap disabled");
}
if (mtu)
@@ -4176,13 +4176,13 @@ int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST) {
bp->flags |= BNXT_FLAG_MULTI_HOST;
bp->multi_host_pf_pci_id = resp->pci_id;
- PMD_DRV_LOG(INFO, "Mult-Host system Parent PCI-ID: 0x%x\n", resp->pci_id);
+ PMD_DRV_LOG_LINE(INFO, "Mult-Host system Parent PCI-ID: 0x%x", resp->pci_id);
}
/* check for the multi-root support */
if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_ROOT) {
bp->flags2 |= BNXT_FLAGS2_MULTIROOT_EN;
- PMD_DRV_LOG(DEBUG, "PF enabled with multi root capability\n");
+ PMD_DRV_LOG_LINE(DEBUG, "PF enabled with multi root capability");
}
HWRM_UNLOCK();
@@ -4515,7 +4515,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
int rc;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
+ PMD_DRV_LOG_LINE(ERR, "Attempt to allocate VFs on a VF!");
return -EINVAL;
}
@@ -4584,10 +4584,10 @@ bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
sizeof(req),
BNXT_USE_CHIMP_MB);
if (rc || resp->error_code) {
- PMD_DRV_LOG(ERR,
- "Failed to initialize VF %d\n", i);
- PMD_DRV_LOG(ERR,
- "Not all VFs available. (%d, %d)\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to initialize VF %d", i);
+ PMD_DRV_LOG_LINE(ERR,
+ "Not all VFs available. (%d, %d)",
rc, resp->error_code);
HWRM_UNLOCK();
@@ -4635,10 +4635,10 @@ bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
if (rc || resp->error_code) {
- PMD_DRV_LOG(ERR,
- "Failed to initialize VF %d\n", i);
- PMD_DRV_LOG(ERR,
- "Not all VFs available. (%d, %d)\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to initialize VF %d", i);
+ PMD_DRV_LOG_LINE(ERR,
+ "Not all VFs available. (%d, %d)",
rc, resp->error_code);
HWRM_UNLOCK();
@@ -4709,7 +4709,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
int rc;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
+ PMD_DRV_LOG_LINE(ERR, "Attempt to allocate VFs on a VF!");
return -EINVAL;
}
@@ -4937,8 +4937,8 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
req.req_buf_page_addr0 =
rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map buffer address to physical memory\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "unable to map buffer address to physical memory");
HWRM_UNLOCK();
return -ENOMEM;
}
@@ -5562,8 +5562,8 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
rte_free(buf);
- PMD_DRV_LOG(ERR,
- "unable to map response address to physical memory\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "unable to map response address to physical memory");
return -ENOMEM;
}
HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
@@ -5597,8 +5597,8 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
rte_free(buf);
- PMD_DRV_LOG(ERR,
- "unable to map response address to physical memory\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "unable to map response address to physical memory");
return -ENOMEM;
}
HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
@@ -5650,8 +5650,8 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
rte_free(buf);
- PMD_DRV_LOG(ERR,
- "unable to map response address to physical memory\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "unable to map response address to physical memory");
return -ENOMEM;
}
memcpy(buf, data, data_len);
@@ -5715,8 +5715,8 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
HWRM_UNLOCK();
- PMD_DRV_LOG(ERR,
- "unable to map VNIC ID table address to physical memory\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "unable to map VNIC ID table address to physical memory");
return -ENOMEM;
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -5846,7 +5846,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
}
}
/* Could not find a default VNIC. */
- PMD_DRV_LOG(ERR, "No default VNIC\n");
+ PMD_DRV_LOG_LINE(ERR, "No default VNIC");
exit:
rte_free(vnic_ids);
return rc;
@@ -6364,8 +6364,8 @@ int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
i++, p++)
ctxm->split[i] = rte_le_to_cpu_32(*p);
- PMD_DRV_LOG(DEBUG,
- "type:0x%x size:%d multiple:%d max:%d min:%d split:%d init_val:%d init_off:%d init:%d bmap:0x%x\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "type:0x%x size:%d multiple:%d max:%d min:%d split:%d init_val:%d init_off:%d init:%d bmap:0x%x",
ctxm->type, ctxm->entry_size,
ctxm->entry_multiple, ctxm->max_entries, ctxm->min_entries,
ctxm->split_entry_cnt, init_val, init_off,
@@ -6378,7 +6378,7 @@ int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
HWRM_UNLOCK();
} while (types < bp->ctx->types && type != BNXT_CTX_INV);
ctx->ctx_arr[last_valid_idx].last = true;
- PMD_DRV_LOG(DEBUG, "Last valid type 0x%x\n", last_valid_type);
+ PMD_DRV_LOG_LINE(DEBUG, "Last valid type 0x%x", last_valid_type);
rc = bnxt_alloc_all_ctx_pg_info(bp);
if (rc == 0)
@@ -6409,11 +6409,11 @@ int bnxt_hwrm_func_backing_store_types_count(struct bnxt *bp)
HWRM_UNLOCK();
if (flags & HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID) {
- PMD_DRV_LOG(DEBUG, "Valid types 0x%x\n", req.type);
+ PMD_DRV_LOG_LINE(DEBUG, "Valid types 0x%x", req.type);
types++;
}
} while (type != HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_INVALID);
- PMD_DRV_LOG(DEBUG, "Number of valid types %d\n", types);
+ PMD_DRV_LOG_LINE(DEBUG, "Number of valid types %d", types);
return types;
}
@@ -6553,8 +6553,8 @@ int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
int b = 1;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(INFO,
- "Backing store config V2 can be issued on PF only\n");
+ PMD_DRV_LOG_LINE(INFO,
+ "Backing store config V2 can be issued on PF only");
return 0;
}
@@ -6586,8 +6586,8 @@ int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.page_size_pbl_level,
&req.page_dir);
- PMD_DRV_LOG(DEBUG,
- "Backing store config V2 type:0x%x last %d, instance %d, hw %d\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Backing store config V2 type:0x%x last %d, instance %d, hw %d",
req.type, ctxm->last, j, w);
if (ctxm->last && i == (w - 1))
req.flags =
@@ -6830,7 +6830,7 @@ int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
if (dst_fid)
*dst_fid = rte_le_to_cpu_16(resp->dest_fid);
- PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);
+ PMD_DRV_LOG_LINE(DEBUG, "dst_fid: %x", resp->dest_fid);
HWRM_UNLOCK();
@@ -6894,7 +6894,7 @@ int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
return 0;
if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
- PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
+ PMD_DRV_LOG_LINE(INFO, "FW reset happened while port was down");
bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
}
@@ -7057,8 +7057,8 @@ int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
- PMD_DRV_LOG(DEBUG,
- "Not a PF or trusted VF. Command not supported\n");
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Not a PF or trusted VF. Command not supported");
return 0;
}
@@ -7081,8 +7081,8 @@ int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
- PMD_DRV_LOG(DEBUG,
- "Not a PF or trusted VF. Command not supported\n");
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Not a PF or trusted VF. Command not supported");
return 0;
}
@@ -7097,7 +7097,7 @@ int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
HWRM_CHECK_RESULT();
if (ctx_id) {
*ctx_id = rte_le_to_cpu_16(resp->ctx_id);
- PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
+ PMD_DRV_LOG_LINE(DEBUG, "ctx_id = %d", *ctx_id);
}
HWRM_UNLOCK();
@@ -7111,8 +7111,8 @@ int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
- PMD_DRV_LOG(DEBUG,
- "Not a PF or trusted VF. Command not supported\n");
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Not a PF or trusted VF. Command not supported");
return 0;
}
@@ -7138,8 +7138,8 @@ int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
int rc;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
- PMD_DRV_LOG(DEBUG,
- "Not a PF or trusted VF. Command not supported\n");
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Not a PF or trusted VF. Command not supported");
return 0;
}
@@ -7177,8 +7177,8 @@ int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
int rc = 0;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
- PMD_DRV_LOG(DEBUG,
- "Not a PF or trusted VF. Command not supported\n");
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Not a PF or trusted VF. Command not supported");
return 0;
}
@@ -7234,8 +7234,8 @@ int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, struct bnxt_representor *rep_bp)
int rc = 0;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
- PMD_DRV_LOG(DEBUG,
- "Not a PF or trusted VF. Command not supported\n");
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Not a PF or trusted VF. Command not supported");
return 0;
}
@@ -7262,8 +7262,8 @@ int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
int rc;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
- PMD_DRV_LOG(DEBUG,
- "Not a PF or trusted VF. Command not supported\n");
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Not a PF or trusted VF. Command not supported");
return 0;
}
@@ -7296,7 +7296,7 @@ int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
+ PMD_DRV_LOG_LINE(DEBUG, "%s %d allocated",
BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
return rc;
}
@@ -7308,8 +7308,8 @@ int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
int rc;
if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
- PMD_DRV_LOG(DEBUG,
- "Not a PF or trusted VF. Command not supported\n");
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Not a PF or trusted VF. Command not supported");
return 0;
}
@@ -7323,7 +7323,7 @@ int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
+ PMD_DRV_LOG_LINE(DEBUG, "%s %d freed", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
rep_bp->vf_id);
return rc;
}
@@ -110,7 +110,7 @@ int bnxt_free_int(struct bnxt *bp)
} while (count++ < 10);
if (rc < 0) {
- PMD_DRV_LOG(ERR, "irq cb unregister failed rc: %d\n",
+ PMD_DRV_LOG_LINE(ERR, "irq cb unregister failed rc: %d",
rc);
return rc;
}
@@ -177,7 +177,7 @@ int bnxt_setup_int(struct bnxt *bp)
bp->irq_tbl[i].handler = bnxt_int_handler;
}
} else {
- PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
+ PMD_DRV_LOG_LINE(ERR, "bnxt_irq_tbl setup failed");
return -ENOMEM;
}
@@ -159,11 +159,11 @@ bnxt_get_dflt_vnic_svif(struct bnxt *bp, struct bnxt_representor *vf_rep_bp)
&vf_rep_bp->dflt_vnic_id,
&vf_rep_bp->svif);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to get default vnic id of VF\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to get default vnic id of VF");
vf_rep_bp->dflt_vnic_id = BNXT_DFLT_VNIC_ID_INVALID;
vf_rep_bp->svif = BNXT_SVIF_INVALID;
} else {
- PMD_DRV_LOG(INFO, "vf_rep->dflt_vnic_id = %d\n",
+ PMD_DRV_LOG_LINE(INFO, "vf_rep->dflt_vnic_id = %d",
vf_rep_bp->dflt_vnic_id);
}
if (vf_rep_bp->dflt_vnic_id != BNXT_DFLT_VNIC_ID_INVALID &&
@@ -185,7 +185,7 @@ int bnxt_representor_init(struct rte_eth_dev *eth_dev, void *params)
uint16_t first_vf_id;
int rc = 0;
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR init\n", eth_dev->data->port_id);
+ PMD_DRV_LOG_LINE(DEBUG, "BNXT Port:%d VFR init", eth_dev->data->port_id);
vf_rep_bp->vf_id = rep_params->vf_id;
vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
vf_rep_bp->parent_dev = rep_params->parent_dev;
@@ -224,8 +224,8 @@ int bnxt_representor_init(struct rte_eth_dev *eth_dev, void *params)
bnxt_print_link_info(eth_dev);
- PMD_DRV_LOG(INFO,
- "Switch domain id %d: Representor Device %d init done\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "Switch domain id %d: Representor Device %d init done",
vf_rep_bp->switch_domain_id, vf_rep_bp->vf_id);
if (BNXT_REP_BASED_PF(vf_rep_bp)) {
@@ -239,12 +239,12 @@ int bnxt_representor_init(struct rte_eth_dev *eth_dev, void *params)
if (rc)
return rc;
if (first_vf_id == 0xffff) {
- PMD_DRV_LOG(ERR,
- "Invalid first_vf_id fid:%x\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid first_vf_id fid:%x",
vf_rep_bp->fw_fid);
return -EINVAL;
}
- PMD_DRV_LOG(INFO, "first_vf_id = %x parent_fid:%x\n",
+ PMD_DRV_LOG_LINE(INFO, "first_vf_id = %x parent_fid:%x",
first_vf_id, vf_rep_bp->fw_fid);
vf_rep_bp->fw_fid = rep_params->vf_id + first_vf_id;
}
@@ -256,7 +256,7 @@ int bnxt_representor_init(struct rte_eth_dev *eth_dev, void *params)
vf_rep_bp->parent_pf_idx = parent_bp->fw_fid - 1;
}
- PMD_DRV_LOG(INFO, "vf_rep->fw_fid = %d\n", vf_rep_bp->fw_fid);
+ PMD_DRV_LOG_LINE(INFO, "vf_rep->fw_fid = %d", vf_rep_bp->fw_fid);
return 0;
}
@@ -271,11 +271,11 @@ int bnxt_representor_uninit(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR uninit\n", eth_dev->data->port_id);
+ PMD_DRV_LOG_LINE(DEBUG, "BNXT Port:%d VFR uninit", eth_dev->data->port_id);
eth_dev->data->mac_addrs = NULL;
if (!bnxt_rep_check_parent(rep)) {
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d already freed\n",
+ PMD_DRV_LOG_LINE(DEBUG, "BNXT Port:%d already freed",
eth_dev->data->port_id);
return 0;
}
@@ -370,15 +370,15 @@ static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
struct bnxt *parent_bp;
if (!vfr || !vfr->parent_dev) {
- PMD_DRV_LOG(ERR,
- "No memory allocated for representor\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "No memory allocated for representor");
return -ENOMEM;
}
parent_bp = vfr->parent_dev->data->dev_private;
if (parent_bp && !parent_bp->ulp_ctx) {
- PMD_DRV_LOG(ERR,
- "ulp context not allocated for parent\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "ulp context not allocated for parent");
return -EIO;
}
@@ -393,11 +393,11 @@ static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
*/
rc = bnxt_tf_vfr_alloc(vfr_ethdev);
if (!rc)
- PMD_DRV_LOG(DEBUG, "allocated representor %d in FW\n",
+ PMD_DRV_LOG_LINE(DEBUG, "allocated representor %d in FW",
vfr->vf_id);
else
- PMD_DRV_LOG(ERR,
- "Failed to alloc representor %d in FW\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to alloc representor %d in FW",
vfr->vf_id);
return rc;
@@ -480,14 +480,14 @@ static int bnxt_vfr_free(struct bnxt_representor *vfr)
struct bnxt *parent_bp;
if (!vfr || !vfr->parent_dev) {
- PMD_DRV_LOG(ERR,
- "No memory allocated for representor\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "No memory allocated for representor");
return -ENOMEM;
}
parent_bp = vfr->parent_dev->data->dev_private;
if (!parent_bp) {
- PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR already freed\n",
+ PMD_DRV_LOG_LINE(DEBUG, "BNXT Port:%d VFR already freed",
vfr->dpdk_port_id);
return 0;
}
@@ -498,12 +498,12 @@ static int bnxt_vfr_free(struct bnxt_representor *vfr)
rc = bnxt_tf_vfr_free(vfr);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to free representor %d in FW\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to free representor %d in FW",
vfr->vf_id);
}
- PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
+ PMD_DRV_LOG_LINE(DEBUG, "freed representor %d in FW",
vfr->vf_id);
vfr->vfr_tx_cfa_action = 0;
@@ -549,11 +549,11 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
/* MAC Specifics */
if (!bnxt_rep_check_parent(rep_bp)) {
/* Need not be an error scenario, if parent is closed first */
- PMD_DRV_LOG(INFO, "Rep parent port does not exist.\n");
+ PMD_DRV_LOG_LINE(INFO, "Rep parent port does not exist.");
return rc;
}
parent_bp = rep_bp->parent_dev->data->dev_private;
- PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Representor dev_info_get_op");
dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
dev_info->max_hash_mac_addrs = 0;
@@ -592,7 +592,7 @@ int bnxt_rep_dev_configure_op(struct rte_eth_dev *eth_dev)
{
struct bnxt_representor *rep_bp = eth_dev->data->dev_private;
- PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Representor dev_configure_op");
rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;
rep_bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
rep_bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
@@ -640,30 +640,30 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;
if (queue_idx >= rep_bp->rx_nr_rings) {
- PMD_DRV_LOG(ERR,
- "Cannot create Rx ring %d. %d rings available\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot create Rx ring %d. %d rings available",
queue_idx, rep_bp->rx_nr_rings);
return -EINVAL;
}
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
+ PMD_DRV_LOG_LINE(ERR, "nb_desc %d is invalid", nb_desc);
return -EINVAL;
}
if (!parent_bp->rx_queues) {
- PMD_DRV_LOG(ERR, "Parent Rx qs not configured yet\n");
+ PMD_DRV_LOG_LINE(ERR, "Parent Rx qs not configured yet");
return -EINVAL;
}
parent_rxq = parent_bp->rx_queues[queue_idx];
if (!parent_rxq) {
- PMD_DRV_LOG(ERR, "Parent RxQ has not been configured yet\n");
+ PMD_DRV_LOG_LINE(ERR, "Parent RxQ has not been configured yet");
return -EINVAL;
}
if (nb_desc != parent_rxq->nb_rx_desc) {
- PMD_DRV_LOG(ERR, "nb_desc %d do not match parent rxq", nb_desc);
+ PMD_DRV_LOG_LINE(ERR, "nb_desc %d do not match parent rxq", nb_desc);
return -EINVAL;
}
@@ -677,7 +677,7 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- PMD_DRV_LOG(ERR, "bnxt_vfr_rx_queue allocation failed!\n");
+ PMD_DRV_LOG_LINE(ERR, "bnxt_vfr_rx_queue allocation failed!");
return -ENOMEM;
}
@@ -694,7 +694,7 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->rx_ring->rx_ring_struct->ring_size,
RTE_CACHE_LINE_SIZE, socket_id);
if (!buf_ring) {
- PMD_DRV_LOG(ERR, "bnxt_rx_vfr_buf_ring allocation failed!\n");
+ PMD_DRV_LOG_LINE(ERR, "bnxt_rx_vfr_buf_ring allocation failed!");
rc = -ENOMEM;
goto out;
}
@@ -740,30 +740,30 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
struct bnxt_vf_rep_tx_queue *vfr_txq;
if (queue_idx >= rep_bp->tx_nr_rings) {
- PMD_DRV_LOG(ERR,
- "Cannot create Tx rings %d. %d rings available\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot create Tx rings %d. %d rings available",
queue_idx, rep_bp->tx_nr_rings);
return -EINVAL;
}
if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
- PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
+ PMD_DRV_LOG_LINE(ERR, "nb_desc %d is invalid", nb_desc);
return -EINVAL;
}
if (!parent_bp->tx_queues) {
- PMD_DRV_LOG(ERR, "Parent Tx qs not configured yet\n");
+ PMD_DRV_LOG_LINE(ERR, "Parent Tx qs not configured yet");
return -EINVAL;
}
parent_txq = parent_bp->tx_queues[queue_idx];
if (!parent_txq) {
- PMD_DRV_LOG(ERR, "Parent TxQ has not been configured yet\n");
+ PMD_DRV_LOG_LINE(ERR, "Parent TxQ has not been configured yet");
return -EINVAL;
}
if (nb_desc != parent_txq->nb_tx_desc) {
- PMD_DRV_LOG(ERR, "nb_desc %d do not match parent txq", nb_desc);
+ PMD_DRV_LOG_LINE(ERR, "nb_desc %d do not match parent txq", nb_desc);
return -EINVAL;
}
@@ -777,14 +777,14 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
sizeof(struct bnxt_vf_rep_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!vfr_txq) {
- PMD_DRV_LOG(ERR, "bnxt_vfr_tx_queue allocation failed!");
+ PMD_DRV_LOG_LINE(ERR, "bnxt_vfr_tx_queue allocation failed!");
return -ENOMEM;
}
txq = rte_zmalloc_socket("bnxt_tx_queue",
sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!txq) {
- PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
+ PMD_DRV_LOG_LINE(ERR, "bnxt_tx_queue allocation failed!");
rte_free(vfr_txq);
return -ENOMEM;
}
@@ -50,7 +50,7 @@ static void bnxt_init_ring_grps(struct bnxt *bp)
int bnxt_alloc_ring_grps(struct bnxt *bp)
{
if (bp->max_tx_rings == 0) {
- PMD_DRV_LOG(ERR, "No TX rings available!\n");
+ PMD_DRV_LOG_LINE(ERR, "No TX rings available!");
return -EBUSY;
}
@@ -61,7 +61,7 @@ int bnxt_alloc_ring_grps(struct bnxt *bp)
bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
- PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
+ PMD_DRV_LOG_LINE(ERR, "Insufficient resource: Ring Group");
return -ENOSPC;
}
@@ -70,8 +70,8 @@ int bnxt_alloc_ring_grps(struct bnxt *bp)
sizeof(*bp->grp_info) *
bp->max_ring_grps, 0);
if (!bp->grp_info) {
- PMD_DRV_LOG(ERR,
- "Failed to alloc grp info tbl.\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to alloc grp info tbl.");
return -ENOMEM;
}
bnxt_init_ring_grps(bp);
@@ -416,7 +416,7 @@ static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
if (nqr) {
nq_ring_id = nqr->cp_ring_struct->fw_ring_id;
} else {
- PMD_DRV_LOG(ERR, "NQ ring is NULL\n");
+ PMD_DRV_LOG_LINE(ERR, "NQ ring is NULL");
return -EINVAL;
}
}
@@ -657,8 +657,8 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
if (rxq->rx_started) {
if (bnxt_init_one_rx_ring(rxq)) {
- PMD_DRV_LOG(ERR,
- "ring%d bnxt_init_one_rx_ring failed!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "ring%d bnxt_init_one_rx_ring failed!",
queue_index);
rc = -ENOMEM;
goto err_out;
@@ -675,8 +675,8 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
return 0;
err_out:
- PMD_DRV_LOG(ERR,
- "Failed to allocate receive queue %d, rc %d.\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to allocate receive queue %d, rc %d.",
queue_index, rc);
return rc;
}
@@ -94,8 +94,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
RTE_MIN(bp->max_l2_ctx,
RTE_MIN(bp->max_rsscos_ctx,
RTE_ETH_64_POOLS)));
- PMD_DRV_LOG(DEBUG,
- "pools = %u max_pools = %u\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "pools = %u max_pools = %u",
pools, max_pools);
if (pools > max_pools)
pools = max_pools;
@@ -104,7 +104,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
break;
default:
- PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
+ PMD_DRV_LOG_LINE(ERR, "Unsupported mq_mod %d",
dev_conf->rxmode.mq_mode);
rc = -EINVAL;
goto err_out;
@@ -115,7 +115,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
pools = RTE_MIN(pools, bp->rx_cp_nr_rings);
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
- PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
+ PMD_DRV_LOG_LINE(DEBUG, "pools = %u nb_q_per_grp = %u",
pools, nb_q_per_grp);
start_grp_id = 0;
end_grp_id = nb_q_per_grp;
@@ -123,7 +123,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
for (i = 0; i < pools; i++) {
vnic = &bp->vnic_info[i];
if (!vnic) {
- PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
+ PMD_DRV_LOG_LINE(ERR, "VNIC alloc failed");
rc = -ENOMEM;
goto err_out;
}
@@ -133,8 +133,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
rxq = bp->eth_dev->data->rx_queues[ring_idx];
rxq->vnic = vnic;
- PMD_DRV_LOG(DEBUG,
- "rxq[%d] = %p vnic[%d] = %p\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "rxq[%d] = %p vnic[%d] = %p",
ring_idx, rxq, i, vnic);
}
if (i == 0) {
@@ -155,7 +155,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
+ PMD_DRV_LOG_LINE(ERR, "L2 filter alloc failed");
rc = -ENOMEM;
goto err_out;
}
@@ -332,14 +332,14 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
return rc;
if (queue_idx >= bnxt_max_rings(bp)) {
- PMD_DRV_LOG(ERR,
- "Cannot create Rx ring %d. Only %d rings available\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot create Rx ring %d. Only %d rings available",
queue_idx, bp->max_rx_rings);
return -EINVAL;
}
if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_RX_DESC_CNT) {
- PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
+ PMD_DRV_LOG_LINE(ERR, "nb_desc %d is invalid", nb_desc);
return -EINVAL;
}
@@ -351,7 +351,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
+ PMD_DRV_LOG_LINE(ERR, "bnxt_rx_queue allocation failed!");
return -ENOMEM;
}
rxq->bp = bp;
@@ -360,22 +360,22 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->rx_free_thresh =
RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_RX_BURST);
- PMD_DRV_LOG(DEBUG,
- "App supplied RXQ drop_en status : %d\n", rx_conf->rx_drop_en);
+ PMD_DRV_LOG_LINE(DEBUG,
+ "App supplied RXQ drop_en status : %d", rx_conf->rx_drop_en);
rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;
- PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG_LINE(DEBUG, "RX Buf MTU %d", eth_dev->data->mtu);
eth_dev->data->rx_queues[queue_idx] = rxq;
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc) {
- PMD_DRV_LOG(ERR,
- "init_rx_ring_struct failed!\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "init_rx_ring_struct failed!");
goto err;
}
- PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
+ PMD_DRV_LOG_LINE(DEBUG, "RX Buf size is %d", rxq->rx_buf_size);
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
@@ -387,8 +387,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
NULL, "rxr");
if (rc) {
- PMD_DRV_LOG(ERR,
- "ring_dma_zone_reserve for rx_ring failed!\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "ring_dma_zone_reserve for rx_ring failed!");
goto err;
}
rxq->rx_mbuf_alloc_fail = 0;
@@ -469,13 +469,13 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return rc;
if (rxq == NULL) {
- PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ PMD_DRV_LOG_LINE(ERR, "Invalid Rx queue %d", rx_queue_id);
return -EINVAL;
}
vnic = bnxt_vnic_queue_id_get_next(bp, rx_queue_id, &vnic_idx);
if (vnic == NULL) {
- PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n",
+ PMD_DRV_LOG_LINE(ERR, "VNIC not initialized for RxQ %d",
rx_queue_id);
return -EINVAL;
}
@@ -511,23 +511,23 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
/* Reconfigure default receive ring and MRU. */
bnxt_hwrm_vnic_cfg(bp, vnic);
- PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+ PMD_DRV_LOG_LINE(INFO, "Rx queue started %d", rx_queue_id);
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
if (BNXT_HAS_RING_GRPS(bp)) {
if (vnic->fw_grp_ids[rx_queue_id] !=
INVALID_HW_RING_ID) {
- PMD_DRV_LOG(ERR, "invalid ring id %d\n",
+ PMD_DRV_LOG_LINE(ERR, "invalid ring id %d",
rx_queue_id);
return 0;
}
vnic->fw_grp_ids[rx_queue_id] = fw_grp_id;
- PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "vnic = %p fw_grp_id = %d",
vnic, fw_grp_id);
}
- PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Rx Queue Count %d",
vnic->rx_queue_cnt);
rc += bnxt_vnic_rss_queue_status_update(bp, vnic);
}
@@ -541,8 +541,8 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxq->rx_started = false;
}
- PMD_DRV_LOG(INFO,
- "queue %d, rx_deferred_start %d, state %d!\n",
+ PMD_DRV_LOG_LINE(INFO,
+ "queue %d, rx_deferred_start %d, state %d!",
rx_queue_id, rxq->rx_deferred_start,
bp->eth_dev->data->rx_queue_state[rx_queue_id]);
@@ -568,25 +568,25 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
* Default CQ for async notifications
*/
if (!BNXT_NUM_ASYNC_CPR(bp) && !rx_queue_id) {
- PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
+ PMD_DRV_LOG_LINE(ERR, "Cannot stop Rx queue id %d", rx_queue_id);
return -EINVAL;
}
rxq = bp->rx_queues[rx_queue_id];
if (!rxq) {
- PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ PMD_DRV_LOG_LINE(ERR, "Invalid Rx queue %d", rx_queue_id);
return -EINVAL;
}
vnic = bnxt_vnic_queue_id_get_next(bp, q_id, &vnic_idx);
if (!vnic) {
- PMD_DRV_LOG(ERR, "VNIC not initialized for RxQ %d\n", q_id);
+ PMD_DRV_LOG_LINE(ERR, "VNIC not initialized for RxQ %d", q_id);
return -EINVAL;
}
dev->data->rx_queue_state[q_id] = RTE_ETH_QUEUE_STATE_STOPPED;
rxq->rx_started = false;
- PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Rx queue stopped");
do {
active_queue_cnt = 0;
@@ -594,7 +594,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (BNXT_HAS_RING_GRPS(bp))
vnic->fw_grp_ids[q_id] = INVALID_HW_RING_ID;
- PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Rx Queue Count %d",
vnic->rx_queue_cnt);
rc = bnxt_vnic_rss_queue_status_update(bp, vnic);
}
@@ -76,12 +76,12 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
rxbd = &rxr->ag_desc_ring[prod];
rx_buf = &rxr->ag_buf_ring[prod];
if (rxbd == NULL) {
- PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
+ PMD_DRV_LOG_LINE(ERR, "Jumbo Frame. rxbd is NULL");
return -EINVAL;
}
if (rx_buf == NULL) {
- PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
+ PMD_DRV_LOG_LINE(ERR, "Jumbo Frame. rx_buf is NULL");
return -EINVAL;
}
@@ -159,7 +159,7 @@ static void bnxt_rx_ring_reset(void *arg)
rc = bnxt_hwrm_rx_ring_reset(bp, i);
if (rc) {
- PMD_DRV_LOG(ERR, "Rx ring%d reset failed\n", i);
+ PMD_DRV_LOG_LINE(ERR, "Rx ring%d reset failed", i);
continue;
}
@@ -247,7 +247,7 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
data_cons = tpa_start->opaque;
tpa_info = &rxr->tpa_info[agg_id];
if (unlikely(data_cons != rxr->rx_next_cons)) {
- PMD_DRV_LOG(ERR, "TPA cons %x, expected cons %x\n",
+ PMD_DRV_LOG_LINE(ERR, "TPA cons %x, expected cons %x",
data_cons, rxr->rx_next_cons);
bnxt_sched_ring_reset(rxq);
return;
@@ -318,7 +318,7 @@ static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
/* TODO batch allocation for better performance */
while (rte_bitmap_get(rxr->ag_bitmap, bmap_next)) {
if (unlikely(bnxt_alloc_ag_data(rxq, rxr, raw_next))) {
- PMD_DRV_LOG(ERR, "agg mbuf alloc failed: prod=0x%x\n",
+ PMD_DRV_LOG_LINE(ERR, "agg mbuf alloc failed: prod=0x%x",
raw_next);
break;
}
@@ -432,7 +432,7 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct bnxt_tpa_info *tpa_info;
if (unlikely(rxq->in_reset)) {
- PMD_DRV_LOG(ERR, "rxq->in_reset: raw_cp_cons:%d\n",
+ PMD_DRV_LOG_LINE(ERR, "rxq->in_reset: raw_cp_cons:%d",
*raw_cp_cons);
bnxt_discard_rx(rxq->bp, cpr, raw_cp_cons, tpa_end);
return NULL;
@@ -1093,7 +1093,7 @@ static int bnxt_crx_pkt(struct rte_mbuf **rx_pkt,
bnxt_set_vlan_crx(rxcmp, mbuf);
if (bnxt_alloc_rx_data(rxq, rxr, raw_prod)) {
- PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n",
+ PMD_DRV_LOG_LINE(ERR, "mbuf alloc failed with prod=0x%x",
raw_prod);
rc = -ENOMEM;
goto rx;
@@ -1186,7 +1186,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
cons = rxcmp->opaque;
if (unlikely(cons != rxr->rx_next_cons)) {
bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
- PMD_DRV_LOG(ERR, "RX cons %x != expected cons %x\n",
+ PMD_DRV_LOG_LINE(ERR, "RX cons %x != expected cons %x",
cons, rxr->rx_next_cons);
bnxt_sched_ring_reset(rxq);
rc = -EBUSY;
@@ -1272,7 +1272,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
*/
raw_prod = RING_NEXT(raw_prod);
if (bnxt_alloc_rx_data(rxq, rxr, raw_prod)) {
- PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n",
+ PMD_DRV_LOG_LINE(ERR, "mbuf alloc failed with prod=0x%x",
raw_prod);
rc = -ENOMEM;
goto rx;
@@ -1398,7 +1398,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
cpr->cp_ring_struct->ring_size))
break;
if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE) {
- PMD_DRV_LOG(ERR, "Rx flush done\n");
+ PMD_DRV_LOG_LINE(ERR, "Rx flush done");
} else if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_RX_L2_COMPRESS) {
rc = bnxt_crx_pkt(&rx_pkts[nb_rx_pkts], rxq,
(struct rx_pkt_compress_cmpl *)rxcmp,
@@ -1631,8 +1631,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
for (i = 0; i < ring->ring_size; i++) {
if (unlikely(!rxr->rx_buf_ring[i])) {
if (bnxt_alloc_rx_data(rxq, rxr, raw_prod) != 0) {
- PMD_DRV_LOG(WARNING,
- "RxQ %d allocated %d of %d mbufs\n",
+ PMD_DRV_LOG_LINE(WARNING,
+ "RxQ %d allocated %d of %d mbufs",
rxq->queue_id, i, ring->ring_size);
return -ENOMEM;
}
@@ -1661,8 +1661,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
for (i = 0; i < ring->ring_size; i++) {
if (unlikely(!rxr->ag_buf_ring[i])) {
if (bnxt_alloc_ag_data(rxq, rxr, raw_prod) != 0) {
- PMD_DRV_LOG(WARNING,
- "RxQ %d allocated %d of %d mbufs\n",
+ PMD_DRV_LOG_LINE(WARNING,
+ "RxQ %d allocated %d of %d mbufs",
rxq->queue_id, i, ring->ring_size);
return -ENOMEM;
}
@@ -1670,7 +1670,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->ag_raw_prod = raw_prod;
raw_prod = RING_NEXT(raw_prod);
}
- PMD_DRV_LOG(DEBUG, "AGG Done!\n");
+ PMD_DRV_LOG_LINE(DEBUG, "AGG Done!");
if (rxr->tpa_info) {
unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);
@@ -1687,7 +1687,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
}
}
}
- PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");
+ PMD_DRV_LOG_LINE(DEBUG, "TPA alloc Done!");
return 0;
}
@@ -869,7 +869,7 @@ bnxt_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Tx queue was stopped; wait for it to be restarted */
if (unlikely(!txq->tx_started)) {
- PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Tx q stopped;return");
return 0;
}
@@ -432,7 +432,7 @@ bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Tx queue was stopped; wait for it to be restarted */
if (unlikely(!txq->tx_started)) {
- PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Tx q stopped;return");
return 0;
}
@@ -679,7 +679,7 @@ bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Tx queue was stopped; wait for it to be restarted */
if (unlikely(!txq->tx_started)) {
- PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Tx q stopped;return");
return 0;
}
@@ -781,7 +781,7 @@ int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
return ret;
if (!eth_dev->data->dev_started) {
- PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
+ PMD_DRV_LOG_LINE(ERR, "Device Initialization not complete!");
return -EINVAL;
}
@@ -1180,13 +1180,13 @@ int bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
if (BNXT_VF(bp) || !BNXT_SINGLE_PF(bp) ||
!(bp->flags & BNXT_FLAG_PORT_STATS)) {
- PMD_DRV_LOG(ERR, "Operation not supported\n");
+ PMD_DRV_LOG_LINE(ERR, "Operation not supported");
return -ENOTSUP;
}
ret = bnxt_hwrm_port_clr_stats(bp);
if (ret != 0)
- PMD_DRV_LOG(ERR, "Failed to reset xstats: %s\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to reset xstats: %s",
strerror(-ret));
bnxt_clear_prev_stat(bp);
@@ -135,14 +135,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
return rc;
if (queue_idx >= bnxt_max_rings(bp)) {
- PMD_DRV_LOG(ERR,
- "Cannot create Tx ring %d. Only %d rings available\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot create Tx ring %d. Only %d rings available",
queue_idx, bp->max_tx_rings);
return -EINVAL;
}
if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_TX_DESC_CNT) {
- PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
+ PMD_DRV_LOG_LINE(ERR, "nb_desc %d is invalid", nb_desc);
return -EINVAL;
}
@@ -154,7 +154,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!txq) {
- PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
+ PMD_DRV_LOG_LINE(ERR, "bnxt_tx_queue allocation failed!");
return -ENOMEM;
}
@@ -165,7 +165,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
sizeof(struct rte_mbuf *) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
if (!txq->free) {
- PMD_DRV_LOG(ERR, "allocation of tx mbuf free array failed!");
+ PMD_DRV_LOG_LINE(ERR, "allocation of tx mbuf free array failed!");
rc = -ENOMEM;
goto err;
}
@@ -187,20 +187,20 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate TX ring hardware descriptors */
if (bnxt_alloc_rings(bp, socket_id, queue_idx, txq, NULL, txq->cp_ring,
NULL, "txr")) {
- PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
+ PMD_DRV_LOG_LINE(ERR, "ring_dma_zone_reserve for tx_ring failed!");
rc = -ENOMEM;
goto err;
}
if (bnxt_init_one_tx_ring(txq)) {
- PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
+ PMD_DRV_LOG_LINE(ERR, "bnxt_init_one_tx_ring failed!");
rc = -ENOMEM;
goto err;
}
rc = pthread_mutex_init(&txq->txq_lock, NULL);
if (rc != 0) {
- PMD_DRV_LOG(ERR, "TxQ mutex init failed!");
+ PMD_DRV_LOG_LINE(ERR, "TxQ mutex init failed!");
goto err;
}
return 0;
@@ -140,7 +140,7 @@ bnxt_zero_data_len_tso_segsz(struct rte_mbuf *tx_pkt, uint8_t data_len_chk)
}
if (len_to_check == 0) {
- PMD_DRV_LOG(ERR, "Error! Tx pkt %s == 0\n", type_str);
+ PMD_DRV_LOG_LINE(ERR, "Error! Tx pkt %s == 0", type_str);
rte_pktmbuf_dump(stdout, tx_pkt, 64);
rte_dump_stack();
return true;
@@ -226,8 +226,8 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* Check if number of Tx descriptors is above HW limit */
if (unlikely(nr_bds > BNXT_MAX_TSO_SEGS)) {
- PMD_DRV_LOG(ERR,
- "Num descriptors %d exceeds HW limit\n", nr_bds);
+ PMD_DRV_LOG_LINE(ERR,
+ "Num descriptors %d exceeds HW limit", nr_bds);
return -ENOSPC;
}
@@ -237,8 +237,8 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
char *seg = rte_pktmbuf_append(tx_pkt, pad);
if (!seg) {
- PMD_DRV_LOG(ERR,
- "Failed to pad mbuf by %d bytes\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to pad mbuf by %d bytes",
pad);
return -ENOMEM;
}
@@ -593,7 +593,7 @@ uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Tx queue was stopped; wait for it to be restarted */
if (unlikely(!txq->tx_started)) {
- PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Tx q stopped;return");
return 0;
}
@@ -639,7 +639,7 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
txq->tx_started = true;
- PMD_DRV_LOG(DEBUG, "Tx queue started\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Tx queue started");
return 0;
}
@@ -659,7 +659,7 @@ int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
txq->tx_started = false;
- PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Tx queue stopped");
return 0;
}
@@ -86,7 +86,7 @@ struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
/* Find the 1st unused vnic from the free_vnic_list pool*/
vnic = STAILQ_FIRST(&bp->free_vnic_list);
if (!vnic) {
- PMD_DRV_LOG(ERR, "No more free VNIC resources\n");
+ PMD_DRV_LOG_LINE(ERR, "No more free VNIC resources");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
@@ -164,8 +164,8 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp, bool reconfig)
RTE_MEMZONE_SIZE_HINT_ONLY,
BNXT_PAGE_SIZE);
if (mz == NULL) {
- PMD_DRV_LOG(ERR,
- "Cannot allocate vnic_attributes memory\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot allocate vnic_attributes memory");
return -ENOMEM;
}
}
@@ -207,7 +207,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
- PMD_DRV_LOG(ERR, "VNIC is not freed yet!\n");
+ PMD_DRV_LOG_LINE(ERR, "VNIC is not freed yet!");
/* TODO Call HWRM to free VNIC */
}
}
@@ -226,7 +226,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
vnic_mem = rte_zmalloc("bnxt_vnic_info",
max_vnics * sizeof(struct bnxt_vnic_info), 0);
if (vnic_mem == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory for %d VNICs",
+ PMD_DRV_LOG_LINE(ERR, "Failed to alloc memory for %d VNICs",
max_vnics);
return -ENOMEM;
}
@@ -242,8 +242,8 @@ int bnxt_vnic_grp_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
if (!vnic->fw_grp_ids) {
- PMD_DRV_LOG(ERR,
- "Failed to alloc %d bytes for group ids\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to alloc %d bytes for group ids",
size);
return -ENOMEM;
}
@@ -311,8 +311,8 @@ int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
if ((BNXT_CHIP_P5(bp) && BNXT_VNIC_OUTER_RSS_UNSUPPORTED(bp)) ||
(!BNXT_CHIP_P5(bp) && !(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))) {
if (lvl)
- PMD_DRV_LOG(INFO,
- "Given RSS level is unsupported, using default RSS level\n");
+ PMD_DRV_LOG_LINE(INFO,
+ "Given RSS level is unsupported, using default RSS level");
return mode;
}
@@ -448,7 +448,7 @@ bnxt_vnic_queue_delete(struct bnxt *bp, uint16_t vnic_idx)
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_idx];
if (bnxt_hwrm_vnic_free(bp, vnic))
- PMD_DRV_LOG(ERR, "Failed to delete queue\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to delete queue");
if (vnic->fw_grp_ids) {
rte_free(vnic->fw_grp_ids);
@@ -476,7 +476,7 @@ bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
vnic = &bp->vnic_info[vnic_id];
if (vnic->rx_queue_cnt) {
- PMD_DRV_LOG(ERR, "invalid queue configuration %d\n", vnic_id);
+ PMD_DRV_LOG_LINE(ERR, "invalid queue configuration %d", vnic_id);
return NULL;
}
@@ -498,7 +498,7 @@ bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
/* Allocate vnic group for p4 platform */
rc = bnxt_vnic_grp_alloc(bp, vnic);
if (rc) {
- PMD_DRV_LOG(DEBUG, "Failed to allocate vnic groups\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Failed to allocate vnic groups");
goto cleanup;
}
@@ -508,7 +508,7 @@ bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
- PMD_DRV_LOG(DEBUG, "Failed to allocate vnic %d\n", q_index);
+ PMD_DRV_LOG_LINE(DEBUG, "Failed to allocate vnic %d", q_index);
goto cleanup;
}
@@ -523,7 +523,7 @@ bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
vnic->mru = saved_mru;
if (rc) {
- PMD_DRV_LOG(DEBUG, "Failed to configure vnic %d\n", q_index);
+ PMD_DRV_LOG_LINE(DEBUG, "Failed to configure vnic %d", q_index);
goto cleanup;
}
@@ -531,11 +531,11 @@ bnxt_vnic_queue_create(struct bnxt *bp, int32_t vnic_id, uint16_t q_index)
(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
true : false);
if (rc)
- PMD_DRV_LOG(DEBUG, "Failed to configure TPA on this vnic %d\n", q_index);
+ PMD_DRV_LOG_LINE(DEBUG, "Failed to configure TPA on this vnic %d", q_index);
rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
if (rc) {
- PMD_DRV_LOG(DEBUG, "Failed to configure vnic plcmode %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "Failed to configure vnic plcmode %d",
q_index);
goto cleanup;
}
@@ -573,14 +573,14 @@ bnxt_vnic_queue_db_add(struct bnxt *bp, uint64_t *q_list)
(const void *)q_list);
if (vnic_id < 0 || vnic_id >= bp->max_vnics) {
- PMD_DRV_LOG(DEBUG, "unable to assign vnic index %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "unable to assign vnic index %d",
vnic_id);
return rc;
}
vnic_info = &bp->vnic_info[vnic_id];
if (vnic_info->fw_vnic_id != INVALID_HW_RING_ID) {
- PMD_DRV_LOG(DEBUG, "Invalid ring id for %d.\n", vnic_id);
+ PMD_DRV_LOG_LINE(DEBUG, "Invalid ring id for %d.", vnic_id);
return rc;
}
return vnic_id;
@@ -598,7 +598,7 @@ int32_t bnxt_vnic_queue_db_rss_validate(struct bnxt *bp,
int32_t out_idx;
if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS)) {
- PMD_DRV_LOG(ERR, "Error Rss is not supported on this port\n");
+ PMD_DRV_LOG_LINE(ERR, "Error Rss is not supported on this port");
return rc;
}
@@ -610,7 +610,7 @@ int32_t bnxt_vnic_queue_db_rss_validate(struct bnxt *bp,
/* Check to see if the queues id are in supported range */
if (rss_info->queue_num > bp->rx_nr_rings) {
- PMD_DRV_LOG(ERR, "Error unsupported queue num.\n");
+ PMD_DRV_LOG_LINE(ERR, "Error unsupported queue num.");
return rc;
}
@@ -618,8 +618,8 @@ int32_t bnxt_vnic_queue_db_rss_validate(struct bnxt *bp,
for (idx = 0; idx < BNXT_VNIC_MAX_QUEUE_SIZE; idx++) {
if (BNXT_VNIC_BITMAP_GET(rss_info->queue_list, idx)) {
if (idx >= bp->rx_nr_rings) {
- PMD_DRV_LOG(ERR,
- "Error %d beyond support size %u\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error %d beyond support size %u",
idx, bp->rx_nr_rings);
return rc;
}
@@ -711,7 +711,7 @@ bnxt_vnic_rss_create(struct bnxt *bp,
/* Allocate vnic group for p4 platform */
rc = bnxt_vnic_grp_alloc(bp, vnic);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to allocate vnic groups\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to allocate vnic groups");
goto fail_cleanup;
}
@@ -722,7 +722,7 @@ bnxt_vnic_rss_create(struct bnxt *bp,
/* Allocate the vnic in the firmware */
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to allocate vnic %d\n", idx);
+ PMD_DRV_LOG_LINE(ERR, "Failed to allocate vnic %d", idx);
goto fail_cleanup;
}
@@ -735,8 +735,8 @@ bnxt_vnic_rss_create(struct bnxt *bp,
break;
}
if (rc) {
- PMD_DRV_LOG(ERR,
- "HWRM ctx %d alloc failure rc: %x\n", idx, rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "HWRM ctx %d alloc failure rc: %x", idx, rc);
goto fail_cleanup;
}
vnic->num_lb_ctxts = nr_ctxs;
@@ -749,7 +749,7 @@ bnxt_vnic_rss_create(struct bnxt *bp,
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
vnic->mru = saved_mru;
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to configure vnic %d\n", idx);
+ PMD_DRV_LOG_LINE(ERR, "Failed to configure vnic %d", idx);
goto fail_cleanup;
}
@@ -757,11 +757,11 @@ bnxt_vnic_rss_create(struct bnxt *bp,
(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
true : false);
if (rc)
- PMD_DRV_LOG(DEBUG, "Failed to configure TPA on this vnic %d\n", idx);
+ PMD_DRV_LOG_LINE(DEBUG, "Failed to configure TPA on this vnic %d", idx);
rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to configure vnic plcmode %d\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to configure vnic plcmode %d",
idx);
goto fail_cleanup;
}
@@ -771,8 +771,8 @@ bnxt_vnic_rss_create(struct bnxt *bp,
/* If only unsupported type(s) are specified then quit */
if (rss_info->rss_types == 0) {
- PMD_DRV_LOG(ERR,
- "Unsupported RSS hash type(s)\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Unsupported RSS hash type(s)");
goto fail_cleanup;
}
@@ -805,8 +805,8 @@ bnxt_vnic_rss_create(struct bnxt *bp,
rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
if (rc) {
memset(vnic->rss_hash_key, 0, HW_HASH_KEY_SIZE);
- PMD_DRV_LOG(ERR,
- "Failed to configure vnic rss details %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to configure vnic rss details %d",
idx);
goto fail_cleanup;
}
@@ -834,7 +834,7 @@ bnxt_vnic_rss_queue_status_update(struct bnxt *bp, struct bnxt_vnic_info *vnic)
/* configure the rss table */
if (bnxt_hwrm_vnic_rss_cfg(bp, vnic)) {
- PMD_DRV_LOG(DEBUG, "Failed to update vnic rss details\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Failed to update vnic rss details");
return -EINVAL;
}
return 0;
@@ -854,8 +854,8 @@ bnxt_vnic_rss_hash_algo_update(struct bnxt *bp,
/* validate key length */
if (rss_info->key_len != 0 && rss_info->key_len != HW_HASH_KEY_SIZE) {
- PMD_DRV_LOG(ERR,
- "Invalid hashkey length, should be %d bytes\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Invalid hashkey length, should be %d bytes",
HW_HASH_KEY_SIZE);
return -EINVAL;
}
@@ -865,8 +865,8 @@ bnxt_vnic_rss_hash_algo_update(struct bnxt *bp,
/* If only unsupported type(s) are specified then quit */
if (!rss_info->rss_types) {
- PMD_DRV_LOG(ERR,
- "Unsupported RSS hash type\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Unsupported RSS hash type");
return -EINVAL;
}
@@ -912,10 +912,10 @@ bnxt_vnic_rss_hash_algo_update(struct bnxt *bp,
if (apply) {
if (bnxt_hwrm_vnic_rss_cfg(bp, vnic)) {
memcpy(vnic->rss_hash_key, old_rss_hash_key, HW_HASH_KEY_SIZE);
- PMD_DRV_LOG(ERR, "Error configuring vnic RSS config\n");
+ PMD_DRV_LOG_LINE(ERR, "Error configuring vnic RSS config");
return -EINVAL;
}
- PMD_DRV_LOG(INFO, "Rss config successfully applied\n");
+ PMD_DRV_LOG_LINE(INFO, "Rss config successfully applied");
}
return 0;
}
@@ -947,7 +947,7 @@ int32_t bnxt_vnic_queue_db_init(struct bnxt *bp)
hash_tbl_params.socket_id = rte_socket_id();
bp->vnic_queue_db.rss_q_db = rte_hash_create(&hash_tbl_params);
if (bp->vnic_queue_db.rss_q_db == NULL) {
- PMD_DRV_LOG(ERR, "Failed to create rss hash tbl\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to create rss hash tbl");
return -ENOMEM;
}
return 0;
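
For context, the rss_q_db created above is a DPDK rte_hash table keyed on the Rx-queue bitmap and used by bnxt_vnic_queue_db_add() and friends to map a queue set to a vnic slot. The parameter block is mostly elided from this hunk; a hedged sketch, where the name, entry count, and key length are assumptions rather than the driver's actual constants:

	struct rte_hash_parameters hash_tbl_params = {
		.name = "bnxt_vnic_rss_q_db",	/* hypothetical name */
		.entries = 64,			/* assumed: bp->max_vnics */
		.key_len = 64,			/* assumed bitmap size in bytes */
		.socket_id = rte_socket_id(),
	};
	bp->vnic_queue_db.rss_q_db = rte_hash_create(&hash_tbl_params);
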
@@ -967,13 +967,13 @@ void bnxt_vnic_queue_db_update_dlft_vnic(struct bnxt *bp)
vnic_id = bnxt_vnic_queue_db_add(bp, bitmap);
if (vnic_id < 0) {
- PMD_DRV_LOG(ERR, "Unable to alloc vnic for default rss\n");
+ PMD_DRV_LOG_LINE(ERR, "Unable to alloc vnic for default rss");
return;
}
dflt_vnic = bnxt_vnic_queue_db_get_vnic(bp, vnic_id);
if (dflt_vnic == NULL) {
- PMD_DRV_LOG(ERR, "Invalid vnic for default rss %d\n", vnic_id);
+ PMD_DRV_LOG_LINE(ERR, "Invalid vnic for default rss %d", vnic_id);
return;
}
/* Update the default vnic structure */
@@ -995,7 +995,7 @@ int32_t bnxt_vnic_queue_action_alloc(struct bnxt *bp,
/* validate the given queue id */
if (q_index >= bp->rx_nr_rings || q_index >= BNXT_VNIC_MAX_QUEUE_SIZE) {
- PMD_DRV_LOG(ERR, "invalid queue id should be less than %d\n",
+ PMD_DRV_LOG_LINE(ERR, "invalid queue id should be less than %d",
bp->rx_nr_rings);
return rc;
}
@@ -1009,14 +1009,14 @@ int32_t bnxt_vnic_queue_action_alloc(struct bnxt *bp,
/* Assign the vnic slot */
idx = bnxt_vnic_queue_db_add(bp, queue_list);
if (idx < 0) {
- PMD_DRV_LOG(DEBUG, "Unable to alloc vnic for queue\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Unable to alloc vnic for queue");
return rc;
}
/* Allocate a new one */
vnic_info = bnxt_vnic_queue_create(bp, idx, q_index);
if (!vnic_info) {
- PMD_DRV_LOG(ERR, "failed to create vnic - %d\n",
+ PMD_DRV_LOG_LINE(ERR, "failed to create vnic - %d",
q_index);
bnxt_vnic_queue_db_del(bp, queue_list);
return rc; /* failed */
@@ -1024,7 +1024,7 @@ int32_t bnxt_vnic_queue_action_alloc(struct bnxt *bp,
} else {
vnic_info = bnxt_vnic_queue_db_get_vnic(bp, idx);
if (vnic_info == NULL) {
- PMD_DRV_LOG(ERR, "Unable to lookup vnic for queue %d\n",
+ PMD_DRV_LOG_LINE(ERR, "Unable to lookup vnic for queue %d",
q_index);
return rc;
}
@@ -1045,14 +1045,14 @@ bnxt_vnic_queue_action_free(struct bnxt *bp, uint16_t vnic_id)
/* validate the given vnic idx */
if (vnic_idx >= bp->max_vnics) {
- PMD_DRV_LOG(ERR, "invalid vnic idx %d\n", vnic_idx);
+ PMD_DRV_LOG_LINE(ERR, "invalid vnic idx %d", vnic_idx);
return rc;
}
/* validate the vnic info */
vnic_info = &bp->vnic_info[vnic_idx];
if (!vnic_info->rx_queue_cnt) {
- PMD_DRV_LOG(ERR, "Invalid vnic idx, no queues being used\n");
+ PMD_DRV_LOG_LINE(ERR, "Invalid vnic idx, no queues being used");
return rc;
}
if (vnic_info->ref_cnt) {
@@ -1062,7 +1062,7 @@ bnxt_vnic_queue_action_free(struct bnxt *bp, uint16_t vnic_id)
vnic_info->queue_bitmap);
/* Check to ensure there is no corruption */
if (idx != vnic_idx)
- PMD_DRV_LOG(ERR, "bad vnic idx %d\n", vnic_idx);
+ PMD_DRV_LOG_LINE(ERR, "bad vnic idx %d", vnic_idx);
bnxt_vnic_queue_delete(bp, vnic_idx);
}
@@ -1083,26 +1083,26 @@ bnxt_vnic_rss_action_alloc(struct bnxt *bp,
/* validate the given parameters */
rc = bnxt_vnic_queue_db_rss_validate(bp, rss_info, &idx);
if (rc == -EINVAL) {
- PMD_DRV_LOG(ERR, "Failed to apply the rss action.\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to apply the rss action.");
return rc;
} else if (rc == -ENOENT) {
/* Allocate a new entry */
idx = bnxt_vnic_queue_db_add(bp, rss_info->queue_list);
if (idx < 0) {
- PMD_DRV_LOG(DEBUG, "Unable to alloc vnic for rss\n");
+ PMD_DRV_LOG_LINE(DEBUG, "Unable to alloc vnic for rss");
return rc;
}
/* create the rss vnic */
vnic_info = bnxt_vnic_rss_create(bp, rss_info, idx);
if (!vnic_info) {
- PMD_DRV_LOG(ERR, "Failed to create rss action.\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to create rss action.");
bnxt_vnic_queue_db_del(bp, rss_info->queue_list);
return rc;
}
} else {
vnic_info = bnxt_vnic_queue_db_get_vnic(bp, idx);
if (vnic_info == NULL) {
- PMD_DRV_LOG(ERR, "Unable to lookup vnic for idx %d\n",
+ PMD_DRV_LOG_LINE(ERR, "Unable to lookup vnic for idx %d",
idx);
return rc;
}
@@ -1112,7 +1112,7 @@ bnxt_vnic_rss_action_alloc(struct bnxt *bp,
/* check configuration has changed then update hash details */
rc = bnxt_vnic_rss_hash_algo_update(bp, vnic_info, rss_info);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to update the rss action.\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to update the rss action.");
return rc;
}
}
@@ -1133,14 +1133,14 @@ bnxt_vnic_rss_action_free(struct bnxt *bp, uint16_t vnic_id)
/* validate the given vnic id */
if (vnic_id >= bp->max_vnics) {
- PMD_DRV_LOG(ERR, "invalid vnic id %d\n", vnic_id);
+ PMD_DRV_LOG_LINE(ERR, "invalid vnic id %d", vnic_id);
return rc;
}
/* validate vnic info */
vnic_info = &bp->vnic_info[vnic_id];
if (!vnic_info->rx_queue_cnt) {
- PMD_DRV_LOG(ERR, "Invalid vnic id, not using any queues\n");
+ PMD_DRV_LOG_LINE(ERR, "Invalid vnic id, not using any queues");
return rc;
}
@@ -1165,7 +1165,7 @@ bnxt_vnic_rss_action_free(struct bnxt *bp, uint16_t vnic_id)
/* check to ensure there is no corruption */
if (idx != vnic_id)
- PMD_DRV_LOG(ERR, "bad vnic idx %d\n", vnic_id);
+ PMD_DRV_LOG_LINE(ERR, "bad vnic idx %d", vnic_id);
bnxt_vnic_rss_delete(bp, vnic_id);
}
}
@@ -1192,7 +1192,7 @@ bnxt_vnic_reta_config_update(struct bnxt *bp,
q_id = reta_conf[idx].reta[sft];
if (q_id >= bp->vnic_queue_db.num_queues ||
!bp->eth_dev->data->rx_queues[q_id]) {
- PMD_DRV_LOG(ERR, "Queue id %d is invalid\n", q_id);
+ PMD_DRV_LOG_LINE(ERR, "Queue id %d is invalid", q_id);
return -EINVAL;
}
BNXT_VNIC_BITMAP_SET(l_bitmap, q_id);
@@ -1273,7 +1273,7 @@ bnxt_vnic_queue_db_get_vnic(struct bnxt *bp, uint16_t vnic_idx)
struct bnxt_vnic_info *vnic_info;
if (vnic_idx >= bp->max_vnics) {
- PMD_DRV_LOG(ERR, "invalid vnic index %u\n", vnic_idx);
+ PMD_DRV_LOG_LINE(ERR, "invalid vnic index %u", vnic_idx);
return NULL;
}
vnic_info = &bp->vnic_info[vnic_idx];
@@ -1338,7 +1338,7 @@ int bnxt_rte_flow_to_hwrm_ring_select_mode(enum rte_eth_hash_function hash_f,
hash_f != RTE_ETH_HASH_FUNCTION_DEFAULT) {
if (hash_f == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ ||
(!BNXT_CHIP_P7(bp) && hash_f == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR)) {
- PMD_DRV_LOG(ERR, "Unsupported hash function\n");
+ PMD_DRV_LOG_LINE(ERR, "Unsupported hash function");
return -ENOTSUP;
}
}
@@ -1355,7 +1355,7 @@ int bnxt_rte_flow_to_hwrm_ring_select_mode(enum rte_eth_hash_function hash_f,
vnic->rss_types_local = types;
return 0;
}
- PMD_DRV_LOG(ERR, "Hash function not supported with checksun type\n");
+ PMD_DRV_LOG_LINE(ERR, "Hash function not supported with checksun type");
return -ENOTSUP;
}
@@ -57,8 +57,8 @@ int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
bp = eth_dev->data->dev_private;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to set Tx loopback on non-PF port %d!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to set Tx loopback on non-PF port %d!",
port);
return -ENOTSUP;
}
@@ -99,8 +99,8 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp = eth_dev->data->dev_private;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to set all queues drop on non-PF port!\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to set all queues drop on non-PF port!");
return -ENOTSUP;
}
@@ -112,7 +112,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp->vnic_info[i].bd_stall = !on;
rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to update PF VNIC %d.\n", i);
+ PMD_DRV_LOG_LINE(ERR, "Failed to update PF VNIC %d.", i);
return rc;
}
}
@@ -123,7 +123,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", i);
+ PMD_DRV_LOG_LINE(ERR, "Failed to update VF VNIC %d.", i);
break;
}
}
@@ -147,8 +147,8 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -160,8 +160,8 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
return -EINVAL;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to set VF %d mac address on non-PF port %d!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to set VF %d mac address on non-PF port %d!",
vf, port);
return -ENOTSUP;
}
@@ -189,8 +189,8 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -211,7 +211,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
/* Requested BW can't be greater than link speed */
if (tot_rate > eth_dev->data->dev_link.link_speed) {
- PMD_DRV_LOG(ERR, "Rate > Link speed. Set to %d\n", tot_rate);
+ PMD_DRV_LOG_LINE(ERR, "Rate > Link speed. Set to %d", tot_rate);
return -EINVAL;
}
@@ -247,8 +247,8 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -256,8 +256,8 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = dev->data->dev_private;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to set mac spoof on non-PF port %d!\n", port);
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to set mac spoof on non-PF port %d!", port);
return -EINVAL;
}
@@ -306,8 +306,8 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -315,8 +315,8 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = dev->data->dev_private;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to set VLAN spoof on non-PF port %d!\n", port);
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to set VLAN spoof on non-PF port %d!", port);
return -EINVAL;
}
@@ -334,7 +334,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
rc = -1;
}
} else {
- PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG_LINE(ERR, "Failed to update VF VNIC %d.", vf);
}
return rc;
@@ -363,8 +363,8 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -375,8 +375,8 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
return -EINVAL;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to set VF %d stripq on non-PF port %d!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to set VF %d stripq on non-PF port %d!",
vf, port);
return -ENOTSUP;
}
@@ -385,7 +385,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc)
- PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG_LINE(ERR, "Failed to update VF VNIC %d.", vf);
return rc;
}
@@ -407,8 +407,8 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -422,7 +422,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
return -EINVAL;
if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
- PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
+ PMD_DRV_LOG_LINE(ERR, "Currently cannot toggle this setting");
return -ENOTSUP;
}
@@ -445,7 +445,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
&bp->pf->vf_info[vf].l2_rx_mask,
bnxt_set_rx_mask_no_vlan);
if (rc)
- PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+ PMD_DRV_LOG_LINE(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed");
return rc;
}
@@ -457,8 +457,8 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
struct bnxt_vnic_info vnic;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to set VLAN table on non-PF port!\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to set VLAN table on non-PF port!");
return -EINVAL;
}
@@ -470,7 +470,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
/* This simply indicates there's no driver loaded.
* This is not an error.
*/
- PMD_DRV_LOG(ERR, "Unable to get default VNIC for VF %d\n", vf);
+ PMD_DRV_LOG_LINE(ERR, "Unable to get default VNIC for VF %d", vf);
} else {
memset(&vnic, 0, sizeof(vnic));
vnic.fw_vnic_id = dflt_vnic;
@@ -534,10 +534,10 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
/* Now check that there's space */
if (cnt == getpagesize() / sizeof(struct
bnxt_vlan_antispoof_table_entry)) {
- PMD_DRV_LOG(ERR,
- "VLAN anti-spoof table is full\n");
- PMD_DRV_LOG(ERR,
- "VF %d cannot add VLAN %u\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "VLAN anti-spoof table is full");
+ PMD_DRV_LOG_LINE(ERR,
+ "VF %d cannot add VLAN %u",
i, vlan);
rc = -1;
continue;
@@ -598,8 +598,8 @@ int rte_pmd_bnxt_get_vf_stats(uint16_t port,
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -610,8 +610,8 @@ int rte_pmd_bnxt_get_vf_stats(uint16_t port,
return -EINVAL;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to get VF %d stats on non-PF port %d!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to get VF %d stats on non-PF port %d!",
vf_id, port);
return -ENOTSUP;
}
@@ -634,8 +634,8 @@ int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -646,8 +646,8 @@ int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
return -EINVAL;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to reset VF %d stats on non-PF port %d!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to reset VF %d stats on non-PF port %d!",
vf_id, port);
return -ENOTSUP;
}
@@ -668,8 +668,8 @@ int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -680,8 +680,8 @@ int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
return -EINVAL;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to query VF %d RX stats on non-PF port %d!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to query VF %d RX stats on non-PF port %d!",
vf_id, port);
return -ENOTSUP;
}
@@ -703,8 +703,8 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -715,8 +715,8 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
return -EINVAL;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to query VF %d TX drops on non-PF port %d!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to query VF %d TX drops on non-PF port %d!",
vf_id, port);
return -ENOTSUP;
}
@@ -742,8 +742,8 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct rte_ether_addr *addr,
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -754,8 +754,8 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct rte_ether_addr *addr,
return -EINVAL;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to config VF %d MAC on non-PF port %d!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to config VF %d MAC on non-PF port %d!",
vf_id, port);
return -ENOTSUP;
}
@@ -825,8 +825,8 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -837,8 +837,8 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
return -EINVAL;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to set VF %d vlan insert on non-PF port %d!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to set VF %d vlan insert on non-PF port %d!",
vf, port);
return -ENOTSUP;
}
@@ -869,8 +869,8 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
dev = &rte_eth_devices[port];
rc = rte_eth_dev_info_get(port, &dev_info);
if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Error during getting device (port %u) info: %s\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Error during getting device (port %u) info: %s",
port, strerror(-rc));
return rc;
@@ -878,8 +878,8 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
bp = dev->data->dev_private;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR,
- "Attempt to set persist stats on non-PF port %d!\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Attempt to set persist stats on non-PF port %d!",
port);
return -EINVAL;
}
@@ -62,8 +62,8 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
DP_DEFRAG_TO_FIT);
if (index == DP_INVALID_INDEX) {
- PMD_DRV_LOG(ERR,
- "%s, EM entry index allocation failed\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "%s, EM entry index allocation failed",
tf_dir_2_str(parms->dir));
return -1;
}
@@ -56,8 +56,8 @@ tf_em_insert_int_entry(struct tf *tfp,
pool = (struct dpool *)tfs->em_pool[parms->dir];
index = dpool_alloc(pool, TF_SESSION_EM_ENTRY_SIZE, 0);
if (index == DP_INVALID_INDEX) {
- PMD_DRV_LOG(ERR,
- "%s, EM entry index allocation failed\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "%s, EM entry index allocation failed",
tf_dir_2_str(parms->dir));
return -1;
}
@@ -11,7 +11,8 @@
#include "bnxt_ulp.h"
#include "ulp_template_db_enum.h"
-#define BNXT_TF_DBG(lvl, fmt, args...) PMD_DRV_LOG(lvl, fmt, ## args)
+#define BNXT_TF_DBG(lvl, fmt, ...) \
+ RTE_LOG(lvl, BNXT, "%s(): " fmt, __func__, ## __VA_ARGS__)
#define BNXT_TF_INF(fmt, args...)
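
Besides moving BNXT_TF_DBG() onto RTE_LOG(), the hunk above replaces the GNU-specific named-variadic form (args... with ## args) by standard C __VA_ARGS__. The ## paste operator still swallows the trailing comma when nothing follows the format string, so both call shapes stay legal; the expansion, derived mechanically from the new definition:

	BNXT_TF_DBG(ERR, "no args");
	/* -> RTE_LOG(ERR, BNXT, "%s(): " "no args", __func__) */

	BNXT_TF_DBG(ERR, "port %u", port_id);
	/* -> RTE_LOG(ERR, BNXT, "%s(): " "port %u", __func__, port_id) */
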
@@ -54,19 +54,19 @@ bnxt_pmd_get_bp(uint16_t port)
struct rte_eth_dev *dev;
if (!rte_eth_dev_is_valid_port(port)) {
- PMD_DRV_LOG(ERR, "Invalid port %d\n", port);
+ PMD_DRV_LOG_LINE(ERR, "Invalid port %d", port);
return NULL;
}
dev = &rte_eth_devices[port];
if (!is_bnxt_supported(dev)) {
- PMD_DRV_LOG(ERR, "Device %d not supported\n", port);
+ PMD_DRV_LOG_LINE(ERR, "Device %d not supported", port);
return NULL;
}
bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_TRUFLOW_EN(bp)) {
- PMD_DRV_LOG(ERR, "TRUFLOW not enabled\n");
+ PMD_DRV_LOG_LINE(ERR, "TRUFLOW not enabled");
return NULL;
}
@@ -1347,7 +1347,7 @@ ulp_ctx_attach(struct bnxt *bp,
/* Create a TF Client */
rc = ulp_ctx_session_open(bp, session);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to open ctxt session, rc:%d\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "Failed to open ctxt session, rc:%d", rc);
tfp->session = NULL;
return rc;
}
@@ -86,7 +86,7 @@ ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt)
rc = pthread_mutex_init(&ulp_fc_info->fc_lock, NULL);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to initialize fc mutex\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to initialize fc mutex");
goto error;
}
@@ -269,16 +269,16 @@ ulp_bulk_get_flow_stats(struct tf *tfp,
((uintptr_t)(fc_info->shadow_hw_tbl[dir].mem_pa));
if (!stats) {
- PMD_DRV_LOG(ERR,
- "BULK: Memory not initialized id:0x%x dir:%d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "BULK: Memory not initialized id:0x%x dir:%d",
parms.starting_idx, dir);
return -EINVAL;
}
rc = tf_tbl_bulk_get(tfp, &parms);
if (rc) {
- PMD_DRV_LOG(ERR,
- "BULK: Get failed for id:0x%x rc:%d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "BULK: Get failed for id:0x%x rc:%d",
parms.starting_idx, rc);
return rc;
}
@@ -337,8 +337,8 @@ ulp_fc_tf_flow_stat_get(struct bnxt_ulp_context *ctxt,
parms.data = (uint8_t *)&stats;
rc = tf_get_tbl_entry(tfp, &parms);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Get failed for id:0x%x rc:%d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Get failed for id:0x%x rc:%d",
parms.idx, rc);
return rc;
}
@@ -358,7 +358,7 @@ ulp_fc_tf_flow_stat_get(struct bnxt_ulp_context *ctxt,
sparms.data_sz_in_bytes = sizeof(uint64_t);
rc = tf_set_tbl_entry(tfp, &sparms);
if (rc) {
- PMD_DRV_LOG(ERR, "Set failed for id:0x%x rc:%d\n",
+ PMD_DRV_LOG_LINE(ERR, "Set failed for id:0x%x rc:%d",
sparms.idx, rc);
return rc;
}
@@ -391,8 +391,8 @@ static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
parms.data = (uint8_t *)&stats;
rc = tf_get_tbl_entry(tfp, &parms);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Get failed for id:0x%x rc:%d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Get failed for id:0x%x rc:%d",
parms.idx, rc);
return rc;
}
@@ -419,7 +419,7 @@ static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
if (ulp_flow_db_parent_flow_count_update(ctxt, pc_idx,
t_sw->pkt_count,
t_sw->byte_count)) {
- PMD_DRV_LOG(ERR, "Error updating parent counters\n");
+ PMD_DRV_LOG_LINE(ERR, "Error updating parent counters");
}
}
@@ -413,7 +413,7 @@ ulp_ha_mgr_init(struct bnxt_ulp_context *ulp_ctx)
rc = pthread_mutex_init(&ha_info->ha_lock, NULL);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to initialize ha mutex\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to initialize ha mutex");
goto cleanup;
}
rc = ulp_ha_mgr_timer_start(ulp_ctx->cfg_data);
@@ -503,17 +503,17 @@ __extension__ ({ \
if (ret == 0) { \
mp_proc_##f(_a, rsp, ## __VA_ARGS__); \
} else { \
- PMD_DRV_LOG(ERR, \
- "%s returned error: %d\n", \
+ PMD_DRV_LOG_LINE(ERR, \
+ "%s returned error: %d", \
mp_name_ ## f, rsp->result);\
} \
free(mp_rep.msgs); \
} else if (rte_errno == ENOTSUP) { \
- PMD_DRV_LOG(ERR, \
- "No IPC, can't proxy to primary\n");\
+ PMD_DRV_LOG_LINE(ERR, \
+ "No IPC, can't proxy to primary");\
ret = -rte_errno; \
} else { \
- PMD_DRV_LOG(ERR, "Request %s failed: %s\n", \
+ PMD_DRV_LOG_LINE(ERR, "Request %s failed: %s", \
mp_name_ ## f, \
rte_strerror(rte_errno)); \
ret = -EIO; \
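
The hunk above sits inside the ena driver's ENA_PROXY() wrapper, invoked later in this patch as, for example, ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu). The wrapper runs an admin call directly in the primary process and forwards it over the multi-process IPC channel from a secondary one, with mp_name_##f and mp_proc_##f naming the request and unpacking the response. A rough functional sketch of that dispatch for one call site; ena_mp_ask_primary_set_mtu() is a hypothetical stand-in for the rte_mp_request_sync() marshalling, not a real driver symbol:

	static int
	ena_proxy_set_dev_mtu(struct ena_adapter *adapter,
			      struct ena_com_dev *ena_dev, uint32_t mtu)
	{
		/* The primary process owns the admin queue: call directly. */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
			return ena_com_set_dev_mtu(ena_dev, mtu);

		/* Secondary: marshal the request, let the primary run the
		 * admin call, and unpack the shared response.
		 */
		return ena_mp_ask_primary_set_mtu(adapter, mtu);
	}
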
@@ -778,10 +778,10 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
}
if (tx_info)
- PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf. queue %d:%d req_id %u\n",
+ PMD_TX_LOG_LINE(ERR, "tx_info doesn't have valid mbuf. queue %d:%d req_id %u",
tx_ring->port_id, tx_ring->id, req_id);
else
- PMD_TX_LOG(ERR, "Invalid req_id: %hu in queue %d:%d\n",
+ PMD_TX_LOG_LINE(ERR, "Invalid req_id: %hu in queue %d:%d",
req_id, tx_ring->port_id, tx_ring->id);
/* Trigger device reset */
@@ -798,7 +798,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
/* Allocate only the host info */
rc = ena_com_allocate_host_info(ena_dev);
if (rc) {
- PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
+ PMD_DRV_LOG_LINE(ERR, "Cannot allocate host info");
return;
}
@@ -825,9 +825,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
if (rc == ENA_COM_UNSUPPORTED)
- PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
+ PMD_DRV_LOG_LINE(WARNING, "Cannot set host attributes");
else
- PMD_DRV_LOG(ERR, "Cannot set host attributes\n");
+ PMD_DRV_LOG_LINE(ERR, "Cannot set host attributes");
goto err;
}
@@ -862,16 +862,16 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
if (rc) {
- PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
+ PMD_DRV_LOG_LINE(ERR, "Cannot allocate debug area");
return;
}
rc = ena_com_set_host_attributes(&adapter->ena_dev);
if (rc) {
if (rc == ENA_COM_UNSUPPORTED)
- PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
+ PMD_DRV_LOG_LINE(WARNING, "Cannot set host attributes");
else
- PMD_DRV_LOG(ERR, "Cannot set host attributes\n");
+ PMD_DRV_LOG_LINE(ERR, "Cannot set host attributes");
goto err;
}
@@ -904,7 +904,7 @@ static int ena_close(struct rte_eth_dev *dev)
rte_intr_disable(intr_handle);
rc = rte_intr_callback_unregister_sync(intr_handle, ena_control_path_handler, dev);
if (unlikely(rc != 0))
- PMD_INIT_LOG(ERR, "Failed to unregister interrupt handler\n");
+ PMD_INIT_LOG_LINE(ERR, "Failed to unregister interrupt handler");
} else {
rte_eal_alarm_cancel(ena_control_path_poll_handler, dev);
}
@@ -944,19 +944,19 @@ ena_dev_reset(struct rte_eth_dev *dev)
/* Cannot release memory in secondary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n");
+ PMD_DRV_LOG_LINE(WARNING, "dev_reset not supported in secondary.");
return -EPERM;
}
rc = eth_ena_dev_uninit(dev);
if (rc) {
- PMD_INIT_LOG(CRIT, "Failed to un-initialize device\n");
+ PMD_INIT_LOG_LINE(CRIT, "Failed to un-initialize device");
return rc;
}
rc = eth_ena_dev_init(dev);
if (rc)
- PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
+ PMD_INIT_LOG_LINE(CRIT, "Cannot initialize device");
return rc;
}
@@ -995,7 +995,7 @@ static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
ring->configured = 0;
- PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n",
+ PMD_DRV_LOG_LINE(NOTICE, "Rx queue %d:%d released",
ring->port_id, ring->id);
}
@@ -1016,7 +1016,7 @@ static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
ring->configured = 0;
- PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n",
+ PMD_DRV_LOG_LINE(NOTICE, "Tx queue %d:%d released",
ring->port_id, ring->id);
}
@@ -1091,8 +1091,8 @@ static int ena_queue_start_all(struct rte_eth_dev *dev,
rc = ena_queue_start(dev, &queues[i]);
if (rc) {
- PMD_INIT_LOG(ERR,
- "Failed to start queue[%d] of type(%d)\n",
+ PMD_INIT_LOG_LINE(ERR,
+ "Failed to start queue[%d] of type(%d)",
i, ring_type);
goto err;
}
@@ -1173,8 +1173,8 @@ ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
* queue depth when working in large llq policy.
*/
max_tx_queue_size >>= 1;
- PMD_INIT_LOG(INFO,
- "large LLQ policy requires limiting Tx queue size to %u entries\n",
+ PMD_INIT_LOG_LINE(INFO,
+ "large LLQ policy requires limiting Tx queue size to %u entries",
max_tx_queue_size);
} else if (dev->max_wide_llq_depth < max_tx_queue_size) {
/* In case the queue depth that the driver calculated exceeds
@@ -1184,20 +1184,20 @@ ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
max_tx_queue_size = dev->max_wide_llq_depth;
}
} else {
- PMD_INIT_LOG(INFO,
- "Forcing large LLQ headers failed since device lacks this support\n");
+ PMD_INIT_LOG_LINE(INFO,
+ "Forcing large LLQ headers failed since device lacks this support");
}
}
if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
- PMD_INIT_LOG(ERR, "Invalid queue size\n");
+ PMD_INIT_LOG_LINE(ERR, "Invalid queue size");
return -EFAULT;
}
ctx->max_tx_queue_size = max_tx_queue_size;
ctx->max_rx_queue_size = max_rx_queue_size;
- PMD_DRV_LOG(INFO, "tx queue size %u\n", max_tx_queue_size);
+ PMD_DRV_LOG_LINE(INFO, "tx queue size %u", max_tx_queue_size);
return 0;
}
@@ -1228,7 +1228,7 @@ static int ena_stats_get(struct rte_eth_dev *dev,
&ena_stats);
rte_spinlock_unlock(&adapter->admin_lock);
if (unlikely(rc)) {
- PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
+ PMD_DRV_LOG_LINE(ERR, "Could not retrieve statistics from ENA");
return rc;
}
@@ -1286,9 +1286,9 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu);
if (rc)
- PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
+ PMD_DRV_LOG_LINE(ERR, "Could not set MTU: %d", mtu);
else
- PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu);
+ PMD_DRV_LOG_LINE(NOTICE, "MTU set to: %d", mtu);
return rc;
}
@@ -1302,7 +1302,7 @@ static int ena_start(struct rte_eth_dev *dev)
/* Cannot allocate memory in secondary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n");
+ PMD_DRV_LOG_LINE(WARNING, "dev_start not supported in secondary.");
return -EPERM;
}
@@ -1361,7 +1361,7 @@ static int ena_stop(struct rte_eth_dev *dev)
/* Cannot free memory in secondary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n");
+ PMD_DRV_LOG_LINE(WARNING, "dev_stop not supported in secondary.");
return -EPERM;
}
@@ -1372,7 +1372,7 @@ static int ena_stop(struct rte_eth_dev *dev)
if (adapter->trigger_reset) {
rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
if (rc)
- PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "Device reset failed, rc: %d", rc);
}
rte_intr_disable(intr_handle);
@@ -1434,8 +1434,8 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to create IO queue[%d] (qid:%d), rc: %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to create IO queue[%d] (qid:%d), rc: %d",
ring->id, ena_qid, rc);
return rc;
}
@@ -1444,8 +1444,8 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
&ring->ena_com_io_sq,
&ring->ena_com_io_cq);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to get IO queue[%d] handlers, rc: %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to get IO queue[%d] handlers, rc: %d",
ring->id, rc);
ena_com_destroy_io_queue(ena_dev, ena_qid);
return rc;
@@ -1503,7 +1503,7 @@ static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring)
rc = ena_create_io_queue(dev, ring);
if (rc) {
- PMD_INIT_LOG(ERR, "Failed to create IO queue\n");
+ PMD_INIT_LOG_LINE(ERR, "Failed to create IO queue");
return rc;
}
@@ -1521,7 +1521,7 @@ static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring)
if (rc != bufs_num) {
ena_com_destroy_io_queue(&ring->adapter->ena_dev,
ENA_IO_RXQ_IDX(ring->id));
- PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n");
+ PMD_INIT_LOG_LINE(ERR, "Failed to populate Rx ring");
return ENA_COM_FAULT;
}
/* Flush per-core RX buffers pools cache as they can be used on other
@@ -1546,22 +1546,22 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
txq = &adapter->tx_ring[queue_idx];
if (txq->configured) {
- PMD_DRV_LOG(CRIT,
- "API violation. Queue[%d] is already configured\n",
+ PMD_DRV_LOG_LINE(CRIT,
+ "API violation. Queue[%d] is already configured",
queue_idx);
return ENA_COM_FAULT;
}
if (!rte_is_power_of_2(nb_desc)) {
- PMD_DRV_LOG(ERR,
- "Unsupported size of Tx queue: %d is not a power of 2.\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Unsupported size of Tx queue: %d is not a power of 2.",
nb_desc);
return -EINVAL;
}
if (nb_desc > adapter->max_tx_ring_size) {
- PMD_DRV_LOG(ERR,
- "Unsupported size of Tx queue (max size: %d)\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Unsupported size of Tx queue (max size: %d)",
adapter->max_tx_ring_size);
return -EINVAL;
}
@@ -1580,8 +1580,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->tx_buffer_info) {
- PMD_DRV_LOG(ERR,
- "Failed to allocate memory for Tx buffer info\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to allocate memory for Tx buffer info");
return -ENOMEM;
}
@@ -1590,8 +1590,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->empty_tx_reqs) {
- PMD_DRV_LOG(ERR,
- "Failed to allocate memory for empty Tx requests\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to allocate memory for empty Tx requests");
rte_free(txq->tx_buffer_info);
return -ENOMEM;
}
@@ -1602,7 +1602,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->push_buf_intermediate_buf) {
- PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to alloc push buffer for LLQ");
rte_free(txq->tx_buffer_info);
rte_free(txq->empty_tx_reqs);
return -ENOMEM;
@@ -1648,22 +1648,22 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
rxq = &adapter->rx_ring[queue_idx];
if (rxq->configured) {
- PMD_DRV_LOG(CRIT,
- "API violation. Queue[%d] is already configured\n",
+ PMD_DRV_LOG_LINE(CRIT,
+ "API violation. Queue[%d] is already configured",
queue_idx);
return ENA_COM_FAULT;
}
if (!rte_is_power_of_2(nb_desc)) {
- PMD_DRV_LOG(ERR,
- "Unsupported size of Rx queue: %d is not a power of 2.\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Unsupported size of Rx queue: %d is not a power of 2.",
nb_desc);
return -EINVAL;
}
if (nb_desc > adapter->max_rx_ring_size) {
- PMD_DRV_LOG(ERR,
- "Unsupported size of Rx queue (max size: %d)\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Unsupported size of Rx queue (max size: %d)",
adapter->max_rx_ring_size);
return -EINVAL;
}
@@ -1671,8 +1671,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
/* ENA isn't supporting buffers smaller than 1400 bytes */
buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
- PMD_DRV_LOG(ERR,
- "Unsupported size of Rx buffer: %zu (min size: %d)\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Unsupported size of Rx buffer: %zu (min size: %d)",
buffer_size, ENA_RX_BUF_MIN_SIZE);
return -EINVAL;
}
@@ -1690,8 +1690,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq->rx_buffer_info) {
- PMD_DRV_LOG(ERR,
- "Failed to allocate memory for Rx buffer info\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to allocate memory for Rx buffer info");
return -ENOMEM;
}
@@ -1700,8 +1700,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq->rx_refill_buffer) {
- PMD_DRV_LOG(ERR,
- "Failed to allocate memory for Rx refill buffer\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to allocate memory for Rx refill buffer");
rte_free(rxq->rx_buffer_info);
rxq->rx_buffer_info = NULL;
return -ENOMEM;
@@ -1712,8 +1712,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq->empty_rx_reqs) {
- PMD_DRV_LOG(ERR,
- "Failed to allocate memory for empty Rx requests\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to allocate memory for empty Rx requests");
rte_free(rxq->rx_buffer_info);
rxq->rx_buffer_info = NULL;
rte_free(rxq->rx_refill_buffer);
@@ -1754,7 +1754,7 @@ static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
/* pass resource to device */
rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
if (unlikely(rc != 0))
- PMD_RX_LOG(WARNING, "Failed adding Rx desc\n");
+ PMD_RX_LOG_LINE(WARNING, "Failed adding Rx desc");
return rc;
}
@@ -1777,7 +1777,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
in_use = rxq->ring_size - 1 -
ena_com_free_q_entries(rxq->ena_com_io_sq);
if (unlikely((in_use + count) >= rxq->ring_size))
- PMD_RX_LOG(ERR, "Bad Rx ring state\n");
+ PMD_RX_LOG_LINE(ERR, "Bad Rx ring state");
#endif
/* get resources for incoming packets */
@@ -1785,7 +1785,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
if (unlikely(rc < 0)) {
rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
++rxq->rx_stats.mbuf_alloc_fail;
- PMD_RX_LOG(DEBUG, "There are not enough free buffers\n");
+ PMD_RX_LOG_LINE(DEBUG, "There are not enough free buffers");
return 0;
}
@@ -1808,8 +1808,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
}
if (unlikely(i < count)) {
- PMD_RX_LOG(WARNING,
- "Refilled Rx queue[%d] with only %d/%d buffers\n",
+ PMD_RX_LOG_LINE(WARNING,
+ "Refilled Rx queue[%d] with only %d/%d buffers",
rxq->id, i, count);
rte_pktmbuf_free_bulk(&mbufs[i], count - i);
++rxq->rx_stats.refill_partial;
@@ -1835,9 +1835,9 @@ static size_t ena_get_metrics_entries(struct ena_adapter *adapter)
metrics_num = ENA_STATS_ARRAY_METRICS;
else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS))
metrics_num = ENA_STATS_ARRAY_METRICS_LEGACY;
- PMD_DRV_LOG(NOTICE, "0x%x customer metrics are supported\n", (unsigned int)metrics_num);
+ PMD_DRV_LOG_LINE(NOTICE, "0x%x customer metrics are supported", (unsigned int)metrics_num);
if (metrics_num > ENA_MAX_CUSTOMER_METRICS) {
- PMD_DRV_LOG(NOTICE, "Not enough space for the requested customer metrics\n");
+ PMD_DRV_LOG_LINE(NOTICE, "Not enough space for the requested customer metrics");
metrics_num = ENA_MAX_CUSTOMER_METRICS;
}
return metrics_num;
@@ -1855,7 +1855,7 @@ static int ena_device_init(struct ena_adapter *adapter,
/* Initialize mmio registers */
rc = ena_com_mmio_reg_read_request_init(ena_dev);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to init MMIO read less");
return rc;
}
@@ -1868,14 +1868,14 @@ static int ena_device_init(struct ena_adapter *adapter,
/* reset device */
rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
if (rc) {
- PMD_DRV_LOG(ERR, "Cannot reset device\n");
+ PMD_DRV_LOG_LINE(ERR, "Cannot reset device");
goto err_mmio_read_less;
}
/* check FW version */
rc = ena_com_validate_version(ena_dev);
if (rc) {
- PMD_DRV_LOG(ERR, "Device version is too low\n");
+ PMD_DRV_LOG_LINE(ERR, "Device version is too low");
goto err_mmio_read_less;
}
@@ -1884,8 +1884,8 @@ static int ena_device_init(struct ena_adapter *adapter,
/* ENA device administration layer init */
rc = ena_com_admin_init(ena_dev, &aenq_handlers);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Cannot initialize ENA admin queue\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot initialize ENA admin queue");
goto err_mmio_read_less;
}
@@ -1900,8 +1900,8 @@ static int ena_device_init(struct ena_adapter *adapter,
/* Get Device Attributes and features */
rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
if (rc) {
- PMD_DRV_LOG(ERR,
- "Cannot get attribute for ENA device, rc: %d\n", rc);
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot get attribute for ENA device, rc: %d", rc);
goto err_admin_init;
}
@@ -1952,7 +1952,7 @@ static void ena_control_path_poll_handler(void *cb_arg)
rc = rte_eal_alarm_set(adapter->control_path_poll_interval,
ena_control_path_poll_handler, cb_arg);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(ERR, "Failed to retrigger control path alarm\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrigger control path alarm");
ena_trigger_reset(adapter, ENA_REGS_RESET_GENERIC);
}
}
@@ -1968,7 +1968,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
adapter->keep_alive_timeout)) {
- PMD_DRV_LOG(ERR, "Keep alive timeout\n");
+ PMD_DRV_LOG_LINE(ERR, "Keep alive timeout");
ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
++adapter->dev_stats.wd_expired;
}
@@ -1978,7 +1978,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
static void check_for_admin_com_state(struct ena_adapter *adapter)
{
if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
- PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n");
+ PMD_DRV_LOG_LINE(ERR, "ENA admin queue is not in running state");
ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
}
}
@@ -2003,9 +2003,9 @@ static int check_for_tx_completion_in_queue(struct ena_adapter *adapter,
completion_delay = rte_get_timer_cycles() - timestamp;
if (completion_delay > adapter->missing_tx_completion_to) {
if (unlikely(!tx_buf->print_once)) {
- PMD_TX_LOG(WARNING,
+ PMD_TX_LOG_LINE(WARNING,
"Found a Tx that wasn't completed on time, qid %d, index %d. "
- "Missing Tx outstanding for %" PRIu64 " msecs.\n",
+ "Missing Tx outstanding for %" PRIu64 " msecs.",
tx_ring->id, i, completion_delay /
rte_get_timer_hz() * 1000);
tx_buf->print_once = true;
@@ -2015,9 +2015,9 @@ static int check_for_tx_completion_in_queue(struct ena_adapter *adapter,
}
if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG_LINE(ERR,
"The number of lost Tx completions is above the threshold (%d > %d). "
- "Trigger the device reset.\n",
+ "Trigger the device reset.",
missed_tx,
tx_ring->missing_tx_completion_threshold);
adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
@@ -2078,7 +2078,7 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
check_for_tx_completions(adapter);
if (unlikely(adapter->trigger_reset)) {
- PMD_DRV_LOG(ERR, "Trigger reset is on\n");
+ PMD_DRV_LOG_LINE(ERR, "Trigger reset is on");
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
NULL);
}
@@ -2116,32 +2116,32 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter,
u32 llq_feature_mask;
if (adapter->llq_header_policy == ENA_LLQ_POLICY_DISABLED) {
- PMD_DRV_LOG(WARNING,
+ PMD_DRV_LOG_LINE(WARNING,
"NOTE: LLQ has been disabled as per user's request. "
- "This may lead to a huge performance degradation!\n");
+ "This may lead to a huge performance degradation!");
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
return 0;
}
llq_feature_mask = 1 << ENA_ADMIN_LLQ;
if (!(ena_dev->supported_features & llq_feature_mask)) {
- PMD_DRV_LOG(INFO,
- "LLQ is not supported. Fallback to host mode policy.\n");
+ PMD_DRV_LOG_LINE(INFO,
+ "LLQ is not supported. Fallback to host mode policy.");
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
return 0;
}
if (adapter->dev_mem_base == NULL) {
- PMD_DRV_LOG(ERR,
- "LLQ is advertised as supported, but device doesn't expose mem bar\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "LLQ is advertised as supported, but device doesn't expose mem bar");
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
return 0;
}
rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
if (unlikely(rc)) {
- PMD_INIT_LOG(WARNING,
- "Failed to config dev mode. Fallback to host mode policy.\n");
+ PMD_INIT_LOG_LINE(WARNING,
+ "Failed to config dev mode. Fallback to host mode policy.");
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
return 0;
}
@@ -2185,7 +2185,7 @@ static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
if (unlikely(max_num_io_queues == 0)) {
- PMD_DRV_LOG(ERR, "Number of IO queues cannot not be 0\n");
+ PMD_DRV_LOG_LINE(ERR, "Number of IO queues cannot not be 0");
return -EFAULT;
}
@@ -2290,7 +2290,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- PMD_INIT_LOG(INFO, "Initializing " PCI_PRI_FMT "\n",
+ PMD_INIT_LOG_LINE(INFO, "Initializing " PCI_PRI_FMT,
pci_dev->addr.domain,
pci_dev->addr.bus,
pci_dev->addr.devid,
@@ -2302,7 +2302,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
if (!adapter->regs) {
- PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
+ PMD_INIT_LOG_LINE(CRIT, "Failed to access registers BAR(%d)",
ENA_REGS_BAR);
return -ENXIO;
}
@@ -2327,21 +2327,21 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
/* Get user bypass */
rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
if (rc != 0) {
- PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
+ PMD_INIT_LOG_LINE(CRIT, "Failed to parse devargs");
goto err;
}
adapter->llq_header_policy = ena_define_llq_hdr_policy(adapter);
rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
if (rc != 0) {
- PMD_INIT_LOG(CRIT, "Failed to allocate customer metrics buffer\n");
+ PMD_INIT_LOG_LINE(CRIT, "Failed to allocate customer metrics buffer");
goto err;
}
/* device specific initialization routine */
rc = ena_device_init(adapter, pci_dev, &get_feat_ctx);
if (rc) {
- PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
+ PMD_INIT_LOG_LINE(CRIT, "Failed to init ENA device");
goto err_metrics_delete;
}
@@ -2355,7 +2355,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
rc = ena_set_queues_placement_policy(adapter, ena_dev,
&get_feat_ctx.llq, &llq_config);
if (unlikely(rc)) {
- PMD_INIT_LOG(CRIT, "Failed to set placement policy\n");
+ PMD_INIT_LOG_LINE(CRIT, "Failed to set placement policy");
return rc;
}
@@ -2363,9 +2363,9 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
queue_type_str = "Regular";
} else {
queue_type_str = "Low latency";
- PMD_DRV_LOG(INFO, "LLQ entry size %uB\n", llq_config.llq_ring_entry_size_value);
+ PMD_DRV_LOG_LINE(INFO, "LLQ entry size %uB", llq_config.llq_ring_entry_size_value);
}
- PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
+ PMD_DRV_LOG_LINE(INFO, "Placement policy: %s", queue_type_str);
calc_queue_ctx.ena_dev = ena_dev;
calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
@@ -2409,7 +2409,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to initialize RSS in ENA device");
goto err_delete_debug_area;
}
@@ -2417,8 +2417,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
sizeof(*adapter->drv_stats),
RTE_CACHE_LINE_SIZE);
if (!adapter->drv_stats) {
- PMD_DRV_LOG(ERR,
- "Failed to allocate memory for adapter statistics\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to allocate memory for adapter statistics");
rc = -ENOMEM;
goto err_rss_destroy;
}
@@ -2435,7 +2435,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
rc = rte_eal_alarm_set(adapter->control_path_poll_interval,
ena_control_path_poll_handler, eth_dev);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(ERR, "Failed to set control path alarm\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to set control path alarm");
goto err_control_path_destroy;
}
}
@@ -2780,8 +2780,8 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
#ifdef RTE_ETHDEV_DEBUG_RX
/* Check adapter state */
if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
- PMD_RX_LOG(ALERT,
- "Trying to receive pkts while device is NOT running\n");
+ PMD_RX_LOG_LINE(ALERT,
+ "Trying to receive pkts while device is NOT running");
return 0;
}
#endif
@@ -2800,8 +2800,8 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_ring->ena_com_io_sq,
&ena_rx_ctx);
if (unlikely(rc)) {
- PMD_RX_LOG(ERR,
- "Failed to get the packet from the device, rc: %d\n",
+ PMD_RX_LOG_LINE(ERR,
+ "Failed to get the packet from the device, rc: %d",
rc);
if (rc == ENA_COM_NO_SPACE) {
++rx_ring->rx_stats.bad_desc_num;
@@ -2883,8 +2883,8 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* SCTP checksum offload is not supported by the ENA. */
if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) {
- PMD_TX_LOG(DEBUG,
- "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n",
+ PMD_TX_LOG_LINE(DEBUG,
+ "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64,
i, ol_flags);
rte_errno = ENOTSUP;
return i;
@@ -2894,8 +2894,8 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
!(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
m->nb_segs == tx_ring->sgl_size &&
m->data_len < tx_ring->tx_max_header_size))) {
- PMD_TX_LOG(DEBUG,
- "mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n",
+ PMD_TX_LOG_LINE(DEBUG,
+ "mbuf[%" PRIu32 "] has too many segments: %" PRIu16,
i, m->nb_segs);
rte_errno = EINVAL;
return i;
@@ -2909,8 +2909,8 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
!(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
(l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM &&
!(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
- PMD_TX_LOG(DEBUG,
- "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
+ PMD_TX_LOG_LINE(DEBUG,
+ "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]",
i, m->nb_segs, tx_ring->id);
rte_errno = EINVAL;
return i;
@@ -2921,8 +2921,8 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
*/
if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) &&
(m->l2_len == 0 || m->l3_len == 0))) {
- PMD_TX_LOG(DEBUG,
- "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
+ PMD_TX_LOG_LINE(DEBUG,
+ "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested",
i);
rte_errno = EINVAL;
return i;
@@ -3122,7 +3122,7 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
*/
if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
mbuf->nb_segs + 2)) {
- PMD_TX_LOG(DEBUG, "Not enough space in the tx queue\n");
+ PMD_TX_LOG_LINE(DEBUG, "Not enough space in the tx queue");
return ENA_COM_NO_MEM;
}
@@ -3147,8 +3147,8 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
&ena_tx_ctx))) {
- PMD_TX_LOG(DEBUG,
- "LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n",
+ PMD_TX_LOG_LINE(DEBUG,
+ "LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst",
tx_ring->id);
ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
tx_ring->tx_stats.doorbells++;
@@ -3159,7 +3159,7 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
&nb_hw_desc);
if (unlikely(rc)) {
- PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "Failed to prepare Tx buffers, rc: %d", rc);
++tx_ring->tx_stats.prepare_ctx_err;
ena_trigger_reset(tx_ring->adapter,
ENA_REGS_RESET_DRIVER_INVALID_STATE);
@@ -3262,8 +3262,8 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
#ifdef RTE_ETHDEV_DEBUG_TX
/* Check adapter state */
if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
- PMD_TX_LOG(ALERT,
- "Trying to xmit pkts while device is NOT running\n");
+ PMD_TX_LOG_LINE(ALERT,
+ "Trying to xmit pkts while device is NOT running");
return 0;
}
#endif
@@ -3303,7 +3303,7 @@ static void ena_copy_customer_metrics(struct ena_adapter *adapter, uint64_t *buf
if (ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
if (num_metrics != ENA_STATS_ARRAY_METRICS) {
- PMD_DRV_LOG(ERR, "Detected discrepancy in the number of customer metrics");
+ PMD_DRV_LOG_LINE(ERR, "Detected discrepancy in the number of customer metrics");
return;
}
rte_spinlock_lock(&adapter->admin_lock);
@@ -3314,13 +3314,13 @@ static void ena_copy_customer_metrics(struct ena_adapter *adapter, uint64_t *buf
num_metrics * sizeof(uint64_t));
rte_spinlock_unlock(&adapter->admin_lock);
if (rc != 0) {
- PMD_DRV_LOG(WARNING, "Failed to get customer metrics, rc: %d\n", rc);
+ PMD_DRV_LOG_LINE(WARNING, "Failed to get customer metrics, rc: %d", rc);
return;
}
} else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
if (num_metrics != ENA_STATS_ARRAY_METRICS_LEGACY) {
- PMD_DRV_LOG(ERR, "Detected discrepancy in the number of legacy metrics");
+ PMD_DRV_LOG_LINE(ERR, "Detected discrepancy in the number of legacy metrics");
return;
}
@@ -3331,8 +3331,8 @@ static void ena_copy_customer_metrics(struct ena_adapter *adapter, uint64_t *buf
(struct ena_admin_eni_stats *)buf);
rte_spinlock_unlock(&adapter->admin_lock);
if (rc != 0) {
- PMD_DRV_LOG(WARNING,
- "Failed to get ENI metrics, rc: %d\n", rc);
+ PMD_DRV_LOG_LINE(WARNING,
+ "Failed to get ENI metrics, rc: %d", rc);
return;
}
}
@@ -3353,8 +3353,8 @@ static void ena_copy_ena_srd_info(struct ena_adapter *adapter,
(struct ena_admin_ena_srd_info *)srd_info);
rte_spinlock_unlock(&adapter->admin_lock);
if (rc != ENA_COM_OK && rc != ENA_COM_UNSUPPORTED) {
- PMD_DRV_LOG(WARNING,
- "Failed to get ENA express srd info, rc: %d\n", rc);
+ PMD_DRV_LOG_LINE(WARNING,
+ "Failed to get ENA express srd info, rc: %d", rc);
return;
}
}
@@ -3445,8 +3445,8 @@ static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
for (i = 0; i < size; ++i) {
id = ids[i];
if (id > xstats_count) {
- PMD_DRV_LOG(ERR,
- "ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64 "\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64,
id, xstats_count);
return -EINVAL;
}
@@ -3687,45 +3687,45 @@ static int ena_process_uint_devarg(const char *key,
uint64_value = strtoull(value, &str_end, DECIMAL_BASE);
if (value == str_end) {
- PMD_INIT_LOG(ERR,
- "Invalid value for key '%s'. Only uint values are accepted.\n",
+ PMD_INIT_LOG_LINE(ERR,
+ "Invalid value for key '%s'. Only uint values are accepted.",
key);
return -EINVAL;
}
if (strcmp(key, ENA_DEVARG_MISS_TXC_TO) == 0) {
if (uint64_value > ENA_MAX_TX_TIMEOUT_SECONDS) {
- PMD_INIT_LOG(ERR,
- "Tx timeout too high: %" PRIu64 " sec. Maximum allowed: %d sec.\n",
+ PMD_INIT_LOG_LINE(ERR,
+ "Tx timeout too high: %" PRIu64 " sec. Maximum allowed: %d sec.",
uint64_value, ENA_MAX_TX_TIMEOUT_SECONDS);
return -EINVAL;
} else if (uint64_value == 0) {
- PMD_INIT_LOG(INFO,
- "Check for missing Tx completions has been disabled.\n");
+ PMD_INIT_LOG_LINE(INFO,
+ "Check for missing Tx completions has been disabled.");
adapter->missing_tx_completion_to =
ENA_HW_HINTS_NO_TIMEOUT;
} else {
- PMD_INIT_LOG(INFO,
- "Tx packet completion timeout set to %" PRIu64 " seconds.\n",
+ PMD_INIT_LOG_LINE(INFO,
+ "Tx packet completion timeout set to %" PRIu64 " seconds.",
uint64_value);
adapter->missing_tx_completion_to =
uint64_value * rte_get_timer_hz();
}
} else if (strcmp(key, ENA_DEVARG_CONTROL_PATH_POLL_INTERVAL) == 0) {
if (uint64_value > ENA_MAX_CONTROL_PATH_POLL_INTERVAL_MSEC) {
- PMD_INIT_LOG(ERR,
+ PMD_INIT_LOG_LINE(ERR,
"Control path polling interval is too long: %" PRIu64 " msecs. "
- "Maximum allowed: %d msecs.\n",
+ "Maximum allowed: %d msecs.",
uint64_value, ENA_MAX_CONTROL_PATH_POLL_INTERVAL_MSEC);
return -EINVAL;
} else if (uint64_value == 0) {
- PMD_INIT_LOG(INFO,
+ PMD_INIT_LOG_LINE(INFO,
"Control path polling interval is set to zero. Operating in "
- "interrupt mode.\n");
+ "interrupt mode.");
adapter->control_path_poll_interval = 0;
} else {
- PMD_INIT_LOG(INFO,
- "Control path polling interval is set to %" PRIu64 " msecs.\n",
+ PMD_INIT_LOG_LINE(INFO,
+ "Control path polling interval is set to %" PRIu64 " msecs.",
uint64_value);
adapter->control_path_poll_interval = uint64_value * USEC_PER_MSEC;
}
@@ -3747,8 +3747,8 @@ static int ena_process_bool_devarg(const char *key,
} else if (strcmp(value, "0") == 0) {
bool_value = false;
} else {
- PMD_INIT_LOG(ERR,
- "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
+ PMD_INIT_LOG_LINE(ERR,
+ "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'",
value, key);
return -EINVAL;
}
@@ -3783,7 +3783,7 @@ static int ena_parse_devargs(struct ena_adapter *adapter,
kvlist = rte_kvargs_parse(devargs->args, allowed_args);
if (kvlist == NULL) {
- PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
+ PMD_INIT_LOG_LINE(ERR, "Invalid device arguments: %s",
devargs->args);
return -EINVAL;
}
@@ -3827,8 +3827,8 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
return 0;
if (!rte_intr_cap_multiple(intr_handle)) {
- PMD_DRV_LOG(ERR,
- "Rx interrupt requested, but it isn't supported by the PCI driver\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Rx interrupt requested, but it isn't supported by the PCI driver");
return -ENOTSUP;
}
@@ -3838,8 +3838,8 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
/* Verify if there are enough vectors available. */
vectors_nb = dev->data->nb_rx_queues;
if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) {
- PMD_DRV_LOG(ERR,
- "Too many Rx interrupts requested, maximum number: %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Too many Rx interrupts requested, maximum number: %d",
RTE_MAX_RXTX_INTR_VEC_ID);
rc = -ENOTSUP;
goto enable_intr;
@@ -3848,8 +3848,8 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
/* Allocate the vector list */
if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
dev->data->nb_rx_queues)) {
- PMD_DRV_LOG(ERR,
- "Failed to allocate interrupt vector for %d queues\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to allocate interrupt vector for %d queues",
dev->data->nb_rx_queues);
rc = -ENOMEM;
goto enable_intr;
@@ -3860,8 +3860,8 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
goto free_intr_vec;
if (!rte_intr_allow_others(intr_handle)) {
- PMD_DRV_LOG(ERR,
- "Not enough interrupts available to use both ENA Admin and Rx interrupts\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Not enough interrupts available to use both ENA Admin and Rx interrupts");
goto disable_intr_efd;
}
@@ -3920,8 +3920,8 @@ static int ena_configure_aenq(struct ena_adapter *adapter)
*/
if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) {
if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) {
- PMD_DRV_LOG(ERR,
- "LSC requested, but it's not supported by the AENQ\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "LSC requested, but it's not supported by the AENQ");
return -EINVAL;
}
} else {
@@ -3933,7 +3933,7 @@ static int ena_configure_aenq(struct ena_adapter *adapter)
rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups);
if (rc != 0) {
- PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc);
+ PMD_DRV_LOG_LINE(ERR, "Cannot configure AENQ groups, rc=%d", rc);
return rc;
}
@@ -3971,8 +3971,8 @@ ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size,
rc = snprintf(z_name, RTE_MEMZONE_NAMESIZE, "ena_p%d_mz%" PRIu64 "",
data->port_id, adapter->memzone_cnt);
if (rc >= RTE_MEMZONE_NAMESIZE) {
- PMD_DRV_LOG(ERR,
- "Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64 "\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64,
data->port_id, adapter->memzone_cnt);
goto error;
}
@@ -3981,7 +3981,7 @@ ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size,
memzone = rte_memzone_reserve_aligned(z_name, size, socket_id,
RTE_MEMZONE_IOVA_CONTIG, alignment);
if (memzone == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate ena_com memzone: %s\n",
+ PMD_DRV_LOG_LINE(ERR, "Failed to allocate ena_com memzone: %s",
z_name);
goto error;
}
@@ -4070,7 +4070,7 @@ static void ena_notification(void *adapter_data,
struct ena_admin_ena_hw_hints *hints;
if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
- PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. Expected: %x\n",
+ PMD_DRV_LOG_LINE(WARNING, "Invalid AENQ group: %x. Expected: %x",
aenq_e->aenq_common_desc.group,
ENA_ADMIN_NOTIFICATION);
@@ -4081,7 +4081,7 @@ static void ena_notification(void *adapter_data,
ena_update_hints(adapter, hints);
break;
default:
- PMD_DRV_LOG(ERR, "Invalid AENQ notification link state: %d\n",
+ PMD_DRV_LOG_LINE(ERR, "Invalid AENQ notification link state: %d",
aenq_e->aenq_common_desc.syndrome);
}
}
@@ -4121,8 +4121,8 @@ static void ena_suboptimal_configuration(__rte_unused void *adapter_data,
num_bits = BITS_PER_TYPE(desc->notifications_bitmap);
for (bit = 0; bit < num_bits; bit++) {
if (desc->notifications_bitmap & RTE_BIT64(bit)) {
- PMD_DRV_LOG(WARNING,
- "Sub-optimal configuration notification code: %d\n", bit + 1);
+ PMD_DRV_LOG_LINE(WARNING,
+ "Sub-optimal configuration notification code: %d", bit + 1);
}
}
}
@@ -4133,8 +4133,8 @@ static void ena_suboptimal_configuration(__rte_unused void *adapter_data,
static void unimplemented_aenq_handler(__rte_unused void *data,
__rte_unused struct ena_admin_aenq_entry *aenq_e)
{
- PMD_DRV_LOG(ERR,
- "Unknown event was received or event with unimplemented handler\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Unknown event was received or event with unimplemented handler");
}
static struct ena_aenq_handlers aenq_handlers = {
@@ -4168,7 +4168,7 @@ ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
if (!rte_eth_dev_is_valid_port(req->port_id)) {
rte_errno = ENODEV;
res = -rte_errno;
- PMD_DRV_LOG(ERR, "Unknown port %d in request %d\n",
+ PMD_DRV_LOG_LINE(ERR, "Unknown port %d in request %d",
req->port_id, req->type);
goto end;
}
@@ -4205,7 +4205,7 @@ ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
(struct ena_admin_ena_srd_info *)&adapter->srd_stats);
break;
default:
- PMD_DRV_LOG(ERR, "Unknown request type %d\n", req->type);
+ PMD_DRV_LOG_LINE(ERR, "Unknown request type %d", req->type);
res = -EINVAL;
break;
}
@@ -4233,7 +4233,7 @@ static bool ena_use_large_llq_hdr(struct ena_adapter *adapter, uint8_t recommend
if (adapter->llq_header_policy == ENA_LLQ_POLICY_LARGE) {
return true;
} else if (adapter->llq_header_policy == ENA_LLQ_POLICY_RECOMMENDED) {
- PMD_DRV_LOG(INFO, "Recommended device entry size policy %u\n",
+ PMD_DRV_LOG_LINE(INFO, "Recommended device entry size policy %u",
recommended_entry_size);
if (recommended_entry_size == ENA_ADMIN_LIST_ENTRY_SIZE_256B)
return true;
@@ -7,31 +7,31 @@
#define _ENA_LOGS_H_
extern int ena_logtype_init;
-#define PMD_INIT_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, ena_logtype_init, \
- "%s(): " fmt, __func__, ## args)
+#define RTE_LOGTYPE_ENA_INIT ena_logtype_init
+#define PMD_INIT_LOG_LINE(level, fmt, ...) \
+ RTE_LOG(level, ENA_INIT, "%s(): " fmt "\n", __func__, ## __VA_ARGS__)
#ifdef RTE_ETHDEV_DEBUG_RX
extern int ena_logtype_rx;
-#define PMD_RX_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, ena_logtype_rx, \
- "%s(): " fmt, __func__, ## args)
+#define RTE_LOGTYPE_ENA_RX ena_logtype_rx
+#define PMD_RX_LOG_LINE(level, fmt, ...) \
+ RTE_LOG(level, ENA_RX, "%s(): " fmt "\n", __func__, ## __VA_ARGS__)
#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#define PMD_RX_LOG_LINE(...) do { } while (0)
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
extern int ena_logtype_tx;
-#define PMD_TX_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, ena_logtype_tx, \
- "%s(): " fmt, __func__, ## args)
+#define RTE_LOGTYPE_ENA_TX ena_logtype_tx
+#define PMD_TX_LOG_LINE(level, fmt, ...) \
+ RTE_LOG(level, ENA_TX, "%s(): " fmt "\n", __func__, ## __VA_ARGS__)
#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#define PMD_TX_LOG_LINE(...) do { } while (0)
#endif
extern int ena_logtype_driver;
-#define PMD_DRV_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, ena_logtype_driver, \
- "%s(): " fmt, __func__, ## args)
+#define RTE_LOGTYPE_ENA_DRIVER ena_logtype_driver
+#define PMD_DRV_LOG_LINE(level, fmt, ...) \
+ RTE_LOG(level, ENA_DRIVER, "%s(): " fmt "\n", __func__, ## __VA_ARGS__)
#endif /* _ENA_LOGS_H_ */
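
The header rework above hinges on RTE_LOG() token-pasting its second argument onto the RTE_LOGTYPE_ prefix, which is why each existing logtype variable gains an RTE_LOGTYPE_ENA_* alias. A minimal sketch of the expansion, assuming the stock rte_log.h definition of RTE_LOG() (which also prepends the logtype name to the format string):

/* PMD_DRV_LOG_LINE(ERR, "oops %d", 42) goes through
 *   RTE_LOG(ERR, ENA_DRIVER, "%s(): " "oops %d" "\n", __func__, 42)
 * and ends up roughly as: */
rte_log(RTE_LOG_ERR, RTE_LOGTYPE_ENA_DRIVER,	/* == ena_logtype_driver */
	"ENA_DRIVER: " "%s(): " "oops %d" "\n", __func__, 42);

Each call thus emits one complete line, newline included, which is what allows every call site converted above to drop its trailing "\n".
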
@@ -76,14 +76,14 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
return -EINVAL;
if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
- PMD_DRV_LOG(ERR,
- "RSS was not configured for the PMD\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "RSS was not configured for the PMD");
return -ENOTSUP;
}
if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
- PMD_DRV_LOG(WARNING,
- "Requested indirection table size (%d) is bigger than supported: %d\n",
+ PMD_DRV_LOG_LINE(WARNING,
+ "Requested indirection table size (%d) is bigger than supported: %d",
reta_size, ENA_RX_RSS_TABLE_SIZE);
return -EINVAL;
}
@@ -103,8 +103,8 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
entry_value);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(ERR,
- "Cannot fill indirection table\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot fill indirection table");
rte_spinlock_unlock(&adapter->admin_lock);
return rc;
}
@@ -114,11 +114,11 @@ int ena_rss_reta_update(struct rte_eth_dev *dev,
rc = ena_mp_indirect_table_set(adapter);
rte_spinlock_unlock(&adapter->admin_lock);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(ERR, "Cannot set the indirection table\n");
+ PMD_DRV_LOG_LINE(ERR, "Cannot set the indirection table");
return rc;
}
- PMD_DRV_LOG(DEBUG, "RSS configured %d entries for port %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "RSS configured %d entries for port %d",
reta_size, dev->data->port_id);
return 0;
@@ -140,8 +140,8 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
return -EINVAL;
if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
- PMD_DRV_LOG(ERR,
- "RSS was not configured for the PMD\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "RSS was not configured for the PMD");
return -ENOTSUP;
}
@@ -149,7 +149,7 @@ int ena_rss_reta_query(struct rte_eth_dev *dev,
rc = ena_mp_indirect_table_get(adapter, indirect_table);
rte_spinlock_unlock(&adapter->admin_lock);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(ERR, "Cannot get indirection table\n");
+ PMD_DRV_LOG_LINE(ERR, "Cannot get indirection table");
return rc;
}
@@ -177,8 +177,8 @@ static int ena_fill_indirect_table_default(struct ena_com_dev *ena_dev,
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(DEBUG,
- "Failed to set %zu indirection table entry with val %" PRIu16 "\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Failed to set %zu indirection table entry with val %" PRIu16 "",
i, val);
return rc;
}
@@ -380,8 +380,8 @@ static int ena_set_hash_fields(struct ena_com_dev *ena_dev, uint64_t rss_hf)
(enum ena_admin_flow_hash_proto)i,
selected_fields[i].fields);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(DEBUG,
- "Failed to set ENA HF %d with fields %" PRIu16 "\n",
+ PMD_DRV_LOG_LINE(DEBUG,
+ "Failed to set ENA HF %d with fields %" PRIu16 "",
i, selected_fields[i].fields);
return rc;
}
@@ -411,23 +411,23 @@ static int ena_rss_hash_set(struct ena_com_dev *ena_dev,
rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
rss_key, ENA_HASH_KEY_SIZE, 0);
if (rc != 0 && !(default_allowed && rc == ENA_COM_UNSUPPORTED)) {
- PMD_DRV_LOG(ERR,
- "Failed to set RSS hash function in the device\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to set RSS hash function in the device");
return rc;
}
rc = ena_set_hash_fields(ena_dev, rss_conf->rss_hf);
if (rc == ENA_COM_UNSUPPORTED) {
if (rss_conf->rss_key == NULL && !default_allowed) {
- PMD_DRV_LOG(ERR,
- "Setting RSS hash fields is not supported\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Setting RSS hash fields is not supported");
return -ENOTSUP;
}
- PMD_DRV_LOG(WARNING,
- "Setting RSS hash fields is not supported. Using default values: 0x%" PRIx64 "\n",
+ PMD_DRV_LOG_LINE(WARNING,
+ "Setting RSS hash fields is not supported. Using default values: 0x%"PRIx64,
(uint64_t)(ENA_ALL_RSS_HF));
} else if (rc != 0) {
- PMD_DRV_LOG(ERR, "Failed to set RSS hash fields\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to set RSS hash fields");
return rc;
}
@@ -456,8 +456,8 @@ static int ena_get_rss_hash_key(struct ena_com_dev *ena_dev, uint8_t *rss_key)
* explicitly set, this operation shouldn't be supported.
*/
if (ena_dev->rss.hash_key == NULL) {
- PMD_DRV_LOG(WARNING,
- "Retrieving default RSS hash key is not supported\n");
+ PMD_DRV_LOG_LINE(WARNING,
+ "Retrieving default RSS hash key is not supported");
return -ENOTSUP;
}
@@ -489,25 +489,25 @@ int ena_rss_configure(struct ena_adapter *adapter)
rc = ena_fill_indirect_table_default(ena_dev, ENA_RX_RSS_TABLE_SIZE,
adapter->edev_data->nb_rx_queues);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(ERR,
- "Failed to fill indirection table with default values\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to fill indirection table with default values");
return rc;
}
rc = ena_com_indirect_table_set(ena_dev);
if (unlikely(rc != 0 && rc != ENA_COM_UNSUPPORTED)) {
- PMD_DRV_LOG(ERR,
- "Failed to set indirection table in the device\n");
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to set indirection table in the device");
return rc;
}
rc = ena_rss_hash_set(ena_dev, rss_conf, true);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(ERR, "Failed to set RSS hash\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to set RSS hash");
return rc;
}
- PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n",
+ PMD_DRV_LOG_LINE(DEBUG, "RSS configured for port %d",
adapter->edev_data->port_id);
return 0;
@@ -523,7 +523,7 @@ int ena_rss_hash_update(struct rte_eth_dev *dev,
rc = ena_rss_hash_set(&adapter->ena_dev, rss_conf, false);
rte_spinlock_unlock(&adapter->admin_lock);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(ERR, "Failed to set RSS hash\n");
+ PMD_DRV_LOG_LINE(ERR, "Failed to set RSS hash");
return rc;
}
@@ -542,15 +542,15 @@ int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
static bool warn_once;
if (!(dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
- PMD_DRV_LOG(ERR, "RSS was not configured for the PMD\n");
+ PMD_DRV_LOG_LINE(ERR, "RSS was not configured for the PMD");
return -ENOTSUP;
}
if (rss_conf->rss_key != NULL) {
rc = ena_get_rss_hash_key(ena_dev, rss_conf->rss_key);
if (unlikely(rc != 0)) {
- PMD_DRV_LOG(ERR,
- "Cannot retrieve RSS hash key, err: %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot retrieve RSS hash key, err: %d",
rc);
return rc;
}
@@ -569,15 +569,15 @@ int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
* interested only in the key value.
*/
if (!warn_once) {
- PMD_DRV_LOG(WARNING,
- "Reading hash control from the device is not supported. .rss_hf will contain a default value.\n");
+ PMD_DRV_LOG_LINE(WARNING,
+ "Reading hash control from the device is not supported. .rss_hf will contain a default value.");
warn_once = true;
}
rss_hf = ENA_ALL_RSS_HF;
break;
} else if (rc != 0) {
- PMD_DRV_LOG(ERR,
- "Failed to retrieve hash ctrl for proto: %d with err: %d\n",
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to retrieve hash ctrl for proto: %d with err: %d",
i, rc);
return rc;
}
@@ -22,9 +22,11 @@
#include "rte_eth_vhost.h"
RTE_LOG_REGISTER_DEFAULT(vhost_logtype, NOTICE);
+#define RTE_LOGTYPE_VHOST vhost_logtype
-#define VHOST_LOG(level, ...) \
- rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
+#define VHOST_LOG_LINE(level, ...) \
+ RTE_LOG(level, VHOST, RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__ ,)))
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
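
Unlike the ENA macros, the old VHOST_LOG() had no separate fmt parameter, so a "\n" cannot simply be concatenated onto a named argument. The RTE_FMT_HEAD()/RTE_FMT_TAIL() helpers from rte_common.h split __VA_ARGS__ back into format and arguments; a rough sketch of why this works, paraphrasing their definitions:

/* #define RTE_FMT(fmt, ...)      fmt "%.0s", __VA_ARGS__ ""
 * #define RTE_FMT_HEAD(fmt, ...) fmt
 * #define RTE_FMT_TAIL(fmt, ...) __VA_ARGS__
 *
 * VHOST_LOG_LINE(INFO, "vring%u is %s", vring, state)
 * therefore expands along the lines of: */
RTE_LOG(INFO, VHOST, "vring%u is %s" "\n" "%.0s", vring, state, "");

The trailing "%.0s" prints zero characters of the sentinel "" argument, keeping the expansion valid even when the format string is the only argument, as in VHOST_LOG_LINE(ERR, "Unused port").
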
@@ -298,7 +300,7 @@ vhost_dev_csum_configure(struct rte_eth_dev *eth_dev)
if (internal->features & (1ULL << VIRTIO_NET_F_CSUM)) {
if (!(rxmode->offloads &
(RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))) {
- VHOST_LOG(NOTICE, "Rx csum will be done in SW, may impact performance.\n");
+ VHOST_LOG_LINE(NOTICE, "Rx csum will be done in SW, may impact performance.");
internal->rx_sw_csum = true;
}
}
@@ -306,7 +308,7 @@ vhost_dev_csum_configure(struct rte_eth_dev *eth_dev)
if (!(internal->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM))) {
if (txmode->offloads &
(RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
- VHOST_LOG(NOTICE, "Tx csum will be done in SW, may impact performance.\n");
+ VHOST_LOG_LINE(NOTICE, "Tx csum will be done in SW, may impact performance.");
internal->tx_sw_csum = true;
}
}
@@ -557,7 +559,7 @@ eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx)
return;
if (rte_vhost_get_vhost_vring(vq->vid, (rxq_idx << 1) + 1, &vring) < 0) {
- VHOST_LOG(DEBUG, "Failed to get rxq-%d's vring, skip!\n", rxq_idx);
+ VHOST_LOG_LINE(DEBUG, "Failed to get rxq-%d's vring, skip!", rxq_idx);
return;
}
@@ -566,10 +568,10 @@ eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx)
/* Remove previous kickfd from proxy epoll */
if (vq->kickfd >= 0 && vq->kickfd != vring.kickfd) {
if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_DEL, vq->kickfd, &vq->ev) < 0) {
- VHOST_LOG(DEBUG, "Failed to unregister %d from rxq-%d epoll: %s\n",
+ VHOST_LOG_LINE(DEBUG, "Failed to unregister %d from rxq-%d epoll: %s",
vq->kickfd, rxq_idx, strerror(errno));
} else {
- VHOST_LOG(DEBUG, "Unregistered %d from rxq-%d epoll\n",
+ VHOST_LOG_LINE(DEBUG, "Unregistered %d from rxq-%d epoll",
vq->kickfd, rxq_idx);
}
vq->kickfd = -1;
@@ -578,11 +580,11 @@ eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx)
/* Add new one, if valid */
if (vq->kickfd != vring.kickfd && vring.kickfd >= 0) {
if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_ADD, vring.kickfd, &vq->ev) < 0) {
- VHOST_LOG(ERR, "Failed to register %d in rxq-%d epoll: %s\n",
+ VHOST_LOG_LINE(ERR, "Failed to register %d in rxq-%d epoll: %s",
vring.kickfd, rxq_idx, strerror(errno));
} else {
vq->kickfd = vring.kickfd;
- VHOST_LOG(DEBUG, "Registered %d in rxq-%d epoll\n",
+ VHOST_LOG_LINE(DEBUG, "Registered %d in rxq-%d epoll",
vq->kickfd, rxq_idx);
}
}
@@ -643,7 +645,7 @@ eth_vhost_install_intr(struct rte_eth_dev *dev)
dev->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
if (dev->intr_handle == NULL) {
- VHOST_LOG(ERR, "Fail to allocate intr_handle\n");
+ VHOST_LOG_LINE(ERR, "Fail to allocate intr_handle");
ret = -ENOMEM;
goto error;
}
@@ -653,17 +655,17 @@ eth_vhost_install_intr(struct rte_eth_dev *dev)
}
if (rte_intr_vec_list_alloc(dev->intr_handle, NULL, nb_rxq)) {
- VHOST_LOG(ERR, "Failed to allocate memory for interrupt vector\n");
+ VHOST_LOG_LINE(ERR, "Failed to allocate memory for interrupt vector");
ret = -ENOMEM;
goto error;
}
- VHOST_LOG(DEBUG, "Prepare intr vec\n");
+ VHOST_LOG_LINE(DEBUG, "Prepare intr vec");
for (i = 0; i < nb_rxq; i++) {
int epoll_fd = epoll_create1(0);
if (epoll_fd < 0) {
- VHOST_LOG(ERR, "Failed to create proxy epoll fd for rxq-%d\n", i);
+ VHOST_LOG_LINE(ERR, "Failed to create proxy epoll fd for rxq-%d", i);
ret = -errno;
goto error;
}
@@ -707,7 +709,7 @@ eth_vhost_configure_intr(struct rte_eth_dev *dev)
{
int i;
- VHOST_LOG(DEBUG, "Configure intr vec\n");
+ VHOST_LOG_LINE(DEBUG, "Configure intr vec");
for (i = 0; i < dev->data->nb_rx_queues; i++)
eth_vhost_update_intr(dev, i);
}
@@ -718,7 +720,7 @@ eth_vhost_unconfigure_intr(struct rte_eth_dev *eth_dev)
struct vhost_queue *vq;
int i;
- VHOST_LOG(DEBUG, "Unconfigure intr vec\n");
+ VHOST_LOG_LINE(DEBUG, "Unconfigure intr vec");
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
vq = eth_dev->data->rx_queues[i];
if (vq == NULL || vq->vid < 0)
@@ -729,10 +731,10 @@ eth_vhost_unconfigure_intr(struct rte_eth_dev *eth_dev)
/* Remove previous kickfd from proxy epoll */
if (vq->kickfd >= 0) {
if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_DEL, vq->kickfd, &vq->ev) < 0) {
- VHOST_LOG(DEBUG, "Failed to unregister %d from rxq-%d epoll: %s\n",
+ VHOST_LOG_LINE(DEBUG, "Failed to unregister %d from rxq-%d epoll: %s",
vq->kickfd, i, strerror(errno));
} else {
- VHOST_LOG(DEBUG, "Unregistered %d from rxq-%d epoll\n",
+ VHOST_LOG_LINE(DEBUG, "Unregistered %d from rxq-%d epoll",
vq->kickfd, i);
}
vq->kickfd = -1;
@@ -826,7 +828,7 @@ new_device(int vid)
rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
- VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
+ VHOST_LOG_LINE(INFO, "Invalid device name: %s", ifname);
return -1;
}
@@ -841,7 +843,7 @@ new_device(int vid)
#endif
if (rte_vhost_get_negotiated_features(vid, &internal->features)) {
- VHOST_LOG(ERR, "Failed to get device features\n");
+ VHOST_LOG_LINE(ERR, "Failed to get device features");
return -1;
}
@@ -864,7 +866,7 @@ new_device(int vid)
rte_atomic32_set(&internal->dev_attached, 1);
update_queuing_status(eth_dev, false);
- VHOST_LOG(INFO, "Vhost device %d created\n", vid);
+ VHOST_LOG_LINE(INFO, "Vhost device %d created", vid);
rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
@@ -885,7 +887,7 @@ destroy_device(int vid)
rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
- VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
+ VHOST_LOG_LINE(ERR, "Invalid interface name: %s", ifname);
return;
}
eth_dev = list->eth_dev;
@@ -921,7 +923,7 @@ destroy_device(int vid)
state->max_vring = 0;
rte_spinlock_unlock(&state->lock);
- VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
+ VHOST_LOG_LINE(INFO, "Vhost device %d destroyed", vid);
rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
@@ -937,7 +939,7 @@ vring_state_changed(int vid, uint16_t vring, int enable)
rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
- VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
+ VHOST_LOG_LINE(ERR, "Invalid interface name: %s", ifname);
return -1;
}
@@ -959,7 +961,7 @@ vring_state_changed(int vid, uint16_t vring, int enable)
update_queuing_status(eth_dev, false);
- VHOST_LOG(INFO, "vring%u is %s\n",
+ VHOST_LOG_LINE(INFO, "vring%u is %s",
vring, enable ? "enabled" : "disabled");
rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
@@ -1018,12 +1020,12 @@ vhost_driver_setup(struct rte_eth_dev *eth_dev)
if (rte_vhost_driver_callback_register(internal->iface_name,
&vhost_ops) < 0) {
- VHOST_LOG(ERR, "Can't register callbacks\n");
+ VHOST_LOG_LINE(ERR, "Can't register callbacks");
goto drv_unreg;
}
if (rte_vhost_driver_start(internal->iface_name) < 0) {
- VHOST_LOG(ERR, "Failed to start driver for %s\n",
+ VHOST_LOG_LINE(ERR, "Failed to start driver for %s",
internal->iface_name);
goto drv_unreg;
}
@@ -1053,13 +1055,13 @@ rte_eth_vhost_get_queue_event(uint16_t port_id,
int idx;
if (port_id >= RTE_MAX_ETHPORTS) {
- VHOST_LOG(ERR, "Invalid port id\n");
+ VHOST_LOG_LINE(ERR, "Invalid port id");
return -1;
}
state = vring_states[port_id];
if (!state) {
- VHOST_LOG(ERR, "Unused port\n");
+ VHOST_LOG_LINE(ERR, "Unused port");
return -1;
}
@@ -1139,7 +1141,7 @@ eth_dev_start(struct rte_eth_dev *eth_dev)
eth_vhost_uninstall_intr(eth_dev);
if (dev_conf->intr_conf.rxq && eth_vhost_install_intr(eth_dev) < 0) {
- VHOST_LOG(ERR, "Failed to install interrupt handler.\n");
+ VHOST_LOG_LINE(ERR, "Failed to install interrupt handler.");
return -1;
}
@@ -1235,7 +1237,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (vq == NULL) {
- VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
+ VHOST_LOG_LINE(ERR, "Failed to allocate memory for rx queue");
return -ENOMEM;
}
@@ -1259,7 +1261,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (vq == NULL) {
- VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
+ VHOST_LOG_LINE(ERR, "Failed to allocate memory for tx queue");
return -ENOMEM;
}
@@ -1279,7 +1281,7 @@ eth_dev_info(struct rte_eth_dev *dev,
internal = dev->data->dev_private;
if (internal == NULL) {
- VHOST_LOG(ERR, "Invalid device specified\n");
+ VHOST_LOG_LINE(ERR, "Invalid device specified");
return -ENODEV;
}
@@ -1508,7 +1510,7 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
struct rte_eth_dev *eth_dev = NULL;
struct rte_ether_addr *eth_addr = NULL;
- VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
+ VHOST_LOG_LINE(INFO, "Creating VHOST-USER backend on numa socket %u",
numa_node);
/* reserve an ethdev entry */
@@ -1613,12 +1615,12 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
struct rte_eth_dev *eth_dev;
const char *name = rte_vdev_device_name(dev);
- VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
+ VHOST_LOG_LINE(INFO, "Initializing pmd_vhost for %s", name);
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (!eth_dev) {
- VHOST_LOG(ERR, "Failed to probe %s\n", name);
+ VHOST_LOG_LINE(ERR, "Failed to probe %s", name);
return -1;
}
eth_dev->rx_pkt_burst = eth_vhost_rx;
@@ -1736,7 +1738,7 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
ret = eth_dev_vhost_create(dev, iface_name, queues,
dev->device.numa_node, flags, disable_flags);
if (ret == -1)
- VHOST_LOG(ERR, "Failed to create %s\n", name);
+ VHOST_LOG_LINE(ERR, "Failed to create %s", name);
out_free:
rte_kvargs_free(kvlist);
@@ -1750,7 +1752,7 @@ rte_pmd_vhost_remove(struct rte_vdev_device *dev)
struct rte_eth_dev *eth_dev = NULL;
name = rte_vdev_device_name(dev);
- VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name);
+ VHOST_LOG_LINE(INFO, "Un-Initializing pmd_vhost for %s", name);
/* find an ethdev entry */
eth_dev = rte_eth_dev_allocated(name);