From patchwork Fri Dec 22 17:11:49 2023
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Stephen Hemminger
X-Patchwork-Id: 135523
X-Patchwork-Delegate: thomas@monjalon.net
From: Stephen Hemminger
To: dev@dpdk.org
Cc: Stephen Hemminger, Abdullah Sevincer, Timothy McDaniel, Gage Eads
Subject: [PATCH v6 16/20] event/dlb2: use dedicated logtype
Date: Fri, 22 Dec 2023 09:11:49 -0800
Message-ID: <20231222171820.8778-17-stephen@networkplumber.org>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20231222171820.8778-1-stephen@networkplumber.org>
References: <20231213014408.612051-1-stephen@networkplumber.org>
	<20231222171820.8778-1-stephen@networkplumber.org>

The driver was using RTE_LOGTYPE_PMD even though it has its own
dedicated logtype. Switch to the dedicated logtype and, in line with
the rest of this series, drop the explicit trailing newlines from the
log format strings.

Fixes: 5433956d5185 ("event/dlb2: add eventdev probe")
Signed-off-by: Stephen Hemminger
---
 drivers/event/dlb2/dlb2.c | 275 +++++++++++++++++++-------------------
 1 file changed, 137 insertions(+), 138 deletions(-)

diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 050ace0904b4..419876490780 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -169,7 +169,7 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
 	ret = dlb2_iface_get_num_resources(handle,
 					   &dlb2->hw_rsrc_query_results);
 	if (ret) {
-		DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
+		DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d", ret);
 		return ret;
 	}
 
@@ -259,7 +259,7 @@ set_producer_coremask(const char *key __rte_unused,
 	const char **mask_str = opaque;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -293,7 +293,7 @@ set_max_cq_depth(const char *key __rte_unused,
 	int ret;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -304,7 +304,7 @@ set_max_cq_depth(const char *key __rte_unused,
 	if (*max_cq_depth < DLB2_MIN_CQ_DEPTH_OVERRIDE ||
 	    *max_cq_depth > DLB2_MAX_CQ_DEPTH_OVERRIDE ||
 	    !rte_is_power_of_2(*max_cq_depth)) {
-		DLB2_LOG_ERR("dlb2: max_cq_depth %d and %d and a power of 2\n",
+		DLB2_LOG_ERR("dlb2: max_cq_depth %d and %d and a power of 2",
 			     DLB2_MIN_CQ_DEPTH_OVERRIDE,
 			     DLB2_MAX_CQ_DEPTH_OVERRIDE);
 		return -EINVAL;
@@ -322,7 +322,7 @@ set_max_enq_depth(const char *key __rte_unused,
 	int ret;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -333,7 +333,7 @@ set_max_enq_depth(const char *key __rte_unused,
 	if (*max_enq_depth < DLB2_MIN_ENQ_DEPTH_OVERRIDE ||
 	    *max_enq_depth > DLB2_MAX_ENQ_DEPTH_OVERRIDE ||
 	    !rte_is_power_of_2(*max_enq_depth)) {
-		DLB2_LOG_ERR("dlb2: max_enq_depth %d and %d and a power of 2\n",
+		DLB2_LOG_ERR("dlb2: max_enq_depth %d and %d and a power of 2",
 			     DLB2_MIN_ENQ_DEPTH_OVERRIDE,
 			     DLB2_MAX_ENQ_DEPTH_OVERRIDE);
 		return -EINVAL;
@@ -351,7 +351,7 @@ set_max_num_events(const char *key __rte_unused,
 	int ret;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -361,7 +361,7 @@ set_max_num_events(const char *key __rte_unused,
 
 	if (*max_num_events < 0 ||
 	    *max_num_events > DLB2_MAX_NUM_LDB_CREDITS) {
-		DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
+		DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d",
DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d", DLB2_MAX_NUM_LDB_CREDITS); return -EINVAL; } @@ -378,7 +378,7 @@ set_num_dir_credits(const char *key __rte_unused, int ret; if (value == NULL || opaque == NULL) { - DLB2_LOG_ERR("NULL pointer\n"); + DLB2_LOG_ERR("NULL pointer"); return -EINVAL; } @@ -388,7 +388,7 @@ set_num_dir_credits(const char *key __rte_unused, if (*num_dir_credits < 0 || *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)) { - DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n", + DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d", DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)); return -EINVAL; } @@ -405,7 +405,7 @@ set_dev_id(const char *key __rte_unused, int ret; if (value == NULL || opaque == NULL) { - DLB2_LOG_ERR("NULL pointer\n"); + DLB2_LOG_ERR("NULL pointer"); return -EINVAL; } @@ -425,7 +425,7 @@ set_poll_interval(const char *key __rte_unused, int ret; if (value == NULL || opaque == NULL) { - DLB2_LOG_ERR("NULL pointer\n"); + DLB2_LOG_ERR("NULL pointer"); return -EINVAL; } @@ -445,7 +445,7 @@ set_port_cos(const char *key __rte_unused, int first, last, cos_id, i; if (value == NULL || opaque == NULL) { - DLB2_LOG_ERR("NULL pointer\n"); + DLB2_LOG_ERR("NULL pointer"); return -EINVAL; } @@ -458,18 +458,18 @@ set_port_cos(const char *key __rte_unused, } else if (sscanf(value, "%d:%d", &first, &cos_id) == 2) { last = first; } else { - DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. Should be port-port:val, or port:val\n"); + DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. Should be port-port:val, or port:val"); return -EINVAL; } if (first > last || first < 0 || last >= DLB2_MAX_NUM_LDB_PORTS) { - DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value\n"); + DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value"); return -EINVAL; } if (cos_id < DLB2_COS_0 || cos_id > DLB2_COS_3) { - DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 4\n"); + DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 4"); return -EINVAL; } @@ -487,7 +487,7 @@ set_cos_bw(const char *key __rte_unused, struct dlb2_cos_bw *cos_bw = opaque; if (opaque == NULL) { - DLB2_LOG_ERR("NULL pointer\n"); + DLB2_LOG_ERR("NULL pointer"); return -EINVAL; } @@ -495,11 +495,11 @@ set_cos_bw(const char *key __rte_unused, if (sscanf(value, "%d:%d:%d:%d", &cos_bw->val[0], &cos_bw->val[1], &cos_bw->val[2], &cos_bw->val[3]) != 4) { - DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100\n"); + DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100"); return -EINVAL; } if (cos_bw->val[0] + cos_bw->val[1] + cos_bw->val[2] + cos_bw->val[3] > 100) { - DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100\n"); + DLB2_LOG_ERR("Error parsing cos bandwidth devarg. 
 		return -EINVAL;
 	}
 
@@ -515,7 +515,7 @@ set_sw_credit_quanta(const char *key __rte_unused,
 	int ret;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -524,7 +524,7 @@ set_sw_credit_quanta(const char *key __rte_unused,
 		return ret;
 
 	if (*sw_credit_quanta <= 0) {
-		DLB2_LOG_ERR("sw_credit_quanta must be > 0\n");
+		DLB2_LOG_ERR("sw_credit_quanta must be > 0");
 		return -EINVAL;
 	}
 
@@ -540,7 +540,7 @@ set_hw_credit_quanta(const char *key __rte_unused,
 	int ret;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -560,7 +560,7 @@ set_default_depth_thresh(const char *key __rte_unused,
 	int ret;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -579,7 +579,7 @@ set_vector_opts_enab(const char *key __rte_unused,
 	bool *dlb2_vector_opts_enabled = opaque;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -599,7 +599,7 @@ set_default_ldb_port_allocation(const char *key __rte_unused,
	bool *default_ldb_port_allocation = opaque;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -619,7 +619,7 @@ set_enable_cq_weight(const char *key __rte_unused,
 	bool *enable_cq_weight = opaque;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -640,7 +640,7 @@ set_qid_depth_thresh(const char *key __rte_unused,
 	int first, last, thresh, i;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -657,18 +657,18 @@ set_qid_depth_thresh(const char *key __rte_unused,
 	} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
 		last = first;
 	} else {
-		DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
+		DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val");
 		return -EINVAL;
 	}
 
 	if (first > last || first < 0 ||
 	    last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2)) {
-		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
+		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value");
 		return -EINVAL;
 	}
 
 	if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
-		DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
+		DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d",
 			     DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
 		return -EINVAL;
 	}
 
@@ -688,7 +688,7 @@ set_qid_depth_thresh_v2_5(const char *key __rte_unused,
 	int first, last, thresh, i;
 
 	if (value == NULL || opaque == NULL) {
-		DLB2_LOG_ERR("NULL pointer\n");
+		DLB2_LOG_ERR("NULL pointer");
 		return -EINVAL;
 	}
 
@@ -705,18 +705,18 @@ set_qid_depth_thresh_v2_5(const char *key __rte_unused,
 	} else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
 		last = first;
 	} else {
-		DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
+		DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val");
 		return -EINVAL;
 	}
 
 	if (first > last || first < 0 ||
 	    last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5)) {
-		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
+		DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value");
 		return -EINVAL;
 	}
 
 	if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
-		DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
+		DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d",
 			     DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
 		return -EINVAL;
 	}
 
@@ -738,7 +738,7 @@ dlb2_eventdev_info_get(struct rte_eventdev *dev,
 	if (ret) {
 		const struct rte_eventdev_data *data = dev->data;
 
-		DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
+		DLB2_LOG_ERR("get resources err=%d, devid=%d",
 			     ret, data->dev_id);
 		/* fn is void, so fall through and return values set up in
 		 * probe
@@ -781,7 +781,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
 	struct dlb2_create_sched_domain_args *cfg;
 
 	if (resources_asked == NULL) {
-		DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
+		DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter");
 		ret = EINVAL;
 		goto error_exit;
 	}
@@ -809,7 +809,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
 	if (cos_ports > resources_asked->num_ldb_ports ||
 	    (cos_ports && dlb2->max_cos_port >= resources_asked->num_ldb_ports)) {
-		DLB2_LOG_ERR("dlb2: num_ldb_ports < cos_ports\n");
+		DLB2_LOG_ERR("dlb2: num_ldb_ports < cos_ports");
 		ret = EINVAL;
 		goto error_exit;
 	}
@@ -832,7 +832,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
 		evdev_dlb2_default_info.max_event_port_dequeue_depth;
 
 	if (device_version == DLB2_HW_V2_5) {
-		DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d\n",
+		DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d",
 			     cfg->num_ldb_queues,
 			     resources_asked->num_ldb_ports,
 			     cfg->num_dir_ports,
@@ -840,7 +840,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
 			     cfg->num_hist_list_entries,
 			     cfg->num_credits);
 	} else {
-		DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
+		DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d",
 			     cfg->num_ldb_queues,
 			     resources_asked->num_ldb_ports,
 			     cfg->num_dir_ports,
@@ -854,7 +854,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2,
 
 	ret = dlb2_iface_sched_domain_create(handle, cfg);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n",
+		DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s",
 			     ret,
 			     dlb2_error_strings[cfg->response.status]);
 
@@ -930,27 +930,27 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev)
 		dlb2_hw_reset_sched_domain(dev, true);
 		ret = dlb2_hw_query_resources(dlb2);
 		if (ret) {
-			DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
+			DLB2_LOG_ERR("get resources err=%d, devid=%d",
 				     ret, data->dev_id);
 			return ret;
 		}
 	}
 
 	if (config->nb_event_queues > rsrcs->num_queues) {
-		DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
+		DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).",
 			     config->nb_event_queues,
 			     rsrcs->num_queues);
 		return -EINVAL;
 	}
 	if (config->nb_event_ports > (rsrcs->num_ldb_ports
 			+ rsrcs->num_dir_ports)) {
-		DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
+		DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).",
 			     config->nb_event_ports,
 			     (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
 		return -EINVAL;
 	}
 	if (config->nb_events_limit > rsrcs->nb_events_limit) {
-		DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
+		DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).",
 			     config->nb_events_limit,
 			     rsrcs->nb_events_limit);
 		return -EINVAL;
@@ -1000,7 +1000,7 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev)
 
 	if (dlb2_hw_create_sched_domain(dlb2, handle, rsrcs,
 					dlb2->version) < 0) {
-		DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
+		DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed");
 		return -ENODEV;
 	}
 
@@ -1068,7 +1068,7 @@ dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)
 
 	ret = dlb2_iface_get_sn_allocation(handle, &cfg);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)\n",
+		DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)",
 			     ret, dlb2_error_strings[cfg.response.status]);
 		return ret;
 	}
@@ -1088,7 +1088,7 @@ dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)
 
 	ret = dlb2_iface_set_sn_allocation(handle, &cfg);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)\n",
+		DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)",
 			     ret, dlb2_error_strings[cfg.response.status]);
 		return ret;
 	}
@@ -1107,7 +1107,7 @@ dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)
 
 	ret = dlb2_iface_get_sn_occupancy(handle, &cfg);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)\n",
+		DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)",
 			     ret, dlb2_error_strings[cfg.response.status]);
 		return ret;
 	}
@@ -1161,7 +1161,7 @@ dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,
 	}
 
 	if (i == DLB2_NUM_SN_GROUPS) {
-		DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
+		DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots",
 			     __func__, sequence_numbers);
 		return;
 	}
@@ -1236,7 +1236,7 @@ dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
 
 	ret = dlb2_iface_ldb_queue_create(handle, &cfg);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)\n",
+		DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)",
 			     ret, dlb2_error_strings[cfg.response.status]);
 		return -EINVAL;
 	}
@@ -1250,7 +1250,7 @@ dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
 	queue->sched_type = sched_type;
 	queue->config_state = DLB2_CONFIGURED;
 
-	DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
+	DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d",
 		     qm_qid,
 		     cfg.num_atomic_inflights,
 		     cfg.num_sequence_numbers,
@@ -1272,7 +1272,7 @@ dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
 
 	qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);
 	if (qm_qid < 0) {
-		DLB2_LOG_ERR("Failed to create the load-balanced queue\n");
+		DLB2_LOG_ERR("Failed to create the load-balanced queue");
 		return qm_qid;
 	}
 
@@ -1380,7 +1380,7 @@ dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name)
 			RTE_CACHE_LINE_SIZE);
 
 	if (qe == NULL)	{
-		DLB2_LOG_ERR("dlb2: no memory for consume_qe\n");
+		DLB2_LOG_ERR("dlb2: no memory for consume_qe");
 		return -ENOMEM;
 	}
 	qm_port->consume_qe = qe;
@@ -1412,7 +1412,7 @@ dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name)
 			RTE_CACHE_LINE_SIZE);
 
 	if (qe == NULL)	{
-		DLB2_LOG_ERR("dlb2: no memory for complete_qe\n");
+		DLB2_LOG_ERR("dlb2: no memory for complete_qe");
 		return -ENOMEM;
 	}
 	qm_port->int_arm_qe = qe;
@@ -1440,20 +1440,20 @@ dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)
 
 	qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
 
 	if (qm_port->qe4 == NULL) {
-		DLB2_LOG_ERR("dlb2: no qe4 memory\n");
+		DLB2_LOG_ERR("dlb2: no qe4 memory");
 		ret = -ENOMEM;
 		goto error_exit;
 	}
 
 	ret = dlb2_init_int_arm_qe(qm_port, mz_name);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d\n", ret);
+		DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d", ret);
 		goto error_exit;
 	}
 
 	ret = dlb2_init_consume_qe(qm_port, mz_name);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d\n", ret);
+		DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d", ret);
 		goto error_exit;
 	}
 
@@ -1536,14 +1536,14 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 		return -EINVAL;
 
 	if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
-		DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d\n",
+		DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d",
 			     DLB2_MIN_CQ_DEPTH);
 		return -EINVAL;
 	}
 
 	if (dlb2->version == DLB2_HW_V2 && ev_port->cq_weight != 0 &&
 	    ev_port->cq_weight > dequeue_depth) {
-		DLB2_LOG_ERR("dlb2: invalid cq dequeue depth %d, must be >= cq weight %d\n",
+		DLB2_LOG_ERR("dlb2: invalid cq dequeue depth %d, must be >= cq weight %d",
 			     dequeue_depth, ev_port->cq_weight);
 		return -EINVAL;
 	}
@@ -1579,14 +1579,14 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 
 	ret = dlb2_iface_ldb_port_create(handle, &cfg, dlb2->poll_mode);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)\n",
+		DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)",
 			     ret, dlb2_error_strings[cfg.response.status]);
 		goto error_exit;
 	}
 
 	qm_port_id = cfg.response.id;
 
-	DLB2_LOG_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<\n",
+	DLB2_LOG_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<",
 		     ev_port->id, qm_port_id);
 
 	qm_port = &ev_port->qm_port;
@@ -1602,7 +1602,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 
 	ret = dlb2_init_qe_mem(qm_port, mz_name);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
+		DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d", ret);
 		goto error_exit;
 	}
 
@@ -1615,7 +1615,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 
 		ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args);
 		if (ret < 0) {
-			DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n",
+			DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)",
 				     ret,
 				     dlb2_error_strings[cfg.response.status]);
 			goto error_exit;
@@ -1680,7 +1680,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 		qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
 		qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
 
-		DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
+		DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d",
 			     qm_port_id,
 			     dequeue_depth,
 			     qm_port->ldb_credits,
@@ -1689,7 +1689,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 		qm_port->credits = credit_high_watermark;
 		qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
 
-		DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, credits=%d\n",
+		DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, credits=%d",
 			     qm_port_id,
 			     dequeue_depth,
 			     qm_port->credits);
@@ -1717,7 +1717,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 
 	rte_spinlock_unlock(&handle->resource_lock);
 
-	DLB2_LOG_ERR("dlb2: create ldb port failed!\n");
+	DLB2_LOG_ERR("dlb2: create ldb port failed!");
 
 	return ret;
 }
@@ -1761,13 +1761,13 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
 		return -EINVAL;
 
 	if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
-		DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d\n",
+		DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d",
 			     DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);
 		return -EINVAL;
 	}
 
 	if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
-		DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
+		DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d",
 			     DLB2_MIN_ENQUEUE_DEPTH);
 		return -EINVAL;
 	}
@@ -1802,14 +1802,14 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
 
 	ret = dlb2_iface_dir_port_create(handle, &cfg, dlb2->poll_mode);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n",
+		DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)",
 			     ret, dlb2_error_strings[cfg.response.status]);
 		goto error_exit;
 	}
 
 	qm_port_id = cfg.response.id;
 
-	DLB2_LOG_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<\n",
+	DLB2_LOG_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<",
 		     ev_port->id, qm_port_id);
 
 	qm_port = &ev_port->qm_port;
@@ -1827,7 +1827,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
 
 	ret = dlb2_init_qe_mem(qm_port, mz_name);
 
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
+		DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d", ret);
 		goto error_exit;
 	}
 
@@ -1881,7 +1881,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
 		qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
 		qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
 
-		DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
+		DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d",
 			     qm_port_id,
 			     dequeue_depth,
 			     dir_credit_high_watermark,
@@ -1890,7 +1890,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
 		qm_port->credits = credit_high_watermark;
 		qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
 
-		DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d\n",
+		DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d",
 			     qm_port_id,
 			     dequeue_depth,
 			     credit_high_watermark);
@@ -1916,7 +1916,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
 
 	rte_spinlock_unlock(&handle->resource_lock);
 
-	DLB2_LOG_ERR("dlb2: create dir port failed!\n");
+	DLB2_LOG_ERR("dlb2: create dir port failed!");
 
 	return ret;
 }
@@ -1932,7 +1932,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev,
 	int ret;
 
 	if (dev == NULL || port_conf == NULL) {
-		DLB2_LOG_ERR("Null parameter\n");
+		DLB2_LOG_ERR("Null parameter");
DLB2_LOG_ERR("Null parameter\n"); + DLB2_LOG_ERR("Null parameter"); return -EINVAL; } @@ -1950,7 +1950,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev, ev_port = &dlb2->ev_ports[ev_port_id]; /* configured? */ if (ev_port->setup_done) { - DLB2_LOG_ERR("evport %d is already configured\n", ev_port_id); + DLB2_LOG_ERR("evport %d is already configured", ev_port_id); return -EINVAL; } @@ -1982,7 +1982,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev, if (port_conf->enqueue_depth > sw_credit_quanta || port_conf->enqueue_depth > hw_credit_quanta) { - DLB2_LOG_ERR("Invalid port config. Enqueue depth %d must be <= credit quanta %d and batch size %d\n", + DLB2_LOG_ERR("Invalid port config. Enqueue depth %d must be <= credit quanta %d and batch size %d", port_conf->enqueue_depth, sw_credit_quanta, hw_credit_quanta); @@ -2004,7 +2004,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev, port_conf->dequeue_depth, port_conf->enqueue_depth); if (ret < 0) { - DLB2_LOG_ERR("Failed to create the lB port ve portId=%d\n", + DLB2_LOG_ERR("Failed to create the lB port ve portId=%d", ev_port_id); return ret; @@ -2015,7 +2015,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev, port_conf->dequeue_depth, port_conf->enqueue_depth); if (ret < 0) { - DLB2_LOG_ERR("Failed to create the DIR port\n"); + DLB2_LOG_ERR("Failed to create the DIR port"); return ret; } } @@ -2082,14 +2082,14 @@ dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle, ret = dlb2_iface_map_qid(handle, &cfg); if (ret < 0) { - DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)\n", + DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)", ret, dlb2_error_strings[cfg.response.status]); - DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d\n", + DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d", handle->domain_id, cfg.port_id, cfg.qid, cfg.priority); } else { - DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d\n", + DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d", qm_qid, qm_port_id); } @@ -2117,7 +2117,7 @@ dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2, first_avail = i; } if (first_avail == -1) { - DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.\n", + DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.", ev_port->qm_port.id); return -EINVAL; } @@ -2154,7 +2154,7 @@ dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2, ret = dlb2_iface_dir_queue_create(handle, &cfg); if (ret < 0) { - DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)\n", + DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)", ret, dlb2_error_strings[cfg.response.status]); return -EINVAL; } @@ -2172,7 +2172,7 @@ dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2, qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id); if (qm_qid < 0) { - DLB2_LOG_ERR("Failed to create the DIR queue\n"); + DLB2_LOG_ERR("Failed to create the DIR queue"); return qm_qid; } @@ -2202,7 +2202,7 @@ dlb2_do_port_link(struct rte_eventdev *dev, err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio); if (err) { - DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n", + DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d", ev_queue->qm_queue.is_directed ? 
"DIR" : "LDB", ev_queue->id, ev_port->id); @@ -2240,7 +2240,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port, queue_is_dir = ev_queue->qm_queue.is_directed; if (port_is_dir != queue_is_dir) { - DLB2_LOG_ERR("%s queue %u can't link to %s port %u\n", + DLB2_LOG_ERR("%s queue %u can't link to %s port %u", queue_is_dir ? "DIR" : "LDB", ev_queue->id, port_is_dir ? "DIR" : "LDB", ev_port->id); @@ -2250,7 +2250,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port, /* Check if there is space for the requested link */ if (!link_exists && index == -1) { - DLB2_LOG_ERR("no space for new link\n"); + DLB2_LOG_ERR("no space for new link"); rte_errno = -ENOSPC; return -1; } @@ -2258,7 +2258,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port, /* Check if the directed port is already linked */ if (ev_port->qm_port.is_directed && ev_port->num_links > 0 && !link_exists) { - DLB2_LOG_ERR("Can't link DIR port %d to >1 queues\n", + DLB2_LOG_ERR("Can't link DIR port %d to >1 queues", ev_port->id); rte_errno = -EINVAL; return -1; @@ -2267,7 +2267,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port, /* Check if the directed queue is already linked */ if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 && !link_exists) { - DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports\n", + DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports", ev_queue->id); rte_errno = -EINVAL; return -1; @@ -2289,14 +2289,14 @@ dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port, RTE_SET_USED(dev); if (ev_port == NULL) { - DLB2_LOG_ERR("dlb2: evport not setup\n"); + DLB2_LOG_ERR("dlb2: evport not setup"); rte_errno = -EINVAL; return 0; } if (!ev_port->setup_done && ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) { - DLB2_LOG_ERR("dlb2: evport not setup\n"); + DLB2_LOG_ERR("dlb2: evport not setup"); rte_errno = -EINVAL; return 0; } @@ -2305,13 +2305,13 @@ dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port, * queues pointer. */ if (nb_links == 0) { - DLB2_LOG_DBG("dlb2: nb_links is 0\n"); + DLB2_LOG_DBG("dlb2: nb_links is 0"); return 0; /* Ignore and return success */ } dlb2 = ev_port->dlb2; - DLB2_LOG_DBG("Linking %u queues to %s port %d\n", + DLB2_LOG_DBG("Linking %u queues to %s port %d", nb_links, ev_port->qm_port.is_directed ? "DIR" : "LDB", ev_port->id); @@ -2381,7 +2381,7 @@ dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle, ret = dlb2_iface_unmap_qid(handle, &cfg); if (ret < 0) - DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n", + DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)", ret, dlb2_error_strings[cfg.response.status]); return ret; @@ -2408,7 +2408,7 @@ dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2, * It blindly attempts to unmap all queues. 
@@ -2434,19 +2434,19 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
 	RTE_SET_USED(dev);
 
 	if (!ev_port->setup_done) {
-		DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
+		DLB2_LOG_ERR("dlb2: evport %d is not configured",
 			     ev_port->id);
 		rte_errno = -EINVAL;
 		return 0;
 	}
 
 	if (queues == NULL || nb_unlinks == 0) {
-		DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n");
+		DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0");
 		return 0; /* Ignore and return success */
 	}
 
 	if (ev_port->qm_port.is_directed) {
-		DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
+		DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d",
 			     ev_port->id);
 		rte_errno = 0;
 		return nb_unlinks; /* as if success */
@@ -2459,7 +2459,7 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
 		int ret, j;
 
 		if (queues[i] >= dlb2->num_queues) {
-			DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]);
+			DLB2_LOG_ERR("dlb2: invalid queue id %d", queues[i]);
 			rte_errno = -EINVAL;
 			return i; /* return index of offending queue */
 		}
@@ -2477,7 +2477,7 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
 
 		ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue);
 		if (ret) {
-			DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n",
+			DLB2_LOG_ERR("unlink err=%d for port %d queue %d",
 				     ret, ev_port->id, queues[i]);
 			rte_errno = -ENOENT;
 			return i; /* return index of offending queue */
@@ -2504,7 +2504,7 @@ dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
 	RTE_SET_USED(dev);
 
 	if (!ev_port->setup_done) {
-		DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
+		DLB2_LOG_ERR("dlb2: evport %d is not configured",
 			     ev_port->id);
 		rte_errno = -EINVAL;
 		return 0;
@@ -2516,7 +2516,7 @@ dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
 
 	ret = dlb2_iface_pending_port_unmaps(handle, &cfg);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n",
+		DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)",
 			     ret, dlb2_error_strings[cfg.response.status]);
 		return ret;
 	}
@@ -2609,7 +2609,7 @@ dlb2_eventdev_start(struct rte_eventdev *dev)
 
 	rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
 	if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) {
-		DLB2_LOG_ERR("bad state %d for dev_start\n",
+		DLB2_LOG_ERR("bad state %d for dev_start",
 			     (int)dlb2->run_state);
 		rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
 		return -EINVAL;
@@ -2645,13 +2645,13 @@ dlb2_eventdev_start(struct rte_eventdev *dev)
 
 	ret = dlb2_iface_sched_domain_start(handle, &cfg);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)\n",
+		DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)",
 			     ret, dlb2_error_strings[cfg.response.status]);
 		return ret;
 	}
 
 	dlb2->run_state = DLB2_RUN_STATE_STARTED;
-	DLB2_LOG_DBG("dlb2: sched_domain_start completed OK\n");
+	DLB2_LOG_DBG("dlb2: sched_domain_start completed OK");
 
 	return 0;
 }
@@ -2746,7 +2746,7 @@ dlb2_check_enqueue_hw_ldb_credits(struct dlb2_port *qm_port)
 			DLB2_INC_STAT(
 			qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
 			1);
-			DLB2_LOG_DBG("ldb credits exhausted\n");
+			DLB2_LOG_DBG("ldb credits exhausted");
 			return 1; /* credits exhausted */
 		}
 	}
@@ -2765,7 +2765,7 @@ dlb2_check_enqueue_hw_dir_credits(struct dlb2_port *qm_port)
 			DLB2_INC_STAT(
 			qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
 			1);
-			DLB2_LOG_DBG("dir credits exhausted\n");
+			DLB2_LOG_DBG("dir credits exhausted");
 			return 1; /* credits exhausted */
 		}
 	}
@@ -2783,7 +2783,7 @@ dlb2_check_enqueue_hw_credits(struct dlb2_port *qm_port)
 		if (unlikely(qm_port->cached_credits == 0)) {
 			DLB2_INC_STAT(
 			qm_port->ev_port->stats.traffic.tx_nospc_hw_credits,
 			1);
-			DLB2_LOG_DBG("credits exhausted\n");
+			DLB2_LOG_DBG("credits exhausted");
 			return 1; /* credits exhausted */
 		}
 	}
@@ -2817,7 +2817,7 @@ dlb2_consume_qe_immediate(struct dlb2_port *qm_port, int num)
 
 	dlb2_movntdq_single(port_data->pp_addr, qe);
 
-	DLB2_LOG_DBG("dlb2: consume immediate - %d QEs\n", num);
+	DLB2_LOG_DBG("dlb2: consume immediate - %d QEs", num);
 
 	qm_port->owed_tokens = 0;
 
@@ -2888,9 +2888,9 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 		}
 		switch (ev->sched_type) {
 		case RTE_SCHED_TYPE_ORDERED:
-			DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n");
+			DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED");
 			if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
-				DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d\n",
+				DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d",
 					     *queue_id);
 				rte_errno = -EINVAL;
 				return 1;
@@ -2898,18 +2898,18 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 			*sched_type = DLB2_SCHED_ORDERED;
 			break;
 		case RTE_SCHED_TYPE_ATOMIC:
-			DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
+			DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ATOMIC");
 			*sched_type = DLB2_SCHED_ATOMIC;
 			break;
 		case RTE_SCHED_TYPE_PARALLEL:
-			DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
+			DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_PARALLEL");
 			if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
 				*sched_type = DLB2_SCHED_ORDERED;
 			else
 				*sched_type = DLB2_SCHED_UNORDERED;
 			break;
 		default:
-			DLB2_LOG_ERR("Unsupported LDB sched type in put_qe\n");
+			DLB2_LOG_ERR("Unsupported LDB sched type in put_qe");
 			DLB2_INC_STAT(ev_port->stats.tx_invalid, 1);
 			rte_errno = -EINVAL;
 			return 1;
@@ -2930,7 +2930,7 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 			}
 			cached_credits = &qm_port->cached_credits;
 		}
-		DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
+		DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED");
 		*sched_type = DLB2_SCHED_DIRECTED;
 	}
 
@@ -3156,7 +3156,7 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,
 	int i;
 
 	if (port_id > dlb2->num_ports) {
-		DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release\n",
+		DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release",
 			     port_id);
 		rte_errno = -EINVAL;
 		return;
@@ -3213,7 +3213,7 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,
 sw_credit_update:
 	/* each release returns one credit */
 	if (unlikely(!ev_port->outstanding_releases)) {
-		DLB2_LOG_ERR("%s: Outstanding releases underflowed.\n",
+		DLB2_LOG_ERR("%s: Outstanding releases underflowed.",
 			     __func__);
 		return;
 	}
@@ -3367,7 +3367,7 @@ dlb2_process_dequeue_qes(struct dlb2_eventdev_port *ev_port,
 		 * buffer is a mbuf.
 		 */
 		if (unlikely(qe->error)) {
-			DLB2_LOG_ERR("QE error bit ON\n");
+			DLB2_LOG_ERR("QE error bit ON");
 			DLB2_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
 			dlb2_consume_qe_immediate(qm_port, 1);
 			continue; /* Ignore */
@@ -4281,7 +4281,7 @@ dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2,
 
 	ret = dlb2_iface_get_ldb_queue_depth(handle, &cfg);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)\n",
+		DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)",
 			     ret, dlb2_error_strings[cfg.response.status]);
 		return ret;
 	}
@@ -4301,7 +4301,7 @@ dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2,
 
 	ret = dlb2_iface_get_dir_queue_depth(handle, &cfg);
 	if (ret < 0) {
-		DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)\n",
+		DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)",
 			     ret, dlb2_error_strings[cfg.response.status]);
 		return ret;
 	}
@@ -4392,7 +4392,7 @@ dlb2_drain(struct rte_eventdev *dev)
 	}
 
 	if (i == dlb2->num_ports) {
-		DLB2_LOG_ERR("internal error: no LDB ev_ports\n");
+		DLB2_LOG_ERR("internal error: no LDB ev_ports");
 		return;
 	}
 
@@ -4400,7 +4400,7 @@ dlb2_drain(struct rte_eventdev *dev)
 	rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
 
 	if (rte_errno) {
-		DLB2_LOG_ERR("internal error: failed to unlink ev_port %d\n",
+		DLB2_LOG_ERR("internal error: failed to unlink ev_port %d",
 			     ev_port->id);
 		return;
 	}
@@ -4418,7 +4418,7 @@ dlb2_drain(struct rte_eventdev *dev)
 		/* Link the ev_port to the queue */
 		ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
 		if (ret != 1) {
-			DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
+			DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d",
 				     ev_port->id, qid);
 			return;
 		}
@@ -4433,7 +4433,7 @@ dlb2_drain(struct rte_eventdev *dev)
 		/* Unlink the ev_port from the queue */
 		ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
 		if (ret != 1) {
-			DLB2_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d\n",
+			DLB2_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d",
 				     ev_port->id, qid);
 			return;
 		}
@@ -4448,11 +4448,11 @@ dlb2_eventdev_stop(struct rte_eventdev *dev)
 	rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
 
 	if (dlb2->run_state == DLB2_RUN_STATE_STOPPED) {
-		DLB2_LOG_DBG("Internal error: already stopped\n");
+		DLB2_LOG_DBG("Internal error: already stopped");
 		rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
 		return;
 	} else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) {
-		DLB2_LOG_ERR("Internal error: bad state %d for dev_stop\n",
+		DLB2_LOG_ERR("Internal error: bad state %d for dev_stop",
 			     (int)dlb2->run_state);
 		rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
 		return;
@@ -4608,7 +4608,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
 
 	err = dlb2_iface_open(&dlb2->qm_instance, name);
 	if (err < 0) {
-		DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
+		DLB2_LOG_ERR("could not open event hardware device, err=%d",
 			     err);
 		return err;
 	}
@@ -4616,14 +4616,14 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
 	err = dlb2_iface_get_device_version(&dlb2->qm_instance,
 					    &dlb2->revision);
 	if (err < 0) {
-		DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
+		DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d",
 			     err);
 		return err;
 	}
 
 	err = dlb2_hw_query_resources(dlb2);
 	if (err) {
-		DLB2_LOG_ERR("get resources err=%d for %s\n",
+		DLB2_LOG_ERR("get resources err=%d for %s",
 			     err, name);
 		return err;
 	}
@@ -4646,7 +4646,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
 		break;
 	}
 	if (ret) {
-		DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d\n",
+		DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d",
 			     err);
 		return err;
 	}
@@ -4654,7 +4654,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
 
 	err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
 	if (err < 0) {
-		DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
+		DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d",
 			     err);
 		return err;
 	}
@@ -4662,7 +4662,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
 	/* Complete xtstats runtime initialization */
 	err = dlb2_xstats_init(dlb2);
 	if (err) {
-		DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
+		DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d", err);
 		return err;
 	}
 
@@ -4692,14 +4692,14 @@ dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
 
 	err = dlb2_iface_open(&dlb2->qm_instance, name);
 	if (err < 0) {
-		DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
+		DLB2_LOG_ERR("could not open event hardware device, err=%d",
 			     err);
 		return err;
 	}
 
 	err = dlb2_hw_query_resources(dlb2);
 	if (err) {
-		DLB2_LOG_ERR("get resources err=%d for %s\n",
+		DLB2_LOG_ERR("get resources err=%d for %s",
 			     err, name);
 		return err;
 	}
@@ -4741,9 +4741,8 @@ dlb2_parse_params(const char *params,
 	struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
 
 	if (kvlist == NULL) {
-		RTE_LOG(INFO, PMD,
-			"Ignoring unsupported parameters when creating device '%s'\n",
-			name);
+		DLB2_LOG_INFO("Ignoring unsupported parameters when creating device '%s'",
+			      name);
 	} else {
 		int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
 					     set_numa_node,
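
[Editor's note for readers following the series: the quoted diff above is truncated
by the archive. To make the pattern concrete, here is a minimal, self-contained
sketch of what a dedicated dynamic logtype looks like in a DPDK PMD. The names
(EX_LOG*, example_dlb2_log_level) are illustrative only, not the dlb2 driver's
actual definitions, which live in the driver's own sources; it also assumes, as
the \n removals in this patch suggest, that the series makes the driver's log
wrappers append the newline in one place.]

	#include <rte_log.h>

	/* Register a dedicated dynamic logtype for this PMD (illustrative
	 * name). Messages can then be filtered per driver at runtime via
	 * the EAL --log-level option, instead of landing in the shared
	 * generic RTE_LOGTYPE_PMD bucket this patch moves away from.
	 */
	RTE_LOG_REGISTER_DEFAULT(example_dlb2_log_level, NOTICE);

	/* Route every driver message through the dedicated logtype and
	 * append the trailing newline in exactly one place, so individual
	 * call sites no longer need (or want) a "\n" in their format
	 * strings.
	 */
	#define EX_LOG(level, fmt, ...) \
		rte_log(RTE_LOG_ ## level, example_dlb2_log_level, \
			"%s(): " fmt "\n", __func__, ##__VA_ARGS__)

	#define EX_LOG_ERR(fmt, ...)  EX_LOG(ERR, fmt, ##__VA_ARGS__)
	#define EX_LOG_INFO(fmt, ...) EX_LOG(INFO, fmt, ##__VA_ARGS__)
	#define EX_LOG_DBG(fmt, ...)  EX_LOG(DEBUG, fmt, ##__VA_ARGS__)

With a wrapper of this shape, a call such as EX_LOG_ERR("NULL pointer") emits one
complete line, which is why every DLB2_LOG_* call site in this patch can safely
drop its explicit "\n" while switching off the shared PMD logtype.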