@@ -42,7 +42,7 @@ if_destructor(struct __fman_if *__if)
if (!__if)
return;
- if (__if->__if.mac_type == fman_offline)
+ if (__if->__if.mac_type == fman_offline_internal)
goto cleanup;
list_for_each_entry_safe(bp, tmpbp, &__if->__if.bpool_list, node) {
@@ -72,10 +72,11 @@ TAILQ_HEAD(rte_fman_if_list, __fman_if);
/* Represents the different flavour of network interface */
enum fman_mac_type {
- fman_offline = 0,
+ fman_offline_internal = 0,
fman_mac_1g,
fman_mac_10g,
fman_mac_2_5g,
+ fman_onic,
};
struct mac_addr {
@@ -252,7 +252,6 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
DPAA_PMD_ERR("Cannot open IF socket");
return -errno;
}
-
strncpy(ifr.ifr_name, dpaa_intf->name, IFNAMSIZ - 1);
if (ioctl(socket_fd, SIOCGIFMTU, &ifr) < 0) {
@@ -1958,6 +1957,41 @@ dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
return 0;
}
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
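+/* Set up the per-interface RX/TX error debug queues. Skipped for shared
+ * MACs, whose error queues are drained by the kernel.
+ */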
+static int
+dpaa_error_queue_init(struct dpaa_if *dpaa_intf,
+ struct fman_if *fman_intf)
+{
+ int i, ret;
+ struct qman_fq *err_queues = dpaa_intf->debug_queues;
+ uint32_t err_fqid = 0;
+
+ if (fman_intf->is_shared_mac) {
+		DPAA_PMD_DEBUG("Shared MAC's error queues are handled in the kernel");
+ return 0;
+ }
+
+ for (i = 0; i < DPAA_DEBUG_FQ_MAX_NUM; i++) {
+ if (i == DPAA_DEBUG_FQ_RX_ERROR)
+ err_fqid = fman_intf->fqid_rx_err;
+ else if (i == DPAA_DEBUG_FQ_TX_ERROR)
+ err_fqid = fman_intf->fqid_tx_err;
+ else
+ continue;
+ ret = dpaa_def_queue_init(&err_queues[i], err_fqid);
+ if (ret) {
+ DPAA_PMD_ERR("DPAA %s ERROR queue init failed!",
+ i == DPAA_DEBUG_FQ_RX_ERROR ?
+ "RX" : "TX");
+ return ret;
+ }
+ err_queues[i].dpaa_intf = dpaa_intf;
+ }
+
+ return 0;
+}
+#endif
+
/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
@@ -2162,22 +2196,11 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
}
dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
-#if defined(RTE_LIBRTE_DPAA_DEBUG_DRIVER) || defined(RTE_LIBRTE_IEEE1588)
- ret = dpaa_def_queue_init(&dpaa_intf->debug_queues
- [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
- if (ret) {
- DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
- goto free_tx;
- }
- dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
- ret = dpaa_def_queue_init(&dpaa_intf->debug_queues
- [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
- if (ret) {
- DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
+#if defined(RTE_LIBRTE_DPAA_DEBUG_DRIVER)
+ ret = dpaa_error_queue_init(dpaa_intf, fman_intf);
+ if (ret)
goto free_tx;
- }
#endif
-
DPAA_PMD_DEBUG("All frame queues created");
/* Get the initial configuration for flow control */
@@ -78,8 +78,11 @@
#define DPAA_IF_RX_CONTEXT_STASH 0
/* Each "debug" FQ is represented by one of these */
-#define DPAA_DEBUG_FQ_RX_ERROR 0
-#define DPAA_DEBUG_FQ_TX_ERROR 1
+enum {
+ DPAA_DEBUG_FQ_RX_ERROR,
+ DPAA_DEBUG_FQ_TX_ERROR,
+ DPAA_DEBUG_FQ_MAX_NUM
+};
#define DPAA_RSS_OFFLOAD_ALL ( \
RTE_ETH_RSS_L2_PAYLOAD | \
@@ -107,6 +110,10 @@
#define DPAA_FD_CMD_CFQ 0x00ffffff
/**< Confirmation Frame Queue */
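+/* First MAC index of each FMan MAC flavour; 2.5G MACs occupy the same
+ * index range as 10G MACs.
+ */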
+#define DPAA_1G_MAC_START_IDX 1
+#define DPAA_10G_MAC_START_IDX 9
+#define DPAA_2_5G_MAC_START_IDX DPAA_10G_MAC_START_IDX
+
#define DPAA_DEFAULT_RXQ_VSP_ID 1
#define FMC_FILE "/tmp/fmc.bin"
@@ -133,7 +140,7 @@ struct dpaa_if {
struct qman_fq *tx_queues;
struct qman_fq *tx_conf_queues;
struct qman_cgr *cgr_tx;
- struct qman_fq debug_queues[2];
+ struct qman_fq debug_queues[DPAA_DEBUG_FQ_MAX_NUM];
uint16_t nb_rx_queues;
uint16_t nb_tx_queues;
uint32_t ifid;
@@ -651,7 +651,13 @@ static inline int set_pcd_netenv_scheme(struct dpaa_if *dpaa_intf,
static inline int get_port_type(struct fman_if *fif)
{
- if (fif->mac_type == fman_mac_1g)
+	/* 1G ports on fm-mac9 and fm-mac10 occupy 10G MAC slots; configure
+	 * their VSP as 10G ports so that the kernel configures the correct
+	 * port.
+	 */
+ if (fif->mac_type == fman_mac_1g &&
+ fif->mac_idx >= DPAA_10G_MAC_START_IDX)
+ return e_FM_PORT_TYPE_RX_10G;
+ else if (fif->mac_type == fman_mac_1g)
return e_FM_PORT_TYPE_RX;
else if (fif->mac_type == fman_mac_2_5g)
return e_FM_PORT_TYPE_RX_2_5G;
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017-2021 NXP
+ * Copyright 2017-2023 NXP
*/
/* System headers */
@@ -204,139 +204,258 @@ struct fmc_model_t {
struct fmc_model_t *g_fmc_model;
-static int dpaa_port_fmc_port_parse(struct fman_if *fif,
- const struct fmc_model_t *fmc_model,
- int apply_idx)
+static int
+dpaa_port_fmc_port_parse(struct fman_if *fif,
+ const struct fmc_model_t *fmc_model,
+ int apply_idx)
{
int current_port = fmc_model->apply_order[apply_idx].index;
const fmc_port *pport = &fmc_model->port[current_port];
- const uint8_t mac_idx[] = {-1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1};
- const uint8_t mac_type[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2};
+ uint32_t num;
+
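+	/* OH (offline/ONIC) ports match on the raw port number; Ethernet RX
+	 * ports map the port number to a MAC index via the per-speed start
+	 * index.
+	 */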
+ if (pport->type == e_FM_PORT_TYPE_OH_OFFLINE_PARSING &&
+ pport->number == fif->mac_idx &&
+ (fif->mac_type == fman_offline_internal ||
+ fif->mac_type == fman_onic))
+ return current_port;
+
+ if (fif->mac_type == fman_mac_1g) {
+ if (pport->type != e_FM_PORT_TYPE_RX)
+ return -ENODEV;
+ num = pport->number + DPAA_1G_MAC_START_IDX;
+ if (fif->mac_idx == num)
+ return current_port;
+
+ return -ENODEV;
+ }
+
+ if (fif->mac_type == fman_mac_2_5g) {
+ if (pport->type != e_FM_PORT_TYPE_RX_2_5G)
+ return -ENODEV;
+ num = pport->number + DPAA_2_5G_MAC_START_IDX;
+ if (fif->mac_idx == num)
+ return current_port;
+
+ return -ENODEV;
+ }
+
+ if (fif->mac_type == fman_mac_10g) {
+ if (pport->type != e_FM_PORT_TYPE_RX_10G)
+ return -ENODEV;
+ num = pport->number + DPAA_10G_MAC_START_IDX;
+ if (fif->mac_idx == num)
+ return current_port;
+
+ return -ENODEV;
+ }
+
+ DPAA_PMD_ERR("Invalid MAC(mac_idx=%d) type(%d)",
+ fif->mac_idx, fif->mac_type);
+
+ return -EINVAL;
+}
+
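+/* A FQID is owned by the kernel when the MAC is shared and the FQ is the
+ * default RXQ, one of the kernel's PCD FQs, or an error FQ.
+ */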
+static int
+dpaa_fq_is_in_kernel(uint32_t fqid,
+ struct fman_if *fif)
+{
+ if (!fif->is_shared_mac)
+ return false;
+
+ if ((fqid == fif->fqid_rx_def ||
+ (fqid >= fif->fqid_rx_pcd &&
+ fqid < (fif->fqid_rx_pcd + fif->fqid_rx_pcd_count)) ||
+ fqid == fif->fqid_rx_err ||
+ fqid == fif->fqid_tx_err))
+ return true;
+
+ return false;
+}
+
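+/* The base storage profile of a shared MAC backs the kernel's skb pool,
+ * so a VSP equal to it is treated as kernel-owned.
+ */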
+static int
+dpaa_vsp_id_is_in_kernel(uint8_t vsp_id,
+ struct fman_if *fif)
+{
+ if (!fif->is_shared_mac)
+ return false;
+
+ if (vsp_id == fif->base_profile_id)
+ return true;
+
+ return false;
+}
+
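+/* Resolve the VSP used by a CC enqueue action: the overridden storage
+ * profile if present, otherwise the port's base profile.
+ */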
+static uint8_t
+dpaa_enqueue_vsp_id(struct fman_if *fif,
+ const struct ioc_fm_pcd_cc_next_enqueue_params_t *eq_param)
+{
+ if (eq_param->override_fqid)
+ return eq_param->new_relative_storage_profile_id;
+
+ return fif->base_profile_id;
+}
- if (mac_idx[fif->mac_idx] != pport->number ||
- mac_type[fif->mac_idx] != pport->type)
- return -1;
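+/* On a shared MAC, KeyGen storage stays with the kernel when the scheme
+ * is indirect or directly selects the kernel's base profile.
+ */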
+static int
+dpaa_kg_storage_is_in_kernel(struct fman_if *fif,
+ const struct ioc_fm_pcd_kg_storage_profile_t *kg_storage)
+{
+ if (!fif->is_shared_mac)
+ return false;
+
+ if (!kg_storage->direct ||
+ (kg_storage->direct &&
+ kg_storage->profile_select.direct_relative_profile_id ==
+ fif->base_profile_id))
+ return true;
- return current_port;
+ return false;
}
-static int dpaa_port_fmc_scheme_parse(struct fman_if *fif,
- const struct fmc_model_t *fmc,
- int apply_idx,
- uint16_t *rxq_idx, int max_nb_rxq,
- uint32_t *fqids, int8_t *vspids)
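+/* Drop rm_fqid from the fqids[] array collected so far, compacting the
+ * remaining entries and decrementing the RXQ count.
+ */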
+static void
+dpaa_fmc_remove_fq_from_allocated(uint32_t *fqids,
+ uint16_t *rxq_idx, uint32_t rm_fqid)
{
- int idx = fmc->apply_order[apply_idx].index;
uint32_t i;
- if (!fmc->scheme[idx].override_storage_profile &&
- fif->is_shared_mac) {
- DPAA_PMD_WARN("No VSP assigned to scheme %d for sharemac %d!",
- idx, fif->mac_idx);
- DPAA_PMD_WARN("Risk to receive pkts from skb pool to CRASH!");
+ for (i = 0; i < (*rxq_idx); i++) {
+ if (fqids[i] != rm_fqid)
+ continue;
+		DPAA_PMD_WARN("Removing previously allocated FQ(0x%08x).",
+			rm_fqid);
+ if ((*rxq_idx) > (i + 1)) {
+ memmove(&fqids[i], &fqids[i + 1],
+ ((*rxq_idx) - (i + 1)) * sizeof(uint32_t));
+ }
+ (*rxq_idx)--;
+ break;
}
+}
- if (e_IOC_FM_PCD_DONE ==
- fmc->scheme[idx].next_engine) {
- for (i = 0; i < fmc->scheme[idx]
- .key_ext_and_hash.hash_dist_num_of_fqids; i++) {
- uint32_t fqid = fmc->scheme[idx].base_fqid + i;
- int k, found = 0;
-
- if (fqid == fif->fqid_rx_def ||
- (fqid >= fif->fqid_rx_pcd &&
- fqid < (fif->fqid_rx_pcd +
- fif->fqid_rx_pcd_count))) {
- if (fif->is_shared_mac &&
- fmc->scheme[idx].override_storage_profile &&
- fmc->scheme[idx].storage_profile.direct &&
- fmc->scheme[idx].storage_profile
- .profile_select.direct_relative_profile_id !=
- fif->base_profile_id) {
- DPAA_PMD_ERR("Def RXQ must be associated with def VSP on sharemac!");
-
- return -1;
- }
- continue;
+static int
+dpaa_port_fmc_scheme_parse(struct fman_if *fif,
+ const struct fmc_model_t *fmc,
+ int apply_idx,
+ uint16_t *rxq_idx, int max_nb_rxq,
+ uint32_t *fqids, int8_t *vspids)
+{
+ int scheme_idx = fmc->apply_order[apply_idx].index;
+ int k, found = 0;
+ uint32_t i, num_rxq, fqid, rxq_idx_start = *rxq_idx;
+ const struct fm_pcd_kg_scheme_params_t *scheme;
+ const struct ioc_fm_pcd_kg_key_extract_and_hash_params_t *params;
+ const struct ioc_fm_pcd_kg_storage_profile_t *kg_storage;
+ uint8_t vsp_id;
+
+ scheme = &fmc->scheme[scheme_idx];
+ params = &scheme->key_ext_and_hash;
+ num_rxq = params->hash_dist_num_of_fqids;
+ kg_storage = &scheme->storage_profile;
+
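+	/* VSP this scheme delivers to: the direct override if set, otherwise
+	 * the port's base profile.
+	 */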
+ if (scheme->override_storage_profile && kg_storage->direct)
+ vsp_id = kg_storage->profile_select.direct_relative_profile_id;
+ else
+ vsp_id = fif->base_profile_id;
+
+ if (dpaa_kg_storage_is_in_kernel(fif, kg_storage)) {
+ DPAA_PMD_WARN("Scheme[%d]'s VSP is in kernel",
+ scheme_idx);
+		/** The FQ may have been allocated by a previous CC node or
+		 * scheme; find and remove it.
+		 */
+ for (i = 0; i < num_rxq; i++) {
+ fqid = scheme->base_fqid + i;
+ DPAA_PMD_WARN("Removed fqid(0x%08x) of Scheme[%d]",
+ fqid, scheme_idx);
+ dpaa_fmc_remove_fq_from_allocated(fqids,
+ rxq_idx, fqid);
+			if (!dpaa_fq_is_in_kernel(fqid, fif)) {
+				DPAA_PMD_WARN("Traffic to FQ(%08x)(NOT handled in kernel) will DRAIN kernel pool!",
+					fqid);
}
+ }
- if (fif->is_shared_mac &&
- !fmc->scheme[idx].override_storage_profile) {
- DPAA_PMD_ERR("RXQ to DPDK must be associated with VSP on sharemac!");
- return -1;
- }
+ return 0;
+ }
- if (fif->is_shared_mac &&
- fmc->scheme[idx].override_storage_profile &&
- fmc->scheme[idx].storage_profile.direct &&
- fmc->scheme[idx].storage_profile
- .profile_select.direct_relative_profile_id ==
- fif->base_profile_id) {
- DPAA_PMD_ERR("RXQ can't be associated with default VSP on sharemac!");
+	if (scheme->next_engine != e_IOC_FM_PCD_DONE) {
+		/** Nothing to do here; the next engine is parsed by its own
+		 * apply-order entry.
+		 */
+		DPAA_PMD_DEBUG("Scheme[%d]'s next engine(%d) is parsed separately",
+			scheme_idx, scheme->next_engine);
+		return 0;
+	}
- return -1;
- }
+ for (i = 0; i < num_rxq; i++) {
+ fqid = scheme->base_fqid + i;
+ found = 0;
- if ((*rxq_idx) >= max_nb_rxq) {
- DPAA_PMD_DEBUG("Too many queues in FMC policy"
- "%d overflow %d",
- (*rxq_idx), max_nb_rxq);
+ if (dpaa_fq_is_in_kernel(fqid, fif)) {
+ DPAA_PMD_WARN("FQ(0x%08x) is handled in kernel.",
+ fqid);
+ /** The FQ may be allocated from previous CC or scheme,
+ * remove it.
+ */
+ dpaa_fmc_remove_fq_from_allocated(fqids,
+ rxq_idx, fqid);
+ continue;
+ }
- continue;
- }
+ if ((*rxq_idx) >= max_nb_rxq) {
+			DPAA_PMD_WARN("RXQ count(%d) reached the max(%d)",
+				(*rxq_idx), max_nb_rxq);
- for (k = 0; k < (*rxq_idx); k++) {
- if (fqids[k] == fqid) {
- found = 1;
- break;
- }
- }
+ break;
+ }
- if (found)
- continue;
- fqids[(*rxq_idx)] = fqid;
- if (fmc->scheme[idx].override_storage_profile) {
- if (fmc->scheme[idx].storage_profile.direct) {
- vspids[(*rxq_idx)] =
- fmc->scheme[idx].storage_profile
- .profile_select
- .direct_relative_profile_id;
- } else {
- vspids[(*rxq_idx)] = -1;
- }
- } else {
- vspids[(*rxq_idx)] = -1;
+ for (k = 0; k < (*rxq_idx); k++) {
+ if (fqids[k] == fqid) {
+ found = 1;
+ break;
}
- (*rxq_idx)++;
}
+
+ if (found)
+ continue;
+ fqids[(*rxq_idx)] = fqid;
+ vspids[(*rxq_idx)] = vsp_id;
+
+ (*rxq_idx)++;
}
- return 0;
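+	/* Net number of RXQs added by this scheme; negative when FQs
+	 * allocated by earlier entries were removed.
+	 */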
+ return (*rxq_idx) - rxq_idx_start;
}
-static int dpaa_port_fmc_ccnode_parse(struct fman_if *fif,
- const struct fmc_model_t *fmc_model,
- int apply_idx,
- uint16_t *rxq_idx, int max_nb_rxq,
- uint32_t *fqids, int8_t *vspids)
+static int
+dpaa_port_fmc_ccnode_parse(struct fman_if *fif,
+ const struct fmc_model_t *fmc,
+ int apply_idx,
+ uint16_t *rxq_idx, int max_nb_rxq,
+ uint32_t *fqids, int8_t *vspids)
{
uint16_t j, k, found = 0;
const struct ioc_keys_params_t *keys_params;
- uint32_t fqid, cc_idx = fmc_model->apply_order[apply_idx].index;
-
- keys_params = &fmc_model->ccnode[cc_idx].keys_params;
+ const struct ioc_fm_pcd_cc_next_engine_params_t *params;
+ uint32_t fqid, cc_idx = fmc->apply_order[apply_idx].index;
+ uint32_t rxq_idx_start = *rxq_idx;
+ uint8_t vsp_id;
- if ((*rxq_idx) >= max_nb_rxq) {
- DPAA_PMD_WARN("Too many queues in FMC policy %d overflow %d",
- (*rxq_idx), max_nb_rxq);
-
- return 0;
- }
+ keys_params = &fmc->ccnode[cc_idx].keys_params;
for (j = 0; j < keys_params->num_of_keys; ++j) {
+ if ((*rxq_idx) >= max_nb_rxq) {
+			DPAA_PMD_WARN("RXQ count(%d) reached the max(%d)",
+				(*rxq_idx), max_nb_rxq);
+
+ break;
+ }
found = 0;
- fqid = keys_params->key_params[j].cc_next_engine_params
- .params.enqueue_params.new_fqid;
+ params = &keys_params->key_params[j].cc_next_engine_params;
/* We read DPDK queue from last classification rule present in
* FMC policy file. Hence, this check is required here.
@@ -344,15 +463,30 @@ static int dpaa_port_fmc_ccnode_parse(struct fman_if *fif,
* have userspace queue so that it can be used by DPDK
* application.
*/
- if (keys_params->key_params[j].cc_next_engine_params
- .next_engine != e_IOC_FM_PCD_DONE) {
- DPAA_PMD_WARN("FMC CC next engine not support");
+ if (params->next_engine != e_IOC_FM_PCD_DONE) {
+			DPAA_PMD_WARN("CC next engine(%d) not supported",
+				params->next_engine);
continue;
}
- if (keys_params->key_params[j].cc_next_engine_params
- .params.enqueue_params.action !=
+ if (params->params.enqueue_params.action !=
e_IOC_FM_PCD_ENQ_FRAME)
continue;
+
+ fqid = params->params.enqueue_params.new_fqid;
+ vsp_id = dpaa_enqueue_vsp_id(fif,
+			&params->params.enqueue_params);
+ if (dpaa_fq_is_in_kernel(fqid, fif) ||
+ dpaa_vsp_id_is_in_kernel(vsp_id, fif)) {
+ DPAA_PMD_DEBUG("FQ(0x%08x)/VSP(%d) is in kernel.",
+ fqid, vsp_id);
+ /** The FQ may be allocated from previous CC or scheme,
+ * remove it.
+ */
+ dpaa_fmc_remove_fq_from_allocated(fqids,
+ rxq_idx, fqid);
+ continue;
+ }
+
for (k = 0; k < (*rxq_idx); k++) {
if (fqids[k] == fqid) {
found = 1;
@@ -362,38 +496,22 @@ static int dpaa_port_fmc_ccnode_parse(struct fman_if *fif,
if (found)
continue;
- if ((*rxq_idx) >= max_nb_rxq) {
- DPAA_PMD_WARN("Too many queues in FMC policy %d overflow %d",
- (*rxq_idx), max_nb_rxq);
-
- return 0;
- }
-
fqids[(*rxq_idx)] = fqid;
- vspids[(*rxq_idx)] =
- keys_params->key_params[j].cc_next_engine_params
- .params.enqueue_params
- .new_relative_storage_profile_id;
-
- if (vspids[(*rxq_idx)] == fif->base_profile_id &&
- fif->is_shared_mac) {
- DPAA_PMD_ERR("VSP %d can NOT be used on DPDK.",
- vspids[(*rxq_idx)]);
- DPAA_PMD_ERR("It is associated to skb pool of shared interface.");
- return -1;
- }
+ vspids[(*rxq_idx)] = vsp_id;
+
(*rxq_idx)++;
}
- return 0;
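+	/* Net number of RXQs added by this CC node. */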
+ return (*rxq_idx) - rxq_idx_start;
}
-int dpaa_port_fmc_init(struct fman_if *fif,
- uint32_t *fqids, int8_t *vspids, int max_nb_rxq)
+int
+dpaa_port_fmc_init(struct fman_if *fif,
+ uint32_t *fqids, int8_t *vspids, int max_nb_rxq)
{
int current_port = -1, ret;
uint16_t rxq_idx = 0;
- const struct fmc_model_t *fmc_model;
+ const struct fmc_model_t *fmc;
uint32_t i;
if (!g_fmc_model) {
@@ -402,14 +520,14 @@ int dpaa_port_fmc_init(struct fman_if *fif,
if (!fp) {
DPAA_PMD_ERR("%s not exists", FMC_FILE);
- return -1;
+ return -ENOENT;
}
g_fmc_model = rte_malloc(NULL, sizeof(struct fmc_model_t), 64);
if (!g_fmc_model) {
DPAA_PMD_ERR("FMC memory alloc failed");
fclose(fp);
- return -1;
+ return -ENOBUFS;
}
bytes_read = fread(g_fmc_model,
@@ -419,25 +537,28 @@ int dpaa_port_fmc_init(struct fman_if *fif,
fclose(fp);
rte_free(g_fmc_model);
g_fmc_model = NULL;
- return -1;
+ return -EIO;
}
fclose(fp);
}
- fmc_model = g_fmc_model;
+ fmc = g_fmc_model;
- if (fmc_model->format_version != FMC_OUTPUT_FORMAT_VER)
- return -1;
+ if (fmc->format_version != FMC_OUTPUT_FORMAT_VER) {
+ DPAA_PMD_ERR("FMC version(0x%08x) != Supported ver(0x%08x)",
+ fmc->format_version, FMC_OUTPUT_FORMAT_VER);
+ return -EINVAL;
+ }
- for (i = 0; i < fmc_model->apply_order_count; i++) {
- switch (fmc_model->apply_order[i].type) {
+ for (i = 0; i < fmc->apply_order_count; i++) {
+ switch (fmc->apply_order[i].type) {
case fmcengine_start:
break;
case fmcengine_end:
break;
case fmcport_start:
current_port = dpaa_port_fmc_port_parse(fif,
- fmc_model, i);
+ fmc, i);
break;
case fmcport_end:
break;
@@ -445,24 +566,24 @@ int dpaa_port_fmc_init(struct fman_if *fif,
if (current_port < 0)
break;
- ret = dpaa_port_fmc_scheme_parse(fif, fmc_model,
- i, &rxq_idx,
- max_nb_rxq,
- fqids, vspids);
- if (ret)
- return ret;
+ ret = dpaa_port_fmc_scheme_parse(fif, fmc,
+ i, &rxq_idx, max_nb_rxq, fqids, vspids);
+ DPAA_PMD_INFO("%s %d RXQ(s) from scheme[%d]",
+ ret >= 0 ? "Alloc" : "Remove",
+ ret >= 0 ? ret : -ret,
+ fmc->apply_order[i].index);
break;
case fmcccnode:
if (current_port < 0)
break;
- ret = dpaa_port_fmc_ccnode_parse(fif, fmc_model,
- i, &rxq_idx,
- max_nb_rxq, fqids,
- vspids);
- if (ret)
- return ret;
+ ret = dpaa_port_fmc_ccnode_parse(fif, fmc,
+ i, &rxq_idx, max_nb_rxq, fqids, vspids);
+ DPAA_PMD_INFO("%s %d RXQ(s) from cc[%d]",
+ ret >= 0 ? "Alloc" : "Remove",
+ ret >= 0 ? ret : -ret,
+ fmc->apply_order[i].index);
break;
case fmchtnode:
@@ -693,13 +693,26 @@ dpaa_rx_cb_atomic(void *event,
}
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
-static inline void dpaa_eth_err_queue(struct dpaa_if *dpaa_intf)
+static inline void
+dpaa_eth_err_queue(struct qman_fq *fq)
{
struct rte_mbuf *mbuf;
struct qman_fq *debug_fq;
int ret, i;
struct qm_dqrr_entry *dq;
struct qm_fd *fd;
+ struct dpaa_if *dpaa_intf;
+
+ dpaa_intf = fq->dpaa_intf;
+ if (fq != &dpaa_intf->rx_queues[0]) {
+		/** Error queues are drained only via the first RXQ. */
+ return;
+ }
+
+ if (dpaa_intf->cfg->fman_if->is_shared_mac) {
+ /** Error queues of shared MAC are handled in kernel. */
+ return;
+ }
if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
ret = rte_dpaa_portal_init((void *)0);
@@ -708,7 +721,7 @@ static inline void dpaa_eth_err_queue(struct dpaa_if *dpaa_intf)
return;
}
}
- for (i = 0; i <= DPAA_DEBUG_FQ_TX_ERROR; i++) {
+ for (i = 0; i < DPAA_DEBUG_FQ_MAX_NUM; i++) {
debug_fq = &dpaa_intf->debug_queues[i];
ret = qman_set_vdq(debug_fq, 4, QM_VDQCR_EXACT);
if (ret)
@@ -751,8 +764,7 @@ uint16_t dpaa_eth_queue_rx(void *q,
rte_dpaa_bpid_info = fq->bp_array;
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
- if (fq->fqid == ((struct dpaa_if *)fq->dpaa_intf)->rx_queues[0].fqid)
- dpaa_eth_err_queue((struct dpaa_if *)fq->dpaa_intf);
+ dpaa_eth_err_queue(fq);
#endif
if (likely(fq->is_static))