@@ -81,22 +81,10 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
}
/* Allocate DQ storage for the DPCI Rx queues */
- rxq = &(dpci_node->rx_queue[i]);
- rxq->q_storage = rte_malloc("dq_storage",
- sizeof(struct queue_storage_info_t),
- RTE_CACHE_LINE_SIZE);
- if (!rxq->q_storage) {
- DPAA2_BUS_ERR("q_storage allocation failed\n");
- ret = -ENOMEM;
+ rxq = &dpci_node->rx_queue[i];
+ ret = dpaa2_queue_storage_alloc(rxq, 1);
+ if (ret)
goto err;
- }
-
- memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
- ret = dpaa2_alloc_dq_storage(rxq->q_storage);
- if (ret) {
- DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n");
- goto err;
- }
}
/* Enable the device */
@@ -141,12 +129,9 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
err:
for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
- struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+ struct dpaa2_queue *rxq = &dpci_node->rx_queue[i];
- if (rxq->q_storage) {
- dpaa2_free_dq_storage(rxq->q_storage);
- rte_free(rxq->q_storage);
- }
+ dpaa2_queue_storage_free(rxq, 1);
}
rte_free(dpci_node);
@@ -574,6 +574,7 @@ dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
rte_free(q_storage->dq_storage[i]);
+ q_storage->dq_storage[i] = NULL;
}
}
@@ -583,7 +584,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
int i = 0;
for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
- q_storage->dq_storage[i] = rte_malloc(NULL,
+ q_storage->dq_storage[i] = rte_zmalloc(NULL,
dpaa2_dqrr_size * sizeof(struct qbman_result),
RTE_CACHE_LINE_SIZE);
if (!q_storage->dq_storage[i])
@@ -591,8 +592,10 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
}
return 0;
fail:
- while (--i >= 0)
+ while (--i >= 0) {
rte_free(q_storage->dq_storage[i]);
+ q_storage->dq_storage[i] = NULL;
+ }
return -1;
}
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2021 NXP
+ * Copyright 2016-2024 NXP
*
*/
@@ -165,7 +165,9 @@ struct __rte_cache_aligned dpaa2_queue {
uint64_t tx_pkts;
uint64_t err_pkts;
union {
- struct queue_storage_info_t *q_storage;
+ /** Ingress */
+ struct queue_storage_info_t *q_storage[RTE_MAX_LCORE];
+ /** Egress */
struct qbman_result *cscn;
};
struct rte_event ev;
@@ -186,6 +188,38 @@ struct swp_active_dqs {
uint64_t reserved[7];
};
+#define dpaa2_queue_storage_alloc(q, num) /* alloc (num) DQ storages on queue (q); yields 0 or -errno */ \
+({ \
+	int ret = 0, i; /* ret: first failure code, 0 on success */ \
+	\
+	for (i = 0; i < (num); i++) { \
+		(q)->q_storage[i] = rte_zmalloc(NULL, \
+			sizeof(struct queue_storage_info_t), \
+			RTE_CACHE_LINE_SIZE); \
+		if (!(q)->q_storage[i]) { \
+			ret = -ENOBUFS; /* entries [0, i) stay allocated; caller frees via dpaa2_queue_storage_free */ \
+			break; \
+		} \
+		ret = dpaa2_alloc_dq_storage((q)->q_storage[i]); \
+		if (ret) \
+			break; \
+	} \
+	ret; /* GCC statement-expression result */ \
+})
+
+#define dpaa2_queue_storage_free(q, num) /* free up to (num) DQ storages on queue (q); safe on partial alloc */ \
+({ \
+	int i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		if ((q)->q_storage[i]) { /* skip never-allocated slots */ \
+			dpaa2_free_dq_storage((q)->q_storage[i]); \
+			rte_free((q)->q_storage[i]); \
+			(q)->q_storage[i] = NULL; /* guard against double free on repeated cleanup */ \
+		} \
+	} \
+})
+
#define NUM_MAX_SWP 64
extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
@@ -1891,7 +1891,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
}
}
swp = DPAA2_PER_LCORE_PORTAL;
- dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+ dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_numframes(&pulldesc,
@@ -1982,10 +1982,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
PMD_INIT_FUNC_TRACE();
- if (qp->rx_vq.q_storage) {
- dpaa2_free_dq_storage(qp->rx_vq.q_storage);
- rte_free(qp->rx_vq.q_storage);
- }
+ dpaa2_queue_storage_free(&qp->rx_vq, 1);
rte_mempool_free(qp->fle_pool);
rte_free(qp);
@@ -2036,18 +2033,10 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->rx_vq.crypto_data = dev->data;
qp->tx_vq.crypto_data = dev->data;
- qp->rx_vq.q_storage = rte_malloc("sec dq storage",
- sizeof(struct queue_storage_info_t),
- RTE_CACHE_LINE_SIZE);
- if (!qp->rx_vq.q_storage) {
- DPAA2_SEC_ERR("malloc failed for q_storage");
- return -ENOMEM;
- }
- memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
-
- if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
- DPAA2_SEC_ERR("Unable to allocate dequeue storage");
- return -ENOMEM;
+ retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1);
+ if (retcode) {
+ dpaa2_queue_storage_free((&qp->rx_vq), 1);
+ return retcode;
}
dev->data->queue_pairs[qp_id] = qp;
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2022, 2024 NXP
*/
#include <cryptodev_pmd.h>
@@ -853,7 +853,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
}
}
swp = DPAA2_PER_LCORE_PORTAL;
- dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+ dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_numframes(&pulldesc,
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
*/
#include <rte_eal.h>
@@ -824,7 +824,7 @@ dpaa2_qdma_dequeue(void *dev_private,
dpaa2_dqrr_size : nb_cpls;
rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
fqid = rxq->fqid;
- q_storage = rxq->q_storage;
+ q_storage = rxq->q_storage[0];
if (unlikely(!q_storage->active_dqs)) {
q_storage->toggle = 0;
@@ -1032,13 +1032,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
qdma_dev->vqs[i].ring_cntx_idx = NULL;
}
rxq = &dpdmai_dev->rx_queue[i];
- if (rxq->q_storage) {
- DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
- dev->data->dev_name, i);
- dpaa2_free_dq_storage(rxq->q_storage);
- rte_free(rxq->q_storage);
- rxq->q_storage = NULL;
- }
+ dpaa2_queue_storage_free(rxq, 1);
}
rte_free(qdma_dev->vqs);
qdma_dev->vqs = NULL;
@@ -1094,24 +1088,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
qdma_dev->vqs[i].vq_id = i;
rxq = &dpdmai_dev->rx_queue[i];
/* Allocate DQ storage for the DPDMAI Rx queues */
- rxq->q_storage = rte_zmalloc(NULL,
- sizeof(struct queue_storage_info_t),
- RTE_CACHE_LINE_SIZE);
- if (!rxq->q_storage) {
- DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
- dev->data->dev_name, i);
- ret = -ENOMEM;
- goto alloc_failed;
- }
-
- memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
- ret = dpaa2_alloc_dq_storage(rxq->q_storage);
- if (ret) {
- DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
- dev->data->dev_name, i);
- ret = -ENOMEM;
+ ret = dpaa2_queue_storage_alloc(rxq, 1);
+ if (ret)
goto alloc_failed;
- }
}
qdma_dev->num_vqs = dev_conf->nb_vchans;
@@ -1122,11 +1101,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
alloc_failed:
for (i = 0; i < dev_conf->nb_vchans; i++) {
rxq = &dpdmai_dev->rx_queue[i];
- if (rxq->q_storage) {
- dpaa2_free_dq_storage(rxq->q_storage);
- rte_free(rxq->q_storage);
- rxq->q_storage = NULL;
- }
+ dpaa2_queue_storage_free(rxq, 1);
}
rte_free(qdma_dev->vqs);
@@ -1324,11 +1299,7 @@ dpaa2_qdma_close(struct rte_dma_dev *dev)
/* Free RXQ storages */
for (i = 0; i < qdma_dev->num_vqs; i++) {
rxq = &dpdmai_dev->rx_queue[i];
- if (rxq->q_storage) {
- dpaa2_free_dq_storage(rxq->q_storage);
- rte_free(rxq->q_storage);
- rxq->q_storage = NULL;
- }
+ dpaa2_queue_storage_free(rxq, 1);
}
if (qdma_dev->vqs) {
@@ -1,7 +1,7 @@
/* * SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2021 NXP
+ * Copyright 2016-2024 NXP
*
*/
@@ -366,7 +366,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
uint8_t num_rxqueue_per_tc;
struct dpaa2_queue *mc_q, *mcq;
uint32_t tot_queues;
- int i;
+ int i, ret = 0;
struct dpaa2_queue *dpaa2_q;
PMD_INIT_FUNC_TRACE();
@@ -386,16 +386,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
for (i = 0; i < priv->nb_rx_queues; i++) {
mc_q->eth_data = dev->data;
priv->rx_vq[i] = mc_q++;
- dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
- dpaa2_q->q_storage = rte_malloc("dq_storage",
- sizeof(struct queue_storage_info_t),
- RTE_CACHE_LINE_SIZE);
- if (!dpaa2_q->q_storage)
- goto fail;
-
- memset(dpaa2_q->q_storage, 0,
- sizeof(struct queue_storage_info_t));
- if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+ dpaa2_q = priv->rx_vq[i];
+ ret = dpaa2_queue_storage_alloc(dpaa2_q,
+ RTE_MAX_LCORE);
+ if (ret)
goto fail;
}
@@ -405,19 +399,11 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
if (!priv->rx_err_vq)
goto fail;
- dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
- dpaa2_q->q_storage = rte_malloc("err_dq_storage",
- sizeof(struct queue_storage_info_t) *
- RTE_MAX_LCORE,
- RTE_CACHE_LINE_SIZE);
- if (!dpaa2_q->q_storage)
+ dpaa2_q = priv->rx_err_vq;
+ ret = dpaa2_queue_storage_alloc(dpaa2_q,
+ RTE_MAX_LCORE);
+ if (ret)
goto fail;
-
- memset(dpaa2_q->q_storage, 0,
- sizeof(struct queue_storage_info_t));
- for (i = 0; i < RTE_MAX_LCORE; i++)
- if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
- goto fail;
}
for (i = 0; i < priv->nb_tx_queues; i++) {
@@ -438,24 +424,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
mc_q->tc_index = i;
mc_q->flow_id = 0;
priv->tx_conf_vq[i] = mc_q++;
- dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
- dpaa2_q->q_storage =
- rte_malloc("dq_storage",
- sizeof(struct queue_storage_info_t),
- RTE_CACHE_LINE_SIZE);
- if (!dpaa2_q->q_storage)
- goto fail_tx_conf;
-
- memset(dpaa2_q->q_storage, 0,
- sizeof(struct queue_storage_info_t));
- if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+ dpaa2_q = priv->tx_conf_vq[i];
+ ret = dpaa2_queue_storage_alloc(dpaa2_q,
+ RTE_MAX_LCORE);
+ if (ret)
goto fail_tx_conf;
}
}
vq_id = 0;
for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
- mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+ mcq = priv->rx_vq[vq_id];
mcq->tc_index = dist_idx / num_rxqueue_per_tc;
mcq->flow_id = dist_idx % num_rxqueue_per_tc;
vq_id++;
@@ -465,15 +444,15 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
fail_tx_conf:
i -= 1;
while (i >= 0) {
- dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
- rte_free(dpaa2_q->q_storage);
+ dpaa2_q = priv->tx_conf_vq[i];
+ dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
priv->tx_conf_vq[i--] = NULL;
}
i = priv->nb_tx_queues;
fail_tx:
i -= 1;
while (i >= 0) {
- dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ dpaa2_q = priv->tx_vq[i];
rte_free(dpaa2_q->cscn);
priv->tx_vq[i--] = NULL;
}
@@ -482,17 +461,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
i -= 1;
mc_q = priv->rx_vq[0];
while (i >= 0) {
- dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
- dpaa2_free_dq_storage(dpaa2_q->q_storage);
- rte_free(dpaa2_q->q_storage);
+ dpaa2_q = priv->rx_vq[i];
+ dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
priv->rx_vq[i--] = NULL;
}
if (dpaa2_enable_err_queue) {
- dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
- if (dpaa2_q->q_storage)
- dpaa2_free_dq_storage(dpaa2_q->q_storage);
- rte_free(dpaa2_q->q_storage);
+ dpaa2_q = priv->rx_err_vq;
+ dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
}
rte_free(mc_q);
@@ -512,20 +488,21 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
if (priv->rx_vq[0]) {
/* cleaning up queue storage */
for (i = 0; i < priv->nb_rx_queues; i++) {
- dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
- rte_free(dpaa2_q->q_storage);
+ dpaa2_q = priv->rx_vq[i];
+ dpaa2_queue_storage_free(dpaa2_q,
+ RTE_MAX_LCORE);
}
/* cleanup tx queue cscn */
for (i = 0; i < priv->nb_tx_queues; i++) {
- dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ dpaa2_q = priv->tx_vq[i];
rte_free(dpaa2_q->cscn);
}
if (priv->flags & DPAA2_TX_CONF_ENABLE) {
/* cleanup tx conf queue storage */
for (i = 0; i < priv->nb_tx_queues; i++) {
- dpaa2_q = (struct dpaa2_queue *)
- priv->tx_conf_vq[i];
- rte_free(dpaa2_q->q_storage);
+ dpaa2_q = priv->tx_conf_vq[i];
+ dpaa2_queue_storage_free(dpaa2_q,
+ RTE_MAX_LCORE);
}
}
/*free memory for all queues (RX+TX) */
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2021 NXP
+ * Copyright 2016-2024 NXP
*
*/
@@ -647,7 +647,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
}
swp = DPAA2_PER_LCORE_PORTAL;
- dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
+ dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -716,7 +716,7 @@ uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
/* Function receive frames for a given device and VQ*/
- struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct dpaa2_queue *dpaa2_q = queue;
struct qbman_result *dq_storage, *dq_storage1 = NULL;
uint32_t fqid = dpaa2_q->fqid;
int ret, num_rx = 0, pull_size;
@@ -724,10 +724,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct qbman_swp *swp;
const struct qbman_fd *fd;
struct qbman_pull_desc pulldesc;
- struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+ struct queue_storage_info_t *q_storage;
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
struct dpaa2_dev_priv *priv = eth_data->dev_private;
+ q_storage = dpaa2_q->q_storage[rte_lcore_id()];
+
if (unlikely(dpaa2_enable_err_queue))
dump_err_pkts(priv->rx_err_vq);
@@ -958,7 +960,7 @@ uint16_t
dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
/* Function receive frames for a given device and VQ */
- struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct dpaa2_queue *dpaa2_q = queue;
struct qbman_result *dq_storage;
uint32_t fqid = dpaa2_q->fqid;
int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
@@ -984,7 +986,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
swp = DPAA2_PER_LCORE_PORTAL;
do {
- dq_storage = dpaa2_q->q_storage->dq_storage[0];
+ dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1115,7 +1117,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
swp = DPAA2_PER_LCORE_PORTAL;
do {
- dq_storage = dpaa2_q->q_storage->dq_storage[0];
+ dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1954,12 +1956,13 @@ dpaa2_dev_loopback_rx(void *queue,
struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
struct qbman_pull_desc pulldesc;
struct qbman_eq_desc eqdesc;
- struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+ struct queue_storage_info_t *q_storage;
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
struct dpaa2_dev_priv *priv = eth_data->dev_private;
struct dpaa2_queue *tx_q = priv->tx_vq[0];
/* todo - currently we are using 1st TX queue only for loopback*/
+ q_storage = dpaa2_q->q_storage[rte_lcore_id()];
if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
ret = dpaa2_affine_qbman_ethrx_swp();
if (ret) {
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2024 NXP
*/
#include <stdio.h>
@@ -142,7 +142,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
- dq_storage = rxq->q_storage->dq_storage[0];
+ dq_storage = rxq->q_storage[0]->dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);