@@ -987,6 +987,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
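+ /* Advertise that Rx/Tx queues can be set up after device start. */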
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -1917,7 +1917,7 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
return rc;
}
-static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
int rc;
struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
@@ -2637,10 +2637,11 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
cpr = rxq->cp_ring;
}
- rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
-
- if (rc)
- return rc;
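+ /* Allocate a stats context only for rings that do not already have one. */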
+ if (cpr->hw_stats_ctx_id == HWRM_NA_SIGNATURE) {
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
+ if (rc)
+ return rc;
+ }
}
return rc;
}
@@ -2720,6 +2721,12 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
bp->grp_info[queue_index].ag_fw_ring_id =
INVALID_HW_RING_ID;
}
+
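+ /* Free the stats context tied to this queue's completion ring. */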
+ if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
+ bnxt_hwrm_stat_ctx_free(bp, cpr);
+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ }
+
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
bnxt_free_cp_ring(bp, cpr);
@@ -5093,7 +5100,6 @@ static int
bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
- uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
struct bnxt_rx_queue **rxqs = bp->rx_queues;
uint16_t *ring_tbl = vnic->rss_table;
@@ -5127,8 +5133,7 @@ bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
/* Find next active ring. */
for (cnt = 0; cnt < max_rings; cnt++) {
- if (rx_queue_state[k] !=
- RTE_ETH_QUEUE_STATE_STOPPED)
+ if (rxqs[k]->rx_started)
break;
if (++k == max_rings)
k = 0;
@@ -6194,3 +6199,28 @@ int bnxt_hwrm_read_sfp_module_eeprom_info(struct bnxt *bp, uint16_t i2c_addr,
return rc;
}
+
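+/* Free the HW Tx ring, its stats context and its completion ring for one queue. */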
+void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index)
+{
+ struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX,
+ cpr->cp_ring_struct->fw_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
+ bnxt_hwrm_stat_ctx_free(bp, cpr);
+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ }
+
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_free_cp_ring(bp, cpr);
+ cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
+ }
+}
@@ -304,4 +304,7 @@ int bnxt_hwrm_ring_stats(struct bnxt *bp, uint32_t cid, int idx,
int bnxt_hwrm_read_sfp_module_eeprom_info(struct bnxt *bp, uint16_t i2c_addr,
uint16_t page_number, uint16_t start_addr,
uint16_t data_length, uint8_t *buf);
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
+void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index);
+int bnxt_alloc_hwrm_tx_ring(struct bnxt *bp, int queue_index);
#endif
@@ -599,6 +599,10 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
if (rc)
goto err_out;
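+ /* Allocate a stats context for this queue's completion ring. */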
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
+ if (rc)
+ goto err_out;
+
if (BNXT_HAS_RING_GRPS(bp)) {
bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
@@ -837,3 +841,50 @@ int bnxt_alloc_async_ring_struct(struct bnxt *bp)
return bnxt_alloc_rings(bp, bp->eth_dev->device->numa_node, 0, NULL,
NULL, bp->async_cp_ring, NULL, "def_cp");
}
+
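+/* Allocate the completion ring, stats context and HW Tx ring for one Tx queue. */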
+int bnxt_alloc_hwrm_tx_ring(struct bnxt *bp, int queue_index)
+{
+ struct bnxt_tx_queue *txq = bp->tx_queues[queue_index];
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ unsigned int idx = queue_index + bp->rx_cp_nr_rings;
+ uint16_t tx_cosq_id = 0;
+ struct bnxt_coal coal;
+ int rc = 0;
+
+ rc = bnxt_alloc_cmpl_ring(bp, idx, cpr);
+ if (rc)
+ goto err_out;
+
+ bnxt_init_dflt_coal(&coal);
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
+
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
+ if (rc)
+ goto err_out;
+
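+ /* Pick the CoS queue ID for this ring when CoS classification is supported. */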
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
+ tx_cosq_id = bp->tx_cosq_id[queue_index < bp->max_lltc ? queue_index : 0];
+ else
+ tx_cosq_id = bp->tx_cosq_id[0];
+
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
+ queue_index, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id,
+ tx_cosq_id);
+ if (rc)
+ goto err_out;
+
+ bnxt_set_db(bp, &txr->tx_db, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
+ queue_index, ring->fw_ring_id,
+ ring->ring_mask);
+ txq->index = idx;
+
+ return rc;
+err_out:
+ bnxt_free_hwrm_tx_ring(bp, queue_index);
+ return rc;
+}
@@ -248,6 +248,7 @@ void bnxt_rx_queue_release_op(void *rx_queue)
if (is_bnxt_in_error(rxq->bp))
return;
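+ /* Free the HW Rx ring and stats context before releasing SW resources. */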
+ bnxt_free_hwrm_rx_ring(rxq->bp, rxq->queue_id);
bnxt_rx_queue_release_mbufs(rxq);
/* Free RX ring hardware descriptors */
@@ -286,7 +287,6 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
struct bnxt_rx_queue *rxq;
int rc = 0;
- uint8_t queue_state;
rc = is_bnxt_in_error(bp);
if (rc)
@@ -360,14 +360,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
else
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
- if (rxq->rx_deferred_start) {
- queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
- rxq->rx_started = false;
- } else {
- queue_state = RTE_ETH_QUEUE_STATE_STARTED;
- rxq->rx_started = true;
- }
- eth_dev->data->rx_queue_state[queue_idx] = queue_state;
+ rxq->rx_started = !rxq->rx_deferred_start;
+ rxq->vnic = BNXT_GET_DEFAULT_VNIC(bp);
/* Configure mtu if it is different from what was configured before */
if (!queue_idx)
@@ -8,6 +8,7 @@
#include <rte_malloc.h>
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
@@ -61,6 +62,7 @@ void bnxt_tx_queue_release_op(void *tx_queue)
return;
/* Free TX ring hardware descriptors */
+ bnxt_free_hwrm_tx_ring(txq->bp, txq->queue_id);
bnxt_tx_queue_release_mbufs(txq);
if (txq->tx_ring) {
bnxt_free_ring(txq->tx_ring->tx_ring_struct);
@@ -9,6 +9,7 @@
#include <rte_malloc.h>
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
@@ -547,6 +548,11 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (rc)
return rc;
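+ /* Free any stale HW Tx ring state and allocate the ring again. */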
+ bnxt_free_hwrm_tx_ring(bp, tx_queue_id);
+ rc = bnxt_alloc_hwrm_tx_ring(bp, tx_queue_id);
+ if (rc)
+ return rc;
+
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
txq->tx_started = true;
PMD_DRV_LOG(DEBUG, "Tx queue started\n");