[v4,2/8] net/ice: add pointer for queue buffer release
Checks
Commit Message
Add function pointers for releasing the buffers of RX and
TX queues, since vector functions will be added for RX
and TX.
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
drivers/net/ice/ice_rxtx.c | 24 +++++++++++++++---------
drivers/net/ice/ice_rxtx.h | 5 +++++
2 files changed, 20 insertions(+), 9 deletions(-)
Comments
On 3/21/19 7:26 AM, Wenzhuo Lu wrote:
> Add function pointers of buffer releasing for RX and
> TX queues, for vector functions will be added for RX
> and TX.
>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> ---
> drivers/net/ice/ice_rxtx.c | 24 +++++++++++++++---------
> drivers/net/ice/ice_rxtx.h | 5 +++++
> 2 files changed, 20 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
> index c794ee8..d540ed1 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -366,7 +366,7 @@
> PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
> rx_queue_id);
>
> - ice_rx_queue_release_mbufs(rxq);
> + rxq->rx_rel_mbufs(rxq);
> ice_reset_rx_queue(rxq);
> return -EINVAL;
> }
> @@ -393,7 +393,7 @@
> rx_queue_id);
> return -EINVAL;
> }
> - ice_rx_queue_release_mbufs(rxq);
> + rxq->rx_rel_mbufs(rxq);
> ice_reset_rx_queue(rxq);
> dev->data->rx_queue_state[rx_queue_id] =
> RTE_ETH_QUEUE_STATE_STOPPED;
> @@ -555,7 +555,7 @@
> return -EINVAL;
> }
>
> - ice_tx_queue_release_mbufs(txq);
> + txq->tx_rel_mbufs(txq);
> ice_reset_tx_queue(txq);
> dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
>
> @@ -669,6 +669,7 @@
> ice_reset_rx_queue(rxq);
> rxq->q_set = TRUE;
> dev->data->rx_queues[queue_idx] = rxq;
> + rxq->rx_rel_mbufs = ice_rx_queue_release_mbufs;
I think it would be cleaner to have ice_rx_queue_release_mbufs() call
the callback. You would then rename the current ice_rx_queue_release_mbufs to
something else.
That would make the code more consistent IMHO, and would avoid having to patch
all the call sites.
>
> use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
>
> @@ -701,7 +702,7 @@
> return;
> }
>
> - ice_rx_queue_release_mbufs(q);
> + q->rx_rel_mbufs(q);
> rte_free(q->sw_ring);
> rte_free(q);
> }
> @@ -866,6 +867,7 @@
> ice_reset_tx_queue(txq);
> txq->q_set = TRUE;
> dev->data->tx_queues[queue_idx] = txq;
> + txq->tx_rel_mbufs = ice_tx_queue_release_mbufs;
>
> return 0;
> }
> @@ -880,7 +882,7 @@
> return;
> }
>
> - ice_tx_queue_release_mbufs(q);
> + q->tx_rel_mbufs(q);
> rte_free(q->sw_ring);
> rte_free(q);
> }
> @@ -1552,18 +1554,22 @@
> void
> ice_clear_queues(struct rte_eth_dev *dev)
> {
> + struct ice_rx_queue *rxq;
> + struct ice_tx_queue *txq;
> uint16_t i;
>
> PMD_INIT_FUNC_TRACE();
>
> for (i = 0; i < dev->data->nb_tx_queues; i++) {
> - ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
> - ice_reset_tx_queue(dev->data->tx_queues[i]);
> + txq = dev->data->tx_queues[i];
> + txq->tx_rel_mbufs(txq);
> + ice_reset_tx_queue(txq);
> }
>
> for (i = 0; i < dev->data->nb_rx_queues; i++) {
> - ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
> - ice_reset_rx_queue(dev->data->rx_queues[i]);
> + rxq = dev->data->rx_queues[i];
> + rxq->rx_rel_mbufs(rxq);
> + ice_reset_rx_queue(rxq);
> }
> }
>
> diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
> index ec0e52e..78b4928 100644
> --- a/drivers/net/ice/ice_rxtx.h
> +++ b/drivers/net/ice/ice_rxtx.h
> @@ -27,6 +27,9 @@
>
> #define ICE_SUPPORT_CHAIN_NUM 5
>
> +typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
> +typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
> +
> struct ice_rx_entry {
> struct rte_mbuf *mbuf;
> };
> @@ -61,6 +64,7 @@ struct ice_rx_queue {
> uint16_t max_pkt_len; /* Maximum packet length */
> bool q_set; /* indicate if rx queue has been configured */
> bool rx_deferred_start; /* don't start this queue in dev start */
> + ice_rx_release_mbufs_t rx_rel_mbufs;
> };
>
> struct ice_tx_entry {
> @@ -100,6 +104,7 @@ struct ice_tx_queue {
> uint16_t tx_next_rs;
> bool tx_deferred_start; /* don't start this queue in dev start */
> bool q_set; /* indicate if tx queue has been configured */
> + ice_tx_release_mbufs_t tx_rel_mbufs;
> };
>
> /* Offload features */
>
@@ -366,7 +366,7 @@
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
rx_queue_id);
- ice_rx_queue_release_mbufs(rxq);
+ rxq->rx_rel_mbufs(rxq);
ice_reset_rx_queue(rxq);
return -EINVAL;
}
@@ -393,7 +393,7 @@
rx_queue_id);
return -EINVAL;
}
- ice_rx_queue_release_mbufs(rxq);
+ rxq->rx_rel_mbufs(rxq);
ice_reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] =
RTE_ETH_QUEUE_STATE_STOPPED;
@@ -555,7 +555,7 @@
return -EINVAL;
}
- ice_tx_queue_release_mbufs(txq);
+ txq->tx_rel_mbufs(txq);
ice_reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -669,6 +669,7 @@
ice_reset_rx_queue(rxq);
rxq->q_set = TRUE;
dev->data->rx_queues[queue_idx] = rxq;
+ rxq->rx_rel_mbufs = ice_rx_queue_release_mbufs;
use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
@@ -701,7 +702,7 @@
return;
}
- ice_rx_queue_release_mbufs(q);
+ q->rx_rel_mbufs(q);
rte_free(q->sw_ring);
rte_free(q);
}
@@ -866,6 +867,7 @@
ice_reset_tx_queue(txq);
txq->q_set = TRUE;
dev->data->tx_queues[queue_idx] = txq;
+ txq->tx_rel_mbufs = ice_tx_queue_release_mbufs;
return 0;
}
@@ -880,7 +882,7 @@
return;
}
- ice_tx_queue_release_mbufs(q);
+ q->tx_rel_mbufs(q);
rte_free(q->sw_ring);
rte_free(q);
}
@@ -1552,18 +1554,22 @@
void
ice_clear_queues(struct rte_eth_dev *dev)
{
+ struct ice_rx_queue *rxq;
+ struct ice_tx_queue *txq;
uint16_t i;
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- ice_tx_queue_release_mbufs(dev->data->tx_queues[i]);
- ice_reset_tx_queue(dev->data->tx_queues[i]);
+ txq = dev->data->tx_queues[i];
+ txq->tx_rel_mbufs(txq);
+ ice_reset_tx_queue(txq);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- ice_rx_queue_release_mbufs(dev->data->rx_queues[i]);
- ice_reset_rx_queue(dev->data->rx_queues[i]);
+ rxq = dev->data->rx_queues[i];
+ rxq->rx_rel_mbufs(rxq);
+ ice_reset_rx_queue(rxq);
}
}
@@ -27,6 +27,9 @@
#define ICE_SUPPORT_CHAIN_NUM 5
+typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
+typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
+
struct ice_rx_entry {
struct rte_mbuf *mbuf;
};
@@ -61,6 +64,7 @@ struct ice_rx_queue {
uint16_t max_pkt_len; /* Maximum packet length */
bool q_set; /* indicate if rx queue has been configured */
bool rx_deferred_start; /* don't start this queue in dev start */
+ ice_rx_release_mbufs_t rx_rel_mbufs;
};
struct ice_tx_entry {
@@ -100,6 +104,7 @@ struct ice_tx_queue {
uint16_t tx_next_rs;
bool tx_deferred_start; /* don't start this queue in dev start */
bool q_set; /* indicate if tx queue has been configured */
+ ice_tx_release_mbufs_t tx_rel_mbufs;
};
/* Offload features */