@@ -31,8 +31,9 @@ typedef void (*ice_tx_release_mbufs_t)(struct ci_tx_queue *txq);
struct ci_tx_queue {
union { /* TX ring virtual address */
- volatile struct ice_tx_desc *ice_tx_ring;
volatile struct i40e_tx_desc *i40e_tx_ring;
+ volatile struct iavf_tx_desc *iavf_tx_ring;
+ volatile struct ice_tx_desc *ice_tx_ring;
};
volatile uint8_t *qtx_tail; /* register address of tail */
struct ci_tx_entry *sw_ring; /* virtual address of SW ring */
@@ -63,8 +64,9 @@ struct ci_tx_queue {
bool tx_deferred_start; /* don't start this queue in dev start */
bool q_set; /* indicate if tx queue has been configured */
union { /* the VSI this queue belongs to */
- struct ice_vsi *ice_vsi;
struct i40e_vsi *i40e_vsi;
+ struct iavf_vsi *iavf_vsi;
+ struct ice_vsi *ice_vsi;
};
const struct rte_memzone *mz;
@@ -76,6 +78,15 @@ struct ci_tx_queue {
struct { /* I40E driver specific values */
uint8_t dcb_tc;
};
+ struct { /* iavf driver specific values */
+ uint16_t ipsec_crypto_pkt_md_offset;
+ uint8_t rel_mbufs_type;
+#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
+#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(1)
+ uint8_t vlan_flag;
+ uint8_t tc;
+ bool use_ctx; /* with ctx info, each pkt needs two descriptors */
+ };
};
};
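[Reviewer aside: a minimal sketch of the access pattern the shared structure enables. The helper below is hypothetical, but every field and macro name comes from the hunks above.]

/* Hypothetical helper, not part of this patch: each PMD reads only the
 * union member it owns, so a single ci_tx_queue layout serves i40e,
 * iavf and ice without casts between driver-private queue types. */
static inline int
iavf_vlan_tag_in_l2tag2(const struct ci_tx_queue *txq)
{
	volatile struct iavf_tx_desc *ring = txq->iavf_tx_ring;

	(void)ring; /* descriptor accesses elided in this sketch */
	/* vlan_flag records where queue setup decided to place the tag */
	return !!(txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2);
}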
@@ -98,7 +98,7 @@
struct iavf_adapter;
struct iavf_rx_queue;
-struct iavf_tx_queue;
+struct ci_tx_queue;
struct iavf_ipsec_crypto_stats {
@@ -954,7 +954,7 @@ static int
iavf_start_queues(struct rte_eth_dev *dev)
{
struct iavf_rx_queue *rxq;
- struct iavf_tx_queue *txq;
+ struct ci_tx_queue *txq;
int i;
uint16_t nb_txq, nb_rxq;
@@ -1885,7 +1885,7 @@ iavf_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
struct iavf_mbuf_stats *mbuf_stats)
{
uint16_t idx;
- struct iavf_tx_queue *txq;
+ struct ci_tx_queue *txq;
for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
txq = ethdev->data->tx_queues[idx];
@@ -213,7 +213,7 @@ check_rx_vec_allow(struct iavf_rx_queue *rxq)
}
static inline bool
-check_tx_vec_allow(struct iavf_tx_queue *txq)
+check_tx_vec_allow(struct ci_tx_queue *txq)
{
if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
txq->tx_rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
@@ -282,7 +282,7 @@ reset_rx_queue(struct iavf_rx_queue *rxq)
}
static inline void
-reset_tx_queue(struct iavf_tx_queue *txq)
+reset_tx_queue(struct ci_tx_queue *txq)
{
struct ci_tx_entry *txe;
uint32_t i, size;
@@ -388,7 +388,7 @@ release_rxq_mbufs(struct iavf_rx_queue *rxq)
}
static inline void
-release_txq_mbufs(struct iavf_tx_queue *txq)
+release_txq_mbufs(struct ci_tx_queue *txq)
{
uint16_t i;
@@ -778,7 +778,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct iavf_info *vf =
IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct iavf_vsi *vsi = &vf->vsi;
- struct iavf_tx_queue *txq;
+ struct ci_tx_queue *txq;
const struct rte_memzone *mz;
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
@@ -814,7 +814,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("iavf txq",
- sizeof(struct iavf_tx_queue),
+ sizeof(struct ci_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq) {
@@ -979,7 +979,7 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct iavf_tx_queue *txq;
+ struct ci_tx_queue *txq;
int err = 0;
PMD_DRV_FUNC_TRACE();
@@ -1048,7 +1048,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct iavf_tx_queue *txq;
+ struct ci_tx_queue *txq;
int err;
PMD_DRV_FUNC_TRACE();
@@ -1092,7 +1092,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct iavf_tx_queue *q = dev->data->tx_queues[qid];
+ struct ci_tx_queue *q = dev->data->tx_queues[qid];
if (!q)
return;
@@ -1107,7 +1107,7 @@ static void
iavf_reset_queues(struct rte_eth_dev *dev)
{
struct iavf_rx_queue *rxq;
- struct iavf_tx_queue *txq;
+ struct ci_tx_queue *txq;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -2377,7 +2377,7 @@ iavf_recv_pkts_bulk_alloc(void *rx_queue,
}
static inline int
-iavf_xmit_cleanup(struct iavf_tx_queue *txq)
+iavf_xmit_cleanup(struct ci_tx_queue *txq)
{
struct ci_tx_entry *sw_ring = txq->sw_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
@@ -2781,7 +2781,7 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
static struct iavf_ipsec_crypto_pkt_metadata *
-iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
+iavf_ipsec_crypto_get_pkt_metadata(const struct ci_tx_queue *txq,
struct rte_mbuf *m)
{
if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
@@ -2795,7 +2795,7 @@ iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
uint16_t
iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
- struct iavf_tx_queue *txq = tx_queue;
+ struct ci_tx_queue *txq = tx_queue;
volatile struct iavf_tx_desc *txr = txq->iavf_tx_ring;
struct ci_tx_entry *txe_ring = txq->sw_ring;
struct ci_tx_entry *txe, *txn;
@@ -3027,7 +3027,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* correct queue.
*/
static int
-iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
+iavf_check_vlan_up2tc(struct ci_tx_queue *txq, struct rte_mbuf *m)
{
struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -3646,7 +3646,7 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
int i, ret;
uint64_t ol_flags;
struct rte_mbuf *m;
- struct iavf_tx_queue *txq = tx_queue;
+ struct ci_tx_queue *txq = tx_queue;
struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -3800,7 +3800,7 @@ static uint16_t
iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct iavf_tx_queue *txq = tx_queue;
+ struct ci_tx_queue *txq = tx_queue;
enum iavf_tx_burst_type tx_burst_type;
if (!txq->iavf_vsi || txq->iavf_vsi->adapter->no_poll)
@@ -3823,7 +3823,7 @@ iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t good_pkts = nb_pkts;
const char *reason = NULL;
bool pkt_error = false;
- struct iavf_tx_queue *txq = tx_queue;
+ struct ci_tx_queue *txq = tx_queue;
struct iavf_adapter *adapter = txq->iavf_vsi->adapter;
enum iavf_tx_burst_type tx_burst_type =
txq->iavf_vsi->adapter->tx_burst_type;
@@ -4144,7 +4144,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
int mbuf_check = adapter->devargs.mbuf_check;
int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
#ifdef RTE_ARCH_X86
- struct iavf_tx_queue *txq;
+ struct ci_tx_queue *txq;
int i;
int check_ret;
bool use_sse = false;
@@ -4265,7 +4265,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
}
static int
-iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
+iavf_tx_done_cleanup_full(struct ci_tx_queue *txq,
uint32_t free_cnt)
{
struct ci_tx_entry *swr_ring = txq->sw_ring;
@@ -4324,7 +4324,7 @@ iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
int
iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
- struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
+ struct ci_tx_queue *q = (struct ci_tx_queue *)txq;
return iavf_tx_done_cleanup_full(q, free_cnt);
}
@@ -4350,7 +4350,7 @@ void
iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo)
{
- struct iavf_tx_queue *txq;
+ struct ci_tx_queue *txq;
txq = dev->data->tx_queues[queue_id];
@@ -4422,7 +4422,7 @@ iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
int
iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
{
- struct iavf_tx_queue *txq = tx_queue;
+ struct ci_tx_queue *txq = tx_queue;
volatile uint64_t *status;
uint64_t mask, expect;
uint32_t desc;
@@ -211,7 +211,7 @@ struct iavf_rxq_ops {
};
struct iavf_txq_ops {
- void (*release_mbufs)(struct iavf_tx_queue *txq);
+ void (*release_mbufs)(struct ci_tx_queue *txq);
};
@@ -273,43 +273,6 @@ struct iavf_rx_queue {
uint64_t hw_time_update;
};
-/* Structure associated with each TX queue. */
-struct iavf_tx_queue {
- const struct rte_memzone *mz; /* memzone for Tx ring */
- volatile struct iavf_tx_desc *iavf_tx_ring; /* Tx ring virtual address */
- rte_iova_t tx_ring_dma; /* Tx ring DMA address */
- struct ci_tx_entry *sw_ring; /* address array of SW ring */
- uint16_t nb_tx_desc; /* ring length */
- uint16_t tx_tail; /* current value of tail */
- volatile uint8_t *qtx_tail; /* register address of tail */
- /* number of used desc since RS bit set */
- uint16_t nb_tx_used;
- uint16_t nb_tx_free;
- uint16_t last_desc_cleaned; /* last desc have been cleaned*/
- uint16_t tx_free_thresh;
- uint16_t tx_rs_thresh;
- uint8_t rel_mbufs_type;
- struct iavf_vsi *iavf_vsi; /**< the VSI this queue belongs to */
-
- uint16_t port_id;
- uint16_t queue_id;
- uint64_t offloads;
- uint16_t tx_next_dd; /* next to set RS, for VPMD */
- uint16_t tx_next_rs; /* next to check DD, for VPMD */
- uint16_t ipsec_crypto_pkt_md_offset;
-
- uint64_t mbuf_errors;
-
- bool q_set; /* if rx queue has been configured */
- bool tx_deferred_start; /* don't start this queue in dev start */
- const struct iavf_txq_ops *ops;
-#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
-#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(1)
- uint8_t vlan_flag;
- uint8_t tc;
- uint8_t use_ctx:1; /* if use the ctx desc, a packet needs two descriptors */
-};
-
/* Offload features */
union iavf_tx_offload {
uint64_t data;
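[Reviewer aside: the deletion above pairs with the first three hunks of this patch. A comment-only sketch of where each removed field now lives, read off the hunks themselves rather than any new API:]

/* Former iavf_tx_queue field            -> new home in ci_tx_queue
 * iavf_tx_ring, iavf_vsi                -> the per-driver unions
 * mz, qtx_tail, sw_ring, q_set,
 * tx_deferred_start (and the other
 * generic ring/threshold state)         -> common ci_tx_queue members
 * ipsec_crypto_pkt_md_offset,
 * rel_mbufs_type, vlan_flag, tc,
 * use_ctx                               -> the iavf arm of the driver union
 */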
@@ -724,7 +687,7 @@ int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
-int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
+int iavf_txq_vec_setup(struct ci_tx_queue *txq);
uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
@@ -757,14 +720,14 @@ uint16_t iavf_xmit_pkts_vec_avx512_ctx_offload(void *tx_queue, struct rte_mbuf *
uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
+int iavf_txq_vec_setup_avx512(struct ci_tx_queue *txq);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
-void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_tx_queue_release_mbufs_avx512(struct ci_tx_queue *txq);
void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
-void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
+void iavf_tx_queue_release_mbufs_sse(struct ci_tx_queue *txq);
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
@@ -791,7 +754,7 @@ void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
* to print the qwords
*/
static inline
-void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
+void iavf_dump_tx_descriptor(const struct ci_tx_queue *txq,
const volatile void *desc, uint16_t tx_id)
{
const char *name;
@@ -1734,7 +1734,7 @@ static __rte_always_inline uint16_t
iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts, bool offload)
{
- struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
struct ci_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
@@ -1801,7 +1801,7 @@ iavf_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts, bool offload)
{
uint16_t nb_tx = 0;
- struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
while (nb_pkts) {
uint16_t ret, num;
@@ -1845,7 +1845,7 @@ iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
}
static __rte_always_inline int
-iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
+iavf_tx_free_bufs_avx512(struct ci_tx_queue *txq)
{
struct ci_tx_entry_vec *txep;
uint32_t n;
@@ -2311,7 +2311,7 @@ static __rte_always_inline uint16_t
iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts, bool offload)
{
- struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
@@ -2379,7 +2379,7 @@ static __rte_always_inline uint16_t
iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts, bool offload)
{
- struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, nb_mbuf, tx_id;
@@ -2447,7 +2447,7 @@ iavf_xmit_pkts_vec_avx512_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts, bool offload)
{
uint16_t nb_tx = 0;
- struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
while (nb_pkts) {
uint16_t ret, num;
@@ -2473,7 +2473,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
}
void __rte_cold
-iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
+iavf_tx_queue_release_mbufs_avx512(struct ci_tx_queue *txq)
{
unsigned int i;
const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
@@ -2494,7 +2494,7 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
}
int __rte_cold
-iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup_avx512(struct ci_tx_queue *txq)
{
txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
return 0;
@@ -2512,7 +2512,7 @@ iavf_xmit_pkts_vec_avx512_ctx_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts, bool offload)
{
uint16_t nb_tx = 0;
- struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
while (nb_pkts) {
uint16_t ret, num;
@@ -17,7 +17,7 @@
#endif
static __rte_always_inline int
-iavf_tx_free_bufs(struct iavf_tx_queue *txq)
+iavf_tx_free_bufs(struct ci_tx_queue *txq)
{
struct ci_tx_entry *txep;
uint32_t n;
@@ -104,7 +104,7 @@ _iavf_rx_queue_release_mbufs_vec(struct iavf_rx_queue *rxq)
}
static inline void
-_iavf_tx_queue_release_mbufs_vec(struct iavf_tx_queue *txq)
+_iavf_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
{
unsigned i;
const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
@@ -164,7 +164,7 @@ iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
}
static inline int
-iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
+iavf_tx_vec_queue_default(struct ci_tx_queue *txq)
{
if (!txq)
return -1;
@@ -227,7 +227,7 @@ static inline int
iavf_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct iavf_tx_queue *txq;
+ struct ci_tx_queue *txq;
int ret;
int result = 0;
@@ -1366,7 +1366,7 @@ uint16_t
iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
volatile struct iavf_tx_desc *txdp;
struct ci_tx_entry *txep;
uint16_t n, nb_commit, tx_id;
@@ -1435,7 +1435,7 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
uint16_t nb_tx = 0;
- struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
while (nb_pkts) {
uint16_t ret, num;
@@ -1459,13 +1459,13 @@ iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
}
void __rte_cold
-iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
+iavf_tx_queue_release_mbufs_sse(struct ci_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
}
int __rte_cold
-iavf_txq_vec_setup(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup(struct ci_tx_queue *txq)
{
txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return 0;
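[Reviewer aside: the vector setup routines above only record rel_mbufs_type; the actual dispatch happens at queue release. A sketch of that table-indexed dispatch, assuming the usual iavf pattern — only the SSE and AVX512 enum values appear in this patch, so IAVF_REL_MBUFS_DEFAULT and the table name are illustrative:]

/* Illustrative only: a table of iavf_txq_ops (whose release_mbufs
 * member is converted earlier in this patch) indexed by
 * txq->rel_mbufs_type; exact upstream names may differ. */
static const struct iavf_txq_ops iavf_txq_rel_mbufs_ops_sketch[] = {
	[IAVF_REL_MBUFS_DEFAULT]    = { .release_mbufs = release_txq_mbufs },
	[IAVF_REL_MBUFS_SSE_VEC]    = { .release_mbufs = iavf_tx_queue_release_mbufs_sse },
	[IAVF_REL_MBUFS_AVX512_VEC] = { .release_mbufs = iavf_tx_queue_release_mbufs_avx512 },
};
/* at release time:
 *	iavf_txq_rel_mbufs_ops_sketch[txq->rel_mbufs_type].release_mbufs(txq);
 */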
@@ -1218,10 +1218,8 @@ int
iavf_configure_queues(struct iavf_adapter *adapter,
uint16_t num_queue_pairs, uint16_t index)
{
- struct iavf_rx_queue **rxq =
- (struct iavf_rx_queue **)adapter->dev_data->rx_queues;
- struct iavf_tx_queue **txq =
- (struct iavf_tx_queue **)adapter->dev_data->tx_queues;
+ struct iavf_rx_queue **rxq = (struct iavf_rx_queue **)adapter->dev_data->rx_queues;
+ struct ci_tx_queue **txq = (struct ci_tx_queue **)adapter->dev_data->tx_queues;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct virtchnl_vsi_queue_config_info *vc_config;
struct virtchnl_queue_pair_info *vc_qp;
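[Reviewer aside on the cast in this last hunk: rte_eth_dev_data stores queues as untyped pointers, so the PMD may view the array through whatever element type it populated it with. A one-line sketch, with a hypothetical variable name:]

/* dev_data->tx_queues is void **; after this patch every element was
 * allocated as a ci_tx_queue, so the common-type view below is safe. */
struct ci_tx_queue *txq0 = ((struct ci_tx_queue **)adapter->dev_data->tx_queues)[0];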