@@ -91,7 +91,6 @@
/* forward-declare some functions */
static int ixgbe_is_vf(struct rte_eth_dev *dev);
-static int ixgbe_write_default_ctx_desc(struct ci_tx_queue *txq, struct rte_mempool *mp, bool vec);
/*********************************************************************
*
@@ -361,37 +360,6 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-static uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
-{
- uint16_t nb_tx = 0;
- struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
-
- /* we might check first packet's mempool */
- if (unlikely(nb_pkts == 0))
- return nb_pkts;
-
- /* check if we need to initialize default context descriptor */
- if (unlikely(!txq->vf_ctx_initialized) &&
- ixgbe_write_default_ctx_desc(txq, tx_pkts[0]->pool, true))
- return 0;
-
- while (nb_pkts) {
- uint16_t ret, num;
-
- num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
- ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
- num);
- nb_tx += ret;
- nb_pkts -= ret;
- if (ret < num)
- break;
- }
-
- return nb_tx;
-}
-
static inline void
ixgbe_set_xmit_ctx(struct ci_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
@@ -2376,7 +2344,7 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
*
**********************************************************************/
-static inline int
+int
ixgbe_write_default_ctx_desc(struct ci_tx_queue *txq, struct rte_mempool *mp, bool vec)
{
volatile struct ixgbe_adv_tx_context_desc *ctx_txd;
@@ -6280,6 +6248,14 @@ ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
return -1;
}
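+/* stub used when no vector Tx implementation is available; sends nothing */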
+uint16_t
+ixgbe_xmit_pkts_vec(void __rte_unused *tx_queue, struct rte_mbuf __rte_unused **tx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
uint16_t
ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
struct rte_mbuf __rte_unused **tx_pkts,
@@ -221,21 +221,18 @@ int ixgbe_rx_burst_mode_get(struct rte_eth_dev *dev,
uint16_t queue_id, struct rte_eth_burst_mode *mode);
int ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev);
-uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
-uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
- struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
+int ixgbe_write_default_ctx_desc(struct ci_tx_queue *txq, struct rte_mempool *mp, bool vec);
uint16_t ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,
struct rte_eth_recycle_rxq_info *recycle_rxq_info);
void ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs);
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
@@ -139,6 +139,38 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
#endif
}
+uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
+
+	/* bail out on an empty burst; tx_pkts[0]->pool is dereferenced below */
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ /* check if we need to initialize default context descriptor */
+ if (unlikely(!txq->vf_ctx_initialized) &&
+ ixgbe_write_default_ctx_desc(txq, tx_pkts[0]->pool, true))
+ return 0;
+
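+	/* send bursts of at most tx_rs_thresh packets; stop if a burst comes up short */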
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
void
ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs)
{
@@ -17,6 +17,10 @@ int ixgbe_txq_vec_setup(struct ci_tx_queue *txq);
void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
void ixgbe_reset_tx_queue_vec(struct ci_tx_queue *txq);
void ixgbe_tx_free_swring_vec(struct ci_tx_queue *txq);
+uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
void ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs);
uint16_t ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,
struct rte_eth_recycle_rxq_info *recycle_rxq_info);