[v2,3/5] net/cpfl: add hairpin queue enable and setup

Message ID 20230214113852.3341607-4-mingxia.liu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series: add port to port feature

Checks

Context		Check	Description
ci/checkpatch	warning	coding style issues

Commit Message

Liu, Mingxia Feb. 14, 2023, 11:38 a.m. UTC
  This patch adds the following hairpin queue ops (see the usage sketch after the diffstat):
 - hairpin_cap_get
 - rx_hairpin_queue_setup
 - tx_hairpin_queue_setup

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  15 ++
 drivers/net/cpfl/cpfl_rxtx.c   | 443 ++++++++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_rxtx.h   |  22 +-
 3 files changed, 468 insertions(+), 12 deletions(-)
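
A minimal usage sketch (not part of the patch) showing how an application could exercise the new ops through the generic ethdev hairpin API, after rte_eth_dev_configure() and before rte_eth_dev_start(). The helper name setup_hairpin_pair, the port/queue ids and the cross-port peer topology are illustrative assumptions, not part of this series.

#include <rte_ethdev.h>

/* Hypothetical helper: bind one Rx and one Tx hairpin queue on port_id
 * to the matching queues on peer_port.
 */
static int
setup_hairpin_pair(uint16_t port_id, uint16_t peer_port,
		   uint16_t rxq_id, uint16_t txq_id)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
	struct rte_eth_hairpin_cap cap;
	int ret;

	/* Served by the new hairpin_cap_get op: reports CPFL_MAX_NB_QUEUES,
	 * CPFL_MAX_HAIRPINQ_NB_DESC and the Rx<->Tx queue limits.
	 */
	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;

	/* The Rx hairpin queue peers with a Tx hairpin queue on the other port. */
	conf.peers[0].port = peer_port;
	conf.peers[0].queue = txq_id;
	ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq_id,
					     cap.max_nb_desc, &conf);
	if (ret != 0)
		return ret;

	/* The Tx hairpin queue peers with the Rx hairpin queue on the other port. */
	conf.peers[0].queue = rxq_id;
	return rte_eth_tx_hairpin_queue_setup(port_id, txq_id,
					      cap.max_nb_desc, &conf);
}

For cross-port hairpin the application would additionally call rte_eth_hairpin_bind() after starting both ports; that step is outside the scope of this patch.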
  

Patch

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index acc6180ca4..ebee21a82a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -159,6 +159,18 @@  cpfl_dev_link_update(struct rte_eth_dev *dev,
 	return rte_eth_linkstatus_set(dev, &new_link);
 }
 
+static int
+cpfl_hairpin_cap_get(__rte_unused struct rte_eth_dev *dev,
+		     struct rte_eth_hairpin_cap *cap)
+{
+	cap->max_nb_queues = CPFL_MAX_NB_QUEUES;
+	cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+	cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+	cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+	return 0;
+}
+
 static int
 cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -1295,6 +1307,9 @@  static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.hairpin_cap_get		= cpfl_hairpin_cap_get,
+	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
+	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
 };
 
 static uint16_t
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index c7e5487366..e59cabe893 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,11 +10,77 @@ 
 #include "cpfl_rxtx.h"
 #include "cpfl_rxtx_vec_common.h"
 
+#define CPFL_NB_MBUF		4096
+#define CPFL_CACHE_SIZE		250
+#define CPFL_MBUF_SIZE		2048
+#define CPFL_P2P_RING_BUF	128
+
 static void
 cpfl_tx_queue_release(void *txq);
 static void
 cpfl_rx_queue_release(void *txq);
 
+static inline void
+reset_tx_hairpin_descq(struct idpf_tx_queue *txq)
+{
+	uint32_t i, size;
+
+	if (!txq) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+		return;
+	}
+
+	size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+	for (i = 0; i < size; i++)
+		((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+reset_tx_hairpin_complq(struct idpf_tx_queue *cq)
+{
+	uint32_t i, size;
+
+	if (!cq) {
+		PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+		return;
+	}
+
+	size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+	for (i = 0; i < size; i++)
+		((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+reset_rx_hairpin_descq(struct idpf_rx_queue *rxq)
+{
+	uint16_t len;
+	uint32_t i;
+
+	if (!rxq)
+		return;
+
+	len = rxq->nb_rx_desc;
+	for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+		((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+reset_rx_hairpin_bufq(struct idpf_rx_queue *rxbq)
+{
+	uint16_t len;
+	uint32_t i;
+
+	if (!rxbq)
+		return;
+
+	len = rxbq->nb_rx_desc;
+	for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+		((volatile char *)rxbq->rx_ring)[i] = 0;
+
+	rxbq->bufq1 = NULL;
+	rxbq->bufq2 = NULL;
+}
+
 static uint64_t
 cpfl_rx_offload_convert(uint64_t offload)
 {
@@ -763,16 +829,25 @@  cpfl_rx_queue_release(void *rxq)
 	q = &(cpfl_rxq->base);
 
 	/* Split queue */
-	if (q->bufq1 != NULL && q->bufq2 != NULL) {
+	if (q->bufq1 != NULL) {
+		/* The mz is shared between the Tx/Rx hairpin queues, so let
+		 * the Tx queue release path free it.
+		 */
+		if (!cpfl_rxq->hairpin_info.hairpin_q) {
+			rte_memzone_free(q->bufq1->mz);
+			if (q->bufq2 != NULL)
+				rte_memzone_free(q->bufq2->mz);
+			rte_memzone_free(q->mz);
+		}
 		q->bufq1->ops->release_mbufs(q->bufq1);
 		rte_free(q->bufq1->sw_ring);
-		rte_memzone_free(q->bufq1->mz);
 		rte_free(q->bufq1);
-		q->bufq2->ops->release_mbufs(q->bufq2);
-		rte_free(q->bufq2->sw_ring);
-		rte_memzone_free(q->bufq2->mz);
-		rte_free(q->bufq2);
-		rte_memzone_free(q->mz);
+
+		if (q->bufq2 != NULL) {
+			q->bufq2->ops->release_mbufs(q->bufq2);
+			rte_free(q->bufq2->sw_ring);
+			rte_free(q->bufq2);
+		}
 		rte_free(cpfl_rxq);
 		return;
 	}
@@ -1042,3 +1117,357 @@  cpfl_set_tx_function(struct rte_eth_dev *dev)
 	}
 #endif /* RTE_ARCH_X86 */
 }
+
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+			   uint16_t queue_idx, uint16_t nb_desc,
+			   struct idpf_tx_queue *peer_txq)
+{
+	struct cpfl_vport *cpfl_vport =
+	    (struct cpfl_vport *)dev->data->dev_private;
+	struct idpf_vport *vport = &(cpfl_vport->base);
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_hw *hw = &adapter->hw;
+	const struct rte_memzone *mz;
+	struct rte_mempool *mp;
+	uint32_t ring_size;
+	char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+	mp = cpfl_vport->p2p_mp;
+	if (!mp) {
+		snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+			 dev->data->port_id);
+		mp = rte_pktmbuf_pool_create(pool_name, CPFL_NB_MBUF, CPFL_CACHE_SIZE,
+					     0, CPFL_MBUF_SIZE, dev->device->numa_node);
+		if (!mp) {
+			PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+			return -ENOMEM;
+		}
+		cpfl_vport->p2p_mp = mp;
+	}
+
+	bufq->mp = mp;
+	bufq->nb_rx_desc = nb_desc;
+	bufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;
+	bufq->port_id = dev->data->port_id;
+	bufq->adapter = adapter;
+	bufq->rx_buf_len = CPFL_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+	bufq->sw_ring = rte_zmalloc("sw ring",
+				    sizeof(struct rte_mbuf *) * nb_desc,
+				    RTE_CACHE_LINE_SIZE);
+	if (!bufq->sw_ring) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+		return -ENOMEM;
+	}
+
+	/* Hairpin Rx buffer queue and Tx completion queue share the same HW ring */
+	if (peer_txq && peer_txq->complq->mz) {
+		mz = peer_txq->complq->mz;
+		bufq->rx_ring_phys_addr = mz->iova;
+		bufq->rx_ring = mz->addr;
+		bufq->mz = mz;
+	} else {
+		ring_size = RTE_ALIGN(bufq->nb_rx_desc * CPFL_P2P_DESC_LEN,
+				      CPFL_DMA_MEM_ALIGN);
+		mz = rte_eth_dma_zone_reserve(dev, "hairpin_rx_buf_ring", queue_idx,
+					      ring_size + CPFL_P2P_RING_BUF,
+					      CPFL_RING_BASE_ALIGN,
+					      dev->device->numa_node);
+		if (!mz) {
+			PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for hairpin RX buffer queue.");
+			rte_free(bufq->sw_ring);
+			return -ENOMEM;
+		}
+
+		bufq->rx_ring_phys_addr = mz->iova;
+		bufq->rx_ring = mz->addr;
+		bufq->mz = mz;
+	}
+	reset_rx_hairpin_bufq(bufq);
+	bufq->q_set = true;
+	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
+			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+	bufq->ops = &def_rxq_ops;
+
+	return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			    uint16_t nb_desc,
+			    const struct rte_eth_hairpin_conf *conf)
+{
+	struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+	struct idpf_vport *vport = &(cpfl_vport->base);
+	struct idpf_adapter *adapter_base = vport->adapter;
+	struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(adapter_base);
+	struct cpfl_rxq_hairpin_info *hairpin_info;
+	struct cpfl_vport *peer_cpfl_vport;
+	struct rte_eth_dev_data *dev_data;
+	struct cpfl_rx_queue *cpfl_rxq;
+	struct cpfl_tx_queue *peer_txq = NULL;
+	struct idpf_vport *peer_vport;
+	struct idpf_rx_queue *bufq1 = NULL;
+	struct idpf_rx_queue *rxq;
+	uint16_t peer_port = conf->peers[0].port;
+	uint16_t peer_q = conf->peers[0].queue;
+	const struct rte_memzone *mz;
+	uint32_t ring_size;
+	uint16_t qid;
+	int ret;
+
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_INIT_LOG(ERR, "Only split queue model supports hairpin queue.");
+		return -EINVAL;
+	}
+
+	if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+	    nb_desc > CPFL_MAX_RING_DESC ||
+	    nb_desc < CPFL_MIN_RING_DESC) {
+		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+		return -EINVAL;
+	}
+
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_idx]) {
+		cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	/* Set up the Rx descriptor queue. */
+	cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+				 sizeof(struct cpfl_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 SOCKET_ID_ANY);
+	if (!cpfl_rxq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+		return -ENOMEM;
+	}
+
+	rxq = &(cpfl_rxq->base);
+	hairpin_info = &(cpfl_rxq->hairpin_info);
+	rxq->nb_rx_desc = nb_desc * 2;
+	rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+	rxq->port_id = dev->data->port_id;
+	rxq->adapter = adapter_base;
+	hairpin_info->hairpin_q = true;
+	rxq->rx_buf_len = CPFL_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+	if (peer_port != dev->data->port_id)
+		hairpin_info->hairpin_cv = true;
+	hairpin_info->peer_txp = peer_port;
+	peer_cpfl_vport = adapter->vports[peer_port];
+	peer_vport = &(peer_cpfl_vport->base);
+	dev_data = peer_vport->dev_data;
+	if (peer_q < dev_data->nb_tx_queues)
+		peer_txq = dev_data->tx_queues[peer_q];
+
+	/* Hairpin Rxq and Txq share the same HW ring */
+	if (peer_txq && peer_txq->base.mz) {
+		mz = peer_txq->base.mz;
+		rxq->rx_ring_phys_addr = mz->iova;
+		rxq->rx_ring = mz->addr;
+		rxq->mz = mz;
+	} else {
+		ring_size = RTE_ALIGN(rxq->nb_rx_desc * CPFL_P2P_DESC_LEN,
+				      CPFL_DMA_MEM_ALIGN);
+		mz = rte_eth_dma_zone_reserve(dev, "hairpin_rx_ring", queue_idx,
+					      ring_size + CPFL_P2P_RING_BUF,
+					      CPFL_RING_BASE_ALIGN,
+					      dev->device->numa_node);
+		if (!mz) {
+			PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
+			ret = -ENOMEM;
+			goto free_rxq;
+		}
+
+		rxq->rx_ring_phys_addr = mz->iova;
+		rxq->rx_ring = mz->addr;
+		rxq->mz = mz;
+	}
+	reset_rx_hairpin_descq(rxq);
+
+	/* setup 1 Rx buffer queue for 1 hairpin rxq */
+	bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+				   sizeof(struct idpf_rx_queue),
+				   RTE_CACHE_LINE_SIZE,
+				   SOCKET_ID_ANY);
+	if (!bufq1) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+		ret = -ENOMEM;
+		goto free_mz;
+	}
+	qid = 2 * queue_idx;
+	ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc, &(peer_txq->base));
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+		ret = -EINVAL;
+		goto free_bufq1;
+	}
+	rxq->bufq1 = bufq1;
+	rxq->bufq2 = NULL;
+	rxq->q_set = true;
+	dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+	return 0;
+free_bufq1:
+	rte_free(bufq1);
+free_mz:
+	rte_memzone_free(mz);
+free_rxq:
+	rte_free(cpfl_rxq);
+
+	return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			    uint16_t nb_desc,
+			    const struct rte_eth_hairpin_conf *conf)
+{
+	struct cpfl_vport *cpfl_vport =
+	    (struct cpfl_vport *)dev->data->dev_private;
+
+	struct idpf_vport *vport = &(cpfl_vport->base);
+	struct idpf_adapter *adapter_base = vport->adapter;
+	struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(adapter_base);
+	struct cpfl_txq_hairpin_info *hairpin_info;
+	struct cpfl_vport *peer_cpfl_vport;
+	struct rte_eth_dev_data *dev_data;
+	struct idpf_vport *peer_vport;
+	struct idpf_hw *hw = &adapter_base->hw;
+	struct cpfl_tx_queue *cpfl_txq;
+	struct idpf_tx_queue *txq, *cq;
+	struct idpf_rx_queue *peer_rxq = NULL;
+	const struct rte_memzone *mz;
+	uint32_t ring_size;
+	uint16_t peer_port = conf->peers[0].port;
+	uint16_t peer_q = conf->peers[0].queue;
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_INIT_LOG(ERR, "Only split queue model supports hairpin queue.");
+		return -EINVAL;
+	}
+
+	if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+	    nb_desc > CPFL_MAX_RING_DESC ||
+	    nb_desc < CPFL_MIN_RING_DESC) {
+		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+			     nb_desc);
+		return -EINVAL;
+	}
+
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_idx]) {
+		cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
+	/* Allocate the TX queue data structure. */
+	cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+				 sizeof(struct cpfl_tx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 SOCKET_ID_ANY);
+	if (!cpfl_txq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+		return -ENOMEM;
+	}
+
+	txq = &(cpfl_txq->base);
+	hairpin_info = &(cpfl_txq->hairpin_info);
+	/* Txq ring length should be twice the Tx completion queue size. */
+	txq->nb_tx_desc = nb_desc * 2;
+	txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
+	txq->port_id = dev->data->port_id;
+	hairpin_info->hairpin_q = true;
+
+	if (peer_port != dev->data->port_id)
+		cpfl_txq->hairpin_info.hairpin_cv = true;
+	hairpin_info->peer_rxp = peer_port;
+	peer_cpfl_vport = adapter->vports[peer_port];
+	peer_vport = &(peer_cpfl_vport->base);
+	hairpin_info->peer_rxq_id = peer_vport->chunks_info.rx_start_qid + conf->peers[0].queue;
+	dev_data = peer_vport->dev_data;
+	if (peer_q < dev_data->nb_rx_queues)
+		peer_rxq = dev_data->rx_queues[peer_q];
+
+	/* Hairpin Rxq and Txq share the same HW ring */
+	if (peer_rxq && peer_rxq->mz) {
+		mz = peer_rxq->mz;
+		txq->tx_ring_phys_addr = mz->iova;
+		txq->desc_ring = mz->addr;
+		txq->mz = mz;
+	} else {
+		ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+				      CPFL_DMA_MEM_ALIGN);
+		mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", queue_idx,
+					      ring_size + CPFL_P2P_RING_BUF,
+					      CPFL_RING_BASE_ALIGN,
+					      dev->device->numa_node);
+		if (!mz) {
+			PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+			rte_free(txq->sw_ring);
+			rte_free(txq);
+			return -ENOMEM;
+		}
+
+		txq->tx_ring_phys_addr = mz->iova;
+		txq->desc_ring = mz->addr;
+		txq->mz = mz;
+	}
+
+	reset_tx_hairpin_descq(txq);
+	txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
+			queue_idx * vport->chunks_info.tx_qtail_spacing);
+	txq->ops = &def_txq_ops;
+
+	/* Allocate the TX completion queue data structure. */
+	txq->complq = rte_zmalloc_socket("cpfl hairpin cq",
+					 sizeof(struct idpf_tx_queue),
+					 RTE_CACHE_LINE_SIZE,
+					 dev->device->numa_node);
+	cq = txq->complq;
+	if (!cq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for Tx completion queue structure");
+		return -ENOMEM;
+	}
+
+	cq->nb_tx_desc = nb_desc;
+	cq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx;
+	cq->port_id = dev->data->port_id;
+	hairpin_info->complq_peer_rxq_id =
+	    peer_vport->chunks_info.rx_buf_start_qid + conf->peers[0].queue * 2;
+
+	/* Hairpin Rx buffer queue and Tx completion queue share the same HW ring */
+	if (peer_rxq && peer_rxq->bufq1->mz) {
+		mz = peer_rxq->bufq1->mz;
+		cq->tx_ring_phys_addr = mz->iova;
+		cq->compl_ring = mz->addr;
+		cq->mz = mz;
+	} else {
+		ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+				      CPFL_DMA_MEM_ALIGN);
+		mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", queue_idx,
+					      ring_size + CPFL_P2P_RING_BUF,
+					      CPFL_RING_BASE_ALIGN,
+					      dev->device->numa_node);
+		if (!mz) {
+			PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+			rte_free(txq->sw_ring);
+			rte_free(txq);
+			return -ENOMEM;
+		}
+		cq->tx_ring_phys_addr = mz->iova;
+		cq->compl_ring = mz->addr;
+		cq->mz = mz;
+	}
+
+	reset_tx_hairpin_complq(cq);
+
+	txq->q_set = true;
+	dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index e241afece9..d4790d60ae 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -9,12 +9,17 @@ 
 #include "cpfl_ethdev.h"
 
 /* In QLEN must be whole number of 32 descriptors. */
-#define CPFL_ALIGN_RING_DESC	32
-#define CPFL_MIN_RING_DESC	32
-#define CPFL_MAX_RING_DESC	4096
-#define CPFL_DMA_MEM_ALIGN	4096
+#define CPFL_ALIGN_RING_DESC		32
+#define CPFL_MIN_RING_DESC		32
+#define CPFL_MAX_RING_DESC		4096
+#define CPFL_DMA_MEM_ALIGN		4096
+#define CPFL_P2P_DESC_LEN		16
+#define CPFL_MAX_HAIRPINQ_RX_2_TX	1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX	1
+#define CPFL_MAX_HAIRPINQ_NB_DESC	1024
+#define CPFL_MAX_NB_QUEUES		16
 /* Base address of the HW descriptor ring should be 128B aligned. */
-#define CPFL_RING_BASE_ALIGN	128
+#define CPFL_RING_BASE_ALIGN		128
 
 #define CPFL_DEFAULT_RX_FREE_THRESH	32
 
@@ -69,4 +74,11 @@  void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void cpfl_set_rx_function(struct rte_eth_dev *dev);
 void cpfl_set_tx_function(struct rte_eth_dev *dev);
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			    uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			    uint16_t nb_desc,
+			    const struct rte_eth_hairpin_conf *conf);
 #endif /* _CPFL_RXTX_H_ */