[1/2] raw/cnxk_bphy: add wrappers for available message types

Message ID 20211001201941.2068819-2-tduszynski@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Jerin Jacob
Headers
Series raw/cnxk_bphy: sync with upstream |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-testing warning apply patch failure

Commit Message

Tomasz Duszynski Oct. 1, 2021, 8:19 p.m. UTC
  One sends commands and receives responses by enqueuing or dequeuing
custom messages. In order to simplify this scheme, simple wrappers
were added which take care of all the heavy lifting.

Signed-off-by: Tomasz Duszynski <tduszynski@marvell.com>
Reviewed-by: Jakub Palider <jpalider@marvell.com>
Reviewed-by: Jerin Jacob Kollanukkaran <jerinj@marvell.com>
---
 drivers/raw/cnxk_bphy/cnxk_bphy.c          |  62 ++++--
 drivers/raw/cnxk_bphy/cnxk_bphy_cgx_test.c | 125 ++---------
 drivers/raw/cnxk_bphy/rte_pmd_bphy.h       | 245 +++++++++++++++------
 3 files changed, 236 insertions(+), 196 deletions(-)
  

Patch

diff --git a/drivers/raw/cnxk_bphy/cnxk_bphy.c b/drivers/raw/cnxk_bphy/cnxk_bphy.c
index 4b7669b3e6..6bfda71761 100644
--- a/drivers/raw/cnxk_bphy/cnxk_bphy.c
+++ b/drivers/raw/cnxk_bphy/cnxk_bphy.c
@@ -45,6 +45,7 @@  static int
 bphy_rawdev_selftest(uint16_t dev_id)
 {
 	unsigned int i, queues, descs;
+	uint16_t pf_func;
 	uint64_t max_irq;
 	int ret;
 
@@ -68,12 +69,12 @@  bphy_rawdev_selftest(uint16_t dev_id)
 		goto err_desc;
 	}
 
-	ret = rte_pmd_bphy_npa_pf_func_get(dev_id);
-	if (ret == 0)
+	ret = rte_pmd_bphy_npa_pf_func_get(dev_id, &pf_func);
+	if (ret || pf_func == 0)
 		plt_warn("NPA pf_func is invalid");
 
-	ret = rte_pmd_bphy_sso_pf_func_get(dev_id);
-	if (ret == 0)
+	ret = rte_pmd_bphy_sso_pf_func_get(dev_id, &pf_func);
+	if (ret || pf_func == 0)
 		plt_warn("SSO pf_func is invalid");
 
 	ret = rte_pmd_bphy_intr_init(dev_id);
@@ -169,9 +170,13 @@  cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
 {
 	struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
 	struct cnxk_bphy_irq_msg *msg = buffers[0]->buf_addr;
+	struct bphy_irq_queue *qp = &bphy_dev->queues[0];
 	unsigned int queue = (size_t)context;
 	struct cnxk_bphy_irq_info *info;
-	int ret = 0;
+	struct cnxk_bphy_mem *mem;
+	uint16_t *pf_func;
+	void *rsp = NULL;
+	int ret;
 
 	if (queue >= RTE_DIM(bphy_dev->queues))
 		return -EINVAL;
@@ -182,6 +187,8 @@  cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
 	switch (msg->type) {
 	case CNXK_BPHY_IRQ_MSG_TYPE_INIT:
 		ret = cnxk_bphy_intr_init(dev->dev_id);
+		if (ret)
+			return ret;
 		break;
 	case CNXK_BPHY_IRQ_MSG_TYPE_FINI:
 		cnxk_bphy_intr_fini(dev->dev_id);
@@ -191,27 +198,49 @@  cnxk_bphy_irq_enqueue_bufs(struct rte_rawdev *dev,
 		ret = cnxk_bphy_intr_register(dev->dev_id, info->irq_num,
 					      info->handler, info->data,
 					      info->cpu);
+		if (ret)
+			return ret;
 		break;
 	case CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER:
 		info = (struct cnxk_bphy_irq_info *)msg->data;
 		cnxk_bphy_intr_unregister(dev->dev_id, info->irq_num);
 		break;
 	case CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET:
-		bphy_dev->queues[queue].rsp = &bphy_dev->mem;
+		mem = rte_zmalloc(NULL, sizeof(*mem), 0);
+		if (!mem)
+			return -ENOMEM;
+
+		*mem = bphy_dev->mem;
+		rsp = mem;
 		break;
 	case CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC:
-		bphy_dev->queues[queue].rsp =
-			(void *)(size_t)roc_bphy_npa_pf_func_get();
+		pf_func = rte_malloc(NULL, sizeof(*pf_func), 0);
+		if (!pf_func)
+			return -ENOMEM;
+
+		*pf_func = roc_bphy_npa_pf_func_get();
+		rsp = pf_func;
 		break;
 	case CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC:
-		bphy_dev->queues[queue].rsp =
-			(void *)(size_t)roc_bphy_sso_pf_func_get();
+		pf_func = rte_malloc(NULL, sizeof(*pf_func), 0);
+		if (!pf_func)
+			return -ENOMEM;
+
+		*pf_func = roc_bphy_sso_pf_func_get();
+		rsp = pf_func;
 		break;
 	default:
-		ret = -EINVAL;
+		return -EINVAL;
 	}
 
-	return ret;
+	/* get rid of last response if any */
+	if (qp->rsp) {
+		RTE_LOG(WARNING, PMD, "Previous response got overwritten\n");
+		rte_free(qp->rsp);
+	}
+	qp->rsp = rsp;
+
+	return 1;
 }
 
 static int
@@ -221,6 +250,7 @@  cnxk_bphy_irq_dequeue_bufs(struct rte_rawdev *dev,
 {
 	struct bphy_device *bphy_dev = (struct bphy_device *)dev->dev_private;
 	unsigned int queue = (size_t)context;
+	struct bphy_irq_queue *qp;
 
 	if (queue >= RTE_DIM(bphy_dev->queues))
 		return -EINVAL;
@@ -228,7 +258,13 @@  cnxk_bphy_irq_dequeue_bufs(struct rte_rawdev *dev,
 	if (count == 0)
 		return 0;
 
-	buffers[0]->buf_addr = bphy_dev->queues[queue].rsp;
+	qp = &bphy_dev->queues[queue];
+	if (qp->rsp) {
+		buffers[0]->buf_addr = qp->rsp;
+		qp->rsp = NULL;
+
+		return 1;
+	}
 
 	return 0;
 }
diff --git a/drivers/raw/cnxk_bphy/cnxk_bphy_cgx_test.c b/drivers/raw/cnxk_bphy/cnxk_bphy_cgx_test.c
index d96e98661f..a3021b4bb7 100644
--- a/drivers/raw/cnxk_bphy/cnxk_bphy_cgx_test.c
+++ b/drivers/raw/cnxk_bphy/cnxk_bphy_cgx_test.c
@@ -11,73 +11,20 @@ 
 #include "cnxk_bphy_cgx.h"
 #include "rte_pmd_bphy.h"
 
-static int
-cnxk_bphy_cgx_enq_msg(uint16_t dev_id, unsigned int queue, void *msg)
-{
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
-	void *q;
-	int ret;
-
-	q = (void *)(size_t)queue;
-	buf.buf_addr = msg;
-	bufs[0] = &buf;
-
-	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, q);
-	if (ret < 0)
-		return ret;
-	if (ret != 1)
-		return -EIO;
-
-	return 0;
-}
-
-static int
-cnxk_bphy_cgx_deq_msg(uint16_t dev_id, unsigned int queue, void **msg)
-{
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
-	void *q;
-	int ret;
-
-	q = (void *)(size_t)queue;
-	bufs[0] = &buf;
-
-	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, q);
-	if (ret < 0)
-		return ret;
-	if (ret != 1)
-		return -EIO;
-
-	*msg = buf.buf_addr;
-
-	return 0;
-}
-
 static int
 cnxk_bphy_cgx_link_cond(uint16_t dev_id, unsigned int queue, int cond)
 {
+	struct cnxk_bphy_cgx_msg_link_info link_info;
 	int tries = 10, ret;
 
 	do {
-		struct cnxk_bphy_cgx_msg_link_info *link_info = NULL;
-		struct cnxk_bphy_cgx_msg msg;
-
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, queue, &msg);
-		if (ret)
-			return ret;
-
-		ret = cnxk_bphy_cgx_deq_msg(dev_id, queue, (void **)&link_info);
+		ret = rte_pmd_bphy_cgx_get_link_info(dev_id, queue, &link_info);
 		if (ret)
 			return ret;
 
-		if (link_info->link_up == cond) {
-			rte_free(link_info);
+		if (link_info.link_up == cond)
 			break;
-		}
 
-		rte_free(link_info);
 		rte_delay_ms(500);
 	} while (--tries);
 
@@ -87,22 +34,6 @@  cnxk_bphy_cgx_link_cond(uint16_t dev_id, unsigned int queue, int cond)
 	return -ETIMEDOUT;
 }
 
-static int
-cnxk_bphy_cgx_get_supported_fec(uint16_t dev_id, unsigned int queue,
-				enum cnxk_bphy_cgx_eth_link_fec *fec)
-{
-	struct cnxk_bphy_cgx_msg msg = {
-		.type = CNXK_BPHY_CGX_MSG_TYPE_GET_SUPPORTED_FEC,
-	};
-	int ret;
-
-	ret = cnxk_bphy_cgx_enq_msg(dev_id, queue, &msg);
-	if (ret)
-		return ret;
-
-	return cnxk_bphy_cgx_deq_msg(dev_id, queue, (void **)&fec);
-}
-
 int
 cnxk_bphy_cgx_dev_selftest(uint16_t dev_id)
 {
@@ -118,9 +49,7 @@  cnxk_bphy_cgx_dev_selftest(uint16_t dev_id)
 		return ret;
 
 	for (i = 0; i < queues; i++) {
-		struct cnxk_bphy_cgx_msg_set_link_state link_state;
 		enum cnxk_bphy_cgx_eth_link_fec fec;
-		struct cnxk_bphy_cgx_msg msg;
 		unsigned int descs;
 
 		ret = rte_rawdev_queue_conf_get(dev_id, i, &descs,
@@ -135,27 +64,19 @@  cnxk_bphy_cgx_dev_selftest(uint16_t dev_id)
 
 		RTE_LOG(INFO, PMD, "Testing queue %d\n", i);
 
-		/* stop rx/tx */
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, i, &msg);
+		ret = rte_pmd_bphy_cgx_stop_rxtx(dev_id, i);
 		if (ret) {
 			RTE_LOG(ERR, PMD, "Failed to stop rx/tx\n");
 			break;
 		}
 
-		/* start rx/tx */
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_START_RXTX;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, i, &msg);
+		ret = rte_pmd_bphy_cgx_start_rxtx(dev_id, i);
 		if (ret) {
 			RTE_LOG(ERR, PMD, "Failed to start rx/tx\n");
 			break;
 		}
 
-		/* set link down */
-		link_state.state = false;
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE;
-		msg.data = &link_state;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, i, &msg);
+		ret = rte_pmd_bphy_cgx_set_link_state(dev_id, i, false);
 		if (ret) {
 			RTE_LOG(ERR, PMD, "Failed to set link down\n");
 			break;
@@ -166,11 +87,7 @@  cnxk_bphy_cgx_dev_selftest(uint16_t dev_id)
 			RTE_LOG(ERR, PMD,
 				"Timed out waiting for a link down\n");
 
-		/* set link up */
-		link_state.state = true;
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE;
-		msg.data = &link_state;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, i, &msg);
+		ret = rte_pmd_bphy_cgx_set_link_state(dev_id, i, true);
 		if (ret) {
 			RTE_LOG(ERR, PMD, "Failed to set link up\n");
 			break;
@@ -180,25 +97,19 @@  cnxk_bphy_cgx_dev_selftest(uint16_t dev_id)
 		if (ret != 1)
 			RTE_LOG(ERR, PMD, "Timed out waiting for a link up\n");
 
-		/* enable internal loopback */
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, i, &msg);
+		ret = rte_pmd_bphy_cgx_intlbk_enable(dev_id, i);
 		if (ret) {
 			RTE_LOG(ERR, PMD, "Failed to enable internal lbk\n");
 			break;
 		}
 
-		/* disable internal loopback */
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, i, &msg);
+		ret = rte_pmd_bphy_cgx_intlbk_disable(dev_id, i);
 		if (ret) {
 			RTE_LOG(ERR, PMD, "Failed to disable internal lbk\n");
 			break;
 		}
 
-		/* enable ptp */
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, i, &msg);
+		ret = rte_pmd_bphy_cgx_ptp_rx_enable(dev_id, i);
 		/* ptp not available on RPM */
 		if (ret < 0 && ret != -ENOTSUP) {
 			RTE_LOG(ERR, PMD, "Failed to enable ptp\n");
@@ -206,9 +117,7 @@  cnxk_bphy_cgx_dev_selftest(uint16_t dev_id)
 		}
 		ret = 0;
 
-		/* disable ptp */
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, i, &msg);
+		ret = rte_pmd_bphy_cgx_ptp_rx_disable(dev_id, i);
 		/* ptp not available on RPM */
 		if (ret < 0 && ret != -ENOTSUP) {
 			RTE_LOG(ERR, PMD, "Failed to disable ptp\n");
@@ -216,26 +125,20 @@  cnxk_bphy_cgx_dev_selftest(uint16_t dev_id)
 		}
 		ret = 0;
 
-		ret = cnxk_bphy_cgx_get_supported_fec(dev_id, i, &fec);
+		ret = rte_pmd_bphy_cgx_get_supported_fec(dev_id, i, &fec);
 		if (ret) {
 			RTE_LOG(ERR, PMD, "Failed to get supported FEC\n");
 			break;
 		}
 
-		/* set supported fec */
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_SET_FEC;
-		msg.data = &fec;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, i, &msg);
+		ret = rte_pmd_bphy_cgx_set_fec(dev_id, i, fec);
 		if (ret) {
 			RTE_LOG(ERR, PMD, "Failed to set FEC to %d\n", fec);
 			break;
 		}
 
-		/* disable fec */
 		fec = CNXK_BPHY_CGX_ETH_LINK_FEC_NONE;
-		msg.type = CNXK_BPHY_CGX_MSG_TYPE_SET_FEC;
-		msg.data = &fec;
-		ret = cnxk_bphy_cgx_enq_msg(dev_id, i, &msg);
+		ret = rte_pmd_bphy_cgx_set_fec(dev_id, i, fec);
 		if (ret) {
 			RTE_LOG(ERR, PMD, "Failed to disable FEC\n");
 			break;
diff --git a/drivers/raw/cnxk_bphy/rte_pmd_bphy.h b/drivers/raw/cnxk_bphy/rte_pmd_bphy.h
index bcd165f41c..4458342287 100644
--- a/drivers/raw/cnxk_bphy/rte_pmd_bphy.h
+++ b/drivers/raw/cnxk_bphy/rte_pmd_bphy.h
@@ -5,6 +5,8 @@ 
 #ifndef _CNXK_BPHY_H_
 #define _CNXK_BPHY_H_
 
+#include <rte_memcpy.h>
+
 #include "cnxk_bphy_irq.h"
 
 enum cnxk_bphy_cgx_msg_type {
@@ -138,33 +140,59 @@  struct cnxk_bphy_irq_info {
 };
 
 static __rte_always_inline int
-rte_pmd_bphy_intr_init(uint16_t dev_id)
+__rte_pmd_bphy_enq_deq(uint16_t dev_id, unsigned int queue, void *req,
+		       void *rsp, size_t rsp_size)
 {
-	struct cnxk_bphy_irq_msg msg = {
-		.type = CNXK_BPHY_IRQ_MSG_TYPE_INIT,
-	};
 	struct rte_rawdev_buf *bufs[1];
 	struct rte_rawdev_buf buf;
+	void *q;
+	int ret;
 
-	buf.buf_addr = &msg;
+	q = (void *)(size_t)queue;
+	buf.buf_addr = req;
 	bufs[0] = &buf;
 
-	return rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
+	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, RTE_DIM(bufs), q);
+	if (ret < 0)
+		return ret;
+	if (ret != RTE_DIM(bufs))
+		return -EIO;
+
+	if (!rsp)
+		return 0;
+
+	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, RTE_DIM(bufs), q);
+	if (ret < 0)
+		return ret;
+	if (ret != RTE_DIM(bufs))
+		return -EIO;
+
+	rte_memcpy(rsp, buf.buf_addr, rsp_size);
+	rte_free(buf.buf_addr);
+
+	return 0;
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_intr_init(uint16_t dev_id)
+{
+	struct cnxk_bphy_irq_msg msg = {
+		.type = CNXK_BPHY_IRQ_MSG_TYPE_INIT,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      NULL, 0);
 }
 
-static __rte_always_inline void
+static __rte_always_inline int
 rte_pmd_bphy_intr_fini(uint16_t dev_id)
 {
 	struct cnxk_bphy_irq_msg msg = {
 		.type = CNXK_BPHY_IRQ_MSG_TYPE_FINI,
 	};
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
 
-	buf.buf_addr = &msg;
-	bufs[0] = &buf;
-
-	rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      NULL, 0);
 }
 
 static __rte_always_inline int
@@ -182,16 +210,12 @@  rte_pmd_bphy_intr_register(uint16_t dev_id, int irq_num,
 		.type = CNXK_BPHY_IRQ_MSG_TYPE_REGISTER,
 		.data = &info
 	};
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
 
-	buf.buf_addr = &msg;
-	bufs[0] = &buf;
-
-	return rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      NULL, 0);
 }
 
-static __rte_always_inline void
+static __rte_always_inline int
 rte_pmd_bphy_intr_unregister(uint16_t dev_id, int irq_num)
 {
 	struct cnxk_bphy_irq_info info = {
@@ -201,85 +225,162 @@  rte_pmd_bphy_intr_unregister(uint16_t dev_id, int irq_num)
 		.type = CNXK_BPHY_IRQ_MSG_TYPE_UNREGISTER,
 		.data = &info
 	};
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
-
-	buf.buf_addr = &msg;
-	bufs[0] = &buf;
 
-	rte_rawdev_enqueue_buffers(dev_id, bufs, 1, 0);
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      NULL, 0);
 }
 
-static __rte_always_inline struct cnxk_bphy_mem *
-rte_pmd_bphy_intr_mem_get(uint16_t dev_id)
+static __rte_always_inline int
+rte_pmd_bphy_intr_mem_get(uint16_t dev_id, struct cnxk_bphy_mem *mem)
 {
 	struct cnxk_bphy_irq_msg msg = {
 		.type = CNXK_BPHY_IRQ_MSG_TYPE_MEM_GET,
 	};
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
-	int ret;
 
-	buf.buf_addr = &msg;
-	bufs[0] = &buf;
-
-	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return NULL;
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      mem, sizeof(*mem));
+}
 
-	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return NULL;
+static __rte_always_inline int
+rte_pmd_bphy_npa_pf_func_get(uint16_t dev_id, uint16_t *pf_func)
+{
+	struct cnxk_bphy_irq_msg msg = {
+		.type = CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC,
+	};
 
-	return buf.buf_addr;
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      pf_func, sizeof(*pf_func));
 }
 
-static __rte_always_inline uint16_t
-rte_pmd_bphy_npa_pf_func_get(uint16_t dev_id)
+static __rte_always_inline int
+rte_pmd_bphy_sso_pf_func_get(uint16_t dev_id, uint16_t *pf_func)
 {
 	struct cnxk_bphy_irq_msg msg = {
-		.type = CNXK_BPHY_MSG_TYPE_NPA_PF_FUNC,
+		.type = CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC,
 	};
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
-	int ret;
 
-	buf.buf_addr = &msg;
-	bufs[0] = &buf;
+	return __rte_pmd_bphy_enq_deq(dev_id, CNXK_BPHY_DEF_QUEUE, &msg,
+				      pf_func, sizeof(*pf_func));
+}
 
-	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return 0;
+static __rte_always_inline int
+rte_pmd_bphy_cgx_get_link_info(uint16_t dev_id, uint16_t lmac,
+			       struct cnxk_bphy_cgx_msg_link_info *info)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_GET_LINKINFO,
+	};
 
-	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return 0;
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, info, sizeof(*info));
+}
 
-	return (uint16_t)(size_t)buf.buf_addr;
+static __rte_always_inline int
+rte_pmd_bphy_cgx_intlbk_disable(uint16_t dev_id, uint16_t lmac)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_INTLBK_DISABLE,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
 }
 
-static __rte_always_inline uint16_t
-rte_pmd_bphy_sso_pf_func_get(uint16_t dev_id)
+static __rte_always_inline int
+rte_pmd_bphy_cgx_intlbk_enable(uint16_t dev_id, uint16_t lmac)
 {
-	struct cnxk_bphy_irq_msg msg = {
-		.type = CNXK_BPHY_MSG_TYPE_SSO_PF_FUNC,
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_INTLBK_ENABLE,
 	};
-	struct rte_rawdev_buf *bufs[1];
-	struct rte_rawdev_buf buf;
-	int ret;
 
-	buf.buf_addr = &msg;
-	bufs[0] = &buf;
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
 
-	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return 0;
+static __rte_always_inline int
+rte_pmd_bphy_cgx_ptp_rx_disable(uint16_t dev_id, uint16_t lmac)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_DISABLE,
+	};
 
-	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, 1, CNXK_BPHY_DEF_QUEUE);
-	if (ret)
-		return 0;
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_ptp_rx_enable(uint16_t dev_id, uint16_t lmac)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_PTP_RX_ENABLE,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_set_link_mode(uint16_t dev_id, uint16_t lmac,
+			       struct cnxk_bphy_cgx_msg_link_mode *mode)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_MODE,
+		.data = mode,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_set_link_state(uint16_t dev_id, uint16_t lmac, bool up)
+{
+	struct cnxk_bphy_cgx_msg_set_link_state state = {
+		.state = up,
+	};
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_SET_LINK_STATE,
+		.data = &state,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_start_rxtx(uint16_t dev_id, uint16_t lmac)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_START_RXTX,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_stop_rxtx(uint16_t dev_id, uint16_t lmac)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_STOP_RXTX,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_get_supported_fec(uint16_t dev_id, uint16_t lmac,
+				   enum cnxk_bphy_cgx_eth_link_fec *fec)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_GET_SUPPORTED_FEC,
+	};
+
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, fec, sizeof(*fec));
+}
+
+static __rte_always_inline int
+rte_pmd_bphy_cgx_set_fec(uint16_t dev_id, uint16_t lmac,
+			 enum cnxk_bphy_cgx_eth_link_fec fec)
+{
+	struct cnxk_bphy_cgx_msg msg = {
+		.type = CNXK_BPHY_CGX_MSG_TYPE_SET_FEC,
+		.data = &fec,
+	};
 
-	return (uint16_t)(size_t)buf.buf_addr;
+	return __rte_pmd_bphy_enq_deq(dev_id, lmac, &msg, NULL, 0);
 }
 
 #endif /* _CNXK_BPHY_H_ */