[8/9] raw/cnxk_rvu_lf: process mailbox message

Message ID: 20240907193311.1342310-9-gakhil@marvell.com (mailing list archive)
State: Changes Requested
Delegated to: Jerin Jacob
Series: drivers/raw: introduce cnxk rvu lf device driver

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Akhil Goyal Sept. 7, 2024, 7:33 p.m. UTC
Add PMD API rte_pmd_rvu_lf_msg_process() to process
mailbox messages between rvu_lf devices (PF and VF).

Signed-off-by: Akhil Goyal <gakhil@marvell.com>
---
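A minimal usage sketch of the new API from a VF application is below,
assuming an application-defined message ID range and request/response
layout; APP_MSG_ID_* and the app_ping_* names are illustrative, not
part of the driver:

#include <errno.h>
#include <stdint.h>

#include <rte_pmd_rvu_lf.h>

/* Application-defined mailbox protocol (illustrative values). */
#define APP_MSG_ID_FROM 0x3000
#define APP_MSG_ID_TO   0x30ff
#define APP_MSG_ID_PING 0x3001

struct app_ping_req { uint32_t seq; };
struct app_ping_rsp { uint32_t seq; };

static int
app_vf_ping(uint8_t dev_id)
{
	struct app_ping_req req = { .seq = 42 };
	struct app_ping_rsp rsp = { 0 };
	int rc;

	/* Restrict mailbox processing to the application's ID range. */
	rc = rte_pmd_rvu_lf_msg_id_range_set(dev_id, APP_MSG_ID_FROM, APP_MSG_ID_TO);
	if (rc < 0)
		return rc;

	/*
	 * VF context: the 'vf' argument is ignored for a VF->PF message.
	 * Request and response buffers are allocated and owned by the
	 * application.
	 */
	rc = rte_pmd_rvu_lf_msg_process(dev_id, 0, APP_MSG_ID_PING,
					&req, sizeof(req), &rsp, sizeof(rsp));
	if (rc < 0)
		return rc;

	return rsp.seq == req.seq ? 0 : -EBADMSG;
}

On the PF side, a handler registered with
rte_pmd_rvu_lf_msg_handler_register() receives the request for the same
message id and fills in the response.
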
 doc/guides/rawdevs/cnxk_rvu_lf.rst       |   8 ++
 drivers/common/cnxk/roc_dev.c            | 118 +++++++++++++++++++++--
 drivers/common/cnxk/roc_mbox.h           |   1 +
 drivers/common/cnxk/roc_rvu_lf.c         |  58 +++++++++++
 drivers/common/cnxk/roc_rvu_lf.h         |   5 +
 drivers/common/cnxk/roc_rvu_lf_priv.h    |   5 +
 drivers/common/cnxk/version.map          |   2 +
 drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.c    |  15 +++
 drivers/raw/cnxk_rvu_lf/rte_pmd_rvu_lf.h |  29 ++++++
 9 files changed, 235 insertions(+), 6 deletions(-)
  

Patch

diff --git a/doc/guides/rawdevs/cnxk_rvu_lf.rst b/doc/guides/rawdevs/cnxk_rvu_lf.rst
index 7f193e232c..7b321abd38 100644
--- a/doc/guides/rawdevs/cnxk_rvu_lf.rst
+++ b/doc/guides/rawdevs/cnxk_rvu_lf.rst
@@ -72,3 +72,11 @@  can register callbacks using ``rte_pmd_rvu_lf_msg_handler_register()``
 and fill required responses as per the request and message id received.
 Application can also unregister already registered message callbacks using
 ``rte_pmd_rvu_lf_msg_handler_unregister()``.
+
+The PMD API ``rte_pmd_rvu_lf_msg_process()`` is added to send a request and
+receive the corresponding response from the other side (PF/VF).
+It accepts an opaque pointer to an application-defined request along with its size,
+and returns the response in an opaque pointer of the given length.
+PF and VF applications can define their own requests and responses based on
+the message id of the mailbox.
+For sample usage of these APIs, refer to ``rvu_lf_rawdev_selftest()``.
diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index daf7684d8e..806d34143b 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -215,6 +215,51 @@  af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
 	return req_hdr->num_msgs;
 }
 
+static int
+process_rvu_lf_msgs(struct dev *dev, uint16_t vf, struct mbox_msghdr *msg, size_t size)
+{
+	uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
+	uint8_t req[MBOX_MSG_REQ_SIZE_MAX];
+	struct msg_rsp *rsp;
+	uint16_t rsp_len;
+	void *resp;
+	int rc = 0;
+
+	/* Handle RVU LF mailbox message in PF */
+	dev->active_vfs[vf / max_bits] |= BIT_ULL(vf % max_bits);
+
+	if ((size - sizeof(struct mbox_msghdr)) > MBOX_MSG_REQ_SIZE_MAX) {
+		plt_err("MBOX request size greater than %d", MBOX_MSG_REQ_SIZE_MAX);
+		return -1;
+	}
+	mbox_memcpy(req, (uint8_t *)msg + sizeof(struct mbox_msghdr),
+		    size - sizeof(struct mbox_msghdr));
+
+	rc = dev->ops->msg_process_cb(dev_get_vf(msg->pcifunc), msg->id, req,
+				      size - sizeof(struct mbox_msghdr), &resp, &rsp_len);
+	if (rc < 0) {
+		plt_err("Failed to process VF%d  message", vf);
+		return -1;
+	}
+
+	rsp = (struct msg_rsp *)mbox_alloc_msg(&dev->mbox_vfpf, vf,
+					       rsp_len + sizeof(struct mbox_msghdr));
+	if (!rsp) {
+		plt_err("Failed to alloc VF%d response message", vf);
+		return -1;
+	}
+
+	mbox_rsp_init(msg->id, rsp);
+
+	mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr), resp, rsp_len);
+	free(resp);
+	/* PF/VF function ID */
+	rsp->hdr.pcifunc = msg->pcifunc;
+	rsp->hdr.rc = 0;
+
+	return 0;
+}
+
 /* PF receives mbox DOWN messages from VF and forwards to AF */
 static int
 vf_pf_process_msgs(struct dev *dev, uint16_t vf)
@@ -261,6 +306,9 @@  vf_pf_process_msgs(struct dev *dev, uint16_t vf)
 			/* PF/VF function ID */
 			rsp->hdr.pcifunc = msg->pcifunc;
 			rsp->hdr.rc = 0;
+		} else if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id)) {
+			if (process_rvu_lf_msgs(dev, vf, msg, size) < 0)
+				continue;
 		} else {
 			struct mbox_msghdr *af_req;
 			/* Reserve AF/PF mbox message */
@@ -339,8 +387,13 @@  vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
 				     dev_get_vf(msg->pcifunc));
 			break;
 		default:
-			plt_err("Not handled UP msg 0x%x (%s) func:0x%x",
-				msg->id, mbox_id2name(msg->id), msg->pcifunc);
+			if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id))
+				plt_base_dbg("PF: Msg 0x%x fn:0x%x (pf:%d,vf:%d)",
+					     msg->id, msg->pcifunc, dev_get_pf(msg->pcifunc),
+					     dev_get_vf(msg->pcifunc));
+			else
+				plt_err("Not handled UP msg 0x%x (%s) func:0x%x",
+					msg->id, mbox_id2name(msg->id), msg->pcifunc);
 		}
 		offset = mbox->rx_start + msg->next_msgoff;
 	}
@@ -789,6 +842,50 @@  mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
 	return -ENODEV;
 }
 
+static int
+process_rvu_lf_msgs_up(struct dev *dev, struct mbox_msghdr *msg, size_t size)
+{
+	uint8_t req[MBOX_MSG_REQ_SIZE_MAX];
+	struct msg_rsp *rsp;
+	uint16_t rsp_len;
+	void *resp;
+	int rc = 0;
+
+	/* Validate the request signature; bail out if invalid */
+	if (msg->sig != MBOX_REQ_SIG)
+		return -EIO;
+
+	if ((size - sizeof(struct mbox_msghdr)) > MBOX_MSG_REQ_SIZE_MAX) {
+		plt_err("MBOX request size greater than %d", MBOX_MSG_REQ_SIZE_MAX);
+		return -ENOMEM;
+	}
+	mbox_memcpy(req, (uint8_t *)msg + sizeof(struct mbox_msghdr),
+		    size - sizeof(struct mbox_msghdr));
+	rc = dev->ops->msg_process_cb(dev_get_vf(msg->pcifunc), msg->id, req,
+				      size - sizeof(struct mbox_msghdr), &resp, &rsp_len);
+	if (rc < 0) {
+		plt_err("Failed to process VF%d  message", dev->vf);
+		return rc;
+	}
+
+	rsp = (struct msg_rsp *)mbox_alloc_msg(&dev->mbox_up, 0,
+					       rsp_len + sizeof(struct mbox_msghdr));
+	if (!rsp) {
+		plt_err("Failed to alloc VF%d response message", dev->vf);
+		return -ENOMEM;
+	}
+
+	mbox_rsp_init(msg->id, rsp);
+
+	mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr), resp, rsp_len);
+	free(resp);
+	/* PF/VF function ID */
+	rsp->hdr.pcifunc = msg->pcifunc;
+	rsp->hdr.rc = 0;
+
+	return rc;
+}
+
 /* Received up messages from AF (PF context) / PF (in context) */
 static void
 process_msgs_up(struct dev *dev, struct mbox *mbox)
@@ -797,6 +893,7 @@  process_msgs_up(struct dev *dev, struct mbox *mbox)
 	struct mbox_hdr *req_hdr;
 	struct mbox_msghdr *msg;
 	int i, err, offset;
+	size_t size;
 
 	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
 	if (req_hdr->num_msgs == 0)
@@ -809,10 +906,17 @@  process_msgs_up(struct dev *dev, struct mbox *mbox)
 		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
 			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
 			     dev_get_vf(msg->pcifunc));
-		err = mbox_process_msgs_up(dev, msg);
-		if (err)
-			plt_err("Error %d handling 0x%x (%s)", err, msg->id,
-				mbox_id2name(msg->id));
+		if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id)) {
+			size = mbox->rx_start + msg->next_msgoff - offset;
+			err = process_rvu_lf_msgs_up(dev, msg, size);
+			if (err)
+				plt_err("Error %d handling 0x%x RVU_LF up msg", err, msg->id);
+		} else {
+			err = mbox_process_msgs_up(dev, msg);
+			if (err)
+				plt_err("Error %d handling 0x%x (%s)", err, msg->id,
+					mbox_id2name(msg->id));
+		}
 		offset = mbox->rx_start + msg->next_msgoff;
 	}
 	/* Send mbox responses */
@@ -1272,6 +1376,7 @@  dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
 	case PCI_DEVID_CNXK_RVU_VF:
 	case PCI_DEVID_CNXK_RVU_SDP_VF:
 	case PCI_DEVID_CNXK_RVU_NIX_INL_VF:
+	case PCI_DEVID_CNXK_RVU_BPHY_VF:
 	case PCI_DEVID_CNXK_RVU_ESWITCH_VF:
 		dev->hwcap |= DEV_HWCAP_F_VF;
 		break;
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 3018a4f73e..ea806ea6e8 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -55,6 +55,7 @@  struct mbox_msghdr {
 #define MBOX_MSG_INVALID 0xFFFE
 #define MBOX_MSG_MAX	 0xFFFF
 #define MBOX_MSG_GENERIC_MAX_ID 0x1FF
+#define MBOX_MSG_REQ_SIZE_MAX	(16 * 1024)
 
 #define MBOX_MESSAGES                                                          \
 	/* Generic mbox IDs (range 0x000 - 0x1FF) */                           \
diff --git a/drivers/common/cnxk/roc_rvu_lf.c b/drivers/common/cnxk/roc_rvu_lf.c
index 1026ccc125..471dfa7a46 100644
--- a/drivers/common/cnxk/roc_rvu_lf.c
+++ b/drivers/common/cnxk/roc_rvu_lf.c
@@ -62,6 +62,15 @@  roc_rvu_lf_dev_fini(struct roc_rvu_lf *roc_rvu_lf)
 	return dev_fini(&rvu->dev, rvu->pci_dev);
 }
 
+static uint16_t
+roc_rvu_lf_pf_func_get(struct roc_rvu_lf *roc_rvu_lf)
+{
+	struct rvu_lf *rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf);
+	struct dev *dev = &rvu->dev;
+
+	return dev->pf_func;
+}
+
 int
 roc_rvu_lf_msg_id_range_set(struct roc_rvu_lf *roc_rvu_lf, uint16_t from, uint16_t to)
 {
@@ -92,6 +101,55 @@  roc_rvu_lf_msg_id_range_check(struct roc_rvu_lf *roc_rvu_lf, uint16_t msg_id)
 	return 0;
 }
 
+int
+roc_rvu_lf_msg_process(struct roc_rvu_lf *roc_rvu_lf, uint16_t vf, uint16_t msg_id,
+		       void *req_data, uint16_t req_len, void *rsp_data, uint16_t rsp_len)
+{
+	struct rvu_lf *rvu = roc_rvu_lf_to_rvu_priv(roc_rvu_lf);
+	struct mbox *mbox;
+	struct rvu_lf_msg *req;
+	struct rvu_lf_msg *rsp;
+	int rc = -ENOSPC;
+	int devid = 0;
+
+	if (rvu->dev.vf == -1 && roc_rvu_lf_msg_id_range_check(roc_rvu_lf, msg_id)) {
+		/* This is PF context */
+		if (vf >= rvu->dev.maxvf)
+			return -EINVAL;
+		devid = vf;
+		mbox = mbox_get(&rvu->dev.mbox_vfpf_up);
+	} else {
+		/* This is VF context */
+		devid = 0; /* VF sends all messages to PF */
+		mbox = mbox_get(rvu->dev.mbox);
+	}
+	req = (struct rvu_lf_msg *)mbox_alloc_msg_rsp(mbox, devid,
+						      req_len + sizeof(struct rvu_lf_msg),
+						      rsp_len + sizeof(struct rvu_lf_msg));
+	if (!req)
+		goto fail;
+	mbox_memcpy(req->data, req_data, req_len);
+	req->hdr.sig = MBOX_REQ_SIG;
+	req->hdr.id = msg_id;
+	req->hdr.pcifunc = roc_rvu_lf_pf_func_get(roc_rvu_lf);
+
+	if (rvu->dev.vf == -1) {
+		mbox_msg_send_up(mbox, devid);
+		rc = mbox_get_rsp(mbox, devid, (void *)&rsp);
+		if (rc)
+			goto fail;
+	} else {
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			goto fail;
+	}
+	if (rsp_len && rsp_data != NULL)
+		mbox_memcpy(rsp_data, rsp->data, rsp_len);
+fail:
+	mbox_put(mbox);
+	return rc;
+}
+
 int
 roc_rvu_lf_irq_register(struct roc_rvu_lf *roc_rvu_lf, unsigned int irq,
 			roc_rvu_lf_intr_cb_fn cb, void *data)
diff --git a/drivers/common/cnxk/roc_rvu_lf.h b/drivers/common/cnxk/roc_rvu_lf.h
index 7243e170b9..6b4819666a 100644
--- a/drivers/common/cnxk/roc_rvu_lf.h
+++ b/drivers/common/cnxk/roc_rvu_lf.h
@@ -21,6 +21,11 @@  TAILQ_HEAD(roc_rvu_lf_head, roc_rvu_lf);
 int __roc_api roc_rvu_lf_dev_init(struct roc_rvu_lf *roc_rvu_lf);
 int __roc_api roc_rvu_lf_dev_fini(struct roc_rvu_lf *roc_rvu_lf);
 
+int __roc_api roc_rvu_lf_msg_process(struct roc_rvu_lf *roc_rvu_lf,
+				     uint16_t vf, uint16_t msg_id,
+				     void *req_data, uint16_t req_len,
+				     void *rsp_data, uint16_t rsp_len);
+
 int __roc_api roc_rvu_lf_msg_id_range_set(struct roc_rvu_lf *roc_rvu_lf,
 					  uint16_t from, uint16_t to);
 bool  __roc_api roc_rvu_lf_msg_id_range_check(struct roc_rvu_lf *roc_rvu_lf, uint16_t msg_id);
diff --git a/drivers/common/cnxk/roc_rvu_lf_priv.h b/drivers/common/cnxk/roc_rvu_lf_priv.h
index 8feff82961..57bb713b21 100644
--- a/drivers/common/cnxk/roc_rvu_lf_priv.h
+++ b/drivers/common/cnxk/roc_rvu_lf_priv.h
@@ -17,6 +17,11 @@  struct rvu_lf {
 	uint16_t msg_id_to;
 };
 
+struct rvu_lf_msg {
+	struct mbox_msghdr hdr;
+	uint8_t data[];
+};
+
 static inline struct rvu_lf *
 roc_rvu_lf_to_rvu_priv(struct roc_rvu_lf *roc_rvu_lf)
 {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index adfe7ee789..a3e2170739 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -554,5 +554,7 @@  INTERNAL {
 	roc_rvu_lf_msg_handler_unregister;
 	roc_rvu_lf_msg_id_range_check;
 	roc_rvu_lf_msg_id_range_set;
+	roc_rvu_lf_msg_process;
+
 	local: *;
 };
diff --git a/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.c b/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.c
index a21bca9ea6..66c8e241db 100644
--- a/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.c
+++ b/drivers/raw/cnxk_rvu_lf/cnxk_rvu_lf.c
@@ -29,6 +29,21 @@  rte_pmd_rvu_lf_msg_id_range_set(uint8_t dev_id, uint16_t from, uint16_t to)
 	return roc_rvu_lf_msg_id_range_set(roc_rvu_lf, from, to);
 }
 
+int
+rte_pmd_rvu_lf_msg_process(uint8_t dev_id, uint16_t vf, uint16_t msg_id,
+			void *req, uint16_t req_len, void *rsp, uint16_t rsp_len)
+{
+	struct rte_rawdev *rawdev = rte_rawdev_pmd_get_dev(dev_id);
+	struct roc_rvu_lf *roc_rvu_lf;
+
+	if (rawdev == NULL)
+		return -EINVAL;
+
+	roc_rvu_lf = (struct roc_rvu_lf *)rawdev->dev_private;
+
+	return roc_rvu_lf_msg_process(roc_rvu_lf, vf, msg_id, req, req_len, rsp, rsp_len);
+}
+
 int
 rte_pmd_rvu_lf_msg_handler_register(uint8_t dev_id, rte_pmd_rvu_lf_msg_handler_cb_fn cb)
 {
diff --git a/drivers/raw/cnxk_rvu_lf/rte_pmd_rvu_lf.h b/drivers/raw/cnxk_rvu_lf/rte_pmd_rvu_lf.h
index 9ee9c57d7d..cb8bced718 100644
--- a/drivers/raw/cnxk_rvu_lf/rte_pmd_rvu_lf.h
+++ b/drivers/raw/cnxk_rvu_lf/rte_pmd_rvu_lf.h
@@ -47,6 +47,35 @@  extern int cnxk_logtype_rvu_lf;
 __rte_experimental
 int rte_pmd_rvu_lf_msg_id_range_set(uint8_t dev_id, uint16_t from, uint16_t to);
 
+/**
+ * Process an RVU mailbox message.
+ *
+ * The request and response buffers to be sent and received must be
+ * allocated by the application before processing the message and
+ * freed by it afterwards.
+ *
+ * @param dev_id
+ *   device id of RVU LF device
+ * @param vf
+ *   VF number (0 to N) in case of a PF->VF message. 0 is valid as VF0.
+ *   (For a VF->PF message, this field is ignored.)
+ * @param msg_id
+ *   message id
+ * @param req
+ *   pointer to message request data to be sent
+ * @param req_len
+ *   length of request data
+ * @param rsp
+ *   pointer to message response expected to be received, NULL if no response
+ * @param rsp_len
+ *   length of message response expected, 0 if no response
+ *
+ * @return 0 on success, negative value otherwise
+ */
+__rte_experimental
+int rte_pmd_rvu_lf_msg_process(uint8_t dev_id, uint16_t vf, uint16_t msg_id,
+			       void *req, uint16_t req_len, void *rsp, uint16_t rsp_len);
+
 /**
  * Signature of callback function called when a message process handler is called
  * on RVU LF device.