@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2023 Intel Corporation
+ * Copyright(c) 2023 Intel Corporation
*/
#include "cpfl_controlq.h"
@@ -332,6 +332,395 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
return status;
}
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+ struct idpf_ctlq_desc *desc;
+ int num_desc_avail = 0;
+ int status = 0;
+ int i = 0;
+
+ if (!cq || !cq->ring_size)
+ return -ENOBUFS;
+
+ idpf_acquire_lock(&cq->cq_lock);
+
+ /* Ensure there are enough descriptors to send all messages */
+ num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+ if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+ status = -ENOSPC;
+ goto sq_send_command_out;
+ }
+
+ for (i = 0; i < num_q_msg; i++) {
+ struct idpf_ctlq_msg *msg = &q_msg[i];
+ uint64_t msg_cookie;
+
+ desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+ desc->opcode = CPU_TO_LE16(msg->opcode);
+ desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+ msg_cookie = *(uint64_t *)&msg->cookie;
+ desc->cookie_high =
+ CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
+ desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+ IDPF_CTLQ_FLAG_HOST_ID_S);
+ if (msg->data_len) {
+ struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+ desc->datalen |= CPU_TO_LE16(msg->data_len);
+ desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+ desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+ /* Update the address values in the desc with the pa
+ * value for respective buffer
+ */
+ desc->params.indirect.addr_high =
+ CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+ desc->params.indirect.addr_low =
+ CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+ idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+ IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+ } else {
+ idpf_memcpy(&desc->params, msg->ctx.direct,
+ IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+ }
+
+ /* Store buffer info */
+ cq->bi.tx_msg[cq->next_to_use] = msg;
+ (cq->next_to_use)++;
+ if (cq->next_to_use == cq->ring_size)
+ cq->next_to_use = 0;
+ }
+
+ /* Force memory write to complete before letting hardware
+ * know that there are new descriptors to fetch.
+ */
+ idpf_wmb();
+ wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+ idpf_release_lock(&cq->cq_lock);
+
+ return status;
+}
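+
+/* Usage sketch (illustrative only, not part of the driver): sending one
+ * indirect message, assuming hw and cq were set up via cpfl_ctlq_add().
+ * The opcode and payload length are placeholders, and idpf_alloc_dma_mem()
+ * is assumed to be the osdep DMA allocator visible to this driver.
+ *
+ *     struct idpf_ctlq_msg msg = { 0 };
+ *     struct idpf_dma_mem buf = { 0 };
+ *     int err;
+ *
+ *     if (!idpf_alloc_dma_mem(hw, &buf, 4096))
+ *             return -ENOMEM;
+ *     msg.opcode = example_opcode;        /~ placeholder opcode ~/
+ *     msg.data_len = example_payload_len; /~ bytes used in buf ~/
+ *     msg.ctx.indirect.payload = &buf;
+ *     err = cpfl_ctlq_send(hw, cq, 1, &msg);
+ */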
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) also clean descriptors not yet marked as done;
+ *         use with caution
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors. The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+ struct idpf_ctlq_msg *msg_status[], bool force)
+{
+ struct idpf_ctlq_desc *desc;
+ uint16_t i = 0, num_to_clean;
+ uint16_t ntc, desc_err;
+ int ret = 0;
+
+ if (!cq || !cq->ring_size)
+ return -ENOBUFS;
+ if (*clean_count == 0)
+ return 0;
+ if (*clean_count > cq->ring_size)
+ return -EINVAL;
+
+ idpf_acquire_lock(&cq->cq_lock);
+ ntc = cq->next_to_clean;
+ num_to_clean = *clean_count;
+
+ for (i = 0; i < num_to_clean; i++) {
+ /* Fetch next descriptor and check if marked as done */
+ desc = IDPF_CTLQ_DESC(cq, ntc);
+ if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+ break;
+
+ desc_err = LE16_TO_CPU(desc->ret_val);
+ if (desc_err) {
+ /* strip off FW internal code */
+ desc_err &= 0xff;
+ }
+
+ msg_status[i] = cq->bi.tx_msg[ntc];
+ if (!msg_status[i])
+ break;
+ msg_status[i]->status = desc_err;
+ cq->bi.tx_msg[ntc] = NULL;
+ /* Zero out any stale data */
+ idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+ ntc++;
+ if (ntc == cq->ring_size)
+ ntc = 0;
+ }
+
+ cq->next_to_clean = ntc;
+ idpf_release_lock(&cq->cq_lock);
+
+ /* Return number of descriptors actually cleaned */
+ *clean_count = i;
+
+ return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors. The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+ struct idpf_ctlq_msg *msg_status[])
+{
+ return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
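+
+/* Usage sketch (illustrative only): reclaiming completed send descriptors
+ * after cpfl_ctlq_send(). The batch size of 8 is a placeholder; on return,
+ * n holds the number of descriptors actually cleaned.
+ *
+ *     struct idpf_ctlq_msg *done[8];
+ *     uint16_t n = 8;
+ *
+ *     if (cpfl_ctlq_clean_sq(cq, &n, done) == 0) {
+ *             done[0..n-1] point at the original messages; a non-zero
+ *             done[j]->status marks a failed send. The caller frees or
+ *             reuses the messages and their DMA buffers here.
+ *     }
+ */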
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. with buff_count = 0
+ * and buffs = NULL, in order to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+ struct idpf_ctlq_desc *desc;
+ uint16_t ntp = cq->next_to_post;
+ bool buffs_avail = false;
+ uint16_t tbp = ntp + 1;
+ int status = 0;
+ int i = 0;
+
+ if (*buff_count > cq->ring_size)
+ return -EINVAL;
+ if (*buff_count > 0)
+ buffs_avail = true;
+ idpf_acquire_lock(&cq->cq_lock);
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+ if (tbp == cq->next_to_clean)
+ goto post_buffs_out;
+
+ /* Post buffers for as many as provided or up until the last one used */
+ while (ntp != cq->next_to_clean) {
+ desc = IDPF_CTLQ_DESC(cq, ntp);
+ if (cq->bi.rx_buff[ntp])
+ goto fill_desc;
+ if (!buffs_avail) {
+ /* If the caller hasn't given us any buffers, or there
+ * are none left, search the ring itself for an available
+ * buffer to move into this entry, starting at the next
+ * entry in the ring
+ */
+ tbp = ntp + 1;
+ /* Wrap ring if necessary */
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+
+ while (tbp != cq->next_to_clean) {
+ if (cq->bi.rx_buff[tbp]) {
+ cq->bi.rx_buff[ntp] =
+ cq->bi.rx_buff[tbp];
+ cq->bi.rx_buff[tbp] = NULL;
+ /* Found a buffer, no need to
+ * search anymore
+ */
+ break;
+ }
+ /* Advance, wrapping the ring if necessary */
+ tbp++;
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+ }
+
+ if (tbp == cq->next_to_clean)
+ goto post_buffs_out;
+ } else {
+ /* Give back pointer to DMA buffer */
+ cq->bi.rx_buff[ntp] = buffs[i];
+ i++;
+
+ if (i >= *buff_count)
+ buffs_avail = false;
+ }
+
+fill_desc:
+ desc->flags =
+ CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+ /* Post buffers to descriptor */
+ desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+ desc->params.indirect.addr_high =
+ CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+ desc->params.indirect.addr_low =
+ CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+ ntp++;
+ if (ntp == cq->ring_size)
+ ntp = 0;
+ }
+
+post_buffs_out:
+ /* Only update tail if buffers were actually posted */
+ if (cq->next_to_post != ntp) {
+ if (ntp)
+ /* Update next_to_post to ntp - 1 since current ntp
+ * will not have a buffer
+ */
+ cq->next_to_post = ntp - 1;
+ else
+ /* Wrap to end of ring since current ntp is 0 */
+ cq->next_to_post = cq->ring_size - 1;
+
+ wr32(hw, cq->reg.tail, cq->next_to_post);
+ }
+
+ idpf_release_lock(&cq->cq_lock);
+
+ /* Return the number of buffers that were not posted */
+ *buff_count = *buff_count - i;
+
+ return status;
+}
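+
+/* Usage sketch (illustrative only): returning a consumed buffer to the RX
+ * ring. used_buf stands for a payload pointer taken from a received
+ * message; on return, count holds how many buffers were not posted.
+ *
+ *     struct idpf_dma_mem *bufs[1] = { used_buf };
+ *     uint16_t count = 1;
+ *
+ *     err = cpfl_ctlq_post_rx_buffs(hw, cq, &count, bufs);
+ */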
+
+/**
+ * cpfl_ctlq_recv - receive control queue messages
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+ struct idpf_ctlq_msg *q_msg)
+{
+ uint16_t num_to_clean, ntc, ret_val, flags;
+ struct idpf_ctlq_desc *desc;
+ int ret_code = 0;
+ uint16_t i = 0;
+
+ if (!cq || !cq->ring_size)
+ return -ENOBUFS;
+
+ if (*num_q_msg == 0)
+ return 0;
+ else if (*num_q_msg > cq->ring_size)
+ return -EINVAL;
+
+ /* take the lock before we start messing with the ring */
+ idpf_acquire_lock(&cq->cq_lock);
+ ntc = cq->next_to_clean;
+ num_to_clean = *num_q_msg;
+
+ for (i = 0; i < num_to_clean; i++) {
+ /* Fetch next descriptor and check if marked as done */
+ desc = IDPF_CTLQ_DESC(cq, ntc);
+ flags = LE16_TO_CPU(desc->flags);
+ if (!(flags & IDPF_CTLQ_FLAG_DD))
+ break;
+
+ ret_val = LE16_TO_CPU(desc->ret_val);
+ q_msg[i].vmvf_type = (flags &
+ (IDPF_CTLQ_FLAG_FTYPE_VM |
+ IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+ IDPF_CTLQ_FLAG_FTYPE_S;
+
+ if (flags & IDPF_CTLQ_FLAG_ERR)
+ ret_code = -EBADMSG;
+
+ q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+ q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+ q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+ q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+ q_msg[i].status = ret_val;
+
+ if (desc->datalen) {
+ idpf_memcpy(q_msg[i].ctx.indirect.context,
+ &desc->params.indirect,
+ IDPF_INDIRECT_CTX_SIZE,
+ IDPF_DMA_TO_NONDMA);
+ /* Hand the DMA buffer pointer to the ctlq_msg entry
+ * returned to the upper layer
+ */
+ q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+ /* Zero out pointer to DMA buffer info;
+ * will be repopulated by post buffers API
+ */
+ cq->bi.rx_buff[ntc] = NULL;
+ } else {
+ idpf_memcpy(q_msg[i].ctx.direct,
+ desc->params.raw,
+ IDPF_DIRECT_CTX_SIZE,
+ IDPF_DMA_TO_NONDMA);
+ }
+
+ /* Zero out stale data in descriptor */
+ idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+ IDPF_DMA_MEM);
+
+ ntc++;
+ if (ntc == cq->ring_size)
+ ntc = 0;
+ }
+
+ cq->next_to_clean = ntc;
+ idpf_release_lock(&cq->cq_lock);
+ *num_q_msg = i;
+ if (*num_q_msg == 0)
+ ret_code = -ENOMSG;
+
+ return ret_code;
+}
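+
+/* Usage sketch (illustrative only): a poll loop pairing cpfl_ctlq_recv()
+ * with cpfl_ctlq_post_rx_buffs(). The batch size of 4 is a placeholder.
+ * Per the note above cpfl_ctlq_post_rx_buffs(), the post call is made even
+ * when no buffers are handed back (count = 0, buffs = NULL).
+ *
+ *     struct idpf_ctlq_msg msgs[4];
+ *     uint16_t num = 4, zero = 0;
+ *
+ *     if (cpfl_ctlq_recv(cq, &num, msgs) == 0) {
+ *             process msgs[0..num-1]; keep or recycle each
+ *             msgs[j].ctx.indirect.payload as appropriate
+ *     }
+ *     cpfl_ctlq_post_rx_buffs(hw, cq, &zero, NULL);
+ */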
+
int
cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
struct idpf_ctlq_info **cq)
@@ -378,3 +767,31 @@ cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
cpfl_ctlq_remove(hw, cq);
}
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+ return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+ struct idpf_ctlq_msg q_msg[])
+{
+ return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+ return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+ struct idpf_ctlq_msg *msg_status[])
+{
+ return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
@@ -14,6 +14,13 @@
#define CPFL_DFLT_MBX_RING_LEN 512
#define CPFL_CFGQ_RING_LEN 512
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR -74 /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT -110 /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL -28 /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK -42 /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY -105 /* -ENOBUFS */
+
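+/* These mirror the negative errno values returned by the queue routines in
+ * cpfl_controlq.c (for example, cpfl_ctlq_send() returns -ENOSPC when the
+ * ring is full and cpfl_ctlq_recv() returns -ENOMSG when nothing is
+ * pending), so callers may compare against either form:
+ *
+ *     if (cpfl_ctlq_recv(cq, &num, msgs) == CPFL_ERR_CTLQ_NO_WORK)
+ *             nothing to do this poll cycle
+ */
+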
/* Generic queue info structures */
/* MB, CONFIG and EVENT q do not have extended info */
struct cpfl_ctlq_create_info {
@@ -44,8 +51,25 @@ int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
int cpfl_ctlq_add(struct idpf_hw *hw,
struct cpfl_ctlq_create_info *qinfo,
struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+ struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ uint16_t *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+ struct idpf_ctlq_msg *q_msg);
int cpfl_vport_ctlq_add(struct idpf_hw *hw,
struct cpfl_ctlq_create_info *qinfo,
struct idpf_ctlq_info **cq);
void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+ struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ uint16_t *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+ struct idpf_ctlq_msg *msg_status[]);
#endif