get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/48974/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 48974,
    "url": "https://patches.dpdk.org/api/patches/48974/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1545032259-77179-7-git-send-email-wenzhuo.lu@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1545032259-77179-7-git-send-email-wenzhuo.lu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1545032259-77179-7-git-send-email-wenzhuo.lu@intel.com",
    "date": "2018-12-17T07:37:14",
    "name": "[v5,06/31] net/ice/base: add control queue information",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "be3560980a972337400233897d007da4339bfd06",
    "submitter": {
        "id": 258,
        "url": "https://patches.dpdk.org/api/people/258/?format=api",
        "name": "Wenzhuo Lu",
        "email": "wenzhuo.lu@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "https://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1545032259-77179-7-git-send-email-wenzhuo.lu@intel.com/mbox/",
    "series": [
        {
            "id": 2824,
            "url": "https://patches.dpdk.org/api/series/2824/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=2824",
            "date": "2018-12-17T07:37:08",
            "name": "A new net PMD - ICE",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/2824/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/48974/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/48974/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 8A04F1B5E0;\n\tMon, 17 Dec 2018 08:33:06 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby dpdk.org (Postfix) with ESMTP id 3D6531B597\n\tfor <dev@dpdk.org>; Mon, 17 Dec 2018 08:33:01 +0100 (CET)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t16 Dec 2018 23:33:00 -0800",
            "from dpdk26.sh.intel.com ([10.67.110.164])\n\tby orsmga002.jf.intel.com with ESMTP; 16 Dec 2018 23:32:59 -0800"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.56,364,1539673200\"; d=\"scan'208\";a=\"118899100\"",
        "From": "Wenzhuo Lu <wenzhuo.lu@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>",
        "Date": "Mon, 17 Dec 2018 15:37:14 +0800",
        "Message-Id": "<1545032259-77179-7-git-send-email-wenzhuo.lu@intel.com>",
        "X-Mailer": "git-send-email 1.9.3",
        "In-Reply-To": "<1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>",
        "References": "<1542956179-80951-1-git-send-email-wenzhuo.lu@intel.com>\n\t<1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v5 06/31] net/ice/base: add control queue\n\tinformation",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>\n\nAdd the structures for the control queues.\n\nSigned-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>\n---\n drivers/net/ice/base/ice_controlq.c | 1098 +++++++++++++++++++++++++++++++++++\n drivers/net/ice/base/ice_controlq.h |   97 ++++\n 2 files changed, 1195 insertions(+)\n create mode 100644 drivers/net/ice/base/ice_controlq.c\n create mode 100644 drivers/net/ice/base/ice_controlq.h",
    "diff": "diff --git a/drivers/net/ice/base/ice_controlq.c b/drivers/net/ice/base/ice_controlq.c\nnew file mode 100644\nindex 0000000..fb82c23\n--- /dev/null\n+++ b/drivers/net/ice/base/ice_controlq.c\n@@ -0,0 +1,1098 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2018\n+ */\n+\n+#include \"ice_common.h\"\n+\n+\n+#define ICE_CQ_INIT_REGS(qinfo, prefix)\t\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\\\n+\t(qinfo)->sq.head = prefix##_ATQH;\t\t\t\\\n+\t(qinfo)->sq.tail = prefix##_ATQT;\t\t\t\\\n+\t(qinfo)->sq.len = prefix##_ATQLEN;\t\t\t\\\n+\t(qinfo)->sq.bah = prefix##_ATQBAH;\t\t\t\\\n+\t(qinfo)->sq.bal = prefix##_ATQBAL;\t\t\t\\\n+\t(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;\t\\\n+\t(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;\t\\\n+\t(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;\t\t\\\n+\t(qinfo)->rq.head = prefix##_ARQH;\t\t\t\\\n+\t(qinfo)->rq.tail = prefix##_ARQT;\t\t\t\\\n+\t(qinfo)->rq.len = prefix##_ARQLEN;\t\t\t\\\n+\t(qinfo)->rq.bah = prefix##_ARQBAH;\t\t\t\\\n+\t(qinfo)->rq.bal = prefix##_ARQBAL;\t\t\t\\\n+\t(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;\t\\\n+\t(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;\t\\\n+\t(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;\t\t\\\n+} while (0)\n+\n+/**\n+ * ice_adminq_init_regs - Initialize AdminQ registers\n+ * @hw: pointer to the hardware structure\n+ *\n+ * This assumes the alloc_sq and alloc_rq functions have already been called\n+ */\n+static void ice_adminq_init_regs(struct ice_hw *hw)\n+{\n+\tstruct ice_ctl_q_info *cq = &hw->adminq;\n+\n+\tICE_CQ_INIT_REGS(cq, PF_FW);\n+}\n+\n+/**\n+ * ice_mailbox_init_regs - Initialize Mailbox registers\n+ * @hw: pointer to the hardware structure\n+ *\n+ * This assumes the alloc_sq and alloc_rq functions have already been called\n+ */\n+static void ice_mailbox_init_regs(struct ice_hw *hw)\n+{\n+\tstruct ice_ctl_q_info *cq = &hw->mailboxq;\n+\n+\tICE_CQ_INIT_REGS(cq, PF_MBX);\n+}\n+\n+\n+/**\n+ * ice_check_sq_alive\n+ * @hw: pointer to the 
hw struct\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * Returns true if Queue is enabled else false.\n+ */\n+bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\t/* check both queue-length and queue-enable fields */\n+\tif (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)\n+\t\treturn (rd32(hw, cq->sq.len) & (cq->sq.len_mask |\n+\t\t\t\t\t\tcq->sq.len_ena_mask)) ==\n+\t\t\t(cq->num_sq_entries | cq->sq.len_ena_mask);\n+\n+\treturn false;\n+}\n+\n+/**\n+ * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ */\n+static enum ice_status\n+ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\tsize_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);\n+\n+\tcq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);\n+\tif (!cq->sq.desc_buf.va)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\n+\tcq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,\n+\t\t\t\t    sizeof(struct ice_sq_cd));\n+\tif (!cq->sq.cmd_buf) {\n+\t\tice_free_dma_mem(hw, &cq->sq.desc_buf);\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\t}\n+\n+\treturn ICE_SUCCESS;\n+}\n+\n+/**\n+ * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ */\n+static enum ice_status\n+ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\tsize_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);\n+\n+\tcq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);\n+\tif (!cq->rq.desc_buf.va)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\treturn ICE_SUCCESS;\n+}\n+\n+/**\n+ * ice_free_cq_ring - Free control queue ring\n+ * @hw: pointer to the hardware structure\n+ * @ring: pointer to the specific control queue ring\n+ *\n+ * This assumes the posted buffers have already been cleaned\n+ * and de-allocated\n+ */\n+static void 
ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)\n+{\n+\tice_free_dma_mem(hw, &ring->desc_buf);\n+}\n+\n+/**\n+ * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ */\n+static enum ice_status\n+ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\tint i;\n+\n+\t/* We'll be allocating the buffer info memory first, then we can\n+\t * allocate the mapped buffers for the event processing\n+\t */\n+\tcq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,\n+\t\t\t\t     sizeof(cq->rq.desc_buf));\n+\tif (!cq->rq.dma_head)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\tcq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;\n+\n+\t/* allocate the mapped buffers */\n+\tfor (i = 0; i < cq->num_rq_entries; i++) {\n+\t\tstruct ice_aq_desc *desc;\n+\t\tstruct ice_dma_mem *bi;\n+\n+\t\tbi = &cq->rq.r.rq_bi[i];\n+\t\tbi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);\n+\t\tif (!bi->va)\n+\t\t\tgoto unwind_alloc_rq_bufs;\n+\n+\t\t/* now configure the descriptors for use */\n+\t\tdesc = ICE_CTL_Q_DESC(cq->rq, i);\n+\n+\t\tdesc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);\n+\t\tif (cq->rq_buf_size > ICE_AQ_LG_BUF)\n+\t\t\tdesc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);\n+\t\tdesc->opcode = 0;\n+\t\t/* This is in accordance with Admin queue design, there is no\n+\t\t * register for buffer size configuration\n+\t\t */\n+\t\tdesc->datalen = CPU_TO_LE16(bi->size);\n+\t\tdesc->retval = 0;\n+\t\tdesc->cookie_high = 0;\n+\t\tdesc->cookie_low = 0;\n+\t\tdesc->params.generic.addr_high =\n+\t\t\tCPU_TO_LE32(ICE_HI_DWORD(bi->pa));\n+\t\tdesc->params.generic.addr_low =\n+\t\t\tCPU_TO_LE32(ICE_LO_DWORD(bi->pa));\n+\t\tdesc->params.generic.param0 = 0;\n+\t\tdesc->params.generic.param1 = 0;\n+\t}\n+\treturn ICE_SUCCESS;\n+\n+unwind_alloc_rq_bufs:\n+\t/* don't try to free the one that failed... 
*/\n+\ti--;\n+\tfor (; i >= 0; i--)\n+\t\tice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);\n+\tice_free(hw, cq->rq.dma_head);\n+\n+\treturn ICE_ERR_NO_MEMORY;\n+}\n+\n+/**\n+ * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ */\n+static enum ice_status\n+ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\tint i;\n+\n+\t/* No mapped memory needed yet, just the buffer info structures */\n+\tcq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,\n+\t\t\t\t     sizeof(cq->sq.desc_buf));\n+\tif (!cq->sq.dma_head)\n+\t\treturn ICE_ERR_NO_MEMORY;\n+\tcq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;\n+\n+\t/* allocate the mapped buffers */\n+\tfor (i = 0; i < cq->num_sq_entries; i++) {\n+\t\tstruct ice_dma_mem *bi;\n+\n+\t\tbi = &cq->sq.r.sq_bi[i];\n+\t\tbi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);\n+\t\tif (!bi->va)\n+\t\t\tgoto unwind_alloc_sq_bufs;\n+\t}\n+\treturn ICE_SUCCESS;\n+\n+unwind_alloc_sq_bufs:\n+\t/* don't try to free the one that failed... 
*/\n+\ti--;\n+\tfor (; i >= 0; i--)\n+\t\tice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);\n+\tice_free(hw, cq->sq.dma_head);\n+\n+\treturn ICE_ERR_NO_MEMORY;\n+}\n+\n+static enum ice_status\n+ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)\n+{\n+\t/* Clear Head and Tail */\n+\twr32(hw, ring->head, 0);\n+\twr32(hw, ring->tail, 0);\n+\n+\t/* set starting point */\n+\twr32(hw, ring->len, (num_entries | ring->len_ena_mask));\n+\twr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));\n+\twr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));\n+\n+\t/* Check one register to verify that config was applied */\n+\tif (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))\n+\t\treturn ICE_ERR_AQ_ERROR;\n+\n+\treturn ICE_SUCCESS;\n+}\n+\n+/**\n+ * ice_cfg_sq_regs - configure Control ATQ registers\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * Configure base address and length registers for the transmit queue\n+ */\n+static enum ice_status\n+ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\treturn ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);\n+}\n+\n+/**\n+ * ice_cfg_rq_regs - configure Control ARQ register\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * Configure base address and length registers for the receive (event q)\n+ */\n+static enum ice_status\n+ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\tenum ice_status status;\n+\n+\tstatus = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);\n+\tif (status)\n+\t\treturn status;\n+\n+\t/* Update tail in the HW to post pre-allocated buffers */\n+\twr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));\n+\n+\treturn ICE_SUCCESS;\n+}\n+\n+/**\n+ * ice_init_sq - main initialization routine for Control ATQ\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * This is the main initialization routine for the 
Control Send Queue\n+ * Prior to calling this function, drivers *MUST* set the following fields\n+ * in the cq->structure:\n+ *     - cq->num_sq_entries\n+ *     - cq->sq_buf_size\n+ *\n+ * Do *NOT* hold the lock when calling this as the memory allocation routines\n+ * called are not going to be atomic context safe\n+ */\n+static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\tenum ice_status ret_code;\n+\n+\tif (cq->sq.count > 0) {\n+\t\t/* queue already initialized */\n+\t\tret_code = ICE_ERR_NOT_READY;\n+\t\tgoto init_ctrlq_exit;\n+\t}\n+\n+\t/* verify input for valid configuration */\n+\tif (!cq->num_sq_entries || !cq->sq_buf_size) {\n+\t\tret_code = ICE_ERR_CFG;\n+\t\tgoto init_ctrlq_exit;\n+\t}\n+\n+\tcq->sq.next_to_use = 0;\n+\tcq->sq.next_to_clean = 0;\n+\n+\t/* allocate the ring memory */\n+\tret_code = ice_alloc_ctrlq_sq_ring(hw, cq);\n+\tif (ret_code)\n+\t\tgoto init_ctrlq_exit;\n+\n+\t/* allocate buffers in the rings */\n+\tret_code = ice_alloc_sq_bufs(hw, cq);\n+\tif (ret_code)\n+\t\tgoto init_ctrlq_free_rings;\n+\n+\t/* initialize base registers */\n+\tret_code = ice_cfg_sq_regs(hw, cq);\n+\tif (ret_code)\n+\t\tgoto init_ctrlq_free_rings;\n+\n+\t/* success! 
*/\n+\tcq->sq.count = cq->num_sq_entries;\n+\tgoto init_ctrlq_exit;\n+\n+init_ctrlq_free_rings:\n+\tice_free_cq_ring(hw, &cq->sq);\n+\n+init_ctrlq_exit:\n+\treturn ret_code;\n+}\n+\n+/**\n+ * ice_init_rq - initialize ARQ\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * The main initialization routine for the Admin Receive (Event) Queue.\n+ * Prior to calling this function, drivers *MUST* set the following fields\n+ * in the cq->structure:\n+ *     - cq->num_rq_entries\n+ *     - cq->rq_buf_size\n+ *\n+ * Do *NOT* hold the lock when calling this as the memory allocation routines\n+ * called are not going to be atomic context safe\n+ */\n+static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\tenum ice_status ret_code;\n+\n+\tif (cq->rq.count > 0) {\n+\t\t/* queue already initialized */\n+\t\tret_code = ICE_ERR_NOT_READY;\n+\t\tgoto init_ctrlq_exit;\n+\t}\n+\n+\t/* verify input for valid configuration */\n+\tif (!cq->num_rq_entries || !cq->rq_buf_size) {\n+\t\tret_code = ICE_ERR_CFG;\n+\t\tgoto init_ctrlq_exit;\n+\t}\n+\n+\tcq->rq.next_to_use = 0;\n+\tcq->rq.next_to_clean = 0;\n+\n+\t/* allocate the ring memory */\n+\tret_code = ice_alloc_ctrlq_rq_ring(hw, cq);\n+\tif (ret_code)\n+\t\tgoto init_ctrlq_exit;\n+\n+\t/* allocate buffers in the rings */\n+\tret_code = ice_alloc_rq_bufs(hw, cq);\n+\tif (ret_code)\n+\t\tgoto init_ctrlq_free_rings;\n+\n+\t/* initialize base registers */\n+\tret_code = ice_cfg_rq_regs(hw, cq);\n+\tif (ret_code)\n+\t\tgoto init_ctrlq_free_rings;\n+\n+\t/* success! 
*/\n+\tcq->rq.count = cq->num_rq_entries;\n+\tgoto init_ctrlq_exit;\n+\n+init_ctrlq_free_rings:\n+\tice_free_cq_ring(hw, &cq->rq);\n+\n+init_ctrlq_exit:\n+\treturn ret_code;\n+}\n+\n+#define ICE_FREE_CQ_BUFS(hw, qi, ring)\t\t\t\t\t\\\n+do {\t\t\t\t\t\t\t\t\t\\\n+\tint i;\t\t\t\t\t\t\t\t\\\n+\t/* free descriptors */\t\t\t\t\t\t\\\n+\tfor (i = 0; i < (qi)->num_##ring##_entries; i++)\t\t\\\n+\t\tif ((qi)->ring.r.ring##_bi[i].pa)\t\t\t\\\n+\t\t\tice_free_dma_mem((hw),\t\t\t\t\\\n+\t\t\t\t\t &(qi)->ring.r.ring##_bi[i]);\t\\\n+\t/* free the buffer info list */\t\t\t\t\t\\\n+\tif ((qi)->ring.cmd_buf)\t\t\t\t\t\t\\\n+\t\tice_free(hw, (qi)->ring.cmd_buf);\t\t\t\\\n+\t/* free dma head */\t\t\t\t\t\t\\\n+\tice_free(hw, (qi)->ring.dma_head);\t\t\t\t\\\n+} while (0)\n+\n+/**\n+ * ice_shutdown_sq - shutdown the Control ATQ\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * The main shutdown routine for the Control Transmit Queue\n+ */\n+static enum ice_status\n+ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\tenum ice_status ret_code = ICE_SUCCESS;\n+\n+\tice_acquire_lock(&cq->sq_lock);\n+\n+\tif (!cq->sq.count) {\n+\t\tret_code = ICE_ERR_NOT_READY;\n+\t\tgoto shutdown_sq_out;\n+\t}\n+\n+\t/* Stop firmware AdminQ processing */\n+\twr32(hw, cq->sq.head, 0);\n+\twr32(hw, cq->sq.tail, 0);\n+\twr32(hw, cq->sq.len, 0);\n+\twr32(hw, cq->sq.bal, 0);\n+\twr32(hw, cq->sq.bah, 0);\n+\n+\tcq->sq.count = 0;\t/* to indicate uninitialized queue */\n+\n+\t/* free ring buffers and the ring itself */\n+\tICE_FREE_CQ_BUFS(hw, cq, sq);\n+\tice_free_cq_ring(hw, &cq->sq);\n+\n+shutdown_sq_out:\n+\tice_release_lock(&cq->sq_lock);\n+\treturn ret_code;\n+}\n+\n+/**\n+ * ice_aq_ver_check - Check the reported AQ API version.\n+ * @hw: pointer to the hardware structure\n+ *\n+ * Checks if the driver should load on a given AQ API version.\n+ *\n+ * Return: 'true' iff the driver should attempt to load. 
'false' otherwise.\n+ */\n+static bool ice_aq_ver_check(struct ice_hw *hw)\n+{\n+\tif (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {\n+\t\t/* Major API version is newer than expected, don't load */\n+\t\tice_warn(hw, \"The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\\n\");\n+\t\treturn false;\n+\t} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {\n+\t\tif (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))\n+\t\t\tice_info(hw, \"The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\\n\");\n+\t\telse if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)\n+\t\t\tice_info(hw, \"The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\\n\");\n+\t} else {\n+\t\t/* Major API version is older than expected, log a warning */\n+\t\tice_info(hw, \"The driver for the device detected an older version of the NVM image than expected. 
Please update the NVM image.\\n\");\n+\t}\n+\treturn true;\n+}\n+\n+/**\n+ * ice_shutdown_rq - shutdown Control ARQ\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * The main shutdown routine for the Control Receive Queue\n+ */\n+static enum ice_status\n+ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\tenum ice_status ret_code = ICE_SUCCESS;\n+\n+\tice_acquire_lock(&cq->rq_lock);\n+\n+\tif (!cq->rq.count) {\n+\t\tret_code = ICE_ERR_NOT_READY;\n+\t\tgoto shutdown_rq_out;\n+\t}\n+\n+\t/* Stop Control Queue processing */\n+\twr32(hw, cq->rq.head, 0);\n+\twr32(hw, cq->rq.tail, 0);\n+\twr32(hw, cq->rq.len, 0);\n+\twr32(hw, cq->rq.bal, 0);\n+\twr32(hw, cq->rq.bah, 0);\n+\n+\t/* set rq.count to 0 to indicate uninitialized queue */\n+\tcq->rq.count = 0;\n+\n+\t/* free ring buffers and the ring itself */\n+\tICE_FREE_CQ_BUFS(hw, cq, rq);\n+\tice_free_cq_ring(hw, &cq->rq);\n+\n+shutdown_rq_out:\n+\tice_release_lock(&cq->rq_lock);\n+\treturn ret_code;\n+}\n+\n+\n+/**\n+ * ice_init_check_adminq - Check version for Admin Queue to know if its alive\n+ * @hw: pointer to the hardware structure\n+ */\n+static enum ice_status ice_init_check_adminq(struct ice_hw *hw)\n+{\n+\tstruct ice_ctl_q_info *cq = &hw->adminq;\n+\tenum ice_status status;\n+\n+\n+\tstatus = ice_aq_get_fw_ver(hw, NULL);\n+\tif (status)\n+\t\tgoto init_ctrlq_free_rq;\n+\n+\n+\tif (!ice_aq_ver_check(hw)) {\n+\t\tstatus = ICE_ERR_FW_API_VER;\n+\t\tgoto init_ctrlq_free_rq;\n+\t}\n+\n+\treturn ICE_SUCCESS;\n+\n+init_ctrlq_free_rq:\n+\tif (cq->rq.count) {\n+\t\tice_shutdown_rq(hw, cq);\n+\t\tice_destroy_lock(&cq->rq_lock);\n+\t}\n+\tif (cq->sq.count) {\n+\t\tice_shutdown_sq(hw, cq);\n+\t\tice_destroy_lock(&cq->sq_lock);\n+\t}\n+\treturn status;\n+}\n+\n+/**\n+ * ice_init_ctrlq - main initialization routine for any control Queue\n+ * @hw: pointer to the hardware structure\n+ * @q_type: specific Control queue type\n+ *\n+ * Prior to calling this 
function, drivers *MUST* set the following fields\n+ * in the cq->structure:\n+ *     - cq->num_sq_entries\n+ *     - cq->num_rq_entries\n+ *     - cq->rq_buf_size\n+ *     - cq->sq_buf_size\n+ */\n+static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)\n+{\n+\tstruct ice_ctl_q_info *cq;\n+\tenum ice_status ret_code;\n+\n+\tswitch (q_type) {\n+\tcase ICE_CTL_Q_ADMIN:\n+\t\tice_adminq_init_regs(hw);\n+\t\tcq = &hw->adminq;\n+\t\tbreak;\n+\tcase ICE_CTL_Q_MAILBOX:\n+\t\tice_mailbox_init_regs(hw);\n+\t\tcq = &hw->mailboxq;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn ICE_ERR_PARAM;\n+\t}\n+\tcq->qtype = q_type;\n+\n+\t/* verify input for valid configuration */\n+\tif (!cq->num_rq_entries || !cq->num_sq_entries ||\n+\t    !cq->rq_buf_size || !cq->sq_buf_size) {\n+\t\treturn ICE_ERR_CFG;\n+\t}\n+\tice_init_lock(&cq->sq_lock);\n+\tice_init_lock(&cq->rq_lock);\n+\n+\t/* setup SQ command write back timeout */\n+\tcq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;\n+\n+\t/* allocate the ATQ */\n+\tret_code = ice_init_sq(hw, cq);\n+\tif (ret_code)\n+\t\tgoto init_ctrlq_destroy_locks;\n+\n+\t/* allocate the ARQ */\n+\tret_code = ice_init_rq(hw, cq);\n+\tif (ret_code)\n+\t\tgoto init_ctrlq_free_sq;\n+\n+\t/* success! 
*/\n+\treturn ICE_SUCCESS;\n+\n+init_ctrlq_free_sq:\n+\tice_shutdown_sq(hw, cq);\n+init_ctrlq_destroy_locks:\n+\tice_destroy_lock(&cq->sq_lock);\n+\tice_destroy_lock(&cq->rq_lock);\n+\treturn ret_code;\n+}\n+\n+/**\n+ * ice_init_all_ctrlq - main initialization routine for all control queues\n+ * @hw: pointer to the hardware structure\n+ *\n+ * Prior to calling this function, drivers *MUST* set the following fields\n+ * in the cq->structure for all control queues:\n+ *     - cq->num_sq_entries\n+ *     - cq->num_rq_entries\n+ *     - cq->rq_buf_size\n+ *     - cq->sq_buf_size\n+ */\n+enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)\n+{\n+\tenum ice_status ret_code;\n+\n+\n+\t/* Init FW admin queue */\n+\tret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);\n+\tif (ret_code)\n+\t\treturn ret_code;\n+\n+\tret_code = ice_init_check_adminq(hw);\n+\tif (ret_code)\n+\t\treturn ret_code;\n+\t/* Init Mailbox queue */\n+\treturn ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);\n+}\n+\n+/**\n+ * ice_shutdown_ctrlq - shutdown routine for any control queue\n+ * @hw: pointer to the hardware structure\n+ * @q_type: specific Control queue type\n+ */\n+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)\n+{\n+\tstruct ice_ctl_q_info *cq;\n+\n+\tswitch (q_type) {\n+\tcase ICE_CTL_Q_ADMIN:\n+\t\tcq = &hw->adminq;\n+\t\tif (ice_check_sq_alive(hw, cq))\n+\t\t\tice_aq_q_shutdown(hw, true);\n+\t\tbreak;\n+\tcase ICE_CTL_Q_MAILBOX:\n+\t\tcq = &hw->mailboxq;\n+\t\tbreak;\n+\tdefault:\n+\t\treturn;\n+\t}\n+\n+\tif (cq->sq.count) {\n+\t\tice_shutdown_sq(hw, cq);\n+\t\tice_destroy_lock(&cq->sq_lock);\n+\t}\n+\tif (cq->rq.count) {\n+\t\tice_shutdown_rq(hw, cq);\n+\t\tice_destroy_lock(&cq->rq_lock);\n+\t}\n+}\n+\n+/**\n+ * ice_shutdown_all_ctrlq - shutdown routine for all control queues\n+ * @hw: pointer to the hardware structure\n+ */\n+void ice_shutdown_all_ctrlq(struct ice_hw *hw)\n+{\n+\t/* Shutdown FW admin queue */\n+\tice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);\n+\t/* Shutdown 
PF-VF Mailbox */\n+\tice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);\n+}\n+\n+/**\n+ * ice_clean_sq - cleans Admin send queue (ATQ)\n+ * @hw: pointer to the hardware structure\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * returns the number of free desc\n+ */\n+static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\tstruct ice_ctl_q_ring *sq = &cq->sq;\n+\tu16 ntc = sq->next_to_clean;\n+\tstruct ice_sq_cd *details;\n+#if 0\n+\tstruct ice_aq_desc desc_cb;\n+#endif\n+\tstruct ice_aq_desc *desc;\n+\n+\tdesc = ICE_CTL_Q_DESC(*sq, ntc);\n+\tdetails = ICE_CTL_Q_DETAILS(*sq, ntc);\n+\n+\twhile (rd32(hw, cq->sq.head) != ntc) {\n+\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t  \"ntc %d head %d.\\n\", ntc, rd32(hw, cq->sq.head));\n+#if 0\n+\t\tif (details->callback) {\n+\t\t\tICE_CTL_Q_CALLBACK cb_func =\n+\t\t\t\t(ICE_CTL_Q_CALLBACK)details->callback;\n+\t\t\tice_memcpy(&desc_cb, desc, sizeof(desc_cb),\n+\t\t\t\t   ICE_DMA_TO_DMA);\n+\t\t\tcb_func(hw, &desc_cb);\n+\t\t}\n+#endif\n+\t\tice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);\n+\t\tice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);\n+\t\tntc++;\n+\t\tif (ntc == sq->count)\n+\t\t\tntc = 0;\n+\t\tdesc = ICE_CTL_Q_DESC(*sq, ntc);\n+\t\tdetails = ICE_CTL_Q_DETAILS(*sq, ntc);\n+\t}\n+\n+\tsq->next_to_clean = ntc;\n+\n+\treturn ICE_CTL_Q_DESC_UNUSED(sq);\n+}\n+\n+/**\n+ * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)\n+ * @hw: pointer to the hw struct\n+ * @cq: pointer to the specific Control queue\n+ *\n+ * Returns true if the firmware has processed all descriptors on the\n+ * admin send queue. 
Returns false if there are still requests pending.\n+ */\n+bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)\n+{\n+\t/* AQ designers suggest use of head for better\n+\t * timing reliability than DD bit\n+\t */\n+\treturn rd32(hw, cq->sq.head) == cq->sq.next_to_use;\n+}\n+\n+/**\n+ * ice_sq_send_cmd - send command to Control Queue (ATQ)\n+ * @hw: pointer to the hw struct\n+ * @cq: pointer to the specific Control queue\n+ * @desc: prefilled descriptor describing the command (non DMA mem)\n+ * @buf: buffer to use for indirect commands (or NULL for direct commands)\n+ * @buf_size: size of buffer for indirect commands (or 0 for direct commands)\n+ * @cd: pointer to command details structure\n+ *\n+ * This is the main send command routine for the ATQ. It runs the queue,\n+ * cleans the queue, etc.\n+ */\n+enum ice_status\n+ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,\n+\t\tstruct ice_aq_desc *desc, void *buf, u16 buf_size,\n+\t\tstruct ice_sq_cd *cd)\n+{\n+\tstruct ice_dma_mem *dma_buf = NULL;\n+\tstruct ice_aq_desc *desc_on_ring;\n+\tbool cmd_completed = false;\n+\tenum ice_status status = ICE_SUCCESS;\n+\tstruct ice_sq_cd *details;\n+\tu32 total_delay = 0;\n+\tu16 retval = 0;\n+\tu32 val = 0;\n+\n+\t/* if reset is in progress return a soft error */\n+\tif (hw->reset_ongoing)\n+\t\treturn ICE_ERR_RESET_ONGOING;\n+\tice_acquire_lock(&cq->sq_lock);\n+\n+\tcq->sq_last_status = ICE_AQ_RC_OK;\n+\n+\tif (!cq->sq.count) {\n+\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t  \"Control Send queue not initialized.\\n\");\n+\t\tstatus = ICE_ERR_AQ_EMPTY;\n+\t\tgoto sq_send_command_error;\n+\t}\n+\n+\tif ((buf && !buf_size) || (!buf && buf_size)) {\n+\t\tstatus = ICE_ERR_PARAM;\n+\t\tgoto sq_send_command_error;\n+\t}\n+\n+\tif (buf) {\n+\t\tif (buf_size > cq->sq_buf_size) {\n+\t\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t\t  \"Invalid buffer size for Control Send queue: %d.\\n\",\n+\t\t\t\t  buf_size);\n+\t\t\tstatus = ICE_ERR_INVAL_SIZE;\n+\t\t\tgoto 
sq_send_command_error;\n+\t\t}\n+\n+\t\tdesc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);\n+\t\tif (buf_size > ICE_AQ_LG_BUF)\n+\t\t\tdesc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);\n+\t}\n+\n+\tval = rd32(hw, cq->sq.head);\n+\tif (val >= cq->num_sq_entries) {\n+\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t  \"head overrun at %d in the Control Send Queue ring\\n\",\n+\t\t\t  val);\n+\t\tstatus = ICE_ERR_AQ_EMPTY;\n+\t\tgoto sq_send_command_error;\n+\t}\n+\n+\tdetails = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);\n+\tif (cd)\n+\t\t*details = *cd;\n+#if 0\n+\t\t/* FIXME: if/when this block gets enabled (when the #if 0\n+\t\t * is removed), add braces to both branches of the surrounding\n+\t\t * conditional expression. The braces have been removed to\n+\t\t * prevent checkpatch complaining.\n+\t\t */\n+\n+\t\t/* If the command details are defined copy the cookie. The\n+\t\t * CPU_TO_LE32 is not needed here because the data is ignored\n+\t\t * by the FW, only used by the driver\n+\t\t */\n+\t\tif (details->cookie) {\n+\t\t\tdesc->cookie_high =\n+\t\t\t\tCPU_TO_LE32(ICE_HI_DWORD(details->cookie));\n+\t\t\tdesc->cookie_low =\n+\t\t\t\tCPU_TO_LE32(ICE_LO_DWORD(details->cookie));\n+\t\t}\n+#endif\n+\telse\n+\t\tice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);\n+#if 0\n+\t/* clear requested flags and then set additional flags if defined */\n+\tdesc->flags &= ~CPU_TO_LE16(details->flags_dis);\n+\tdesc->flags |= CPU_TO_LE16(details->flags_ena);\n+\n+\tif (details->postpone && !details->async) {\n+\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t  \"Async flag not set along with postpone flag\\n\");\n+\t\tstatus = ICE_ERR_PARAM;\n+\t\tgoto sq_send_command_error;\n+\t}\n+#endif\n+\n+\t/* Call clean and check queue available function to reclaim the\n+\t * descriptors that were processed by FW/MBX; the function returns the\n+\t * number of desc available. 
The clean function called here could be\n+\t * called in a separate thread in case of asynchronous completions.\n+\t */\n+\tif (ice_clean_sq(hw, cq) == 0) {\n+\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t  \"Error: Control Send Queue is full.\\n\");\n+\t\tstatus = ICE_ERR_AQ_FULL;\n+\t\tgoto sq_send_command_error;\n+\t}\n+\n+\t/* initialize the temp desc pointer with the right desc */\n+\tdesc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);\n+\n+\t/* if the desc is available copy the temp desc to the right place */\n+\tice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),\n+\t\t   ICE_NONDMA_TO_DMA);\n+\n+\t/* if buf is not NULL assume indirect command */\n+\tif (buf) {\n+\t\tdma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];\n+\t\t/* copy the user buf into the respective DMA buf */\n+\t\tice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);\n+\t\tdesc_on_ring->datalen = CPU_TO_LE16(buf_size);\n+\n+\t\t/* Update the address values in the desc with the pa value\n+\t\t * for respective buffer\n+\t\t */\n+\t\tdesc_on_ring->params.generic.addr_high =\n+\t\t\tCPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));\n+\t\tdesc_on_ring->params.generic.addr_low =\n+\t\t\tCPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));\n+\t}\n+\n+\t/* Debug desc and buffer */\n+\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t  \"ATQ: Control Send queue desc and buffer:\\n\");\n+\n+\tice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);\n+\n+\n+\t(cq->sq.next_to_use)++;\n+\tif (cq->sq.next_to_use == cq->sq.count)\n+\t\tcq->sq.next_to_use = 0;\n+#if 0\n+\t/* FIXME - handle this case? */\n+\tif (!details->postpone)\n+#endif\n+\twr32(hw, cq->sq.tail, cq->sq.next_to_use);\n+\n+#if 0\n+\t/* if command details are not defined or async flag is not set,\n+\t * we need to wait for desc write back\n+\t */\n+\tif (!details->async && !details->postpone) {\n+\t\t/* FIXME - handle this case? 
*/\n+\t}\n+#endif\n+\tdo {\n+\t\tif (ice_sq_done(hw, cq))\n+\t\t\tbreak;\n+\n+\t\tice_msec_delay(1, false);\n+\t\ttotal_delay++;\n+\t} while (total_delay < cq->sq_cmd_timeout);\n+\n+\t/* if ready, copy the desc back to temp */\n+\tif (ice_sq_done(hw, cq)) {\n+\t\tice_memcpy(desc, desc_on_ring, sizeof(*desc),\n+\t\t\t   ICE_DMA_TO_NONDMA);\n+\t\tif (buf) {\n+\t\t\t/* get returned length to copy */\n+\t\t\tu16 copy_size = LE16_TO_CPU(desc->datalen);\n+\n+\t\t\tif (copy_size > buf_size) {\n+\t\t\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t\t\t  \"Return len %d > than buf len %d\\n\",\n+\t\t\t\t\t  copy_size, buf_size);\n+\t\t\t\tstatus = ICE_ERR_AQ_ERROR;\n+\t\t\t} else {\n+\t\t\t\tice_memcpy(buf, dma_buf->va, copy_size,\n+\t\t\t\t\t   ICE_DMA_TO_NONDMA);\n+\t\t\t}\n+\t\t}\n+\t\tretval = LE16_TO_CPU(desc->retval);\n+\t\tif (retval) {\n+\t\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t\t  \"Control Send Queue command completed with error 0x%x\\n\",\n+\t\t\t\t  retval);\n+\n+\t\t\t/* strip off FW internal code */\n+\t\t\tretval &= 0xff;\n+\t\t}\n+\t\tcmd_completed = true;\n+\t\tif (!status && retval != ICE_AQ_RC_OK)\n+\t\t\tstatus = ICE_ERR_AQ_ERROR;\n+\t\tcq->sq_last_status = (enum ice_aq_err)retval;\n+\t}\n+\n+\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t  \"ATQ: desc and buffer writeback:\\n\");\n+\n+\tice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);\n+\n+\n+\t/* save writeback AQ if requested */\n+\tif (details->wb_desc)\n+\t\tice_memcpy(details->wb_desc, desc_on_ring,\n+\t\t\t   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);\n+\n+\t/* update the error if time out occurred */\n+\tif (!cmd_completed) {\n+#if 0\n+\t    (!details->async && !details->postpone)) {\n+#endif\n+\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t  \"Control Send Queue Writeback timeout.\\n\");\n+\t\tstatus = ICE_ERR_AQ_TIMEOUT;\n+\t}\n+\n+sq_send_command_error:\n+\tice_release_lock(&cq->sq_lock);\n+\treturn status;\n+}\n+\n+/**\n+ * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function\n+ * 
@desc: pointer to the temp descriptor (non DMA mem)\n+ * @opcode: the opcode can be used to decide which flags to turn off or on\n+ *\n+ * Fill the desc with default values\n+ */\n+void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)\n+{\n+\t/* zero out the desc */\n+\tice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);\n+\tdesc->opcode = CPU_TO_LE16(opcode);\n+\tdesc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);\n+}\n+\n+/**\n+ * ice_clean_rq_elem\n+ * @hw: pointer to the hw struct\n+ * @cq: pointer to the specific Control queue\n+ * @e: event info from the receive descriptor, includes any buffers\n+ * @pending: number of events that could be left to process\n+ *\n+ * This function cleans one Admin Receive Queue element and returns\n+ * the contents through e. It can also return how many events are\n+ * left to process through 'pending'.\n+ */\n+enum ice_status\n+ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,\n+\t\t  struct ice_rq_event_info *e, u16 *pending)\n+{\n+\tu16 ntc = cq->rq.next_to_clean;\n+\tenum ice_status ret_code = ICE_SUCCESS;\n+\tstruct ice_aq_desc *desc;\n+\tstruct ice_dma_mem *bi;\n+\tu16 desc_idx;\n+\tu16 datalen;\n+\tu16 flags;\n+\tu16 ntu;\n+\n+\t/* pre-clean the event info */\n+\tice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);\n+\n+\t/* take the lock before we start messing with the ring */\n+\tice_acquire_lock(&cq->rq_lock);\n+\n+\tif (!cq->rq.count) {\n+\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t  \"Control Receive queue not initialized.\\n\");\n+\t\tret_code = ICE_ERR_AQ_EMPTY;\n+\t\tgoto clean_rq_elem_err;\n+\t}\n+\n+\t/* set next_to_use to head */\n+\tntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);\n+\n+\tif (ntu == ntc) {\n+\t\t/* nothing to do - shouldn't need to update ring's values */\n+\t\tret_code = ICE_ERR_AQ_NO_WORK;\n+\t\tgoto clean_rq_elem_out;\n+\t}\n+\n+\t/* now clean the next descriptor */\n+\tdesc = ICE_CTL_Q_DESC(cq->rq, ntc);\n+\tdesc_idx = ntc;\n+\n+\tcq->rq_last_status = 
(enum ice_aq_err)LE16_TO_CPU(desc->retval);\n+\tflags = LE16_TO_CPU(desc->flags);\n+\tif (flags & ICE_AQ_FLAG_ERR) {\n+\t\tret_code = ICE_ERR_AQ_ERROR;\n+\t\tice_debug(hw, ICE_DBG_AQ_MSG,\n+\t\t\t  \"Control Receive Queue Event received with error 0x%x\\n\",\n+\t\t\t  cq->rq_last_status);\n+\t}\n+\tice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);\n+\tdatalen = LE16_TO_CPU(desc->datalen);\n+\te->msg_len = min(datalen, e->buf_len);\n+\tif (e->msg_buf && e->msg_len)\n+\t\tice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,\n+\t\t\t   e->msg_len, ICE_DMA_TO_NONDMA);\n+\n+\tice_debug(hw, ICE_DBG_AQ_MSG, \"ARQ: desc and buffer:\\n\");\n+\n+\tice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,\n+\t\t     cq->rq_buf_size);\n+\n+\n+\t/* Restore the original datalen and buffer address in the desc,\n+\t * FW updates datalen to indicate the event message size\n+\t */\n+\tbi = &cq->rq.r.rq_bi[ntc];\n+\tice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);\n+\n+\tdesc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);\n+\tif (cq->rq_buf_size > ICE_AQ_LG_BUF)\n+\t\tdesc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);\n+\tdesc->datalen = CPU_TO_LE16(bi->size);\n+\tdesc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));\n+\tdesc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));\n+\n+\t/* set tail = the last cleaned desc index. */\n+\twr32(hw, cq->rq.tail, ntc);\n+\t/* ntc is updated to tail + 1 */\n+\tntc++;\n+\tif (ntc == cq->num_rq_entries)\n+\t\tntc = 0;\n+\tcq->rq.next_to_clean = ntc;\n+\tcq->rq.next_to_use = ntu;\n+\n+#if 0\n+\tice_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));\n+#endif\n+clean_rq_elem_out:\n+\t/* Set pending if needed, unlock and return */\n+\tif (pending) {\n+\t\t/* re-read HW head to calculate actual pending messages */\n+\t\tntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);\n+\t\t*pending = (u16)((ntc > ntu ? 
cq->rq.count : 0) + (ntu - ntc));\n+\t}\n+clean_rq_elem_err:\n+\tice_release_lock(&cq->rq_lock);\n+\n+\treturn ret_code;\n+}\ndiff --git a/drivers/net/ice/base/ice_controlq.h b/drivers/net/ice/base/ice_controlq.h\nnew file mode 100644\nindex 0000000..db2db93\n--- /dev/null\n+++ b/drivers/net/ice/base/ice_controlq.h\n@@ -0,0 +1,97 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2001-2018\n+ */\n+\n+#ifndef _ICE_CONTROLQ_H_\n+#define _ICE_CONTROLQ_H_\n+\n+#include \"ice_adminq_cmd.h\"\n+\n+\n+/* Maximum buffer lengths for all control queue types */\n+#define ICE_AQ_MAX_BUF_LEN 4096\n+#define ICE_MBXQ_MAX_BUF_LEN 4096\n+\n+#define ICE_CTL_Q_DESC(R, i) \\\n+\t(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))\n+\n+#define ICE_CTL_Q_DESC_UNUSED(R) \\\n+\t(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \\\n+\t      (R)->next_to_clean - (R)->next_to_use - 1)\n+\n+/* Defines that help manage the driver vs FW API checks.\n+ * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.\n+ */\n+#define EXP_FW_API_VER_BRANCH\t\t0x00\n+#define EXP_FW_API_VER_MAJOR\t\t0x01\n+#define EXP_FW_API_VER_MINOR\t\t0x03\n+\n+/* Different control queue types: These are mainly for SW consumption. 
*/\n+enum ice_ctl_q {\n+\tICE_CTL_Q_UNKNOWN = 0,\n+\tICE_CTL_Q_ADMIN,\n+\tICE_CTL_Q_MAILBOX,\n+};\n+\n+/* Control Queue default settings */\n+#define ICE_CTL_Q_SQ_CMD_TIMEOUT\t250  /* msecs */\n+\n+struct ice_ctl_q_ring {\n+\tvoid *dma_head;\t\t\t/* Virtual address to dma head */\n+\tstruct ice_dma_mem desc_buf;\t/* descriptor ring memory */\n+\tvoid *cmd_buf;\t\t\t/* command buffer memory */\n+\n+\tunion {\n+\t\tstruct ice_dma_mem *sq_bi;\n+\t\tstruct ice_dma_mem *rq_bi;\n+\t} r;\n+\n+\tu16 count;\t\t/* Number of descriptors */\n+\n+\t/* used for interrupt processing */\n+\tu16 next_to_use;\n+\tu16 next_to_clean;\n+\n+\t/* used for queue tracking */\n+\tu32 head;\n+\tu32 tail;\n+\tu32 len;\n+\tu32 bah;\n+\tu32 bal;\n+\tu32 len_mask;\n+\tu32 len_ena_mask;\n+\tu32 head_mask;\n+};\n+\n+/* sq transaction details */\n+struct ice_sq_cd {\n+\tstruct ice_aq_desc *wb_desc;\n+};\n+\n+#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))\n+\n+/* rq event information */\n+struct ice_rq_event_info {\n+\tstruct ice_aq_desc desc;\n+\tu16 msg_len;\n+\tu16 buf_len;\n+\tu8 *msg_buf;\n+};\n+\n+/* Control Queue information */\n+struct ice_ctl_q_info {\n+\tenum ice_ctl_q qtype;\n+\tstruct ice_ctl_q_ring rq;\t/* receive queue */\n+\tstruct ice_ctl_q_ring sq;\t/* send queue */\n+\tu32 sq_cmd_timeout;\t\t/* send queue cmd write back timeout */\n+\tu16 num_rq_entries;\t\t/* receive queue depth */\n+\tu16 num_sq_entries;\t\t/* send queue depth */\n+\tu16 rq_buf_size;\t\t/* receive queue buffer size */\n+\tu16 sq_buf_size;\t\t/* send queue buffer size */\n+\tstruct ice_lock sq_lock;\t\t/* Send queue lock */\n+\tstruct ice_lock rq_lock;\t\t/* Receive queue lock */\n+\tenum ice_aq_err sq_last_status;\t/* last status on send queue */\n+\tenum ice_aq_err rq_last_status;\t/* last status on receive queue */\n+};\n+\n+#endif /* _ICE_CONTROLQ_H_ */\n",
    "prefixes": [
        "v5",
        "06/31"
    ]
}