get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
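
A maintainer with write access can use the PATCH/PUT endpoints above to change the writable fields of a patch record (for example its state, delegate, or archived flag). The snippet below is a minimal sketch in Python using the third-party requests library; the token value is a placeholder, and the token-based Authorization header and the exact set of writable fields are assumptions about a typical Patchwork deployment, not something shown on this page.

# Minimal sketch: partially update patch 66436 via the REST API.
# Assumes maintainer permissions and a valid API token (placeholder below).
import requests

API_URL = "http://patches.dpdk.org/api/patches/66436/"
TOKEN = "REPLACE_WITH_YOUR_TOKEN"  # placeholder, not a real token

resp = requests.patch(
    API_URL,
    headers={"Authorization": f"Token {TOKEN}"},
    # Both fields appear in the response below; whether they are writable
    # for your account depends on project permissions.
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"])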

GET /api/patches/66436/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 66436,
    "url": "http://patches.dpdk.org/api/patches/66436/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200309114357.31800-20-qi.z.zhang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200309114357.31800-20-qi.z.zhang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200309114357.31800-20-qi.z.zhang@intel.com",
    "date": "2020-03-09T11:43:48",
    "name": "[19/28] net/ice/base: xtract logic of flat NVM read to function",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5f360eb086175ed7ffbc2807a5de109153e3a106",
    "submitter": {
        "id": 504,
        "url": "http://patches.dpdk.org/api/people/504/?format=api",
        "name": "Qi Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "delegate": {
        "id": 31221,
        "url": "http://patches.dpdk.org/api/users/31221/?format=api",
        "username": "yexl",
        "first_name": "xiaolong",
        "last_name": "ye",
        "email": "xiaolong.ye@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200309114357.31800-20-qi.z.zhang@intel.com/mbox/",
    "series": [
        {
            "id": 8843,
            "url": "http://patches.dpdk.org/api/series/8843/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=8843",
            "date": "2020-03-09T11:43:29",
            "name": "update ice base code",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/8843/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/66436/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/66436/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 72751A052E;\n\tMon,  9 Mar 2020 12:43:38 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1119E1C18F;\n\tMon,  9 Mar 2020 12:41:12 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n by dpdk.org (Postfix) with ESMTP id 820571C0D1\n for <dev@dpdk.org>; Mon,  9 Mar 2020 12:41:08 +0100 (CET)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n by orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n 09 Mar 2020 04:41:08 -0700",
            "from dpdk51.sh.intel.com ([10.67.110.245])\n by fmsmga002.fm.intel.com with ESMTP; 09 Mar 2020 04:41:06 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.70,533,1574150400\"; d=\"scan'208\";a=\"276483617\"",
        "From": "Qi Zhang <qi.z.zhang@intel.com>",
        "To": "qiming.yang@intel.com,\n\tbeilei.xing@intel.com",
        "Cc": "xiaolong.ye@intel.com, dev@dpdk.org, Qi Zhang <qi.z.zhang@intel.com>,\n Jacob Keller <jacob.e.keller@intel.com>,\n Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>",
        "Date": "Mon,  9 Mar 2020 19:43:48 +0800",
        "Message-Id": "<20200309114357.31800-20-qi.z.zhang@intel.com>",
        "X-Mailer": "git-send-email 2.13.6",
        "In-Reply-To": "<20200309114357.31800-1-qi.z.zhang@intel.com>",
        "References": "<20200309114357.31800-1-qi.z.zhang@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 19/28] net/ice/base: xtract logic of flat NVM\n\tread to function",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The ice_read_sr_buf_aq function implements logic to correctly break\napart NVM reads into 4Kb chunks. Additionally, it ensures that each read\nnever crosses a Shadow RAM sector boundary. This logic is useful when\nreading the flat NVM as a byte-addressable stream.\n\nExtract that logic in terms of bytes and implement it as\nice_read_flat_nvm. Use this new function to implement ice_read_sr_buf_aq\nfunction.\n\nSigned-off-by: Jacob Keller <jacob.e.keller@intel.com>\nSigned-off-by: Paul M Stillwell Jr <paul.m.stillwell.jr@intel.com>\nSigned-off-by: Qi Zhang <qi.z.zhang@intel.com>\n---\n drivers/net/ice/base/ice_nvm.c | 114 ++++++++++++++++++++++++++++-------------\n drivers/net/ice/base/ice_nvm.h |   3 ++\n 2 files changed, 81 insertions(+), 36 deletions(-)",
    "diff": "diff --git a/drivers/net/ice/base/ice_nvm.c b/drivers/net/ice/base/ice_nvm.c\nindex 5dd702db3..80420afd3 100644\n--- a/drivers/net/ice/base/ice_nvm.c\n+++ b/drivers/net/ice/base/ice_nvm.c\n@@ -50,6 +50,74 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,\n }\n \n /**\n+ * ice_read_flat_nvm - Read portion of NVM by flat offset\n+ * @hw: pointer to the HW struct\n+ * @offset: offset from beginning of NVM\n+ * @length: (in) number of bytes to read; (out) number of bytes actually read\n+ * @data: buffer to return data in (sized to fit the specified length)\n+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM\n+ *\n+ * Reads a portion of the NVM, as a flat memory space. This function correctly\n+ * breaks read requests across Shadow RAM sectors and ensures that no single\n+ * read request exceeds the maximum 4Kb read for a single AdminQ command.\n+ *\n+ * Returns a status code on failure. Note that the data pointer may be\n+ * partially updated if some reads succeed before a failure.\n+ */\n+enum ice_status\n+ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,\n+\t\t  bool read_shadow_ram)\n+{\n+\tenum ice_status status;\n+\tu32 inlen = *length;\n+\tu32 bytes_read = 0;\n+\tbool last_cmd;\n+\n+\tice_debug(hw, ICE_DBG_TRACE, \"%s\\n\", __func__);\n+\n+\t*length = 0;\n+\n+\t/* Verify the length of the read if this is for the Shadow RAM */\n+\tif (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {\n+\t\tice_debug(hw, ICE_DBG_NVM,\n+\t\t\t  \"NVM error: requested data is beyond Shadow RAM limit\\n\");\n+\t\treturn ICE_ERR_PARAM;\n+\t}\n+\n+\tdo {\n+\t\tu32 read_size, sector_offset;\n+\n+\t\t/* ice_aq_read_nvm cannot read more than 4Kb at a time.\n+\t\t * Additionally, a read from the Shadow RAM may not cross over\n+\t\t * a sector boundary. Conveniently, the sector size is also\n+\t\t * 4Kb.\n+\t\t */\n+\t\tsector_offset = offset % ICE_AQ_MAX_BUF_LEN;\n+\t\tread_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,\n+\t\t\t\t  inlen - bytes_read);\n+\n+\t\tlast_cmd = !(bytes_read + read_size < inlen);\n+\n+\t\t/* ice_aq_read_nvm takes the length as a u16. Our read_size is\n+\t\t * calculated using a u32, but the ICE_AQ_MAX_BUF_LEN maximum\n+\t\t * size guarantees that it will fit within the 2 bytes.\n+\t\t */\n+\t\tstatus = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,\n+\t\t\t\t\t offset, (u16)read_size,\n+\t\t\t\t\t data + bytes_read, last_cmd,\n+\t\t\t\t\t read_shadow_ram, NULL);\n+\t\tif (status)\n+\t\t\tbreak;\n+\n+\t\tbytes_read += read_size;\n+\t\toffset += read_size;\n+\t} while (!last_cmd);\n+\n+\t*length = bytes_read;\n+\treturn status;\n+}\n+\n+/**\n  * ice_check_sr_access_params - verify params for Shadow RAM R/W operations.\n  * @hw: pointer to the HW structure\n  * @offset: offset in words from module start\n@@ -144,55 +212,29 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)\n  * @words: (in) number of words to read; (out) number of words actually read\n  * @data: words read from the Shadow RAM\n  *\n- * Reads 16 bit words (data buf) from the SR using the ice_read_sr_aq\n- * method. Ownership of the NVM is taken before reading the buffer and later\n- * released.\n+ * Reads 16 bit words (data buf) from the Shadow RAM. 
Ownership of the NVM is\n+ * taken before reading the buffer and later released.\n  */\n static enum ice_status\n ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)\n {\n+\tu32 bytes = *words * 2, i;\n \tenum ice_status status;\n-\tbool last_cmd = false;\n-\tu16 words_read = 0;\n-\tu16 i = 0;\n \n \tice_debug(hw, ICE_DBG_TRACE, \"%s\\n\", __func__);\n \n-\tdo {\n-\t\tu16 read_size, off_w;\n-\n-\t\t/* Calculate number of bytes we should read in this step.\n-\t\t * It's not allowed to read more than one page at a time or\n-\t\t * to cross page boundaries.\n-\t\t */\n-\t\toff_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS;\n-\t\tread_size = off_w ?\n-\t\t\tMIN_T(u16, *words,\n-\t\t\t      (ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) :\n-\t\t\tMIN_T(u16, (*words - words_read),\n-\t\t\t      ICE_SR_SECTOR_SIZE_IN_WORDS);\n-\n-\t\t/* Check if this is last command, if so set proper flag */\n-\t\tif ((words_read + read_size) >= *words)\n-\t\t\tlast_cmd = true;\n-\n-\t\tstatus = ice_read_sr_aq(hw, offset, read_size,\n-\t\t\t\t\tdata + words_read, last_cmd);\n-\t\tif (status)\n-\t\t\tgoto read_nvm_buf_aq_exit;\n+\t/* ice_read_flat_nvm takes into account the 4Kb AdminQ and Shadow RAM\n+\t * sector restrictions necessary when reading from the NVM.\n+\t */\n+\tstatus = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);\n \n-\t\t/* Increment counter for words already read and move offset to\n-\t\t * new read location\n-\t\t */\n-\t\twords_read += read_size;\n-\t\toffset += read_size;\n-\t} while (words_read < *words);\n+\t/* Report the number of words successfully read */\n+\t*words = bytes / 2;\n \n+\t/* Byte swap the words up to the amount we actually read */\n \tfor (i = 0; i < *words; i++)\n \t\tdata[i] = LE16_TO_CPU(((_FORCE_ __le16 *)data)[i]);\n \n-read_nvm_buf_aq_exit:\n-\t*words = words_read;\n \treturn status;\n }\n \ndiff --git a/drivers/net/ice/base/ice_nvm.h b/drivers/net/ice/base/ice_nvm.h\nindex d5b7b2d19..8dbda8242 100644\n--- a/drivers/net/ice/base/ice_nvm.h\n+++ b/drivers/net/ice/base/ice_nvm.h\n@@ -84,6 +84,9 @@ ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,\n enum ice_status\n ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,\n \t\t      union ice_nvm_access_data *data);\n+enum ice_status\n+ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,\n+\t\t  bool read_shadow_ram);\n enum ice_status ice_init_nvm(struct ice_hw *hw);\n enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);\n enum ice_status\n",
    "prefixes": [
        "19/28"
    ]
}
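
Read-only access to this endpoint needs no authentication. Below is a minimal sketch (again Python with the third-party requests library) that fetches the record shown above and uses a few of its fields; the field names are taken from the response above, while the local file name is arbitrary.

# Minimal sketch: fetch the patch record shown above and use a few fields.
import requests

resp = requests.get("http://patches.dpdk.org/api/patches/66436/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])    # "[19/28] net/ice/base: ..."
print(patch["state"])   # "superseded"
print(patch["check"])   # "success" -- aggregate of the results behind "checks"

# "mbox" points at the raw patch email, which can be fed to `git am`.
mbox = requests.get(patch["mbox"]).text
with open("patch-66436.mbox", "w") as f:  # arbitrary local file name
    f.write(mbox)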