get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/43024/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 43024,
    "url": "http://patches.dpdk.org/api/patches/43024/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20180713022825.33106-2-pablo.de.lara.guarch@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20180713022825.33106-2-pablo.de.lara.guarch@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20180713022825.33106-2-pablo.de.lara.guarch@intel.com",
    "date": "2018-07-13T02:28:10",
    "name": "[v7,01/16] common/qat: updated firmware headers",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "60788d380a2cf2308bf004a9df426cbd3d94ccda",
    "submitter": {
        "id": 9,
        "url": "http://patches.dpdk.org/api/people/9/?format=api",
        "name": "De Lara Guarch, Pablo",
        "email": "pablo.de.lara.guarch@intel.com"
    },
    "delegate": {
        "id": 22,
        "url": "http://patches.dpdk.org/api/users/22/?format=api",
        "username": "pdelarag",
        "first_name": "Pablo",
        "last_name": "de Lara Guarch",
        "email": "pablo.de.lara.guarch@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20180713022825.33106-2-pablo.de.lara.guarch@intel.com/mbox/",
    "series": [
        {
            "id": 561,
            "url": "http://patches.dpdk.org/api/series/561/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=561",
            "date": "2018-07-13T02:28:09",
            "name": "compress/qat: add compression PMD",
            "version": 7,
            "mbox": "http://patches.dpdk.org/series/561/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/43024/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/43024/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 77AF34F90;\n\tFri, 13 Jul 2018 12:34:13 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n\tby dpdk.org (Postfix) with ESMTP id 7FAE12BEB\n\tfor <dev@dpdk.org>; Fri, 13 Jul 2018 12:34:06 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n\tby orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t13 Jul 2018 03:34:06 -0700",
            "from silpixa00399466.ir.intel.com (HELO\n\tsilpixa00399466.ger.corp.intel.com) ([10.237.223.220])\n\tby orsmga004.jf.intel.com with ESMTP; 13 Jul 2018 03:34:04 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.51,347,1526367600\"; d=\"scan'208\";a=\"215718163\"",
        "From": "Pablo de Lara <pablo.de.lara.guarch@intel.com>",
        "To": "fiona.trahe@intel.com, tomaszx.jozwiak@intel.com, john.griffin@intel.com,\n\tdeepak.k.jain@intel.com",
        "Cc": "dev@dpdk.org",
        "Date": "Fri, 13 Jul 2018 03:28:10 +0100",
        "Message-Id": "<20180713022825.33106-2-pablo.de.lara.guarch@intel.com>",
        "X-Mailer": "git-send-email 2.14.4",
        "In-Reply-To": "<20180713022825.33106-1-pablo.de.lara.guarch@intel.com>",
        "References": "<1531411499-13156-1-git-send-email-fiona.trahe@intel.com>\n\t<20180713022825.33106-1-pablo.de.lara.guarch@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v7 01/16] common/qat: updated firmware headers",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Fiona Trahe <fiona.trahe@intel.com>\n\nUpdated to latest firmware headers files for QuickAssist devices.\nIncludes updates for symmetric crypto, PKE and Compression services.\n\nSigned-off-by: Fiona Trahe <fiona.trahe@intel.com>\n---\n drivers/common/qat/qat_adf/icp_qat_fw.h      |  69 +++-\n drivers/common/qat/qat_adf/icp_qat_fw_comp.h | 482 +++++++++++++++++++++++++++\n drivers/common/qat/qat_adf/icp_qat_hw.h      | 130 +++++++-\n 3 files changed, 654 insertions(+), 27 deletions(-)\n create mode 100644 drivers/common/qat/qat_adf/icp_qat_fw_comp.h",
    "diff": "diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h\nindex ae39b7f11..8f7cb37b4 100644\n--- a/drivers/common/qat/qat_adf/icp_qat_fw.h\n+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h\n@@ -117,6 +117,10 @@ struct icp_qat_fw_comn_resp {\n #define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7\n #define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1\n #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F\n+#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6\n+#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1\n+#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5\n+#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1\n \n #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \\\n \ticp_qat_fw_comn_req_hdr_t.service_type\n@@ -133,6 +137,16 @@ struct icp_qat_fw_comn_resp {\n #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \\\n \tICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)\n \n+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \\\n+\tQAT_FIELD_GET(hdr_flags, \\\n+\t\tICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \\\n+\t\tICP_QAT_FW_COMN_CNVNR_FLAG_MASK)\n+\n+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \\\n+\tQAT_FIELD_GET(hdr_flags, \\\n+\t\tICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \\\n+\t\tICP_QAT_FW_COMN_CNV_FLAG_MASK)\n+\n #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \\\n \tICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)\n \n@@ -204,29 +218,44 @@ struct icp_qat_fw_comn_resp {\n \t& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \\\n \t((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }\n \n+#define ICP_QAT_FW_COMN_NEXT_ID_SET_2(next_curr_id, val)                       \\\n+\tdo {                                                                   \\\n+\t\t(next_curr_id) =                                               \\\n+\t\t    (((next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) |         \\\n+\t\t     (((val) << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) &              \\\n+\t\t      ICP_QAT_FW_COMN_NEXT_ID_MASK))                           \\\n+\t} while (0)\n+\n+#define 
ICP_QAT_FW_COMN_CURR_ID_SET_2(next_curr_id, val)                       \\\n+\tdo {                                                                   \\\n+\t\t(next_curr_id) =                                               \\\n+\t\t    (((next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) |         \\\n+\t\t     ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK))                   \\\n+\t} while (0)\n+\n #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7\n #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1\n+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6\n+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1\n #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5\n #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1\n #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4\n #define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1\n #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3\n #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1\n-\n-#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \\\n-\t((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \\\n-\tQAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \\\n-\t(((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \\\n-\tQAT_COMN_RESP_CMP_STATUS_BITPOS) | \\\n-\t(((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \\\n-\tQAT_COMN_RESP_XLAT_STATUS_BITPOS) | \\\n-\t(((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \\\n-\tQAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))\n+#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2\n+#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1\n+#define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0\n+#define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1\n \n #define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \\\n \tQAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \\\n \tQAT_COMN_RESP_CRYPTO_STATUS_MASK)\n \n+#define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \\\n+\tQAT_FIELD_GET(status, QAT_COMN_RESP_PKE_STATUS_BITPOS, \\\n+\tQAT_COMN_RESP_PKE_STATUS_MASK)\n+\n #define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \\\n \tQAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \\\n 
\tQAT_COMN_RESP_CMP_STATUS_MASK)\n@@ -235,10 +264,18 @@ struct icp_qat_fw_comn_resp {\n \tQAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \\\n \tQAT_COMN_RESP_XLAT_STATUS_MASK)\n \n+#define ICP_QAT_FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \\\n+\tQAT_FIELD_GET(status, QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS, \\\n+\tQAT_COMN_RESP_XLT_WA_APPLIED_MASK)\n+\n #define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \\\n \tQAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \\\n \tQAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)\n \n+#define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \\\n+\tQAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \\\n+\tQAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)\n+\n #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0\n #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1\n #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0\n@@ -257,8 +294,16 @@ struct icp_qat_fw_comn_resp {\n #define ERR_CODE_OVERFLOW_ERROR -11\n #define ERR_CODE_SOFT_ERROR -12\n #define ERR_CODE_FATAL_ERROR -13\n-#define ERR_CODE_SSM_ERROR -14\n-#define ERR_CODE_ENDPOINT_ERROR -15\n+#define ERR_CODE_COMP_OUTPUT_CORRUPTION -14\n+#define ERR_CODE_HW_INCOMPLETE_FILE -15\n+#define ERR_CODE_SSM_ERROR -16\n+#define ERR_CODE_ENDPOINT_ERROR -17\n+#define ERR_CODE_CNV_ERROR -18\n+#define ERR_CODE_EMPTY_DYM_BLOCK -19\n+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20\n+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21\n+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22\n+#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23\n \n enum icp_qat_fw_slice {\n \tICP_QAT_FW_SLICE_NULL = 0,\ndiff --git a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h\nnew file mode 100644\nindex 000000000..813817720\n--- /dev/null\n+++ b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h\n@@ -0,0 +1,482 @@\n+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)\n+ * Copyright(c) 2015-2018 Intel Corporation\n+ 
*/\n+#ifndef _ICP_QAT_FW_COMP_H_\n+#define _ICP_QAT_FW_COMP_H_\n+\n+#include \"icp_qat_fw.h\"\n+\n+enum icp_qat_fw_comp_cmd_id {\n+\tICP_QAT_FW_COMP_CMD_STATIC = 0,\n+\t/*!< Static Compress Request */\n+\n+\tICP_QAT_FW_COMP_CMD_DYNAMIC = 1,\n+\t/*!< Dynamic Compress Request */\n+\n+\tICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,\n+\t/*!< Decompress Request */\n+\n+\tICP_QAT_FW_COMP_CMD_DELIMITER\n+\t/**< Delimiter type */\n+};\n+\n+/**< Flag usage */\n+\n+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0\n+/**< @ingroup icp_qat_fw_comp\n+ *  Flag representing that session is stateless\n+ */\n+\n+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1\n+/**< @ingroup icp_qat_fw_comp\n+ *  Flag representing that session is stateful\n+ */\n+\n+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing that autoselectbest is NOT used\n+ */\n+\n+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing that autoselectbest is used\n+ */\n+\n+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing that enhanced autoselectbest is NOT used\n+ */\n+\n+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing that enhanced autoselectbest is used\n+ */\n+\n+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing that enhanced autoselectbest is NOT used\n+ */\n+\n+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing that enhanced autoselectbest is used\n+ */\n+\n+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing secure RAM from being used as\n+ * an intermediate buffer is DISABLED.\n+ */\n+\n+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing secure RAM from being used as\n+ * 
an intermediate buffer is ENABLED.\n+ */\n+\n+/**< Flag mask & bit position */\n+\n+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2\n+/**< @ingroup icp_qat_fw_comp\n+ * Starting bit position for the session type\n+ */\n+\n+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1\n+/**< @ingroup icp_qat_fw_comp\n+ * One bit mask used to determine the session type\n+ */\n+\n+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3\n+/**< @ingroup icp_qat_fw_comp\n+ * Starting bit position for auto select best\n+ */\n+\n+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1\n+/**< @ingroup icp_qat_fw_comp\n+ * One bit mask for auto select best\n+ */\n+\n+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4\n+/**< @ingroup icp_qat_fw_comp\n+ * Starting bit position for enhanced auto select best\n+ */\n+\n+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1\n+/**< @ingroup icp_qat_fw_comp\n+ * One bit mask for enhanced auto select best\n+ */\n+\n+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5\n+/**< @ingroup icp_qat_fw_comp\n+ * Starting bit position for disabling type zero header write back\n+ * when Enhanced autoselect best is enabled. 
If set firmware does\n+ * not return type0 store block header, only copies src to dest.\n+ * (if best output is Type0)\n+ */\n+\n+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1\n+/**< @ingroup icp_qat_fw_comp\n+ * One bit mask for auto select best\n+ */\n+\n+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7\n+/**< @ingroup icp_qat_fw_comp\n+ * Starting bit position for flag used to disable secure ram from\n+ *  being used as an intermediate buffer.\n+ */\n+\n+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1\n+/**< @ingroup icp_qat_fw_comp\n+ * One bit mask for disable secure ram for use as an intermediate\n+ * buffer.\n+ */\n+\n+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb,        \\\n+\t\t\t\t    ret_uncomp, secure_ram)                    \\\n+\t((((sesstype)&ICP_QAT_FW_COMP_SESSION_TYPE_MASK)                       \\\n+\t  << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) |                            \\\n+\t (((autoselect)&ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK)                 \\\n+\t  << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) |                        \\\n+\t (((enhanced_asb)&ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK)      \\\n+\t  << ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) |               \\\n+\t (((ret_uncomp)&ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK)    \\\n+\t  << ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) |           \\\n+\t (((secure_ram)&ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK)  \\\n+\t  << ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))\n+\n+union icp_qat_fw_comp_req_hdr_cd_pars {\n+\t/**< LWs 2-5 */\n+\tstruct {\n+\t\tuint64_t content_desc_addr;\n+\t\t/**< Address of the content descriptor */\n+\n+\t\tuint16_t content_desc_resrvd1;\n+\t\t/**< Content descriptor reserved field */\n+\n+\t\tuint8_t content_desc_params_sz;\n+\t\t/**< Size of the content descriptor parameters in quad words.\n+\t\t * These parameters describe the session 
setup configuration\n+\t\t * info for the slices that this request relies upon i.e.\n+\t\t * the configuration word and cipher key needed by the cipher\n+\t\t * slice if there is a request for cipher processing.\n+\t\t */\n+\n+\t\tuint8_t content_desc_hdr_resrvd2;\n+\t\t/**< Content descriptor reserved field */\n+\n+\t\tuint32_t content_desc_resrvd3;\n+\t\t/**< Content descriptor reserved field */\n+\t} s;\n+\n+\tstruct {\n+\t\tuint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];\n+\t\t/* Compression Slice Config Word */\n+\n+\t\tuint32_t content_desc_resrvd4;\n+\t\t/**< Content descriptor reserved field */\n+\n+\t} sl;\n+\n+};\n+\n+struct icp_qat_fw_comp_req_params {\n+\t/**< LW 14 */\n+\tuint32_t comp_len;\n+\t/**< Size of input to process in bytes Note:  Only EOP requests can be\n+\t * odd for decompression. IA must set LSB to zero for odd sized\n+\t * intermediate inputs\n+\t */\n+\n+\t/**< LW 15 */\n+\tuint32_t out_buffer_sz;\n+\t/**< Size of output buffer in bytes */\n+\n+\t/**< LW 16 */\n+\tuint32_t initial_crc32;\n+\t/**< CRC of previously processed bytes */\n+\n+\t/**< LW 17 */\n+\tuint32_t initial_adler;\n+\t/**< Adler of previously processed bytes */\n+\n+\t/**< LW 18 */\n+\tuint32_t req_par_flags;\n+\n+\t/**< LW 19 */\n+\tuint32_t rsrvd;\n+};\n+\n+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr)    \\\n+\t((((sop)&ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) |    \\\n+\t (((eop)&ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) |    \\\n+\t (((bfinal)&ICP_QAT_FW_COMP_BFINAL_MASK)                               \\\n+\t  << ICP_QAT_FW_COMP_BFINAL_BITPOS) |                                  \\\n+\t ((cnv & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) |    \\\n+\t ((cnvnr & ICP_QAT_FW_COMP_CNV_RECOVERY_MASK)                          \\\n+\t  << ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS))\n+\n+#define ICP_QAT_FW_COMP_NOT_SOP 0\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing that a request is NOT 
Start of Packet\n+ */\n+\n+#define ICP_QAT_FW_COMP_SOP 1\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing that a request IS Start of Packet\n+ */\n+\n+#define ICP_QAT_FW_COMP_NOT_EOP 0\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing that a request is NOT Start of Packet\n+ */\n+\n+#define ICP_QAT_FW_COMP_EOP 1\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing that a request IS End of Packet\n+ */\n+\n+#define ICP_QAT_FW_COMP_NOT_BFINAL 0\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing to indicate firmware this is not the last block\n+ */\n+\n+#define ICP_QAT_FW_COMP_BFINAL 1\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag representing to indicate firmware this is the last block\n+ */\n+\n+#define ICP_QAT_FW_COMP_NO_CNV 0\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag indicating that NO cnv check is to be performed on the request\n+ */\n+\n+#define ICP_QAT_FW_COMP_CNV 1\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag indicating that a cnv check IS to be performed on the request\n+ */\n+\n+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag indicating that NO cnv recovery is to be performed on the request\n+ */\n+\n+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1\n+/**< @ingroup icp_qat_fw_comp\n+ * Flag indicating that a cnv recovery is to be performed on the request\n+ */\n+\n+#define ICP_QAT_FW_COMP_SOP_BITPOS 0\n+/**< @ingroup icp_qat_fw_comp\n+ * Starting bit position for SOP\n+ */\n+\n+#define ICP_QAT_FW_COMP_SOP_MASK 0x1\n+/**< @ingroup icp_qat_fw_comp\n+ *  One bit mask used to determine SOP\n+ */\n+\n+#define ICP_QAT_FW_COMP_EOP_BITPOS 1\n+/**< @ingroup icp_qat_fw_comp\n+ *  Starting bit position for EOP\n+ */\n+\n+#define ICP_QAT_FW_COMP_EOP_MASK 0x1\n+/**< @ingroup icp_qat_fw_comp\n+ *  One bit mask used to determine EOP\n+ */\n+\n+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1\n+/**< @ingroup icp_qat_fw_comp\n+ *  One bit mask for the bfinal bit\n+ */\n+\n+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6\n+/**< @ingroup icp_qat_fw_comp\n+ * 
 Starting bit position for the bfinal bit\n+ */\n+\n+#define ICP_QAT_FW_COMP_CNV_MASK 0x1\n+/**< @ingroup icp_qat_fw_comp\n+ * One bit mask for the CNV bit\n+ */\n+\n+#define ICP_QAT_FW_COMP_CNV_BITPOS 16\n+/**< @ingroup icp_qat_fw_comp\n+ * Starting bit position for the CNV bit\n+ */\n+\n+#define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1\n+/**< @ingroup icp_qat_fw_comp\n+ * One bit mask for the CNV Recovery bit\n+ */\n+\n+#define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17\n+/**< @ingroup icp_qat_fw_comp\n+ * Starting bit position for the CNV Recovery bit\n+ */\n+\n+struct icp_qat_fw_xlt_req_params {\n+\t/**< LWs 20-21 */\n+\tuint64_t inter_buff_ptr;\n+\t/**< This field specifies the physical address of an intermediate\n+\t *  buffer SGL array. The array contains a pair of 64-bit\n+\t *  intermediate buffer pointers to SGL buffer descriptors, one pair\n+\t *  per CPM. Please refer to the CPM1.6 Firmware Interface HLD\n+\t *  specification for more details.\n+\t */\n+};\n+\n+\n+struct icp_qat_fw_comp_cd_hdr {\n+\t/**< LW 24 */\n+\tuint16_t ram_bank_flags;\n+\t/**< Flags to show which ram banks to access */\n+\n+\tuint8_t comp_cfg_offset;\n+\t/**< Quad word offset from the content descriptor parameters address\n+\t * to the parameters for the compression processing\n+\t */\n+\n+\tuint8_t next_curr_id;\n+\t/**< This field combines the next and current id (each four bits) -\n+\t * the next id is the most significant nibble.\n+\t * Next Id:  Set to the next slice to pass the compressed data through.\n+\t * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through\n+\t * anymore slices after compression\n+\t * Current Id: Initialised with the compression slice type\n+\t */\n+\n+\t/**< LW 25 */\n+\tuint32_t resrvd;\n+\t/**< LWs 26-27 */\n+\n+\tuint64_t comp_state_addr;\n+\t/**< Pointer to compression state */\n+\n+\t/**< LWs 28-29 */\n+\tuint64_t ram_banks_addr;\n+\t/**< Pointer to banks */\n+\n+};\n+\n+\n+struct icp_qat_fw_xlt_cd_hdr {\n+\t/**< LW 30 */\n+\tuint16_t 
resrvd1;\n+\t/**< Reserved field and assumed set to 0 */\n+\n+\tuint8_t resrvd2;\n+\t/**< Reserved field and assumed set to 0 */\n+\n+\tuint8_t next_curr_id;\n+\t/**< This field combines the next and current id (each four bits) -\n+\t * the next id is the most significant nibble.\n+\t * Next Id:  Set to the next slice to pass the translated data through.\n+\t * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through\n+\t * any more slices after compression\n+\t * Current Id: Initialised with the translation slice type\n+\t */\n+\n+\t/**< LW 31 */\n+\tuint32_t resrvd3;\n+\t/**< Reserved and should be set to zero, needed for quadword\n+\t * alignment\n+\t */\n+};\n+\n+struct icp_qat_fw_comp_req {\n+\t/**< LWs 0-1 */\n+\tstruct icp_qat_fw_comn_req_hdr comn_hdr;\n+\t/**< Common request header - for Service Command Id,\n+\t * use service-specific Compression Command Id.\n+\t * Service Specific Flags - use Compression Command Flags\n+\t */\n+\n+\t/**< LWs 2-5 */\n+\tunion icp_qat_fw_comp_req_hdr_cd_pars cd_pars;\n+\t/**< Compression service-specific content descriptor field which points\n+\t * either to a content descriptor parameter block or contains the\n+\t * compression slice config word.\n+\t */\n+\n+\t/**< LWs 6-13 */\n+\tstruct icp_qat_fw_comn_req_mid comn_mid;\n+\t/**< Common request middle section */\n+\n+\t/**< LWs 14-19 */\n+\tstruct icp_qat_fw_comp_req_params comp_pars;\n+\t/**< Compression request Parameters block */\n+\n+\t/**< LWs 20-21 */\n+\tunion {\n+\t\tstruct icp_qat_fw_xlt_req_params xlt_pars;\n+\t\t/**< Translation request Parameters block */\n+\t\tuint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];\n+\t\t/**< Reserved if not used for translation */\n+\n+\t} u1;\n+\n+\t/**< LWs 22-23 */\n+\tunion {\n+\t\tuint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];\n+\t\t/**< Reserved - not used if Batch and Pack is disabled.*/\n+\n+\t\tuint64_t bnp_res_table_addr;\n+\t\t/**< A generic pointer to the unbounded list of\n+\t\t * icp_qat_fw_resp_comp_pars members. 
This pointer is only\n+\t\t * used when the Batch and Pack is enabled.\n+\t\t */\n+\t} u3;\n+\n+\t/**< LWs 24-29 */\n+\tstruct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;\n+\t/**< Compression request content descriptor control block header */\n+\n+\t/**< LWs 30-31 */\n+\tunion {\n+\t\tstruct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;\n+\t\t/**< Translation request content descriptor\n+\t\t * control block header\n+\t\t */\n+\n+\t\tuint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];\n+\t\t/**< Reserved if not used for translation */\n+\t} u2;\n+};\n+\n+struct icp_qat_fw_resp_comp_pars {\n+\t/**< LW 4 */\n+\tuint32_t input_byte_counter;\n+\t/**< Input byte counter */\n+\n+\t/**< LW 5 */\n+\tuint32_t output_byte_counter;\n+\t/**< Output byte counter */\n+\n+\t/**< LW 6 & 7*/\n+\tunion {\n+\t\tuint64_t curr_chksum;\n+\t\tstruct {\n+\t\t\t/**< LW 6 */\n+\t\t\tuint32_t curr_crc32;\n+\t\t\t/**< LW 7 */\n+\t\t\tuint32_t curr_adler_32;\n+\t\t};\n+\t};\n+};\n+\n+struct icp_qat_fw_comp_resp {\n+\t/**< LWs 0-1 */\n+\tstruct icp_qat_fw_comn_resp_hdr comn_resp;\n+\t/**< Common interface response format see icp_qat_fw.h */\n+\n+\t/**< LWs 2-3 */\n+\tuint64_t opaque_data;\n+\t/**< Opaque data passed from the request to the response message */\n+\n+\t/**< LWs 4-7 */\n+\tstruct icp_qat_fw_resp_comp_pars comp_resp_pars;\n+\t/**< Common response params (checksums and byte counts) */\n+};\n+\n+#endif\ndiff --git a/drivers/common/qat/qat_adf/icp_qat_hw.h b/drivers/common/qat/qat_adf/icp_qat_hw.h\nindex 56e3cf798..e7961dba2 100644\n--- a/drivers/common/qat/qat_adf/icp_qat_hw.h\n+++ b/drivers/common/qat/qat_adf/icp_qat_hw.h\n@@ -72,19 +72,44 @@ struct icp_qat_hw_auth_config {\n #define QAT_AUTH_ALGO_MASK 0xF\n #define QAT_AUTH_CMP_BITPOS 8\n #define QAT_AUTH_CMP_MASK 0x7F\n-#define QAT_AUTH_SHA3_PADDING_BITPOS 16\n-#define QAT_AUTH_SHA3_PADDING_MASK 0x1\n+#define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16\n+#define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1\n+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 
17\n+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1\n #define QAT_AUTH_ALGO_SHA3_BITPOS 22\n #define QAT_AUTH_ALGO_SHA3_MASK 0x3\n-#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \\\n-\t(((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \\\n-\t((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \\\n-\t(((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \\\n-\t QAT_AUTH_ALGO_SHA3_BITPOS) | \\\n-\t (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \\\n-\t(algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \\\n-\t& QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \\\n-\t((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))\n+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16\n+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF\n+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24\n+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF\n+#define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0\n+#define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1\n+#define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0\n+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0\n+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1\n+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0\n+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0\n+\n+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len)                      \\\n+\t((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) |             \\\n+\t (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) |             \\\n+\t (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK)                              \\\n+\t\t\t<< QAT_AUTH_ALGO_SHA3_BITPOS) |                        \\\n+\t (((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT) &                       \\\n+\t\t\tQAT_AUTH_SHA3_PADDING_DISABLE_MASK)                    \\\n+\t\t\t<< QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) |             \\\n+\t (((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT) &                      \\\n+\t\t\tQAT_AUTH_SHA3_PADDING_OVERRIDE_MASK)                   \\\n+\t\t\t<< 
QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) |            \\\n+\t (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))\n+\n+#define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER                                     \\\n+\t((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED) &                     \\\n+\t\tQAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK)                       \\\n+\t\t<< QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) |                \\\n+\t (((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED) &                      \\\n+\t\tQAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK)                        \\\n+\t\t<< QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS))\n \n struct icp_qat_hw_auth_counter {\n \tuint32_t counter;\n@@ -107,13 +132,13 @@ struct icp_qat_hw_auth_setup {\n #define ICP_QAT_HW_MD5_STATE1_SZ 16\n #define ICP_QAT_HW_SHA1_STATE1_SZ 20\n #define ICP_QAT_HW_SHA224_STATE1_SZ 32\n+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28\n #define ICP_QAT_HW_SHA256_STATE1_SZ 32\n #define ICP_QAT_HW_SHA3_256_STATE1_SZ 32\n #define ICP_QAT_HW_SHA384_STATE1_SZ 64\n+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48\n #define ICP_QAT_HW_SHA512_STATE1_SZ 64\n #define ICP_QAT_HW_SHA3_512_STATE1_SZ 64\n-#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28\n-#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48\n #define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16\n #define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16\n #define ICP_QAT_HW_AES_F9_STATE1_SZ 32\n@@ -121,17 +146,18 @@ struct icp_qat_hw_auth_setup {\n #define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16\n #define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8\n #define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8\n+\n #define ICP_QAT_HW_NULL_STATE2_SZ 32\n #define ICP_QAT_HW_MD5_STATE2_SZ 16\n #define ICP_QAT_HW_SHA1_STATE2_SZ 20\n #define ICP_QAT_HW_SHA224_STATE2_SZ 32\n+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0\n #define ICP_QAT_HW_SHA256_STATE2_SZ 32\n #define ICP_QAT_HW_SHA3_256_STATE2_SZ 0\n #define ICP_QAT_HW_SHA384_STATE2_SZ 64\n+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0\n #define ICP_QAT_HW_SHA512_STATE2_SZ 64\n #define 
ICP_QAT_HW_SHA3_512_STATE2_SZ 0\n-#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0\n-#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0\n #define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48\n #define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16\n #define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16\n@@ -154,6 +180,12 @@ struct icp_qat_hw_auth_sha512 {\n \tuint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];\n };\n \n+struct icp_qat_hw_auth_sha3_512 {\n+\tstruct icp_qat_hw_auth_setup inner_setup;\n+\tuint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ];\n+\tstruct icp_qat_hw_auth_setup outer_setup;\n+};\n+\n struct icp_qat_hw_auth_algo_blk {\n \tstruct icp_qat_hw_auth_sha512 sha;\n };\n@@ -283,4 +315,72 @@ struct icp_qat_hw_cipher_algo_blk {\n \tuint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];\n } __rte_cache_aligned;\n \n+/* ========================================================================= */\n+/*                COMPRESSION SLICE                                          */\n+/* ========================================================================= */\n+\n+enum icp_qat_hw_compression_direction {\n+\tICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,\n+\tICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,\n+\tICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2\n+};\n+\n+enum icp_qat_hw_compression_delayed_match {\n+\tICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,\n+\tICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,\n+\tICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2\n+};\n+\n+enum icp_qat_hw_compression_algo {\n+\tICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,\n+\tICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,\n+\tICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2\n+};\n+\n+\n+enum icp_qat_hw_compression_depth {\n+\tICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,\n+\tICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,\n+\tICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,\n+\tICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,\n+\tICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 4\n+};\n+\n+enum icp_qat_hw_compression_file_type {\n+\tICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,\n+\tICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 
1,\n+\tICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,\n+\tICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,\n+\tICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,\n+\tICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5\n+};\n+\n+struct icp_qat_hw_compression_config {\n+\tuint32_t val;\n+\tuint32_t reserved;\n+};\n+\n+#define QAT_COMPRESSION_DIR_BITPOS 4\n+#define QAT_COMPRESSION_DIR_MASK 0x7\n+#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16\n+#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1\n+#define QAT_COMPRESSION_ALGO_BITPOS 31\n+#define QAT_COMPRESSION_ALGO_MASK 0x1\n+#define QAT_COMPRESSION_DEPTH_BITPOS 28\n+#define QAT_COMPRESSION_DEPTH_MASK 0x7\n+#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24\n+#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF\n+\n+#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(                                   \\\n+\tdir, delayed, algo, depth, filetype)                                   \\\n+\t((((dir) & QAT_COMPRESSION_DIR_MASK) << QAT_COMPRESSION_DIR_BITPOS) |  \\\n+\t (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK)                     \\\n+\t  << QAT_COMPRESSION_DELAYED_MATCH_BITPOS) |                           \\\n+\t (((algo) & QAT_COMPRESSION_ALGO_MASK)                                 \\\n+\t  << QAT_COMPRESSION_ALGO_BITPOS) |                                    \\\n+\t (((depth) & QAT_COMPRESSION_DEPTH_MASK)                               \\\n+\t  << QAT_COMPRESSION_DEPTH_BITPOS) |                                   \\\n+\t (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK)                        \\\n+\t  << QAT_COMPRESSION_FILE_TYPE_BITPOS))\n+\n #endif\n",
    "prefixes": [
        "v7",
        "01/16"
    ]
}