get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch (full update).
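Both update methods require an authenticated request, and only a project maintainer can change fields such as "state", "delegate" or "archived". The following is a minimal sketch, not part of the API output, of a partial update using Python's requests library; the token value and the new state are placeholders, and the exact set of writable fields may vary with the Patchwork version.

# Hypothetical sketch: partially update a patch via the Patchwork REST API.
# Assumes a personal API token with maintainer rights on the project;
# "YOUR_TOKEN" and the new field values are placeholders.
import requests

PATCH_URL = "https://patches.dpdk.org/api/patches/41043/"
headers = {"Authorization": "Token YOUR_TOKEN"}

resp = requests.patch(
    PATCH_URL,
    headers=headers,
    # Both fields appear in the GET response shown below.
    json={"state": "accepted", "archived": True},
    timeout=30,
)
resp.raise_for_status()
updated = resp.json()
print(updated["state"], updated["archived"])

A GET request needs no authentication; an example request and its JSON response follow.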

GET /api/patches/41043/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 41043,
    "url": "https://patches.dpdk.org/api/patches/41043/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1528892062-4997-11-git-send-email-tomaszx.jozwiak@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1528892062-4997-11-git-send-email-tomaszx.jozwiak@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1528892062-4997-11-git-send-email-tomaszx.jozwiak@intel.com",
    "date": "2018-06-13T12:13:54",
    "name": "[v3,10/38] crypto/qat: move generic qp fn to qp file",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "a146dbec13aa9db7ab9bb163ba2cd3ec055369c0",
    "submitter": {
        "id": 949,
        "url": "https://patches.dpdk.org/api/people/949/?format=api",
        "name": "Tomasz Jozwiak",
        "email": "tomaszx.jozwiak@intel.com"
    },
    "delegate": {
        "id": 22,
        "url": "https://patches.dpdk.org/api/users/22/?format=api",
        "username": "pdelarag",
        "first_name": "Pablo",
        "last_name": "de Lara Guarch",
        "email": "pablo.de.lara.guarch@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1528892062-4997-11-git-send-email-tomaszx.jozwiak@intel.com/mbox/",
    "series": [
        {
            "id": 111,
            "url": "https://patches.dpdk.org/api/series/111/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=111",
            "date": "2018-06-13T12:13:44",
            "name": "crypto/qat: refactor to support multiple services",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/111/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/41043/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/41043/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1D1711EF6A;\n\tWed, 13 Jun 2018 14:14:49 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id 1CE1F1EF3C\n\tfor <dev@dpdk.org>; Wed, 13 Jun 2018 14:14:40 +0200 (CEST)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n\tby fmsmga101.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t13 Jun 2018 05:14:40 -0700",
            "from tjozwiax-mobl.ger.corp.intel.com (HELO localhost.localdomain)\n\t([10.103.104.42])\n\tby fmsmga001.fm.intel.com with ESMTP; 13 Jun 2018 05:14:39 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.51,218,1526367600\"; d=\"scan'208\";a=\"63727691\"",
        "From": "Tomasz Jozwiak <tomaszx.jozwiak@intel.com>",
        "To": "fiona.trahe@intel.com,\n\ttomaszx.jozwiak@intel.com,\n\tdev@dpdk.org",
        "Date": "Wed, 13 Jun 2018 14:13:54 +0200",
        "Message-Id": "<1528892062-4997-11-git-send-email-tomaszx.jozwiak@intel.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1528892062-4997-1-git-send-email-tomaszx.jozwiak@intel.com>",
        "References": "<1523040732-3290-1-git-send-email-fiona.trahe@intel.com>\n\t<1528892062-4997-1-git-send-email-tomaszx.jozwiak@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 10/38] crypto/qat: move generic qp fn to qp\n\tfile",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Fiona Trahe <fiona.trahe@intel.com>\n\nMove the generic enqueue and dequeue fns from\nthe qat_sym.c file to the qat_qp.c file\nMove generic qp structs to a new qat_qp.h file\n\nSigned-off-by: Fiona Trahe <fiona.trahe@intel.com>\n---\n drivers/crypto/qat/qat_qp.c  | 152 +++++++++++++++++++++++++++++++++++\n drivers/crypto/qat/qat_qp.h  |  63 +++++++++++++++\n drivers/crypto/qat/qat_sym.c | 149 +---------------------------------\n drivers/crypto/qat/qat_sym.h |  49 -----------\n 4 files changed, 216 insertions(+), 197 deletions(-)\n create mode 100644 drivers/crypto/qat/qat_qp.h",
    "diff": "diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c\nindex bae6cf114..56ea10242 100644\n--- a/drivers/crypto/qat/qat_qp.c\n+++ b/drivers/crypto/qat/qat_qp.c\n@@ -13,7 +13,9 @@\n #include <rte_prefetch.h>\n \n #include \"qat_logs.h\"\n+#include \"qat_qp.h\"\n #include \"qat_sym.h\"\n+\n #include \"adf_transport_access_macros.h\"\n \n #define ADF_MAX_SYM_DESC\t\t\t4096\n@@ -450,3 +452,153 @@ static void adf_configure_queues(struct qat_qp *qp)\n \tWRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,\n \t\t\tqueue->hw_queue_number, queue_config);\n }\n+\n+\n+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)\n+{\n+\tuint32_t div = data >> shift;\n+\tuint32_t mult = div << shift;\n+\n+\treturn data - mult;\n+}\n+\n+static inline void\n+txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {\n+\tWRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,\n+\t\t\tq->hw_queue_number, q->tail);\n+\tq->nb_pending_requests = 0;\n+\tq->csr_tail = q->tail;\n+}\n+\n+static inline\n+void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)\n+{\n+\tuint32_t old_head, new_head;\n+\tuint32_t max_head;\n+\n+\told_head = q->csr_head;\n+\tnew_head = q->head;\n+\tmax_head = qp->nb_descriptors * q->msg_size;\n+\n+\t/* write out free descriptors */\n+\tvoid *cur_desc = (uint8_t *)q->base_addr + old_head;\n+\n+\tif (new_head < old_head) {\n+\t\tmemset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);\n+\t\tmemset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);\n+\t} else {\n+\t\tmemset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);\n+\t}\n+\tq->nb_processed_responses = 0;\n+\tq->csr_head = new_head;\n+\n+\t/* write current head to CSR */\n+\tWRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,\n+\t\t\t    q->hw_queue_number, new_head);\n+}\n+\n+uint16_t\n+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)\n+{\n+\tregister struct qat_queue *queue;\n+\tstruct qat_qp *tmp_qp = (struct qat_qp *)qp;\n+\tregister uint32_t nb_ops_sent = 0;\n+\tregister int ret;\n+\tuint16_t nb_ops_possible = nb_ops;\n+\tregister uint8_t *base_addr;\n+\tregister uint32_t tail;\n+\tint overflow;\n+\n+\tif (unlikely(nb_ops == 0))\n+\t\treturn 0;\n+\n+\t/* read params used a lot in main loop into registers */\n+\tqueue = &(tmp_qp->tx_q);\n+\tbase_addr = (uint8_t *)queue->base_addr;\n+\ttail = queue->tail;\n+\n+\t/* Find how many can actually fit on the ring */\n+\ttmp_qp->inflights16 += nb_ops;\n+\toverflow = tmp_qp->inflights16 - queue->max_inflights;\n+\tif (overflow > 0) {\n+\t\ttmp_qp->inflights16 -= overflow;\n+\t\tnb_ops_possible = nb_ops - overflow;\n+\t\tif (nb_ops_possible == 0)\n+\t\t\treturn 0;\n+\t}\n+\n+\twhile (nb_ops_sent != nb_ops_possible) {\n+\t\tret = tmp_qp->build_request(*ops, base_addr + tail,\n+\t\t\t\ttmp_qp->op_cookies[tail / queue->msg_size],\n+\t\t\t\ttmp_qp->qat_dev_gen);\n+\t\tif (ret != 0) {\n+\t\t\ttmp_qp->stats.enqueue_err_count++;\n+\t\t\t/*\n+\t\t\t * This message cannot be enqueued,\n+\t\t\t * decrease number of ops that wasn't sent\n+\t\t\t */\n+\t\t\ttmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;\n+\t\t\tif (nb_ops_sent == 0)\n+\t\t\t\treturn 0;\n+\t\t\tgoto kick_tail;\n+\t\t}\n+\n+\t\ttail = adf_modulo(tail + queue->msg_size, queue->modulo);\n+\t\tops++;\n+\t\tnb_ops_sent++;\n+\t}\n+kick_tail:\n+\tqueue->tail = tail;\n+\ttmp_qp->stats.enqueued_count += nb_ops_sent;\n+\tqueue->nb_pending_requests += nb_ops_sent;\n+\tif (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||\n+\t\t    
queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {\n+\t\ttxq_write_tail(tmp_qp, queue);\n+\t}\n+\treturn nb_ops_sent;\n+}\n+\n+uint16_t\n+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)\n+{\n+\tstruct qat_queue *rx_queue, *tx_queue;\n+\tstruct qat_qp *tmp_qp = (struct qat_qp *)qp;\n+\tuint32_t head;\n+\tuint32_t resp_counter = 0;\n+\tuint8_t *resp_msg;\n+\n+\trx_queue = &(tmp_qp->rx_q);\n+\ttx_queue = &(tmp_qp->tx_q);\n+\thead = rx_queue->head;\n+\tresp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;\n+\n+\twhile (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&\n+\t\t\tresp_counter != nb_ops) {\n+\n+\t\ttmp_qp->process_response(ops, resp_msg,\n+\t\t\ttmp_qp->op_cookies[head / rx_queue->msg_size],\n+\t\t\ttmp_qp->qat_dev_gen);\n+\n+\t\thead = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);\n+\n+\t\tresp_msg = (uint8_t *)rx_queue->base_addr + head;\n+\t\tops++;\n+\t\tresp_counter++;\n+\t}\n+\tif (resp_counter > 0) {\n+\t\trx_queue->head = head;\n+\t\ttmp_qp->stats.dequeued_count += resp_counter;\n+\t\trx_queue->nb_processed_responses += resp_counter;\n+\t\ttmp_qp->inflights16 -= resp_counter;\n+\n+\t\tif (rx_queue->nb_processed_responses >\n+\t\t\t\t\t\tQAT_CSR_HEAD_WRITE_THRESH)\n+\t\t\trxq_free_desc(tmp_qp, rx_queue);\n+\t}\n+\t/* also check if tail needs to be advanced */\n+\tif (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&\n+\t\ttx_queue->tail != tx_queue->csr_tail) {\n+\t\ttxq_write_tail(tmp_qp, tx_queue);\n+\t}\n+\treturn resp_counter;\n+}\ndiff --git a/drivers/crypto/qat/qat_qp.h b/drivers/crypto/qat/qat_qp.h\nnew file mode 100644\nindex 000000000..87d55c5f2\n--- /dev/null\n+++ b/drivers/crypto/qat/qat_qp.h\n@@ -0,0 +1,63 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Intel Corporation\n+ */\n+#ifndef _QAT_QP_H_\n+#define _QAT_QP_H_\n+\n+#include \"qat_common.h\"\n+\n+typedef int (*build_request_t)(void *op,\n+\t\tuint8_t *req, void *op_cookie,\n+\t\tenum qat_device_gen qat_dev_gen);\n+/**< Build a request from an op. */\n+\n+typedef int (*process_response_t)(void **ops,\n+\t\tuint8_t *resp, void *op_cookie,\n+\t\tenum qat_device_gen qat_dev_gen);\n+/**< Process a response descriptor and return the associated op. 
*/\n+\n+/**\n+ * Structure associated with each queue.\n+ */\n+struct qat_queue {\n+\tchar\t\tmemz_name[RTE_MEMZONE_NAMESIZE];\n+\tvoid\t\t*base_addr;\t\t/* Base address */\n+\trte_iova_t\tbase_phys_addr;\t\t/* Queue physical address */\n+\tuint32_t\thead;\t\t\t/* Shadow copy of the head */\n+\tuint32_t\ttail;\t\t\t/* Shadow copy of the tail */\n+\tuint32_t\tmodulo;\n+\tuint32_t\tmsg_size;\n+\tuint16_t\tmax_inflights;\n+\tuint32_t\tqueue_size;\n+\tuint8_t\t\thw_bundle_number;\n+\tuint8_t\t\thw_queue_number;\n+\t/* HW queue aka ring offset on bundle */\n+\tuint32_t\tcsr_head;\t\t/* last written head value */\n+\tuint32_t\tcsr_tail;\t\t/* last written tail value */\n+\tuint16_t\tnb_processed_responses;\n+\t/* number of responses processed since last CSR head write */\n+\tuint16_t\tnb_pending_requests;\n+\t/* number of requests pending since last CSR tail write */\n+};\n+\n+struct qat_qp {\n+\tvoid\t\t\t*mmap_bar_addr;\n+\tuint16_t\t\tinflights16;\n+\tstruct\tqat_queue\ttx_q;\n+\tstruct\tqat_queue\trx_q;\n+\tstruct\trte_cryptodev_stats stats;\n+\tstruct rte_mempool *op_cookie_pool;\n+\tvoid **op_cookies;\n+\tuint32_t nb_descriptors;\n+\tenum qat_device_gen qat_dev_gen;\n+\tbuild_request_t build_request;\n+\tprocess_response_t process_response;\n+} __rte_cache_aligned;\n+\n+uint16_t\n+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);\n+\n+uint16_t\n+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);\n+\n+#endif /* _QAT_QP_H_ */\ndiff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c\nindex 2bae913a1..ab8ce2c96 100644\n--- a/drivers/crypto/qat/qat_sym.c\n+++ b/drivers/crypto/qat/qat_sym.c\n@@ -14,6 +14,7 @@\n #include \"qat_logs.h\"\n #include \"qat_sym_session.h\"\n #include \"qat_sym.h\"\n+#include \"qat_qp.h\"\n #include \"adf_transport_access_macros.h\"\n \n #define BYTE_LENGTH    8\n@@ -83,8 +84,6 @@ bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,\n /** Creates a context in either AES or DES in ECB mode\n  *  Depends on openssl libcrypto\n  */\n-static inline uint32_t\n-adf_modulo(uint32_t data, uint32_t shift);\n \n static inline uint32_t\n qat_bpicipher_preprocess(struct qat_sym_session *ctx,\n@@ -197,102 +196,6 @@ qat_bpicipher_postprocess(struct qat_sym_session *ctx,\n \treturn sym_op->cipher.data.length - last_block_len;\n }\n \n-static inline void\n-txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {\n-\tWRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,\n-\t\t\tq->hw_queue_number, q->tail);\n-\tq->nb_pending_requests = 0;\n-\tq->csr_tail = q->tail;\n-}\n-\n-static uint16_t\n-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)\n-{\n-\tregister struct qat_queue *queue;\n-\tstruct qat_qp *tmp_qp = (struct qat_qp *)qp;\n-\tregister uint32_t nb_ops_sent = 0;\n-\tregister int ret;\n-\tuint16_t nb_ops_possible = nb_ops;\n-\tregister uint8_t *base_addr;\n-\tregister uint32_t tail;\n-\tint overflow;\n-\n-\tif (unlikely(nb_ops == 0))\n-\t\treturn 0;\n-\n-\t/* read params used a lot in main loop into registers */\n-\tqueue = &(tmp_qp->tx_q);\n-\tbase_addr = (uint8_t *)queue->base_addr;\n-\ttail = queue->tail;\n-\n-\t/* Find how many can actually fit on the ring */\n-\ttmp_qp->inflights16 += nb_ops;\n-\toverflow = tmp_qp->inflights16 - queue->max_inflights;\n-\tif (overflow > 0) {\n-\t\ttmp_qp->inflights16 -= overflow;\n-\t\tnb_ops_possible = nb_ops - overflow;\n-\t\tif (nb_ops_possible == 0)\n-\t\t\treturn 0;\n-\t}\n-\n-\twhile (nb_ops_sent != nb_ops_possible) {\n-\t\tret = tmp_qp->build_request(*ops, base_addr + 
tail,\n-\t\t\t\ttmp_qp->op_cookies[tail / queue->msg_size],\n-\t\t\t\ttmp_qp->qat_dev_gen);\n-\t\tif (ret != 0) {\n-\t\t\ttmp_qp->stats.enqueue_err_count++;\n-\t\t\t/*\n-\t\t\t * This message cannot be enqueued,\n-\t\t\t * decrease number of ops that wasn't sent\n-\t\t\t */\n-\t\t\ttmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;\n-\t\t\tif (nb_ops_sent == 0)\n-\t\t\t\treturn 0;\n-\t\t\tgoto kick_tail;\n-\t\t}\n-\n-\t\ttail = adf_modulo(tail + queue->msg_size, queue->modulo);\n-\t\tops++;\n-\t\tnb_ops_sent++;\n-\t}\n-kick_tail:\n-\tqueue->tail = tail;\n-\ttmp_qp->stats.enqueued_count += nb_ops_sent;\n-\tqueue->nb_pending_requests += nb_ops_sent;\n-\tif (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||\n-\t\t\tqueue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {\n-\t\ttxq_write_tail(tmp_qp, queue);\n-\t}\n-\treturn nb_ops_sent;\n-}\n-\n-static inline\n-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)\n-{\n-\tuint32_t old_head, new_head;\n-\tuint32_t max_head;\n-\n-\told_head = q->csr_head;\n-\tnew_head = q->head;\n-\tmax_head = qp->nb_descriptors * q->msg_size;\n-\n-\t/* write out free descriptors */\n-\tvoid *cur_desc = (uint8_t *)q->base_addr + old_head;\n-\n-\tif (new_head < old_head) {\n-\t\tmemset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);\n-\t\tmemset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);\n-\t} else {\n-\t\tmemset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);\n-\t}\n-\tq->nb_processed_responses = 0;\n-\tq->csr_head = new_head;\n-\n-\t/* write current head to CSR */\n-\tWRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,\n-\t\t\t    q->hw_queue_number, new_head);\n-}\n-\n uint16_t\n qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,\n \t\tuint16_t nb_ops)\n@@ -336,49 +239,6 @@ qat_sym_process_response(void **op, uint8_t *resp,\n \treturn 0;\n }\n \n-static uint16_t\n-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)\n-{\n-\tstruct qat_queue *rx_queue, *tx_queue;\n-\tstruct qat_qp *tmp_qp = (struct qat_qp *)qp;\n-\tuint32_t head;\n-\tuint32_t resp_counter = 0;\n-\tuint8_t *resp_msg;\n-\n-\trx_queue = &(tmp_qp->rx_q);\n-\ttx_queue = &(tmp_qp->tx_q);\n-\thead = rx_queue->head;\n-\tresp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;\n-\n-\twhile (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&\n-\t\t\tresp_counter != nb_ops) {\n-\n-\t\ttmp_qp->process_response(ops, resp_msg,\n-\t\t\ttmp_qp->op_cookies[head / rx_queue->msg_size],\n-\t\t\ttmp_qp->qat_dev_gen);\n-\n-\t\thead = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);\n-\n-\t\tresp_msg = (uint8_t *)rx_queue->base_addr + head;\n-\t\tops++;\n-\t\tresp_counter++;\n-\t}\n-\tif (resp_counter > 0) {\n-\t\trx_queue->head = head;\n-\t\ttmp_qp->stats.dequeued_count += resp_counter;\n-\t\trx_queue->nb_processed_responses += resp_counter;\n-\t\ttmp_qp->inflights16 -= resp_counter;\n-\n-\t\tif (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)\n-\t\t\trxq_free_desc(tmp_qp, rx_queue);\n-\t}\n-\t/* also check if tail needs to be advanced */\n-\tif (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&\n-\t\ttx_queue->tail != tx_queue->csr_tail) {\n-\t\ttxq_write_tail(tmp_qp, tx_queue);\n-\t}\n-\treturn resp_counter;\n-}\n \n uint16_t\n qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,\n@@ -903,13 +763,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,\n \treturn 0;\n }\n \n-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)\n-{\n-\tuint32_t div = data >> shift;\n-\tuint32_t mult = div 
<< shift;\n-\n-\treturn data - mult;\n-}\n \n void qat_sym_stats_get(struct rte_cryptodev *dev,\n \t\tstruct rte_cryptodev_stats *stats)\ndiff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h\nindex 279d3a3ae..39574eeb6 100644\n--- a/drivers/crypto/qat/qat_sym.h\n+++ b/drivers/crypto/qat/qat_sym.h\n@@ -27,57 +27,8 @@\n #define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U\n /* number of inflights below which no tail write coalescing should occur */\n \n-typedef int (*build_request_t)(void *op,\n-\t\tuint8_t *req, void *op_cookie,\n-\t\tenum qat_device_gen qat_dev_gen);\n-/**< Build a request from an op. */\n-\n-typedef int (*process_response_t)(void **ops,\n-\t\tuint8_t *resp, void *op_cookie,\n-\t\tenum qat_device_gen qat_dev_gen);\n-/**< Process a response descriptor and return the associated op. */\n-\n struct qat_sym_session;\n \n-/**\n- * Structure associated with each queue.\n- */\n-struct qat_queue {\n-\tchar\t\tmemz_name[RTE_MEMZONE_NAMESIZE];\n-\tvoid\t\t*base_addr;\t\t/* Base address */\n-\trte_iova_t\tbase_phys_addr;\t\t/* Queue physical address */\n-\tuint32_t\thead;\t\t\t/* Shadow copy of the head */\n-\tuint32_t\ttail;\t\t\t/* Shadow copy of the tail */\n-\tuint32_t\tmodulo;\n-\tuint32_t\tmsg_size;\n-\tuint16_t\tmax_inflights;\n-\tuint32_t\tqueue_size;\n-\tuint8_t\t\thw_bundle_number;\n-\tuint8_t\t\thw_queue_number;\n-\t/* HW queue aka ring offset on bundle */\n-\tuint32_t\tcsr_head;\t\t/* last written head value */\n-\tuint32_t\tcsr_tail;\t\t/* last written tail value */\n-\tuint16_t\tnb_processed_responses;\n-\t/* number of responses processed since last CSR head write */\n-\tuint16_t\tnb_pending_requests;\n-\t/* number of requests pending since last CSR tail write */\n-};\n-\n-struct qat_qp {\n-\tvoid\t\t\t*mmap_bar_addr;\n-\tuint16_t\t\tinflights16;\n-\tstruct\tqat_queue\ttx_q;\n-\tstruct\tqat_queue\trx_q;\n-\tstruct\trte_cryptodev_stats stats;\n-\tstruct rte_mempool *op_cookie_pool;\n-\tvoid **op_cookies;\n-\tuint32_t nb_descriptors;\n-\tenum qat_device_gen qat_dev_gen;\n-\tbuild_request_t build_request;\n-\tprocess_response_t process_response;\n-} __rte_cache_aligned;\n-\n-\n int\n qat_sym_build_request(void *in_op, uint8_t *out_msg,\n \t\tvoid *op_cookie, enum qat_device_gen qat_dev_gen);\n",
    "prefixes": [
        "v3",
        "10/38"
    ]
}
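
The record shown above can also be retrieved and consumed programmatically. The sketch below (Python with the requests library, an assumption rather than part of the API output) fetches this endpoint and then downloads the raw patch via the "mbox" URL carried in the JSON.

# Sketch: read the patch record shown above and fetch its mbox.
# Uses only fields present in the JSON response ("name", "state", "mbox").
import requests

API_URL = "https://patches.dpdk.org/api/patches/41043/"

patch = requests.get(API_URL, timeout=30).json()
print(patch["name"], "-", patch["state"])

# The "mbox" field points at the raw patch email, suitable for `git am`.
mbox = requests.get(patch["mbox"], timeout=30)
with open("41043.mbox", "wb") as f:
    f.write(mbox.content)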