get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
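
For readers who want to script against this endpoint, here is a minimal sketch in Python using the third-party requests package (an assumption; any HTTP client works). It relies only on the URL and fields visible in the example response below, and it assumes read access to patches.dpdk.org needs no authentication.

import requests

PATCH_ID = 101940  # the patch shown in the example response below

resp = requests.get(
    f"https://patches.dpdk.org/api/patches/{PATCH_ID}/",
    headers={"Accept": "application/json"},  # ask for plain JSON rather than the browsable page
)
resp.raise_for_status()

patch = resp.json()
print(patch["name"])   # "[2/2] crypto/cnxk: rework pending queue"
print(patch["state"])  # "accepted"
print(patch["mbox"])   # raw mbox URL, suitable for `git am`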

GET /api/patches/101940/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 101940,
    "url": "https://patches.dpdk.org/api/patches/101940/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1634543500-128-2-git-send-email-anoobj@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1634543500-128-2-git-send-email-anoobj@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1634543500-128-2-git-send-email-anoobj@marvell.com",
    "date": "2021-10-18T07:51:40",
    "name": "[2/2] crypto/cnxk: rework pending queue",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "000225ccb2838656be0dd42baa73fd23b25a4da5",
    "submitter": {
        "id": 1205,
        "url": "https://patches.dpdk.org/api/people/1205/?format=api",
        "name": "Anoob Joseph",
        "email": "anoobj@marvell.com"
    },
    "delegate": {
        "id": 6690,
        "url": "https://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1634543500-128-2-git-send-email-anoobj@marvell.com/mbox/",
    "series": [
        {
            "id": 19721,
            "url": "https://patches.dpdk.org/api/series/19721/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=19721",
            "date": "2021-10-18T07:51:39",
            "name": "[1/2] common/cnxk: align CPT queue depth to power of 2",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/19721/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/101940/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/101940/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B085FA0C43;\n\tMon, 18 Oct 2021 09:52:00 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 026FC410E4;\n\tMon, 18 Oct 2021 09:52:00 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id 3431E4003C\n for <dev@dpdk.org>; Mon, 18 Oct 2021 09:51:58 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.16.1.2/8.16.1.2) with SMTP id 19I3nOYC024145\n for <dev@dpdk.org>; Mon, 18 Oct 2021 00:51:57 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0a-0016f401.pphosted.com with ESMTP id 3bs1bugr64-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Mon, 18 Oct 2021 00:51:57 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Mon, 18 Oct 2021 00:51:55 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.18 via Frontend\n Transport; Mon, 18 Oct 2021 00:51:55 -0700",
            "from HY-LT1002.marvell.com (HY-LT1002.marvell.com [10.28.176.218])\n by maili.marvell.com (Postfix) with ESMTP id 1E2F55E6864;\n Mon, 18 Oct 2021 00:51:52 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=uk0Tdh6Y237QLhMV0KEBkaIFrNZvd+81e7Nr8X6RrwU=;\n b=cNTgXRrr5SYWZRqK3ODzHLx09mjBlGRbz8xslo27ymD64S+fBBT0UUI8JwYb3zCuQUEI\n 6BspxzIxiPVX0pZGYuMhf0KHY8Z7hnip/3W+aRsefTqoIeMpLi+vTXsgH7Bq79+XHZfS\n MA0vEliGZTLNv3JX6QHWN1n/60qR7IYdI/w488uXHQ5tobzcKxoZVmnzywbOweZvNIFn\n 7SnYsBmyuUZ5j6lZ5UJRKe73oLeY90PKDhonT9nr6wyvzOiShdPRykRuUyqCPu/wDtGq\n JsnZiQGNv/v91c0DRWQxSTB6HdgO1CvTcWl1CLk2GOwUazG/gVB8rHhwoo0DQdv009pe hA==",
        "From": "Anoob Joseph <anoobj@marvell.com>",
        "To": "Akhil Goyal <gakhil@marvell.com>, Jerin Jacob <jerinj@marvell.com>",
        "CC": "Anoob Joseph <anoobj@marvell.com>, Archana Muniganti\n <marchana@marvell.com>,\n Tejasree Kondoj <ktejasree@marvell.com>, <dev@dpdk.org>",
        "Date": "Mon, 18 Oct 2021 13:21:40 +0530",
        "Message-ID": "<1634543500-128-2-git-send-email-anoobj@marvell.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1634543500-128-1-git-send-email-anoobj@marvell.com>",
        "References": "<1634543500-128-1-git-send-email-anoobj@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "JBJH7XQkd0UFUw-dnfMuEyhStZmdoNyE",
        "X-Proofpoint-ORIG-GUID": "JBJH7XQkd0UFUw-dnfMuEyhStZmdoNyE",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.182.1,Aquarius:18.0.790,Hydra:6.0.425,FMLib:17.0.607.475\n definitions=2021-10-18_02,2021-10-14_02,2020-04-07_01",
        "Subject": "[dpdk-dev] [PATCH 2/2] crypto/cnxk: rework pending queue",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Rework pending queue to allow producer and consumer cores to be\ndifferent.\n\nSigned-off-by: Anoob Joseph <anoobj@marvell.com>\n---\n doc/guides/cryptodevs/cnxk.rst            |  6 ---\n drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 36 +++++++++++-------\n drivers/crypto/cnxk/cn9k_cryptodev_ops.c  | 63 ++++++++++++++-----------------\n drivers/crypto/cnxk/cnxk_cryptodev_ops.c  | 20 +++++++---\n drivers/crypto/cnxk/cnxk_cryptodev_ops.h  | 37 +++++++++++++++---\n 5 files changed, 97 insertions(+), 65 deletions(-)",
    "diff": "diff --git a/doc/guides/cryptodevs/cnxk.rst b/doc/guides/cryptodevs/cnxk.rst\nindex 752316f..1fb0a88 100644\n--- a/doc/guides/cryptodevs/cnxk.rst\n+++ b/doc/guides/cryptodevs/cnxk.rst\n@@ -244,9 +244,3 @@ CN10XX Features supported\n * UDP Encapsulation\n * AES-128/192/256-GCM\n * AES-128/192/256-CBC-SHA1-HMAC\n-\n-Limitations\n------------\n-\n-Multiple lcores may not operate on the same crypto queue pair. The lcore that\n-enqueues to a queue pair is the one that must dequeue from it.\ndiff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c\nindex c25c8e6..7f724de 100644\n--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c\n+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c\n@@ -196,11 +196,15 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \tstruct pending_queue *pend_q;\n \tstruct cpt_inst_s *inst;\n \tuint16_t lmt_id;\n+\tuint64_t head;\n \tint ret, i;\n \n \tpend_q = &qp->pend_q;\n \n-\tnb_allowed = qp->lf.nb_desc - pend_q->pending_count;\n+\tconst uint64_t pq_mask = pend_q->pq_mask;\n+\n+\thead = pend_q->head;\n+\tnb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);\n \tnb_ops = RTE_MIN(nb_ops, nb_allowed);\n \n \tif (unlikely(nb_ops == 0))\n@@ -214,18 +218,18 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \n again:\n \tfor (i = 0; i < RTE_MIN(PKTS_PER_LOOP, nb_ops); i++) {\n-\t\tinfl_req = &pend_q->req_queue[pend_q->enq_tail];\n+\t\tinfl_req = &pend_q->req_queue[head];\n \t\tinfl_req->op_flags = 0;\n \n \t\tret = cn10k_cpt_fill_inst(qp, ops + i, &inst[2 * i], infl_req);\n \t\tif (unlikely(ret != 1)) {\n \t\t\tplt_dp_err(\"Could not process op: %p\", ops + i);\n \t\t\tif (i == 0)\n-\t\t\t\tgoto update_pending;\n+\t\t\t\tgoto pend_q_commit;\n \t\t\tbreak;\n \t\t}\n \n-\t\tMOD_INC(pend_q->enq_tail, qp->lf.nb_desc);\n+\t\tpending_queue_advance(&head, pq_mask);\n \t}\n \n \tif (i > PKTS_PER_STEORL) {\n@@ -251,9 +255,10 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\tgoto again;\n \t}\n \n-update_pending:\n-\tpend_q->pending_count += count + i;\n+pend_q_commit:\n+\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n \n+\tpend_q->head = head;\n \tpend_q->time_out = rte_get_timer_cycles() +\n \t\t\t   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();\n \n@@ -512,18 +517,23 @@ cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \tstruct cnxk_cpt_qp *qp = qptr;\n \tstruct pending_queue *pend_q;\n \tstruct cpt_cn10k_res_s *res;\n+\tuint64_t infl_cnt, pq_tail;\n \tstruct rte_crypto_op *cop;\n-\tint i, nb_pending;\n+\tint i;\n \n \tpend_q = &qp->pend_q;\n \n-\tnb_pending = pend_q->pending_count;\n+\tconst uint64_t pq_mask = pend_q->pq_mask;\n+\n+\tpq_tail = pend_q->tail;\n+\tinfl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);\n+\tnb_ops = RTE_MIN(nb_ops, infl_cnt);\n \n-\tif (nb_ops > nb_pending)\n-\t\tnb_ops = nb_pending;\n+\t/* Ensure infl_cnt isn't read before data lands */\n+\trte_atomic_thread_fence(__ATOMIC_ACQUIRE);\n \n \tfor (i = 0; i < nb_ops; i++) {\n-\t\tinfl_req = &pend_q->req_queue[pend_q->deq_head];\n+\t\tinfl_req = &pend_q->req_queue[pq_tail];\n \n \t\tres = (struct cpt_cn10k_res_s *)&infl_req->res;\n \n@@ -538,7 +548,7 @@ cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\tbreak;\n \t\t}\n \n-\t\tMOD_INC(pend_q->deq_head, qp->lf.nb_desc);\n+\t\tpending_queue_advance(&pq_tail, pq_mask);\n \n \t\tcop = infl_req->cop;\n \n@@ -550,7 
+560,7 @@ cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\trte_mempool_put(qp->meta_info.pool, infl_req->mdata);\n \t}\n \n-\tpend_q->pending_count -= i;\n+\tpend_q->tail = pq_tail;\n \n \treturn i;\n }\ndiff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c\nindex 7527793..449208d 100644\n--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c\n+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c\n@@ -218,14 +218,14 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \tuint16_t nb_allowed, count = 0;\n \tstruct cnxk_cpt_qp *qp = qptr;\n \tstruct pending_queue *pend_q;\n-\tuint64_t enq_tail;\n+\tuint64_t head;\n \tint ret;\n \n-\tconst uint32_t nb_desc = qp->lf.nb_desc;\n+\tpend_q = &qp->pend_q;\n+\n \tconst uint64_t lmt_base = qp->lf.lmt_base;\n \tconst uint64_t io_addr = qp->lf.io_addr;\n-\n-\tpend_q = &qp->pend_q;\n+\tconst uint64_t pq_mask = pend_q->pq_mask;\n \n \t/* Clear w0, w2, w3 of both inst */\n \n@@ -236,14 +236,13 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \tinst[1].w2.u64 = 0;\n \tinst[1].w3.u64 = 0;\n \n-\tnb_allowed = qp->lf.nb_desc - pend_q->pending_count;\n+\thead = pend_q->head;\n+\tnb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);\n \tnb_ops = RTE_MIN(nb_ops, nb_allowed);\n \n-\tenq_tail = pend_q->enq_tail;\n-\n \tif (unlikely(nb_ops & 1)) {\n \t\top_1 = ops[0];\n-\t\tinfl_req_1 = &pend_q->req_queue[enq_tail];\n+\t\tinfl_req_1 = &pend_q->req_queue[head];\n \t\tinfl_req_1->op_flags = 0;\n \n \t\tret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);\n@@ -257,7 +256,7 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\tinst[0].res_addr = (uint64_t)&infl_req_1->res;\n \n \t\tcn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);\n-\t\tMOD_INC(enq_tail, nb_desc);\n+\t\tpending_queue_advance(&head, pq_mask);\n \t\tcount++;\n \t}\n \n@@ -265,10 +264,10 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\top_1 = ops[count];\n \t\top_2 = ops[count + 1];\n \n-\t\tinfl_req_1 = &pend_q->req_queue[enq_tail];\n-\t\tMOD_INC(enq_tail, nb_desc);\n-\t\tinfl_req_2 = &pend_q->req_queue[enq_tail];\n-\t\tMOD_INC(enq_tail, nb_desc);\n+\t\tinfl_req_1 = &pend_q->req_queue[head];\n+\t\tpending_queue_advance(&head, pq_mask);\n+\t\tinfl_req_2 = &pend_q->req_queue[head];\n+\t\tpending_queue_advance(&head, pq_mask);\n \n \t\tinfl_req_1->cop = op_1;\n \t\tinfl_req_2->cop = op_2;\n@@ -284,23 +283,14 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\tret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);\n \t\tif (unlikely(ret)) {\n \t\t\tplt_dp_err(\"Could not process op: %p\", op_1);\n-\t\t\tif (enq_tail == 0)\n-\t\t\t\tenq_tail = nb_desc - 2;\n-\t\t\telse if (enq_tail == 1)\n-\t\t\t\tenq_tail = nb_desc - 1;\n-\t\t\telse\n-\t\t\t\tenq_tail--;\n+\t\t\tpending_queue_retreat(&head, pq_mask, 2);\n \t\t\tbreak;\n \t\t}\n \n \t\tret = cn9k_cpt_inst_prep(qp, op_2, infl_req_2, &inst[1]);\n \t\tif (unlikely(ret)) {\n \t\t\tplt_dp_err(\"Could not process op: %p\", op_2);\n-\t\t\tif (enq_tail == 0)\n-\t\t\t\tenq_tail = nb_desc - 1;\n-\t\t\telse\n-\t\t\t\tenq_tail--;\n-\n+\t\t\tpending_queue_retreat(&head, pq_mask, 1);\n \t\t\tcn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);\n \t\t\tcount++;\n \t\t\tbreak;\n@@ -311,8 +301,9 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\tcount += 2;\n \t}\n 
\n-\tpend_q->enq_tail = enq_tail;\n-\tpend_q->pending_count += count;\n+\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\n+\tpend_q->head = head;\n \tpend_q->time_out = rte_get_timer_cycles() +\n \t\t\t   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();\n \n@@ -522,20 +513,23 @@ cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \tstruct cnxk_cpt_qp *qp = qptr;\n \tstruct pending_queue *pend_q;\n \tstruct cpt_cn9k_res_s *res;\n+\tuint64_t infl_cnt, pq_tail;\n \tstruct rte_crypto_op *cop;\n-\tuint32_t pq_deq_head;\n \tint i;\n \n-\tconst uint32_t nb_desc = qp->lf.nb_desc;\n-\n \tpend_q = &qp->pend_q;\n \n-\tnb_ops = RTE_MIN(nb_ops, pend_q->pending_count);\n+\tconst uint64_t pq_mask = pend_q->pq_mask;\n+\n+\tpq_tail = pend_q->tail;\n+\tinfl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);\n+\tnb_ops = RTE_MIN(nb_ops, infl_cnt);\n \n-\tpq_deq_head = pend_q->deq_head;\n+\t/* Ensure infl_cnt isn't read before data lands */\n+\trte_atomic_thread_fence(__ATOMIC_ACQUIRE);\n \n \tfor (i = 0; i < nb_ops; i++) {\n-\t\tinfl_req = &pend_q->req_queue[pq_deq_head];\n+\t\tinfl_req = &pend_q->req_queue[pq_tail];\n \n \t\tres = (struct cpt_cn9k_res_s *)&infl_req->res;\n \n@@ -550,7 +544,7 @@ cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\tbreak;\n \t\t}\n \n-\t\tMOD_INC(pq_deq_head, nb_desc);\n+\t\tpending_queue_advance(&pq_tail, pq_mask);\n \n \t\tcop = infl_req->cop;\n \n@@ -562,8 +556,7 @@ cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)\n \t\t\trte_mempool_put(qp->meta_info.pool, infl_req->mdata);\n \t}\n \n-\tpend_q->pending_count -= i;\n-\tpend_q->deq_head = pq_deq_head;\n+\tpend_q->tail = pq_tail;\n \n \treturn i;\n }\ndiff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c\nindex 41d8fe4..2705c87 100644\n--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c\n+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c\n@@ -171,9 +171,10 @@ cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,\n {\n \tchar mempool_name[RTE_MEMPOOL_NAMESIZE];\n \tstruct cpt_qp_meta_info *meta_info;\n+\tint lcore_cnt = rte_lcore_count();\n \tstruct rte_mempool *pool;\n+\tint mb_pool_sz, mlen = 8;\n \tuint32_t cache_sz;\n-\tint mlen = 8;\n \n \tif (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {\n \t\t/* Get meta len */\n@@ -186,14 +187,22 @@ cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,\n \t\tmlen = RTE_MAX(mlen, cnxk_cpt_asym_get_mlen());\n \t}\n \n+\tmb_pool_sz = nb_elements;\n \tcache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);\n \n+\t/* For poll mode, core that enqueues and core that dequeues can be\n+\t * different. 
For event mode, all cores are allowed to use same crypto\n+\t * queue pair.\n+\t */\n+\n+\tmb_pool_sz += (RTE_MAX(2, lcore_cnt) * cache_sz);\n+\n \t/* Allocate mempool */\n \n \tsnprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, \"cnxk_cpt_mb_%u:%u\",\n \t\t dev->data->dev_id, qp_id);\n \n-\tpool = rte_mempool_create(mempool_name, nb_elements, mlen, cache_sz, 0,\n+\tpool = rte_mempool_create(mempool_name, mb_pool_sz, mlen, cache_sz, 0,\n \t\t\t\t  NULL, NULL, NULL, NULL, rte_socket_id(), 0);\n \n \tif (pool == NULL) {\n@@ -266,9 +275,8 @@ cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,\n \n \t/* Initialize pending queue */\n \tqp->pend_q.req_queue = pq_mem->addr;\n-\tqp->pend_q.enq_tail = 0;\n-\tqp->pend_q.deq_head = 0;\n-\tqp->pend_q.pending_count = 0;\n+\tqp->pend_q.head = 0;\n+\tqp->pend_q.tail = 0;\n \n \treturn qp;\n \n@@ -369,6 +377,8 @@ cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n \t\tgoto exit;\n \t}\n \n+\tqp->pend_q.pq_mask = qp->lf.nb_desc - 1;\n+\n \troc_cpt->lf[qp_id] = &qp->lf;\n \n \tret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);\ndiff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h\nindex c5332de..0d36365 100644\n--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h\n+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h\n@@ -53,14 +53,14 @@ struct cpt_inflight_req {\n } __rte_aligned(16);\n \n struct pending_queue {\n-\t/** Pending requests count */\n-\tuint64_t pending_count;\n \t/** Array of pending requests */\n \tstruct cpt_inflight_req *req_queue;\n-\t/** Tail of queue to be used for enqueue */\n-\tuint16_t enq_tail;\n-\t/** Head of queue to be used for dequeue */\n-\tuint16_t deq_head;\n+\t/** Head of the queue to be used for enqueue */\n+\tuint64_t head;\n+\t/** Tail of the queue to be used for dequeue */\n+\tuint64_t tail;\n+\t/** Pending queue mask */\n+\tuint64_t pq_mask;\n \t/** Timeout to track h/w being unresponsive */\n \tuint64_t time_out;\n };\n@@ -151,4 +151,29 @@ cnxk_event_crypto_mdata_get(struct rte_crypto_op *op)\n \treturn ec_mdata;\n }\n \n+static __rte_always_inline void\n+pending_queue_advance(uint64_t *index, const uint64_t mask)\n+{\n+\t*index = (*index + 1) & mask;\n+}\n+\n+static __rte_always_inline void\n+pending_queue_retreat(uint64_t *index, const uint64_t mask, uint64_t nb_entry)\n+{\n+\t*index = (*index - nb_entry) & mask;\n+}\n+\n+static __rte_always_inline uint64_t\n+pending_queue_infl_cnt(uint64_t head, uint64_t tail, const uint64_t mask)\n+{\n+\treturn (head - tail) & mask;\n+}\n+\n+static __rte_always_inline uint64_t\n+pending_queue_free_cnt(uint64_t head, uint64_t tail, const uint64_t mask)\n+{\n+\t/* mask is nb_desc - 1 */\n+\treturn mask - pending_queue_infl_cnt(head, tail, mask);\n+}\n+\n #endif /* _CNXK_CRYPTODEV_OPS_H_ */\n",
    "prefixes": [
        "2/2"
    ]
}
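
To round off the PATCH and PUT methods listed at the top, a hedged sketch of a partial update follows. The token value is a placeholder, and both the token-based Authorization header and the assumption that state and archived are writable for a project maintainer reflect common Patchwork setups rather than anything shown in the response above; unauthorized requests will simply be rejected.

import requests

PATCH_ID = 101940
TOKEN = "REPLACE_WITH_YOUR_API_TOKEN"  # placeholder; issued per user by the Patchwork instance

resp = requests.patch(
    f"https://patches.dpdk.org/api/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted", "archived": True},  # only the supplied fields change
)
resp.raise_for_status()

updated = resp.json()
print(updated["state"], updated["archived"])

A PUT to the same URL works the same way, but is meant to carry a full representation of the writable fields rather than a partial one.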