get:
Show a patch.

patch:
Partially update a patch (only the provided fields are modified).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/88546/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 88546,
    "url": "http://patches.dpdk.org/api/patches/88546/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210305133918.8005-20-ndabilpuram@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210305133918.8005-20-ndabilpuram@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210305133918.8005-20-ndabilpuram@marvell.com",
    "date": "2021-03-05T13:38:45",
    "name": "[19/52] common/cnxk: add nix Rx queue management API",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "34dfa88257be15270853f5db4163c4597a581eea",
    "submitter": {
        "id": 1202,
        "url": "http://patches.dpdk.org/api/people/1202/?format=api",
        "name": "Nithin Dabilpuram",
        "email": "ndabilpuram@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210305133918.8005-20-ndabilpuram@marvell.com/mbox/",
    "series": [
        {
            "id": 15508,
            "url": "http://patches.dpdk.org/api/series/15508/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=15508",
            "date": "2021-03-05T13:38:26",
            "name": "Add Marvell CNXK common driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/15508/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/88546/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/88546/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A4876A0547;\n\tFri,  5 Mar 2021 14:43:09 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 824EF22A3C4;\n\tFri,  5 Mar 2021 14:40:30 +0100 (CET)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id BE37122A34C\n for <dev@dpdk.org>; Fri,  5 Mar 2021 14:40:28 +0100 (CET)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id\n 125DUncG018430 for <dev@dpdk.org>; Fri, 5 Mar 2021 05:40:28 -0800",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0a-0016f401.pphosted.com with ESMTP id 372s2umrnb-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Fri, 05 Mar 2021 05:40:27 -0800",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2;\n Fri, 5 Mar 2021 05:40:26 -0800",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.2 via Frontend\n Transport; Fri, 5 Mar 2021 05:40:26 -0800",
            "from hyd1588t430.marvell.com (unknown [10.29.52.204])\n by maili.marvell.com (Postfix) with ESMTP id C4B473F703F;\n Fri,  5 Mar 2021 05:40:23 -0800 (PST)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0220; bh=SUK4FJ4L1mp8+d0qLpTd7lm9jMfXWge/44jgHtt/EAI=;\n b=Y1OByirC34BLB7so/f4BgKwgySBh+q4RvXYbx3UHG3/K3fh6qAOpcw03HYBiXAo7KtTo\n P5NIDCNXz4bTdZ7CtWyMoP16zjX19iGt9p3yYCY7lJ7VxgubZDzRb0hgST5X6W9qrzGW\n 503rIOP+0ol8Swt+RDypryFLmSZ4zqPvRJwBlqR5j5ChAykrpkrRmSB6uEe4M3WtW3Ih\n x081cue415nY/UX6FLlltIvw/ozVHfeQI1QBSC4AjRfEGdo/NFcA0uwIfnrhWuiYlHNd\n mHhs9YWtllUQp1PnYCK0vs1n+lOPtZz+zpxOYhuPm48u8q170QT5coDGqkwMuxJWMHtl 0g==",
        "From": "Nithin Dabilpuram <ndabilpuram@marvell.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<jerinj@marvell.com>, <skori@marvell.com>, <skoteshwar@marvell.com>,\n <pbhagavatula@marvell.com>, <kirankumark@marvell.com>,\n <psatheesh@marvell.com>, <asekhar@marvell.com>",
        "Date": "Fri, 5 Mar 2021 19:08:45 +0530",
        "Message-ID": "<20210305133918.8005-20-ndabilpuram@marvell.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20210305133918.8005-1-ndabilpuram@marvell.com>",
        "References": "<20210305133918.8005-1-ndabilpuram@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.369, 18.0.761\n definitions=2021-03-05_08:2021-03-03,\n 2021-03-05 signatures=0",
        "Subject": "[dpdk-dev] [PATCH 19/52] common/cnxk: add nix Rx queue management\n API",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Jerin Jacob <jerinj@marvell.com>\n\nAdd nix Rx queue management API to init/modify/fini\nRQ context and also setup CQ(completion queue) context.\nCurrent support is both for CN9K and CN10K devices.\n\nSigned-off-by: Jerin Jacob <jerinj@marvell.com>\n---\n drivers/common/cnxk/meson.build     |   1 +\n drivers/common/cnxk/roc_nix.h       |  52 ++++\n drivers/common/cnxk/roc_nix_queue.c | 496 ++++++++++++++++++++++++++++++++++++\n drivers/common/cnxk/version.map     |   6 +\n 4 files changed, 555 insertions(+)\n create mode 100644 drivers/common/cnxk/roc_nix_queue.c",
    "diff": "diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build\nindex 39aa4ae..ebd659d 100644\n--- a/drivers/common/cnxk/meson.build\n+++ b/drivers/common/cnxk/meson.build\n@@ -18,6 +18,7 @@ sources = files('roc_dev.c',\n \t\t'roc_model.c',\n \t\t'roc_nix.c',\n \t\t'roc_nix_irq.c',\n+\t\t'roc_nix_queue.c',\n \t\t'roc_npa.c',\n \t\t'roc_npa_debug.c',\n \t\t'roc_npa_irq.c',\ndiff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h\nindex f32f69d..383ddb4 100644\n--- a/drivers/common/cnxk/roc_nix.h\n+++ b/drivers/common/cnxk/roc_nix.h\n@@ -41,6 +41,48 @@ enum roc_nix_sq_max_sqe_sz {\n \n #define ROC_NIX_VWQE_MAX_SIZE_LOG2 11\n #define ROC_NIX_VWQE_MIN_SIZE_LOG2 2\n+\n+struct roc_nix_rq {\n+\t/* Input parameters */\n+\tuint16_t qid;\n+\tuint64_t aura_handle;\n+\tbool ipsech_ena;\n+\tuint16_t first_skip;\n+\tuint16_t later_skip;\n+\tuint16_t wqe_skip;\n+\tuint16_t lpb_size;\n+\tuint32_t tag_mask;\n+\tuint32_t flow_tag_width;\n+\tuint8_t tt;\t/* Valid when SSO is enabled */\n+\tuint16_t hwgrp; /* Valid when SSO is enabled */\n+\tbool sso_ena;\n+\tbool vwqe_ena;\n+\tuint64_t spb_aura_handle; /* Valid when SPB is enabled */\n+\tuint16_t spb_size;\t  /* Valid when SPB is enabled */\n+\tbool spb_ena;\n+\tuint8_t vwqe_first_skip;\n+\tuint32_t vwqe_max_sz_exp;\n+\tuint64_t vwqe_wait_tmo;\n+\tuint64_t vwqe_aura_handle;\n+\t/* End of Input parameters */\n+\tstruct roc_nix *roc_nix;\n+};\n+\n+struct roc_nix_cq {\n+\t/* Input parameters */\n+\tuint16_t qid;\n+\tuint16_t nb_desc;\n+\t/* End of Input parameters */\n+\tuint16_t drop_thresh;\n+\tstruct roc_nix *roc_nix;\n+\tuintptr_t door;\n+\tint64_t *status;\n+\tuint64_t wdata;\n+\tvoid *desc_base;\n+\tuint32_t qmask;\n+\tuint32_t head;\n+};\n+\n struct roc_nix {\n \t/* Input parameters */\n \tstruct plt_pci_device *pci_dev;\n@@ -93,4 +135,14 @@ void __roc_api roc_nix_unregister_queue_irqs(struct roc_nix *roc_nix);\n int __roc_api roc_nix_register_cq_irqs(struct roc_nix *roc_nix);\n 
void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);\n \n+/* Queue */\n+int __roc_api roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq,\n+\t\t\t      bool ena);\n+int __roc_api roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq,\n+\t\t\t\tbool ena);\n+int __roc_api roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable);\n+int __roc_api roc_nix_rq_fini(struct roc_nix_rq *rq);\n+int __roc_api roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq);\n+int __roc_api roc_nix_cq_fini(struct roc_nix_cq *cq);\n+\n #endif /* _ROC_NIX_H_ */\ndiff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c\nnew file mode 100644\nindex 0000000..716bcec\n--- /dev/null\n+++ b/drivers/common/cnxk/roc_nix_queue.c\n@@ -0,0 +1,496 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(C) 2020 Marvell.\n+ */\n+\n+#include \"roc_api.h\"\n+#include \"roc_priv.h\"\n+\n+static inline uint32_t\n+nix_qsize_to_val(enum nix_q_size qsize)\n+{\n+\treturn (16UL << (qsize * 2));\n+}\n+\n+static inline enum nix_q_size\n+nix_qsize_clampup(uint32_t val)\n+{\n+\tint i = nix_q_size_16;\n+\n+\tfor (; i < nix_q_size_max; i++)\n+\t\tif (val <= nix_qsize_to_val(i))\n+\t\t\tbreak;\n+\n+\tif (i >= nix_q_size_max)\n+\t\ti = nix_q_size_max - 1;\n+\n+\treturn i;\n+}\n+\n+int\n+roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)\n+{\n+\tstruct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);\n+\tstruct mbox *mbox = (&nix->dev)->mbox;\n+\tint rc;\n+\n+\t/* Pkts will be dropped silently if RQ is disabled */\n+\tif (roc_model_is_cn9k()) {\n+\t\tstruct nix_aq_enq_req *aq;\n+\n+\t\taq = mbox_alloc_msg_nix_aq_enq(mbox);\n+\t\taq->qidx = rq->qid;\n+\t\taq->ctype = NIX_AQ_CTYPE_RQ;\n+\t\taq->op = NIX_AQ_INSTOP_WRITE;\n+\n+\t\taq->rq.ena = enable;\n+\t\taq->rq_mask.ena = ~(aq->rq_mask.ena);\n+\t} else {\n+\t\tstruct nix_cn10k_aq_enq_req *aq;\n+\n+\t\taq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);\n+\t\taq->qidx = rq->qid;\n+\t\taq->ctype = 
NIX_AQ_CTYPE_RQ;\n+\t\taq->op = NIX_AQ_INSTOP_WRITE;\n+\n+\t\taq->rq.ena = enable;\n+\t\taq->rq_mask.ena = ~(aq->rq_mask.ena);\n+\t}\n+\n+\trc = mbox_process(mbox);\n+\n+\tif (roc_model_is_cn10k())\n+\t\tplt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);\n+\treturn rc;\n+}\n+\n+static int\n+rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)\n+{\n+\tstruct mbox *mbox = (&nix->dev)->mbox;\n+\tstruct nix_aq_enq_req *aq;\n+\n+\taq = mbox_alloc_msg_nix_aq_enq(mbox);\n+\taq->qidx = rq->qid;\n+\taq->ctype = NIX_AQ_CTYPE_RQ;\n+\taq->op = cfg ? NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;\n+\n+\tif (rq->sso_ena) {\n+\t\t/* SSO mode */\n+\t\taq->rq.sso_ena = 1;\n+\t\taq->rq.sso_tt = rq->tt;\n+\t\taq->rq.sso_grp = rq->hwgrp;\n+\t\taq->rq.ena_wqwd = 1;\n+\t\taq->rq.wqe_skip = rq->wqe_skip;\n+\t\taq->rq.wqe_caching = 1;\n+\n+\t\taq->rq.good_utag = rq->tag_mask >> 24;\n+\t\taq->rq.bad_utag = rq->tag_mask >> 24;\n+\t\taq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);\n+\t} else {\n+\t\t/* CQ mode */\n+\t\taq->rq.sso_ena = 0;\n+\t\taq->rq.good_utag = rq->tag_mask >> 24;\n+\t\taq->rq.bad_utag = rq->tag_mask >> 24;\n+\t\taq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);\n+\t\taq->rq.cq = rq->qid;\n+\t}\n+\n+\tif (rq->ipsech_ena)\n+\t\taq->rq.ipsech_ena = 1;\n+\n+\taq->rq.spb_ena = 0;\n+\taq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);\n+\n+\t/* Sizes must be aligned to 8 bytes */\n+\tif (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)\n+\t\treturn -EINVAL;\n+\n+\t/* Expressed in number of dwords */\n+\taq->rq.first_skip = rq->first_skip / 8;\n+\taq->rq.later_skip = rq->later_skip / 8;\n+\taq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */\n+\taq->rq.lpb_sizem1 = rq->lpb_size / 8;\n+\taq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */\n+\taq->rq.ena = ena;\n+\taq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */\n+\taq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */\n+\taq->rq.rq_int_ena = 0;\n+\t/* Many 
to one reduction */\n+\taq->rq.qint_idx = rq->qid % nix->qints;\n+\taq->rq.xqe_drop_ena = 1;\n+\n+\tif (cfg) {\n+\t\tif (rq->sso_ena) {\n+\t\t\t/* SSO mode */\n+\t\t\taq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;\n+\t\t\taq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;\n+\t\t\taq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;\n+\t\t\taq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;\n+\t\t\taq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;\n+\t\t\taq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;\n+\t\t\taq->rq_mask.good_utag = ~aq->rq_mask.good_utag;\n+\t\t\taq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;\n+\t\t\taq->rq_mask.ltag = ~aq->rq_mask.ltag;\n+\t\t} else {\n+\t\t\t/* CQ mode */\n+\t\t\taq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;\n+\t\t\taq->rq_mask.good_utag = ~aq->rq_mask.good_utag;\n+\t\t\taq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;\n+\t\t\taq->rq_mask.ltag = ~aq->rq_mask.ltag;\n+\t\t\taq->rq_mask.cq = ~aq->rq_mask.cq;\n+\t\t}\n+\n+\t\tif (rq->ipsech_ena)\n+\t\t\taq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;\n+\n+\t\taq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;\n+\t\taq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;\n+\t\taq->rq_mask.first_skip = ~aq->rq_mask.first_skip;\n+\t\taq->rq_mask.later_skip = ~aq->rq_mask.later_skip;\n+\t\taq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;\n+\t\taq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;\n+\t\taq->rq_mask.ena = ~aq->rq_mask.ena;\n+\t\taq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;\n+\t\taq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;\n+\t\taq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;\n+\t\taq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;\n+\t\taq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)\n+{\n+\tstruct mbox *mbox = (&nix->dev)->mbox;\n+\tstruct nix_cn10k_aq_enq_req *aq;\n+\n+\taq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);\n+\taq->qidx = rq->qid;\n+\taq->ctype = NIX_AQ_CTYPE_RQ;\n+\taq->op = cfg ? 
NIX_AQ_INSTOP_WRITE : NIX_AQ_INSTOP_INIT;\n+\n+\tif (rq->sso_ena) {\n+\t\t/* SSO mode */\n+\t\taq->rq.sso_ena = 1;\n+\t\taq->rq.sso_tt = rq->tt;\n+\t\taq->rq.sso_grp = rq->hwgrp;\n+\t\taq->rq.ena_wqwd = 1;\n+\t\taq->rq.wqe_skip = rq->wqe_skip;\n+\t\taq->rq.wqe_caching = 1;\n+\n+\t\taq->rq.good_utag = rq->tag_mask >> 24;\n+\t\taq->rq.bad_utag = rq->tag_mask >> 24;\n+\t\taq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);\n+\n+\t\tif (rq->vwqe_ena) {\n+\t\t\taq->rq.vwqe_ena = true;\n+\t\t\taq->rq.vwqe_skip = rq->vwqe_first_skip;\n+\t\t\t/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */\n+\t\t\taq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;\n+\t\t\taq->rq.vtime_wait = rq->vwqe_wait_tmo;\n+\t\t\taq->rq.wqe_aura = rq->vwqe_aura_handle;\n+\t\t}\n+\t} else {\n+\t\t/* CQ mode */\n+\t\taq->rq.sso_ena = 0;\n+\t\taq->rq.good_utag = rq->tag_mask >> 24;\n+\t\taq->rq.bad_utag = rq->tag_mask >> 24;\n+\t\taq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);\n+\t\taq->rq.cq = rq->qid;\n+\t}\n+\n+\tif (rq->ipsech_ena)\n+\t\taq->rq.ipsech_ena = 1;\n+\n+\taq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);\n+\n+\t/* Sizes must be aligned to 8 bytes */\n+\tif (rq->first_skip & 0x7 || rq->later_skip & 0x7 || rq->lpb_size & 0x7)\n+\t\treturn -EINVAL;\n+\n+\t/* Expressed in number of dwords */\n+\taq->rq.first_skip = rq->first_skip / 8;\n+\taq->rq.later_skip = rq->later_skip / 8;\n+\taq->rq.flow_tagw = rq->flow_tag_width; /* 32-bits */\n+\taq->rq.lpb_sizem1 = rq->lpb_size / 8;\n+\taq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */\n+\taq->rq.ena = ena;\n+\n+\tif (rq->spb_ena) {\n+\t\tuint32_t spb_sizem1;\n+\n+\t\taq->rq.spb_ena = 1;\n+\t\taq->rq.spb_aura =\n+\t\t\troc_npa_aura_handle_to_aura(rq->spb_aura_handle);\n+\n+\t\tif (rq->spb_size & 0x7 ||\n+\t\t    rq->spb_size > NIX_RQ_CN10K_SPB_MAX_SIZE)\n+\t\t\treturn -EINVAL;\n+\n+\t\tspb_sizem1 = rq->spb_size / 8; /* Expressed in no. 
of dwords */\n+\t\tspb_sizem1 -= 1;\t       /* Expressed in size minus one */\n+\t\taq->rq.spb_sizem1 = spb_sizem1 & 0x3F;\n+\t\taq->rq.spb_high_sizem1 = (spb_sizem1 >> 6) & 0x7;\n+\t} else {\n+\t\taq->rq.spb_ena = 0;\n+\t}\n+\n+\taq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */\n+\taq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */\n+\taq->rq.rq_int_ena = 0;\n+\t/* Many to one reduction */\n+\taq->rq.qint_idx = rq->qid % nix->qints;\n+\taq->rq.xqe_drop_ena = 1;\n+\n+\tif (cfg) {\n+\t\tif (rq->sso_ena) {\n+\t\t\t/* SSO mode */\n+\t\t\taq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;\n+\t\t\taq->rq_mask.sso_tt = ~aq->rq_mask.sso_tt;\n+\t\t\taq->rq_mask.sso_grp = ~aq->rq_mask.sso_grp;\n+\t\t\taq->rq_mask.ena_wqwd = ~aq->rq_mask.ena_wqwd;\n+\t\t\taq->rq_mask.wqe_skip = ~aq->rq_mask.wqe_skip;\n+\t\t\taq->rq_mask.wqe_caching = ~aq->rq_mask.wqe_caching;\n+\t\t\taq->rq_mask.good_utag = ~aq->rq_mask.good_utag;\n+\t\t\taq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;\n+\t\t\taq->rq_mask.ltag = ~aq->rq_mask.ltag;\n+\t\t\tif (rq->vwqe_ena) {\n+\t\t\t\taq->rq_mask.vwqe_ena = ~aq->rq_mask.vwqe_ena;\n+\t\t\t\taq->rq_mask.vwqe_skip = ~aq->rq_mask.vwqe_skip;\n+\t\t\t\taq->rq_mask.max_vsize_exp =\n+\t\t\t\t\t~aq->rq_mask.max_vsize_exp;\n+\t\t\t\taq->rq_mask.vtime_wait =\n+\t\t\t\t\t~aq->rq_mask.vtime_wait;\n+\t\t\t\taq->rq_mask.wqe_aura = ~aq->rq_mask.wqe_aura;\n+\t\t\t}\n+\t\t} else {\n+\t\t\t/* CQ mode */\n+\t\t\taq->rq_mask.sso_ena = ~aq->rq_mask.sso_ena;\n+\t\t\taq->rq_mask.good_utag = ~aq->rq_mask.good_utag;\n+\t\t\taq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;\n+\t\t\taq->rq_mask.ltag = ~aq->rq_mask.ltag;\n+\t\t\taq->rq_mask.cq = ~aq->rq_mask.cq;\n+\t\t}\n+\n+\t\tif (rq->ipsech_ena)\n+\t\t\taq->rq_mask.ipsech_ena = ~aq->rq_mask.ipsech_ena;\n+\n+\t\tif (rq->spb_ena) {\n+\t\t\taq->rq_mask.spb_aura = ~aq->rq_mask.spb_aura;\n+\t\t\taq->rq_mask.spb_sizem1 = ~aq->rq_mask.spb_sizem1;\n+\t\t\taq->rq_mask.spb_high_sizem1 
=\n+\t\t\t\t~aq->rq_mask.spb_high_sizem1;\n+\t\t}\n+\n+\t\taq->rq_mask.spb_ena = ~aq->rq_mask.spb_ena;\n+\t\taq->rq_mask.lpb_aura = ~aq->rq_mask.lpb_aura;\n+\t\taq->rq_mask.first_skip = ~aq->rq_mask.first_skip;\n+\t\taq->rq_mask.later_skip = ~aq->rq_mask.later_skip;\n+\t\taq->rq_mask.flow_tagw = ~aq->rq_mask.flow_tagw;\n+\t\taq->rq_mask.lpb_sizem1 = ~aq->rq_mask.lpb_sizem1;\n+\t\taq->rq_mask.ena = ~aq->rq_mask.ena;\n+\t\taq->rq_mask.pb_caching = ~aq->rq_mask.pb_caching;\n+\t\taq->rq_mask.xqe_imm_size = ~aq->rq_mask.xqe_imm_size;\n+\t\taq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;\n+\t\taq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;\n+\t\taq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)\n+{\n+\tstruct nix *nix = roc_nix_to_nix_priv(roc_nix);\n+\tstruct mbox *mbox = (&nix->dev)->mbox;\n+\tbool is_cn9k = roc_model_is_cn9k();\n+\tint rc;\n+\n+\tif (roc_nix == NULL || rq == NULL)\n+\t\treturn NIX_ERR_PARAM;\n+\n+\tif (rq->qid >= nix->nb_rx_queues)\n+\t\treturn NIX_ERR_QUEUE_INVALID_RANGE;\n+\n+\trq->roc_nix = roc_nix;\n+\n+\tif (is_cn9k)\n+\t\trc = rq_cn9k_cfg(nix, rq, false, ena);\n+\telse\n+\t\trc = rq_cfg(nix, rq, false, ena);\n+\n+\tif (rc)\n+\t\treturn rc;\n+\n+\treturn mbox_process(mbox);\n+}\n+\n+int\n+roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)\n+{\n+\tstruct nix *nix = roc_nix_to_nix_priv(roc_nix);\n+\tstruct mbox *mbox = (&nix->dev)->mbox;\n+\tbool is_cn9k = roc_model_is_cn9k();\n+\tint rc;\n+\n+\tif (roc_nix == NULL || rq == NULL)\n+\t\treturn NIX_ERR_PARAM;\n+\n+\tif (rq->qid >= nix->nb_rx_queues)\n+\t\treturn NIX_ERR_QUEUE_INVALID_RANGE;\n+\n+\trq->roc_nix = roc_nix;\n+\n+\tif (is_cn9k)\n+\t\trc = rq_cn9k_cfg(nix, rq, true, ena);\n+\telse\n+\t\trc = rq_cfg(nix, rq, true, ena);\n+\n+\tif (rc)\n+\t\treturn rc;\n+\n+\treturn mbox_process(mbox);\n+}\n+\n+int\n+roc_nix_rq_fini(struct roc_nix_rq *rq)\n+{\n+\t/* 
Disabling RQ is sufficient */\n+\treturn roc_nix_rq_ena_dis(rq, false);\n+}\n+\n+int\n+roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)\n+{\n+\tstruct nix *nix = roc_nix_to_nix_priv(roc_nix);\n+\tstruct mbox *mbox = (&nix->dev)->mbox;\n+\tvolatile struct nix_cq_ctx_s *cq_ctx;\n+\tenum nix_q_size qsize;\n+\tsize_t desc_sz;\n+\tint rc;\n+\n+\tif (cq == NULL)\n+\t\treturn NIX_ERR_PARAM;\n+\n+\tif (cq->qid >= nix->nb_rx_queues)\n+\t\treturn NIX_ERR_QUEUE_INVALID_RANGE;\n+\n+\tqsize = nix_qsize_clampup(cq->nb_desc);\n+\tcq->nb_desc = nix_qsize_to_val(qsize);\n+\tcq->qmask = cq->nb_desc - 1;\n+\tcq->door = nix->base + NIX_LF_CQ_OP_DOOR;\n+\tcq->status = (int64_t *)(nix->base + NIX_LF_CQ_OP_STATUS);\n+\tcq->wdata = (uint64_t)cq->qid << 32;\n+\tcq->roc_nix = roc_nix;\n+\tcq->drop_thresh = NIX_CQ_THRESH_LEVEL;\n+\n+\t/* CQE of W16 */\n+\tdesc_sz = cq->nb_desc * NIX_CQ_ENTRY_SZ;\n+\tcq->desc_base = plt_zmalloc(desc_sz, NIX_CQ_ALIGN);\n+\tif (cq->desc_base == NULL) {\n+\t\trc = NIX_ERR_NO_MEM;\n+\t\tgoto fail;\n+\t}\n+\n+\tif (roc_model_is_cn9k()) {\n+\t\tstruct nix_aq_enq_req *aq;\n+\n+\t\taq = mbox_alloc_msg_nix_aq_enq(mbox);\n+\t\taq->qidx = cq->qid;\n+\t\taq->ctype = NIX_AQ_CTYPE_CQ;\n+\t\taq->op = NIX_AQ_INSTOP_INIT;\n+\t\tcq_ctx = &aq->cq;\n+\t} else {\n+\t\tstruct nix_cn10k_aq_enq_req *aq;\n+\n+\t\taq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);\n+\t\taq->qidx = cq->qid;\n+\t\taq->ctype = NIX_AQ_CTYPE_CQ;\n+\t\taq->op = NIX_AQ_INSTOP_INIT;\n+\t\tcq_ctx = &aq->cq;\n+\t}\n+\n+\tcq_ctx->ena = 1;\n+\tcq_ctx->caching = 1;\n+\tcq_ctx->qsize = qsize;\n+\tcq_ctx->base = (uint64_t)cq->desc_base;\n+\tcq_ctx->avg_level = 0xff;\n+\tcq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);\n+\tcq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);\n+\n+\t/* Many to one reduction */\n+\tcq_ctx->qint_idx = cq->qid % nix->qints;\n+\t/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */\n+\tcq_ctx->cint_idx = cq->qid;\n+\n+\tcq_ctx->drop = cq->drop_thresh;\n+\tcq_ctx->drop_ena 
= 1;\n+\n+\t/* TX pause frames enable flow ctrl on RX side */\n+\tif (nix->tx_pause) {\n+\t\t/* Single BPID is allocated for all rx channels for now */\n+\t\tcq_ctx->bpid = nix->bpid[0];\n+\t\tcq_ctx->bp = cq_ctx->drop;\n+\t\tcq_ctx->bp_ena = 1;\n+\t}\n+\n+\trc = mbox_process(mbox);\n+\tif (rc)\n+\t\tgoto free_mem;\n+\n+\treturn 0;\n+\n+free_mem:\n+\tplt_free(cq->desc_base);\n+fail:\n+\treturn rc;\n+}\n+\n+int\n+roc_nix_cq_fini(struct roc_nix_cq *cq)\n+{\n+\tstruct mbox *mbox;\n+\tstruct nix *nix;\n+\tint rc;\n+\n+\tif (cq == NULL)\n+\t\treturn NIX_ERR_PARAM;\n+\n+\tnix = roc_nix_to_nix_priv(cq->roc_nix);\n+\tmbox = (&nix->dev)->mbox;\n+\n+\t/* Disable CQ */\n+\tif (roc_model_is_cn9k()) {\n+\t\tstruct nix_aq_enq_req *aq;\n+\n+\t\taq = mbox_alloc_msg_nix_aq_enq(mbox);\n+\t\taq->qidx = cq->qid;\n+\t\taq->ctype = NIX_AQ_CTYPE_CQ;\n+\t\taq->op = NIX_AQ_INSTOP_WRITE;\n+\t\taq->cq.ena = 0;\n+\t\taq->cq.bp_ena = 0;\n+\t\taq->cq_mask.ena = ~aq->cq_mask.ena;\n+\t\taq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;\n+\t} else {\n+\t\tstruct nix_cn10k_aq_enq_req *aq;\n+\n+\t\taq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);\n+\t\taq->qidx = cq->qid;\n+\t\taq->ctype = NIX_AQ_CTYPE_CQ;\n+\t\taq->op = NIX_AQ_INSTOP_WRITE;\n+\t\taq->cq.ena = 0;\n+\t\taq->cq.bp_ena = 0;\n+\t\taq->cq_mask.ena = ~aq->cq_mask.ena;\n+\t\taq->cq_mask.bp_ena = ~aq->cq_mask.bp_ena;\n+\t}\n+\n+\trc = mbox_process(mbox);\n+\tif (rc)\n+\t\treturn rc;\n+\n+\tplt_free(cq->desc_base);\n+\treturn 0;\n+}\ndiff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map\nindex 3a51c7a..0f56582 100644\n--- a/drivers/common/cnxk/version.map\n+++ b/drivers/common/cnxk/version.map\n@@ -14,6 +14,8 @@ INTERNAL {\n \troc_idev_npa_nix_get;\n \troc_idev_num_lmtlines_get;\n \troc_model;\n+\troc_nix_cq_fini;\n+\troc_nix_cq_init;\n \troc_nix_dev_fini;\n \troc_nix_dev_init;\n \troc_nix_err_intr_ena_dis;\n@@ -32,6 +34,10 @@ INTERNAL {\n \troc_nix_ras_intr_ena_dis;\n \troc_nix_register_cq_irqs;\n 
\troc_nix_register_queue_irqs;\n+\troc_nix_rq_ena_dis;\n+\troc_nix_rq_fini;\n+\troc_nix_rq_init;\n+\troc_nix_rq_modify;\n \troc_nix_rx_queue_intr_disable;\n \troc_nix_rx_queue_intr_enable;\n \troc_nix_unregister_cq_irqs;\n",
    "prefixes": [
        "19/52"
    ]
}