get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
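
Read access needs no authentication: a patch can be fetched with a plain HTTP GET against its /api/patches/{id}/ endpoint, as in the exchange shown below. A minimal sketch in Python, assuming the third-party "requests" package is installed (the URL and the field names are taken from the sample response that follows):

    import requests

    # Fetch patch 117654 from the DPDK Patchwork instance as JSON.
    resp = requests.get("https://patches.dpdk.org/api/patches/117654/")
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])                # "[v9,08/14] baseband/acc: add LDPC processing functions"
    print(patch["state"])               # "superseded"
    print(patch["submitter"]["email"])  # "nicolas.chautru@intel.com"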

GET /api/patches/117654/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 117654,
    "url": "https://patches.dpdk.org/api/patches/117654/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20221007213851.31524-9-nicolas.chautru@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221007213851.31524-9-nicolas.chautru@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221007213851.31524-9-nicolas.chautru@intel.com",
    "date": "2022-10-07T21:38:45",
    "name": "[v9,08/14] baseband/acc: add LDPC processing functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "43f2c26d3d9bb986e004c8eac6de62d99a7afb3d",
    "submitter": {
        "id": 1314,
        "url": "https://patches.dpdk.org/api/people/1314/?format=api",
        "name": "Chautru, Nicolas",
        "email": "nicolas.chautru@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "https://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20221007213851.31524-9-nicolas.chautru@intel.com/mbox/",
    "series": [
        {
            "id": 25041,
            "url": "https://patches.dpdk.org/api/series/25041/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=25041",
            "date": "2022-10-07T21:38:37",
            "name": "bbdev ACC200 PMD",
            "version": 9,
            "mbox": "https://patches.dpdk.org/series/25041/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/117654/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/117654/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9607EA0543;\n\tFri,  7 Oct 2022 23:40:06 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0A91942B6E;\n\tFri,  7 Oct 2022 23:39:18 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id 80B9E400D5\n for <dev@dpdk.org>; Fri,  7 Oct 2022 23:39:12 +0200 (CEST)",
            "from orsmga007.jf.intel.com ([10.7.209.58])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 07 Oct 2022 14:39:12 -0700",
            "from unknown (HELO icx-npg-scs1-cp1.localdomain) ([10.233.180.245])\n by orsmga007.jf.intel.com with ESMTP; 07 Oct 2022 14:39:11 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1665178752; x=1696714752;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=T+m4MDiJu29VCMVcCioqE7Y/fsG/O2j5EsDKB5iHKbE=;\n b=OEeeSj0WoxCNXmOCgtBQwGaBeY/eF2rMvrJp1K7eRRZ2ZsjmXE4EmB8E\n LoHf5sfekjjtEwPj0N9YnTKiOFypqzUB3FRcDuyMxVL7/XpJMUXO1Bu02\n 6BClejL04zX+KUASOgW3ki1psnDCIpUn2D/c3j6nk8Xf7g3iJ0SLOC2ph\n EjkyUUgfEgHEPL2/kS7OYOjmlRjN+AfqhdaFAow9BOrS7HjhI8jFmQPqn\n q7E66p/EVr4NxNDVnwcHdCFnkoAbppja6nYG8BjlkmNXO3P1GdLh4WgrQ\n e68lhp+hz7zHrQQbrGOIMqaqtKJz+pdbojRXqzl3FFdikbFHh81RrgG4u Q==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10493\"; a=\"291118505\"",
            "E=Sophos;i=\"5.95,167,1661842800\"; d=\"scan'208\";a=\"291118505\"",
            "E=McAfee;i=\"6500,9779,10493\"; a=\"620388464\"",
            "E=Sophos;i=\"5.95,167,1661842800\"; d=\"scan'208\";a=\"620388464\""
        ],
        "X-ExtLoop1": "1",
        "From": "Nicolas Chautru <nicolas.chautru@intel.com>",
        "To": "dev@dpdk.org,\n\tgakhil@marvell.com,\n\tmaxime.coquelin@redhat.com",
        "Cc": "trix@redhat.com, mdr@ashroe.eu, bruce.richardson@intel.com,\n hemant.agrawal@nxp.com, david.marchand@redhat.com,\n stephen@networkplumber.org, hernan.vargas@intel.com,\n Nic Chautru <nicolas.chautru@intel.com>",
        "Subject": "[PATCH v9 08/14] baseband/acc: add LDPC processing functions",
        "Date": "Fri,  7 Oct 2022 14:38:45 -0700",
        "Message-Id": "<20221007213851.31524-9-nicolas.chautru@intel.com>",
        "X-Mailer": "git-send-email 2.37.1",
        "In-Reply-To": "<20221007213851.31524-1-nicolas.chautru@intel.com>",
        "References": "<20221007213851.31524-1-nicolas.chautru@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Nic Chautru <nicolas.chautru@intel.com>\n\nAdded LDPC encode and decode processing functions.\n\nSigned-off-by: Nic Chautru <nicolas.chautru@intel.com>\nReviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n drivers/baseband/acc/acc_common.h     |   84 ++\n drivers/baseband/acc/rte_acc200_pmd.c | 1321 ++++++++++++++++++++++++-\n 2 files changed, 1400 insertions(+), 5 deletions(-)",
    "diff": "diff --git a/drivers/baseband/acc/acc_common.h b/drivers/baseband/acc/acc_common.h\nindex 12af15c3cb..e29571356a 100644\n--- a/drivers/baseband/acc/acc_common.h\n+++ b/drivers/baseband/acc/acc_common.h\n@@ -1302,4 +1302,88 @@ acc_pci_remove(struct rte_pci_device *pci_dev)\n \treturn 0;\n }\n \n+static inline void\n+acc_enqueue_status(struct rte_bbdev_queue_data *q_data,\n+\t\tenum rte_bbdev_enqueue_status status)\n+{\n+\tq_data->enqueue_status = status;\n+\tq_data->queue_stats.enqueue_status_count[status]++;\n+\n+\trte_acc_log(WARNING, \"Enqueue Status: %s %#\"PRIx64\"\",\n+\t\t\trte_bbdev_enqueue_status_str(status),\n+\t\t\tq_data->queue_stats.enqueue_status_count[status]);\n+}\n+\n+static inline void\n+acc_enqueue_invalid(struct rte_bbdev_queue_data *q_data)\n+{\n+\tacc_enqueue_status(q_data, RTE_BBDEV_ENQ_STATUS_INVALID_OP);\n+}\n+\n+static inline void\n+acc_enqueue_ring_full(struct rte_bbdev_queue_data *q_data)\n+{\n+\tacc_enqueue_status(q_data, RTE_BBDEV_ENQ_STATUS_RING_FULL);\n+}\n+\n+static inline void\n+acc_enqueue_queue_full(struct rte_bbdev_queue_data *q_data)\n+{\n+\tacc_enqueue_status(q_data, RTE_BBDEV_ENQ_STATUS_QUEUE_FULL);\n+}\n+\n+/* Number of available descriptor in ring to enqueue */\n+static inline uint32_t\n+acc_ring_avail_enq(struct acc_queue *q)\n+{\n+\treturn (q->sw_ring_depth - 1 + q->sw_ring_tail - q->sw_ring_head) & q->sw_ring_wrap_mask;\n+}\n+\n+/* Number of available descriptor in ring to dequeue */\n+static inline uint32_t\n+acc_ring_avail_deq(struct acc_queue *q)\n+{\n+\treturn (q->sw_ring_depth + q->sw_ring_head - q->sw_ring_tail) & q->sw_ring_wrap_mask;\n+}\n+\n+/* Check room in AQ for the enqueues batches into Qmgr */\n+static inline int32_t\n+acc_aq_avail(struct rte_bbdev_queue_data *q_data, uint16_t num_ops)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tint32_t aq_avail = q->aq_depth -\n+\t\t\t((q->aq_enqueued - q->aq_dequeued +\n+\t\t\tACC_MAX_QUEUE_DEPTH) % ACC_MAX_QUEUE_DEPTH)\n+\t\t\t- (num_ops >> 7);\n+\tif (aq_avail <= 0)\n+\t\tacc_enqueue_queue_full(q_data);\n+\treturn aq_avail;\n+}\n+\n+/* Calculates number of CBs in processed encoder TB based on 'r' and input\n+ * length.\n+ */\n+static inline uint8_t\n+get_num_cbs_in_tb_ldpc_enc(struct rte_bbdev_op_ldpc_enc *ldpc_enc)\n+{\n+\tuint8_t c, r, crc24_bits = 0;\n+\tuint16_t k = (ldpc_enc->basegraph == 1 ? 
22 : 10) * ldpc_enc->z_c\n+\t\t- ldpc_enc->n_filler;\n+\tuint8_t cbs_in_tb = 0;\n+\tint32_t length;\n+\n+\tlength = ldpc_enc->input.length;\n+\tr = ldpc_enc->tb_params.r;\n+\tc = ldpc_enc->tb_params.c;\n+\tcrc24_bits = 0;\n+\tif (check_bit(ldpc_enc->op_flags, RTE_BBDEV_LDPC_CRC_24B_ATTACH))\n+\t\tcrc24_bits = 24;\n+\twhile (length > 0 && r < c) {\n+\t\tlength -= (k - crc24_bits) >> 3;\n+\t\tr++;\n+\t\tcbs_in_tb++;\n+\t}\n+\treturn cbs_in_tb;\n+}\n+\n #endif /* _ACC_COMMON_H_ */\ndiff --git a/drivers/baseband/acc/rte_acc200_pmd.c b/drivers/baseband/acc/rte_acc200_pmd.c\nindex 68103d6471..0f018b19ac 100644\n--- a/drivers/baseband/acc/rte_acc200_pmd.c\n+++ b/drivers/baseband/acc/rte_acc200_pmd.c\n@@ -564,15 +564,50 @@ acc200_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,\n \treturn ret;\n }\n \n+static inline void\n+acc200_print_op(struct rte_bbdev_dec_op *op, enum rte_bbdev_op_type op_type,\n+\t\tuint16_t index)\n+{\n+\tif (op == NULL)\n+\t\treturn;\n+\tif (op_type == RTE_BBDEV_OP_LDPC_DEC)\n+\t\trte_bbdev_log(INFO,\n+\t\t\t\"  Op 5GUL %d %d %d %d %d %d %d %d %d %d %d %d\",\n+\t\t\tindex,\n+\t\t\top->ldpc_dec.basegraph, op->ldpc_dec.z_c,\n+\t\t\top->ldpc_dec.n_cb, op->ldpc_dec.q_m,\n+\t\t\top->ldpc_dec.n_filler, op->ldpc_dec.cb_params.e,\n+\t\t\top->ldpc_dec.op_flags, op->ldpc_dec.rv_index,\n+\t\t\top->ldpc_dec.iter_max, op->ldpc_dec.iter_count,\n+\t\t\top->ldpc_dec.harq_combined_input.length\n+\t\t\t);\n+\telse if (op_type == RTE_BBDEV_OP_LDPC_ENC) {\n+\t\tstruct rte_bbdev_enc_op *op_dl = (struct rte_bbdev_enc_op *) op;\n+\t\trte_bbdev_log(INFO,\n+\t\t\t\"  Op 5GDL %d %d %d %d %d %d %d %d %d\",\n+\t\t\tindex,\n+\t\t\top_dl->ldpc_enc.basegraph, op_dl->ldpc_enc.z_c,\n+\t\t\top_dl->ldpc_enc.n_cb, op_dl->ldpc_enc.q_m,\n+\t\t\top_dl->ldpc_enc.n_filler, op_dl->ldpc_enc.cb_params.e,\n+\t\t\top_dl->ldpc_enc.op_flags, op_dl->ldpc_enc.rv_index\n+\t\t\t);\n+\t}\n+}\n \n static int\n acc_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)\n {\n \tstruct acc_queue *q;\n+\tstruct rte_bbdev_dec_op *op;\n+\tuint16_t i;\n \tq = dev->data->queues[queue_id].queue_private;\n \trte_bbdev_log(INFO, \"Queue Stop %d H/T/D %d %d %x OpType %d\",\n \t\t\tqueue_id, q->sw_ring_head, q->sw_ring_tail,\n \t\t\tq->sw_ring_depth, q->op_type);\n+\tfor (i = 0; i < q->sw_ring_depth; ++i) {\n+\t\top = (q->ring_addr + i)->req.op_addr;\n+\t\tacc200_print_op(op, q->op_type, i);\n+\t}\n \t/* ignore all operations in flight and clear counters */\n \tq->sw_ring_tail = q->sw_ring_head;\n \tq->aq_enqueued = 0;\n@@ -614,6 +649,43 @@ acc200_dev_info_get(struct rte_bbdev *dev,\n \tstruct acc_device *d = dev->data->dev_private;\n \tint i;\n \tstatic const struct rte_bbdev_op_cap bbdev_capabilities[] = {\n+\t\t{\n+\t\t\t.type   = RTE_BBDEV_OP_LDPC_ENC,\n+\t\t\t.cap.ldpc_enc = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\tRTE_BBDEV_LDPC_RATE_MATCH |\n+\t\t\t\t\tRTE_BBDEV_LDPC_CRC_24B_ATTACH |\n+\t\t\t\t\tRTE_BBDEV_LDPC_INTERLEAVER_BYPASS,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_dst =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type   = RTE_BBDEV_OP_LDPC_DEC,\n+\t\t\t.cap.ldpc_dec = {\n+\t\t\t.capability_flags =\n+\t\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |\n+\t\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |\n+\t\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK |\n+\t\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_16_CHECK |\n+\t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_ITERATION_STOP_ENABLE 
|\n+\t\t\t\tRTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS |\n+\t\t\t\tRTE_BBDEV_LDPC_DEC_SCATTER_GATHER |\n+\t\t\t\tRTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION |\n+\t\t\t\tRTE_BBDEV_LDPC_LLR_COMPRESSION,\n+\t\t\t.llr_size = 8,\n+\t\t\t.llr_decimals = 1,\n+\t\t\t.num_buffers_src =\n+\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t.num_buffers_hard_out =\n+\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t.num_buffers_soft_out = 0,\n+\t\t\t}\n+\t\t},\n \t\tRTE_BBDEV_END_OF_CAPABILITIES_LIST()\n \t};\n \n@@ -630,13 +702,15 @@ acc200_dev_info_get(struct rte_bbdev *dev,\n \tdev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;\n \tdev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = 0;\n \tdev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = 0;\n-\tdev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = 0;\n-\tdev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = 0;\n+\tdev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = d->acc_conf.q_ul_5g.num_aqs_per_groups *\n+\t\t\td->acc_conf.q_ul_5g.num_qgroups;\n+\tdev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = d->acc_conf.q_dl_5g.num_aqs_per_groups *\n+\t\t\td->acc_conf.q_dl_5g.num_qgroups;\n \tdev_info->num_queues[RTE_BBDEV_OP_FFT] = 0;\n \tdev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = 0;\n \tdev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = 0;\n-\tdev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = 0;\n-\tdev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = 0;\n+\tdev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = d->acc_conf.q_ul_5g.num_qgroups;\n+\tdev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = d->acc_conf.q_dl_5g.num_qgroups;\n \tdev_info->queue_priority[RTE_BBDEV_OP_FFT] = 0;\n \tdev_info->max_num_queues = 0;\n \tfor (i = RTE_BBDEV_OP_NONE; i <= RTE_BBDEV_OP_FFT; i++)\n@@ -679,13 +753,1250 @@ static struct rte_pci_id pci_id_acc200_vf_map[] = {\n \t{.device_id = 0},\n };\n \n-/* Initialization Function. */\n+/* Fill in a frame control word for LDPC decoding. */\n+static inline void\n+acc200_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw,\n+\t\tunion acc_harq_layout_data *harq_layout)\n+{\n+\tuint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset;\n+\tuint32_t harq_index;\n+\tuint32_t l;\n+\n+\tfcw->qm = op->ldpc_dec.q_m;\n+\tfcw->nfiller = op->ldpc_dec.n_filler;\n+\tfcw->BG = (op->ldpc_dec.basegraph - 1);\n+\tfcw->Zc = op->ldpc_dec.z_c;\n+\tfcw->ncb = op->ldpc_dec.n_cb;\n+\tfcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_dec.basegraph,\n+\t\t\top->ldpc_dec.rv_index);\n+\tif (op->ldpc_dec.code_block_mode == RTE_BBDEV_CODE_BLOCK)\n+\t\tfcw->rm_e = op->ldpc_dec.cb_params.e;\n+\telse\n+\t\tfcw->rm_e = (op->ldpc_dec.tb_params.r <\n+\t\t\t\top->ldpc_dec.tb_params.cab) ?\n+\t\t\t\t\t\top->ldpc_dec.tb_params.ea :\n+\t\t\t\t\t\top->ldpc_dec.tb_params.eb;\n+\n+\tif (unlikely(check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE) &&\n+\t\t\t(op->ldpc_dec.harq_combined_input.length == 0))) {\n+\t\trte_bbdev_log(WARNING, \"Null HARQ input size provided\");\n+\t\t/* Disable HARQ input in that case to carry forward. 
*/\n+\t\top->ldpc_dec.op_flags ^= RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE;\n+\t}\n+\tif (unlikely(fcw->rm_e == 0)) {\n+\t\trte_bbdev_log(WARNING, \"Null E input provided\");\n+\t\tfcw->rm_e = 2;\n+\t}\n+\n+\tfcw->hcin_en = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);\n+\tfcw->hcout_en = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);\n+\tfcw->crc_select = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);\n+\tfcw->bypass_dec = 0;\n+\tfcw->bypass_intlv = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS);\n+\tif (op->ldpc_dec.q_m == 1) {\n+\t\tfcw->bypass_intlv = 1;\n+\t\tfcw->qm = 2;\n+\t}\n+\tfcw->hcin_decomp_mode = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);\n+\tfcw->hcout_comp_mode = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);\n+\tfcw->llr_pack_mode = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_LLR_COMPRESSION);\n+\tharq_index = hq_index(op->ldpc_dec.harq_combined_output.offset);\n+\n+\tif (fcw->hcin_en > 0) {\n+\t\tharq_in_length = op->ldpc_dec.harq_combined_input.length;\n+\t\tif (fcw->hcin_decomp_mode > 0)\n+\t\t\tharq_in_length = harq_in_length * 8 / 6;\n+\t\tharq_in_length = RTE_MIN(harq_in_length, op->ldpc_dec.n_cb\n+\t\t\t\t- op->ldpc_dec.n_filler);\n+\t\tharq_in_length = RTE_ALIGN_CEIL(harq_in_length, 64);\n+\t\tfcw->hcin_size0 = harq_in_length;\n+\t\tfcw->hcin_offset = 0;\n+\t\tfcw->hcin_size1 = 0;\n+\t} else {\n+\t\tfcw->hcin_size0 = 0;\n+\t\tfcw->hcin_offset = 0;\n+\t\tfcw->hcin_size1 = 0;\n+\t}\n+\n+\tfcw->itmax = op->ldpc_dec.iter_max;\n+\tfcw->itstop = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);\n+\tfcw->cnu_algo = ACC_ALGO_MSA;\n+\tfcw->synd_precoder = fcw->itstop;\n+\t/*\n+\t * These are all implicitly set:\n+\t * fcw->synd_post = 0;\n+\t * fcw->so_en = 0;\n+\t * fcw->so_bypass_rm = 0;\n+\t * fcw->so_bypass_intlv = 0;\n+\t * fcw->dec_convllr = 0;\n+\t * fcw->hcout_convllr = 0;\n+\t * fcw->hcout_size1 = 0;\n+\t * fcw->so_it = 0;\n+\t * fcw->hcout_offset = 0;\n+\t * fcw->negstop_th = 0;\n+\t * fcw->negstop_it = 0;\n+\t * fcw->negstop_en = 0;\n+\t * fcw->gain_i = 1;\n+\t * fcw->gain_h = 1;\n+\t */\n+\tif (fcw->hcout_en > 0) {\n+\t\tparity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8)\n+\t\t\t* op->ldpc_dec.z_c - op->ldpc_dec.n_filler;\n+\t\tk0_p = (fcw->k0 > parity_offset) ? 
fcw->k0 - op->ldpc_dec.n_filler : fcw->k0;\n+\t\tncb_p = fcw->ncb - op->ldpc_dec.n_filler;\n+\t\tl = k0_p + fcw->rm_e;\n+\t\tharq_out_length = (uint16_t) fcw->hcin_size0;\n+\t\tharq_out_length = RTE_MIN(RTE_MAX(harq_out_length, l), ncb_p);\n+\t\tharq_out_length = RTE_ALIGN_CEIL(harq_out_length, 64);\n+\t\tfcw->hcout_size0 = harq_out_length;\n+\t\tfcw->hcout_size1 = 0;\n+\t\tfcw->hcout_offset = 0;\n+\t\tharq_layout[harq_index].offset = fcw->hcout_offset;\n+\t\tharq_layout[harq_index].size0 = fcw->hcout_size0;\n+\t} else {\n+\t\tfcw->hcout_size0 = 0;\n+\t\tfcw->hcout_size1 = 0;\n+\t\tfcw->hcout_offset = 0;\n+\t}\n+\n+\tfcw->tb_crc_select = 0;\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK))\n+\t\tfcw->tb_crc_select = 2;\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_16_CHECK))\n+\t\tfcw->tb_crc_select = 1;\n+}\n+\n+static inline int\n+acc200_dma_desc_ld_fill(struct rte_bbdev_dec_op *op, struct acc_dma_req_desc *desc,\n+\t\tstruct rte_mbuf **input, struct rte_mbuf *h_output,\n+\t\tuint32_t *in_offset, uint32_t *h_out_offset,\n+\t\tuint32_t *h_out_length, uint32_t *mbuf_total_left,\n+\t\tuint32_t *seg_total_left, struct acc_fcw_ld *fcw)\n+{\n+\tstruct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;\n+\tint next_triplet = 1; /* FCW already done. */\n+\tuint32_t input_length;\n+\tuint16_t output_length, crc24_overlap = 0;\n+\tuint16_t sys_cols, K, h_p_size, h_np_size;\n+\tbool h_comp = check_bit(dec->op_flags, RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);\n+\n+\tacc_header_init(desc);\n+\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))\n+\t\tcrc24_overlap = 24;\n+\n+\t/* Compute some LDPC BG lengths. */\n+\tinput_length = fcw->rm_e;\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_LLR_COMPRESSION))\n+\t\tinput_length = (input_length * 3 + 3) / 4;\n+\tsys_cols = (dec->basegraph == 1) ? 
22 : 10;\n+\tK = sys_cols * dec->z_c;\n+\toutput_length = K - dec->n_filler - crc24_overlap;\n+\n+\tif (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < input_length))) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\t*mbuf_total_left, input_length);\n+\t\treturn -1;\n+\t}\n+\n+\tnext_triplet = acc_dma_fill_blk_type_in(desc, input,\n+\t\t\tin_offset, input_length,\n+\t\t\tseg_total_left, next_triplet,\n+\t\t\tcheck_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_DEC_SCATTER_GATHER));\n+\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {\n+\t\tif (op->ldpc_dec.harq_combined_input.data == 0) {\n+\t\t\trte_bbdev_log(ERR, \"HARQ input is not defined\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\th_p_size = fcw->hcin_size0 + fcw->hcin_size1;\n+\t\tif (h_comp)\n+\t\t\th_p_size = (h_p_size * 3 + 3) / 4;\n+\t\tif (op->ldpc_dec.harq_combined_input.data == 0) {\n+\t\t\trte_bbdev_log(ERR, \"HARQ input is not defined\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tacc_dma_fill_blk_type(\n+\t\t\t\tdesc,\n+\t\t\t\top->ldpc_dec.harq_combined_input.data,\n+\t\t\t\top->ldpc_dec.harq_combined_input.offset,\n+\t\t\t\th_p_size,\n+\t\t\t\tnext_triplet,\n+\t\t\t\tACC_DMA_BLKID_IN_HARQ);\n+\t\tnext_triplet++;\n+\t}\n+\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->m2dlen = next_triplet;\n+\t*mbuf_total_left -= input_length;\n+\n+\tnext_triplet = acc_dma_fill_blk_type(desc, h_output,\n+\t\t\t*h_out_offset, output_length >> 3, next_triplet,\n+\t\t\tACC_DMA_BLKID_OUT_HARD);\n+\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {\n+\t\tif (op->ldpc_dec.harq_combined_output.data == 0) {\n+\t\t\trte_bbdev_log(ERR, \"HARQ output is not defined\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Pruned size of the HARQ. */\n+\t\th_p_size = fcw->hcout_size0 + fcw->hcout_size1;\n+\t\t/* Non-Pruned size of the HARQ. */\n+\t\th_np_size = fcw->hcout_offset > 0 ?\n+\t\t\t\tfcw->hcout_offset + fcw->hcout_size1 :\n+\t\t\t\th_p_size;\n+\t\tif (h_comp) {\n+\t\t\th_np_size = (h_np_size * 3 + 3) / 4;\n+\t\t\th_p_size = (h_p_size * 3 + 3) / 4;\n+\t\t}\n+\t\tdec->harq_combined_output.length = h_np_size;\n+\t\tacc_dma_fill_blk_type(\n+\t\t\t\tdesc,\n+\t\t\t\tdec->harq_combined_output.data,\n+\t\t\t\tdec->harq_combined_output.offset,\n+\t\t\t\th_p_size,\n+\t\t\t\tnext_triplet,\n+\t\t\t\tACC_DMA_BLKID_OUT_HARQ);\n+\n+\t\tnext_triplet++;\n+\t}\n+\n+\t*h_out_length = output_length >> 3;\n+\tdec->hard_output.length += *h_out_length;\n+\t*h_out_offset += *h_out_length;\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->d2mlen = next_triplet - desc->m2dlen;\n+\n+\tdesc->op_addr = op;\n+\n+\treturn 0;\n+}\n+\n+static inline void\n+acc200_dma_desc_ld_update(struct rte_bbdev_dec_op *op,\n+\t\tstruct acc_dma_req_desc *desc,\n+\t\tstruct rte_mbuf *input, struct rte_mbuf *h_output,\n+\t\tuint32_t *in_offset, uint32_t *h_out_offset,\n+\t\tuint32_t *h_out_length,\n+\t\tunion acc_harq_layout_data *harq_layout)\n+{\n+\tint next_triplet = 1; /* FCW already done. 
*/\n+\tdesc->data_ptrs[next_triplet].address = rte_pktmbuf_iova_offset(input, *in_offset);\n+\tnext_triplet++;\n+\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {\n+\t\tstruct rte_bbdev_op_data hi = op->ldpc_dec.harq_combined_input;\n+\t\tdesc->data_ptrs[next_triplet].address =\n+\t\t\t\trte_pktmbuf_iova_offset(hi.data, hi.offset);\n+\t\tnext_triplet++;\n+\t}\n+\n+\tdesc->data_ptrs[next_triplet].address =\n+\t\t\trte_pktmbuf_iova_offset(h_output, *h_out_offset);\n+\t*h_out_length = desc->data_ptrs[next_triplet].blen;\n+\tnext_triplet++;\n+\n+\tif (check_bit(op->ldpc_dec.op_flags,\n+\t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {\n+\t\t/* Adjust based on previous operation. */\n+\t\tstruct rte_bbdev_dec_op *prev_op = desc->op_addr;\n+\t\top->ldpc_dec.harq_combined_output.length =\n+\t\t\t\tprev_op->ldpc_dec.harq_combined_output.length;\n+\t\tuint32_t harq_idx = hq_index(op->ldpc_dec.harq_combined_output.offset);\n+\t\tuint32_t prev_harq_idx = hq_index(prev_op->ldpc_dec.harq_combined_output.offset);\n+\t\tharq_layout[harq_idx].val = harq_layout[prev_harq_idx].val;\n+\t\tstruct rte_bbdev_op_data ho = op->ldpc_dec.harq_combined_output;\n+\t\tdesc->data_ptrs[next_triplet].address =\n+\t\t\t\trte_pktmbuf_iova_offset(ho.data, ho.offset);\n+\t\tnext_triplet++;\n+\t}\n+\n+\top->ldpc_dec.hard_output.length += *h_out_length;\n+\tdesc->op_addr = op;\n+}\n+\n+/* Enqueue one encode operations for ACC200 device in CB mode\n+ * multiplexed on the same descriptor.\n+ */\n+static inline int\n+enqueue_ldpc_enc_n_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ops,\n+\t\tuint16_t total_enqueued_descs, int16_t num)\n+{\n+\tunion acc_dma_desc *desc = NULL;\n+\tuint32_t out_length;\n+\tstruct rte_mbuf *output_head, *output;\n+\tint i, next_triplet;\n+\tuint16_t  in_length_in_bytes;\n+\tstruct rte_bbdev_op_ldpc_enc *enc = &ops[0]->ldpc_enc;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_descs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tacc_fcw_le_fill(ops[0], &desc->req.fcw_le, num, 0);\n+\n+\t/** This could be done at polling. */\n+\tacc_header_init(&desc->req);\n+\tdesc->req.numCBs = num;\n+\n+\tin_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len;\n+\tout_length = (enc->cb_params.e + 7) >> 3;\n+\tdesc->req.m2dlen = 1 + num;\n+\tdesc->req.d2mlen = num;\n+\tnext_triplet = 1;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tdesc->req.data_ptrs[next_triplet].address =\n+\t\t\trte_pktmbuf_iova_offset(ops[i]->ldpc_enc.input.data, 0);\n+\t\tdesc->req.data_ptrs[next_triplet].blen = in_length_in_bytes;\n+\t\tnext_triplet++;\n+\t\tdesc->req.data_ptrs[next_triplet].address = rte_pktmbuf_iova_offset(\n+\t\t\t\tops[i]->ldpc_enc.output.data, 0);\n+\t\tdesc->req.data_ptrs[next_triplet].blen = out_length;\n+\t\tnext_triplet++;\n+\t\tops[i]->ldpc_enc.output.length = out_length;\n+\t\toutput_head = output = ops[i]->ldpc_enc.output.data;\n+\t\tmbuf_append(output_head, output, out_length);\n+\t\toutput->data_len = out_length;\n+\t}\n+\n+\tdesc->req.op_addr = ops[0];\n+\t/* Keep track of pointers even when multiplexed in single descriptor. */\n+\tstruct acc_ptrs *context_ptrs = q->companion_ring_addr + desc_idx;\n+\tfor (i = 0; i < num; i++)\n+\t\tcontext_ptrs->ptr[i].op_addr = ops[i];\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\trte_memdump(stderr, \"FCW\", &desc->req.fcw_le,\n+\t\t\tsizeof(desc->req.fcw_le) - 8);\n+\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\n+\t/* Number of compatible CBs/ops successfully prepared to enqueue. 
*/\n+\treturn num;\n+}\n+\n+/* Enqueue one encode operations for ACC200 device for a partial TB\n+ * all codes blocks have same configuration multiplexed on the same descriptor.\n+ */\n+static inline void\n+enqueue_ldpc_enc_part_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t total_enqueued_descs, int16_t num_cbs, uint32_t e,\n+\t\tuint16_t in_len_B, uint32_t out_len_B, uint32_t *in_offset,\n+\t\tuint32_t *out_offset)\n+{\n+\n+\tunion acc_dma_desc *desc = NULL;\n+\tstruct rte_mbuf *output_head, *output;\n+\tint i, next_triplet;\n+\tstruct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;\n+\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_descs) & q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tacc_fcw_le_fill(op, &desc->req.fcw_le, num_cbs, e);\n+\n+\t/** This could be done at polling. */\n+\tacc_header_init(&desc->req);\n+\tdesc->req.numCBs = num_cbs;\n+\n+\tdesc->req.m2dlen = 1 + num_cbs;\n+\tdesc->req.d2mlen = num_cbs;\n+\tnext_triplet = 1;\n+\n+\tfor (i = 0; i < num_cbs; i++) {\n+\t\tdesc->req.data_ptrs[next_triplet].address = rte_pktmbuf_iova_offset(\n+\t\t\t\tenc->input.data, *in_offset);\n+\t\t*in_offset += in_len_B;\n+\t\tdesc->req.data_ptrs[next_triplet].blen = in_len_B;\n+\t\tnext_triplet++;\n+\t\tdesc->req.data_ptrs[next_triplet].address = rte_pktmbuf_iova_offset(\n+\t\t\t\tenc->output.data, *out_offset);\n+\t\t*out_offset += out_len_B;\n+\t\tdesc->req.data_ptrs[next_triplet].blen = out_len_B;\n+\t\tnext_triplet++;\n+\t\tenc->output.length += out_len_B;\n+\t\toutput_head = output = enc->output.data;\n+\t\tmbuf_append(output_head, output, out_len_B);\n+\t}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\trte_memdump(stderr, \"FCW\", &desc->req.fcw_le,\n+\t\t\tsizeof(desc->req.fcw_le) - 8);\n+\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\n+}\n+\n+/* Enqueue one encode operations for ACC200 device in TB mode.\n+ * returns the number of descs used.\n+ */\n+static inline int\n+enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t enq_descs, uint8_t cbs_in_tb)\n+{\n+\tuint8_t num_a, num_b;\n+\tuint16_t desc_idx, input_len_B, return_descs;\n+\tuint8_t r = op->ldpc_enc.tb_params.r;\n+\tuint8_t cab =  op->ldpc_enc.tb_params.cab;\n+\tunion acc_dma_desc *desc;\n+\tuint16_t init_enq_descs = enq_descs;\n+\tuint32_t in_offset = 0, out_offset = 0;\n+\n+\tinput_len_B = ((op->ldpc_enc.basegraph == 1 ? 22 : 10) * op->ldpc_enc.z_c) >> 3;\n+\n+\tif (check_bit(op->ldpc_enc.op_flags, RTE_BBDEV_LDPC_CRC_24B_ATTACH))\n+\t\tinput_len_B -= 3;\n+\n+\tif (r < cab) {\n+\t\tnum_a = cab - r;\n+\t\tnum_b = cbs_in_tb - cab;\n+\t} else {\n+\t\tnum_a = 0;\n+\t\tnum_b = cbs_in_tb - r;\n+\t}\n+\n+\twhile (num_a > 0) {\n+\t\tuint32_t e = op->ldpc_enc.tb_params.ea;\n+\t\tuint32_t out_len_B = (e + 7) >> 3;\n+\t\tuint8_t enq = RTE_MIN(num_a, ACC_MUX_5GDL_DESC);\n+\t\tnum_a -= enq;\n+\t\tenqueue_ldpc_enc_part_tb(q, op, enq_descs, enq, e, input_len_B,\n+\t\t\t\tout_len_B, &in_offset, &out_offset);\n+\t\tenq_descs++;\n+\t}\n+\twhile (num_b > 0) {\n+\t\tuint32_t e = op->ldpc_enc.tb_params.eb;\n+\t\tuint32_t out_len_B = (e + 7) >> 3;\n+\t\tuint8_t enq = RTE_MIN(num_b, ACC_MUX_5GDL_DESC);\n+\t\tnum_b -= enq;\n+\t\tenqueue_ldpc_enc_part_tb(q, op, enq_descs, enq, e, input_len_B,\n+\t\t\t\tout_len_B, &in_offset, &out_offset);\n+\t\tenq_descs++;\n+\t}\n+\n+\treturn_descs = enq_descs - init_enq_descs;\n+\t/* Keep total number of CBs in first TB. 
*/\n+\tdesc_idx = ((q->sw_ring_head + init_enq_descs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tdesc->req.cbs_in_tb = return_descs; /** Actual number of descriptors. */\n+\tdesc->req.op_addr = op;\n+\n+\t/* Set SDone on last CB descriptor for TB mode. */\n+\tdesc_idx = ((q->sw_ring_head + enq_descs - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\tdesc->req.op_addr = op;\n+\treturn return_descs;\n+}\n+\n+/** Enqueue one decode operations for ACC200 device in CB mode. */\n+static inline int\n+enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n+\t\tuint16_t total_enqueued_cbs, bool same_op)\n+{\n+\tint ret, hq_len;\n+\tunion acc_dma_desc *desc;\n+\tuint16_t desc_idx;\n+\tstruct rte_mbuf *input, *h_output_head, *h_output;\n+\tuint32_t in_offset, h_out_offset, mbuf_total_left, h_out_length = 0;\n+\tunion acc_harq_layout_data *harq_layout;\n+\n+\tif (op->ldpc_dec.cb_params.e == 0)\n+\t\treturn -EINVAL;\n+\n+\tdesc_idx = ((q->sw_ring_head + total_enqueued_cbs) & q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\n+\tinput = op->ldpc_dec.input.data;\n+\th_output_head = h_output = op->ldpc_dec.hard_output.data;\n+\tin_offset = op->ldpc_dec.input.offset;\n+\th_out_offset = op->ldpc_dec.hard_output.offset;\n+\tmbuf_total_left = op->ldpc_dec.input.length;\n+\tharq_layout = q->d->harq_layout;\n+\n+\tif (same_op) {\n+\t\tunion acc_dma_desc *prev_desc;\n+\t\tdesc_idx = ((q->sw_ring_head + total_enqueued_cbs - 1) & q->sw_ring_wrap_mask);\n+\t\tprev_desc = q->ring_addr + desc_idx;\n+\t\tuint8_t *prev_ptr = (uint8_t *) prev_desc;\n+\t\tuint8_t *new_ptr = (uint8_t *) desc;\n+\t\t/* Copy first 4 words and BDESCs. */\n+\t\trte_memcpy(new_ptr, prev_ptr, ACC_5GUL_SIZE_0);\n+\t\trte_memcpy(new_ptr + ACC_5GUL_OFFSET_0,\n+\t\t\t\tprev_ptr + ACC_5GUL_OFFSET_0,\n+\t\t\t\tACC_5GUL_SIZE_1);\n+\t\tdesc->req.op_addr = prev_desc->req.op_addr;\n+\t\t/* Copy FCW. */\n+\t\trte_memcpy(new_ptr + ACC_DESC_FCW_OFFSET,\n+\t\t\t\tprev_ptr + ACC_DESC_FCW_OFFSET,\n+\t\t\t\tACC_FCW_LD_BLEN);\n+\t\tacc200_dma_desc_ld_update(op, &desc->req, input, h_output,\n+\t\t\t\t&in_offset, &h_out_offset,\n+\t\t\t\t&h_out_length, harq_layout);\n+\t} else {\n+\t\tstruct acc_fcw_ld *fcw;\n+\t\tuint32_t seg_total_left;\n+\t\tfcw = &desc->req.fcw_ld;\n+\t\tacc200_fcw_ld_fill(op, fcw, harq_layout);\n+\n+\t\t/* Special handling when using mbuf or not. */\n+\t\tif (check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_DEC_SCATTER_GATHER))\n+\t\t\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n+\t\telse\n+\t\t\tseg_total_left = fcw->rm_e;\n+\n+\t\tret = acc200_dma_desc_ld_fill(op, &desc->req, &input, h_output,\n+\t\t\t\t&in_offset, &h_out_offset,\n+\t\t\t\t&h_out_length, &mbuf_total_left,\n+\t\t\t\t&seg_total_left, fcw);\n+\t\tif (unlikely(ret < 0))\n+\t\t\treturn ret;\n+\t}\n+\n+\t/* Hard output. */\n+\tmbuf_append(h_output_head, h_output, h_out_length);\n+\tif (op->ldpc_dec.harq_combined_output.length > 0) {\n+\t\t/* Push the HARQ output into host memory. 
*/\n+\t\tstruct rte_mbuf *hq_output_head, *hq_output;\n+\t\thq_output_head = op->ldpc_dec.harq_combined_output.data;\n+\t\thq_output = op->ldpc_dec.harq_combined_output.data;\n+\t\thq_len = op->ldpc_dec.harq_combined_output.length;\n+\t\tif (unlikely(!mbuf_append(hq_output_head, hq_output, hq_len))) {\n+\t\t\trte_bbdev_log(ERR, \"HARQ output mbuf issue %d %d\\n\",\n+\t\t\t\t\thq_output->buf_len,\n+\t\t\t\t\thq_len);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\trte_memdump(stderr, \"FCW\", &desc->req.fcw_ld,\n+\t\t\tsizeof(desc->req.fcw_ld) - 8);\n+\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\n+\t/* One CB (one op) was successfully prepared to enqueue. */\n+\treturn 1;\n+}\n+\n+\n+/* Enqueue one decode operations for ACC200 device in TB mode. */\n+static inline int\n+enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n+\t\tuint16_t total_enqueued_cbs, uint8_t cbs_in_tb)\n+{\n+\tunion acc_dma_desc *desc = NULL;\n+\tunion acc_dma_desc *desc_first = NULL;\n+\tint ret;\n+\tuint8_t r, c;\n+\tuint32_t in_offset, h_out_offset, h_out_length, mbuf_total_left, seg_total_left;\n+\tstruct rte_mbuf *input, *h_output_head, *h_output;\n+\tuint16_t current_enqueued_cbs = 0;\n+\tuint16_t sys_cols, trail_len = 0;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs) & q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tdesc_first = desc;\n+\tuint64_t fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;\n+\tunion acc_harq_layout_data *harq_layout = q->d->harq_layout;\n+\tacc200_fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout);\n+\n+\tinput = op->ldpc_dec.input.data;\n+\th_output_head = h_output = op->ldpc_dec.hard_output.data;\n+\tin_offset = op->ldpc_dec.input.offset;\n+\th_out_offset = op->ldpc_dec.hard_output.offset;\n+\th_out_length = 0;\n+\tmbuf_total_left = op->ldpc_dec.input.length;\n+\tc = op->ldpc_dec.tb_params.c;\n+\tr = op->ldpc_dec.tb_params.r;\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK)) {\n+\t\tsys_cols = (op->ldpc_dec.basegraph == 1) ? 22 : 10;\n+\t\ttrail_len = sys_cols * op->ldpc_dec.z_c -\n+\t\t\t\top->ldpc_dec.n_filler - 24;\n+\t}\n+\n+\twhile (mbuf_total_left > 0 && r < c) {\n+\t\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_DEC_SCATTER_GATHER))\n+\t\t\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n+\t\telse\n+\t\t\tseg_total_left = op->ldpc_dec.input.length;\n+\t\t/* Set up DMA descriptor. */\n+\t\tdesc_idx = ((q->sw_ring_head + total_enqueued_cbs) & q->sw_ring_wrap_mask);\n+\t\tdesc = q->ring_addr + desc_idx;\n+\t\tfcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;\n+\t\tdesc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;\n+\t\tdesc->req.data_ptrs[0].blen = ACC_FCW_LD_BLEN;\n+\t\trte_memcpy(&desc->req.fcw_ld, &desc_first->req.fcw_ld, ACC_FCW_LD_BLEN);\n+\t\tdesc->req.fcw_ld.tb_trailer_size = (c - r - 1) * trail_len;\n+\n+\t\tret = acc200_dma_desc_ld_fill(op, &desc->req, &input,\n+\t\t\t\th_output, &in_offset, &h_out_offset,\n+\t\t\t\t&h_out_length,\n+\t\t\t\t&mbuf_total_left, &seg_total_left,\n+\t\t\t\t&desc->req.fcw_ld);\n+\n+\t\tif (unlikely(ret < 0))\n+\t\t\treturn ret;\n+\n+\t\t/* Hard output. */\n+\t\tmbuf_append(h_output_head, h_output, h_out_length);\n+\n+\t\t/* Set total number of CBs in TB. 
*/\n+\t\tdesc->req.cbs_in_tb = cbs_in_tb;\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\trte_memdump(stderr, \"FCW\", &desc->req.fcw_td,\n+\t\t\t\tsizeof(desc->req.fcw_td) - 8);\n+\t\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\t\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_DEC_SCATTER_GATHER)\n+\t\t\t\t&& (seg_total_left == 0)) {\n+\t\t\t/* Go to the next mbuf. */\n+\t\t\tinput = input->next;\n+\t\t\tin_offset = 0;\n+\t\t\th_output = h_output->next;\n+\t\t\th_out_offset = 0;\n+\t\t}\n+\t\ttotal_enqueued_cbs++;\n+\t\tcurrent_enqueued_cbs++;\n+\t\tr++;\n+\t}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tif (check_mbuf_total_left(mbuf_total_left) != 0)\n+\t\treturn -EINVAL;\n+#endif\n+\t/* Set SDone on last CB descriptor for TB mode. */\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\treturn current_enqueued_cbs;\n+}\n+\n+/** Enqueue encode operations for ACC200 device in CB mode. */\n+static inline uint16_t\n+acc200_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tint32_t avail = acc_ring_avail_enq(q);\n+\tuint16_t i = 0;\n+\tunion acc_dma_desc *desc;\n+\tint ret, desc_idx = 0;\n+\tint16_t enq, left = num;\n+\n+\twhile (left > 0) {\n+\t\tif (unlikely(avail < 1)) {\n+\t\t\tacc_enqueue_ring_full(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t\tavail--;\n+\t\tenq = RTE_MIN(left, ACC_MUX_5GDL_DESC);\n+\t\tenq = check_mux(&ops[i], enq);\n+\t\tret = enqueue_ldpc_enc_n_op_cb(q, &ops[i], desc_idx, enq);\n+\t\tif (ret < 0) {\n+\t\t\tacc_enqueue_invalid(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t\ti += enq;\n+\t\tdesc_idx++;\n+\t\tleft = num - i;\n+\t}\n+\n+\tif (unlikely(i == 0))\n+\t\treturn 0; /* Nothing to enqueue. */\n+\n+\t/* Set SDone in last CB in enqueued ops for CB mode. */\n+\tdesc = q->ring_addr + ((q->sw_ring_head + desc_idx - 1) & q->sw_ring_wrap_mask);\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\tacc_dma_enqueue(q, desc_idx, &q_data->queue_stats);\n+\n+\t/* Update stats. */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n+}\n+\n+/* Enqueue LDPC encode operations for ACC200 device in TB mode. */\n+static uint16_t\n+acc200_enqueue_ldpc_enc_tb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tint32_t avail = acc_ring_avail_enq(q);\n+\tuint16_t i, enqueued_descs = 0;\n+\tuint8_t cbs_in_tb;\n+\tint descs_used;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\tcbs_in_tb = get_num_cbs_in_tb_ldpc_enc(&ops[i]->ldpc_enc);\n+\t\t/* Check if there are available space for further processing. */\n+\t\tif (unlikely((avail - cbs_in_tb < 0) || (cbs_in_tb == 0))) {\n+\t\t\tacc_enqueue_ring_full(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tdescs_used = enqueue_ldpc_enc_one_op_tb(q, ops[i], enqueued_descs, cbs_in_tb);\n+\t\tif (descs_used < 0) {\n+\t\t\tacc_enqueue_invalid(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t\tenqueued_descs += descs_used;\n+\t\tavail -= descs_used;\n+\t}\n+\tif (unlikely(enqueued_descs == 0))\n+\t\treturn 0; /* Nothing to enqueue. */\n+\n+\tacc_dma_enqueue(q, enqueued_descs, &q_data->queue_stats);\n+\n+\t/* Update stats. */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n+}\n+\n+/* Enqueue encode operations for ACC200 device. 
*/\n+static uint16_t\n+acc200_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tint32_t aq_avail = acc_aq_avail(q_data, num);\n+\tif (unlikely((aq_avail <= 0) || (num == 0)))\n+\t\treturn 0;\n+\tif (ops[0]->ldpc_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)\n+\t\treturn acc200_enqueue_ldpc_enc_tb(q_data, ops, num);\n+\telse\n+\t\treturn acc200_enqueue_ldpc_enc_cb(q_data, ops, num);\n+}\n+\n+/* Enqueue decode operations for ACC200 device in TB mode. */\n+static uint16_t\n+acc200_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tint32_t avail = acc_ring_avail_enq(q);\n+\tuint16_t i, enqueued_cbs = 0;\n+\tuint8_t cbs_in_tb;\n+\tint ret;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\tcbs_in_tb = get_num_cbs_in_tb_ldpc_dec(&ops[i]->ldpc_dec);\n+\t\t/* Check if there are available space for further processing. */\n+\t\tif (unlikely((avail - cbs_in_tb < 0) ||\n+\t\t\t\t(cbs_in_tb == 0)))\n+\t\t\tbreak;\n+\t\tavail -= cbs_in_tb;\n+\n+\t\tret = enqueue_ldpc_dec_one_op_tb(q, ops[i],\n+\t\t\t\tenqueued_cbs, cbs_in_tb);\n+\t\tif (ret <= 0)\n+\t\t\tbreak;\n+\t\tenqueued_cbs += ret;\n+\t}\n+\n+\tacc_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n+\n+\t/* Update stats. */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\treturn i;\n+}\n+\n+/* Enqueue decode operations for ACC200 device in CB mode. */\n+static uint16_t\n+acc200_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tint32_t avail = acc_ring_avail_enq(q);\n+\tuint16_t i;\n+\tunion acc_dma_desc *desc;\n+\tint ret;\n+\tbool same_op = false;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\t/* Check if there are available space for further processing. */\n+\t\tif (unlikely(avail < 1)) {\n+\t\t\tacc_enqueue_ring_full(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t\tavail -= 1;\n+\n+\t\trte_bbdev_log(INFO, \"Op %d %d %d %d %d %d %d %d %d %d %d %d\\n\",\n+\t\t\ti, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index,\n+\t\t\tops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count,\n+\t\t\tops[i]->ldpc_dec.basegraph, ops[i]->ldpc_dec.z_c,\n+\t\t\tops[i]->ldpc_dec.n_cb, ops[i]->ldpc_dec.q_m,\n+\t\t\tops[i]->ldpc_dec.n_filler, ops[i]->ldpc_dec.cb_params.e,\n+\t\t\tsame_op);\n+\t\tret = enqueue_ldpc_dec_one_op_cb(q, ops[i], i, same_op);\n+\t\tif (ret < 0) {\n+\t\t\tacc_enqueue_invalid(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tif (unlikely(i == 0))\n+\t\treturn 0; /* Nothing to enqueue. */\n+\n+\t/* Set SDone in last CB in enqueued ops for CB mode. */\n+\tdesc = q->ring_addr + ((q->sw_ring_head + i - 1) & q->sw_ring_wrap_mask);\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\tacc_dma_enqueue(q, i, &q_data->queue_stats);\n+\n+\t/* Update stats. */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\treturn i;\n+}\n+\n+/* Enqueue decode operations for ACC200 device. 
*/\n+static uint16_t\n+acc200_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tint32_t aq_avail = acc_aq_avail(q_data, num);\n+\tif (unlikely((aq_avail <= 0) || (num == 0)))\n+\t\treturn 0;\n+\tif (ops[0]->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)\n+\t\treturn acc200_enqueue_ldpc_dec_tb(q_data, ops, num);\n+\telse\n+\t\treturn acc200_enqueue_ldpc_dec_cb(q_data, ops, num);\n+}\n+\n+\n+/* Dequeue one encode operations from ACC200 device in CB mode. */\n+static inline int\n+dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,\n+\t\tuint16_t *dequeued_ops, uint32_t *aq_dequeued, uint16_t *dequeued_descs)\n+{\n+\tunion acc_dma_desc *desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n+\tstruct rte_bbdev_enc_op *op;\n+\tint i;\n+\tstruct acc_ptrs *context_ptrs;\n+\tint desc_idx = ((q->sw_ring_tail + *dequeued_descs) & q->sw_ring_wrap_mask);\n+\n+\tdesc = q->ring_addr + desc_idx;\n+\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc,\n+\t\t\t__ATOMIC_RELAXED);\n+\n+\t/* Check fdone bit. */\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n+\t\treturn -1;\n+\n+\trsp.val = atom_desc.rsp.val;\n+\trte_bbdev_log_debug(\"Resp. desc %p: %x\", desc, rsp.val);\n+\n+\t/* Dequeue. */\n+\top = desc->req.op_addr;\n+\n+\t/* Clearing status, it will be set based on response. */\n+\top->status = 0;\n+\n+\top->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0);\n+\top->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\top->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\n+\tif (desc->req.last_desc_in_batch) {\n+\t\t(*aq_dequeued)++;\n+\t\tdesc->req.last_desc_in_batch = 0;\n+\t}\n+\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n+\tdesc->rsp.add_info_0 = 0; /* Reserved bits. */\n+\tdesc->rsp.add_info_1 = 0; /* Reserved bits. */\n+\n+\tref_op[0] = op;\n+\tcontext_ptrs = q->companion_ring_addr + desc_idx;\n+\tfor (i = 1 ; i < desc->req.numCBs; i++)\n+\t\tref_op[i] = context_ptrs->ptr[i].op_addr;\n+\n+\t/* One op was successfully dequeued. */\n+\t(*dequeued_descs)++;\n+\t*dequeued_ops += desc->req.numCBs;\n+\treturn desc->req.numCBs;\n+}\n+\n+/* Dequeue one LDPC encode operations from ACC200 device in TB mode.\n+ * That operation may cover multiple descriptors.\n+ */\n+static inline int\n+dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,\n+\t\tuint16_t *dequeued_ops, uint32_t *aq_dequeued,\n+\t\tuint16_t *dequeued_descs)\n+{\n+\tunion acc_dma_desc *desc, *last_desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n+\tstruct rte_bbdev_enc_op *op;\n+\tuint8_t i = 0;\n+\tuint16_t current_dequeued_descs = 0, descs_in_tb;\n+\n+\tdesc = q->ring_addr + ((q->sw_ring_tail + *dequeued_descs) & q->sw_ring_wrap_mask);\n+\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);\n+\n+\t/* Check fdone bit. */\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n+\t\treturn -1;\n+\n+\t/* Get number of CBs in dequeued TB. */\n+\tdescs_in_tb = desc->req.cbs_in_tb;\n+\t/* Get last CB */\n+\tlast_desc = q->ring_addr + ((q->sw_ring_tail + *dequeued_descs + descs_in_tb - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\t/* Check if last CB in TB is ready to dequeue (and thus\n+\t * the whole TB) - checking sdone bit. If not return.\n+\t */\n+\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);\n+\tif (!(atom_desc.rsp.val & ACC_SDONE))\n+\t\treturn -1;\n+\n+\t/* Dequeue. */\n+\top = desc->req.op_addr;\n+\n+\t/* Clearing status, it will be set based on response. 
*/\n+\top->status = 0;\n+\n+\twhile (i < descs_in_tb) {\n+\t\tdesc = q->ring_addr + ((q->sw_ring_tail + *dequeued_descs) & q->sw_ring_wrap_mask);\n+\t\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);\n+\t\trsp.val = atom_desc.rsp.val;\n+\t\trte_bbdev_log_debug(\"Resp. desc %p: %x\", desc, rsp.val);\n+\n+\t\top->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0);\n+\t\top->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\t\top->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\n+\t\tif (desc->req.last_desc_in_batch) {\n+\t\t\t(*aq_dequeued)++;\n+\t\t\tdesc->req.last_desc_in_batch = 0;\n+\t\t}\n+\t\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n+\t\tdesc->rsp.add_info_0 = 0;\n+\t\tdesc->rsp.add_info_1 = 0;\n+\t\t(*dequeued_descs)++;\n+\t\tcurrent_dequeued_descs++;\n+\t\ti++;\n+\t}\n+\n+\t*ref_op = op;\n+\t(*dequeued_ops)++;\n+\treturn current_dequeued_descs;\n+}\n+\n+/* Dequeue one decode operation from ACC200 device in CB mode. */\n+static inline int\n+dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct acc_queue *q, struct rte_bbdev_dec_op **ref_op,\n+\t\tuint16_t dequeued_cbs, uint32_t *aq_dequeued)\n+{\n+\tunion acc_dma_desc *desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n+\tstruct rte_bbdev_dec_op *op;\n+\n+\tdesc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs) & q->sw_ring_wrap_mask);\n+\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);\n+\n+\t/* Check fdone bit. */\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n+\t\treturn -1;\n+\n+\trsp.val = atom_desc.rsp.val;\n+\trte_bbdev_log_debug(\"Resp. desc %p: %x\\n\", desc, rsp.val);\n+\n+\t/* Dequeue. */\n+\top = desc->req.op_addr;\n+\n+\t/* Clearing status, it will be set based on response. */\n+\top->status = 0;\n+\top->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0);\n+\top->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\top->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\tif (op->status != 0) {\n+\t\t/* These errors are not expected. */\n+\t\tq_data->queue_stats.dequeue_err_count++;\n+\t}\n+\n+\t/* CRC invalid if error exists. */\n+\tif (!op->status)\n+\t\top->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;\n+\top->turbo_dec.iter_count = (uint8_t) rsp.iter_cnt;\n+\t/* Check if this is the last desc in batch (Atomic Queue). */\n+\tif (desc->req.last_desc_in_batch) {\n+\t\t(*aq_dequeued)++;\n+\t\tdesc->req.last_desc_in_batch = 0;\n+\t}\n+\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n+\tdesc->rsp.add_info_0 = 0;\n+\tdesc->rsp.add_info_1 = 0;\n+\t*ref_op = op;\n+\n+\t/* One CB (op) was successfully dequeued. */\n+\treturn 1;\n+}\n+\n+/* Dequeue one decode operations from ACC200 device in CB mode. */\n+static inline int\n+dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct acc_queue *q, struct rte_bbdev_dec_op **ref_op,\n+\t\tuint16_t dequeued_cbs, uint32_t *aq_dequeued)\n+{\n+\tunion acc_dma_desc *desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n+\tstruct rte_bbdev_dec_op *op;\n+\n+\tdesc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs) & q->sw_ring_wrap_mask);\n+\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);\n+\n+\t/* Check fdone bit. */\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n+\t\treturn -1;\n+\n+\trsp.val = atom_desc.rsp.val;\n+\trte_bbdev_log_debug(\"Resp. desc %p: %x %x %x\\n\", desc, rsp.val, desc->rsp.add_info_0,\n+\t\t\tdesc->rsp.add_info_1);\n+\n+\t/* Dequeue. 
*/\n+\top = desc->req.op_addr;\n+\n+\t/* Clearing status, it will be set based on response. */\n+\top->status = 0;\n+\top->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;\n+\top->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;\n+\top->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;\n+\tif (op->status != 0)\n+\t\tq_data->queue_stats.dequeue_err_count++;\n+\n+\top->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;\n+\tif (op->ldpc_dec.hard_output.length > 0 && !rsp.synd_ok)\n+\t\top->status |= 1 << RTE_BBDEV_SYNDROME_ERROR;\n+\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK)  ||\n+\t\t\tcheck_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_16_CHECK)) {\n+\t\tif (desc->rsp.add_info_1 != 0)\n+\t\t\top->status |= 1 << RTE_BBDEV_CRC_ERROR;\n+\t}\n+\n+\top->ldpc_dec.iter_count = (uint8_t) rsp.iter_cnt;\n+\n+\t/* Check if this is the last desc in batch (Atomic Queue). */\n+\tif (desc->req.last_desc_in_batch) {\n+\t\t(*aq_dequeued)++;\n+\t\tdesc->req.last_desc_in_batch = 0;\n+\t}\n+\n+\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n+\tdesc->rsp.add_info_0 = 0;\n+\tdesc->rsp.add_info_1 = 0;\n+\n+\t*ref_op = op;\n+\n+\t/* One CB (op) was successfully dequeued. */\n+\treturn 1;\n+}\n+\n+/* Dequeue one decode operations from ACC200 device in TB mode. */\n+static inline int\n+dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op,\n+\t\tuint16_t dequeued_cbs, uint32_t *aq_dequeued)\n+{\n+\tunion acc_dma_desc *desc, *last_desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n+\tstruct rte_bbdev_dec_op *op;\n+\tuint8_t cbs_in_tb = 1, cb_idx = 0;\n+\tuint32_t tb_crc_check = 0;\n+\n+\tdesc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs) & q->sw_ring_wrap_mask);\n+\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);\n+\n+\t/* Check fdone bit. */\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n+\t\treturn -1;\n+\n+\t/* Dequeue. */\n+\top = desc->req.op_addr;\n+\n+\t/* Get number of CBs in dequeued TB. */\n+\tcbs_in_tb = desc->req.cbs_in_tb;\n+\t/* Get last CB. */\n+\tlast_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs + cbs_in_tb - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\t/* Check if last CB in TB is ready to dequeue (and thus the whole TB) - checking sdone bit.\n+\t * If not return.\n+\t */\n+\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,\n+\t\t\t__ATOMIC_RELAXED);\n+\tif (!(atom_desc.rsp.val & ACC_SDONE))\n+\t\treturn -1;\n+\n+\t/* Clearing status, it will be set based on response. */\n+\top->status = 0;\n+\n+\t/* Read remaining CBs if exists. */\n+\twhile (cb_idx < cbs_in_tb) {\n+\t\tdesc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs) & q->sw_ring_wrap_mask);\n+\t\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);\n+\t\trsp.val = atom_desc.rsp.val;\n+\t\trte_bbdev_log_debug(\"Resp. desc %p: %x %x %x\", desc,\n+\t\t\t\trsp.val, desc->rsp.add_info_0,\n+\t\t\t\tdesc->rsp.add_info_1);\n+\n+\t\top->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0);\n+\t\top->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\t\top->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\n+\t\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK))\n+\t\t\ttb_crc_check ^= desc->rsp.add_info_1;\n+\n+\t\t/* CRC invalid if error exists. 
*/\n+\t\tif (!op->status)\n+\t\t\top->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR;\n+\t\top->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt,\n+\t\t\t\top->turbo_dec.iter_count);\n+\n+\t\t/* Check if this is the last desc in batch (Atomic Queue). */\n+\t\tif (desc->req.last_desc_in_batch) {\n+\t\t\t(*aq_dequeued)++;\n+\t\t\tdesc->req.last_desc_in_batch = 0;\n+\t\t}\n+\t\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n+\t\tdesc->rsp.add_info_0 = 0;\n+\t\tdesc->rsp.add_info_1 = 0;\n+\t\tdequeued_cbs++;\n+\t\tcb_idx++;\n+\t}\n+\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK)) {\n+\t\trte_bbdev_log_debug(\"TB-CRC Check %x\\n\", tb_crc_check);\n+\t\tif (tb_crc_check > 0)\n+\t\t\top->status |= 1 << RTE_BBDEV_CRC_ERROR;\n+\t}\n+\n+\t*ref_op = op;\n+\n+\treturn cb_idx;\n+}\n+\n+/* Dequeue LDPC encode operations from ACC200 device. */\n+static uint16_t\n+acc200_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tuint32_t avail = acc_ring_avail_deq(q);\n+\tuint32_t aq_dequeued = 0;\n+\tuint16_t i, dequeued_ops = 0, dequeued_descs = 0;\n+\tint ret, cbm;\n+\tstruct rte_bbdev_enc_op *op;\n+\tif (avail == 0)\n+\t\treturn 0;\n+\top = (q->ring_addr + (q->sw_ring_tail & q->sw_ring_wrap_mask))->req.op_addr;\n+\tcbm = op->ldpc_enc.code_block_mode;\n+\n+\tfor (i = 0; i < avail; i++) {\n+\t\tif (cbm == RTE_BBDEV_TRANSPORT_BLOCK)\n+\t\t\tret = dequeue_enc_one_op_tb(q, &ops[dequeued_ops],\n+\t\t\t\t\t&dequeued_ops, &aq_dequeued,\n+\t\t\t\t\t&dequeued_descs);\n+\t\telse\n+\t\t\tret = dequeue_enc_one_op_cb(q, &ops[dequeued_ops],\n+\t\t\t\t\t&dequeued_ops, &aq_dequeued,\n+\t\t\t\t\t&dequeued_descs);\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\t\tif (dequeued_ops >= num)\n+\t\t\tbreak;\n+\t}\n+\n+\tq->aq_dequeued += aq_dequeued;\n+\tq->sw_ring_tail += dequeued_descs;\n+\n+\t/* Update enqueue stats. */\n+\tq_data->queue_stats.dequeued_count += dequeued_ops;\n+\n+\treturn dequeued_ops;\n+}\n+\n+/* Dequeue decode operations from ACC200 device. */\n+static uint16_t\n+acc200_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tuint16_t dequeue_num;\n+\tuint32_t avail = acc_ring_avail_deq(q);\n+\tuint32_t aq_dequeued = 0;\n+\tuint16_t i;\n+\tuint16_t dequeued_cbs = 0;\n+\tstruct rte_bbdev_dec_op *op;\n+\tint ret;\n+\n+\tdequeue_num = RTE_MIN(avail, num);\n+\n+\tfor (i = 0; i < dequeue_num; ++i) {\n+\t\top = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask))->req.op_addr;\n+\t\tif (op->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)\n+\t\t\tret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,\n+\t\t\t\t\t&aq_dequeued);\n+\t\telse\n+\t\t\tret = dequeue_ldpc_dec_one_op_cb(\n+\t\t\t\t\tq_data, q, &ops[i], dequeued_cbs,\n+\t\t\t\t\t&aq_dequeued);\n+\n+\t\tif (ret <= 0)\n+\t\t\tbreak;\n+\t\tdequeued_cbs += ret;\n+\t}\n+\n+\tq->aq_dequeued += aq_dequeued;\n+\tq->sw_ring_tail += dequeued_cbs;\n+\n+\t/* Update enqueue stats. 
*/\n+\tq_data->queue_stats.dequeued_count += i;\n+\n+\treturn i;\n+}\n+\n+/* Initialization Function */\n static void\n acc200_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)\n {\n \tstruct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);\n \n \tdev->dev_ops = &acc200_bbdev_ops;\n+\tdev->enqueue_ldpc_enc_ops = acc200_enqueue_ldpc_enc;\n+\tdev->enqueue_ldpc_dec_ops = acc200_enqueue_ldpc_dec;\n+\tdev->dequeue_ldpc_enc_ops = acc200_dequeue_ldpc_enc;\n+\tdev->dequeue_ldpc_dec_ops = acc200_dequeue_ldpc_dec;\n \n \t((struct acc_device *) dev->data->dev_private)->pf_device =\n \t\t\t!strcmp(drv->driver.name,\n",
    "prefixes": [
        "v9",
        "08/14"
    ]
}
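
The PUT and PATCH methods advertised in the Allow header modify a patch; Patchwork restricts them to authenticated maintainers. A hedged sketch of a partial update in Python, assuming a valid Patchwork API token with maintainer rights on the project (the token value and the new state here are placeholders):

    import requests

    API_TOKEN = "0123456789abcdef"  # placeholder; issue a real token from your Patchwork user profile

    # PATCH changes only the fields named in the payload, e.g. the patch state
    # and the archival flag shown in the response above.
    resp = requests.patch(
        "https://patches.dpdk.org/api/patches/117654/",
        headers={"Authorization": "Token " + API_TOKEN},
        json={"state": "accepted", "archived": False},
    )
    resp.raise_for_status()
    print(resp.json()["state"])

A PUT request to the same URL performs a full update of the mutable fields instead of a partial one.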