get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.
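For reference, a minimal sketch of exercising these methods from Python with the requests library (an assumption; any HTTP client works). Reads are anonymous; writes go through Patchwork's token authentication, and the token below is a placeholder, not a real credential:

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 117655

# get: show a patch (read access needs no authentication).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["archived"])

# patch: partially update a patch. Changing fields such as "state" or
# "archived" requires maintainer rights on the project; a PUT to the
# same URL would instead send a full update.
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": "Token YOUR_API_TOKEN"},  # placeholder token
    json={"state": "superseded", "archived": True},
)
print(resp.status_code)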

GET /api/patches/117655/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 117655,
    "url": "https://patches.dpdk.org/api/patches/117655/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20221007213851.31524-10-nicolas.chautru@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221007213851.31524-10-nicolas.chautru@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221007213851.31524-10-nicolas.chautru@intel.com",
    "date": "2022-10-07T21:38:46",
    "name": "[v9,09/14] baseband/acc: add LTE processing functions",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a057fc7145ba1d8f1bf1cfc28626bada68d48e6a",
    "submitter": {
        "id": 1314,
        "url": "https://patches.dpdk.org/api/people/1314/?format=api",
        "name": "Chautru, Nicolas",
        "email": "nicolas.chautru@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "https://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20221007213851.31524-10-nicolas.chautru@intel.com/mbox/",
    "series": [
        {
            "id": 25041,
            "url": "https://patches.dpdk.org/api/series/25041/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=25041",
            "date": "2022-10-07T21:38:37",
            "name": "bbdev ACC200 PMD",
            "version": 9,
            "mbox": "https://patches.dpdk.org/series/25041/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/117655/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/117655/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 22BE2A0543;\n\tFri,  7 Oct 2022 23:40:17 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id A388642B9D;\n\tFri,  7 Oct 2022 23:39:19 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id 28EA442B6C\n for <dev@dpdk.org>; Fri,  7 Oct 2022 23:39:13 +0200 (CEST)",
            "from orsmga007.jf.intel.com ([10.7.209.58])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 07 Oct 2022 14:39:12 -0700",
            "from unknown (HELO icx-npg-scs1-cp1.localdomain) ([10.233.180.245])\n by orsmga007.jf.intel.com with ESMTP; 07 Oct 2022 14:39:11 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1665178753; x=1696714753;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=yZcGItPefw7Y60mEUNhxe1rsCEVxWmYHAvnN6rjITkA=;\n b=g+tS0wxUaiZL9IOCW8ed6qO3ocepy3HmjHE+4eHUAth16s29Sk9k6Kq1\n hpsjYcIfMsBPZy7Wfwn1r3iY0VklWaBXbE23A12ldAk9h2tyxr9KjgHR1\n q066O6XACE8AT6x3IB1FXnlqxGdwn92QwC3GVKTOZK03BYn9K2WZVIUSF\n dX9wuJMG5SGV5xFlw4cRyC53T5W0dSsQd4IyvIp/3/iQkWeYWZI9S4R1z\n c2o97YNbCYzTs6D9RmAytOmONy+HirzBDav7czBhqKKBn2951GiG/urvJ\n zuQOrPKj7e7esY87//rNO6dzUKYfZsGAe+Sg+ARSmajjp/ki3KGSl6+pp A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10493\"; a=\"291118509\"",
            "E=Sophos;i=\"5.95,167,1661842800\"; d=\"scan'208\";a=\"291118509\"",
            "E=McAfee;i=\"6500,9779,10493\"; a=\"620388468\"",
            "E=Sophos;i=\"5.95,167,1661842800\"; d=\"scan'208\";a=\"620388468\""
        ],
        "X-ExtLoop1": "1",
        "From": "Nicolas Chautru <nicolas.chautru@intel.com>",
        "To": "dev@dpdk.org,\n\tgakhil@marvell.com,\n\tmaxime.coquelin@redhat.com",
        "Cc": "trix@redhat.com, mdr@ashroe.eu, bruce.richardson@intel.com,\n hemant.agrawal@nxp.com, david.marchand@redhat.com,\n stephen@networkplumber.org, hernan.vargas@intel.com,\n Nic Chautru <nicolas.chautru@intel.com>",
        "Subject": "[PATCH v9 09/14] baseband/acc: add LTE processing functions",
        "Date": "Fri,  7 Oct 2022 14:38:46 -0700",
        "Message-Id": "<20221007213851.31524-10-nicolas.chautru@intel.com>",
        "X-Mailer": "git-send-email 2.37.1",
        "In-Reply-To": "<20221007213851.31524-1-nicolas.chautru@intel.com>",
        "References": "<20221007213851.31524-1-nicolas.chautru@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Nic Chautru <nicolas.chautru@intel.com>\n\nAdded functions and capability for 4G FEC\n\nSigned-off-by: Nic Chautru <nicolas.chautru@intel.com>\n---\n drivers/baseband/acc/rte_acc200_pmd.c | 851 +++++++++++++++++++++++++-\n 1 file changed, 846 insertions(+), 5 deletions(-)",
    "diff": "diff --git a/drivers/baseband/acc/rte_acc200_pmd.c b/drivers/baseband/acc/rte_acc200_pmd.c\nindex 0f018b19ac..058e38c3ec 100644\n--- a/drivers/baseband/acc/rte_acc200_pmd.c\n+++ b/drivers/baseband/acc/rte_acc200_pmd.c\n@@ -649,6 +649,46 @@ acc200_dev_info_get(struct rte_bbdev *dev,\n \tstruct acc_device *d = dev->data->dev_private;\n \tint i;\n \tstatic const struct rte_bbdev_op_cap bbdev_capabilities[] = {\n+\t\t{\n+\t\t\t.type = RTE_BBDEV_OP_TURBO_DEC,\n+\t\t\t.cap.turbo_dec = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\tRTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |\n+\t\t\t\t\tRTE_BBDEV_TURBO_CRC_TYPE_24B |\n+\t\t\t\t\tRTE_BBDEV_TURBO_EQUALIZER |\n+\t\t\t\t\tRTE_BBDEV_TURBO_SOFT_OUT_SATURATE |\n+\t\t\t\t\tRTE_BBDEV_TURBO_HALF_ITERATION_EVEN |\n+\t\t\t\t\tRTE_BBDEV_TURBO_CONTINUE_CRC_MATCH |\n+\t\t\t\t\tRTE_BBDEV_TURBO_SOFT_OUTPUT |\n+\t\t\t\t\tRTE_BBDEV_TURBO_EARLY_TERMINATION |\n+\t\t\t\t\tRTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |\n+\t\t\t\t\tRTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT |\n+\t\t\t\t\tRTE_BBDEV_TURBO_MAP_DEC |\n+\t\t\t\t\tRTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |\n+\t\t\t\t\tRTE_BBDEV_TURBO_DEC_SCATTER_GATHER,\n+\t\t\t\t.max_llr_modulus = INT8_MAX,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_hard_out =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_soft_out =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_BBDEV_OP_TURBO_ENC,\n+\t\t\t.cap.turbo_enc = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\tRTE_BBDEV_TURBO_CRC_24B_ATTACH |\n+\t\t\t\t\tRTE_BBDEV_TURBO_RV_INDEX_BYPASS |\n+\t\t\t\t\tRTE_BBDEV_TURBO_RATE_MATCH |\n+\t\t\t\t\tRTE_BBDEV_TURBO_ENC_SCATTER_GATHER,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_dst =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n \t\t{\n \t\t\t.type   = RTE_BBDEV_OP_LDPC_ENC,\n \t\t\t.cap.ldpc_enc = {\n@@ -700,15 +740,17 @@ acc200_dev_info_get(struct rte_bbdev *dev,\n \n \t/* Exposed number of queues. */\n \tdev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;\n-\tdev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = 0;\n-\tdev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = 0;\n+\tdev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = d->acc_conf.q_ul_4g.num_aqs_per_groups *\n+\t\t\td->acc_conf.q_ul_4g.num_qgroups;\n+\tdev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = d->acc_conf.q_dl_4g.num_aqs_per_groups *\n+\t\t\td->acc_conf.q_dl_4g.num_qgroups;\n \tdev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = d->acc_conf.q_ul_5g.num_aqs_per_groups *\n \t\t\td->acc_conf.q_ul_5g.num_qgroups;\n \tdev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = d->acc_conf.q_dl_5g.num_aqs_per_groups *\n \t\t\td->acc_conf.q_dl_5g.num_qgroups;\n \tdev_info->num_queues[RTE_BBDEV_OP_FFT] = 0;\n-\tdev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = 0;\n-\tdev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = 0;\n+\tdev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = d->acc_conf.q_ul_4g.num_qgroups;\n+\tdev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = d->acc_conf.q_dl_4g.num_qgroups;\n \tdev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = d->acc_conf.q_ul_5g.num_qgroups;\n \tdev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = d->acc_conf.q_dl_5g.num_qgroups;\n \tdev_info->queue_priority[RTE_BBDEV_OP_FFT] = 0;\n@@ -753,6 +795,70 @@ static struct rte_pci_id pci_id_acc200_vf_map[] = {\n \t{.device_id = 0},\n };\n \n+/* Fill in a frame control word for turbo decoding. 
*/\n+static inline void\n+acc200_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc_fcw_td *fcw)\n+{\n+\tfcw->fcw_ver = 1;\n+\tfcw->num_maps = ACC_FCW_TD_AUTOMAP;\n+\tfcw->bypass_sb_deint = !check_bit(op->turbo_dec.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE);\n+\tif (op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {\n+\t\t/* FIXME for TB block */\n+\t\tfcw->k_pos = op->turbo_dec.tb_params.k_pos;\n+\t\tfcw->k_neg = op->turbo_dec.tb_params.k_neg;\n+\t} else {\n+\t\tfcw->k_pos = op->turbo_dec.cb_params.k;\n+\t\tfcw->k_neg = op->turbo_dec.cb_params.k;\n+\t}\n+\tfcw->c = 1;\n+\tfcw->c_neg = 1;\n+\tif (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT)) {\n+\t\tfcw->soft_output_en = 1;\n+\t\tfcw->sw_soft_out_dis = 0;\n+\t\tfcw->sw_et_cont = check_bit(op->turbo_dec.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_CONTINUE_CRC_MATCH);\n+\t\tfcw->sw_soft_out_saturation = check_bit(op->turbo_dec.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_SOFT_OUT_SATURATE);\n+\t\tif (check_bit(op->turbo_dec.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_EQUALIZER)) {\n+\t\t\tfcw->bypass_teq = 0;\n+\t\t\tfcw->ea = op->turbo_dec.cb_params.e;\n+\t\t\tfcw->eb = op->turbo_dec.cb_params.e;\n+\t\t\tif (op->turbo_dec.rv_index == 0)\n+\t\t\t\tfcw->k0_start_col = ACC_FCW_TD_RVIDX_0;\n+\t\t\telse if (op->turbo_dec.rv_index == 1)\n+\t\t\t\tfcw->k0_start_col = ACC_FCW_TD_RVIDX_1;\n+\t\t\telse if (op->turbo_dec.rv_index == 2)\n+\t\t\t\tfcw->k0_start_col = ACC_FCW_TD_RVIDX_2;\n+\t\t\telse\n+\t\t\t\tfcw->k0_start_col = ACC_FCW_TD_RVIDX_3;\n+\t\t} else {\n+\t\t\tfcw->bypass_teq = 1;\n+\t\t\tfcw->eb = 64; /* avoid undefined value */\n+\t\t}\n+\t} else {\n+\t\tfcw->soft_output_en = 0;\n+\t\tfcw->sw_soft_out_dis = 1;\n+\t\tfcw->bypass_teq = 0;\n+\t}\n+\n+\tfcw->code_block_mode = 1; /* FIXME */\n+\tfcw->turbo_crc_type = check_bit(op->turbo_dec.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_CRC_TYPE_24B);\n+\n+\tfcw->ext_td_cold_reg_en = 1;\n+\tfcw->raw_decoder_input_on = 0;\n+\tfcw->max_iter = RTE_MAX((uint8_t) op->turbo_dec.iter_max, 2);\n+\tfcw->min_iter = 2;\n+\tfcw->half_iter_on = !check_bit(op->turbo_dec.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_HALF_ITERATION_EVEN);\n+\n+\tfcw->early_stop_en = check_bit(op->turbo_dec.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_EARLY_TERMINATION) & !fcw->soft_output_en;\n+\tfcw->ext_scale = 0xF;\n+}\n+\n /* Fill in a frame control word for LDPC decoding. */\n static inline void\n acc200_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw,\n@@ -876,7 +982,206 @@ acc200_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw,\n }\n \n static inline int\n-acc200_dma_desc_ld_fill(struct rte_bbdev_dec_op *op, struct acc_dma_req_desc *desc,\n+acc200_dma_desc_te_fill(struct rte_bbdev_enc_op *op,\n+\t\tstruct acc_dma_req_desc *desc, struct rte_mbuf **input,\n+\t\tstruct rte_mbuf *output, uint32_t *in_offset,\n+\t\tuint32_t *out_offset, uint32_t *out_length,\n+\t\tuint32_t *mbuf_total_left, uint32_t *seg_total_left, uint8_t r)\n+{\n+\tint next_triplet = 1; /* FCW already done. */\n+\tuint32_t e, ea, eb, length;\n+\tuint16_t k, k_neg, k_pos;\n+\tuint8_t cab, c_neg;\n+\n+\tdesc->word0 = ACC_DMA_DESC_TYPE;\n+\tdesc->word1 = 0; /**< Timestamp could be disabled. 
*/\n+\tdesc->word2 = 0;\n+\tdesc->word3 = 0;\n+\tdesc->numCBs = 1;\n+\n+\tif (op->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {\n+\t\tea = op->turbo_enc.tb_params.ea;\n+\t\teb = op->turbo_enc.tb_params.eb;\n+\t\tcab = op->turbo_enc.tb_params.cab;\n+\t\tk_neg = op->turbo_enc.tb_params.k_neg;\n+\t\tk_pos = op->turbo_enc.tb_params.k_pos;\n+\t\tc_neg = op->turbo_enc.tb_params.c_neg;\n+\t\te = (r < cab) ? ea : eb;\n+\t\tk = (r < c_neg) ? k_neg : k_pos;\n+\t} else {\n+\t\te = op->turbo_enc.cb_params.e;\n+\t\tk = op->turbo_enc.cb_params.k;\n+\t}\n+\n+\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))\n+\t\tlength = (k - 24) >> 3;\n+\telse\n+\t\tlength = k >> 3;\n+\n+\tif (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < length))) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\t*mbuf_total_left, length);\n+\t\treturn -1;\n+\t}\n+\n+\tnext_triplet = acc_dma_fill_blk_type_in(desc, input, in_offset,\n+\t\t\tlength, seg_total_left, next_triplet,\n+\t\t\tcheck_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_ENC_SCATTER_GATHER));\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->m2dlen = next_triplet;\n+\t*mbuf_total_left -= length;\n+\n+\t/* Set output length. */\n+\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_RATE_MATCH))\n+\t\t/* Integer round up division by 8. */\n+\t\t*out_length = (e + 7) >> 3;\n+\telse\n+\t\t*out_length = (k >> 3) * 3 + 2;\n+\n+\tnext_triplet = acc_dma_fill_blk_type(desc, output, *out_offset,\n+\t\t\t*out_length, next_triplet, ACC_DMA_BLKID_OUT_ENC);\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\top->turbo_enc.output.length += *out_length;\n+\t*out_offset += *out_length;\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->d2mlen = next_triplet - desc->m2dlen;\n+\n+\tdesc->op_addr = op;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+acc200_dma_desc_td_fill(struct rte_bbdev_dec_op *op,\n+\t\tstruct acc_dma_req_desc *desc, struct rte_mbuf **input,\n+\t\tstruct rte_mbuf *h_output, struct rte_mbuf *s_output,\n+\t\tuint32_t *in_offset, uint32_t *h_out_offset,\n+\t\tuint32_t *s_out_offset, uint32_t *h_out_length,\n+\t\tuint32_t *s_out_length, uint32_t *mbuf_total_left,\n+\t\tuint32_t *seg_total_left, uint8_t r)\n+{\n+\tint next_triplet = 1; /* FCW already done. */\n+\tuint16_t k;\n+\tuint16_t crc24_overlap = 0;\n+\tuint32_t e, kw;\n+\n+\tdesc->word0 = ACC_DMA_DESC_TYPE;\n+\tdesc->word1 = 0; /**< Timestamp could be disabled. */\n+\tdesc->word2 = 0;\n+\tdesc->word3 = 0;\n+\tdesc->numCBs = 1;\n+\n+\tif (op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {\n+\t\tk = (r < op->turbo_dec.tb_params.c_neg)\n+\t\t\t? op->turbo_dec.tb_params.k_neg\n+\t\t\t: op->turbo_dec.tb_params.k_pos;\n+\t\te = (r < op->turbo_dec.tb_params.cab)\n+\t\t\t? 
op->turbo_dec.tb_params.ea\n+\t\t\t: op->turbo_dec.tb_params.eb;\n+\t} else {\n+\t\tk = op->turbo_dec.cb_params.k;\n+\t\te = op->turbo_dec.cb_params.e;\n+\t}\n+\n+\tif ((op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)\n+\t\t&& !check_bit(op->turbo_dec.op_flags,\n+\t\tRTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))\n+\t\tcrc24_overlap = 24;\n+\n+\t/* Calculates circular buffer size.\n+\t * According to 3gpp 36.212 section 5.1.4.2\n+\t *   Kw = 3 * Kpi,\n+\t * where:\n+\t *   Kpi = nCol * nRow\n+\t * where nCol is 32 and nRow can be calculated from:\n+\t *   D =< nCol * nRow\n+\t * where D is the size of each output from turbo encoder block (k + 4).\n+\t */\n+\tkw = RTE_ALIGN_CEIL(k + 4, 32) * 3;\n+\n+\tif (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < kw))) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\t*mbuf_total_left, kw);\n+\t\treturn -1;\n+\t}\n+\n+\tnext_triplet = acc_dma_fill_blk_type_in(desc, input, in_offset, kw,\n+\t\t\tseg_total_left, next_triplet,\n+\t\t\tcheck_bit(op->turbo_dec.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_DEC_SCATTER_GATHER));\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->m2dlen = next_triplet;\n+\t*mbuf_total_left -= kw;\n+\t*h_out_length = ((k - crc24_overlap) >> 3);\n+\tnext_triplet = acc_dma_fill_blk_type(\n+\t\t\tdesc, h_output, *h_out_offset,\n+\t\t\t*h_out_length, next_triplet, ACC_DMA_BLKID_OUT_HARD);\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\n+\top->turbo_dec.hard_output.length += *h_out_length;\n+\t*h_out_offset += *h_out_length;\n+\n+\t/* Soft output. 
*/\n+\tif (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT)) {\n+\t\tif (op->turbo_dec.soft_output.data == 0) {\n+\t\t\trte_bbdev_log(ERR, \"Soft output is not defined\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (check_bit(op->turbo_dec.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_EQUALIZER))\n+\t\t\t*s_out_length = e;\n+\t\telse\n+\t\t\t*s_out_length = (k * 3) + 12;\n+\n+\t\tnext_triplet = acc_dma_fill_blk_type(desc, s_output,\n+\t\t\t\t*s_out_offset, *s_out_length, next_triplet,\n+\t\t\t\tACC_DMA_BLKID_OUT_SOFT);\n+\t\tif (unlikely(next_triplet < 0)) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\t\top);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\top->turbo_dec.soft_output.length += *s_out_length;\n+\t\t*s_out_offset += *s_out_length;\n+\t}\n+\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->d2mlen = next_triplet - desc->m2dlen;\n+\n+\tdesc->op_addr = op;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+acc200_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n+\t\tstruct acc_dma_req_desc *desc,\n \t\tstruct rte_mbuf **input, struct rte_mbuf *h_output,\n \t\tuint32_t *in_offset, uint32_t *h_out_offset,\n \t\tuint32_t *h_out_length, uint32_t *mbuf_total_left,\n@@ -1034,6 +1339,49 @@ acc200_dma_desc_ld_update(struct rte_bbdev_dec_op *op,\n \tdesc->op_addr = op;\n }\n \n+/* Enqueue one encode operations for ACC200 device in CB mode */\n+static inline int\n+enqueue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t total_enqueued_cbs)\n+{\n+\tunion acc_dma_desc *desc = NULL;\n+\tint ret;\n+\tuint32_t in_offset, out_offset, out_length, mbuf_total_left,\n+\t\tseg_total_left;\n+\tstruct rte_mbuf *input, *output_head, *output;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tacc_fcw_te_fill(op, &desc->req.fcw_te);\n+\n+\tinput = op->turbo_enc.input.data;\n+\toutput_head = output = op->turbo_enc.output.data;\n+\tin_offset = op->turbo_enc.input.offset;\n+\tout_offset = op->turbo_enc.output.offset;\n+\tout_length = 0;\n+\tmbuf_total_left = op->turbo_enc.input.length;\n+\tseg_total_left = rte_pktmbuf_data_len(op->turbo_enc.input.data)\n+\t\t\t- in_offset;\n+\n+\tret = acc200_dma_desc_te_fill(op, &desc->req, &input, output,\n+\t\t\t&in_offset, &out_offset, &out_length, &mbuf_total_left,\n+\t\t\t&seg_total_left, 0);\n+\n+\tif (unlikely(ret < 0))\n+\t\treturn ret;\n+\n+\tmbuf_append(output_head, output, out_length);\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\trte_memdump(stderr, \"FCW\", &desc->req.fcw_te,\n+\t\t\tsizeof(desc->req.fcw_te) - 8);\n+\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\t/* One CB (one op) was successfully prepared to enqueue */\n+\treturn 1;\n+}\n+\n /* Enqueue one encode operations for ACC200 device in CB mode\n  * multiplexed on the same descriptor.\n  */\n@@ -1146,6 +1494,78 @@ enqueue_ldpc_enc_part_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n \n }\n \n+/* Enqueue one encode operations for ACC200 device in TB mode. 
*/\n+static inline int\n+enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t total_enqueued_cbs, uint8_t cbs_in_tb)\n+{\n+\tunion acc_dma_desc *desc = NULL;\n+\tint ret;\n+\tuint8_t r, c;\n+\tuint32_t in_offset, out_offset, out_length, mbuf_total_left,\n+\t\tseg_total_left;\n+\tstruct rte_mbuf *input, *output_head, *output;\n+\tuint16_t current_enqueued_cbs = 0;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tuint64_t fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;\n+\tacc_fcw_te_fill(op, &desc->req.fcw_te);\n+\n+\tinput = op->turbo_enc.input.data;\n+\toutput_head = output = op->turbo_enc.output.data;\n+\tin_offset = op->turbo_enc.input.offset;\n+\tout_offset = op->turbo_enc.output.offset;\n+\tout_length = 0;\n+\tmbuf_total_left = op->turbo_enc.input.length;\n+\n+\tc = op->turbo_enc.tb_params.c;\n+\tr = op->turbo_enc.tb_params.r;\n+\n+\twhile (mbuf_total_left > 0 && r < c) {\n+\t\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n+\t\t/* Set up DMA descriptor */\n+\t\tdesc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t\t& q->sw_ring_wrap_mask);\n+\t\tdesc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;\n+\t\tdesc->req.data_ptrs[0].blen = ACC_FCW_TE_BLEN;\n+\n+\t\tret = acc200_dma_desc_te_fill(op, &desc->req, &input, output,\n+\t\t\t\t&in_offset, &out_offset, &out_length,\n+\t\t\t\t&mbuf_total_left, &seg_total_left, r);\n+\t\tif (unlikely(ret < 0))\n+\t\t\treturn ret;\n+\t\tmbuf_append(output_head, output, out_length);\n+\n+\t\t/* Set total number of CBs in TB */\n+\t\tdesc->req.cbs_in_tb = cbs_in_tb;\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\trte_memdump(stderr, \"FCW\", &desc->req.fcw_te,\n+\t\t\t\tsizeof(desc->req.fcw_te) - 8);\n+\t\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\n+\t\tif (seg_total_left == 0) {\n+\t\t\t/* Go to the next mbuf */\n+\t\t\tinput = input->next;\n+\t\t\tin_offset = 0;\n+\t\t\toutput = output->next;\n+\t\t\tout_offset = 0;\n+\t\t}\n+\n+\t\ttotal_enqueued_cbs++;\n+\t\tcurrent_enqueued_cbs++;\n+\t\tr++;\n+\t}\n+\n+\t/* Set SDone on last CB descriptor for TB mode. */\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\treturn current_enqueued_cbs;\n+}\n+\n /* Enqueue one encode operations for ACC200 device in TB mode.\n  * returns the number of descs used.\n  */\n@@ -1213,6 +1633,62 @@ enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n \n /** Enqueue one decode operations for ACC200 device in CB mode. 
*/\n static inline int\n+enqueue_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n+\t\tuint16_t total_enqueued_cbs)\n+{\n+\tunion acc_dma_desc *desc = NULL;\n+\tint ret;\n+\tuint32_t in_offset, h_out_offset, s_out_offset, s_out_length,\n+\t\th_out_length, mbuf_total_left, seg_total_left;\n+\tstruct rte_mbuf *input, *h_output_head, *h_output,\n+\t\t*s_output_head, *s_output;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tacc200_fcw_td_fill(op, &desc->req.fcw_td);\n+\n+\tinput = op->turbo_dec.input.data;\n+\th_output_head = h_output = op->turbo_dec.hard_output.data;\n+\ts_output_head = s_output = op->turbo_dec.soft_output.data;\n+\tin_offset = op->turbo_dec.input.offset;\n+\th_out_offset = op->turbo_dec.hard_output.offset;\n+\ts_out_offset = op->turbo_dec.soft_output.offset;\n+\th_out_length = s_out_length = 0;\n+\tmbuf_total_left = op->turbo_dec.input.length;\n+\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n+\n+\t/* Set up DMA descriptor */\n+\tdesc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\n+\tret = acc200_dma_desc_td_fill(op, &desc->req, &input, h_output,\n+\t\t\ts_output, &in_offset, &h_out_offset, &s_out_offset,\n+\t\t\t&h_out_length, &s_out_length, &mbuf_total_left,\n+\t\t\t&seg_total_left, 0);\n+\n+\tif (unlikely(ret < 0))\n+\t\treturn ret;\n+\n+\t/* Hard output */\n+\tmbuf_append(h_output_head, h_output, h_out_length);\n+\n+\t/* Soft output */\n+\tif (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT))\n+\t\tmbuf_append(s_output_head, s_output, s_out_length);\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\trte_memdump(stderr, \"FCW\", &desc->req.fcw_td,\n+\t\t\tsizeof(desc->req.fcw_td));\n+\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\n+\t/* One CB (one op) was successfully prepared to enqueue */\n+\treturn 1;\n+}\n+\n+/** Enqueue one decode operations for ACC200 device in CB mode */\n+static inline int\n enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \t\tuint16_t total_enqueued_cbs, bool same_op)\n {\n@@ -1395,6 +1871,139 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \treturn current_enqueued_cbs;\n }\n \n+/* Enqueue one decode operations for ACC200 device in TB mode */\n+static inline int\n+enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n+\t\tuint16_t total_enqueued_cbs, uint8_t cbs_in_tb)\n+{\n+\tunion acc_dma_desc *desc = NULL;\n+\tint ret;\n+\tuint8_t r, c;\n+\tuint32_t in_offset, h_out_offset, s_out_offset, s_out_length,\n+\t\th_out_length, mbuf_total_left, seg_total_left;\n+\tstruct rte_mbuf *input, *h_output_head, *h_output,\n+\t\t*s_output_head, *s_output;\n+\tuint16_t current_enqueued_cbs = 0;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tuint64_t fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;\n+\tacc200_fcw_td_fill(op, &desc->req.fcw_td);\n+\n+\tinput = op->turbo_dec.input.data;\n+\th_output_head = h_output = op->turbo_dec.hard_output.data;\n+\ts_output_head = s_output = op->turbo_dec.soft_output.data;\n+\tin_offset = op->turbo_dec.input.offset;\n+\th_out_offset = op->turbo_dec.hard_output.offset;\n+\ts_out_offset = op->turbo_dec.soft_output.offset;\n+\th_out_length = s_out_length = 0;\n+\tmbuf_total_left = op->turbo_dec.input.length;\n+\tc = op->turbo_dec.tb_params.c;\n+\tr = 
op->turbo_dec.tb_params.r;\n+\n+\twhile (mbuf_total_left > 0 && r < c) {\n+\n+\t\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n+\n+\t\t/* Set up DMA descriptor */\n+\t\tdesc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t\t& q->sw_ring_wrap_mask);\n+\t\tdesc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;\n+\t\tdesc->req.data_ptrs[0].blen = ACC_FCW_TD_BLEN;\n+\t\tret = acc200_dma_desc_td_fill(op, &desc->req, &input,\n+\t\t\t\th_output, s_output, &in_offset, &h_out_offset,\n+\t\t\t\t&s_out_offset, &h_out_length, &s_out_length,\n+\t\t\t\t&mbuf_total_left, &seg_total_left, r);\n+\n+\t\tif (unlikely(ret < 0))\n+\t\t\treturn ret;\n+\n+\t\t/* Hard output */\n+\t\tmbuf_append(h_output_head, h_output, h_out_length);\n+\n+\t\t/* Soft output */\n+\t\tif (check_bit(op->turbo_dec.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_SOFT_OUTPUT))\n+\t\t\tmbuf_append(s_output_head, s_output, s_out_length);\n+\n+\t\t/* Set total number of CBs in TB */\n+\t\tdesc->req.cbs_in_tb = cbs_in_tb;\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\trte_memdump(stderr, \"FCW\", &desc->req.fcw_td,\n+\t\t\t\tsizeof(desc->req.fcw_td) - 8);\n+\t\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\n+\t\tif (seg_total_left == 0) {\n+\t\t\t/* Go to the next mbuf */\n+\t\t\tinput = input->next;\n+\t\t\tin_offset = 0;\n+\t\t\th_output = h_output->next;\n+\t\t\th_out_offset = 0;\n+\n+\t\t\tif (check_bit(op->turbo_dec.op_flags,\n+\t\t\t\t\tRTE_BBDEV_TURBO_SOFT_OUTPUT)) {\n+\t\t\t\ts_output = s_output->next;\n+\t\t\t\ts_out_offset = 0;\n+\t\t\t}\n+\t\t}\n+\n+\t\ttotal_enqueued_cbs++;\n+\t\tcurrent_enqueued_cbs++;\n+\t\tr++;\n+\t}\n+\n+\t/* Set SDone on last CB descriptor for TB mode */\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\treturn current_enqueued_cbs;\n+}\n+\n+/* Enqueue encode operations for ACC200 device in CB mode. */\n+static uint16_t\n+acc200_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tint32_t avail = acc_ring_avail_enq(q);\n+\tuint16_t i;\n+\tunion acc_dma_desc *desc;\n+\tint ret;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\t/* Check if there are available space for further processing */\n+\t\tif (unlikely(avail - 1 < 0)) {\n+\t\t\tacc_enqueue_ring_full(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t\tavail -= 1;\n+\n+\t\tret = enqueue_enc_one_op_cb(q, ops[i], i);\n+\t\tif (ret < 0) {\n+\t\t\tacc_enqueue_invalid(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tif (unlikely(i == 0))\n+\t\treturn 0; /* Nothing to enqueue */\n+\n+\t/* Set SDone in last CB in enqueued ops for CB mode*/\n+\tdesc = q->ring_addr + ((q->sw_ring_head + i - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\tacc_dma_enqueue(q, i, &q_data->queue_stats);\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\treturn i;\n+}\n+\n /** Enqueue encode operations for ACC200 device in CB mode. */\n static inline uint16_t\n acc200_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,\n@@ -1442,6 +2051,45 @@ acc200_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,\n \treturn i;\n }\n \n+/* Enqueue encode operations for ACC200 device in TB mode. 
*/\n+static uint16_t\n+acc200_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tint32_t avail = acc_ring_avail_enq(q);\n+\tuint16_t i, enqueued_cbs = 0;\n+\tuint8_t cbs_in_tb;\n+\tint ret;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\tcbs_in_tb = get_num_cbs_in_tb_enc(&ops[i]->turbo_enc);\n+\t\t/* Check if there are available space for further processing */\n+\t\tif (unlikely((avail - cbs_in_tb < 0) || (cbs_in_tb == 0))) {\n+\t\t\tacc_enqueue_ring_full(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t\tavail -= cbs_in_tb;\n+\n+\t\tret = enqueue_enc_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);\n+\t\tif (ret <= 0) {\n+\t\t\tacc_enqueue_invalid(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t\tenqueued_cbs += ret;\n+\t}\n+\tif (unlikely(enqueued_cbs == 0))\n+\t\treturn 0; /* Nothing to enqueue */\n+\n+\tacc_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n+}\n+\n /* Enqueue LDPC encode operations for ACC200 device in TB mode. */\n static uint16_t\n acc200_enqueue_ldpc_enc_tb(struct rte_bbdev_queue_data *q_data,\n@@ -1481,6 +2129,20 @@ acc200_enqueue_ldpc_enc_tb(struct rte_bbdev_queue_data *q_data,\n \treturn i;\n }\n \n+/* Enqueue encode operations for ACC200 device. */\n+static uint16_t\n+acc200_enqueue_enc(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tint32_t aq_avail = acc_aq_avail(q_data, num);\n+\tif (unlikely((aq_avail <= 0) || (num == 0)))\n+\t\treturn 0;\n+\tif (ops[0]->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)\n+\t\treturn acc200_enqueue_enc_tb(q_data, ops, num);\n+\telse\n+\t\treturn acc200_enqueue_enc_cb(q_data, ops, num);\n+}\n+\n /* Enqueue encode operations for ACC200 device. */\n static uint16_t\n acc200_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n@@ -1495,6 +2157,47 @@ acc200_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n \t\treturn acc200_enqueue_ldpc_enc_cb(q_data, ops, num);\n }\n \n+\n+/* Enqueue decode operations for ACC200 device in CB mode. */\n+static uint16_t\n+acc200_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tint32_t avail = acc_ring_avail_enq(q);\n+\tuint16_t i;\n+\tunion acc_dma_desc *desc;\n+\tint ret;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\t/* Check if there are available space for further processing. */\n+\t\tif (unlikely(avail - 1 < 0))\n+\t\t\tbreak;\n+\t\tavail -= 1;\n+\n+\t\tret = enqueue_dec_one_op_cb(q, ops[i], i);\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (unlikely(i == 0))\n+\t\treturn 0; /* Nothing to enqueue. */\n+\n+\t/* Set SDone in last CB in enqueued ops for CB mode. */\n+\tdesc = q->ring_addr + ((q->sw_ring_head + i - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\tacc_dma_enqueue(q, i, &q_data->queue_stats);\n+\n+\t/* Update stats. */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n+}\n+\n /* Enqueue decode operations for ACC200 device in TB mode. 
*/\n static uint16_t\n acc200_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,\n@@ -1579,6 +2282,58 @@ acc200_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,\n \treturn i;\n }\n \n+\n+/* Enqueue decode operations for ACC200 device in TB mode */\n+static uint16_t\n+acc200_enqueue_dec_tb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tint32_t avail = acc_ring_avail_enq(q);\n+\tuint16_t i, enqueued_cbs = 0;\n+\tuint8_t cbs_in_tb;\n+\tint ret;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\tcbs_in_tb = get_num_cbs_in_tb_dec(&ops[i]->turbo_dec);\n+\t\t/* Check if there are available space for further processing */\n+\t\tif (unlikely((avail - cbs_in_tb < 0) || (cbs_in_tb == 0))) {\n+\t\t\tacc_enqueue_ring_full(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t\tavail -= cbs_in_tb;\n+\n+\t\tret = enqueue_dec_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);\n+\t\tif (ret <= 0) {\n+\t\t\tacc_enqueue_invalid(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t\tenqueued_cbs += ret;\n+\t}\n+\n+\tacc_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n+}\n+\n+/* Enqueue decode operations for ACC200 device. */\n+static uint16_t\n+acc200_enqueue_dec(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tint32_t aq_avail = acc_aq_avail(q_data, num);\n+\tif (unlikely((aq_avail <= 0) || (num == 0)))\n+\t\treturn 0;\n+\tif (ops[0]->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)\n+\t\treturn acc200_enqueue_dec_tb(q_data, ops, num);\n+\telse\n+\t\treturn acc200_enqueue_dec_cb(q_data, ops, num);\n+}\n+\n /* Enqueue decode operations for ACC200 device. */\n static uint16_t\n acc200_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n@@ -1905,6 +2660,48 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op,\n \treturn cb_idx;\n }\n \n+/* Dequeue encode operations from ACC200 device. */\n+static uint16_t\n+acc200_dequeue_enc(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tuint32_t avail = acc_ring_avail_deq(q);\n+\tuint32_t aq_dequeued = 0;\n+\tuint16_t i, dequeued_ops = 0, dequeued_descs = 0;\n+\tint ret, cbm;\n+\tstruct rte_bbdev_enc_op *op;\n+\tif (avail == 0)\n+\t\treturn 0;\n+\top = (q->ring_addr + (q->sw_ring_tail &\n+\t\t\tq->sw_ring_wrap_mask))->req.op_addr;\n+\n+\tcbm = op->turbo_enc.code_block_mode;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tif (cbm == RTE_BBDEV_TRANSPORT_BLOCK)\n+\t\t\tret = dequeue_enc_one_op_tb(q, &ops[dequeued_ops],\n+\t\t\t\t\t&dequeued_ops, &aq_dequeued,\n+\t\t\t\t\t&dequeued_descs);\n+\t\telse\n+\t\t\tret = dequeue_enc_one_op_cb(q, &ops[dequeued_ops],\n+\t\t\t\t\t&dequeued_ops, &aq_dequeued,\n+\t\t\t\t\t&dequeued_descs);\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\t\tif (dequeued_ops >= num)\n+\t\t\tbreak;\n+\t}\n+\n+\tq->aq_dequeued += aq_dequeued;\n+\tq->sw_ring_tail += dequeued_descs;\n+\n+\t/* Update enqueue stats */\n+\tq_data->queue_stats.dequeued_count += dequeued_ops;\n+\n+\treturn dequeued_ops;\n+}\n+\n /* Dequeue LDPC encode operations from ACC200 device. 
*/\n static uint16_t\n acc200_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n@@ -1945,6 +2742,46 @@ acc200_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n \treturn dequeued_ops;\n }\n \n+/* Dequeue decode operations from ACC200 device. */\n+static uint16_t\n+acc200_dequeue_dec(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tuint16_t dequeue_num;\n+\tuint32_t avail = acc_ring_avail_deq(q);\n+\tuint32_t aq_dequeued = 0;\n+\tuint16_t i;\n+\tuint16_t dequeued_cbs = 0;\n+\tstruct rte_bbdev_dec_op *op;\n+\tint ret;\n+\n+\tdequeue_num = (avail < num) ? avail : num;\n+\n+\tfor (i = 0; i < dequeue_num; ++i) {\n+\t\top = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask))->req.op_addr;\n+\t\tif (op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)\n+\t\t\tret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,\n+\t\t\t\t\t&aq_dequeued);\n+\t\telse\n+\t\t\tret = dequeue_dec_one_op_cb(q_data, q, &ops[i],\n+\t\t\t\t\tdequeued_cbs, &aq_dequeued);\n+\n+\t\tif (ret <= 0)\n+\t\t\tbreak;\n+\t\tdequeued_cbs += ret;\n+\t}\n+\n+\tq->aq_dequeued += aq_dequeued;\n+\tq->sw_ring_tail += dequeued_cbs;\n+\n+\t/* Update enqueue stats */\n+\tq_data->queue_stats.dequeued_count += i;\n+\n+\treturn i;\n+}\n+\n /* Dequeue decode operations from ACC200 device. */\n static uint16_t\n acc200_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n@@ -1993,6 +2830,10 @@ acc200_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)\n \tstruct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);\n \n \tdev->dev_ops = &acc200_bbdev_ops;\n+\tdev->enqueue_enc_ops = acc200_enqueue_enc;\n+\tdev->enqueue_dec_ops = acc200_enqueue_dec;\n+\tdev->dequeue_enc_ops = acc200_dequeue_enc;\n+\tdev->dequeue_dec_ops = acc200_dequeue_dec;\n \tdev->enqueue_ldpc_enc_ops = acc200_enqueue_ldpc_enc;\n \tdev->enqueue_ldpc_dec_ops = acc200_enqueue_ldpc_dec;\n \tdev->dequeue_ldpc_enc_ops = acc200_dequeue_ldpc_enc;\n",
    "prefixes": [
        "v9",
        "09/14"
    ]
}
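The "mbox" links in the payload give an applyable view of the patch (and, via the "series" entry, of the whole 14-patch v9 series). A hedged sketch of pulling the raw mbox and feeding it to git am, again assuming the requests library and a git checkout of DPDK as the working directory:

import subprocess

import requests

# Raw mbox for this single patch; swapping in the series mbox URL from
# the JSON above would fetch all of v9 in one file instead.
MBOX_URL = ("https://patches.dpdk.org/project/dpdk/patch/"
            "20221007213851.31524-10-nicolas.chautru@intel.com/mbox/")

resp = requests.get(MBOX_URL)
resp.raise_for_status()
with open("patch-117655.mbox", "wb") as f:
    f.write(resp.content)

# Apply on top of the current branch; git must be on PATH and this must
# run from the repository root.
subprocess.run(["git", "am", "patch-117655.mbox"], check=True)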