get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
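
As an aside (not part of the recorded exchange below), the patch shown on this page can also be retrieved programmatically from the same endpoint. A minimal read-only sketch in Python, assuming the third-party requests library is available:

    import requests

    # Fetch patch 75653 as JSON from the public endpoint shown below.
    resp = requests.get("http://patches.dpdk.org/api/patches/75653/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])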

GET /api/patches/75653/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 75653,
    "url": "http://patches.dpdk.org/api/patches/75653/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1597791894-37041-8-git-send-email-nicolas.chautru@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1597791894-37041-8-git-send-email-nicolas.chautru@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1597791894-37041-8-git-send-email-nicolas.chautru@intel.com",
    "date": "2020-08-18T23:04:50",
    "name": "[v2,07/11] baseband/acc100: add support for 4G processing",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "1b788d5b6ba64386a230918743bb1e1c34bee6bc",
    "submitter": {
        "id": 1314,
        "url": "http://patches.dpdk.org/api/people/1314/?format=api",
        "name": "Chautru, Nicolas",
        "email": "nicolas.chautru@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1597791894-37041-8-git-send-email-nicolas.chautru@intel.com/mbox/",
    "series": [
        {
            "id": 11695,
            "url": "http://patches.dpdk.org/api/series/11695/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11695",
            "date": "2020-08-18T23:04:43",
            "name": "bbdev PMD ACC100",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/11695/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/75653/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/75653/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id AE3EBA04AF;\n\tWed, 19 Aug 2020 01:08:28 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 8919A1C12C;\n\tWed, 19 Aug 2020 01:07:00 +0200 (CEST)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n by dpdk.org (Postfix) with ESMTP id 26A2D1C020\n for <dev@dpdk.org>; Wed, 19 Aug 2020 01:06:47 +0200 (CEST)",
            "from orsmga004.jf.intel.com ([10.7.209.38])\n by fmsmga103.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Aug 2020 16:06:44 -0700",
            "from skx-5gnr-sc12-4.sc.intel.com ([172.25.69.210])\n by orsmga004.jf.intel.com with ESMTP; 18 Aug 2020 16:06:44 -0700"
        ],
        "IronPort-SDR": [
            "\n EMEs3u9bTKPDmBOsmpnItQDwta5qjPr4sREZNFbU03hz/2QEdScqjNpZnjDlzNcNsR3XIUu9dy\n 9hsmoji+QJUA==",
            "\n eDa2MMEOMyPo7RJJQKM+S6wQ3SP1YWwcbl5VDyvC1tnyJDj3/ec7+A/r+Zy6FLyIh9tBZid6Ca\n kEEyIlAsC3fw=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9717\"; a=\"154281362\"",
            "E=Sophos;i=\"5.76,329,1592895600\"; d=\"scan'208\";a=\"154281362\"",
            "E=Sophos;i=\"5.76,329,1592895600\"; d=\"scan'208\";a=\"441400709\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Nicolas Chautru <nicolas.chautru@intel.com>",
        "To": "dev@dpdk.org,\n\takhil.goyal@nxp.com",
        "Cc": "bruce.richardson@intel.com,\n\tNicolas Chautru <nicolas.chautru@intel.com>",
        "Date": "Tue, 18 Aug 2020 16:04:50 -0700",
        "Message-Id": "<1597791894-37041-8-git-send-email-nicolas.chautru@intel.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1597791894-37041-1-git-send-email-nicolas.chautru@intel.com>",
        "References": "<1597791894-37041-1-git-send-email-nicolas.chautru@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 07/11] baseband/acc100: add support for 4G\n\tprocessing",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Adding capability for 4G encode and decoder processing\n\nSigned-off-by: Nicolas Chautru <nicolas.chautru@intel.com>\n---\n drivers/baseband/acc100/rte_acc100_pmd.c | 1010 ++++++++++++++++++++++++++++--\n 1 file changed, 943 insertions(+), 67 deletions(-)",
    "diff": "diff --git a/drivers/baseband/acc100/rte_acc100_pmd.c b/drivers/baseband/acc100/rte_acc100_pmd.c\nindex b44b2f5..1de7531 100644\n--- a/drivers/baseband/acc100/rte_acc100_pmd.c\n+++ b/drivers/baseband/acc100/rte_acc100_pmd.c\n@@ -339,7 +339,6 @@\n \tfree_base_addresses(base_addrs, i);\n }\n \n-\n /* Allocate 64MB memory used for all software rings */\n static int\n acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n@@ -637,6 +636,41 @@\n \n \tstatic const struct rte_bbdev_op_cap bbdev_capabilities[] = {\n \t\t{\n+\t\t\t.type = RTE_BBDEV_OP_TURBO_DEC,\n+\t\t\t.cap.turbo_dec = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\tRTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |\n+\t\t\t\t\tRTE_BBDEV_TURBO_CRC_TYPE_24B |\n+\t\t\t\t\tRTE_BBDEV_TURBO_HALF_ITERATION_EVEN |\n+\t\t\t\t\tRTE_BBDEV_TURBO_EARLY_TERMINATION |\n+\t\t\t\t\tRTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |\n+\t\t\t\t\tRTE_BBDEV_TURBO_MAP_DEC |\n+\t\t\t\t\tRTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |\n+\t\t\t\t\tRTE_BBDEV_TURBO_DEC_SCATTER_GATHER,\n+\t\t\t\t.max_llr_modulus = INT8_MAX,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_hard_out =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_soft_out =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_BBDEV_OP_TURBO_ENC,\n+\t\t\t.cap.turbo_enc = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\tRTE_BBDEV_TURBO_CRC_24B_ATTACH |\n+\t\t\t\t\tRTE_BBDEV_TURBO_RV_INDEX_BYPASS |\n+\t\t\t\t\tRTE_BBDEV_TURBO_RATE_MATCH |\n+\t\t\t\t\tRTE_BBDEV_TURBO_ENC_SCATTER_GATHER,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_dst =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n+\t\t{\n \t\t\t.type   = RTE_BBDEV_OP_LDPC_ENC,\n \t\t\t.cap.ldpc_enc = {\n \t\t\t\t.capability_flags =\n@@ -719,7 +753,6 @@\n #endif\n }\n \n-\n static const struct rte_bbdev_ops acc100_bbdev_ops = {\n \t.setup_queues = acc100_setup_queues,\n \t.close = acc100_dev_close,\n@@ -763,6 +796,58 @@\n \treturn tail;\n }\n \n+/* Fill in a frame control word for turbo encoding. 
*/\n+static inline void\n+acc100_fcw_te_fill(const struct rte_bbdev_enc_op *op, struct acc100_fcw_te *fcw)\n+{\n+\tfcw->code_block_mode = op->turbo_enc.code_block_mode;\n+\tif (fcw->code_block_mode == 0) { /* For TB mode */\n+\t\tfcw->k_neg = op->turbo_enc.tb_params.k_neg;\n+\t\tfcw->k_pos = op->turbo_enc.tb_params.k_pos;\n+\t\tfcw->c_neg = op->turbo_enc.tb_params.c_neg;\n+\t\tfcw->c = op->turbo_enc.tb_params.c;\n+\t\tfcw->ncb_neg = op->turbo_enc.tb_params.ncb_neg;\n+\t\tfcw->ncb_pos = op->turbo_enc.tb_params.ncb_pos;\n+\n+\t\tif (check_bit(op->turbo_enc.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_RATE_MATCH)) {\n+\t\t\tfcw->bypass_rm = 0;\n+\t\t\tfcw->cab = op->turbo_enc.tb_params.cab;\n+\t\t\tfcw->ea = op->turbo_enc.tb_params.ea;\n+\t\t\tfcw->eb = op->turbo_enc.tb_params.eb;\n+\t\t} else {\n+\t\t\t/* E is set to the encoding output size when RM is\n+\t\t\t * bypassed.\n+\t\t\t */\n+\t\t\tfcw->bypass_rm = 1;\n+\t\t\tfcw->cab = fcw->c_neg;\n+\t\t\tfcw->ea = 3 * fcw->k_neg + 12;\n+\t\t\tfcw->eb = 3 * fcw->k_pos + 12;\n+\t\t}\n+\t} else { /* For CB mode */\n+\t\tfcw->k_pos = op->turbo_enc.cb_params.k;\n+\t\tfcw->ncb_pos = op->turbo_enc.cb_params.ncb;\n+\n+\t\tif (check_bit(op->turbo_enc.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_RATE_MATCH)) {\n+\t\t\tfcw->bypass_rm = 0;\n+\t\t\tfcw->eb = op->turbo_enc.cb_params.e;\n+\t\t} else {\n+\t\t\t/* E is set to the encoding output size when RM is\n+\t\t\t * bypassed.\n+\t\t\t */\n+\t\t\tfcw->bypass_rm = 1;\n+\t\t\tfcw->eb = 3 * fcw->k_pos + 12;\n+\t\t}\n+\t}\n+\n+\tfcw->bypass_rv_idx1 = check_bit(op->turbo_enc.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_RV_INDEX_BYPASS);\n+\tfcw->code_block_crc = check_bit(op->turbo_enc.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_CRC_24B_ATTACH);\n+\tfcw->rv_idx1 = op->turbo_enc.rv_index;\n+}\n+\n /* Compute value of k0.\n  * Based on 3GPP 38.212 Table 5.4.2.1-2\n  * Starting position of different redundancy versions, k0\n@@ -813,6 +898,25 @@\n \tfcw->mcb_count = num_cb;\n }\n \n+/* Fill in a frame control word for turbo decoding. */\n+static inline void\n+acc100_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_td *fcw)\n+{\n+\t/* Note : Early termination is always enabled for 4GUL */\n+\tfcw->fcw_ver = 1;\n+\tif (op->turbo_dec.code_block_mode == 0)\n+\t\tfcw->k_pos = op->turbo_dec.tb_params.k_pos;\n+\telse\n+\t\tfcw->k_pos = op->turbo_dec.cb_params.k;\n+\tfcw->turbo_crc_type = check_bit(op->turbo_dec.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_CRC_TYPE_24B);\n+\tfcw->bypass_sb_deint = 0;\n+\tfcw->raw_decoder_input_on = 0;\n+\tfcw->max_iter = op->turbo_dec.iter_max;\n+\tfcw->half_iter_on = !check_bit(op->turbo_dec.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_HALF_ITERATION_EVEN);\n+}\n+\n /* Fill in a frame control word for LDPC decoding. 
*/\n static inline void\n acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,\n@@ -1042,6 +1146,87 @@\n }\n \n static inline int\n+acc100_dma_desc_te_fill(struct rte_bbdev_enc_op *op,\n+\t\tstruct acc100_dma_req_desc *desc, struct rte_mbuf **input,\n+\t\tstruct rte_mbuf *output, uint32_t *in_offset,\n+\t\tuint32_t *out_offset, uint32_t *out_length,\n+\t\tuint32_t *mbuf_total_left, uint32_t *seg_total_left, uint8_t r)\n+{\n+\tint next_triplet = 1; /* FCW already done */\n+\tuint32_t e, ea, eb, length;\n+\tuint16_t k, k_neg, k_pos;\n+\tuint8_t cab, c_neg;\n+\n+\tdesc->word0 = ACC100_DMA_DESC_TYPE;\n+\tdesc->word1 = 0; /**< Timestamp could be disabled */\n+\tdesc->word2 = 0;\n+\tdesc->word3 = 0;\n+\tdesc->numCBs = 1;\n+\n+\tif (op->turbo_enc.code_block_mode == 0) {\n+\t\tea = op->turbo_enc.tb_params.ea;\n+\t\teb = op->turbo_enc.tb_params.eb;\n+\t\tcab = op->turbo_enc.tb_params.cab;\n+\t\tk_neg = op->turbo_enc.tb_params.k_neg;\n+\t\tk_pos = op->turbo_enc.tb_params.k_pos;\n+\t\tc_neg = op->turbo_enc.tb_params.c_neg;\n+\t\te = (r < cab) ? ea : eb;\n+\t\tk = (r < c_neg) ? k_neg : k_pos;\n+\t} else {\n+\t\te = op->turbo_enc.cb_params.e;\n+\t\tk = op->turbo_enc.cb_params.k;\n+\t}\n+\n+\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))\n+\t\tlength = (k - 24) >> 3;\n+\telse\n+\t\tlength = k >> 3;\n+\n+\tif (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < length))) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\t*mbuf_total_left, length);\n+\t\treturn -1;\n+\t}\n+\n+\tnext_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,\n+\t\t\tlength, seg_total_left, next_triplet);\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->m2dlen = next_triplet;\n+\t*mbuf_total_left -= length;\n+\n+\t/* Set output length */\n+\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_RATE_MATCH))\n+\t\t/* Integer round up division by 8 */\n+\t\t*out_length = (e + 7) >> 3;\n+\telse\n+\t\t*out_length = (k >> 3) * 3 + 2;\n+\n+\tnext_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,\n+\t\t\t*out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\top->turbo_enc.output.length += *out_length;\n+\t*out_offset += *out_length;\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->d2mlen = next_triplet - desc->m2dlen;\n+\n+\tdesc->op_addr = op;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,\n \t\tstruct acc100_dma_req_desc *desc, struct rte_mbuf **input,\n \t\tstruct rte_mbuf *output, uint32_t *in_offset,\n@@ -1110,6 +1295,117 @@\n }\n \n static inline int\n+acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op,\n+\t\tstruct acc100_dma_req_desc *desc, struct rte_mbuf **input,\n+\t\tstruct rte_mbuf *h_output, struct rte_mbuf *s_output,\n+\t\tuint32_t *in_offset, uint32_t *h_out_offset,\n+\t\tuint32_t *s_out_offset, uint32_t *h_out_length,\n+\t\tuint32_t *s_out_length, uint32_t *mbuf_total_left,\n+\t\tuint32_t *seg_total_left, uint8_t r)\n+{\n+\tint next_triplet = 1; /* FCW already done */\n+\tuint16_t k;\n+\tuint16_t crc24_overlap = 0;\n+\tuint32_t 
e, kw;\n+\n+\tdesc->word0 = ACC100_DMA_DESC_TYPE;\n+\tdesc->word1 = 0; /**< Timestamp could be disabled */\n+\tdesc->word2 = 0;\n+\tdesc->word3 = 0;\n+\tdesc->numCBs = 1;\n+\n+\tif (op->turbo_dec.code_block_mode == 0) {\n+\t\tk = (r < op->turbo_dec.tb_params.c_neg)\n+\t\t\t? op->turbo_dec.tb_params.k_neg\n+\t\t\t: op->turbo_dec.tb_params.k_pos;\n+\t\te = (r < op->turbo_dec.tb_params.cab)\n+\t\t\t? op->turbo_dec.tb_params.ea\n+\t\t\t: op->turbo_dec.tb_params.eb;\n+\t} else {\n+\t\tk = op->turbo_dec.cb_params.k;\n+\t\te = op->turbo_dec.cb_params.e;\n+\t}\n+\n+\tif ((op->turbo_dec.code_block_mode == 0)\n+\t\t&& !check_bit(op->turbo_dec.op_flags,\n+\t\tRTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))\n+\t\tcrc24_overlap = 24;\n+\n+\t/* Calculates circular buffer size.\n+\t * According to 3gpp 36.212 section 5.1.4.2\n+\t *   Kw = 3 * Kpi,\n+\t * where:\n+\t *   Kpi = nCol * nRow\n+\t * where nCol is 32 and nRow can be calculated from:\n+\t *   D =< nCol * nRow\n+\t * where D is the size of each output from turbo encoder block (k + 4).\n+\t */\n+\tkw = RTE_ALIGN_CEIL(k + 4, 32) * 3;\n+\n+\tif (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < kw))) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\t*mbuf_total_left, kw);\n+\t\treturn -1;\n+\t}\n+\n+\tnext_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset, kw,\n+\t\t\tseg_total_left, next_triplet);\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->m2dlen = next_triplet;\n+\t*mbuf_total_left -= kw;\n+\n+\tnext_triplet = acc100_dma_fill_blk_type_out(\n+\t\t\tdesc, h_output, *h_out_offset,\n+\t\t\tk >> 3, next_triplet, ACC100_DMA_BLKID_OUT_HARD);\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\n+\t*h_out_length = ((k - crc24_overlap) >> 3);\n+\top->turbo_dec.hard_output.length += *h_out_length;\n+\t*h_out_offset += *h_out_length;\n+\n+\t/* Soft output */\n+\tif (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT)) {\n+\t\tif (check_bit(op->turbo_dec.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_EQUALIZER))\n+\t\t\t*s_out_length = e;\n+\t\telse\n+\t\t\t*s_out_length = (k * 3) + 12;\n+\n+\t\tnext_triplet = acc100_dma_fill_blk_type_out(desc, s_output,\n+\t\t\t\t*s_out_offset, *s_out_length, next_triplet,\n+\t\t\t\tACC100_DMA_BLKID_OUT_SOFT);\n+\t\tif (unlikely(next_triplet < 0)) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\t\top);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\top->turbo_dec.soft_output.length += *s_out_length;\n+\t\t*s_out_offset += *s_out_length;\n+\t}\n+\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->d2mlen = next_triplet - desc->m2dlen;\n+\n+\tdesc->op_addr = op;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n \t\tstruct acc100_dma_req_desc *desc,\n \t\tstruct rte_mbuf **input, struct rte_mbuf *h_output,\n@@ -1374,6 +1670,57 @@\n \n /* Enqueue one encode operations for ACC100 device in CB mode */\n static inline int\n+enqueue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t total_enqueued_cbs)\n+{\n+\tunion acc100_dma_desc *desc = NULL;\n+\tint 
ret;\n+\tuint32_t in_offset, out_offset, out_length, mbuf_total_left,\n+\t\tseg_total_left;\n+\tstruct rte_mbuf *input, *output_head, *output;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tacc100_fcw_te_fill(op, &desc->req.fcw_te);\n+\n+\tinput = op->turbo_enc.input.data;\n+\toutput_head = output = op->turbo_enc.output.data;\n+\tin_offset = op->turbo_enc.input.offset;\n+\tout_offset = op->turbo_enc.output.offset;\n+\tout_length = 0;\n+\tmbuf_total_left = op->turbo_enc.input.length;\n+\tseg_total_left = rte_pktmbuf_data_len(op->turbo_enc.input.data)\n+\t\t\t- in_offset;\n+\n+\tret = acc100_dma_desc_te_fill(op, &desc->req, &input, output,\n+\t\t\t&in_offset, &out_offset, &out_length, &mbuf_total_left,\n+\t\t\t&seg_total_left, 0);\n+\n+\tif (unlikely(ret < 0))\n+\t\treturn ret;\n+\n+\tmbuf_append(output_head, output, out_length);\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\trte_memdump(stderr, \"FCW\", &desc->req.fcw_te,\n+\t\t\tsizeof(desc->req.fcw_te) - 8);\n+\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+\n+\t/* Check if any data left after processing one CB */\n+\tif (mbuf_total_left != 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Some date still left after processing one CB: mbuf_total_left = %u\",\n+\t\t\t\tmbuf_total_left);\n+\t\treturn -EINVAL;\n+\t}\n+#endif\n+\t/* One CB (one op) was successfully prepared to enqueue */\n+\treturn 1;\n+}\n+\n+/* Enqueue one encode operations for ACC100 device in CB mode */\n+static inline int\n enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops,\n \t\tuint16_t total_enqueued_cbs, int16_t num)\n {\n@@ -1481,78 +1828,235 @@\n \treturn 1;\n }\n \n-static inline int\n-harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n-\t\tuint16_t total_enqueued_cbs) {\n-\tstruct acc100_fcw_ld *fcw;\n-\tunion acc100_dma_desc *desc;\n-\tint next_triplet = 1;\n-\tstruct rte_mbuf *hq_output_head, *hq_output;\n-\tuint16_t harq_in_length = op->ldpc_dec.harq_combined_input.length;\n-\tif (harq_in_length == 0) {\n-\t\trte_bbdev_log(ERR, \"Loopback of invalid null size\\n\");\n-\t\treturn -EINVAL;\n-\t}\n \n-\tint h_comp = check_bit(op->ldpc_dec.op_flags,\n-\t\t\tRTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION\n-\t\t\t) ? 1 : 0;\n-\tif (h_comp == 1)\n-\t\tharq_in_length = harq_in_length * 8 / 6;\n-\tharq_in_length = RTE_ALIGN(harq_in_length, 64);\n-\tuint16_t harq_dma_length_in = (h_comp == 0) ?\n-\t\t\tharq_in_length :\n-\t\t\tharq_in_length * 6 / 8;\n-\tuint16_t harq_dma_length_out = harq_dma_length_in;\n-\tbool ddr_mem_in = check_bit(op->ldpc_dec.op_flags,\n-\t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE);\n-\tunion acc100_harq_layout_data *harq_layout = q->d->harq_layout;\n-\tuint16_t harq_index = (ddr_mem_in ?\n-\t\t\top->ldpc_dec.harq_combined_input.offset :\n-\t\t\top->ldpc_dec.harq_combined_output.offset)\n-\t\t\t/ ACC100_HARQ_OFFSET;\n+/* Enqueue one encode operations for ACC100 device in TB mode. 
*/\n+static inline int\n+enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t total_enqueued_cbs, uint8_t cbs_in_tb)\n+{\n+\tunion acc100_dma_desc *desc = NULL;\n+\tint ret;\n+\tuint8_t r, c;\n+\tuint32_t in_offset, out_offset, out_length, mbuf_total_left,\n+\t\tseg_total_left;\n+\tstruct rte_mbuf *input, *output_head, *output;\n+\tuint16_t current_enqueued_cbs = 0;\n \n \tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t& q->sw_ring_wrap_mask);\n \tdesc = q->ring_addr + desc_idx;\n-\tfcw = &desc->req.fcw_ld;\n-\t/* Set the FCW from loopback into DDR */\n-\tmemset(fcw, 0, sizeof(struct acc100_fcw_ld));\n-\tfcw->FCWversion = ACC100_FCW_VER;\n-\tfcw->qm = 2;\n-\tfcw->Zc = 384;\n-\tif (harq_in_length < 16 * N_ZC_1)\n-\t\tfcw->Zc = 16;\n-\tfcw->ncb = fcw->Zc * N_ZC_1;\n-\tfcw->rm_e = 2;\n-\tfcw->hcin_en = 1;\n-\tfcw->hcout_en = 1;\n+\tuint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;\n+\tacc100_fcw_te_fill(op, &desc->req.fcw_te);\n \n-\trte_bbdev_log(DEBUG, \"Loopback IN %d Index %d offset %d length %d %d\\n\",\n-\t\t\tddr_mem_in, harq_index,\n-\t\t\tharq_layout[harq_index].offset, harq_in_length,\n-\t\t\tharq_dma_length_in);\n+\tinput = op->turbo_enc.input.data;\n+\toutput_head = output = op->turbo_enc.output.data;\n+\tin_offset = op->turbo_enc.input.offset;\n+\tout_offset = op->turbo_enc.output.offset;\n+\tout_length = 0;\n+\tmbuf_total_left = op->turbo_enc.input.length;\n \n-\tif (ddr_mem_in && (harq_layout[harq_index].offset > 0)) {\n-\t\tfcw->hcin_size0 = harq_layout[harq_index].size0;\n-\t\tfcw->hcin_offset = harq_layout[harq_index].offset;\n-\t\tfcw->hcin_size1 = harq_in_length - fcw->hcin_offset;\n-\t\tharq_dma_length_in = (fcw->hcin_size0 + fcw->hcin_size1);\n-\t\tif (h_comp == 1)\n-\t\t\tharq_dma_length_in = harq_dma_length_in * 6 / 8;\n-\t} else {\n-\t\tfcw->hcin_size0 = harq_in_length;\n-\t}\n-\tharq_layout[harq_index].val = 0;\n-\trte_bbdev_log(DEBUG, \"Loopback FCW Config %d %d %d\\n\",\n-\t\t\tfcw->hcin_size0, fcw->hcin_offset, fcw->hcin_size1);\n-\tfcw->hcout_size0 = harq_in_length;\n-\tfcw->hcin_decomp_mode = h_comp;\n-\tfcw->hcout_comp_mode = h_comp;\n-\tfcw->gain_i = 1;\n-\tfcw->gain_h = 1;\n+\tc = op->turbo_enc.tb_params.c;\n+\tr = op->turbo_enc.tb_params.r;\n \n-\t/* Set the prefix of descriptor. 
This could be done at polling */\n+\twhile (mbuf_total_left > 0 && r < c) {\n+\t\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n+\t\t/* Set up DMA descriptor */\n+\t\tdesc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t\t& q->sw_ring_wrap_mask);\n+\t\tdesc->req.data_ptrs[0].address = q->ring_addr_phys + fcw_offset;\n+\t\tdesc->req.data_ptrs[0].blen = ACC100_FCW_TE_BLEN;\n+\n+\t\tret = acc100_dma_desc_te_fill(op, &desc->req, &input, output,\n+\t\t\t\t&in_offset, &out_offset, &out_length,\n+\t\t\t\t&mbuf_total_left, &seg_total_left, r);\n+\t\tif (unlikely(ret < 0))\n+\t\t\treturn ret;\n+\t\tmbuf_append(output_head, output, out_length);\n+\n+\t\t/* Set total number of CBs in TB */\n+\t\tdesc->req.cbs_in_tb = cbs_in_tb;\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\trte_memdump(stderr, \"FCW\", &desc->req.fcw_te,\n+\t\t\t\tsizeof(desc->req.fcw_te) - 8);\n+\t\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\n+\t\tif (seg_total_left == 0) {\n+\t\t\t/* Go to the next mbuf */\n+\t\t\tinput = input->next;\n+\t\t\tin_offset = 0;\n+\t\t\toutput = output->next;\n+\t\t\tout_offset = 0;\n+\t\t}\n+\n+\t\ttotal_enqueued_cbs++;\n+\t\tcurrent_enqueued_cbs++;\n+\t\tr++;\n+\t}\n+\n+\tif (unlikely(desc == NULL))\n+\t\treturn current_enqueued_cbs;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t/* Check if any CBs left for processing */\n+\tif (mbuf_total_left != 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Some date still left for processing: mbuf_total_left = %u\",\n+\t\t\t\tmbuf_total_left);\n+\t\treturn -EINVAL;\n+\t}\n+#endif\n+\n+\t/* Set SDone on last CB descriptor for TB mode. */\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\treturn current_enqueued_cbs;\n+}\n+\n+/** Enqueue one decode operations for ACC100 device in CB mode */\n+static inline int\n+enqueue_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n+\t\tuint16_t total_enqueued_cbs)\n+{\n+\tunion acc100_dma_desc *desc = NULL;\n+\tint ret;\n+\tuint32_t in_offset, h_out_offset, s_out_offset, s_out_length,\n+\t\th_out_length, mbuf_total_left, seg_total_left;\n+\tstruct rte_mbuf *input, *h_output_head, *h_output,\n+\t\t*s_output_head, *s_output;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tacc100_fcw_td_fill(op, &desc->req.fcw_td);\n+\n+\tinput = op->turbo_dec.input.data;\n+\th_output_head = h_output = op->turbo_dec.hard_output.data;\n+\ts_output_head = s_output = op->turbo_dec.soft_output.data;\n+\tin_offset = op->turbo_dec.input.offset;\n+\th_out_offset = op->turbo_dec.hard_output.offset;\n+\ts_out_offset = op->turbo_dec.soft_output.offset;\n+\th_out_length = s_out_length = 0;\n+\tmbuf_total_left = op->turbo_dec.input.length;\n+\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tif (unlikely(input == NULL)) {\n+\t\trte_bbdev_log(ERR, \"Invalid mbuf pointer\");\n+\t\treturn -EFAULT;\n+\t}\n+#endif\n+\n+\t/* Set up DMA descriptor */\n+\tdesc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\n+\tret = acc100_dma_desc_td_fill(op, &desc->req, &input, h_output,\n+\t\t\ts_output, &in_offset, &h_out_offset, &s_out_offset,\n+\t\t\t&h_out_length, &s_out_length, &mbuf_total_left,\n+\t\t\t&seg_total_left, 0);\n+\n+\tif (unlikely(ret < 0))\n+\t\treturn ret;\n+\n+\t/* Hard output */\n+\tmbuf_append(h_output_head, h_output, h_out_length);\n+\n+\t/* Soft output */\n+\tif 
(check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT))\n+\t\tmbuf_append(s_output_head, s_output, s_out_length);\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\trte_memdump(stderr, \"FCW\", &desc->req.fcw_td,\n+\t\t\tsizeof(desc->req.fcw_td) - 8);\n+\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+\n+\t/* Check if any CBs left for processing */\n+\tif (mbuf_total_left != 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Some date still left after processing one CB: mbuf_total_left = %u\",\n+\t\t\t\tmbuf_total_left);\n+\t\treturn -EINVAL;\n+\t}\n+#endif\n+\n+\t/* One CB (one op) was successfully prepared to enqueue */\n+\treturn 1;\n+}\n+\n+static inline int\n+harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n+\t\tuint16_t total_enqueued_cbs) {\n+\tstruct acc100_fcw_ld *fcw;\n+\tunion acc100_dma_desc *desc;\n+\tint next_triplet = 1;\n+\tstruct rte_mbuf *hq_output_head, *hq_output;\n+\tuint16_t harq_in_length = op->ldpc_dec.harq_combined_input.length;\n+\tif (harq_in_length == 0) {\n+\t\trte_bbdev_log(ERR, \"Loopback of invalid null size\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tint h_comp = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION\n+\t\t\t) ? 1 : 0;\n+\tif (h_comp == 1)\n+\t\tharq_in_length = harq_in_length * 8 / 6;\n+\tharq_in_length = RTE_ALIGN(harq_in_length, 64);\n+\tuint16_t harq_dma_length_in = (h_comp == 0) ?\n+\t\t\tharq_in_length :\n+\t\t\tharq_in_length * 6 / 8;\n+\tuint16_t harq_dma_length_out = harq_dma_length_in;\n+\tbool ddr_mem_in = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE);\n+\tunion acc100_harq_layout_data *harq_layout = q->d->harq_layout;\n+\tuint16_t harq_index = (ddr_mem_in ?\n+\t\t\top->ldpc_dec.harq_combined_input.offset :\n+\t\t\top->ldpc_dec.harq_combined_output.offset)\n+\t\t\t/ ACC100_HARQ_OFFSET;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tfcw = &desc->req.fcw_ld;\n+\t/* Set the FCW from loopback into DDR */\n+\tmemset(fcw, 0, sizeof(struct acc100_fcw_ld));\n+\tfcw->FCWversion = ACC100_FCW_VER;\n+\tfcw->qm = 2;\n+\tfcw->Zc = 384;\n+\tif (harq_in_length < 16 * N_ZC_1)\n+\t\tfcw->Zc = 16;\n+\tfcw->ncb = fcw->Zc * N_ZC_1;\n+\tfcw->rm_e = 2;\n+\tfcw->hcin_en = 1;\n+\tfcw->hcout_en = 1;\n+\n+\trte_bbdev_log(DEBUG, \"Loopback IN %d Index %d offset %d length %d %d\\n\",\n+\t\t\tddr_mem_in, harq_index,\n+\t\t\tharq_layout[harq_index].offset, harq_in_length,\n+\t\t\tharq_dma_length_in);\n+\n+\tif (ddr_mem_in && (harq_layout[harq_index].offset > 0)) {\n+\t\tfcw->hcin_size0 = harq_layout[harq_index].size0;\n+\t\tfcw->hcin_offset = harq_layout[harq_index].offset;\n+\t\tfcw->hcin_size1 = harq_in_length - fcw->hcin_offset;\n+\t\tharq_dma_length_in = (fcw->hcin_size0 + fcw->hcin_size1);\n+\t\tif (h_comp == 1)\n+\t\t\tharq_dma_length_in = harq_dma_length_in * 6 / 8;\n+\t} else {\n+\t\tfcw->hcin_size0 = harq_in_length;\n+\t}\n+\tharq_layout[harq_index].val = 0;\n+\trte_bbdev_log(DEBUG, \"Loopback FCW Config %d %d %d\\n\",\n+\t\t\tfcw->hcin_size0, fcw->hcin_offset, fcw->hcin_size1);\n+\tfcw->hcout_size0 = harq_in_length;\n+\tfcw->hcin_decomp_mode = h_comp;\n+\tfcw->hcout_comp_mode = h_comp;\n+\tfcw->gain_i = 1;\n+\tfcw->gain_h = 1;\n+\n+\t/* Set the prefix of descriptor. 
This could be done at polling */\n \tdesc->req.word0 = ACC100_DMA_DESC_TYPE;\n \tdesc->req.word1 = 0; /**< Timestamp could be disabled */\n \tdesc->req.word2 = 0;\n@@ -1816,6 +2320,107 @@\n \treturn current_enqueued_cbs;\n }\n \n+/* Enqueue one decode operations for ACC100 device in TB mode */\n+static inline int\n+enqueue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n+\t\tuint16_t total_enqueued_cbs, uint8_t cbs_in_tb)\n+{\n+\tunion acc100_dma_desc *desc = NULL;\n+\tint ret;\n+\tuint8_t r, c;\n+\tuint32_t in_offset, h_out_offset, s_out_offset, s_out_length,\n+\t\th_out_length, mbuf_total_left, seg_total_left;\n+\tstruct rte_mbuf *input, *h_output_head, *h_output,\n+\t\t*s_output_head, *s_output;\n+\tuint16_t current_enqueued_cbs = 0;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tuint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;\n+\tacc100_fcw_td_fill(op, &desc->req.fcw_td);\n+\n+\tinput = op->turbo_dec.input.data;\n+\th_output_head = h_output = op->turbo_dec.hard_output.data;\n+\ts_output_head = s_output = op->turbo_dec.soft_output.data;\n+\tin_offset = op->turbo_dec.input.offset;\n+\th_out_offset = op->turbo_dec.hard_output.offset;\n+\ts_out_offset = op->turbo_dec.soft_output.offset;\n+\th_out_length = s_out_length = 0;\n+\tmbuf_total_left = op->turbo_dec.input.length;\n+\tc = op->turbo_dec.tb_params.c;\n+\tr = op->turbo_dec.tb_params.r;\n+\n+\twhile (mbuf_total_left > 0 && r < c) {\n+\n+\t\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n+\n+\t\t/* Set up DMA descriptor */\n+\t\tdesc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)\n+\t\t\t\t& q->sw_ring_wrap_mask);\n+\t\tdesc->req.data_ptrs[0].address = q->ring_addr_phys + fcw_offset;\n+\t\tdesc->req.data_ptrs[0].blen = ACC100_FCW_TD_BLEN;\n+\t\tret = acc100_dma_desc_td_fill(op, &desc->req, &input,\n+\t\t\t\th_output, s_output, &in_offset, &h_out_offset,\n+\t\t\t\t&s_out_offset, &h_out_length, &s_out_length,\n+\t\t\t\t&mbuf_total_left, &seg_total_left, r);\n+\n+\t\tif (unlikely(ret < 0))\n+\t\t\treturn ret;\n+\n+\t\t/* Hard output */\n+\t\tmbuf_append(h_output_head, h_output, h_out_length);\n+\n+\t\t/* Soft output */\n+\t\tif (check_bit(op->turbo_dec.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_SOFT_OUTPUT))\n+\t\t\tmbuf_append(s_output_head, s_output, s_out_length);\n+\n+\t\t/* Set total number of CBs in TB */\n+\t\tdesc->req.cbs_in_tb = cbs_in_tb;\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\trte_memdump(stderr, \"FCW\", &desc->req.fcw_td,\n+\t\t\t\tsizeof(desc->req.fcw_td) - 8);\n+\t\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\n+\t\tif (seg_total_left == 0) {\n+\t\t\t/* Go to the next mbuf */\n+\t\t\tinput = input->next;\n+\t\t\tin_offset = 0;\n+\t\t\th_output = h_output->next;\n+\t\t\th_out_offset = 0;\n+\n+\t\t\tif (check_bit(op->turbo_dec.op_flags,\n+\t\t\t\t\tRTE_BBDEV_TURBO_SOFT_OUTPUT)) {\n+\t\t\t\ts_output = s_output->next;\n+\t\t\t\ts_out_offset = 0;\n+\t\t\t}\n+\t\t}\n+\n+\t\ttotal_enqueued_cbs++;\n+\t\tcurrent_enqueued_cbs++;\n+\t\tr++;\n+\t}\n+\n+\tif (unlikely(desc == NULL))\n+\t\treturn current_enqueued_cbs;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t/* Check if any CBs left for processing */\n+\tif (mbuf_total_left != 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Some date still left for processing: mbuf_total_left = %u\",\n+\t\t\t\tmbuf_total_left);\n+\t\treturn -EINVAL;\n+\t}\n+#endif\n+\t/* Set SDone on last CB descriptor for TB mode */\n+\tdesc->req.sdone_enable = 
1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\treturn current_enqueued_cbs;\n+}\n \n /* Calculates number of CBs in processed encoder TB based on 'r' and input\n  * length.\n@@ -1893,6 +2498,45 @@\n \treturn cbs_in_tb;\n }\n \n+/* Enqueue encode operations for ACC100 device in CB mode. */\n+static uint16_t\n+acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct acc100_queue *q = q_data->queue_private;\n+\tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n+\tuint16_t i;\n+\tunion acc100_dma_desc *desc;\n+\tint ret;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\t/* Check if there are available space for further processing */\n+\t\tif (unlikely(avail - 1 < 0))\n+\t\t\tbreak;\n+\t\tavail -= 1;\n+\n+\t\tret = enqueue_enc_one_op_cb(q, ops[i], i);\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (unlikely(i == 0))\n+\t\treturn 0; /* Nothing to enqueue */\n+\n+\t/* Set SDone in last CB in enqueued ops for CB mode*/\n+\tdesc = q->ring_addr + ((q->sw_ring_head + i - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\tacc100_dma_enqueue(q, i, &q_data->queue_stats);\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\treturn i;\n+}\n+\n /* Check we can mux encode operations with common FCW */\n static inline bool\n check_mux(struct rte_bbdev_enc_op **ops, uint16_t num) {\n@@ -1960,6 +2604,52 @@\n \treturn i;\n }\n \n+/* Enqueue encode operations for ACC100 device in TB mode. */\n+static uint16_t\n+acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct acc100_queue *q = q_data->queue_private;\n+\tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n+\tuint16_t i, enqueued_cbs = 0;\n+\tuint8_t cbs_in_tb;\n+\tint ret;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\tcbs_in_tb = get_num_cbs_in_tb_enc(&ops[i]->turbo_enc);\n+\t\t/* Check if there are available space for further processing */\n+\t\tif (unlikely(avail - cbs_in_tb < 0))\n+\t\t\tbreak;\n+\t\tavail -= cbs_in_tb;\n+\n+\t\tret = enqueue_enc_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\t\tenqueued_cbs += ret;\n+\t}\n+\n+\tacc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n+}\n+\n+/* Enqueue encode operations for ACC100 device. */\n+static uint16_t\n+acc100_enqueue_enc(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tif (unlikely(num == 0))\n+\t\treturn 0;\n+\tif (ops[0]->turbo_enc.code_block_mode == 0)\n+\t\treturn acc100_enqueue_enc_tb(q_data, ops, num);\n+\telse\n+\t\treturn acc100_enqueue_enc_cb(q_data, ops, num);\n+}\n+\n /* Enqueue encode operations for ACC100 device. 
*/\n static uint16_t\n acc100_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n@@ -1967,7 +2657,51 @@\n {\n \tif (unlikely(num == 0))\n \t\treturn 0;\n-\treturn acc100_enqueue_ldpc_enc_cb(q_data, ops, num);\n+\tif (ops[0]->ldpc_enc.code_block_mode == 0)\n+\t\treturn acc100_enqueue_enc_tb(q_data, ops, num);\n+\telse\n+\t\treturn acc100_enqueue_ldpc_enc_cb(q_data, ops, num);\n+}\n+\n+\n+/* Enqueue decode operations for ACC100 device in CB mode */\n+static uint16_t\n+acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct acc100_queue *q = q_data->queue_private;\n+\tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n+\tuint16_t i;\n+\tunion acc100_dma_desc *desc;\n+\tint ret;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\t/* Check if there are available space for further processing */\n+\t\tif (unlikely(avail - 1 < 0))\n+\t\t\tbreak;\n+\t\tavail -= 1;\n+\n+\t\tret = enqueue_dec_one_op_cb(q, ops[i], i);\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (unlikely(i == 0))\n+\t\treturn 0; /* Nothing to enqueue */\n+\n+\t/* Set SDone in last CB in enqueued ops for CB mode*/\n+\tdesc = q->ring_addr + ((q->sw_ring_head + i - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc->req.sdone_enable = 1;\n+\tdesc->req.irq_enable = q->irq_enable;\n+\n+\tacc100_dma_enqueue(q, i, &q_data->queue_stats);\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n }\n \n /* Check we can mux encode operations with common FCW */\n@@ -2065,6 +2799,53 @@\n \treturn i;\n }\n \n+\n+/* Enqueue decode operations for ACC100 device in TB mode */\n+static uint16_t\n+acc100_enqueue_dec_tb(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct acc100_queue *q = q_data->queue_private;\n+\tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n+\tuint16_t i, enqueued_cbs = 0;\n+\tuint8_t cbs_in_tb;\n+\tint ret;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\tcbs_in_tb = get_num_cbs_in_tb_dec(&ops[i]->turbo_dec);\n+\t\t/* Check if there are available space for further processing */\n+\t\tif (unlikely(avail - cbs_in_tb < 0))\n+\t\t\tbreak;\n+\t\tavail -= cbs_in_tb;\n+\n+\t\tret = enqueue_dec_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\t\tenqueued_cbs += ret;\n+\t}\n+\n+\tacc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n+}\n+\n+/* Enqueue decode operations for ACC100 device. */\n+static uint16_t\n+acc100_enqueue_dec(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tif (unlikely(num == 0))\n+\t\treturn 0;\n+\tif (ops[0]->turbo_dec.code_block_mode == 0)\n+\t\treturn acc100_enqueue_dec_tb(q_data, ops, num);\n+\telse\n+\t\treturn acc100_enqueue_dec_cb(q_data, ops, num);\n+}\n+\n /* Enqueue decode operations for ACC100 device. */\n static uint16_t\n acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n@@ -2388,6 +3169,51 @@\n \treturn cb_idx;\n }\n \n+/* Dequeue encode operations from ACC100 device. 
*/\n+static uint16_t\n+acc100_dequeue_enc(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct acc100_queue *q = q_data->queue_private;\n+\tuint16_t dequeue_num;\n+\tuint32_t avail = q->sw_ring_head - q->sw_ring_tail;\n+\tuint32_t aq_dequeued = 0;\n+\tuint16_t i;\n+\tuint16_t dequeued_cbs = 0;\n+\tstruct rte_bbdev_enc_op *op;\n+\tint ret;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tif (unlikely(ops == 0 && q == NULL))\n+\t\treturn 0;\n+#endif\n+\n+\tdequeue_num = (avail < num) ? avail : num;\n+\n+\tfor (i = 0; i < dequeue_num; ++i) {\n+\t\top = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask))->req.op_addr;\n+\t\tif (op->turbo_enc.code_block_mode == 0)\n+\t\t\tret = dequeue_enc_one_op_tb(q, &ops[i], dequeued_cbs,\n+\t\t\t\t\t&aq_dequeued);\n+\t\telse\n+\t\t\tret = dequeue_enc_one_op_cb(q, &ops[i], dequeued_cbs,\n+\t\t\t\t\t&aq_dequeued);\n+\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\t\tdequeued_cbs += ret;\n+\t}\n+\n+\tq->aq_dequeued += aq_dequeued;\n+\tq->sw_ring_tail += dequeued_cbs;\n+\n+\t/* Update enqueue stats */\n+\tq_data->queue_stats.dequeued_count += i;\n+\n+\treturn i;\n+}\n+\n /* Dequeue LDPC encode operations from ACC100 device. */\n static uint16_t\n acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n@@ -2426,6 +3252,52 @@\n \treturn dequeued_cbs;\n }\n \n+\n+/* Dequeue decode operations from ACC100 device. */\n+static uint16_t\n+acc100_dequeue_dec(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct acc100_queue *q = q_data->queue_private;\n+\tuint16_t dequeue_num;\n+\tuint32_t avail = q->sw_ring_head - q->sw_ring_tail;\n+\tuint32_t aq_dequeued = 0;\n+\tuint16_t i;\n+\tuint16_t dequeued_cbs = 0;\n+\tstruct rte_bbdev_dec_op *op;\n+\tint ret;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tif (unlikely(ops == 0 && q == NULL))\n+\t\treturn 0;\n+#endif\n+\n+\tdequeue_num = (avail < num) ? avail : num;\n+\n+\tfor (i = 0; i < dequeue_num; ++i) {\n+\t\top = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask))->req.op_addr;\n+\t\tif (op->turbo_dec.code_block_mode == 0)\n+\t\t\tret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs,\n+\t\t\t\t\t&aq_dequeued);\n+\t\telse\n+\t\t\tret = dequeue_dec_one_op_cb(q_data, q, &ops[i],\n+\t\t\t\t\tdequeued_cbs, &aq_dequeued);\n+\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\t\tdequeued_cbs += ret;\n+\t}\n+\n+\tq->aq_dequeued += aq_dequeued;\n+\tq->sw_ring_tail += dequeued_cbs;\n+\n+\t/* Update enqueue stats */\n+\tq_data->queue_stats.dequeued_count += i;\n+\n+\treturn i;\n+}\n+\n /* Dequeue decode operations from ACC100 device. */\n static uint16_t\n acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n@@ -2479,6 +3351,10 @@\n \tstruct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);\n \n \tdev->dev_ops = &acc100_bbdev_ops;\n+\tdev->enqueue_enc_ops = acc100_enqueue_enc;\n+\tdev->enqueue_dec_ops = acc100_enqueue_dec;\n+\tdev->dequeue_enc_ops = acc100_dequeue_enc;\n+\tdev->dequeue_dec_ops = acc100_dequeue_dec;\n \tdev->enqueue_ldpc_enc_ops = acc100_enqueue_ldpc_enc;\n \tdev->enqueue_ldpc_dec_ops = acc100_enqueue_ldpc_dec;\n \tdev->dequeue_ldpc_enc_ops = acc100_dequeue_ldpc_enc;\n",
    "prefixes": [
        "v2",
        "07/11"
    ]
}
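
The PUT and PATCH methods listed in the Allow header above modify a patch rather than show it. A hedged sketch of a partial update, assuming a valid Patchwork API token with write access to the project and assuming the writable field names mirror the read-only fields in the response (e.g. "state"); the token value is a placeholder, not taken from this document:

    import requests

    TOKEN = "..."  # hypothetical maintainer token, required for write access
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/75653/",
        headers={"Authorization": "Token " + TOKEN},
        json={"state": "superseded"},
    )
    resp.raise_for_status()
    print(resp.json()["state"])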