get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/116767/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 116767,
    "url": "http://patches.dpdk.org/api/patches/116767/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220924005048.20302-3-nicolas.chautru@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220924005048.20302-3-nicolas.chautru@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220924005048.20302-3-nicolas.chautru@intel.com",
    "date": "2022-09-24T00:50:36",
    "name": "[v6,02/14] baseband/acc100: refactor to segregate common code",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "3c37a9fb27c4cc670e3176dd316154165eec0367",
    "submitter": {
        "id": 1314,
        "url": "http://patches.dpdk.org/api/people/1314/?format=api",
        "name": "Chautru, Nicolas",
        "email": "nicolas.chautru@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220924005048.20302-3-nicolas.chautru@intel.com/mbox/",
    "series": [
        {
            "id": 24806,
            "url": "http://patches.dpdk.org/api/series/24806/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=24806",
            "date": "2022-09-24T00:50:35",
            "name": "[v6,01/14] baseband/acc100: remove unused registers",
            "version": 6,
            "mbox": "http://patches.dpdk.org/series/24806/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/116767/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/116767/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id F22E3A054F;\n\tSat, 24 Sep 2022 02:51:07 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 74C4D42BD7;\n\tSat, 24 Sep 2022 02:50:58 +0200 (CEST)",
            "from mga18.intel.com (mga18.intel.com [134.134.136.126])\n by mails.dpdk.org (Postfix) with ESMTP id CB296400D4\n for <dev@dpdk.org>; Sat, 24 Sep 2022 02:50:53 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 23 Sep 2022 17:50:52 -0700",
            "from unknown (HELO icx-npg-scs1-cp1.localdomain) ([10.233.180.245])\n by FMSMGA003.fm.intel.com with ESMTP; 23 Sep 2022 17:50:51 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1663980654; x=1695516654;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=LtJ+Mg8dgHLddWEihErSZjNEknWQNUIg9AffwVhDU30=;\n b=b9AO9yH6tfjuZRUWo1fY7jkN2VTvpEnQan2x71qfZgX3pj7EUmWYkJ+0\n 2TIO07bkms1tVUX1rGxIHK6f8UwRl5NLT7yRGfeejNEHt+c6W6UuXjOMG\n IXU6a8Hhr2EOm101Gi/t37ivT1PLbSWtjDElKD96idq4AOv2pi2P0hY4g\n S9yGWgNaXY243OgabH9yNB15jfoKepQytL/AMEUn2eJ1U0yn9V5JV44Qn\n 6wpoMS7gHLZvdS6ckvNJQEYLeq1E8nTWR3X2lkcL9QdfK4Hyws7cacJPd\n MDIb1BVNu1whY17FHo0pgMfEIKnuR6vQ8jm6iWEkJ/ANMT84d0/zIIB+7 w==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10479\"; a=\"283839112\"",
            "E=Sophos;i=\"5.93,340,1654585200\"; d=\"scan'208\";a=\"283839112\"",
            "E=Sophos;i=\"5.93,340,1654585200\"; d=\"scan'208\";a=\"709504405\""
        ],
        "X-ExtLoop1": "1",
        "From": "Nicolas Chautru <nicolas.chautru@intel.com>",
        "To": "dev@dpdk.org,\n\tthomas@monjalon.net",
        "Cc": "maxime.coquelin@redhat.com, trix@redhat.com, mdr@ashroe.eu,\n bruce.richardson@intel.com, hemant.agrawal@nxp.com,\n david.marchand@redhat.com, stephen@networkplumber.org,\n hernan.vargas@intel.com, Nic Chautru <nicolas.chautru@intel.com>",
        "Subject": "[PATCH v6 02/14] baseband/acc100: refactor to segregate common code",
        "Date": "Fri, 23 Sep 2022 17:50:36 -0700",
        "Message-Id": "<20220924005048.20302-3-nicolas.chautru@intel.com>",
        "X-Mailer": "git-send-email 2.37.1",
        "In-Reply-To": "<20220924005048.20302-1-nicolas.chautru@intel.com>",
        "References": "<20220924005048.20302-1-nicolas.chautru@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Nic Chautru <nicolas.chautru@intel.com>\n\nRefactoring all shareable common code to be used by future PMD\n(including ACC200 in  this patchset as well as taking into account\nfollowing PMDs in roadmap) by gathering such structures or inline methods.\nNo functionality change.\n\nSigned-off-by: Nic Chautru <nicolas.chautru@intel.com>\nAcked-by: Bruce Richardson <bruce.richardson@intel.com>\nReviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n app/test-bbdev/test_bbdev_perf.c             |    6 +-\n drivers/baseband/acc100/acc100_pmd.h         |  449 +----\n drivers/baseband/acc100/acc101_pmd.h         |   10 -\n drivers/baseband/acc100/acc_common.h         | 1303 +++++++++++++\n drivers/baseband/acc100/rte_acc100_cfg.h     |   70 +-\n drivers/baseband/acc100/rte_acc100_pmd.c     | 1773 ++++++------------\n drivers/baseband/acc100/rte_acc_common_cfg.h |  101 +\n 7 files changed, 1981 insertions(+), 1731 deletions(-)\n create mode 100644 drivers/baseband/acc100/acc_common.h\n create mode 100644 drivers/baseband/acc100/rte_acc_common_cfg.h",
    "diff": "diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c\nindex 5d07670ed3..af9ceca67b 100644\n--- a/app/test-bbdev/test_bbdev_perf.c\n+++ b/app/test-bbdev/test_bbdev_perf.c\n@@ -708,18 +708,18 @@ add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,\n #ifdef RTE_BASEBAND_ACC100\n \tif ((get_init_device() == true) &&\n \t\t\t(!strcmp(info->drv.driver_name, ACC100PF_DRIVER_NAME))) {\n-\t\tstruct rte_acc100_conf conf;\n+\t\tstruct rte_acc_conf conf;\n \t\tunsigned int i;\n \n \t\tprintf(\"Configure ACC100/ACC101 FEC Driver %s with default values\\n\",\n \t\t\t\tinfo->drv.driver_name);\n \n \t\t/* clear default configuration before initialization */\n-\t\tmemset(&conf, 0, sizeof(struct rte_acc100_conf));\n+\t\tmemset(&conf, 0, sizeof(struct rte_acc_conf));\n \n \t\t/* Always set in PF mode for built-in configuration */\n \t\tconf.pf_mode_en = true;\n-\t\tfor (i = 0; i < RTE_ACC100_NUM_VFS; ++i) {\n+\t\tfor (i = 0; i < RTE_ACC_NUM_VFS; ++i) {\n \t\t\tconf.arb_dl_4g[i].gbr_threshold1 = ACC100_QOS_GBR;\n \t\t\tconf.arb_dl_4g[i].gbr_threshold1 = ACC100_QOS_GBR;\n \t\t\tconf.arb_dl_4g[i].round_robin_weight = ACC100_QMGR_RR;\ndiff --git a/drivers/baseband/acc100/acc100_pmd.h b/drivers/baseband/acc100/acc100_pmd.h\nindex 0c9810ca56..b325948904 100644\n--- a/drivers/baseband/acc100/acc100_pmd.h\n+++ b/drivers/baseband/acc100/acc100_pmd.h\n@@ -8,6 +8,7 @@\n #include \"acc100_pf_enum.h\"\n #include \"acc100_vf_enum.h\"\n #include \"rte_acc100_cfg.h\"\n+#include \"acc_common.h\"\n \n /* Helper macro for logging */\n #define rte_bbdev_log(level, fmt, ...) 
\\\n@@ -34,64 +35,18 @@\n #define ACC100_PF_DEVICE_ID        (0x0d5c)\n #define ACC100_VF_DEVICE_ID        (0x0d5d)\n \n-/* Values used in filling in descriptors */\n-#define ACC100_DMA_DESC_TYPE           2\n-#define ACC100_DMA_CODE_BLK_MODE       0\n-#define ACC100_DMA_BLKID_FCW           1\n-#define ACC100_DMA_BLKID_IN            2\n-#define ACC100_DMA_BLKID_OUT_ENC       1\n-#define ACC100_DMA_BLKID_OUT_HARD      1\n-#define ACC100_DMA_BLKID_OUT_SOFT      2\n-#define ACC100_DMA_BLKID_OUT_HARQ      3\n-#define ACC100_DMA_BLKID_IN_HARQ       3\n-\n-/* Values used in filling in decode FCWs */\n-#define ACC100_FCW_TD_VER              1\n-#define ACC100_FCW_TD_EXT_COLD_REG_EN  1\n-#define ACC100_FCW_TD_AUTOMAP          0x0f\n-#define ACC100_FCW_TD_RVIDX_0          2\n-#define ACC100_FCW_TD_RVIDX_1          26\n-#define ACC100_FCW_TD_RVIDX_2          50\n-#define ACC100_FCW_TD_RVIDX_3          74\n-\n /* Values used in writing to the registers */\n #define ACC100_REG_IRQ_EN_ALL          0x1FF83FF  /* Enable all interrupts */\n \n-/* ACC100 Specific Dimensioning */\n-#define ACC100_SIZE_64MBYTE            (64*1024*1024)\n-/* Number of elements in an Info Ring */\n-#define ACC100_INFO_RING_NUM_ENTRIES   1024\n-/* Number of elements in HARQ layout memory */\n-#define ACC100_HARQ_LAYOUT             (64*1024*1024)\n-/* Assume offset for HARQ in memory */\n-#define ACC100_HARQ_OFFSET             (32*1024)\n-#define ACC100_HARQ_OFFSET_SHIFT       15\n-#define ACC100_HARQ_OFFSET_MASK        0x7ffffff\n-/* Mask used to calculate an index in an Info Ring array (not a byte offset) */\n-#define ACC100_INFO_RING_MASK          (ACC100_INFO_RING_NUM_ENTRIES-1)\n /* Number of Virtual Functions ACC100 supports */\n #define ACC100_NUM_VFS                  16\n #define ACC100_NUM_QGRPS                8\n-#define ACC100_NUM_QGRPS_PER_WORD       8\n #define ACC100_NUM_AQS                  16\n-#define MAX_ENQ_BATCH_SIZE              255\n-/* All ACC100 Registers alignment are 32bits = 4B 
*/\n-#define ACC100_BYTES_IN_WORD                 4\n-#define ACC100_MAX_E_MBUF                64000\n \n #define ACC100_GRP_ID_SHIFT    10 /* Queue Index Hierarchy */\n #define ACC100_VF_ID_SHIFT     4  /* Queue Index Hierarchy */\n-#define ACC100_VF_OFFSET_QOS   16 /* offset in Memory specific to QoS Mon */\n-#define ACC100_TMPL_PRI_0      0x03020100\n-#define ACC100_TMPL_PRI_1      0x07060504\n-#define ACC100_TMPL_PRI_2      0x0b0a0908\n-#define ACC100_TMPL_PRI_3      0x0f0e0d0c\n-#define ACC100_QUEUE_ENABLE    0x80000000  /* Bit to mark Queue as Enabled */\n #define ACC100_WORDS_IN_ARAM_SIZE (128 * 1024 / 4)\n-#define ACC100_FDONE    0x80000000\n-#define ACC100_SDONE    0x40000000\n \n-#define ACC100_NUM_TMPL       32\n /* Mapping of signals for the available engines */\n #define ACC100_SIG_UL_5G      0\n #define ACC100_SIG_UL_5G_LAST 7\n@@ -102,50 +57,10 @@\n #define ACC100_SIG_DL_4G      27\n #define ACC100_SIG_DL_4G_LAST 31\n #define ACC100_NUM_ACCS       5\n-#define ACC100_ACCMAP_0       0\n-#define ACC100_ACCMAP_1       2\n-#define ACC100_ACCMAP_2       1\n-#define ACC100_ACCMAP_3       3\n-#define ACC100_ACCMAP_4       4\n-#define ACC100_PF_VAL         2\n-\n-/* max number of iterations to allocate memory block for all rings */\n-#define ACC100_SW_RING_MEM_ALLOC_ATTEMPTS 5\n-#define ACC100_MAX_QUEUE_DEPTH            1024\n-#define ACC100_DMA_MAX_NUM_POINTERS       14\n-#define ACC100_DMA_MAX_NUM_POINTERS_IN    7\n-#define ACC100_DMA_DESC_PADDING           8\n-#define ACC100_FCW_PADDING                12\n-#define ACC100_DESC_FCW_OFFSET            192\n-#define ACC100_DESC_SIZE                  256\n-#define ACC100_DESC_OFFSET                (ACC100_DESC_SIZE / 64)\n-#define ACC100_FCW_TE_BLEN                32\n-#define ACC100_FCW_TD_BLEN                24\n-#define ACC100_FCW_LE_BLEN                32\n-#define ACC100_FCW_LD_BLEN                36\n-#define ACC100_5GUL_SIZE_0                16\n-#define ACC100_5GUL_SIZE_1                40\n-#define 
ACC100_5GUL_OFFSET_0              36\n \n-#define ACC100_FCW_VER         2\n-#define ACC100_MUX_5GDL_DESC   6\n-#define ACC100_CMP_ENC_SIZE    20\n-#define ACC100_CMP_DEC_SIZE    24\n-#define ACC100_ENC_OFFSET     (32)\n-#define ACC100_DEC_OFFSET     (80)\n #define ACC100_EXT_MEM /* Default option with memory external to CPU */\n #define ACC100_HARQ_OFFSET_THRESHOLD 1024\n \n-/* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */\n-#define ACC100_N_ZC_1 66 /* N = 66 Zc for BG 1 */\n-#define ACC100_N_ZC_2 50 /* N = 50 Zc for BG 2 */\n-#define ACC100_K0_1_1 17 /* K0 fraction numerator for rv 1 and BG 1 */\n-#define ACC100_K0_1_2 13 /* K0 fraction numerator for rv 1 and BG 2 */\n-#define ACC100_K0_2_1 33 /* K0 fraction numerator for rv 2 and BG 1 */\n-#define ACC100_K0_2_2 25 /* K0 fraction numerator for rv 2 and BG 2 */\n-#define ACC100_K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */\n-#define ACC100_K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */\n-\n /* ACC100 Configuration */\n #define ACC100_DDR_ECC_ENABLE\n #define ACC100_CFG_DMA_ERROR    0x3D7\n@@ -159,12 +74,10 @@\n #define ACC100_PCIE_QUAD_OFFSET 0x2000\n #define ACC100_PCS_EQ           0x6007\n #define ACC100_ADAPT            0x8400\n-#define ACC100_ENGINE_OFFSET    0x1000\n #define ACC100_RESET_HI         0x20100\n #define ACC100_RESET_LO         0x20000\n #define ACC100_RESET_HARD       0x1FF\n #define ACC100_ENGINES_MAX      9\n-#define ACC100_LONG_WAIT        1000\n #define ACC100_GPEX_AXIMAP_NUM  17\n #define ACC100_CLOCK_GATING_EN  0x30000\n #define ACC100_FABRIC_MODE      0xB\n@@ -173,292 +86,8 @@\n  */\n #define ACC100_HARQ_DDR         (512 * 1)\n #define ACC100_PRQ_DDR_VER       0x10092020\n-#define ACC100_MS_IN_US         (1000)\n #define ACC100_DDR_TRAINING_MAX (5000)\n \n-/* ACC100 DMA Descriptor triplet */\n-struct acc100_dma_triplet {\n-\tuint64_t address;\n-\tuint32_t blen:20,\n-\t\tres0:4,\n-\t\tlast:1,\n-\t\tdma_ext:1,\n-\t\tres1:2,\n-\t\tblkid:4;\n-} 
__rte_packed;\n-\n-/* ACC100 DMA Response Descriptor */\n-union acc100_dma_rsp_desc {\n-\tuint32_t val;\n-\tstruct {\n-\t\tuint32_t crc_status:1,\n-\t\t\tsynd_ok:1,\n-\t\t\tdma_err:1,\n-\t\t\tneg_stop:1,\n-\t\t\tfcw_err:1,\n-\t\t\toutput_err:1,\n-\t\t\tinput_err:1,\n-\t\t\ttimestampEn:1,\n-\t\t\titerCountFrac:8,\n-\t\t\titer_cnt:8,\n-\t\t\trsrvd3:6,\n-\t\t\tsdone:1,\n-\t\t\tfdone:1;\n-\t\tuint32_t add_info_0;\n-\t\tuint32_t add_info_1;\n-\t};\n-};\n-\n-\n-/* ACC100 Queue Manager Enqueue PCI Register */\n-union acc100_enqueue_reg_fmt {\n-\tuint32_t val;\n-\tstruct {\n-\t\tuint32_t num_elem:8,\n-\t\t\taddr_offset:3,\n-\t\t\trsrvd:1,\n-\t\t\treq_elem_addr:20;\n-\t};\n-};\n-\n-/* FEC 4G Uplink Frame Control Word */\n-struct __rte_packed acc100_fcw_td {\n-\tuint8_t fcw_ver:4,\n-\t\tnum_maps:4; /* Unused */\n-\tuint8_t filler:6, /* Unused */\n-\t\trsrvd0:1,\n-\t\tbypass_sb_deint:1;\n-\tuint16_t k_pos;\n-\tuint16_t k_neg; /* Unused */\n-\tuint8_t c_neg; /* Unused */\n-\tuint8_t c; /* Unused */\n-\tuint32_t ea; /* Unused */\n-\tuint32_t eb; /* Unused */\n-\tuint8_t cab; /* Unused */\n-\tuint8_t k0_start_col; /* Unused */\n-\tuint8_t rsrvd1;\n-\tuint8_t code_block_mode:1, /* Unused */\n-\t\tturbo_crc_type:1,\n-\t\trsrvd2:3,\n-\t\tbypass_teq:1, /* Unused */\n-\t\tsoft_output_en:1, /* Unused */\n-\t\text_td_cold_reg_en:1;\n-\tunion { /* External Cold register */\n-\t\tuint32_t ext_td_cold_reg;\n-\t\tstruct {\n-\t\t\tuint32_t min_iter:4, /* Unused */\n-\t\t\t\tmax_iter:4,\n-\t\t\t\text_scale:5, /* Unused */\n-\t\t\t\trsrvd3:3,\n-\t\t\t\tearly_stop_en:1, /* Unused */\n-\t\t\t\tsw_soft_out_dis:1, /* Unused */\n-\t\t\t\tsw_et_cont:1, /* Unused */\n-\t\t\t\tsw_soft_out_saturation:1, /* Unused */\n-\t\t\t\thalf_iter_on:1, /* Unused */\n-\t\t\t\traw_decoder_input_on:1, /* Unused */\n-\t\t\t\trsrvd4:10;\n-\t\t};\n-\t};\n-};\n-\n-/* FEC 5GNR Uplink Frame Control Word */\n-struct __rte_packed acc100_fcw_ld {\n-\tuint32_t 
FCWversion:4,\n-\t\tqm:4,\n-\t\tnfiller:11,\n-\t\tBG:1,\n-\t\tZc:9,\n-\t\tres0:1,\n-\t\tsynd_precoder:1,\n-\t\tsynd_post:1;\n-\tuint32_t ncb:16,\n-\t\tk0:16;\n-\tuint32_t rm_e:24,\n-\t\thcin_en:1,\n-\t\thcout_en:1,\n-\t\tcrc_select:1,\n-\t\tbypass_dec:1,\n-\t\tbypass_intlv:1,\n-\t\tso_en:1,\n-\t\tso_bypass_rm:1,\n-\t\tso_bypass_intlv:1;\n-\tuint32_t hcin_offset:16,\n-\t\thcin_size0:16;\n-\tuint32_t hcin_size1:16,\n-\t\thcin_decomp_mode:3,\n-\t\tllr_pack_mode:1,\n-\t\thcout_comp_mode:3,\n-\t\tres2:1,\n-\t\tdec_convllr:4,\n-\t\thcout_convllr:4;\n-\tuint32_t itmax:7,\n-\t\titstop:1,\n-\t\tso_it:7,\n-\t\tres3:1,\n-\t\thcout_offset:16;\n-\tuint32_t hcout_size0:16,\n-\t\thcout_size1:16;\n-\tuint32_t gain_i:8,\n-\t\tgain_h:8,\n-\t\tnegstop_th:16;\n-\tuint32_t negstop_it:7,\n-\t\tnegstop_en:1,\n-\t\tres4:24;\n-};\n-\n-/* FEC 4G Downlink Frame Control Word */\n-struct __rte_packed acc100_fcw_te {\n-\tuint16_t k_neg;\n-\tuint16_t k_pos;\n-\tuint8_t c_neg;\n-\tuint8_t c;\n-\tuint8_t filler;\n-\tuint8_t cab;\n-\tuint32_t ea:17,\n-\t\trsrvd0:15;\n-\tuint32_t eb:17,\n-\t\trsrvd1:15;\n-\tuint16_t ncb_neg;\n-\tuint16_t ncb_pos;\n-\tuint8_t rv_idx0:2,\n-\t\trsrvd2:2,\n-\t\trv_idx1:2,\n-\t\trsrvd3:2;\n-\tuint8_t bypass_rv_idx0:1,\n-\t\tbypass_rv_idx1:1,\n-\t\tbypass_rm:1,\n-\t\trsrvd4:5;\n-\tuint8_t rsrvd5:1,\n-\t\trsrvd6:3,\n-\t\tcode_block_crc:1,\n-\t\trsrvd7:3;\n-\tuint8_t code_block_mode:1,\n-\t\trsrvd8:7;\n-\tuint64_t rsrvd9;\n-};\n-\n-/* FEC 5GNR Downlink Frame Control Word */\n-struct __rte_packed acc100_fcw_le {\n-\tuint32_t FCWversion:4,\n-\t\tqm:4,\n-\t\tnfiller:11,\n-\t\tBG:1,\n-\t\tZc:9,\n-\t\tres0:3;\n-\tuint32_t ncb:16,\n-\t\tk0:16;\n-\tuint32_t rm_e:24,\n-\t\tres1:2,\n-\t\tcrc_select:1,\n-\t\tres2:1,\n-\t\tbypass_intlv:1,\n-\t\tres3:3;\n-\tuint32_t res4_a:12,\n-\t\tmcb_count:3,\n-\t\tres4_b:17;\n-\tuint32_t res5;\n-\tuint32_t res6;\n-\tuint32_t res7;\n-\tuint32_t res8;\n-};\n-\n-/* ACC100 DMA Request Descriptor */\n-struct __rte_packed acc100_dma_req_desc {\n-\tunion 
{\n-\t\tstruct{\n-\t\t\tuint32_t type:4,\n-\t\t\t\trsrvd0:26,\n-\t\t\t\tsdone:1,\n-\t\t\t\tfdone:1;\n-\t\t\tuint32_t rsrvd1;\n-\t\t\tuint32_t rsrvd2;\n-\t\t\tuint32_t pass_param:8,\n-\t\t\t\tsdone_enable:1,\n-\t\t\t\tirq_enable:1,\n-\t\t\t\ttimeStampEn:1,\n-\t\t\t\tres0:5,\n-\t\t\t\tnumCBs:4,\n-\t\t\t\tres1:4,\n-\t\t\t\tm2dlen:4,\n-\t\t\t\td2mlen:4;\n-\t\t};\n-\t\tstruct{\n-\t\t\tuint32_t word0;\n-\t\t\tuint32_t word1;\n-\t\t\tuint32_t word2;\n-\t\t\tuint32_t word3;\n-\t\t};\n-\t};\n-\tstruct acc100_dma_triplet data_ptrs[ACC100_DMA_MAX_NUM_POINTERS];\n-\n-\t/* Virtual addresses used to retrieve SW context info */\n-\tunion {\n-\t\tvoid *op_addr;\n-\t\tuint64_t pad1;  /* pad to 64 bits */\n-\t};\n-\t/*\n-\t * Stores additional information needed for driver processing:\n-\t * - last_desc_in_batch - flag used to mark last descriptor (CB)\n-\t *                        in batch\n-\t * - cbs_in_tb - stores information about total number of Code Blocks\n-\t *               in currently processed Transport Block\n-\t */\n-\tunion {\n-\t\tstruct {\n-\t\t\tunion {\n-\t\t\t\tstruct acc100_fcw_ld fcw_ld;\n-\t\t\t\tstruct acc100_fcw_td fcw_td;\n-\t\t\t\tstruct acc100_fcw_le fcw_le;\n-\t\t\t\tstruct acc100_fcw_te fcw_te;\n-\t\t\t\tuint32_t pad2[ACC100_FCW_PADDING];\n-\t\t\t};\n-\t\t\tuint32_t last_desc_in_batch :8,\n-\t\t\t\tcbs_in_tb:8,\n-\t\t\t\tpad4 : 16;\n-\t\t};\n-\t\tuint64_t pad3[ACC100_DMA_DESC_PADDING]; /* pad to 64 bits */\n-\t};\n-};\n-\n-/* ACC100 DMA Descriptor */\n-union acc100_dma_desc {\n-\tstruct acc100_dma_req_desc req;\n-\tunion acc100_dma_rsp_desc rsp;\n-\tuint64_t atom_hdr;\n-};\n-\n-\n-/* Union describing Info Ring entry */\n-union acc100_harq_layout_data {\n-\tuint32_t val;\n-\tstruct {\n-\t\tuint16_t offset;\n-\t\tuint16_t size0;\n-\t};\n-} __rte_packed;\n-\n-\n-/* Union describing Info Ring entry */\n-union acc100_info_ring_data {\n-\tuint32_t val;\n-\tstruct {\n-\t\tunion {\n-\t\t\tuint16_t detailed_info;\n-\t\t\tstruct {\n-\t\t\t\tuint16_t aq_id: 
4;\n-\t\t\t\tuint16_t qg_id: 4;\n-\t\t\t\tuint16_t vf_id: 6;\n-\t\t\t\tuint16_t reserved: 2;\n-\t\t\t};\n-\t\t};\n-\t\tuint16_t int_nb: 7;\n-\t\tuint16_t msi_0: 1;\n-\t\tuint16_t vf2pf: 6;\n-\t\tuint16_t loop: 1;\n-\t\tuint16_t valid: 1;\n-\t};\n-} __rte_packed;\n-\n struct acc100_registry_addr {\n \tunsigned int dma_ring_dl5g_hi;\n \tunsigned int dma_ring_dl5g_lo;\n@@ -545,80 +174,4 @@ static const struct acc100_registry_addr vf_reg_addr = {\n \t.ddr_range = HWVfDmaDdrBaseRangeRoVf,\n };\n \n-/* Structure associated with each queue. */\n-struct __rte_cache_aligned acc100_queue {\n-\tunion acc100_dma_desc *ring_addr;  /* Virtual address of sw ring */\n-\trte_iova_t ring_addr_iova;  /* IOVA address of software ring */\n-\tuint32_t sw_ring_head;  /* software ring head */\n-\tuint32_t sw_ring_tail;  /* software ring tail */\n-\t/* software ring size (descriptors, not bytes) */\n-\tuint32_t sw_ring_depth;\n-\t/* mask used to wrap enqueued descriptors on the sw ring */\n-\tuint32_t sw_ring_wrap_mask;\n-\t/* MMIO register used to enqueue descriptors */\n-\tvoid *mmio_reg_enqueue;\n-\tuint8_t vf_id;  /* VF ID (max = 63) */\n-\tuint8_t qgrp_id;  /* Queue Group ID */\n-\tuint16_t aq_id;  /* Atomic Queue ID */\n-\tuint16_t aq_depth;  /* Depth of atomic queue */\n-\tuint32_t aq_enqueued;  /* Count how many \"batches\" have been enqueued */\n-\tuint32_t aq_dequeued;  /* Count how many \"batches\" have been dequeued */\n-\tuint32_t irq_enable;  /* Enable ops dequeue interrupts if set to 1 */\n-\tstruct rte_mempool *fcw_mempool;  /* FCW mempool */\n-\tenum rte_bbdev_op_type op_type;  /* Type of this Queue: TE or TD */\n-\t/* Internal Buffers for loopback input */\n-\tuint8_t *lb_in;\n-\tuint8_t *lb_out;\n-\trte_iova_t lb_in_addr_iova;\n-\trte_iova_t lb_out_addr_iova;\n-\tstruct acc100_device *d;\n-};\n-\n-typedef void (*acc10x_fcw_ld_fill_fun_t)(struct rte_bbdev_dec_op *op,\n-\t\tstruct acc100_fcw_ld *fcw,\n-\t\tunion acc100_harq_layout_data *harq_layout);\n-\n-/* Private data 
structure for each ACC100 device */\n-struct acc100_device {\n-\tvoid *mmio_base;  /**< Base address of MMIO registers (BAR0) */\n-\tvoid *sw_rings_base;  /* Base addr of un-aligned memory for sw rings */\n-\tvoid *sw_rings;  /* 64MBs of 64MB aligned memory for sw rings */\n-\trte_iova_t sw_rings_iova;  /* IOVA address of sw_rings */\n-\t/* Virtual address of the info memory routed to the this function under\n-\t * operation, whether it is PF or VF.\n-\t * HW may DMA information data at this location asynchronously\n-\t */\n-\tunion acc100_info_ring_data *info_ring;\n-\n-\tunion acc100_harq_layout_data *harq_layout;\n-\t/* Virtual Info Ring head */\n-\tuint16_t info_ring_head;\n-\t/* Number of bytes available for each queue in device, depending on\n-\t * how many queues are enabled with configure()\n-\t */\n-\tuint32_t sw_ring_size;\n-\tuint32_t ddr_size; /* Size in kB */\n-\tuint32_t *tail_ptrs; /* Base address of response tail pointer buffer */\n-\trte_iova_t tail_ptr_iova; /* IOVA address of tail pointers */\n-\t/* Max number of entries available for each queue in device, depending\n-\t * on how many queues are enabled with configure()\n-\t */\n-\tuint32_t sw_ring_max_depth;\n-\tstruct rte_acc100_conf acc100_conf; /* ACC100 Initial configuration */\n-\t/* Bitmap capturing which Queues have already been assigned */\n-\tuint16_t q_assigned_bit_map[ACC100_NUM_QGRPS];\n-\tbool pf_device; /**< True if this is a PF ACC100 device */\n-\tbool configured; /**< True if this ACC100 device is configured */\n-\tuint16_t device_variant;  /**< Device variant */\n-\tacc10x_fcw_ld_fill_fun_t fcw_ld_fill;  /**< 5GUL FCW generation function */\n-};\n-\n-/**\n- * Structure with details about RTE_BBDEV_EVENT_DEQUEUE event. 
It's passed to\n- * the callback function.\n- */\n-struct acc100_deq_intr_details {\n-\tuint16_t queue_id;\n-};\n-\n #endif /* _RTE_ACC100_PMD_H_ */\ndiff --git a/drivers/baseband/acc100/acc101_pmd.h b/drivers/baseband/acc100/acc101_pmd.h\nindex 9d8862ce36..37df008330 100644\n--- a/drivers/baseband/acc100/acc101_pmd.h\n+++ b/drivers/baseband/acc100/acc101_pmd.h\n@@ -11,16 +11,9 @@\n #define ACC101_NUM_VFS                  16\n #define ACC101_NUM_QGRPS                8\n #define ACC101_NUM_AQS                  16\n-/* All ACC101 Registers alignment are 32bits = 4B */\n-#define ACC101_BYTES_IN_WORD                 4\n \n-#define ACC101_TMPL_PRI_0      0x03020100\n-#define ACC101_TMPL_PRI_1      0x07060504\n-#define ACC101_TMPL_PRI_2      0x0b0a0908\n-#define ACC101_TMPL_PRI_3      0x0f0e0d0c\n #define ACC101_WORDS_IN_ARAM_SIZE (128 * 1024 / 4)\n \n-#define ACC101_NUM_TMPL       32\n /* Mapping of signals for the available engines */\n #define ACC101_SIG_UL_5G      0\n #define ACC101_SIG_UL_5G_LAST 8\n@@ -31,7 +24,6 @@\n #define ACC101_SIG_DL_4G      27\n #define ACC101_SIG_DL_4G_LAST 31\n #define ACC101_NUM_ACCS       5\n-#define ACC101_PF_VAL         2\n \n /* ACC101 Configuration */\n #define ACC101_CFG_DMA_ERROR    0x3D7\n@@ -39,8 +31,6 @@\n #define ACC101_CFG_QMGR_HI_P    0x0F0F\n #define ACC101_CFG_PCI_AXI      0xC003\n #define ACC101_CFG_PCI_BRIDGE   0x40006033\n-#define ACC101_ENGINE_OFFSET    0x1000\n-#define ACC101_LONG_WAIT        1000\n #define ACC101_GPEX_AXIMAP_NUM  17\n #define ACC101_CLOCK_GATING_EN  0x30000\n #define ACC101_DMA_INBOUND      0x104\ndiff --git a/drivers/baseband/acc100/acc_common.h b/drivers/baseband/acc100/acc_common.h\nnew file mode 100644\nindex 0000000000..ae8de9ed89\n--- /dev/null\n+++ b/drivers/baseband/acc100/acc_common.h\n@@ -0,0 +1,1303 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#ifndef _ACC_COMMON_H_\n+#define _ACC_COMMON_H_\n+\n+#include \"rte_acc_common_cfg.h\"\n+\n+/* 
Values used in filling in descriptors */\n+#define ACC_DMA_DESC_TYPE           2\n+#define ACC_DMA_BLKID_FCW           1\n+#define ACC_DMA_BLKID_IN            2\n+#define ACC_DMA_BLKID_OUT_ENC       1\n+#define ACC_DMA_BLKID_OUT_HARD      1\n+#define ACC_DMA_BLKID_OUT_SOFT      2\n+#define ACC_DMA_BLKID_OUT_HARQ      3\n+#define ACC_DMA_BLKID_IN_HARQ       3\n+#define ACC_DMA_BLKID_IN_MLD_R      3\n+\n+/* Values used in filling in decode FCWs */\n+#define ACC_FCW_TD_VER              1\n+#define ACC_FCW_TD_EXT_COLD_REG_EN  1\n+#define ACC_FCW_TD_AUTOMAP          0x0f\n+#define ACC_FCW_TD_RVIDX_0          2\n+#define ACC_FCW_TD_RVIDX_1          26\n+#define ACC_FCW_TD_RVIDX_2          50\n+#define ACC_FCW_TD_RVIDX_3          74\n+\n+#define ACC_SIZE_64MBYTE            (64*1024*1024)\n+/* Number of elements in an Info Ring */\n+#define ACC_INFO_RING_NUM_ENTRIES   1024\n+/* Number of elements in HARQ layout memory\n+ * 128M x 32kB = 4GB addressable memory\n+ */\n+#define ACC_HARQ_LAYOUT             (128 * 1024 * 1024)\n+/* Assume offset for HARQ in memory */\n+#define ACC_HARQ_OFFSET             (32 * 1024)\n+#define ACC_HARQ_OFFSET_SHIFT       15\n+#define ACC_HARQ_OFFSET_MASK        0x7ffffff\n+#define ACC_HARQ_OFFSET_THRESHOLD   1024\n+/* Mask used to calculate an index in an Info Ring array (not a byte offset) */\n+#define ACC_INFO_RING_MASK          (ACC_INFO_RING_NUM_ENTRIES-1)\n+\n+#define MAX_ENQ_BATCH_SIZE              255\n+\n+/* All ACC100 Registers alignment are 32bits = 4B */\n+#define ACC_BYTES_IN_WORD                 4\n+#define ACC_MAX_E_MBUF                64000\n+\n+#define ACC_VF_OFFSET_QOS   16 /* offset in Memory specific to QoS Mon */\n+#define ACC_TMPL_PRI_0      0x03020100\n+#define ACC_TMPL_PRI_1      0x07060504\n+#define ACC_TMPL_PRI_2      0x0b0a0908\n+#define ACC_TMPL_PRI_3      0x0f0e0d0c\n+#define ACC_TMPL_PRI_4      0x13121110\n+#define ACC_TMPL_PRI_5      0x17161514\n+#define ACC_TMPL_PRI_6      0x1b1a1918\n+#define ACC_TMPL_PRI_7      
0x1f1e1d1c\n+#define ACC_QUEUE_ENABLE    0x80000000  /* Bit to mark Queue as Enabled */\n+#define ACC_FDONE           0x80000000\n+#define ACC_SDONE           0x40000000\n+\n+#define ACC_NUM_TMPL       32\n+\n+#define ACC_ACCMAP_0       0\n+#define ACC_ACCMAP_1       2\n+#define ACC_ACCMAP_2       1\n+#define ACC_ACCMAP_3       3\n+#define ACC_ACCMAP_4       4\n+#define ACC_ACCMAP_5       5\n+#define ACC_PF_VAL         2\n+\n+/* max number of iterations to allocate memory block for all rings */\n+#define ACC_SW_RING_MEM_ALLOC_ATTEMPTS 5\n+#define ACC_MAX_QUEUE_DEPTH            1024\n+#define ACC_DMA_MAX_NUM_POINTERS       14\n+#define ACC_DMA_MAX_NUM_POINTERS_IN    7\n+#define ACC_DMA_DESC_PADDINGS          8\n+#define ACC_FCW_PADDING                12\n+#define ACC_DESC_FCW_OFFSET            192\n+#define ACC_DESC_SIZE                  256\n+#define ACC_DESC_OFFSET                (ACC_DESC_SIZE / 64)\n+#define ACC_FCW_TE_BLEN                32\n+#define ACC_FCW_TD_BLEN                24\n+#define ACC_FCW_LE_BLEN                32\n+#define ACC_FCW_LD_BLEN                36\n+#define ACC_FCW_FFT_BLEN               28\n+#define ACC_5GUL_SIZE_0                16\n+#define ACC_5GUL_SIZE_1                40\n+#define ACC_5GUL_OFFSET_0              36\n+#define ACC_COMPANION_PTRS             8\n+#define ACC_FCW_VER                    2\n+#define ACC_MUX_5GDL_DESC              6\n+#define ACC_CMP_ENC_SIZE               20\n+#define ACC_CMP_DEC_SIZE               24\n+#define ACC_ENC_OFFSET                (32)\n+#define ACC_DEC_OFFSET                (80)\n+#define ACC_LIMIT_DL_MUX_BITS          534\n+#define ACC_NUM_QGRPS_PER_WORD         8\n+#define ACC_MAX_NUM_QGRPS              32\n+\n+/* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */\n+#define ACC_N_ZC_1 66 /* N = 66 Zc for BG 1 */\n+#define ACC_N_ZC_2 50 /* N = 50 Zc for BG 2 */\n+#define ACC_K_ZC_1 22 /* K = 22 Zc for BG 1 */\n+#define ACC_K_ZC_2 10 /* K = 10 Zc for BG 2 */\n+#define ACC_K0_1_1 17 
/* K0 fraction numerator for rv 1 and BG 1 */\n+#define ACC_K0_1_2 13 /* K0 fraction numerator for rv 1 and BG 2 */\n+#define ACC_K0_2_1 33 /* K0 fraction numerator for rv 2 and BG 1 */\n+#define ACC_K0_2_2 25 /* K0 fraction numerator for rv 2 and BG 2 */\n+#define ACC_K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */\n+#define ACC_K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */\n+\n+#define ACC_ENGINE_OFFSET    0x1000\n+#define ACC_LONG_WAIT        1000\n+#define ACC_MS_IN_US         (1000)\n+\n+#define ACC_ALGO_SPA                0\n+#define ACC_ALGO_MSA                1\n+\n+/* Helper macro for logging */\n+#define rte_acc_log(level, fmt, ...) \\\n+\trte_log(RTE_LOG_ ## level, RTE_LOG_NOTICE, fmt \"\\n\", \\\n+\t\t##__VA_ARGS__)\n+\n+/* ACC100 DMA Descriptor triplet */\n+struct acc_dma_triplet {\n+\tuint64_t address;\n+\tuint32_t blen:20,\n+\t\tres0:4,\n+\t\tlast:1,\n+\t\tdma_ext:1,\n+\t\tres1:2,\n+\t\tblkid:4;\n+} __rte_packed;\n+\n+\n+/* ACC100 Queue Manager Enqueue PCI Register */\n+union acc_enqueue_reg_fmt {\n+\tuint32_t val;\n+\tstruct {\n+\t\tuint32_t num_elem:8,\n+\t\t\taddr_offset:3,\n+\t\t\trsrvd:1,\n+\t\t\treq_elem_addr:20;\n+\t};\n+};\n+\n+/* FEC 4G Uplink Frame Control Word */\n+struct __rte_packed acc_fcw_td {\n+\tuint8_t fcw_ver:4,\n+\t\tnum_maps:4; /* Unused in ACC100 */\n+\tuint8_t filler:6, /* Unused in ACC100 */\n+\t\trsrvd0:1,\n+\t\tbypass_sb_deint:1;\n+\tuint16_t k_pos;\n+\tuint16_t k_neg; /* Unused in ACC100 */\n+\tuint8_t c_neg; /* Unused in ACC100 */\n+\tuint8_t c; /* Unused in ACC100 */\n+\tuint32_t ea; /* Unused in ACC100 */\n+\tuint32_t eb; /* Unused in ACC100 */\n+\tuint8_t cab; /* Unused in ACC100 */\n+\tuint8_t k0_start_col; /* Unused in ACC100 */\n+\tuint8_t rsrvd1;\n+\tuint8_t code_block_mode:1, /* Unused in ACC100 */\n+\t\tturbo_crc_type:1,\n+\t\trsrvd2:3,\n+\t\tbypass_teq:1, /* Unused in ACC100 */\n+\t\tsoft_output_en:1, /* Unused in ACC100 */\n+\t\text_td_cold_reg_en:1;\n+\tunion { /* External Cold register 
*/\n+\t\tuint32_t ext_td_cold_reg;\n+\t\tstruct {\n+\t\t\tuint32_t min_iter:4, /* Unused in ACC100 */\n+\t\t\t\tmax_iter:4,\n+\t\t\t\text_scale:5, /* Unused in ACC100 */\n+\t\t\t\trsrvd3:3,\n+\t\t\t\tearly_stop_en:1, /* Unused in ACC100 */\n+\t\t\t\tsw_soft_out_dis:1, /* Unused in ACC100 */\n+\t\t\t\tsw_et_cont:1, /* Unused in ACC100 */\n+\t\t\t\tsw_soft_out_saturation:1, /* Unused in ACC100 */\n+\t\t\t\thalf_iter_on:1, /* Unused in ACC100 */\n+\t\t\t\traw_decoder_input_on:1, /* Unused in ACC100 */\n+\t\t\t\trsrvd4:10;\n+\t\t};\n+\t};\n+};\n+\n+/* FEC 4G Downlink Frame Control Word */\n+struct __rte_packed acc_fcw_te {\n+\tuint16_t k_neg;\n+\tuint16_t k_pos;\n+\tuint8_t c_neg;\n+\tuint8_t c;\n+\tuint8_t filler;\n+\tuint8_t cab;\n+\tuint32_t ea:17,\n+\t\trsrvd0:15;\n+\tuint32_t eb:17,\n+\t\trsrvd1:15;\n+\tuint16_t ncb_neg;\n+\tuint16_t ncb_pos;\n+\tuint8_t rv_idx0:2,\n+\t\trsrvd2:2,\n+\t\trv_idx1:2,\n+\t\trsrvd3:2;\n+\tuint8_t bypass_rv_idx0:1,\n+\t\tbypass_rv_idx1:1,\n+\t\tbypass_rm:1,\n+\t\trsrvd4:5;\n+\tuint8_t rsrvd5:1,\n+\t\trsrvd6:3,\n+\t\tcode_block_crc:1,\n+\t\trsrvd7:3;\n+\tuint8_t code_block_mode:1,\n+\t\trsrvd8:7;\n+\tuint64_t rsrvd9;\n+};\n+\n+/* FEC 5GNR Downlink Frame Control Word */\n+struct __rte_packed acc_fcw_le {\n+\tuint32_t FCWversion:4,\n+\t\tqm:4,\n+\t\tnfiller:11,\n+\t\tBG:1,\n+\t\tZc:9,\n+\t\tres0:3;\n+\tuint32_t ncb:16,\n+\t\tk0:16;\n+\tuint32_t rm_e:22,\n+\t\tres1:4,\n+\t\tcrc_select:1,\n+\t\tres2:1,\n+\t\tbypass_intlv:1,\n+\t\tres3:3;\n+\tuint32_t res4_a:12,\n+\t\tmcb_count:3,\n+\t\tres4_b:1,\n+\t\tC:8,\n+\t\tCab:8;\n+\tuint32_t rm_e_b:22,\n+\t\tres5:10;\n+\tuint32_t res6;\n+\tuint32_t res7;\n+\tuint32_t res8;\n+};\n+\n+/* FEC 5GNR Uplink Frame Control Word */\n+struct __rte_packed acc_fcw_ld {\n+\tuint32_t FCWversion:4,\n+\t\tqm:4,\n+\t\tnfiller:11,\n+\t\tBG:1,\n+\t\tZc:9,\n+\t\tcnu_algo:1, /* Not supported in ACC100 */\n+\t\tsynd_precoder:1,\n+\t\tsynd_post:1;\n+\tuint32_t ncb:16,\n+\t\tk0:16;\n+\tuint32_t 
rm_e:24,\n+\t\thcin_en:1,\n+\t\thcout_en:1,\n+\t\tcrc_select:1,\n+\t\tbypass_dec:1,\n+\t\tbypass_intlv:1,\n+\t\tso_en:1,\n+\t\tso_bypass_rm:1,\n+\t\tso_bypass_intlv:1;\n+\tuint32_t hcin_offset:16,\n+\t\thcin_size0:16;\n+\tuint32_t hcin_size1:16,\n+\t\thcin_decomp_mode:3,\n+\t\tllr_pack_mode:1,\n+\t\thcout_comp_mode:3,\n+\t\tsaturate_input:1, /* Not supported in ACC200 */\n+\t\tdec_convllr:4,\n+\t\thcout_convllr:4;\n+\tuint32_t itmax:7,\n+\t\titstop:1,\n+\t\tso_it:7,\n+\t\tminsum_offset:1,  /* Not supported in ACC200 */\n+\t\thcout_offset:16;\n+\tuint32_t hcout_size0:16,\n+\t\thcout_size1:16;\n+\tuint32_t gain_i:8,\n+\t\tgain_h:8,\n+\t\tnegstop_th:16;\n+\tuint32_t negstop_it:7,\n+\t\tnegstop_en:1,\n+\t\ttb_crc_select:2, /* Not supported in ACC100 */\n+\t\tdec_llrclip:2,  /* Not supported in ACC200 */\n+\t\ttb_trailer_size:20; /* Not supported in ACC100 */\n+};\n+\n+/* FFT Frame Control Word */\n+struct __rte_packed acc_fcw_fft {\n+\tuint32_t in_frame_size:16,\n+\t\tleading_pad_size:16;\n+\tuint32_t out_frame_size:16,\n+\t\tleading_depad_size:16;\n+\tuint32_t cs_window_sel;\n+\tuint32_t cs_window_sel2:16,\n+\t\tcs_enable_bmap:16;\n+\tuint32_t num_antennas:8,\n+\t\tidft_size:8,\n+\t\tdft_size:8,\n+\t\tcs_offset:8;\n+\tuint32_t idft_shift:8,\n+\t\tdft_shift:8,\n+\t\tcs_multiplier:16;\n+\tuint32_t bypass:2,\n+\t\tfp16_in:1, /* Not supported in ACC200 */\n+\t\tfp16_out:1,\n+\t\texp_adj:4,\n+\t\tpower_shift:4,\n+\t\tpower_en:1,\n+\t\tres:19;\n+};\n+\n+/* MLD-TS Frame Control Word */\n+struct __rte_packed acc_fcw_mldts {\n+\tuint32_t fcw_version:4,\n+\t\tres0:12,\n+\t\tnrb:13, /* 1 to 1925 */\n+\t\tres1:3;\n+\tuint32_t NLayers:2, /* 1: 2L... 
3: 4L */\n+\t\tres2:14,\n+\t\tQmod0:2, /* 0: 2...3: 8 */\n+\t\tres3_0:2,\n+\t\tQmod1:2,\n+\t\tres3_1:2,\n+\t\tQmod2:2,\n+\t\tres3_2:2,\n+\t\tQmod3:2,\n+\t\tres3_3:2;\n+\tuint32_t Rrep:3, /* 0 to 5 */\n+\t\tres4:1,\n+\t\tCrep:3, /* 0 to 6 */\n+\t\tres5:25;\n+\tuint32_t pad0;\n+\tuint32_t pad1;\n+\tuint32_t pad2;\n+\tuint32_t pad3;\n+\tuint32_t pad4;\n+};\n+\n+/* DMA Response Descriptor */\n+union acc_dma_rsp_desc {\n+\tuint32_t val;\n+\tstruct {\n+\t\tuint32_t crc_status:1,\n+\t\t\tsynd_ok:1,\n+\t\t\tdma_err:1,\n+\t\t\tneg_stop:1,\n+\t\t\tfcw_err:1,\n+\t\t\toutput_truncate:1,\n+\t\t\tinput_err:1,\n+\t\t\ttsen_pagefault:1,\n+\t\t\titerCountFrac:8,\n+\t\t\titer_cnt:8,\n+\t\t\tengine_hung:1,\n+\t\t\tcore_reset:5,\n+\t\t\tsdone:1,\n+\t\t\tfdone:1;\n+\t\tuint32_t add_info_0;\n+\t\tuint32_t add_info_1;\n+\t};\n+};\n+\n+/* DMA Request Descriptor */\n+struct __rte_packed acc_dma_req_desc {\n+\tunion {\n+\t\tstruct{\n+\t\t\tuint32_t type:4,\n+\t\t\t\trsrvd0:26,\n+\t\t\t\tsdone:1,\n+\t\t\t\tfdone:1;\n+\t\t\tuint32_t ib_ant_offset:16, /* Not supported in ACC100 */\n+\t\t\t\tres2:12,\n+\t\t\t\tnum_ant:4;\n+\t\t\tuint32_t ob_ant_offset:16,\n+\t\t\t\tob_cyc_offset:12,\n+\t\t\t\tnum_cs:4;\n+\t\t\tuint32_t pass_param:8,\n+\t\t\t\tsdone_enable:1,\n+\t\t\t\tirq_enable:1,\n+\t\t\t\ttimeStampEn:1,\n+\t\t\t\tdltb:1, /* Not supported in ACC200 */\n+\t\t\t\tres0:4,\n+\t\t\t\tnumCBs:8,\n+\t\t\t\tm2dlen:4,\n+\t\t\t\td2mlen:4;\n+\t\t};\n+\t\tstruct{\n+\t\t\tuint32_t word0;\n+\t\t\tuint32_t word1;\n+\t\t\tuint32_t word2;\n+\t\t\tuint32_t word3;\n+\t\t};\n+\t};\n+\tstruct acc_dma_triplet data_ptrs[ACC_DMA_MAX_NUM_POINTERS];\n+\n+\t/* Virtual addresses used to retrieve SW context info */\n+\tunion {\n+\t\tvoid *op_addr;\n+\t\tuint64_t pad1;  /* pad to 64 bits */\n+\t};\n+\t/*\n+\t * Stores additional information needed for driver processing:\n+\t * - last_desc_in_batch - flag used to mark last descriptor (CB)\n+\t *                        in batch\n+\t * - cbs_in_tb - stores information about 
total number of Code Blocks\n+\t *               in currently processed Transport Block\n+\t */\n+\tunion {\n+\t\tstruct {\n+\t\t\tunion {\n+\t\t\t\tstruct acc_fcw_ld fcw_ld;\n+\t\t\t\tstruct acc_fcw_td fcw_td;\n+\t\t\t\tstruct acc_fcw_le fcw_le;\n+\t\t\t\tstruct acc_fcw_te fcw_te;\n+\t\t\t\tstruct acc_fcw_fft fcw_fft;\n+\t\t\t\tstruct acc_fcw_mldts fcw_mldts;\n+\t\t\t\tuint32_t pad2[ACC_FCW_PADDING];\n+\t\t\t};\n+\t\t\tuint32_t last_desc_in_batch :8,\n+\t\t\t\tcbs_in_tb:8,\n+\t\t\t\tpad4 : 16;\n+\t\t};\n+\t\tuint64_t pad3[ACC_DMA_DESC_PADDINGS]; /* pad to 64 bits */\n+\t};\n+};\n+\n+/* ACC100 DMA Descriptor */\n+union acc_dma_desc {\n+\tstruct acc_dma_req_desc req;\n+\tunion acc_dma_rsp_desc rsp;\n+\tuint64_t atom_hdr;\n+};\n+\n+/* Union describing Info Ring entry */\n+union acc_info_ring_data {\n+\tuint32_t val;\n+\tstruct {\n+\t\tunion {\n+\t\t\tuint16_t detailed_info;\n+\t\t\tstruct {\n+\t\t\t\tuint16_t aq_id: 4;\n+\t\t\t\tuint16_t qg_id: 4;\n+\t\t\t\tuint16_t vf_id: 6;\n+\t\t\t\tuint16_t reserved: 2;\n+\t\t\t};\n+\t\t};\n+\t\tuint16_t int_nb: 7;\n+\t\tuint16_t msi_0: 1;\n+\t\tuint16_t vf2pf: 6;\n+\t\tuint16_t loop: 1;\n+\t\tuint16_t valid: 1;\n+\t};\n+\tstruct {\n+\t\tuint32_t aq_id_3: 6;\n+\t\tuint32_t qg_id_3: 5;\n+\t\tuint32_t vf_id_3: 6;\n+\t\tuint32_t int_nb_3: 6;\n+\t\tuint32_t msi_0_3: 1;\n+\t\tuint32_t vf2pf_3: 6;\n+\t\tuint32_t loop_3: 1;\n+\t\tuint32_t valid_3: 1;\n+\t};\n+} __rte_packed;\n+\n+struct __rte_packed acc_pad_ptr {\n+\tvoid *op_addr;\n+\tuint64_t pad1;  /* pad to 64 bits */\n+};\n+\n+struct __rte_packed acc_ptrs {\n+\tstruct acc_pad_ptr ptr[ACC_COMPANION_PTRS];\n+};\n+\n+/* Union describing Info Ring entry */\n+union acc_harq_layout_data {\n+\tuint32_t val;\n+\tstruct {\n+\t\tuint16_t offset;\n+\t\tuint16_t size0;\n+\t};\n+} __rte_packed;\n+\n+/**\n+ * Structure with details about RTE_BBDEV_EVENT_DEQUEUE event. 
It's passed to\n+ * the callback function.\n+ */\n+struct acc_deq_intr_details {\n+\tuint16_t queue_id;\n+};\n+\n+/* TIP VF2PF Comms */\n+enum {\n+\tACC_VF2PF_STATUS_REQUEST = 0,\n+\tACC_VF2PF_USING_VF = 1,\n+};\n+\n+\n+typedef void (*acc10x_fcw_ld_fill_fun_t)(struct rte_bbdev_dec_op *op,\n+\t\tstruct acc_fcw_ld *fcw,\n+\t\tunion acc_harq_layout_data *harq_layout);\n+\n+/* Private data structure for each ACC100 device */\n+struct acc_device {\n+\tvoid *mmio_base;  /**< Base address of MMIO registers (BAR0) */\n+\tvoid *sw_rings_base;  /* Base addr of un-aligned memory for sw rings */\n+\tvoid *sw_rings;  /* 64MBs of 64MB aligned memory for sw rings */\n+\trte_iova_t sw_rings_iova;  /* IOVA address of sw_rings */\n+\t/* Virtual address of the info memory routed to the this function under\n+\t * operation, whether it is PF or VF.\n+\t * HW may DMA information data at this location asynchronously\n+\t */\n+\tunion acc_info_ring_data *info_ring;\n+\n+\tunion acc_harq_layout_data *harq_layout;\n+\t/* Virtual Info Ring head */\n+\tuint16_t info_ring_head;\n+\t/* Number of bytes available for each queue in device, depending on\n+\t * how many queues are enabled with configure()\n+\t */\n+\tuint32_t sw_ring_size;\n+\tuint32_t ddr_size; /* Size in kB */\n+\tuint32_t *tail_ptrs; /* Base address of response tail pointer buffer */\n+\trte_iova_t tail_ptr_iova; /* IOVA address of tail pointers */\n+\t/* Max number of entries available for each queue in device, depending\n+\t * on how many queues are enabled with configure()\n+\t */\n+\tuint32_t sw_ring_max_depth;\n+\tstruct rte_acc_conf acc_conf; /* ACC100 Initial configuration */\n+\t/* Bitmap capturing which Queues have already been assigned */\n+\tuint64_t q_assigned_bit_map[ACC_MAX_NUM_QGRPS];\n+\tbool pf_device; /**< True if this is a PF ACC100 device */\n+\tbool configured; /**< True if this ACC100 device is configured */\n+\tuint16_t device_variant;  /**< Device variant */\n+\tacc10x_fcw_ld_fill_fun_t fcw_ld_fill;  /**< 
5GUL FCW generation function */\n+};\n+\n+/* Structure associated with each queue. */\n+struct __rte_cache_aligned acc_queue {\n+\tunion acc_dma_desc *ring_addr;  /* Virtual address of sw ring */\n+\trte_iova_t ring_addr_iova;  /* IOVA address of software ring */\n+\tuint32_t sw_ring_head;  /* software ring head */\n+\tuint32_t sw_ring_tail;  /* software ring tail */\n+\t/* software ring size (descriptors, not bytes) */\n+\tuint32_t sw_ring_depth;\n+\t/* mask used to wrap enqueued descriptors on the sw ring */\n+\tuint32_t sw_ring_wrap_mask;\n+\t/* Virtual address of companion ring */\n+\tstruct acc_ptrs *companion_ring_addr;\n+\t/* MMIO register used to enqueue descriptors */\n+\tvoid *mmio_reg_enqueue;\n+\tuint8_t vf_id;  /* VF ID (max = 63) */\n+\tuint8_t qgrp_id;  /* Queue Group ID */\n+\tuint16_t aq_id;  /* Atomic Queue ID */\n+\tuint16_t aq_depth;  /* Depth of atomic queue */\n+\tuint32_t aq_enqueued;  /* Count how many \"batches\" have been enqueued */\n+\tuint32_t aq_dequeued;  /* Count how many \"batches\" have been dequeued */\n+\tuint32_t irq_enable;  /* Enable ops dequeue interrupts if set to 1 */\n+\tstruct rte_mempool *fcw_mempool;  /* FCW mempool */\n+\tenum rte_bbdev_op_type op_type;  /* Type of this Queue: TE or TD */\n+\t/* Internal Buffers for loopback input */\n+\tuint8_t *lb_in;\n+\tuint8_t *lb_out;\n+\trte_iova_t lb_in_addr_iova;\n+\trte_iova_t lb_out_addr_iova;\n+\tint8_t *derm_buffer; /* interim buffer for de-rm in SDK */\n+\tstruct acc_device *d;\n+};\n+\n+/* Write to MMIO register address */\n+static inline void\n+mmio_write(void *addr, uint32_t value)\n+{\n+\t*((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);\n+}\n+\n+/* Write a register of a ACC100 device */\n+static inline void\n+acc_reg_write(struct acc_device *d, uint32_t offset, uint32_t value)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);\n+\tmmio_write(reg_addr, value);\n+\tusleep(ACC_LONG_WAIT);\n+}\n+\n+/* Read a register of a ACC100 device */\n+static inline 
uint32_t\n+acc_reg_read(struct acc_device *d, uint32_t offset)\n+{\n+\n+\tvoid *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);\n+\tuint32_t ret = *((volatile uint32_t *)(reg_addr));\n+\treturn rte_le_to_cpu_32(ret);\n+}\n+\n+/* Basic Implementation of Log2 for exact 2^N */\n+static inline uint32_t\n+log2_basic(uint32_t value)\n+{\n+\treturn (value == 0) ? 0 : rte_bsf32(value);\n+}\n+\n+/* Calculate memory alignment offset assuming alignment is 2^N */\n+static inline uint32_t\n+calc_mem_alignment_offset(void *unaligned_virt_mem, uint32_t alignment)\n+{\n+\trte_iova_t unaligned_phy_mem = rte_malloc_virt2iova(unaligned_virt_mem);\n+\treturn (uint32_t)(alignment -\n+\t\t\t(unaligned_phy_mem & (alignment-1)));\n+}\n+\n+static void\n+free_base_addresses(void **base_addrs, int size)\n+{\n+\tint i;\n+\tfor (i = 0; i < size; i++)\n+\t\trte_free(base_addrs[i]);\n+}\n+\n+/* Read flag value 0/1 from bitmap */\n+static inline bool\n+check_bit(uint32_t bitmap, uint32_t bitmask)\n+{\n+\treturn bitmap & bitmask;\n+}\n+\n+static inline char *\n+mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)\n+{\n+\tif (unlikely(len > rte_pktmbuf_tailroom(m)))\n+\t\treturn NULL;\n+\n+\tchar *tail = (char *)m->buf_addr + m->data_off + m->data_len;\n+\tm->data_len = (uint16_t)(m->data_len + len);\n+\tm_head->pkt_len  = (m_head->pkt_len + len);\n+\treturn tail;\n+}\n+\n+\n+static inline uint32_t\n+get_desc_len(void)\n+{\n+\treturn sizeof(union acc_dma_desc);\n+}\n+\n+/* Allocate the 2 * 64MB block for the sw rings */\n+static inline int\n+alloc_2x64mb_sw_rings_mem(struct rte_bbdev *dev, struct acc_device *d,\n+\t\tint socket)\n+{\n+\tuint32_t sw_ring_size = ACC_SIZE_64MBYTE;\n+\td->sw_rings_base = rte_zmalloc_socket(dev->device->driver->name,\n+\t\t\t2 * sw_ring_size, RTE_CACHE_LINE_SIZE, socket);\n+\tif (d->sw_rings_base == NULL) {\n+\t\trte_acc_log(ERR, \"Failed to allocate memory for %s:%u\",\n+\t\t\t\tdev->device->driver->name,\n+\t\t\t\tdev->data->dev_id);\n+\t\treturn 
-ENOMEM;\n+\t}\n+\tuint32_t next_64mb_align_offset = calc_mem_alignment_offset(\n+\t\t\td->sw_rings_base, ACC_SIZE_64MBYTE);\n+\td->sw_rings = RTE_PTR_ADD(d->sw_rings_base, next_64mb_align_offset);\n+\td->sw_rings_iova = rte_malloc_virt2iova(d->sw_rings_base) +\n+\t\t\tnext_64mb_align_offset;\n+\td->sw_ring_size = ACC_MAX_QUEUE_DEPTH * get_desc_len();\n+\td->sw_ring_max_depth = ACC_MAX_QUEUE_DEPTH;\n+\n+\treturn 0;\n+}\n+\n+/* Attempt to allocate minimised memory space for sw rings */\n+static inline void\n+alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc_device *d,\n+\t\tuint16_t num_queues, int socket)\n+{\n+\trte_iova_t sw_rings_base_iova, next_64mb_align_addr_iova;\n+\tuint32_t next_64mb_align_offset;\n+\trte_iova_t sw_ring_iova_end_addr;\n+\tvoid *base_addrs[ACC_SW_RING_MEM_ALLOC_ATTEMPTS];\n+\tvoid *sw_rings_base;\n+\tint i = 0;\n+\tuint32_t q_sw_ring_size = ACC_MAX_QUEUE_DEPTH * get_desc_len();\n+\tuint32_t dev_sw_ring_size = q_sw_ring_size * num_queues;\n+\t/* Free first in case this is a reconfiguration */\n+\trte_free(d->sw_rings_base);\n+\n+\t/* Find an aligned block of memory to store sw rings */\n+\twhile (i < ACC_SW_RING_MEM_ALLOC_ATTEMPTS) {\n+\t\t/*\n+\t\t * sw_ring allocated memory is guaranteed to be aligned to\n+\t\t * q_sw_ring_size at the condition that the requested size is\n+\t\t * less than the page size\n+\t\t */\n+\t\tsw_rings_base = rte_zmalloc_socket(\n+\t\t\t\tdev->device->driver->name,\n+\t\t\t\tdev_sw_ring_size, q_sw_ring_size, socket);\n+\n+\t\tif (sw_rings_base == NULL) {\n+\t\t\trte_acc_log(ERR,\n+\t\t\t\t\t\"Failed to allocate memory for %s:%u\",\n+\t\t\t\t\tdev->device->driver->name,\n+\t\t\t\t\tdev->data->dev_id);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tsw_rings_base_iova = rte_malloc_virt2iova(sw_rings_base);\n+\t\tnext_64mb_align_offset = calc_mem_alignment_offset(\n+\t\t\t\tsw_rings_base, ACC_SIZE_64MBYTE);\n+\t\tnext_64mb_align_addr_iova = sw_rings_base_iova +\n+\t\t\t\tnext_64mb_align_offset;\n+\t\tsw_ring_iova_end_addr = 
sw_rings_base_iova + dev_sw_ring_size;\n+\n+\t\t/* Check if the end of the sw ring memory block is before the\n+\t\t * start of next 64MB aligned mem address\n+\t\t */\n+\t\tif (sw_ring_iova_end_addr < next_64mb_align_addr_iova) {\n+\t\t\td->sw_rings_iova = sw_rings_base_iova;\n+\t\t\td->sw_rings = sw_rings_base;\n+\t\t\td->sw_rings_base = sw_rings_base;\n+\t\t\td->sw_ring_size = q_sw_ring_size;\n+\t\t\td->sw_ring_max_depth = ACC_MAX_QUEUE_DEPTH;\n+\t\t\tbreak;\n+\t\t}\n+\t\t/* Store the address of the unaligned mem block */\n+\t\tbase_addrs[i] = sw_rings_base;\n+\t\ti++;\n+\t}\n+\n+\t/* Free all unaligned blocks of mem allocated in the loop */\n+\tfree_base_addresses(base_addrs, i);\n+}\n+\n+/*\n+ * Find queue_id of a device queue based on details from the Info Ring.\n+ * If a queue isn't found UINT16_MAX is returned.\n+ */\n+static inline uint16_t\n+get_queue_id_from_ring_info(struct rte_bbdev_data *data,\n+\t\tconst union acc_info_ring_data ring_data)\n+{\n+\tuint16_t queue_id;\n+\n+\tfor (queue_id = 0; queue_id < data->num_queues; ++queue_id) {\n+\t\tstruct acc_queue *acc_q =\n+\t\t\t\tdata->queues[queue_id].queue_private;\n+\t\tif (acc_q != NULL && acc_q->aq_id == ring_data.aq_id &&\n+\t\t\t\tacc_q->qgrp_id == ring_data.qg_id &&\n+\t\t\t\tacc_q->vf_id == ring_data.vf_id)\n+\t\t\treturn queue_id;\n+\t}\n+\n+\treturn UINT16_MAX;\n+}\n+\n+/* Fill in a frame control word for turbo encoding. 
*/\n+static inline void\n+acc_fcw_te_fill(const struct rte_bbdev_enc_op *op, struct acc_fcw_te *fcw)\n+{\n+\tfcw->code_block_mode = op->turbo_enc.code_block_mode;\n+\tif (fcw->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {\n+\t\tfcw->k_neg = op->turbo_enc.tb_params.k_neg;\n+\t\tfcw->k_pos = op->turbo_enc.tb_params.k_pos;\n+\t\tfcw->c_neg = op->turbo_enc.tb_params.c_neg;\n+\t\tfcw->c = op->turbo_enc.tb_params.c;\n+\t\tfcw->ncb_neg = op->turbo_enc.tb_params.ncb_neg;\n+\t\tfcw->ncb_pos = op->turbo_enc.tb_params.ncb_pos;\n+\n+\t\tif (check_bit(op->turbo_enc.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_RATE_MATCH)) {\n+\t\t\tfcw->bypass_rm = 0;\n+\t\t\tfcw->cab = op->turbo_enc.tb_params.cab;\n+\t\t\tfcw->ea = op->turbo_enc.tb_params.ea;\n+\t\t\tfcw->eb = op->turbo_enc.tb_params.eb;\n+\t\t} else {\n+\t\t\t/* E is set to the encoding output size when RM is\n+\t\t\t * bypassed.\n+\t\t\t */\n+\t\t\tfcw->bypass_rm = 1;\n+\t\t\tfcw->cab = fcw->c_neg;\n+\t\t\tfcw->ea = 3 * fcw->k_neg + 12;\n+\t\t\tfcw->eb = 3 * fcw->k_pos + 12;\n+\t\t}\n+\t} else { /* For CB mode */\n+\t\tfcw->k_pos = op->turbo_enc.cb_params.k;\n+\t\tfcw->ncb_pos = op->turbo_enc.cb_params.ncb;\n+\n+\t\tif (check_bit(op->turbo_enc.op_flags,\n+\t\t\t\tRTE_BBDEV_TURBO_RATE_MATCH)) {\n+\t\t\tfcw->bypass_rm = 0;\n+\t\t\tfcw->eb = op->turbo_enc.cb_params.e;\n+\t\t} else {\n+\t\t\t/* E is set to the encoding output size when RM is\n+\t\t\t * bypassed.\n+\t\t\t */\n+\t\t\tfcw->bypass_rm = 1;\n+\t\t\tfcw->eb = 3 * fcw->k_pos + 12;\n+\t\t}\n+\t}\n+\n+\tfcw->bypass_rv_idx1 = check_bit(op->turbo_enc.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_RV_INDEX_BYPASS);\n+\tfcw->code_block_crc = check_bit(op->turbo_enc.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_CRC_24B_ATTACH);\n+\tfcw->rv_idx1 = op->turbo_enc.rv_index;\n+}\n+\n+/* Compute value of k0.\n+ * Based on 3GPP 38.212 Table 5.4.2.1-2\n+ * Starting position of different redundancy versions, k0\n+ */\n+static inline uint16_t\n+get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t 
rv_index)\n+{\n+\tif (rv_index == 0)\n+\t\treturn 0;\n+\tuint16_t n = (bg == 1 ? ACC_N_ZC_1 : ACC_N_ZC_2) * z_c;\n+\tif (n_cb == n) {\n+\t\tif (rv_index == 1)\n+\t\t\treturn (bg == 1 ? ACC_K0_1_1 : ACC_K0_1_2) * z_c;\n+\t\telse if (rv_index == 2)\n+\t\t\treturn (bg == 1 ? ACC_K0_2_1 : ACC_K0_2_2) * z_c;\n+\t\telse\n+\t\t\treturn (bg == 1 ? ACC_K0_3_1 : ACC_K0_3_2) * z_c;\n+\t}\n+\t/* LBRM case - includes a division by N */\n+\tif (unlikely(z_c == 0))\n+\t\treturn 0;\n+\tif (rv_index == 1)\n+\t\treturn (((bg == 1 ? ACC_K0_1_1 : ACC_K0_1_2) * n_cb)\n+\t\t\t\t/ n) * z_c;\n+\telse if (rv_index == 2)\n+\t\treturn (((bg == 1 ? ACC_K0_2_1 : ACC_K0_2_2) * n_cb)\n+\t\t\t\t/ n) * z_c;\n+\telse\n+\t\treturn (((bg == 1 ? ACC_K0_3_1 : ACC_K0_3_2) * n_cb)\n+\t\t\t\t/ n) * z_c;\n+}\n+\n+/* Fill in a frame control word for LDPC encoding. */\n+static inline void\n+acc_fcw_le_fill(const struct rte_bbdev_enc_op *op,\n+\t\tstruct acc_fcw_le *fcw, int num_cb, uint32_t default_e)\n+{\n+\tfcw->qm = op->ldpc_enc.q_m;\n+\tfcw->nfiller = op->ldpc_enc.n_filler;\n+\tfcw->BG = (op->ldpc_enc.basegraph - 1);\n+\tfcw->Zc = op->ldpc_enc.z_c;\n+\tfcw->ncb = op->ldpc_enc.n_cb;\n+\tfcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_enc.basegraph,\n+\t\t\top->ldpc_enc.rv_index);\n+\tfcw->rm_e = (default_e == 0) ? 
op->ldpc_enc.cb_params.e : default_e;\n+\tfcw->crc_select = check_bit(op->ldpc_enc.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_CRC_24B_ATTACH);\n+\tfcw->bypass_intlv = check_bit(op->ldpc_enc.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_INTERLEAVER_BYPASS);\n+\tfcw->mcb_count = num_cb;\n+}\n+\n+/* Enqueue a number of operations to HW and update software rings */\n+static inline void\n+acc_dma_enqueue(struct acc_queue *q, uint16_t n,\n+\t\tstruct rte_bbdev_stats *queue_stats)\n+{\n+\tunion acc_enqueue_reg_fmt enq_req;\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+\tuint64_t start_time = 0;\n+\tqueue_stats->acc_offload_cycles = 0;\n+#else\n+\tRTE_SET_USED(queue_stats);\n+#endif\n+\n+\tenq_req.val = 0;\n+\t/* Setting offset, 100b for 256 DMA Desc */\n+\tenq_req.addr_offset = ACC_DESC_OFFSET;\n+\n+\t/* Split ops into batches */\n+\tdo {\n+\t\tunion acc_dma_desc *desc;\n+\t\tuint16_t enq_batch_size;\n+\t\tuint64_t offset;\n+\t\trte_iova_t req_elem_addr;\n+\n+\t\tenq_batch_size = RTE_MIN(n, MAX_ENQ_BATCH_SIZE);\n+\n+\t\t/* Set flag on last descriptor in a batch */\n+\t\tdesc = q->ring_addr + ((q->sw_ring_head + enq_batch_size - 1) &\n+\t\t\t\tq->sw_ring_wrap_mask);\n+\t\tdesc->req.last_desc_in_batch = 1;\n+\n+\t\t/* Calculate the 1st descriptor's address */\n+\t\toffset = ((q->sw_ring_head & q->sw_ring_wrap_mask) *\n+\t\t\t\tsizeof(union acc_dma_desc));\n+\t\treq_elem_addr = q->ring_addr_iova + offset;\n+\n+\t\t/* Fill enqueue struct */\n+\t\tenq_req.num_elem = enq_batch_size;\n+\t\t/* low 6 bits are not needed */\n+\t\tenq_req.req_elem_addr = (uint32_t)(req_elem_addr >> 6);\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\trte_memdump(stderr, \"Req sdone\", desc, sizeof(*desc));\n+#endif\n+\t\trte_acc_log(DEBUG, \"Enqueue %u reqs (phys %#\"PRIx64\") to reg %p\",\n+\t\t\t\tenq_batch_size,\n+\t\t\t\treq_elem_addr,\n+\t\t\t\t(void *)q->mmio_reg_enqueue);\n+\n+\t\trte_wmb();\n+\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+\t\t/* Start time measurement for enqueue function offload. 
*/\n+\t\tstart_time = rte_rdtsc_precise();\n+#endif\n+\t\trte_acc_log(DEBUG, \"Debug : MMIO Enqueue\");\n+\t\tmmio_write(q->mmio_reg_enqueue, enq_req.val);\n+\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+\t\tqueue_stats->acc_offload_cycles +=\n+\t\t\t\trte_rdtsc_precise() - start_time;\n+#endif\n+\n+\t\tq->aq_enqueued++;\n+\t\tq->sw_ring_head += enq_batch_size;\n+\t\tn -= enq_batch_size;\n+\n+\t} while (n);\n+\n+\n+}\n+\n+/* Convert offset to harq index for harq_layout structure */\n+static inline uint32_t hq_index(uint32_t offset)\n+{\n+\treturn (offset >> ACC_HARQ_OFFSET_SHIFT) & ACC_HARQ_OFFSET_MASK;\n+}\n+\n+/* Calculates number of CBs in processed encoder TB based on 'r' and input\n+ * length.\n+ */\n+static inline uint8_t\n+get_num_cbs_in_tb_enc(struct rte_bbdev_op_turbo_enc *turbo_enc)\n+{\n+\tuint8_t c, c_neg, r, crc24_bits = 0;\n+\tuint16_t k, k_neg, k_pos;\n+\tuint8_t cbs_in_tb = 0;\n+\tint32_t length;\n+\n+\tlength = turbo_enc->input.length;\n+\tr = turbo_enc->tb_params.r;\n+\tc = turbo_enc->tb_params.c;\n+\tc_neg = turbo_enc->tb_params.c_neg;\n+\tk_neg = turbo_enc->tb_params.k_neg;\n+\tk_pos = turbo_enc->tb_params.k_pos;\n+\tcrc24_bits = 0;\n+\tif (check_bit(turbo_enc->op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))\n+\t\tcrc24_bits = 24;\n+\twhile (length > 0 && r < c) {\n+\t\tk = (r < c_neg) ? 
k_neg : k_pos;\n+\t\tlength -= (k - crc24_bits) >> 3;\n+\t\tr++;\n+\t\tcbs_in_tb++;\n+\t}\n+\n+\treturn cbs_in_tb;\n+}\n+\n+/* Calculates number of CBs in processed decoder TB based on 'r' and input\n+ * length.\n+ */\n+static inline uint16_t\n+get_num_cbs_in_tb_dec(struct rte_bbdev_op_turbo_dec *turbo_dec)\n+{\n+\tuint8_t c, c_neg, r = 0;\n+\tuint16_t kw, k, k_neg, k_pos, cbs_in_tb = 0;\n+\tint32_t length;\n+\n+\tlength = turbo_dec->input.length;\n+\tr = turbo_dec->tb_params.r;\n+\tc = turbo_dec->tb_params.c;\n+\tc_neg = turbo_dec->tb_params.c_neg;\n+\tk_neg = turbo_dec->tb_params.k_neg;\n+\tk_pos = turbo_dec->tb_params.k_pos;\n+\twhile (length > 0 && r < c) {\n+\t\tk = (r < c_neg) ? k_neg : k_pos;\n+\t\tkw = RTE_ALIGN_CEIL(k + 4, 32) * 3;\n+\t\tlength -= kw;\n+\t\tr++;\n+\t\tcbs_in_tb++;\n+\t}\n+\n+\treturn cbs_in_tb;\n+}\n+\n+/* Calculates number of CBs in processed decoder TB based on 'r' and input\n+ * length.\n+ */\n+static inline uint16_t\n+get_num_cbs_in_tb_ldpc_dec(struct rte_bbdev_op_ldpc_dec *ldpc_dec)\n+{\n+\tuint16_t r, cbs_in_tb = 0;\n+\tint32_t length = ldpc_dec->input.length;\n+\tr = ldpc_dec->tb_params.r;\n+\twhile (length > 0 && r < ldpc_dec->tb_params.c) {\n+\t\tlength -=  (r < ldpc_dec->tb_params.cab) ?\n+\t\t\t\tldpc_dec->tb_params.ea :\n+\t\t\t\tldpc_dec->tb_params.eb;\n+\t\tr++;\n+\t\tcbs_in_tb++;\n+\t}\n+\treturn cbs_in_tb;\n+}\n+\n+/* Check we can mux encode operations with common FCW */\n+static inline int16_t\n+check_mux(struct rte_bbdev_enc_op **ops, uint16_t num) {\n+\tuint16_t i;\n+\tif (num <= 1)\n+\t\treturn 1;\n+\tfor (i = 1; i < num; ++i) {\n+\t\t/* Only mux compatible code blocks */\n+\t\tif (memcmp((uint8_t *)(&ops[i]->ldpc_enc) + ACC_ENC_OFFSET,\n+\t\t\t\t(uint8_t *)(&ops[0]->ldpc_enc) +\n+\t\t\t\tACC_ENC_OFFSET,\n+\t\t\t\tACC_CMP_ENC_SIZE) != 0)\n+\t\t\treturn i;\n+\t}\n+\t/* Avoid multiplexing small inbound size frames */\n+\tint Kp = (ops[0]->ldpc_enc.basegraph == 1 ? 
22 : 10) *\n+\t\t\tops[0]->ldpc_enc.z_c - ops[0]->ldpc_enc.n_filler;\n+\tif (Kp  <= ACC_LIMIT_DL_MUX_BITS)\n+\t\treturn 1;\n+\treturn num;\n+}\n+\n+/* Check we can mux encode operations with common FCW */\n+static inline bool\n+cmp_ldpc_dec_op(struct rte_bbdev_dec_op **ops) {\n+\t/* Only mux compatible code blocks */\n+\tif (memcmp((uint8_t *)(&ops[0]->ldpc_dec) + ACC_DEC_OFFSET,\n+\t\t\t(uint8_t *)(&ops[1]->ldpc_dec) +\n+\t\t\tACC_DEC_OFFSET, ACC_CMP_DEC_SIZE) != 0) {\n+\t\treturn false;\n+\t} else\n+\t\treturn true;\n+}\n+\n+/**\n+ * Fills descriptor with data pointers of one block type.\n+ *\n+ * @param desc\n+ *   Pointer to DMA descriptor.\n+ * @param input\n+ *   Pointer to pointer to input data which will be encoded. It can be changed\n+ *   and points to next segment in scatter-gather case.\n+ * @param offset\n+ *   Input offset in rte_mbuf structure. It is used for calculating the point\n+ *   where data is starting.\n+ * @param cb_len\n+ *   Length of currently processed Code Block\n+ * @param seg_total_left\n+ *   It indicates how many bytes still left in segment (mbuf) for further\n+ *   processing.\n+ * @param op_flags\n+ *   Store information about device capabilities\n+ * @param next_triplet\n+ *   Index for ACC200 DMA Descriptor triplet\n+ * @param scattergather\n+ *   Flag to support scatter-gather for the mbuf\n+ *\n+ * @return\n+ *   Returns index of next triplet on success, other value if lengths of\n+ *   pkt and processed cb do not match.\n+ *\n+ */\n+static inline int\n+acc_dma_fill_blk_type_in(struct acc_dma_req_desc *desc,\n+\t\tstruct rte_mbuf **input, uint32_t *offset, uint32_t cb_len,\n+\t\tuint32_t *seg_total_left, int next_triplet,\n+\t\tbool scattergather)\n+{\n+\tuint32_t part_len;\n+\tstruct rte_mbuf *m = *input;\n+\tif (scattergather)\n+\t\tpart_len = (*seg_total_left < cb_len) ?\n+\t\t\t\t*seg_total_left : cb_len;\n+\telse\n+\t\tpart_len = cb_len;\n+\tcb_len -= part_len;\n+\t*seg_total_left -= 
part_len;\n+\n+\tdesc->data_ptrs[next_triplet].address =\n+\t\t\trte_pktmbuf_iova_offset(m, *offset);\n+\tdesc->data_ptrs[next_triplet].blen = part_len;\n+\tdesc->data_ptrs[next_triplet].blkid = ACC_DMA_BLKID_IN;\n+\tdesc->data_ptrs[next_triplet].last = 0;\n+\tdesc->data_ptrs[next_triplet].dma_ext = 0;\n+\t*offset += part_len;\n+\tnext_triplet++;\n+\n+\twhile (cb_len > 0) {\n+\t\tif (next_triplet < ACC_DMA_MAX_NUM_POINTERS_IN && m->next != NULL) {\n+\n+\t\t\tm = m->next;\n+\t\t\t*seg_total_left = rte_pktmbuf_data_len(m);\n+\t\t\tpart_len = (*seg_total_left < cb_len) ?\n+\t\t\t\t\t*seg_total_left :\n+\t\t\t\t\tcb_len;\n+\t\t\tdesc->data_ptrs[next_triplet].address =\n+\t\t\t\t\trte_pktmbuf_iova_offset(m, 0);\n+\t\t\tdesc->data_ptrs[next_triplet].blen = part_len;\n+\t\t\tdesc->data_ptrs[next_triplet].blkid =\n+\t\t\t\t\tACC_DMA_BLKID_IN;\n+\t\t\tdesc->data_ptrs[next_triplet].last = 0;\n+\t\t\tdesc->data_ptrs[next_triplet].dma_ext = 0;\n+\t\t\tcb_len -= part_len;\n+\t\t\t*seg_total_left -= part_len;\n+\t\t\t/* Initializing offset for next segment (mbuf) */\n+\t\t\t*offset = part_len;\n+\t\t\tnext_triplet++;\n+\t\t} else {\n+\t\t\trte_acc_log(ERR,\n+\t\t\t\t\"Some data still left for processing: \"\n+\t\t\t\t\"data_left: %u, next_triplet: %u, next_mbuf: %p\",\n+\t\t\t\tcb_len, next_triplet, m->next);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\t/* Storing new mbuf as it could be changed in scatter-gather case*/\n+\t*input = m;\n+\n+\treturn next_triplet;\n+}\n+\n+/* Fills descriptor with data pointers of one block type.\n+ * Returns index of next triplet\n+ */\n+static inline int\n+acc_dma_fill_blk_type(struct acc_dma_req_desc *desc,\n+\t\tstruct rte_mbuf *mbuf, uint32_t offset,\n+\t\tuint32_t len, int next_triplet, int blk_id)\n+{\n+\tdesc->data_ptrs[next_triplet].address =\n+\t\t\trte_pktmbuf_iova_offset(mbuf, offset);\n+\tdesc->data_ptrs[next_triplet].blen = len;\n+\tdesc->data_ptrs[next_triplet].blkid = blk_id;\n+\tdesc->data_ptrs[next_triplet].last = 
0;\n+\tdesc->data_ptrs[next_triplet].dma_ext = 0;\n+\tnext_triplet++;\n+\n+\treturn next_triplet;\n+}\n+\n+static inline void\n+acc_header_init(struct acc_dma_req_desc *desc)\n+{\n+\tdesc->word0 = ACC_DMA_DESC_TYPE;\n+\tdesc->word1 = 0; /**< Timestamp could be disabled */\n+\tdesc->word2 = 0;\n+\tdesc->word3 = 0;\n+\tdesc->numCBs = 1;\n+}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+/* Check if any input data is unexpectedly left for processing */\n+static inline int\n+check_mbuf_total_left(uint32_t mbuf_total_left)\n+{\n+\tif (mbuf_total_left == 0)\n+\t\treturn 0;\n+\trte_acc_log(ERR,\n+\t\t\"Some date still left for processing: mbuf_total_left = %u\",\n+\t\tmbuf_total_left);\n+\treturn -EINVAL;\n+}\n+#endif\n+\n+static inline int\n+acc_dma_desc_te_fill(struct rte_bbdev_enc_op *op,\n+\t\tstruct acc_dma_req_desc *desc, struct rte_mbuf **input,\n+\t\tstruct rte_mbuf *output, uint32_t *in_offset,\n+\t\tuint32_t *out_offset, uint32_t *out_length,\n+\t\tuint32_t *mbuf_total_left, uint32_t *seg_total_left, uint8_t r)\n+{\n+\tint next_triplet = 1; /* FCW already done */\n+\tuint32_t e, ea, eb, length;\n+\tuint16_t k, k_neg, k_pos;\n+\tuint8_t cab, c_neg;\n+\n+\tdesc->word0 = ACC_DMA_DESC_TYPE;\n+\tdesc->word1 = 0; /**< Timestamp could be disabled */\n+\tdesc->word2 = 0;\n+\tdesc->word3 = 0;\n+\tdesc->numCBs = 1;\n+\n+\tif (op->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {\n+\t\tea = op->turbo_enc.tb_params.ea;\n+\t\teb = op->turbo_enc.tb_params.eb;\n+\t\tcab = op->turbo_enc.tb_params.cab;\n+\t\tk_neg = op->turbo_enc.tb_params.k_neg;\n+\t\tk_pos = op->turbo_enc.tb_params.k_pos;\n+\t\tc_neg = op->turbo_enc.tb_params.c_neg;\n+\t\te = (r < cab) ? ea : eb;\n+\t\tk = (r < c_neg) ? 
k_neg : k_pos;\n+\t} else {\n+\t\te = op->turbo_enc.cb_params.e;\n+\t\tk = op->turbo_enc.cb_params.k;\n+\t}\n+\n+\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))\n+\t\tlength = (k - 24) >> 3;\n+\telse\n+\t\tlength = k >> 3;\n+\n+\tif (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < length))) {\n+\t\trte_acc_log(ERR,\n+\t\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\t*mbuf_total_left, length);\n+\t\treturn -1;\n+\t}\n+\n+\tnext_triplet = acc_dma_fill_blk_type_in(desc, input, in_offset,\n+\t\t\tlength, seg_total_left, next_triplet,\n+\t\t\tcheck_bit(op->turbo_enc.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_ENC_SCATTER_GATHER));\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_acc_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->m2dlen = next_triplet;\n+\t*mbuf_total_left -= length;\n+\n+\t/* Set output length */\n+\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_RATE_MATCH))\n+\t\t/* Integer round up division by 8 */\n+\t\t*out_length = (e + 7) >> 3;\n+\telse\n+\t\t*out_length = (k >> 3) * 3 + 2;\n+\n+\tnext_triplet = acc_dma_fill_blk_type(desc, output, *out_offset,\n+\t\t\t*out_length, next_triplet, ACC_DMA_BLKID_OUT_ENC);\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_acc_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\top->turbo_enc.output.length += *out_length;\n+\t*out_offset += *out_length;\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->d2mlen = next_triplet - desc->m2dlen;\n+\n+\tdesc->op_addr = op;\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+acc_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_bbdev *bbdev;\n+\tint ret;\n+\tuint8_t dev_id;\n+\n+\tif (pci_dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\t/* Find device */\n+\tbbdev = 
rte_bbdev_get_named_dev(pci_dev->device.name);\n+\tif (bbdev == NULL) {\n+\t\trte_acc_log(CRIT,\n+\t\t\t\t\"Couldn't find HW dev \\\"%s\\\" to uninitialise it\",\n+\t\t\t\tpci_dev->device.name);\n+\t\treturn -ENODEV;\n+\t}\n+\tdev_id = bbdev->data->dev_id;\n+\n+\t/* free device private memory before close */\n+\trte_free(bbdev->data->dev_private);\n+\n+\t/* Close device */\n+\tret = rte_bbdev_close(dev_id);\n+\tif (ret < 0)\n+\t\trte_acc_log(ERR,\n+\t\t\t\t\"Device %i failed to close during uninit: %i\",\n+\t\t\t\tdev_id, ret);\n+\n+\t/* release bbdev from library */\n+\trte_bbdev_release(bbdev);\n+\n+\treturn 0;\n+}\n+\n+#endif /* _ACC_COMMON_H_ */\ndiff --git a/drivers/baseband/acc100/rte_acc100_cfg.h b/drivers/baseband/acc100/rte_acc100_cfg.h\nindex b70803d96d..732c03be24 100644\n--- a/drivers/baseband/acc100/rte_acc100_cfg.h\n+++ b/drivers/baseband/acc100/rte_acc100_cfg.h\n@@ -1,5 +1,5 @@\n /* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2020 Intel Corporation\n+ * Copyright(c) 2022 Intel Corporation\n  */\n \n #ifndef _RTE_ACC100_CFG_H_\n@@ -18,76 +18,12 @@\n \n #include <stdint.h>\n #include <stdbool.h>\n+#include \"rte_acc_common_cfg.h\"\n \n #ifdef __cplusplus\n extern \"C\" {\n #endif\n-/**< Number of Virtual Functions ACC100 supports */\n-#define RTE_ACC100_NUM_VFS 16\n \n-/**\n- * Definition of Queue Topology for ACC100 Configuration\n- * Some level of details is abstracted out to expose a clean interface\n- * given that comprehensive flexibility is not required\n- */\n-struct rte_acc100_queue_topology {\n-\t/** Number of QGroups in incremental order of priority */\n-\tuint16_t num_qgroups;\n-\t/**\n-\t * All QGroups have the same number of AQs here.\n-\t * Note : Could be made a 16-array if more flexibility is really\n-\t * required\n-\t */\n-\tuint16_t num_aqs_per_groups;\n-\t/**\n-\t * Depth of the AQs is the same of all QGroups here. 
Log2 Enum : 2^N\n-\t * Note : Could be made a 16-array if more flexibility is really\n-\t * required\n-\t */\n-\tuint16_t aq_depth_log2;\n-\t/**\n-\t * Index of the first Queue Group Index - assuming contiguity\n-\t * Initialized as -1\n-\t */\n-\tint8_t first_qgroup_index;\n-};\n-\n-/**\n- * Definition of Arbitration related parameters for ACC100 Configuration\n- */\n-struct rte_acc100_arbitration {\n-\t/** Default Weight for VF Fairness Arbitration */\n-\tuint16_t round_robin_weight;\n-\tuint32_t gbr_threshold1; /**< Guaranteed Bitrate Threshold 1 */\n-\tuint32_t gbr_threshold2; /**< Guaranteed Bitrate Threshold 2 */\n-};\n-\n-/**\n- * Structure to pass ACC100 configuration.\n- * Note: all VF Bundles will have the same configuration.\n- */\n-struct rte_acc100_conf {\n-\tbool pf_mode_en; /**< 1 if PF is used for dataplane, 0 for VFs */\n-\t/** 1 if input '1' bit is represented by a positive LLR value, 0 if '1'\n-\t * bit is represented by a negative value.\n-\t */\n-\tbool input_pos_llr_1_bit;\n-\t/** 1 if output '1' bit is represented by a positive value, 0 if '1'\n-\t * bit is represented by a negative value.\n-\t */\n-\tbool output_pos_llr_1_bit;\n-\tuint16_t num_vf_bundles; /**< Number of VF bundles to setup */\n-\t/** Queue topology for each operation type */\n-\tstruct rte_acc100_queue_topology q_ul_4g;\n-\tstruct rte_acc100_queue_topology q_dl_4g;\n-\tstruct rte_acc100_queue_topology q_ul_5g;\n-\tstruct rte_acc100_queue_topology q_dl_5g;\n-\t/** Arbitration configuration for each operation type */\n-\tstruct rte_acc100_arbitration arb_ul_4g[RTE_ACC100_NUM_VFS];\n-\tstruct rte_acc100_arbitration arb_dl_4g[RTE_ACC100_NUM_VFS];\n-\tstruct rte_acc100_arbitration arb_ul_5g[RTE_ACC100_NUM_VFS];\n-\tstruct rte_acc100_arbitration arb_dl_5g[RTE_ACC100_NUM_VFS];\n-};\n \n /**\n  * Configure a ACC100/ACC101 device in PF mode notably for bbdev-test\n@@ -104,7 +40,7 @@ struct rte_acc100_conf {\n  */\n __rte_experimental\n int\n-rte_acc10x_configure(const char *dev_name, 
struct rte_acc100_conf *conf);\n+rte_acc10x_configure(const char *dev_name, struct rte_acc_conf *conf);\n \n #ifdef __cplusplus\n }\ndiff --git a/drivers/baseband/acc100/rte_acc100_pmd.c b/drivers/baseband/acc100/rte_acc100_pmd.c\nindex 18ec04a7b8..e84d9f2511 100644\n--- a/drivers/baseband/acc100/rte_acc100_pmd.c\n+++ b/drivers/baseband/acc100/rte_acc100_pmd.c\n@@ -30,48 +30,6 @@ RTE_LOG_REGISTER_DEFAULT(acc100_logtype, DEBUG);\n RTE_LOG_REGISTER_DEFAULT(acc100_logtype, NOTICE);\n #endif\n \n-/* Write to MMIO register address */\n-static inline void\n-mmio_write(void *addr, uint32_t value)\n-{\n-\t*((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);\n-}\n-\n-/* Write a register of a ACC100 device */\n-static inline void\n-acc100_reg_write(struct acc100_device *d, uint32_t offset, uint32_t value)\n-{\n-\tvoid *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);\n-\tmmio_write(reg_addr, value);\n-\tusleep(ACC100_LONG_WAIT);\n-}\n-\n-/* Read a register of a ACC100 device */\n-static inline uint32_t\n-acc100_reg_read(struct acc100_device *d, uint32_t offset)\n-{\n-\n-\tvoid *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);\n-\tuint32_t ret = *((volatile uint32_t *)(reg_addr));\n-\treturn rte_le_to_cpu_32(ret);\n-}\n-\n-/* Basic Implementation of Log2 for exact 2^N */\n-static inline uint32_t\n-log2_basic(uint32_t value)\n-{\n-\treturn (value == 0) ? 
0 : rte_bsf32(value);\n-}\n-\n-/* Calculate memory alignment offset assuming alignment is 2^N */\n-static inline uint32_t\n-calc_mem_alignment_offset(void *unaligned_virt_mem, uint32_t alignment)\n-{\n-\trte_iova_t unaligned_phy_mem = rte_malloc_virt2iova(unaligned_virt_mem);\n-\treturn (uint32_t)(alignment -\n-\t\t\t(unaligned_phy_mem & (alignment-1)));\n-}\n-\n /* Calculate the offset of the enqueue register */\n static inline uint32_t\n queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)\n@@ -88,17 +46,17 @@ enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, NUM_ACC};\n \n /* Return the accelerator enum for a Queue Group Index */\n static inline int\n-accFromQgid(int qg_idx, const struct rte_acc100_conf *acc100_conf)\n+accFromQgid(int qg_idx, const struct rte_acc_conf *acc_conf)\n {\n \tint accQg[ACC100_NUM_QGRPS];\n \tint NumQGroupsPerFn[NUM_ACC];\n \tint acc, qgIdx, qgIndex = 0;\n \tfor (qgIdx = 0; qgIdx < ACC100_NUM_QGRPS; qgIdx++)\n \t\taccQg[qgIdx] = 0;\n-\tNumQGroupsPerFn[UL_4G] = acc100_conf->q_ul_4g.num_qgroups;\n-\tNumQGroupsPerFn[UL_5G] = acc100_conf->q_ul_5g.num_qgroups;\n-\tNumQGroupsPerFn[DL_4G] = acc100_conf->q_dl_4g.num_qgroups;\n-\tNumQGroupsPerFn[DL_5G] = acc100_conf->q_dl_5g.num_qgroups;\n+\tNumQGroupsPerFn[UL_4G] = acc_conf->q_ul_4g.num_qgroups;\n+\tNumQGroupsPerFn[UL_5G] = acc_conf->q_ul_5g.num_qgroups;\n+\tNumQGroupsPerFn[DL_4G] = acc_conf->q_dl_4g.num_qgroups;\n+\tNumQGroupsPerFn[DL_5G] = acc_conf->q_dl_5g.num_qgroups;\n \tfor (acc = UL_4G;  acc < NUM_ACC; acc++)\n \t\tfor (qgIdx = 0; qgIdx < NumQGroupsPerFn[acc]; qgIdx++)\n \t\t\taccQg[qgIndex++] = acc;\n@@ -108,23 +66,23 @@ accFromQgid(int qg_idx, const struct rte_acc100_conf *acc100_conf)\n \n /* Return the queue topology for a Queue Group Index */\n static inline void\n-qtopFromAcc(struct rte_acc100_queue_topology **qtop, int acc_enum,\n-\t\tstruct rte_acc100_conf *acc100_conf)\n+qtopFromAcc(struct rte_acc_queue_topology **qtop, int acc_enum,\n+\t\tstruct rte_acc_conf 
*acc_conf)\n {\n-\tstruct rte_acc100_queue_topology *p_qtop;\n+\tstruct rte_acc_queue_topology *p_qtop;\n \tp_qtop = NULL;\n \tswitch (acc_enum) {\n \tcase UL_4G:\n-\t\tp_qtop = &(acc100_conf->q_ul_4g);\n+\t\tp_qtop = &(acc_conf->q_ul_4g);\n \t\tbreak;\n \tcase UL_5G:\n-\t\tp_qtop = &(acc100_conf->q_ul_5g);\n+\t\tp_qtop = &(acc_conf->q_ul_5g);\n \t\tbreak;\n \tcase DL_4G:\n-\t\tp_qtop = &(acc100_conf->q_dl_4g);\n+\t\tp_qtop = &(acc_conf->q_dl_4g);\n \t\tbreak;\n \tcase DL_5G:\n-\t\tp_qtop = &(acc100_conf->q_dl_5g);\n+\t\tp_qtop = &(acc_conf->q_dl_5g);\n \t\tbreak;\n \tdefault:\n \t\t/* NOTREACHED */\n@@ -136,11 +94,11 @@ qtopFromAcc(struct rte_acc100_queue_topology **qtop, int acc_enum,\n \n /* Return the AQ depth for a Queue Group Index */\n static inline int\n-aqDepth(int qg_idx, struct rte_acc100_conf *acc100_conf)\n+aqDepth(int qg_idx, struct rte_acc_conf *acc_conf)\n {\n-\tstruct rte_acc100_queue_topology *q_top = NULL;\n-\tint acc_enum = accFromQgid(qg_idx, acc100_conf);\n-\tqtopFromAcc(&q_top, acc_enum, acc100_conf);\n+\tstruct rte_acc_queue_topology *q_top = NULL;\n+\tint acc_enum = accFromQgid(qg_idx, acc_conf);\n+\tqtopFromAcc(&q_top, acc_enum, acc_conf);\n \tif (unlikely(q_top == NULL))\n \t\treturn 1;\n \treturn RTE_MAX(1, q_top->aq_depth_log2);\n@@ -148,39 +106,39 @@ aqDepth(int qg_idx, struct rte_acc100_conf *acc100_conf)\n \n /* Return the AQ depth for a Queue Group Index */\n static inline int\n-aqNum(int qg_idx, struct rte_acc100_conf *acc100_conf)\n+aqNum(int qg_idx, struct rte_acc_conf *acc_conf)\n {\n-\tstruct rte_acc100_queue_topology *q_top = NULL;\n-\tint acc_enum = accFromQgid(qg_idx, acc100_conf);\n-\tqtopFromAcc(&q_top, acc_enum, acc100_conf);\n+\tstruct rte_acc_queue_topology *q_top = NULL;\n+\tint acc_enum = accFromQgid(qg_idx, acc_conf);\n+\tqtopFromAcc(&q_top, acc_enum, acc_conf);\n \tif (unlikely(q_top == NULL))\n \t\treturn 0;\n \treturn q_top->num_aqs_per_groups;\n }\n \n static void\n-initQTop(struct rte_acc100_conf 
*acc100_conf)\n+initQTop(struct rte_acc_conf *acc_conf)\n {\n-\tacc100_conf->q_ul_4g.num_aqs_per_groups = 0;\n-\tacc100_conf->q_ul_4g.num_qgroups = 0;\n-\tacc100_conf->q_ul_4g.first_qgroup_index = -1;\n-\tacc100_conf->q_ul_5g.num_aqs_per_groups = 0;\n-\tacc100_conf->q_ul_5g.num_qgroups = 0;\n-\tacc100_conf->q_ul_5g.first_qgroup_index = -1;\n-\tacc100_conf->q_dl_4g.num_aqs_per_groups = 0;\n-\tacc100_conf->q_dl_4g.num_qgroups = 0;\n-\tacc100_conf->q_dl_4g.first_qgroup_index = -1;\n-\tacc100_conf->q_dl_5g.num_aqs_per_groups = 0;\n-\tacc100_conf->q_dl_5g.num_qgroups = 0;\n-\tacc100_conf->q_dl_5g.first_qgroup_index = -1;\n+\tacc_conf->q_ul_4g.num_aqs_per_groups = 0;\n+\tacc_conf->q_ul_4g.num_qgroups = 0;\n+\tacc_conf->q_ul_4g.first_qgroup_index = -1;\n+\tacc_conf->q_ul_5g.num_aqs_per_groups = 0;\n+\tacc_conf->q_ul_5g.num_qgroups = 0;\n+\tacc_conf->q_ul_5g.first_qgroup_index = -1;\n+\tacc_conf->q_dl_4g.num_aqs_per_groups = 0;\n+\tacc_conf->q_dl_4g.num_qgroups = 0;\n+\tacc_conf->q_dl_4g.first_qgroup_index = -1;\n+\tacc_conf->q_dl_5g.num_aqs_per_groups = 0;\n+\tacc_conf->q_dl_5g.num_qgroups = 0;\n+\tacc_conf->q_dl_5g.first_qgroup_index = -1;\n }\n \n static inline void\n-updateQtop(uint8_t acc, uint8_t qg, struct rte_acc100_conf *acc100_conf,\n-\t\tstruct acc100_device *d) {\n+updateQtop(uint8_t acc, uint8_t qg, struct rte_acc_conf *acc_conf,\n+\t\tstruct acc_device *d) {\n \tuint32_t reg;\n-\tstruct rte_acc100_queue_topology *q_top = NULL;\n-\tqtopFromAcc(&q_top, acc, acc100_conf);\n+\tstruct rte_acc_queue_topology *q_top = NULL;\n+\tqtopFromAcc(&q_top, acc, acc_conf);\n \tif (unlikely(q_top == NULL))\n \t\treturn;\n \tuint16_t aq;\n@@ -188,17 +146,17 @@ updateQtop(uint8_t acc, uint8_t qg, struct rte_acc100_conf *acc100_conf,\n \tif (q_top->first_qgroup_index == -1) {\n \t\tq_top->first_qgroup_index = qg;\n \t\t/* Can be optimized to assume all are enabled by default */\n-\t\treg = acc100_reg_read(d, queue_offset(d->pf_device,\n+\t\treg = acc_reg_read(d, 
queue_offset(d->pf_device,\n \t\t\t\t0, qg, ACC100_NUM_AQS - 1));\n-\t\tif (reg & ACC100_QUEUE_ENABLE) {\n+\t\tif (reg & ACC_QUEUE_ENABLE) {\n \t\t\tq_top->num_aqs_per_groups = ACC100_NUM_AQS;\n \t\t\treturn;\n \t\t}\n \t\tq_top->num_aqs_per_groups = 0;\n \t\tfor (aq = 0; aq < ACC100_NUM_AQS; aq++) {\n-\t\t\treg = acc100_reg_read(d, queue_offset(d->pf_device,\n+\t\t\treg = acc_reg_read(d, queue_offset(d->pf_device,\n \t\t\t\t\t0, qg, aq));\n-\t\t\tif (reg & ACC100_QUEUE_ENABLE)\n+\t\t\tif (reg & ACC_QUEUE_ENABLE)\n \t\t\t\tq_top->num_aqs_per_groups++;\n \t\t}\n \t}\n@@ -208,8 +166,8 @@ updateQtop(uint8_t acc, uint8_t qg, struct rte_acc100_conf *acc100_conf,\n static inline void\n fetch_acc100_config(struct rte_bbdev *dev)\n {\n-\tstruct acc100_device *d = dev->data->dev_private;\n-\tstruct rte_acc100_conf *acc100_conf = &d->acc100_conf;\n+\tstruct acc_device *d = dev->data->dev_private;\n+\tstruct rte_acc_conf *acc_conf = &d->acc_conf;\n \tconst struct acc100_registry_addr *reg_addr;\n \tuint8_t acc, qg;\n \tuint32_t reg, reg_aq, reg_len0, reg_len1;\n@@ -225,201 +183,80 @@ fetch_acc100_config(struct rte_bbdev *dev)\n \telse\n \t\treg_addr = &vf_reg_addr;\n \n-\td->ddr_size = (1 + acc100_reg_read(d, reg_addr->ddr_range)) << 10;\n+\td->ddr_size = (1 + acc_reg_read(d, reg_addr->ddr_range)) << 10;\n \n \t/* Single VF Bundle by VF */\n-\tacc100_conf->num_vf_bundles = 1;\n-\tinitQTop(acc100_conf);\n-\n-\tstruct rte_acc100_queue_topology *q_top = NULL;\n-\tint qman_func_id[ACC100_NUM_ACCS] = {ACC100_ACCMAP_0, ACC100_ACCMAP_1,\n-\t\t\tACC100_ACCMAP_2, ACC100_ACCMAP_3, ACC100_ACCMAP_4};\n-\treg = acc100_reg_read(d, reg_addr->qman_group_func);\n-\tfor (qg = 0; qg < ACC100_NUM_QGRPS_PER_WORD; qg++) {\n-\t\treg_aq = acc100_reg_read(d,\n+\tacc_conf->num_vf_bundles = 1;\n+\tinitQTop(acc_conf);\n+\n+\tstruct rte_acc_queue_topology *q_top = NULL;\n+\tint qman_func_id[ACC100_NUM_ACCS] = {ACC_ACCMAP_0, ACC_ACCMAP_1,\n+\t\t\tACC_ACCMAP_2, ACC_ACCMAP_3, ACC_ACCMAP_4};\n+\treg = 
acc_reg_read(d, reg_addr->qman_group_func);\n+\tfor (qg = 0; qg < ACC_NUM_QGRPS_PER_WORD; qg++) {\n+\t\treg_aq = acc_reg_read(d,\n \t\t\t\tqueue_offset(d->pf_device, 0, qg, 0));\n-\t\tif (reg_aq & ACC100_QUEUE_ENABLE) {\n+\t\tif (reg_aq & ACC_QUEUE_ENABLE) {\n \t\t\tuint32_t idx = (reg >> (qg * 4)) & 0x7;\n \t\t\tif (idx < ACC100_NUM_ACCS) {\n \t\t\t\tacc = qman_func_id[idx];\n-\t\t\t\tupdateQtop(acc, qg, acc100_conf, d);\n+\t\t\t\tupdateQtop(acc, qg, acc_conf, d);\n \t\t\t}\n \t\t}\n \t}\n \n \t/* Check the depth of the AQs*/\n-\treg_len0 = acc100_reg_read(d, reg_addr->depth_log0_offset);\n-\treg_len1 = acc100_reg_read(d, reg_addr->depth_log1_offset);\n+\treg_len0 = acc_reg_read(d, reg_addr->depth_log0_offset);\n+\treg_len1 = acc_reg_read(d, reg_addr->depth_log1_offset);\n \tfor (acc = 0; acc < NUM_ACC; acc++) {\n-\t\tqtopFromAcc(&q_top, acc, acc100_conf);\n-\t\tif (q_top->first_qgroup_index < ACC100_NUM_QGRPS_PER_WORD)\n+\t\tqtopFromAcc(&q_top, acc, acc_conf);\n+\t\tif (q_top->first_qgroup_index < ACC_NUM_QGRPS_PER_WORD)\n \t\t\tq_top->aq_depth_log2 = (reg_len0 >>\n \t\t\t\t\t(q_top->first_qgroup_index * 4))\n \t\t\t\t\t& 0xF;\n \t\telse\n \t\t\tq_top->aq_depth_log2 = (reg_len1 >>\n \t\t\t\t\t((q_top->first_qgroup_index -\n-\t\t\t\t\tACC100_NUM_QGRPS_PER_WORD) * 4))\n+\t\t\t\t\tACC_NUM_QGRPS_PER_WORD) * 4))\n \t\t\t\t\t& 0xF;\n \t}\n \n \t/* Read PF mode */\n \tif (d->pf_device) {\n-\t\treg_mode = acc100_reg_read(d, HWPfHiPfMode);\n-\t\tacc100_conf->pf_mode_en = (reg_mode == ACC100_PF_VAL) ? 1 : 0;\n+\t\treg_mode = acc_reg_read(d, HWPfHiPfMode);\n+\t\tacc_conf->pf_mode_en = (reg_mode == ACC_PF_VAL) ? 1 : 0;\n \t}\n \n \trte_bbdev_log_debug(\n \t\t\t\"%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u AQ %u %u %u %u Len %u %u %u %u\\n\",\n \t\t\t(d->pf_device) ? \"PF\" : \"VF\",\n-\t\t\t(acc100_conf->input_pos_llr_1_bit) ? \"POS\" : \"NEG\",\n-\t\t\t(acc100_conf->output_pos_llr_1_bit) ? 
\"POS\" : \"NEG\",\n-\t\t\tacc100_conf->q_ul_4g.num_qgroups,\n-\t\t\tacc100_conf->q_dl_4g.num_qgroups,\n-\t\t\tacc100_conf->q_ul_5g.num_qgroups,\n-\t\t\tacc100_conf->q_dl_5g.num_qgroups,\n-\t\t\tacc100_conf->q_ul_4g.num_aqs_per_groups,\n-\t\t\tacc100_conf->q_dl_4g.num_aqs_per_groups,\n-\t\t\tacc100_conf->q_ul_5g.num_aqs_per_groups,\n-\t\t\tacc100_conf->q_dl_5g.num_aqs_per_groups,\n-\t\t\tacc100_conf->q_ul_4g.aq_depth_log2,\n-\t\t\tacc100_conf->q_dl_4g.aq_depth_log2,\n-\t\t\tacc100_conf->q_ul_5g.aq_depth_log2,\n-\t\t\tacc100_conf->q_dl_5g.aq_depth_log2);\n-}\n-\n-static void\n-free_base_addresses(void **base_addrs, int size)\n-{\n-\tint i;\n-\tfor (i = 0; i < size; i++)\n-\t\trte_free(base_addrs[i]);\n-}\n-\n-static inline uint32_t\n-get_desc_len(void)\n-{\n-\treturn sizeof(union acc100_dma_desc);\n-}\n-\n-/* Allocate the 2 * 64MB block for the sw rings */\n-static int\n-alloc_2x64mb_sw_rings_mem(struct rte_bbdev *dev, struct acc100_device *d,\n-\t\tint socket)\n-{\n-\tuint32_t sw_ring_size = ACC100_SIZE_64MBYTE;\n-\td->sw_rings_base = rte_zmalloc_socket(dev->device->driver->name,\n-\t\t\t2 * sw_ring_size, RTE_CACHE_LINE_SIZE, socket);\n-\tif (d->sw_rings_base == NULL) {\n-\t\trte_bbdev_log(ERR, \"Failed to allocate memory for %s:%u\",\n-\t\t\t\tdev->device->driver->name,\n-\t\t\t\tdev->data->dev_id);\n-\t\treturn -ENOMEM;\n-\t}\n-\tuint32_t next_64mb_align_offset = calc_mem_alignment_offset(\n-\t\t\td->sw_rings_base, ACC100_SIZE_64MBYTE);\n-\td->sw_rings = RTE_PTR_ADD(d->sw_rings_base, next_64mb_align_offset);\n-\td->sw_rings_iova = rte_malloc_virt2iova(d->sw_rings_base) +\n-\t\t\tnext_64mb_align_offset;\n-\td->sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();\n-\td->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;\n-\n-\treturn 0;\n-}\n-\n-/* Attempt to allocate minimised memory space for sw rings */\n-static void\n-alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc100_device *d,\n-\t\tuint16_t num_queues, int socket)\n-{\n-\trte_iova_t sw_rings_base_iova, 
next_64mb_align_addr_iova;\n-\tuint32_t next_64mb_align_offset;\n-\trte_iova_t sw_ring_iova_end_addr;\n-\tvoid *base_addrs[ACC100_SW_RING_MEM_ALLOC_ATTEMPTS];\n-\tvoid *sw_rings_base;\n-\tint i = 0;\n-\tuint32_t q_sw_ring_size = ACC100_MAX_QUEUE_DEPTH * get_desc_len();\n-\tuint32_t dev_sw_ring_size = q_sw_ring_size * num_queues;\n-\n-\t/* Find an aligned block of memory to store sw rings */\n-\twhile (i < ACC100_SW_RING_MEM_ALLOC_ATTEMPTS) {\n-\t\t/*\n-\t\t * sw_ring allocated memory is guaranteed to be aligned to\n-\t\t * q_sw_ring_size at the condition that the requested size is\n-\t\t * less than the page size\n-\t\t */\n-\t\tsw_rings_base = rte_zmalloc_socket(\n-\t\t\t\tdev->device->driver->name,\n-\t\t\t\tdev_sw_ring_size, q_sw_ring_size, socket);\n-\n-\t\tif (sw_rings_base == NULL) {\n-\t\t\trte_bbdev_log(ERR,\n-\t\t\t\t\t\"Failed to allocate memory for %s:%u\",\n-\t\t\t\t\tdev->device->driver->name,\n-\t\t\t\t\tdev->data->dev_id);\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\tsw_rings_base_iova = rte_malloc_virt2iova(sw_rings_base);\n-\t\tnext_64mb_align_offset = calc_mem_alignment_offset(\n-\t\t\t\tsw_rings_base, ACC100_SIZE_64MBYTE);\n-\t\tnext_64mb_align_addr_iova = sw_rings_base_iova +\n-\t\t\t\tnext_64mb_align_offset;\n-\t\tsw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size;\n-\n-\t\t/* Check if the end of the sw ring memory block is before the\n-\t\t * start of next 64MB aligned mem address\n-\t\t */\n-\t\tif (sw_ring_iova_end_addr < next_64mb_align_addr_iova) {\n-\t\t\td->sw_rings_iova = sw_rings_base_iova;\n-\t\t\td->sw_rings = sw_rings_base;\n-\t\t\td->sw_rings_base = sw_rings_base;\n-\t\t\td->sw_ring_size = q_sw_ring_size;\n-\t\t\td->sw_ring_max_depth = ACC100_MAX_QUEUE_DEPTH;\n-\t\t\tbreak;\n-\t\t}\n-\t\t/* Store the address of the unaligned mem block */\n-\t\tbase_addrs[i] = sw_rings_base;\n-\t\ti++;\n-\t}\n-\n-\t/* Free all unaligned blocks of mem allocated in the loop */\n-\tfree_base_addresses(base_addrs, i);\n-}\n-\n-/*\n- * Find queue_id of a 
device queue based on details from the Info Ring.\n- * If a queue isn't found UINT16_MAX is returned.\n- */\n-static inline uint16_t\n-get_queue_id_from_ring_info(struct rte_bbdev_data *data,\n-\t\tconst union acc100_info_ring_data ring_data)\n-{\n-\tuint16_t queue_id;\n-\n-\tfor (queue_id = 0; queue_id < data->num_queues; ++queue_id) {\n-\t\tstruct acc100_queue *acc100_q =\n-\t\t\t\tdata->queues[queue_id].queue_private;\n-\t\tif (acc100_q != NULL && acc100_q->aq_id == ring_data.aq_id &&\n-\t\t\t\tacc100_q->qgrp_id == ring_data.qg_id &&\n-\t\t\t\tacc100_q->vf_id == ring_data.vf_id)\n-\t\t\treturn queue_id;\n-\t}\n-\n-\treturn UINT16_MAX;\n+\t\t\t(acc_conf->input_pos_llr_1_bit) ? \"POS\" : \"NEG\",\n+\t\t\t(acc_conf->output_pos_llr_1_bit) ? \"POS\" : \"NEG\",\n+\t\t\tacc_conf->q_ul_4g.num_qgroups,\n+\t\t\tacc_conf->q_dl_4g.num_qgroups,\n+\t\t\tacc_conf->q_ul_5g.num_qgroups,\n+\t\t\tacc_conf->q_dl_5g.num_qgroups,\n+\t\t\tacc_conf->q_ul_4g.num_aqs_per_groups,\n+\t\t\tacc_conf->q_dl_4g.num_aqs_per_groups,\n+\t\t\tacc_conf->q_ul_5g.num_aqs_per_groups,\n+\t\t\tacc_conf->q_dl_5g.num_aqs_per_groups,\n+\t\t\tacc_conf->q_ul_4g.aq_depth_log2,\n+\t\t\tacc_conf->q_dl_4g.aq_depth_log2,\n+\t\t\tacc_conf->q_ul_5g.aq_depth_log2,\n+\t\t\tacc_conf->q_dl_5g.aq_depth_log2);\n }\n \n /* Checks PF Info Ring to find the interrupt cause and handles it accordingly */\n static inline void\n-acc100_check_ir(struct acc100_device *acc100_dev)\n+acc100_check_ir(struct acc_device *acc100_dev)\n {\n-\tvolatile union acc100_info_ring_data *ring_data;\n+\tvolatile union acc_info_ring_data *ring_data;\n \tuint16_t info_ring_head = acc100_dev->info_ring_head;\n \tif (acc100_dev->info_ring == NULL)\n \t\treturn;\n \n \tring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &\n-\t\t\tACC100_INFO_RING_MASK);\n+\t\t\tACC_INFO_RING_MASK);\n \n \twhile (ring_data->valid) {\n \t\tif ((ring_data->int_nb < ACC100_PF_INT_DMA_DL_DESC_IRQ) || (\n@@ -431,7 +268,7 @@ acc100_check_ir(struct acc100_device 
*acc100_dev)\n \t\tring_data->val = 0;\n \t\tinfo_ring_head++;\n \t\tring_data = acc100_dev->info_ring +\n-\t\t\t\t(info_ring_head & ACC100_INFO_RING_MASK);\n+\t\t\t\t(info_ring_head & ACC_INFO_RING_MASK);\n \t}\n }\n \n@@ -439,12 +276,12 @@ acc100_check_ir(struct acc100_device *acc100_dev)\n static inline void\n acc100_pf_interrupt_handler(struct rte_bbdev *dev)\n {\n-\tstruct acc100_device *acc100_dev = dev->data->dev_private;\n-\tvolatile union acc100_info_ring_data *ring_data;\n-\tstruct acc100_deq_intr_details deq_intr_det;\n+\tstruct acc_device *acc100_dev = dev->data->dev_private;\n+\tvolatile union acc_info_ring_data *ring_data;\n+\tstruct acc_deq_intr_details deq_intr_det;\n \n \tring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &\n-\t\t\tACC100_INFO_RING_MASK);\n+\t\t\tACC_INFO_RING_MASK);\n \n \twhile (ring_data->valid) {\n \n@@ -481,7 +318,7 @@ acc100_pf_interrupt_handler(struct rte_bbdev *dev)\n \t\t++acc100_dev->info_ring_head;\n \t\tring_data = acc100_dev->info_ring +\n \t\t\t\t(acc100_dev->info_ring_head &\n-\t\t\t\tACC100_INFO_RING_MASK);\n+\t\t\t\tACC_INFO_RING_MASK);\n \t}\n }\n \n@@ -489,12 +326,12 @@ acc100_pf_interrupt_handler(struct rte_bbdev *dev)\n static inline void\n acc100_vf_interrupt_handler(struct rte_bbdev *dev)\n {\n-\tstruct acc100_device *acc100_dev = dev->data->dev_private;\n-\tvolatile union acc100_info_ring_data *ring_data;\n-\tstruct acc100_deq_intr_details deq_intr_det;\n+\tstruct acc_device *acc100_dev = dev->data->dev_private;\n+\tvolatile union acc_info_ring_data *ring_data;\n+\tstruct acc_deq_intr_details deq_intr_det;\n \n \tring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head &\n-\t\t\tACC100_INFO_RING_MASK);\n+\t\t\tACC_INFO_RING_MASK);\n \n \twhile (ring_data->valid) {\n \n@@ -533,7 +370,7 @@ acc100_vf_interrupt_handler(struct rte_bbdev *dev)\n \t\tring_data->valid = 0;\n \t\t++acc100_dev->info_ring_head;\n \t\tring_data = acc100_dev->info_ring + (acc100_dev->info_ring_head\n-\t\t\t\t& 
ACC100_INFO_RING_MASK);\n+\t\t\t\t& ACC_INFO_RING_MASK);\n \t}\n }\n \n@@ -542,7 +379,7 @@ static void\n acc100_dev_interrupt_handler(void *cb_arg)\n {\n \tstruct rte_bbdev *dev = cb_arg;\n-\tstruct acc100_device *acc100_dev = dev->data->dev_private;\n+\tstruct acc_device *acc100_dev = dev->data->dev_private;\n \n \t/* Read info ring */\n \tif (acc100_dev->pf_device)\n@@ -555,7 +392,7 @@ acc100_dev_interrupt_handler(void *cb_arg)\n static int\n allocate_info_ring(struct rte_bbdev *dev)\n {\n-\tstruct acc100_device *d = dev->data->dev_private;\n+\tstruct acc_device *d = dev->data->dev_private;\n \tconst struct acc100_registry_addr *reg_addr;\n \trte_iova_t info_ring_iova;\n \tuint32_t phys_low, phys_high;\n@@ -570,7 +407,7 @@ allocate_info_ring(struct rte_bbdev *dev)\n \t\treg_addr = &vf_reg_addr;\n \t/* Allocate InfoRing */\n \td->info_ring = rte_zmalloc_socket(\"Info Ring\",\n-\t\t\tACC100_INFO_RING_NUM_ENTRIES *\n+\t\t\tACC_INFO_RING_NUM_ENTRIES *\n \t\t\tsizeof(*d->info_ring), RTE_CACHE_LINE_SIZE,\n \t\t\tdev->data->socket_id);\n \tif (d->info_ring == NULL) {\n@@ -585,11 +422,11 @@ allocate_info_ring(struct rte_bbdev *dev)\n \t/* Setup Info Ring */\n \tphys_high = (uint32_t)(info_ring_iova >> 32);\n \tphys_low  = (uint32_t)(info_ring_iova);\n-\tacc100_reg_write(d, reg_addr->info_ring_hi, phys_high);\n-\tacc100_reg_write(d, reg_addr->info_ring_lo, phys_low);\n-\tacc100_reg_write(d, reg_addr->info_ring_en, ACC100_REG_IRQ_EN_ALL);\n-\td->info_ring_head = (acc100_reg_read(d, reg_addr->info_ring_ptr) &\n-\t\t\t0xFFF) / sizeof(union acc100_info_ring_data);\n+\tacc_reg_write(d, reg_addr->info_ring_hi, phys_high);\n+\tacc_reg_write(d, reg_addr->info_ring_lo, phys_low);\n+\tacc_reg_write(d, reg_addr->info_ring_en, ACC100_REG_IRQ_EN_ALL);\n+\td->info_ring_head = (acc_reg_read(d, reg_addr->info_ring_ptr) &\n+\t\t\t0xFFF) / sizeof(union acc_info_ring_data);\n \treturn 0;\n }\n \n@@ -599,11 +436,11 @@ static int\n acc100_setup_queues(struct rte_bbdev *dev, uint16_t 
num_queues, int socket_id)\n {\n \tuint32_t phys_low, phys_high, value;\n-\tstruct acc100_device *d = dev->data->dev_private;\n+\tstruct acc_device *d = dev->data->dev_private;\n \tconst struct acc100_registry_addr *reg_addr;\n \tint ret;\n \n-\tif (d->pf_device && !d->acc100_conf.pf_mode_en) {\n+\tif (d->pf_device && !d->acc_conf.pf_mode_en) {\n \t\trte_bbdev_log(NOTICE,\n \t\t\t\t\"%s has PF mode disabled. This PF can't be used.\",\n \t\t\t\tdev->data->name);\n@@ -629,7 +466,7 @@ acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n \t * Note : Assuming only VF0 bundle is used for PF mode\n \t */\n \tphys_high = (uint32_t)(d->sw_rings_iova >> 32);\n-\tphys_low  = (uint32_t)(d->sw_rings_iova & ~(ACC100_SIZE_64MBYTE-1));\n+\tphys_low  = (uint32_t)(d->sw_rings_iova & ~(ACC_SIZE_64MBYTE-1));\n \n \t/* Choose correct registry addresses for the device type */\n \tif (d->pf_device)\n@@ -642,23 +479,23 @@ acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n \n \t/* Release AXI from PF */\n \tif (d->pf_device)\n-\t\tacc100_reg_write(d, HWPfDmaAxiControl, 1);\n+\t\tacc_reg_write(d, HWPfDmaAxiControl, 1);\n \n-\tacc100_reg_write(d, reg_addr->dma_ring_ul5g_hi, phys_high);\n-\tacc100_reg_write(d, reg_addr->dma_ring_ul5g_lo, phys_low);\n-\tacc100_reg_write(d, reg_addr->dma_ring_dl5g_hi, phys_high);\n-\tacc100_reg_write(d, reg_addr->dma_ring_dl5g_lo, phys_low);\n-\tacc100_reg_write(d, reg_addr->dma_ring_ul4g_hi, phys_high);\n-\tacc100_reg_write(d, reg_addr->dma_ring_ul4g_lo, phys_low);\n-\tacc100_reg_write(d, reg_addr->dma_ring_dl4g_hi, phys_high);\n-\tacc100_reg_write(d, reg_addr->dma_ring_dl4g_lo, phys_low);\n+\tacc_reg_write(d, reg_addr->dma_ring_ul5g_hi, phys_high);\n+\tacc_reg_write(d, reg_addr->dma_ring_ul5g_lo, phys_low);\n+\tacc_reg_write(d, reg_addr->dma_ring_dl5g_hi, phys_high);\n+\tacc_reg_write(d, reg_addr->dma_ring_dl5g_lo, phys_low);\n+\tacc_reg_write(d, reg_addr->dma_ring_ul4g_hi, 
phys_high);\n+\tacc_reg_write(d, reg_addr->dma_ring_ul4g_lo, phys_low);\n+\tacc_reg_write(d, reg_addr->dma_ring_dl4g_hi, phys_high);\n+\tacc_reg_write(d, reg_addr->dma_ring_dl4g_lo, phys_low);\n \n \t/*\n \t * Configure Ring Size to the max queue ring size\n \t * (used for wrapping purpose)\n \t */\n \tvalue = log2_basic(d->sw_ring_size / 64);\n-\tacc100_reg_write(d, reg_addr->ring_size, value);\n+\tacc_reg_write(d, reg_addr->ring_size, value);\n \n \t/* Configure tail pointer for use when SDONE enabled */\n \td->tail_ptrs = rte_zmalloc_socket(\n@@ -676,14 +513,14 @@ acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n \n \tphys_high = (uint32_t)(d->tail_ptr_iova >> 32);\n \tphys_low  = (uint32_t)(d->tail_ptr_iova);\n-\tacc100_reg_write(d, reg_addr->tail_ptrs_ul5g_hi, phys_high);\n-\tacc100_reg_write(d, reg_addr->tail_ptrs_ul5g_lo, phys_low);\n-\tacc100_reg_write(d, reg_addr->tail_ptrs_dl5g_hi, phys_high);\n-\tacc100_reg_write(d, reg_addr->tail_ptrs_dl5g_lo, phys_low);\n-\tacc100_reg_write(d, reg_addr->tail_ptrs_ul4g_hi, phys_high);\n-\tacc100_reg_write(d, reg_addr->tail_ptrs_ul4g_lo, phys_low);\n-\tacc100_reg_write(d, reg_addr->tail_ptrs_dl4g_hi, phys_high);\n-\tacc100_reg_write(d, reg_addr->tail_ptrs_dl4g_lo, phys_low);\n+\tacc_reg_write(d, reg_addr->tail_ptrs_ul5g_hi, phys_high);\n+\tacc_reg_write(d, reg_addr->tail_ptrs_ul5g_lo, phys_low);\n+\tacc_reg_write(d, reg_addr->tail_ptrs_dl5g_hi, phys_high);\n+\tacc_reg_write(d, reg_addr->tail_ptrs_dl5g_lo, phys_low);\n+\tacc_reg_write(d, reg_addr->tail_ptrs_ul4g_hi, phys_high);\n+\tacc_reg_write(d, reg_addr->tail_ptrs_ul4g_lo, phys_low);\n+\tacc_reg_write(d, reg_addr->tail_ptrs_dl4g_hi, phys_high);\n+\tacc_reg_write(d, reg_addr->tail_ptrs_dl4g_lo, phys_low);\n \n \tret = allocate_info_ring(dev);\n \tif (ret < 0) {\n@@ -694,7 +531,7 @@ acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n \t}\n \n \td->harq_layout = rte_zmalloc_socket(\"HARQ 
Layout\",\n-\t\t\tACC100_HARQ_LAYOUT * sizeof(*d->harq_layout),\n+\t\t\tACC_HARQ_LAYOUT * sizeof(*d->harq_layout),\n \t\t\tRTE_CACHE_LINE_SIZE, dev->data->socket_id);\n \tif (d->harq_layout == NULL) {\n \t\trte_bbdev_log(ERR, \"Failed to allocate harq_layout for %s:%u\",\n@@ -718,7 +555,7 @@ static int\n acc100_intr_enable(struct rte_bbdev *dev)\n {\n \tint ret;\n-\tstruct acc100_device *d = dev->data->dev_private;\n+\tstruct acc_device *d = dev->data->dev_private;\n \n \t/* Only MSI are currently supported */\n \tif (rte_intr_type_get(dev->intr_handle) == RTE_INTR_HANDLE_VFIO_MSI ||\n@@ -762,7 +599,7 @@ acc100_intr_enable(struct rte_bbdev *dev)\n static int\n acc100_dev_close(struct rte_bbdev *dev)\n {\n-\tstruct acc100_device *d = dev->data->dev_private;\n+\tstruct acc_device *d = dev->data->dev_private;\n \tacc100_check_ir(d);\n \tif (d->sw_rings_base != NULL) {\n \t\trte_free(d->tail_ptrs);\n@@ -771,7 +608,7 @@ acc100_dev_close(struct rte_bbdev *dev)\n \t\td->sw_rings_base = NULL;\n \t}\n \t/* Ensure all in flight HW transactions are completed */\n-\tusleep(ACC100_LONG_WAIT);\n+\tusleep(ACC_LONG_WAIT);\n \treturn 0;\n }\n \n@@ -784,12 +621,12 @@ static int\n acc100_find_free_queue_idx(struct rte_bbdev *dev,\n \t\tconst struct rte_bbdev_queue_conf *conf)\n {\n-\tstruct acc100_device *d = dev->data->dev_private;\n+\tstruct acc_device *d = dev->data->dev_private;\n \tint op_2_acc[5] = {0, UL_4G, DL_4G, UL_5G, DL_5G};\n \tint acc = op_2_acc[conf->op_type];\n-\tstruct rte_acc100_queue_topology *qtop = NULL;\n+\tstruct rte_acc_queue_topology *qtop = NULL;\n \n-\tqtopFromAcc(&qtop, acc, &(d->acc100_conf));\n+\tqtopFromAcc(&qtop, acc, &(d->acc_conf));\n \tif (qtop == NULL)\n \t\treturn -1;\n \t/* Identify matching QGroup Index which are sorted in priority order */\n@@ -802,7 +639,7 @@ acc100_find_free_queue_idx(struct rte_bbdev *dev,\n \t\treturn -1;\n \t}\n \t/* Find a free AQ_idx  */\n-\tuint16_t aq_idx;\n+\tuint64_t aq_idx;\n \tfor (aq_idx = 0; aq_idx < 
qtop->num_aqs_per_groups; aq_idx++) {\n \t\tif (((d->q_assigned_bit_map[group_idx] >> aq_idx) & 0x1) == 0) {\n \t\t\t/* Mark the Queue as assigned */\n@@ -821,8 +658,8 @@ static int\n acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,\n \t\tconst struct rte_bbdev_queue_conf *conf)\n {\n-\tstruct acc100_device *d = dev->data->dev_private;\n-\tstruct acc100_queue *q;\n+\tstruct acc_device *d = dev->data->dev_private;\n+\tstruct acc_queue *q;\n \tint16_t q_idx;\n \n \t/* Allocate the queue data structure. */\n@@ -842,37 +679,37 @@ acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,\n \tq->ring_addr_iova = d->sw_rings_iova + (d->sw_ring_size * queue_id);\n \n \t/* Prepare the Ring with default descriptor format */\n-\tunion acc100_dma_desc *desc = NULL;\n+\tunion acc_dma_desc *desc = NULL;\n \tunsigned int desc_idx, b_idx;\n \tint fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?\n-\t\tACC100_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?\n-\t\tACC100_FCW_TD_BLEN : ACC100_FCW_LD_BLEN));\n+\t\tACC_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?\n+\t\tACC_FCW_TD_BLEN : ACC_FCW_LD_BLEN));\n \n \tfor (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {\n \t\tdesc = q->ring_addr + desc_idx;\n-\t\tdesc->req.word0 = ACC100_DMA_DESC_TYPE;\n+\t\tdesc->req.word0 = ACC_DMA_DESC_TYPE;\n \t\tdesc->req.word1 = 0; /**< Timestamp */\n \t\tdesc->req.word2 = 0;\n \t\tdesc->req.word3 = 0;\n-\t\tuint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;\n+\t\tuint64_t fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;\n \t\tdesc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;\n \t\tdesc->req.data_ptrs[0].blen = fcw_len;\n-\t\tdesc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;\n+\t\tdesc->req.data_ptrs[0].blkid = ACC_DMA_BLKID_FCW;\n \t\tdesc->req.data_ptrs[0].last = 0;\n \t\tdesc->req.data_ptrs[0].dma_ext = 0;\n-\t\tfor (b_idx = 1; b_idx < ACC100_DMA_MAX_NUM_POINTERS - 1;\n+\t\tfor (b_idx = 1; b_idx < 
ACC_DMA_MAX_NUM_POINTERS - 1;\n \t\t\t\tb_idx++) {\n-\t\t\tdesc->req.data_ptrs[b_idx].blkid = ACC100_DMA_BLKID_IN;\n+\t\t\tdesc->req.data_ptrs[b_idx].blkid = ACC_DMA_BLKID_IN;\n \t\t\tdesc->req.data_ptrs[b_idx].last = 1;\n \t\t\tdesc->req.data_ptrs[b_idx].dma_ext = 0;\n \t\t\tb_idx++;\n \t\t\tdesc->req.data_ptrs[b_idx].blkid =\n-\t\t\t\t\tACC100_DMA_BLKID_OUT_ENC;\n+\t\t\t\t\tACC_DMA_BLKID_OUT_ENC;\n \t\t\tdesc->req.data_ptrs[b_idx].last = 1;\n \t\t\tdesc->req.data_ptrs[b_idx].dma_ext = 0;\n \t\t}\n \t\t/* Preset some fields of LDPC FCW */\n-\t\tdesc->req.fcw_ld.FCWversion = ACC100_FCW_VER;\n+\t\tdesc->req.fcw_ld.FCWversion = ACC_FCW_VER;\n \t\tdesc->req.fcw_ld.gain_i = 1;\n \t\tdesc->req.fcw_ld.gain_h = 1;\n \t}\n@@ -925,8 +762,8 @@ acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,\n \tq->vf_id = (q_idx >> ACC100_VF_ID_SHIFT)  & 0x3F;\n \tq->aq_id = q_idx & 0xF;\n \tq->aq_depth = (conf->op_type ==  RTE_BBDEV_OP_TURBO_DEC) ?\n-\t\t\t(1 << d->acc100_conf.q_ul_4g.aq_depth_log2) :\n-\t\t\t(1 << d->acc100_conf.q_dl_4g.aq_depth_log2);\n+\t\t\t(1 << d->acc_conf.q_ul_4g.aq_depth_log2) :\n+\t\t\t(1 << d->acc_conf.q_dl_4g.aq_depth_log2);\n \n \tq->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,\n \t\t\tqueue_offset(d->pf_device,\n@@ -945,13 +782,13 @@ acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,\n static int\n acc100_queue_release(struct rte_bbdev *dev, uint16_t q_id)\n {\n-\tstruct acc100_device *d = dev->data->dev_private;\n-\tstruct acc100_queue *q = dev->data->queues[q_id].queue_private;\n+\tstruct acc_device *d = dev->data->dev_private;\n+\tstruct acc_queue *q = dev->data->queues[q_id].queue_private;\n \n \tif (q != NULL) {\n \t\t/* Mark the Queue as un-assigned */\n-\t\td->q_assigned_bit_map[q->qgrp_id] &= (0xFFFFFFFF -\n-\t\t\t\t(1 << q->aq_id));\n+\t\td->q_assigned_bit_map[q->qgrp_id] &= (0xFFFFFFFFFFFFFFFF -\n+\t\t\t\t(uint64_t) (1 << q->aq_id));\n \t\trte_free(q->lb_in);\n \t\trte_free(q->lb_out);\n \t\trte_free(q);\n@@ -966,7 +803,7 @@ 
static void\n acc100_dev_info_get(struct rte_bbdev *dev,\n \t\tstruct rte_bbdev_driver_info *dev_info)\n {\n-\tstruct acc100_device *d = dev->data->dev_private;\n+\tstruct acc_device *d = dev->data->dev_private;\n \tint i;\n \n \tstatic const struct rte_bbdev_op_cap bbdev_capabilities[] = {\n@@ -1056,7 +893,7 @@ acc100_dev_info_get(struct rte_bbdev *dev,\n \n \tstatic struct rte_bbdev_queue_conf default_queue_conf;\n \tdefault_queue_conf.socket = dev->data->socket_id;\n-\tdefault_queue_conf.queue_size = ACC100_MAX_QUEUE_DEPTH;\n+\tdefault_queue_conf.queue_size = ACC_MAX_QUEUE_DEPTH;\n \n \tdev_info->driver_name = dev->device->driver->name;\n \n@@ -1066,27 +903,27 @@ acc100_dev_info_get(struct rte_bbdev *dev,\n \n \t/* Expose number of queues */\n \tdev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;\n-\tdev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = d->acc100_conf.q_ul_4g.num_aqs_per_groups *\n-\t\t\td->acc100_conf.q_ul_4g.num_qgroups;\n-\tdev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = d->acc100_conf.q_dl_4g.num_aqs_per_groups *\n-\t\t\td->acc100_conf.q_dl_4g.num_qgroups;\n-\tdev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = d->acc100_conf.q_ul_5g.num_aqs_per_groups *\n-\t\t\td->acc100_conf.q_ul_5g.num_qgroups;\n-\tdev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = d->acc100_conf.q_dl_5g.num_aqs_per_groups *\n-\t\t\td->acc100_conf.q_dl_5g.num_qgroups;\n-\tdev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = d->acc100_conf.q_ul_4g.num_qgroups;\n-\tdev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = d->acc100_conf.q_dl_4g.num_qgroups;\n-\tdev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = d->acc100_conf.q_ul_5g.num_qgroups;\n-\tdev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = d->acc100_conf.q_dl_5g.num_qgroups;\n+\tdev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = d->acc_conf.q_ul_4g.num_aqs_per_groups *\n+\t\t\td->acc_conf.q_ul_4g.num_qgroups;\n+\tdev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = d->acc_conf.q_dl_4g.num_aqs_per_groups 
*\n+\t\t\td->acc_conf.q_dl_4g.num_qgroups;\n+\tdev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = d->acc_conf.q_ul_5g.num_aqs_per_groups *\n+\t\t\td->acc_conf.q_ul_5g.num_qgroups;\n+\tdev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = d->acc_conf.q_dl_5g.num_aqs_per_groups *\n+\t\t\td->acc_conf.q_dl_5g.num_qgroups;\n+\tdev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = d->acc_conf.q_ul_4g.num_qgroups;\n+\tdev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = d->acc_conf.q_dl_4g.num_qgroups;\n+\tdev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = d->acc_conf.q_ul_5g.num_qgroups;\n+\tdev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = d->acc_conf.q_dl_5g.num_qgroups;\n \tdev_info->max_num_queues = 0;\n \tfor (i = RTE_BBDEV_OP_TURBO_DEC; i <= RTE_BBDEV_OP_LDPC_ENC; i++)\n \t\tdev_info->max_num_queues += dev_info->num_queues[i];\n-\tdev_info->queue_size_lim = ACC100_MAX_QUEUE_DEPTH;\n+\tdev_info->queue_size_lim = ACC_MAX_QUEUE_DEPTH;\n \tdev_info->hardware_accelerated = true;\n \tdev_info->max_dl_queue_priority =\n-\t\t\td->acc100_conf.q_dl_4g.num_qgroups - 1;\n+\t\t\td->acc_conf.q_dl_4g.num_qgroups - 1;\n \tdev_info->max_ul_queue_priority =\n-\t\t\td->acc100_conf.q_ul_4g.num_qgroups - 1;\n+\t\t\td->acc_conf.q_ul_4g.num_qgroups - 1;\n \tdev_info->default_queue_conf = default_queue_conf;\n \tdev_info->cpu_flag_reqs = NULL;\n \tdev_info->min_alignment = 64;\n@@ -1103,7 +940,7 @@ acc100_dev_info_get(struct rte_bbdev *dev,\n static int\n acc100_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)\n {\n-\tstruct acc100_queue *q = dev->data->queues[queue_id].queue_private;\n+\tstruct acc_queue *q = dev->data->queues[queue_id].queue_private;\n \n \tif (rte_intr_type_get(dev->intr_handle) != RTE_INTR_HANDLE_VFIO_MSI &&\n \t\t\trte_intr_type_get(dev->intr_handle) != RTE_INTR_HANDLE_UIO)\n@@ -1116,7 +953,7 @@ acc100_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)\n static int\n acc100_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)\n {\n-\tstruct acc100_queue *q = 
dev->data->queues[queue_id].queue_private;\n+\tstruct acc_queue *q = dev->data->queues[queue_id].queue_private;\n \n \tif (rte_intr_type_get(dev->intr_handle) != RTE_INTR_HANDLE_VFIO_MSI &&\n \t\t\trte_intr_type_get(dev->intr_handle) != RTE_INTR_HANDLE_UIO)\n@@ -1159,132 +996,10 @@ static struct rte_pci_id pci_id_acc100_vf_map[] = {\n \t{.device_id = 0},\n };\n \n-/* Read flag value 0/1 from bitmap */\n-static inline bool\n-check_bit(uint32_t bitmap, uint32_t bitmask)\n-{\n-\treturn bitmap & bitmask;\n-}\n-\n-static inline char *\n-mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)\n-{\n-\tif (unlikely(len > rte_pktmbuf_tailroom(m)))\n-\t\treturn NULL;\n-\n-\tchar *tail = (char *)m->buf_addr + m->data_off + m->data_len;\n-\tm->data_len = (uint16_t)(m->data_len + len);\n-\tm_head->pkt_len  = (m_head->pkt_len + len);\n-\treturn tail;\n-}\n-\n-/* Fill in a frame control word for turbo encoding. */\n-static inline void\n-acc100_fcw_te_fill(const struct rte_bbdev_enc_op *op, struct acc100_fcw_te *fcw)\n-{\n-\tfcw->code_block_mode = op->turbo_enc.code_block_mode;\n-\tif (fcw->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {\n-\t\tfcw->k_neg = op->turbo_enc.tb_params.k_neg;\n-\t\tfcw->k_pos = op->turbo_enc.tb_params.k_pos;\n-\t\tfcw->c_neg = op->turbo_enc.tb_params.c_neg;\n-\t\tfcw->c = op->turbo_enc.tb_params.c;\n-\t\tfcw->ncb_neg = op->turbo_enc.tb_params.ncb_neg;\n-\t\tfcw->ncb_pos = op->turbo_enc.tb_params.ncb_pos;\n-\n-\t\tif (check_bit(op->turbo_enc.op_flags,\n-\t\t\t\tRTE_BBDEV_TURBO_RATE_MATCH)) {\n-\t\t\tfcw->bypass_rm = 0;\n-\t\t\tfcw->cab = op->turbo_enc.tb_params.cab;\n-\t\t\tfcw->ea = op->turbo_enc.tb_params.ea;\n-\t\t\tfcw->eb = op->turbo_enc.tb_params.eb;\n-\t\t} else {\n-\t\t\t/* E is set to the encoding output size when RM is\n-\t\t\t * bypassed.\n-\t\t\t */\n-\t\t\tfcw->bypass_rm = 1;\n-\t\t\tfcw->cab = fcw->c_neg;\n-\t\t\tfcw->ea = 3 * fcw->k_neg + 12;\n-\t\t\tfcw->eb = 3 * fcw->k_pos + 12;\n-\t\t}\n-\t} else { /* For CB mode 
*/\n-\t\tfcw->k_pos = op->turbo_enc.cb_params.k;\n-\t\tfcw->ncb_pos = op->turbo_enc.cb_params.ncb;\n-\n-\t\tif (check_bit(op->turbo_enc.op_flags,\n-\t\t\t\tRTE_BBDEV_TURBO_RATE_MATCH)) {\n-\t\t\tfcw->bypass_rm = 0;\n-\t\t\tfcw->eb = op->turbo_enc.cb_params.e;\n-\t\t} else {\n-\t\t\t/* E is set to the encoding output size when RM is\n-\t\t\t * bypassed.\n-\t\t\t */\n-\t\t\tfcw->bypass_rm = 1;\n-\t\t\tfcw->eb = 3 * fcw->k_pos + 12;\n-\t\t}\n-\t}\n-\n-\tfcw->bypass_rv_idx1 = check_bit(op->turbo_enc.op_flags,\n-\t\t\tRTE_BBDEV_TURBO_RV_INDEX_BYPASS);\n-\tfcw->code_block_crc = check_bit(op->turbo_enc.op_flags,\n-\t\t\tRTE_BBDEV_TURBO_CRC_24B_ATTACH);\n-\tfcw->rv_idx1 = op->turbo_enc.rv_index;\n-}\n-\n-/* Compute value of k0.\n- * Based on 3GPP 38.212 Table 5.4.2.1-2\n- * Starting position of different redundancy versions, k0\n- */\n-static inline uint16_t\n-get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)\n-{\n-\tif (rv_index == 0)\n-\t\treturn 0;\n-\tuint16_t n = (bg == 1 ? ACC100_N_ZC_1 : ACC100_N_ZC_2) * z_c;\n-\tif (n_cb == n) {\n-\t\tif (rv_index == 1)\n-\t\t\treturn (bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * z_c;\n-\t\telse if (rv_index == 2)\n-\t\t\treturn (bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * z_c;\n-\t\telse\n-\t\t\treturn (bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * z_c;\n-\t}\n-\t/* LBRM case - includes a division by N */\n-\tif (unlikely(z_c == 0))\n-\t\treturn 0;\n-\tif (rv_index == 1)\n-\t\treturn (((bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * n_cb)\n-\t\t\t\t/ n) * z_c;\n-\telse if (rv_index == 2)\n-\t\treturn (((bg == 1 ? ACC100_K0_2_1 : ACC100_K0_2_2) * n_cb)\n-\t\t\t\t/ n) * z_c;\n-\telse\n-\t\treturn (((bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * n_cb)\n-\t\t\t\t/ n) * z_c;\n-}\n-\n-/* Fill in a frame control word for LDPC encoding. 
*/\n-static inline void\n-acc100_fcw_le_fill(const struct rte_bbdev_enc_op *op,\n-\t\tstruct acc100_fcw_le *fcw, int num_cb)\n-{\n-\tfcw->qm = op->ldpc_enc.q_m;\n-\tfcw->nfiller = op->ldpc_enc.n_filler;\n-\tfcw->BG = (op->ldpc_enc.basegraph - 1);\n-\tfcw->Zc = op->ldpc_enc.z_c;\n-\tfcw->ncb = op->ldpc_enc.n_cb;\n-\tfcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_enc.basegraph,\n-\t\t\top->ldpc_enc.rv_index);\n-\tfcw->rm_e = op->ldpc_enc.cb_params.e;\n-\tfcw->crc_select = check_bit(op->ldpc_enc.op_flags,\n-\t\t\tRTE_BBDEV_LDPC_CRC_24B_ATTACH);\n-\tfcw->bypass_intlv = check_bit(op->ldpc_enc.op_flags,\n-\t\t\tRTE_BBDEV_LDPC_INTERLEAVER_BYPASS);\n-\tfcw->mcb_count = num_cb;\n-}\n \n /* Fill in a frame control word for turbo decoding. */\n static inline void\n-acc100_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_td *fcw)\n+acc100_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc_fcw_td *fcw)\n {\n \t/* Note : Early termination is always enabled for 4GUL */\n \tfcw->fcw_ver = 1;\n@@ -1304,13 +1019,13 @@ acc100_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_td *fcw)\n #ifdef RTE_LIBRTE_BBDEV_DEBUG\n \n static inline bool\n-is_acc100(struct acc100_queue *q)\n+is_acc100(struct acc_queue *q)\n {\n \treturn (q->d->device_variant == ACC100_VARIANT);\n }\n \n static inline bool\n-validate_op_required(struct acc100_queue *q)\n+validate_op_required(struct acc_queue *q)\n {\n \treturn is_acc100(q);\n }\n@@ -1318,8 +1033,8 @@ validate_op_required(struct acc100_queue *q)\n \n /* Fill in a frame control word for LDPC decoding. 
*/\n static inline void\n-acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,\n-\t\tunion acc100_harq_layout_data *harq_layout)\n+acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw,\n+\t\tunion acc_harq_layout_data *harq_layout)\n {\n \tuint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset;\n \tuint16_t harq_index;\n@@ -1362,13 +1077,13 @@ acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,\n \tfcw->llr_pack_mode = check_bit(op->ldpc_dec.op_flags,\n \t\t\tRTE_BBDEV_LDPC_LLR_COMPRESSION);\n \tharq_index = op->ldpc_dec.harq_combined_output.offset /\n-\t\t\tACC100_HARQ_OFFSET;\n+\t\t\tACC_HARQ_OFFSET;\n #ifdef ACC100_EXT_MEM\n \t/* Limit cases when HARQ pruning is valid */\n \tharq_prun = ((op->ldpc_dec.harq_combined_output.offset %\n-\t\t\tACC100_HARQ_OFFSET) == 0) &&\n+\t\t\tACC_HARQ_OFFSET) == 0) &&\n \t\t\t(op->ldpc_dec.harq_combined_output.offset <= UINT16_MAX\n-\t\t\t* ACC100_HARQ_OFFSET);\n+\t\t\t* ACC_HARQ_OFFSET);\n #endif\n \tif (fcw->hcin_en > 0) {\n \t\tharq_in_length = op->ldpc_dec.harq_combined_input.length;\n@@ -1423,7 +1138,7 @@ acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,\n \t\tharq_out_length = (uint16_t) fcw->hcin_size0;\n \t\tharq_out_length = RTE_MIN(RTE_MAX(harq_out_length, l), ncb_p);\n \t\tharq_out_length = (harq_out_length + 0x3F) & 0xFFC0;\n-\t\tif ((k0_p > fcw->hcin_size0 + ACC100_HARQ_OFFSET_THRESHOLD) &&\n+\t\tif ((k0_p > fcw->hcin_size0 + ACC_HARQ_OFFSET_THRESHOLD) &&\n \t\t\t\tharq_prun) {\n \t\t\tfcw->hcout_size0 = (uint16_t) fcw->hcin_size0;\n \t\t\tfcw->hcout_offset = k0_p & 0xFFC0;\n@@ -1442,16 +1157,10 @@ acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,\n \t}\n }\n \n-/* Convert offset to harq index for harq_layout structure */\n-static inline uint32_t hq_index(uint32_t offset)\n-{\n-\treturn (offset >> ACC100_HARQ_OFFSET_SHIFT) & ACC100_HARQ_OFFSET_MASK;\n-}\n-\n /* Fill in a frame control word 
for LDPC decoding for ACC101 */\n static inline void\n-acc101_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,\n-\t\tunion acc100_harq_layout_data *harq_layout)\n+acc101_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw,\n+\t\tunion acc_harq_layout_data *harq_layout)\n {\n \tuint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset;\n \tuint32_t harq_index;\n@@ -1591,7 +1300,7 @@ acc101_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw,\n  *\n  */\n static inline int\n-acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc,\n+acc100_dma_fill_blk_type_in(struct acc_dma_req_desc *desc,\n \t\tstruct rte_mbuf **input, uint32_t *offset, uint32_t cb_len,\n \t\tuint32_t *seg_total_left, int next_triplet)\n {\n@@ -1605,14 +1314,14 @@ acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc,\n \tdesc->data_ptrs[next_triplet].address =\n \t\t\trte_pktmbuf_iova_offset(m, *offset);\n \tdesc->data_ptrs[next_triplet].blen = part_len;\n-\tdesc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;\n+\tdesc->data_ptrs[next_triplet].blkid = ACC_DMA_BLKID_IN;\n \tdesc->data_ptrs[next_triplet].last = 0;\n \tdesc->data_ptrs[next_triplet].dma_ext = 0;\n \t*offset += part_len;\n \tnext_triplet++;\n \n \twhile (cb_len > 0) {\n-\t\tif (next_triplet < ACC100_DMA_MAX_NUM_POINTERS_IN && m->next != NULL) {\n+\t\tif (next_triplet < ACC_DMA_MAX_NUM_POINTERS_IN && m->next != NULL) {\n \n \t\t\tm = m->next;\n \t\t\t*seg_total_left = rte_pktmbuf_data_len(m);\n@@ -1623,7 +1332,7 @@ acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc,\n \t\t\t\t\trte_pktmbuf_iova_offset(m, 0);\n \t\t\tdesc->data_ptrs[next_triplet].blen = part_len;\n \t\t\tdesc->data_ptrs[next_triplet].blkid =\n-\t\t\t\t\tACC100_DMA_BLKID_IN;\n+\t\t\t\t\tACC_DMA_BLKID_IN;\n \t\t\tdesc->data_ptrs[next_triplet].last = 0;\n \t\t\tdesc->data_ptrs[next_triplet].dma_ext = 0;\n \t\t\tcb_len -= part_len;\n@@ -1645,134 +1354,9 @@ acc100_dma_fill_blk_type_in(struct 
acc100_dma_req_desc *desc,\n \treturn next_triplet;\n }\n \n-/* Fills descriptor with data pointers of one block type.\n- * Returns index of next triplet on success, other value if lengths of\n- * output data and processed mbuf do not match.\n- */\n-static inline int\n-acc100_dma_fill_blk_type_out(struct acc100_dma_req_desc *desc,\n-\t\tstruct rte_mbuf *output, uint32_t out_offset,\n-\t\tuint32_t output_len, int next_triplet, int blk_id)\n-{\n-\tdesc->data_ptrs[next_triplet].address =\n-\t\t\trte_pktmbuf_iova_offset(output, out_offset);\n-\tdesc->data_ptrs[next_triplet].blen = output_len;\n-\tdesc->data_ptrs[next_triplet].blkid = blk_id;\n-\tdesc->data_ptrs[next_triplet].last = 0;\n-\tdesc->data_ptrs[next_triplet].dma_ext = 0;\n-\tnext_triplet++;\n-\n-\treturn next_triplet;\n-}\n-\n-static inline void\n-acc100_header_init(struct acc100_dma_req_desc *desc)\n-{\n-\tdesc->word0 = ACC100_DMA_DESC_TYPE;\n-\tdesc->word1 = 0; /**< Timestamp could be disabled */\n-\tdesc->word2 = 0;\n-\tdesc->word3 = 0;\n-\tdesc->numCBs = 1;\n-}\n-\n-#ifdef RTE_LIBRTE_BBDEV_DEBUG\n-/* Check if any input data is unexpectedly left for processing */\n-static inline int\n-check_mbuf_total_left(uint32_t mbuf_total_left)\n-{\n-\tif (mbuf_total_left == 0)\n-\t\treturn 0;\n-\trte_bbdev_log(ERR,\n-\t\t\"Some date still left for processing: mbuf_total_left = %u\",\n-\t\tmbuf_total_left);\n-\treturn -EINVAL;\n-}\n-#endif\n-\n-static inline int\n-acc100_dma_desc_te_fill(struct rte_bbdev_enc_op *op,\n-\t\tstruct acc100_dma_req_desc *desc, struct rte_mbuf **input,\n-\t\tstruct rte_mbuf *output, uint32_t *in_offset,\n-\t\tuint32_t *out_offset, uint32_t *out_length,\n-\t\tuint32_t *mbuf_total_left, uint32_t *seg_total_left, uint8_t r)\n-{\n-\tint next_triplet = 1; /* FCW already done */\n-\tuint32_t e, ea, eb, length;\n-\tuint16_t k, k_neg, k_pos;\n-\tuint8_t cab, c_neg;\n-\n-\tdesc->word0 = ACC100_DMA_DESC_TYPE;\n-\tdesc->word1 = 0; /**< Timestamp could be disabled */\n-\tdesc->word2 = 0;\n-\tdesc->word3 
= 0;\n-\tdesc->numCBs = 1;\n-\n-\tif (op->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {\n-\t\tea = op->turbo_enc.tb_params.ea;\n-\t\teb = op->turbo_enc.tb_params.eb;\n-\t\tcab = op->turbo_enc.tb_params.cab;\n-\t\tk_neg = op->turbo_enc.tb_params.k_neg;\n-\t\tk_pos = op->turbo_enc.tb_params.k_pos;\n-\t\tc_neg = op->turbo_enc.tb_params.c_neg;\n-\t\te = (r < cab) ? ea : eb;\n-\t\tk = (r < c_neg) ? k_neg : k_pos;\n-\t} else {\n-\t\te = op->turbo_enc.cb_params.e;\n-\t\tk = op->turbo_enc.cb_params.k;\n-\t}\n-\n-\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))\n-\t\tlength = (k - 24) >> 3;\n-\telse\n-\t\tlength = k >> 3;\n-\n-\tif (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < length))) {\n-\t\trte_bbdev_log(ERR,\n-\t\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n-\t\t\t\t*mbuf_total_left, length);\n-\t\treturn -1;\n-\t}\n-\n-\tnext_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset,\n-\t\t\tlength, seg_total_left, next_triplet);\n-\tif (unlikely(next_triplet < 0)) {\n-\t\trte_bbdev_log(ERR,\n-\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n-\t\t\t\top);\n-\t\treturn -1;\n-\t}\n-\tdesc->data_ptrs[next_triplet - 1].last = 1;\n-\tdesc->m2dlen = next_triplet;\n-\t*mbuf_total_left -= length;\n-\n-\t/* Set output length */\n-\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_RATE_MATCH))\n-\t\t/* Integer round up division by 8 */\n-\t\t*out_length = (e + 7) >> 3;\n-\telse\n-\t\t*out_length = (k >> 3) * 3 + 2;\n-\n-\tnext_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,\n-\t\t\t*out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);\n-\tif (unlikely(next_triplet < 0)) {\n-\t\trte_bbdev_log(ERR,\n-\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n-\t\t\t\top);\n-\t\treturn -1;\n-\t}\n-\top->turbo_enc.output.length += *out_length;\n-\t*out_offset += *out_length;\n-\tdesc->data_ptrs[next_triplet - 
1].last = 1;\n-\tdesc->d2mlen = next_triplet - desc->m2dlen;\n-\n-\tdesc->op_addr = op;\n-\n-\treturn 0;\n-}\n-\n static inline int\n acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,\n-\t\tstruct acc100_dma_req_desc *desc, struct rte_mbuf **input,\n+\t\tstruct acc_dma_req_desc *desc, struct rte_mbuf **input,\n \t\tstruct rte_mbuf *output, uint32_t *in_offset,\n \t\tuint32_t *out_offset, uint32_t *out_length,\n \t\tuint32_t *mbuf_total_left, uint32_t *seg_total_left)\n@@ -1781,7 +1365,7 @@ acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,\n \tuint16_t K, in_length_in_bits, in_length_in_bytes;\n \tstruct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;\n \n-\tacc100_header_init(desc);\n+\tacc_header_init(desc);\n \n \tK = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;\n \tin_length_in_bits = K - enc->n_filler;\n@@ -1815,8 +1399,8 @@ acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,\n \t/* Integer round up division by 8 */\n \t*out_length = (enc->cb_params.e + 7) >> 3;\n \n-\tnext_triplet = acc100_dma_fill_blk_type_out(desc, output, *out_offset,\n-\t\t\t*out_length, next_triplet, ACC100_DMA_BLKID_OUT_ENC);\n+\tnext_triplet = acc_dma_fill_blk_type(desc, output, *out_offset,\n+\t\t\t*out_length, next_triplet, ACC_DMA_BLKID_OUT_ENC);\n \top->ldpc_enc.output.length += *out_length;\n \t*out_offset += *out_length;\n \tdesc->data_ptrs[next_triplet - 1].last = 1;\n@@ -1830,7 +1414,7 @@ acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,\n \n static inline int\n acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op,\n-\t\tstruct acc100_dma_req_desc *desc, struct rte_mbuf **input,\n+\t\tstruct acc_dma_req_desc *desc, struct rte_mbuf **input,\n \t\tstruct rte_mbuf *h_output, struct rte_mbuf *s_output,\n \t\tuint32_t *in_offset, uint32_t *h_out_offset,\n \t\tuint32_t *s_out_offset, uint32_t *h_out_length,\n@@ -1842,7 +1426,7 @@ acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op,\n \tuint16_t crc24_overlap = 0;\n \tuint32_t e, kw;\n \n-\tdesc->word0 = 
ACC100_DMA_DESC_TYPE;\n+\tdesc->word0 = ACC_DMA_DESC_TYPE;\n \tdesc->word1 = 0; /**< Timestamp could be disabled */\n \tdesc->word2 = 0;\n \tdesc->word3 = 0;\n@@ -1899,10 +1483,10 @@ acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op,\n \tdesc->m2dlen = next_triplet;\n \t*mbuf_total_left -= kw;\n \n-\tnext_triplet = acc100_dma_fill_blk_type_out(\n+\tnext_triplet = acc_dma_fill_blk_type(\n \t\t\tdesc, h_output, *h_out_offset,\n \t\t\t(k - crc24_overlap) >> 3, next_triplet,\n-\t\t\tACC100_DMA_BLKID_OUT_HARD);\n+\t\t\tACC_DMA_BLKID_OUT_HARD);\n \tif (unlikely(next_triplet < 0)) {\n \t\trte_bbdev_log(ERR,\n \t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n@@ -1926,9 +1510,9 @@ acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op,\n \t\telse\n \t\t\t*s_out_length = (k * 3) + 12;\n \n-\t\tnext_triplet = acc100_dma_fill_blk_type_out(desc, s_output,\n+\t\tnext_triplet = acc_dma_fill_blk_type(desc, s_output,\n \t\t\t\t*s_out_offset, *s_out_length, next_triplet,\n-\t\t\t\tACC100_DMA_BLKID_OUT_SOFT);\n+\t\t\t\tACC_DMA_BLKID_OUT_SOFT);\n \t\tif (unlikely(next_triplet < 0)) {\n \t\t\trte_bbdev_log(ERR,\n \t\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n@@ -1950,12 +1534,12 @@ acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op,\n \n static inline int\n acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n-\t\tstruct acc100_dma_req_desc *desc,\n+\t\tstruct acc_dma_req_desc *desc,\n \t\tstruct rte_mbuf **input, struct rte_mbuf *h_output,\n \t\tuint32_t *in_offset, uint32_t *h_out_offset,\n \t\tuint32_t *h_out_length, uint32_t *mbuf_total_left,\n \t\tuint32_t *seg_total_left,\n-\t\tstruct acc100_fcw_ld *fcw)\n+\t\tstruct acc_fcw_ld *fcw)\n {\n \tstruct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;\n \tint next_triplet = 1; /* FCW already done */\n@@ -1965,7 +1549,7 @@ acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n \tbool h_comp = check_bit(dec->op_flags,\n \t\t\tRTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION);\n 
\n-\tacc100_header_init(desc);\n+\tacc_header_init(desc);\n \n \tif (check_bit(op->ldpc_dec.op_flags,\n \t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))\n@@ -2007,16 +1591,16 @@ acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n \t\tdesc->data_ptrs[next_triplet].address =\n \t\t\t\tdec->harq_combined_input.offset;\n \t\tdesc->data_ptrs[next_triplet].blen = h_p_size;\n-\t\tdesc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN_HARQ;\n+\t\tdesc->data_ptrs[next_triplet].blkid = ACC_DMA_BLKID_IN_HARQ;\n \t\tdesc->data_ptrs[next_triplet].dma_ext = 1;\n #ifndef ACC100_EXT_MEM\n-\t\tacc100_dma_fill_blk_type_out(\n+\t\tacc_dma_fill_blk_type(\n \t\t\t\tdesc,\n \t\t\t\top->ldpc_dec.harq_combined_input.data,\n \t\t\t\top->ldpc_dec.harq_combined_input.offset,\n \t\t\t\th_p_size,\n \t\t\t\tnext_triplet,\n-\t\t\t\tACC100_DMA_BLKID_IN_HARQ);\n+\t\t\t\tACC_DMA_BLKID_IN_HARQ);\n #endif\n \t\tnext_triplet++;\n \t}\n@@ -2025,9 +1609,9 @@ acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n \tdesc->m2dlen = next_triplet;\n \t*mbuf_total_left -= input_length;\n \n-\tnext_triplet = acc100_dma_fill_blk_type_out(desc, h_output,\n+\tnext_triplet = acc_dma_fill_blk_type(desc, h_output,\n \t\t\t*h_out_offset, output_length >> 3, next_triplet,\n-\t\t\tACC100_DMA_BLKID_OUT_HARD);\n+\t\t\tACC_DMA_BLKID_OUT_HARD);\n \n \tif (check_bit(op->ldpc_dec.op_flags,\n \t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {\n@@ -2045,16 +1629,16 @@ acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n \t\tdesc->data_ptrs[next_triplet].address =\n \t\t\t\tdec->harq_combined_output.offset;\n \t\tdesc->data_ptrs[next_triplet].blen = h_p_size;\n-\t\tdesc->data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARQ;\n+\t\tdesc->data_ptrs[next_triplet].blkid = ACC_DMA_BLKID_OUT_HARQ;\n \t\tdesc->data_ptrs[next_triplet].dma_ext = 1;\n #ifndef ACC100_EXT_MEM\n-\t\tacc100_dma_fill_blk_type_out(\n+\t\tacc_dma_fill_blk_type(\n \t\t\t\tdesc,\n \t\t\t\tdec->harq_combined_output.data,\n 
\t\t\t\tdec->harq_combined_output.offset,\n \t\t\t\th_p_size,\n \t\t\t\tnext_triplet,\n-\t\t\t\tACC100_DMA_BLKID_OUT_HARQ);\n+\t\t\t\tACC_DMA_BLKID_OUT_HARQ);\n #endif\n \t\tnext_triplet++;\n \t}\n@@ -2072,11 +1656,11 @@ acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n \n static inline void\n acc100_dma_desc_ld_update(struct rte_bbdev_dec_op *op,\n-\t\tstruct acc100_dma_req_desc *desc,\n+\t\tstruct acc_dma_req_desc *desc,\n \t\tstruct rte_mbuf *input, struct rte_mbuf *h_output,\n \t\tuint32_t *in_offset, uint32_t *h_out_offset,\n \t\tuint32_t *h_out_length,\n-\t\tunion acc100_harq_layout_data *harq_layout)\n+\t\tunion acc_harq_layout_data *harq_layout)\n {\n \tint next_triplet = 1; /* FCW already done */\n \tdesc->data_ptrs[next_triplet].address =\n@@ -2108,10 +1692,10 @@ acc100_dma_desc_ld_update(struct rte_bbdev_dec_op *op,\n \t\top->ldpc_dec.harq_combined_output.length =\n \t\t\t\tprev_op->ldpc_dec.harq_combined_output.length;\n \t\tint16_t hq_idx = op->ldpc_dec.harq_combined_output.offset /\n-\t\t\t\tACC100_HARQ_OFFSET;\n+\t\t\t\tACC_HARQ_OFFSET;\n \t\tint16_t prev_hq_idx =\n \t\t\t\tprev_op->ldpc_dec.harq_combined_output.offset\n-\t\t\t\t/ ACC100_HARQ_OFFSET;\n+\t\t\t\t/ ACC_HARQ_OFFSET;\n \t\tharq_layout[hq_idx].val = harq_layout[prev_hq_idx].val;\n #ifndef ACC100_EXT_MEM\n \t\tstruct rte_bbdev_op_data ho =\n@@ -2126,84 +1710,10 @@ acc100_dma_desc_ld_update(struct rte_bbdev_dec_op *op,\n \tdesc->op_addr = op;\n }\n \n-\n-/* Enqueue a number of operations to HW and update software rings */\n-static inline void\n-acc100_dma_enqueue(struct acc100_queue *q, uint16_t n,\n-\t\tstruct rte_bbdev_stats *queue_stats)\n-{\n-\tunion acc100_enqueue_reg_fmt enq_req;\n-#ifdef RTE_BBDEV_OFFLOAD_COST\n-\tuint64_t start_time = 0;\n-\tqueue_stats->acc_offload_cycles = 0;\n-#else\n-\tRTE_SET_USED(queue_stats);\n-#endif\n-\n-\tenq_req.val = 0;\n-\t/* Setting offset, 100b for 256 DMA Desc */\n-\tenq_req.addr_offset = ACC100_DESC_OFFSET;\n-\n-\t/* Split ops into batches 
*/\n-\tdo {\n-\t\tunion acc100_dma_desc *desc;\n-\t\tuint16_t enq_batch_size;\n-\t\tuint64_t offset;\n-\t\trte_iova_t req_elem_addr;\n-\n-\t\tenq_batch_size = RTE_MIN(n, MAX_ENQ_BATCH_SIZE);\n-\n-\t\t/* Set flag on last descriptor in a batch */\n-\t\tdesc = q->ring_addr + ((q->sw_ring_head + enq_batch_size - 1) &\n-\t\t\t\tq->sw_ring_wrap_mask);\n-\t\tdesc->req.last_desc_in_batch = 1;\n-\n-\t\t/* Calculate the 1st descriptor's address */\n-\t\toffset = ((q->sw_ring_head & q->sw_ring_wrap_mask) *\n-\t\t\t\tsizeof(union acc100_dma_desc));\n-\t\treq_elem_addr = q->ring_addr_iova + offset;\n-\n-\t\t/* Fill enqueue struct */\n-\t\tenq_req.num_elem = enq_batch_size;\n-\t\t/* low 6 bits are not needed */\n-\t\tenq_req.req_elem_addr = (uint32_t)(req_elem_addr >> 6);\n-\n-#ifdef RTE_LIBRTE_BBDEV_DEBUG\n-\t\trte_memdump(stderr, \"Req sdone\", desc, sizeof(*desc));\n-#endif\n-\t\trte_bbdev_log_debug(\n-\t\t\t\t\"Enqueue %u reqs (phys %#\"PRIx64\") to reg %p\",\n-\t\t\t\tenq_batch_size,\n-\t\t\t\treq_elem_addr,\n-\t\t\t\t(void *)q->mmio_reg_enqueue);\n-\n-\t\trte_wmb();\n-\n-#ifdef RTE_BBDEV_OFFLOAD_COST\n-\t\t/* Start time measurement for enqueue function offload. 
*/\n-\t\tstart_time = rte_rdtsc_precise();\n-#endif\n-\t\trte_bbdev_log(DEBUG, \"Debug : MMIO Enqueue\");\n-\t\tmmio_write(q->mmio_reg_enqueue, enq_req.val);\n-\n-#ifdef RTE_BBDEV_OFFLOAD_COST\n-\t\tqueue_stats->acc_offload_cycles +=\n-\t\t\t\trte_rdtsc_precise() - start_time;\n-#endif\n-\n-\t\tq->aq_enqueued++;\n-\t\tq->sw_ring_head += enq_batch_size;\n-\t\tn -= enq_batch_size;\n-\n-\t} while (n);\n-\n-\n-}\n-\n #ifdef RTE_LIBRTE_BBDEV_DEBUG\n /* Validates turbo encoder parameters */\n static inline int\n-validate_enc_op(struct rte_bbdev_enc_op *op, struct acc100_queue *q)\n+validate_enc_op(struct rte_bbdev_enc_op *op, struct acc_queue *q)\n {\n \tstruct rte_bbdev_op_turbo_enc *turbo_enc = &op->turbo_enc;\n \tstruct rte_bbdev_op_enc_turbo_cb_params *cb = NULL;\n@@ -2344,7 +1854,7 @@ validate_enc_op(struct rte_bbdev_enc_op *op, struct acc100_queue *q)\n }\n /* Validates LDPC encoder parameters */\n static inline int\n-validate_ldpc_enc_op(struct rte_bbdev_enc_op *op, struct acc100_queue *q)\n+validate_ldpc_enc_op(struct rte_bbdev_enc_op *op, struct acc_queue *q)\n {\n \tstruct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;\n \n@@ -2400,7 +1910,7 @@ validate_ldpc_enc_op(struct rte_bbdev_enc_op *op, struct acc100_queue *q)\n \n /* Validates LDPC decoder parameters */\n static inline int\n-validate_ldpc_dec_op(struct rte_bbdev_dec_op *op, struct acc100_queue *q)\n+validate_ldpc_dec_op(struct rte_bbdev_dec_op *op, struct acc_queue *q)\n {\n \tstruct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;\n \n@@ -2448,10 +1958,10 @@ validate_ldpc_dec_op(struct rte_bbdev_dec_op *op, struct acc100_queue *q)\n \n /* Enqueue one encode operations for ACC100 device in CB mode */\n static inline int\n-enqueue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n+enqueue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n \t\tuint16_t total_enqueued_cbs)\n {\n-\tunion acc100_dma_desc *desc = NULL;\n+\tunion acc_dma_desc *desc = NULL;\n \tint ret;\n 
\tuint32_t in_offset, out_offset, out_length, mbuf_total_left,\n \t\tseg_total_left;\n@@ -2468,7 +1978,7 @@ enqueue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n \tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t& q->sw_ring_wrap_mask);\n \tdesc = q->ring_addr + desc_idx;\n-\tacc100_fcw_te_fill(op, &desc->req.fcw_te);\n+\tacc_fcw_te_fill(op, &desc->req.fcw_te);\n \n \tinput = op->turbo_enc.input.data;\n \toutput_head = output = op->turbo_enc.output.data;\n@@ -2479,7 +1989,7 @@ enqueue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n \tseg_total_left = rte_pktmbuf_data_len(op->turbo_enc.input.data)\n \t\t\t- in_offset;\n \n-\tret = acc100_dma_desc_te_fill(op, &desc->req, &input, output,\n+\tret = acc_dma_desc_te_fill(op, &desc->req, &input, output,\n \t\t\t&in_offset, &out_offset, &out_length, &mbuf_total_left,\n \t\t\t&seg_total_left, 0);\n \n@@ -2501,10 +2011,10 @@ enqueue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n \n /* Enqueue one encode operations for ACC100 device in CB mode */\n static inline int\n-enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops,\n+enqueue_ldpc_enc_n_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ops,\n \t\tuint16_t total_enqueued_cbs, int16_t num)\n {\n-\tunion acc100_dma_desc *desc = NULL;\n+\tunion acc_dma_desc *desc = NULL;\n \tuint32_t out_length;\n \tstruct rte_mbuf *output_head, *output;\n \tint i, next_triplet;\n@@ -2522,10 +2032,10 @@ enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops,\n \tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t& q->sw_ring_wrap_mask);\n \tdesc = q->ring_addr + desc_idx;\n-\tacc100_fcw_le_fill(ops[0], &desc->req.fcw_le, num);\n+\tacc_fcw_le_fill(ops[0], &desc->req.fcw_le, num, 0);\n \n \t/** This could be done at polling */\n-\tacc100_header_init(&desc->req);\n+\tacc_header_init(&desc->req);\n \tdesc->req.numCBs = num;\n \n 
\tin_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len;\n@@ -2564,10 +2074,10 @@ enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops,\n \n /* Enqueue one encode operations for ACC100 device in CB mode */\n static inline int\n-enqueue_ldpc_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n+enqueue_ldpc_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n \t\tuint16_t total_enqueued_cbs)\n {\n-\tunion acc100_dma_desc *desc = NULL;\n+\tunion acc_dma_desc *desc = NULL;\n \tint ret;\n \tuint32_t in_offset, out_offset, out_length, mbuf_total_left,\n \t\tseg_total_left;\n@@ -2584,7 +2094,7 @@ enqueue_ldpc_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n \tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t& q->sw_ring_wrap_mask);\n \tdesc = q->ring_addr + desc_idx;\n-\tacc100_fcw_le_fill(op, &desc->req.fcw_le, 1);\n+\tacc_fcw_le_fill(op, &desc->req.fcw_le, 1, 0);\n \n \tinput = op->ldpc_enc.input.data;\n \toutput_head = output = op->ldpc_enc.output.data;\n@@ -2619,10 +2129,10 @@ enqueue_ldpc_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n \n /* Enqueue one encode operations for ACC100 device in TB mode. 
*/\n static inline int\n-enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n+enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n \t\tuint16_t total_enqueued_cbs, uint8_t cbs_in_tb)\n {\n-\tunion acc100_dma_desc *desc = NULL;\n+\tunion acc_dma_desc *desc = NULL;\n \tint ret;\n \tuint8_t r, c;\n \tuint32_t in_offset, out_offset, out_length, mbuf_total_left,\n@@ -2641,8 +2151,8 @@ enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n \tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t& q->sw_ring_wrap_mask);\n \tdesc = q->ring_addr + desc_idx;\n-\tuint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;\n-\tacc100_fcw_te_fill(op, &desc->req.fcw_te);\n+\tuint64_t fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;\n+\tacc_fcw_te_fill(op, &desc->req.fcw_te);\n \n \tinput = op->turbo_enc.input.data;\n \toutput_head = output = op->turbo_enc.output.data;\n@@ -2660,9 +2170,9 @@ enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n \t\tdesc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t\t& q->sw_ring_wrap_mask);\n \t\tdesc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;\n-\t\tdesc->req.data_ptrs[0].blen = ACC100_FCW_TE_BLEN;\n+\t\tdesc->req.data_ptrs[0].blen = ACC_FCW_TE_BLEN;\n \n-\t\tret = acc100_dma_desc_te_fill(op, &desc->req, &input, output,\n+\t\tret = acc_dma_desc_te_fill(op, &desc->req, &input, output,\n \t\t\t\t&in_offset, &out_offset, &out_length,\n \t\t\t\t&mbuf_total_left, &seg_total_left, r);\n \t\tif (unlikely(ret < 0))\n@@ -2705,7 +2215,7 @@ enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op,\n #ifdef RTE_LIBRTE_BBDEV_DEBUG\n /* Validates turbo decoder parameters */\n static inline int\n-validate_dec_op(struct rte_bbdev_dec_op *op, struct acc100_queue *q)\n+validate_dec_op(struct rte_bbdev_dec_op *op, struct acc_queue *q)\n {\n \tstruct rte_bbdev_op_turbo_dec *turbo_dec = &op->turbo_dec;\n 
\tstruct rte_bbdev_op_dec_turbo_cb_params *cb = NULL;\n@@ -2843,10 +2353,10 @@ validate_dec_op(struct rte_bbdev_dec_op *op, struct acc100_queue *q)\n \n /** Enqueue one decode operations for ACC100 device in CB mode */\n static inline int\n-enqueue_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n+enqueue_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \t\tuint16_t total_enqueued_cbs)\n {\n-\tunion acc100_dma_desc *desc = NULL;\n+\tunion acc_dma_desc *desc = NULL;\n \tint ret;\n \tuint32_t in_offset, h_out_offset, s_out_offset, s_out_length,\n \t\th_out_length, mbuf_total_left, seg_total_left;\n@@ -2915,10 +2425,10 @@ enqueue_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n }\n \n static inline int\n-harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n+harq_loopback(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \t\tuint16_t total_enqueued_cbs) {\n-\tstruct acc100_fcw_ld *fcw;\n-\tunion acc100_dma_desc *desc;\n+\tstruct acc_fcw_ld *fcw;\n+\tunion acc_dma_desc *desc;\n \tint next_triplet = 1;\n \tstruct rte_mbuf *hq_output_head, *hq_output;\n \tuint16_t harq_dma_length_in, harq_dma_length_out;\n@@ -2943,24 +2453,24 @@ harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \n \tbool ddr_mem_in = check_bit(op->ldpc_dec.op_flags,\n \t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE);\n-\tunion acc100_harq_layout_data *harq_layout = q->d->harq_layout;\n+\tunion acc_harq_layout_data *harq_layout = q->d->harq_layout;\n \tuint16_t harq_index = (ddr_mem_in ?\n \t\t\top->ldpc_dec.harq_combined_input.offset :\n \t\t\top->ldpc_dec.harq_combined_output.offset)\n-\t\t\t/ ACC100_HARQ_OFFSET;\n+\t\t\t/ ACC_HARQ_OFFSET;\n \n \tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t& q->sw_ring_wrap_mask);\n \tdesc = q->ring_addr + desc_idx;\n \tfcw = &desc->req.fcw_ld;\n \t/* Set the FCW from loopback into DDR */\n-\tmemset(fcw, 0, sizeof(struct 
acc100_fcw_ld));\n-\tfcw->FCWversion = ACC100_FCW_VER;\n+\tmemset(fcw, 0, sizeof(struct acc_fcw_ld));\n+\tfcw->FCWversion = ACC_FCW_VER;\n \tfcw->qm = 2;\n \tfcw->Zc = 384;\n-\tif (harq_in_length < 16 * ACC100_N_ZC_1)\n+\tif (harq_in_length < 16 * ACC_N_ZC_1)\n \t\tfcw->Zc = 16;\n-\tfcw->ncb = fcw->Zc * ACC100_N_ZC_1;\n+\tfcw->ncb = fcw->Zc * ACC_N_ZC_1;\n \tfcw->rm_e = 2;\n \tfcw->hcin_en = 1;\n \tfcw->hcout_en = 1;\n@@ -2990,32 +2500,32 @@ harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \tfcw->gain_h = 1;\n \n \t/* Set the prefix of descriptor. This could be done at polling */\n-\tacc100_header_init(&desc->req);\n+\tacc_header_init(&desc->req);\n \n \t/* Null LLR input for Decoder */\n \tdesc->req.data_ptrs[next_triplet].address =\n \t\t\tq->lb_in_addr_iova;\n \tdesc->req.data_ptrs[next_triplet].blen = 2;\n-\tdesc->req.data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_IN;\n+\tdesc->req.data_ptrs[next_triplet].blkid = ACC_DMA_BLKID_IN;\n \tdesc->req.data_ptrs[next_triplet].last = 0;\n \tdesc->req.data_ptrs[next_triplet].dma_ext = 0;\n \tnext_triplet++;\n \n \t/* HARQ Combine input from either Memory interface */\n \tif (!ddr_mem_in) {\n-\t\tnext_triplet = acc100_dma_fill_blk_type_out(&desc->req,\n+\t\tnext_triplet = acc_dma_fill_blk_type(&desc->req,\n \t\t\t\top->ldpc_dec.harq_combined_input.data,\n \t\t\t\top->ldpc_dec.harq_combined_input.offset,\n \t\t\t\tharq_dma_length_in,\n \t\t\t\tnext_triplet,\n-\t\t\t\tACC100_DMA_BLKID_IN_HARQ);\n+\t\t\t\tACC_DMA_BLKID_IN_HARQ);\n \t} else {\n \t\tdesc->req.data_ptrs[next_triplet].address =\n \t\t\t\top->ldpc_dec.harq_combined_input.offset;\n \t\tdesc->req.data_ptrs[next_triplet].blen =\n \t\t\t\tharq_dma_length_in;\n \t\tdesc->req.data_ptrs[next_triplet].blkid =\n-\t\t\t\tACC100_DMA_BLKID_IN_HARQ;\n+\t\t\t\tACC_DMA_BLKID_IN_HARQ;\n \t\tdesc->req.data_ptrs[next_triplet].dma_ext = 1;\n \t\tnext_triplet++;\n \t}\n@@ -3025,8 +2535,8 @@ harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n 
\t/* Dropped decoder hard output */\n \tdesc->req.data_ptrs[next_triplet].address =\n \t\t\tq->lb_out_addr_iova;\n-\tdesc->req.data_ptrs[next_triplet].blen = ACC100_BYTES_IN_WORD;\n-\tdesc->req.data_ptrs[next_triplet].blkid = ACC100_DMA_BLKID_OUT_HARD;\n+\tdesc->req.data_ptrs[next_triplet].blen = ACC_BYTES_IN_WORD;\n+\tdesc->req.data_ptrs[next_triplet].blkid = ACC_DMA_BLKID_OUT_HARD;\n \tdesc->req.data_ptrs[next_triplet].last = 0;\n \tdesc->req.data_ptrs[next_triplet].dma_ext = 0;\n \tnext_triplet++;\n@@ -3040,19 +2550,19 @@ harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \t\tdesc->req.data_ptrs[next_triplet].blen =\n \t\t\t\tharq_dma_length_out;\n \t\tdesc->req.data_ptrs[next_triplet].blkid =\n-\t\t\t\tACC100_DMA_BLKID_OUT_HARQ;\n+\t\t\t\tACC_DMA_BLKID_OUT_HARQ;\n \t\tdesc->req.data_ptrs[next_triplet].dma_ext = 1;\n \t\tnext_triplet++;\n \t} else {\n \t\thq_output_head = op->ldpc_dec.harq_combined_output.data;\n \t\thq_output = op->ldpc_dec.harq_combined_output.data;\n-\t\tnext_triplet = acc100_dma_fill_blk_type_out(\n+\t\tnext_triplet = acc_dma_fill_blk_type(\n \t\t\t\t&desc->req,\n \t\t\t\top->ldpc_dec.harq_combined_output.data,\n \t\t\t\top->ldpc_dec.harq_combined_output.offset,\n \t\t\t\tharq_dma_length_out,\n \t\t\t\tnext_triplet,\n-\t\t\t\tACC100_DMA_BLKID_OUT_HARQ);\n+\t\t\t\tACC_DMA_BLKID_OUT_HARQ);\n \t\t/* HARQ output */\n \t\tmbuf_append(hq_output_head, hq_output, harq_dma_length_out);\n \t\top->ldpc_dec.harq_combined_output.length =\n@@ -3068,7 +2578,7 @@ harq_loopback(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \n /** Enqueue one decode operations for ACC100 device in CB mode */\n static inline int\n-enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n+enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \t\tuint16_t total_enqueued_cbs, bool same_op)\n {\n \tint ret;\n@@ -3085,7 +2595,7 @@ enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op 
*op,\n \t\treturn -EINVAL;\n \t}\n #endif\n-\tunion acc100_dma_desc *desc;\n+\tunion acc_dma_desc *desc;\n \tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t& q->sw_ring_wrap_mask);\n \tdesc = q->ring_addr + desc_idx;\n@@ -3102,36 +2612,36 @@ enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \t\treturn -EFAULT;\n \t}\n #endif\n-\tunion acc100_harq_layout_data *harq_layout = q->d->harq_layout;\n+\tunion acc_harq_layout_data *harq_layout = q->d->harq_layout;\n \n \tif (same_op) {\n-\t\tunion acc100_dma_desc *prev_desc;\n+\t\tunion acc_dma_desc *prev_desc;\n \t\tdesc_idx = ((q->sw_ring_head + total_enqueued_cbs - 1)\n \t\t\t\t& q->sw_ring_wrap_mask);\n \t\tprev_desc = q->ring_addr + desc_idx;\n \t\tuint8_t *prev_ptr = (uint8_t *) prev_desc;\n \t\tuint8_t *new_ptr = (uint8_t *) desc;\n \t\t/* Copy first 4 words and BDESCs */\n-\t\trte_memcpy(new_ptr, prev_ptr, ACC100_5GUL_SIZE_0);\n-\t\trte_memcpy(new_ptr + ACC100_5GUL_OFFSET_0,\n-\t\t\t\tprev_ptr + ACC100_5GUL_OFFSET_0,\n-\t\t\t\tACC100_5GUL_SIZE_1);\n+\t\trte_memcpy(new_ptr, prev_ptr, ACC_5GUL_SIZE_0);\n+\t\trte_memcpy(new_ptr + ACC_5GUL_OFFSET_0,\n+\t\t\t\tprev_ptr + ACC_5GUL_OFFSET_0,\n+\t\t\t\tACC_5GUL_SIZE_1);\n \t\tdesc->req.op_addr = prev_desc->req.op_addr;\n \t\t/* Copy FCW */\n-\t\trte_memcpy(new_ptr + ACC100_DESC_FCW_OFFSET,\n-\t\t\t\tprev_ptr + ACC100_DESC_FCW_OFFSET,\n-\t\t\t\tACC100_FCW_LD_BLEN);\n+\t\trte_memcpy(new_ptr + ACC_DESC_FCW_OFFSET,\n+\t\t\t\tprev_ptr + ACC_DESC_FCW_OFFSET,\n+\t\t\t\tACC_FCW_LD_BLEN);\n \t\tacc100_dma_desc_ld_update(op, &desc->req, input, h_output,\n \t\t\t\t&in_offset, &h_out_offset,\n \t\t\t\t&h_out_length, harq_layout);\n \t} else {\n-\t\tstruct acc100_fcw_ld *fcw;\n+\t\tstruct acc_fcw_ld *fcw;\n \t\tuint32_t seg_total_left;\n \t\tfcw = &desc->req.fcw_ld;\n \t\tq->d->fcw_ld_fill(op, fcw, harq_layout);\n \n \t\t/* Special handling when overusing mbuf */\n-\t\tif (fcw->rm_e < ACC100_MAX_E_MBUF)\n+\t\tif (fcw->rm_e < 
ACC_MAX_E_MBUF)\n \t\t\tseg_total_left = rte_pktmbuf_data_len(input)\n \t\t\t\t\t- in_offset;\n \t\telse\n@@ -3171,10 +2681,10 @@ enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \n /* Enqueue one decode operations for ACC100 device in TB mode */\n static inline int\n-enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n+enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \t\tuint16_t total_enqueued_cbs, uint8_t cbs_in_tb)\n {\n-\tunion acc100_dma_desc *desc = NULL;\n+\tunion acc_dma_desc *desc = NULL;\n \tint ret;\n \tuint8_t r, c;\n \tuint32_t in_offset, h_out_offset,\n@@ -3193,8 +2703,8 @@ enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t& q->sw_ring_wrap_mask);\n \tdesc = q->ring_addr + desc_idx;\n-\tuint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;\n-\tunion acc100_harq_layout_data *harq_layout = q->d->harq_layout;\n+\tuint64_t fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;\n+\tunion acc_harq_layout_data *harq_layout = q->d->harq_layout;\n \tq->d->fcw_ld_fill(op, &desc->req.fcw_ld, harq_layout);\n \n \tinput = op->ldpc_dec.input.data;\n@@ -3214,7 +2724,7 @@ enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \t\tdesc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t\t& q->sw_ring_wrap_mask);\n \t\tdesc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;\n-\t\tdesc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;\n+\t\tdesc->req.data_ptrs[0].blen = ACC_FCW_LD_BLEN;\n \t\tret = acc100_dma_desc_ld_fill(op, &desc->req, &input,\n \t\t\t\th_output, &in_offset, &h_out_offset,\n \t\t\t\t&h_out_length,\n@@ -3260,10 +2770,10 @@ enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \n /* Enqueue one decode operations for ACC100 device in TB mode */\n static inline 
int\n-enqueue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n+enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \t\tuint16_t total_enqueued_cbs, uint8_t cbs_in_tb)\n {\n-\tunion acc100_dma_desc *desc = NULL;\n+\tunion acc_dma_desc *desc = NULL;\n \tint ret;\n \tuint8_t r, c;\n \tuint32_t in_offset, h_out_offset, s_out_offset, s_out_length,\n@@ -3283,7 +2793,7 @@ enqueue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \tuint16_t desc_idx = ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t& q->sw_ring_wrap_mask);\n \tdesc = q->ring_addr + desc_idx;\n-\tuint64_t fcw_offset = (desc_idx << 8) + ACC100_DESC_FCW_OFFSET;\n+\tuint64_t fcw_offset = (desc_idx << 8) + ACC_DESC_FCW_OFFSET;\n \tacc100_fcw_td_fill(op, &desc->req.fcw_td);\n \n \tinput = op->turbo_dec.input.data;\n@@ -3305,7 +2815,7 @@ enqueue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \t\tdesc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs)\n \t\t\t\t& q->sw_ring_wrap_mask);\n \t\tdesc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;\n-\t\tdesc->req.data_ptrs[0].blen = ACC100_FCW_TD_BLEN;\n+\t\tdesc->req.data_ptrs[0].blen = ACC_FCW_TD_BLEN;\n \t\tret = acc100_dma_desc_td_fill(op, &desc->req, &input,\n \t\t\t\th_output, s_output, &in_offset, &h_out_offset,\n \t\t\t\t&s_out_offset, &h_out_length, &s_out_length,\n@@ -3360,91 +2870,15 @@ enqueue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op,\n \treturn current_enqueued_cbs;\n }\n \n-/* Calculates number of CBs in processed encoder TB based on 'r' and input\n- * length.\n- */\n-static inline uint8_t\n-get_num_cbs_in_tb_enc(struct rte_bbdev_op_turbo_enc *turbo_enc)\n-{\n-\tuint8_t c, c_neg, r, crc24_bits = 0;\n-\tuint16_t k, k_neg, k_pos;\n-\tuint8_t cbs_in_tb = 0;\n-\tint32_t length;\n-\n-\tlength = turbo_enc->input.length;\n-\tr = turbo_enc->tb_params.r;\n-\tc = turbo_enc->tb_params.c;\n-\tc_neg = turbo_enc->tb_params.c_neg;\n-\tk_neg = 
turbo_enc->tb_params.k_neg;\n-\tk_pos = turbo_enc->tb_params.k_pos;\n-\tcrc24_bits = 0;\n-\tif (check_bit(turbo_enc->op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))\n-\t\tcrc24_bits = 24;\n-\twhile (length > 0 && r < c) {\n-\t\tk = (r < c_neg) ? k_neg : k_pos;\n-\t\tlength -= (k - crc24_bits) >> 3;\n-\t\tr++;\n-\t\tcbs_in_tb++;\n-\t}\n-\n-\treturn cbs_in_tb;\n-}\n-\n-/* Calculates number of CBs in processed decoder TB based on 'r' and input\n- * length.\n- */\n-static inline uint16_t\n-get_num_cbs_in_tb_dec(struct rte_bbdev_op_turbo_dec *turbo_dec)\n-{\n-\tuint8_t c, c_neg, r = 0;\n-\tuint16_t kw, k, k_neg, k_pos, cbs_in_tb = 0;\n-\tint32_t length;\n-\n-\tlength = turbo_dec->input.length;\n-\tr = turbo_dec->tb_params.r;\n-\tc = turbo_dec->tb_params.c;\n-\tc_neg = turbo_dec->tb_params.c_neg;\n-\tk_neg = turbo_dec->tb_params.k_neg;\n-\tk_pos = turbo_dec->tb_params.k_pos;\n-\twhile (length > 0 && r < c) {\n-\t\tk = (r < c_neg) ? k_neg : k_pos;\n-\t\tkw = RTE_ALIGN_CEIL(k + 4, 32) * 3;\n-\t\tlength -= kw;\n-\t\tr++;\n-\t\tcbs_in_tb++;\n-\t}\n-\n-\treturn cbs_in_tb;\n-}\n-\n-/* Calculates number of CBs in processed decoder TB based on 'r' and input\n- * length.\n- */\n-static inline uint16_t\n-get_num_cbs_in_tb_ldpc_dec(struct rte_bbdev_op_ldpc_dec *ldpc_dec)\n-{\n-\tuint16_t r, cbs_in_tb = 0;\n-\tint32_t length = ldpc_dec->input.length;\n-\tr = ldpc_dec->tb_params.r;\n-\twhile (length > 0 && r < ldpc_dec->tb_params.c) {\n-\t\tlength -=  (r < ldpc_dec->tb_params.cab) ?\n-\t\t\t\tldpc_dec->tb_params.ea :\n-\t\t\t\tldpc_dec->tb_params.eb;\n-\t\tr++;\n-\t\tcbs_in_tb++;\n-\t}\n-\treturn cbs_in_tb;\n-}\n-\n /* Enqueue encode operations for ACC100 device in CB mode. 
*/\n static uint16_t\n acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n \tuint16_t i;\n-\tunion acc100_dma_desc *desc;\n+\tunion acc_dma_desc *desc;\n \tint ret;\n \n \tfor (i = 0; i < num; ++i) {\n@@ -3467,7 +2901,7 @@ acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,\n \tdesc->req.sdone_enable = 1;\n \tdesc->req.irq_enable = q->irq_enable;\n \n-\tacc100_dma_enqueue(q, i, &q_data->queue_stats);\n+\tacc_dma_enqueue(q, i, &q_data->queue_stats);\n \n \t/* Update stats */\n \tq_data->queue_stats.enqueued_count += i;\n@@ -3475,32 +2909,15 @@ acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,\n \treturn i;\n }\n \n-/* Check we can mux encode operations with common FCW */\n-static inline bool\n-check_mux(struct rte_bbdev_enc_op **ops, uint16_t num) {\n-\tuint16_t i;\n-\tif (num <= 1)\n-\t\treturn false;\n-\tfor (i = 1; i < num; ++i) {\n-\t\t/* Only mux compatible code blocks */\n-\t\tif (memcmp((uint8_t *)(&ops[i]->ldpc_enc) + ACC100_ENC_OFFSET,\n-\t\t\t\t(uint8_t *)(&ops[0]->ldpc_enc) +\n-\t\t\t\tACC100_ENC_OFFSET,\n-\t\t\t\tACC100_CMP_ENC_SIZE) != 0)\n-\t\t\treturn false;\n-\t}\n-\treturn true;\n-}\n-\n /** Enqueue encode operations for ACC100 device in CB mode. 
*/\n static inline uint16_t\n acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n \tuint16_t i = 0;\n-\tunion acc100_dma_desc *desc;\n+\tunion acc_dma_desc *desc;\n \tint ret, desc_idx = 0;\n \tint16_t enq, left = num;\n \n@@ -3508,7 +2925,7 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,\n \t\tif (unlikely(avail < 1))\n \t\t\tbreak;\n \t\tavail--;\n-\t\tenq = RTE_MIN(left, ACC100_MUX_5GDL_DESC);\n+\t\tenq = RTE_MIN(left, ACC_MUX_5GDL_DESC);\n \t\tif (check_mux(&ops[i], enq)) {\n \t\t\tret = enqueue_ldpc_enc_n_op_cb(q, &ops[i],\n \t\t\t\t\tdesc_idx, enq);\n@@ -3534,7 +2951,7 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,\n \tdesc->req.sdone_enable = 1;\n \tdesc->req.irq_enable = q->irq_enable;\n \n-\tacc100_dma_enqueue(q, desc_idx, &q_data->queue_stats);\n+\tacc_dma_enqueue(q, desc_idx, &q_data->queue_stats);\n \n \t/* Update stats */\n \tq_data->queue_stats.enqueued_count += i;\n@@ -3548,7 +2965,7 @@ static uint16_t\n acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n \tuint16_t i, enqueued_cbs = 0;\n \tuint8_t cbs_in_tb;\n@@ -3569,7 +2986,7 @@ acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data,\n \tif (unlikely(enqueued_cbs == 0))\n \t\treturn 0; /* Nothing to enqueue */\n \n-\tacc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n+\tacc_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n \n \t/* Update stats */\n \tq_data->queue_stats.enqueued_count += i;\n@@ -3610,10 +3027,10 @@ static uint16_t\n acc100_enqueue_dec_cb(struct 
rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n \tuint16_t i;\n-\tunion acc100_dma_desc *desc;\n+\tunion acc_dma_desc *desc;\n \tint ret;\n \n \tfor (i = 0; i < num; ++i) {\n@@ -3636,7 +3053,7 @@ acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,\n \tdesc->req.sdone_enable = 1;\n \tdesc->req.irq_enable = q->irq_enable;\n \n-\tacc100_dma_enqueue(q, i, &q_data->queue_stats);\n+\tacc_dma_enqueue(q, i, &q_data->queue_stats);\n \n \t/* Update stats */\n \tq_data->queue_stats.enqueued_count += i;\n@@ -3645,25 +3062,12 @@ acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,\n \treturn i;\n }\n \n-/* Check we can mux encode operations with common FCW */\n-static inline bool\n-cmp_ldpc_dec_op(struct rte_bbdev_dec_op **ops) {\n-\t/* Only mux compatible code blocks */\n-\tif (memcmp((uint8_t *)(&ops[0]->ldpc_dec) + ACC100_DEC_OFFSET,\n-\t\t\t(uint8_t *)(&ops[1]->ldpc_dec) +\n-\t\t\tACC100_DEC_OFFSET, ACC100_CMP_DEC_SIZE) != 0) {\n-\t\treturn false;\n-\t} else\n-\t\treturn true;\n-}\n-\n-\n /* Enqueue decode operations for ACC100 device in TB mode */\n static uint16_t\n acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n \tuint16_t i, enqueued_cbs = 0;\n \tuint8_t cbs_in_tb;\n@@ -3683,7 +3087,7 @@ acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,\n \t\tenqueued_cbs += ret;\n \t}\n \n-\tacc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n+\tacc_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n \n \t/* Update stats */\n \tq_data->queue_stats.enqueued_count += i;\n@@ -3696,10 +3100,10 @@ 
static uint16_t\n acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n \tuint16_t i;\n-\tunion acc100_dma_desc *desc;\n+\tunion acc_dma_desc *desc;\n \tint ret;\n \tbool same_op = false;\n \tfor (i = 0; i < num; ++i) {\n@@ -3732,7 +3136,7 @@ acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,\n \tdesc->req.sdone_enable = 1;\n \tdesc->req.irq_enable = q->irq_enable;\n \n-\tacc100_dma_enqueue(q, i, &q_data->queue_stats);\n+\tacc_dma_enqueue(q, i, &q_data->queue_stats);\n \n \t/* Update stats */\n \tq_data->queue_stats.enqueued_count += i;\n@@ -3746,7 +3150,7 @@ static uint16_t\n acc100_enqueue_dec_tb(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tint32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head;\n \tuint16_t i, enqueued_cbs = 0;\n \tuint8_t cbs_in_tb;\n@@ -3765,7 +3169,7 @@ acc100_enqueue_dec_tb(struct rte_bbdev_queue_data *q_data,\n \t\tenqueued_cbs += ret;\n \t}\n \n-\tacc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n+\tacc_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats);\n \n \t/* Update stats */\n \tq_data->queue_stats.enqueued_count += i;\n@@ -3792,7 +3196,7 @@ static uint16_t\n acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tint32_t aq_avail = q->aq_depth +\n \t\t\t(q->aq_dequeued - q->aq_enqueued) / 128;\n \n@@ -3808,11 +3212,11 @@ acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n \n /* Dequeue one encode operations from ACC100 device in CB mode 
*/\n static inline int\n-dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,\n+dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,\n \t\tuint16_t total_dequeued_cbs, uint32_t *aq_dequeued)\n {\n-\tunion acc100_dma_desc *desc, atom_desc;\n-\tunion acc100_dma_rsp_desc rsp;\n+\tunion acc_dma_desc *desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n \tstruct rte_bbdev_enc_op *op;\n \tint i;\n \n@@ -3822,7 +3226,7 @@ dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,\n \t\t\t__ATOMIC_RELAXED);\n \n \t/* Check fdone bit */\n-\tif (!(atom_desc.rsp.val & ACC100_FDONE))\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n \t\treturn -1;\n \n \trsp.val = atom_desc.rsp.val;\n@@ -3843,7 +3247,7 @@ dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,\n \t\t(*aq_dequeued)++;\n \t\tdesc->req.last_desc_in_batch = 0;\n \t}\n-\tdesc->rsp.val = ACC100_DMA_DESC_TYPE;\n+\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n \tdesc->rsp.add_info_0 = 0; /*Reserved bits */\n \tdesc->rsp.add_info_1 = 0; /*Reserved bits */\n \n@@ -3858,11 +3262,11 @@ dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,\n \n /* Dequeue one encode operations from ACC100 device in TB mode */\n static inline int\n-dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,\n+dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,\n \t\tuint16_t total_dequeued_cbs, uint32_t *aq_dequeued)\n {\n-\tunion acc100_dma_desc *desc, *last_desc, atom_desc;\n-\tunion acc100_dma_rsp_desc rsp;\n+\tunion acc_dma_desc *desc, *last_desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n \tstruct rte_bbdev_enc_op *op;\n \tuint8_t i = 0;\n \tuint16_t current_dequeued_cbs = 0, cbs_in_tb;\n@@ -3873,7 +3277,7 @@ dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,\n \t\t\t__ATOMIC_RELAXED);\n \n \t/* Check fdone bit */\n-\tif (!(atom_desc.rsp.val & 
ACC100_FDONE))\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n \t\treturn -1;\n \n \t/* Get number of CBs in dequeued TB */\n@@ -3887,7 +3291,7 @@ dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,\n \t */\n \tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,\n \t\t\t__ATOMIC_RELAXED);\n-\tif (!(atom_desc.rsp.val & ACC100_SDONE))\n+\tif (!(atom_desc.rsp.val & ACC_SDONE))\n \t\treturn -1;\n \n \t/* Dequeue */\n@@ -3915,7 +3319,7 @@ dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,\n \t\t\t(*aq_dequeued)++;\n \t\t\tdesc->req.last_desc_in_batch = 0;\n \t\t}\n-\t\tdesc->rsp.val = ACC100_DMA_DESC_TYPE;\n+\t\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n \t\tdesc->rsp.add_info_0 = 0;\n \t\tdesc->rsp.add_info_1 = 0;\n \t\ttotal_dequeued_cbs++;\n@@ -3931,11 +3335,11 @@ dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op,\n /* Dequeue one decode operation from ACC100 device in CB mode */\n static inline int\n dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n-\t\tstruct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,\n+\t\tstruct acc_queue *q, struct rte_bbdev_dec_op **ref_op,\n \t\tuint16_t dequeued_cbs, uint32_t *aq_dequeued)\n {\n-\tunion acc100_dma_desc *desc, atom_desc;\n-\tunion acc100_dma_rsp_desc rsp;\n+\tunion acc_dma_desc *desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n \tstruct rte_bbdev_dec_op *op;\n \n \tdesc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)\n@@ -3944,7 +3348,7 @@ dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n \t\t\t__ATOMIC_RELAXED);\n \n \t/* Check fdone bit */\n-\tif (!(atom_desc.rsp.val & ACC100_FDONE))\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n \t\treturn -1;\n \n \trsp.val = atom_desc.rsp.val;\n@@ -3973,7 +3377,7 @@ dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n \t\t(*aq_dequeued)++;\n \t\tdesc->req.last_desc_in_batch = 0;\n \t}\n-\tdesc->rsp.val = ACC100_DMA_DESC_TYPE;\n+\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n 
\tdesc->rsp.add_info_0 = 0;\n \tdesc->rsp.add_info_1 = 0;\n \t*ref_op = op;\n@@ -3985,11 +3389,11 @@ dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n /* Dequeue one decode operations from ACC100 device in CB mode */\n static inline int\n dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n-\t\tstruct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,\n+\t\tstruct acc_queue *q, struct rte_bbdev_dec_op **ref_op,\n \t\tuint16_t dequeued_cbs, uint32_t *aq_dequeued)\n {\n-\tunion acc100_dma_desc *desc, atom_desc;\n-\tunion acc100_dma_rsp_desc rsp;\n+\tunion acc_dma_desc *desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n \tstruct rte_bbdev_dec_op *op;\n \n \tdesc = q->ring_addr + ((q->sw_ring_tail + dequeued_cbs)\n@@ -3998,7 +3402,7 @@ dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n \t\t\t__ATOMIC_RELAXED);\n \n \t/* Check fdone bit */\n-\tif (!(atom_desc.rsp.val & ACC100_FDONE))\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n \t\treturn -1;\n \n \trsp.val = atom_desc.rsp.val;\n@@ -4028,7 +3432,7 @@ dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n \t\tdesc->req.last_desc_in_batch = 0;\n \t}\n \n-\tdesc->rsp.val = ACC100_DMA_DESC_TYPE;\n+\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n \tdesc->rsp.add_info_0 = 0;\n \tdesc->rsp.add_info_1 = 0;\n \n@@ -4040,11 +3444,11 @@ dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n \n /* Dequeue one decode operations from ACC100 device in TB mode. 
*/\n static inline int\n-dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,\n+dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op,\n \t\tuint16_t dequeued_cbs, uint32_t *aq_dequeued)\n {\n-\tunion acc100_dma_desc *desc, *last_desc, atom_desc;\n-\tunion acc100_dma_rsp_desc rsp;\n+\tunion acc_dma_desc *desc, *last_desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n \tstruct rte_bbdev_dec_op *op;\n \tuint8_t cbs_in_tb = 1, cb_idx = 0;\n \n@@ -4054,7 +3458,7 @@ dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,\n \t\t\t__ATOMIC_RELAXED);\n \n \t/* Check fdone bit */\n-\tif (!(atom_desc.rsp.val & ACC100_FDONE))\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n \t\treturn -1;\n \n \t/* Dequeue */\n@@ -4071,7 +3475,7 @@ dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,\n \t */\n \tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,\n \t\t\t__ATOMIC_RELAXED);\n-\tif (!(atom_desc.rsp.val & ACC100_SDONE))\n+\tif (!(atom_desc.rsp.val & ACC_SDONE))\n \t\treturn -1;\n \n \t/* Clearing status, it will be set based on response */\n@@ -4103,7 +3507,7 @@ dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op,\n \t\t\t(*aq_dequeued)++;\n \t\t\tdesc->req.last_desc_in_batch = 0;\n \t\t}\n-\t\tdesc->rsp.val = ACC100_DMA_DESC_TYPE;\n+\t\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n \t\tdesc->rsp.add_info_0 = 0;\n \t\tdesc->rsp.add_info_1 = 0;\n \t\tdequeued_cbs++;\n@@ -4120,7 +3524,7 @@ static uint16_t\n acc100_dequeue_enc(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tuint16_t dequeue_num;\n \tuint32_t avail = q->sw_ring_head - q->sw_ring_tail;\n \tuint32_t aq_dequeued = 0;\n@@ -4166,7 +3570,7 @@ static uint16_t\n acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_enc_op **ops, 
uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tuint32_t avail = q->sw_ring_head - q->sw_ring_tail;\n \tuint32_t aq_dequeued = 0;\n \tuint16_t dequeue_num, i, dequeued_cbs = 0, dequeued_descs = 0;\n@@ -4205,7 +3609,7 @@ static uint16_t\n acc100_dequeue_dec(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tuint16_t dequeue_num;\n \tuint32_t avail = q->sw_ring_head - q->sw_ring_tail;\n \tuint32_t aq_dequeued = 0;\n@@ -4250,7 +3654,7 @@ static uint16_t\n acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n \t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n {\n-\tstruct acc100_queue *q = q_data->queue_private;\n+\tstruct acc_queue *q = q_data->queue_private;\n \tuint16_t dequeue_num;\n \tuint32_t avail = q->sw_ring_head - q->sw_ring_tail;\n \tuint32_t aq_dequeued = 0;\n@@ -4310,17 +3714,17 @@ acc100_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)\n \t/* Device variant specific handling */\n \tif ((pci_dev->id.device_id == ACC100_PF_DEVICE_ID) ||\n \t\t\t(pci_dev->id.device_id == ACC100_VF_DEVICE_ID)) {\n-\t\t((struct acc100_device *) dev->data->dev_private)->device_variant = ACC100_VARIANT;\n-\t\t((struct acc100_device *) dev->data->dev_private)->fcw_ld_fill = acc100_fcw_ld_fill;\n+\t\t((struct acc_device *) dev->data->dev_private)->device_variant = ACC100_VARIANT;\n+\t\t((struct acc_device *) dev->data->dev_private)->fcw_ld_fill = acc100_fcw_ld_fill;\n \t} else {\n-\t\t((struct acc100_device *) dev->data->dev_private)->device_variant = ACC101_VARIANT;\n-\t\t((struct acc100_device *) dev->data->dev_private)->fcw_ld_fill = acc101_fcw_ld_fill;\n+\t\t((struct acc_device *) dev->data->dev_private)->device_variant = ACC101_VARIANT;\n+\t\t((struct acc_device *) dev->data->dev_private)->fcw_ld_fill = acc101_fcw_ld_fill;\n \t}\n 
\n-\t((struct acc100_device *) dev->data->dev_private)->pf_device =\n+\t((struct acc_device *) dev->data->dev_private)->pf_device =\n \t\t\t!strcmp(drv->driver.name, RTE_STR(ACC100PF_DRIVER_NAME));\n \n-\t((struct acc100_device *) dev->data->dev_private)->mmio_base =\n+\t((struct acc_device *) dev->data->dev_private)->mmio_base =\n \t\t\tpci_dev->mem_resource[0].addr;\n \n \trte_bbdev_log_debug(\"Init device %s [%s] @ vaddr %p paddr %#\"PRIx64\"\",\n@@ -4349,13 +3753,13 @@ static int acc100_pci_probe(struct rte_pci_driver *pci_drv,\n \n \t/* allocate device private memory */\n \tbbdev->data->dev_private = rte_zmalloc_socket(dev_name,\n-\t\t\tsizeof(struct acc100_device), RTE_CACHE_LINE_SIZE,\n+\t\t\tsizeof(struct acc_device), RTE_CACHE_LINE_SIZE,\n \t\t\tpci_dev->device.numa_node);\n \n \tif (bbdev->data->dev_private == NULL) {\n \t\trte_bbdev_log(CRIT,\n \t\t\t\t\"Allocate of %zu bytes for device \\\"%s\\\" failed\",\n-\t\t\t\tsizeof(struct acc100_device), dev_name);\n+\t\t\t\tsizeof(struct acc_device), dev_name);\n \t\t\t\trte_bbdev_release(bbdev);\n \t\t\treturn -ENOMEM;\n \t}\n@@ -4373,53 +3777,16 @@ static int acc100_pci_probe(struct rte_pci_driver *pci_drv,\n \treturn 0;\n }\n \n-static int acc100_pci_remove(struct rte_pci_device *pci_dev)\n-{\n-\tstruct rte_bbdev *bbdev;\n-\tint ret;\n-\tuint8_t dev_id;\n-\n-\tif (pci_dev == NULL)\n-\t\treturn -EINVAL;\n-\n-\t/* Find device */\n-\tbbdev = rte_bbdev_get_named_dev(pci_dev->device.name);\n-\tif (bbdev == NULL) {\n-\t\trte_bbdev_log(CRIT,\n-\t\t\t\t\"Couldn't find HW dev \\\"%s\\\" to uninitialise it\",\n-\t\t\t\tpci_dev->device.name);\n-\t\treturn -ENODEV;\n-\t}\n-\tdev_id = bbdev->data->dev_id;\n-\n-\t/* free device private memory before close */\n-\trte_free(bbdev->data->dev_private);\n-\n-\t/* Close device */\n-\tret = rte_bbdev_close(dev_id);\n-\tif (ret < 0)\n-\t\trte_bbdev_log(ERR,\n-\t\t\t\t\"Device %i failed to close during uninit: %i\",\n-\t\t\t\tdev_id, ret);\n-\n-\t/* release bbdev from library 
*/\n-\trte_bbdev_release(bbdev);\n-\n-\trte_bbdev_log_debug(\"Destroyed bbdev = %u\", dev_id);\n-\n-\treturn 0;\n-}\n-\n static struct rte_pci_driver acc100_pci_pf_driver = {\n \t\t.probe = acc100_pci_probe,\n-\t\t.remove = acc100_pci_remove,\n+\t\t.remove = acc_pci_remove,\n \t\t.id_table = pci_id_acc100_pf_map,\n \t\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING\n };\n \n static struct rte_pci_driver acc100_pci_vf_driver = {\n \t\t.probe = acc100_pci_probe,\n-\t\t.remove = acc100_pci_remove,\n+\t\t.remove = acc_pci_remove,\n \t\t.id_table = pci_id_acc100_vf_map,\n \t\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING\n };\n@@ -4437,51 +3804,51 @@ RTE_PMD_REGISTER_PCI_TABLE(ACC100VF_DRIVER_NAME, pci_id_acc100_vf_map);\n  * defined.\n  */\n static void\n-poweron_cleanup(struct rte_bbdev *bbdev, struct acc100_device *d,\n-\t\tstruct rte_acc100_conf *conf)\n+poweron_cleanup(struct rte_bbdev *bbdev, struct acc_device *d,\n+\t\tstruct rte_acc_conf *conf)\n {\n \tint i, template_idx, qg_idx;\n \tuint32_t address, status, value;\n \tprintf(\"Need to clear power-on 5GUL status in internal memory\\n\");\n \t/* Reset LDPC Cores */\n \tfor (i = 0; i < ACC100_ENGINES_MAX; i++)\n-\t\tacc100_reg_write(d, HWPfFecUl5gCntrlReg +\n-\t\t\t\tACC100_ENGINE_OFFSET * i, ACC100_RESET_HI);\n-\tusleep(ACC100_LONG_WAIT);\n+\t\tacc_reg_write(d, HWPfFecUl5gCntrlReg +\n+\t\t\t\tACC_ENGINE_OFFSET * i, ACC100_RESET_HI);\n+\tusleep(ACC_LONG_WAIT);\n \tfor (i = 0; i < ACC100_ENGINES_MAX; i++)\n-\t\tacc100_reg_write(d, HWPfFecUl5gCntrlReg +\n-\t\t\t\tACC100_ENGINE_OFFSET * i, ACC100_RESET_LO);\n-\tusleep(ACC100_LONG_WAIT);\n+\t\tacc_reg_write(d, HWPfFecUl5gCntrlReg +\n+\t\t\t\tACC_ENGINE_OFFSET * i, ACC100_RESET_LO);\n+\tusleep(ACC_LONG_WAIT);\n \t/* Prepare dummy workload */\n \talloc_2x64mb_sw_rings_mem(bbdev, d, 0);\n \t/* Set base addresses */\n \tuint32_t phys_high = (uint32_t)(d->sw_rings_iova >> 32);\n \tuint32_t phys_low  = (uint32_t)(d->sw_rings_iova 
&\n-\t\t\t~(ACC100_SIZE_64MBYTE-1));\n-\tacc100_reg_write(d, HWPfDmaFec5GulDescBaseHiRegVf, phys_high);\n-\tacc100_reg_write(d, HWPfDmaFec5GulDescBaseLoRegVf, phys_low);\n+\t\t\t~(ACC_SIZE_64MBYTE-1));\n+\tacc_reg_write(d, HWPfDmaFec5GulDescBaseHiRegVf, phys_high);\n+\tacc_reg_write(d, HWPfDmaFec5GulDescBaseLoRegVf, phys_low);\n \n \t/* Descriptor for a dummy 5GUL code block processing*/\n-\tunion acc100_dma_desc *desc = NULL;\n+\tunion acc_dma_desc *desc = NULL;\n \tdesc = d->sw_rings;\n \tdesc->req.data_ptrs[0].address = d->sw_rings_iova +\n-\t\t\tACC100_DESC_FCW_OFFSET;\n-\tdesc->req.data_ptrs[0].blen = ACC100_FCW_LD_BLEN;\n-\tdesc->req.data_ptrs[0].blkid = ACC100_DMA_BLKID_FCW;\n+\t\t\tACC_DESC_FCW_OFFSET;\n+\tdesc->req.data_ptrs[0].blen = ACC_FCW_LD_BLEN;\n+\tdesc->req.data_ptrs[0].blkid = ACC_DMA_BLKID_FCW;\n \tdesc->req.data_ptrs[0].last = 0;\n \tdesc->req.data_ptrs[0].dma_ext = 0;\n \tdesc->req.data_ptrs[1].address = d->sw_rings_iova + 512;\n-\tdesc->req.data_ptrs[1].blkid = ACC100_DMA_BLKID_IN;\n+\tdesc->req.data_ptrs[1].blkid = ACC_DMA_BLKID_IN;\n \tdesc->req.data_ptrs[1].last = 1;\n \tdesc->req.data_ptrs[1].dma_ext = 0;\n \tdesc->req.data_ptrs[1].blen = 44;\n \tdesc->req.data_ptrs[2].address = d->sw_rings_iova + 1024;\n-\tdesc->req.data_ptrs[2].blkid = ACC100_DMA_BLKID_OUT_ENC;\n+\tdesc->req.data_ptrs[2].blkid = ACC_DMA_BLKID_OUT_ENC;\n \tdesc->req.data_ptrs[2].last = 1;\n \tdesc->req.data_ptrs[2].dma_ext = 0;\n \tdesc->req.data_ptrs[2].blen = 5;\n \t/* Dummy FCW */\n-\tdesc->req.fcw_ld.FCWversion = ACC100_FCW_VER;\n+\tdesc->req.fcw_ld.FCWversion = ACC_FCW_VER;\n \tdesc->req.fcw_ld.qm = 1;\n \tdesc->req.fcw_ld.nfiller = 30;\n \tdesc->req.fcw_ld.BG = 2 - 1;\n@@ -4500,8 +3867,8 @@ poweron_cleanup(struct rte_bbdev *bbdev, struct acc100_device *d,\n \t\t\ttemplate_idx++) {\n \t\t/* Check engine power-on status */\n \t\taddress = HwPfFecUl5gIbDebugReg +\n-\t\t\t\tACC100_ENGINE_OFFSET * template_idx;\n-\t\tstatus = (acc100_reg_read(d, address) >> 4) & 
0xF;\n+\t\t\t\tACC_ENGINE_OFFSET * template_idx;\n+\t\tstatus = (acc_reg_read(d, address) >> 4) & 0xF;\n \t\tif (status == 0) {\n \t\t\tengines_to_restart[num_failed_engine] = template_idx;\n \t\t\tnum_failed_engine++;\n@@ -4521,14 +3888,14 @@ poweron_cleanup(struct rte_bbdev *bbdev, struct acc100_device *d,\n \t\t\t\ttemplate_idx <= ACC100_SIG_UL_5G_LAST;\n \t\t\t\ttemplate_idx++) {\n \t\t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n+\t\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n \t\t\tif (template_idx == failed_engine)\n-\t\t\t\tacc100_reg_write(d, address, value);\n+\t\t\t\tacc_reg_write(d, address, value);\n \t\t\telse\n-\t\t\t\tacc100_reg_write(d, address, 0);\n+\t\t\t\tacc_reg_write(d, address, 0);\n \t\t}\n \t\t/* Reset descriptor header */\n-\t\tdesc->req.word0 = ACC100_DMA_DESC_TYPE;\n+\t\tdesc->req.word0 = ACC_DMA_DESC_TYPE;\n \t\tdesc->req.word1 = 0;\n \t\tdesc->req.word2 = 0;\n \t\tdesc->req.word3 = 0;\n@@ -4536,56 +3903,56 @@ poweron_cleanup(struct rte_bbdev *bbdev, struct acc100_device *d,\n \t\tdesc->req.m2dlen = 2;\n \t\tdesc->req.d2mlen = 1;\n \t\t/* Enqueue the code block for processing */\n-\t\tunion acc100_enqueue_reg_fmt enq_req;\n+\t\tunion acc_enqueue_reg_fmt enq_req;\n \t\tenq_req.val = 0;\n-\t\tenq_req.addr_offset = ACC100_DESC_OFFSET;\n+\t\tenq_req.addr_offset = ACC_DESC_OFFSET;\n \t\tenq_req.num_elem = 1;\n \t\tenq_req.req_elem_addr = 0;\n \t\trte_wmb();\n-\t\tacc100_reg_write(d, HWPfQmgrIngressAq + 0x100, enq_req.val);\n-\t\tusleep(ACC100_LONG_WAIT * 100);\n+\t\tacc_reg_write(d, HWPfQmgrIngressAq + 0x100, enq_req.val);\n+\t\tusleep(ACC_LONG_WAIT * 100);\n \t\tif (desc->req.word0 != 2)\n \t\t\tprintf(\"DMA Response %#\"PRIx32\"\\n\", desc->req.word0);\n \t}\n \n \t/* Reset LDPC Cores */\n \tfor (i = 0; i < ACC100_ENGINES_MAX; i++)\n-\t\tacc100_reg_write(d, HWPfFecUl5gCntrlReg +\n-\t\t\t\tACC100_ENGINE_OFFSET * i,\n+\t\tacc_reg_write(d, HWPfFecUl5gCntrlReg +\n+\t\t\t\tACC_ENGINE_OFFSET * i,\n 
\t\t\t\tACC100_RESET_HI);\n-\tusleep(ACC100_LONG_WAIT);\n+\tusleep(ACC_LONG_WAIT);\n \tfor (i = 0; i < ACC100_ENGINES_MAX; i++)\n-\t\tacc100_reg_write(d, HWPfFecUl5gCntrlReg +\n-\t\t\t\tACC100_ENGINE_OFFSET * i,\n+\t\tacc_reg_write(d, HWPfFecUl5gCntrlReg +\n+\t\t\t\tACC_ENGINE_OFFSET * i,\n \t\t\t\tACC100_RESET_LO);\n-\tusleep(ACC100_LONG_WAIT);\n-\tacc100_reg_write(d, HWPfHi5GHardResetReg, ACC100_RESET_HARD);\n-\tusleep(ACC100_LONG_WAIT);\n+\tusleep(ACC_LONG_WAIT);\n+\tacc_reg_write(d, HWPfHi5GHardResetReg, ACC100_RESET_HARD);\n+\tusleep(ACC_LONG_WAIT);\n \tint numEngines = 0;\n \t/* Check engine power-on status again */\n \tfor (template_idx = ACC100_SIG_UL_5G;\n \t\t\ttemplate_idx <= ACC100_SIG_UL_5G_LAST;\n \t\t\ttemplate_idx++) {\n \t\taddress = HwPfFecUl5gIbDebugReg +\n-\t\t\t\tACC100_ENGINE_OFFSET * template_idx;\n-\t\tstatus = (acc100_reg_read(d, address) >> 4) & 0xF;\n+\t\t\t\tACC_ENGINE_OFFSET * template_idx;\n+\t\tstatus = (acc_reg_read(d, address) >> 4) & 0xF;\n \t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n \t\tif (status == 1) {\n-\t\t\tacc100_reg_write(d, address, value);\n+\t\t\tacc_reg_write(d, address, value);\n \t\t\tnumEngines++;\n \t\t} else\n-\t\t\tacc100_reg_write(d, address, 0);\n+\t\t\tacc_reg_write(d, address, 0);\n \t}\n \tprintf(\"Number of 5GUL engines %d\\n\", numEngines);\n \n \trte_free(d->sw_rings_base);\n-\tusleep(ACC100_LONG_WAIT);\n+\tusleep(ACC_LONG_WAIT);\n }\n \n /* Initial configuration of a ACC100 device prior to running configure() */\n static int\n-acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n+acc100_configure(const char *dev_name, struct rte_acc_conf *conf)\n {\n \trte_bbdev_log(INFO, \"rte_acc100_configure\");\n \tuint32_t value, address, status;\n@@ -4593,10 +3960,10 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \tstruct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);\n \n 
\t/* Compile time checks */\n-\tRTE_BUILD_BUG_ON(sizeof(struct acc100_dma_req_desc) != 256);\n-\tRTE_BUILD_BUG_ON(sizeof(union acc100_dma_desc) != 256);\n-\tRTE_BUILD_BUG_ON(sizeof(struct acc100_fcw_td) != 24);\n-\tRTE_BUILD_BUG_ON(sizeof(struct acc100_fcw_te) != 32);\n+\tRTE_BUILD_BUG_ON(sizeof(struct acc_dma_req_desc) != 256);\n+\tRTE_BUILD_BUG_ON(sizeof(union acc_dma_desc) != 256);\n+\tRTE_BUILD_BUG_ON(sizeof(struct acc_fcw_td) != 24);\n+\tRTE_BUILD_BUG_ON(sizeof(struct acc_fcw_te) != 32);\n \n \tif (bbdev == NULL) {\n \t\trte_bbdev_log(ERR,\n@@ -4604,87 +3971,87 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\tdev_name);\n \t\treturn -ENODEV;\n \t}\n-\tstruct acc100_device *d = bbdev->data->dev_private;\n+\tstruct acc_device *d = bbdev->data->dev_private;\n \n \t/* Store configuration */\n-\trte_memcpy(&d->acc100_conf, conf, sizeof(d->acc100_conf));\n+\trte_memcpy(&d->acc_conf, conf, sizeof(d->acc_conf));\n \n-\tvalue = acc100_reg_read(d, HwPfPcieGpexBridgeControl);\n+\tvalue = acc_reg_read(d, HwPfPcieGpexBridgeControl);\n \tbool firstCfg = (value != ACC100_CFG_PCI_BRIDGE);\n \n \t/* PCIe Bridge configuration */\n-\tacc100_reg_write(d, HwPfPcieGpexBridgeControl, ACC100_CFG_PCI_BRIDGE);\n+\tacc_reg_write(d, HwPfPcieGpexBridgeControl, ACC100_CFG_PCI_BRIDGE);\n \tfor (i = 1; i < ACC100_GPEX_AXIMAP_NUM; i++)\n-\t\tacc100_reg_write(d,\n+\t\tacc_reg_write(d,\n \t\t\t\tHwPfPcieGpexAxiAddrMappingWindowPexBaseHigh\n \t\t\t\t+ i * 16, 0);\n \n \t/* Prevent blocking AXI read on BRESP for AXI Write */\n \taddress = HwPfPcieGpexAxiPioControl;\n \tvalue = ACC100_CFG_PCI_AXI;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* 5GDL PLL phase shift */\n-\tacc100_reg_write(d, HWPfChaDl5gPllPhshft0, 0x1);\n+\tacc_reg_write(d, HWPfChaDl5gPllPhshft0, 0x1);\n \n \t/* Explicitly releasing AXI as this may be stopped after PF FLR/BME */\n \taddress = HWPfDmaAxiControl;\n \tvalue = 1;\n-\tacc100_reg_write(d, address, 
value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Enable granular dynamic clock gating */\n \taddress = HWPfHiClkGateHystReg;\n \tvalue = ACC100_CLOCK_GATING_EN;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Set default descriptor signature */\n \taddress = HWPfDmaDescriptorSignatuture;\n \tvalue = 0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Enable the Error Detection in DMA */\n \tvalue = ACC100_CFG_DMA_ERROR;\n \taddress = HWPfDmaErrorDetectionEn;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* AXI Cache configuration */\n \tvalue = ACC100_CFG_AXI_CACHE;\n \taddress = HWPfDmaAxcacheReg;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Adjust PCIe Lane adaptation */\n \tfor (i = 0; i < ACC100_QUAD_NUMS; i++)\n \t\tfor (j = 0; j < ACC100_LANES_PER_QUAD; j++)\n-\t\t\tacc100_reg_write(d, HwPfPcieLnAdaptctrl + i * ACC100_PCIE_QUAD_OFFSET\n+\t\t\tacc_reg_write(d, HwPfPcieLnAdaptctrl + i * ACC100_PCIE_QUAD_OFFSET\n \t\t\t\t\t+ j * ACC100_PCIE_LANE_OFFSET, ACC100_ADAPT);\n \n \t/* Enable PCIe live adaptation */\n \tfor (i = 0; i < ACC100_QUAD_NUMS; i++)\n-\t\tacc100_reg_write(d, HwPfPciePcsEqControl +\n+\t\tacc_reg_write(d, HwPfPciePcsEqControl +\n \t\t\t\ti * ACC100_PCIE_QUAD_OFFSET, ACC100_PCS_EQ);\n \n \t/* Default DMA Configuration (Qmgr Enabled) */\n \taddress = HWPfDmaConfig0Reg;\n \tvalue = 0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \taddress = HWPfDmaQmanen;\n \tvalue = 0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Default RLIM/ALEN configuration */\n \taddress = HWPfDmaConfig1Reg;\n \tvalue = (1 << 31) + (23 << 8) + (1 << 6) + 7;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Configure DMA Qmanager addresses */\n \taddress = HWPfDmaQmgrAddrReg;\n \tvalue = 
HWPfQmgrEgressQueuesTemplate;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Default Fabric Mode */\n \taddress = HWPfFabricMode;\n \tvalue = ACC100_FABRIC_MODE;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* ===== Qmgr Configuration ===== */\n \t/* Configuration of the AQueue Depth QMGR_GRP_0_DEPTH_LOG2 for UL */\n@@ -4694,42 +4061,42 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\tconf->q_dl_5g.num_qgroups;\n \tfor (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {\n \t\taddress = HWPfQmgrDepthLog2Grp +\n-\t\tACC100_BYTES_IN_WORD * qg_idx;\n+\t\tACC_BYTES_IN_WORD * qg_idx;\n \t\tvalue = aqDepth(qg_idx, conf);\n-\t\tacc100_reg_write(d, address, value);\n+\t\tacc_reg_write(d, address, value);\n \t\taddress = HWPfQmgrTholdGrp +\n-\t\tACC100_BYTES_IN_WORD * qg_idx;\n+\t\tACC_BYTES_IN_WORD * qg_idx;\n \t\tvalue = (1 << 16) + (1 << (aqDepth(qg_idx, conf) - 1));\n-\t\tacc100_reg_write(d, address, value);\n+\t\tacc_reg_write(d, address, value);\n \t}\n \n \t/* Template Priority in incremental order */\n-\tfor (template_idx = 0; template_idx < ACC100_NUM_TMPL; template_idx++) {\n-\t\taddress = HWPfQmgrGrpTmplateReg0Indx + ACC100_BYTES_IN_WORD * template_idx;\n-\t\tvalue = ACC100_TMPL_PRI_0;\n-\t\tacc100_reg_write(d, address, value);\n-\t\taddress = HWPfQmgrGrpTmplateReg1Indx + ACC100_BYTES_IN_WORD * template_idx;\n-\t\tvalue = ACC100_TMPL_PRI_1;\n-\t\tacc100_reg_write(d, address, value);\n-\t\taddress = HWPfQmgrGrpTmplateReg2indx + ACC100_BYTES_IN_WORD * template_idx;\n-\t\tvalue = ACC100_TMPL_PRI_2;\n-\t\tacc100_reg_write(d, address, value);\n-\t\taddress = HWPfQmgrGrpTmplateReg3Indx + ACC100_BYTES_IN_WORD * template_idx;\n-\t\tvalue = ACC100_TMPL_PRI_3;\n-\t\tacc100_reg_write(d, address, value);\n+\tfor (template_idx = 0; template_idx < ACC_NUM_TMPL; template_idx++) {\n+\t\taddress = HWPfQmgrGrpTmplateReg0Indx + ACC_BYTES_IN_WORD * template_idx;\n+\t\tvalue = 
ACC_TMPL_PRI_0;\n+\t\tacc_reg_write(d, address, value);\n+\t\taddress = HWPfQmgrGrpTmplateReg1Indx + ACC_BYTES_IN_WORD * template_idx;\n+\t\tvalue = ACC_TMPL_PRI_1;\n+\t\tacc_reg_write(d, address, value);\n+\t\taddress = HWPfQmgrGrpTmplateReg2indx + ACC_BYTES_IN_WORD * template_idx;\n+\t\tvalue = ACC_TMPL_PRI_2;\n+\t\tacc_reg_write(d, address, value);\n+\t\taddress = HWPfQmgrGrpTmplateReg3Indx + ACC_BYTES_IN_WORD * template_idx;\n+\t\tvalue = ACC_TMPL_PRI_3;\n+\t\tacc_reg_write(d, address, value);\n \t}\n \n \taddress = HWPfQmgrGrpPriority;\n \tvalue = ACC100_CFG_QMGR_HI_P;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Template Configuration */\n-\tfor (template_idx = 0; template_idx < ACC100_NUM_TMPL;\n+\tfor (template_idx = 0; template_idx < ACC_NUM_TMPL;\n \t\t\ttemplate_idx++) {\n \t\tvalue = 0;\n \t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n-\t\tacc100_reg_write(d, address, value);\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n+\t\tacc_reg_write(d, address, value);\n \t}\n \t/* 4GUL */\n \tint numQgs = conf->q_ul_4g.num_qgroups;\n@@ -4741,8 +4108,8 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\ttemplate_idx <= ACC100_SIG_UL_4G_LAST;\n \t\t\ttemplate_idx++) {\n \t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n-\t\tacc100_reg_write(d, address, value);\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n+\t\tacc_reg_write(d, address, value);\n \t}\n \t/* 5GUL */\n \tnumQqsAcc += numQgs;\n@@ -4756,15 +4123,15 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\ttemplate_idx++) {\n \t\t/* Check engine power-on status */\n \t\taddress = HwPfFecUl5gIbDebugReg +\n-\t\t\t\tACC100_ENGINE_OFFSET * template_idx;\n-\t\tstatus = (acc100_reg_read(d, address) >> 4) & 0xF;\n+\t\t\t\tACC_ENGINE_OFFSET * template_idx;\n+\t\tstatus = (acc_reg_read(d, address) >> 4) & 0xF;\n \t\taddress = 
HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n \t\tif (status == 1) {\n-\t\t\tacc100_reg_write(d, address, value);\n+\t\t\tacc_reg_write(d, address, value);\n \t\t\tnumEngines++;\n \t\t} else\n-\t\t\tacc100_reg_write(d, address, 0);\n+\t\t\tacc_reg_write(d, address, 0);\n \t}\n \tprintf(\"Number of 5GUL engines %d\\n\", numEngines);\n \t/* 4GDL */\n@@ -4777,8 +4144,8 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\ttemplate_idx <= ACC100_SIG_DL_4G_LAST;\n \t\t\ttemplate_idx++) {\n \t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n-\t\tacc100_reg_write(d, address, value);\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n+\t\tacc_reg_write(d, address, value);\n \t}\n \t/* 5GDL */\n \tnumQqsAcc += numQgs;\n@@ -4790,8 +4157,8 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\ttemplate_idx <= ACC100_SIG_DL_5G_LAST;\n \t\t\ttemplate_idx++) {\n \t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC100_BYTES_IN_WORD * template_idx;\n-\t\tacc100_reg_write(d, address, value);\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n+\t\tacc_reg_write(d, address, value);\n \t}\n \n \t/* Queue Group Function mapping */\n@@ -4802,14 +4169,14 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\tacc = accFromQgid(qg_idx, conf);\n \t\tvalue |= qman_func_id[acc]<<(qg_idx * 4);\n \t}\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Configuration of the Arbitration QGroup depth to 1 */\n \tfor (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {\n \t\taddress = HWPfQmgrArbQDepthGrp +\n-\t\tACC100_BYTES_IN_WORD * qg_idx;\n+\t\tACC_BYTES_IN_WORD * qg_idx;\n \t\tvalue = 0;\n-\t\tacc100_reg_write(d, address, value);\n+\t\tacc_reg_write(d, address, value);\n \t}\n \n \t/* Enabling AQueues through the Queue hierarchy*/\n@@ -4820,9 +4187,9 @@ acc100_configure(const char 
*dev_name, struct rte_acc100_conf *conf)\n \t\t\t\t\tqg_idx < totalQgs)\n \t\t\t\tvalue = (1 << aqNum(qg_idx, conf)) - 1;\n \t\t\taddress = HWPfQmgrAqEnableVf\n-\t\t\t\t\t+ vf_idx * ACC100_BYTES_IN_WORD;\n+\t\t\t\t\t+ vf_idx * ACC_BYTES_IN_WORD;\n \t\t\tvalue += (qg_idx << 16);\n-\t\t\tacc100_reg_write(d, address, value);\n+\t\t\tacc_reg_write(d, address, value);\n \t\t}\n \t}\n \n@@ -4831,10 +4198,10 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \tfor (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {\n \t\tfor (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) {\n \t\t\taddress = HWPfQmgrVfBaseAddr + vf_idx\n-\t\t\t\t\t* ACC100_BYTES_IN_WORD + qg_idx\n-\t\t\t\t\t* ACC100_BYTES_IN_WORD * 64;\n+\t\t\t\t\t* ACC_BYTES_IN_WORD + qg_idx\n+\t\t\t\t\t* ACC_BYTES_IN_WORD * 64;\n \t\t\tvalue = aram_address;\n-\t\t\tacc100_reg_write(d, address, value);\n+\t\t\tacc_reg_write(d, address, value);\n \t\t\t/* Offset ARAM Address for next memory bank\n \t\t\t * - increment of 4B\n \t\t\t */\n@@ -4852,29 +4219,29 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t/* ==== HI Configuration ==== */\n \n \t/* No Info Ring/MSI by default */\n-\tacc100_reg_write(d, HWPfHiInfoRingIntWrEnRegPf, 0);\n-\tacc100_reg_write(d, HWPfHiInfoRingVf2pfLoWrEnReg, 0);\n-\tacc100_reg_write(d, HWPfHiCfgMsiIntWrEnRegPf, 0xFFFFFFFF);\n-\tacc100_reg_write(d, HWPfHiCfgMsiVf2pfLoWrEnReg, 0xFFFFFFFF);\n+\tacc_reg_write(d, HWPfHiInfoRingIntWrEnRegPf, 0);\n+\tacc_reg_write(d, HWPfHiInfoRingVf2pfLoWrEnReg, 0);\n+\tacc_reg_write(d, HWPfHiCfgMsiIntWrEnRegPf, 0xFFFFFFFF);\n+\tacc_reg_write(d, HWPfHiCfgMsiVf2pfLoWrEnReg, 0xFFFFFFFF);\n \t/* Prevent Block on Transmit Error */\n \taddress = HWPfHiBlockTransmitOnErrorEn;\n \tvalue = 0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \t/* Prevents to drop MSI */\n \taddress = HWPfHiMsiDropEnableReg;\n \tvalue = 0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, 
value);\n \t/* Set the PF Mode register */\n \taddress = HWPfHiPfMode;\n-\tvalue = (conf->pf_mode_en) ? ACC100_PF_VAL : 0;\n-\tacc100_reg_write(d, address, value);\n+\tvalue = (conf->pf_mode_en) ? ACC_PF_VAL : 0;\n+\tacc_reg_write(d, address, value);\n \n \t/* QoS overflow init */\n \tvalue = 1;\n \taddress = HWPfQosmonAEvalOverflow0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \taddress = HWPfQosmonBEvalOverflow0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* HARQ DDR Configuration */\n \tunsigned int ddrSizeInMb = ACC100_HARQ_DDR;\n@@ -4883,9 +4250,9 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\t\t* 0x10;\n \t\tvalue = ((vf_idx * (ddrSizeInMb / 64)) << 16) +\n \t\t\t\t(ddrSizeInMb - 1);\n-\t\tacc100_reg_write(d, address, value);\n+\t\tacc_reg_write(d, address, value);\n \t}\n-\tusleep(ACC100_LONG_WAIT);\n+\tusleep(ACC_LONG_WAIT);\n \n \t/* Workaround in case some 5GUL engines are in an unexpected state */\n \tif (numEngines < (ACC100_SIG_UL_5G_LAST + 1))\n@@ -4893,7 +4260,7 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \n \tuint32_t version = 0;\n \tfor (i = 0; i < 4; i++)\n-\t\tversion += acc100_reg_read(d,\n+\t\tversion += acc_reg_read(d,\n \t\t\t\tHWPfDdrPhyIdtmFwVersion + 4 * i) << (8 * i);\n \tif (version != ACC100_PRQ_DDR_VER) {\n \t\tprintf(\"* Note: Not on DDR PRQ version %8x != %08x\\n\",\n@@ -4901,76 +4268,76 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t} else if (firstCfg) {\n \t\t/* ---- DDR configuration at boot up --- */\n \t\t/* Read Clear Ddr training status */\n-\t\tacc100_reg_read(d, HWPfChaDdrStDoneStatus);\n+\t\tacc_reg_read(d, HWPfChaDdrStDoneStatus);\n \t\t/* Reset PHY/IDTM/UMMC */\n-\t\tacc100_reg_write(d, HWPfChaDdrWbRstCfg, 3);\n-\t\tacc100_reg_write(d, HWPfChaDdrApbRstCfg, 2);\n-\t\tacc100_reg_write(d, HWPfChaDdrPhyRstCfg, 2);\n-\t\tacc100_reg_write(d, 
HWPfChaDdrCpuRstCfg, 3);\n-\t\tacc100_reg_write(d, HWPfChaDdrSifRstCfg, 2);\n-\t\tusleep(ACC100_MS_IN_US);\n+\t\tacc_reg_write(d, HWPfChaDdrWbRstCfg, 3);\n+\t\tacc_reg_write(d, HWPfChaDdrApbRstCfg, 2);\n+\t\tacc_reg_write(d, HWPfChaDdrPhyRstCfg, 2);\n+\t\tacc_reg_write(d, HWPfChaDdrCpuRstCfg, 3);\n+\t\tacc_reg_write(d, HWPfChaDdrSifRstCfg, 2);\n+\t\tusleep(ACC_MS_IN_US);\n \t\t/* Reset WB and APB resets */\n-\t\tacc100_reg_write(d, HWPfChaDdrWbRstCfg, 2);\n-\t\tacc100_reg_write(d, HWPfChaDdrApbRstCfg, 3);\n+\t\tacc_reg_write(d, HWPfChaDdrWbRstCfg, 2);\n+\t\tacc_reg_write(d, HWPfChaDdrApbRstCfg, 3);\n \t\t/* Configure PHY-IDTM */\n-\t\tacc100_reg_write(d, HWPfDdrPhyIdletimeout, 0x3e8);\n+\t\tacc_reg_write(d, HWPfDdrPhyIdletimeout, 0x3e8);\n \t\t/* IDTM timing registers */\n-\t\tacc100_reg_write(d, HWPfDdrPhyRdLatency, 0x13);\n-\t\tacc100_reg_write(d, HWPfDdrPhyRdLatencyDbi, 0x15);\n-\t\tacc100_reg_write(d, HWPfDdrPhyWrLatency, 0x10011);\n+\t\tacc_reg_write(d, HWPfDdrPhyRdLatency, 0x13);\n+\t\tacc_reg_write(d, HWPfDdrPhyRdLatencyDbi, 0x15);\n+\t\tacc_reg_write(d, HWPfDdrPhyWrLatency, 0x10011);\n \t\t/* Configure SDRAM MRS registers */\n-\t\tacc100_reg_write(d, HWPfDdrPhyMr01Dimm, 0x3030b70);\n-\t\tacc100_reg_write(d, HWPfDdrPhyMr01DimmDbi, 0x3030b50);\n-\t\tacc100_reg_write(d, HWPfDdrPhyMr23Dimm, 0x30);\n-\t\tacc100_reg_write(d, HWPfDdrPhyMr67Dimm, 0xc00);\n-\t\tacc100_reg_write(d, HWPfDdrPhyMr45Dimm, 0x4000000);\n+\t\tacc_reg_write(d, HWPfDdrPhyMr01Dimm, 0x3030b70);\n+\t\tacc_reg_write(d, HWPfDdrPhyMr01DimmDbi, 0x3030b50);\n+\t\tacc_reg_write(d, HWPfDdrPhyMr23Dimm, 0x30);\n+\t\tacc_reg_write(d, HWPfDdrPhyMr67Dimm, 0xc00);\n+\t\tacc_reg_write(d, HWPfDdrPhyMr45Dimm, 0x4000000);\n \t\t/* Configure active lanes */\n-\t\tacc100_reg_write(d, HWPfDdrPhyDqsCountMax, 0x9);\n-\t\tacc100_reg_write(d, HWPfDdrPhyDqsCountNum, 0x9);\n+\t\tacc_reg_write(d, HWPfDdrPhyDqsCountMax, 0x9);\n+\t\tacc_reg_write(d, HWPfDdrPhyDqsCountNum, 0x9);\n \t\t/* Configure WR/RD leveling timing 
registers */\n-\t\tacc100_reg_write(d, HWPfDdrPhyWrlvlWwRdlvlRr, 0x101212);\n+\t\tacc_reg_write(d, HWPfDdrPhyWrlvlWwRdlvlRr, 0x101212);\n \t\t/* Configure what trainings to execute */\n-\t\tacc100_reg_write(d, HWPfDdrPhyTrngType, 0x2d3c);\n+\t\tacc_reg_write(d, HWPfDdrPhyTrngType, 0x2d3c);\n \t\t/* Releasing PHY reset */\n-\t\tacc100_reg_write(d, HWPfChaDdrPhyRstCfg, 3);\n+\t\tacc_reg_write(d, HWPfChaDdrPhyRstCfg, 3);\n \t\t/* Configure Memory Controller registers */\n-\t\tacc100_reg_write(d, HWPfDdrMemInitPhyTrng0, 0x3);\n-\t\tacc100_reg_write(d, HWPfDdrBcDram, 0x3c232003);\n-\t\tacc100_reg_write(d, HWPfDdrBcAddrMap, 0x31);\n+\t\tacc_reg_write(d, HWPfDdrMemInitPhyTrng0, 0x3);\n+\t\tacc_reg_write(d, HWPfDdrBcDram, 0x3c232003);\n+\t\tacc_reg_write(d, HWPfDdrBcAddrMap, 0x31);\n \t\t/* Configure UMMC BC timing registers */\n-\t\tacc100_reg_write(d, HWPfDdrBcRef, 0xa22);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim0, 0x4050501);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim1, 0xf0b0476);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim2, 0x103);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim3, 0x144050a1);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim4, 0x23300);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim5, 0x4230276);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim6, 0x857914);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim7, 0x79100232);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim8, 0x100007ce);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim9, 0x50020);\n-\t\tacc100_reg_write(d, HWPfDdrBcTim10, 0x40ee);\n+\t\tacc_reg_write(d, HWPfDdrBcRef, 0xa22);\n+\t\tacc_reg_write(d, HWPfDdrBcTim0, 0x4050501);\n+\t\tacc_reg_write(d, HWPfDdrBcTim1, 0xf0b0476);\n+\t\tacc_reg_write(d, HWPfDdrBcTim2, 0x103);\n+\t\tacc_reg_write(d, HWPfDdrBcTim3, 0x144050a1);\n+\t\tacc_reg_write(d, HWPfDdrBcTim4, 0x23300);\n+\t\tacc_reg_write(d, HWPfDdrBcTim5, 0x4230276);\n+\t\tacc_reg_write(d, HWPfDdrBcTim6, 0x857914);\n+\t\tacc_reg_write(d, HWPfDdrBcTim7, 0x79100232);\n+\t\tacc_reg_write(d, HWPfDdrBcTim8, 0x100007ce);\n+\t\tacc_reg_write(d, HWPfDdrBcTim9, 
0x50020);\n+\t\tacc_reg_write(d, HWPfDdrBcTim10, 0x40ee);\n \t\t/* Configure UMMC DFI timing registers */\n-\t\tacc100_reg_write(d, HWPfDdrDfiInit, 0x5000);\n-\t\tacc100_reg_write(d, HWPfDdrDfiTim0, 0x15030006);\n-\t\tacc100_reg_write(d, HWPfDdrDfiTim1, 0x11305);\n-\t\tacc100_reg_write(d, HWPfDdrDfiPhyUpdEn, 0x1);\n-\t\tacc100_reg_write(d, HWPfDdrUmmcIntEn, 0x1f);\n+\t\tacc_reg_write(d, HWPfDdrDfiInit, 0x5000);\n+\t\tacc_reg_write(d, HWPfDdrDfiTim0, 0x15030006);\n+\t\tacc_reg_write(d, HWPfDdrDfiTim1, 0x11305);\n+\t\tacc_reg_write(d, HWPfDdrDfiPhyUpdEn, 0x1);\n+\t\tacc_reg_write(d, HWPfDdrUmmcIntEn, 0x1f);\n \t\t/* Release IDTM CPU out of reset */\n-\t\tacc100_reg_write(d, HWPfChaDdrCpuRstCfg, 0x2);\n+\t\tacc_reg_write(d, HWPfChaDdrCpuRstCfg, 0x2);\n \t\t/* Wait PHY-IDTM to finish static training */\n \t\tfor (i = 0; i < ACC100_DDR_TRAINING_MAX; i++) {\n-\t\t\tusleep(ACC100_MS_IN_US);\n-\t\t\tvalue = acc100_reg_read(d,\n+\t\t\tusleep(ACC_MS_IN_US);\n+\t\t\tvalue = acc_reg_read(d,\n \t\t\t\t\tHWPfChaDdrStDoneStatus);\n \t\t\tif (value & 1)\n \t\t\t\tbreak;\n \t\t}\n \t\tprintf(\"DDR Training completed in %d ms\", i);\n \t\t/* Enable Memory Controller */\n-\t\tacc100_reg_write(d, HWPfDdrUmmcCtrl, 0x401);\n+\t\tacc_reg_write(d, HWPfDdrUmmcCtrl, 0x401);\n \t\t/* Release AXI interface reset */\n-\t\tacc100_reg_write(d, HWPfChaDdrSifRstCfg, 3);\n+\t\tacc_reg_write(d, HWPfChaDdrSifRstCfg, 3);\n \t}\n \n \trte_bbdev_log_debug(\"PF Tip configuration complete for %s\", dev_name);\n@@ -4980,7 +4347,7 @@ acc100_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \n /* Initial configuration of a ACC101 device prior to running configure() */\n static int\n-acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n+acc101_configure(const char *dev_name, struct rte_acc_conf *conf)\n {\n \trte_bbdev_log(INFO, \"rte_acc101_configure\");\n \tuint32_t value, address, status;\n@@ -4988,10 +4355,10 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf 
*conf)\n \tstruct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);\n \n \t/* Compile time checks */\n-\tRTE_BUILD_BUG_ON(sizeof(struct acc100_dma_req_desc) != 256);\n-\tRTE_BUILD_BUG_ON(sizeof(union acc100_dma_desc) != 256);\n-\tRTE_BUILD_BUG_ON(sizeof(struct acc100_fcw_td) != 24);\n-\tRTE_BUILD_BUG_ON(sizeof(struct acc100_fcw_te) != 32);\n+\tRTE_BUILD_BUG_ON(sizeof(struct acc_dma_req_desc) != 256);\n+\tRTE_BUILD_BUG_ON(sizeof(union acc_dma_desc) != 256);\n+\tRTE_BUILD_BUG_ON(sizeof(struct acc_fcw_td) != 24);\n+\tRTE_BUILD_BUG_ON(sizeof(struct acc_fcw_te) != 32);\n \n \tif (bbdev == NULL) {\n \t\trte_bbdev_log(ERR,\n@@ -4999,67 +4366,67 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\tdev_name);\n \t\treturn -ENODEV;\n \t}\n-\tstruct acc100_device *d = bbdev->data->dev_private;\n+\tstruct acc_device *d = bbdev->data->dev_private;\n \n \t/* Store configuration */\n-\trte_memcpy(&d->acc100_conf, conf, sizeof(d->acc100_conf));\n+\trte_memcpy(&d->acc_conf, conf, sizeof(d->acc_conf));\n \n \t/* PCIe Bridge configuration */\n-\tacc100_reg_write(d, HwPfPcieGpexBridgeControl, ACC101_CFG_PCI_BRIDGE);\n+\tacc_reg_write(d, HwPfPcieGpexBridgeControl, ACC101_CFG_PCI_BRIDGE);\n \tfor (i = 1; i < ACC101_GPEX_AXIMAP_NUM; i++)\n-\t\tacc100_reg_write(d, HwPfPcieGpexAxiAddrMappingWindowPexBaseHigh + i * 16, 0);\n+\t\tacc_reg_write(d, HwPfPcieGpexAxiAddrMappingWindowPexBaseHigh + i * 16, 0);\n \n \t/* Prevent blocking AXI read on BRESP for AXI Write */\n \taddress = HwPfPcieGpexAxiPioControl;\n \tvalue = ACC101_CFG_PCI_AXI;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Explicitly releasing AXI including a 2ms delay on ACC101 */\n \tusleep(2000);\n-\tacc100_reg_write(d, HWPfDmaAxiControl, 1);\n+\tacc_reg_write(d, HWPfDmaAxiControl, 1);\n \n \t/* Set the default 5GDL DMA configuration */\n-\tacc100_reg_write(d, HWPfDmaInboundDrainDataSize, ACC101_DMA_INBOUND);\n+\tacc_reg_write(d, HWPfDmaInboundDrainDataSize, 
ACC101_DMA_INBOUND);\n \n \t/* Enable granular dynamic clock gating */\n \taddress = HWPfHiClkGateHystReg;\n \tvalue = ACC101_CLOCK_GATING_EN;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Set default descriptor signature */\n \taddress = HWPfDmaDescriptorSignatuture;\n \tvalue = 0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Enable the Error Detection in DMA */\n \tvalue = ACC101_CFG_DMA_ERROR;\n \taddress = HWPfDmaErrorDetectionEn;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* AXI Cache configuration */\n \tvalue = ACC101_CFG_AXI_CACHE;\n \taddress = HWPfDmaAxcacheReg;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Default DMA Configuration (Qmgr Enabled) */\n \taddress = HWPfDmaConfig0Reg;\n \tvalue = 0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \taddress = HWPfDmaQmanen;\n \tvalue = 0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Default RLIM/ALEN configuration */\n \taddress = HWPfDmaConfig1Reg;\n \tint alen_r = 0xF;\n \tint alen_w = 0x7;\n \tvalue = (1 << 31) + (alen_w << 20)  + (1 << 6) + alen_r;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Configure DMA Qmanager addresses */\n \taddress = HWPfDmaQmgrAddrReg;\n \tvalue = HWPfQmgrEgressQueuesTemplate;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* ===== Qmgr Configuration ===== */\n \t/* Configuration of the AQueue Depth QMGR_GRP_0_DEPTH_LOG2 for UL */\n@@ -5069,43 +4436,43 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\tconf->q_dl_5g.num_qgroups;\n \tfor (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {\n \t\taddress = HWPfQmgrDepthLog2Grp +\n-\t\tACC101_BYTES_IN_WORD * qg_idx;\n+\t\tACC_BYTES_IN_WORD * qg_idx;\n \t\tvalue = aqDepth(qg_idx, 
conf);\n-\t\tacc100_reg_write(d, address, value);\n+\t\tacc_reg_write(d, address, value);\n \t\taddress = HWPfQmgrTholdGrp +\n-\t\tACC101_BYTES_IN_WORD * qg_idx;\n+\t\tACC_BYTES_IN_WORD * qg_idx;\n \t\tvalue = (1 << 16) + (1 << (aqDepth(qg_idx, conf) - 1));\n-\t\tacc100_reg_write(d, address, value);\n+\t\tacc_reg_write(d, address, value);\n \t}\n \n \t/* Template Priority in incremental order */\n-\tfor (template_idx = 0; template_idx < ACC101_NUM_TMPL;\n+\tfor (template_idx = 0; template_idx < ACC_NUM_TMPL;\n \t\t\ttemplate_idx++) {\n-\t\taddress = HWPfQmgrGrpTmplateReg0Indx + ACC101_BYTES_IN_WORD * template_idx;\n-\t\tvalue = ACC101_TMPL_PRI_0;\n-\t\tacc100_reg_write(d, address, value);\n-\t\taddress = HWPfQmgrGrpTmplateReg1Indx + ACC101_BYTES_IN_WORD * template_idx;\n-\t\tvalue = ACC101_TMPL_PRI_1;\n-\t\tacc100_reg_write(d, address, value);\n-\t\taddress = HWPfQmgrGrpTmplateReg2indx + ACC101_BYTES_IN_WORD * template_idx;\n-\t\tvalue = ACC101_TMPL_PRI_2;\n-\t\tacc100_reg_write(d, address, value);\n-\t\taddress = HWPfQmgrGrpTmplateReg3Indx + ACC101_BYTES_IN_WORD * template_idx;\n-\t\tvalue = ACC101_TMPL_PRI_3;\n-\t\tacc100_reg_write(d, address, value);\n+\t\taddress = HWPfQmgrGrpTmplateReg0Indx + ACC_BYTES_IN_WORD * template_idx;\n+\t\tvalue = ACC_TMPL_PRI_0;\n+\t\tacc_reg_write(d, address, value);\n+\t\taddress = HWPfQmgrGrpTmplateReg1Indx + ACC_BYTES_IN_WORD * template_idx;\n+\t\tvalue = ACC_TMPL_PRI_1;\n+\t\tacc_reg_write(d, address, value);\n+\t\taddress = HWPfQmgrGrpTmplateReg2indx + ACC_BYTES_IN_WORD * template_idx;\n+\t\tvalue = ACC_TMPL_PRI_2;\n+\t\tacc_reg_write(d, address, value);\n+\t\taddress = HWPfQmgrGrpTmplateReg3Indx + ACC_BYTES_IN_WORD * template_idx;\n+\t\tvalue = ACC_TMPL_PRI_3;\n+\t\tacc_reg_write(d, address, value);\n \t}\n \n \taddress = HWPfQmgrGrpPriority;\n \tvalue = ACC101_CFG_QMGR_HI_P;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Template Configuration */\n-\tfor (template_idx = 0; 
template_idx < ACC101_NUM_TMPL;\n+\tfor (template_idx = 0; template_idx < ACC_NUM_TMPL;\n \t\t\ttemplate_idx++) {\n \t\tvalue = 0;\n \t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC101_BYTES_IN_WORD * template_idx;\n-\t\tacc100_reg_write(d, address, value);\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n+\t\tacc_reg_write(d, address, value);\n \t}\n \t/* 4GUL */\n \tint numQgs = conf->q_ul_4g.num_qgroups;\n@@ -5117,8 +4484,8 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\ttemplate_idx <= ACC101_SIG_UL_4G_LAST;\n \t\t\ttemplate_idx++) {\n \t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC101_BYTES_IN_WORD * template_idx;\n-\t\tacc100_reg_write(d, address, value);\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n+\t\tacc_reg_write(d, address, value);\n \t}\n \t/* 5GUL */\n \tnumQqsAcc += numQgs;\n@@ -5132,15 +4499,15 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\ttemplate_idx++) {\n \t\t/* Check engine power-on status */\n \t\taddress = HwPfFecUl5gIbDebugReg +\n-\t\t\t\tACC101_ENGINE_OFFSET * template_idx;\n-\t\tstatus = (acc100_reg_read(d, address) >> 4) & 0xF;\n+\t\t\t\tACC_ENGINE_OFFSET * template_idx;\n+\t\tstatus = (acc_reg_read(d, address) >> 4) & 0xF;\n \t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC101_BYTES_IN_WORD * template_idx;\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n \t\tif (status == 1) {\n-\t\t\tacc100_reg_write(d, address, value);\n+\t\t\tacc_reg_write(d, address, value);\n \t\t\tnumEngines++;\n \t\t} else\n-\t\t\tacc100_reg_write(d, address, 0);\n+\t\t\tacc_reg_write(d, address, 0);\n \t}\n \tprintf(\"Number of 5GUL engines %d\\n\", numEngines);\n \t/* 4GDL */\n@@ -5153,8 +4520,8 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\ttemplate_idx <= ACC101_SIG_DL_4G_LAST;\n \t\t\ttemplate_idx++) {\n \t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC101_BYTES_IN_WORD * template_idx;\n-\t\tacc100_reg_write(d, address, 
value);\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n+\t\tacc_reg_write(d, address, value);\n \t}\n \t/* 5GDL */\n \tnumQqsAcc += numQgs;\n@@ -5166,8 +4533,8 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\ttemplate_idx <= ACC101_SIG_DL_5G_LAST;\n \t\t\ttemplate_idx++) {\n \t\taddress = HWPfQmgrGrpTmplateReg4Indx\n-\t\t\t\t+ ACC101_BYTES_IN_WORD * template_idx;\n-\t\tacc100_reg_write(d, address, value);\n+\t\t\t\t+ ACC_BYTES_IN_WORD * template_idx;\n+\t\tacc_reg_write(d, address, value);\n \t}\n \n \t/* Queue Group Function mapping */\n@@ -5178,14 +4545,14 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\tacc = accFromQgid(qg_idx, conf);\n \t\tvalue |= qman_func_id[acc]<<(qg_idx * 4);\n \t}\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* Configuration of the Arbitration QGroup depth to 1 */\n \tfor (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {\n \t\taddress = HWPfQmgrArbQDepthGrp +\n-\t\tACC101_BYTES_IN_WORD * qg_idx;\n+\t\tACC_BYTES_IN_WORD * qg_idx;\n \t\tvalue = 0;\n-\t\tacc100_reg_write(d, address, value);\n+\t\tacc_reg_write(d, address, value);\n \t}\n \n \t/* Enabling AQueues through the Queue hierarchy*/\n@@ -5196,9 +4563,9 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\t\t\tqg_idx < totalQgs)\n \t\t\t\tvalue = (1 << aqNum(qg_idx, conf)) - 1;\n \t\t\taddress = HWPfQmgrAqEnableVf\n-\t\t\t\t\t+ vf_idx * ACC101_BYTES_IN_WORD;\n+\t\t\t\t\t+ vf_idx * ACC_BYTES_IN_WORD;\n \t\t\tvalue += (qg_idx << 16);\n-\t\t\tacc100_reg_write(d, address, value);\n+\t\t\tacc_reg_write(d, address, value);\n \t\t}\n \t}\n \n@@ -5207,10 +4574,10 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \tfor (qg_idx = 0; qg_idx < totalQgs; qg_idx++) {\n \t\tfor (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) {\n \t\t\taddress = HWPfQmgrVfBaseAddr + vf_idx\n-\t\t\t\t\t* ACC101_BYTES_IN_WORD + qg_idx\n-\t\t\t\t\t* ACC101_BYTES_IN_WORD 
* 64;\n+\t\t\t\t\t* ACC_BYTES_IN_WORD + qg_idx\n+\t\t\t\t\t* ACC_BYTES_IN_WORD * 64;\n \t\t\tvalue = aram_address;\n-\t\t\tacc100_reg_write(d, address, value);\n+\t\t\tacc_reg_write(d, address, value);\n \t\t\t/* Offset ARAM Address for next memory bank\n \t\t\t * - increment of 4B\n \t\t\t */\n@@ -5228,32 +4595,32 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t/* ==== HI Configuration ==== */\n \n \t/* No Info Ring/MSI by default */\n-\tacc100_reg_write(d, HWPfHiInfoRingIntWrEnRegPf, 0);\n-\tacc100_reg_write(d, HWPfHiInfoRingVf2pfLoWrEnReg, 0);\n-\tacc100_reg_write(d, HWPfHiCfgMsiIntWrEnRegPf, 0xFFFFFFFF);\n-\tacc100_reg_write(d, HWPfHiCfgMsiVf2pfLoWrEnReg, 0xFFFFFFFF);\n+\tacc_reg_write(d, HWPfHiInfoRingIntWrEnRegPf, 0);\n+\tacc_reg_write(d, HWPfHiInfoRingVf2pfLoWrEnReg, 0);\n+\tacc_reg_write(d, HWPfHiCfgMsiIntWrEnRegPf, 0xFFFFFFFF);\n+\tacc_reg_write(d, HWPfHiCfgMsiVf2pfLoWrEnReg, 0xFFFFFFFF);\n \t/* Prevent Block on Transmit Error */\n \taddress = HWPfHiBlockTransmitOnErrorEn;\n \tvalue = 0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \t/* Prevents to drop MSI */\n \taddress = HWPfHiMsiDropEnableReg;\n \tvalue = 0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \t/* Set the PF Mode register */\n \taddress = HWPfHiPfMode;\n-\tvalue = (conf->pf_mode_en) ? ACC101_PF_VAL : 0;\n-\tacc100_reg_write(d, address, value);\n+\tvalue = (conf->pf_mode_en) ? 
ACC_PF_VAL : 0;\n+\tacc_reg_write(d, address, value);\n \t/* Explicitly releasing AXI after PF Mode and 2 ms */\n \tusleep(2000);\n-\tacc100_reg_write(d, HWPfDmaAxiControl, 1);\n+\tacc_reg_write(d, HWPfDmaAxiControl, 1);\n \n \t/* QoS overflow init */\n \tvalue = 1;\n \taddress = HWPfQosmonAEvalOverflow0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \taddress = HWPfQosmonBEvalOverflow0;\n-\tacc100_reg_write(d, address, value);\n+\tacc_reg_write(d, address, value);\n \n \t/* HARQ DDR Configuration */\n \tunsigned int ddrSizeInMb = ACC101_HARQ_DDR;\n@@ -5262,16 +4629,16 @@ acc101_configure(const char *dev_name, struct rte_acc100_conf *conf)\n \t\t\t\t* 0x10;\n \t\tvalue = ((vf_idx * (ddrSizeInMb / 64)) << 16) +\n \t\t\t\t(ddrSizeInMb - 1);\n-\t\tacc100_reg_write(d, address, value);\n+\t\tacc_reg_write(d, address, value);\n \t}\n-\tusleep(ACC101_LONG_WAIT);\n+\tusleep(ACC_LONG_WAIT);\n \n \trte_bbdev_log_debug(\"PF TIP configuration complete for %s\", dev_name);\n \treturn 0;\n }\n \n int\n-rte_acc10x_configure(const char *dev_name, struct rte_acc100_conf *conf)\n+rte_acc10x_configure(const char *dev_name, struct rte_acc_conf *conf)\n {\n \tstruct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);\n \tif (bbdev == NULL) {\ndiff --git a/drivers/baseband/acc100/rte_acc_common_cfg.h b/drivers/baseband/acc100/rte_acc_common_cfg.h\nnew file mode 100644\nindex 0000000000..8292ef44da\n--- /dev/null\n+++ b/drivers/baseband/acc100/rte_acc_common_cfg.h\n@@ -0,0 +1,101 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2022 Intel Corporation\n+ */\n+\n+#ifndef _RTE_ACC_COMMON_CFG_H_\n+#define _RTE_ACC_COMMON_CFG_H_\n+\n+/**\n+ * @file rte_acc100_cfg.h\n+ *\n+ * Functions for configuring ACC100 HW, exposed directly to applications.\n+ * Configuration related to encoding/decoding is done through the\n+ * librte_bbdev library.\n+ *\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ */\n+\n+#include 
<stdint.h>\n+#include <stdbool.h>\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+/**< Number of Virtual Functions ACC300 supports */\n+#define RTE_ACC_NUM_VFS 64\n+\n+/**\n+ * Definition of Queue Topology for ACC300 Configuration\n+ * Some level of details is abstracted out to expose a clean interface\n+ * given that comprehensive flexibility is not required\n+ */\n+struct rte_acc_queue_topology {\n+\t/** Number of QGroups in incremental order of priority */\n+\tuint16_t num_qgroups;\n+\t/**\n+\t * All QGroups have the same number of AQs here.\n+\t * Note : Could be made a 16-array if more flexibility is really\n+\t * required\n+\t */\n+\tuint16_t num_aqs_per_groups;\n+\t/**\n+\t * Depth of the AQs is the same of all QGroups here. Log2 Enum : 2^N\n+\t * Note : Could be made a 16-array if more flexibility is really\n+\t * required\n+\t */\n+\tuint16_t aq_depth_log2;\n+\t/**\n+\t * Index of the first Queue Group Index - assuming contiguity\n+\t * Initialized as -1\n+\t */\n+\tint8_t first_qgroup_index;\n+};\n+\n+/**\n+ * Definition of Arbitration related parameters for ACC300 Configuration\n+ */\n+struct rte_acc_arbitration {\n+\t/** Default Weight for VF Fairness Arbitration */\n+\tuint16_t round_robin_weight;\n+\tuint32_t gbr_threshold1; /**< Guaranteed Bitrate Threshold 1 */\n+\tuint32_t gbr_threshold2; /**< Guaranteed Bitrate Threshold 2 */\n+};\n+\n+/**\n+ * Structure to pass ACC300 configuration.\n+ * Note: all VF Bundles will have the same configuration.\n+ */\n+struct rte_acc_conf {\n+\tbool pf_mode_en; /**< 1 if PF is used for dataplane, 0 for VFs */\n+\t/** 1 if input '1' bit is represented by a positive LLR value, 0 if '1'\n+\t * bit is represented by a negative value.\n+\t */\n+\tbool input_pos_llr_1_bit;\n+\t/** 1 if output '1' bit is represented by a positive value, 0 if '1'\n+\t * bit is represented by a negative value.\n+\t */\n+\tbool output_pos_llr_1_bit;\n+\tuint16_t num_vf_bundles; /**< Number of VF bundles to setup */\n+\t/** Queue 
topology for each operation type */\n+\tstruct rte_acc_queue_topology q_ul_4g;\n+\tstruct rte_acc_queue_topology q_dl_4g;\n+\tstruct rte_acc_queue_topology q_ul_5g;\n+\tstruct rte_acc_queue_topology q_dl_5g;\n+\tstruct rte_acc_queue_topology q_fft;\n+\tstruct rte_acc_queue_topology q_mld;\n+\t/** Arbitration configuration for each operation type */\n+\tstruct rte_acc_arbitration arb_ul_4g[RTE_ACC_NUM_VFS];\n+\tstruct rte_acc_arbitration arb_dl_4g[RTE_ACC_NUM_VFS];\n+\tstruct rte_acc_arbitration arb_ul_5g[RTE_ACC_NUM_VFS];\n+\tstruct rte_acc_arbitration arb_dl_5g[RTE_ACC_NUM_VFS];\n+\tstruct rte_acc_arbitration arb_fft[RTE_ACC_NUM_VFS];\n+\tstruct rte_acc_arbitration arb_mld[RTE_ACC_NUM_VFS];\n+};\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_ACC_COMMON_CFG_H_ */\n",
    "prefixes": [
        "v6",
        "02/14"
    ]
}