get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch in full.
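
As a minimal usage sketch of the methods above (illustrative only: the API
token is a placeholder, "accepted" stands in for whichever state names the
project defines, and write access also requires sufficient rights on the
project), using Python with the requests library:

    import requests

    BASE = "https://patches.dpdk.org/api"

    # GET: read a patch; no authentication is required.
    patch = requests.get(f"{BASE}/patches/55180/").json()
    print(patch["state"])  # "superseded" for the sample response below

    # PATCH: partially update a patch; only the supplied fields change.
    # Requires a valid API token (placeholder here).
    resp = requests.patch(
        f"{BASE}/patches/55180/",
        headers={"Authorization": "Token <api-token>"},
        json={"state": "accepted"},
    )
    resp.raise_for_status()

A PUT differs only in that it replaces the writable fields as a whole rather
than merging the supplied ones.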

GET /api/patches/55180/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 55180,
    "url": "http://patches.dpdk.org/api/patches/55180/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1561136352-32198-3-git-send-email-nicolas.chautru@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1561136352-32198-3-git-send-email-nicolas.chautru@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1561136352-32198-3-git-send-email-nicolas.chautru@intel.com",
    "date": "2019-06-21T16:59:04",
    "name": "[v3,02/10] baseband/fpga_lte_fec: dependency patch",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "22964fdf988ae6bad58384f9ef8e7f109c1846ae",
    "submitter": {
        "id": 1314,
        "url": "http://patches.dpdk.org/api/people/1314/?format=api",
        "name": "Nicolas Chautru",
        "email": "nicolas.chautru@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1561136352-32198-3-git-send-email-nicolas.chautru@intel.com/mbox/",
    "series": [
        {
            "id": 5122,
            "url": "http://patches.dpdk.org/api/series/5122/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=5122",
            "date": "2019-06-21T16:59:02",
            "name": "bbdev: adding support in BBDEV for 5GNR FEC",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/5122/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/55180/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/55180/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 5AFD81D55E;\n\tSat, 22 Jun 2019 02:02:18 +0200 (CEST)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n\tby dpdk.org (Postfix) with ESMTP id C3BEB1D53C\n\tfor <dev@dpdk.org>; Sat, 22 Jun 2019 02:02:11 +0200 (CEST)",
            "from orsmga003.jf.intel.com ([10.7.209.27])\n\tby fmsmga102.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t21 Jun 2019 17:02:10 -0700",
            "from skx-5gnr-sc12-4.sc.intel.com ([172.25.69.210])\n\tby orsmga003.jf.intel.com with ESMTP; 21 Jun 2019 17:02:09 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.63,402,1557212400\"; d=\"scan'208\";a=\"163039842\"",
        "From": "Nicolas Chautru <nicolas.chautru@intel.com>",
        "To": "akhil.goyal@nxp.com,\n\tdev@dpdk.org",
        "Cc": "thomas@monjalon.net, ferruh.yigit@intel.com, amr.mokhtar@intel.com,\n\tNicolas Chautru <nicolas.chautru@intel.com>",
        "Date": "Fri, 21 Jun 2019 09:59:04 -0700",
        "Message-Id": "<1561136352-32198-3-git-send-email-nicolas.chautru@intel.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1561136352-32198-1-git-send-email-nicolas.chautru@intel.com>",
        "References": "<1557863143-174842-2-git-send-email-nicolas.chautru@intel.com>\n\t<1561136352-32198-1-git-send-email-nicolas.chautru@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 02/10] baseband/fpga_lte_fec: dependency patch",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Not meant for review, purely for dependency for CI.\nThis is a squash of the related serie which\nis not applied yet:\nadding driver for FEC on FPGA\nhttps://patches.dpdk.org/project/dpdk/list/?series=5017\n\nSigned-off-by: Nicolas Chautru <nicolas.chautru@intel.com>\n---\n config/common_base                                 |    5 +\n doc/guides/bbdevs/fpga_lte_fec.rst                 |  318 +++\n doc/guides/bbdevs/index.rst                        |    1 +\n drivers/baseband/Makefile                          |    2 +\n drivers/baseband/fpga_lte_fec/Makefile             |   29 +\n drivers/baseband/fpga_lte_fec/fpga_lte_fec.c       | 2674 ++++++++++++++++++++\n drivers/baseband/fpga_lte_fec/fpga_lte_fec.h       |   73 +\n drivers/baseband/fpga_lte_fec/meson.build          |    7 +\n .../rte_pmd_bbdev_fpga_lte_fec_version.map         |    3 +\n drivers/baseband/meson.build                       |    2 +-\n mk/rte.app.mk                                      |    1 +\n 11 files changed, 3114 insertions(+), 1 deletion(-)\n create mode 100644 doc/guides/bbdevs/fpga_lte_fec.rst\n create mode 100644 drivers/baseband/fpga_lte_fec/Makefile\n create mode 100644 drivers/baseband/fpga_lte_fec/fpga_lte_fec.c\n create mode 100644 drivers/baseband/fpga_lte_fec/fpga_lte_fec.h\n create mode 100644 drivers/baseband/fpga_lte_fec/meson.build\n create mode 100644 drivers/baseband/fpga_lte_fec/rte_pmd_bbdev_fpga_lte_fec_version.map",
    "diff": "diff --git a/config/common_base b/config/common_base\nindex 21ab606..cc13025 100644\n--- a/config/common_base\n+++ b/config/common_base\n@@ -526,6 +526,7 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y\n # EXPERIMENTAL: API may change without prior notice\n #\n CONFIG_RTE_LIBRTE_BBDEV=y\n+CONFIG_RTE_LIBRTE_BBDEV_DEBUG=n\n CONFIG_RTE_BBDEV_MAX_DEVS=128\n CONFIG_RTE_BBDEV_OFFLOAD_COST=y\n CONFIG_RTE_BBDEV_SDK_AVX2=n\n@@ -540,6 +541,10 @@ CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL=y\n #\n CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=y\n \n+# Compile PMD for Intel FPGA LTE FEC bbdev device\n+#\n+CONFIG_RTE_LIBRTE_PMD_FPGA_LTE_FEC=y\n+\n #\n # Compile generic crypto device library\n #\ndiff --git a/doc/guides/bbdevs/fpga_lte_fec.rst b/doc/guides/bbdevs/fpga_lte_fec.rst\nnew file mode 100644\nindex 0000000..71b058c\n--- /dev/null\n+++ b/doc/guides/bbdevs/fpga_lte_fec.rst\n@@ -0,0 +1,318 @@\n+..  SPDX-License-Identifier: BSD-3-Clause\n+    Copyright(c) 2018 Intel Corporation\n+\n+Intel(R) FPGA LTE FEC Poll Mode Driver\n+======================================\n+\n+The BBDEV FPGA LTE FEC poll mode driver (PMD) supports an FPGA implementation of a VRAN\n+Turbo Encode / Decode LTE wireless acceleration function, using Intel's PCI-e and FPGA\n+based Vista Creek device.\n+\n+Features\n+--------\n+\n+FPGA LTE FEC PMD supports the following features:\n+\n+- Turbo Encode in the DL with total throughput of 4.5 Gbits/s\n+- Turbo Decode in the UL with total throughput of 1.5 Gbits/s assuming 8 decoder iterations\n+- 8 VFs per PF (physical device)\n+- Maximum of 32 UL queues per VF\n+- Maximum of 32 DL queues per VF\n+- PCIe Gen-3 x8 Interface\n+- MSI-X\n+- SR-IOV\n+\n+\n+FPGA LTE FEC PMD supports the following BBDEV capabilities:\n+\n+* For the turbo encode operation:\n+   - ``RTE_BBDEV_TURBO_CRC_24B_ATTACH`` :  set to attach CRC24B to CB(s)\n+   - ``RTE_BBDEV_TURBO_RATE_MATCH`` :  if set then do not do Rate Match bypass\n+   - ``RTE_BBDEV_TURBO_ENC_INTERRUPTS`` :  set for encoder dequeue interrupts\n+\n+\n+* For the turbo decode operation:\n+   - ``RTE_BBDEV_TURBO_CRC_TYPE_24B`` :  check CRC24B from CB(s)\n+   - ``RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE`` :  perform subblock de-interleave\n+   - ``RTE_BBDEV_TURBO_DEC_INTERRUPTS`` :  set for decoder dequeue interrupts\n+   - ``RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN`` :  set if negative LLR encoder i/p is supported\n+   - ``RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP`` :  keep CRC24B bits appended while decoding\n+\n+\n+Limitations\n+-----------\n+\n+FPGA LTE FEC does not support the following:\n+\n+- Scatter-Gather function\n+\n+\n+Installation\n+--------------\n+\n+Section 3 of the DPDK manual provides instuctions on installing and compiling DPDK. The\n+default set of bbdev compile flags may be found in config/common_base, where for example\n+the flag to build the FPGA LTE FEC device, ``CONFIG_RTE_LIBRTE_PMD_FPGA_LTE_FEC``, is already\n+set. It is assumed DPDK has been compiled using for instance:\n+\n+.. code-block:: console\n+\n+  make install T=x86_64-native-linuxapp-gcc\n+\n+\n+DPDK requires hugepages to be configured as detailed in section 2 of the DPDK manual.\n+The bbdev test application has been tested with a configuration 40 x 1GB hugepages. The\n+hugepage configuration of a server may be examined using:\n+\n+.. code-block:: console\n+\n+   grep Huge* /proc/meminfo\n+\n+\n+Initialization\n+--------------\n+\n+When the device first powers up, its PCI Physical Functions (PF) can be listed through this command:\n+\n+.. 
code-block:: console\n+\n+  sudo lspci -vd1172:5052\n+\n+The physical and virtual functions are compatible with Linux UIO drivers:\n+``vfio`` and ``igb_uio``. However, in order to work, the FPGA LTE FEC device first needs\n+to be bound to one of these Linux drivers through DPDK.\n+\n+\n+Bind PF UIO driver(s)\n+~~~~~~~~~~~~~~~~~~~~~\n+\n+Install the DPDK igb_uio driver, bind it with the PF PCI device ID and use\n+``lspci`` to confirm the PF device is in use by the ``igb_uio`` DPDK UIO driver.\n+\n+The igb_uio driver may be bound to the PF PCI device using one of three methods:\n+\n+\n+1. PCI functions (physical or virtual, depending on the use case) can be bound to\n+the UIO driver by repeating this command for every function.\n+\n+.. code-block:: console\n+\n+  cd <dpdk-top-level-directory>\n+  insmod ./build/kmod/igb_uio.ko\n+  echo \"1172 5052\" > /sys/bus/pci/drivers/igb_uio/new_id\n+  lspci -vd1172:\n+\n+\n+2. Another way to bind the PF to the DPDK UIO driver is by using the ``dpdk-devbind.py`` tool\n+\n+.. code-block:: console\n+\n+  cd <dpdk-top-level-directory>\n+  ./usertools/dpdk-devbind.py -b igb_uio 0000:06:00.0\n+\n+where the PCI device ID (example: 0000:06:00.0) is obtained using lspci -vd1172:\n+\n+\n+3. A third way to bind is to use the ``dpdk-setup.sh`` tool\n+\n+.. code-block:: console\n+\n+  cd <dpdk-top-level-directory>\n+  ./usertools/dpdk-setup.sh\n+\n+  select 'Bind Ethernet/Crypto/Baseband device to IGB UIO module'\n+  or\n+  select 'Bind Ethernet/Crypto/Baseband device to VFIO module' depending on driver required\n+  enter PCI device ID\n+  select 'Display current Ethernet/Crypto/Baseband device settings' to confirm binding\n+\n+\n+In the same way the FPGA LTE FEC PF can be bound with vfio, but the vfio driver does not\n+support SR-IOV configuration out of the box, so it will need to be patched.\n+\n+\n+Enable Virtual Functions\n+~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+Now, it should be visible in the printouts that the PCI PF is under igb_uio control:\n+\"``Kernel driver in use: igb_uio``\"\n+\n+To show the number of available VFs on the device, read the ``sriov_totalvfs`` file:\n+\n+.. code-block:: console\n+\n+  cat /sys/bus/pci/devices/0000\\:<b>\\:<d>.<f>/sriov_totalvfs\n+\n+  where 0000\\:<b>\\:<d>.<f> is the PCI device ID\n+\n+\n+To enable VFs via igb_uio, echo the number of virtual functions to be\n+enabled to the ``max_vfs`` file:\n+\n+.. code-block:: console\n+\n+  echo <num-of-vfs> > /sys/bus/pci/devices/0000\\:<b>\\:<d>.<f>/max_vfs\n+\n+\n+Afterwards, all VFs must be bound to appropriate UIO drivers as required, in the same\n+way as was done with the physical function previously.\n+\n+Enabling SR-IOV via the vfio driver is much the same, except that the file\n+name is different:\n+\n+.. code-block:: console\n+\n+  echo <num-of-vfs> > /sys/bus/pci/devices/0000\\:<b>\\:<d>.<f>/sriov_numvfs\n+\n+\n+Configure the VFs through PF\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+The PCI virtual functions must be configured before being used or assigned\n+to VMs/Containers. The configuration involves allocating the number of hardware\n+queues, priorities, load balance, bandwidth and other settings necessary for the\n+device to perform FEC functions.\n+\n+This configuration needs to be executed at least once after reboot or PCI FLR and can\n+be achieved by using the function ``fpga_lte_fec_configure()``, which sets up the\n+parameters defined in the ``fpga_lte_fec_conf`` structure:\n+\n+.. 
code-block:: c\n+\n+  struct fpga_lte_fec_conf {\n+      bool pf_mode_en;\n+      uint8_t vf_ul_queues_number[FPGA_LTE_FEC_NUM_VFS];\n+      uint8_t vf_dl_queues_number[FPGA_LTE_FEC_NUM_VFS];\n+      uint8_t ul_bandwidth;\n+      uint8_t dl_bandwidth;\n+      uint8_t ul_load_balance;\n+      uint8_t dl_load_balance;\n+      uint16_t flr_time_out;\n+  };\n+\n+- ``pf_mode_en``: identifies whether only the PF is to be used, or the VFs. PF and\n+  VFs are mutually exclusive and cannot run simultaneously.\n+  Set to 1 to enable PF mode.\n+  If PF mode is enabled, all queues available in the device are assigned\n+  exclusively to the PF and 0 queues are given to the VFs.\n+\n+- ``vf_*l_queues_number``: defines the hardware queue mapping for every VF.\n+\n+- ``*l_bandwidth``: used in case of congestion on the PCIe interface. The device\n+  allocates different bandwidth to UL and DL. The weight is configured by this\n+  setting. The unit of weight is 3 code blocks. For example, if the code block\n+  cbps (code blocks per second) ratio between UL and DL is 12:1, then the\n+  configuration value should be set to 36:3. The scheduling algorithm is based\n+  on code blocks, regardless of the length of each block.\n+\n+- ``*l_load_balance``: hardware queues are load-balanced in a round-robin\n+  fashion. Queues get filled first-in first-out until they reach a pre-defined\n+  watermark level; if it is exceeded, they won't get assigned new code blocks.\n+  This watermark is defined by this setting.\n+\n+  If all hardware queues exceed the watermark, no code blocks will be\n+  streamed in from the UL/DL code block FIFO.\n+\n+- ``flr_time_out``: specifies the FLR timeout in units of 16.384us, i.e.\n+  time_out = flr_time_out x 16.384us. For instance, to set a 10ms FLR timeout,\n+  set this field to 0x262=610.\n+\n+\n+An example configuration code calling the function ``fpga_lte_fec_configure()`` is shown\n+below:\n+\n+.. code-block:: c\n+\n+  struct fpga_lte_fec_conf conf;\n+  unsigned int i;\n+\n+  memset(&conf, 0, sizeof(struct fpga_lte_fec_conf));\n+  conf.pf_mode_en = 1;\n+\n+  for (i = 0; i < FPGA_LTE_FEC_NUM_VFS; ++i) {\n+      conf.vf_ul_queues_number[i] = 4;\n+      conf.vf_dl_queues_number[i] = 4;\n+  }\n+  conf.ul_bandwidth = 12;\n+  conf.dl_bandwidth = 5;\n+  conf.dl_load_balance = 64;\n+  conf.ul_load_balance = 64;\n+\n+  /* setup FPGA PF */\n+  ret = fpga_lte_fec_configure(info->dev_name, &conf);\n+  TEST_ASSERT_SUCCESS(ret,\n+      \"Failed to configure 4G FPGA PF for bbdev %s\",\n+      info->dev_name);\n+\n+\n+Test Application\n+----------------\n+\n+BBDEV provides a test application, ``test-bbdev.py``, and a range of test data for testing\n+the functionality of FPGA LTE FEC turbo encode and turbo decode, depending on the device's\n+capabilities. The test application is located under the app/test-bbdev folder and has the\n+following options:\n+\n+.. code-block:: console\n+\n+  \"-p\", \"--testapp-path\": specifies path to the bbdev test app.\n+  \"-e\", \"--eal-params\"\t: EAL arguments which are passed to the test app.\n+  \"-t\", \"--timeout\"\t: Timeout in seconds (default=300).\n+  \"-c\", \"--test-cases\"\t: Defines test cases to run. 
Run all if not specified.\n+  \"-v\", \"--test-vector\"\t: Test vector path (default=dpdk_path+/app/test-bbdev/test_vectors/bbdev_null.data).\n+  \"-n\", \"--num-ops\"\t: Number of operations to process on device (default=32).\n+  \"-b\", \"--burst-size\"\t: Operations enqueue/dequeue burst size (default=32).\n+  \"-l\", \"--num-lcores\"\t: Number of lcores to run (default=16).\n+  \"-i\", \"--init-device\" : Initialise PF device with default values.\n+\n+\n+To execute the test application tool using simple turbo decode or turbo encode data,\n+type one of the following:\n+\n+.. code-block:: console\n+\n+  ./test-bbdev.py -c validation -n 64 -b 8 -v ./turbo_dec_default.data\n+  ./test-bbdev.py -c validation -n 64 -b 8 -v ./turbo_enc_default.data\n+\n+\n+The test application ``test-bbdev.py`` supports configuring the PF device with\n+a default set of values, if the \"-i\" or \"--init-device\" option is included. The default values\n+are defined in test_bbdev_perf.c as:\n+\n+- VF_UL_QUEUE_VALUE 4\n+- VF_DL_QUEUE_VALUE 4\n+- UL_BANDWIDTH 3\n+- DL_BANDWIDTH 3\n+- UL_LOAD_BALANCE 128\n+- DL_LOAD_BALANCE 128\n+- FLR_TIMEOUT 610\n+\n+\n+Test Vectors\n+~~~~~~~~~~~~\n+\n+In addition to the simple turbo decoder and turbo encoder tests, bbdev also provides\n+a range of additional tests under the test_vectors folder, which may be useful. The results\n+of these tests will depend on the FPGA LTE FEC capabilities:\n+\n+* turbo decoder tests:\n+   - ``turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data``\n+   - ``turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data``\n+   - ``turbo_dec_c1_k6144_r0_e34560_negllr.data``\n+   - ``turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data``\n+   - ``turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data``\n+   - ``turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data``\n+\n+\n+* turbo encoder tests:\n+   - ``turbo_enc_c1_k40_r0_e1190_rm.data``\n+   - ``turbo_enc_c1_k40_r0_e1194_rm.data``\n+   - ``turbo_enc_c1_k40_r0_e1196_rm.data``\n+   - ``turbo_enc_c1_k40_r0_e272_rm.data``\n+   - ``turbo_enc_c1_k6144_r0_e18444.data``\n+   - ``turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data``\n+   - ``turbo_enc_c2_k5952_r0_e17868_crc24b.data``\n+   - ``turbo_enc_c3_k4800_r2_e14412_crc24b.data``\n+   - ``turbo_enc_c4_k4800_r2_e14412_crc24b.data``\n+\n+\ndiff --git a/doc/guides/bbdevs/index.rst b/doc/guides/bbdevs/index.rst\nindex 93276ed..005b95e 100644\n--- a/doc/guides/bbdevs/index.rst\n+++ b/doc/guides/bbdevs/index.rst\n@@ -10,3 +10,4 @@ Baseband Device Drivers\n \n     null\n     turbo_sw\n+    fpga_lte_fec\ndiff --git a/drivers/baseband/Makefile b/drivers/baseband/Makefile\nindex 4ec83b0..ceffc7d 100644\n--- a/drivers/baseband/Makefile\n+++ b/drivers/baseband/Makefile\n@@ -10,5 +10,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL) += null\n DEPDIRS-null = $(core-libs)\n DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += turbo_sw\n DEPDIRS-turbo_sw = $(core-libs)\n+DIRS-$(CONFIG_RTE_LIBRTE_PMD_FPGA_LTE_FEC) += fpga_lte_fec\n+DEPDIRS-fpga_lte_fec = $(core-libs)\n \n include $(RTE_SDK)/mk/rte.subdir.mk\ndiff --git a/drivers/baseband/fpga_lte_fec/Makefile b/drivers/baseband/fpga_lte_fec/Makefile\nnew file mode 100644\nindex 0000000..a38a396\n--- /dev/null\n+++ b/drivers/baseband/fpga_lte_fec/Makefile\n@@ -0,0 +1,29 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2019 Intel Corporation\n+\n+include $(RTE_SDK)/mk/rte.vars.mk\n+\n+# library name\n+LIB = librte_pmd_fpga_lte_fec.a\n+\n+# build flags\n+CFLAGS += -DALLOW_EXPERIMENTAL_API\n+CFLAGS += -O3\n+CFLAGS += 
$(WERROR_FLAGS)\n+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring\n+LDLIBS += -lrte_bbdev\n+LDLIBS += -lrte_pci -lrte_bus_pci\n+\n+# versioning export map\n+EXPORT_MAP := rte_pmd_bbdev_fpga_lte_fec_version.map\n+\n+# library version\n+LIBABIVER := 1\n+\n+# library source files\n+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FPGA_LTE_FEC) += fpga_lte_fec.c\n+\n+# export include files\n+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_FPGA_LTE_FEC)-include += fpga_lte_fec.h\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c\nnew file mode 100644\nindex 0000000..19e7689\n--- /dev/null\n+++ b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c\n@@ -0,0 +1,2674 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019 Intel Corporation\n+ */\n+\n+#include <unistd.h>\n+\n+#include <rte_common.h>\n+#include <rte_log.h>\n+#include <rte_dev.h>\n+#include <rte_malloc.h>\n+#include <rte_mempool.h>\n+#include <rte_errno.h>\n+#include <rte_pci.h>\n+#include <rte_bus_pci.h>\n+#include <rte_byteorder.h>\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+#include <rte_cycles.h>\n+#endif\n+\n+#include <rte_bbdev.h>\n+#include <rte_bbdev_pmd.h>\n+\n+#include \"fpga_lte_fec.h\"\n+\n+/* Turbo SW PMD logging ID */\n+static int fpga_lte_fec_logtype;\n+\n+/* Helper macro for logging */\n+#define rte_bbdev_log(level, fmt, ...) \\\n+\trte_log(RTE_LOG_ ## level, fpga_lte_fec_logtype, fmt \"\\n\", \\\n+\t\t##__VA_ARGS__)\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+#define rte_bbdev_log_debug(fmt, ...) \\\n+\t\trte_bbdev_log(DEBUG, \"fpga_lte_fec: \" fmt, \\\n+\t\t##__VA_ARGS__)\n+#else\n+#define rte_bbdev_log_debug(fmt, ...)\n+#endif\n+\n+/* FPGA LTE FEC driver names */\n+#define FPGA_LTE_FEC_PF_DRIVER_NAME intel_fpga_lte_fec_pf\n+#define FPGA_LTE_FEC_VF_DRIVER_NAME intel_fpga_lte_fec_vf\n+\n+/* FPGA LTE FEC PCI vendor & device IDs */\n+#define FPGA_LTE_FEC_VENDOR_ID (0x1172)\n+#define FPGA_LTE_FEC_PF_DEVICE_ID (0x5052)\n+#define FPGA_LTE_FEC_VF_DEVICE_ID (0x5050)\n+\n+/* Align DMA descriptors to 256 bytes - cache-aligned */\n+#define FPGA_RING_DESC_ENTRY_LENGTH (8)\n+/* Ring size is in 256 bits (32 bytes) units */\n+#define FPGA_RING_DESC_LEN_UNIT_BYTES (32)\n+/* Maximum size of queue */\n+#define FPGA_RING_MAX_SIZE (1024)\n+#define FPGA_FLR_TIMEOUT_UNIT (16.384)\n+\n+#define FPGA_NUM_UL_QUEUES (32)\n+#define FPGA_NUM_DL_QUEUES (32)\n+#define FPGA_TOTAL_NUM_QUEUES (FPGA_NUM_UL_QUEUES + FPGA_NUM_DL_QUEUES)\n+#define FPGA_NUM_INTR_VEC (FPGA_TOTAL_NUM_QUEUES - RTE_INTR_VEC_RXTX_OFFSET)\n+\n+#define FPGA_INVALID_HW_QUEUE_ID (0xFFFFFFFF)\n+\n+#define FPGA_QUEUE_FLUSH_TIMEOUT_US (1000)\n+#define FPGA_TIMEOUT_CHECK_INTERVAL (5)\n+\n+/* FPGA LTE FEC Register mapping on BAR0 */\n+enum {\n+\tFPGA_LTE_FEC_VERSION_ID = 0x00000000, /* len: 4B */\n+\tFPGA_LTE_FEC_CONFIGURATION = 0x00000004, /* len: 2B */\n+\tFPGA_LTE_FEC_QUEUE_PF_VF_MAP_DONE = 0x00000008, /* len: 1B */\n+\tFPGA_LTE_FEC_LOAD_BALANCE_FACTOR = 0x0000000a, /* len: 2B */\n+\tFPGA_LTE_FEC_RING_DESC_LEN = 0x0000000c, /* len: 2B */\n+\tFPGA_LTE_FEC_FLR_TIME_OUT = 0x0000000e, /* len: 2B */\n+\tFPGA_LTE_FEC_VFQ_FLUSH_STATUS_LW = 0x00000018, /* len: 4B */\n+\tFPGA_LTE_FEC_VFQ_FLUSH_STATUS_HI = 0x0000001c, /* len: 4B */\n+\tFPGA_LTE_FEC_VF0_DEBUG = 0x00000020, /* len: 4B */\n+\tFPGA_LTE_FEC_VF1_DEBUG = 0x00000024, /* len: 4B */\n+\tFPGA_LTE_FEC_VF2_DEBUG = 0x00000028, /* len: 4B */\n+\tFPGA_LTE_FEC_VF3_DEBUG = 0x0000002c, /* len: 4B */\n+\tFPGA_LTE_FEC_VF4_DEBUG = 0x00000030, /* len: 4B */\n+\tFPGA_LTE_FEC_VF5_DEBUG = 
0x00000034, /* len: 4B */\n+\tFPGA_LTE_FEC_VF6_DEBUG = 0x00000038, /* len: 4B */\n+\tFPGA_LTE_FEC_VF7_DEBUG = 0x0000003c, /* len: 4B */\n+\tFPGA_LTE_FEC_QUEUE_MAP = 0x00000040, /* len: 256B */\n+\tFPGA_LTE_FEC_RING_CTRL_REGS = 0x00000200  /* len: 2048B */\n+};\n+\n+/* FPGA LTE FEC Ring Control Registers */\n+enum {\n+\tFPGA_LTE_FEC_RING_HEAD_ADDR = 0x00000008,\n+\tFPGA_LTE_FEC_RING_SIZE = 0x00000010,\n+\tFPGA_LTE_FEC_RING_MISC = 0x00000014,\n+\tFPGA_LTE_FEC_RING_ENABLE = 0x00000015,\n+\tFPGA_LTE_FEC_RING_FLUSH_QUEUE_EN = 0x00000016,\n+\tFPGA_LTE_FEC_RING_SHADOW_TAIL = 0x00000018,\n+\tFPGA_LTE_FEC_RING_HEAD_POINT = 0x0000001C\n+};\n+\n+/* FPGA LTE FEC DESCRIPTOR ERROR */\n+enum {\n+\tDESC_ERR_NO_ERR = 0x0,\n+\tDESC_ERR_K_OUT_OF_RANGE = 0x1,\n+\tDESC_ERR_K_NOT_NORMAL = 0x2,\n+\tDESC_ERR_KPAI_NOT_NORMAL = 0x3,\n+\tDESC_ERR_DESC_OFFSET_ERR = 0x4,\n+\tDESC_ERR_DESC_READ_FAIL = 0x8,\n+\tDESC_ERR_DESC_READ_TIMEOUT = 0x9,\n+\tDESC_ERR_DESC_READ_TLP_POISONED = 0xA,\n+\tDESC_ERR_CB_READ_FAIL = 0xC,\n+\tDESC_ERR_CB_READ_TIMEOUT = 0xD,\n+\tDESC_ERR_CB_READ_TLP_POISONED = 0xE\n+};\n+\n+/* FPGA LTE FEC DMA Encoding Request Descriptor */\n+struct __attribute__((__packed__)) fpga_dma_enc_desc {\n+\tuint32_t done:1,\n+\t\trsrvd0:11,\n+\t\terror:4,\n+\t\trsrvd1:16;\n+\tuint32_t ncb:16,\n+\t\trsrvd2:14,\n+\t\trv:2;\n+\tuint32_t bypass_rm:1,\n+\t\tirq_en:1,\n+\t\tcrc_en:1,\n+\t\trsrvd3:13,\n+\t\toffset:10,\n+\t\trsrvd4:6;\n+\tuint16_t e;\n+\tuint16_t k;\n+\tuint32_t out_addr_lw;\n+\tuint32_t out_addr_hi;\n+\tuint32_t in_addr_lw;\n+\tuint32_t in_addr_hi;\n+\n+\tunion {\n+\t\tstruct {\n+\t\t\t/* Virtual addresses used to retrieve SW context info */\n+\t\t\tvoid *op_addr;\n+\t\t\t/* Stores information about total number of Code Blocks\n+\t\t\t * in currently processed Transport Block\n+\t\t\t */\n+\t\t\tuint64_t cbs_in_op;\n+\t\t};\n+\n+\t\tuint8_t sw_ctxt[FPGA_RING_DESC_LEN_UNIT_BYTES *\n+\t\t\t\t\t(FPGA_RING_DESC_ENTRY_LENGTH - 1)];\n+\t};\n+};\n+\n+/* FPGA LTE FEC DMA Decoding Request Descriptor */\n+struct __attribute__((__packed__)) fpga_dma_dec_desc {\n+\tuint32_t done:1,\n+\t\titer:5,\n+\t\trsrvd0:2,\n+\t\tcrc_pass:1,\n+\t\trsrvd1:3,\n+\t\terror:4,\n+\t\tcrc_type:1,\n+\t\trsrvd2:7,\n+\t\tmax_iter:5,\n+\t\trsrvd3:3;\n+\tuint32_t rsrvd4;\n+\tuint32_t bypass_rm:1,\n+\t\tirq_en:1,\n+\t\tdrop_crc:1,\n+\t\trsrvd5:13,\n+\t\toffset:10,\n+\t\trsrvd6:6;\n+\tuint16_t k;\n+\tuint16_t in_len;\n+\tuint32_t out_addr_lw;\n+\tuint32_t out_addr_hi;\n+\tuint32_t in_addr_lw;\n+\tuint32_t in_addr_hi;\n+\n+\tunion {\n+\t\tstruct {\n+\t\t\t/* Virtual addresses used to retrieve SW context info */\n+\t\t\tvoid *op_addr;\n+\t\t\t/* Stores information about total number of Code Blocks\n+\t\t\t * in currently processed Transport Block\n+\t\t\t */\n+\t\t\tuint8_t cbs_in_op;\n+\t\t};\n+\n+\t\tuint32_t sw_ctxt[8 * (FPGA_RING_DESC_ENTRY_LENGTH - 1)];\n+\t};\n+};\n+\n+/* FPGA LTE DMA Descriptor */\n+union fpga_dma_desc {\n+\tstruct fpga_dma_enc_desc enc_req;\n+\tstruct fpga_dma_dec_desc dec_req;\n+};\n+\n+/* FPGA LTE FEC Ring Control Register */\n+struct __attribute__((__packed__)) fpga_ring_ctrl_reg {\n+\tuint64_t ring_base_addr;\n+\tuint64_t ring_head_addr;\n+\tuint16_t ring_size:11;\n+\tuint16_t rsrvd0;\n+\tunion { /* Miscellaneous register */\n+\t\tuint8_t misc;\n+\t\tuint8_t max_ul_dec:5,\n+\t\t\tmax_ul_dec_en:1,\n+\t\t\trsrvd1:2;\n+\t};\n+\tuint8_t enable;\n+\tuint8_t flush_queue_en;\n+\tuint8_t rsrvd2;\n+\tuint16_t shadow_tail;\n+\tuint16_t rsrvd3;\n+\tuint16_t head_point;\n+\tuint16_t rsrvd4;\n+\n+};\n+\n+/* Private data 
structure for each FPGA FEC device */\n+struct fpga_lte_fec_device {\n+\t/** Base address of MMIO registers (BAR0) */\n+\tvoid *mmio_base;\n+\t/** Base address of memory for sw rings */\n+\tvoid *sw_rings;\n+\t/** Physical address of sw_rings */\n+\trte_iova_t sw_rings_phys;\n+\t/** Number of bytes available for each queue in device. */\n+\tuint32_t sw_ring_size;\n+\t/** Max number of entries available for each queue in device */\n+\tuint32_t sw_ring_max_depth;\n+\t/** Base address of response tail pointer buffer */\n+\tuint32_t *tail_ptrs;\n+\t/** Physical address of tail pointers */\n+\trte_iova_t tail_ptr_phys;\n+\t/** Queues flush completion flag */\n+\tuint64_t *flush_queue_status;\n+\t/* Bitmap capturing which Queues are bound to the PF/VF */\n+\tuint64_t q_bound_bit_map;\n+\t/* Bitmap capturing which Queues have already been assigned */\n+\tuint64_t q_assigned_bit_map;\n+\t/** True if this is a PF FPGA FEC device */\n+\tbool pf_device;\n+};\n+\n+/* Structure associated with each queue. */\n+struct __rte_cache_aligned fpga_queue {\n+\tstruct fpga_ring_ctrl_reg ring_ctrl_reg;  /* Ring Control Register */\n+\tunion fpga_dma_desc *ring_addr;  /* Virtual address of software ring */\n+\tuint64_t *ring_head_addr;  /* Virtual address of completion_head */\n+\tuint64_t shadow_completion_head; /* Shadow completion head value */\n+\tuint16_t head_free_desc;  /* Ring head */\n+\tuint16_t tail;  /* Ring tail */\n+\t/* Mask used to wrap enqueued descriptors on the sw ring */\n+\tuint32_t sw_ring_wrap_mask;\n+\tuint32_t irq_enable;  /* Enable ops dequeue interrupts if set to 1 */\n+\tuint8_t q_idx;  /* Queue index */\n+\tstruct fpga_lte_fec_device *d;\n+\t/* MMIO register of shadow_tail used to enqueue descriptors */\n+\tvoid *shadow_tail_addr;\n+};\n+\n+/* Write to 16 bit MMIO register address */\n+static inline void\n+mmio_write_16(void *addr, uint16_t value)\n+{\n+\t*((volatile uint16_t *)(addr)) = rte_cpu_to_le_16(value);\n+}\n+\n+/* Write to 32 bit MMIO register address */\n+static inline void\n+mmio_write_32(void *addr, uint32_t value)\n+{\n+\t*((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);\n+}\n+\n+/* Write to 64 bit MMIO register address */\n+static inline void\n+mmio_write_64(void *addr, uint64_t value)\n+{\n+\t*((volatile uint64_t *)(addr)) = rte_cpu_to_le_64(value);\n+}\n+\n+/* Write a 8 bit register of a FPGA LTE FEC device */\n+static inline void\n+fpga_reg_write_8(void *mmio_base, uint32_t offset, uint8_t payload)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\t*((volatile uint8_t *)(reg_addr)) = payload;\n+}\n+\n+/* Write a 16 bit register of a FPGA LTE FEC device */\n+static inline void\n+fpga_reg_write_16(void *mmio_base, uint32_t offset, uint16_t payload)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tmmio_write_16(reg_addr, payload);\n+}\n+\n+/* Write a 32 bit register of a FPGA LTE FEC device */\n+static inline void\n+fpga_reg_write_32(void *mmio_base, uint32_t offset, uint32_t payload)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tmmio_write_32(reg_addr, payload);\n+}\n+\n+/* Write a 64 bit register of a FPGA LTE FEC device */\n+static inline void\n+fpga_reg_write_64(void *mmio_base, uint32_t offset, uint64_t payload)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tmmio_write_64(reg_addr, payload);\n+}\n+\n+/* Write a ring control register of a FPGA LTE FEC device */\n+static inline void\n+fpga_ring_reg_write(void *mmio_base, uint32_t offset,\n+\t\tstruct fpga_ring_ctrl_reg 
payload)\n+{\n+\tfpga_reg_write_64(mmio_base, offset, payload.ring_base_addr);\n+\tfpga_reg_write_64(mmio_base, offset + FPGA_LTE_FEC_RING_HEAD_ADDR,\n+\t\t\tpayload.ring_head_addr);\n+\tfpga_reg_write_16(mmio_base, offset + FPGA_LTE_FEC_RING_SIZE,\n+\t\t\tpayload.ring_size);\n+\tfpga_reg_write_16(mmio_base, offset + FPGA_LTE_FEC_RING_HEAD_POINT,\n+\t\t\tpayload.head_point);\n+\tfpga_reg_write_8(mmio_base, offset + FPGA_LTE_FEC_RING_FLUSH_QUEUE_EN,\n+\t\t\tpayload.flush_queue_en);\n+\tfpga_reg_write_16(mmio_base, offset + FPGA_LTE_FEC_RING_SHADOW_TAIL,\n+\t\t\tpayload.shadow_tail);\n+\tfpga_reg_write_8(mmio_base, offset + FPGA_LTE_FEC_RING_MISC,\n+\t\t\tpayload.misc);\n+\tfpga_reg_write_8(mmio_base, offset + FPGA_LTE_FEC_RING_ENABLE,\n+\t\t\tpayload.enable);\n+}\n+\n+/* Read a register of FPGA LTE FEC device */\n+static uint32_t\n+fpga_reg_read_32(void *mmio_base, uint32_t offset)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tuint32_t ret = *((volatile uint32_t *)(reg_addr));\n+\treturn rte_le_to_cpu_32(ret);\n+}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+/* Read a register of FPGA LTE FEC device */\n+static uint8_t\n+fpga_reg_read_8(void *mmio_base, uint32_t offset)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\treturn *((volatile uint8_t *)(reg_addr));\n+}\n+\n+/* Read a register of FPGA LTE FEC device */\n+static uint16_t\n+fpga_reg_read_16(void *mmio_base, uint32_t offset)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tuint16_t ret = *((volatile uint16_t *)(reg_addr));\n+\treturn rte_le_to_cpu_16(ret);\n+}\n+\n+/* Read a register of FPGA LTE FEC device */\n+static uint64_t\n+fpga_reg_read_64(void *mmio_base, uint32_t offset)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tuint64_t ret = *((volatile uint64_t *)(reg_addr));\n+\treturn rte_le_to_cpu_64(ret);\n+}\n+\n+/* Read Ring Control Register of FPGA LTE FEC device */\n+static inline void\n+print_ring_reg_debug_info(void *mmio_base, uint32_t offset)\n+{\n+\trte_bbdev_log_debug(\n+\t\t\"FPGA MMIO base address @ %p | Ring Control Register @ offset = 0x%08\"\n+\t\tPRIx32, mmio_base, offset);\n+\trte_bbdev_log_debug(\n+\t\t\"RING_BASE_ADDR = 0x%016\"PRIx64,\n+\t\tfpga_reg_read_64(mmio_base, offset));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_HEAD_ADDR = 0x%016\"PRIx64,\n+\t\tfpga_reg_read_64(mmio_base, offset +\n+\t\t\t\tFPGA_LTE_FEC_RING_HEAD_ADDR));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_SIZE = 0x%04\"PRIx16,\n+\t\tfpga_reg_read_16(mmio_base, offset +\n+\t\t\t\tFPGA_LTE_FEC_RING_SIZE));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_MISC = 0x%02\"PRIx8,\n+\t\tfpga_reg_read_8(mmio_base, offset +\n+\t\t\t\tFPGA_LTE_FEC_RING_MISC));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_ENABLE = 0x%02\"PRIx8,\n+\t\tfpga_reg_read_8(mmio_base, offset +\n+\t\t\t\tFPGA_LTE_FEC_RING_ENABLE));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_FLUSH_QUEUE_EN = 0x%02\"PRIx8,\n+\t\tfpga_reg_read_8(mmio_base, offset +\n+\t\t\t\tFPGA_LTE_FEC_RING_FLUSH_QUEUE_EN));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_SHADOW_TAIL = 0x%04\"PRIx16,\n+\t\tfpga_reg_read_16(mmio_base, offset +\n+\t\t\t\tFPGA_LTE_FEC_RING_SHADOW_TAIL));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_HEAD_POINT = 0x%04\"PRIx16,\n+\t\tfpga_reg_read_16(mmio_base, offset +\n+\t\t\t\tFPGA_LTE_FEC_RING_HEAD_POINT));\n+}\n+\n+/* Read Static Register of FPGA LTE FEC device */\n+static inline void\n+print_static_reg_debug_info(void *mmio_base)\n+{\n+\tuint16_t config = fpga_reg_read_16(mmio_base,\n+\t\t\tFPGA_LTE_FEC_CONFIGURATION);\n+\tuint8_t qmap_done = 
fpga_reg_read_8(mmio_base,\n+\t\t\tFPGA_LTE_FEC_QUEUE_PF_VF_MAP_DONE);\n+\tuint16_t lb_factor = fpga_reg_read_16(mmio_base,\n+\t\t\tFPGA_LTE_FEC_LOAD_BALANCE_FACTOR);\n+\tuint16_t ring_desc_len = fpga_reg_read_16(mmio_base,\n+\t\t\tFPGA_LTE_FEC_RING_DESC_LEN);\n+\tuint16_t flr_time_out = fpga_reg_read_16(mmio_base,\n+\t\t\tFPGA_LTE_FEC_FLR_TIME_OUT);\n+\n+\trte_bbdev_log_debug(\"UL.DL Weights = %u.%u\",\n+\t\t\t((uint8_t)config), ((uint8_t)(config >> 8)));\n+\trte_bbdev_log_debug(\"UL.DL Load Balance = %u.%u\",\n+\t\t\t((uint8_t)lb_factor), ((uint8_t)(lb_factor >> 8)));\n+\trte_bbdev_log_debug(\"Queue-PF/VF Mapping Table = %s\",\n+\t\t\t(qmap_done > 0) ? \"READY\" : \"NOT-READY\");\n+\trte_bbdev_log_debug(\"Ring Descriptor Size = %u bytes\",\n+\t\t\tring_desc_len*FPGA_RING_DESC_LEN_UNIT_BYTES);\n+\trte_bbdev_log_debug(\"FLR Timeout = %f usec\",\n+\t\t\t(float)flr_time_out*FPGA_FLR_TIMEOUT_UNIT);\n+}\n+\n+/* Print decode DMA Descriptor of FPGA LTE FEC device */\n+static void\n+print_dma_dec_desc_debug_info(union fpga_dma_desc *desc)\n+{\n+\trte_bbdev_log_debug(\"DMA response desc %p\\n\"\n+\t\t\"\\t-- done(%\"PRIu32\") | iter(%\"PRIu32\") | crc_pass(%\"PRIu32\")\"\n+\t\t\" | error (%\"PRIu32\") | crc_type(%\"PRIu32\")\\n\"\n+\t\t\"\\t-- max_iter(%\"PRIu32\") | bypass_rm(%\"PRIu32\") | \"\n+\t\t\"irq_en (%\"PRIu32\") | drop_crc(%\"PRIu32\") | offset(%\"PRIu32\")\\n\"\n+\t\t\"\\t-- k(%\"PRIu32\") | in_len (%\"PRIu16\") | op_add(%p)\\n\"\n+\t\t\"\\t-- cbs_in_op(%\"PRIu32\") | in_add (0x%08\"PRIx32\"%08\"PRIx32\") | \"\n+\t\t\"out_add (0x%08\"PRIx32\"%08\"PRIx32\")\",\n+\t\tdesc,\n+\t\t(uint32_t)desc->dec_req.done,\n+\t\t(uint32_t)desc->dec_req.iter,\n+\t\t(uint32_t)desc->dec_req.crc_pass,\n+\t\t(uint32_t)desc->dec_req.error,\n+\t\t(uint32_t)desc->dec_req.crc_type,\n+\t\t(uint32_t)desc->dec_req.max_iter,\n+\t\t(uint32_t)desc->dec_req.bypass_rm,\n+\t\t(uint32_t)desc->dec_req.irq_en,\n+\t\t(uint32_t)desc->dec_req.drop_crc,\n+\t\t(uint32_t)desc->dec_req.offset,\n+\t\t(uint32_t)desc->dec_req.k,\n+\t\t(uint16_t)desc->dec_req.in_len,\n+\t\tdesc->dec_req.op_addr,\n+\t\t(uint32_t)desc->dec_req.cbs_in_op,\n+\t\t(uint32_t)desc->dec_req.in_addr_hi,\n+\t\t(uint32_t)desc->dec_req.in_addr_lw,\n+\t\t(uint32_t)desc->dec_req.out_addr_hi,\n+\t\t(uint32_t)desc->dec_req.out_addr_lw);\n+}\n+#endif\n+\n+static int\n+fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n+{\n+\t/* Number of queues bound to a PF/VF */\n+\tuint32_t hw_q_num = 0;\n+\tuint32_t ring_size, payload, address, q_id, offset;\n+\trte_iova_t phys_addr;\n+\tstruct fpga_ring_ctrl_reg ring_reg;\n+\tstruct fpga_lte_fec_device *fpga_dev = dev->data->dev_private;\n+\n+\taddress = FPGA_LTE_FEC_QUEUE_PF_VF_MAP_DONE;\n+\tif (!(fpga_reg_read_32(fpga_dev->mmio_base, address) & 0x1)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Queue-PF/VF mapping is not set! 
Was PF configured for device (%s) ?\",\n+\t\t\t\tdev->data->name);\n+\t\treturn -EPERM;\n+\t}\n+\n+\t/* Clear queue registers structure */\n+\tmemset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));\n+\n+\t/* Scan queue map.\n+\t * If a queue is valid and mapped to a calling PF/VF the read value is\n+\t * replaced with a queue ID and if it's not then\n+\t * FPGA_INVALID_HW_QUEUE_ID is returned.\n+\t */\n+\tfor (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {\n+\t\tuint32_t hw_q_id = fpga_reg_read_32(fpga_dev->mmio_base,\n+\t\t\t\tFPGA_LTE_FEC_QUEUE_MAP + (q_id << 2));\n+\n+\t\trte_bbdev_log_debug(\"%s: queue ID: %u, registry queue ID: %u\",\n+\t\t\t\tdev->device->name, q_id, hw_q_id);\n+\n+\t\tif (hw_q_id != FPGA_INVALID_HW_QUEUE_ID) {\n+\t\t\tfpga_dev->q_bound_bit_map |= (1ULL << q_id);\n+\t\t\t/* Clear queue register of found queue */\n+\t\t\toffset = FPGA_LTE_FEC_RING_CTRL_REGS +\n+\t\t\t\t(sizeof(struct fpga_ring_ctrl_reg) * q_id);\n+\t\t\tfpga_ring_reg_write(fpga_dev->mmio_base,\n+\t\t\t\t\toffset, ring_reg);\n+\t\t\t++hw_q_num;\n+\t\t}\n+\t}\n+\tif (hw_q_num == 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\"No HW queues assigned to this device. Probably this is a VF configured for PF mode. Check device configuration!\");\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tif (num_queues > hw_q_num) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\"Not enough queues for device %s! Requested: %u, available: %u\",\n+\t\t\tdev->device->name, num_queues, hw_q_num);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tring_size = FPGA_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);\n+\n+\t/* Enforce 32 byte alignment */\n+\tRTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);\n+\n+\t/* Allocate memory for SW descriptor rings */\n+\tfpga_dev->sw_rings = rte_zmalloc_socket(dev->device->driver->name,\n+\t\t\tnum_queues * ring_size, RTE_CACHE_LINE_SIZE,\n+\t\t\tsocket_id);\n+\tif (fpga_dev->sw_rings == NULL) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Failed to allocate memory for %s:%u sw_rings\",\n+\t\t\t\tdev->device->driver->name, dev->data->dev_id);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tfpga_dev->sw_rings_phys = rte_malloc_virt2iova(fpga_dev->sw_rings);\n+\tfpga_dev->sw_ring_size = ring_size;\n+\tfpga_dev->sw_ring_max_depth = FPGA_RING_MAX_SIZE;\n+\n+\t/* Allocate memory for ring flush status */\n+\tfpga_dev->flush_queue_status = rte_zmalloc_socket(NULL,\n+\t\t\tsizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (fpga_dev->flush_queue_status == NULL) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Failed to allocate memory for %s:%u flush_queue_status\",\n+\t\t\t\tdev->device->driver->name, dev->data->dev_id);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Set the flush status address registers */\n+\tphys_addr = rte_malloc_virt2iova(fpga_dev->flush_queue_status);\n+\n+\taddress = FPGA_LTE_FEC_VFQ_FLUSH_STATUS_LW;\n+\tpayload = (uint32_t)(phys_addr);\n+\tfpga_reg_write_32(fpga_dev->mmio_base, address, payload);\n+\n+\taddress = FPGA_LTE_FEC_VFQ_FLUSH_STATUS_HI;\n+\tpayload = (uint32_t)(phys_addr >> 32);\n+\tfpga_reg_write_32(fpga_dev->mmio_base, address, payload);\n+\n+\treturn 0;\n+}\n+\n+static int\n+fpga_dev_close(struct rte_bbdev *dev)\n+{\n+\tstruct fpga_lte_fec_device *fpga_dev = dev->data->dev_private;\n+\n+\trte_free(fpga_dev->sw_rings);\n+\trte_free(fpga_dev->flush_queue_status);\n+\n+\treturn 0;\n+}\n+\n+static void\n+fpga_dev_info_get(struct rte_bbdev *dev,\n+\t\tstruct rte_bbdev_driver_info *dev_info)\n+{\n+\tstruct fpga_lte_fec_device *d = dev->data->dev_private;\n+\tuint32_t q_id = 0;\n+\n+\t/* TODO RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN and numbers 
of buffers are set\n+\t * to temporary values as they are required by test application while\n+\t * validation phase.\n+\t */\n+\tstatic const struct rte_bbdev_op_cap bbdev_capabilities[] = {\n+\t\t{\n+\t\t\t.type = RTE_BBDEV_OP_TURBO_DEC,\n+\t\t\t.cap.turbo_dec = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\tRTE_BBDEV_TURBO_CRC_TYPE_24B |\n+\t\t\t\t\tRTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |\n+\t\t\t\t\tRTE_BBDEV_TURBO_DEC_INTERRUPTS |\n+\t\t\t\t\tRTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |\n+\t\t\t\t\tRTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP,\n+\t\t\t\t.max_llr_modulus = INT8_MAX,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_hard_out =\n+\t\t\t\t\tRTE_BBDEV_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_soft_out = 0\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_BBDEV_OP_TURBO_ENC,\n+\t\t\t.cap.turbo_enc = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\tRTE_BBDEV_TURBO_CRC_24B_ATTACH |\n+\t\t\t\t\tRTE_BBDEV_TURBO_RATE_MATCH |\n+\t\t\t\t\tRTE_BBDEV_TURBO_ENC_INTERRUPTS,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_dst =\n+\t\t\t\t\t\tRTE_BBDEV_MAX_CODE_BLOCKS\n+\t\t\t}\n+\t\t},\n+\t\tRTE_BBDEV_END_OF_CAPABILITIES_LIST()\n+\t};\n+\n+\tstatic struct rte_bbdev_queue_conf default_queue_conf;\n+\tdefault_queue_conf.socket = dev->data->socket_id;\n+\tdefault_queue_conf.queue_size = FPGA_RING_MAX_SIZE;\n+\n+\n+\tdev_info->driver_name = dev->device->driver->name;\n+\tdev_info->queue_size_lim = FPGA_RING_MAX_SIZE;\n+\tdev_info->hardware_accelerated = true;\n+\tdev_info->min_alignment = 64;\n+\tdev_info->default_queue_conf = default_queue_conf;\n+\tdev_info->capabilities = bbdev_capabilities;\n+\tdev_info->cpu_flag_reqs = NULL;\n+\n+\t/* Calculates number of queues assigned to device */\n+\tdev_info->max_num_queues = 0;\n+\tfor (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {\n+\t\tuint32_t hw_q_id = fpga_reg_read_32(d->mmio_base,\n+\t\t\t\tFPGA_LTE_FEC_QUEUE_MAP + (q_id << 2));\n+\t\tif (hw_q_id != FPGA_INVALID_HW_QUEUE_ID)\n+\t\t\tdev_info->max_num_queues++;\n+\t}\n+}\n+\n+/**\n+ * Find index of queue bound to current PF/VF which is unassigned. Return -1\n+ * when there is no available queue\n+ */\n+static int\n+fpga_find_free_queue_idx(struct rte_bbdev *dev,\n+\t\tconst struct rte_bbdev_queue_conf *conf)\n+{\n+\tstruct fpga_lte_fec_device *d = dev->data->dev_private;\n+\tuint64_t q_idx;\n+\tuint8_t i = 0;\n+\tuint8_t range = FPGA_TOTAL_NUM_QUEUES >> 1;\n+\n+\tif (conf->op_type == RTE_BBDEV_OP_TURBO_ENC) {\n+\t\ti = FPGA_NUM_DL_QUEUES;\n+\t\trange = FPGA_TOTAL_NUM_QUEUES;\n+\t}\n+\n+\tfor (; i < range; ++i) {\n+\t\tq_idx = 1ULL << i;\n+\t\t/* Check if index of queue is bound to current PF/VF */\n+\t\tif (d->q_bound_bit_map & q_idx)\n+\t\t\t/* Check if found queue was not already assigned */\n+\t\t\tif (!(d->q_assigned_bit_map & q_idx)) {\n+\t\t\t\td->q_assigned_bit_map |= q_idx;\n+\t\t\t\treturn i;\n+\t\t\t}\n+\t}\n+\n+\trte_bbdev_log(INFO, \"Failed to find free queue on %s\", dev->data->name);\n+\n+\treturn -1;\n+}\n+\n+static int\n+fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,\n+\t\tconst struct rte_bbdev_queue_conf *conf)\n+{\n+\tuint32_t address, ring_offset;\n+\tstruct fpga_lte_fec_device *d = dev->data->dev_private;\n+\tstruct fpga_queue *q;\n+\tint8_t q_idx;\n+\n+\t/* Check if there is a free queue to assign */\n+\tq_idx = fpga_find_free_queue_idx(dev, conf);\n+\tif (q_idx == -1)\n+\t\treturn -1;\n+\n+\t/* Allocate the queue data structure. 
*/\n+\tq = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),\n+\t\t\tRTE_CACHE_LINE_SIZE, conf->socket);\n+\tif (q == NULL) {\n+\t\t/* Mark queue as un-assigned */\n+\t\td->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));\n+\t\trte_bbdev_log(ERR, \"Failed to allocate queue memory\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tq->d = d;\n+\tq->q_idx = q_idx;\n+\n+\t/* Set ring_base_addr */\n+\tq->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));\n+\tq->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys +\n+\t\t\t(d->sw_ring_size * queue_id);\n+\n+\t/* Allocate memory for Completion Head variable*/\n+\tq->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,\n+\t\t\tsizeof(uint64_t), RTE_CACHE_LINE_SIZE, conf->socket);\n+\tif (q->ring_head_addr == NULL) {\n+\t\t/* Mark queue as un-assigned */\n+\t\td->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));\n+\t\trte_free(q);\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Failed to allocate memory for %s:%u completion_head\",\n+\t\t\t\tdev->device->driver->name, dev->data->dev_id);\n+\t\treturn -ENOMEM;\n+\t}\n+\t/* Set ring_head_addr */\n+\tq->ring_ctrl_reg.ring_head_addr =\n+\t\t\trte_malloc_virt2iova(q->ring_head_addr);\n+\n+\t/* Clear shadow_completion_head */\n+\tq->shadow_completion_head = 0;\n+\n+\t/* Set ring_size */\n+\tif (conf->queue_size > FPGA_RING_MAX_SIZE) {\n+\t\t/* Mark queue as un-assigned */\n+\t\td->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));\n+\t\trte_free(q->ring_head_addr);\n+\t\trte_free(q);\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Size of queue is too big %d (MAX: %d ) for %s:%u\",\n+\t\t\t\tconf->queue_size, FPGA_RING_MAX_SIZE,\n+\t\t\t\tdev->device->driver->name, dev->data->dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tq->ring_ctrl_reg.ring_size = conf->queue_size;\n+\n+\t/* Set Miscellaneous FPGA register*/\n+\t/* Max iteration number for TTI mitigation - todo */\n+\tq->ring_ctrl_reg.max_ul_dec = 0;\n+\t/* Enable max iteration number for TTI - todo */\n+\tq->ring_ctrl_reg.max_ul_dec_en = 0;\n+\n+\t/* Enable the ring */\n+\tq->ring_ctrl_reg.enable = 1;\n+\n+\t/* Set FPGA head_point and tail registers */\n+\tq->ring_ctrl_reg.head_point = q->tail = 0;\n+\n+\t/* Set FPGA shadow_tail register */\n+\tq->ring_ctrl_reg.shadow_tail = q->tail;\n+\n+\t/* Calculates the ring offset for found queue */\n+\tring_offset = FPGA_LTE_FEC_RING_CTRL_REGS +\n+\t\t\t(sizeof(struct fpga_ring_ctrl_reg) * q_idx);\n+\n+\t/* Set FPGA Ring Control Registers */\n+\tfpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);\n+\n+\t/* Store MMIO register of shadow_tail */\n+\taddress = ring_offset + FPGA_LTE_FEC_RING_SHADOW_TAIL;\n+\tq->shadow_tail_addr = RTE_PTR_ADD(d->mmio_base, address);\n+\n+\tq->head_free_desc = q->tail;\n+\n+\t/* Set wrap mask */\n+\tq->sw_ring_wrap_mask = conf->queue_size - 1;\n+\n+\trte_bbdev_log_debug(\"Setup dev%u q%u: queue_idx=%u\",\n+\t\t\tdev->data->dev_id, queue_id, q->q_idx);\n+\n+\tdev->data->queues[queue_id].queue_private = q;\n+\n+\trte_bbdev_log_debug(\"BBDEV queue[%d] set up for FPGA queue[%d]\",\n+\t\t\tqueue_id, q_idx);\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t/* Read FPGA Ring Control Registers after configuration*/\n+\tprint_ring_reg_debug_info(d->mmio_base, ring_offset);\n+#endif\n+\treturn 0;\n+}\n+\n+static int\n+fpga_queue_release(struct rte_bbdev *dev, uint16_t queue_id)\n+{\n+\tstruct fpga_lte_fec_device *d = dev->data->dev_private;\n+\tstruct fpga_queue *q = dev->data->queues[queue_id].queue_private;\n+\tstruct fpga_ring_ctrl_reg ring_reg;\n+\tuint32_t 
offset;\n+\n+\trte_bbdev_log_debug(\"FPGA Queue[%d] released\", queue_id);\n+\n+\tif (q != NULL) {\n+\t\tmemset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));\n+\t\toffset = FPGA_LTE_FEC_RING_CTRL_REGS +\n+\t\t\t(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);\n+\t\t/* Disable queue */\n+\t\tfpga_reg_write_8(d->mmio_base,\n+\t\t\t\toffset + FPGA_LTE_FEC_RING_ENABLE, 0x00);\n+\t\t/* Clear queue registers */\n+\t\tfpga_ring_reg_write(d->mmio_base, offset, ring_reg);\n+\n+\t\t/* Mark the Queue as un-assigned */\n+\t\td->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q->q_idx));\n+\t\trte_free(q->ring_head_addr);\n+\t\trte_free(q);\n+\t\tdev->data->queues[queue_id].queue_private = NULL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Function starts a device queue. */\n+static int\n+fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)\n+{\n+\tstruct fpga_lte_fec_device *d = dev->data->dev_private;\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tif (d == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid device pointer\");\n+\t\treturn -1;\n+\t}\n+#endif\n+\tstruct fpga_queue *q = dev->data->queues[queue_id].queue_private;\n+\tuint32_t offset = FPGA_LTE_FEC_RING_CTRL_REGS +\n+\t\t\t(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);\n+\tuint8_t enable = 0x01;\n+\tuint16_t zero = 0x0000;\n+\n+\t/* Clear queue head and tail variables */\n+\tq->tail = q->head_free_desc = 0;\n+\n+\t/* Clear FPGA head_point and tail registers */\n+\tfpga_reg_write_16(d->mmio_base, offset + FPGA_LTE_FEC_RING_HEAD_POINT,\n+\t\t\tzero);\n+\tfpga_reg_write_16(d->mmio_base, offset + FPGA_LTE_FEC_RING_SHADOW_TAIL,\n+\t\t\tzero);\n+\n+\t/* Enable queue */\n+\tfpga_reg_write_8(d->mmio_base, offset + FPGA_LTE_FEC_RING_ENABLE,\n+\t\t\tenable);\n+\n+\trte_bbdev_log_debug(\"FPGA Queue[%d] started\", queue_id);\n+\treturn 0;\n+}\n+\n+/* Function stops a device queue. */\n+static int\n+fpga_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)\n+{\n+\tstruct fpga_lte_fec_device *d = dev->data->dev_private;\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tif (d == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid device pointer\");\n+\t\treturn -1;\n+\t}\n+#endif\n+\tstruct fpga_queue *q = dev->data->queues[queue_id].queue_private;\n+\tuint32_t offset = FPGA_LTE_FEC_RING_CTRL_REGS +\n+\t\t\t(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);\n+\tuint8_t payload = 0x01;\n+\tuint8_t counter = 0;\n+\tuint8_t timeout = FPGA_QUEUE_FLUSH_TIMEOUT_US /\n+\t\t\tFPGA_TIMEOUT_CHECK_INTERVAL;\n+\n+\t/* Set flush_queue_en bit to trigger queue flushing */\n+\tfpga_reg_write_8(d->mmio_base,\n+\t\t\toffset + FPGA_LTE_FEC_RING_FLUSH_QUEUE_EN, payload);\n+\n+\t/** Check if queue flush is completed.\n+\t * FPGA will update the completion flag after queue flushing is\n+\t * completed. 
If completion flag is not updated within 1ms it is\n+\t * considered as a failure.\n+\t */\n+\twhile (!(*((uint8_t *)d->flush_queue_status + q->q_idx) & payload)) {\n+\t\tif (counter > timeout) {\n+\t\t\trte_bbdev_log(ERR, \"FPGA Queue Flush failed for queue %d\",\n+\t\t\t\t\tqueue_id);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tusleep(FPGA_TIMEOUT_CHECK_INTERVAL);\n+\t\tcounter++;\n+\t}\n+\n+\t/* Disable queue */\n+\tpayload = 0x00;\n+\tfpga_reg_write_8(d->mmio_base, offset + FPGA_LTE_FEC_RING_ENABLE,\n+\t\t\tpayload);\n+\n+\trte_bbdev_log_debug(\"FPGA Queue[%d] stopped\", queue_id);\n+\treturn 0;\n+}\n+\n+static inline uint16_t\n+get_queue_id(struct rte_bbdev_data *data, uint8_t q_idx)\n+{\n+\tuint16_t queue_id;\n+\n+\tfor (queue_id = 0; queue_id < data->num_queues; ++queue_id) {\n+\t\tstruct fpga_queue *q = data->queues[queue_id].queue_private;\n+\t\tif (q != NULL && q->q_idx == q_idx)\n+\t\t\treturn queue_id;\n+\t}\n+\n+\treturn -1;\n+}\n+\n+/* Interrupt handler triggered by FPGA dev for handling specific interrupt */\n+static void\n+fpga_dev_interrupt_handler(void *cb_arg)\n+{\n+\tstruct rte_bbdev *dev = cb_arg;\n+\tstruct fpga_lte_fec_device *fpga_dev = dev->data->dev_private;\n+\tstruct fpga_queue *q;\n+\tuint64_t ring_head;\n+\tuint64_t q_idx;\n+\tuint16_t queue_id;\n+\tuint8_t i;\n+\n+\t/* Scan queue assigned to this device */\n+\tfor (i = 0; i < FPGA_TOTAL_NUM_QUEUES; ++i) {\n+\t\tq_idx = 1ULL << i;\n+\t\tif (fpga_dev->q_bound_bit_map & q_idx) {\n+\t\t\tqueue_id = get_queue_id(dev->data, i);\n+\t\t\tif (queue_id == (uint16_t) -1)\n+\t\t\t\tcontinue;\n+\n+\t\t\t/* Check if completion head was changed */\n+\t\t\tq = dev->data->queues[queue_id].queue_private;\n+\t\t\tring_head = *q->ring_head_addr;\n+\t\t\tif (q->shadow_completion_head != ring_head &&\n+\t\t\t\tq->irq_enable == 1) {\n+\t\t\t\tq->shadow_completion_head = ring_head;\n+\t\t\t\trte_bbdev_pmd_callback_process(\n+\t\t\t\t\t\tdev,\n+\t\t\t\t\t\tRTE_BBDEV_EVENT_DEQUEUE,\n+\t\t\t\t\t\t&queue_id);\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+static int\n+fpga_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)\n+{\n+\tstruct fpga_queue *q = dev->data->queues[queue_id].queue_private;\n+\n+\tif (!rte_intr_cap_multiple(dev->intr_handle))\n+\t\treturn -ENOTSUP;\n+\n+\tq->irq_enable = 1;\n+\n+\treturn 0;\n+}\n+\n+static int\n+fpga_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)\n+{\n+\tstruct fpga_queue *q = dev->data->queues[queue_id].queue_private;\n+\tq->irq_enable = 0;\n+\n+\treturn 0;\n+}\n+\n+static int\n+fpga_intr_enable(struct rte_bbdev *dev)\n+{\n+\tint ret;\n+\tuint8_t i;\n+\n+\tif (!rte_intr_cap_multiple(dev->intr_handle)) {\n+\t\trte_bbdev_log(ERR, \"Multiple intr vector is not supported by FPGA (%s)\",\n+\t\t\t\tdev->data->name);\n+\t\treturn -ENOTSUP;\n+\t}\n+\n+\t/* Create event file descriptors for each of 64 queue. Event fds will be\n+\t * mapped to FPGA IRQs in rte_intr_enable(). This is a 1:1 mapping where\n+\t * the IRQ number is a direct translation to the queue number.\n+\t *\n+\t * 63 (FPGA_NUM_INTR_VEC) event fds are created as rte_intr_enable()\n+\t * mapped the first IRQ to already created interrupt event file\n+\t * descriptor (intr_handle->fd).\n+\t */\n+\tif (rte_intr_efd_enable(dev->intr_handle, FPGA_NUM_INTR_VEC)) {\n+\t\trte_bbdev_log(ERR, \"Failed to create fds for %u queues\",\n+\t\t\t\tdev->data->num_queues);\n+\t\treturn -1;\n+\t}\n+\n+\t/* TODO Each event file descriptor is overwritten by interrupt event\n+\t * file descriptor. 
That descriptor is added to epoll observed list.\n+\t * It ensures that callback function assigned to that descriptor will\n+\t * invoked when any FPGA queue issues interrupt.\n+\t */\n+\tfor (i = 0; i < FPGA_NUM_INTR_VEC; ++i)\n+\t\tdev->intr_handle->efds[i] = dev->intr_handle->fd;\n+\n+\tif (!dev->intr_handle->intr_vec) {\n+\t\tdev->intr_handle->intr_vec = rte_zmalloc(\"intr_vec\",\n+\t\t\t\tdev->data->num_queues * sizeof(int), 0);\n+\t\tif (!dev->intr_handle->intr_vec) {\n+\t\t\trte_bbdev_log(ERR, \"Failed to allocate %u vectors\",\n+\t\t\t\t\tdev->data->num_queues);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\tret = rte_intr_enable(dev->intr_handle);\n+\tif (ret < 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Couldn't enable interrupts for device: %s\",\n+\t\t\t\tdev->data->name);\n+\t\treturn ret;\n+\t}\n+\n+\tret = rte_intr_callback_register(dev->intr_handle,\n+\t\t\tfpga_dev_interrupt_handler, dev);\n+\tif (ret < 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Couldn't register interrupt callback for device: %s\",\n+\t\t\t\tdev->data->name);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static const struct rte_bbdev_ops fpga_ops = {\n+\t.setup_queues = fpga_setup_queues,\n+\t.intr_enable = fpga_intr_enable,\n+\t.close = fpga_dev_close,\n+\t.info_get = fpga_dev_info_get,\n+\t.queue_setup = fpga_queue_setup,\n+\t.queue_stop = fpga_queue_stop,\n+\t.queue_start = fpga_queue_start,\n+\t.queue_release = fpga_queue_release,\n+\t.queue_intr_enable = fpga_queue_intr_enable,\n+\t.queue_intr_disable = fpga_queue_intr_disable\n+};\n+\n+static inline void\n+fpga_dma_enqueue(struct fpga_queue *q, uint16_t num_desc,\n+\t\tstruct rte_bbdev_stats *queue_stats)\n+{\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+\tuint64_t start_time = 0;\n+\tqueue_stats->acc_offload_cycles = 0;\n+#else\n+\tRTE_SET_USED(queue_stats);\n+#endif\n+\n+\t/* Update tail and shadow_tail register */\n+\tq->tail = (q->tail + num_desc) & q->sw_ring_wrap_mask;\n+\n+\trte_wmb();\n+\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+\t/* Start time measurement for enqueue function offload. */\n+\tstart_time = rte_rdtsc_precise();\n+#endif\n+\tmmio_write_16(q->shadow_tail_addr, q->tail);\n+\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+\trte_wmb();\n+\tqueue_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;\n+#endif\n+}\n+\n+/* Calculates number of CBs in processed encoder TB based on 'r' and input\n+ * length.\n+ */\n+static inline uint8_t\n+get_num_cbs_in_op_enc(struct rte_bbdev_op_turbo_enc *turbo_enc)\n+{\n+\tuint8_t c, c_neg, r, crc24_bits = 0;\n+\tuint16_t k, k_neg, k_pos;\n+\tuint8_t cbs_in_op = 0;\n+\tint32_t length;\n+\n+\tlength = turbo_enc->input.length;\n+\tr = turbo_enc->tb_params.r;\n+\tc = turbo_enc->tb_params.c;\n+\tc_neg = turbo_enc->tb_params.c_neg;\n+\tk_neg = turbo_enc->tb_params.k_neg;\n+\tk_pos = turbo_enc->tb_params.k_pos;\n+\tcrc24_bits = 24;\n+\twhile (length > 0 && r < c) {\n+\t\tk = (r < c_neg) ? 
k_neg : k_pos;\n+\t\tlength -= (k - crc24_bits) >> 3;\n+\t\tr++;\n+\t\tcbs_in_op++;\n+\t}\n+\n+\treturn cbs_in_op;\n+}\n+\n+/* Calculates number of CBs in processed decoder TB based on 'r' and input\n+ * length.\n+ */\n+static inline uint16_t\n+get_num_cbs_in_op_dec(struct rte_bbdev_op_turbo_dec *turbo_dec)\n+{\n+\tuint8_t c, c_neg, r = 0;\n+\tuint16_t kw, k, k_neg, k_pos, cbs_in_op = 0;\n+\tint32_t length;\n+\n+\tlength = turbo_dec->input.length;\n+\tr = turbo_dec->tb_params.r;\n+\tc = turbo_dec->tb_params.c;\n+\tc_neg = turbo_dec->tb_params.c_neg;\n+\tk_neg = turbo_dec->tb_params.k_neg;\n+\tk_pos = turbo_dec->tb_params.k_pos;\n+\twhile (length > 0 && r < c) {\n+\t\tk = (r < c_neg) ? k_neg : k_pos;\n+\t\tkw = RTE_ALIGN_CEIL(k + 4, 32) * 3;\n+\t\tlength -= kw;\n+\t\tr++;\n+\t\tcbs_in_op++;\n+\t}\n+\n+\treturn cbs_in_op;\n+}\n+\n+/* Read flag value 0/1 from bitmap */\n+static inline bool\n+check_bit(uint32_t bitmap, uint32_t bitmask)\n+{\n+\treturn bitmap & bitmask;\n+}\n+\n+/* Print an error if a descriptor error has occurred.\n+ *  Return 0 on success, 1 on failure\n+ */\n+static inline int\n+check_desc_error(uint32_t error_code) {\n+\tswitch (error_code) {\n+\tcase DESC_ERR_NO_ERR:\n+\t\treturn 0;\n+\tcase DESC_ERR_K_OUT_OF_RANGE:\n+\t\trte_bbdev_log(ERR, \"Block_size_k is out of range (k<40 or k>6144)\");\n+\t\tbreak;\n+\tcase DESC_ERR_K_NOT_NORMAL:\n+\t\trte_bbdev_log(ERR, \"Block_size_k is not a normal value within normal range\");\n+\t\tbreak;\n+\tcase DESC_ERR_KPAI_NOT_NORMAL:\n+\t\trte_bbdev_log(ERR, \"Three_kpai is not a normal value for UL only\");\n+\t\tbreak;\n+\tcase DESC_ERR_DESC_OFFSET_ERR:\n+\t\trte_bbdev_log(ERR, \"Queue offset does not meet the expectation in the FPGA\");\n+\t\tbreak;\n+\tcase (DESC_ERR_K_OUT_OF_RANGE | DESC_ERR_DESC_OFFSET_ERR):\n+\t\trte_bbdev_log(ERR, \"Block_size_k is out of range (k<40 or k>6144) and queue offset error\");\n+\t\tbreak;\n+\tcase (DESC_ERR_K_NOT_NORMAL | DESC_ERR_DESC_OFFSET_ERR):\n+\t\trte_bbdev_log(ERR, \"Block_size_k is not a normal value within normal range and queue offset error\");\n+\t\tbreak;\n+\tcase (DESC_ERR_KPAI_NOT_NORMAL | DESC_ERR_DESC_OFFSET_ERR):\n+\t\trte_bbdev_log(ERR, \"Three_kpai is not a normal value for UL only and queue offset error\");\n+\t\tbreak;\n+\tcase DESC_ERR_DESC_READ_FAIL:\n+\t\trte_bbdev_log(ERR, \"Unsuccessful completion for descriptor read\");\n+\t\tbreak;\n+\tcase DESC_ERR_DESC_READ_TIMEOUT:\n+\t\trte_bbdev_log(ERR, \"Descriptor read time-out\");\n+\t\tbreak;\n+\tcase DESC_ERR_DESC_READ_TLP_POISONED:\n+\t\trte_bbdev_log(ERR, \"Descriptor read TLP poisoned\");\n+\t\tbreak;\n+\tcase DESC_ERR_CB_READ_FAIL:\n+\t\trte_bbdev_log(ERR, \"Unsuccessful completion for code block\");\n+\t\tbreak;\n+\tcase DESC_ERR_CB_READ_TIMEOUT:\n+\t\trte_bbdev_log(ERR, \"Code block read time-out\");\n+\t\tbreak;\n+\tcase DESC_ERR_CB_READ_TLP_POISONED:\n+\t\trte_bbdev_log(ERR, \"Code block read TLP poisoned\");\n+\t\tbreak;\n+\tdefault:\n+\t\trte_bbdev_log(ERR, \"Descriptor error: unknown error code %u\",\n+\t\t\t\terror_code);\n+\t\tbreak;\n+\t}\n+\treturn 1;\n+}\n+\n+/**\n+ * Set DMA descriptor for encode operation (1 Code Block)\n+ *\n+ * @param op\n+ *   Pointer to a single encode operation.\n+ * @param desc\n+ *   Pointer to DMA descriptor.\n+ * @param input\n+ *   Pointer to pointer to input data which will be encoded.\n+ * @param k\n+ *   K value (length of input in bits).\n+ * @param e\n+ *   E value (length of output in bits).\n+ * @param ncb\n+ *   Ncb value (size of the soft buffer).\n+ * @param out_length\n+ *   Length of output buffer\n+ * @param in_offset\n+ *   Input offset in rte_mbuf structure. It is used for calculating the point\n+ *   where data is starting.\n+ * @param out_offset\n+ *   Output offset in rte_mbuf structure. It is used for calculating the point\n+ *   where output data will be stored.\n+ * @param cbs_in_op\n+ *   Number of CBs contained in one operation.\n+ */\n+static inline int\n+fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,\n+\t\tstruct fpga_dma_enc_desc *desc, struct rte_mbuf *input,\n+\t\tstruct rte_mbuf *output, uint16_t k, uint16_t e, uint16_t ncb,\n+\t\tuint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,\n+\t\tuint8_t cbs_in_op)\n+\n+{\n+\t/* reset */\n+\tdesc->done = 0;\n+\tdesc->crc_en = check_bit(op->turbo_enc.op_flags,\n+\t\tRTE_BBDEV_TURBO_CRC_24B_ATTACH);\n+\tdesc->bypass_rm = !check_bit(op->turbo_enc.op_flags,\n+\t\tRTE_BBDEV_TURBO_RATE_MATCH);\n+\tdesc->k = k;\n+\tdesc->e = e;\n+\tdesc->ncb = ncb;\n+\tdesc->rv = op->turbo_enc.rv_index;\n+\tdesc->offset = desc_offset;\n+\t/* Set inbound data buffer address */\n+\tdesc->in_addr_hi = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(input, in_offset) >> 32);\n+\tdesc->in_addr_lw = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(input, in_offset));\n+\n+\tdesc->out_addr_hi = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(output, out_offset) >> 32);\n+\tdesc->out_addr_lw = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(output, out_offset));\n+\n+\t/* Save software context needed for dequeue */\n+\tdesc->op_addr = op;\n+\n+\t/* Set total number of CBs in an op */\n+\tdesc->cbs_in_op = cbs_in_op;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * Set DMA descriptor for decode operation (1 Code Block)\n+ *\n+ * @param op\n+ *   Pointer to a single decode operation.\n+ * @param desc\n+ *   Pointer to DMA descriptor.\n+ * @param input\n+ *   Pointer to pointer to input data which will be decoded.\n+ * @param in_length\n+ *   Length of an input.\n+ * @param k\n+ *   K value (length of an output in bits).\n+ * @param in_offset\n+ *   Input offset in rte_mbuf structure. It is used for calculating the point\n+ *   where data is starting.\n+ * @param out_offset\n+ *   Output offset in rte_mbuf structure. 
It is used for calculating the point\n+ *   where hard output data will be stored.\n+ * @param cbs_in_op\n+ *   Number of CBs contained in one operation.\n+ */\n+static inline int\n+fpga_dma_desc_td_fill(struct rte_bbdev_dec_op *op,\n+\t\tstruct fpga_dma_dec_desc *desc, struct rte_mbuf *input,\n+\t\tstruct rte_mbuf *output, uint16_t in_length, uint16_t k,\n+\t\tuint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,\n+\t\tuint8_t cbs_in_op)\n+{\n+\t/* reset */\n+\tdesc->done = 0;\n+\t/* Set inbound data buffer address */\n+\tdesc->in_addr_hi = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(input, in_offset) >> 32);\n+\tdesc->in_addr_lw = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(input, in_offset));\n+\tdesc->in_len = in_length;\n+\tdesc->k = k;\n+\tdesc->crc_type = !check_bit(op->turbo_dec.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_CRC_TYPE_24B);\n+\tif ((op->turbo_dec.code_block_mode == 0)\n+\t\t&& !check_bit(op->turbo_dec.op_flags,\n+\t\tRTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))\n+\t\tdesc->drop_crc = 1;\n+\tdesc->max_iter = op->turbo_dec.iter_max * 2;\n+\tdesc->offset = desc_offset;\n+\tdesc->out_addr_hi = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(output, out_offset) >> 32);\n+\tdesc->out_addr_lw = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(output, out_offset));\n+\n+\t/* Save software context needed for dequeue */\n+\tdesc->op_addr = op;\n+\n+\t/* Set total number of CBs in an op */\n+\tdesc->cbs_in_op = cbs_in_op;\n+\n+\treturn 0;\n+}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+/* Validates turbo encoder parameters */\n+static int\n+validate_enc_op(struct rte_bbdev_enc_op *op)\n+{\n+\tstruct rte_bbdev_op_turbo_enc *turbo_enc = &op->turbo_enc;\n+\tstruct rte_bbdev_op_enc_cb_params *cb = NULL;\n+\tstruct rte_bbdev_op_enc_tb_params *tb = NULL;\n+\tuint16_t kw, kw_neg, kw_pos;\n+\n+\tif (turbo_enc->input.length >\n+\t\t\tRTE_BBDEV_MAX_TB_SIZE >> 3) {\n+\t\trte_bbdev_log(ERR, \"TB size (%u) is too big, max: %d\",\n+\t\t\t\tturbo_enc->input.length, RTE_BBDEV_MAX_TB_SIZE);\n+\t\top->status = 1 << RTE_BBDEV_DATA_ERROR;\n+\t\treturn -1;\n+\t}\n+\n+\tif (op->mempool == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid mempool pointer\");\n+\t\treturn -1;\n+\t}\n+\tif (turbo_enc->input.data == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid input pointer\");\n+\t\treturn -1;\n+\t}\n+\tif (turbo_enc->output.data == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid output pointer\");\n+\t\treturn -1;\n+\t}\n+\tif (turbo_enc->rv_index > 3) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"rv_index (%u) is out of range 0 <= value <= 3\",\n+\t\t\t\tturbo_enc->rv_index);\n+\t\treturn -1;\n+\t}\n+\tif (turbo_enc->code_block_mode != 0 &&\n+\t\t\tturbo_enc->code_block_mode != 1) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"code_block_mode (%u) is out of range 0 <= value <= 1\",\n+\t\t\t\tturbo_enc->code_block_mode);\n+\t\treturn -1;\n+\t}\n+\n+\tif (turbo_enc->code_block_mode == 0) {\n+\t\ttb = &turbo_enc->tb_params;\n+\t\tif ((tb->k_neg < RTE_BBDEV_MIN_CB_SIZE\n+\t\t\t\t|| tb->k_neg > RTE_BBDEV_MAX_CB_SIZE)\n+\t\t\t\t&& tb->c_neg > 0) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"k_neg (%u) is out of range %u <= value <= %u\",\n+\t\t\t\t\ttb->k_neg, RTE_BBDEV_MIN_CB_SIZE,\n+\t\t\t\t\tRTE_BBDEV_MAX_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (tb->k_pos < RTE_BBDEV_MIN_CB_SIZE\n+\t\t\t\t|| tb->k_pos > RTE_BBDEV_MAX_CB_SIZE) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"k_pos (%u) is out of range %u <= value <= %u\",\n+\t\t\t\t\ttb->k_pos, RTE_BBDEV_MIN_CB_SIZE,\n+\t\t\t\t\tRTE_BBDEV_MAX_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (tb->c_neg 
> (RTE_BBDEV_MAX_CODE_BLOCKS - 1))\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"c_neg (%u) is out of range 0 <= value <= %u\",\n+\t\t\t\t\ttb->c_neg,\n+\t\t\t\t\tRTE_BBDEV_MAX_CODE_BLOCKS - 1);\n+\t\tif (tb->c < 1 || tb->c > RTE_BBDEV_MAX_CODE_BLOCKS) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"c (%u) is out of range 1 <= value <= %u\",\n+\t\t\t\t\ttb->c, RTE_BBDEV_MAX_CODE_BLOCKS);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (tb->cab > tb->c) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"cab (%u) is greater than c (%u)\",\n+\t\t\t\t\ttb->cab, tb->c);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif ((tb->ea < RTE_BBDEV_MIN_CB_SIZE || (tb->ea % 2))\n+\t\t\t\t&& tb->r < tb->cab) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"ea (%u) is less than %u or it is not even\",\n+\t\t\t\t\ttb->ea, RTE_BBDEV_MIN_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif ((tb->eb < RTE_BBDEV_MIN_CB_SIZE || (tb->eb % 2))\n+\t\t\t\t&& tb->c > tb->cab) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"eb (%u) is less than %u or it is not even\",\n+\t\t\t\t\ttb->eb, RTE_BBDEV_MIN_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tkw_neg = 3 * RTE_ALIGN_CEIL(tb->k_neg + 4,\n+\t\t\t\t\tRTE_BBDEV_C_SUBBLOCK);\n+\t\tif (tb->ncb_neg < tb->k_neg || tb->ncb_neg > kw_neg) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"ncb_neg (%u) is out of range (%u) k_neg <= value <= (%u) kw_neg\",\n+\t\t\t\t\ttb->ncb_neg, tb->k_neg, kw_neg);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tkw_pos = 3 * RTE_ALIGN_CEIL(tb->k_pos + 4,\n+\t\t\t\t\tRTE_BBDEV_C_SUBBLOCK);\n+\t\tif (tb->ncb_pos < tb->k_pos || tb->ncb_pos > kw_pos) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"ncb_pos (%u) is out of range (%u) k_pos <= value <= (%u) kw_pos\",\n+\t\t\t\t\ttb->ncb_pos, tb->k_pos, kw_pos);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (tb->r > (tb->c - 1)) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"r (%u) is greater than c - 1 (%u)\",\n+\t\t\t\t\ttb->r, tb->c - 1);\n+\t\t\treturn -1;\n+\t\t}\n+\t} else {\n+\t\tcb = &turbo_enc->cb_params;\n+\t\tif (cb->k < RTE_BBDEV_MIN_CB_SIZE\n+\t\t\t\t|| cb->k > RTE_BBDEV_MAX_CB_SIZE) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"k (%u) is out of range %u <= value <= %u\",\n+\t\t\t\t\tcb->k, RTE_BBDEV_MIN_CB_SIZE,\n+\t\t\t\t\tRTE_BBDEV_MAX_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tif (cb->e < RTE_BBDEV_MIN_CB_SIZE || (cb->e % 2)) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"e (%u) is less than %u or it is not even\",\n+\t\t\t\t\tcb->e, RTE_BBDEV_MIN_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tkw = RTE_ALIGN_CEIL(cb->k + 4, RTE_BBDEV_C_SUBBLOCK) * 3;\n+\t\tif (cb->ncb < cb->k || cb->ncb > kw) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"ncb (%u) is out of range (%u) k <= value <= (%u) kw\",\n+\t\t\t\t\tcb->ncb, cb->k, kw);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+#endif\n+\n+static inline char *\n+mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)\n+{\n+\tif (unlikely(len > rte_pktmbuf_tailroom(m)))\n+\t\treturn NULL;\n+\n+\tchar *tail = (char *)m->buf_addr + m->data_off + m->data_len;\n+\tm->data_len = (uint16_t)(m->data_len + len);\n+\tm_head->pkt_len  = (m_head->pkt_len + len);\n+\treturn tail;\n+}\n+\n+static inline int\n+enqueue_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t desc_offset)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tstruct rte_mbuf *input;\n+\tstruct rte_mbuf *output;\n+\tint ret;\n+\tuint16_t k, e, ncb, ring_offset;\n+\tuint32_t total_left, in_length, out_length, in_offset, out_offset;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t/* Validate op structure */\n+\tif (validate_enc_op(op) == -1) 
{\n+\t\trte_bbdev_log(ERR, \"Turbo encoder validation failed\");\n+\t\treturn -EINVAL;\n+\t}\n+#endif\n+\n+\tinput = op->turbo_enc.input.data;\n+\toutput = op->turbo_enc.output.data;\n+\tin_offset = op->turbo_enc.input.offset;\n+\tout_offset = op->turbo_enc.output.offset;\n+\ttotal_left = op->turbo_enc.input.length;\n+\tk = op->turbo_enc.cb_params.k;\n+\te = op->turbo_enc.cb_params.e;\n+\tncb = op->turbo_enc.cb_params.ncb;\n+\n+\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))\n+\t\tin_length = ((k - 24) >> 3);\n+\telse\n+\t\tin_length = k >> 3;\n+\n+\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_RATE_MATCH))\n+\t\tout_length = (e + 7) >> 3;\n+\telse\n+\t\tout_length = (k >> 3) * 3 + 2;\n+\n+\tmbuf_append(output, output, out_length);\n+\n+\t/* Offset into the ring */\n+\tring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);\n+\t/* Setup DMA Descriptor */\n+\tdesc = q->ring_addr + ring_offset;\n+\n+\tret = fpga_dma_desc_te_fill(op, &desc->enc_req, input, output, k, e,\n+\t\t\tncb, in_offset, out_offset, ring_offset, 1);\n+\tif (unlikely(ret < 0))\n+\t\treturn ret;\n+\n+\t/* Update lengths */\n+\ttotal_left -= in_length;\n+\top->turbo_enc.output.length += out_length;\n+\n+\tif (total_left > 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\ttotal_left, in_length);\n+\t\treturn -1;\n+\t}\n+\n+\treturn 1;\n+}\n+\n+static inline int\n+enqueue_enc_one_op_tb(struct fpga_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t desc_offset, uint8_t cbs_in_op)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tstruct rte_mbuf *input, *output_head, *output;\n+\tint ret;\n+\tuint8_t r, c, crc24_bits = 0;\n+\tuint16_t k, e, ncb, ring_offset;\n+\tuint32_t mbuf_total_left, in_length, out_length, in_offset, out_offset;\n+\tuint32_t seg_total_left;\n+\tuint16_t current_enqueued_cbs = 0;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t/* Validate op structure */\n+\tif (validate_enc_op(op) == -1) {\n+\t\trte_bbdev_log(ERR, \"Turbo encoder validation failed\");\n+\t\treturn -EINVAL;\n+\t}\n+#endif\n+\n+\tinput = op->turbo_enc.input.data;\n+\toutput_head = output = op->turbo_enc.output.data;\n+\tin_offset = op->turbo_enc.input.offset;\n+\tout_offset = op->turbo_enc.output.offset;\n+\tmbuf_total_left = op->turbo_enc.input.length;\n+\n+\tc = op->turbo_enc.tb_params.c;\n+\tr = op->turbo_enc.tb_params.r;\n+\n+\tif (check_bit(op->turbo_enc.op_flags, RTE_BBDEV_TURBO_CRC_24B_ATTACH))\n+\t\tcrc24_bits = 24;\n+\n+\twhile (mbuf_total_left > 0 && r < c && input != NULL) {\n+\t\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n+\n+\t\te = (r < op->turbo_enc.tb_params.cab) ?\n+\t\t\t\top->turbo_enc.tb_params.ea :\n+\t\t\t\top->turbo_enc.tb_params.eb;\n+\t\tk = (r < op->turbo_enc.tb_params.c_neg) ?\n+\t\t\t\top->turbo_enc.tb_params.k_neg :\n+\t\t\t\top->turbo_enc.tb_params.k_pos;\n+\t\tncb = (r < op->turbo_enc.tb_params.c_neg) ?\n+\t\t\t\top->turbo_enc.tb_params.ncb_neg :\n+\t\t\t\top->turbo_enc.tb_params.ncb_pos;\n+\n+\t\tin_length = ((k - crc24_bits) >> 3);\n+\n+\t\tif (check_bit(op->turbo_enc.op_flags,\n+\t\t\tRTE_BBDEV_TURBO_RATE_MATCH))\n+\t\t\tout_length = (e + 7) >> 3;\n+\t\telse\n+\t\t\tout_length = (k >> 3) * 3 + 2;\n+\n+\t\tmbuf_append(output_head, output, out_length);\n+\n+\t\t/* Setup DMA Descriptor */\n+\t\tring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);\n+\t\tdesc = q->ring_addr + ring_offset;\n+\t\tret = fpga_dma_desc_te_fill(op, &desc->enc_req, input, output,\n+\t\t\t\tk, e, ncb, 
in_offset, out_offset, ring_offset,\n+\t\t\t\tcbs_in_op);\n+\t\tif (unlikely(ret < 0))\n+\t\t\treturn ret;\n+\n+\t\trte_bbdev_log_debug(\"DMA request desc %p\", desc);\n+\n+\t\t/* Update lengths */\n+\t\top->turbo_enc.output.length += out_length;\n+\t\tmbuf_total_left -= in_length;\n+\n+\t\t/* Update offsets */\n+\t\tif (seg_total_left == in_length) {\n+\t\t\t/* Go to the next mbuf */\n+\t\t\tinput = input->next;\n+\t\t\toutput = output->next;\n+\t\t\tin_offset = 0;\n+\t\t\tout_offset = 0;\n+\t\t} else {\n+\t\t\tin_offset += in_length;\n+\t\t\tout_offset += out_length;\n+\t\t}\n+\n+\t\tr++;\n+\t\tdesc_offset++;\n+\t\tcurrent_enqueued_cbs++;\n+\t}\n+\n+\tif (mbuf_total_left > 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Some data still left for processing: mbuf_total_left = %u\",\n+\t\t\t\tmbuf_total_left);\n+\t\treturn -1;\n+\t}\n+\n+\treturn current_enqueued_cbs;\n+}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+/* Validates turbo decoder parameters */\n+static int\n+validate_dec_op(struct rte_bbdev_dec_op *op)\n+{\n+\tstruct rte_bbdev_op_turbo_dec *turbo_dec = &op->turbo_dec;\n+\tstruct rte_bbdev_op_dec_cb_params *cb = NULL;\n+\tstruct rte_bbdev_op_dec_tb_params *tb = NULL;\n+\n+\tif (op->mempool == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid mempool pointer\");\n+\t\treturn -1;\n+\t}\n+\tif (turbo_dec->input.data == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid input pointer\");\n+\t\treturn -1;\n+\t}\n+\tif (turbo_dec->hard_output.data == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid hard_output pointer\");\n+\t\treturn -1;\n+\t}\n+\tif (turbo_dec->rv_index > 3) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"rv_index (%u) is out of range 0 <= value <= 3\",\n+\t\t\t\tturbo_dec->rv_index);\n+\t\treturn -1;\n+\t}\n+\tif (turbo_dec->iter_min < 1) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"iter_min (%u) is less than 1\",\n+\t\t\t\tturbo_dec->iter_min);\n+\t\treturn -1;\n+\t}\n+\tif (turbo_dec->iter_max <= 2) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"iter_max (%u) is less than or equal to 2\",\n+\t\t\t\tturbo_dec->iter_max);\n+\t\treturn -1;\n+\t}\n+\tif (turbo_dec->iter_min > turbo_dec->iter_max) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"iter_min (%u) is greater than iter_max (%u)\",\n+\t\t\t\tturbo_dec->iter_min, turbo_dec->iter_max);\n+\t\treturn -1;\n+\t}\n+\tif (turbo_dec->code_block_mode != 0 &&\n+\t\t\tturbo_dec->code_block_mode != 1) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"code_block_mode (%u) is out of range 0 <= value <= 1\",\n+\t\t\t\tturbo_dec->code_block_mode);\n+\t\treturn -1;\n+\t}\n+\n+\tif (turbo_dec->code_block_mode == 0) {\n+\n+\t\tif ((turbo_dec->op_flags &\n+\t\t\tRTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP) &&\n+\t\t\t!(turbo_dec->op_flags & RTE_BBDEV_TURBO_CRC_TYPE_24B)) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP should accompany RTE_BBDEV_TURBO_CRC_TYPE_24B\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\ttb = &turbo_dec->tb_params;\n+\t\tif ((tb->k_neg < RTE_BBDEV_MIN_CB_SIZE\n+\t\t\t\t|| tb->k_neg > RTE_BBDEV_MAX_CB_SIZE)\n+\t\t\t\t&& tb->c_neg > 0) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"k_neg (%u) is out of range %u <= value <= %u\",\n+\t\t\t\t\ttb->k_neg, RTE_BBDEV_MIN_CB_SIZE,\n+\t\t\t\t\tRTE_BBDEV_MAX_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif ((tb->k_pos < RTE_BBDEV_MIN_CB_SIZE\n+\t\t\t\t|| tb->k_pos > RTE_BBDEV_MAX_CB_SIZE)\n+\t\t\t\t&& tb->c > tb->c_neg) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"k_pos (%u) is out of range %u <= value <= %u\",\n+\t\t\t\t\ttb->k_pos, RTE_BBDEV_MIN_CB_SIZE,\n+\t\t\t\t\tRTE_BBDEV_MAX_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (tb->c_neg > 
(RTE_BBDEV_MAX_CODE_BLOCKS - 1))\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"c_neg (%u) is out of range 0 <= value <= %u\",\n+\t\t\t\t\ttb->c_neg,\n+\t\t\t\t\tRTE_BBDEV_MAX_CODE_BLOCKS - 1);\n+\t\tif (tb->c < 1 || tb->c > RTE_BBDEV_MAX_CODE_BLOCKS) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"c (%u) is out of range 1 <= value <= %u\",\n+\t\t\t\t\ttb->c, RTE_BBDEV_MAX_CODE_BLOCKS);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (tb->cab > tb->c) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"cab (%u) is greater than c (%u)\",\n+\t\t\t\t\ttb->cab, tb->c);\n+\t\t\treturn -1;\n+\t\t}\n+\t} else {\n+\n+\t\tif (turbo_dec->op_flags & RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP is invalid in CB-mode\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tcb = &turbo_dec->cb_params;\n+\t\tif (cb->k < RTE_BBDEV_MIN_CB_SIZE\n+\t\t\t\t|| cb->k > RTE_BBDEV_MAX_CB_SIZE) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"k (%u) is out of range %u <= value <= %u\",\n+\t\t\t\t\tcb->k, RTE_BBDEV_MIN_CB_SIZE,\n+\t\t\t\t\tRTE_BBDEV_MAX_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+#endif\n+\n+static inline int\n+enqueue_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,\n+\t\tuint16_t desc_offset)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tstruct rte_mbuf *input;\n+\tstruct rte_mbuf *output;\n+\tint ret;\n+\tuint16_t k, kw, ring_offset;\n+\tuint32_t total_left, in_length, out_length, in_offset, out_offset;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t/* Validate op structure */\n+\tif (validate_dec_op(op) == -1) {\n+\t\trte_bbdev_log(ERR, \"Turbo decoder validation failed\");\n+\t\treturn -EINVAL;\n+\t}\n+#endif\n+\n+\tinput = op->turbo_dec.input.data;\n+\toutput = op->turbo_dec.hard_output.data;\n+\ttotal_left = op->turbo_dec.input.length;\n+\tin_offset = op->turbo_dec.input.offset;\n+\tout_offset = op->turbo_dec.hard_output.offset;\n+\n+\tk = op->turbo_dec.cb_params.k;\n+\tkw = RTE_ALIGN_CEIL(k + 4, 32) * 3;\n+\tin_length = kw;\n+\tout_length = k >> 3;\n+\n+\tmbuf_append(output, output, out_length);\n+\n+\t/* Setup DMA Descriptor */\n+\tring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + ring_offset;\n+\tret = fpga_dma_desc_td_fill(op, &desc->dec_req, input, output,\n+\t\t\tin_length, k, in_offset, out_offset, ring_offset, 1);\n+\tif (unlikely(ret < 0))\n+\t\treturn ret;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tprint_dma_dec_desc_debug_info(desc);\n+#endif\n+\n+\t/* Update lengths */\n+\ttotal_left -= in_length;\n+\top->turbo_dec.hard_output.length += out_length;\n+\n+\tif (total_left > 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\ttotal_left, in_length);\n+\t\treturn -1;\n+\t}\n+\n+\treturn 1;\n+}\n+\n+\n+static inline int\n+enqueue_dec_one_op_tb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,\n+\t\tuint16_t desc_offset, uint8_t cbs_in_op)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tstruct rte_mbuf *input, *output_head, *output;\n+\tint ret;\n+\tuint8_t r, c;\n+\tuint16_t k, kw, in_length, out_length, ring_offset;\n+\tuint32_t mbuf_total_left, seg_total_left, in_offset, out_offset;\n+\tuint16_t current_enqueued_cbs = 0;\n+\tuint16_t crc24_overlap = 0;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t/* Validate op structure */\n+\tif (validate_dec_op(op) == -1) {\n+\t\trte_bbdev_log(ERR, \"Turbo decoder validation failed\");\n+\t\treturn -EINVAL;\n+\t}\n+#endif\n+\n+\tinput = op->turbo_dec.input.data;\n+\toutput_head = output = 
op->turbo_dec.hard_output.data;\n+\tmbuf_total_left = op->turbo_dec.input.length;\n+\tin_offset = op->turbo_dec.input.offset;\n+\tout_offset = op->turbo_dec.hard_output.offset;\n+\n+\tif (!check_bit(op->turbo_dec.op_flags,\n+\t\tRTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))\n+\t\tcrc24_overlap = 24;\n+\n+\tc = op->turbo_dec.tb_params.c;\n+\tr = op->turbo_dec.tb_params.r;\n+\n+\twhile (mbuf_total_left > 0 && r < c && input != NULL) {\n+\t\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n+\t\tk = (r < op->turbo_dec.tb_params.c_neg) ?\n+\t\t\t\top->turbo_dec.tb_params.k_neg :\n+\t\t\t\top->turbo_dec.tb_params.k_pos;\n+\t\tkw = RTE_ALIGN_CEIL(k + 4, 32) * 3;\n+\n+\t\tin_length = kw;\n+\t\tout_length = (k - crc24_overlap) >> 3;\n+\n+\t\tmbuf_append(output_head, output, out_length);\n+\n+\t\tif (seg_total_left < in_length) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"Partial CB found in a TB. FPGA Driver doesn't support scatter-gather operations!\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Setup DMA Descriptor */\n+\t\tring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);\n+\t\tdesc = q->ring_addr + ring_offset;\n+\t\tret = fpga_dma_desc_td_fill(op, &desc->dec_req, input, output,\n+\t\t\t\tin_length, k, in_offset, out_offset,\n+\t\t\t\tring_offset, cbs_in_op);\n+\t\tif (unlikely(ret < 0))\n+\t\t\treturn ret;\n+\n+\t\t/* Update lengths */\n+\t\tret = rte_pktmbuf_trim(op->turbo_dec.hard_output.data,\n+\t\t\t\t(crc24_overlap >> 3));\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\tif (ret < 0) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"The length to remove is greater than the length of the last segment\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+#endif\n+\t\top->turbo_dec.hard_output.length += out_length;\n+\t\tmbuf_total_left -= in_length;\n+\n+\t\t/* Update offsets */\n+\t\tif (seg_total_left == in_length) {\n+\t\t\t/* Go to the next mbuf */\n+\t\t\tinput = input->next;\n+\t\t\toutput = output->next;\n+\t\t\tin_offset = 0;\n+\t\t\tout_offset = 0;\n+\t\t} else {\n+\t\t\tin_offset += in_length;\n+\t\t\tout_offset += out_length;\n+\t\t}\n+\n+\t\tr++;\n+\t\tdesc_offset++;\n+\t\tcurrent_enqueued_cbs++;\n+\t}\n+\n+\tif (mbuf_total_left > 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Some data still left for processing: mbuf_total_left = %u\",\n+\t\t\t\tmbuf_total_left);\n+\t\treturn -1;\n+\t}\n+\n+\treturn current_enqueued_cbs;\n+}\n+\n+static uint16_t\n+fpga_enqueue_enc(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tuint8_t cbs_in_op;\n+\tuint16_t i, total_enqueued_cbs = 0;\n+\tint32_t avail;\n+\tint enqueued_cbs;\n+\tstruct fpga_queue *q = q_data->queue_private;\n+\tunion fpga_dma_desc *desc;\n+\n+\t/* Check if queue is not full */\n+\tif (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==\n+\t\t\tq->head_free_desc))\n+\t\treturn 0;\n+\n+\t/* Calculates available space */\n+\tavail = (q->head_free_desc > q->tail) ?\n+\t\tq->head_free_desc - q->tail - 1 :\n+\t\tq->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\tif (ops[i]->turbo_enc.code_block_mode == 0) {\n+\t\t\tcbs_in_op = get_num_cbs_in_op_enc(&ops[i]->turbo_enc);\n+\t\t\t/* Check if there is available space for further\n+\t\t\t * processing\n+\t\t\t */\n+\t\t\tif (unlikely(avail - cbs_in_op < 0))\n+\t\t\t\tbreak;\n+\t\t\tavail -= cbs_in_op;\n+\t\t\tenqueued_cbs = enqueue_enc_one_op_tb(q, ops[i],\n+\t\t\t\t\ttotal_enqueued_cbs, cbs_in_op);\n+\t\t} else {\n+\t\t\t/* Check if there is available space for further\n+\t\t\t * processing\n+\t\t\t */\n+\t\t\tif 
(unlikely(avail - 1 < 0))\n+\t\t\t\tbreak;\n+\t\t\tavail -= 1;\n+\t\t\tenqueued_cbs = enqueue_enc_one_op_cb(q, ops[i],\n+\t\t\t\t\ttotal_enqueued_cbs);\n+\t\t}\n+\n+\t\tif (enqueued_cbs < 0)\n+\t\t\tbreak;\n+\n+\t\ttotal_enqueued_cbs += enqueued_cbs;\n+\n+\t\trte_bbdev_log_debug(\"enqueuing enc ops [%d/%d] | head %d | tail %d\",\n+\t\t\t\ttotal_enqueued_cbs, num,\n+\t\t\t\tq->head_free_desc, q->tail);\n+\t}\n+\n+\t/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt\n+\t * only when all previous CBs were already processed.\n+\t */\n+\tdesc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc->enc_req.irq_en = q->irq_enable;\n+\n+\tfpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n+}\n+\n+static uint16_t\n+fpga_enqueue_dec(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tuint8_t cbs_in_op;\n+\tuint16_t i, total_enqueued_cbs = 0;\n+\tint32_t avail;\n+\tint enqueued_cbs;\n+\tstruct fpga_queue *q = q_data->queue_private;\n+\tunion fpga_dma_desc *desc;\n+\n+\t/* Check if queue is not full */\n+\tif (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==\n+\t\t\tq->head_free_desc))\n+\t\treturn 0;\n+\n+\t/* Calculates available space */\n+\tavail = (q->head_free_desc > q->tail) ?\n+\t\tq->head_free_desc - q->tail - 1 :\n+\t\tq->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\tif (ops[i]->turbo_dec.code_block_mode == 0) {\n+\t\t\tcbs_in_op = get_num_cbs_in_op_dec(&ops[i]->turbo_dec);\n+\t\t\t/* Check if there is available space for further\n+\t\t\t * processing\n+\t\t\t */\n+\t\t\tif (unlikely(avail - cbs_in_op < 0))\n+\t\t\t\tbreak;\n+\t\t\tavail -= cbs_in_op;\n+\t\t\tenqueued_cbs = enqueue_dec_one_op_tb(q, ops[i],\n+\t\t\t\t\ttotal_enqueued_cbs, cbs_in_op);\n+\t\t} else {\n+\t\t\t/* Check if there is available space for further\n+\t\t\t * processing\n+\t\t\t */\n+\t\t\tif (unlikely(avail - 1 < 0))\n+\t\t\t\tbreak;\n+\t\t\tavail -= 1;\n+\t\t\tenqueued_cbs = enqueue_dec_one_op_cb(q, ops[i],\n+\t\t\t\t\ttotal_enqueued_cbs);\n+\t\t}\n+\n+\t\tif (enqueued_cbs < 0)\n+\t\t\tbreak;\n+\n+\t\ttotal_enqueued_cbs += enqueued_cbs;\n+\n+\t\trte_bbdev_log_debug(\"enqueuing dec ops [%d/%d] | head %d | tail %d\",\n+\t\t\t\ttotal_enqueued_cbs, num,\n+\t\t\t\tq->head_free_desc, q->tail);\n+\t}\n+\n+\t/* Set interrupt bit for last CB in enqueued ops. 
FPGA issues interrupt\n+\t * only when all previous CBs were already processed.\n+\t */\n+\tdesc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc->dec_req.irq_en = q->irq_enable;\n+\n+\tfpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n+}\n+\n+static inline int\n+dequeue_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,\n+\t\tuint16_t desc_offset)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tint desc_error = 0;\n+\n+\t/* Set current desc */\n+\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\n+\t/* Check if done */\n+\tif (desc->enc_req.done == 0)\n+\t\treturn -1;\n+\n+\t/* Make sure the response is read atomically */\n+\trte_smp_rmb();\n+\n+\trte_bbdev_log_debug(\"DMA response desc %p\", desc);\n+\n+\t*op = desc->enc_req.op_addr;\n+\t/* Check the descriptor error field, return 1 on error */\n+\tdesc_error = check_desc_error(desc->enc_req.error);\n+\t(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;\n+\n+\treturn 1;\n+}\n+\n+static inline int\n+dequeue_enc_one_op_tb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,\n+\t\tuint16_t desc_offset)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tuint8_t cbs_in_op, cb_idx;\n+\tint desc_error = 0;\n+\tint status = 0;\n+\n+\t/* Set descriptor */\n+\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\n+\t/* Verify if done bit is set */\n+\tif (desc->enc_req.done == 0)\n+\t\treturn -1;\n+\n+\t/* Make sure the response is read atomically */\n+\trte_smp_rmb();\n+\n+\t/* Verify if done bit in all CBs is set */\n+\tcbs_in_op = desc->enc_req.cbs_in_op;\n+\tfor (cb_idx = 1; cb_idx < cbs_in_op; ++cb_idx) {\n+\t\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset +\n+\t\t\t\tcb_idx) & q->sw_ring_wrap_mask);\n+\t\tif (desc->enc_req.done == 0)\n+\t\t\treturn -1;\n+\t}\n+\n+\t/* Make sure the response is read atomically */\n+\trte_smp_rmb();\n+\n+\tfor (cb_idx = 0; cb_idx < cbs_in_op; ++cb_idx) {\n+\t\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset +\n+\t\t\t\tcb_idx) & q->sw_ring_wrap_mask);\n+\t\t/* Check the descriptor error field, return 1 on error */\n+\t\tdesc_error = check_desc_error(desc->enc_req.error);\n+\t\tstatus |= desc_error << RTE_BBDEV_DATA_ERROR;\n+\t\trte_bbdev_log_debug(\"DMA response desc %p\", desc);\n+\t}\n+\n+\t*op = desc->enc_req.op_addr;\n+\t(*op)->status = status;\n+\treturn cbs_in_op;\n+}\n+\n+static inline int\n+dequeue_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,\n+\t\tuint16_t desc_offset)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tint desc_error = 0;\n+\t/* Set descriptor */\n+\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\n+\t/* Verify done bit is set */\n+\tif (desc->dec_req.done == 0)\n+\t\treturn -1;\n+\n+\t/* Make sure the response is read atomically */\n+\trte_smp_rmb();\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tprint_dma_dec_desc_debug_info(desc);\n+\n+#endif\n+\n+\t*op = desc->dec_req.op_addr;\n+\t/* FPGA reports in half-iterations, from 0 to 31; get ceiling */\n+\t(*op)->turbo_dec.iter_count = (desc->dec_req.iter + 2) >> 1;\n+\t/* crc_pass = 0 when decoder fails */\n+\t(*op)->status = !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;\n+\t/* Check the descriptor error field, return 1 on error */\n+\tdesc_error = check_desc_error(desc->enc_req.error);\n+\t(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;\n+\treturn 1;\n+}\n+\n+static inline int\n+dequeue_dec_one_op_tb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,\n+\t\tuint16_t desc_offset)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tuint8_t cbs_in_op, cb_idx, iter_count = 0;\n+\tint status = 0;\n+\tint desc_error = 0;\n+\t/* Set descriptor */\n+\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\n+\t/* Verify if done bit is set */\n+\tif (desc->dec_req.done == 0)\n+\t\treturn -1;\n+\n+\t/* Make sure the response is read atomically */\n+\trte_smp_rmb();\n+\n+\t/* Verify if done bit in all CBs is set */\n+\tcbs_in_op = desc->dec_req.cbs_in_op;\n+\tfor (cb_idx = 1; cb_idx < cbs_in_op; ++cb_idx) {\n+\t\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset +\n+\t\t\t\tcb_idx) & q->sw_ring_wrap_mask);\n+\t\tif (desc->dec_req.done == 0)\n+\t\t\treturn -1;\n+\t}\n+\n+\t/* Make sure the response is read atomically */\n+\trte_smp_rmb();\n+\n+\tfor (cb_idx = 0; cb_idx < cbs_in_op; ++cb_idx) {\n+\t\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset +\n+\t\t\t\tcb_idx) & q->sw_ring_wrap_mask);\n+\t\t/* get max iter_count for all CBs in op */\n+\t\titer_count = RTE_MAX(iter_count, (uint8_t) desc->dec_req.iter);\n+\t\t/* crc_pass = 0 when decoder fails, one fails all */\n+\t\tstatus |= !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;\n+\t\t/* Check the descriptor error field, return 1 on error */\n+\t\tdesc_error = check_desc_error(desc->enc_req.error);\n+\t\tstatus |= desc_error << RTE_BBDEV_DATA_ERROR;\n+\t\trte_bbdev_log_debug(\"DMA response desc %p\", desc);\n+\t}\n+\n+\t*op = desc->dec_req.op_addr;\n+\n+\t/* FPGA reports in half-iterations, get ceiling */\n+\t(*op)->turbo_dec.iter_count = (iter_count + 2) >> 1;\n+\t(*op)->status = status;\n+\treturn cbs_in_op;\n+}\n+\n+static uint16_t\n+fpga_dequeue_enc(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct fpga_queue *q = q_data->queue_private;\n+\tuint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;\n+\tuint16_t i;\n+\tuint16_t dequeued_cbs = 0;\n+\tstruct rte_bbdev_enc_op *op;\n+\tint ret;\n+\n+\tfor (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {\n+\t\top = (q->ring_addr + ((q->head_free_desc + dequeued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask))->enc_req.op_addr;\n+\t\tif (op->turbo_enc.code_block_mode == 0)\n+\t\t\tret = dequeue_enc_one_op_tb(q, &ops[i], dequeued_cbs);\n+\t\telse\n+\t\t\tret = dequeue_enc_one_op_cb(q, &ops[i], dequeued_cbs);\n+\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\n+\t\tdequeued_cbs += ret;\n+\n+\t\trte_bbdev_log_debug(\"dequeuing enc ops [%d/%d] | head %d | tail %d\",\n+\t\t\t\tdequeued_cbs, num, q->head_free_desc, q->tail);\n+\t}\n+\n+\t/* Update head */\n+\tq->head_free_desc = (q->head_free_desc + dequeued_cbs) &\n+\t\t\tq->sw_ring_wrap_mask;\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.dequeued_count += i;\n+\n+\treturn i;\n+}\n+\n+static uint16_t\n+fpga_dequeue_dec(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct fpga_queue *q = q_data->queue_private;\n+\tuint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;\n+\tuint16_t 
i;\n+\tuint16_t dequeued_cbs = 0;\n+\tstruct rte_bbdev_dec_op *op;\n+\tint ret;\n+\n+\tfor (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {\n+\t\top = (q->ring_addr + ((q->head_free_desc + dequeued_cbs)\n+\t\t\t& q->sw_ring_wrap_mask))->dec_req.op_addr;\n+\t\tif (op->turbo_dec.code_block_mode == 0)\n+\t\t\tret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs);\n+\t\telse\n+\t\t\tret = dequeue_dec_one_op_cb(q, &ops[i], dequeued_cbs);\n+\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\n+\t\tdequeued_cbs += ret;\n+\n+\t\trte_bbdev_log_debug(\"dequeuing dec ops [%d/%d] | head %d | tail %d\",\n+\t\t\t\tdequeued_cbs, num, q->head_free_desc, q->tail);\n+\t}\n+\n+\t/* Update head */\n+\tq->head_free_desc = (q->head_free_desc + dequeued_cbs) &\n+\t\t\tq->sw_ring_wrap_mask;\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.dequeued_count += i;\n+\n+\treturn i;\n+}\n+\n+/* Initialization Function */\n+static void\n+fpga_lte_fec_init(struct rte_bbdev *dev)\n+{\n+\tstruct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);\n+\n+\tdev->dev_ops = &fpga_ops;\n+\tdev->enqueue_enc_ops = fpga_enqueue_enc;\n+\tdev->enqueue_dec_ops = fpga_enqueue_dec;\n+\tdev->dequeue_enc_ops = fpga_dequeue_enc;\n+\tdev->dequeue_dec_ops = fpga_dequeue_dec;\n+\n+\t((struct fpga_lte_fec_device *) dev->data->dev_private)->pf_device =\n+\t\t\t!strcmp(dev->device->driver->name,\n+\t\t\t\t\tRTE_STR(FPGA_LTE_FEC_PF_DRIVER_NAME));\n+\t((struct fpga_lte_fec_device *) dev->data->dev_private)->mmio_base =\n+\t\t\tpci_dev->mem_resource[0].addr;\n+\n+\trte_bbdev_log_debug(\n+\t\t\t\"Init device %s [%s] @ virtaddr %p phyaddr %#\"PRIx64,\n+\t\t\tdev->device->driver->name, dev->data->name,\n+\t\t\t(void *)pci_dev->mem_resource[0].addr,\n+\t\t\tpci_dev->mem_resource[0].phys_addr);\n+}\n+\n+static int\n+fpga_lte_fec_probe(struct rte_pci_driver *pci_drv __rte_unused,\n+\tstruct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_bbdev *bbdev = NULL;\n+\tchar dev_name[RTE_BBDEV_NAME_MAX_LEN];\n+\n+\tif (pci_dev == NULL) {\n+\t\trte_bbdev_log(ERR, \"NULL PCI device\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));\n+\n+\t/* Allocate memory to be used privately by drivers */\n+\tbbdev = rte_bbdev_allocate(pci_dev->device.name);\n+\tif (bbdev == NULL)\n+\t\treturn -ENODEV;\n+\n+\t/* allocate device private memory */\n+\tbbdev->data->dev_private = rte_zmalloc_socket(dev_name,\n+\t\t\tsizeof(struct fpga_lte_fec_device), RTE_CACHE_LINE_SIZE,\n+\t\t\tpci_dev->device.numa_node);\n+\n+\tif (bbdev->data->dev_private == NULL) {\n+\t\trte_bbdev_log(CRIT,\n+\t\t\t\t\"Allocation of %zu bytes for device \\\"%s\\\" failed\",\n+\t\t\t\tsizeof(struct fpga_lte_fec_device), dev_name);\n+\t\trte_bbdev_release(bbdev);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Fill HW specific part of device structure */\n+\tbbdev->device = &pci_dev->device;\n+\tbbdev->intr_handle = &pci_dev->intr_handle;\n+\tbbdev->data->socket_id = pci_dev->device.numa_node;\n+\n+\t/* Invoke FEC FPGA device initialization function */\n+\tfpga_lte_fec_init(bbdev);\n+\n+\trte_bbdev_log_debug(\"bbdev id = %u [%s]\",\n+\t\t\tbbdev->data->dev_id, dev_name);\n+\n+\tstruct fpga_lte_fec_device *d = bbdev->data->dev_private;\n+\tuint32_t version_id = fpga_reg_read_32(d->mmio_base,\n+\t\t\tFPGA_LTE_FEC_VERSION_ID);\n+\trte_bbdev_log(INFO, \"FEC FPGA RTL v%u.%u\",\n+\t\t((uint16_t)(version_id >> 16)), ((uint16_t)version_id));\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tif 
(!strcmp(bbdev->device->driver->name,\n+\t\t\tRTE_STR(FPGA_LTE_FEC_PF_DRIVER_NAME)))\n+\t\tprint_static_reg_debug_info(d->mmio_base);\n+#endif\n+\treturn 0;\n+}\n+\n+static int\n+fpga_lte_fec_remove(struct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_bbdev *bbdev;\n+\tint ret;\n+\tuint8_t dev_id;\n+\n+\tif (pci_dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\t/* Find device */\n+\tbbdev = rte_bbdev_get_named_dev(pci_dev->device.name);\n+\tif (bbdev == NULL) {\n+\t\trte_bbdev_log(CRIT,\n+\t\t\t\t\"Couldn't find HW dev \\\"%s\\\" to uninitialise it\",\n+\t\t\t\tpci_dev->device.name);\n+\t\treturn -ENODEV;\n+\t}\n+\tdev_id = bbdev->data->dev_id;\n+\n+\t/* free device private memory before close */\n+\trte_free(bbdev->data->dev_private);\n+\n+\t/* Close device */\n+\tret = rte_bbdev_close(dev_id);\n+\tif (ret < 0)\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Device %i failed to close during uninit: %i\",\n+\t\t\t\tdev_id, ret);\n+\n+\t/* release bbdev from library */\n+\tret = rte_bbdev_release(bbdev);\n+\tif (ret)\n+\t\trte_bbdev_log(ERR, \"Device %i failed to uninit: %i\", dev_id,\n+\t\t\t\tret);\n+\n+\trte_bbdev_log_debug(\"Destroyed bbdev = %u\", dev_id);\n+\n+\treturn 0;\n+}\n+\n+static inline void\n+set_default_fpga_conf(struct fpga_lte_fec_conf *def_conf)\n+{\n+\t/* clear default configuration before initialization */\n+\tmemset(def_conf, 0, sizeof(struct fpga_lte_fec_conf));\n+\t/* Set pf mode to true */\n+\tdef_conf->pf_mode_en = true;\n+\n+\t/* Set ratio between UL and DL to 1:1 (unit of weight is 3 CBs) */\n+\tdef_conf->ul_bandwidth = 3;\n+\tdef_conf->dl_bandwidth = 3;\n+\n+\t/* Set Load Balance Factor to 64 */\n+\tdef_conf->dl_load_balance = 64;\n+\tdef_conf->ul_load_balance = 64;\n+}\n+\n+/* Initial configuration of FPGA LTE FEC device */\n+int\n+fpga_lte_fec_configure(const char *dev_name,\n+\t\tconst struct fpga_lte_fec_conf *conf)\n+{\n+\tuint32_t payload_32, address;\n+\tuint16_t payload_16;\n+\tuint8_t payload_8;\n+\tuint16_t q_id, vf_id, total_q_id, total_ul_q_id, total_dl_q_id;\n+\tstruct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);\n+\tstruct fpga_lte_fec_conf def_conf;\n+\n+\tif (bbdev == NULL) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Invalid dev_name (%s), or device is not yet initialised\",\n+\t\t\t\tdev_name);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tstruct fpga_lte_fec_device *d = bbdev->data->dev_private;\n+\n+\tif (conf == NULL) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"FPGA Configuration was not provided. 
Default configuration will be loaded.\");\n+\t\tset_default_fpga_conf(&def_conf);\n+\t\tconf = &def_conf;\n+\t}\n+\n+\t/*\n+\t * Configure UL:DL ratio.\n+\t * [7:0]: UL weight\n+\t * [15:8]: DL weight\n+\t */\n+\tpayload_16 = (conf->dl_bandwidth << 8) | conf->ul_bandwidth;\n+\taddress = FPGA_LTE_FEC_CONFIGURATION;\n+\tfpga_reg_write_16(d->mmio_base, address, payload_16);\n+\n+\t/* Clear all queues registers */\n+\tpayload_32 = FPGA_INVALID_HW_QUEUE_ID;\n+\tfor (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {\n+\t\taddress = (q_id << 2) + FPGA_LTE_FEC_QUEUE_MAP;\n+\t\tfpga_reg_write_32(d->mmio_base, address, payload_32);\n+\t}\n+\n+\t/*\n+\t * If PF mode is enabled allocate all queues for PF only.\n+\t *\n+\t * For VF mode each VF can have different number of UL and DL queues.\n+\t * Total number of queues to configure cannot exceed FPGA\n+\t * capabilities - 64 queues - 32 queues for UL and 32 queues for DL.\n+\t * Queues mapping is done according to configuration:\n+\t *\n+\t * UL queues:\n+\t * |                Q_ID              | VF_ID |\n+\t * |                 0                |   0   |\n+\t * |                ...               |   0   |\n+\t * | conf->vf_ul_queues_number[0] - 1 |   0   |\n+\t * | conf->vf_ul_queues_number[0]     |   1   |\n+\t * |                ...               |   1   |\n+\t * | conf->vf_ul_queues_number[1] - 1 |   1   |\n+\t * |                ...               |  ...  |\n+\t * | conf->vf_ul_queues_number[7] - 1 |   7   |\n+\t *\n+\t * DL queues:\n+\t * |                Q_ID              | VF_ID |\n+\t * |                 32               |   0   |\n+\t * |                ...               |   0   |\n+\t * | conf->vf_dl_queues_number[0] - 1 |   0   |\n+\t * | conf->vf_dl_queues_number[0]     |   1   |\n+\t * |                ...               |   1   |\n+\t * | conf->vf_dl_queues_number[1] - 1 |   1   |\n+\t * |                ...               |  ...  |\n+\t * | conf->vf_dl_queues_number[7] - 1 |   7   |\n+\t *\n+\t * Example of configuration:\n+\t * conf->vf_ul_queues_number[0] = 4;  -> 4 UL queues for VF0\n+\t * conf->vf_dl_queues_number[0] = 4;  -> 4 DL queues for VF0\n+\t * conf->vf_ul_queues_number[1] = 2;  -> 2 UL queues for VF1\n+\t * conf->vf_dl_queues_number[1] = 2;  -> 2 DL queues for VF1\n+\t *\n+\t * UL:\n+\t * | Q_ID | VF_ID |\n+\t * |   0  |   0   |\n+\t * |   1  |   0   |\n+\t * |   2  |   0   |\n+\t * |   3  |   0   |\n+\t * |   4  |   1   |\n+\t * |   5  |   1   |\n+\t *\n+\t * DL:\n+\t * | Q_ID | VF_ID |\n+\t * |  32  |   0   |\n+\t * |  33  |   0   |\n+\t * |  34  |   0   |\n+\t * |  35  |   0   |\n+\t * |  36  |   1   |\n+\t * |  37  |   1   |\n+\t */\n+\tif (conf->pf_mode_en) {\n+\t\tpayload_32 = 0x1;\n+\t\tfor (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {\n+\t\t\taddress = (q_id << 2) + FPGA_LTE_FEC_QUEUE_MAP;\n+\t\t\tfpga_reg_write_32(d->mmio_base, address, payload_32);\n+\t\t}\n+\t} else {\n+\t\t/* Calculate total number of UL and DL queues to configure */\n+\t\ttotal_ul_q_id = total_dl_q_id = 0;\n+\t\tfor (vf_id = 0; vf_id < FPGA_LTE_FEC_NUM_VFS; ++vf_id) {\n+\t\t\ttotal_ul_q_id += conf->vf_ul_queues_number[vf_id];\n+\t\t\ttotal_dl_q_id += conf->vf_dl_queues_number[vf_id];\n+\t\t}\n+\t\ttotal_q_id = total_dl_q_id + total_ul_q_id;\n+\t\t/*\n+\t\t * Check if total number of queues to configure does not exceed\n+\t\t * FPGA capabilities (64 queues - 32 UL and 32 DL queues)\n+\t\t */\n+\t\tif ((total_ul_q_id > FPGA_NUM_UL_QUEUES) ||\n+\t\t\t(total_dl_q_id > FPGA_NUM_DL_QUEUES) ||\n+\t\t\t(total_q_id > FPGA_TOTAL_NUM_QUEUES)) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"FPGA Configuration failed. Too many queues to configure: UL_Q %u, DL_Q %u, FPGA_Q %u\",\n+\t\t\t\t\ttotal_ul_q_id, total_dl_q_id,\n+\t\t\t\t\tFPGA_TOTAL_NUM_QUEUES);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\ttotal_ul_q_id = 0;\n+\t\tfor (vf_id = 0; vf_id < FPGA_LTE_FEC_NUM_VFS; ++vf_id) {\n+\t\t\tfor (q_id = 0; q_id < conf->vf_ul_queues_number[vf_id];\n+\t\t\t\t\t++q_id, ++total_ul_q_id) {\n+\t\t\t\taddress = (total_ul_q_id << 2) +\n+\t\t\t\t\t\tFPGA_LTE_FEC_QUEUE_MAP;\n+\t\t\t\tpayload_32 = ((0x80 + vf_id) << 16) | 0x1;\n+\t\t\t\tfpga_reg_write_32(d->mmio_base, address,\n+\t\t\t\t\t\tpayload_32);\n+\t\t\t}\n+\t\t}\n+\t\ttotal_dl_q_id = 0;\n+\t\tfor (vf_id = 0; vf_id < FPGA_LTE_FEC_NUM_VFS; ++vf_id) {\n+\t\t\tfor (q_id = 0; q_id < conf->vf_dl_queues_number[vf_id];\n+\t\t\t\t\t++q_id, ++total_dl_q_id) {\n+\t\t\t\taddress = ((total_dl_q_id + FPGA_NUM_UL_QUEUES)\n+\t\t\t\t\t\t<< 2) + FPGA_LTE_FEC_QUEUE_MAP;\n+\t\t\t\tpayload_32 = ((0x80 + vf_id) << 16) | 0x1;\n+\t\t\t\tfpga_reg_write_32(d->mmio_base, address,\n+\t\t\t\t\t\tpayload_32);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t/* Setting Load Balance Factor */\n+\tpayload_16 = (conf->dl_load_balance << 8) | (conf->ul_load_balance);\n+\taddress = FPGA_LTE_FEC_LOAD_BALANCE_FACTOR;\n+\tfpga_reg_write_16(d->mmio_base, address, payload_16);\n+\n+\t/* Setting length of ring descriptor entry */\n+\tpayload_16 = FPGA_RING_DESC_ENTRY_LENGTH;\n+\taddress = FPGA_LTE_FEC_RING_DESC_LEN;\n+\tfpga_reg_write_16(d->mmio_base, address, payload_16);\n+\n+\t/* Setting FLR timeout value */\n+\tpayload_16 = conf->flr_time_out;\n+\taddress = FPGA_LTE_FEC_FLR_TIME_OUT;\n+\tfpga_reg_write_16(d->mmio_base, address, payload_16);\n+\n+\t/* Queue PF/VF mapping table is ready */\n+\tpayload_8 = 0x1;\n+\taddress = FPGA_LTE_FEC_QUEUE_PF_VF_MAP_DONE;\n+\tfpga_reg_write_8(d->mmio_base, address, payload_8);\n+\n+\trte_bbdev_log_debug(\"PF FPGA LTE 
FEC configuration complete for %s\",\n+\t\t\tdev_name);\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tprint_static_reg_debug_info(d->mmio_base);\n+#endif\n+\treturn 0;\n+}\n+\n+/* FPGA LTE FEC PCI PF address map */\n+static struct rte_pci_id pci_id_fpga_lte_fec_pf_map[] = {\n+\t{\n+\t\tRTE_PCI_DEVICE(FPGA_LTE_FEC_VENDOR_ID,\n+\t\t\t\tFPGA_LTE_FEC_PF_DEVICE_ID)\n+\t},\n+\t{.device_id = 0},\n+};\n+\n+static struct rte_pci_driver fpga_lte_fec_pci_pf_driver = {\n+\t.probe = fpga_lte_fec_probe,\n+\t.remove = fpga_lte_fec_remove,\n+\t.id_table = pci_id_fpga_lte_fec_pf_map,\n+\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING\n+};\n+\n+/* FPGA LTE FEC PCI VF address map */\n+static struct rte_pci_id pci_id_fpga_lte_fec_vf_map[] = {\n+\t{\n+\t\tRTE_PCI_DEVICE(FPGA_LTE_FEC_VENDOR_ID,\n+\t\t\t\tFPGA_LTE_FEC_VF_DEVICE_ID)\n+\t},\n+\t{.device_id = 0},\n+};\n+\n+static struct rte_pci_driver fpga_lte_fec_pci_vf_driver = {\n+\t.probe = fpga_lte_fec_probe,\n+\t.remove = fpga_lte_fec_remove,\n+\t.id_table = pci_id_fpga_lte_fec_vf_map,\n+\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING\n+};\n+\n+\n+RTE_PMD_REGISTER_PCI(FPGA_LTE_FEC_PF_DRIVER_NAME, fpga_lte_fec_pci_pf_driver);\n+RTE_PMD_REGISTER_PCI_TABLE(FPGA_LTE_FEC_PF_DRIVER_NAME,\n+\t\tpci_id_fpga_lte_fec_pf_map);\n+RTE_PMD_REGISTER_PCI(FPGA_LTE_FEC_VF_DRIVER_NAME, fpga_lte_fec_pci_vf_driver);\n+RTE_PMD_REGISTER_PCI_TABLE(FPGA_LTE_FEC_VF_DRIVER_NAME,\n+\t\tpci_id_fpga_lte_fec_vf_map);\n+\n+RTE_INIT(fpga_lte_fec_init_log)\n+{\n+\tfpga_lte_fec_logtype = rte_log_register(\"pmd.bb.fpga_lte_fec\");\n+\tif (fpga_lte_fec_logtype >= 0)\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\trte_log_set_level(fpga_lte_fec_logtype, RTE_LOG_DEBUG);\n+#else\n+\t\trte_log_set_level(fpga_lte_fec_logtype, RTE_LOG_NOTICE);\n+#endif\n+}\ndiff --git a/drivers/baseband/fpga_lte_fec/fpga_lte_fec.h b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.h\nnew file mode 100644\nindex 0000000..9ae8b12\n--- /dev/null\n+++ b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.h\n@@ -0,0 +1,73 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019 Intel Corporation\n+ */\n+\n+#ifndef _FPGA_LTE_FEC_H_\n+#define _FPGA_LTE_FEC_H_\n+\n+#include <stdint.h>\n+#include <stdbool.h>\n+\n+/**\n+ * @file fpga_lte_fec.h\n+ *\n+ * Interface for Intel(R) FPGA LTE FEC device configuration at the host level,\n+ * directly accessible by the application.\n+ * Configuration related to LTE Turbo coding functionality is done through\n+ * librte_bbdev library.\n+ *\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ */\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+/**< Number of Virtual Functions FPGA 4G FEC supports */\n+#define FPGA_LTE_FEC_NUM_VFS 8\n+\n+/**\n+ * Structure to pass FPGA 4G FEC configuration.\n+ */\n+struct fpga_lte_fec_conf {\n+\t/**< 1 if PF is used for dataplane, 0 for VFs */\n+\tbool pf_mode_en;\n+\t/**< Number of UL queues per VF */\n+\tuint8_t vf_ul_queues_number[FPGA_LTE_FEC_NUM_VFS];\n+\t/**< Number of DL queues per VF */\n+\tuint8_t vf_dl_queues_number[FPGA_LTE_FEC_NUM_VFS];\n+\t/**< UL bandwidth. Needed for scheduling algorithm */\n+\tuint8_t ul_bandwidth;\n+\t/**< DL bandwidth. Needed for scheduling algorithm */\n+\tuint8_t dl_bandwidth;\n+\t/**< UL Load Balance */\n+\tuint8_t ul_load_balance;\n+\t/**< DL Load Balance */\n+\tuint8_t dl_load_balance;\n+\t/**< FLR timeout value */\n+\tuint16_t flr_time_out;\n+};\n+\n+/**\n+ * Configure Intel(R) FPGA LTE FEC device\n+ *\n+ * @param dev_name\n+ *   The name of the device. This is the short form of PCI BDF, e.g. 
00:01.0.\n+ *   It can also be retrieved for a bbdev device from the dev_name field in the\n+ *   rte_bbdev_info structure returned by rte_bbdev_info_get().\n+ * @param conf\n+ *   Configuration to apply to FPGA 4G FEC.\n+ *\n+ * @return\n+ *   Zero on success, negative value on failure.\n+ */\n+int\n+fpga_lte_fec_configure(const char *dev_name,\n+\t\tconst struct fpga_lte_fec_conf *conf);\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _FPGA_LTE_FEC_H_ */\ndiff --git a/drivers/baseband/fpga_lte_fec/meson.build b/drivers/baseband/fpga_lte_fec/meson.build\nnew file mode 100644\nindex 0000000..bf44e6b\n--- /dev/null\n+++ b/drivers/baseband/fpga_lte_fec/meson.build\n@@ -0,0 +1,7 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2019 Intel Corporation\n+\n+deps += ['bbdev', 'bus_vdev', 'ring', 'pci', 'bus_pci']\n+name = 'bbdev_fpga_lte_fec'\n+allow_experimental_apis = true\n+sources = files('fpga_lte_fec.c')\ndiff --git a/drivers/baseband/fpga_lte_fec/rte_pmd_bbdev_fpga_lte_fec_version.map b/drivers/baseband/fpga_lte_fec/rte_pmd_bbdev_fpga_lte_fec_version.map\nnew file mode 100644\nindex 0000000..e923270\n--- /dev/null\n+++ b/drivers/baseband/fpga_lte_fec/rte_pmd_bbdev_fpga_lte_fec_version.map\n@@ -0,0 +1,3 @@\n+DPDK_19.08 {\n+    local: *;\n+};\ndiff --git a/drivers/baseband/meson.build b/drivers/baseband/meson.build\nindex 40a87d2..391bd51 100644\n--- a/drivers/baseband/meson.build\n+++ b/drivers/baseband/meson.build\n@@ -1,7 +1,7 @@\n # SPDX-License-Identifier: BSD-3-Clause\n # Copyright(c) 2018 Luca Boccassi <bluca@debian.org>\n \n-drivers = ['null', 'turbo_sw']\n+drivers = ['null', 'turbo_sw', 'fpga_lte_fec']\n \n config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'\n driver_name_fmt = 'rte_pmd_@0@'\ndiff --git a/mk/rte.app.mk b/mk/rte.app.mk\nindex 2c3d07a..cdd6073 100644\n--- a/mk/rte.app.mk\n+++ b/mk/rte.app.mk\n@@ -216,6 +216,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD)     += -lrte_pmd_netvsc\n \n ifeq ($(CONFIG_RTE_LIBRTE_BBDEV),y)\n _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL)     += -lrte_pmd_bbdev_null\n+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_FPGA_LTE_FEC) += -lrte_pmd_fpga_lte_fec\n \n # TURBO SOFTWARE PMD is dependent on the FLEXRAN library\n _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += -lrte_pmd_bbdev_turbo_sw\n",
    "prefixes": [
        "v3",
        "02/10"
    ]
}
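For reference, a minimal sketch of driving the public API declared in fpga_lte_fec.h above, using the same values that set_default_fpga_conf() programs in the patch; the device name "00:01.0" is the placeholder PCI BDF from the header's own documentation, and the function name is hypothetical:

/* Minimal sketch (not part of the patch): configure the PF with the
 * defaults that set_default_fpga_conf() applies. "00:01.0" is a
 * placeholder BDF; substitute the dev_name reported by
 * rte_bbdev_info_get() for the probed device.
 */
#include <string.h>
#include "fpga_lte_fec.h"

int
configure_fec_pf_example(void)
{
	struct fpga_lte_fec_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.pf_mode_en = true;		/* all queues owned by the PF */
	conf.ul_bandwidth = 3;		/* UL:DL weight ratio 1:1 (unit: 3 CBs) */
	conf.dl_bandwidth = 3;
	conf.ul_load_balance = 64;	/* Load Balance Factor */
	conf.dl_load_balance = 64;
	/* vf_*_queues_number and flr_time_out stay 0 in PF mode */

	return fpga_lte_fec_configure("00:01.0", &conf);
}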
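The decoder paths above size their per-CB DMA input as kw = RTE_ALIGN_CEIL(k + 4, 32) * 3, i.e. three times the K+4 turbo stream rounded up to the 32-column sub-block interleaver. A standalone sketch of that arithmetic, where ALIGN32 is a local stand-in for RTE_ALIGN_CEIL(x, 32) rather than the DPDK macro itself:

/* Standalone sketch (assumption: plain ISO C, no DPDK headers) of the
 * circular-buffer size rule used by get_num_cbs_in_op_dec().
 */
#include <stdio.h>

#define ALIGN32(x) ((((x) + 31U) / 32U) * 32U)	/* round up to multiple of 32 */

int
main(void)
{
	unsigned int k = 6144;			/* largest LTE CB size */
	unsigned int kw = ALIGN32(k + 4) * 3;	/* bytes of soft input per CB */

	/* prints: K=6144 -> Kw=18528 */
	printf("K=%u -> Kw=%u\n", k, kw);
	return 0;
}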