get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/66260/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 66260,
    "url": "http://patches.dpdk.org/api/patches/66260/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1583346152-10186-13-git-send-email-nicolas.chautru@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1583346152-10186-13-git-send-email-nicolas.chautru@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1583346152-10186-13-git-send-email-nicolas.chautru@intel.com",
    "date": "2020-03-04T18:22:29",
    "name": "[v2,12/15] drivers/baseband: add PMD for FPGA 5GNR FEC",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "7cba844a6e1d22e021f5bc7542553c75d1d88f36",
    "submitter": {
        "id": 1314,
        "url": "http://patches.dpdk.org/api/people/1314/?format=api",
        "name": "Chautru, Nicolas",
        "email": "nicolas.chautru@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patches.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1583346152-10186-13-git-send-email-nicolas.chautru@intel.com/mbox/",
    "series": [
        {
            "id": 8783,
            "url": "http://patches.dpdk.org/api/series/8783/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=8783",
            "date": "2020-03-04T18:22:17",
            "name": "bbdev new features",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/8783/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/66260/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/66260/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id BE5DFA0573;\n\tWed,  4 Mar 2020 19:24:42 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id B292C1C06C;\n\tWed,  4 Mar 2020 19:23:02 +0100 (CET)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by dpdk.org (Postfix) with ESMTP id 93F6D1BFA9\n for <dev@dpdk.org>; Wed,  4 Mar 2020 19:22:48 +0100 (CET)",
            "from fmsmga008.fm.intel.com ([10.253.24.58])\n by orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n 04 Mar 2020 10:22:44 -0800",
            "from skx-5gnr-sc12-4.sc.intel.com ([172.25.69.210])\n by fmsmga008.fm.intel.com with ESMTP; 04 Mar 2020 10:22:43 -0800"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.70,514,1574150400\"; d=\"scan'208\";a=\"234199027\"",
        "From": "Nicolas Chautru <nicolas.chautru@intel.com>",
        "To": "thomas@monjalon.net,\n\takhil.goyal@nxp.com,\n\tdev@dpdk.org",
        "Cc": "ferruh.yigit@intel.com,\n\tNic Chautru <nicolas.chautru@intel.com>",
        "Date": "Wed,  4 Mar 2020 10:22:29 -0800",
        "Message-Id": "<1583346152-10186-13-git-send-email-nicolas.chautru@intel.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1583346152-10186-1-git-send-email-nicolas.chautru@intel.com>",
        "References": "<1582778348-113547-15-git-send-email-nicolas.chautru@intel.com>\n <1583346152-10186-1-git-send-email-nicolas.chautru@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 12/15] drivers/baseband: add PMD for FPGA 5GNR\n\tFEC",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Nic Chautru <nicolas.chautru@intel.com>\n\nSupports for FEC 5G PMD Driver on FPGA card PAC N3000\n\nSigned-off-by: Nic Chautru <nicolas.chautru@intel.com>\n---\n config/common_base                                 |    5 +\n doc/guides/bbdevs/fpga_5gnr_fec.rst                |  297 +++\n doc/guides/bbdevs/index.rst                        |    1 +\n drivers/baseband/Makefile                          |    2 +\n drivers/baseband/fpga_5gnr_fec/Makefile            |   29 +\n drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.c     | 2552 ++++++++++++++++++++\n drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h     |   74 +\n drivers/baseband/fpga_5gnr_fec/meson.build         |    6 +\n .../rte_pmd_bbdev_fpga_5gnr_fec_version.map        |   10 +\n drivers/baseband/meson.build                       |    2 +-\n mk/rte.app.mk                                      |    1 +\n 11 files changed, 2978 insertions(+), 1 deletion(-)\n create mode 100644 doc/guides/bbdevs/fpga_5gnr_fec.rst\n create mode 100644 drivers/baseband/fpga_5gnr_fec/Makefile\n create mode 100644 drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.c\n create mode 100644 drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h\n create mode 100644 drivers/baseband/fpga_5gnr_fec/meson.build\n create mode 100644 drivers/baseband/fpga_5gnr_fec/rte_pmd_bbdev_fpga_5gnr_fec_version.map",
    "diff": "diff --git a/config/common_base b/config/common_base\nindex 7ca2f28..7280ee2 100644\n--- a/config/common_base\n+++ b/config/common_base\n@@ -578,6 +578,11 @@ CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=y\n CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC=y\n \n #\n+# Compile PMD for Intel FPGA 5GNR FEC bbdev device\n+#\n+CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_5GNR_FEC=y\n+\n+#\n # Compile generic crypto device library\n #\n CONFIG_RTE_LIBRTE_CRYPTODEV=y\ndiff --git a/doc/guides/bbdevs/fpga_5gnr_fec.rst b/doc/guides/bbdevs/fpga_5gnr_fec.rst\nnew file mode 100644\nindex 0000000..5641b1a\n--- /dev/null\n+++ b/doc/guides/bbdevs/fpga_5gnr_fec.rst\n@@ -0,0 +1,297 @@\n+..  SPDX-License-Identifier: BSD-3-Clause\n+    Copyright(c) 2019 Intel Corporation\n+\n+Intel(R) FPGA 5GNR FEC Poll Mode Driver\n+======================================\n+\n+The BBDEV FPGA 5GNR FEC poll mode driver (PMD) supports an FPGA implementation of a VRAN\n+LDPC Encode / Decode 5GNR wireless acceleration function, using Intel's PCI-e and FPGA\n+based Vista Creek device.\n+\n+Features\n+--------\n+\n+FPGA 5GNR FEC PMD supports the following features:\n+\n+- LDPC Encode in the DL\n+- LDPC Decode in the UL\n+- 8 VFs per PF (physical device)\n+- Maximum of 32 UL queues per VF\n+- Maximum of 32 DL queues per VF\n+- PCIe Gen-3 x8 Interface\n+- MSI-X\n+- SR-IOV\n+\n+FPGA 5GNR FEC PMD supports the following BBDEV capabilities:\n+\n+* For the LDPC encode operation:\n+   - ``RTE_BBDEV_LDPC_CRC_24B_ATTACH`` :  set to attach CRC24B to CB(s)\n+   - ``RTE_BBDEV_LDPC_RATE_MATCH`` :  if set then do not do Rate Match bypass\n+\n+* For the LDPC decode operation:\n+   - ``RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK`` :  check CRC24B from CB(s)\n+   - ``RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE`` :  disable early termination\n+   - ``RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP`` :  drops CRC24B bits appended while decoding\n+   - ``RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE`` :  provides an input for HARQ combining\n+   - 
``RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE`` :  provides an input for HARQ combining\n+   - ``RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE`` :  HARQ memory input is internal\n+   - ``RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE`` :  HARQ memory output is internal\n+   - ``RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK`` :  loopback data to/from HARQ memory\n+   - ``RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS`` :  HARQ memory includes the fillers bits\n+\n+\n+Limitations\n+-----------\n+\n+FPGA 5GNR FEC does not support the following:\n+\n+- Scatter-Gather function\n+\n+\n+Installation\n+--------------\n+\n+Section 3 of the DPDK manual provides instuctions on installing and compiling DPDK. The\n+default set of bbdev compile flags may be found in config/common_base, where for example\n+the flag to build the FPGA 5GNR FEC device, ``CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_5GNR_FEC``,\n+is already set. It is assumed DPDK has been compiled using for instance:\n+\n+.. code-block:: console\n+\n+  make install T=x86_64-native-linuxapp-gcc\n+\n+\n+DPDK requires hugepages to be configured as detailed in section 2 of the DPDK manual.\n+The bbdev test application has been tested with a configuration 40 x 1GB hugepages. The\n+hugepage configuration of a server may be examined using:\n+\n+.. code-block:: console\n+\n+   grep Huge* /proc/meminfo\n+\n+\n+Initialization\n+--------------\n+\n+When the device first powers up, its PCI Physical Functions (PF) can be listed through this command:\n+\n+.. code-block:: console\n+\n+  sudo lspci -vd8086:0d8f\n+\n+The physical and virtual functions are compatible with Linux UIO drivers:\n+``vfio`` and ``igb_uio``. 
However, in order to work the FPGA 5GNR FEC device firstly needs\n+to be bound to one of these linux drivers through DPDK.\n+\n+\n+Bind PF UIO driver(s)\n+~~~~~~~~~~~~~~~~~~~~~\n+\n+Install the DPDK igb_uio driver, bind it with the PF PCI device ID and use\n+``lspci`` to confirm the PF device is under use by ``igb_uio`` DPDK UIO driver.\n+\n+The igb_uio driver may be bound to the PF PCI device using one of three methods:\n+\n+\n+1. PCI functions (physical or virtual, depending on the use case) can be bound to\n+the UIO driver by repeating this command for every function.\n+\n+.. code-block:: console\n+\n+  cd <dpdk-top-level-directory>\n+  insmod ./build/kmod/igb_uio.ko\n+  echo \"8086 0d8f\" > /sys/bus/pci/drivers/igb_uio/new_id\n+  lspci -vd8086:0d8f\n+\n+\n+2. Another way to bind PF with DPDK UIO driver is by using the ``dpdk-devbind.py`` tool\n+\n+.. code-block:: console\n+\n+  cd <dpdk-top-level-directory>\n+  ./usertools/dpdk-devbind.py -b igb_uio 0000:06:00.0\n+\n+where the PCI device ID (example: 0000:06:00.0) is obtained using lspci -vd8086:0d8f\n+\n+\n+3. A third way to bind is to use ``dpdk-setup.sh`` tool\n+\n+.. code-block:: console\n+\n+  cd <dpdk-top-level-directory>\n+  ./usertools/dpdk-setup.sh\n+\n+  select 'Bind Ethernet/Crypto/Baseband device to IGB UIO module'\n+  or\n+  select 'Bind Ethernet/Crypto/Baseband device to VFIO module' depending on driver required\n+  enter PCI device ID\n+  select 'Display current Ethernet/Crypto/Baseband device settings' to confirm binding\n+\n+\n+In the same way the FPGA 5GNR FEC PF can be bound with vfio, but vfio driver does not\n+support SR-IOV configuration right out of the box, so it will need to be patched.\n+\n+\n+Enable Virtual Functions\n+~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+Now, it should be visible in the printouts that PCI PF is under igb_uio control\n+\"``Kernel driver in use: igb_uio``\"\n+\n+To show the number of available VFs on the device, read ``sriov_totalvfs`` file..\n+\n+.. 
code-block:: console\n+\n+  cat /sys/bus/pci/devices/0000\\:<b>\\:<d>.<f>/sriov_totalvfs\n+\n+  where 0000\\:<b>\\:<d>.<f> is the PCI device ID\n+\n+\n+To enable VFs via igb_uio, echo the number of virtual functions intended to\n+enable to ``max_vfs`` file..\n+\n+.. code-block:: console\n+\n+  echo <num-of-vfs> > /sys/bus/pci/devices/0000\\:<b>\\:<d>.<f>/max_vfs\n+\n+\n+Afterwards, all VFs must be bound to appropriate UIO drivers as required, same\n+way it was done with the physical function previously.\n+\n+Enabling SR-IOV via vfio driver is pretty much the same, except that the file\n+name is different:\n+\n+.. code-block:: console\n+\n+  echo <num-of-vfs> > /sys/bus/pci/devices/0000\\:<b>\\:<d>.<f>/sriov_numvfs\n+\n+\n+Configure the VFs through PF\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+The PCI virtual functions must be configured before working or getting assigned\n+to VMs/Containers. The configuration involves allocating the number of hardware\n+queues, priorities, load balance, bandwidth and other settings necessary for the\n+device to perform FEC functions.\n+\n+This configuration needs to be executed at least once after reboot or PCI FLR and can\n+be achieved by using the function ``fpga_5gnr_fec_configure()``, which sets up the\n+parameters defined in ``fpga_5gnr_fec_conf`` structure:\n+\n+.. code-block:: c\n+\n+  struct fpga_5gnr_fec_conf {\n+      bool pf_mode_en;\n+      uint8_t vf_ul_queues_number[FPGA_5GNR_FEC_NUM_VFS];\n+      uint8_t vf_dl_queues_number[FPGA_5GNR_FEC_NUM_VFS];\n+      uint8_t ul_bandwidth;\n+      uint8_t dl_bandwidth;\n+      uint8_t ul_load_balance;\n+      uint8_t dl_load_balance;\n+      uint16_t flr_time_out;\n+  };\n+\n+- ``pf_mode_en``: identifies whether only PF is to be used, or the VFs. 
PF and\n+  VFs are mutually exclusive and cannot run simultaneously.\n+  Set to 1 for PF mode enabled.\n+  If PF mode is enabled all queues available in the device are assigned\n+  exclusively to PF and 0 queues given to VFs.\n+\n+- ``vf_*l_queues_number``: defines the hardware queue mapping for every VF.\n+\n+- ``*l_bandwidth``: in case of congestion on PCIe interface. The device\n+  allocates different bandwidth to UL and DL. The weight is configured by this\n+  setting. The unit of weight is 3 code blocks. For example, if the code block\n+  cbps (code block per second) ratio between UL and DL is 12:1, then the\n+  configuration value should be set to 36:3. The schedule algorithm is based\n+  on code block regardless the length of each block.\n+\n+- ``*l_load_balance``: hardware queues are load-balanced in a round-robin\n+  fashion. Queues get filled first-in first-out until they reach a pre-defined\n+  watermark level, if exceeded, they won't get assigned new code blocks..\n+  This watermark is defined by this setting.\n+\n+  If all hardware queues exceeds the watermark, no code blocks will be\n+  streamed in from UL/DL code block FIFO.\n+\n+- ``flr_time_out``: specifies how many 16.384us to be FLR time out. The\n+  time_out = flr_time_out x 16.384us. For instance, if you want to set 10ms for\n+  the FLR time out then set this setting to 0x262=610.\n+\n+\n+An example configuration code calling the function ``fpga_5gnr_fec_configure()`` is shown\n+below:\n+\n+.. 
code-block:: c\n+\n+  struct fpga_5gnr_fec_conf conf;\n+  unsigned int i;\n+\n+  memset(&conf, 0, sizeof(struct fpga_5gnr_fec_conf));\n+  conf.pf_mode_en = 1;\n+\n+  for (i = 0; i < FPGA_5GNR_FEC_NUM_VFS; ++i) {\n+      conf.vf_ul_queues_number[i] = 4;\n+      conf.vf_dl_queues_number[i] = 4;\n+  }\n+  conf.ul_bandwidth = 12;\n+  conf.dl_bandwidth = 5;\n+  conf.dl_load_balance = 64;\n+  conf.ul_load_balance = 64;\n+\n+  /* setup FPGA PF */\n+  ret = fpga_5gnr_fec_configure(info->dev_name, &conf);\n+  TEST_ASSERT_SUCCESS(ret,\n+      \"Failed to configure 4G FPGA PF for bbdev %s\",\n+      info->dev_name);\n+\n+\n+Test Application\n+----------------\n+\n+BBDEV provides a test application, ``test-bbdev.py`` and range of test data for testing\n+the functionality of FPGA 5GNR FEC encode and decode, depending on the device's\n+capabilities. The test application is located under app->test-bbdev folder and has the\n+following options:\n+\n+.. code-block:: console\n+\n+  \"-p\", \"--testapp-path\": specifies path to the bbdev test app.\n+  \"-e\", \"--eal-params\"\t: EAL arguments which are passed to the test app.\n+  \"-t\", \"--timeout\"\t: Timeout in seconds (default=300).\n+  \"-c\", \"--test-cases\"\t: Defines test cases to run. Run all if not specified.\n+  \"-v\", \"--test-vector\"\t: Test vector path (default=dpdk_path+/app/test-bbdev/test_vectors/bbdev_null.data).\n+  \"-n\", \"--num-ops\"\t: Number of operations to process on device (default=32).\n+  \"-b\", \"--burst-size\"\t: Operations enqueue/dequeue burst size (default=32).\n+  \"-l\", \"--num-lcores\"\t: Number of lcores to run (default=16).\n+  \"-i\", \"--init-device\" : Initialise PF device with default values.\n+\n+\n+To execute the test application tool using simple decode or encode data,\n+type one of the following:\n+\n+.. 
code-block:: console\n+\n+  ./test-bbdev.py -c validation -n 64 -b 1 -v ./ldpc_dec_default.data\n+  ./test-bbdev.py -c validation -n 64 -b 1 -v ./ldpc_enc_default.data\n+\n+\n+The test application ``test-bbdev.py``, supports the ability to configure the PF device with\n+a default set of values, if the \"-i\" or \"- -init-device\" option is included. The default values\n+are defined in test_bbdev_perf.c as:\n+\n+- VF_UL_QUEUE_VALUE 4\n+- VF_DL_QUEUE_VALUE 4\n+- UL_BANDWIDTH 3\n+- DL_BANDWIDTH 3\n+- UL_LOAD_BALANCE 128\n+- DL_LOAD_BALANCE 128\n+- FLR_TIMEOUT 610\n+\n+\n+Test Vectors\n+~~~~~~~~~~~~\n+\n+In addition to the simple LDPC decoder and LDPC encoder tests, bbdev also provides\n+a range of additional tests under the test_vectors folder, which may be useful. The results\n+of these tests will depend on the FPGA 5GNR FEC capabilities.\ndiff --git a/doc/guides/bbdevs/index.rst b/doc/guides/bbdevs/index.rst\nindex 005b95e..1a79343 100644\n--- a/doc/guides/bbdevs/index.rst\n+++ b/doc/guides/bbdevs/index.rst\n@@ -11,3 +11,4 @@ Baseband Device Drivers\n     null\n     turbo_sw\n     fpga_lte_fec\n+    fpga_5gnr_fec\ndiff --git a/drivers/baseband/Makefile b/drivers/baseband/Makefile\nindex 91048be..762773c 100644\n--- a/drivers/baseband/Makefile\n+++ b/drivers/baseband/Makefile\n@@ -12,5 +12,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += turbo_sw\n DEPDIRS-turbo_sw = $(core-libs)\n DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC) += fpga_lte_fec\n DEPDIRS-fpga_lte_fec = $(core-libs)\n+DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC) += fpga_5gnr_fec\n+DEPDIRS-fpga_5gnr_fec = $(core-libs)\n \n include $(RTE_SDK)/mk/rte.subdir.mk\ndiff --git a/drivers/baseband/fpga_5gnr_fec/Makefile b/drivers/baseband/fpga_5gnr_fec/Makefile\nnew file mode 100644\nindex 0000000..de80d67\n--- /dev/null\n+++ b/drivers/baseband/fpga_5gnr_fec/Makefile\n@@ -0,0 +1,29 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2019 Intel Corporation\n+\n+include 
$(RTE_SDK)/mk/rte.vars.mk\n+\n+# library name\n+LIB = librte_pmd_bbdev_fpga_5gnr_fec.a\n+\n+# build flags\n+CFLAGS += -DALLOW_EXPERIMENTAL_API\n+CFLAGS += -O3\n+CFLAGS += $(WERROR_FLAGS)\n+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring\n+LDLIBS += -lrte_bbdev\n+LDLIBS += -lrte_pci -lrte_bus_pci\n+\n+# versioning export map\n+EXPORT_MAP := rte_pmd_bbdev_fpga_5gnr_fec_version.map\n+\n+# library version\n+LIBABIVER := 1\n+\n+# library source files\n+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_5GNR_FEC) += fpga_5gnr_fec.c\n+\n+# export include files\n+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_5GNR_FEC)-include += fpga_5gnr_fec.h\n+\n+include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.c\nnew file mode 100644\nindex 0000000..5935e24\n--- /dev/null\n+++ b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.c\n@@ -0,0 +1,2552 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#include <unistd.h>\n+\n+#include <rte_common.h>\n+#include <rte_log.h>\n+#include <rte_dev.h>\n+#include <rte_malloc.h>\n+#include <rte_mempool.h>\n+#include <rte_errno.h>\n+#include <rte_pci.h>\n+#include <rte_bus_pci.h>\n+#include <rte_byteorder.h>\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+#include <rte_cycles.h>\n+#endif\n+\n+#include <rte_bbdev.h>\n+#include <rte_bbdev_pmd.h>\n+\n+#include \"fpga_5gnr_fec.h\"\n+\n+/* 5GNR SW PMD logging ID */\n+static int fpga_5gnr_fec_logtype;\n+\n+/* Helper macro for logging */\n+#define rte_bbdev_log(level, fmt, ...) \\\n+\trte_log(RTE_LOG_ ## level, fpga_5gnr_fec_logtype, fmt \"\\n\", \\\n+\t\t##__VA_ARGS__)\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+#define rte_bbdev_log_debug(fmt, ...) 
\\\n+\t\trte_bbdev_log(DEBUG, \"fpga_5gnr_fec: \" fmt, \\\n+\t\t##__VA_ARGS__)\n+#else\n+#define rte_bbdev_log_debug(fmt, ...)\n+#endif\n+\n+/* FPGA 5GNR FEC driver names */\n+#define FPGA_5GNR_FEC_PF_DRIVER_NAME intel_fpga_5gnr_fec_pf\n+#define FPGA_5GNR_FEC_VF_DRIVER_NAME intel_fpga_5gnr_fec_vf\n+\n+/* FPGA 5GNR FEC PCI vendor & device IDs */\n+#define FPGA_5GNR_FEC_VENDOR_ID (0x8086)\n+#define FPGA_5GNR_FEC_PF_DEVICE_ID (0x0D8F)\n+#define FPGA_5GNR_FEC_VF_DEVICE_ID (0x0D90)\n+\n+/* Align DMA descriptors to 256 bytes - cache-aligned */\n+#define FPGA_RING_DESC_ENTRY_LENGTH (8)\n+/* Ring size is in 256 bits (32 bytes) units */\n+#define FPGA_RING_DESC_LEN_UNIT_BYTES (32)\n+/* Maximum size of queue */\n+#define FPGA_RING_MAX_SIZE (1024)\n+#define FPGA_FLR_TIMEOUT_UNIT (16.384)\n+\n+#define FPGA_NUM_UL_QUEUES (32)\n+#define FPGA_NUM_DL_QUEUES (32)\n+#define FPGA_TOTAL_NUM_QUEUES (FPGA_NUM_UL_QUEUES + FPGA_NUM_DL_QUEUES)\n+#define FPGA_NUM_INTR_VEC (FPGA_TOTAL_NUM_QUEUES - RTE_INTR_VEC_RXTX_OFFSET)\n+\n+#define FPGA_INVALID_HW_QUEUE_ID (0xFFFFFFFF)\n+\n+#define FPGA_QUEUE_FLUSH_TIMEOUT_US (1000)\n+#define FPGA_HARQ_RDY_TIMEOUT (10)\n+#define FPGA_TIMEOUT_CHECK_INTERVAL (5)\n+#define FPGA_DDR_OVERFLOW (0x10)\n+\n+#define FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES 8\n+#define FPGA_5GNR_FEC_DDR_RD_DATA_LEN_IN_BYTES 8\n+\n+\n+/* FPGA 5GNR FEC Register mapping on BAR0 */\n+enum {\n+\tFPGA_5GNR_FEC_VERSION_ID = 0x00000000, /* len: 4B */\n+\tFPGA_5GNR_FEC_CONFIGURATION = 0x00000004, /* len: 2B */\n+\tFPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE = 0x00000008, /* len: 1B */\n+\tFPGA_5GNR_FEC_LOAD_BALANCE_FACTOR = 0x0000000a, /* len: 2B */\n+\tFPGA_5GNR_FEC_RING_DESC_LEN = 0x0000000c, /* len: 2B */\n+\tFPGA_5GNR_FEC_FLR_TIME_OUT = 0x0000000e, /* len: 2B */\n+\tFPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW = 0x00000018, /* len: 4B */\n+\tFPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI = 0x0000001c, /* len: 4B */\n+\tFPGA_5GNR_FEC_QUEUE_MAP = 0x00000040, /* len: 256B */\n+\tFPGA_5GNR_FEC_RING_CTRL_REGS = 0x00000200, 
/* len: 2048B */\n+\tFPGA_5GNR_FEC_DDR4_WR_ADDR_REGS = 0x00000A00, /* len: 4B */\n+\tFPGA_5GNR_FEC_DDR4_WR_DATA_REGS = 0x00000A08, /* len: 8B */\n+\tFPGA_5GNR_FEC_DDR4_WR_DONE_REGS = 0x00000A10, /* len: 1B */\n+\tFPGA_5GNR_FEC_DDR4_RD_ADDR_REGS = 0x00000A18, /* len: 4B */\n+\tFPGA_5GNR_FEC_DDR4_RD_DONE_REGS = 0x00000A20, /* len: 1B */\n+\tFPGA_5GNR_FEC_DDR4_RD_RDY_REGS = 0x00000A28, /* len: 1B */\n+\tFPGA_5GNR_FEC_DDR4_RD_DATA_REGS = 0x00000A30, /* len: 8B */\n+\tFPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS = 0x00000A38, /* len: 1B */\n+\tFPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS = 0x00000A40, /* len: 1B */\n+\tFPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS = 0x00000A48  /* len: 4B */\n+};\n+\n+/* FPGA 5GNR FEC Ring Control Registers */\n+enum {\n+\tFPGA_5GNR_FEC_RING_HEAD_ADDR = 0x00000008,\n+\tFPGA_5GNR_FEC_RING_SIZE = 0x00000010,\n+\tFPGA_5GNR_FEC_RING_MISC = 0x00000014,\n+\tFPGA_5GNR_FEC_RING_ENABLE = 0x00000015,\n+\tFPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN = 0x00000016,\n+\tFPGA_5GNR_FEC_RING_SHADOW_TAIL = 0x00000018,\n+\tFPGA_5GNR_FEC_RING_HEAD_POINT = 0x0000001C\n+};\n+\n+/* FPGA 5GNR FEC DESCRIPTOR ERROR */\n+enum {\n+\tDESC_ERR_NO_ERR = 0x0,\n+\tDESC_ERR_K_P_OUT_OF_RANGE = 0x1,\n+\tDESC_ERR_Z_C_NOT_LEGAL = 0x2,\n+\tDESC_ERR_DESC_OFFSET_ERR = 0x3,\n+\tDESC_ERR_DESC_READ_FAIL = 0x8,\n+\tDESC_ERR_DESC_READ_TIMEOUT = 0x9,\n+\tDESC_ERR_DESC_READ_TLP_POISONED = 0xA,\n+\tDESC_ERR_CB_READ_FAIL = 0xC,\n+\tDESC_ERR_CB_READ_TIMEOUT = 0xD,\n+\tDESC_ERR_CB_READ_TLP_POISONED = 0xE,\n+\tDESC_ERR_HBSTORE_ERR = 0xF\n+};\n+\n+\n+/* FPGA 5GNR FEC DMA Encoding Request Descriptor */\n+struct __attribute__((__packed__)) fpga_dma_enc_desc {\n+\tuint32_t done:1,\n+\t\trsrvd0:7,\n+\t\terror:4,\n+\t\trsrvd1:4,\n+\t\tnum_null:10,\n+\t\trsrvd2:6;\n+\tuint32_t ncb:15,\n+\t\trsrvd3:1,\n+\t\tk0:16;\n+\tuint32_t irq_en:1,\n+\t\tcrc_en:1,\n+\t\trsrvd4:1,\n+\t\tqm_idx:3,\n+\t\tbg_idx:1,\n+\t\tzc:9,\n+\t\tdesc_idx:10,\n+\t\trsrvd5:6;\n+\tuint16_t rm_e;\n+\tuint16_t k_;\n+\tuint32_t out_addr_lw;\n+\tuint32_t 
out_addr_hi;\n+\tuint32_t in_addr_lw;\n+\tuint32_t in_addr_hi;\n+\n+\tunion {\n+\t\tstruct {\n+\t\t\t/* Virtual addresses used to retrieve SW context info */\n+\t\t\tvoid *op_addr;\n+\t\t\t/* Stores information about total number of Code Blocks\n+\t\t\t * in currently processed Transport Block\n+\t\t\t */\n+\t\t\tuint64_t cbs_in_op;\n+\t\t};\n+\n+\t\tuint8_t sw_ctxt[FPGA_RING_DESC_LEN_UNIT_BYTES *\n+\t\t\t\t\t(FPGA_RING_DESC_ENTRY_LENGTH - 1)];\n+\t};\n+};\n+\n+\n+/* FPGA 5GNR DPC FEC DMA Decoding Request Descriptor */\n+struct __attribute__((__packed__)) fpga_dma_dec_desc {\n+\tuint32_t done:1,\n+\t\titer:5,\n+\t\tet_pass:1,\n+\t\tcrcb_pass:1,\n+\t\terror:4,\n+\t\tqm_idx:3,\n+\t\tmax_iter:5,\n+\t\tbg_idx:1,\n+\t\trsrvd0:1,\n+\t\tharqin_en:1,\n+\t\tzc:9;\n+\tuint32_t hbstroe_offset:22,\n+\t\tnum_null:10;\n+\tuint32_t irq_en:1,\n+\t\tncb:15,\n+\t\tdesc_idx:10,\n+\t\tdrop_crc24b:1,\n+\t\tcrc24b_ind:1,\n+\t\trv:2,\n+\t\tet_dis:1,\n+\t\trsrvd2:1;\n+\tuint32_t harq_input_length:16,\n+\t\trm_e:16;/*the inbound data byte length*/\n+\tuint32_t out_addr_lw;\n+\tuint32_t out_addr_hi;\n+\tuint32_t in_addr_lw;\n+\tuint32_t in_addr_hi;\n+\n+\tunion {\n+\t\tstruct {\n+\t\t\t/* Virtual addresses used to retrieve SW context info */\n+\t\t\tvoid *op_addr;\n+\t\t\t/* Stores information about total number of Code Blocks\n+\t\t\t * in currently processed Transport Block\n+\t\t\t */\n+\t\t\tuint8_t cbs_in_op;\n+\t\t};\n+\n+\t\tuint32_t sw_ctxt[8 * (FPGA_RING_DESC_ENTRY_LENGTH - 1)];\n+\t};\n+};\n+\n+/* FPGA 5GNR DMA Descriptor */\n+union fpga_dma_desc {\n+\tstruct fpga_dma_enc_desc enc_req;\n+\tstruct fpga_dma_dec_desc dec_req;\n+};\n+\n+/* FPGA 5GNR FEC Ring Control Register */\n+struct __attribute__((__packed__)) fpga_ring_ctrl_reg {\n+\tuint64_t ring_base_addr;\n+\tuint64_t ring_head_addr;\n+\tuint16_t ring_size:11;\n+\tuint16_t rsrvd0;\n+\tunion { /* Miscellaneous register */\n+\t\tuint8_t misc;\n+\t\tuint8_t 
max_ul_dec:5,\n+\t\t\tmax_ul_dec_en:1,\n+\t\t\trsrvd1:2;\n+\t};\n+\tuint8_t enable;\n+\tuint8_t flush_queue_en;\n+\tuint8_t rsrvd2;\n+\tuint16_t shadow_tail;\n+\tuint16_t rsrvd3;\n+\tuint16_t head_point;\n+\tuint16_t rsrvd4;\n+\n+};\n+\n+/* Private data structure for each FPGA FEC device */\n+struct fpga_5gnr_fec_device {\n+\t/** Base address of MMIO registers (BAR0) */\n+\tvoid *mmio_base;\n+\t/** Base address of memory for sw rings */\n+\tvoid *sw_rings;\n+\t/** Physical address of sw_rings */\n+\trte_iova_t sw_rings_phys;\n+\t/** Number of bytes available for each queue in device. */\n+\tuint32_t sw_ring_size;\n+\t/** Max number of entries available for each queue in device */\n+\tuint32_t sw_ring_max_depth;\n+\t/** Base address of response tail pointer buffer */\n+\tuint32_t *tail_ptrs;\n+\t/** Physical address of tail pointers */\n+\trte_iova_t tail_ptr_phys;\n+\t/** Queues flush completion flag */\n+\tuint64_t *flush_queue_status;\n+\t/* Bitmap capturing which Queues are bound to the PF/VF */\n+\tuint64_t q_bound_bit_map;\n+\t/* Bitmap capturing which Queues have already been assigned */\n+\tuint64_t q_assigned_bit_map;\n+\t/** True if this is a PF FPGA FEC device */\n+\tbool pf_device;\n+};\n+\n+/* Structure associated with each queue. 
*/\n+struct __rte_cache_aligned fpga_queue {\n+\tstruct fpga_ring_ctrl_reg ring_ctrl_reg;  /* Ring Control Register */\n+\tunion fpga_dma_desc *ring_addr;  /* Virtual address of software ring */\n+\tuint64_t *ring_head_addr;  /* Virtual address of completion_head */\n+\tuint64_t shadow_completion_head; /* Shadow completion head value */\n+\tuint16_t head_free_desc;  /* Ring head */\n+\tuint16_t tail;  /* Ring tail */\n+\t/* Mask used to wrap enqueued descriptors on the sw ring */\n+\tuint32_t sw_ring_wrap_mask;\n+\tuint32_t irq_enable;  /* Enable ops dequeue interrupts if set to 1 */\n+\tuint8_t q_idx;  /* Queue index */\n+\tstruct fpga_5gnr_fec_device *d;\n+\t/* MMIO register of shadow_tail used to enqueue descriptors */\n+\tvoid *shadow_tail_addr;\n+};\n+\n+/* Write to 16 bit MMIO register address */\n+static inline void\n+mmio_write_16(void *addr, uint16_t value)\n+{\n+\t*((volatile uint16_t *)(addr)) = rte_cpu_to_le_16(value);\n+}\n+\n+/* Write to 32 bit MMIO register address */\n+static inline void\n+mmio_write_32(void *addr, uint32_t value)\n+{\n+\t*((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);\n+}\n+\n+/* Write to 64 bit MMIO register address */\n+static inline void\n+mmio_write_64(void *addr, uint64_t value)\n+{\n+\t*((volatile uint64_t *)(addr)) = rte_cpu_to_le_64(value);\n+}\n+\n+/* Write a 8 bit register of a FPGA 5GNR FEC device */\n+static inline void\n+fpga_reg_write_8(void *mmio_base, uint32_t offset, uint8_t payload)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\t*((volatile uint8_t *)(reg_addr)) = payload;\n+}\n+\n+/* Write a 16 bit register of a FPGA 5GNR FEC device */\n+static inline void\n+fpga_reg_write_16(void *mmio_base, uint32_t offset, uint16_t payload)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tmmio_write_16(reg_addr, payload);\n+}\n+\n+/* Write a 32 bit register of a FPGA 5GNR FEC device */\n+static inline void\n+fpga_reg_write_32(void *mmio_base, uint32_t offset, uint32_t payload)\n+{\n+\tvoid 
*reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tmmio_write_32(reg_addr, payload);\n+}\n+\n+/* Write a 64 bit register of a FPGA 5GNR FEC device */\n+static inline void\n+fpga_reg_write_64(void *mmio_base, uint32_t offset, uint64_t payload)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tmmio_write_64(reg_addr, payload);\n+}\n+\n+/* Write a ring control register of a FPGA 5GNR FEC device */\n+static inline void\n+fpga_ring_reg_write(void *mmio_base, uint32_t offset,\n+\t\tstruct fpga_ring_ctrl_reg payload)\n+{\n+\tfpga_reg_write_64(mmio_base, offset, payload.ring_base_addr);\n+\tfpga_reg_write_64(mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_ADDR,\n+\t\t\tpayload.ring_head_addr);\n+\tfpga_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_SIZE,\n+\t\t\tpayload.ring_size);\n+\tfpga_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,\n+\t\t\tpayload.head_point);\n+\tfpga_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN,\n+\t\t\tpayload.flush_queue_en);\n+\tfpga_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,\n+\t\t\tpayload.shadow_tail);\n+\tfpga_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_MISC,\n+\t\t\tpayload.misc);\n+\tfpga_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,\n+\t\t\tpayload.enable);\n+}\n+\n+/* Read a register of FPGA 5GNR FEC device */\n+static uint32_t\n+fpga_reg_read_32(void *mmio_base, uint32_t offset)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tuint32_t ret = *((volatile uint32_t *)(reg_addr));\n+\treturn rte_le_to_cpu_32(ret);\n+}\n+\n+/* Read a register of FPGA 5GNR FEC device */\n+static uint8_t\n+fpga_reg_read_8(void *mmio_base, uint32_t offset)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\treturn *((volatile uint8_t *)(reg_addr));\n+}\n+\n+/* Read a register of FPGA 5GNR FEC device */\n+static uint64_t\n+fpga_reg_read_64(void *mmio_base, uint32_t offset)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tuint64_t ret = 
*((volatile uint64_t *)(reg_addr));\n+\treturn rte_le_to_cpu_64(ret);\n+}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\n+/* Read a register of FPGA 5GNR FEC device */\n+static uint16_t\n+fpga_reg_read_16(void *mmio_base, uint32_t offset)\n+{\n+\tvoid *reg_addr = RTE_PTR_ADD(mmio_base, offset);\n+\tuint16_t ret = *((volatile uint16_t *)(reg_addr));\n+\treturn rte_le_to_cpu_16(ret);\n+}\n+\n+/* Read Ring Control Register of FPGA 5GNR FEC device */\n+static inline void\n+print_ring_reg_debug_info(void *mmio_base, uint32_t offset)\n+{\n+\trte_bbdev_log_debug(\n+\t\t\"FPGA MMIO base address @ %p | Ring Control Register @ offset = 0x%08\"\n+\t\tPRIx32, mmio_base, offset);\n+\trte_bbdev_log_debug(\n+\t\t\"RING_BASE_ADDR = 0x%016\"PRIx64,\n+\t\tfpga_reg_read_64(mmio_base, offset));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_HEAD_ADDR = 0x%016\"PRIx64,\n+\t\tfpga_reg_read_64(mmio_base, offset +\n+\t\t\t\tFPGA_5GNR_FEC_RING_HEAD_ADDR));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_SIZE = 0x%04\"PRIx16,\n+\t\tfpga_reg_read_16(mmio_base, offset +\n+\t\t\t\tFPGA_5GNR_FEC_RING_SIZE));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_MISC = 0x%02\"PRIx8,\n+\t\tfpga_reg_read_8(mmio_base, offset +\n+\t\t\t\tFPGA_5GNR_FEC_RING_MISC));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_ENABLE = 0x%02\"PRIx8,\n+\t\tfpga_reg_read_8(mmio_base, offset +\n+\t\t\t\tFPGA_5GNR_FEC_RING_ENABLE));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_FLUSH_QUEUE_EN = 0x%02\"PRIx8,\n+\t\tfpga_reg_read_8(mmio_base, offset +\n+\t\t\t\tFPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_SHADOW_TAIL = 0x%04\"PRIx16,\n+\t\tfpga_reg_read_16(mmio_base, offset +\n+\t\t\t\tFPGA_5GNR_FEC_RING_SHADOW_TAIL));\n+\trte_bbdev_log_debug(\n+\t\t\"RING_HEAD_POINT = 0x%04\"PRIx16,\n+\t\tfpga_reg_read_16(mmio_base, offset +\n+\t\t\t\tFPGA_5GNR_FEC_RING_HEAD_POINT));\n+}\n+\n+/* Read Static Register of FPGA 5GNR FEC device */\n+static inline void\n+print_static_reg_debug_info(void *mmio_base)\n+{\n+\tuint16_t config = 
fpga_reg_read_16(mmio_base,\n+\t\t\tFPGA_5GNR_FEC_CONFIGURATION);\n+\tuint8_t qmap_done = fpga_reg_read_8(mmio_base,\n+\t\t\tFPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE);\n+\tuint16_t lb_factor = fpga_reg_read_16(mmio_base,\n+\t\t\tFPGA_5GNR_FEC_LOAD_BALANCE_FACTOR);\n+\tuint16_t ring_desc_len = fpga_reg_read_16(mmio_base,\n+\t\t\tFPGA_5GNR_FEC_RING_DESC_LEN);\n+\tuint16_t flr_time_out = fpga_reg_read_16(mmio_base,\n+\t\t\tFPGA_5GNR_FEC_FLR_TIME_OUT);\n+\n+\trte_bbdev_log_debug(\"UL.DL Weights = %u.%u\",\n+\t\t\t((uint8_t)config), ((uint8_t)(config >> 8)));\n+\trte_bbdev_log_debug(\"UL.DL Load Balance = %u.%u\",\n+\t\t\t((uint8_t)lb_factor), ((uint8_t)(lb_factor >> 8)));\n+\trte_bbdev_log_debug(\"Queue-PF/VF Mapping Table = %s\",\n+\t\t\t(qmap_done > 0) ? \"READY\" : \"NOT-READY\");\n+\trte_bbdev_log_debug(\"Ring Descriptor Size = %u bytes\",\n+\t\t\tring_desc_len*FPGA_RING_DESC_LEN_UNIT_BYTES);\n+\trte_bbdev_log_debug(\"FLR Timeout = %f usec\",\n+\t\t\t(float)flr_time_out*FPGA_FLR_TIMEOUT_UNIT);\n+}\n+\n+/* Print decode DMA Descriptor of FPGA 5GNR Decoder device */\n+static void\n+print_dma_dec_desc_debug_info(union fpga_dma_desc *desc)\n+{\n+\trte_bbdev_log_debug(\"DMA response desc %p\\n\"\n+\t\t\"\\t-- done(%\"PRIu32\") | iter(%\"PRIu32\") | et_pass(%\"PRIu32\")\"\n+\t\t\" | crcb_pass (%\"PRIu32\") | error(%\"PRIu32\")\\n\"\n+\t\t\"\\t-- qm_idx(%\"PRIu32\") | max_iter(%\"PRIu32\") | \"\n+\t\t\"bg_idx (%\"PRIu32\") | harqin_en(%\"PRIu32\") | zc(%\"PRIu32\")\\n\"\n+\t\t\"\\t-- hbstroe_offset(%\"PRIu32\") | num_null (%\"PRIu32\") \"\n+\t\t\"| irq_en(%\"PRIu32\")\\n\"\n+\t\t\"\\t-- ncb(%\"PRIu32\") | desc_idx (%\"PRIu32\") | \"\n+\t\t\"drop_crc24b(%\"PRIu32\") | RV (%\"PRIu32\")\\n\"\n+\t\t\"\\t-- crc24b_ind(%\"PRIu32\") | et_dis (%\"PRIu32\")\\n\"\n+\t\t\"\\t-- harq_input_length(%\"PRIu32\") | rm_e(%\"PRIu32\")\\n\"\n+\t\t\"\\t-- cbs_in_op(%\"PRIu32\") | in_add (0x%08\"PRIx32\"%08\"PRIx32\")\"\n+\t\t\"| out_add 
(0x%08\"PRIx32\"%08\"PRIx32\")\",\n+\t\tdesc,\n+\t\t(uint32_t)desc->dec_req.done,\n+\t\t(uint32_t)desc->dec_req.iter,\n+\t\t(uint32_t)desc->dec_req.et_pass,\n+\t\t(uint32_t)desc->dec_req.crcb_pass,\n+\t\t(uint32_t)desc->dec_req.error,\n+\t\t(uint32_t)desc->dec_req.qm_idx,\n+\t\t(uint32_t)desc->dec_req.max_iter,\n+\t\t(uint32_t)desc->dec_req.bg_idx,\n+\t\t(uint32_t)desc->dec_req.harqin_en,\n+\t\t(uint32_t)desc->dec_req.zc,\n+\t\t(uint32_t)desc->dec_req.hbstroe_offset,\n+\t\t(uint32_t)desc->dec_req.num_null,\n+\t\t(uint32_t)desc->dec_req.irq_en,\n+\t\t(uint32_t)desc->dec_req.ncb,\n+\t\t(uint32_t)desc->dec_req.desc_idx,\n+\t\t(uint32_t)desc->dec_req.drop_crc24b,\n+\t\t(uint32_t)desc->dec_req.rv,\n+\t\t(uint32_t)desc->dec_req.crc24b_ind,\n+\t\t(uint32_t)desc->dec_req.et_dis,\n+\t\t(uint32_t)desc->dec_req.harq_input_length,\n+\t\t(uint32_t)desc->dec_req.rm_e,\n+\t\t(uint32_t)desc->dec_req.cbs_in_op,\n+\t\t(uint32_t)desc->dec_req.in_addr_hi,\n+\t\t(uint32_t)desc->dec_req.in_addr_lw,\n+\t\t(uint32_t)desc->dec_req.out_addr_hi,\n+\t\t(uint32_t)desc->dec_req.out_addr_lw);\n+\tuint32_t *word = (uint32_t *) desc;\n+\trte_bbdev_log_debug(\"%08\"PRIx32\"\\n%08\"PRIx32\"\\n%08\"PRIx32\"\\n%08\"PRIx32\"\\n\"\n+\t\t\t\"%08\"PRIx32\"\\n%08\"PRIx32\"\\n%08\"PRIx32\"\\n%08\"PRIx32\"\\n\",\n+\t\t\tword[0], word[1], word[2], word[3],\n+\t\t\tword[4], word[5], word[6], word[7]);\n+}\n+\n+/* Print decode DMA Descriptor of FPGA 5GNR encoder device */\n+static void\n+print_dma_enc_desc_debug_info(union fpga_dma_desc *desc)\n+{\n+\trte_bbdev_log_debug(\"DMA response desc %p\\n\"\n+\t\t\t\"%\"PRIu32\" %\"PRIu32\"\\n\"\n+\t\t\t\"K' %\"PRIu32\" E %\"PRIu32\" desc %\"PRIu32\" Z %\"PRIu32\"\\n\"\n+\t\t\t\"BG %\"PRIu32\" Qm %\"PRIu32\" CRC %\"PRIu32\" IRQ %\"PRIu32\"\\n\"\n+\t\t\t\"k0 %\"PRIu32\" Ncb %\"PRIu32\" F 
%\"PRIu32\"\\n\",\n+\t\t\tdesc,\n+\t\t\t(uint32_t)desc->enc_req.done,\n+\t\t\t(uint32_t)desc->enc_req.error,\n+\n+\t\t\t(uint32_t)desc->enc_req.k_,\n+\t\t\t(uint32_t)desc->enc_req.rm_e,\n+\t\t\t(uint32_t)desc->enc_req.desc_idx,\n+\t\t\t(uint32_t)desc->enc_req.zc,\n+\n+\t\t\t(uint32_t)desc->enc_req.bg_idx,\n+\t\t\t(uint32_t)desc->enc_req.qm_idx,\n+\t\t\t(uint32_t)desc->enc_req.crc_en,\n+\t\t\t(uint32_t)desc->enc_req.irq_en,\n+\n+\t\t\t(uint32_t)desc->enc_req.k0,\n+\t\t\t(uint32_t)desc->enc_req.ncb,\n+\t\t\t(uint32_t)desc->enc_req.num_null);\n+\tuint32_t *word = (uint32_t *) desc;\n+\trte_bbdev_log_debug(\"%08\"PRIx32\"\\n%08\"PRIx32\"\\n%08\"PRIx32\"\\n%08\"PRIx32\"\\n\"\n+\t\t\t\"%08\"PRIx32\"\\n%08\"PRIx32\"\\n%08\"PRIx32\"\\n%08\"PRIx32\"\\n\",\n+\t\t\tword[0], word[1], word[2], word[3],\n+\t\t\tword[4], word[5], word[6], word[7]);\n+}\n+\n+#endif\n+\n+static int\n+fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n+{\n+\t/* Number of queues bound to a PF/VF */\n+\tuint32_t hw_q_num = 0;\n+\tuint32_t ring_size, payload, address, q_id, offset;\n+\trte_iova_t phys_addr;\n+\tstruct fpga_ring_ctrl_reg ring_reg;\n+\tstruct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;\n+\n+\taddress = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;\n+\tif (!(fpga_reg_read_32(fpga_dev->mmio_base, address) & 0x1)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Queue-PF/VF mapping is not set! 
Was PF configured for device (%s) ?\",\n+\t\t\t\tdev->data->name);\n+\t\treturn -EPERM;\n+\t}\n+\n+\t/* Clear queue registers structure */\n+\tmemset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));\n+\n+\t/* Scan queue map.\n+\t * If a queue is valid and mapped to a calling PF/VF the read value is\n+\t * replaced with a queue ID and if it's not then\n+\t * FPGA_INVALID_HW_QUEUE_ID is returned.\n+\t */\n+\tfor (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {\n+\t\tuint32_t hw_q_id = fpga_reg_read_32(fpga_dev->mmio_base,\n+\t\t\t\tFPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));\n+\n+\t\trte_bbdev_log_debug(\"%s: queue ID: %u, registry queue ID: %u\",\n+\t\t\t\tdev->device->name, q_id, hw_q_id);\n+\n+\t\tif (hw_q_id != FPGA_INVALID_HW_QUEUE_ID) {\n+\t\t\tfpga_dev->q_bound_bit_map |= (1ULL << q_id);\n+\t\t\t/* Clear queue register of found queue */\n+\t\t\toffset = FPGA_5GNR_FEC_RING_CTRL_REGS +\n+\t\t\t\t(sizeof(struct fpga_ring_ctrl_reg) * q_id);\n+\t\t\tfpga_ring_reg_write(fpga_dev->mmio_base,\n+\t\t\t\t\toffset, ring_reg);\n+\t\t\t++hw_q_num;\n+\t\t}\n+\t}\n+\tif (hw_q_num == 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\"No HW queues assigned to this device. Probably this is a VF configured for PF mode. Check device configuration!\");\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tif (num_queues > hw_q_num) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\"Not enough queues for device %s! 
Requested: %u, available: %u\",\n+\t\t\tdev->device->name, num_queues, hw_q_num);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tring_size = FPGA_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);\n+\n+\t/* Enforce 32 byte alignment */\n+\tRTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);\n+\n+\t/* Allocate memory for SW descriptor rings */\n+\tfpga_dev->sw_rings = rte_zmalloc_socket(dev->device->driver->name,\n+\t\t\tnum_queues * ring_size, RTE_CACHE_LINE_SIZE,\n+\t\t\tsocket_id);\n+\tif (fpga_dev->sw_rings == NULL) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Failed to allocate memory for %s:%u sw_rings\",\n+\t\t\t\tdev->device->driver->name, dev->data->dev_id);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tfpga_dev->sw_rings_phys = rte_malloc_virt2iova(fpga_dev->sw_rings);\n+\tfpga_dev->sw_ring_size = ring_size;\n+\tfpga_dev->sw_ring_max_depth = FPGA_RING_MAX_SIZE;\n+\n+\t/* Allocate memory for ring flush status */\n+\tfpga_dev->flush_queue_status = rte_zmalloc_socket(NULL,\n+\t\t\tsizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (fpga_dev->flush_queue_status == NULL) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Failed to allocate memory for %s:%u flush_queue_status\",\n+\t\t\t\tdev->device->driver->name, dev->data->dev_id);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Set the flush status address registers */\n+\tphys_addr = rte_malloc_virt2iova(fpga_dev->flush_queue_status);\n+\n+\taddress = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW;\n+\tpayload = (uint32_t)(phys_addr);\n+\tfpga_reg_write_32(fpga_dev->mmio_base, address, payload);\n+\n+\taddress = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI;\n+\tpayload = (uint32_t)(phys_addr >> 32);\n+\tfpga_reg_write_32(fpga_dev->mmio_base, address, payload);\n+\n+\treturn 0;\n+}\n+\n+static int\n+fpga_dev_close(struct rte_bbdev *dev)\n+{\n+\tstruct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;\n+\n+\trte_free(fpga_dev->sw_rings);\n+\trte_free(fpga_dev->flush_queue_status);\n+\n+\treturn 0;\n+}\n+\n+static void\n+fpga_dev_info_get(struct rte_bbdev 
*dev,\n+\t\tstruct rte_bbdev_driver_info *dev_info)\n+{\n+\tstruct fpga_5gnr_fec_device *d = dev->data->dev_private;\n+\tuint32_t q_id = 0;\n+\n+\tstatic const struct rte_bbdev_op_cap bbdev_capabilities[] = {\n+\t\t{\n+\t\t\t.type   = RTE_BBDEV_OP_LDPC_ENC,\n+\t\t\t.cap.ldpc_enc = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_RATE_MATCH |\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_ENC_INTERRUPTS |\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_CRC_24B_ATTACH,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_dst =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t.type   = RTE_BBDEV_OP_LDPC_DEC,\n+\t\t.cap.ldpc_dec = {\n+\t\t\t.capability_flags =\n+\t\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |\n+\t\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |\n+\t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |\n+\t\t\t\tRTE_BBDEV_LDPC_DEC_INTERRUPTS |\n+\t\t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS,\n+\t\t\t.llr_size = 6,\n+\t\t\t.llr_decimals = 2,\n+\t\t\t.num_buffers_src =\n+\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t.num_buffers_hard_out =\n+\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t.num_buffers_soft_out = 0,\n+\t\t}\n+\t\t},\n+\t\tRTE_BBDEV_END_OF_CAPABILITIES_LIST()\n+\t};\n+\n+\t/* Check the HARQ DDR size available */\n+\tuint8_t timeout_counter = 0;\n+\tuint32_t harq_buf_ready = fpga_reg_read_32(d->mmio_base,\n+\t\t\tFPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);\n+\twhile (harq_buf_ready != 1) {\n+\t\tusleep(FPGA_TIMEOUT_CHECK_INTERVAL);\n+\t\ttimeout_counter++;\n+\t\tharq_buf_ready = fpga_reg_read_32(d->mmio_base,\n+\t\t\t\tFPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);\n+\t\tif (timeout_counter > FPGA_HARQ_RDY_TIMEOUT) {\n+\t\t\trte_bbdev_log(ERR, 
\"HARQ Buffer not ready %d\",\n+\t\t\t\t\tharq_buf_ready);\n+\t\t\tharq_buf_ready = 1;\n+\t\t}\n+\t}\n+\tuint32_t harq_buf_size = fpga_reg_read_32(d->mmio_base,\n+\t\t\tFPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);\n+\n+\tstatic struct rte_bbdev_queue_conf default_queue_conf;\n+\tdefault_queue_conf.socket = dev->data->socket_id;\n+\tdefault_queue_conf.queue_size = FPGA_RING_MAX_SIZE;\n+\n+\tdev_info->driver_name = dev->device->driver->name;\n+\tdev_info->queue_size_lim = FPGA_RING_MAX_SIZE;\n+\tdev_info->hardware_accelerated = true;\n+\tdev_info->min_alignment = 64;\n+\tdev_info->harq_buffer_size = (harq_buf_size >> 10) + 1;\n+\tdev_info->default_queue_conf = default_queue_conf;\n+\tdev_info->capabilities = bbdev_capabilities;\n+\tdev_info->cpu_flag_reqs = NULL;\n+\n+\t/* Calculates number of queues assigned to device */\n+\tdev_info->max_num_queues = 0;\n+\tfor (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {\n+\t\tuint32_t hw_q_id = fpga_reg_read_32(d->mmio_base,\n+\t\t\t\tFPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));\n+\t\tif (hw_q_id != FPGA_INVALID_HW_QUEUE_ID)\n+\t\t\tdev_info->max_num_queues++;\n+\t}\n+}\n+\n+/**\n+ * Find index of queue bound to current PF/VF which is unassigned. 
Return -1\n+ * when there is no available queue\n+ */\n+static int\n+fpga_find_free_queue_idx(struct rte_bbdev *dev,\n+\t\tconst struct rte_bbdev_queue_conf *conf)\n+{\n+\tstruct fpga_5gnr_fec_device *d = dev->data->dev_private;\n+\tuint64_t q_idx;\n+\tuint8_t i = 0;\n+\tuint8_t range = FPGA_TOTAL_NUM_QUEUES >> 1;\n+\n+\tif (conf->op_type == RTE_BBDEV_OP_LDPC_ENC) {\n+\t\ti = FPGA_NUM_DL_QUEUES;\n+\t\trange = FPGA_TOTAL_NUM_QUEUES;\n+\t}\n+\n+\tfor (; i < range; ++i) {\n+\t\tq_idx = 1ULL << i;\n+\t\t/* Check if index of queue is bound to current PF/VF */\n+\t\tif (d->q_bound_bit_map & q_idx)\n+\t\t\t/* Check if found queue was not already assigned */\n+\t\t\tif (!(d->q_assigned_bit_map & q_idx)) {\n+\t\t\t\td->q_assigned_bit_map |= q_idx;\n+\t\t\t\treturn i;\n+\t\t\t}\n+\t}\n+\n+\trte_bbdev_log(INFO, \"Failed to find free queue on %s\", dev->data->name);\n+\n+\treturn -1;\n+}\n+\n+static int\n+fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,\n+\t\tconst struct rte_bbdev_queue_conf *conf)\n+{\n+\tuint32_t address, ring_offset;\n+\tstruct fpga_5gnr_fec_device *d = dev->data->dev_private;\n+\tstruct fpga_queue *q;\n+\tint8_t q_idx;\n+\n+\t/* Check if there is a free queue to assign */\n+\tq_idx = fpga_find_free_queue_idx(dev, conf);\n+\tif (q_idx == -1)\n+\t\treturn -1;\n+\n+\t/* Allocate the queue data structure. 
*/\n+\tq = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),\n+\t\t\tRTE_CACHE_LINE_SIZE, conf->socket);\n+\tif (q == NULL) {\n+\t\t/* Mark queue as un-assigned */\n+\t\td->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));\n+\t\trte_bbdev_log(ERR, \"Failed to allocate queue memory\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tq->d = d;\n+\tq->q_idx = q_idx;\n+\n+\t/* Set ring_base_addr */\n+\tq->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));\n+\tq->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys +\n+\t\t\t(d->sw_ring_size * queue_id);\n+\n+\t/* Allocate memory for Completion Head variable*/\n+\tq->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,\n+\t\t\tsizeof(uint64_t), RTE_CACHE_LINE_SIZE, conf->socket);\n+\tif (q->ring_head_addr == NULL) {\n+\t\t/* Mark queue as un-assigned */\n+\t\td->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));\n+\t\trte_free(q);\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Failed to allocate memory for %s:%u completion_head\",\n+\t\t\t\tdev->device->driver->name, dev->data->dev_id);\n+\t\treturn -ENOMEM;\n+\t}\n+\t/* Set ring_head_addr */\n+\tq->ring_ctrl_reg.ring_head_addr =\n+\t\t\trte_malloc_virt2iova(q->ring_head_addr);\n+\n+\t/* Clear shadow_completion_head */\n+\tq->shadow_completion_head = 0;\n+\n+\t/* Set ring_size */\n+\tif (conf->queue_size > FPGA_RING_MAX_SIZE) {\n+\t\t/* Mark queue as un-assigned */\n+\t\td->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));\n+\t\trte_free(q->ring_head_addr);\n+\t\trte_free(q);\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Size of queue is too big %d (MAX: %d ) for %s:%u\",\n+\t\t\t\tconf->queue_size, FPGA_RING_MAX_SIZE,\n+\t\t\t\tdev->device->driver->name, dev->data->dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tq->ring_ctrl_reg.ring_size = conf->queue_size;\n+\n+\t/* Set Miscellaneous FPGA register*/\n+\t/* Max iteration number for TTI mitigation - todo */\n+\tq->ring_ctrl_reg.max_ul_dec = 0;\n+\t/* Enable max iteration number for TTI - todo 
*/\n+\tq->ring_ctrl_reg.max_ul_dec_en = 0;\n+\n+\t/* Enable the ring */\n+\tq->ring_ctrl_reg.enable = 1;\n+\n+\t/* Set FPGA head_point and tail registers */\n+\tq->ring_ctrl_reg.head_point = q->tail = 0;\n+\n+\t/* Set FPGA shadow_tail register */\n+\tq->ring_ctrl_reg.shadow_tail = q->tail;\n+\n+\t/* Calculates the ring offset for found queue */\n+\tring_offset = FPGA_5GNR_FEC_RING_CTRL_REGS +\n+\t\t\t(sizeof(struct fpga_ring_ctrl_reg) * q_idx);\n+\n+\t/* Set FPGA Ring Control Registers */\n+\tfpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);\n+\n+\t/* Store MMIO register of shadow_tail */\n+\taddress = ring_offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL;\n+\tq->shadow_tail_addr = RTE_PTR_ADD(d->mmio_base, address);\n+\n+\tq->head_free_desc = q->tail;\n+\n+\t/* Set wrap mask */\n+\tq->sw_ring_wrap_mask = conf->queue_size - 1;\n+\n+\trte_bbdev_log_debug(\"Setup dev%u q%u: queue_idx=%u\",\n+\t\t\tdev->data->dev_id, queue_id, q->q_idx);\n+\n+\tdev->data->queues[queue_id].queue_private = q;\n+\n+\trte_bbdev_log_debug(\"BBDEV queue[%d] set up for FPGA queue[%d]\",\n+\t\t\tqueue_id, q_idx);\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t/* Read FPGA Ring Control Registers after configuration*/\n+\tprint_ring_reg_debug_info(d->mmio_base, ring_offset);\n+#endif\n+\treturn 0;\n+}\n+\n+static int\n+fpga_queue_release(struct rte_bbdev *dev, uint16_t queue_id)\n+{\n+\tstruct fpga_5gnr_fec_device *d = dev->data->dev_private;\n+\tstruct fpga_queue *q = dev->data->queues[queue_id].queue_private;\n+\tstruct fpga_ring_ctrl_reg ring_reg;\n+\tuint32_t offset;\n+\n+\trte_bbdev_log_debug(\"FPGA Queue[%d] released\", queue_id);\n+\n+\tif (q != NULL) {\n+\t\tmemset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));\n+\t\toffset = FPGA_5GNR_FEC_RING_CTRL_REGS +\n+\t\t\t(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);\n+\t\t/* Disable queue */\n+\t\tfpga_reg_write_8(d->mmio_base,\n+\t\t\t\toffset + FPGA_5GNR_FEC_RING_ENABLE, 0x00);\n+\t\t/* Clear queue registers 
*/\n+\t\tfpga_ring_reg_write(d->mmio_base, offset, ring_reg);\n+\n+\t\t/* Mark the Queue as un-assigned */\n+\t\td->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q->q_idx));\n+\t\trte_free(q->ring_head_addr);\n+\t\trte_free(q);\n+\t\tdev->data->queues[queue_id].queue_private = NULL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Function starts a device queue. */\n+static int\n+fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)\n+{\n+\tstruct fpga_5gnr_fec_device *d = dev->data->dev_private;\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tif (d == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid device pointer\");\n+\t\treturn -1;\n+\t}\n+#endif\n+\tstruct fpga_queue *q = dev->data->queues[queue_id].queue_private;\n+\tuint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +\n+\t\t\t(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);\n+\tuint8_t enable = 0x01;\n+\tuint16_t zero = 0x0000;\n+\n+\t/* Clear queue head and tail variables */\n+\tq->tail = q->head_free_desc = 0;\n+\n+\t/* Clear FPGA head_point and tail registers */\n+\tfpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,\n+\t\t\tzero);\n+\tfpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,\n+\t\t\tzero);\n+\n+\t/* Enable queue */\n+\tfpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,\n+\t\t\tenable);\n+\n+\trte_bbdev_log_debug(\"FPGA Queue[%d] started\", queue_id);\n+\treturn 0;\n+}\n+\n+/* Function stops a device queue. 
*/\n+static int\n+fpga_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)\n+{\n+\tstruct fpga_5gnr_fec_device *d = dev->data->dev_private;\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tif (d == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid device pointer\");\n+\t\treturn -1;\n+\t}\n+#endif\n+\tstruct fpga_queue *q = dev->data->queues[queue_id].queue_private;\n+\tuint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +\n+\t\t\t(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);\n+\tuint8_t payload = 0x01;\n+\tuint8_t counter = 0;\n+\tuint8_t timeout = FPGA_QUEUE_FLUSH_TIMEOUT_US /\n+\t\t\tFPGA_TIMEOUT_CHECK_INTERVAL;\n+\n+\t/* Set flush_queue_en bit to trigger queue flushing */\n+\tfpga_reg_write_8(d->mmio_base,\n+\t\t\toffset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN, payload);\n+\n+\t/** Check if queue flush is completed.\n+\t * FPGA will update the completion flag after queue flushing is\n+\t * completed. If completion flag is not updated within 1ms it is\n+\t * considered as a failure.\n+\t */\n+\twhile (!(*((volatile uint8_t *)d->flush_queue_status + q->q_idx)\n+\t\t\t& payload)) {\n+\t\tif (counter > timeout) {\n+\t\t\trte_bbdev_log(ERR, \"FPGA Queue Flush failed for queue %d\",\n+\t\t\t\t\tqueue_id);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tusleep(FPGA_TIMEOUT_CHECK_INTERVAL);\n+\t\tcounter++;\n+\t}\n+\n+\t/* Disable queue */\n+\tpayload = 0x00;\n+\tfpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,\n+\t\t\tpayload);\n+\n+\trte_bbdev_log_debug(\"FPGA Queue[%d] stopped\", queue_id);\n+\treturn 0;\n+}\n+\n+static inline uint16_t\n+get_queue_id(struct rte_bbdev_data *data, uint8_t q_idx)\n+{\n+\tuint16_t queue_id;\n+\n+\tfor (queue_id = 0; queue_id < data->num_queues; ++queue_id) {\n+\t\tstruct fpga_queue *q = data->queues[queue_id].queue_private;\n+\t\tif (q != NULL && q->q_idx == q_idx)\n+\t\t\treturn queue_id;\n+\t}\n+\n+\treturn -1;\n+}\n+\n+/* Interrupt handler triggered by FPGA dev for handling specific interrupt */\n+static void\n+fpga_dev_interrupt_handler(void 
*cb_arg)\n+{\n+\tstruct rte_bbdev *dev = cb_arg;\n+\tstruct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;\n+\tstruct fpga_queue *q;\n+\tuint64_t ring_head;\n+\tuint64_t q_idx;\n+\tuint16_t queue_id;\n+\tuint8_t i;\n+\n+\t/* Scan queue assigned to this device */\n+\tfor (i = 0; i < FPGA_TOTAL_NUM_QUEUES; ++i) {\n+\t\tq_idx = 1ULL << i;\n+\t\tif (fpga_dev->q_bound_bit_map & q_idx) {\n+\t\t\tqueue_id = get_queue_id(dev->data, i);\n+\t\t\tif (queue_id == (uint16_t) -1)\n+\t\t\t\tcontinue;\n+\n+\t\t\t/* Check if completion head was changed */\n+\t\t\tq = dev->data->queues[queue_id].queue_private;\n+\t\t\tring_head = *q->ring_head_addr;\n+\t\t\tif (q->shadow_completion_head != ring_head &&\n+\t\t\t\tq->irq_enable == 1) {\n+\t\t\t\tq->shadow_completion_head = ring_head;\n+\t\t\t\trte_bbdev_pmd_callback_process(\n+\t\t\t\t\t\tdev,\n+\t\t\t\t\t\tRTE_BBDEV_EVENT_DEQUEUE,\n+\t\t\t\t\t\t&queue_id);\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+static int\n+fpga_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)\n+{\n+\tstruct fpga_queue *q = dev->data->queues[queue_id].queue_private;\n+\n+\tif (!rte_intr_cap_multiple(dev->intr_handle))\n+\t\treturn -ENOTSUP;\n+\n+\tq->irq_enable = 1;\n+\n+\treturn 0;\n+}\n+\n+static int\n+fpga_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)\n+{\n+\tstruct fpga_queue *q = dev->data->queues[queue_id].queue_private;\n+\tq->irq_enable = 0;\n+\n+\treturn 0;\n+}\n+\n+static int\n+fpga_intr_enable(struct rte_bbdev *dev)\n+{\n+\tint ret;\n+\tuint8_t i;\n+\n+\tif (!rte_intr_cap_multiple(dev->intr_handle)) {\n+\t\trte_bbdev_log(ERR, \"Multiple intr vector is not supported by FPGA (%s)\",\n+\t\t\t\tdev->data->name);\n+\t\treturn -ENOTSUP;\n+\t}\n+\n+\t/* Create event file descriptors for each of 64 queue. Event fds will be\n+\t * mapped to FPGA IRQs in rte_intr_enable(). 
This is a 1:1 mapping where\n+\t * the IRQ number is a direct translation to the queue number.\n+\t *\n+\t * 63 (FPGA_NUM_INTR_VEC) event fds are created as rte_intr_enable()\n+\t * mapped the first IRQ to already created interrupt event file\n+\t * descriptor (intr_handle->fd).\n+\t */\n+\tif (rte_intr_efd_enable(dev->intr_handle, FPGA_NUM_INTR_VEC)) {\n+\t\trte_bbdev_log(ERR, \"Failed to create fds for %u queues\",\n+\t\t\t\tdev->data->num_queues);\n+\t\treturn -1;\n+\t}\n+\n+\t/* TODO Each event file descriptor is overwritten by interrupt event\n+\t * file descriptor. That descriptor is added to epoll observed list.\n+\t * It ensures that callback function assigned to that descriptor will\n+\t * invoked when any FPGA queue issues interrupt.\n+\t */\n+\tfor (i = 0; i < FPGA_NUM_INTR_VEC; ++i)\n+\t\tdev->intr_handle->efds[i] = dev->intr_handle->fd;\n+\n+\tif (!dev->intr_handle->intr_vec) {\n+\t\tdev->intr_handle->intr_vec = rte_zmalloc(\"intr_vec\",\n+\t\t\t\tdev->data->num_queues * sizeof(int), 0);\n+\t\tif (!dev->intr_handle->intr_vec) {\n+\t\t\trte_bbdev_log(ERR, \"Failed to allocate %u vectors\",\n+\t\t\t\t\tdev->data->num_queues);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\tret = rte_intr_enable(dev->intr_handle);\n+\tif (ret < 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Couldn't enable interrupts for device: %s\",\n+\t\t\t\tdev->data->name);\n+\t\treturn ret;\n+\t}\n+\n+\tret = rte_intr_callback_register(dev->intr_handle,\n+\t\t\tfpga_dev_interrupt_handler, dev);\n+\tif (ret < 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Couldn't register interrupt callback for device: %s\",\n+\t\t\t\tdev->data->name);\n+\t\treturn ret;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static const struct rte_bbdev_ops fpga_ops = {\n+\t.setup_queues = fpga_setup_queues,\n+\t.intr_enable = fpga_intr_enable,\n+\t.close = fpga_dev_close,\n+\t.info_get = fpga_dev_info_get,\n+\t.queue_setup = fpga_queue_setup,\n+\t.queue_stop = fpga_queue_stop,\n+\t.queue_start = fpga_queue_start,\n+\t.queue_release = 
fpga_queue_release,\n+\t.queue_intr_enable = fpga_queue_intr_enable,\n+\t.queue_intr_disable = fpga_queue_intr_disable\n+};\n+\n+static inline void\n+fpga_dma_enqueue(struct fpga_queue *q, uint16_t num_desc,\n+\t\tstruct rte_bbdev_stats *queue_stats)\n+{\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+\tuint64_t start_time = 0;\n+\tqueue_stats->acc_offload_cycles = 0;\n+#else\n+\tRTE_SET_USED(queue_stats);\n+#endif\n+\n+\t/* Update tail and shadow_tail register */\n+\tq->tail = (q->tail + num_desc) & q->sw_ring_wrap_mask;\n+\n+\trte_wmb();\n+\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+\t/* Start time measurement for enqueue function offload. */\n+\tstart_time = rte_rdtsc_precise();\n+#endif\n+\tmmio_write_16(q->shadow_tail_addr, q->tail);\n+\n+#ifdef RTE_BBDEV_OFFLOAD_COST\n+\trte_wmb();\n+\tqueue_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;\n+#endif\n+}\n+\n+/* Read flag value 0/1/ from bitmap */\n+static inline bool\n+check_bit(uint32_t bitmap, uint32_t bitmask)\n+{\n+\treturn bitmap & bitmask;\n+}\n+\n+/* Print an error if a descriptor error has occurred.\n+ *  Return 0 on success, 1 on failure\n+ */\n+static inline int\n+check_desc_error(uint32_t error_code) {\n+\tswitch (error_code) {\n+\tcase DESC_ERR_NO_ERR:\n+\t\treturn 0;\n+\tcase DESC_ERR_K_P_OUT_OF_RANGE:\n+\t\trte_bbdev_log(ERR, \"Encode block size K' is out of range\");\n+\t\tbreak;\n+\tcase DESC_ERR_Z_C_NOT_LEGAL:\n+\t\trte_bbdev_log(ERR, \"Zc is illegal\");\n+\t\tbreak;\n+\tcase DESC_ERR_DESC_OFFSET_ERR:\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Queue offset does not meet the expectation in the FPGA\"\n+\t\t\t\t);\n+\t\tbreak;\n+\tcase DESC_ERR_DESC_READ_FAIL:\n+\t\trte_bbdev_log(ERR, \"Unsuccessful completion for descriptor read\");\n+\t\tbreak;\n+\tcase DESC_ERR_DESC_READ_TIMEOUT:\n+\t\trte_bbdev_log(ERR, \"Descriptor read time-out\");\n+\t\tbreak;\n+\tcase DESC_ERR_DESC_READ_TLP_POISONED:\n+\t\trte_bbdev_log(ERR, \"Descriptor read TLP poisoned\");\n+\t\tbreak;\n+\tcase 
DESC_ERR_CB_READ_FAIL:\n+\t\trte_bbdev_log(ERR, \"Unsuccessful completion for code block\");\n+\t\tbreak;\n+\tcase DESC_ERR_CB_READ_TIMEOUT:\n+\t\trte_bbdev_log(ERR, \"Code block read time-out\");\n+\t\tbreak;\n+\tcase DESC_ERR_CB_READ_TLP_POISONED:\n+\t\trte_bbdev_log(ERR, \"Code block read TLP poisoned\");\n+\t\tbreak;\n+\tcase DESC_ERR_HBSTORE_ERR:\n+\t\trte_bbdev_log(ERR, \"Hbstroe exceeds HARQ buffer size.\");\n+\t\tbreak;\n+\tdefault:\n+\t\trte_bbdev_log(ERR, \"Descriptor error unknown error code %u\",\n+\t\t\t\terror_code);\n+\t\tbreak;\n+\t}\n+\treturn 1;\n+}\n+\n+/* Compute value of k0.\n+ * Based on 3GPP 38.212 Table 5.4.2.1-2\n+ * Starting position of different redundancy versions, k0\n+ */\n+static inline uint16_t\n+get_k0(uint16_t n_cb, uint16_t z_c, uint8_t basegraph, uint8_t rv_index)\n+{\n+\tif (rv_index == 0)\n+\t\treturn 0;\n+\tuint16_t n = (basegraph == 1 ? 66 : 50) * z_c;\n+\tif (n_cb == n) {\n+\t\tif (rv_index == 1)\n+\t\t\treturn (basegraph == 1 ? 17 : 13) * z_c;\n+\t\telse if (rv_index == 2)\n+\t\t\treturn (basegraph == 1 ? 33 : 25) * z_c;\n+\t\telse\n+\t\t\treturn (basegraph == 1 ? 56 : 43) * z_c;\n+\t} else {\n+\t\t/* LBRM case - includes a division by N */\n+\t\tif (rv_index == 1)\n+\t\t\treturn (((basegraph == 1 ? 17 : 13) * n_cb)\n+\t\t\t\t\t/ n) * z_c;\n+\t\telse if (rv_index == 2)\n+\t\t\treturn (((basegraph == 1 ? 33 : 25) * n_cb)\n+\t\t\t\t\t/ n) * z_c;\n+\t\telse\n+\t\t\treturn (((basegraph == 1 ? 
56 : 43) * n_cb)\n+\t\t\t\t\t/ n) * z_c;\n+\t}\n+}\n+\n+\n+/**\n+ * Set DMA descriptor for encode operation (1 Code Block)\n+ *\n+ * @param op\n+ *   Pointer to a single encode operation.\n+ * @param desc\n+ *   Pointer to DMA descriptor.\n+ * @param input\n+ *   Pointer to pointer to input data which will be decoded.\n+ * @param e\n+ *   E value (length of output in bits).\n+ * @param ncb\n+ *   Ncb value (size of the soft buffer).\n+ * @param out_length\n+ *   Length of output buffer\n+ * @param in_offset\n+ *   Input offset in rte_mbuf structure. It is used for calculating the point\n+ *   where data is starting.\n+ * @param out_offset\n+ *   Output offset in rte_mbuf structure. It is used for calculating the point\n+ *   where hard output data will be stored.\n+ * @param cbs_in_op\n+ *   Number of CBs contained in one operation.\n+ */\n+static inline int\n+fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,\n+\t\tstruct fpga_dma_enc_desc *desc, struct rte_mbuf *input,\n+\t\tstruct rte_mbuf *output, uint16_t k_,  uint16_t e,\n+\t\tuint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,\n+\t\tuint8_t cbs_in_op)\n+{\n+\t/* reset */\n+\tdesc->done = 0;\n+\tdesc->error = 0;\n+\tdesc->k_ = k_;\n+\tdesc->rm_e = e;\n+\tdesc->desc_idx = desc_offset;\n+\tdesc->zc = op->ldpc_enc.z_c;\n+\tdesc->bg_idx = op->ldpc_enc.basegraph - 1;\n+\tdesc->qm_idx = op->ldpc_enc.q_m / 2;\n+\tdesc->crc_en = check_bit(op->ldpc_enc.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_CRC_24B_ATTACH);\n+\tdesc->irq_en = 0;\n+\tdesc->k0 = get_k0(op->ldpc_enc.n_cb, op->ldpc_enc.z_c,\n+\t\t\top->ldpc_enc.basegraph, op->ldpc_enc.rv_index);\n+\tdesc->ncb = op->ldpc_enc.n_cb;\n+\tdesc->num_null = op->ldpc_enc.n_filler;\n+\t/* Set inbound data buffer address */\n+\tdesc->in_addr_hi = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(input, in_offset) >> 32);\n+\tdesc->in_addr_lw = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(input, in_offset));\n+\n+\tdesc->out_addr_hi = 
(uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(output, out_offset) >> 32);\n+\tdesc->out_addr_lw = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(output, out_offset));\n+\t/* Save software context needed for dequeue */\n+\tdesc->op_addr = op;\n+\t/* Set total number of CBs in an op */\n+\tdesc->cbs_in_op = cbs_in_op;\n+\treturn 0;\n+}\n+\n+/**\n+ * Set DMA descriptor for decode operation (1 Code Block)\n+ *\n+ * @param op\n+ *   Pointer to a single encode operation.\n+ * @param desc\n+ *   Pointer to DMA descriptor.\n+ * @param input\n+ *   Pointer to pointer to input data which will be decoded.\n+ * @param in_offset\n+ *   Input offset in rte_mbuf structure. It is used for calculating the point\n+ *   where data is starting.\n+ * @param out_offset\n+ *   Output offset in rte_mbuf structure. It is used for calculating the point\n+ *   where hard output data will be stored.\n+ * @param cbs_in_op\n+ *   Number of CBs contained in one operation.\n+ */\n+static inline int\n+fpga_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n+\t\tstruct fpga_dma_dec_desc *desc,\n+\t\tstruct rte_mbuf *input,\tstruct rte_mbuf *output,\n+\t\tuint16_t harq_in_length,\n+\t\tuint32_t in_offset, uint32_t out_offset,\n+\t\tuint32_t harq_offset,\n+\t\tuint16_t desc_offset,\n+\t\tuint8_t cbs_in_op)\n+{\n+\t/* reset */\n+\tdesc->done = 0;\n+\tdesc->error = 0;\n+\t/* Set inbound data buffer address */\n+\tdesc->in_addr_hi = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(input, in_offset) >> 32);\n+\tdesc->in_addr_lw = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(input, in_offset));\n+\tdesc->rm_e = op->ldpc_dec.cb_params.e;\n+\tdesc->harq_input_length = harq_in_length;\n+\tdesc->et_dis = !check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);\n+\tdesc->rv = op->ldpc_dec.rv_index;\n+\tdesc->crc24b_ind = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);\n+\tdesc->drop_crc24b = 
check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_DROP);\n+\tdesc->desc_idx = desc_offset;\n+\tdesc->ncb = op->ldpc_dec.n_cb;\n+\tdesc->num_null = op->ldpc_dec.n_filler;\n+\tdesc->hbstroe_offset = harq_offset >> 10;\n+\tdesc->zc = op->ldpc_dec.z_c;\n+\tdesc->harqin_en = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);\n+\tdesc->bg_idx = op->ldpc_dec.basegraph - 1;\n+\tdesc->max_iter = op->ldpc_dec.iter_max;\n+\tdesc->qm_idx = op->ldpc_dec.q_m / 2;\n+\tdesc->out_addr_hi = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(output, out_offset) >> 32);\n+\tdesc->out_addr_lw = (uint32_t)(\n+\t\t\trte_pktmbuf_mtophys_offset(output, out_offset));\n+\t/* Save software context needed for dequeue */\n+\tdesc->op_addr = op;\n+\t/* Set total number of CBs in an op */\n+\tdesc->cbs_in_op = cbs_in_op;\n+\n+\treturn 0;\n+}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+/* Validates LDPC encoder parameters */\n+static int\n+validate_enc_op(struct rte_bbdev_enc_op *op __rte_unused)\n+{\n+\tstruct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;\n+\tstruct rte_bbdev_op_enc_ldpc_cb_params *cb = NULL;\n+\tstruct rte_bbdev_op_enc_ldpc_tb_params *tb = NULL;\n+\n+\n+\tif (ldpc_enc->input.length >\n+\t\t\tRTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) {\n+\t\trte_bbdev_log(ERR, \"CB size (%u) is too big, max: %d\",\n+\t\t\t\tldpc_enc->input.length,\n+\t\t\t\tRTE_BBDEV_LDPC_MAX_CB_SIZE);\n+\t\treturn -1;\n+\t}\n+\n+\tif (op->mempool == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid mempool pointer\");\n+\t\treturn -1;\n+\t}\n+\tif (ldpc_enc->input.data == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid input pointer\");\n+\t\treturn -1;\n+\t}\n+\tif (ldpc_enc->output.data == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid output pointer\");\n+\t\treturn -1;\n+\t}\n+\tif ((ldpc_enc->basegraph > 2) || (ldpc_enc->basegraph == 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"basegraph (%u) is out of range 1 <= value <= 2\",\n+\t\t\t\tldpc_enc->basegraph);\n+\t\treturn -1;\n+\t}\n+\tif 
(ldpc_enc->code_block_mode > 1) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"code_block_mode (%u) is out of range 0:Tb 1:CB\",\n+\t\t\t\tldpc_enc->code_block_mode);\n+\t\treturn -1;\n+\t}\n+\n+\tif (ldpc_enc->code_block_mode == 0) {\n+\t\ttb = &ldpc_enc->tb_params;\n+\t\tif (tb->c == 0) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"c (%u) is out of range 1 <= value <= %u\",\n+\t\t\t\t\ttb->c, RTE_BBDEV_LDPC_MAX_CODE_BLOCKS);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (tb->cab > tb->c) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"cab (%u) is greater than c (%u)\",\n+\t\t\t\t\ttb->cab, tb->c);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif ((tb->ea < RTE_BBDEV_LDPC_MIN_CB_SIZE)\n+\t\t\t\t&& tb->r < tb->cab) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"ea (%u) is less than %u or it is not even\",\n+\t\t\t\t\ttb->ea, RTE_BBDEV_LDPC_MIN_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif ((tb->eb < RTE_BBDEV_LDPC_MIN_CB_SIZE)\n+\t\t\t\t&& tb->c > tb->cab) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"eb (%u) is less than %u\",\n+\t\t\t\t\ttb->eb, RTE_BBDEV_LDPC_MIN_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (tb->r > (tb->c - 1)) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"r (%u) is greater than c - 1 (%u)\",\n+\t\t\t\t\ttb->r, tb->c - 1);\n+\t\t\treturn -1;\n+\t\t}\n+\t} else {\n+\t\tcb = &ldpc_enc->cb_params;\n+\t\tif (cb->e < RTE_BBDEV_LDPC_MIN_CB_SIZE) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"e (%u) is less than %u or it is not even\",\n+\t\t\t\t\tcb->e, RTE_BBDEV_LDPC_MIN_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+#endif\n+\n+static inline char *\n+mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)\n+{\n+\tif (unlikely(len > rte_pktmbuf_tailroom(m)))\n+\t\treturn NULL;\n+\n+\tchar *tail = (char *)m->buf_addr + m->data_off + m->data_len;\n+\tm->data_len = (uint16_t)(m->data_len + len);\n+\tm_head->pkt_len  = (m_head->pkt_len + len);\n+\treturn tail;\n+}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+/* Validates LDPC decoder parameters */\n+static 
int\n+validate_dec_op(struct rte_bbdev_dec_op *op __rte_unused)\n+{\n+\tstruct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;\n+\tstruct rte_bbdev_op_dec_ldpc_cb_params *cb = NULL;\n+\tstruct rte_bbdev_op_dec_ldpc_tb_params *tb = NULL;\n+\n+\tif (op->mempool == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid mempool pointer\");\n+\t\treturn -1;\n+\t}\n+\tif (ldpc_dec->rv_index > 3) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"rv_index (%u) is out of range 0 <= value <= 3\",\n+\t\t\t\tldpc_dec->rv_index);\n+\t\treturn -1;\n+\t}\n+\n+\tif (ldpc_dec->iter_max == 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"iter_max (%u) is equal to 0\",\n+\t\t\t\tldpc_dec->iter_max);\n+\t\treturn -1;\n+\t}\n+\n+\tif (ldpc_dec->code_block_mode > 1) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"code_block_mode (%u) is out of range 0 <= value <= 1\",\n+\t\t\t\tldpc_dec->code_block_mode);\n+\t\treturn -1;\n+\t}\n+\n+\tif (ldpc_dec->code_block_mode == 0) {\n+\t\ttb = &ldpc_dec->tb_params;\n+\t\tif (tb->c < 1) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"c (%u) is out of range 1 <= value <= %u\",\n+\t\t\t\t\ttb->c, RTE_BBDEV_LDPC_MAX_CODE_BLOCKS);\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (tb->cab > tb->c) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"cab (%u) is greater than c (%u)\",\n+\t\t\t\t\ttb->cab, tb->c);\n+\t\t\treturn -1;\n+\t\t}\n+\t} else {\n+\t\tcb = &ldpc_dec->cb_params;\n+\t\tif (cb->e < RTE_BBDEV_LDPC_MIN_CB_SIZE) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"e (%u) is out of range %u <= value <= %u\",\n+\t\t\t\t\tcb->e, RTE_BBDEV_LDPC_MIN_CB_SIZE,\n+\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CB_SIZE);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+#endif\n+\n+static inline int\n+fpga_harq_write_loopback(struct fpga_5gnr_fec_device *fpga_dev,\n+\t\tstruct rte_mbuf *harq_input, uint16_t harq_in_length,\n+\t\tuint32_t harq_in_offset, uint32_t harq_out_offset)\n+{\n+\tuint32_t out_offset = harq_out_offset;\n+\tuint32_t in_offset = harq_in_offset;\n+\tuint32_t left_length = harq_in_length;\n+\tuint32_t reg_32, 
increment = 0;\n+\tuint64_t *input = NULL;\n+\tuint32_t last_transaction = left_length\n+\t\t\t% FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;\n+\tuint64_t last_word;\n+\n+\tif (last_transaction > 0)\n+\t\tleft_length -= last_transaction;\n+\n+\t/*\n+\t * Get HARQ buffer size for each VF/PF: When 0x00, there is no\n+\t * available DDR space for the corresponding VF/PF.\n+\t */\n+\treg_32 = fpga_reg_read_32(fpga_dev->mmio_base,\n+\t\t\tFPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);\n+\tif (reg_32 < harq_in_length) {\n+\t\tleft_length = reg_32;\n+\t\trte_bbdev_log(ERR, \"HARQ in length > HARQ buffer size\\n\");\n+\t}\n+\n+\tinput = (uint64_t *)rte_pktmbuf_mtod_offset(harq_input,\n+\t\t\tuint8_t *, in_offset);\n+\n+\twhile (left_length > 0) {\n+\t\tif (fpga_reg_read_8(fpga_dev->mmio_base,\n+\t\t\t\tFPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) ==  1) {\n+\t\t\tfpga_reg_write_32(fpga_dev->mmio_base,\n+\t\t\t\t\tFPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,\n+\t\t\t\t\tout_offset);\n+\t\t\tfpga_reg_write_64(fpga_dev->mmio_base,\n+\t\t\t\t\tFPGA_5GNR_FEC_DDR4_WR_DATA_REGS,\n+\t\t\t\t\tinput[increment]);\n+\t\t\tleft_length -= FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;\n+\t\t\tout_offset += FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;\n+\t\t\tincrement++;\n+\t\t\tfpga_reg_write_8(fpga_dev->mmio_base,\n+\t\t\t\t\tFPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);\n+\t\t}\n+\t}\n+\twhile (last_transaction > 0) {\n+\t\tif (fpga_reg_read_8(fpga_dev->mmio_base,\n+\t\t\t\tFPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) ==  1) {\n+\t\t\tfpga_reg_write_32(fpga_dev->mmio_base,\n+\t\t\t\t\tFPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,\n+\t\t\t\t\tout_offset);\n+\t\t\tlast_word = input[increment];\n+\t\t\tlast_word &= (uint64_t)(1 << (last_transaction * 4))\n+\t\t\t\t\t- 1;\n+\t\t\tfpga_reg_write_64(fpga_dev->mmio_base,\n+\t\t\t\t\tFPGA_5GNR_FEC_DDR4_WR_DATA_REGS,\n+\t\t\t\t\tlast_word);\n+\t\t\tfpga_reg_write_8(fpga_dev->mmio_base,\n+\t\t\t\t\tFPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);\n+\t\t\tlast_transaction = 0;\n+\t\t}\n+\t}\n+\treturn 1;\n+}\n+\n+static inline 
int\n+fpga_harq_read_loopback(struct fpga_5gnr_fec_device *fpga_dev,\n+\t\tstruct rte_mbuf *harq_output, uint16_t harq_in_length,\n+\t\tuint32_t harq_in_offset, uint32_t harq_out_offset)\n+{\n+\tuint32_t left_length, in_offset = harq_in_offset;\n+\tuint64_t reg;\n+\tuint32_t increment = 0;\n+\tuint64_t *input = NULL;\n+\tuint32_t last_transaction = harq_in_length\n+\t\t\t% FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;\n+\n+\tif (last_transaction > 0)\n+\t\tharq_in_length += (8 - last_transaction);\n+\n+\treg = fpga_reg_read_32(fpga_dev->mmio_base,\n+\t\t\tFPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);\n+\tif (reg < harq_in_length) {\n+\t\tharq_in_length = reg;\n+\t\trte_bbdev_log(ERR, \"HARQ in length > HARQ buffer size\\n\");\n+\t}\n+\n+\tif (!mbuf_append(harq_output, harq_output, harq_in_length)) {\n+\t\trte_bbdev_log(ERR, \"HARQ output buffer warning %d %d\\n\",\n+\t\t\t\tharq_output->buf_len -\n+\t\t\t\trte_pktmbuf_headroom(harq_output),\n+\t\t\t\tharq_in_length);\n+\t\tharq_in_length = harq_output->buf_len -\n+\t\t\t\trte_pktmbuf_headroom(harq_output);\n+\t\tif (!mbuf_append(harq_output, harq_output, harq_in_length)) {\n+\t\t\trte_bbdev_log(ERR, \"HARQ output buffer issue %d %d\\n\",\n+\t\t\t\t\tharq_output->buf_len, harq_in_length);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\tleft_length = harq_in_length;\n+\n+\tinput = (uint64_t *)rte_pktmbuf_mtod_offset(harq_output,\n+\t\t\tuint8_t *, harq_out_offset);\n+\n+\twhile (left_length > 0) {\n+\t\tfpga_reg_write_32(fpga_dev->mmio_base,\n+\t\t\tFPGA_5GNR_FEC_DDR4_RD_ADDR_REGS, in_offset);\n+\t\tfpga_reg_write_8(fpga_dev->mmio_base,\n+\t\t\t\tFPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 1);\n+\t\treg = fpga_reg_read_8(fpga_dev->mmio_base,\n+\t\t\tFPGA_5GNR_FEC_DDR4_RD_RDY_REGS);\n+\t\twhile (reg != 1) {\n+\t\t\treg = fpga_reg_read_8(fpga_dev->mmio_base,\n+\t\t\t\tFPGA_5GNR_FEC_DDR4_RD_RDY_REGS);\n+\t\t\tif (reg == FPGA_DDR_OVERFLOW) {\n+\t\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\t\"Read address is overflow!\\n\");\n+\t\t\t\treturn 
-1;\n+\t\t\t}\n+\t\t}\n+\t\tinput[increment] = fpga_reg_read_64(fpga_dev->mmio_base,\n+\t\t\tFPGA_5GNR_FEC_DDR4_RD_DATA_REGS);\n+\t\tleft_length -= FPGA_5GNR_FEC_DDR_RD_DATA_LEN_IN_BYTES;\n+\t\tin_offset += FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;\n+\t\tincrement++;\n+\t\tfpga_reg_write_8(fpga_dev->mmio_base,\n+\t\t\t\tFPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 0);\n+\t}\n+\treturn 1;\n+}\n+\n+static inline int\n+enqueue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t desc_offset)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tint ret;\n+\tuint8_t c, crc24_bits = 0;\n+\tstruct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;\n+\tuint16_t in_offset = enc->input.offset;\n+\tuint16_t out_offset = enc->output.offset;\n+\tstruct rte_mbuf *m_in = enc->input.data;\n+\tstruct rte_mbuf *m_out = enc->output.data;\n+\tstruct rte_mbuf *m_out_head = enc->output.data;\n+\tuint32_t in_length, out_length, e;\n+\tuint16_t total_left = enc->input.length;\n+\tuint16_t ring_offset;\n+\tuint16_t K, k_;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t/* Validate op structure */\n+\t/* FIXME */\n+\tif (validate_enc_op(op) == -1) {\n+\t\trte_bbdev_log(ERR, \"LDPC encoder validation failed\");\n+\t\treturn -EINVAL;\n+\t}\n+#endif\n+\n+\t/* Clear op status */\n+\top->status = 0;\n+\n+\tif (m_in == NULL || m_out == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid mbuf pointer\");\n+\t\top->status = 1 << RTE_BBDEV_DATA_ERROR;\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH)\n+\t\tcrc24_bits = 24;\n+\n+\tif (enc->code_block_mode == 0) {\n+\t\t/* For Transport Block mode */\n+\t\t/* FIXME */\n+\t\tc = enc->tb_params.c;\n+\t\te = enc->tb_params.ea;\n+\t} else { /* For Code Block mode */\n+\t\tc = 1;\n+\t\te = enc->cb_params.e;\n+\t}\n+\n+\t/* Update total_left */\n+\tK = (enc->basegraph == 1 ? 
22 : 10) * enc->z_c;\n+\tk_ = K - enc->n_filler;\n+\tin_length = (k_ - crc24_bits) >> 3;\n+\tout_length = (e + 7) >> 3;\n+\n+\ttotal_left = rte_pktmbuf_data_len(m_in) - in_offset;\n+\n+\t/* Update offsets */\n+\tif (total_left != in_length) {\n+\t\top->status |= 1 << RTE_BBDEV_DATA_ERROR;\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between mbuf length and included CBs sizes %d\",\n+\t\t\t\ttotal_left);\n+\t}\n+\n+\tmbuf_append(m_out_head, m_out, out_length);\n+\n+\t/* Offset into the ring */\n+\tring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);\n+\t/* Setup DMA Descriptor */\n+\tdesc = q->ring_addr + ring_offset;\n+\n+\tret = fpga_dma_desc_te_fill(op, &desc->enc_req, m_in, m_out,\n+\t\t\tk_, e, in_offset, out_offset, ring_offset, c);\n+\tif (unlikely(ret < 0))\n+\t\treturn ret;\n+\n+\t/* Update lengths */\n+\ttotal_left -= in_length;\n+\top->ldpc_enc.output.length += out_length;\n+\n+\tif (total_left > 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\ttotal_left, in_length);\n+\t\treturn -1;\n+\t}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tprint_dma_enc_desc_debug_info(desc);\n+#endif\n+\treturn 1;\n+}\n+\n+static inline int\n+enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,\n+\t\tuint16_t desc_offset)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tint ret;\n+\tuint16_t ring_offset;\n+\tuint8_t c;\n+\tuint16_t e, in_length, out_length, k0, l, seg_total_left, sys_cols;\n+\tuint16_t K, parity_offset, harq_in_length = 0, harq_out_length = 0;\n+\tuint16_t crc24_overlap = 0;\n+\tstruct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;\n+\tstruct rte_mbuf *m_in = dec->input.data;\n+\tstruct rte_mbuf *m_out = dec->hard_output.data;\n+\tstruct rte_mbuf *m_out_head = dec->hard_output.data;\n+\tuint16_t in_offset = dec->input.offset;\n+\tuint16_t out_offset = dec->hard_output.offset;\n+\tuint32_t harq_offset = 0;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\t/* Validate op 
structure */\n+\t\tif (validate_dec_op(op) == -1) {\n+\t\t\trte_bbdev_log(ERR, \"LDPC decoder validation failed\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+#endif\n+\n+\t/* Clear op status */\n+\top->status = 0;\n+\n+\t/* Setup DMA Descriptor */\n+\tring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + ring_offset;\n+\n+\tif (check_bit(dec->op_flags,\n+\t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {\n+\t\tstruct rte_mbuf *harq_in = dec->harq_combined_input.data;\n+\t\tstruct rte_mbuf *harq_out = dec->harq_combined_output.data;\n+\t\tharq_in_length = dec->harq_combined_input.length;\n+\t\tuint32_t harq_in_offset = dec->harq_combined_input.offset;\n+\t\tuint32_t harq_out_offset = dec->harq_combined_output.offset;\n+\n+\t\tif (check_bit(dec->op_flags,\n+\t\t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE\n+\t\t\t\t)) {\n+\t\t\tret = fpga_harq_write_loopback(q->d, harq_in,\n+\t\t\t\t\tharq_in_length, harq_in_offset,\n+\t\t\t\t\tharq_out_offset);\n+\t\t} else if (check_bit(dec->op_flags,\n+\t\t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE\n+\t\t\t\t)) {\n+\t\t\tret = fpga_harq_read_loopback(q->d, harq_out,\n+\t\t\t\tharq_in_length, harq_in_offset,\n+\t\t\t\tharq_out_offset);\n+\t\t\tdec->harq_combined_output.length = harq_in_length;\n+\t\t} else {\n+\t\t\trte_bbdev_log(ERR, \"OP flag Err!\");\n+\t\t\tret = -1;\n+\t\t}\n+\t\t/* Set descriptor for dequeue */\n+\t\tdesc->dec_req.done = 1;\n+\t\tdesc->dec_req.error = 0;\n+\t\tdesc->dec_req.op_addr = op;\n+\t\tdesc->dec_req.cbs_in_op = 1;\n+\t\t/* Mark this dummy descriptor to be dropped by HW */\n+\t\tdesc->dec_req.desc_idx = (ring_offset + 1)\n+\t\t\t\t& q->sw_ring_wrap_mask;\n+\t\treturn ret; /* Error or number of CB */\n+\t}\n+\n+\tif (m_in == NULL || m_out == NULL) {\n+\t\trte_bbdev_log(ERR, \"Invalid mbuf pointer\");\n+\t\top->status = 1 << RTE_BBDEV_DATA_ERROR;\n+\t\treturn -1;\n+\t}\n+\n+\tc = 1;\n+\te = dec->cb_params.e;\n+\n+\tif (check_bit(dec->op_flags, 
RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))\n+\t\tcrc24_overlap = 24;\n+\n+\tsys_cols = (dec->basegraph == 1) ? 22 : 10;\n+\tK = sys_cols * dec->z_c;\n+\tparity_offset = K - 2 * dec->z_c;\n+\n+\tout_length = ((K - crc24_overlap - dec->n_filler) >> 3);\n+\tin_length = e;\n+\tseg_total_left = dec->input.length;\n+\n+\tif (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {\n+\t\tharq_in_length = RTE_MIN(dec->harq_combined_input.length,\n+\t\t\t\t(uint32_t)dec->n_cb);\n+\t}\n+\n+\tif (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {\n+\t\tk0 = get_k0(dec->n_cb, dec->z_c,\n+\t\t\t\tdec->basegraph, dec->rv_index);\n+\t\tif (k0 > parity_offset)\n+\t\t\tl = k0 + e;\n+\t\telse\n+\t\t\tl = k0 + e + dec->n_filler;\n+\t\tharq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l),\n+\t\t\t\tdec->n_cb - dec->n_filler);\n+\t\tdec->harq_combined_output.length = harq_out_length;\n+\t}\n+\n+\tmbuf_append(m_out_head, m_out, out_length);\n+\tif (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))\n+\t\tharq_offset = dec->harq_combined_input.offset;\n+\telse if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))\n+\t\tharq_offset = dec->harq_combined_output.offset;\n+\n+\tif ((harq_offset & 0x3FF) > 0) {\n+\t\trte_bbdev_log(ERR, \"Invalid HARQ offset %d\", harq_offset);\n+\t\top->status = 1 << RTE_BBDEV_DATA_ERROR;\n+\t\treturn -1;\n+\t}\n+\n+\tret = fpga_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,\n+\t\tharq_in_length, in_offset, out_offset, harq_offset,\n+\t\tring_offset, c);\n+\tif (unlikely(ret < 0))\n+\t\treturn ret;\n+\t/* Update lengths */\n+\tseg_total_left -= in_length;\n+\top->ldpc_dec.hard_output.length += out_length;\n+\tif (seg_total_left > 0) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\tseg_total_left, in_length);\n+\t\treturn -1;\n+\t}\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tprint_dma_dec_desc_debug_info(desc);\n+#endif\n+\n+\treturn 
1;\n+}\n+\n+static uint16_t\n+fpga_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tuint16_t i, total_enqueued_cbs = 0;\n+\tint32_t avail;\n+\tint enqueued_cbs;\n+\tstruct fpga_queue *q = q_data->queue_private;\n+\tunion fpga_dma_desc *desc;\n+\n+\t/* Check if queue is not full */\n+\tif (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==\n+\t\t\tq->head_free_desc))\n+\t\treturn 0;\n+\n+\t/* Calculates available space */\n+\tavail = (q->head_free_desc > q->tail) ?\n+\t\tq->head_free_desc - q->tail - 1 :\n+\t\tq->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\n+\t\t/* Check if there is available space for further\n+\t\t * processing\n+\t\t */\n+\t\tif (unlikely(avail - 1 < 0))\n+\t\t\tbreak;\n+\t\tavail -= 1;\n+\t\tenqueued_cbs = enqueue_ldpc_enc_one_op_cb(q, ops[i],\n+\t\t\t\ttotal_enqueued_cbs);\n+\n+\t\tif (enqueued_cbs < 0)\n+\t\t\tbreak;\n+\n+\t\ttotal_enqueued_cbs += enqueued_cbs;\n+\n+\t\trte_bbdev_log_debug(\"enqueuing enc ops [%d/%d] | head %d | tail %d\",\n+\t\t\t\ttotal_enqueued_cbs, num,\n+\t\t\t\tq->head_free_desc, q->tail);\n+\t}\n+\n+\t/* Set interrupt bit for last CB in enqueued ops. 
FPGA issues interrupt\n+\t * only when all previous CBs were already processed.\n+\t */\n+\tdesc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc->enc_req.irq_en = q->irq_enable;\n+\n+\tfpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\treturn i;\n+}\n+\n+static uint16_t\n+fpga_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tuint16_t i, total_enqueued_cbs = 0;\n+\tint32_t avail;\n+\tint enqueued_cbs;\n+\tstruct fpga_queue *q = q_data->queue_private;\n+\tunion fpga_dma_desc *desc;\n+\n+\t/* Check if queue is not full */\n+\tif (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==\n+\t\t\tq->head_free_desc))\n+\t\treturn 0;\n+\n+\t/* Calculates available space */\n+\tavail = (q->head_free_desc > q->tail) ?\n+\t\tq->head_free_desc - q->tail - 1 :\n+\t\tq->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;\n+\n+\tfor (i = 0; i < num; ++i) {\n+\n+\t\t/* Check if there is available space for further\n+\t\t * processing\n+\t\t */\n+\t\tif (unlikely(avail - 1 < 0))\n+\t\t\tbreak;\n+\t\tavail -= 1;\n+\t\tenqueued_cbs = enqueue_ldpc_dec_one_op_cb(q, ops[i],\n+\t\t\t\ttotal_enqueued_cbs);\n+\n+\t\tif (enqueued_cbs < 0)\n+\t\t\tbreak;\n+\n+\t\ttotal_enqueued_cbs += enqueued_cbs;\n+\n+\t\trte_bbdev_log_debug(\"enqueuing dec ops [%d/%d] | head %d | tail %d\",\n+\t\t\t\ttotal_enqueued_cbs, num,\n+\t\t\t\tq->head_free_desc, q->tail);\n+\t}\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\n+\t/* Set interrupt bit for last CB in enqueued ops. 
FPGA issues interrupt\n+\t * only when all previous CBs were already processed.\n+\t */\n+\tdesc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\tdesc->enc_req.irq_en = q->irq_enable;\n+\tfpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);\n+\treturn i;\n+}\n+\n+\n+static inline int\n+dequeue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,\n+\t\tuint16_t desc_offset)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tint desc_error = 0;\n+\n+\t/* Set current desc */\n+\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\n+\t/*check if done */\n+\tif (desc->enc_req.done == 0)\n+\t\treturn -1;\n+\n+\t/* make sure the response is read atomically */\n+\trte_smp_rmb();\n+\n+\trte_bbdev_log_debug(\"DMA response desc %p\", desc);\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tprint_dma_enc_desc_debug_info(desc);\n+#endif\n+\n+\t*op = desc->enc_req.op_addr;\n+\t/* Check the descriptor error field, return 1 on error */\n+\tdesc_error = check_desc_error(desc->enc_req.error);\n+\t(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;\n+\n+\treturn 1;\n+}\n+\n+\n+static inline int\n+dequeue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,\n+\t\tuint16_t desc_offset)\n+{\n+\tunion fpga_dma_desc *desc;\n+\tint desc_error = 0;\n+\t/* Set descriptor */\n+\tdesc = q->ring_addr + ((q->head_free_desc + desc_offset)\n+\t\t\t& q->sw_ring_wrap_mask);\n+\n+\t/* Verify done bit is set */\n+\tif (desc->dec_req.done == 0)\n+\t\treturn -1;\n+\n+\t/* make sure the response is read atomically */\n+\trte_smp_rmb();\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tprint_dma_dec_desc_debug_info(desc);\n+#endif\n+\n+\t*op = desc->dec_req.op_addr;\n+\n+\tif (check_bit((*op)->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {\n+\t\t(*op)->status = 0;\n+\t\treturn 1;\n+\t}\n+\n+\t/* FPGA reports iterations based on round-up minus 1 */\n+\t(*op)->ldpc_dec.iter_count = 
desc->dec_req.iter + 1;\n+\t/* CRC Check criteria */\n+\tif (desc->dec_req.crc24b_ind && !(desc->dec_req.crcb_pass))\n+\t\t(*op)->status = 1 << RTE_BBDEV_CRC_ERROR;\n+\t/* et_pass = 0 when decoder fails */\n+\t(*op)->status |= !(desc->dec_req.et_pass) << RTE_BBDEV_SYNDROME_ERROR;\n+\t/* Check the descriptor error field, return 1 on error */\n+\tdesc_error = check_desc_error(desc->dec_req.error);\n+\t(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;\n+\treturn 1;\n+}\n+\n+static uint16_t\n+fpga_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_enc_op **ops, uint16_t num)\n+{\n+\tstruct fpga_queue *q = q_data->queue_private;\n+\tuint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;\n+\tuint16_t i;\n+\tuint16_t dequeued_cbs = 0;\n+\tint ret;\n+\n+\tfor (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {\n+\t\tret = dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);\n+\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\n+\t\tdequeued_cbs += ret;\n+\n+\t\trte_bbdev_log_debug(\"dequeuing enc ops [%d/%d] | head %d | tail %d\",\n+\t\t\t\tdequeued_cbs, num, q->head_free_desc, q->tail);\n+\t}\n+\n+\t/* Update head */\n+\tq->head_free_desc = (q->head_free_desc + dequeued_cbs) &\n+\t\t\tq->sw_ring_wrap_mask;\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.dequeued_count += i;\n+\n+\treturn i;\n+}\n+\n+static uint16_t\n+fpga_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_dec_op **ops, uint16_t num)\n+{\n+\tstruct fpga_queue *q = q_data->queue_private;\n+\tuint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;\n+\tuint16_t i;\n+\tuint16_t dequeued_cbs = 0;\n+\tint ret;\n+\n+\tfor (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {\n+\t\tret = dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);\n+\n+\t\tif (ret < 0)\n+\t\t\tbreak;\n+\n+\t\tdequeued_cbs += ret;\n+\n+\t\trte_bbdev_log_debug(\"dequeuing dec ops [%d/%d] | head %d | tail %d\",\n+\t\t\t\tdequeued_cbs, num, q->head_free_desc, 
q->tail);\n+\t}\n+\n+\t/* Update head */\n+\tq->head_free_desc = (q->head_free_desc + dequeued_cbs) &\n+\t\t\tq->sw_ring_wrap_mask;\n+\n+\t/* Update stats */\n+\tq_data->queue_stats.dequeued_count += i;\n+\n+\treturn i;\n+}\n+\n+\n+/* Initialization Function */\n+static void\n+fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)\n+{\n+\tstruct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);\n+\n+\tdev->dev_ops = &fpga_ops;\n+\tdev->enqueue_ldpc_enc_ops = fpga_enqueue_ldpc_enc;\n+\tdev->enqueue_ldpc_dec_ops = fpga_enqueue_ldpc_dec;\n+\tdev->dequeue_ldpc_enc_ops = fpga_dequeue_ldpc_enc;\n+\tdev->dequeue_ldpc_dec_ops = fpga_dequeue_ldpc_dec;\n+\n+\t((struct fpga_5gnr_fec_device *) dev->data->dev_private)->pf_device =\n+\t\t\t!strcmp(drv->driver.name,\n+\t\t\t\t\tRTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));\n+\t((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =\n+\t\t\tpci_dev->mem_resource[0].addr;\n+\n+\trte_bbdev_log_debug(\n+\t\t\t\"Init device %s [%s] @ virtaddr %p phyaddr %#\"PRIx64,\n+\t\t\tdev->device->driver->name, dev->data->name,\n+\t\t\t(void *)pci_dev->mem_resource[0].addr,\n+\t\t\tpci_dev->mem_resource[0].phys_addr);\n+}\n+\n+static int\n+fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,\n+\tstruct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_bbdev *bbdev = NULL;\n+\tchar dev_name[RTE_BBDEV_NAME_MAX_LEN];\n+\n+\tif (pci_dev == NULL) {\n+\t\trte_bbdev_log(ERR, \"NULL PCI device\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));\n+\n+\t/* Allocate memory to be used privately by drivers */\n+\tbbdev = rte_bbdev_allocate(pci_dev->device.name);\n+\tif (bbdev == NULL)\n+\t\treturn -ENODEV;\n+\n+\t/* allocate device private memory */\n+\tbbdev->data->dev_private = rte_zmalloc_socket(dev_name,\n+\t\t\tsizeof(struct fpga_5gnr_fec_device),\n+\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\tpci_dev->device.numa_node);\n+\n+\tif (bbdev->data->dev_private == NULL) 
{\n+\t\trte_bbdev_log(CRIT,\n+\t\t\t\t\"Allocate of %zu bytes for device \\\"%s\\\" failed\",\n+\t\t\t\tsizeof(struct fpga_5gnr_fec_device), dev_name);\n+\t\t\t\trte_bbdev_release(bbdev);\n+\t\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Fill HW specific part of device structure */\n+\tbbdev->device = &pci_dev->device;\n+\tbbdev->intr_handle = &pci_dev->intr_handle;\n+\tbbdev->data->socket_id = pci_dev->device.numa_node;\n+\n+\t/* Invoke FEC FPGA device initialization function */\n+\tfpga_5gnr_fec_init(bbdev, pci_drv);\n+\n+\trte_bbdev_log_debug(\"bbdev id = %u [%s]\",\n+\t\t\tbbdev->data->dev_id, dev_name);\n+\n+\tstruct fpga_5gnr_fec_device *d = bbdev->data->dev_private;\n+\tuint32_t version_id = fpga_reg_read_32(d->mmio_base,\n+\t\t\tFPGA_5GNR_FEC_VERSION_ID);\n+\trte_bbdev_log(INFO, \"FEC FPGA RTL v%u.%u\",\n+\t\t((uint16_t)(version_id >> 16)), ((uint16_t)version_id));\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tif (!strcmp(bbdev->device->driver->name,\n+\t\t\tRTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME)))\n+\t\tprint_static_reg_debug_info(d->mmio_base);\n+#endif\n+\treturn 0;\n+}\n+\n+static int\n+fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_bbdev *bbdev;\n+\tint ret;\n+\tuint8_t dev_id;\n+\n+\tif (pci_dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\t/* Find device */\n+\tbbdev = rte_bbdev_get_named_dev(pci_dev->device.name);\n+\tif (bbdev == NULL) {\n+\t\trte_bbdev_log(CRIT,\n+\t\t\t\t\"Couldn't find HW dev \\\"%s\\\" to uninitialise it\",\n+\t\t\t\tpci_dev->device.name);\n+\t\treturn -ENODEV;\n+\t}\n+\tdev_id = bbdev->data->dev_id;\n+\n+\t/* free device private memory before close */\n+\trte_free(bbdev->data->dev_private);\n+\n+\t/* Close device */\n+\tret = rte_bbdev_close(dev_id);\n+\tif (ret < 0)\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Device %i failed to close during uninit: %i\",\n+\t\t\t\tdev_id, ret);\n+\n+\t/* release bbdev from library */\n+\tret = rte_bbdev_release(bbdev);\n+\tif (ret)\n+\t\trte_bbdev_log(ERR, \"Device %i failed to uninit: %i\", 
dev_id,\n+\t\t\t\tret);\n+\n+\trte_bbdev_log_debug(\"Destroyed bbdev = %u\", dev_id);\n+\n+\treturn 0;\n+}\n+\n+static inline void\n+set_default_fpga_conf(struct fpga_5gnr_fec_conf *def_conf)\n+{\n+\t/* clear default configuration before initialization */\n+\tmemset(def_conf, 0, sizeof(struct fpga_5gnr_fec_conf));\n+\t/* Set pf mode to true */\n+\tdef_conf->pf_mode_en = true;\n+\n+\t/* Set ratio between UL and DL to 1:1 (unit of weight is 3 CBs) */\n+\tdef_conf->ul_bandwidth = 3;\n+\tdef_conf->dl_bandwidth = 3;\n+\n+\t/* Set Load Balance Factor to 64 */\n+\tdef_conf->dl_load_balance = 64;\n+\tdef_conf->ul_load_balance = 64;\n+}\n+\n+/* Initial configuration of FPGA 5GNR FEC device */\n+int\n+fpga_5gnr_fec_configure(const char *dev_name,\n+\t\tconst struct fpga_5gnr_fec_conf *conf)\n+{\n+\tuint32_t payload_32, address;\n+\tuint16_t payload_16;\n+\tuint8_t payload_8;\n+\tuint16_t q_id, vf_id, total_q_id, total_ul_q_id, total_dl_q_id;\n+\tstruct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);\n+\tstruct fpga_5gnr_fec_conf def_conf;\n+\n+\tif (bbdev == NULL) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Invalid dev_name (%s), or device is not yet initialised\",\n+\t\t\t\tdev_name);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\tstruct fpga_5gnr_fec_device *d = bbdev->data->dev_private;\n+\n+\tif (conf == NULL) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"FPGA Configuration was not provided. 
Default configuration will be loaded.\");\n+\t\tset_default_fpga_conf(&def_conf);\n+\t\tconf = &def_conf;\n+\t}\n+\n+\t/*\n+\t * Configure UL:DL ratio.\n+\t * [7:0]: UL weight\n+\t * [15:8]: DL weight\n+\t */\n+\tpayload_16 = (conf->dl_bandwidth << 8) | conf->ul_bandwidth;\n+\taddress = FPGA_5GNR_FEC_CONFIGURATION;\n+\tfpga_reg_write_16(d->mmio_base, address, payload_16);\n+\n+\t/* Clear all queues registers */\n+\tpayload_32 = FPGA_INVALID_HW_QUEUE_ID;\n+\tfor (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {\n+\t\taddress = (q_id << 2) + FPGA_5GNR_FEC_QUEUE_MAP;\n+\t\tfpga_reg_write_32(d->mmio_base, address, payload_32);\n+\t}\n+\n+\t/*\n+\t * If PF mode is enabled allocate all queues for PF only.\n+\t *\n+\t * For VF mode each VF can have different number of UL and DL queues.\n+\t * Total number of queues to configure cannot exceed FPGA\n+\t * capabilities - 64 queues - 32 queues for UL and 32 queues for DL.\n+\t * Queues mapping is done according to configuration:\n+\t *\n+\t * UL queues:\n+\t * |                Q_ID              | VF_ID |\n+\t * |                 0                |   0   |\n+\t * |                ...               |   0   |\n+\t * | conf->vf_dl_queues_number[0] - 1 |   0   |\n+\t * | conf->vf_dl_queues_number[0]     |   1   |\n+\t * |                ...               |   1   |\n+\t * | conf->vf_dl_queues_number[1] - 1 |   1   |\n+\t * |                ...               |  ...  |\n+\t * | conf->vf_dl_queues_number[7] - 1 |   7   |\n+\t *\n+\t * DL queues:\n+\t * |                Q_ID              | VF_ID |\n+\t * |                 32               |   0   |\n+\t * |                ...               |   0   |\n+\t * | conf->vf_ul_queues_number[0] - 1 |   0   |\n+\t * | conf->vf_ul_queues_number[0]     |   1   |\n+\t * |                ...               |   1   |\n+\t * | conf->vf_ul_queues_number[1] - 1 |   1   |\n+\t * |                ...               |  ...  
|\n+\t * | conf->vf_ul_queues_number[7] - 1 |   7   |\n+\t *\n+\t * Example of configuration:\n+\t * conf->vf_ul_queues_number[0] = 4;  -> 4 UL queues for VF0\n+\t * conf->vf_dl_queues_number[0] = 4;  -> 4 DL queues for VF0\n+\t * conf->vf_ul_queues_number[1] = 2;  -> 2 UL queues for VF1\n+\t * conf->vf_dl_queues_number[1] = 2;  -> 2 DL queues for VF1\n+\t *\n+\t * UL:\n+\t * | Q_ID | VF_ID |\n+\t * |   0  |   0   |\n+\t * |   1  |   0   |\n+\t * |   2  |   0   |\n+\t * |   3  |   0   |\n+\t * |   4  |   1   |\n+\t * |   5  |   1   |\n+\t *\n+\t * DL:\n+\t * | Q_ID | VF_ID |\n+\t * |  32  |   0   |\n+\t * |  33  |   0   |\n+\t * |  34  |   0   |\n+\t * |  35  |   0   |\n+\t * |  36  |   1   |\n+\t * |  37  |   1   |\n+\t */\n+\tif (conf->pf_mode_en) {\n+\t\tpayload_32 = 0x1;\n+\t\tfor (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {\n+\t\t\taddress = (q_id << 2) + FPGA_5GNR_FEC_QUEUE_MAP;\n+\t\t\tfpga_reg_write_32(d->mmio_base, address, payload_32);\n+\t\t}\n+\t} else {\n+\t\t/* Calculate total number of UL and DL queues to configure */\n+\t\ttotal_ul_q_id = total_dl_q_id = 0;\n+\t\tfor (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {\n+\t\t\ttotal_ul_q_id += conf->vf_ul_queues_number[vf_id];\n+\t\t\ttotal_dl_q_id += conf->vf_dl_queues_number[vf_id];\n+\t\t}\n+\t\ttotal_q_id = total_dl_q_id + total_ul_q_id;\n+\t\t/*\n+\t\t * Check if total number of queues to configure does not exceed\n+\t\t * FPGA capabilities (64 queues - 32 UL and 32 DL queues)\n+\t\t */\n+\t\tif ((total_ul_q_id > FPGA_NUM_UL_QUEUES) ||\n+\t\t\t(total_dl_q_id > FPGA_NUM_DL_QUEUES) ||\n+\t\t\t(total_q_id > FPGA_TOTAL_NUM_QUEUES)) {\n+\t\t\trte_bbdev_log(ERR,\n+\t\t\t\t\t\"FPGA Configuration failed. 
Too many queues to configure: UL_Q %u, DL_Q %u, FPGA_Q %u\",\n+\t\t\t\t\ttotal_ul_q_id, total_dl_q_id,\n+\t\t\t\t\tFPGA_TOTAL_NUM_QUEUES);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\ttotal_ul_q_id = 0;\n+\t\tfor (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {\n+\t\t\tfor (q_id = 0; q_id < conf->vf_ul_queues_number[vf_id];\n+\t\t\t\t\t++q_id, ++total_ul_q_id) {\n+\t\t\t\taddress = (total_ul_q_id << 2) +\n+\t\t\t\t\t\tFPGA_5GNR_FEC_QUEUE_MAP;\n+\t\t\t\tpayload_32 = ((0x80 + vf_id) << 16) | 0x1;\n+\t\t\t\tfpga_reg_write_32(d->mmio_base, address,\n+\t\t\t\t\t\tpayload_32);\n+\t\t\t}\n+\t\t}\n+\t\ttotal_dl_q_id = 0;\n+\t\tfor (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {\n+\t\t\tfor (q_id = 0; q_id < conf->vf_dl_queues_number[vf_id];\n+\t\t\t\t\t++q_id, ++total_dl_q_id) {\n+\t\t\t\taddress = ((total_dl_q_id + FPGA_NUM_UL_QUEUES)\n+\t\t\t\t\t\t<< 2) + FPGA_5GNR_FEC_QUEUE_MAP;\n+\t\t\t\tpayload_32 = ((0x80 + vf_id) << 16) | 0x1;\n+\t\t\t\tfpga_reg_write_32(d->mmio_base, address,\n+\t\t\t\t\t\tpayload_32);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t/* Setting Load Balance Factor */\n+\tpayload_16 = (conf->dl_load_balance << 8) | (conf->ul_load_balance);\n+\taddress = FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR;\n+\tfpga_reg_write_16(d->mmio_base, address, payload_16);\n+\n+\t/* Setting length of ring descriptor entry */\n+\tpayload_16 = FPGA_RING_DESC_ENTRY_LENGTH;\n+\taddress = FPGA_5GNR_FEC_RING_DESC_LEN;\n+\tfpga_reg_write_16(d->mmio_base, address, payload_16);\n+\n+\t/* Setting FLR timeout value */\n+\tpayload_16 = conf->flr_time_out;\n+\taddress = FPGA_5GNR_FEC_FLR_TIME_OUT;\n+\tfpga_reg_write_16(d->mmio_base, address, payload_16);\n+\n+\t/* Queue PF/VF mapping table is ready */\n+\tpayload_8 = 0x1;\n+\taddress = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;\n+\tfpga_reg_write_8(d->mmio_base, address, payload_8);\n+\n+\trte_bbdev_log_debug(\"PF FPGA 5GNR FEC configuration complete for %s\",\n+\t\t\tdev_name);\n+\n+#ifdef 
RTE_LIBRTE_BBDEV_DEBUG\n+\tprint_static_reg_debug_info(d->mmio_base);\n+#endif\n+\treturn 0;\n+}\n+\n+/* FPGA 5GNR FEC PCI PF address map */\n+static struct rte_pci_id pci_id_fpga_5gnr_fec_pf_map[] = {\n+\t{\n+\t\tRTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,\n+\t\t\t\tFPGA_5GNR_FEC_PF_DEVICE_ID)\n+\t},\n+\t{.device_id = 0},\n+};\n+\n+static struct rte_pci_driver fpga_5gnr_fec_pci_pf_driver = {\n+\t.probe = fpga_5gnr_fec_probe,\n+\t.remove = fpga_5gnr_fec_remove,\n+\t.id_table = pci_id_fpga_5gnr_fec_pf_map,\n+\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING\n+};\n+\n+/* FPGA 5GNR FEC PCI VF address map */\n+static struct rte_pci_id pci_id_fpga_5gnr_fec_vf_map[] = {\n+\t{\n+\t\tRTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,\n+\t\t\t\tFPGA_5GNR_FEC_VF_DEVICE_ID)\n+\t},\n+\t{.device_id = 0},\n+};\n+\n+static struct rte_pci_driver fpga_5gnr_fec_pci_vf_driver = {\n+\t.probe = fpga_5gnr_fec_probe,\n+\t.remove = fpga_5gnr_fec_remove,\n+\t.id_table = pci_id_fpga_5gnr_fec_vf_map,\n+\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING\n+};\n+\n+\n+RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_PF_DRIVER_NAME, fpga_5gnr_fec_pci_pf_driver);\n+RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_PF_DRIVER_NAME,\n+\t\tpci_id_fpga_5gnr_fec_pf_map);\n+RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_VF_DRIVER_NAME, fpga_5gnr_fec_pci_vf_driver);\n+RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_VF_DRIVER_NAME,\n+\t\tpci_id_fpga_5gnr_fec_vf_map);\n+\n+RTE_INIT(fpga_5gnr_fec_init_log)\n+{\n+\tfpga_5gnr_fec_logtype = rte_log_register(\"pmd.bb.fpga_5gnr_fec\");\n+\tif (fpga_5gnr_fec_logtype >= 0)\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\trte_log_set_level(fpga_5gnr_fec_logtype, RTE_LOG_DEBUG);\n+#else\n+\t\trte_log_set_level(fpga_5gnr_fec_logtype, RTE_LOG_NOTICE);\n+#endif\n+}\ndiff --git a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h\nnew file mode 100644\nindex 0000000..7eebc7d\n--- /dev/null\n+++ b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h\n@@ -0,0 +1,74 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * 
Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#ifndef _FPGA_5GNR_FEC_H_\n+#define _FPGA_5GNR_FEC_H_\n+\n+#include <stdint.h>\n+#include <stdbool.h>\n+\n+/**\n+ * @file fpga_5gnr_fec.h\n+ *\n+ * Interface for Intel(R) FGPA 5GNR FEC device configuration at the host level,\n+ * directly accessible by the application.\n+ * Configuration related to 5GNR functionality is done through\n+ * librte_bbdev library.\n+ *\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice\n+ */\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+/**< Number of Virtual Functions FGPA 4G FEC supports */\n+#define FPGA_5GNR_FEC_NUM_VFS 8\n+\n+/**\n+ * Structure to pass FPGA 4G FEC configuration.\n+ */\n+struct fpga_5gnr_fec_conf {\n+\t/**< 1 if PF is used for dataplane, 0 for VFs */\n+\tbool pf_mode_en;\n+\t/**< Number of UL queues per VF */\n+\tuint8_t vf_ul_queues_number[FPGA_5GNR_FEC_NUM_VFS];\n+\t/**< Number of DL queues per VF */\n+\tuint8_t vf_dl_queues_number[FPGA_5GNR_FEC_NUM_VFS];\n+\t/**< UL bandwidth. Needed for schedule algorithm */\n+\tuint8_t ul_bandwidth;\n+\t/**< DL bandwidth. Needed for schedule algorithm */\n+\tuint8_t dl_bandwidth;\n+\t/**< UL Load Balance */\n+\tuint8_t ul_load_balance;\n+\t/**< DL Load Balance */\n+\tuint8_t dl_load_balance;\n+\t/**< FLR timeout value */\n+\tuint16_t flr_time_out;\n+};\n+\n+/**\n+ * Configure Intel(R) FPGA 5GNR FEC device\n+ *\n+ * @param dev_name\n+ *   The name of the device. This is the short form of PCI BDF, e.g. 
00:01.0.\n+ *   It can also be retrieved for a bbdev device from the dev_name field in the\n+ *   rte_bbdev_info structure returned by rte_bbdev_info_get().\n+ * @param conf\n+ *   Configuration to apply to FPGA 4G FEC.\n+ *\n+ * @return\n+ *   Zero on success, negative value on failure.\n+ */\n+__rte_experimental\n+int\n+fpga_5gnr_fec_configure(const char *dev_name,\n+\t\tconst struct fpga_5gnr_fec_conf *conf);\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _FPGA_5GNR_FEC_H_ */\ndiff --git a/drivers/baseband/fpga_5gnr_fec/meson.build b/drivers/baseband/fpga_5gnr_fec/meson.build\nnew file mode 100644\nindex 0000000..86121d8\n--- /dev/null\n+++ b/drivers/baseband/fpga_5gnr_fec/meson.build\n@@ -0,0 +1,6 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2020 Intel Corporation\n+\n+deps += ['bbdev', 'bus_vdev', 'ring', 'pci', 'bus_pci']\n+allow_experimental_apis = true\n+sources = files('fpga_5gnr_fec.c')\ndiff --git a/drivers/baseband/fpga_5gnr_fec/rte_pmd_bbdev_fpga_5gnr_fec_version.map b/drivers/baseband/fpga_5gnr_fec/rte_pmd_bbdev_fpga_5gnr_fec_version.map\nnew file mode 100644\nindex 0000000..b0fb971\n--- /dev/null\n+++ b/drivers/baseband/fpga_5gnr_fec/rte_pmd_bbdev_fpga_5gnr_fec_version.map\n@@ -0,0 +1,10 @@\n+DPDK_20.0 {\n+\tlocal: *;\n+};\n+\n+EXPERIMENTAL {\n+\tglobal:\n+\n+\tfpga_5gnr_fec_configure;\n+\n+};\ndiff --git a/drivers/baseband/meson.build b/drivers/baseband/meson.build\nindex be7677f..4d909f9 100644\n--- a/drivers/baseband/meson.build\n+++ b/drivers/baseband/meson.build\n@@ -1,7 +1,7 @@\n # SPDX-License-Identifier: BSD-3-Clause\n # Copyright(c) 2018 Luca Boccassi <bluca@debian.org>\n \n-drivers = ['null', 'turbo_sw', 'fpga_lte_fec']\n+drivers = ['null', 'turbo_sw', 'fpga_lte_fec', 'fpga_5gnr_fec']\n \n config_flag_fmt = 'RTE_LIBRTE_PMD_BBDEV_@0@'\n driver_name_fmt = 'rte_pmd_bbdev_@0@'\ndiff --git a/mk/rte.app.mk b/mk/rte.app.mk\nindex d295ca0..da12b9e 100644\n--- a/mk/rte.app.mk\n+++ b/mk/rte.app.mk\n@@ -246,6 +246,7 @@ 
_LDLIBS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD)     += -lrte_pmd_netvsc\n ifeq ($(CONFIG_RTE_LIBRTE_BBDEV),y)\n _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL)     += -lrte_pmd_bbdev_null\n _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC) += -lrte_pmd_bbdev_fpga_lte_fec\n+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_5GNR_FEC) += -lrte_pmd_bbdev_fpga_5gnr_fec\n \n # TURBO SOFTWARE PMD is dependent on the FLEXRAN library\n _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += -lrte_pmd_bbdev_turbo_sw\n",
    "prefixes": [
        "v2",
        "12/15"
    ]
}