get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied are changed).

put:
Update a patch (full update).

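For reference, a minimal sketch of reading this patch through the REST API with Python and the requests library. The endpoint URL and the fields shown are taken from the response below; GET needs no authentication, while PUT/PATCH would require an API token (not shown here).

    import requests

    # Fetch a single patch from the Patchwork REST API (read-only, no auth needed).
    url = "https://patches.dpdk.org/api/patches/126626/"
    resp = requests.get(url, headers={"Accept": "application/json"})
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])   # "net/liquidio: removed LiquidIO ethdev driver"
    print(patch["state"])  # "superseded"
    print(patch["mbox"])   # URL of the patch in mbox format

The raw exchange captured below shows the same request made through the browsable API (?format=api) and the JSON body it returns.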
GET /api/patches/126626/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 126626,
    "url": "https://patches.dpdk.org/api/patches/126626/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20230428103127.1059989-1-jerinj@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230428103127.1059989-1-jerinj@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230428103127.1059989-1-jerinj@marvell.com",
    "date": "2023-04-28T10:31:26",
    "name": "net/liquidio: removed LiquidIO ethdev driver",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "45e423c5c71210a4ee9423a2c682a09ec4808b64",
    "submitter": {
        "id": 1188,
        "url": "https://patches.dpdk.org/api/people/1188/?format=api",
        "name": "Jerin Jacob Kollanukkaran",
        "email": "jerinj@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20230428103127.1059989-1-jerinj@marvell.com/mbox/",
    "series": [
        {
            "id": 27900,
            "url": "https://patches.dpdk.org/api/series/27900/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=27900",
            "date": "2023-04-28T10:31:26",
            "name": "net/liquidio: removed LiquidIO ethdev driver",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/27900/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/126626/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/126626/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 6935942A13;\n\tFri, 28 Apr 2023 12:31:44 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3C94E4113C;\n\tFri, 28 Apr 2023 12:31:44 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id ECBF04021D\n for <dev@dpdk.org>; Fri, 28 Apr 2023 12:31:41 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 33SA5AQM003425; Fri, 28 Apr 2023 03:31:38 -0700",
            "from dc5-exch01.marvell.com ([199.233.59.181])\n by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3q85x61fn6-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT);\n Fri, 28 Apr 2023 03:31:36 -0700",
            "from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Fri, 28 Apr 2023 03:31:33 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com\n (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Fri, 28 Apr 2023 03:31:33 -0700",
            "from jerin-lab.marvell.com (jerin-lab.marvell.com [10.28.34.14])\n by maili.marvell.com (Postfix) with ESMTP id 6E4355B692C;\n Fri, 28 Apr 2023 03:31:30 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : mime-version : content-type :\n content-transfer-encoding; s=pfpt0220;\n bh=f6b5WJVi3kkFSW9Od19V/9YFjXpDQNgoN4KOOhzfyO8=;\n b=SXUsIW0cbMtpxUDqJz8QyC23mAhRAUsnSngtYrJ8WWk/ihayjGTZHxQUeN7wb1G3WWVv\n DR6duJwu2XxBp14L2JleGlDzkgma6xk5kdRL7JE+7bStIRw6K0p0w9siHSmYu1C4pzOT\n qBdWQETFE7WNUoIfEN2R6pLb/C5vFqPSu0o/KN/ciSdL58fjGpFQqHgNR6t4DaqoU3s7\n Tjy10Km2tAWgWkG29DnVoBQUmhkkH+s2/UNFacvvdmQhh5lVv/PWNaG/bEeCRcVQxMBZ\n Sej9Sxd2kCY9OccpGrpJnpTk3KjcQDVcXnERzGYRW+QANbLotV7C2qYerxZ1LqGRzHpz XQ==",
        "From": "<jerinj@marvell.com>",
        "To": "<dev@dpdk.org>, Thomas Monjalon <thomas@monjalon.net>, Shijith Thotton\n <sthotton@marvell.com>, Srisivasubramanian Srinivasan\n <srinivasan@marvell.com>, Anatoly Burakov <anatoly.burakov@intel.com>",
        "CC": "<ferruh.yigit@amd.com>, Jerin Jacob <jerinj@marvell.com>",
        "Subject": "[dpdk-dev] [PATCH] net/liquidio: removed LiquidIO ethdev driver",
        "Date": "Fri, 28 Apr 2023 16:01:26 +0530",
        "Message-ID": "<20230428103127.1059989-1-jerinj@marvell.com>",
        "X-Mailer": "git-send-email 2.40.1",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"UTF-8\"",
        "Content-Transfer-Encoding": "8bit",
        "X-Proofpoint-GUID": "4l9yf7Xv3Q8WjgCYvG9md3nLwHDTnije",
        "X-Proofpoint-ORIG-GUID": "4l9yf7Xv3Q8WjgCYvG9md3nLwHDTnije",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.254,Aquarius:18.0.942,Hydra:6.0.573,FMLib:17.11.170.22\n definitions=2023-04-28_04,2023-04-27_01,2023-02-09_01",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Jerin Jacob <jerinj@marvell.com>\n\nThe LiquidIO product line has been substituted with CN9K/CN10K\nOCTEON product line smart NICs located at drivers/net/octeon_ep/.\n\nDPDK 20.08 has categorized the LiquidIO driver as UNMAINTAINED\nbecause of the absence of updates in the driver.\n\nDue to the above reasons, the driver removed from DPDK 23.07.\n\nAlso removed deprecation notice entry for the removal in\ndoc/guides/rel_notes/deprecation.rst.\n\nSigned-off-by: Jerin Jacob <jerinj@marvell.com>\n---\n MAINTAINERS                              |    8 -\n doc/guides/nics/features/liquidio.ini    |   29 -\n doc/guides/nics/index.rst                |    1 -\n doc/guides/nics/liquidio.rst             |  169 --\n doc/guides/rel_notes/deprecation.rst     |    7 -\n doc/guides/rel_notes/release_23_07.rst   |    9 +-\n drivers/net/liquidio/base/lio_23xx_reg.h |  165 --\n drivers/net/liquidio/base/lio_23xx_vf.c  |  513 ------\n drivers/net/liquidio/base/lio_23xx_vf.h  |   63 -\n drivers/net/liquidio/base/lio_hw_defs.h  |  239 ---\n drivers/net/liquidio/base/lio_mbox.c     |  246 ---\n drivers/net/liquidio/base/lio_mbox.h     |  102 -\n drivers/net/liquidio/lio_ethdev.c        | 2147 ----------------------\n drivers/net/liquidio/lio_ethdev.h        |  179 --\n drivers/net/liquidio/lio_logs.h          |   58 -\n drivers/net/liquidio/lio_rxtx.c          | 1804 ------------------\n drivers/net/liquidio/lio_rxtx.h          |  740 --------\n drivers/net/liquidio/lio_struct.h        |  661 -------\n drivers/net/liquidio/meson.build         |   16 -\n drivers/net/meson.build                  |    1 -\n 20 files changed, 1 insertion(+), 7156 deletions(-)\n delete mode 100644 doc/guides/nics/features/liquidio.ini\n delete mode 100644 doc/guides/nics/liquidio.rst\n delete mode 100644 drivers/net/liquidio/base/lio_23xx_reg.h\n delete mode 100644 drivers/net/liquidio/base/lio_23xx_vf.c\n delete mode 100644 drivers/net/liquidio/base/lio_23xx_vf.h\n delete mode 100644 drivers/net/liquidio/base/lio_hw_defs.h\n delete mode 100644 drivers/net/liquidio/base/lio_mbox.c\n delete mode 100644 drivers/net/liquidio/base/lio_mbox.h\n delete mode 100644 drivers/net/liquidio/lio_ethdev.c\n delete mode 100644 drivers/net/liquidio/lio_ethdev.h\n delete mode 100644 drivers/net/liquidio/lio_logs.h\n delete mode 100644 drivers/net/liquidio/lio_rxtx.c\n delete mode 100644 drivers/net/liquidio/lio_rxtx.h\n delete mode 100644 drivers/net/liquidio/lio_struct.h\n delete mode 100644 drivers/net/liquidio/meson.build",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex 8df23e5099..0157c26dd2 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -681,14 +681,6 @@ F: drivers/net/thunderx/\n F: doc/guides/nics/thunderx.rst\n F: doc/guides/nics/features/thunderx.ini\n \n-Cavium LiquidIO - UNMAINTAINED\n-M: Shijith Thotton <sthotton@marvell.com>\n-M: Srisivasubramanian Srinivasan <srinivasan@marvell.com>\n-T: git://dpdk.org/next/dpdk-next-net-mrvl\n-F: drivers/net/liquidio/\n-F: doc/guides/nics/liquidio.rst\n-F: doc/guides/nics/features/liquidio.ini\n-\n Cavium OCTEON TX\n M: Harman Kalra <hkalra@marvell.com>\n T: git://dpdk.org/next/dpdk-next-net-mrvl\ndiff --git a/doc/guides/nics/features/liquidio.ini b/doc/guides/nics/features/liquidio.ini\ndeleted file mode 100644\nindex a8bde282e0..0000000000\n--- a/doc/guides/nics/features/liquidio.ini\n+++ /dev/null\n@@ -1,29 +0,0 @@\n-;\n-; Supported features of the 'LiquidIO' network poll mode driver.\n-;\n-; Refer to default.ini for the full list of available PMD features.\n-;\n-[Features]\n-Speed capabilities   = Y\n-Link status          = Y\n-Link status event    = Y\n-MTU update           = Y\n-Scattered Rx         = Y\n-Promiscuous mode     = Y\n-Allmulticast mode    = Y\n-RSS hash             = Y\n-RSS key update       = Y\n-RSS reta update      = Y\n-VLAN filter          = Y\n-CRC offload          = Y\n-VLAN offload         = P\n-L3 checksum offload  = Y\n-L4 checksum offload  = Y\n-Inner L3 checksum    = Y\n-Inner L4 checksum    = Y\n-Basic stats          = Y\n-Extended stats       = Y\n-Multiprocess aware   = Y\n-Linux                = Y\n-x86-64               = Y\n-Usage doc            = Y\ndiff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst\nindex 5c9d1edf5e..31296822e5 100644\n--- a/doc/guides/nics/index.rst\n+++ b/doc/guides/nics/index.rst\n@@ -44,7 +44,6 @@ Network Interface Controller Drivers\n     ipn3ke\n     ixgbe\n     kni\n-    liquidio\n     mana\n     memif\n     mlx4\ndiff --git a/doc/guides/nics/liquidio.rst b/doc/guides/nics/liquidio.rst\ndeleted file mode 100644\nindex f893b3b539..0000000000\n--- a/doc/guides/nics/liquidio.rst\n+++ /dev/null\n@@ -1,169 +0,0 @@\n-..  SPDX-License-Identifier: BSD-3-Clause\n-    Copyright(c) 2017 Cavium, Inc\n-\n-LiquidIO VF Poll Mode Driver\n-============================\n-\n-The LiquidIO VF PMD library (**librte_net_liquidio**) provides poll mode driver support for\n-Cavium LiquidIO® II server adapter VFs. PF management and VF creation can be\n-done using kernel driver.\n-\n-More information can be found at `Cavium Official Website\n-<http://cavium.com/LiquidIO_Adapters.html>`_.\n-\n-Supported LiquidIO Adapters\n------------------------------\n-\n-- LiquidIO II CN2350 210SV/225SV\n-- LiquidIO II CN2350 210SVPT\n-- LiquidIO II CN2360 210SV/225SV\n-- LiquidIO II CN2360 210SVPT\n-\n-\n-SR-IOV: Prerequisites and Sample Application Notes\n---------------------------------------------------\n-\n-This section provides instructions to configure SR-IOV with Linux OS.\n-\n-#. Verify SR-IOV and ARI capabilities are enabled on the adapter using ``lspci``:\n-\n-   .. code-block:: console\n-\n-      lspci -s <slot> -vvv\n-\n-   Example output:\n-\n-   .. code-block:: console\n-\n-      [...]\n-      Capabilities: [148 v1] Alternative Routing-ID Interpretation (ARI)\n-      [...]\n-      Capabilities: [178 v1] Single Root I/O Virtualization (SR-IOV)\n-      [...]\n-      Kernel driver in use: LiquidIO\n-\n-#. Load the kernel module:\n-\n-   .. code-block:: console\n-\n-      modprobe liquidio\n-\n-#. 
Bring up the PF ports:\n-\n-   .. code-block:: console\n-\n-      ifconfig p4p1 up\n-      ifconfig p4p2 up\n-\n-#. Change PF MTU if required:\n-\n-   .. code-block:: console\n-\n-      ifconfig p4p1 mtu 9000\n-      ifconfig p4p2 mtu 9000\n-\n-#. Create VF device(s):\n-\n-   Echo number of VFs to be created into ``\"sriov_numvfs\"`` sysfs entry\n-   of the parent PF.\n-\n-   .. code-block:: console\n-\n-      echo 1 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs\n-      echo 1 > /sys/bus/pci/devices/0000:03:00.1/sriov_numvfs\n-\n-#. Assign VF MAC address:\n-\n-   Assign MAC address to the VF using iproute2 utility. The syntax is::\n-\n-      ip link set <PF iface> vf <VF id> mac <macaddr>\n-\n-   Example output:\n-\n-   .. code-block:: console\n-\n-      ip link set p4p1 vf 0 mac F2:A8:1B:5E:B4:66\n-\n-#. Assign VF(s) to VM.\n-\n-   The VF devices may be passed through to the guest VM using qemu or\n-   virt-manager or virsh etc.\n-\n-   Example qemu guest launch command:\n-\n-   .. code-block:: console\n-\n-      ./qemu-system-x86_64 -name lio-vm -machine accel=kvm \\\n-      -cpu host -m 4096 -smp 4 \\\n-      -drive file=<disk_file>,if=none,id=disk1,format=<type> \\\n-      -device virtio-blk-pci,scsi=off,drive=disk1,id=virtio-disk1,bootindex=1 \\\n-      -device vfio-pci,host=03:00.3 -device vfio-pci,host=03:08.3\n-\n-#. Running testpmd\n-\n-   Refer to the document\n-   :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>` to run\n-   ``testpmd`` application.\n-\n-   .. note::\n-\n-      Use ``igb_uio`` instead of ``vfio-pci`` in VM.\n-\n-   Example output:\n-\n-   .. code-block:: console\n-\n-      [...]\n-      EAL: PCI device 0000:03:00.3 on NUMA socket 0\n-      EAL:   probe driver: 177d:9712 net_liovf\n-      EAL:   using IOMMU type 1 (Type 1)\n-      PMD: net_liovf[03:00.3]INFO: DEVICE : CN23XX VF\n-      EAL: PCI device 0000:03:08.3 on NUMA socket 0\n-      EAL:   probe driver: 177d:9712 net_liovf\n-      PMD: net_liovf[03:08.3]INFO: DEVICE : CN23XX VF\n-      Interactive-mode selected\n-      USER1: create a new mbuf pool <mbuf_pool_socket_0>: n=171456, size=2176, socket=0\n-      Configuring Port 0 (socket 0)\n-      PMD: net_liovf[03:00.3]INFO: Starting port 0\n-      Port 0: F2:A8:1B:5E:B4:66\n-      Configuring Port 1 (socket 0)\n-      PMD: net_liovf[03:08.3]INFO: Starting port 1\n-      Port 1: 32:76:CC:EE:56:D7\n-      Checking link statuses...\n-      Port 0 Link Up - speed 10000 Mbps - full-duplex\n-      Port 1 Link Up - speed 10000 Mbps - full-duplex\n-      Done\n-      testpmd>\n-\n-#. Enabling VF promiscuous mode\n-\n-   One VF per PF can be marked as trusted for promiscuous mode.\n-\n-   .. code-block:: console\n-\n-      ip link set dev <PF iface> vf <VF id> trust on\n-\n-\n-Limitations\n------------\n-\n-VF MTU\n-~~~~~~\n-\n-VF MTU is limited by PF MTU. 
Raise PF value before configuring VF for larger packet size.\n-\n-VLAN offload\n-~~~~~~~~~~~~\n-\n-Tx VLAN insertion is not supported and consequently VLAN offload feature is\n-marked partial.\n-\n-Ring size\n-~~~~~~~~~\n-\n-Number of descriptors for Rx/Tx ring should be in the range 128 to 512.\n-\n-CRC stripping\n-~~~~~~~~~~~~~\n-\n-LiquidIO adapters strip ethernet FCS of every packet coming to the host interface.\ndiff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst\nindex dcc1ca1696..8e1cdd677a 100644\n--- a/doc/guides/rel_notes/deprecation.rst\n+++ b/doc/guides/rel_notes/deprecation.rst\n@@ -121,13 +121,6 @@ Deprecation Notices\n * net/bnx2x: Starting from DPDK 23.07, the Marvell QLogic bnx2x driver will be removed.\n   This decision has been made to alleviate the burden of maintaining a discontinued product.\n \n-* net/liquidio: Remove LiquidIO ethdev driver.\n-  The LiquidIO product line has been substituted\n-  with CN9K/CN10K OCTEON product line smart NICs located in ``drivers/net/octeon_ep/``.\n-  DPDK 20.08 has categorized the LiquidIO driver as UNMAINTAINED\n-  because of the absence of updates in the driver.\n-  Due to the above reasons, the driver will be unavailable from DPDK 23.07.\n-\n * cryptodev: The function ``rte_cryptodev_cb_fn`` will be updated\n   to have another parameter ``qp_id`` to return the queue pair ID\n   which got error interrupt to the application,\ndiff --git a/doc/guides/rel_notes/release_23_07.rst b/doc/guides/rel_notes/release_23_07.rst\nindex a9b1293689..4d505b607a 100644\n--- a/doc/guides/rel_notes/release_23_07.rst\n+++ b/doc/guides/rel_notes/release_23_07.rst\n@@ -59,14 +59,7 @@ New Features\n Removed Items\n -------------\n \n-.. This section should contain removed items in this release. Sample format:\n-\n-   * Add a short 1-2 sentence description of the removed item\n-     in the past tense.\n-\n-   This section is a comment. Do not overwrite or remove it.\n-   Also, make sure to start the actual text at the margin.\n-   =======================================================\n+* Removed LiquidIO ethdev driver located at ``drivers/net/liquidio/``\n \n \n API Changes\ndiff --git a/drivers/net/liquidio/base/lio_23xx_reg.h b/drivers/net/liquidio/base/lio_23xx_reg.h\ndeleted file mode 100644\nindex 9f28504b53..0000000000\n--- a/drivers/net/liquidio/base/lio_23xx_reg.h\n+++ /dev/null\n@@ -1,165 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#ifndef _LIO_23XX_REG_H_\n-#define _LIO_23XX_REG_H_\n-\n-/* ###################### REQUEST QUEUE ######################### */\n-\n-/* 64 registers for Input Queues Start Addr - SLI_PKT(0..63)_INSTR_BADDR */\n-#define CN23XX_SLI_PKT_INSTR_BADDR_START64\t0x10010\n-\n-/* 64 registers for Input Doorbell - SLI_PKT(0..63)_INSTR_BAOFF_DBELL */\n-#define CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START\t0x10020\n-\n-/* 64 registers for Input Queue size - SLI_PKT(0..63)_INSTR_FIFO_RSIZE */\n-#define CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START\t0x10030\n-\n-/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE(0..63)_CNTS */\n-#define CN23XX_SLI_PKT_IN_DONE_CNTS_START64\t0x10040\n-\n-/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &\n- * gather list fetches. 
SLI_PKT(0..63)_INPUT_CONTROL.\n- */\n-#define CN23XX_SLI_PKT_INPUT_CONTROL_START64\t0x10000\n-\n-/* ------- Request Queue Macros --------- */\n-\n-/* Each Input Queue register is at a 16-byte Offset in BAR0 */\n-#define CN23XX_IQ_OFFSET\t\t\t0x20000\n-\n-#define CN23XX_SLI_IQ_PKT_CONTROL64(iq)\t\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_INPUT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))\n-\n-#define CN23XX_SLI_IQ_BASE_ADDR64(iq)\t\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_INSTR_BADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))\n-\n-#define CN23XX_SLI_IQ_SIZE(iq)\t\t\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START + ((iq) * CN23XX_IQ_OFFSET))\n-\n-#define CN23XX_SLI_IQ_DOORBELL(iq)\t\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START + ((iq) * CN23XX_IQ_OFFSET))\n-\n-#define CN23XX_SLI_IQ_INSTR_COUNT64(iq)\t\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_IN_DONE_CNTS_START64 + ((iq) * CN23XX_IQ_OFFSET))\n-\n-/* Number of instructions to be read in one MAC read request.\n- * setting to Max value(4)\n- */\n-#define CN23XX_PKT_INPUT_CTL_RDSIZE\t\t\t(3 << 25)\n-#define CN23XX_PKT_INPUT_CTL_IS_64B\t\t\t(1 << 24)\n-#define CN23XX_PKT_INPUT_CTL_RST\t\t\t(1 << 23)\n-#define CN23XX_PKT_INPUT_CTL_QUIET\t\t\t(1 << 28)\n-#define CN23XX_PKT_INPUT_CTL_RING_ENB\t\t\t(1 << 22)\n-#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP\t\t(1 << 6)\n-#define CN23XX_PKT_INPUT_CTL_USE_CSR\t\t\t(1 << 4)\n-#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP\t\t(2)\n-\n-/* These bits[47:44] select the Physical function number within the MAC */\n-#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS\t\t45\n-/* These bits[43:32] select the function number within the PF */\n-#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS\t\t32\n-\n-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-#define CN23XX_PKT_INPUT_CTL_MASK\t\t\t\\\n-\t(CN23XX_PKT_INPUT_CTL_RDSIZE |\t\t\t\\\n-\t CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP |\t\\\n-\t CN23XX_PKT_INPUT_CTL_USE_CSR)\n-#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-#define CN23XX_PKT_INPUT_CTL_MASK\t\t\t\\\n-\t(CN23XX_PKT_INPUT_CTL_RDSIZE |\t\t\t\\\n-\t CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP |\t\\\n-\t CN23XX_PKT_INPUT_CTL_USE_CSR |\t\t\t\\\n-\t CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)\n-#endif\n-\n-/* ############################ OUTPUT QUEUE ######################### */\n-\n-/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */\n-#define CN23XX_SLI_PKT_OUTPUT_CONTROL_START\t0x10050\n-\n-/* 64 registers for Output queue buffer and info size\n- * SLI_PKT(0..63)_OUT_SIZE\n- */\n-#define CN23XX_SLI_PKT_OUT_SIZE\t\t\t0x10060\n-\n-/* 64 registers for Output Queue Start Addr - SLI_PKT(0..63)_SLIST_BADDR */\n-#define CN23XX_SLI_SLIST_BADDR_START64\t\t0x10070\n-\n-/* 64 registers for Output Queue Packet Credits\n- * SLI_PKT(0..63)_SLIST_BAOFF_DBELL\n- */\n-#define CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START\t0x10080\n-\n-/* 64 registers for Output Queue size - SLI_PKT(0..63)_SLIST_FIFO_RSIZE */\n-#define CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START\t0x10090\n-\n-/* 64 registers for Output Queue Packet Count - SLI_PKT(0..63)_CNTS */\n-#define CN23XX_SLI_PKT_CNTS_START\t\t0x100B0\n-\n-/* Each Output Queue register is at a 16-byte Offset in BAR0 */\n-#define CN23XX_OQ_OFFSET\t\t\t0x20000\n-\n-/* ------- Output Queue Macros --------- */\n-\n-#define CN23XX_SLI_OQ_PKT_CONTROL(oq)\t\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_OUTPUT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))\n-\n-#define CN23XX_SLI_OQ_BASE_ADDR64(oq)\t\t\t\t\t\\\n-\t(CN23XX_SLI_SLIST_BADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))\n-\n-#define CN23XX_SLI_OQ_SIZE(oq)\t\t\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START + ((oq) 
* CN23XX_OQ_OFFSET))\n-\n-#define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq)\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_OUT_SIZE + ((oq) * CN23XX_OQ_OFFSET))\n-\n-#define CN23XX_SLI_OQ_PKTS_SENT(oq)\t\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_CNTS_START + ((oq) * CN23XX_OQ_OFFSET))\n-\n-#define CN23XX_SLI_OQ_PKTS_CREDIT(oq)\t\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START + ((oq) * CN23XX_OQ_OFFSET))\n-\n-/* ------------------ Masks ---------------- */\n-#define CN23XX_PKT_OUTPUT_CTL_IPTR\t\t(1 << 11)\n-#define CN23XX_PKT_OUTPUT_CTL_ES\t\t(1 << 9)\n-#define CN23XX_PKT_OUTPUT_CTL_NSR\t\t(1 << 8)\n-#define CN23XX_PKT_OUTPUT_CTL_ROR\t\t(1 << 7)\n-#define CN23XX_PKT_OUTPUT_CTL_DPTR\t\t(1 << 6)\n-#define CN23XX_PKT_OUTPUT_CTL_BMODE\t\t(1 << 5)\n-#define CN23XX_PKT_OUTPUT_CTL_ES_P\t\t(1 << 3)\n-#define CN23XX_PKT_OUTPUT_CTL_NSR_P\t\t(1 << 2)\n-#define CN23XX_PKT_OUTPUT_CTL_ROR_P\t\t(1 << 1)\n-#define CN23XX_PKT_OUTPUT_CTL_RING_ENB\t\t(1 << 0)\n-\n-/* Rings per Virtual Function [RO] */\n-#define CN23XX_PKT_INPUT_CTL_RPVF_MASK\t\t0x3F\n-#define CN23XX_PKT_INPUT_CTL_RPVF_POS\t\t48\n-\n-/* These bits[47:44][RO] give the Physical function\n- * number info within the MAC\n- */\n-#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK\t0x7\n-\n-/* These bits[43:32][RO] give the virtual function\n- * number info within the PF\n- */\n-#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK\t0x1FFF\n-\n-/* ######################### Mailbox Reg Macros ######################## */\n-#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START\t0x10200\n-#define CN23XX_VF_SLI_PKT_MBOX_INT_START\t0x10210\n-\n-#define CN23XX_SLI_MBOX_OFFSET\t\t\t0x20000\n-#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET\t\t0x8\n-\n-#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx)\t\t\t\t\\\n-\t(CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START +\t\t\t\t\\\n-\t ((q) * CN23XX_SLI_MBOX_OFFSET +\t\t\t\t\\\n-\t  (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))\n-\n-#define CN23XX_VF_SLI_PKT_MBOX_INT(q)\t\t\t\t\t\\\n-\t(CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))\n-\n-#endif /* _LIO_23XX_REG_H_ */\ndiff --git a/drivers/net/liquidio/base/lio_23xx_vf.c b/drivers/net/liquidio/base/lio_23xx_vf.c\ndeleted file mode 100644\nindex c6b8310b71..0000000000\n--- a/drivers/net/liquidio/base/lio_23xx_vf.c\n+++ /dev/null\n@@ -1,513 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#include <string.h>\n-\n-#include <ethdev_driver.h>\n-#include <rte_cycles.h>\n-#include <rte_malloc.h>\n-\n-#include \"lio_logs.h\"\n-#include \"lio_23xx_vf.h\"\n-#include \"lio_23xx_reg.h\"\n-#include \"lio_mbox.h\"\n-\n-static int\n-cn23xx_vf_reset_io_queues(struct lio_device *lio_dev, uint32_t num_queues)\n-{\n-\tuint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;\n-\tuint64_t d64, q_no;\n-\tint ret_val = 0;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\tfor (q_no = 0; q_no < num_queues; q_no++) {\n-\t\t/* set RST bit to 1. 
This bit applies to both IQ and OQ */\n-\t\td64 = lio_read_csr64(lio_dev,\n-\t\t\t\t     CN23XX_SLI_IQ_PKT_CONTROL64(q_no));\n-\t\td64 = d64 | CN23XX_PKT_INPUT_CTL_RST;\n-\t\tlio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),\n-\t\t\t\td64);\n-\t}\n-\n-\t/* wait until the RST bit is clear or the RST and QUIET bits are set */\n-\tfor (q_no = 0; q_no < num_queues; q_no++) {\n-\t\tvolatile uint64_t reg_val;\n-\n-\t\treg_val\t= lio_read_csr64(lio_dev,\n-\t\t\t\t\t CN23XX_SLI_IQ_PKT_CONTROL64(q_no));\n-\t\twhile ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&\n-\t\t\t\t!(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&\n-\t\t\t\tloop) {\n-\t\t\treg_val = lio_read_csr64(\n-\t\t\t\t\tlio_dev,\n-\t\t\t\t\tCN23XX_SLI_IQ_PKT_CONTROL64(q_no));\n-\t\t\tloop = loop - 1;\n-\t\t}\n-\n-\t\tif (loop == 0) {\n-\t\t\tlio_dev_err(lio_dev,\n-\t\t\t\t    \"clearing the reset reg failed or setting the quiet reg failed for qno: %lu\\n\",\n-\t\t\t\t    (unsigned long)q_no);\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\treg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;\n-\t\tlio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),\n-\t\t\t\treg_val);\n-\n-\t\treg_val = lio_read_csr64(\n-\t\t    lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));\n-\t\tif (reg_val & CN23XX_PKT_INPUT_CTL_RST) {\n-\t\t\tlio_dev_err(lio_dev,\n-\t\t\t\t    \"clearing the reset failed for qno: %lu\\n\",\n-\t\t\t\t    (unsigned long)q_no);\n-\t\t\tret_val = -1;\n-\t\t}\n-\t}\n-\n-\treturn ret_val;\n-}\n-\n-static int\n-cn23xx_vf_setup_global_input_regs(struct lio_device *lio_dev)\n-{\n-\tuint64_t q_no;\n-\tuint64_t d64;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\tif (cn23xx_vf_reset_io_queues(lio_dev,\n-\t\t\t\t      lio_dev->sriov_info.rings_per_vf))\n-\t\treturn -1;\n-\n-\tfor (q_no = 0; q_no < (lio_dev->sriov_info.rings_per_vf); q_no++) {\n-\t\tlio_write_csr64(lio_dev, CN23XX_SLI_IQ_DOORBELL(q_no),\n-\t\t\t\t0xFFFFFFFF);\n-\n-\t\td64 = lio_read_csr64(lio_dev,\n-\t\t\t\t     CN23XX_SLI_IQ_INSTR_COUNT64(q_no));\n-\n-\t\td64 &= 0xEFFFFFFFFFFFFFFFL;\n-\n-\t\tlio_write_csr64(lio_dev, CN23XX_SLI_IQ_INSTR_COUNT64(q_no),\n-\t\t\t\td64);\n-\n-\t\t/* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for\n-\t\t * the Input Queues\n-\t\t */\n-\t\tlio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),\n-\t\t\t\tCN23XX_PKT_INPUT_CTL_MASK);\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static void\n-cn23xx_vf_setup_global_output_regs(struct lio_device *lio_dev)\n-{\n-\tuint32_t reg_val;\n-\tuint32_t q_no;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\tfor (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {\n-\t\tlio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),\n-\t\t\t      0xFFFFFFFF);\n-\n-\t\treg_val =\n-\t\t    lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no));\n-\n-\t\treg_val &= 0xEFFFFFFFFFFFFFFFL;\n-\n-\t\tlio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no), reg_val);\n-\n-\t\treg_val =\n-\t\t    lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no));\n-\n-\t\t/* set IPTR & DPTR */\n-\t\treg_val |=\n-\t\t    (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);\n-\n-\t\t/* reset BMODE */\n-\t\treg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);\n-\n-\t\t/* No Relaxed Ordering, No Snoop, 64-bit Byte swap\n-\t\t * for Output Queue Scatter List\n-\t\t * reset ROR_P, NSR_P\n-\t\t */\n-\t\treg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);\n-\t\treg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);\n-\n-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-\t\treg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);\n-#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\t\treg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);\n-#endif\n-\t\t/* No Relaxed 
Ordering, No Snoop, 64-bit Byte swap\n-\t\t * for Output Queue Data\n-\t\t * reset ROR, NSR\n-\t\t */\n-\t\treg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);\n-\t\treg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);\n-\t\t/* set the ES bit */\n-\t\treg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);\n-\n-\t\t/* write all the selected settings */\n-\t\tlio_write_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no),\n-\t\t\t      reg_val);\n-\t}\n-}\n-\n-static int\n-cn23xx_vf_setup_device_regs(struct lio_device *lio_dev)\n-{\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\tif (cn23xx_vf_setup_global_input_regs(lio_dev))\n-\t\treturn -1;\n-\n-\tcn23xx_vf_setup_global_output_regs(lio_dev);\n-\n-\treturn 0;\n-}\n-\n-static void\n-cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no)\n-{\n-\tstruct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];\n-\tuint64_t pkt_in_done = 0;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\t/* Write the start of the input queue's ring and its size */\n-\tlio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),\n-\t\t\tiq->base_addr_dma);\n-\tlio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->nb_desc);\n-\n-\t/* Remember the doorbell & instruction count register addr\n-\t * for this queue\n-\t */\n-\tiq->doorbell_reg = (uint8_t *)lio_dev->hw_addr +\n-\t\t\t\tCN23XX_SLI_IQ_DOORBELL(iq_no);\n-\tiq->inst_cnt_reg = (uint8_t *)lio_dev->hw_addr +\n-\t\t\t\tCN23XX_SLI_IQ_INSTR_COUNT64(iq_no);\n-\tlio_dev_dbg(lio_dev, \"InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\\n\",\n-\t\t    iq_no, iq->doorbell_reg, iq->inst_cnt_reg);\n-\n-\t/* Store the current instruction counter (used in flush_iq\n-\t * calculation)\n-\t */\n-\tpkt_in_done = rte_read64(iq->inst_cnt_reg);\n-\n-\t/* Clear the count by writing back what we read, but don't\n-\t * enable data traffic here\n-\t */\n-\trte_write64(pkt_in_done, iq->inst_cnt_reg);\n-}\n-\n-static void\n-cn23xx_vf_setup_oq_regs(struct lio_device *lio_dev, uint32_t oq_no)\n-{\n-\tstruct lio_droq *droq = lio_dev->droq[oq_no];\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\tlio_write_csr64(lio_dev, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),\n-\t\t\tdroq->desc_ring_dma);\n-\tlio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->nb_desc);\n-\n-\tlio_write_csr(lio_dev, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),\n-\t\t      (droq->buffer_size | (OCTEON_RH_SIZE << 16)));\n-\n-\t/* Get the mapped address of the pkt_sent and pkts_credit regs */\n-\tdroq->pkts_sent_reg = (uint8_t *)lio_dev->hw_addr +\n-\t\t\t\t\tCN23XX_SLI_OQ_PKTS_SENT(oq_no);\n-\tdroq->pkts_credit_reg = (uint8_t *)lio_dev->hw_addr +\n-\t\t\t\t\tCN23XX_SLI_OQ_PKTS_CREDIT(oq_no);\n-}\n-\n-static void\n-cn23xx_vf_free_mbox(struct lio_device *lio_dev)\n-{\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\trte_free(lio_dev->mbox[0]);\n-\tlio_dev->mbox[0] = NULL;\n-\n-\trte_free(lio_dev->mbox);\n-\tlio_dev->mbox = NULL;\n-}\n-\n-static int\n-cn23xx_vf_setup_mbox(struct lio_device *lio_dev)\n-{\n-\tstruct lio_mbox *mbox;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\tif (lio_dev->mbox == NULL) {\n-\t\tlio_dev->mbox = rte_zmalloc(NULL, sizeof(void *), 0);\n-\t\tif (lio_dev->mbox == NULL)\n-\t\t\treturn -ENOMEM;\n-\t}\n-\n-\tmbox = rte_zmalloc(NULL, sizeof(struct lio_mbox), 0);\n-\tif (mbox == NULL) {\n-\t\trte_free(lio_dev->mbox);\n-\t\tlio_dev->mbox = NULL;\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\trte_spinlock_init(&mbox->lock);\n-\n-\tmbox->lio_dev = lio_dev;\n-\n-\tmbox->q_no = 0;\n-\n-\tmbox->state = LIO_MBOX_STATE_IDLE;\n-\n-\t/* VF mbox interrupt reg */\n-\tmbox->mbox_int_reg = (uint8_t *)lio_dev->hw_addr +\n-\t\t\t\tCN23XX_VF_SLI_PKT_MBOX_INT(0);\n-\t/* VF reads from SIG0 reg 
*/\n-\tmbox->mbox_read_reg = (uint8_t *)lio_dev->hw_addr +\n-\t\t\t\tCN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);\n-\t/* VF writes into SIG1 reg */\n-\tmbox->mbox_write_reg = (uint8_t *)lio_dev->hw_addr +\n-\t\t\t\tCN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);\n-\n-\tlio_dev->mbox[0] = mbox;\n-\n-\trte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);\n-\n-\treturn 0;\n-}\n-\n-static int\n-cn23xx_vf_enable_io_queues(struct lio_device *lio_dev)\n-{\n-\tuint32_t q_no;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\tfor (q_no = 0; q_no < lio_dev->num_iqs; q_no++) {\n-\t\tuint64_t reg_val;\n-\n-\t\t/* set the corresponding IQ IS_64B bit */\n-\t\tif (lio_dev->io_qmask.iq64B & (1ULL << q_no)) {\n-\t\t\treg_val = lio_read_csr64(\n-\t\t\t\t\tlio_dev,\n-\t\t\t\t\tCN23XX_SLI_IQ_PKT_CONTROL64(q_no));\n-\t\t\treg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;\n-\t\t\tlio_write_csr64(lio_dev,\n-\t\t\t\t\tCN23XX_SLI_IQ_PKT_CONTROL64(q_no),\n-\t\t\t\t\treg_val);\n-\t\t}\n-\n-\t\t/* set the corresponding IQ ENB bit */\n-\t\tif (lio_dev->io_qmask.iq & (1ULL << q_no)) {\n-\t\t\treg_val = lio_read_csr64(\n-\t\t\t\t\tlio_dev,\n-\t\t\t\t\tCN23XX_SLI_IQ_PKT_CONTROL64(q_no));\n-\t\t\treg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;\n-\t\t\tlio_write_csr64(lio_dev,\n-\t\t\t\t\tCN23XX_SLI_IQ_PKT_CONTROL64(q_no),\n-\t\t\t\t\treg_val);\n-\t\t}\n-\t}\n-\tfor (q_no = 0; q_no < lio_dev->num_oqs; q_no++) {\n-\t\tuint32_t reg_val;\n-\n-\t\t/* set the corresponding OQ ENB bit */\n-\t\tif (lio_dev->io_qmask.oq & (1ULL << q_no)) {\n-\t\t\treg_val = lio_read_csr(\n-\t\t\t\t\tlio_dev,\n-\t\t\t\t\tCN23XX_SLI_OQ_PKT_CONTROL(q_no));\n-\t\t\treg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;\n-\t\t\tlio_write_csr(lio_dev,\n-\t\t\t\t      CN23XX_SLI_OQ_PKT_CONTROL(q_no),\n-\t\t\t\t      reg_val);\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static void\n-cn23xx_vf_disable_io_queues(struct lio_device *lio_dev)\n-{\n-\tuint32_t num_queues;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\t/* per HRM, rings can only be disabled via reset operation,\n-\t * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]\n-\t */\n-\tnum_queues = lio_dev->num_iqs;\n-\tif (num_queues < lio_dev->num_oqs)\n-\t\tnum_queues = lio_dev->num_oqs;\n-\n-\tcn23xx_vf_reset_io_queues(lio_dev, num_queues);\n-}\n-\n-void\n-cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev)\n-{\n-\tstruct lio_mbox_cmd mbox_cmd;\n-\n-\tmemset(&mbox_cmd, 0, sizeof(struct lio_mbox_cmd));\n-\tmbox_cmd.msg.s.type = LIO_MBOX_REQUEST;\n-\tmbox_cmd.msg.s.resp_needed = 0;\n-\tmbox_cmd.msg.s.cmd = LIO_VF_FLR_REQUEST;\n-\tmbox_cmd.msg.s.len = 1;\n-\tmbox_cmd.q_no = 0;\n-\tmbox_cmd.recv_len = 0;\n-\tmbox_cmd.recv_status = 0;\n-\tmbox_cmd.fn = NULL;\n-\tmbox_cmd.fn_arg = 0;\n-\n-\tlio_mbox_write(lio_dev, &mbox_cmd);\n-}\n-\n-static void\n-cn23xx_pfvf_hs_callback(struct lio_device *lio_dev,\n-\t\t\tstruct lio_mbox_cmd *cmd, void *arg)\n-{\n-\tuint32_t major = 0;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\trte_memcpy((uint8_t *)&lio_dev->pfvf_hsword, cmd->msg.s.params, 6);\n-\tif (cmd->recv_len > 1) {\n-\t\tstruct lio_version *lio_ver = (struct lio_version *)cmd->data;\n-\n-\t\tmajor = lio_ver->major;\n-\t\tmajor = major << 16;\n-\t}\n-\n-\trte_atomic64_set((rte_atomic64_t *)arg, major | 1);\n-}\n-\n-int\n-cn23xx_pfvf_handshake(struct lio_device *lio_dev)\n-{\n-\tstruct lio_mbox_cmd mbox_cmd;\n-\tstruct lio_version *lio_ver = (struct lio_version *)&mbox_cmd.data[0];\n-\tuint32_t q_no, count = 0;\n-\trte_atomic64_t status;\n-\tuint32_t pfmajor;\n-\tuint32_t vfmajor;\n-\tuint32_t ret;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\t/* Sending VF_ACTIVE indication to the 
PF driver */\n-\tlio_dev_dbg(lio_dev, \"requesting info from PF\\n\");\n-\n-\tmbox_cmd.msg.mbox_msg64 = 0;\n-\tmbox_cmd.msg.s.type = LIO_MBOX_REQUEST;\n-\tmbox_cmd.msg.s.resp_needed = 1;\n-\tmbox_cmd.msg.s.cmd = LIO_VF_ACTIVE;\n-\tmbox_cmd.msg.s.len = 2;\n-\tmbox_cmd.data[0] = 0;\n-\tlio_ver->major = LIO_BASE_MAJOR_VERSION;\n-\tlio_ver->minor = LIO_BASE_MINOR_VERSION;\n-\tlio_ver->micro = LIO_BASE_MICRO_VERSION;\n-\tmbox_cmd.q_no = 0;\n-\tmbox_cmd.recv_len = 0;\n-\tmbox_cmd.recv_status = 0;\n-\tmbox_cmd.fn = (lio_mbox_callback)cn23xx_pfvf_hs_callback;\n-\tmbox_cmd.fn_arg = (void *)&status;\n-\n-\tif (lio_mbox_write(lio_dev, &mbox_cmd)) {\n-\t\tlio_dev_err(lio_dev, \"Write to mailbox failed\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\trte_atomic64_set(&status, 0);\n-\n-\tdo {\n-\t\trte_delay_ms(1);\n-\t} while ((rte_atomic64_read(&status) == 0) && (count++ < 10000));\n-\n-\tret = rte_atomic64_read(&status);\n-\tif (ret == 0) {\n-\t\tlio_dev_err(lio_dev, \"cn23xx_pfvf_handshake timeout\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tfor (q_no = 0; q_no < lio_dev->num_iqs; q_no++)\n-\t\tlio_dev->instr_queue[q_no]->txpciq.s.pkind =\n-\t\t\t\t\t\tlio_dev->pfvf_hsword.pkind;\n-\n-\tvfmajor = LIO_BASE_MAJOR_VERSION;\n-\tpfmajor = ret >> 16;\n-\tif (pfmajor != vfmajor) {\n-\t\tlio_dev_err(lio_dev,\n-\t\t\t    \"VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\\n\",\n-\t\t\t    vfmajor, pfmajor);\n-\t\tret = -EPERM;\n-\t} else {\n-\t\tlio_dev_dbg(lio_dev,\n-\t\t\t    \"VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\\n\",\n-\t\t\t    vfmajor, pfmajor);\n-\t\tret = 0;\n-\t}\n-\n-\tlio_dev_dbg(lio_dev, \"got data from PF pkind is %d\\n\",\n-\t\t    lio_dev->pfvf_hsword.pkind);\n-\n-\treturn ret;\n-}\n-\n-void\n-cn23xx_vf_handle_mbox(struct lio_device *lio_dev)\n-{\n-\tuint64_t mbox_int_val;\n-\n-\t/* read and clear by writing 1 */\n-\tmbox_int_val = rte_read64(lio_dev->mbox[0]->mbox_int_reg);\n-\trte_write64(mbox_int_val, lio_dev->mbox[0]->mbox_int_reg);\n-\tif (lio_mbox_read(lio_dev->mbox[0]))\n-\t\tlio_mbox_process_message(lio_dev->mbox[0]);\n-}\n-\n-int\n-cn23xx_vf_setup_device(struct lio_device *lio_dev)\n-{\n-\tuint64_t reg_val;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\t/* INPUT_CONTROL[RPVF] gives the VF IOq count */\n-\treg_val = lio_read_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(0));\n-\n-\tlio_dev->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &\n-\t\t\t\tCN23XX_PKT_INPUT_CTL_PF_NUM_MASK;\n-\tlio_dev->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &\n-\t\t\t\tCN23XX_PKT_INPUT_CTL_VF_NUM_MASK;\n-\n-\treg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;\n-\n-\tlio_dev->sriov_info.rings_per_vf =\n-\t\t\t\treg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;\n-\n-\tlio_dev->default_config = lio_get_conf(lio_dev);\n-\tif (lio_dev->default_config == NULL)\n-\t\treturn -1;\n-\n-\tlio_dev->fn_list.setup_iq_regs\t\t= cn23xx_vf_setup_iq_regs;\n-\tlio_dev->fn_list.setup_oq_regs\t\t= cn23xx_vf_setup_oq_regs;\n-\tlio_dev->fn_list.setup_mbox\t\t= cn23xx_vf_setup_mbox;\n-\tlio_dev->fn_list.free_mbox\t\t= cn23xx_vf_free_mbox;\n-\n-\tlio_dev->fn_list.setup_device_regs\t= cn23xx_vf_setup_device_regs;\n-\n-\tlio_dev->fn_list.enable_io_queues\t= cn23xx_vf_enable_io_queues;\n-\tlio_dev->fn_list.disable_io_queues\t= cn23xx_vf_disable_io_queues;\n-\n-\treturn 0;\n-}\n-\ndiff --git a/drivers/net/liquidio/base/lio_23xx_vf.h b/drivers/net/liquidio/base/lio_23xx_vf.h\ndeleted file mode 100644\nindex 8e5362db15..0000000000\n--- 
a/drivers/net/liquidio/base/lio_23xx_vf.h\n+++ /dev/null\n@@ -1,63 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#ifndef _LIO_23XX_VF_H_\n-#define _LIO_23XX_VF_H_\n-\n-#include <stdio.h>\n-\n-#include \"lio_struct.h\"\n-\n-static const struct lio_config default_cn23xx_conf\t= {\n-\t.card_type\t\t\t\t= LIO_23XX,\n-\t.card_name\t\t\t\t= LIO_23XX_NAME,\n-\t/** IQ attributes */\n-\t.iq\t\t\t\t\t= {\n-\t\t.max_iqs\t\t\t= CN23XX_CFG_IO_QUEUES,\n-\t\t.pending_list_size\t\t=\n-\t\t\t(CN23XX_MAX_IQ_DESCRIPTORS * CN23XX_CFG_IO_QUEUES),\n-\t\t.instr_type\t\t\t= OCTEON_64BYTE_INSTR,\n-\t},\n-\n-\t/** OQ attributes */\n-\t.oq\t\t\t\t\t= {\n-\t\t.max_oqs\t\t\t= CN23XX_CFG_IO_QUEUES,\n-\t\t.info_ptr\t\t\t= OCTEON_OQ_INFOPTR_MODE,\n-\t\t.refill_threshold\t\t= CN23XX_OQ_REFIL_THRESHOLD,\n-\t},\n-\n-\t.num_nic_ports\t\t\t\t= CN23XX_DEFAULT_NUM_PORTS,\n-\t.num_def_rx_descs\t\t\t= CN23XX_MAX_OQ_DESCRIPTORS,\n-\t.num_def_tx_descs\t\t\t= CN23XX_MAX_IQ_DESCRIPTORS,\n-\t.def_rx_buf_size\t\t\t= CN23XX_OQ_BUF_SIZE,\n-};\n-\n-static inline const struct lio_config *\n-lio_get_conf(struct lio_device *lio_dev)\n-{\n-\tconst struct lio_config *default_lio_conf = NULL;\n-\n-\t/* check the LIO Device model & return the corresponding lio\n-\t * configuration\n-\t */\n-\tdefault_lio_conf = &default_cn23xx_conf;\n-\n-\tif (default_lio_conf == NULL) {\n-\t\tlio_dev_err(lio_dev, \"Configuration verification failed\\n\");\n-\t\treturn NULL;\n-\t}\n-\n-\treturn default_lio_conf;\n-}\n-\n-#define CN23XX_VF_BUSY_READING_REG_LOOP_COUNT\t100000\n-\n-void cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev);\n-\n-int cn23xx_pfvf_handshake(struct lio_device *lio_dev);\n-\n-int cn23xx_vf_setup_device(struct lio_device  *lio_dev);\n-\n-void cn23xx_vf_handle_mbox(struct lio_device *lio_dev);\n-#endif /* _LIO_23XX_VF_H_  */\ndiff --git a/drivers/net/liquidio/base/lio_hw_defs.h b/drivers/net/liquidio/base/lio_hw_defs.h\ndeleted file mode 100644\nindex 5e119c1241..0000000000\n--- a/drivers/net/liquidio/base/lio_hw_defs.h\n+++ /dev/null\n@@ -1,239 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#ifndef _LIO_HW_DEFS_H_\n-#define _LIO_HW_DEFS_H_\n-\n-#include <rte_io.h>\n-\n-#ifndef PCI_VENDOR_ID_CAVIUM\n-#define PCI_VENDOR_ID_CAVIUM\t0x177D\n-#endif\n-\n-#define LIO_CN23XX_VF_VID\t0x9712\n-\n-/* CN23xx subsystem device ids */\n-#define PCI_SUBSYS_DEV_ID_CN2350_210\t\t0x0004\n-#define PCI_SUBSYS_DEV_ID_CN2360_210\t\t0x0005\n-#define PCI_SUBSYS_DEV_ID_CN2360_225\t\t0x0006\n-#define PCI_SUBSYS_DEV_ID_CN2350_225\t\t0x0007\n-#define PCI_SUBSYS_DEV_ID_CN2350_210SVPN3\t0x0008\n-#define PCI_SUBSYS_DEV_ID_CN2360_210SVPN3\t0x0009\n-#define PCI_SUBSYS_DEV_ID_CN2350_210SVPT\t0x000a\n-#define PCI_SUBSYS_DEV_ID_CN2360_210SVPT\t0x000b\n-\n-/* --------------------------CONFIG VALUES------------------------ */\n-\n-/* CN23xx IQ configuration macros */\n-#define CN23XX_MAX_RINGS_PER_PF\t\t\t64\n-#define CN23XX_MAX_RINGS_PER_VF\t\t\t8\n-\n-#define CN23XX_MAX_INPUT_QUEUES\t\t\tCN23XX_MAX_RINGS_PER_PF\n-#define CN23XX_MAX_IQ_DESCRIPTORS\t\t512\n-#define CN23XX_MIN_IQ_DESCRIPTORS\t\t128\n-\n-#define CN23XX_MAX_OUTPUT_QUEUES\t\tCN23XX_MAX_RINGS_PER_PF\n-#define CN23XX_MAX_OQ_DESCRIPTORS\t\t512\n-#define CN23XX_MIN_OQ_DESCRIPTORS\t\t128\n-#define CN23XX_OQ_BUF_SIZE\t\t\t1536\n-\n-#define CN23XX_OQ_REFIL_THRESHOLD\t\t16\n-\n-#define CN23XX_DEFAULT_NUM_PORTS\t\t1\n-\n-#define CN23XX_CFG_IO_QUEUES\t\t\tCN23XX_MAX_RINGS_PER_PF\n-\n-/* common OCTEON configuration macros 
*/\n-#define OCTEON_64BYTE_INSTR\t\t\t64\n-#define OCTEON_OQ_INFOPTR_MODE\t\t\t1\n-\n-/* Max IOQs per LIO Link */\n-#define LIO_MAX_IOQS_PER_IF\t\t\t64\n-\n-/* Wait time in milliseconds for FLR */\n-#define LIO_PCI_FLR_WAIT\t\t\t100\n-\n-enum lio_card_type {\n-\tLIO_23XX /* 23xx */\n-};\n-\n-#define LIO_23XX_NAME \"23xx\"\n-\n-#define LIO_DEV_RUNNING\t\t0xc\n-\n-#define LIO_OQ_REFILL_THRESHOLD_CFG(cfg)\t\t\t\t\\\n-\t\t((cfg)->default_config->oq.refill_threshold)\n-#define LIO_NUM_DEF_TX_DESCS_CFG(cfg)\t\t\t\t\t\\\n-\t\t((cfg)->default_config->num_def_tx_descs)\n-\n-#define LIO_IQ_INSTR_TYPE(cfg)\t\t((cfg)->default_config->iq.instr_type)\n-\n-/* The following config values are fixed and should not be modified. */\n-\n-/* Maximum number of Instruction queues */\n-#define LIO_MAX_INSTR_QUEUES(lio_dev)\t\tCN23XX_MAX_RINGS_PER_VF\n-\n-#define LIO_MAX_POSSIBLE_INSTR_QUEUES\t\tCN23XX_MAX_INPUT_QUEUES\n-#define LIO_MAX_POSSIBLE_OUTPUT_QUEUES\t\tCN23XX_MAX_OUTPUT_QUEUES\n-\n-#define LIO_DEVICE_NAME_LEN\t\t32\n-#define LIO_BASE_MAJOR_VERSION\t\t1\n-#define LIO_BASE_MINOR_VERSION\t\t5\n-#define LIO_BASE_MICRO_VERSION\t\t1\n-\n-#define LIO_FW_VERSION_LENGTH\t\t32\n-\n-#define LIO_Q_RECONF_MIN_VERSION\t\"1.7.0\"\n-#define LIO_VF_TRUST_MIN_VERSION\t\"1.7.1\"\n-\n-/** Tag types used by Octeon cores in its work. */\n-enum octeon_tag_type {\n-\tOCTEON_ORDERED_TAG\t= 0,\n-\tOCTEON_ATOMIC_TAG\t= 1,\n-};\n-\n-/* pre-defined host->NIC tag values */\n-#define LIO_CONTROL\t(0x11111110)\n-#define LIO_DATA(i)\t(0x11111111 + (i))\n-\n-/* used for NIC operations */\n-#define LIO_OPCODE\t1\n-\n-/* Subcodes are used by host driver/apps to identify the sub-operation\n- * for the core. They only need to by unique for a given subsystem.\n- */\n-#define LIO_OPCODE_SUBCODE(op, sub)\t\t\\\n-\t\t((((op) & 0x0f) << 8) | ((sub) & 0x7f))\n-\n-/** LIO_OPCODE subcodes */\n-/* This subcode is sent by core PCI driver to indicate cores are ready. 
*/\n-#define LIO_OPCODE_NW_DATA\t\t0x02 /* network packet data */\n-#define LIO_OPCODE_CMD\t\t\t0x03\n-#define LIO_OPCODE_INFO\t\t\t0x04\n-#define LIO_OPCODE_PORT_STATS\t\t0x05\n-#define LIO_OPCODE_IF_CFG\t\t0x09\n-\n-#define LIO_MIN_RX_BUF_SIZE\t\t64\n-#define LIO_MAX_RX_PKTLEN\t\t(64 * 1024)\n-\n-/* NIC Command types */\n-#define LIO_CMD_CHANGE_MTU\t\t0x1\n-#define LIO_CMD_CHANGE_DEVFLAGS\t\t0x3\n-#define LIO_CMD_RX_CTL\t\t\t0x4\n-#define LIO_CMD_CLEAR_STATS\t\t0x6\n-#define LIO_CMD_SET_RSS\t\t\t0xD\n-#define LIO_CMD_TNL_RX_CSUM_CTL\t\t0x10\n-#define LIO_CMD_TNL_TX_CSUM_CTL\t\t0x11\n-#define LIO_CMD_ADD_VLAN_FILTER\t\t0x17\n-#define LIO_CMD_DEL_VLAN_FILTER\t\t0x18\n-#define LIO_CMD_VXLAN_PORT_CONFIG\t0x19\n-#define LIO_CMD_QUEUE_COUNT_CTL\t\t0x1f\n-\n-#define LIO_CMD_VXLAN_PORT_ADD\t\t0x0\n-#define LIO_CMD_VXLAN_PORT_DEL\t\t0x1\n-#define LIO_CMD_RXCSUM_ENABLE\t\t0x0\n-#define LIO_CMD_TXCSUM_ENABLE\t\t0x0\n-\n-/* RX(packets coming from wire) Checksum verification flags */\n-/* TCP/UDP csum */\n-#define LIO_L4_CSUM_VERIFIED\t\t0x1\n-#define LIO_IP_CSUM_VERIFIED\t\t0x2\n-\n-/* RSS */\n-#define LIO_RSS_PARAM_DISABLE_RSS\t\t0x10\n-#define LIO_RSS_PARAM_HASH_KEY_UNCHANGED\t0x08\n-#define LIO_RSS_PARAM_ITABLE_UNCHANGED\t\t0x04\n-#define LIO_RSS_PARAM_HASH_INFO_UNCHANGED\t0x02\n-\n-#define LIO_RSS_HASH_IPV4\t\t\t0x100\n-#define LIO_RSS_HASH_TCP_IPV4\t\t\t0x200\n-#define LIO_RSS_HASH_IPV6\t\t\t0x400\n-#define LIO_RSS_HASH_TCP_IPV6\t\t\t0x1000\n-#define LIO_RSS_HASH_IPV6_EX\t\t\t0x800\n-#define LIO_RSS_HASH_TCP_IPV6_EX\t\t0x2000\n-\n-#define LIO_RSS_OFFLOAD_ALL (\t\t\\\n-\t\tLIO_RSS_HASH_IPV4 |\t\\\n-\t\tLIO_RSS_HASH_TCP_IPV4 |\t\\\n-\t\tLIO_RSS_HASH_IPV6 |\t\\\n-\t\tLIO_RSS_HASH_TCP_IPV6 |\t\\\n-\t\tLIO_RSS_HASH_IPV6_EX |\t\\\n-\t\tLIO_RSS_HASH_TCP_IPV6_EX)\n-\n-#define LIO_RSS_MAX_TABLE_SZ\t\t128\n-#define LIO_RSS_MAX_KEY_SZ\t\t40\n-#define LIO_RSS_PARAM_SIZE\t\t16\n-\n-/* Interface flags communicated between host driver and core app. 
*/\n-enum lio_ifflags {\n-\tLIO_IFFLAG_PROMISC\t= 0x01,\n-\tLIO_IFFLAG_ALLMULTI\t= 0x02,\n-\tLIO_IFFLAG_UNICAST\t= 0x10\n-};\n-\n-/* Routines for reading and writing CSRs */\n-#ifdef RTE_LIBRTE_LIO_DEBUG_REGS\n-#define lio_write_csr(lio_dev, reg_off, value)\t\t\t\t\\\n-\tdo {\t\t\t\t\t\t\t\t\\\n-\t\ttypeof(lio_dev) _dev = lio_dev;\t\t\t\t\\\n-\t\ttypeof(reg_off) _reg_off = reg_off;\t\t\t\\\n-\t\ttypeof(value) _value = value;\t\t\t\t\\\n-\t\tPMD_REGS_LOG(_dev,\t\t\t\t\t\\\n-\t\t\t     \"Write32: Reg: 0x%08lx Val: 0x%08lx\\n\",\t\\\n-\t\t\t     (unsigned long)_reg_off,\t\t\t\\\n-\t\t\t     (unsigned long)_value);\t\t\t\\\n-\t\trte_write32(_value, _dev->hw_addr + _reg_off);\t\t\\\n-\t} while (0)\n-\n-#define lio_write_csr64(lio_dev, reg_off, val64)\t\t\t\\\n-\tdo {\t\t\t\t\t\t\t\t\\\n-\t\ttypeof(lio_dev) _dev = lio_dev;\t\t\t\t\\\n-\t\ttypeof(reg_off) _reg_off = reg_off;\t\t\t\\\n-\t\ttypeof(val64) _val64 = val64;\t\t\t\t\\\n-\t\tPMD_REGS_LOG(\t\t\t\t\t\t\\\n-\t\t    _dev,\t\t\t\t\t\t\\\n-\t\t    \"Write64: Reg: 0x%08lx Val: 0x%016llx\\n\",\t\t\\\n-\t\t    (unsigned long)_reg_off,\t\t\t\t\\\n-\t\t    (unsigned long long)_val64);\t\t\t\\\n-\t\trte_write64(_val64, _dev->hw_addr + _reg_off);\t\t\\\n-\t} while (0)\n-\n-#define lio_read_csr(lio_dev, reg_off)\t\t\t\t\t\\\n-\t({\t\t\t\t\t\t\t\t\\\n-\t\ttypeof(lio_dev) _dev = lio_dev;\t\t\t\t\\\n-\t\ttypeof(reg_off) _reg_off = reg_off;\t\t\t\\\n-\t\tuint32_t val = rte_read32(_dev->hw_addr + _reg_off);\t\\\n-\t\tPMD_REGS_LOG(_dev,\t\t\t\t\t\\\n-\t\t\t     \"Read32: Reg: 0x%08lx Val: 0x%08lx\\n\",\t\\\n-\t\t\t     (unsigned long)_reg_off,\t\t\t\\\n-\t\t\t     (unsigned long)val);\t\t\t\\\n-\t\tval;\t\t\t\t\t\t\t\\\n-\t})\n-\n-#define lio_read_csr64(lio_dev, reg_off)\t\t\t\t\\\n-\t({\t\t\t\t\t\t\t\t\\\n-\t\ttypeof(lio_dev) _dev = lio_dev;\t\t\t\t\\\n-\t\ttypeof(reg_off) _reg_off = reg_off;\t\t\t\\\n-\t\tuint64_t val64 = rte_read64(_dev->hw_addr + _reg_off);\t\\\n-\t\tPMD_REGS_LOG(\t\t\t\t\t\t\\\n-\t\t    _dev,\t\t\t\t\t\t\\\n-\t\t    \"Read64: Reg: 0x%08lx Val: 0x%016llx\\n\",\t\t\\\n-\t\t    (unsigned long)_reg_off,\t\t\t\t\\\n-\t\t    (unsigned long long)val64);\t\t\t\t\\\n-\t\tval64;\t\t\t\t\t\t\t\\\n-\t})\n-#else\n-#define lio_write_csr(lio_dev, reg_off, value)\t\t\t\t\\\n-\trte_write32(value, (lio_dev)->hw_addr + (reg_off))\n-\n-#define lio_write_csr64(lio_dev, reg_off, val64)\t\t\t\\\n-\trte_write64(val64, (lio_dev)->hw_addr + (reg_off))\n-\n-#define lio_read_csr(lio_dev, reg_off)\t\t\t\t\t\\\n-\trte_read32((lio_dev)->hw_addr + (reg_off))\n-\n-#define lio_read_csr64(lio_dev, reg_off)\t\t\t\t\\\n-\trte_read64((lio_dev)->hw_addr + (reg_off))\n-#endif\n-#endif /* _LIO_HW_DEFS_H_ */\ndiff --git a/drivers/net/liquidio/base/lio_mbox.c b/drivers/net/liquidio/base/lio_mbox.c\ndeleted file mode 100644\nindex 2ac2b1b334..0000000000\n--- a/drivers/net/liquidio/base/lio_mbox.c\n+++ /dev/null\n@@ -1,246 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#include <ethdev_driver.h>\n-#include <rte_cycles.h>\n-\n-#include \"lio_logs.h\"\n-#include \"lio_struct.h\"\n-#include \"lio_mbox.h\"\n-\n-/**\n- * lio_mbox_read:\n- * @mbox: Pointer mailbox\n- *\n- * Reads the 8-bytes of data from the mbox register\n- * Writes back the acknowledgment indicating completion of read\n- */\n-int\n-lio_mbox_read(struct lio_mbox *mbox)\n-{\n-\tunion lio_mbox_message msg;\n-\tint ret = 0;\n-\n-\tmsg.mbox_msg64 = rte_read64(mbox->mbox_read_reg);\n-\n-\tif ((msg.mbox_msg64 == LIO_PFVFACK) || (msg.mbox_msg64 == LIO_PFVFSIG))\n-\t\treturn 
0;\n-\n-\tif (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) {\n-\t\tmbox->mbox_req.data[mbox->mbox_req.recv_len - 1] =\n-\t\t\t\t\tmsg.mbox_msg64;\n-\t\tmbox->mbox_req.recv_len++;\n-\t} else {\n-\t\tif (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) {\n-\t\t\tmbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] =\n-\t\t\t\t\tmsg.mbox_msg64;\n-\t\t\tmbox->mbox_resp.recv_len++;\n-\t\t} else {\n-\t\t\tif ((mbox->state & LIO_MBOX_STATE_IDLE) &&\n-\t\t\t\t\t(msg.s.type == LIO_MBOX_REQUEST)) {\n-\t\t\t\tmbox->state &= ~LIO_MBOX_STATE_IDLE;\n-\t\t\t\tmbox->state |= LIO_MBOX_STATE_REQ_RECEIVING;\n-\t\t\t\tmbox->mbox_req.msg.mbox_msg64 = msg.mbox_msg64;\n-\t\t\t\tmbox->mbox_req.q_no = mbox->q_no;\n-\t\t\t\tmbox->mbox_req.recv_len = 1;\n-\t\t\t} else {\n-\t\t\t\tif ((mbox->state &\n-\t\t\t\t     LIO_MBOX_STATE_RES_PENDING) &&\n-\t\t\t\t    (msg.s.type == LIO_MBOX_RESPONSE)) {\n-\t\t\t\t\tmbox->state &=\n-\t\t\t\t\t\t~LIO_MBOX_STATE_RES_PENDING;\n-\t\t\t\t\tmbox->state |=\n-\t\t\t\t\t\tLIO_MBOX_STATE_RES_RECEIVING;\n-\t\t\t\t\tmbox->mbox_resp.msg.mbox_msg64 =\n-\t\t\t\t\t\t\t\tmsg.mbox_msg64;\n-\t\t\t\t\tmbox->mbox_resp.q_no = mbox->q_no;\n-\t\t\t\t\tmbox->mbox_resp.recv_len = 1;\n-\t\t\t\t} else {\n-\t\t\t\t\trte_write64(LIO_PFVFERR,\n-\t\t\t\t\t\t    mbox->mbox_read_reg);\n-\t\t\t\t\tmbox->state |= LIO_MBOX_STATE_ERROR;\n-\t\t\t\t\treturn -1;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\tif (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) {\n-\t\tif (mbox->mbox_req.recv_len < msg.s.len) {\n-\t\t\tret = 0;\n-\t\t} else {\n-\t\t\tmbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVING;\n-\t\t\tmbox->state |= LIO_MBOX_STATE_REQ_RECEIVED;\n-\t\t\tret = 1;\n-\t\t}\n-\t} else {\n-\t\tif (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) {\n-\t\t\tif (mbox->mbox_resp.recv_len < msg.s.len) {\n-\t\t\t\tret = 0;\n-\t\t\t} else {\n-\t\t\t\tmbox->state &= ~LIO_MBOX_STATE_RES_RECEIVING;\n-\t\t\t\tmbox->state |= LIO_MBOX_STATE_RES_RECEIVED;\n-\t\t\t\tret = 1;\n-\t\t\t}\n-\t\t} else {\n-\t\t\tRTE_ASSERT(0);\n-\t\t}\n-\t}\n-\n-\trte_write64(LIO_PFVFACK, mbox->mbox_read_reg);\n-\n-\treturn ret;\n-}\n-\n-/**\n- * lio_mbox_write:\n- * @lio_dev: Pointer lio device\n- * @mbox_cmd: Cmd to send to mailbox.\n- *\n- * Populates the queue specific mbox structure\n- * with cmd information.\n- * Write the cmd to mbox register\n- */\n-int\n-lio_mbox_write(struct lio_device *lio_dev,\n-\t       struct lio_mbox_cmd *mbox_cmd)\n-{\n-\tstruct lio_mbox *mbox = lio_dev->mbox[mbox_cmd->q_no];\n-\tuint32_t count, i, ret = LIO_MBOX_STATUS_SUCCESS;\n-\n-\tif ((mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) &&\n-\t\t\t!(mbox->state & LIO_MBOX_STATE_REQ_RECEIVED))\n-\t\treturn LIO_MBOX_STATUS_FAILED;\n-\n-\tif ((mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) &&\n-\t\t\t!(mbox->state & LIO_MBOX_STATE_IDLE))\n-\t\treturn LIO_MBOX_STATUS_BUSY;\n-\n-\tif (mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) {\n-\t\trte_memcpy(&mbox->mbox_resp, mbox_cmd,\n-\t\t\t   sizeof(struct lio_mbox_cmd));\n-\t\tmbox->state = LIO_MBOX_STATE_RES_PENDING;\n-\t}\n-\n-\tcount = 0;\n-\n-\twhile (rte_read64(mbox->mbox_write_reg) != LIO_PFVFSIG) {\n-\t\trte_delay_ms(1);\n-\t\tif (count++ == 1000) {\n-\t\t\tret = LIO_MBOX_STATUS_FAILED;\n-\t\t\tbreak;\n-\t\t}\n-\t}\n-\n-\tif (ret == LIO_MBOX_STATUS_SUCCESS) {\n-\t\trte_write64(mbox_cmd->msg.mbox_msg64, mbox->mbox_write_reg);\n-\t\tfor (i = 0; i < (uint32_t)(mbox_cmd->msg.s.len - 1); i++) {\n-\t\t\tcount = 0;\n-\t\t\twhile (rte_read64(mbox->mbox_write_reg) !=\n-\t\t\t\t\tLIO_PFVFACK) {\n-\t\t\t\trte_delay_ms(1);\n-\t\t\t\tif (count++ == 1000) {\n-\t\t\t\t\tret = 
LIO_MBOX_STATUS_FAILED;\n-\t\t\t\t\tbreak;\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\trte_write64(mbox_cmd->data[i], mbox->mbox_write_reg);\n-\t\t}\n-\t}\n-\n-\tif (mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) {\n-\t\tmbox->state = LIO_MBOX_STATE_IDLE;\n-\t\trte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);\n-\t} else {\n-\t\tif ((!mbox_cmd->msg.s.resp_needed) ||\n-\t\t\t\t(ret == LIO_MBOX_STATUS_FAILED)) {\n-\t\t\tmbox->state &= ~LIO_MBOX_STATE_RES_PENDING;\n-\t\t\tif (!(mbox->state & (LIO_MBOX_STATE_REQ_RECEIVING |\n-\t\t\t\t\t     LIO_MBOX_STATE_REQ_RECEIVED)))\n-\t\t\t\tmbox->state = LIO_MBOX_STATE_IDLE;\n-\t\t}\n-\t}\n-\n-\treturn ret;\n-}\n-\n-/**\n- * lio_mbox_process_cmd:\n- * @mbox: Pointer mailbox\n- * @mbox_cmd: Pointer to command received\n- *\n- * Process the cmd received in mbox\n- */\n-static int\n-lio_mbox_process_cmd(struct lio_mbox *mbox,\n-\t\t     struct lio_mbox_cmd *mbox_cmd)\n-{\n-\tstruct lio_device *lio_dev = mbox->lio_dev;\n-\n-\tif (mbox_cmd->msg.s.cmd == LIO_CORES_CRASHED)\n-\t\tlio_dev_err(lio_dev, \"Octeon core(s) crashed or got stuck!\\n\");\n-\n-\treturn 0;\n-}\n-\n-/**\n- * Process the received mbox message.\n- */\n-int\n-lio_mbox_process_message(struct lio_mbox *mbox)\n-{\n-\tstruct lio_mbox_cmd mbox_cmd;\n-\n-\tif (mbox->state & LIO_MBOX_STATE_ERROR) {\n-\t\tif (mbox->state & (LIO_MBOX_STATE_RES_PENDING |\n-\t\t\t\t   LIO_MBOX_STATE_RES_RECEIVING)) {\n-\t\t\trte_memcpy(&mbox_cmd, &mbox->mbox_resp,\n-\t\t\t\t   sizeof(struct lio_mbox_cmd));\n-\t\t\tmbox->state = LIO_MBOX_STATE_IDLE;\n-\t\t\trte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);\n-\t\t\tmbox_cmd.recv_status = 1;\n-\t\t\tif (mbox_cmd.fn)\n-\t\t\t\tmbox_cmd.fn(mbox->lio_dev, &mbox_cmd,\n-\t\t\t\t\t    mbox_cmd.fn_arg);\n-\n-\t\t\treturn 0;\n-\t\t}\n-\n-\t\tmbox->state = LIO_MBOX_STATE_IDLE;\n-\t\trte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);\n-\n-\t\treturn 0;\n-\t}\n-\n-\tif (mbox->state & LIO_MBOX_STATE_RES_RECEIVED) {\n-\t\trte_memcpy(&mbox_cmd, &mbox->mbox_resp,\n-\t\t\t   sizeof(struct lio_mbox_cmd));\n-\t\tmbox->state = LIO_MBOX_STATE_IDLE;\n-\t\trte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);\n-\t\tmbox_cmd.recv_status = 0;\n-\t\tif (mbox_cmd.fn)\n-\t\t\tmbox_cmd.fn(mbox->lio_dev, &mbox_cmd, mbox_cmd.fn_arg);\n-\n-\t\treturn 0;\n-\t}\n-\n-\tif (mbox->state & LIO_MBOX_STATE_REQ_RECEIVED) {\n-\t\trte_memcpy(&mbox_cmd, &mbox->mbox_req,\n-\t\t\t   sizeof(struct lio_mbox_cmd));\n-\t\tif (!mbox_cmd.msg.s.resp_needed) {\n-\t\t\tmbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVED;\n-\t\t\tif (!(mbox->state & LIO_MBOX_STATE_RES_PENDING))\n-\t\t\t\tmbox->state = LIO_MBOX_STATE_IDLE;\n-\t\t\trte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);\n-\t\t}\n-\n-\t\tlio_mbox_process_cmd(mbox, &mbox_cmd);\n-\n-\t\treturn 0;\n-\t}\n-\n-\tRTE_ASSERT(0);\n-\n-\treturn 0;\n-}\ndiff --git a/drivers/net/liquidio/base/lio_mbox.h b/drivers/net/liquidio/base/lio_mbox.h\ndeleted file mode 100644\nindex 457917e91f..0000000000\n--- a/drivers/net/liquidio/base/lio_mbox.h\n+++ /dev/null\n@@ -1,102 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#ifndef _LIO_MBOX_H_\n-#define _LIO_MBOX_H_\n-\n-#include <stdint.h>\n-\n-#include <rte_spinlock.h>\n-\n-/* Macros for Mail Box Communication */\n-\n-#define LIO_MBOX_DATA_MAX\t\t\t32\n-\n-#define LIO_VF_ACTIVE\t\t\t\t0x1\n-#define LIO_VF_FLR_REQUEST\t\t\t0x2\n-#define LIO_CORES_CRASHED\t\t\t0x3\n-\n-/* Macro for Read acknowledgment */\n-#define LIO_PFVFACK\t\t\t\t0xffffffffffffffff\n-#define LIO_PFVFSIG\t\t\t\t0x1122334455667788\n-#define 
LIO_PFVFERR\t\t\t\t0xDEADDEADDEADDEAD\n-\n-enum lio_mbox_cmd_status {\n-\tLIO_MBOX_STATUS_SUCCESS\t\t= 0,\n-\tLIO_MBOX_STATUS_FAILED\t\t= 1,\n-\tLIO_MBOX_STATUS_BUSY\t\t= 2\n-};\n-\n-enum lio_mbox_message_type {\n-\tLIO_MBOX_REQUEST\t= 0,\n-\tLIO_MBOX_RESPONSE\t= 1\n-};\n-\n-union lio_mbox_message {\n-\tuint64_t mbox_msg64;\n-\tstruct {\n-\t\tuint16_t type : 1;\n-\t\tuint16_t resp_needed : 1;\n-\t\tuint16_t cmd : 6;\n-\t\tuint16_t len : 8;\n-\t\tuint8_t params[6];\n-\t} s;\n-};\n-\n-typedef void (*lio_mbox_callback)(void *, void *, void *);\n-\n-struct lio_mbox_cmd {\n-\tunion lio_mbox_message msg;\n-\tuint64_t data[LIO_MBOX_DATA_MAX];\n-\tuint32_t q_no;\n-\tuint32_t recv_len;\n-\tuint32_t recv_status;\n-\tlio_mbox_callback fn;\n-\tvoid *fn_arg;\n-};\n-\n-enum lio_mbox_state {\n-\tLIO_MBOX_STATE_IDLE\t\t= 1,\n-\tLIO_MBOX_STATE_REQ_RECEIVING\t= 2,\n-\tLIO_MBOX_STATE_REQ_RECEIVED\t= 4,\n-\tLIO_MBOX_STATE_RES_PENDING\t= 8,\n-\tLIO_MBOX_STATE_RES_RECEIVING\t= 16,\n-\tLIO_MBOX_STATE_RES_RECEIVED\t= 16,\n-\tLIO_MBOX_STATE_ERROR\t\t= 32\n-};\n-\n-struct lio_mbox {\n-\t/* A spinlock to protect access to this q_mbox. */\n-\trte_spinlock_t lock;\n-\n-\tstruct lio_device *lio_dev;\n-\n-\tuint32_t q_no;\n-\n-\tenum lio_mbox_state state;\n-\n-\t/* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */\n-\tvoid *mbox_int_reg;\n-\n-\t/* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,\n-\t * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.\n-\t */\n-\tvoid *mbox_write_reg;\n-\n-\t/* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,\n-\t * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.\n-\t */\n-\tvoid *mbox_read_reg;\n-\n-\tstruct lio_mbox_cmd mbox_req;\n-\n-\tstruct lio_mbox_cmd mbox_resp;\n-\n-};\n-\n-int lio_mbox_read(struct lio_mbox *mbox);\n-int lio_mbox_write(struct lio_device *lio_dev,\n-\t\t   struct lio_mbox_cmd *mbox_cmd);\n-int lio_mbox_process_message(struct lio_mbox *mbox);\n-#endif\t/* _LIO_MBOX_H_ */\ndiff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c\ndeleted file mode 100644\nindex ebcfbb1a5c..0000000000\n--- a/drivers/net/liquidio/lio_ethdev.c\n+++ /dev/null\n@@ -1,2147 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#include <rte_string_fns.h>\n-#include <ethdev_driver.h>\n-#include <ethdev_pci.h>\n-#include <rte_cycles.h>\n-#include <rte_malloc.h>\n-#include <rte_alarm.h>\n-#include <rte_ether.h>\n-\n-#include \"lio_logs.h\"\n-#include \"lio_23xx_vf.h\"\n-#include \"lio_ethdev.h\"\n-#include \"lio_rxtx.h\"\n-\n-/* Default RSS key in use */\n-static uint8_t lio_rss_key[40] = {\n-\t0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,\n-\t0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,\n-\t0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,\n-\t0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,\n-\t0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,\n-};\n-\n-static const struct rte_eth_desc_lim lio_rx_desc_lim = {\n-\t.nb_max\t\t= CN23XX_MAX_OQ_DESCRIPTORS,\n-\t.nb_min\t\t= CN23XX_MIN_OQ_DESCRIPTORS,\n-\t.nb_align\t= 1,\n-};\n-\n-static const struct rte_eth_desc_lim lio_tx_desc_lim = {\n-\t.nb_max\t\t= CN23XX_MAX_IQ_DESCRIPTORS,\n-\t.nb_min\t\t= CN23XX_MIN_IQ_DESCRIPTORS,\n-\t.nb_align\t= 1,\n-};\n-\n-/* Wait for control command to reach nic. 
*/\n-static uint16_t\n-lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,\n-\t\t      struct lio_dev_ctrl_cmd *ctrl_cmd)\n-{\n-\tuint16_t timeout = LIO_MAX_CMD_TIMEOUT;\n-\n-\twhile ((ctrl_cmd->cond == 0) && --timeout) {\n-\t\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\t\trte_delay_ms(1);\n-\t}\n-\n-\treturn !timeout;\n-}\n-\n-/**\n- * \\brief Send Rx control command\n- * @param eth_dev Pointer to the structure rte_eth_dev\n- * @param start_stop whether to start or stop\n- */\n-static int\n-lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;\n-\tctrl_pkt.ncmd.s.param1 = start_stop;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to send RX Control message\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {\n-\t\tlio_dev_err(lio_dev, \"RX Control command timed out\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/* store statistics names and its offset in stats structure */\n-struct rte_lio_xstats_name_off {\n-\tchar name[RTE_ETH_XSTATS_NAME_SIZE];\n-\tunsigned int offset;\n-};\n-\n-static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {\n-\t{\"rx_pkts\", offsetof(struct octeon_rx_stats, total_rcvd)},\n-\t{\"rx_bytes\", offsetof(struct octeon_rx_stats, bytes_rcvd)},\n-\t{\"rx_broadcast_pkts\", offsetof(struct octeon_rx_stats, total_bcst)},\n-\t{\"rx_multicast_pkts\", offsetof(struct octeon_rx_stats, total_mcst)},\n-\t{\"rx_flow_ctrl_pkts\", offsetof(struct octeon_rx_stats, ctl_rcvd)},\n-\t{\"rx_fifo_err\", offsetof(struct octeon_rx_stats, fifo_err)},\n-\t{\"rx_dmac_drop\", offsetof(struct octeon_rx_stats, dmac_drop)},\n-\t{\"rx_fcs_err\", offsetof(struct octeon_rx_stats, fcs_err)},\n-\t{\"rx_jabber_err\", offsetof(struct octeon_rx_stats, jabber_err)},\n-\t{\"rx_l2_err\", offsetof(struct octeon_rx_stats, l2_err)},\n-\t{\"rx_vxlan_pkts\", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},\n-\t{\"rx_vxlan_err\", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},\n-\t{\"rx_lro_pkts\", offsetof(struct octeon_rx_stats, fw_lro_pkts)},\n-\t{\"tx_pkts\", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +\n-\t\t\t\t\t\tsizeof(struct octeon_rx_stats)},\n-\t{\"tx_bytes\", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +\n-\t\t\t\t\t\tsizeof(struct octeon_rx_stats)},\n-\t{\"tx_broadcast_pkts\",\n-\t\t(offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +\n-\t\t\tsizeof(struct octeon_rx_stats)},\n-\t{\"tx_multicast_pkts\",\n-\t\t(offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +\n-\t\t\tsizeof(struct octeon_rx_stats)},\n-\t{\"tx_flow_ctrl_pkts\", (offsetof(struct octeon_tx_stats, ctl_sent)) +\n-\t\t\t\t\t\tsizeof(struct octeon_rx_stats)},\n-\t{\"tx_fifo_err\", (offsetof(struct octeon_tx_stats, fifo_err)) +\n-\t\t\t\t\t\tsizeof(struct octeon_rx_stats)},\n-\t{\"tx_total_collisions\", (offsetof(struct octeon_tx_stats,\n-\t\t\t\t\t  total_collisions)) +\n-\t\t\t\t\t\tsizeof(struct octeon_rx_stats)},\n-\t{\"tx_tso\", (offsetof(struct octeon_tx_stats, fw_tso)) 
+\n-\t\t\t\t\t\tsizeof(struct octeon_rx_stats)},\n-\t{\"tx_vxlan_pkts\", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +\n-\t\t\t\t\t\tsizeof(struct octeon_rx_stats)},\n-};\n-\n-#define LIO_NB_XSTATS\tRTE_DIM(rte_lio_stats_strings)\n-\n-/* Get hw stats of the port */\n-static int\n-lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,\n-\t\t   unsigned int n)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tuint16_t timeout = LIO_MAX_CMD_TIMEOUT;\n-\tstruct octeon_link_stats *hw_stats;\n-\tstruct lio_link_stats_resp *resp;\n-\tstruct lio_soft_command *sc;\n-\tuint32_t resp_size;\n-\tunsigned int i;\n-\tint retval;\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_err(lio_dev, \"Port %d down\\n\",\n-\t\t\t    lio_dev->port_id);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tif (n < LIO_NB_XSTATS)\n-\t\treturn LIO_NB_XSTATS;\n-\n-\tresp_size = sizeof(struct lio_link_stats_resp);\n-\tsc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);\n-\tif (sc == NULL)\n-\t\treturn -ENOMEM;\n-\n-\tresp = (struct lio_link_stats_resp *)sc->virtrptr;\n-\tlio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,\n-\t\t\t\t LIO_OPCODE_PORT_STATS, 0, 0, 0);\n-\n-\t/* Setting wait time in seconds */\n-\tsc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;\n-\n-\tretval = lio_send_soft_command(lio_dev, sc);\n-\tif (retval == LIO_IQ_SEND_FAILED) {\n-\t\tlio_dev_err(lio_dev, \"failed to get port stats from firmware. status: %x\\n\",\n-\t\t\t    retval);\n-\t\tgoto get_stats_fail;\n-\t}\n-\n-\twhile ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {\n-\t\tlio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);\n-\t\tlio_process_ordered_list(lio_dev);\n-\t\trte_delay_ms(1);\n-\t}\n-\n-\tretval = resp->status;\n-\tif (retval) {\n-\t\tlio_dev_err(lio_dev, \"failed to get port stats from firmware\\n\");\n-\t\tgoto get_stats_fail;\n-\t}\n-\n-\tlio_swap_8B_data((uint64_t *)(&resp->link_stats),\n-\t\t\t sizeof(struct octeon_link_stats) >> 3);\n-\n-\thw_stats = &resp->link_stats;\n-\n-\tfor (i = 0; i < LIO_NB_XSTATS; i++) {\n-\t\txstats[i].id = i;\n-\t\txstats[i].value =\n-\t\t    *(uint64_t *)(((char *)hw_stats) +\n-\t\t\t\t\trte_lio_stats_strings[i].offset);\n-\t}\n-\n-\tlio_free_soft_command(sc);\n-\n-\treturn LIO_NB_XSTATS;\n-\n-get_stats_fail:\n-\tlio_free_soft_command(sc);\n-\n-\treturn -1;\n-}\n-\n-static int\n-lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,\n-\t\t\t struct rte_eth_xstat_name *xstats_names,\n-\t\t\t unsigned limit __rte_unused)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tunsigned int i;\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_err(lio_dev, \"Port %d down\\n\",\n-\t\t\t    lio_dev->port_id);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tif (xstats_names == NULL)\n-\t\treturn LIO_NB_XSTATS;\n-\n-\t/* Note: limit checked in rte_eth_xstats_names() */\n-\n-\tfor (i = 0; i < LIO_NB_XSTATS; i++) {\n-\t\tsnprintf(xstats_names[i].name, sizeof(xstats_names[i].name),\n-\t\t\t \"%s\", rte_lio_stats_strings[i].name);\n-\t}\n-\n-\treturn LIO_NB_XSTATS;\n-}\n-\n-/* Reset hw stats for the port */\n-static int\n-lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\tint ret;\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_err(lio_dev, \"Port %d down\\n\",\n-\t\t\t    lio_dev->port_id);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, 
lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\tret = lio_send_ctrl_pkt(lio_dev, &ctrl_pkt);\n-\tif (ret != 0) {\n-\t\tlio_dev_err(lio_dev, \"Failed to send clear stats command\\n\");\n-\t\treturn ret;\n-\t}\n-\n-\tret = lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd);\n-\tif (ret != 0) {\n-\t\tlio_dev_err(lio_dev, \"Clear stats command timed out\\n\");\n-\t\treturn ret;\n-\t}\n-\n-\t/* clear stored per queue stats */\n-\tif (*eth_dev->dev_ops->stats_reset == NULL)\n-\t\treturn 0;\n-\treturn (*eth_dev->dev_ops->stats_reset)(eth_dev);\n-}\n-\n-/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc */\n-static int\n-lio_dev_stats_get(struct rte_eth_dev *eth_dev,\n-\t\t  struct rte_eth_stats *stats)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_droq_stats *oq_stats;\n-\tstruct lio_iq_stats *iq_stats;\n-\tstruct lio_instr_queue *txq;\n-\tstruct lio_droq *droq;\n-\tint i, iq_no, oq_no;\n-\tuint64_t bytes = 0;\n-\tuint64_t pkts = 0;\n-\tuint64_t drop = 0;\n-\n-\tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++) {\n-\t\tiq_no = lio_dev->linfo.txpciq[i].s.q_no;\n-\t\ttxq = lio_dev->instr_queue[iq_no];\n-\t\tif (txq != NULL) {\n-\t\t\tiq_stats = &txq->stats;\n-\t\t\tpkts += iq_stats->tx_done;\n-\t\t\tdrop += iq_stats->tx_dropped;\n-\t\t\tbytes += iq_stats->tx_tot_bytes;\n-\t\t}\n-\t}\n-\n-\tstats->opackets = pkts;\n-\tstats->obytes = bytes;\n-\tstats->oerrors = drop;\n-\n-\tpkts = 0;\n-\tdrop = 0;\n-\tbytes = 0;\n-\n-\tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n-\t\toq_no = lio_dev->linfo.rxpciq[i].s.q_no;\n-\t\tdroq = lio_dev->droq[oq_no];\n-\t\tif (droq != NULL) {\n-\t\t\toq_stats = &droq->stats;\n-\t\t\tpkts += oq_stats->rx_pkts_received;\n-\t\t\tdrop += (oq_stats->rx_dropped +\n-\t\t\t\t\toq_stats->dropped_toomany +\n-\t\t\t\t\toq_stats->dropped_nomem);\n-\t\t\tbytes += oq_stats->rx_bytes_received;\n-\t\t}\n-\t}\n-\tstats->ibytes = bytes;\n-\tstats->ipackets = pkts;\n-\tstats->ierrors = drop;\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_stats_reset(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_droq_stats *oq_stats;\n-\tstruct lio_iq_stats *iq_stats;\n-\tstruct lio_instr_queue *txq;\n-\tstruct lio_droq *droq;\n-\tint i, iq_no, oq_no;\n-\n-\tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++) {\n-\t\tiq_no = lio_dev->linfo.txpciq[i].s.q_no;\n-\t\ttxq = lio_dev->instr_queue[iq_no];\n-\t\tif (txq != NULL) {\n-\t\t\tiq_stats = &txq->stats;\n-\t\t\tmemset(iq_stats, 0, sizeof(struct lio_iq_stats));\n-\t\t}\n-\t}\n-\n-\tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n-\t\toq_no = lio_dev->linfo.rxpciq[i].s.q_no;\n-\t\tdroq = lio_dev->droq[oq_no];\n-\t\tif (droq != NULL) {\n-\t\t\toq_stats = &droq->stats;\n-\t\t\tmemset(oq_stats, 0, sizeof(struct lio_droq_stats));\n-\t\t}\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_info_get(struct rte_eth_dev *eth_dev,\n-\t\t struct rte_eth_dev_info *devinfo)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);\n-\n-\tswitch (pci_dev->id.subsystem_device_id) {\n-\t/* CN23xx 10G cards */\n-\tcase PCI_SUBSYS_DEV_ID_CN2350_210:\n-\tcase PCI_SUBSYS_DEV_ID_CN2360_210:\n-\tcase PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:\n-\tcase 
PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:\n-\tcase PCI_SUBSYS_DEV_ID_CN2350_210SVPT:\n-\tcase PCI_SUBSYS_DEV_ID_CN2360_210SVPT:\n-\t\tdevinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;\n-\t\tbreak;\n-\t/* CN23xx 25G cards */\n-\tcase PCI_SUBSYS_DEV_ID_CN2350_225:\n-\tcase PCI_SUBSYS_DEV_ID_CN2360_225:\n-\t\tdevinfo->speed_capa = RTE_ETH_LINK_SPEED_25G;\n-\t\tbreak;\n-\tdefault:\n-\t\tdevinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;\n-\t\tlio_dev_err(lio_dev,\n-\t\t\t    \"Unknown CN23XX subsystem device id. Setting 10G as default link speed.\\n\");\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tdevinfo->max_rx_queues = lio_dev->max_rx_queues;\n-\tdevinfo->max_tx_queues = lio_dev->max_tx_queues;\n-\n-\tdevinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;\n-\tdevinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;\n-\n-\tdevinfo->max_mac_addrs = 1;\n-\n-\tdevinfo->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM\t\t|\n-\t\t\t\t    RTE_ETH_RX_OFFLOAD_UDP_CKSUM\t\t|\n-\t\t\t\t    RTE_ETH_RX_OFFLOAD_TCP_CKSUM\t\t|\n-\t\t\t\t    RTE_ETH_RX_OFFLOAD_VLAN_STRIP\t\t|\n-\t\t\t\t    RTE_ETH_RX_OFFLOAD_RSS_HASH);\n-\tdevinfo->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM\t\t|\n-\t\t\t\t    RTE_ETH_TX_OFFLOAD_UDP_CKSUM\t\t|\n-\t\t\t\t    RTE_ETH_TX_OFFLOAD_TCP_CKSUM\t\t|\n-\t\t\t\t    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM);\n-\n-\tdevinfo->rx_desc_lim = lio_rx_desc_lim;\n-\tdevinfo->tx_desc_lim = lio_tx_desc_lim;\n-\n-\tdevinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;\n-\tdevinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;\n-\tdevinfo->flow_type_rss_offloads = (RTE_ETH_RSS_IPV4\t\t\t|\n-\t\t\t\t\t   RTE_ETH_RSS_NONFRAG_IPV4_TCP\t|\n-\t\t\t\t\t   RTE_ETH_RSS_IPV6\t\t\t|\n-\t\t\t\t\t   RTE_ETH_RSS_NONFRAG_IPV6_TCP\t|\n-\t\t\t\t\t   RTE_ETH_RSS_IPV6_EX\t\t|\n-\t\t\t\t\t   RTE_ETH_RSS_IPV6_TCP_EX);\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_err(lio_dev, \"Port %d down, can't set MTU\\n\",\n-\t\t\t    lio_dev->port_id);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;\n-\tctrl_pkt.ncmd.s.param1 = mtu;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to send command to change MTU\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {\n-\t\tlio_dev_err(lio_dev, \"Command to change MTU timed out\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,\n-\t\t\tstruct rte_eth_rss_reta_entry64 *reta_conf,\n-\t\t\tuint16_t reta_size)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_rss_ctx *rss_state = &lio_dev->rss_state;\n-\tstruct lio_rss_set *rss_param;\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\tint i, j, index;\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_err(lio_dev, \"Port %d down, can't update reta\\n\",\n-\t\t\t    lio_dev->port_id);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tif (reta_size != LIO_RSS_MAX_TABLE_SZ) {\n-\t\tlio_dev_err(lio_dev,\n-\t\t\t    \"The 
size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\\n\",\n-\t\t\t    reta_size, LIO_RSS_MAX_TABLE_SZ);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\trss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;\n-\tctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\trss_param->param.flags = 0xF;\n-\trss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;\n-\trss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;\n-\n-\tfor (i = 0; i < (reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) {\n-\t\tfor (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {\n-\t\t\tif ((reta_conf[i].mask) & ((uint64_t)1 << j)) {\n-\t\t\t\tindex = (i * RTE_ETH_RETA_GROUP_SIZE) + j;\n-\t\t\t\trss_state->itable[index] = reta_conf[i].reta[j];\n-\t\t\t}\n-\t\t}\n-\t}\n-\n-\trss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;\n-\tmemcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);\n-\n-\tlio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to set rss hash\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {\n-\t\tlio_dev_err(lio_dev, \"Set rss hash timed out\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,\n-\t\t       struct rte_eth_rss_reta_entry64 *reta_conf,\n-\t\t       uint16_t reta_size)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_rss_ctx *rss_state = &lio_dev->rss_state;\n-\tint i, num;\n-\n-\tif (reta_size != LIO_RSS_MAX_TABLE_SZ) {\n-\t\tlio_dev_err(lio_dev,\n-\t\t\t    \"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\\n\",\n-\t\t\t    reta_size, LIO_RSS_MAX_TABLE_SZ);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tnum = reta_size / RTE_ETH_RETA_GROUP_SIZE;\n-\n-\tfor (i = 0; i < num; i++) {\n-\t\tmemcpy(reta_conf->reta,\n-\t\t       &rss_state->itable[i * RTE_ETH_RETA_GROUP_SIZE],\n-\t\t       RTE_ETH_RETA_GROUP_SIZE);\n-\t\treta_conf++;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,\n-\t\t\t  struct rte_eth_rss_conf *rss_conf)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_rss_ctx *rss_state = &lio_dev->rss_state;\n-\tuint8_t *hash_key = NULL;\n-\tuint64_t rss_hf = 0;\n-\n-\tif (rss_state->hash_disable) {\n-\t\tlio_dev_info(lio_dev, \"RSS disabled in nic\\n\");\n-\t\trss_conf->rss_hf = 0;\n-\t\treturn 0;\n-\t}\n-\n-\t/* Get key value */\n-\thash_key = rss_conf->rss_key;\n-\tif (hash_key != NULL)\n-\t\tmemcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);\n-\n-\tif (rss_state->ip)\n-\t\trss_hf |= RTE_ETH_RSS_IPV4;\n-\tif (rss_state->tcp_hash)\n-\t\trss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;\n-\tif (rss_state->ipv6)\n-\t\trss_hf |= RTE_ETH_RSS_IPV6;\n-\tif (rss_state->ipv6_tcp_hash)\n-\t\trss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;\n-\tif (rss_state->ipv6_ex)\n-\t\trss_hf |= RTE_ETH_RSS_IPV6_EX;\n-\tif (rss_state->ipv6_tcp_ex_hash)\n-\t\trss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;\n-\n-\trss_conf->rss_hf = rss_hf;\n-\n-\treturn 0;\n-}\n-\n-static 
int\n-lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,\n-\t\t\tstruct rte_eth_rss_conf *rss_conf)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_rss_ctx *rss_state = &lio_dev->rss_state;\n-\tstruct lio_rss_set *rss_param;\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_err(lio_dev, \"Port %d down, can't update hash\\n\",\n-\t\t\t    lio_dev->port_id);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\trss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;\n-\tctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\trss_param->param.flags = 0xF;\n-\n-\tif (rss_conf->rss_key) {\n-\t\trss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;\n-\t\trss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;\n-\t\trss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;\n-\t\tmemcpy(rss_state->hash_key, rss_conf->rss_key,\n-\t\t       rss_state->hash_key_size);\n-\t\tmemcpy(rss_param->key, rss_state->hash_key,\n-\t\t       rss_state->hash_key_size);\n-\t}\n-\n-\tif ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {\n-\t\t/* Can't disable rss through hash flags,\n-\t\t * if it is enabled by default during init\n-\t\t */\n-\t\tif (!rss_state->hash_disable)\n-\t\t\treturn -EINVAL;\n-\n-\t\t/* This is for --disable-rss during testpmd launch */\n-\t\trss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;\n-\t} else {\n-\t\tuint32_t hashinfo = 0;\n-\n-\t\t/* Can't enable rss if disabled by default during init */\n-\t\tif (rss_state->hash_disable)\n-\t\t\treturn -EINVAL;\n-\n-\t\tif (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {\n-\t\t\thashinfo |= LIO_RSS_HASH_IPV4;\n-\t\t\trss_state->ip = 1;\n-\t\t} else {\n-\t\t\trss_state->ip = 0;\n-\t\t}\n-\n-\t\tif (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {\n-\t\t\thashinfo |= LIO_RSS_HASH_TCP_IPV4;\n-\t\t\trss_state->tcp_hash = 1;\n-\t\t} else {\n-\t\t\trss_state->tcp_hash = 0;\n-\t\t}\n-\n-\t\tif (rss_conf->rss_hf & RTE_ETH_RSS_IPV6) {\n-\t\t\thashinfo |= LIO_RSS_HASH_IPV6;\n-\t\t\trss_state->ipv6 = 1;\n-\t\t} else {\n-\t\t\trss_state->ipv6 = 0;\n-\t\t}\n-\n-\t\tif (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {\n-\t\t\thashinfo |= LIO_RSS_HASH_TCP_IPV6;\n-\t\t\trss_state->ipv6_tcp_hash = 1;\n-\t\t} else {\n-\t\t\trss_state->ipv6_tcp_hash = 0;\n-\t\t}\n-\n-\t\tif (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX) {\n-\t\t\thashinfo |= LIO_RSS_HASH_IPV6_EX;\n-\t\t\trss_state->ipv6_ex = 1;\n-\t\t} else {\n-\t\t\trss_state->ipv6_ex = 0;\n-\t\t}\n-\n-\t\tif (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) {\n-\t\t\thashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;\n-\t\t\trss_state->ipv6_tcp_ex_hash = 1;\n-\t\t} else {\n-\t\t\trss_state->ipv6_tcp_ex_hash = 0;\n-\t\t}\n-\n-\t\trss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;\n-\t\trss_param->param.hashinfo = hashinfo;\n-\t}\n-\n-\tlio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to set rss hash\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {\n-\t\tlio_dev_err(lio_dev, \"Set rss hash timed out\\n\");\n-\t\treturn 
-1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * Add vxlan dest udp port for an interface.\n- *\n- * @param eth_dev\n- *  Pointer to the structure rte_eth_dev\n- * @param udp_tnl\n- *  udp tunnel conf\n- *\n- * @return\n- *  On success return 0\n- *  On failure return -1\n- */\n-static int\n-lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,\n-\t\t       struct rte_eth_udp_tunnel *udp_tnl)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\n-\tif (udp_tnl == NULL)\n-\t\treturn -EINVAL;\n-\n-\tif (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {\n-\t\tlio_dev_err(lio_dev, \"Unsupported tunnel type\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;\n-\tctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;\n-\tctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to send VXLAN_PORT_ADD command\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {\n-\t\tlio_dev_err(lio_dev, \"VXLAN_PORT_ADD command timed out\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * Remove vxlan dest udp port for an interface.\n- *\n- * @param eth_dev\n- *  Pointer to the structure rte_eth_dev\n- * @param udp_tnl\n- *  udp tunnel conf\n- *\n- * @return\n- *  On success return 0\n- *  On failure return -1\n- */\n-static int\n-lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,\n-\t\t       struct rte_eth_udp_tunnel *udp_tnl)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\n-\tif (udp_tnl == NULL)\n-\t\treturn -EINVAL;\n-\n-\tif (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {\n-\t\tlio_dev_err(lio_dev, \"Unsupported tunnel type\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;\n-\tctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;\n-\tctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to send VXLAN_PORT_DEL command\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {\n-\t\tlio_dev_err(lio_dev, \"VXLAN_PORT_DEL command timed out\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\n-\tif (lio_dev->linfo.vlan_is_admin_assigned)\n-\t\treturn -EPERM;\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, 
sizeof(struct lio_dev_ctrl_cmd));\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = on ?\n-\t\t\tLIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;\n-\tctrl_pkt.ncmd.s.param1 = vlan_id;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to %s VLAN port\\n\",\n-\t\t\t    on ? \"add\" : \"remove\");\n-\t\treturn -1;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {\n-\t\tlio_dev_err(lio_dev, \"Command to %s VLAN port timed out\\n\",\n-\t\t\t    on ? \"add\" : \"remove\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static uint64_t\n-lio_hweight64(uint64_t w)\n-{\n-\tuint64_t res = w - ((w >> 1) & 0x5555555555555555ul);\n-\n-\tres =\n-\t    (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);\n-\tres = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;\n-\tres = res + (res >> 8);\n-\tres = res + (res >> 16);\n-\n-\treturn (res + (res >> 32)) & 0x00000000000000FFul;\n-}\n-\n-static int\n-lio_dev_link_update(struct rte_eth_dev *eth_dev,\n-\t\t    int wait_to_complete __rte_unused)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct rte_eth_link link;\n-\n-\t/* Initialize */\n-\tmemset(&link, 0, sizeof(link));\n-\tlink.link_status = RTE_ETH_LINK_DOWN;\n-\tlink.link_speed = RTE_ETH_SPEED_NUM_NONE;\n-\tlink.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;\n-\tlink.link_autoneg = RTE_ETH_LINK_AUTONEG;\n-\n-\t/* Return what we found */\n-\tif (lio_dev->linfo.link.s.link_up == 0) {\n-\t\t/* Interface is down */\n-\t\treturn rte_eth_linkstatus_set(eth_dev, &link);\n-\t}\n-\n-\tlink.link_status = RTE_ETH_LINK_UP; /* Interface is up */\n-\tlink.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;\n-\tswitch (lio_dev->linfo.link.s.speed) {\n-\tcase LIO_LINK_SPEED_10000:\n-\t\tlink.link_speed = RTE_ETH_SPEED_NUM_10G;\n-\t\tbreak;\n-\tcase LIO_LINK_SPEED_25000:\n-\t\tlink.link_speed = RTE_ETH_SPEED_NUM_25G;\n-\t\tbreak;\n-\tdefault:\n-\t\tlink.link_speed = RTE_ETH_SPEED_NUM_NONE;\n-\t\tlink.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;\n-\t}\n-\n-\treturn rte_eth_linkstatus_set(eth_dev, &link);\n-}\n-\n-/**\n- * \\brief Net device enable, disable allmulticast\n- * @param eth_dev Pointer to the structure rte_eth_dev\n- *\n- * @return\n- *  On success return 0\n- *  On failure return negative errno\n- */\n-static int\n-lio_change_dev_flag(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\t/* Create a ctrl pkt command to be sent to core app. 
*/\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;\n-\tctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to send change flag message\\n\");\n-\t\treturn -EAGAIN;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {\n-\t\tlio_dev_err(lio_dev, \"Change dev flag command timed out\\n\");\n-\t\treturn -ETIMEDOUT;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\n-\tif (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {\n-\t\tlio_dev_err(lio_dev, \"Require firmware version >= %s\\n\",\n-\t\t\t    LIO_VF_TRUST_MIN_VERSION);\n-\t\treturn -EAGAIN;\n-\t}\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_err(lio_dev, \"Port %d down, can't enable promiscuous\\n\",\n-\t\t\t    lio_dev->port_id);\n-\t\treturn -EAGAIN;\n-\t}\n-\n-\tlio_dev->ifflags |= LIO_IFFLAG_PROMISC;\n-\treturn lio_change_dev_flag(eth_dev);\n-}\n-\n-static int\n-lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\n-\tif (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {\n-\t\tlio_dev_err(lio_dev, \"Require firmware version >= %s\\n\",\n-\t\t\t    LIO_VF_TRUST_MIN_VERSION);\n-\t\treturn -EAGAIN;\n-\t}\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_err(lio_dev, \"Port %d down, can't disable promiscuous\\n\",\n-\t\t\t    lio_dev->port_id);\n-\t\treturn -EAGAIN;\n-\t}\n-\n-\tlio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;\n-\treturn lio_change_dev_flag(eth_dev);\n-}\n-\n-static int\n-lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_err(lio_dev, \"Port %d down, can't enable multicast\\n\",\n-\t\t\t    lio_dev->port_id);\n-\t\treturn -EAGAIN;\n-\t}\n-\n-\tlio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;\n-\treturn lio_change_dev_flag(eth_dev);\n-}\n-\n-static int\n-lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_err(lio_dev, \"Port %d down, can't disable multicast\\n\",\n-\t\t\t    lio_dev->port_id);\n-\t\treturn -EAGAIN;\n-\t}\n-\n-\tlio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;\n-\treturn lio_change_dev_flag(eth_dev);\n-}\n-\n-static void\n-lio_dev_rss_configure(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_rss_ctx *rss_state = &lio_dev->rss_state;\n-\tstruct rte_eth_rss_reta_entry64 reta_conf[8];\n-\tstruct rte_eth_rss_conf rss_conf;\n-\tuint16_t i;\n-\n-\t/* Configure the RSS key and the RSS protocols used to compute\n-\t * the RSS hash of input packets.\n-\t */\n-\trss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;\n-\tif ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {\n-\t\trss_state->hash_disable = 1;\n-\t\tlio_dev_rss_hash_update(eth_dev, &rss_conf);\n-\t\treturn;\n-\t}\n-\n-\tif (rss_conf.rss_key == NULL)\n-\t\trss_conf.rss_key = lio_rss_key; /* Default hash key */\n-\n-\tlio_dev_rss_hash_update(eth_dev, &rss_conf);\n-\n-\tmemset(reta_conf, 0, sizeof(reta_conf));\n-\tfor (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {\n-\t\tuint8_t q_idx, conf_idx, reta_idx;\n-\n-\t\tq_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?\n-\t\t\t\t  i % eth_dev->data->nb_rx_queues : 0);\n-\t\tconf_idx = i / RTE_ETH_RETA_GROUP_SIZE;\n-\t\treta_idx = i % 
RTE_ETH_RETA_GROUP_SIZE;\n-\t\treta_conf[conf_idx].reta[reta_idx] = q_idx;\n-\t\treta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);\n-\t}\n-\n-\tlio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);\n-}\n-\n-static void\n-lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_rss_ctx *rss_state = &lio_dev->rss_state;\n-\tstruct rte_eth_rss_conf rss_conf;\n-\n-\tswitch (eth_dev->data->dev_conf.rxmode.mq_mode) {\n-\tcase RTE_ETH_MQ_RX_RSS:\n-\t\tlio_dev_rss_configure(eth_dev);\n-\t\tbreak;\n-\tcase RTE_ETH_MQ_RX_NONE:\n-\t/* if mq_mode is none, disable rss mode. */\n-\tdefault:\n-\t\tmemset(&rss_conf, 0, sizeof(rss_conf));\n-\t\trss_state->hash_disable = 1;\n-\t\tlio_dev_rss_hash_update(eth_dev, &rss_conf);\n-\t}\n-}\n-\n-/**\n- * Setup our receive queue/ringbuffer. This is the\n- * queue the Octeon uses to send us packets and\n- * responses. We are given a memory pool for our\n- * packet buffers that are used to populate the receive\n- * queue.\n- *\n- * @param eth_dev\n- *    Pointer to the structure rte_eth_dev\n- * @param q_no\n- *    Queue number\n- * @param num_rx_descs\n- *    Number of entries in the queue\n- * @param socket_id\n- *    Where to allocate memory\n- * @param rx_conf\n- *    Pointer to the struction rte_eth_rxconf\n- * @param mp\n- *    Pointer to the packet pool\n- *\n- * @return\n- *    - On success, return 0\n- *    - On failure, return -1\n- */\n-static int\n-lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,\n-\t\t       uint16_t num_rx_descs, unsigned int socket_id,\n-\t\t       const struct rte_eth_rxconf *rx_conf __rte_unused,\n-\t\t       struct rte_mempool *mp)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct rte_pktmbuf_pool_private *mbp_priv;\n-\tuint32_t fw_mapped_oq;\n-\tuint16_t buf_size;\n-\n-\tif (q_no >= lio_dev->nb_rx_queues) {\n-\t\tlio_dev_err(lio_dev, \"Invalid rx queue number %u\\n\", q_no);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tlio_dev_dbg(lio_dev, \"setting up rx queue %u\\n\", q_no);\n-\n-\tfw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;\n-\n-\t/* Free previous allocation if any */\n-\tif (eth_dev->data->rx_queues[q_no] != NULL) {\n-\t\tlio_dev_rx_queue_release(eth_dev, q_no);\n-\t\teth_dev->data->rx_queues[q_no] = NULL;\n-\t}\n-\n-\tmbp_priv = rte_mempool_get_priv(mp);\n-\tbuf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;\n-\n-\tif (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,\n-\t\t\t   socket_id)) {\n-\t\tlio_dev_err(lio_dev, \"droq allocation failed\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\teth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];\n-\n-\treturn 0;\n-}\n-\n-/**\n- * Release the receive queue/ringbuffer. Called by\n- * the upper layers.\n- *\n- * @param eth_dev\n- *    Pointer to Ethernet device structure.\n- * @param q_no\n- *    Receive queue index.\n- *\n- * @return\n- *    - nothing\n- */\n-void\n-lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)\n-{\n-\tstruct lio_droq *droq = dev->data->rx_queues[q_no];\n-\tint oq_no;\n-\n-\tif (droq) {\n-\t\toq_no = droq->q_no;\n-\t\tlio_delete_droq_queue(droq->lio_dev, oq_no);\n-\t}\n-}\n-\n-/**\n- * Allocate and initialize SW ring. 
Initialize associated HW registers.\n- *\n- * @param eth_dev\n- *   Pointer to structure rte_eth_dev\n- *\n- * @param q_no\n- *   Queue number\n- *\n- * @param num_tx_descs\n- *   Number of ringbuffer descriptors\n- *\n- * @param socket_id\n- *   NUMA socket id, used for memory allocations\n- *\n- * @param tx_conf\n- *   Pointer to the structure rte_eth_txconf\n- *\n- * @return\n- *   - On success, return 0\n- *   - On failure, return -errno value\n- */\n-static int\n-lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,\n-\t\t       uint16_t num_tx_descs, unsigned int socket_id,\n-\t\t       const struct rte_eth_txconf *tx_conf __rte_unused)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tint fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;\n-\tint retval;\n-\n-\tif (q_no >= lio_dev->nb_tx_queues) {\n-\t\tlio_dev_err(lio_dev, \"Invalid tx queue number %u\\n\", q_no);\n-\t\treturn -EINVAL;\n-\t}\n-\n-\tlio_dev_dbg(lio_dev, \"setting up tx queue %u\\n\", q_no);\n-\n-\t/* Free previous allocation if any */\n-\tif (eth_dev->data->tx_queues[q_no] != NULL) {\n-\t\tlio_dev_tx_queue_release(eth_dev, q_no);\n-\t\teth_dev->data->tx_queues[q_no] = NULL;\n-\t}\n-\n-\tretval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],\n-\t\t\t      num_tx_descs, lio_dev, socket_id);\n-\n-\tif (retval) {\n-\t\tlio_dev_err(lio_dev, \"Runtime IQ(TxQ) creation failed.\\n\");\n-\t\treturn retval;\n-\t}\n-\n-\tretval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,\n-\t\t\t\tlio_dev->instr_queue[fw_mapped_iq]->nb_desc,\n-\t\t\t\tsocket_id);\n-\n-\tif (retval) {\n-\t\tlio_delete_instruction_queue(lio_dev, fw_mapped_iq);\n-\t\treturn retval;\n-\t}\n-\n-\teth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];\n-\n-\treturn 0;\n-}\n-\n-/**\n- * Release the transmit queue/ringbuffer. 
Called by\n- * the upper layers.\n- *\n- * @param eth_dev\n- *    Pointer to Ethernet device structure.\n- * @param q_no\n- *   Transmit queue index.\n- *\n- * @return\n- *    - nothing\n- */\n-void\n-lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)\n-{\n-\tstruct lio_instr_queue *tq = dev->data->tx_queues[q_no];\n-\tuint32_t fw_mapped_iq_no;\n-\n-\n-\tif (tq) {\n-\t\t/* Free sg_list */\n-\t\tlio_delete_sglist(tq);\n-\n-\t\tfw_mapped_iq_no = tq->txpciq.s.q_no;\n-\t\tlio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);\n-\t}\n-}\n-\n-/**\n- * Api to check link state.\n- */\n-static void\n-lio_dev_get_link_status(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tuint16_t timeout = LIO_MAX_CMD_TIMEOUT;\n-\tstruct lio_link_status_resp *resp;\n-\tunion octeon_link_status *ls;\n-\tstruct lio_soft_command *sc;\n-\tuint32_t resp_size;\n-\n-\tif (!lio_dev->intf_open)\n-\t\treturn;\n-\n-\tresp_size = sizeof(struct lio_link_status_resp);\n-\tsc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);\n-\tif (sc == NULL)\n-\t\treturn;\n-\n-\tresp = (struct lio_link_status_resp *)sc->virtrptr;\n-\tlio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,\n-\t\t\t\t LIO_OPCODE_INFO, 0, 0, 0);\n-\n-\t/* Setting wait time in seconds */\n-\tsc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;\n-\n-\tif (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)\n-\t\tgoto get_status_fail;\n-\n-\twhile ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {\n-\t\tlio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);\n-\t\trte_delay_ms(1);\n-\t}\n-\n-\tif (resp->status)\n-\t\tgoto get_status_fail;\n-\n-\tls = &resp->link_info.link;\n-\n-\tlio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);\n-\n-\tif (lio_dev->linfo.link.link_status64 != ls->link_status64) {\n-\t\tif (ls->s.mtu < eth_dev->data->mtu) {\n-\t\t\tlio_dev_info(lio_dev, \"Lowered VF MTU to %d as PF MTU dropped\\n\",\n-\t\t\t\t     ls->s.mtu);\n-\t\t\teth_dev->data->mtu = ls->s.mtu;\n-\t\t}\n-\t\tlio_dev->linfo.link.link_status64 = ls->link_status64;\n-\t\tlio_dev_link_update(eth_dev, 0);\n-\t}\n-\n-\tlio_free_soft_command(sc);\n-\n-\treturn;\n-\n-get_status_fail:\n-\tlio_free_soft_command(sc);\n-}\n-\n-/* This function will be invoked every LSC_TIMEOUT ns (100ms)\n- * and will update link state if it changes.\n- */\n-static void\n-lio_sync_link_state_check(void *eth_dev)\n-{\n-\tstruct lio_device *lio_dev =\n-\t\t(((struct rte_eth_dev *)eth_dev)->data->dev_private);\n-\n-\tif (lio_dev->port_configured)\n-\t\tlio_dev_get_link_status(eth_dev);\n-\n-\t/* Schedule periodic link status check.\n-\t * Stop check if interface is close and start again while opening.\n-\t */\n-\tif (lio_dev->intf_open)\n-\t\trte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,\n-\t\t\t\t  eth_dev);\n-}\n-\n-static int\n-lio_dev_start(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tuint16_t timeout = LIO_MAX_CMD_TIMEOUT;\n-\tint ret = 0;\n-\n-\tlio_dev_info(lio_dev, \"Starting port %d\\n\", eth_dev->data->port_id);\n-\n-\tif (lio_dev->fn_list.enable_io_queues(lio_dev))\n-\t\treturn -1;\n-\n-\tif (lio_send_rx_ctrl_cmd(eth_dev, 1))\n-\t\treturn -1;\n-\n-\t/* Ready for link status updates */\n-\tlio_dev->intf_open = 1;\n-\trte_mb();\n-\n-\t/* Configure RSS if device configured with multiple RX queues. 
*/\n-\tlio_dev_mq_rx_configure(eth_dev);\n-\n-\t/* Before update the link info,\n-\t * must set linfo.link.link_status64 to 0.\n-\t */\n-\tlio_dev->linfo.link.link_status64 = 0;\n-\n-\t/* start polling for lsc */\n-\tret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,\n-\t\t\t\tlio_sync_link_state_check,\n-\t\t\t\teth_dev);\n-\tif (ret) {\n-\t\tlio_dev_err(lio_dev,\n-\t\t\t    \"link state check handler creation failed\\n\");\n-\t\tgoto dev_lsc_handle_error;\n-\t}\n-\n-\twhile ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))\n-\t\trte_delay_ms(1);\n-\n-\tif (lio_dev->linfo.link.link_status64 == 0) {\n-\t\tret = -1;\n-\t\tgoto dev_mtu_set_error;\n-\t}\n-\n-\tret = lio_dev_mtu_set(eth_dev, eth_dev->data->mtu);\n-\tif (ret != 0)\n-\t\tgoto dev_mtu_set_error;\n-\n-\treturn 0;\n-\n-dev_mtu_set_error:\n-\trte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);\n-\n-dev_lsc_handle_error:\n-\tlio_dev->intf_open = 0;\n-\tlio_send_rx_ctrl_cmd(eth_dev, 0);\n-\n-\treturn ret;\n-}\n-\n-/* Stop device and disable input/output functions */\n-static int\n-lio_dev_stop(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\n-\tlio_dev_info(lio_dev, \"Stopping port %d\\n\", eth_dev->data->port_id);\n-\teth_dev->data->dev_started = 0;\n-\tlio_dev->intf_open = 0;\n-\trte_mb();\n-\n-\t/* Cancel callback if still running. */\n-\trte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);\n-\n-\tlio_send_rx_ctrl_cmd(eth_dev, 0);\n-\n-\tlio_wait_for_instr_fetch(lio_dev);\n-\n-\t/* Clear recorded link status */\n-\tlio_dev->linfo.link.link_status64 = 0;\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_set_link_up(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_info(lio_dev, \"Port is stopped, Start the port first\\n\");\n-\t\treturn 0;\n-\t}\n-\n-\tif (lio_dev->linfo.link.s.link_up) {\n-\t\tlio_dev_info(lio_dev, \"Link is already UP\\n\");\n-\t\treturn 0;\n-\t}\n-\n-\tif (lio_send_rx_ctrl_cmd(eth_dev, 1)) {\n-\t\tlio_dev_err(lio_dev, \"Unable to set Link UP\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tlio_dev->linfo.link.s.link_up = 1;\n-\teth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_set_link_down(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\n-\tif (!lio_dev->intf_open) {\n-\t\tlio_dev_info(lio_dev, \"Port is stopped, Start the port first\\n\");\n-\t\treturn 0;\n-\t}\n-\n-\tif (!lio_dev->linfo.link.s.link_up) {\n-\t\tlio_dev_info(lio_dev, \"Link is already DOWN\\n\");\n-\t\treturn 0;\n-\t}\n-\n-\tlio_dev->linfo.link.s.link_up = 0;\n-\teth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;\n-\n-\tif (lio_send_rx_ctrl_cmd(eth_dev, 0)) {\n-\t\tlio_dev->linfo.link.s.link_up = 1;\n-\t\teth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;\n-\t\tlio_dev_err(lio_dev, \"Unable to set Link Down\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-/**\n- * Reset and stop the device. This occurs on the first\n- * call to this routine. Subsequent calls will simply\n- * return. 
NB: This will require the NIC to be rebooted.\n- *\n- * @param eth_dev\n- *    Pointer to the structure rte_eth_dev\n- *\n- * @return\n- *    - nothing\n- */\n-static int\n-lio_dev_close(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tint ret = 0;\n-\n-\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n-\t\treturn 0;\n-\n-\tlio_dev_info(lio_dev, \"closing port %d\\n\", eth_dev->data->port_id);\n-\n-\tif (lio_dev->intf_open)\n-\t\tret = lio_dev_stop(eth_dev);\n-\n-\t/* Reset ioq regs */\n-\tlio_dev->fn_list.setup_device_regs(lio_dev);\n-\n-\tif (lio_dev->pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO) {\n-\t\tcn23xx_vf_ask_pf_to_do_flr(lio_dev);\n-\t\trte_delay_ms(LIO_PCI_FLR_WAIT);\n-\t}\n-\n-\t/* lio_free_mbox */\n-\tlio_dev->fn_list.free_mbox(lio_dev);\n-\n-\t/* Free glist resources */\n-\trte_free(lio_dev->glist_head);\n-\trte_free(lio_dev->glist_lock);\n-\tlio_dev->glist_head = NULL;\n-\tlio_dev->glist_lock = NULL;\n-\n-\tlio_dev->port_configured = 0;\n-\n-\t /* Delete all queues */\n-\tlio_dev_clear_queues(eth_dev);\n-\n-\treturn ret;\n-}\n-\n-/**\n- * Enable tunnel rx checksum verification from firmware.\n- */\n-static void\n-lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;\n-\tctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to send TNL_RX_CSUM command\\n\");\n-\t\treturn;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))\n-\t\tlio_dev_err(lio_dev, \"TNL_RX_CSUM command timed out\\n\");\n-}\n-\n-/**\n- * Enable checksum calculation for inner packet in a tunnel.\n- */\n-static void\n-lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\n-\t/* flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;\n-\tctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to send TNL_TX_CSUM command\\n\");\n-\t\treturn;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))\n-\t\tlio_dev_err(lio_dev, \"TNL_TX_CSUM command timed out\\n\");\n-}\n-\n-static int\n-lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,\n-\t\t\t    int num_rxq)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tstruct lio_dev_ctrl_cmd ctrl_cmd;\n-\tstruct lio_ctrl_pkt ctrl_pkt;\n-\n-\tif (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {\n-\t\tlio_dev_err(lio_dev, \"Require firmware version >= %s\\n\",\n-\t\t\t    LIO_Q_RECONF_MIN_VERSION);\n-\t\treturn -ENOTSUP;\n-\t}\n-\n-\t/* 
flush added to prevent cmd failure\n-\t * incase the queue is full\n-\t */\n-\tlio_flush_iq(lio_dev, lio_dev->instr_queue[0]);\n-\n-\tmemset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));\n-\tmemset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));\n-\n-\tctrl_cmd.eth_dev = eth_dev;\n-\tctrl_cmd.cond = 0;\n-\n-\tctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;\n-\tctrl_pkt.ncmd.s.param1 = num_txq;\n-\tctrl_pkt.ncmd.s.param2 = num_rxq;\n-\tctrl_pkt.ctrl_cmd = &ctrl_cmd;\n-\n-\tif (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to send queue count control command\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tif (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {\n-\t\tlio_dev_err(lio_dev, \"Queue count control command timed out\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tint ret;\n-\n-\tif (lio_dev->nb_rx_queues != num_rxq ||\n-\t    lio_dev->nb_tx_queues != num_txq) {\n-\t\tif (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))\n-\t\t\treturn -1;\n-\t\tlio_dev->nb_rx_queues = num_rxq;\n-\t\tlio_dev->nb_tx_queues = num_txq;\n-\t}\n-\n-\tif (lio_dev->intf_open) {\n-\t\tret = lio_dev_stop(eth_dev);\n-\t\tif (ret != 0)\n-\t\t\treturn ret;\n-\t}\n-\n-\t/* Reset ioq registers */\n-\tif (lio_dev->fn_list.setup_device_regs(lio_dev)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to configure device registers\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_dev_configure(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\tuint16_t timeout = LIO_MAX_CMD_TIMEOUT;\n-\tint retval, num_iqueues, num_oqueues;\n-\tuint8_t mac[RTE_ETHER_ADDR_LEN], i;\n-\tstruct lio_if_cfg_resp *resp;\n-\tstruct lio_soft_command *sc;\n-\tunion lio_if_cfg if_cfg;\n-\tuint32_t resp_size;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\tif (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)\n-\t\teth_dev->data->dev_conf.rxmode.offloads |=\n-\t\t\tRTE_ETH_RX_OFFLOAD_RSS_HASH;\n-\n-\t/* Inform firmware about change in number of queues to use.\n-\t * Disable IO queues and reset registers for re-configuration.\n-\t */\n-\tif (lio_dev->port_configured)\n-\t\treturn lio_reconf_queues(eth_dev,\n-\t\t\t\t\t eth_dev->data->nb_tx_queues,\n-\t\t\t\t\t eth_dev->data->nb_rx_queues);\n-\n-\tlio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;\n-\tlio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;\n-\n-\t/* Set max number of queues which can be re-configured. 
*/\n-\tlio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;\n-\tlio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;\n-\n-\tresp_size = sizeof(struct lio_if_cfg_resp);\n-\tsc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);\n-\tif (sc == NULL)\n-\t\treturn -ENOMEM;\n-\n-\tresp = (struct lio_if_cfg_resp *)sc->virtrptr;\n-\n-\t/* Firmware doesn't have capability to reconfigure the queues,\n-\t * Claim all queues, and use as many required\n-\t */\n-\tif_cfg.if_cfg64 = 0;\n-\tif_cfg.s.num_iqueues = lio_dev->nb_tx_queues;\n-\tif_cfg.s.num_oqueues = lio_dev->nb_rx_queues;\n-\tif_cfg.s.base_queue = 0;\n-\n-\tif_cfg.s.gmx_port_id = lio_dev->pf_num;\n-\n-\tlio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,\n-\t\t\t\t LIO_OPCODE_IF_CFG, 0,\n-\t\t\t\t if_cfg.if_cfg64, 0);\n-\n-\t/* Setting wait time in seconds */\n-\tsc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;\n-\n-\tretval = lio_send_soft_command(lio_dev, sc);\n-\tif (retval == LIO_IQ_SEND_FAILED) {\n-\t\tlio_dev_err(lio_dev, \"iq/oq config failed status: %x\\n\",\n-\t\t\t    retval);\n-\t\t/* Soft instr is freed by driver in case of failure. */\n-\t\tgoto nic_config_fail;\n-\t}\n-\n-\t/* Sleep on a wait queue till the cond flag indicates that the\n-\t * response arrived or timed-out.\n-\t */\n-\twhile ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {\n-\t\tlio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);\n-\t\tlio_process_ordered_list(lio_dev);\n-\t\trte_delay_ms(1);\n-\t}\n-\n-\tretval = resp->status;\n-\tif (retval) {\n-\t\tlio_dev_err(lio_dev, \"iq/oq config failed\\n\");\n-\t\tgoto nic_config_fail;\n-\t}\n-\n-\tstrlcpy(lio_dev->firmware_version,\n-\t\tresp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH);\n-\n-\tlio_swap_8B_data((uint64_t *)(&resp->cfg_info),\n-\t\t\t sizeof(struct octeon_if_cfg_info) >> 3);\n-\n-\tnum_iqueues = lio_hweight64(resp->cfg_info.iqmask);\n-\tnum_oqueues = lio_hweight64(resp->cfg_info.oqmask);\n-\n-\tif (!(num_iqueues) || !(num_oqueues)) {\n-\t\tlio_dev_err(lio_dev,\n-\t\t\t    \"Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\\n\",\n-\t\t\t    (unsigned long)resp->cfg_info.iqmask,\n-\t\t\t    (unsigned long)resp->cfg_info.oqmask);\n-\t\tgoto nic_config_fail;\n-\t}\n-\n-\tlio_dev_dbg(lio_dev,\n-\t\t    \"interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\\n\",\n-\t\t    eth_dev->data->port_id,\n-\t\t    (unsigned long)resp->cfg_info.iqmask,\n-\t\t    (unsigned long)resp->cfg_info.oqmask,\n-\t\t    num_iqueues, num_oqueues);\n-\n-\tlio_dev->linfo.num_rxpciq = num_oqueues;\n-\tlio_dev->linfo.num_txpciq = num_iqueues;\n-\n-\tfor (i = 0; i < num_oqueues; i++) {\n-\t\tlio_dev->linfo.rxpciq[i].rxpciq64 =\n-\t\t    resp->cfg_info.linfo.rxpciq[i].rxpciq64;\n-\t\tlio_dev_dbg(lio_dev, \"index %d OQ %d\\n\",\n-\t\t\t    i, lio_dev->linfo.rxpciq[i].s.q_no);\n-\t}\n-\n-\tfor (i = 0; i < num_iqueues; i++) {\n-\t\tlio_dev->linfo.txpciq[i].txpciq64 =\n-\t\t    resp->cfg_info.linfo.txpciq[i].txpciq64;\n-\t\tlio_dev_dbg(lio_dev, \"index %d IQ %d\\n\",\n-\t\t\t    i, lio_dev->linfo.txpciq[i].s.q_no);\n-\t}\n-\n-\tlio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;\n-\tlio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;\n-\tlio_dev->linfo.link.link_status64 =\n-\t\t\tresp->cfg_info.linfo.link.link_status64;\n-\n-\t/* 64-bit swap required on LE machines */\n-\tlio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);\n-\tfor (i = 0; i < RTE_ETHER_ADDR_LEN; i++)\n-\t\tmac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +\n-\t\t\t\t       2 + i));\n-\n-\t/* Copy the 
permanent MAC address */\n-\trte_ether_addr_copy((struct rte_ether_addr *)mac,\n-\t\t\t&eth_dev->data->mac_addrs[0]);\n-\n-\t/* enable firmware checksum support for tunnel packets */\n-\tlio_enable_hw_tunnel_rx_checksum(eth_dev);\n-\tlio_enable_hw_tunnel_tx_checksum(eth_dev);\n-\n-\tlio_dev->glist_lock =\n-\t    rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);\n-\tif (lio_dev->glist_lock == NULL)\n-\t\treturn -ENOMEM;\n-\n-\tlio_dev->glist_head =\n-\t\trte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,\n-\t\t\t    0);\n-\tif (lio_dev->glist_head == NULL) {\n-\t\trte_free(lio_dev->glist_lock);\n-\t\tlio_dev->glist_lock = NULL;\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\tlio_dev_link_update(eth_dev, 0);\n-\n-\tlio_dev->port_configured = 1;\n-\n-\tlio_free_soft_command(sc);\n-\n-\t/* Reset ioq regs */\n-\tlio_dev->fn_list.setup_device_regs(lio_dev);\n-\n-\t/* Free iq_0 used during init */\n-\tlio_free_instr_queue0(lio_dev);\n-\n-\treturn 0;\n-\n-nic_config_fail:\n-\tlio_dev_err(lio_dev, \"Failed retval %d\\n\", retval);\n-\tlio_free_soft_command(sc);\n-\tlio_free_instr_queue0(lio_dev);\n-\n-\treturn -ENODEV;\n-}\n-\n-/* Define our ethernet definitions */\n-static const struct eth_dev_ops liovf_eth_dev_ops = {\n-\t.dev_configure\t\t= lio_dev_configure,\n-\t.dev_start\t\t= lio_dev_start,\n-\t.dev_stop\t\t= lio_dev_stop,\n-\t.dev_set_link_up\t= lio_dev_set_link_up,\n-\t.dev_set_link_down\t= lio_dev_set_link_down,\n-\t.dev_close\t\t= lio_dev_close,\n-\t.promiscuous_enable\t= lio_dev_promiscuous_enable,\n-\t.promiscuous_disable\t= lio_dev_promiscuous_disable,\n-\t.allmulticast_enable\t= lio_dev_allmulticast_enable,\n-\t.allmulticast_disable\t= lio_dev_allmulticast_disable,\n-\t.link_update\t\t= lio_dev_link_update,\n-\t.stats_get\t\t= lio_dev_stats_get,\n-\t.xstats_get\t\t= lio_dev_xstats_get,\n-\t.xstats_get_names\t= lio_dev_xstats_get_names,\n-\t.stats_reset\t\t= lio_dev_stats_reset,\n-\t.xstats_reset\t\t= lio_dev_xstats_reset,\n-\t.dev_infos_get\t\t= lio_dev_info_get,\n-\t.vlan_filter_set\t= lio_dev_vlan_filter_set,\n-\t.rx_queue_setup\t\t= lio_dev_rx_queue_setup,\n-\t.rx_queue_release\t= lio_dev_rx_queue_release,\n-\t.tx_queue_setup\t\t= lio_dev_tx_queue_setup,\n-\t.tx_queue_release\t= lio_dev_tx_queue_release,\n-\t.reta_update\t\t= lio_dev_rss_reta_update,\n-\t.reta_query\t\t= lio_dev_rss_reta_query,\n-\t.rss_hash_conf_get\t= lio_dev_rss_hash_conf_get,\n-\t.rss_hash_update\t= lio_dev_rss_hash_update,\n-\t.udp_tunnel_port_add\t= lio_dev_udp_tunnel_add,\n-\t.udp_tunnel_port_del\t= lio_dev_udp_tunnel_del,\n-\t.mtu_set\t\t= lio_dev_mtu_set,\n-};\n-\n-static void\n-lio_check_pf_hs_response(void *lio_dev)\n-{\n-\tstruct lio_device *dev = lio_dev;\n-\n-\t/* check till response arrives */\n-\tif (dev->pfvf_hsword.coproc_tics_per_us)\n-\t\treturn;\n-\n-\tcn23xx_vf_handle_mbox(dev);\n-\n-\trte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);\n-}\n-\n-/**\n- * \\brief Identify the LIO device and to map the BAR address space\n- * @param lio_dev lio device\n- */\n-static int\n-lio_chip_specific_setup(struct lio_device *lio_dev)\n-{\n-\tstruct rte_pci_device *pdev = lio_dev->pci_dev;\n-\tuint32_t dev_id = pdev->id.device_id;\n-\tconst char *s;\n-\tint ret = 1;\n-\n-\tswitch (dev_id) {\n-\tcase LIO_CN23XX_VF_VID:\n-\t\tlio_dev->chip_id = LIO_CN23XX_VF_VID;\n-\t\tret = cn23xx_vf_setup_device(lio_dev);\n-\t\ts = \"CN23XX VF\";\n-\t\tbreak;\n-\tdefault:\n-\t\ts = \"?\";\n-\t\tlio_dev_err(lio_dev, \"Unsupported Chip\\n\");\n-\t}\n-\n-\tif (!ret)\n-\t\tlio_dev_info(lio_dev, \"DEVICE : 
%s\\n\", s);\n-\n-\treturn ret;\n-}\n-\n-static int\n-lio_first_time_init(struct lio_device *lio_dev,\n-\t\t    struct rte_pci_device *pdev)\n-{\n-\tint dpdk_queues;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\t/* set dpdk specific pci device pointer */\n-\tlio_dev->pci_dev = pdev;\n-\n-\t/* Identify the LIO type and set device ops */\n-\tif (lio_chip_specific_setup(lio_dev)) {\n-\t\tlio_dev_err(lio_dev, \"Chip specific setup failed\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\t/* Initialize soft command buffer pool */\n-\tif (lio_setup_sc_buffer_pool(lio_dev)) {\n-\t\tlio_dev_err(lio_dev, \"sc buffer pool allocation failed\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\t/* Initialize lists to manage the requests of different types that\n-\t * arrive from applications for this lio device.\n-\t */\n-\tlio_setup_response_list(lio_dev);\n-\n-\tif (lio_dev->fn_list.setup_mbox(lio_dev)) {\n-\t\tlio_dev_err(lio_dev, \"Mailbox setup failed\\n\");\n-\t\tgoto error;\n-\t}\n-\n-\t/* Check PF response */\n-\tlio_check_pf_hs_response((void *)lio_dev);\n-\n-\t/* Do handshake and exit if incompatible PF driver */\n-\tif (cn23xx_pfvf_handshake(lio_dev))\n-\t\tgoto error;\n-\n-\t/* Request and wait for device reset. */\n-\tif (pdev->kdrv == RTE_PCI_KDRV_IGB_UIO) {\n-\t\tcn23xx_vf_ask_pf_to_do_flr(lio_dev);\n-\t\t/* FLR wait time doubled as a precaution. */\n-\t\trte_delay_ms(LIO_PCI_FLR_WAIT * 2);\n-\t}\n-\n-\tif (lio_dev->fn_list.setup_device_regs(lio_dev)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to configure device registers\\n\");\n-\t\tgoto error;\n-\t}\n-\n-\tif (lio_setup_instr_queue0(lio_dev)) {\n-\t\tlio_dev_err(lio_dev, \"Failed to setup instruction queue 0\\n\");\n-\t\tgoto error;\n-\t}\n-\n-\tdpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;\n-\n-\tlio_dev->max_tx_queues = dpdk_queues;\n-\tlio_dev->max_rx_queues = dpdk_queues;\n-\n-\t/* Enable input and output queues for this device */\n-\tif (lio_dev->fn_list.enable_io_queues(lio_dev))\n-\t\tgoto error;\n-\n-\treturn 0;\n-\n-error:\n-\tlio_free_sc_buffer_pool(lio_dev);\n-\tif (lio_dev->mbox[0])\n-\t\tlio_dev->fn_list.free_mbox(lio_dev);\n-\tif (lio_dev->instr_queue[0])\n-\t\tlio_free_instr_queue0(lio_dev);\n-\n-\treturn -1;\n-}\n-\n-static int\n-lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n-\t\treturn 0;\n-\n-\t/* lio_free_sc_buffer_pool */\n-\tlio_free_sc_buffer_pool(lio_dev);\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_eth_dev_init(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);\n-\tstruct lio_device *lio_dev = LIO_DEV(eth_dev);\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\teth_dev->rx_pkt_burst = &lio_dev_recv_pkts;\n-\teth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;\n-\n-\t/* Primary does the initialization. 
*/\n-\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n-\t\treturn 0;\n-\n-\trte_eth_copy_pci_info(eth_dev, pdev);\n-\n-\tif (pdev->mem_resource[0].addr) {\n-\t\tlio_dev->hw_addr = pdev->mem_resource[0].addr;\n-\t} else {\n-\t\tPMD_INIT_LOG(ERR, \"ERROR: Failed to map BAR0\\n\");\n-\t\treturn -ENODEV;\n-\t}\n-\n-\tlio_dev->eth_dev = eth_dev;\n-\t/* set lio device print string */\n-\tsnprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),\n-\t\t \"%s[%02x:%02x.%x]\", pdev->driver->driver.name,\n-\t\t pdev->addr.bus, pdev->addr.devid, pdev->addr.function);\n-\n-\tlio_dev->port_id = eth_dev->data->port_id;\n-\n-\tif (lio_first_time_init(lio_dev, pdev)) {\n-\t\tlio_dev_err(lio_dev, \"Device init failed\\n\");\n-\t\treturn -EINVAL;\n-\t}\n-\n-\teth_dev->dev_ops = &liovf_eth_dev_ops;\n-\teth_dev->data->mac_addrs = rte_zmalloc(\"lio\", RTE_ETHER_ADDR_LEN, 0);\n-\tif (eth_dev->data->mac_addrs == NULL) {\n-\t\tlio_dev_err(lio_dev,\n-\t\t\t    \"MAC addresses memory allocation failed\\n\");\n-\t\teth_dev->dev_ops = NULL;\n-\t\teth_dev->rx_pkt_burst = NULL;\n-\t\teth_dev->tx_pkt_burst = NULL;\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\trte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);\n-\trte_wmb();\n-\n-\tlio_dev->port_configured = 0;\n-\t/* Always allow unicast packets */\n-\tlio_dev->ifflags |= LIO_IFFLAG_UNICAST;\n-\n-\treturn 0;\n-}\n-\n-static int\n-lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n-\t\t      struct rte_pci_device *pci_dev)\n-{\n-\treturn rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),\n-\t\t\tlio_eth_dev_init);\n-}\n-\n-static int\n-lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)\n-{\n-\treturn rte_eth_dev_pci_generic_remove(pci_dev,\n-\t\t\t\t\t      lio_eth_dev_uninit);\n-}\n-\n-/* Set of PCI devices this driver supports */\n-static const struct rte_pci_id pci_id_liovf_map[] = {\n-\t{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },\n-\t{ .vendor_id = 0, /* sentinel */ }\n-};\n-\n-static struct rte_pci_driver rte_liovf_pmd = {\n-\t.id_table\t= pci_id_liovf_map,\n-\t.drv_flags      = RTE_PCI_DRV_NEED_MAPPING,\n-\t.probe\t\t= lio_eth_dev_pci_probe,\n-\t.remove\t\t= lio_eth_dev_pci_remove,\n-};\n-\n-RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);\n-RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);\n-RTE_PMD_REGISTER_KMOD_DEP(net_liovf, \"* igb_uio | vfio-pci\");\n-RTE_LOG_REGISTER_SUFFIX(lio_logtype_init, init, NOTICE);\n-RTE_LOG_REGISTER_SUFFIX(lio_logtype_driver, driver, NOTICE);\ndiff --git a/drivers/net/liquidio/lio_ethdev.h b/drivers/net/liquidio/lio_ethdev.h\ndeleted file mode 100644\nindex ece2b03858..0000000000\n--- a/drivers/net/liquidio/lio_ethdev.h\n+++ /dev/null\n@@ -1,179 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#ifndef _LIO_ETHDEV_H_\n-#define _LIO_ETHDEV_H_\n-\n-#include <stdint.h>\n-\n-#include \"lio_struct.h\"\n-\n-/* timeout to check link state updates from firmware in us */\n-#define LIO_LSC_TIMEOUT\t\t100000 /* 100000us (100ms) */\n-#define LIO_MAX_CMD_TIMEOUT     10000 /* 10000ms (10s) */\n-\n-/* The max frame size with default MTU */\n-#define LIO_ETH_MAX_LEN (RTE_ETHER_MTU + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)\n-\n-#define LIO_DEV(_eth_dev)\t\t((_eth_dev)->data->dev_private)\n-\n-/* LIO Response condition variable */\n-struct lio_dev_ctrl_cmd {\n-\tstruct rte_eth_dev *eth_dev;\n-\tuint64_t cond;\n-};\n-\n-enum lio_bus_speed {\n-\tLIO_LINK_SPEED_UNKNOWN  = 0,\n-\tLIO_LINK_SPEED_10000    = 10000,\n-\tLIO_LINK_SPEED_25000    = 
25000\n-};\n-\n-struct octeon_if_cfg_info {\n-\tuint64_t iqmask;\t/** mask for IQs enabled for the port */\n-\tuint64_t oqmask;\t/** mask for OQs enabled for the port */\n-\tstruct octeon_link_info linfo; /** initial link information */\n-\tchar lio_firmware_version[LIO_FW_VERSION_LENGTH];\n-};\n-\n-/** Stats for each NIC port in RX direction. */\n-struct octeon_rx_stats {\n-\t/* link-level stats */\n-\tuint64_t total_rcvd;\n-\tuint64_t bytes_rcvd;\n-\tuint64_t total_bcst;\n-\tuint64_t total_mcst;\n-\tuint64_t runts;\n-\tuint64_t ctl_rcvd;\n-\tuint64_t fifo_err; /* Accounts for over/under-run of buffers */\n-\tuint64_t dmac_drop;\n-\tuint64_t fcs_err;\n-\tuint64_t jabber_err;\n-\tuint64_t l2_err;\n-\tuint64_t frame_err;\n-\n-\t/* firmware stats */\n-\tuint64_t fw_total_rcvd;\n-\tuint64_t fw_total_fwd;\n-\tuint64_t fw_total_fwd_bytes;\n-\tuint64_t fw_err_pko;\n-\tuint64_t fw_err_link;\n-\tuint64_t fw_err_drop;\n-\tuint64_t fw_rx_vxlan;\n-\tuint64_t fw_rx_vxlan_err;\n-\n-\t/* LRO */\n-\tuint64_t fw_lro_pkts;   /* Number of packets that are LROed */\n-\tuint64_t fw_lro_octs;   /* Number of octets that are LROed */\n-\tuint64_t fw_total_lro;  /* Number of LRO packets formed */\n-\tuint64_t fw_lro_aborts; /* Number of times lRO of packet aborted */\n-\tuint64_t fw_lro_aborts_port;\n-\tuint64_t fw_lro_aborts_seq;\n-\tuint64_t fw_lro_aborts_tsval;\n-\tuint64_t fw_lro_aborts_timer;\n-\t/* intrmod: packet forward rate */\n-\tuint64_t fwd_rate;\n-};\n-\n-/** Stats for each NIC port in RX direction. */\n-struct octeon_tx_stats {\n-\t/* link-level stats */\n-\tuint64_t total_pkts_sent;\n-\tuint64_t total_bytes_sent;\n-\tuint64_t mcast_pkts_sent;\n-\tuint64_t bcast_pkts_sent;\n-\tuint64_t ctl_sent;\n-\tuint64_t one_collision_sent;\t/* Packets sent after one collision */\n-\t/* Packets sent after multiple collision */\n-\tuint64_t multi_collision_sent;\n-\t/* Packets not sent due to max collisions */\n-\tuint64_t max_collision_fail;\n-\t/* Packets not sent due to max deferrals */\n-\tuint64_t max_deferral_fail;\n-\t/* Accounts for over/under-run of buffers */\n-\tuint64_t fifo_err;\n-\tuint64_t runts;\n-\tuint64_t total_collisions; /* Total number of collisions detected */\n-\n-\t/* firmware stats */\n-\tuint64_t fw_total_sent;\n-\tuint64_t fw_total_fwd;\n-\tuint64_t fw_total_fwd_bytes;\n-\tuint64_t fw_err_pko;\n-\tuint64_t fw_err_link;\n-\tuint64_t fw_err_drop;\n-\tuint64_t fw_err_tso;\n-\tuint64_t fw_tso;     /* number of tso requests */\n-\tuint64_t fw_tso_fwd; /* number of packets segmented in tso */\n-\tuint64_t fw_tx_vxlan;\n-};\n-\n-struct octeon_link_stats {\n-\tstruct octeon_rx_stats fromwire;\n-\tstruct octeon_tx_stats fromhost;\n-};\n-\n-union lio_if_cfg {\n-\tuint64_t if_cfg64;\n-\tstruct {\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\t\tuint64_t base_queue : 16;\n-\t\tuint64_t num_iqueues : 16;\n-\t\tuint64_t num_oqueues : 16;\n-\t\tuint64_t gmx_port_id : 8;\n-\t\tuint64_t vf_id : 8;\n-#else\n-\t\tuint64_t vf_id : 8;\n-\t\tuint64_t gmx_port_id : 8;\n-\t\tuint64_t num_oqueues : 16;\n-\t\tuint64_t num_iqueues : 16;\n-\t\tuint64_t base_queue : 16;\n-#endif\n-\t} s;\n-};\n-\n-struct lio_if_cfg_resp {\n-\tuint64_t rh;\n-\tstruct octeon_if_cfg_info cfg_info;\n-\tuint64_t status;\n-};\n-\n-struct lio_link_stats_resp {\n-\tuint64_t rh;\n-\tstruct octeon_link_stats link_stats;\n-\tuint64_t status;\n-};\n-\n-struct lio_link_status_resp {\n-\tuint64_t rh;\n-\tstruct octeon_link_info link_info;\n-\tuint64_t status;\n-};\n-\n-struct lio_rss_set {\n-\tstruct param {\n-#if RTE_BYTE_ORDER == 
RTE_LITTLE_ENDIAN\n-\t\tuint64_t flags : 16;\n-\t\tuint64_t hashinfo : 32;\n-\t\tuint64_t itablesize : 16;\n-\t\tuint64_t hashkeysize : 16;\n-\t\tuint64_t reserved : 48;\n-#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\t\tuint64_t itablesize : 16;\n-\t\tuint64_t hashinfo : 32;\n-\t\tuint64_t flags : 16;\n-\t\tuint64_t reserved : 48;\n-\t\tuint64_t hashkeysize : 16;\n-#endif\n-\t} param;\n-\n-\tuint8_t itable[LIO_RSS_MAX_TABLE_SZ];\n-\tuint8_t key[LIO_RSS_MAX_KEY_SZ];\n-};\n-\n-void lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no);\n-\n-void lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no);\n-\n-#endif\t/* _LIO_ETHDEV_H_ */\ndiff --git a/drivers/net/liquidio/lio_logs.h b/drivers/net/liquidio/lio_logs.h\ndeleted file mode 100644\nindex f227827081..0000000000\n--- a/drivers/net/liquidio/lio_logs.h\n+++ /dev/null\n@@ -1,58 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#ifndef _LIO_LOGS_H_\n-#define _LIO_LOGS_H_\n-\n-extern int lio_logtype_driver;\n-#define lio_dev_printf(lio_dev, level, fmt, args...)\t\t\\\n-\trte_log(RTE_LOG_ ## level, lio_logtype_driver,\t\t\\\n-\t\t\"%s\" fmt, (lio_dev)->dev_string, ##args)\n-\n-#define lio_dev_info(lio_dev, fmt, args...)\t\t\t\t\\\n-\tlio_dev_printf(lio_dev, INFO, \"INFO: \" fmt, ##args)\n-\n-#define lio_dev_err(lio_dev, fmt, args...)\t\t\t\t\\\n-\tlio_dev_printf(lio_dev, ERR, \"ERROR: %s() \" fmt, __func__, ##args)\n-\n-extern int lio_logtype_init;\n-#define PMD_INIT_LOG(level, fmt, args...) \\\n-\trte_log(RTE_LOG_ ## level, lio_logtype_init, \\\n-\t\tfmt, ## args)\n-\n-/* Enable these through config options */\n-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, \"%s() >>\\n\", __func__)\n-\n-#define lio_dev_dbg(lio_dev, fmt, args...)\t\t\t\t\\\n-\tlio_dev_printf(lio_dev, DEBUG, \"DEBUG: %s() \" fmt, __func__, ##args)\n-\n-#ifdef RTE_LIBRTE_LIO_DEBUG_RX\n-#define PMD_RX_LOG(lio_dev, level, fmt, args...)\t\t\t\\\n-\tlio_dev_printf(lio_dev, level, \"RX: %s() \" fmt, __func__, ##args)\n-#else /* !RTE_LIBRTE_LIO_DEBUG_RX */\n-#define PMD_RX_LOG(lio_dev, level, fmt, args...) do { } while (0)\n-#endif /* RTE_LIBRTE_LIO_DEBUG_RX */\n-\n-#ifdef RTE_LIBRTE_LIO_DEBUG_TX\n-#define PMD_TX_LOG(lio_dev, level, fmt, args...)\t\t\t\\\n-\tlio_dev_printf(lio_dev, level, \"TX: %s() \" fmt, __func__, ##args)\n-#else /* !RTE_LIBRTE_LIO_DEBUG_TX */\n-#define PMD_TX_LOG(lio_dev, level, fmt, args...) do { } while (0)\n-#endif /* RTE_LIBRTE_LIO_DEBUG_TX */\n-\n-#ifdef RTE_LIBRTE_LIO_DEBUG_MBOX\n-#define PMD_MBOX_LOG(lio_dev, level, fmt, args...)\t\t\t\\\n-\tlio_dev_printf(lio_dev, level, \"MBOX: %s() \" fmt, __func__, ##args)\n-#else /* !RTE_LIBRTE_LIO_DEBUG_MBOX */\n-#define PMD_MBOX_LOG(level, fmt, args...) do { } while (0)\n-#endif /* RTE_LIBRTE_LIO_DEBUG_MBOX */\n-\n-#ifdef RTE_LIBRTE_LIO_DEBUG_REGS\n-#define PMD_REGS_LOG(lio_dev, fmt, args...)\t\t\t\t\\\n-\tlio_dev_printf(lio_dev, DEBUG, \"REGS: \" fmt, ##args)\n-#else /* !RTE_LIBRTE_LIO_DEBUG_REGS */\n-#define PMD_REGS_LOG(level, fmt, args...) 
do { } while (0)\n-#endif /* RTE_LIBRTE_LIO_DEBUG_REGS */\n-\n-#endif  /* _LIO_LOGS_H_ */\ndiff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c\ndeleted file mode 100644\nindex e09798ddd7..0000000000\n--- a/drivers/net/liquidio/lio_rxtx.c\n+++ /dev/null\n@@ -1,1804 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#include <ethdev_driver.h>\n-#include <rte_cycles.h>\n-#include <rte_malloc.h>\n-\n-#include \"lio_logs.h\"\n-#include \"lio_struct.h\"\n-#include \"lio_ethdev.h\"\n-#include \"lio_rxtx.h\"\n-\n-#define LIO_MAX_SG 12\n-/* Flush iq if available tx_desc fall below LIO_FLUSH_WM */\n-#define LIO_FLUSH_WM(_iq) ((_iq)->nb_desc / 2)\n-#define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL\n-\n-static void\n-lio_droq_compute_max_packet_bufs(struct lio_droq *droq)\n-{\n-\tuint32_t count = 0;\n-\n-\tdo {\n-\t\tcount += droq->buffer_size;\n-\t} while (count < LIO_MAX_RX_PKTLEN);\n-}\n-\n-static void\n-lio_droq_reset_indices(struct lio_droq *droq)\n-{\n-\tdroq->read_idx\t= 0;\n-\tdroq->write_idx\t= 0;\n-\tdroq->refill_idx = 0;\n-\tdroq->refill_count = 0;\n-\trte_atomic64_set(&droq->pkts_pending, 0);\n-}\n-\n-static void\n-lio_droq_destroy_ring_buffers(struct lio_droq *droq)\n-{\n-\tuint32_t i;\n-\n-\tfor (i = 0; i < droq->nb_desc; i++) {\n-\t\tif (droq->recv_buf_list[i].buffer) {\n-\t\t\trte_pktmbuf_free((struct rte_mbuf *)\n-\t\t\t\t\t droq->recv_buf_list[i].buffer);\n-\t\t\tdroq->recv_buf_list[i].buffer = NULL;\n-\t\t}\n-\t}\n-\n-\tlio_droq_reset_indices(droq);\n-}\n-\n-static int\n-lio_droq_setup_ring_buffers(struct lio_device *lio_dev,\n-\t\t\t    struct lio_droq *droq)\n-{\n-\tstruct lio_droq_desc *desc_ring = droq->desc_ring;\n-\tuint32_t i;\n-\tvoid *buf;\n-\n-\tfor (i = 0; i < droq->nb_desc; i++) {\n-\t\tbuf = rte_pktmbuf_alloc(droq->mpool);\n-\t\tif (buf == NULL) {\n-\t\t\tlio_dev_err(lio_dev, \"buffer alloc failed\\n\");\n-\t\t\tdroq->stats.rx_alloc_failure++;\n-\t\t\tlio_droq_destroy_ring_buffers(droq);\n-\t\t\treturn -ENOMEM;\n-\t\t}\n-\n-\t\tdroq->recv_buf_list[i].buffer = buf;\n-\t\tdroq->info_list[i].length = 0;\n-\n-\t\t/* map ring buffers into memory */\n-\t\tdesc_ring[i].info_ptr = lio_map_ring_info(droq, i);\n-\t\tdesc_ring[i].buffer_ptr =\n-\t\t\tlio_map_ring(droq->recv_buf_list[i].buffer);\n-\t}\n-\n-\tlio_droq_reset_indices(droq);\n-\n-\tlio_droq_compute_max_packet_bufs(droq);\n-\n-\treturn 0;\n-}\n-\n-static void\n-lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)\n-{\n-\tconst struct rte_memzone *mz_tmp;\n-\tint ret = 0;\n-\n-\tif (mz == NULL) {\n-\t\tlio_dev_err(lio_dev, \"Memzone NULL\\n\");\n-\t\treturn;\n-\t}\n-\n-\tmz_tmp = rte_memzone_lookup(mz->name);\n-\tif (mz_tmp == NULL) {\n-\t\tlio_dev_err(lio_dev, \"Memzone %s Not Found\\n\", mz->name);\n-\t\treturn;\n-\t}\n-\n-\tret = rte_memzone_free(mz);\n-\tif (ret)\n-\t\tlio_dev_err(lio_dev, \"Memzone free Failed ret %d\\n\", ret);\n-}\n-\n-/**\n- *  Frees the space for descriptor ring for the droq.\n- *\n- *  @param lio_dev\t- pointer to the lio device structure\n- *  @param q_no\t\t- droq no.\n- */\n-static void\n-lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)\n-{\n-\tstruct lio_droq *droq = lio_dev->droq[q_no];\n-\n-\tlio_dev_dbg(lio_dev, \"OQ[%d]\\n\", q_no);\n-\n-\tlio_droq_destroy_ring_buffers(droq);\n-\trte_free(droq->recv_buf_list);\n-\tdroq->recv_buf_list = NULL;\n-\tlio_dma_zone_free(lio_dev, droq->info_mz);\n-\tlio_dma_zone_free(lio_dev, droq->desc_ring_mz);\n-\n-\tmemset(droq, 0, 
LIO_DROQ_SIZE);\n-}\n-\n-static void *\n-lio_alloc_info_buffer(struct lio_device *lio_dev,\n-\t\t      struct lio_droq *droq, unsigned int socket_id)\n-{\n-\tdroq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,\n-\t\t\t\t\t\t \"info_list\", droq->q_no,\n-\t\t\t\t\t\t (droq->nb_desc *\n-\t\t\t\t\t\t\tLIO_DROQ_INFO_SIZE),\n-\t\t\t\t\t\t RTE_CACHE_LINE_SIZE,\n-\t\t\t\t\t\t socket_id);\n-\n-\tif (droq->info_mz == NULL)\n-\t\treturn NULL;\n-\n-\tdroq->info_list_dma = droq->info_mz->iova;\n-\tdroq->info_alloc_size = droq->info_mz->len;\n-\tdroq->info_base_addr = (size_t)droq->info_mz->addr;\n-\n-\treturn droq->info_mz->addr;\n-}\n-\n-/**\n- *  Allocates space for the descriptor ring for the droq and\n- *  sets the base addr, num desc etc in Octeon registers.\n- *\n- * @param lio_dev\t- pointer to the lio device structure\n- * @param q_no\t\t- droq no.\n- * @param app_ctx\t- pointer to application context\n- * @return Success: 0\tFailure: -1\n- */\n-static int\n-lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,\n-\t      uint32_t num_descs, uint32_t desc_size,\n-\t      struct rte_mempool *mpool, unsigned int socket_id)\n-{\n-\tuint32_t c_refill_threshold;\n-\tuint32_t desc_ring_size;\n-\tstruct lio_droq *droq;\n-\n-\tlio_dev_dbg(lio_dev, \"OQ[%d]\\n\", q_no);\n-\n-\tdroq = lio_dev->droq[q_no];\n-\tdroq->lio_dev = lio_dev;\n-\tdroq->q_no = q_no;\n-\tdroq->mpool = mpool;\n-\n-\tc_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);\n-\n-\tdroq->nb_desc = num_descs;\n-\tdroq->buffer_size = desc_size;\n-\n-\tdesc_ring_size = droq->nb_desc * LIO_DROQ_DESC_SIZE;\n-\tdroq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,\n-\t\t\t\t\t\t      \"droq\", q_no,\n-\t\t\t\t\t\t      desc_ring_size,\n-\t\t\t\t\t\t      RTE_CACHE_LINE_SIZE,\n-\t\t\t\t\t\t      socket_id);\n-\n-\tif (droq->desc_ring_mz == NULL) {\n-\t\tlio_dev_err(lio_dev,\n-\t\t\t    \"Output queue %d ring alloc failed\\n\", q_no);\n-\t\treturn -1;\n-\t}\n-\n-\tdroq->desc_ring_dma = droq->desc_ring_mz->iova;\n-\tdroq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;\n-\n-\tlio_dev_dbg(lio_dev, \"droq[%d]: desc_ring: virt: 0x%p, dma: %lx\\n\",\n-\t\t    q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);\n-\tlio_dev_dbg(lio_dev, \"droq[%d]: num_desc: %d\\n\", q_no,\n-\t\t    droq->nb_desc);\n-\n-\tdroq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);\n-\tif (droq->info_list == NULL) {\n-\t\tlio_dev_err(lio_dev, \"Cannot allocate memory for info list.\\n\");\n-\t\tgoto init_droq_fail;\n-\t}\n-\n-\tdroq->recv_buf_list = rte_zmalloc_socket(\"recv_buf_list\",\n-\t\t\t\t\t\t (droq->nb_desc *\n-\t\t\t\t\t\t\tLIO_DROQ_RECVBUF_SIZE),\n-\t\t\t\t\t\t RTE_CACHE_LINE_SIZE,\n-\t\t\t\t\t\t socket_id);\n-\tif (droq->recv_buf_list == NULL) {\n-\t\tlio_dev_err(lio_dev,\n-\t\t\t    \"Output queue recv buf list alloc failed\\n\");\n-\t\tgoto init_droq_fail;\n-\t}\n-\n-\tif (lio_droq_setup_ring_buffers(lio_dev, droq))\n-\t\tgoto init_droq_fail;\n-\n-\tdroq->refill_threshold = c_refill_threshold;\n-\n-\trte_spinlock_init(&droq->lock);\n-\n-\tlio_dev->fn_list.setup_oq_regs(lio_dev, q_no);\n-\n-\tlio_dev->io_qmask.oq |= (1ULL << q_no);\n-\n-\treturn 0;\n-\n-init_droq_fail:\n-\tlio_delete_droq(lio_dev, q_no);\n-\n-\treturn -1;\n-}\n-\n-int\n-lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,\n-\t       int desc_size, struct rte_mempool *mpool, unsigned int socket_id)\n-{\n-\tstruct lio_droq *droq;\n-\n-\tPMD_INIT_FUNC_TRACE();\n-\n-\t/* Allocate the DS for the new droq. 
*/\n-\tdroq = rte_zmalloc_socket(\"ethdev RX queue\", sizeof(*droq),\n-\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n-\tif (droq == NULL)\n-\t\treturn -ENOMEM;\n-\n-\tlio_dev->droq[oq_no] = droq;\n-\n-\t/* Initialize the Droq */\n-\tif (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,\n-\t\t\t  socket_id)) {\n-\t\tlio_dev_err(lio_dev, \"Droq[%u] Initialization Failed\\n\", oq_no);\n-\t\trte_free(lio_dev->droq[oq_no]);\n-\t\tlio_dev->droq[oq_no] = NULL;\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\tlio_dev->num_oqs++;\n-\n-\tlio_dev_dbg(lio_dev, \"Total number of OQ: %d\\n\", lio_dev->num_oqs);\n-\n-\t/* Send credit for octeon output queues. credits are always\n-\t * sent after the output queue is enabled.\n-\t */\n-\trte_write32(lio_dev->droq[oq_no]->nb_desc,\n-\t\t    lio_dev->droq[oq_no]->pkts_credit_reg);\n-\trte_wmb();\n-\n-\treturn 0;\n-}\n-\n-static inline uint32_t\n-lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len)\n-{\n-\tuint32_t buf_cnt = 0;\n-\n-\twhile (total_len > (buf_size * buf_cnt))\n-\t\tbuf_cnt++;\n-\n-\treturn buf_cnt;\n-}\n-\n-/* If we were not able to refill all buffers, try to move around\n- * the buffers that were not dispatched.\n- */\n-static inline uint32_t\n-lio_droq_refill_pullup_descs(struct lio_droq *droq,\n-\t\t\t     struct lio_droq_desc *desc_ring)\n-{\n-\tuint32_t refill_index = droq->refill_idx;\n-\tuint32_t desc_refilled = 0;\n-\n-\twhile (refill_index != droq->read_idx) {\n-\t\tif (droq->recv_buf_list[refill_index].buffer) {\n-\t\t\tdroq->recv_buf_list[droq->refill_idx].buffer =\n-\t\t\t\tdroq->recv_buf_list[refill_index].buffer;\n-\t\t\tdesc_ring[droq->refill_idx].buffer_ptr =\n-\t\t\t\tdesc_ring[refill_index].buffer_ptr;\n-\t\t\tdroq->recv_buf_list[refill_index].buffer = NULL;\n-\t\t\tdesc_ring[refill_index].buffer_ptr = 0;\n-\t\t\tdo {\n-\t\t\t\tdroq->refill_idx = lio_incr_index(\n-\t\t\t\t\t\t\tdroq->refill_idx, 1,\n-\t\t\t\t\t\t\tdroq->nb_desc);\n-\t\t\t\tdesc_refilled++;\n-\t\t\t\tdroq->refill_count--;\n-\t\t\t} while (droq->recv_buf_list[droq->refill_idx].buffer);\n-\t\t}\n-\t\trefill_index = lio_incr_index(refill_index, 1,\n-\t\t\t\t\t      droq->nb_desc);\n-\t}\t/* while */\n-\n-\treturn desc_refilled;\n-}\n-\n-/* lio_droq_refill\n- *\n- * @param droq\t\t- droq in which descriptors require new buffers.\n- *\n- * Description:\n- *  Called during normal DROQ processing in interrupt mode or by the poll\n- *  thread to refill the descriptors from which buffers were dispatched\n- *  to upper layers. Attempts to allocate new buffers. 
If that fails, moves\n- *  up buffers (that were not dispatched) to form a contiguous ring.\n- *\n- * Returns:\n- *  No of descriptors refilled.\n- *\n- * Locks:\n- * This routine is called with droq->lock held.\n- */\n-static uint32_t\n-lio_droq_refill(struct lio_droq *droq)\n-{\n-\tstruct lio_droq_desc *desc_ring;\n-\tuint32_t desc_refilled = 0;\n-\tvoid *buf = NULL;\n-\n-\tdesc_ring = droq->desc_ring;\n-\n-\twhile (droq->refill_count && (desc_refilled < droq->nb_desc)) {\n-\t\t/* If a valid buffer exists (happens if there is no dispatch),\n-\t\t * reuse the buffer, else allocate.\n-\t\t */\n-\t\tif (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {\n-\t\t\tbuf = rte_pktmbuf_alloc(droq->mpool);\n-\t\t\t/* If a buffer could not be allocated, no point in\n-\t\t\t * continuing\n-\t\t\t */\n-\t\t\tif (buf == NULL) {\n-\t\t\t\tdroq->stats.rx_alloc_failure++;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\n-\t\t\tdroq->recv_buf_list[droq->refill_idx].buffer = buf;\n-\t\t}\n-\n-\t\tdesc_ring[droq->refill_idx].buffer_ptr =\n-\t\t    lio_map_ring(droq->recv_buf_list[droq->refill_idx].buffer);\n-\t\t/* Reset any previous values in the length field. */\n-\t\tdroq->info_list[droq->refill_idx].length = 0;\n-\n-\t\tdroq->refill_idx = lio_incr_index(droq->refill_idx, 1,\n-\t\t\t\t\t\t  droq->nb_desc);\n-\t\tdesc_refilled++;\n-\t\tdroq->refill_count--;\n-\t}\n-\n-\tif (droq->refill_count)\n-\t\tdesc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring);\n-\n-\t/* if droq->refill_count\n-\t * The refill count would not change in pass two. We only moved buffers\n-\t * to close the gap in the ring, but we would still have the same no. of\n-\t * buffers to refill.\n-\t */\n-\treturn desc_refilled;\n-}\n-\n-static int\n-lio_droq_fast_process_packet(struct lio_device *lio_dev,\n-\t\t\t     struct lio_droq *droq,\n-\t\t\t     struct rte_mbuf **rx_pkts)\n-{\n-\tstruct rte_mbuf *nicbuf = NULL;\n-\tstruct lio_droq_info *info;\n-\tuint32_t total_len = 0;\n-\tint data_total_len = 0;\n-\tuint32_t pkt_len = 0;\n-\tunion octeon_rh *rh;\n-\tint data_pkts = 0;\n-\n-\tinfo = &droq->info_list[droq->read_idx];\n-\tlio_swap_8B_data((uint64_t *)info, 2);\n-\n-\tif (!info->length)\n-\t\treturn -1;\n-\n-\t/* Len of resp hdr in included in the received data len. 
*/\n-\tinfo->length -= OCTEON_RH_SIZE;\n-\trh = &info->rh;\n-\n-\ttotal_len += (uint32_t)info->length;\n-\n-\tif (lio_opcode_slow_path(rh)) {\n-\t\tuint32_t buf_cnt;\n-\n-\t\tbuf_cnt = lio_droq_get_bufcount(droq->buffer_size,\n-\t\t\t\t\t\t(uint32_t)info->length);\n-\t\tdroq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,\n-\t\t\t\t\t\tdroq->nb_desc);\n-\t\tdroq->refill_count += buf_cnt;\n-\t} else {\n-\t\tif (info->length <= droq->buffer_size) {\n-\t\t\tif (rh->r_dh.has_hash)\n-\t\t\t\tpkt_len = (uint32_t)(info->length - 8);\n-\t\t\telse\n-\t\t\t\tpkt_len = (uint32_t)info->length;\n-\n-\t\t\tnicbuf = droq->recv_buf_list[droq->read_idx].buffer;\n-\t\t\tdroq->recv_buf_list[droq->read_idx].buffer = NULL;\n-\t\t\tdroq->read_idx = lio_incr_index(\n-\t\t\t\t\t\tdroq->read_idx, 1,\n-\t\t\t\t\t\tdroq->nb_desc);\n-\t\t\tdroq->refill_count++;\n-\n-\t\t\tif (likely(nicbuf != NULL)) {\n-\t\t\t\t/* We don't have a way to pass flags yet */\n-\t\t\t\tnicbuf->ol_flags = 0;\n-\t\t\t\tif (rh->r_dh.has_hash) {\n-\t\t\t\t\tuint64_t *hash_ptr;\n-\n-\t\t\t\t\tnicbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;\n-\t\t\t\t\thash_ptr = rte_pktmbuf_mtod(nicbuf,\n-\t\t\t\t\t\t\t\t    uint64_t *);\n-\t\t\t\t\tlio_swap_8B_data(hash_ptr, 1);\n-\t\t\t\t\tnicbuf->hash.rss = (uint32_t)*hash_ptr;\n-\t\t\t\t\tnicbuf->data_off += 8;\n-\t\t\t\t}\n-\n-\t\t\t\tnicbuf->pkt_len = pkt_len;\n-\t\t\t\tnicbuf->data_len = pkt_len;\n-\t\t\t\tnicbuf->port = lio_dev->port_id;\n-\t\t\t\t/* Store the mbuf */\n-\t\t\t\trx_pkts[data_pkts++] = nicbuf;\n-\t\t\t\tdata_total_len += pkt_len;\n-\t\t\t}\n-\n-\t\t\t/* Prefetch buffer pointers when on a cache line\n-\t\t\t * boundary\n-\t\t\t */\n-\t\t\tif ((droq->read_idx & 3) == 0) {\n-\t\t\t\trte_prefetch0(\n-\t\t\t\t    &droq->recv_buf_list[droq->read_idx]);\n-\t\t\t\trte_prefetch0(\n-\t\t\t\t    &droq->info_list[droq->read_idx]);\n-\t\t\t}\n-\t\t} else {\n-\t\t\tstruct rte_mbuf *first_buf = NULL;\n-\t\t\tstruct rte_mbuf *last_buf = NULL;\n-\n-\t\t\twhile (pkt_len < info->length) {\n-\t\t\t\tint cpy_len = 0;\n-\n-\t\t\t\tcpy_len = ((pkt_len + droq->buffer_size) >\n-\t\t\t\t\t\tinfo->length)\n-\t\t\t\t\t\t? 
((uint32_t)info->length -\n-\t\t\t\t\t\t\tpkt_len)\n-\t\t\t\t\t\t: droq->buffer_size;\n-\n-\t\t\t\tnicbuf =\n-\t\t\t\t    droq->recv_buf_list[droq->read_idx].buffer;\n-\t\t\t\tdroq->recv_buf_list[droq->read_idx].buffer =\n-\t\t\t\t    NULL;\n-\n-\t\t\t\tif (likely(nicbuf != NULL)) {\n-\t\t\t\t\t/* Note the first seg */\n-\t\t\t\t\tif (!pkt_len)\n-\t\t\t\t\t\tfirst_buf = nicbuf;\n-\n-\t\t\t\t\tnicbuf->port = lio_dev->port_id;\n-\t\t\t\t\t/* We don't have a way to pass\n-\t\t\t\t\t * flags yet\n-\t\t\t\t\t */\n-\t\t\t\t\tnicbuf->ol_flags = 0;\n-\t\t\t\t\tif ((!pkt_len) && (rh->r_dh.has_hash)) {\n-\t\t\t\t\t\tuint64_t *hash_ptr;\n-\n-\t\t\t\t\t\tnicbuf->ol_flags |=\n-\t\t\t\t\t\t    RTE_MBUF_F_RX_RSS_HASH;\n-\t\t\t\t\t\thash_ptr = rte_pktmbuf_mtod(\n-\t\t\t\t\t\t    nicbuf, uint64_t *);\n-\t\t\t\t\t\tlio_swap_8B_data(hash_ptr, 1);\n-\t\t\t\t\t\tnicbuf->hash.rss =\n-\t\t\t\t\t\t    (uint32_t)*hash_ptr;\n-\t\t\t\t\t\tnicbuf->data_off += 8;\n-\t\t\t\t\t\tnicbuf->pkt_len = cpy_len - 8;\n-\t\t\t\t\t\tnicbuf->data_len = cpy_len - 8;\n-\t\t\t\t\t} else {\n-\t\t\t\t\t\tnicbuf->pkt_len = cpy_len;\n-\t\t\t\t\t\tnicbuf->data_len = cpy_len;\n-\t\t\t\t\t}\n-\n-\t\t\t\t\tif (pkt_len)\n-\t\t\t\t\t\tfirst_buf->nb_segs++;\n-\n-\t\t\t\t\tif (last_buf)\n-\t\t\t\t\t\tlast_buf->next = nicbuf;\n-\n-\t\t\t\t\tlast_buf = nicbuf;\n-\t\t\t\t} else {\n-\t\t\t\t\tPMD_RX_LOG(lio_dev, ERR, \"no buf\\n\");\n-\t\t\t\t}\n-\n-\t\t\t\tpkt_len += cpy_len;\n-\t\t\t\tdroq->read_idx = lio_incr_index(\n-\t\t\t\t\t\t\tdroq->read_idx,\n-\t\t\t\t\t\t\t1, droq->nb_desc);\n-\t\t\t\tdroq->refill_count++;\n-\n-\t\t\t\t/* Prefetch buffer pointers when on a\n-\t\t\t\t * cache line boundary\n-\t\t\t\t */\n-\t\t\t\tif ((droq->read_idx & 3) == 0) {\n-\t\t\t\t\trte_prefetch0(&droq->recv_buf_list\n-\t\t\t\t\t\t\t      [droq->read_idx]);\n-\n-\t\t\t\t\trte_prefetch0(\n-\t\t\t\t\t    &droq->info_list[droq->read_idx]);\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\trx_pkts[data_pkts++] = first_buf;\n-\t\t\tif (rh->r_dh.has_hash)\n-\t\t\t\tdata_total_len += (pkt_len - 8);\n-\t\t\telse\n-\t\t\t\tdata_total_len += pkt_len;\n-\t\t}\n-\n-\t\t/* Inform upper layer about packet checksum verification */\n-\t\tstruct rte_mbuf *m = rx_pkts[data_pkts - 1];\n-\n-\t\tif (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)\n-\t\t\tm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;\n-\n-\t\tif (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)\n-\t\t\tm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;\n-\t}\n-\n-\tif (droq->refill_count >= droq->refill_threshold) {\n-\t\tint desc_refilled = lio_droq_refill(droq);\n-\n-\t\t/* Flush the droq descriptor data to memory to be sure\n-\t\t * that when we update the credits the data in memory is\n-\t\t * accurate.\n-\t\t */\n-\t\trte_wmb();\n-\t\trte_write32(desc_refilled, droq->pkts_credit_reg);\n-\t\t/* make sure mmio write completes */\n-\t\trte_wmb();\n-\t}\n-\n-\tinfo->length = 0;\n-\tinfo->rh.rh64 = 0;\n-\n-\tdroq->stats.pkts_received++;\n-\tdroq->stats.rx_pkts_received += data_pkts;\n-\tdroq->stats.rx_bytes_received += data_total_len;\n-\tdroq->stats.bytes_received += total_len;\n-\n-\treturn data_pkts;\n-}\n-\n-static uint32_t\n-lio_droq_fast_process_packets(struct lio_device *lio_dev,\n-\t\t\t      struct lio_droq *droq,\n-\t\t\t      struct rte_mbuf **rx_pkts,\n-\t\t\t      uint32_t pkts_to_process)\n-{\n-\tint ret, data_pkts = 0;\n-\tuint32_t pkt;\n-\n-\tfor (pkt = 0; pkt < pkts_to_process; pkt++) {\n-\t\tret = lio_droq_fast_process_packet(lio_dev, droq,\n-\t\t\t\t\t\t   &rx_pkts[data_pkts]);\n-\t\tif (ret < 0) {\n-\t\t\tlio_dev_err(lio_dev, 
\"Port[%d] DROQ[%d] idx: %d len:0, pkt_cnt: %d\\n\",\n-\t\t\t\t    lio_dev->port_id, droq->q_no,\n-\t\t\t\t    droq->read_idx, pkts_to_process);\n-\t\t\tbreak;\n-\t\t}\n-\t\tdata_pkts += ret;\n-\t}\n-\n-\trte_atomic64_sub(&droq->pkts_pending, pkt);\n-\n-\treturn data_pkts;\n-}\n-\n-static inline uint32_t\n-lio_droq_check_hw_for_pkts(struct lio_droq *droq)\n-{\n-\tuint32_t last_count;\n-\tuint32_t pkt_count;\n-\n-\tpkt_count = rte_read32(droq->pkts_sent_reg);\n-\n-\tlast_count = pkt_count - droq->pkt_count;\n-\tdroq->pkt_count = pkt_count;\n-\n-\tif (last_count)\n-\t\trte_atomic64_add(&droq->pkts_pending, last_count);\n-\n-\treturn last_count;\n-}\n-\n-uint16_t\n-lio_dev_recv_pkts(void *rx_queue,\n-\t\t  struct rte_mbuf **rx_pkts,\n-\t\t  uint16_t budget)\n-{\n-\tstruct lio_droq *droq = rx_queue;\n-\tstruct lio_device *lio_dev = droq->lio_dev;\n-\tuint32_t pkts_processed = 0;\n-\tuint32_t pkt_count = 0;\n-\n-\tlio_droq_check_hw_for_pkts(droq);\n-\n-\tpkt_count = rte_atomic64_read(&droq->pkts_pending);\n-\tif (!pkt_count)\n-\t\treturn 0;\n-\n-\tif (pkt_count > budget)\n-\t\tpkt_count = budget;\n-\n-\t/* Grab the lock */\n-\trte_spinlock_lock(&droq->lock);\n-\tpkts_processed = lio_droq_fast_process_packets(lio_dev,\n-\t\t\t\t\t\t       droq, rx_pkts,\n-\t\t\t\t\t\t       pkt_count);\n-\n-\tif (droq->pkt_count) {\n-\t\trte_write32(droq->pkt_count, droq->pkts_sent_reg);\n-\t\tdroq->pkt_count = 0;\n-\t}\n-\n-\t/* Release the spin lock */\n-\trte_spinlock_unlock(&droq->lock);\n-\n-\treturn pkts_processed;\n-}\n-\n-void\n-lio_delete_droq_queue(struct lio_device *lio_dev,\n-\t\t      int oq_no)\n-{\n-\tlio_delete_droq(lio_dev, oq_no);\n-\tlio_dev->num_oqs--;\n-\trte_free(lio_dev->droq[oq_no]);\n-\tlio_dev->droq[oq_no] = NULL;\n-}\n-\n-/**\n- *  lio_init_instr_queue()\n- *  @param lio_dev\t- pointer to the lio device structure.\n- *  @param txpciq\t- queue to be initialized.\n- *\n- *  Called at driver init time for each input queue. 
iq_conf has the\n- *  configuration parameters for the queue.\n- *\n- *  @return  Success: 0\tFailure: -1\n- */\n-static int\n-lio_init_instr_queue(struct lio_device *lio_dev,\n-\t\t     union octeon_txpciq txpciq,\n-\t\t     uint32_t num_descs, unsigned int socket_id)\n-{\n-\tuint32_t iq_no = (uint32_t)txpciq.s.q_no;\n-\tstruct lio_instr_queue *iq;\n-\tuint32_t instr_type;\n-\tuint32_t q_size;\n-\n-\tinstr_type = LIO_IQ_INSTR_TYPE(lio_dev);\n-\n-\tq_size = instr_type * num_descs;\n-\tiq = lio_dev->instr_queue[iq_no];\n-\tiq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,\n-\t\t\t\t\t     \"instr_queue\", iq_no, q_size,\n-\t\t\t\t\t     RTE_CACHE_LINE_SIZE,\n-\t\t\t\t\t     socket_id);\n-\tif (iq->iq_mz == NULL) {\n-\t\tlio_dev_err(lio_dev, \"Cannot allocate memory for instr queue %d\\n\",\n-\t\t\t    iq_no);\n-\t\treturn -1;\n-\t}\n-\n-\tiq->base_addr_dma = iq->iq_mz->iova;\n-\tiq->base_addr = (uint8_t *)iq->iq_mz->addr;\n-\n-\tiq->nb_desc = num_descs;\n-\n-\t/* Initialize a list to holds requests that have been posted to Octeon\n-\t * but has yet to be fetched by octeon\n-\t */\n-\tiq->request_list = rte_zmalloc_socket(\"request_list\",\n-\t\t\t\t\t      sizeof(*iq->request_list) *\n-\t\t\t\t\t\t\tnum_descs,\n-\t\t\t\t\t      RTE_CACHE_LINE_SIZE,\n-\t\t\t\t\t      socket_id);\n-\tif (iq->request_list == NULL) {\n-\t\tlio_dev_err(lio_dev, \"Alloc failed for IQ[%d] nr free list\\n\",\n-\t\t\t    iq_no);\n-\t\tlio_dma_zone_free(lio_dev, iq->iq_mz);\n-\t\treturn -1;\n-\t}\n-\n-\tlio_dev_dbg(lio_dev, \"IQ[%d]: base: %p basedma: %lx count: %d\\n\",\n-\t\t    iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,\n-\t\t    iq->nb_desc);\n-\n-\tiq->lio_dev = lio_dev;\n-\tiq->txpciq.txpciq64 = txpciq.txpciq64;\n-\tiq->fill_cnt = 0;\n-\tiq->host_write_index = 0;\n-\tiq->lio_read_index = 0;\n-\tiq->flush_index = 0;\n-\n-\trte_atomic64_set(&iq->instr_pending, 0);\n-\n-\t/* Initialize the spinlock for this instruction queue */\n-\trte_spinlock_init(&iq->lock);\n-\trte_spinlock_init(&iq->post_lock);\n-\n-\trte_atomic64_clear(&iq->iq_flush_running);\n-\n-\tlio_dev->io_qmask.iq |= (1ULL << iq_no);\n-\n-\t/* Set the 32B/64B mode for each input queue */\n-\tlio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);\n-\tiq->iqcmd_64B = (instr_type == 64);\n-\n-\tlio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);\n-\n-\treturn 0;\n-}\n-\n-int\n-lio_setup_instr_queue0(struct lio_device *lio_dev)\n-{\n-\tunion octeon_txpciq txpciq;\n-\tuint32_t num_descs = 0;\n-\tuint32_t iq_no = 0;\n-\n-\tnum_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);\n-\n-\tlio_dev->num_iqs = 0;\n-\n-\tlio_dev->instr_queue[0] = rte_zmalloc(NULL,\n-\t\t\t\t\tsizeof(struct lio_instr_queue), 0);\n-\tif (lio_dev->instr_queue[0] == NULL)\n-\t\treturn -ENOMEM;\n-\n-\tlio_dev->instr_queue[0]->q_index = 0;\n-\tlio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;\n-\ttxpciq.txpciq64 = 0;\n-\ttxpciq.s.q_no = iq_no;\n-\ttxpciq.s.pkind = lio_dev->pfvf_hsword.pkind;\n-\ttxpciq.s.use_qpg = 0;\n-\ttxpciq.s.qpg = 0;\n-\tif (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {\n-\t\trte_free(lio_dev->instr_queue[0]);\n-\t\tlio_dev->instr_queue[0] = NULL;\n-\t\treturn -1;\n-\t}\n-\n-\tlio_dev->num_iqs++;\n-\n-\treturn 0;\n-}\n-\n-/**\n- *  lio_delete_instr_queue()\n- *  @param lio_dev\t- pointer to the lio device structure.\n- *  @param iq_no\t- queue to be deleted.\n- *\n- *  Called at driver unload time for each input queue. 
Deletes all\n- *  allocated resources for the input queue.\n- */\n-static void\n-lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)\n-{\n-\tstruct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];\n-\n-\trte_free(iq->request_list);\n-\tiq->request_list = NULL;\n-\tlio_dma_zone_free(lio_dev, iq->iq_mz);\n-}\n-\n-void\n-lio_free_instr_queue0(struct lio_device *lio_dev)\n-{\n-\tlio_delete_instr_queue(lio_dev, 0);\n-\trte_free(lio_dev->instr_queue[0]);\n-\tlio_dev->instr_queue[0] = NULL;\n-\tlio_dev->num_iqs--;\n-}\n-\n-/* Return 0 on success, -1 on failure */\n-int\n-lio_setup_iq(struct lio_device *lio_dev, int q_index,\n-\t     union octeon_txpciq txpciq, uint32_t num_descs, void *app_ctx,\n-\t     unsigned int socket_id)\n-{\n-\tuint32_t iq_no = (uint32_t)txpciq.s.q_no;\n-\n-\tlio_dev->instr_queue[iq_no] = rte_zmalloc_socket(\"ethdev TX queue\",\n-\t\t\t\t\t\tsizeof(struct lio_instr_queue),\n-\t\t\t\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n-\tif (lio_dev->instr_queue[iq_no] == NULL)\n-\t\treturn -1;\n-\n-\tlio_dev->instr_queue[iq_no]->q_index = q_index;\n-\tlio_dev->instr_queue[iq_no]->app_ctx = app_ctx;\n-\n-\tif (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id)) {\n-\t\trte_free(lio_dev->instr_queue[iq_no]);\n-\t\tlio_dev->instr_queue[iq_no] = NULL;\n-\t\treturn -1;\n-\t}\n-\n-\tlio_dev->num_iqs++;\n-\n-\treturn 0;\n-}\n-\n-int\n-lio_wait_for_instr_fetch(struct lio_device *lio_dev)\n-{\n-\tint pending, instr_cnt;\n-\tint i, retry = 1000;\n-\n-\tdo {\n-\t\tinstr_cnt = 0;\n-\n-\t\tfor (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) {\n-\t\t\tif (!(lio_dev->io_qmask.iq & (1ULL << i)))\n-\t\t\t\tcontinue;\n-\n-\t\t\tif (lio_dev->instr_queue[i] == NULL)\n-\t\t\t\tbreak;\n-\n-\t\t\tpending = rte_atomic64_read(\n-\t\t\t    &lio_dev->instr_queue[i]->instr_pending);\n-\t\t\tif (pending)\n-\t\t\t\tlio_flush_iq(lio_dev, lio_dev->instr_queue[i]);\n-\n-\t\t\tinstr_cnt += pending;\n-\t\t}\n-\n-\t\tif (instr_cnt == 0)\n-\t\t\tbreak;\n-\n-\t\trte_delay_ms(1);\n-\n-\t} while (retry-- && instr_cnt);\n-\n-\treturn instr_cnt;\n-}\n-\n-static inline void\n-lio_ring_doorbell(struct lio_device *lio_dev,\n-\t\t  struct lio_instr_queue *iq)\n-{\n-\tif (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {\n-\t\trte_write32(iq->fill_cnt, iq->doorbell_reg);\n-\t\t/* make sure doorbell write goes through */\n-\t\trte_wmb();\n-\t\tiq->fill_cnt = 0;\n-\t}\n-}\n-\n-static inline void\n-copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)\n-{\n-\tuint8_t *iqptr, cmdsize;\n-\n-\tcmdsize = ((iq->iqcmd_64B) ? 64 : 32);\n-\tiqptr = iq->base_addr + (cmdsize * iq->host_write_index);\n-\n-\trte_memcpy(iqptr, cmd, cmdsize);\n-}\n-\n-static inline struct lio_iq_post_status\n-post_command2(struct lio_instr_queue *iq, uint8_t *cmd)\n-{\n-\tstruct lio_iq_post_status st;\n-\n-\tst.status = LIO_IQ_SEND_OK;\n-\n-\t/* This ensures that the read index does not wrap around to the same\n-\t * position if queue gets full before Octeon could fetch any instr.\n-\t */\n-\tif (rte_atomic64_read(&iq->instr_pending) >=\n-\t\t\t(int32_t)(iq->nb_desc - 1)) {\n-\t\tst.status = LIO_IQ_SEND_FAILED;\n-\t\tst.index = -1;\n-\t\treturn st;\n-\t}\n-\n-\tif (rte_atomic64_read(&iq->instr_pending) >=\n-\t\t\t(int32_t)(iq->nb_desc - 2))\n-\t\tst.status = LIO_IQ_SEND_STOP;\n-\n-\tcopy_cmd_into_iq(iq, cmd);\n-\n-\t/* \"index\" is returned, host_write_index is modified. 
*/\n-\tst.index = iq->host_write_index;\n-\tiq->host_write_index = lio_incr_index(iq->host_write_index, 1,\n-\t\t\t\t\t      iq->nb_desc);\n-\tiq->fill_cnt++;\n-\n-\t/* Flush the command into memory. We need to be sure the data is in\n-\t * memory before indicating that the instruction is pending.\n-\t */\n-\trte_wmb();\n-\n-\trte_atomic64_inc(&iq->instr_pending);\n-\n-\treturn st;\n-}\n-\n-static inline void\n-lio_add_to_request_list(struct lio_instr_queue *iq,\n-\t\t\tint idx, void *buf, int reqtype)\n-{\n-\tiq->request_list[idx].buf = buf;\n-\tiq->request_list[idx].reqtype = reqtype;\n-}\n-\n-static inline void\n-lio_free_netsgbuf(void *buf)\n-{\n-\tstruct lio_buf_free_info *finfo = buf;\n-\tstruct lio_device *lio_dev = finfo->lio_dev;\n-\tstruct rte_mbuf *m = finfo->mbuf;\n-\tstruct lio_gather *g = finfo->g;\n-\tuint8_t iq = finfo->iq_no;\n-\n-\t/* This will take care of multiple segments also */\n-\trte_pktmbuf_free(m);\n-\n-\trte_spinlock_lock(&lio_dev->glist_lock[iq]);\n-\tSTAILQ_INSERT_TAIL(&lio_dev->glist_head[iq], &g->list, entries);\n-\trte_spinlock_unlock(&lio_dev->glist_lock[iq]);\n-\trte_free(finfo);\n-}\n-\n-/* Can only run in process context */\n-static int\n-lio_process_iq_request_list(struct lio_device *lio_dev,\n-\t\t\t    struct lio_instr_queue *iq)\n-{\n-\tstruct octeon_instr_irh *irh = NULL;\n-\tuint32_t old = iq->flush_index;\n-\tstruct lio_soft_command *sc;\n-\tuint32_t inst_count = 0;\n-\tint reqtype;\n-\tvoid *buf;\n-\n-\twhile (old != iq->lio_read_index) {\n-\t\treqtype = iq->request_list[old].reqtype;\n-\t\tbuf     = iq->request_list[old].buf;\n-\n-\t\tif (reqtype == LIO_REQTYPE_NONE)\n-\t\t\tgoto skip_this;\n-\n-\t\tswitch (reqtype) {\n-\t\tcase LIO_REQTYPE_NORESP_NET:\n-\t\t\trte_pktmbuf_free((struct rte_mbuf *)buf);\n-\t\t\tbreak;\n-\t\tcase LIO_REQTYPE_NORESP_NET_SG:\n-\t\t\tlio_free_netsgbuf(buf);\n-\t\t\tbreak;\n-\t\tcase LIO_REQTYPE_SOFT_COMMAND:\n-\t\t\tsc = buf;\n-\t\t\tirh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;\n-\t\t\tif (irh->rflag) {\n-\t\t\t\t/* We're expecting a response from Octeon.\n-\t\t\t\t * It's up to lio_process_ordered_list() to\n-\t\t\t\t * process sc. 
Add sc to the ordered soft\n-\t\t\t\t * command response list because we expect\n-\t\t\t\t * a response from Octeon.\n-\t\t\t\t */\n-\t\t\t\trte_spinlock_lock(&lio_dev->response_list.lock);\n-\t\t\t\trte_atomic64_inc(\n-\t\t\t\t    &lio_dev->response_list.pending_req_count);\n-\t\t\t\tSTAILQ_INSERT_TAIL(\n-\t\t\t\t\t&lio_dev->response_list.head,\n-\t\t\t\t\t&sc->node, entries);\n-\t\t\t\trte_spinlock_unlock(\n-\t\t\t\t\t\t&lio_dev->response_list.lock);\n-\t\t\t} else {\n-\t\t\t\tif (sc->callback) {\n-\t\t\t\t\t/* This callback must not sleep */\n-\t\t\t\t\tsc->callback(LIO_REQUEST_DONE,\n-\t\t\t\t\t\t     sc->callback_arg);\n-\t\t\t\t}\n-\t\t\t}\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\tlio_dev_err(lio_dev,\n-\t\t\t\t    \"Unknown reqtype: %d buf: %p at idx %d\\n\",\n-\t\t\t\t    reqtype, buf, old);\n-\t\t}\n-\n-\t\tiq->request_list[old].buf = NULL;\n-\t\tiq->request_list[old].reqtype = 0;\n-\n-skip_this:\n-\t\tinst_count++;\n-\t\told = lio_incr_index(old, 1, iq->nb_desc);\n-\t}\n-\n-\tiq->flush_index = old;\n-\n-\treturn inst_count;\n-}\n-\n-static void\n-lio_update_read_index(struct lio_instr_queue *iq)\n-{\n-\tuint32_t pkt_in_done = rte_read32(iq->inst_cnt_reg);\n-\tuint32_t last_done;\n-\n-\tlast_done = pkt_in_done - iq->pkt_in_done;\n-\tiq->pkt_in_done = pkt_in_done;\n-\n-\t/* Add last_done and modulo with the IQ size to get new index */\n-\tiq->lio_read_index = (iq->lio_read_index +\n-\t\t\t(uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %\n-\t\t\tiq->nb_desc;\n-}\n-\n-int\n-lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)\n-{\n-\tuint32_t inst_processed = 0;\n-\tint tx_done = 1;\n-\n-\tif (rte_atomic64_test_and_set(&iq->iq_flush_running) == 0)\n-\t\treturn tx_done;\n-\n-\trte_spinlock_lock(&iq->lock);\n-\n-\tlio_update_read_index(iq);\n-\n-\tdo {\n-\t\t/* Process any outstanding IQ packets. 
*/\n-\t\tif (iq->flush_index == iq->lio_read_index)\n-\t\t\tbreak;\n-\n-\t\tinst_processed = lio_process_iq_request_list(lio_dev, iq);\n-\n-\t\tif (inst_processed) {\n-\t\t\trte_atomic64_sub(&iq->instr_pending, inst_processed);\n-\t\t\tiq->stats.instr_processed += inst_processed;\n-\t\t}\n-\n-\t\tinst_processed = 0;\n-\n-\t} while (1);\n-\n-\trte_spinlock_unlock(&iq->lock);\n-\n-\trte_atomic64_clear(&iq->iq_flush_running);\n-\n-\treturn tx_done;\n-}\n-\n-static int\n-lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,\n-\t\t void *buf, uint32_t datasize, uint32_t reqtype)\n-{\n-\tstruct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];\n-\tstruct lio_iq_post_status st;\n-\n-\trte_spinlock_lock(&iq->post_lock);\n-\n-\tst = post_command2(iq, cmd);\n-\n-\tif (st.status != LIO_IQ_SEND_FAILED) {\n-\t\tlio_add_to_request_list(iq, st.index, buf, reqtype);\n-\t\tLIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent,\n-\t\t\t\t\t      datasize);\n-\t\tLIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1);\n-\n-\t\tlio_ring_doorbell(lio_dev, iq);\n-\t} else {\n-\t\tLIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1);\n-\t}\n-\n-\trte_spinlock_unlock(&iq->post_lock);\n-\n-\treturn st.status;\n-}\n-\n-void\n-lio_prepare_soft_command(struct lio_device *lio_dev,\n-\t\t\t struct lio_soft_command *sc, uint8_t opcode,\n-\t\t\t uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,\n-\t\t\t uint64_t ossp1)\n-{\n-\tstruct octeon_instr_pki_ih3 *pki_ih3;\n-\tstruct octeon_instr_ih3 *ih3;\n-\tstruct octeon_instr_irh *irh;\n-\tstruct octeon_instr_rdp *rdp;\n-\n-\tRTE_ASSERT(opcode <= 15);\n-\tRTE_ASSERT(subcode <= 127);\n-\n-\tih3\t  = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;\n-\n-\tih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;\n-\n-\tpki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;\n-\n-\tpki_ih3->w\t= 1;\n-\tpki_ih3->raw\t= 1;\n-\tpki_ih3->utag\t= 1;\n-\tpki_ih3->uqpg\t= lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;\n-\tpki_ih3->utt\t= 1;\n-\n-\tpki_ih3->tag\t= LIO_CONTROL;\n-\tpki_ih3->tagtype = OCTEON_ATOMIC_TAG;\n-\tpki_ih3->qpg\t= lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;\n-\tpki_ih3->pm\t= 0x7;\n-\tpki_ih3->sl\t= 8;\n-\n-\tif (sc->datasize)\n-\t\tih3->dlengsz = sc->datasize;\n-\n-\tirh\t\t= (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;\n-\tirh->opcode\t= opcode;\n-\tirh->subcode\t= subcode;\n-\n-\t/* opcode/subcode specific parameters (ossp) */\n-\tirh->ossp = irh_ossp;\n-\tsc->cmd.cmd3.ossp[0] = ossp0;\n-\tsc->cmd.cmd3.ossp[1] = ossp1;\n-\n-\tif (sc->rdatasize) {\n-\t\trdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;\n-\t\trdp->pcie_port = lio_dev->pcie_port;\n-\t\trdp->rlen      = sc->rdatasize;\n-\t\tirh->rflag = 1;\n-\t\t/* PKI IH3 */\n-\t\tih3->fsz    = OCTEON_SOFT_CMD_RESP_IH3;\n-\t} else {\n-\t\tirh->rflag = 0;\n-\t\t/* PKI IH3 */\n-\t\tih3->fsz    = OCTEON_PCI_CMD_O3;\n-\t}\n-}\n-\n-int\n-lio_send_soft_command(struct lio_device *lio_dev,\n-\t\t      struct lio_soft_command *sc)\n-{\n-\tstruct octeon_instr_ih3 *ih3;\n-\tstruct octeon_instr_irh *irh;\n-\tuint32_t len = 0;\n-\n-\tih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;\n-\tif (ih3->dlengsz) {\n-\t\tRTE_ASSERT(sc->dmadptr);\n-\t\tsc->cmd.cmd3.dptr = sc->dmadptr;\n-\t}\n-\n-\tirh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;\n-\tif (irh->rflag) {\n-\t\tRTE_ASSERT(sc->dmarptr);\n-\t\tRTE_ASSERT(sc->status_word != NULL);\n-\t\t*sc->status_word = LIO_COMPLETION_WORD_INIT;\n-\t\tsc->cmd.cmd3.rptr = sc->dmarptr;\n-\t}\n-\n-\tlen = 
(uint32_t)ih3->dlengsz;\n-\n-\tif (sc->wait_time)\n-\t\tsc->timeout = lio_uptime + sc->wait_time;\n-\n-\treturn lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,\n-\t\t\t\tLIO_REQTYPE_SOFT_COMMAND);\n-}\n-\n-int\n-lio_setup_sc_buffer_pool(struct lio_device *lio_dev)\n-{\n-\tchar sc_pool_name[RTE_MEMPOOL_NAMESIZE];\n-\tuint16_t buf_size;\n-\n-\tbuf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;\n-\tsnprintf(sc_pool_name, sizeof(sc_pool_name),\n-\t\t \"lio_sc_pool_%u\", lio_dev->port_id);\n-\tlio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,\n-\t\t\t\t\t\tLIO_MAX_SOFT_COMMAND_BUFFERS,\n-\t\t\t\t\t\t0, 0, buf_size, SOCKET_ID_ANY);\n-\treturn 0;\n-}\n-\n-void\n-lio_free_sc_buffer_pool(struct lio_device *lio_dev)\n-{\n-\trte_mempool_free(lio_dev->sc_buf_pool);\n-}\n-\n-struct lio_soft_command *\n-lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,\n-\t\t       uint32_t rdatasize, uint32_t ctxsize)\n-{\n-\tuint32_t offset = sizeof(struct lio_soft_command);\n-\tstruct lio_soft_command *sc;\n-\tstruct rte_mbuf *m;\n-\tuint64_t dma_addr;\n-\n-\tRTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=\n-\t\t   LIO_SOFT_COMMAND_BUFFER_SIZE);\n-\n-\tm = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);\n-\tif (m == NULL) {\n-\t\tlio_dev_err(lio_dev, \"Cannot allocate mbuf for sc\\n\");\n-\t\treturn NULL;\n-\t}\n-\n-\t/* set rte_mbuf data size and there is only 1 segment */\n-\tm->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;\n-\tm->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;\n-\n-\t/* use rte_mbuf buffer for soft command */\n-\tsc = rte_pktmbuf_mtod(m, struct lio_soft_command *);\n-\tmemset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);\n-\tsc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;\n-\tsc->dma_addr = rte_mbuf_data_iova(m);\n-\tsc->mbuf = m;\n-\n-\tdma_addr = sc->dma_addr;\n-\n-\tif (ctxsize) {\n-\t\tsc->ctxptr = (uint8_t *)sc + offset;\n-\t\tsc->ctxsize = ctxsize;\n-\t}\n-\n-\t/* Start data at 128 byte boundary */\n-\toffset = (offset + ctxsize + 127) & 0xffffff80;\n-\n-\tif (datasize) {\n-\t\tsc->virtdptr = (uint8_t *)sc + offset;\n-\t\tsc->dmadptr = dma_addr + offset;\n-\t\tsc->datasize = datasize;\n-\t}\n-\n-\t/* Start rdata at 128 byte boundary */\n-\toffset = (offset + datasize + 127) & 0xffffff80;\n-\n-\tif (rdatasize) {\n-\t\tRTE_ASSERT(rdatasize >= 16);\n-\t\tsc->virtrptr = (uint8_t *)sc + offset;\n-\t\tsc->dmarptr = dma_addr + offset;\n-\t\tsc->rdatasize = rdatasize;\n-\t\tsc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +\n-\t\t\t\t\t       rdatasize - 8);\n-\t}\n-\n-\treturn sc;\n-}\n-\n-void\n-lio_free_soft_command(struct lio_soft_command *sc)\n-{\n-\trte_pktmbuf_free(sc->mbuf);\n-}\n-\n-void\n-lio_setup_response_list(struct lio_device *lio_dev)\n-{\n-\tSTAILQ_INIT(&lio_dev->response_list.head);\n-\trte_spinlock_init(&lio_dev->response_list.lock);\n-\trte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);\n-}\n-\n-int\n-lio_process_ordered_list(struct lio_device *lio_dev)\n-{\n-\tint resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;\n-\tstruct lio_response_list *ordered_sc_list;\n-\tstruct lio_soft_command *sc;\n-\tint request_complete = 0;\n-\tuint64_t status64;\n-\tuint32_t status;\n-\n-\tordered_sc_list = &lio_dev->response_list;\n-\n-\tdo {\n-\t\trte_spinlock_lock(&ordered_sc_list->lock);\n-\n-\t\tif (STAILQ_EMPTY(&ordered_sc_list->head)) {\n-\t\t\t/* ordered_sc_list is empty; there is\n-\t\t\t * nothing to process\n-\t\t\t */\n-\t\t\trte_spinlock_unlock(&ordered_sc_list->lock);\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\tsc = 
LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,\n-\t\t\t\t\t     struct lio_soft_command, node);\n-\n-\t\tstatus = LIO_REQUEST_PENDING;\n-\n-\t\t/* check if octeon has finished DMA'ing a response\n-\t\t * to where rptr is pointing to\n-\t\t */\n-\t\tstatus64 = *sc->status_word;\n-\n-\t\tif (status64 != LIO_COMPLETION_WORD_INIT) {\n-\t\t\t/* This logic ensures that all 64b have been written.\n-\t\t\t * 1. check byte 0 for non-FF\n-\t\t\t * 2. if non-FF, then swap result from BE to host order\n-\t\t\t * 3. check byte 7 (swapped to 0) for non-FF\n-\t\t\t * 4. if non-FF, use the low 32-bit status code\n-\t\t\t * 5. if either byte 0 or byte 7 is FF, don't use status\n-\t\t\t */\n-\t\t\tif ((status64 & 0xff) != 0xff) {\n-\t\t\t\tlio_swap_8B_data(&status64, 1);\n-\t\t\t\tif (((status64 & 0xff) != 0xff)) {\n-\t\t\t\t\t/* retrieve 16-bit firmware status */\n-\t\t\t\t\tstatus = (uint32_t)(status64 &\n-\t\t\t\t\t\t\t    0xffffULL);\n-\t\t\t\t\tif (status) {\n-\t\t\t\t\t\tstatus =\n-\t\t\t\t\t\tLIO_FIRMWARE_STATUS_CODE(\n-\t\t\t\t\t\t\t\t\tstatus);\n-\t\t\t\t\t} else {\n-\t\t\t\t\t\t/* i.e. no error */\n-\t\t\t\t\t\tstatus = LIO_REQUEST_DONE;\n-\t\t\t\t\t}\n-\t\t\t\t}\n-\t\t\t}\n-\t\t} else if ((sc->timeout && lio_check_timeout(lio_uptime,\n-\t\t\t\t\t\t\t     sc->timeout))) {\n-\t\t\tlio_dev_err(lio_dev,\n-\t\t\t\t    \"cmd failed, timeout (%ld, %ld)\\n\",\n-\t\t\t\t    (long)lio_uptime, (long)sc->timeout);\n-\t\t\tstatus = LIO_REQUEST_TIMEOUT;\n-\t\t}\n-\n-\t\tif (status != LIO_REQUEST_PENDING) {\n-\t\t\t/* we have received a response or we have timed out.\n-\t\t\t * remove node from linked list\n-\t\t\t */\n-\t\t\tSTAILQ_REMOVE(&ordered_sc_list->head,\n-\t\t\t\t      &sc->node, lio_stailq_node, entries);\n-\t\t\trte_atomic64_dec(\n-\t\t\t    &lio_dev->response_list.pending_req_count);\n-\t\t\trte_spinlock_unlock(&ordered_sc_list->lock);\n-\n-\t\t\tif (sc->callback)\n-\t\t\t\tsc->callback(status, sc->callback_arg);\n-\n-\t\t\trequest_complete++;\n-\t\t} else {\n-\t\t\t/* no response yet */\n-\t\t\trequest_complete = 0;\n-\t\t\trte_spinlock_unlock(&ordered_sc_list->lock);\n-\t\t}\n-\n-\t\t/* If we hit the Max Ordered requests to process every loop,\n-\t\t * we quit and let this function be invoked the next time\n-\t\t * the poll thread runs to process the remaining requests.\n-\t\t * This function can take up the entire CPU if there is\n-\t\t * no upper limit to the requests processed.\n-\t\t */\n-\t\tif (request_complete >= resp_to_process)\n-\t\t\tbreak;\n-\t} while (request_complete);\n-\n-\treturn 0;\n-}\n-\n-static inline struct lio_stailq_node *\n-list_delete_first_node(struct lio_stailq_head *head)\n-{\n-\tstruct lio_stailq_node *node;\n-\n-\tif (STAILQ_EMPTY(head))\n-\t\tnode = NULL;\n-\telse\n-\t\tnode = STAILQ_FIRST(head);\n-\n-\tif (node)\n-\t\tSTAILQ_REMOVE(head, node, lio_stailq_node, entries);\n-\n-\treturn node;\n-}\n-\n-void\n-lio_delete_sglist(struct lio_instr_queue *txq)\n-{\n-\tstruct lio_device *lio_dev = txq->lio_dev;\n-\tint iq_no = txq->q_index;\n-\tstruct lio_gather *g;\n-\n-\tif (lio_dev->glist_head == NULL)\n-\t\treturn;\n-\n-\tdo {\n-\t\tg = (struct lio_gather *)list_delete_first_node(\n-\t\t\t\t\t\t&lio_dev->glist_head[iq_no]);\n-\t\tif (g) {\n-\t\t\tif (g->sg)\n-\t\t\t\trte_free(\n-\t\t\t\t    (void *)((unsigned long)g->sg - g->adjust));\n-\t\t\trte_free(g);\n-\t\t}\n-\t} while (g);\n-}\n-\n-/**\n- * \\brief Setup gather lists\n- * @param lio per-network private data\n- */\n-int\n-lio_setup_sglists(struct lio_device *lio_dev, int iq_no,\n-\t\t  int fw_mapped_iq, int 
num_descs, unsigned int socket_id)\n-{\n-\tstruct lio_gather *g;\n-\tint i;\n-\n-\trte_spinlock_init(&lio_dev->glist_lock[iq_no]);\n-\n-\tSTAILQ_INIT(&lio_dev->glist_head[iq_no]);\n-\n-\tfor (i = 0; i < num_descs; i++) {\n-\t\tg = rte_zmalloc_socket(NULL, sizeof(*g), RTE_CACHE_LINE_SIZE,\n-\t\t\t\t       socket_id);\n-\t\tif (g == NULL) {\n-\t\t\tlio_dev_err(lio_dev,\n-\t\t\t\t    \"lio_gather memory allocation failed for qno %d\\n\",\n-\t\t\t\t    iq_no);\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\tg->sg_size =\n-\t\t    ((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE);\n-\n-\t\tg->sg = rte_zmalloc_socket(NULL, g->sg_size + 8,\n-\t\t\t\t\t   RTE_CACHE_LINE_SIZE, socket_id);\n-\t\tif (g->sg == NULL) {\n-\t\t\tlio_dev_err(lio_dev,\n-\t\t\t\t    \"sg list memory allocation failed for qno %d\\n\",\n-\t\t\t\t    iq_no);\n-\t\t\trte_free(g);\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\t/* The gather component should be aligned on 64-bit boundary */\n-\t\tif (((unsigned long)g->sg) & 7) {\n-\t\t\tg->adjust = 8 - (((unsigned long)g->sg) & 7);\n-\t\t\tg->sg =\n-\t\t\t    (struct lio_sg_entry *)((unsigned long)g->sg +\n-\t\t\t\t\t\t       g->adjust);\n-\t\t}\n-\n-\t\tSTAILQ_INSERT_TAIL(&lio_dev->glist_head[iq_no], &g->list,\n-\t\t\t\t   entries);\n-\t}\n-\n-\tif (i != num_descs) {\n-\t\tlio_delete_sglist(lio_dev->instr_queue[fw_mapped_iq]);\n-\t\treturn -ENOMEM;\n-\t}\n-\n-\treturn 0;\n-}\n-\n-void\n-lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)\n-{\n-\tlio_delete_instr_queue(lio_dev, iq_no);\n-\trte_free(lio_dev->instr_queue[iq_no]);\n-\tlio_dev->instr_queue[iq_no] = NULL;\n-\tlio_dev->num_iqs--;\n-}\n-\n-static inline uint32_t\n-lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)\n-{\n-\treturn ((lio_dev->instr_queue[q_no]->nb_desc - 1) -\n-\t\t(uint32_t)rte_atomic64_read(\n-\t\t\t\t&lio_dev->instr_queue[q_no]->instr_pending));\n-}\n-\n-static inline int\n-lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)\n-{\n-\treturn ((uint32_t)rte_atomic64_read(\n-\t\t\t\t&lio_dev->instr_queue[q_no]->instr_pending) >=\n-\t\t\t\t(lio_dev->instr_queue[q_no]->nb_desc - 2));\n-}\n-\n-static int\n-lio_dev_cleanup_iq(struct lio_device *lio_dev, int iq_no)\n-{\n-\tstruct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];\n-\tuint32_t count = 10000;\n-\n-\twhile ((lio_iq_get_available(lio_dev, iq_no) < LIO_FLUSH_WM(iq)) &&\n-\t\t\t--count)\n-\t\tlio_flush_iq(lio_dev, iq);\n-\n-\treturn count ? 0 : 1;\n-}\n-\n-static void\n-lio_ctrl_cmd_callback(uint32_t status __rte_unused, void *sc_ptr)\n-{\n-\tstruct lio_soft_command *sc = sc_ptr;\n-\tstruct lio_dev_ctrl_cmd *ctrl_cmd;\n-\tstruct lio_ctrl_pkt *ctrl_pkt;\n-\n-\tctrl_pkt = (struct lio_ctrl_pkt *)sc->ctxptr;\n-\tctrl_cmd = ctrl_pkt->ctrl_cmd;\n-\tctrl_cmd->cond = 1;\n-\n-\tlio_free_soft_command(sc);\n-}\n-\n-static inline struct lio_soft_command *\n-lio_alloc_ctrl_pkt_sc(struct lio_device *lio_dev,\n-\t\t      struct lio_ctrl_pkt *ctrl_pkt)\n-{\n-\tstruct lio_soft_command *sc = NULL;\n-\tuint32_t uddsize, datasize;\n-\tuint32_t rdatasize;\n-\tuint8_t *data;\n-\n-\tuddsize = (uint32_t)(ctrl_pkt->ncmd.s.more * 8);\n-\n-\tdatasize = OCTEON_CMD_SIZE + uddsize;\n-\trdatasize = (ctrl_pkt->wait_time) ? 
16 : 0;\n-\n-\tsc = lio_alloc_soft_command(lio_dev, datasize,\n-\t\t\t\t    rdatasize, sizeof(struct lio_ctrl_pkt));\n-\tif (sc == NULL)\n-\t\treturn NULL;\n-\n-\trte_memcpy(sc->ctxptr, ctrl_pkt, sizeof(struct lio_ctrl_pkt));\n-\n-\tdata = (uint8_t *)sc->virtdptr;\n-\n-\trte_memcpy(data, &ctrl_pkt->ncmd, OCTEON_CMD_SIZE);\n-\n-\tlio_swap_8B_data((uint64_t *)data, OCTEON_CMD_SIZE >> 3);\n-\n-\tif (uddsize) {\n-\t\t/* Endian-Swap for UDD should have been done by caller. */\n-\t\trte_memcpy(data + OCTEON_CMD_SIZE, ctrl_pkt->udd, uddsize);\n-\t}\n-\n-\tsc->iq_no = (uint32_t)ctrl_pkt->iq_no;\n-\n-\tlio_prepare_soft_command(lio_dev, sc,\n-\t\t\t\t LIO_OPCODE, LIO_OPCODE_CMD,\n-\t\t\t\t 0, 0, 0);\n-\n-\tsc->callback = lio_ctrl_cmd_callback;\n-\tsc->callback_arg = sc;\n-\tsc->wait_time = ctrl_pkt->wait_time;\n-\n-\treturn sc;\n-}\n-\n-int\n-lio_send_ctrl_pkt(struct lio_device *lio_dev, struct lio_ctrl_pkt *ctrl_pkt)\n-{\n-\tstruct lio_soft_command *sc = NULL;\n-\tint retval;\n-\n-\tsc = lio_alloc_ctrl_pkt_sc(lio_dev, ctrl_pkt);\n-\tif (sc == NULL) {\n-\t\tlio_dev_err(lio_dev, \"soft command allocation failed\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\tretval = lio_send_soft_command(lio_dev, sc);\n-\tif (retval == LIO_IQ_SEND_FAILED) {\n-\t\tlio_free_soft_command(sc);\n-\t\tlio_dev_err(lio_dev, \"Port: %d soft command: %d send failed status: %x\\n\",\n-\t\t\t    lio_dev->port_id, ctrl_pkt->ncmd.s.cmd, retval);\n-\t\treturn -1;\n-\t}\n-\n-\treturn retval;\n-}\n-\n-/** Send data packet to the device\n- *  @param lio_dev - lio device pointer\n- *  @param ndata   - control structure with queueing, and buffer information\n- *\n- *  @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the\n- *  queue should be stopped, and LIO_IQ_SEND_OK if it sent okay.\n- */\n-static inline int\n-lio_send_data_pkt(struct lio_device *lio_dev, struct lio_data_pkt *ndata)\n-{\n-\treturn lio_send_command(lio_dev, ndata->q_no, &ndata->cmd,\n-\t\t\t\tndata->buf, ndata->datasize, ndata->reqtype);\n-}\n-\n-uint16_t\n-lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)\n-{\n-\tstruct lio_instr_queue *txq = tx_queue;\n-\tunion lio_cmd_setup cmdsetup;\n-\tstruct lio_device *lio_dev;\n-\tstruct lio_iq_stats *stats;\n-\tstruct lio_data_pkt ndata;\n-\tint i, processed = 0;\n-\tstruct rte_mbuf *m;\n-\tuint32_t tag = 0;\n-\tint status = 0;\n-\tint iq_no;\n-\n-\tlio_dev = txq->lio_dev;\n-\tiq_no = txq->txpciq.s.q_no;\n-\tstats = &lio_dev->instr_queue[iq_no]->stats;\n-\n-\tif (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) {\n-\t\tPMD_TX_LOG(lio_dev, ERR, \"Transmit failed link_status : %d\\n\",\n-\t\t\t   lio_dev->linfo.link.s.link_up);\n-\t\tgoto xmit_failed;\n-\t}\n-\n-\tlio_dev_cleanup_iq(lio_dev, iq_no);\n-\n-\tfor (i = 0; i < nb_pkts; i++) {\n-\t\tuint32_t pkt_len = 0;\n-\n-\t\tm = pkts[i];\n-\n-\t\t/* Prepare the attributes for the data to be passed to BASE. 
*/\n-\t\tmemset(&ndata, 0, sizeof(struct lio_data_pkt));\n-\n-\t\tndata.buf = m;\n-\n-\t\tndata.q_no = iq_no;\n-\t\tif (lio_iq_is_full(lio_dev, ndata.q_no)) {\n-\t\t\tstats->tx_iq_busy++;\n-\t\t\tif (lio_dev_cleanup_iq(lio_dev, iq_no)) {\n-\t\t\t\tPMD_TX_LOG(lio_dev, ERR,\n-\t\t\t\t\t   \"Transmit failed iq:%d full\\n\",\n-\t\t\t\t\t   ndata.q_no);\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t}\n-\n-\t\tcmdsetup.cmd_setup64 = 0;\n-\t\tcmdsetup.s.iq_no = iq_no;\n-\n-\t\t/* check checksum offload flags to form cmd */\n-\t\tif (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)\n-\t\t\tcmdsetup.s.ip_csum = 1;\n-\n-\t\tif (m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)\n-\t\t\tcmdsetup.s.tnl_csum = 1;\n-\t\telse if ((m->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) ||\n-\t\t\t\t(m->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM))\n-\t\t\tcmdsetup.s.transport_csum = 1;\n-\n-\t\tif (m->nb_segs == 1) {\n-\t\t\tpkt_len = rte_pktmbuf_data_len(m);\n-\t\t\tcmdsetup.s.u.datasize = pkt_len;\n-\t\t\tlio_prepare_pci_cmd(lio_dev, &ndata.cmd,\n-\t\t\t\t\t    &cmdsetup, tag);\n-\t\t\tndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m);\n-\t\t\tndata.reqtype = LIO_REQTYPE_NORESP_NET;\n-\t\t} else {\n-\t\t\tstruct lio_buf_free_info *finfo;\n-\t\t\tstruct lio_gather *g;\n-\t\t\trte_iova_t phyaddr;\n-\t\t\tint i, frags;\n-\n-\t\t\tfinfo = (struct lio_buf_free_info *)rte_malloc(NULL,\n-\t\t\t\t\t\t\tsizeof(*finfo), 0);\n-\t\t\tif (finfo == NULL) {\n-\t\t\t\tPMD_TX_LOG(lio_dev, ERR,\n-\t\t\t\t\t   \"free buffer alloc failed\\n\");\n-\t\t\t\tgoto xmit_failed;\n-\t\t\t}\n-\n-\t\t\trte_spinlock_lock(&lio_dev->glist_lock[iq_no]);\n-\t\t\tg = (struct lio_gather *)list_delete_first_node(\n-\t\t\t\t\t\t&lio_dev->glist_head[iq_no]);\n-\t\t\trte_spinlock_unlock(&lio_dev->glist_lock[iq_no]);\n-\t\t\tif (g == NULL) {\n-\t\t\t\tPMD_TX_LOG(lio_dev, ERR,\n-\t\t\t\t\t   \"Transmit scatter gather: glist null!\\n\");\n-\t\t\t\tgoto xmit_failed;\n-\t\t\t}\n-\n-\t\t\tcmdsetup.s.gather = 1;\n-\t\t\tcmdsetup.s.u.gatherptrs = m->nb_segs;\n-\t\t\tlio_prepare_pci_cmd(lio_dev, &ndata.cmd,\n-\t\t\t\t\t    &cmdsetup, tag);\n-\n-\t\t\tmemset(g->sg, 0, g->sg_size);\n-\t\t\tg->sg[0].ptr[0] = rte_mbuf_data_iova(m);\n-\t\t\tlio_add_sg_size(&g->sg[0], m->data_len, 0);\n-\t\t\tpkt_len = m->data_len;\n-\t\t\tfinfo->mbuf = m;\n-\n-\t\t\t/* First seg taken care above */\n-\t\t\tfrags = m->nb_segs - 1;\n-\t\t\ti = 1;\n-\t\t\tm = m->next;\n-\t\t\twhile (frags--) {\n-\t\t\t\tg->sg[(i >> 2)].ptr[(i & 3)] =\n-\t\t\t\t\t\trte_mbuf_data_iova(m);\n-\t\t\t\tlio_add_sg_size(&g->sg[(i >> 2)],\n-\t\t\t\t\t\tm->data_len, (i & 3));\n-\t\t\t\tpkt_len += m->data_len;\n-\t\t\t\ti++;\n-\t\t\t\tm = m->next;\n-\t\t\t}\n-\n-\t\t\tphyaddr = rte_mem_virt2iova(g->sg);\n-\t\t\tif (phyaddr == RTE_BAD_IOVA) {\n-\t\t\t\tPMD_TX_LOG(lio_dev, ERR, \"bad phys addr\\n\");\n-\t\t\t\tgoto xmit_failed;\n-\t\t\t}\n-\n-\t\t\tndata.cmd.cmd3.dptr = phyaddr;\n-\t\t\tndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;\n-\n-\t\t\tfinfo->g = g;\n-\t\t\tfinfo->lio_dev = lio_dev;\n-\t\t\tfinfo->iq_no = (uint64_t)iq_no;\n-\t\t\tndata.buf = finfo;\n-\t\t}\n-\n-\t\tndata.datasize = pkt_len;\n-\n-\t\tstatus = lio_send_data_pkt(lio_dev, &ndata);\n-\n-\t\tif (unlikely(status == LIO_IQ_SEND_FAILED)) {\n-\t\t\tPMD_TX_LOG(lio_dev, ERR, \"send failed\\n\");\n-\t\t\tbreak;\n-\t\t}\n-\n-\t\tif (unlikely(status == LIO_IQ_SEND_STOP)) {\n-\t\t\tPMD_TX_LOG(lio_dev, DEBUG, \"iq full\\n\");\n-\t\t\t/* create space as iq is full */\n-\t\t\tlio_dev_cleanup_iq(lio_dev, iq_no);\n-\t\t}\n-\n-\t\tstats->tx_done++;\n-\t\tstats->tx_tot_bytes += 
pkt_len;\n-\t\tprocessed++;\n-\t}\n-\n-xmit_failed:\n-\tstats->tx_dropped += (nb_pkts - processed);\n-\n-\treturn processed;\n-}\n-\n-void\n-lio_dev_clear_queues(struct rte_eth_dev *eth_dev)\n-{\n-\tstruct lio_instr_queue *txq;\n-\tstruct lio_droq *rxq;\n-\tuint16_t i;\n-\n-\tfor (i = 0; i < eth_dev->data->nb_tx_queues; i++) {\n-\t\ttxq = eth_dev->data->tx_queues[i];\n-\t\tif (txq != NULL) {\n-\t\t\tlio_dev_tx_queue_release(eth_dev, i);\n-\t\t\teth_dev->data->tx_queues[i] = NULL;\n-\t\t}\n-\t}\n-\n-\tfor (i = 0; i < eth_dev->data->nb_rx_queues; i++) {\n-\t\trxq = eth_dev->data->rx_queues[i];\n-\t\tif (rxq != NULL) {\n-\t\t\tlio_dev_rx_queue_release(eth_dev, i);\n-\t\t\teth_dev->data->rx_queues[i] = NULL;\n-\t\t}\n-\t}\n-}\ndiff --git a/drivers/net/liquidio/lio_rxtx.h b/drivers/net/liquidio/lio_rxtx.h\ndeleted file mode 100644\nindex d2a45104f0..0000000000\n--- a/drivers/net/liquidio/lio_rxtx.h\n+++ /dev/null\n@@ -1,740 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#ifndef _LIO_RXTX_H_\n-#define _LIO_RXTX_H_\n-\n-#include <stdio.h>\n-#include <stdint.h>\n-\n-#include <rte_spinlock.h>\n-#include <rte_memory.h>\n-\n-#include \"lio_struct.h\"\n-\n-#ifndef ROUNDUP4\n-#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)\n-#endif\n-\n-#define LIO_STQUEUE_FIRST_ENTRY(ptr, type, elem)\t\\\n-\t(type *)((char *)((ptr)->stqh_first) - offsetof(type, elem))\n-\n-#define lio_check_timeout(cur_time, chk_time) ((cur_time) > (chk_time))\n-\n-#define lio_uptime\t\t\\\n-\t(size_t)(rte_get_timer_cycles() / rte_get_timer_hz())\n-\n-/** Descriptor format.\n- *  The descriptor ring is made of descriptors which have 2 64-bit values:\n- *  -# Physical (bus) address of the data buffer.\n- *  -# Physical (bus) address of a lio_droq_info structure.\n- *  The device DMA's incoming packets and its information at the address\n- *  given by these descriptor fields.\n- */\n-struct lio_droq_desc {\n-\t/** The buffer pointer */\n-\tuint64_t buffer_ptr;\n-\n-\t/** The Info pointer */\n-\tuint64_t info_ptr;\n-};\n-\n-#define LIO_DROQ_DESC_SIZE\t(sizeof(struct lio_droq_desc))\n-\n-/** Information about packet DMA'ed by Octeon.\n- *  The format of the information available at Info Pointer after Octeon\n- *  has posted a packet. Not all descriptors have valid information. Only\n- *  the Info field of the first descriptor for a packet has information\n- *  about the packet.\n- */\n-struct lio_droq_info {\n-\t/** The Output Receive Header. */\n-\tunion octeon_rh rh;\n-\n-\t/** The Length of the packet. */\n-\tuint64_t length;\n-};\n-\n-#define LIO_DROQ_INFO_SIZE\t(sizeof(struct lio_droq_info))\n-\n-/** Pointer to data buffer.\n- *  Driver keeps a pointer to the data buffer that it made available to\n- *  the Octeon device. Since the descriptor ring keeps physical (bus)\n- *  addresses, this field is required for the driver to keep track of\n- *  the virtual address pointers.\n- */\n-struct lio_recv_buffer {\n-\t/** Packet buffer, including meta data. */\n-\tvoid *buffer;\n-\n-\t/** Data in the packet buffer. 
*/\n-\tuint8_t *data;\n-\n-};\n-\n-#define LIO_DROQ_RECVBUF_SIZE\t(sizeof(struct lio_recv_buffer))\n-\n-#define LIO_DROQ_SIZE\t\t(sizeof(struct lio_droq))\n-\n-#define LIO_IQ_SEND_OK\t\t0\n-#define LIO_IQ_SEND_STOP\t1\n-#define LIO_IQ_SEND_FAILED\t-1\n-\n-/* conditions */\n-#define LIO_REQTYPE_NONE\t\t0\n-#define LIO_REQTYPE_NORESP_NET\t\t1\n-#define LIO_REQTYPE_NORESP_NET_SG\t2\n-#define LIO_REQTYPE_SOFT_COMMAND\t3\n-\n-struct lio_request_list {\n-\tuint32_t reqtype;\n-\tvoid *buf;\n-};\n-\n-/*----------------------  INSTRUCTION FORMAT ----------------------------*/\n-\n-struct lio_instr3_64B {\n-\t/** Pointer where the input data is available. */\n-\tuint64_t dptr;\n-\n-\t/** Instruction Header. */\n-\tuint64_t ih3;\n-\n-\t/** Instruction Header. */\n-\tuint64_t pki_ih3;\n-\n-\t/** Input Request Header. */\n-\tuint64_t irh;\n-\n-\t/** opcode/subcode specific parameters */\n-\tuint64_t ossp[2];\n-\n-\t/** Return Data Parameters */\n-\tuint64_t rdp;\n-\n-\t/** Pointer where the response for a RAW mode packet will be written\n-\t *  by Octeon.\n-\t */\n-\tuint64_t rptr;\n-\n-};\n-\n-union lio_instr_64B {\n-\tstruct lio_instr3_64B cmd3;\n-};\n-\n-/** The size of each buffer in soft command buffer pool */\n-#define LIO_SOFT_COMMAND_BUFFER_SIZE\t1536\n-\n-/** Maximum number of buffers to allocate into soft command buffer pool */\n-#define LIO_MAX_SOFT_COMMAND_BUFFERS\t255\n-\n-struct lio_soft_command {\n-\t/** Soft command buffer info. */\n-\tstruct lio_stailq_node node;\n-\tuint64_t dma_addr;\n-\tuint32_t size;\n-\n-\t/** Command and return status */\n-\tunion lio_instr_64B cmd;\n-\n-#define LIO_COMPLETION_WORD_INIT\t0xffffffffffffffffULL\n-\tuint64_t *status_word;\n-\n-\t/** Data buffer info */\n-\tvoid *virtdptr;\n-\tuint64_t dmadptr;\n-\tuint32_t datasize;\n-\n-\t/** Return buffer info */\n-\tvoid *virtrptr;\n-\tuint64_t dmarptr;\n-\tuint32_t rdatasize;\n-\n-\t/** Context buffer info */\n-\tvoid *ctxptr;\n-\tuint32_t ctxsize;\n-\n-\t/** Time out and callback */\n-\tsize_t wait_time;\n-\tsize_t timeout;\n-\tuint32_t iq_no;\n-\tvoid (*callback)(uint32_t, void *);\n-\tvoid *callback_arg;\n-\tstruct rte_mbuf *mbuf;\n-};\n-\n-struct lio_iq_post_status {\n-\tint status;\n-\tint index;\n-};\n-\n-/*   wqe\n- *  ---------------  0\n- * |  wqe  word0-3 |\n- *  ---------------  32\n- * |    PCI IH     |\n- *  ---------------  40\n- * |     RPTR      |\n- *  ---------------  48\n- * |    PCI IRH    |\n- *  ---------------  56\n- * |    OCTEON_CMD |\n- *  ---------------  64\n- * | Addtl 8-BData |\n- * |               |\n- *  ---------------\n- */\n-\n-union octeon_cmd {\n-\tuint64_t cmd64;\n-\n-\tstruct\t{\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\t\tuint64_t cmd : 5;\n-\n-\t\tuint64_t more : 6; /* How many udd words follow the command */\n-\n-\t\tuint64_t reserved : 29;\n-\n-\t\tuint64_t param1 : 16;\n-\n-\t\tuint64_t param2 : 8;\n-\n-#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-\n-\t\tuint64_t param2 : 8;\n-\n-\t\tuint64_t param1 : 16;\n-\n-\t\tuint64_t reserved : 29;\n-\n-\t\tuint64_t more : 6;\n-\n-\t\tuint64_t cmd : 5;\n-\n-#endif\n-\t} s;\n-};\n-\n-#define OCTEON_CMD_SIZE (sizeof(union octeon_cmd))\n-\n-/* Maximum number of 8-byte words can be\n- * sent in a NIC control message.\n- */\n-#define LIO_MAX_NCTRL_UDD\t32\n-\n-/* Structure of control information passed by driver to the BASE\n- * layer when sending control commands to Octeon device software.\n- */\n-struct lio_ctrl_pkt {\n-\t/** Command to be passed to the Octeon device software. 
*/\n-\tunion octeon_cmd ncmd;\n-\n-\t/** Send buffer */\n-\tvoid *data;\n-\tuint64_t dmadata;\n-\n-\t/** Response buffer */\n-\tvoid *rdata;\n-\tuint64_t dmardata;\n-\n-\t/** Additional data that may be needed by some commands. */\n-\tuint64_t udd[LIO_MAX_NCTRL_UDD];\n-\n-\t/** Input queue to use to send this command. */\n-\tuint64_t iq_no;\n-\n-\t/** Time to wait for Octeon software to respond to this control command.\n-\t *  If wait_time is 0, BASE assumes no response is expected.\n-\t */\n-\tsize_t wait_time;\n-\n-\tstruct lio_dev_ctrl_cmd *ctrl_cmd;\n-};\n-\n-/** Structure of data information passed by driver to the BASE\n- *  layer when forwarding data to Octeon device software.\n- */\n-struct lio_data_pkt {\n-\t/** Pointer to information maintained by NIC module for this packet. The\n-\t *  BASE layer passes this as-is to the driver.\n-\t */\n-\tvoid *buf;\n-\n-\t/** Type of buffer passed in \"buf\" above. */\n-\tuint32_t reqtype;\n-\n-\t/** Total data bytes to be transferred in this command. */\n-\tuint32_t datasize;\n-\n-\t/** Command to be passed to the Octeon device software. */\n-\tunion lio_instr_64B cmd;\n-\n-\t/** Input queue to use to send this command. */\n-\tuint32_t q_no;\n-};\n-\n-/** Structure passed by driver to BASE layer to prepare a command to send\n- *  network data to Octeon.\n- */\n-union lio_cmd_setup {\n-\tstruct {\n-\t\tuint32_t iq_no : 8;\n-\t\tuint32_t gather : 1;\n-\t\tuint32_t timestamp : 1;\n-\t\tuint32_t ip_csum : 1;\n-\t\tuint32_t transport_csum : 1;\n-\t\tuint32_t tnl_csum : 1;\n-\t\tuint32_t rsvd : 19;\n-\n-\t\tunion {\n-\t\t\tuint32_t datasize;\n-\t\t\tuint32_t gatherptrs;\n-\t\t} u;\n-\t} s;\n-\n-\tuint64_t cmd_setup64;\n-};\n-\n-/* Instruction Header */\n-struct octeon_instr_ih3 {\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\n-\t/** Reserved3 */\n-\tuint64_t reserved3 : 1;\n-\n-\t/** Gather indicator 1=gather*/\n-\tuint64_t gather : 1;\n-\n-\t/** Data length OR no. of entries in gather list */\n-\tuint64_t dlengsz : 14;\n-\n-\t/** Front Data size */\n-\tuint64_t fsz : 6;\n-\n-\t/** Reserved2 */\n-\tuint64_t reserved2 : 4;\n-\n-\t/** PKI port kind - PKIND */\n-\tuint64_t pkind : 6;\n-\n-\t/** Reserved1 */\n-\tuint64_t reserved1 : 32;\n-\n-#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-\t/** Reserved1 */\n-\tuint64_t reserved1 : 32;\n-\n-\t/** PKI port kind - PKIND */\n-\tuint64_t pkind : 6;\n-\n-\t/** Reserved2 */\n-\tuint64_t reserved2 : 4;\n-\n-\t/** Front Data size */\n-\tuint64_t fsz : 6;\n-\n-\t/** Data length OR no. 
of entries in gather list */\n-\tuint64_t dlengsz : 14;\n-\n-\t/** Gather indicator 1=gather*/\n-\tuint64_t gather : 1;\n-\n-\t/** Reserved3 */\n-\tuint64_t reserved3 : 1;\n-\n-#endif\n-};\n-\n-/* PKI Instruction Header(PKI IH) */\n-struct octeon_instr_pki_ih3 {\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\n-\t/** Wider bit */\n-\tuint64_t w : 1;\n-\n-\t/** Raw mode indicator 1 = RAW */\n-\tuint64_t raw : 1;\n-\n-\t/** Use Tag */\n-\tuint64_t utag : 1;\n-\n-\t/** Use QPG */\n-\tuint64_t uqpg : 1;\n-\n-\t/** Reserved2 */\n-\tuint64_t reserved2 : 1;\n-\n-\t/** Parse Mode */\n-\tuint64_t pm : 3;\n-\n-\t/** Skip Length */\n-\tuint64_t sl : 8;\n-\n-\t/** Use Tag Type */\n-\tuint64_t utt : 1;\n-\n-\t/** Tag type */\n-\tuint64_t tagtype : 2;\n-\n-\t/** Reserved1 */\n-\tuint64_t reserved1 : 2;\n-\n-\t/** QPG Value */\n-\tuint64_t qpg : 11;\n-\n-\t/** Tag Value */\n-\tuint64_t tag : 32;\n-\n-#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-\n-\t/** Tag Value */\n-\tuint64_t tag : 32;\n-\n-\t/** QPG Value */\n-\tuint64_t qpg : 11;\n-\n-\t/** Reserved1 */\n-\tuint64_t reserved1 : 2;\n-\n-\t/** Tag type */\n-\tuint64_t tagtype : 2;\n-\n-\t/** Use Tag Type */\n-\tuint64_t utt : 1;\n-\n-\t/** Skip Length */\n-\tuint64_t sl : 8;\n-\n-\t/** Parse Mode */\n-\tuint64_t pm : 3;\n-\n-\t/** Reserved2 */\n-\tuint64_t reserved2 : 1;\n-\n-\t/** Use QPG */\n-\tuint64_t uqpg : 1;\n-\n-\t/** Use Tag */\n-\tuint64_t utag : 1;\n-\n-\t/** Raw mode indicator 1 = RAW */\n-\tuint64_t raw : 1;\n-\n-\t/** Wider bit */\n-\tuint64_t w : 1;\n-#endif\n-};\n-\n-/** Input Request Header */\n-struct octeon_instr_irh {\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\tuint64_t opcode : 4;\n-\tuint64_t rflag : 1;\n-\tuint64_t subcode : 7;\n-\tuint64_t vlan : 12;\n-\tuint64_t priority : 3;\n-\tuint64_t reserved : 5;\n-\tuint64_t ossp : 32; /* opcode/subcode specific parameters */\n-#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-\tuint64_t ossp : 32; /* opcode/subcode specific parameters */\n-\tuint64_t reserved : 5;\n-\tuint64_t priority : 3;\n-\tuint64_t vlan : 12;\n-\tuint64_t subcode : 7;\n-\tuint64_t rflag : 1;\n-\tuint64_t opcode : 4;\n-#endif\n-};\n-\n-/* pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */\n-#define OCTEON_SOFT_CMD_RESP_IH3\t(40 + 8)\n-/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */\n-#define OCTEON_PCI_CMD_O3\t\t(24 + 8)\n-\n-/** Return Data Parameters */\n-struct octeon_instr_rdp {\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\tuint64_t reserved : 49;\n-\tuint64_t pcie_port : 3;\n-\tuint64_t rlen : 12;\n-#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-\tuint64_t rlen : 12;\n-\tuint64_t pcie_port : 3;\n-\tuint64_t reserved : 49;\n-#endif\n-};\n-\n-union octeon_packet_params {\n-\tuint32_t pkt_params32;\n-\tstruct {\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\t\tuint32_t reserved : 24;\n-\t\tuint32_t ip_csum : 1; /* Perform IP header checksum(s) */\n-\t\t/* Perform Outer transport header checksum */\n-\t\tuint32_t transport_csum : 1;\n-\t\t/* Find tunnel, and perform transport csum. 
*/\n-\t\tuint32_t tnl_csum : 1;\n-\t\tuint32_t tsflag : 1;   /* Timestamp this packet */\n-\t\tuint32_t ipsec_ops : 4; /* IPsec operation */\n-#else\n-\t\tuint32_t ipsec_ops : 4;\n-\t\tuint32_t tsflag : 1;\n-\t\tuint32_t tnl_csum : 1;\n-\t\tuint32_t transport_csum : 1;\n-\t\tuint32_t ip_csum : 1;\n-\t\tuint32_t reserved : 7;\n-#endif\n-\t} s;\n-};\n-\n-/** Utility function to prepare a 64B NIC instruction based on a setup command\n- * @param cmd - pointer to instruction to be filled in.\n- * @param setup - pointer to the setup structure\n- * @param q_no - which queue for back pressure\n- *\n- * Assumes the cmd instruction is pre-allocated, but no fields are filled in.\n- */\n-static inline void\n-lio_prepare_pci_cmd(struct lio_device *lio_dev,\n-\t\t    union lio_instr_64B *cmd,\n-\t\t    union lio_cmd_setup *setup,\n-\t\t    uint32_t tag)\n-{\n-\tunion octeon_packet_params packet_params;\n-\tstruct octeon_instr_pki_ih3 *pki_ih3;\n-\tstruct octeon_instr_irh *irh;\n-\tstruct octeon_instr_ih3 *ih3;\n-\tint port;\n-\n-\tmemset(cmd, 0, sizeof(union lio_instr_64B));\n-\n-\tih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;\n-\tpki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;\n-\n-\t/* assume that rflag is cleared so therefore front data will only have\n-\t * irh and ossp[1] and ossp[2] for a total of 24 bytes\n-\t */\n-\tih3->pkind = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.pkind;\n-\t/* PKI IH */\n-\tih3->fsz = OCTEON_PCI_CMD_O3;\n-\n-\tif (!setup->s.gather) {\n-\t\tih3->dlengsz = setup->s.u.datasize;\n-\t} else {\n-\t\tih3->gather = 1;\n-\t\tih3->dlengsz = setup->s.u.gatherptrs;\n-\t}\n-\n-\tpki_ih3->w = 1;\n-\tpki_ih3->raw = 0;\n-\tpki_ih3->utag = 0;\n-\tpki_ih3->utt = 1;\n-\tpki_ih3->uqpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;\n-\n-\tport = (int)lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.port;\n-\n-\tif (tag)\n-\t\tpki_ih3->tag = tag;\n-\telse\n-\t\tpki_ih3->tag = LIO_DATA(port);\n-\n-\tpki_ih3->tagtype = OCTEON_ORDERED_TAG;\n-\tpki_ih3->qpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.qpg;\n-\tpki_ih3->pm = 0x0; /* parse from L2 */\n-\tpki_ih3->sl = 32;  /* sl will be sizeof(pki_ih3) + irh + ossp0 + ossp1*/\n-\n-\tirh = (struct octeon_instr_irh *)&cmd->cmd3.irh;\n-\n-\tirh->opcode = LIO_OPCODE;\n-\tirh->subcode = LIO_OPCODE_NW_DATA;\n-\n-\tpacket_params.pkt_params32 = 0;\n-\tpacket_params.s.ip_csum = setup->s.ip_csum;\n-\tpacket_params.s.transport_csum = setup->s.transport_csum;\n-\tpacket_params.s.tnl_csum = setup->s.tnl_csum;\n-\tpacket_params.s.tsflag = setup->s.timestamp;\n-\n-\tirh->ossp = packet_params.pkt_params32;\n-}\n-\n-int lio_setup_sc_buffer_pool(struct lio_device *lio_dev);\n-void lio_free_sc_buffer_pool(struct lio_device *lio_dev);\n-\n-struct lio_soft_command *\n-lio_alloc_soft_command(struct lio_device *lio_dev,\n-\t\t       uint32_t datasize, uint32_t rdatasize,\n-\t\t       uint32_t ctxsize);\n-void lio_prepare_soft_command(struct lio_device *lio_dev,\n-\t\t\t      struct lio_soft_command *sc,\n-\t\t\t      uint8_t opcode, uint8_t subcode,\n-\t\t\t      uint32_t irh_ossp, uint64_t ossp0,\n-\t\t\t      uint64_t ossp1);\n-int lio_send_soft_command(struct lio_device *lio_dev,\n-\t\t\t  struct lio_soft_command *sc);\n-void lio_free_soft_command(struct lio_soft_command *sc);\n-\n-/** Send control packet to the device\n- *  @param lio_dev - lio device pointer\n- *  @param nctrl   - control structure with command, timeout, and callback info\n- *\n- *  @returns IQ_FAILED if it failed to add to the input queue. 
IQ_STOP if it the\n- *  queue should be stopped, and LIO_IQ_SEND_OK if it sent okay.\n- */\n-int lio_send_ctrl_pkt(struct lio_device *lio_dev,\n-\t\t      struct lio_ctrl_pkt *ctrl_pkt);\n-\n-/** Maximum ordered requests to process in every invocation of\n- *  lio_process_ordered_list(). The function will continue to process requests\n- *  as long as it can find one that has finished processing. If it keeps\n- *  finding requests that have completed, the function can run for ever. The\n- *  value defined here sets an upper limit on the number of requests it can\n- *  process before it returns control to the poll thread.\n- */\n-#define LIO_MAX_ORD_REQS_TO_PROCESS\t4096\n-\n-/** Error codes used in Octeon Host-Core communication.\n- *\n- *   31\t\t16 15\t\t0\n- *   ----------------------------\n- * |\t\t|\t\t|\n- *   ----------------------------\n- *   Error codes are 32-bit wide. The upper 16-bits, called Major Error Number,\n- *   are reserved to identify the group to which the error code belongs. The\n- *   lower 16-bits, called Minor Error Number, carry the actual code.\n- *\n- *   So error codes are (MAJOR NUMBER << 16)| MINOR_NUMBER.\n- */\n-/** Status for a request.\n- *  If the request is successfully queued, the driver will return\n- *  a LIO_REQUEST_PENDING status. LIO_REQUEST_TIMEOUT is only returned by\n- *  the driver if the response for request failed to arrive before a\n- *  time-out period or if the request processing * got interrupted due to\n- *  a signal respectively.\n- */\n-enum {\n-\t/** A value of 0x00000000 indicates no error i.e. success */\n-\tLIO_REQUEST_DONE\t= 0x00000000,\n-\t/** (Major number: 0x0000; Minor Number: 0x0001) */\n-\tLIO_REQUEST_PENDING\t= 0x00000001,\n-\tLIO_REQUEST_TIMEOUT\t= 0x00000003,\n-\n-};\n-\n-/*------ Error codes used by firmware (bits 15..0 set by firmware */\n-#define LIO_FIRMWARE_MAJOR_ERROR_CODE\t 0x0001\n-#define LIO_FIRMWARE_STATUS_CODE(status) \\\n-\t((LIO_FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))\n-\n-/** Initialize the response lists. The number of response lists to create is\n- *  given by count.\n- *  @param lio_dev - the lio device structure.\n- */\n-void lio_setup_response_list(struct lio_device *lio_dev);\n-\n-/** Check the status of first entry in the ordered list. 
If the instruction at\n- *  that entry finished processing or has timed-out, the entry is cleaned.\n- *  @param lio_dev - the lio device structure.\n- *  @return 1 if the ordered list is empty, 0 otherwise.\n- */\n-int lio_process_ordered_list(struct lio_device *lio_dev);\n-\n-#define LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, field, count)\t\\\n-\t(((lio_dev)->instr_queue[iq_no]->stats.field) += count)\n-\n-static inline void\n-lio_swap_8B_data(uint64_t *data, uint32_t blocks)\n-{\n-\twhile (blocks) {\n-\t\t*data = rte_cpu_to_be_64(*data);\n-\t\tblocks--;\n-\t\tdata++;\n-\t}\n-}\n-\n-static inline uint64_t\n-lio_map_ring(void *buf)\n-{\n-\trte_iova_t dma_addr;\n-\n-\tdma_addr = rte_mbuf_data_iova_default(((struct rte_mbuf *)buf));\n-\n-\treturn (uint64_t)dma_addr;\n-}\n-\n-static inline uint64_t\n-lio_map_ring_info(struct lio_droq *droq, uint32_t i)\n-{\n-\trte_iova_t dma_addr;\n-\n-\tdma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE);\n-\n-\treturn (uint64_t)dma_addr;\n-}\n-\n-static inline int\n-lio_opcode_slow_path(union octeon_rh *rh)\n-{\n-\tuint16_t subcode1, subcode2;\n-\n-\tsubcode1 = LIO_OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode);\n-\tsubcode2 = LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_NW_DATA);\n-\n-\treturn subcode2 != subcode1;\n-}\n-\n-static inline void\n-lio_add_sg_size(struct lio_sg_entry *sg_entry,\n-\t\tuint16_t size, uint32_t pos)\n-{\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\tsg_entry->u.size[pos] = size;\n-#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-\tsg_entry->u.size[3 - pos] = size;\n-#endif\n-}\n-\n-/* Macro to increment index.\n- * Index is incremented by count; if the sum exceeds\n- * max, index is wrapped-around to the start.\n- */\n-static inline uint32_t\n-lio_incr_index(uint32_t index, uint32_t count, uint32_t max)\n-{\n-\tif ((index + count) >= max)\n-\t\tindex = index + count - max;\n-\telse\n-\t\tindex += count;\n-\n-\treturn index;\n-}\n-\n-int lio_setup_droq(struct lio_device *lio_dev, int q_no, int num_descs,\n-\t\t   int desc_size, struct rte_mempool *mpool,\n-\t\t   unsigned int socket_id);\n-uint16_t lio_dev_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n-\t\t\t   uint16_t budget);\n-void lio_delete_droq_queue(struct lio_device *lio_dev, int oq_no);\n-\n-void lio_delete_sglist(struct lio_instr_queue *txq);\n-int lio_setup_sglists(struct lio_device *lio_dev, int iq_no,\n-\t\t      int fw_mapped_iq, int num_descs, unsigned int socket_id);\n-uint16_t lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts,\n-\t\t\t   uint16_t nb_pkts);\n-int lio_wait_for_instr_fetch(struct lio_device *lio_dev);\n-int lio_setup_iq(struct lio_device *lio_dev, int q_index,\n-\t\t union octeon_txpciq iq_no, uint32_t num_descs, void *app_ctx,\n-\t\t unsigned int socket_id);\n-int lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq);\n-void lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no);\n-/** Setup instruction queue zero for the device\n- *  @param lio_dev which lio device to setup\n- *\n- *  @return 0 if success. 
-1 if fails\n- */\n-int lio_setup_instr_queue0(struct lio_device *lio_dev);\n-void lio_free_instr_queue0(struct lio_device *lio_dev);\n-void lio_dev_clear_queues(struct rte_eth_dev *eth_dev);\n-#endif\t/* _LIO_RXTX_H_ */\ndiff --git a/drivers/net/liquidio/lio_struct.h b/drivers/net/liquidio/lio_struct.h\ndeleted file mode 100644\nindex 10270c560e..0000000000\n--- a/drivers/net/liquidio/lio_struct.h\n+++ /dev/null\n@@ -1,661 +0,0 @@\n-/* SPDX-License-Identifier: BSD-3-Clause\n- * Copyright(c) 2017 Cavium, Inc\n- */\n-\n-#ifndef _LIO_STRUCT_H_\n-#define _LIO_STRUCT_H_\n-\n-#include <stdio.h>\n-#include <stdint.h>\n-#include <sys/queue.h>\n-\n-#include <rte_spinlock.h>\n-#include <rte_atomic.h>\n-\n-#include \"lio_hw_defs.h\"\n-\n-struct lio_stailq_node {\n-\tSTAILQ_ENTRY(lio_stailq_node) entries;\n-};\n-\n-STAILQ_HEAD(lio_stailq_head, lio_stailq_node);\n-\n-struct lio_version {\n-\tuint16_t major;\n-\tuint16_t minor;\n-\tuint16_t micro;\n-\tuint16_t reserved;\n-};\n-\n-/** Input Queue statistics. Each input queue has four stats fields. */\n-struct lio_iq_stats {\n-\tuint64_t instr_posted; /**< Instructions posted to this queue. */\n-\tuint64_t instr_processed; /**< Instructions processed in this queue. */\n-\tuint64_t instr_dropped; /**< Instructions that could not be processed */\n-\tuint64_t bytes_sent; /**< Bytes sent through this queue. */\n-\tuint64_t tx_done; /**< Num of packets sent to network. */\n-\tuint64_t tx_iq_busy; /**< Num of times this iq was found to be full. */\n-\tuint64_t tx_dropped; /**< Num of pkts dropped due to xmitpath errors. */\n-\tuint64_t tx_tot_bytes; /**< Total count of bytes sent to network. */\n-};\n-\n-/** Output Queue statistics. Each output queue has four stats fields. */\n-struct lio_droq_stats {\n-\t/** Number of packets received in this queue. */\n-\tuint64_t pkts_received;\n-\n-\t/** Bytes received by this queue. */\n-\tuint64_t bytes_received;\n-\n-\t/** Packets dropped due to no memory available. */\n-\tuint64_t dropped_nomem;\n-\n-\t/** Packets dropped due to large number of pkts to process. */\n-\tuint64_t dropped_toomany;\n-\n-\t/** Number of packets  sent to stack from this queue. */\n-\tuint64_t rx_pkts_received;\n-\n-\t/** Number of Bytes sent to stack from this queue. */\n-\tuint64_t rx_bytes_received;\n-\n-\t/** Num of Packets dropped due to receive path failures. */\n-\tuint64_t rx_dropped;\n-\n-\t/** Num of vxlan packets received; */\n-\tuint64_t rx_vxlan;\n-\n-\t/** Num of failures of rte_pktmbuf_alloc() */\n-\tuint64_t rx_alloc_failure;\n-\n-};\n-\n-/** The Descriptor Ring Output Queue structure.\n- *  This structure has all the information required to implement a\n- *  DROQ.\n- */\n-struct lio_droq {\n-\t/** A spinlock to protect access to this ring. */\n-\trte_spinlock_t lock;\n-\n-\tuint32_t q_no;\n-\n-\tuint32_t pkt_count;\n-\n-\tstruct lio_device *lio_dev;\n-\n-\t/** The 8B aligned descriptor ring starts at this address. */\n-\tstruct lio_droq_desc *desc_ring;\n-\n-\t/** Index in the ring where the driver should read the next packet */\n-\tuint32_t read_idx;\n-\n-\t/** Index in the ring where Octeon will write the next packet */\n-\tuint32_t write_idx;\n-\n-\t/** Index in the ring where the driver will refill the descriptor's\n-\t * buffer\n-\t */\n-\tuint32_t refill_idx;\n-\n-\t/** Packets pending to be processed */\n-\trte_atomic64_t pkts_pending;\n-\n-\t/** Number of  descriptors in this ring. */\n-\tuint32_t nb_desc;\n-\n-\t/** The number of descriptors pending refill. 
*/\n-\tuint32_t refill_count;\n-\n-\tuint32_t refill_threshold;\n-\n-\t/** The 8B aligned info ptrs begin from this address. */\n-\tstruct lio_droq_info *info_list;\n-\n-\t/** The receive buffer list. This list has the virtual addresses of the\n-\t *  buffers.\n-\t */\n-\tstruct lio_recv_buffer *recv_buf_list;\n-\n-\t/** The size of each buffer pointed by the buffer pointer. */\n-\tuint32_t buffer_size;\n-\n-\t/** Pointer to the mapped packet credit register.\n-\t *  Host writes number of info/buffer ptrs available to this register\n-\t */\n-\tvoid *pkts_credit_reg;\n-\n-\t/** Pointer to the mapped packet sent register.\n-\t *  Octeon writes the number of packets DMA'ed to host memory\n-\t *  in this register.\n-\t */\n-\tvoid *pkts_sent_reg;\n-\n-\t/** Statistics for this DROQ. */\n-\tstruct lio_droq_stats stats;\n-\n-\t/** DMA mapped address of the DROQ descriptor ring. */\n-\tsize_t desc_ring_dma;\n-\n-\t/** Info ptr list are allocated at this virtual address. */\n-\tsize_t info_base_addr;\n-\n-\t/** DMA mapped address of the info list */\n-\tsize_t info_list_dma;\n-\n-\t/** Allocated size of info list. */\n-\tuint32_t info_alloc_size;\n-\n-\t/** Memory zone **/\n-\tconst struct rte_memzone *desc_ring_mz;\n-\tconst struct rte_memzone *info_mz;\n-\tstruct rte_mempool *mpool;\n-};\n-\n-/** Receive Header */\n-union octeon_rh {\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\tuint64_t rh64;\n-\tstruct\t{\n-\t\tuint64_t opcode : 4;\n-\t\tuint64_t subcode : 8;\n-\t\tuint64_t len : 3; /** additional 64-bit words */\n-\t\tuint64_t reserved : 17;\n-\t\tuint64_t ossp : 32; /** opcode/subcode specific parameters */\n-\t} r;\n-\tstruct\t{\n-\t\tuint64_t opcode : 4;\n-\t\tuint64_t subcode : 8;\n-\t\tuint64_t len : 3; /** additional 64-bit words */\n-\t\tuint64_t extra : 28;\n-\t\tuint64_t vlan : 12;\n-\t\tuint64_t priority : 3;\n-\t\tuint64_t csum_verified : 3; /** checksum verified. */\n-\t\tuint64_t has_hwtstamp : 1; /** Has hardware timestamp.1 = yes.*/\n-\t\tuint64_t encap_on : 1;\n-\t\tuint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */\n-\t} r_dh;\n-\tstruct {\n-\t\tuint64_t opcode : 4;\n-\t\tuint64_t subcode : 8;\n-\t\tuint64_t len : 3; /** additional 64-bit words */\n-\t\tuint64_t reserved : 8;\n-\t\tuint64_t extra : 25;\n-\t\tuint64_t gmxport : 16;\n-\t} r_nic_info;\n-#else\n-\tuint64_t rh64;\n-\tstruct {\n-\t\tuint64_t ossp : 32; /** opcode/subcode specific parameters */\n-\t\tuint64_t reserved : 17;\n-\t\tuint64_t len : 3; /** additional 64-bit words */\n-\t\tuint64_t subcode : 8;\n-\t\tuint64_t opcode : 4;\n-\t} r;\n-\tstruct {\n-\t\tuint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */\n-\t\tuint64_t encap_on : 1;\n-\t\tuint64_t has_hwtstamp : 1;  /** 1 = has hwtstamp */\n-\t\tuint64_t csum_verified : 3; /** checksum verified. 
*/\n-\t\tuint64_t priority : 3;\n-\t\tuint64_t vlan : 12;\n-\t\tuint64_t extra : 28;\n-\t\tuint64_t len : 3; /** additional 64-bit words */\n-\t\tuint64_t subcode : 8;\n-\t\tuint64_t opcode : 4;\n-\t} r_dh;\n-\tstruct {\n-\t\tuint64_t gmxport : 16;\n-\t\tuint64_t extra : 25;\n-\t\tuint64_t reserved : 8;\n-\t\tuint64_t len : 3; /** additional 64-bit words */\n-\t\tuint64_t subcode : 8;\n-\t\tuint64_t opcode : 4;\n-\t} r_nic_info;\n-#endif\n-};\n-\n-#define OCTEON_RH_SIZE (sizeof(union octeon_rh))\n-\n-/** The txpciq info passed to host from the firmware */\n-union octeon_txpciq {\n-\tuint64_t txpciq64;\n-\n-\tstruct {\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\t\tuint64_t q_no : 8;\n-\t\tuint64_t port : 8;\n-\t\tuint64_t pkind : 6;\n-\t\tuint64_t use_qpg : 1;\n-\t\tuint64_t qpg : 11;\n-\t\tuint64_t aura_num : 10;\n-\t\tuint64_t reserved : 20;\n-#else\n-\t\tuint64_t reserved : 20;\n-\t\tuint64_t aura_num : 10;\n-\t\tuint64_t qpg : 11;\n-\t\tuint64_t use_qpg : 1;\n-\t\tuint64_t pkind : 6;\n-\t\tuint64_t port : 8;\n-\t\tuint64_t q_no : 8;\n-#endif\n-\t} s;\n-};\n-\n-/** The instruction (input) queue.\n- *  The input queue is used to post raw (instruction) mode data or packet\n- *  data to Octeon device from the host. Each input queue for\n- *  a LIO device has one such structure to represent it.\n- */\n-struct lio_instr_queue {\n-\t/** A spinlock to protect access to the input ring.  */\n-\trte_spinlock_t lock;\n-\n-\trte_spinlock_t post_lock;\n-\n-\tstruct lio_device *lio_dev;\n-\n-\tuint32_t pkt_in_done;\n-\n-\trte_atomic64_t iq_flush_running;\n-\n-\t/** Flag that indicates if the queue uses 64 byte commands. */\n-\tuint32_t iqcmd_64B:1;\n-\n-\t/** Queue info. */\n-\tunion octeon_txpciq txpciq;\n-\n-\tuint32_t rsvd:17;\n-\n-\tuint32_t status:8;\n-\n-\t/** Number of  descriptors in this ring. */\n-\tuint32_t nb_desc;\n-\n-\t/** Index in input ring where the driver should write the next packet */\n-\tuint32_t host_write_index;\n-\n-\t/** Index in input ring where Octeon is expected to read the next\n-\t *  packet.\n-\t */\n-\tuint32_t lio_read_index;\n-\n-\t/** This index aids in finding the window in the queue where Octeon\n-\t *  has read the commands.\n-\t */\n-\tuint32_t flush_index;\n-\n-\t/** This field keeps track of the instructions pending in this queue. */\n-\trte_atomic64_t instr_pending;\n-\n-\t/** Pointer to the Virtual Base addr of the input ring. */\n-\tuint8_t *base_addr;\n-\n-\tstruct lio_request_list *request_list;\n-\n-\t/** Octeon doorbell register for the ring. */\n-\tvoid *doorbell_reg;\n-\n-\t/** Octeon instruction count register for this ring. */\n-\tvoid *inst_cnt_reg;\n-\n-\t/** Number of instructions pending to be posted to Octeon. */\n-\tuint32_t fill_cnt;\n-\n-\t/** Statistics for this input queue. */\n-\tstruct lio_iq_stats stats;\n-\n-\t/** DMA mapped base address of the input descriptor ring. */\n-\tuint64_t base_addr_dma;\n-\n-\t/** Application context */\n-\tvoid *app_ctx;\n-\n-\t/* network stack queue index */\n-\tint q_index;\n-\n-\t/* Memory zone */\n-\tconst struct rte_memzone *iq_mz;\n-};\n-\n-/** This structure is used by driver to store information required\n- *  to free the mbuff when the packet has been fetched by Octeon.\n- *  Bytes offset below assume worst-case of a 64-bit system.\n- */\n-struct lio_buf_free_info {\n-\t/** Bytes 1-8. Pointer to network device private structure. */\n-\tstruct lio_device *lio_dev;\n-\n-\t/** Bytes 9-16. Pointer to mbuff. */\n-\tstruct rte_mbuf *mbuf;\n-\n-\t/** Bytes 17-24. Pointer to gather list. 
*/\n-\tstruct lio_gather *g;\n-\n-\t/** Bytes 25-32. Physical address of mbuf->data or gather list. */\n-\tuint64_t dptr;\n-\n-\t/** Bytes 33-47. Piggybacked soft command, if any */\n-\tstruct lio_soft_command *sc;\n-\n-\t/** Bytes 48-63. iq no */\n-\tuint64_t iq_no;\n-};\n-\n-/* The Scatter-Gather List Entry. The scatter or gather component used with\n- * input instruction has this format.\n- */\n-struct lio_sg_entry {\n-\t/** The first 64 bit gives the size of data in each dptr. */\n-\tunion {\n-\t\tuint16_t size[4];\n-\t\tuint64_t size64;\n-\t} u;\n-\n-\t/** The 4 dptr pointers for this entry. */\n-\tuint64_t ptr[4];\n-};\n-\n-#define LIO_SG_ENTRY_SIZE\t(sizeof(struct lio_sg_entry))\n-\n-/** Structure of a node in list of gather components maintained by\n- *  driver for each network device.\n- */\n-struct lio_gather {\n-\t/** List manipulation. Next and prev pointers. */\n-\tstruct lio_stailq_node list;\n-\n-\t/** Size of the gather component at sg in bytes. */\n-\tint sg_size;\n-\n-\t/** Number of bytes that sg was adjusted to make it 8B-aligned. */\n-\tint adjust;\n-\n-\t/** Gather component that can accommodate max sized fragment list\n-\t *  received from the IP layer.\n-\t */\n-\tstruct lio_sg_entry *sg;\n-};\n-\n-struct lio_rss_ctx {\n-\tuint16_t hash_key_size;\n-\tuint8_t  hash_key[LIO_RSS_MAX_KEY_SZ];\n-\t/* Ideally a factor of number of queues */\n-\tuint8_t  itable[LIO_RSS_MAX_TABLE_SZ];\n-\tuint8_t  itable_size;\n-\tuint8_t  ip;\n-\tuint8_t  tcp_hash;\n-\tuint8_t  ipv6;\n-\tuint8_t  ipv6_tcp_hash;\n-\tuint8_t  ipv6_ex;\n-\tuint8_t  ipv6_tcp_ex_hash;\n-\tuint8_t  hash_disable;\n-};\n-\n-struct lio_io_enable {\n-\tuint64_t iq;\n-\tuint64_t oq;\n-\tuint64_t iq64B;\n-};\n-\n-struct lio_fn_list {\n-\tvoid (*setup_iq_regs)(struct lio_device *, uint32_t);\n-\tvoid (*setup_oq_regs)(struct lio_device *, uint32_t);\n-\n-\tint (*setup_mbox)(struct lio_device *);\n-\tvoid (*free_mbox)(struct lio_device *);\n-\n-\tint (*setup_device_regs)(struct lio_device *);\n-\tint (*enable_io_queues)(struct lio_device *);\n-\tvoid (*disable_io_queues)(struct lio_device *);\n-};\n-\n-struct lio_pf_vf_hs_word {\n-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n-\t/** PKIND value assigned for the DPI interface */\n-\tuint64_t pkind : 8;\n-\n-\t/** OCTEON core clock multiplier */\n-\tuint64_t core_tics_per_us : 16;\n-\n-\t/** OCTEON coprocessor clock multiplier */\n-\tuint64_t coproc_tics_per_us : 16;\n-\n-\t/** app that currently running on OCTEON */\n-\tuint64_t app_mode : 8;\n-\n-\t/** RESERVED */\n-\tuint64_t reserved : 16;\n-\n-#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\n-\t/** RESERVED */\n-\tuint64_t reserved : 16;\n-\n-\t/** app that currently running on OCTEON */\n-\tuint64_t app_mode : 8;\n-\n-\t/** OCTEON coprocessor clock multiplier */\n-\tuint64_t coproc_tics_per_us : 16;\n-\n-\t/** OCTEON core clock multiplier */\n-\tuint64_t core_tics_per_us : 16;\n-\n-\t/** PKIND value assigned for the DPI interface */\n-\tuint64_t pkind : 8;\n-#endif\n-};\n-\n-struct lio_sriov_info {\n-\t/** Number of rings assigned to VF */\n-\tuint32_t rings_per_vf;\n-\n-\t/** Number of VF devices enabled */\n-\tuint32_t num_vfs;\n-};\n-\n-/* Head of a response list */\n-struct lio_response_list {\n-\t/** List structure to add delete pending entries to */\n-\tstruct lio_stailq_head head;\n-\n-\t/** A lock for this response list */\n-\trte_spinlock_t lock;\n-\n-\trte_atomic64_t pending_req_count;\n-};\n-\n-/* Structure to define the configuration attributes for each Input queue. 
*/\n-struct lio_iq_config {\n-\t/* Max number of IQs available */\n-\tuint8_t max_iqs;\n-\n-\t/** Pending list size (usually set to the sum of the size of all Input\n-\t *  queues)\n-\t */\n-\tuint32_t pending_list_size;\n-\n-\t/** Command size - 32 or 64 bytes */\n-\tuint32_t instr_type;\n-};\n-\n-/* Structure to define the configuration attributes for each Output queue. */\n-struct lio_oq_config {\n-\t/* Max number of OQs available */\n-\tuint8_t max_oqs;\n-\n-\t/** If set, the Output queue uses info-pointer mode. (Default: 1 ) */\n-\tuint32_t info_ptr;\n-\n-\t/** The number of buffers that were consumed during packet processing by\n-\t *  the driver on this Output queue before the driver attempts to\n-\t *  replenish the descriptor ring with new buffers.\n-\t */\n-\tuint32_t refill_threshold;\n-};\n-\n-/* Structure to define the configuration. */\n-struct lio_config {\n-\tuint16_t card_type;\n-\tconst char *card_name;\n-\n-\t/** Input Queue attributes. */\n-\tstruct lio_iq_config iq;\n-\n-\t/** Output Queue attributes. */\n-\tstruct lio_oq_config oq;\n-\n-\tint num_nic_ports;\n-\n-\tint num_def_tx_descs;\n-\n-\t/* Num of desc for rx rings */\n-\tint num_def_rx_descs;\n-\n-\tint def_rx_buf_size;\n-};\n-\n-/** Status of a RGMII Link on Octeon as seen by core driver. */\n-union octeon_link_status {\n-\tuint64_t link_status64;\n-\n-\tstruct {\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\t\tuint64_t duplex : 8;\n-\t\tuint64_t mtu : 16;\n-\t\tuint64_t speed : 16;\n-\t\tuint64_t link_up : 1;\n-\t\tuint64_t autoneg : 1;\n-\t\tuint64_t if_mode : 5;\n-\t\tuint64_t pause : 1;\n-\t\tuint64_t flashing : 1;\n-\t\tuint64_t reserved : 15;\n-#else\n-\t\tuint64_t reserved : 15;\n-\t\tuint64_t flashing : 1;\n-\t\tuint64_t pause : 1;\n-\t\tuint64_t if_mode : 5;\n-\t\tuint64_t autoneg : 1;\n-\t\tuint64_t link_up : 1;\n-\t\tuint64_t speed : 16;\n-\t\tuint64_t mtu : 16;\n-\t\tuint64_t duplex : 8;\n-#endif\n-\t} s;\n-};\n-\n-/** The rxpciq info passed to host from the firmware */\n-union octeon_rxpciq {\n-\tuint64_t rxpciq64;\n-\n-\tstruct {\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\t\tuint64_t q_no : 8;\n-\t\tuint64_t reserved : 56;\n-#else\n-\t\tuint64_t reserved : 56;\n-\t\tuint64_t q_no : 8;\n-#endif\n-\t} s;\n-};\n-\n-/** Information for a OCTEON ethernet interface shared between core & host. */\n-struct octeon_link_info {\n-\tunion octeon_link_status link;\n-\tuint64_t hw_addr;\n-\n-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN\n-\tuint64_t gmxport : 16;\n-\tuint64_t macaddr_is_admin_assigned : 1;\n-\tuint64_t vlan_is_admin_assigned : 1;\n-\tuint64_t rsvd : 30;\n-\tuint64_t num_txpciq : 8;\n-\tuint64_t num_rxpciq : 8;\n-#else\n-\tuint64_t num_rxpciq : 8;\n-\tuint64_t num_txpciq : 8;\n-\tuint64_t rsvd : 30;\n-\tuint64_t vlan_is_admin_assigned : 1;\n-\tuint64_t macaddr_is_admin_assigned : 1;\n-\tuint64_t gmxport : 16;\n-#endif\n-\n-\tunion octeon_txpciq txpciq[LIO_MAX_IOQS_PER_IF];\n-\tunion octeon_rxpciq rxpciq[LIO_MAX_IOQS_PER_IF];\n-};\n-\n-/* -----------------------  THE LIO DEVICE  --------------------------- */\n-/** The lio device.\n- *  Each lio device has this structure to represent all its\n- *  components.\n- */\n-struct lio_device {\n-\t/** PCI device pointer */\n-\tstruct rte_pci_device *pci_dev;\n-\n-\t/** Octeon Chip type */\n-\tuint16_t chip_id;\n-\tuint16_t pf_num;\n-\tuint16_t vf_num;\n-\n-\t/** This device's PCIe port used for traffic. 
*/\n-\tuint16_t pcie_port;\n-\n-\t/** The state of this device */\n-\trte_atomic64_t status;\n-\n-\tuint8_t intf_open;\n-\n-\tstruct octeon_link_info linfo;\n-\n-\tuint8_t *hw_addr;\n-\n-\tstruct lio_fn_list fn_list;\n-\n-\tuint32_t num_iqs;\n-\n-\t/** Guards each glist */\n-\trte_spinlock_t *glist_lock;\n-\t/** Array of gather component linked lists */\n-\tstruct lio_stailq_head *glist_head;\n-\n-\t/* The pool containing pre allocated buffers used for soft commands */\n-\tstruct rte_mempool *sc_buf_pool;\n-\n-\t/** The input instruction queues */\n-\tstruct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES];\n-\n-\t/** The singly-linked tail queues of instruction response */\n-\tstruct lio_response_list response_list;\n-\n-\tuint32_t num_oqs;\n-\n-\t/** The DROQ output queues  */\n-\tstruct lio_droq *droq[LIO_MAX_POSSIBLE_OUTPUT_QUEUES];\n-\n-\tstruct lio_io_enable io_qmask;\n-\n-\tstruct lio_sriov_info sriov_info;\n-\n-\tstruct lio_pf_vf_hs_word pfvf_hsword;\n-\n-\t/** Mail Box details of each lio queue. */\n-\tstruct lio_mbox **mbox;\n-\n-\tchar dev_string[LIO_DEVICE_NAME_LEN]; /* Device print string */\n-\n-\tconst struct lio_config *default_config;\n-\n-\tstruct rte_eth_dev      *eth_dev;\n-\n-\tuint64_t ifflags;\n-\tuint8_t max_rx_queues;\n-\tuint8_t max_tx_queues;\n-\tuint8_t nb_rx_queues;\n-\tuint8_t nb_tx_queues;\n-\tuint8_t port_configured;\n-\tstruct lio_rss_ctx rss_state;\n-\tuint16_t port_id;\n-\tchar firmware_version[LIO_FW_VERSION_LENGTH];\n-};\n-#endif /* _LIO_STRUCT_H_ */\ndiff --git a/drivers/net/liquidio/meson.build b/drivers/net/liquidio/meson.build\ndeleted file mode 100644\nindex ebadbf3dea..0000000000\n--- a/drivers/net/liquidio/meson.build\n+++ /dev/null\n@@ -1,16 +0,0 @@\n-# SPDX-License-Identifier: BSD-3-Clause\n-# Copyright(c) 2018 Intel Corporation\n-\n-if is_windows\n-    build = false\n-    reason = 'not supported on Windows'\n-    subdir_done()\n-endif\n-\n-sources = files(\n-        'base/lio_23xx_vf.c',\n-        'base/lio_mbox.c',\n-        'lio_ethdev.c',\n-        'lio_rxtx.c',\n-)\n-includes += include_directories('base')\ndiff --git a/drivers/net/meson.build b/drivers/net/meson.build\nindex b1df17ce8c..f68bbc27a7 100644\n--- a/drivers/net/meson.build\n+++ b/drivers/net/meson.build\n@@ -36,7 +36,6 @@ drivers = [\n         'ipn3ke',\n         'ixgbe',\n         'kni',\n-        'liquidio',\n         'mana',\n         'memif',\n         'mlx4',\n",
    "prefixes": []
}