@@ -280,6 +280,14 @@ M: Evgeny Schemeilin <evgenys@amazon.com>
F: drivers/net/ena/
F: doc/guides/nics/ena.rst
+Atomic Rules ark
+M: Shepard Siegel <shepard.siegel@atomicrules.com>
+M: Ed Czeck <ed.czeck@atomicrules.com>
+M: John Miller <john.miller@atomicrules.com>
+F: drivers/net/ark/
+F: doc/guides/nics/ark.rst
+F: doc/guides/nics/features/ark.ini
+
Broadcom bnxt
M: Stephen Hurd <stephen.hurd@broadcom.com>
M: Ajit Khaparde <ajit.khaparde@broadcom.com>
@@ -348,6 +348,16 @@ CONFIG_RTE_LIBRTE_QEDE_FW=""
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n
#
+# Compile ARK PMD
+#
+CONFIG_RTE_LIBRTE_ARK_PMD=y
+CONFIG_RTE_LIBRTE_ARK_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_ARK_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS=n
+CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE=n
+
+
+#
# Compile the TAP PMD
# It is enabled by default for Linux only.
#
@@ -61,6 +61,7 @@ CONFIG_RTE_SCHED_VECTOR=n
# cannot use those on ARM
CONFIG_RTE_KNI_KMOD=n
+CONFIG_RTE_LIBRTE_ARK_PMD=n
CONFIG_RTE_LIBRTE_EM_PMD=n
CONFIG_RTE_LIBRTE_IGB_PMD=n
CONFIG_RTE_LIBRTE_CXGBE_PMD=n
@@ -48,6 +48,7 @@ CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
# Note: Initially, all of the PMD drivers compilation are turned off on Power
# Will turn on them only after the successful testing on Power
+CONFIG_RTE_LIBRTE_ARK_PMD=n
CONFIG_RTE_LIBRTE_IXGBE_PMD=n
CONFIG_RTE_LIBRTE_I40E_PMD=n
CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
new file mode 100644
@@ -0,0 +1,238 @@
+.. BSD LICENSE
+
+ Copyright (c) 2015-2017 Atomic Rules LLC
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Atomic Rules LLC nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ARK Poll Mode Driver
+====================
+
+The ARK PMD is a DPDK poll-mode driver for the Atomic Rules Arkville
+(ARK) family of devices.
+
+More information can be found at the `Atomic Rules website
+<http://atomicrules.com>`_.
+
+Overview
+--------
+
+The Atomic Rules Arkville product is a DPDK and AXI compliant product
+that marshals packets across a PCIe conduit between host DPDK mbufs and
+FPGA AXI streams.
+
+The ARK PMD, and the spirit of the overall Arkville product,
+has been to take the DPDK API/ABI as a fixed specification;
+then implement much of the business logic in FPGA RTL circuits.
+The approach of *working backwards* from the DPDK API/ABI and having
+the GPP host software *dictate*, while the FPGA hardware *copes*,
+results in significant performance gains over a naive implementation.
+
+While this document describes the ARK PMD software, it is helpful to
+understand what the FPGA hardware is and is not. The Arkville RTL
+component provides a single PCIe Physical Function (PF) supporting
+some number of RX/Ingress and TX/Egress Queues. The ARK PMD controls
+the Arkville core through a dedicated opaque Core BAR (CBAR).
+To allow users full freedom for their own FPGA application IP,
+an independent FPGA Application BAR (ABAR) is provided.
+
+One popular way to imagine Arkville's FPGA hardware aspect is as the
+FPGA PCIe-facing side of a so-called Smart NIC. The Arkville core does
+not contain any MACs, and is link-speed independent, as well as
+agnostic to the number of physical ports the application chooses to
+use. The ARK driver exposes the familiar PMD interface to allow packet
+movement to and from mbufs across multiple queues.
+
+However FPGA RTL applications could contain a universe of added
+functionality that an Arkville RTL core does not provide or can
+not anticipate. To allow for this expectation of user-defined
+innovation, the ARK PMD provides a dynamic mechanism of adding
+capabilities without having to modify the ARK PMD.
+
+The ARK PMD is intended to support all instances of the Arkville
+RTL Core, regardless of configuration, FPGA vendor, or target
+board. While specific capabilities such as number of physical
+hardware queue-pairs are negotiated; the driver is designed to
+remain constant over a broad and extendable feature set.
+
+Intentionally, Arkville by itself DOES NOT provide common NIC
+capabilities such as offload or receive-side scaling (RSS).
+These capabilities would be viewed as a gate-level "tax" on
+Green-box FPGA applications that do not require such function.
+Instead, they can be added as needed with essentially no
+overhead to the FPGA Application.
+
+Data Path Interface
+-------------------
+
+Ingress RX and Egress TX operation is by the nominal DPDK API.
+The driver supports single-port, multi-queue for both RX and TX.
+
+Refer to ``ark_ethdev.h`` for the list of supported methods to
+act upon RX and TX Queues.
+
+Configuration Information
+-------------------------
+
+**DPDK Configuration Parameters**
+
+ The following configuration options are available for the ARK PMD:
+
+ * **CONFIG_RTE_LIBRTE_ARK_PMD** (default y): Enables or disables inclusion
+ of the ARK PMD driver in the DPDK compilation.
+
+ * **CONFIG_RTE_LIBRTE_ARK_DEBUG_RX** (default n): Enables or disables debug
+ logging and internal checking of RX ingress logic within the ARK PMD driver.
+
+ * **CONFIG_RTE_LIBRTE_ARK_DEBUG_TX** (default n): Enables or disables debug
+ logging and internal checking of TX egress logic within the ARK PMD driver.
+
+ * **CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS** (default n): Enables or disables debug
+ logging of detailed packet and performance statistics gathered in
+ the PMD and FPGA.
+
+ * **CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE** (default n): Enables or disables debug
+ logging of detailed PMD events and status.
+
+
+Building DPDK
+-------------
+
+See the :ref:`DPDK Getting Started Guide for Linux <linux_gsg>` for
+instructions on how to build DPDK.
+
+By default the ARK PMD library will be built into the DPDK library.
+
+For configuring and using UIO and VFIO frameworks, please also refer to :ref:`the
+documentation that comes with DPDK suite <linux_gsg>`.
+
+Supported ARK RTL PCIe Instances
+--------------------------------
+
+ARK PMD supports the following Arkville RTL PCIe instances:
+
+* ``1d6c:100d`` - AR-ARKA-FX0 [Arkville 32B DPDK Data Mover]
+* ``1d6c:100e`` - AR-ARKA-FX1 [Arkville 64B DPDK Data Mover]
+
+Supported Operating Systems
+---------------------------
+
+Any Linux distribution fulfilling the conditions described in ``System Requirements``
+section of :ref:`the DPDK documentation <linux_gsg>` or refer to *DPDK Release Notes*.
+
+Supported Features
+------------------
+
+* Dynamic ARK PMD extensions
+* Multiple receive and transmit queues
+* Jumbo frames up to 9K
+* Hardware Statistics
+
+Unsupported Features
+--------------------
+
+Features that may be part of, or become part of, the Arkville RTL IP that are
+not currently supported or exposed by the ARK PMD include:
+
+* PCIe SR-IOV Virtual Functions (VFs)
+* Arkville's Packet Generator Control and Status
+* Arkville's Packet Director Control and Status
+* Arkville's Packet Checker Control and Status
+* Arkville's Timebase Management
+
+Pre-Requisites
+--------------
+
+#. Prepare the system as recommended by DPDK suite. This includes environment
+ variables, hugepages configuration, tool-chains and configuration
+
+#. Insert igb_uio kernel module using the command 'modprobe igb_uio'
+
+#. Bind the intended ARK device to igb_uio module
+
+At this point the system should be ready to run DPDK applications. Once the
+application runs to completion, the ARK PMD can be detached from igb_uio if necessary.
+
+Usage Example
+-------------
+
+This section demonstrates how to launch **testpmd** with Atomic Rules ARK
+devices managed by librte_pmd_ark.
+
+#. Load the kernel modules:
+
+ .. code-block:: console
+
+ modprobe uio
+ insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+
+ .. note::
+
+ The ARK PMD driver depends upon the igb_uio user space I/O kernel module
+
+#. Mount and request huge pages:
+
+ .. code-block:: console
+
+ mount -t hugetlbfs nodev /mnt/huge
+ echo 256 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
+
+#. Bind UIO driver to ARK device at 0000:01:00.0 (using dpdk-devbind.py):
+
+ .. code-block:: console
+
+ ./usertools/dpdk-devbind.py --bind=igb_uio 0000:01:00.0
+
+ .. note::
+
+      The last argument to dpdk-devbind.py is the 4-tuple that identifies a specific PCIe
+      device. You can use lspci -d 1d6c: to identify all Atomic Rules devices in the system,
+      and thus determine the correct 4-tuple argument to dpdk-devbind.py
+
+#. Start testpmd with basic parameters:
+
+ .. code-block:: console
+
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 -- -i
+
+ Example output:
+
+ .. code-block:: console
+
+ [...]
+ EAL: PCI device 0000:01:00.0 on NUMA socket -1
+ EAL: probe driver: 1d6c:100e rte_ark_pmd
+ EAL: PCI memory mapped at 0x7f9b6c400000
+ PMD: eth_ark_dev_init(): Initializing 0:2:0.1
+ ARKP PMD CommitID: 378f3a67
+ Configuring Port 0 (socket 0)
+ Port 0: DC:3C:F6:00:00:01
+ Checking link statuses...
+ Port 0 Link Up - speed 100000 Mbps - full-duplex
+ Done
+ testpmd>
+
new file mode 100644
@@ -0,0 +1,15 @@
+;
+; Supported features of the 'ark' poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Queue start/stop = Y
+Jumbo frame = Y
+Scattered Rx = Y
+Basic stats = Y
+Stats per queue = Y
+FW version = Y
+Linux UIO = Y
+x86-64 = Y
+Usage doc = Y
@@ -36,6 +36,7 @@ Network Interface Controller Drivers
:numbered:
overview
+ ark
bnx2x
bnxt
cxgbe
@@ -32,6 +32,7 @@
include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
+DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
new file mode 100644
@@ -0,0 +1,73 @@
+# BSD LICENSE
+#
+# Copyright (c) 2015-2017 Atomic Rules LLC
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ark.a
+
+CFLAGS += -O3 -I./
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_ark_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD)
+#
+
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktgen.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktchkr.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_mpu.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ddm.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_udm.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_rqp.c
+
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += lib/librte_kvargs
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += lib/libpthread
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += lib/libdl
+
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
new file mode 100644
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_debug.h"
+#include "ark_ddm.h"
+
+/* ************************************************************************* */
+int
+ark_ddm_verify(struct ark_ddm_t *ddm)
+{
+	if (sizeof(struct ark_ddm_t) != ARK_DDM_EXPECTED_SIZE) {
+		fprintf(stderr, "  DDM structure looks incorrect %#x vs %#zx\n",
+			ARK_DDM_EXPECTED_SIZE, sizeof(struct ark_ddm_t));
+		return -1;
+	}
+
+	if (ddm->cfg.const0 != ARK_DDM_CONST) {
+		fprintf(stderr, "  DDM module not found as expected 0x%08x\n",
+			ddm->cfg.const0);
+		return -1;
+	}
+	return 0;
+}
+
+void
+ark_ddm_start(struct ark_ddm_t *ddm)
+{
+ ddm->cfg.command = 1;
+}
+
+int
+ark_ddm_stop(struct ark_ddm_t *ddm, const int wait)
+{
+ int cnt = 0;
+
+ ddm->cfg.command = 2;
+ while (wait && (ddm->cfg.stopFlushed & 0x01) == 0) {
+ if (cnt++ > 1000)
+ return 1;
+
+ usleep(10);
+ }
+ return 0;
+}
+
+void
+ark_ddm_reset(struct ark_ddm_t *ddm)
+{
+ int status;
+
+ /* reset only works if ddm has stopped properly. */
+ status = ark_ddm_stop(ddm, 1);
+
+ if (status != 0) {
+ ARK_DEBUG_TRACE("ARKP: %s stop failed doing forced reset\n",
+ __func__);
+ ddm->cfg.command = 4;
+ usleep(10);
+ }
+ ddm->cfg.command = 3;
+}
+
+void
+ark_ddm_setup(struct ark_ddm_t *ddm, phys_addr_t consAddr, uint32_t interval)
+{
+ ddm->setup.consWriteIndexAddr = consAddr;
+ ddm->setup.writeIndexInterval = interval / 4; /* 4 ns period */
+}
+
+void
+ark_ddm_stats_reset(struct ark_ddm_t *ddm)
+{
+ ddm->cfg.tlpStatsClear = 1;
+}
+
+void
+ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg)
+{
+ ARK_DEBUG_TRACE("ARKP DDM Dump: %s Stopped: %d\n", msg,
+ ark_ddm_is_stopped(ddm)
+ );
+}
+
+void
+ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg)
+{
+ struct ark_ddm_stats_t *stats = &ddm->stats;
+
+ ARK_DEBUG_STATS("ARKP DDM Stats: %s"
+ FMT_SU64 FMT_SU64 FMT_SU64
+ "\n", msg,
+ "Bytes:", stats->txByteCount,
+ "Packets:", stats->txPktCount, "MBufs", stats->txMbufCount);
+}
+
+int
+ark_ddm_is_stopped(struct ark_ddm_t *ddm)
+{
+ return (ddm->cfg.stopFlushed & 0x01) != 0;
+}
+
+uint64_t
+ark_ddm_queue_byte_count(struct ark_ddm_t *ddm)
+{
+ return ddm->queue_stats.byteCount;
+}
+
+uint64_t
+ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm)
+{
+ return ddm->queue_stats.pktCount;
+}
+
+void
+ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm)
+{
+ ddm->queue_stats.byteCount = 1;
+}
new file mode 100644
@@ -0,0 +1,154 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_DDM_H_
+#define _ARK_DDM_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* DDM core hardware structures */
+#define ARK_DDM_CFG 0x0000
+#define ARK_DDM_CONST 0xfacecafe
+struct ark_ddm_cfg_t {
+ uint32_t r0;
+ volatile uint32_t tlpStatsClear;
+ uint32_t const0;
+ volatile uint32_t tag_max;
+ volatile uint32_t command;
+ volatile uint32_t stopFlushed;
+};
+
+#define ARK_DDM_STATS 0x0020
+struct ark_ddm_stats_t {
+ volatile uint64_t txByteCount;
+ volatile uint64_t txPktCount;
+ volatile uint64_t txMbufCount;
+};
+
+#define ARK_DDM_MRDQ 0x0040
+struct ark_ddm_mrdq_t {
+ volatile uint32_t mrd_q1;
+ volatile uint32_t mrd_q2;
+ volatile uint32_t mrd_q3;
+ volatile uint32_t mrd_q4;
+ volatile uint32_t mrd_full;
+};
+
+#define ARK_DDM_CPLDQ 0x0068
+struct ark_ddm_cpldq_t {
+ volatile uint32_t cpld_q1;
+ volatile uint32_t cpld_q2;
+ volatile uint32_t cpld_q3;
+ volatile uint32_t cpld_q4;
+ volatile uint32_t cpld_full;
+};
+
+#define ARK_DDM_MRD_PS 0x0090
+struct ark_ddm_mrd_ps_t {
+ volatile uint32_t mrd_ps_min;
+ volatile uint32_t mrd_ps_max;
+ volatile uint32_t mrd_full_ps_min;
+ volatile uint32_t mrd_full_ps_max;
+ volatile uint32_t mrd_dw_ps_min;
+ volatile uint32_t mrd_dw_ps_max;
+};
+
+#define ARK_DDM_QUEUE_STATS 0x00a8
+struct ark_ddm_qstats_t {
+ volatile uint64_t byteCount;
+ volatile uint64_t pktCount;
+ volatile uint64_t mbufCount;
+};
+
+#define ARK_DDM_CPLD_PS 0x00c0
+struct ark_ddm_cpld_ps_t {
+ volatile uint32_t cpld_ps_min;
+ volatile uint32_t cpld_ps_max;
+ volatile uint32_t cpld_full_ps_min;
+ volatile uint32_t cpld_full_ps_max;
+ volatile uint32_t cpld_dw_ps_min;
+ volatile uint32_t cpld_dw_ps_max;
+};
+
+#define ARK_DDM_SETUP 0x00e0
+struct ark_ddm_setup_t {
+ phys_addr_t consWriteIndexAddr;
+ uint32_t writeIndexInterval; /* 4ns each */
+ volatile uint32_t consIndex;
+};
+
+/* Consolidated structure */
+struct ark_ddm_t {
+ struct ark_ddm_cfg_t cfg;
+ uint8_t reserved0[(ARK_DDM_STATS - ARK_DDM_CFG) -
+ sizeof(struct ark_ddm_cfg_t)];
+ struct ark_ddm_stats_t stats;
+ uint8_t reserved1[(ARK_DDM_MRDQ - ARK_DDM_STATS) -
+ sizeof(struct ark_ddm_stats_t)];
+ struct ark_ddm_mrdq_t mrdq;
+ uint8_t reserved2[(ARK_DDM_CPLDQ - ARK_DDM_MRDQ) -
+ sizeof(struct ark_ddm_mrdq_t)];
+ struct ark_ddm_cpldq_t cpldq;
+ uint8_t reserved3[(ARK_DDM_MRD_PS - ARK_DDM_CPLDQ) -
+ sizeof(struct ark_ddm_cpldq_t)];
+ struct ark_ddm_mrd_ps_t mrd_ps;
+ struct ark_ddm_qstats_t queue_stats;
+ struct ark_ddm_cpld_ps_t cpld_ps;
+ uint8_t reserved5[(ARK_DDM_SETUP - ARK_DDM_CPLD_PS) -
+ sizeof(struct ark_ddm_cpld_ps_t)];
+ struct ark_ddm_setup_t setup;
+ uint8_t reservedP[(256 - ARK_DDM_SETUP)
+ - sizeof(struct ark_ddm_setup_t)];
+};
+
+#define ARK_DDM_EXPECTED_SIZE 256
+#define ARK_DDM_QOFFSET ARK_DDM_EXPECTED_SIZE
+
+/* DDM function prototype */
+int ark_ddm_verify(struct ark_ddm_t *ddm);
+void ark_ddm_start(struct ark_ddm_t *ddm);
+int ark_ddm_stop(struct ark_ddm_t *ddm, const int wait);
+void ark_ddm_reset(struct ark_ddm_t *ddm);
+void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
+void ark_ddm_setup(struct ark_ddm_t *ddm, phys_addr_t consAddr,
+ uint32_t interval);
+void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
+void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
+int ark_ddm_is_stopped(struct ark_ddm_t *ddm);
+uint64_t ark_ddm_queue_byte_count(struct ark_ddm_t *ddm);
+uint64_t ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm);
+void ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm);
+
+#endif
new file mode 100644
@@ -0,0 +1,72 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_DEBUG_H_
+#define _ARK_DEBUG_H_
+
+#include <rte_log.h>
+
+/* Format specifiers for string data pairs */
+#define FMT_SU32 "\n\t%-20s %'20u"
+#define FMT_SU64 "\n\t%-20s %'20lu"
+#define FMT_SPTR "\n\t%-20s %20p"
+
+#define ARK_TRACE_ON(fmt, ...) \
+ fprintf(stderr, fmt, ##__VA_ARGS__)
+
+#define ARK_TRACE_OFF(fmt, ...) \
+ do {if (0) fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
+
+/* Debug macro for reporting Packet stats */
+#ifdef RTE_LIBRTE_ARK_DEBUG_STATS
+#define ARK_DEBUG_STATS(fmt, ...) ARK_TRACE_ON(fmt, ##__VA_ARGS__)
+#else
+#define ARK_DEBUG_STATS(fmt, ...) ARK_TRACE_OFF(fmt, ##__VA_ARGS__)
+#endif
+
+/* Debug macro for tracing full behavior*/
+#ifdef RTE_LIBRTE_ARK_DEBUG_TRACE
+#define ARK_DEBUG_TRACE(fmt, ...) ARK_TRACE_ON(fmt, ##__VA_ARGS__)
+#else
+#define ARK_DEBUG_TRACE(fmt, ...) ARK_TRACE_OFF(fmt, ##__VA_ARGS__)
+#endif
+
+#ifdef ARK_STD_LOG
+#define PMD_DRV_LOG(level, fmt, args...) \
+ fprintf(stderr, fmt, args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,982 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <sys/stat.h>
+#include <dlfcn.h>
+
+#include <rte_kvargs.h>
+#include <rte_vdev.h>
+
+#include "ark_global.h"
+#include "ark_debug.h"
+#include "ark_ethdev.h"
+#include "ark_mpu.h"
+#include "ark_ddm.h"
+#include "ark_udm.h"
+#include "ark_rqp.h"
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+#include "ark_pktchkr.h"
+
+/* Internal prototypes */
+static int eth_ark_check_args(const char *params);
+static int eth_ark_dev_init(struct rte_eth_dev *dev);
+static int ark_config_device(struct rte_eth_dev *dev);
+static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev);
+static int eth_ark_dev_configure(struct rte_eth_dev *dev);
+static int eth_ark_dev_start(struct rte_eth_dev *dev);
+static void eth_ark_dev_stop(struct rte_eth_dev *dev);
+static void eth_ark_dev_close(struct rte_eth_dev *dev);
+static void eth_ark_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
+static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
+static void eth_ark_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
+static void eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static void eth_ark_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr, uint32_t index, uint32_t pool);
+static void eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
+
+#define ARK_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
+#define ARK_MAX_ARG_LEN 256
+/* Device-args state shared between parsing helpers.
+ * NOTE(review): these are file-scope singletons, so concurrent init of
+ * multiple ARK devices would race on them -- confirm single-init assumption.
+ */
+static uint32_t pktDirV;
+static char pktGenArgs[ARK_MAX_ARG_LEN];
+static char pktChkrArgs[ARK_MAX_ARG_LEN];
+
+/* Recognized devargs keys */
+#define ARK_PKTGEN_ARG "PktGen"
+#define ARK_PKTCHKR_ARG "PktChkr"
+#define ARK_PKTDIR_ARG "PktDir"
+
+static const char *valid_arguments[] = {
+ ARK_PKTGEN_ARG,
+ ARK_PKTCHKR_ARG,
+ ARK_PKTDIR_ARG,
+ "iface",
+ NULL
+};
+
+#define MAX_ARK_PHYS 16
+/* Global adapter table; only gark[0] is populated by eth_ark_dev_init */
+struct ark_adapter *gark[MAX_ARK_PHYS];
+
+/* Supported PCI ids: Atomic Rules vendor 0x1d6c, devices 0x100d/0x100e */
+static const struct rte_pci_id pci_id_ark_map[] = {
+ {RTE_PCI_DEVICE(0x1d6c, 0x100d)},
+ {RTE_PCI_DEVICE(0x1d6c, 0x100e)},
+ {.vendor_id = 0, /* sentinel */ },
+};
+
+/* PCI ethdev driver glue: per-device private area is one ark_adapter */
+static struct eth_driver rte_ark_pmd = {
+ .pci_drv = {
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
+ .id_table = pci_id_ark_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC},
+ .eth_dev_init = eth_ark_dev_init,
+ .eth_dev_uninit = eth_ark_dev_uninit,
+ .dev_private_size = sizeof(struct ark_adapter),
+};
+
+/* ethdev operations table; RX/TX queue handlers live in ark_ethdev.h */
+static const struct eth_dev_ops ark_eth_dev_ops = {
+ .dev_configure = eth_ark_dev_configure,
+ .dev_start = eth_ark_dev_start,
+ .dev_stop = eth_ark_dev_stop,
+ .dev_close = eth_ark_dev_close,
+
+ .dev_infos_get = eth_ark_dev_info_get,
+
+ .rx_queue_setup = eth_ark_dev_rx_queue_setup,
+ .rx_queue_count = eth_ark_dev_rx_queue_count,
+ .tx_queue_setup = eth_ark_tx_queue_setup,
+
+ .link_update = eth_ark_dev_link_update,
+ .dev_set_link_up = eth_ark_dev_set_link_up,
+ .dev_set_link_down = eth_ark_dev_set_link_down,
+
+ .rx_queue_start = eth_ark_rx_start_queue,
+ .rx_queue_stop = eth_ark_rx_stop_queue,
+
+ .tx_queue_start = eth_ark_tx_queue_start,
+ .tx_queue_stop = eth_ark_tx_queue_stop,
+
+ .stats_get = eth_ark_dev_stats_get,
+ .stats_reset = eth_ark_dev_stats_reset,
+
+ .mac_addr_add = eth_ark_macaddr_add,
+ .mac_addr_remove = eth_ark_macaddr_remove,
+ .mac_addr_set = eth_ark_set_default_mac_addr,
+
+};
+
+/* Map an rte_eth_dev back to its port index within the adapter.
+ * Returns the index, or -1 when the device is not owned by this adapter.
+ */
+int
+ark_get_port_id(struct rte_eth_dev *dev, struct ark_adapter *ark)
+{
+	int idx;
+
+	/* Linear scan; the port count is small */
+	for (idx = 0; idx < ark->num_ports; idx++) {
+		if (ark->port[idx].eth_dev == dev)
+			return idx;
+	}
+	ARK_DEBUG_TRACE("ARK: Device is NOT associated with a port !!");
+	return -1;
+}
+
+/* Optionally load a user extension shared object named by the ARK_EXT_PATH
+ * environment variable and resolve its well-known entry points into
+ * ark->user_ext. Missing symbols simply resolve to NULL; callers test each
+ * pointer before use. Returns 0 on success or no-extension, -1 when the
+ * shared object cannot be loaded.
+ */
+static int
+check_for_ext(struct rte_eth_dev *dev __rte_unused,
+	struct ark_adapter *ark __rte_unused)
+{
+	/* Get the env */
+	const char *dllpath = getenv("ARK_EXT_PATH");
+
+	if (dllpath == NULL) {
+		ARK_DEBUG_TRACE("ARK EXT NO dll path specified \n");
+		return 0;
+	}
+	ARK_DEBUG_TRACE("ARK EXT found dll path at %s\n", dllpath);
+
+	/* Open and load the .so */
+	ark->dHandle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
+	if (ark->dHandle == NULL) {
+		PMD_DRV_LOG(ERR, "Could not load user extension %s \n", dllpath);
+		/* Bail out: the original fell through and called dlsym() on a
+		 * NULL handle, which searches the global namespace instead.
+		 */
+		return -1;
+	}
+	ARK_DEBUG_TRACE("SUCCESS: loaded user extension %s\n", dllpath);
+
+	/* Get the entry points */
+	ark->user_ext.dev_init =
+		(void *(*)(struct rte_eth_dev *, void *, int)) dlsym(ark->dHandle,
+		"dev_init");
+	ARK_DEBUG_TRACE("device ext init pointer = %p\n", ark->user_ext.dev_init);
+	ark->user_ext.dev_get_port_count =
+		(int (*)(struct rte_eth_dev *, void *)) dlsym(ark->dHandle,
+		"dev_get_port_count");
+	ark->user_ext.dev_uninit =
+		(void (*)(struct rte_eth_dev *, void *)) dlsym(ark->dHandle,
+		"dev_uninit");
+	ark->user_ext.dev_configure =
+		(int (*)(struct rte_eth_dev *, void *)) dlsym(ark->dHandle,
+		"dev_configure");
+	ark->user_ext.dev_start =
+		(int (*)(struct rte_eth_dev *, void *)) dlsym(ark->dHandle,
+		"dev_start");
+	ark->user_ext.dev_stop =
+		(void (*)(struct rte_eth_dev *, void *)) dlsym(ark->dHandle,
+		"dev_stop");
+	ark->user_ext.dev_close =
+		(void (*)(struct rte_eth_dev *, void *)) dlsym(ark->dHandle,
+		"dev_close");
+	ark->user_ext.link_update =
+		(int (*)(struct rte_eth_dev *, int, void *)) dlsym(ark->dHandle,
+		"link_update");
+	ark->user_ext.dev_set_link_up =
+		(int (*)(struct rte_eth_dev *, void *)) dlsym(ark->dHandle,
+		"dev_set_link_up");
+	ark->user_ext.dev_set_link_down =
+		(int (*)(struct rte_eth_dev *, void *)) dlsym(ark->dHandle,
+		"dev_set_link_down");
+	ark->user_ext.stats_get =
+		(void (*)(struct rte_eth_dev *, struct rte_eth_stats *,
+			void *)) dlsym(ark->dHandle, "stats_get");
+	ark->user_ext.stats_reset =
+		(void (*)(struct rte_eth_dev *, void *)) dlsym(ark->dHandle,
+		"stats_reset");
+	ark->user_ext.mac_addr_add =
+		(void (*)(struct rte_eth_dev *, struct ether_addr *, uint32_t,
+			uint32_t, void *)) dlsym(ark->dHandle, "mac_addr_add");
+	ark->user_ext.mac_addr_remove =
+		(void (*)(struct rte_eth_dev *, uint32_t, void *)) dlsym(ark->dHandle,
+		"mac_addr_remove");
+	ark->user_ext.mac_addr_set =
+		(void (*)(struct rte_eth_dev *, struct ether_addr *,
+			void *)) dlsym(ark->dHandle, "mac_addr_set");
+
+	return 0;
+}
+
+/* Per-PCI-device init: map BARs, sanity-check HW, configure DDM/UDM, then
+ * allocate one ethdev per port reported by the optional user extension.
+ * Port 0 reuses the ethdev DPDK already allocated for the PCI device.
+ * Returns 0 on success, negative on failure.
+ */
+static int
+eth_ark_dev_init(struct rte_eth_dev *dev)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+	struct rte_pci_device *pci_dev;
+	int ret;
+
+	ark->eth_dev = dev;
+
+	ARK_DEBUG_TRACE("eth_ark_dev_init(struct rte_eth_dev *dev)");
+	gark[0] = ark;
+
+	/* Check to see if there is an extension that we need to load */
+	check_for_ext(dev, ark);
+	pci_dev = ARK_DEV_TO_PCI(dev);
+	rte_eth_copy_pci_info(dev, pci_dev);
+
+	if (pci_dev->device.devargs)
+		eth_ark_check_args(pci_dev->device.devargs->args);
+	else
+		PMD_DRV_LOG(INFO, "No Device args found\n");
+
+	/* Use dummy (noop) burst functions until the queues are set up.
+	 * Original text was mangled to "ð_..." (HTML entity of "&eth_...").
+	 */
+	dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
+	dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
+
+	ark->bar0 = (uint8_t *) pci_dev->mem_resource[0].addr;
+	ark->Abar = (uint8_t *) pci_dev->mem_resource[2].addr;
+
+	/* Carve BAR0 into the per-module register windows */
+	SetPtr(bar0, ark, sysctrl, ARK_SYSCTRL_BASE);
+	SetPtr(bar0, ark, mpurx, ARK_MPURx_BASE);
+	SetPtr(bar0, ark, udm, ARK_UDM_BASE);
+	SetPtr(bar0, ark, mputx, ARK_MPUTx_BASE);
+	SetPtr(bar0, ark, ddm, ARK_DDM_BASE);
+	SetPtr(bar0, ark, cmac, ARK_CMAC_BASE);
+	SetPtr(bar0, ark, external, ARK_EXTERNAL_BASE);
+	SetPtr(bar0, ark, pktdir, ARK_PKTDIR_BASE);
+	SetPtr(bar0, ark, pktgen, ARK_PKTGEN_BASE);
+	SetPtr(bar0, ark, pktchkr, ARK_PKTCHKR_BASE);
+
+	ark->rqpacing = (struct ark_rqpace_t *) (ark->bar0 + ARK_RCPACING_BASE);
+	ark->started = 0;
+
+	ARK_DEBUG_TRACE("Sys Ctrl Const = 0x%x DEV CommitID: %08x\n",
+		ark->sysctrl.t32[4], rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
+	PMD_DRV_LOG(INFO, "ARKP PMD CommitID: %08x\n",
+		rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
+
+	/* If HW sanity test fails, return an error */
+	if (ark->sysctrl.t32[4] != 0xcafef00d) {
+		PMD_DRV_LOG(ERR,
+			"HW Sanity test has failed, expected constant 0x%x, read 0x%x (%s)\n",
+			0xcafef00d, ark->sysctrl.t32[4], __func__);
+		return -1;
+	}
+	PMD_DRV_LOG(INFO,
+		"HW Sanity test has PASSED, expected constant 0x%x, read 0x%x (%s)\n",
+		0xcafef00d, ark->sysctrl.t32[4], __func__);
+
+	/* We are a single function multi-port device. */
+	const unsigned int numa_node = rte_socket_id();
+	/* Zero-init: the original copied an *uninitialized* struct into the
+	 * device MAC table (indeterminate value). A real address should be
+	 * read from HW -- TODO.
+	 */
+	struct ether_addr adr = { .addr_bytes = {0} };
+
+	ret = ark_config_device(dev);
+	if (ret)
+		return ret;
+	dev->dev_ops = &ark_eth_dev_ops;
+
+	dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
+	if (!dev->data->mac_addrs) {
+		PMD_DRV_LOG(ERR, "Failed to allocated memory for storing mac address");
+		return -1;
+	}
+	ether_addr_copy(&adr, &dev->data->mac_addrs[0]);
+
+	if (ark->user_ext.dev_init) {
+		ark->user_data = ark->user_ext.dev_init(dev, ark->Abar, 0);
+		if (!ark->user_data) {
+			PMD_DRV_LOG(INFO,
+				"Failed to initialize PMD extension !!, continuing without it\n");
+			memset(&ark->user_ext, 0, sizeof(struct ark_user_ext));
+			dlclose(ark->dHandle);
+		}
+	}
+
+	/* We will create additional devices based on the number of requested
+	 * ports */
+	int pc = 1;
+	int p;
+
+	if (ark->user_ext.dev_get_port_count) {
+		pc = ark->user_ext.dev_get_port_count(dev, ark->user_data);
+		ark->num_ports = pc;
+	} else {
+		ark->num_ports = 1;
+	}
+	for (p = 0; p < pc; p++) {
+		struct ark_port *port;
+
+		port = &ark->port[p];
+		struct rte_eth_dev_data *data = NULL;
+
+		port->id = p;
+
+		char name[RTE_ETH_NAME_MAX_LEN];
+
+		snprintf(name, sizeof(name), "arketh%d", dev->data->port_id + p);
+
+		if (p == 0) {
+			/* First port is already allocated by DPDK */
+			port->eth_dev = ark->eth_dev;
+			continue;
+		}
+
+		/* reserve an ethdev entry */
+		port->eth_dev = rte_eth_dev_allocate(name);
+		if (!port->eth_dev) {
+			PMD_DRV_LOG(ERR, "Could not allocate eth_dev for port %d\n", p);
+			goto error;
+		}
+
+		data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+		if (!data) {
+			PMD_DRV_LOG(ERR, "Could not allocate eth_dev for port %d\n", p);
+			goto error;
+		}
+		data->port_id = ark->eth_dev->data->port_id + p;
+		port->eth_dev->data = data;
+		port->eth_dev->device = &pci_dev->device;
+		port->eth_dev->data->dev_private = ark;
+		port->eth_dev->driver = ark->eth_dev->driver;
+		port->eth_dev->dev_ops = ark->eth_dev->dev_ops;
+		port->eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst;
+		port->eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;
+
+		rte_eth_copy_pci_info(port->eth_dev, pci_dev);
+
+		port->eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
+		if (!port->eth_dev->data->mac_addrs) {
+			PMD_DRV_LOG(ERR, "Memory allocation for MAC failed !, exiting\n");
+			goto error;
+		}
+		ether_addr_copy(&adr, &port->eth_dev->data->mac_addrs[0]);
+
+		if (ark->user_ext.dev_init)
+			ark->user_data = ark->user_ext.dev_init(dev, ark->Abar, p);
+	}
+
+	return 0;
+
+error:
+	rte_free(dev->data->mac_addrs);
+	dev->data->mac_addrs = NULL;
+
+	/* Start at 1: port 0 shares dev, whose data/mac_addrs are handled
+	 * above and owned by DPDK. Free mac_addrs *before* data (the
+	 * original freed data first and then dereferenced it), and skip
+	 * ports that were never allocated.
+	 */
+	for (p = 1; p < pc; p++) {
+		struct rte_eth_dev *port_dev = ark->port[p].eth_dev;
+
+		if (!port_dev || !port_dev->data)
+			continue;
+		rte_free(port_dev->data->mac_addrs);
+		rte_free(port_dev->data);
+		port_dev->data = NULL;
+	}
+
+	return -1;
+
+}
+
+/* Initial device configuration when device is opened
+ setup the DDM, and UDM
+ Called once per PCIE device
+*/
+static int
+ark_config_device(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+ uint16_t numQ, i;
+ struct ark_mpu_t *mpu;
+
+ /* Make sure that the packet director, generator and checker are in a
+ * known state */
+ ark->start_pg = 0;
+ ark->pg = ark_pmd_pktgen_init(ark->pktgen.v, 0, 1);
+ ark_pmd_pktgen_reset(ark->pg);
+ ark->pc = ark_pmd_pktchkr_init(ark->pktchkr.v, 0, 1);
+ ark_pmd_pktchkr_stop(ark->pc);
+ ark->pd = ark_pmd_pktdir_init(ark->pktdir.v);
+
+ /* Verify HW */
+ if (ark_udm_verify(ark->udm.v)) {
+ return -1;
+ }
+ if (ark_ddm_verify(ark->ddm.v)) {
+ return -1;
+ }
+
+ /* UDM */
+ if (ark_udm_reset(ark->udm.v)) {
+ PMD_DRV_LOG(ERR, "Unable to stop and reset UDM \n");
+ return -1;
+ }
+ /* Keep in reset until the MPU are cleared */
+
+ /* MPU reset */
+ mpu = ark->mpurx.v;
+ numQ = ark_api_num_queues(mpu);
+ ark->rxQueues = numQ;
+ for (i = 0; i < numQ; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_udm_stop(ark->udm.v, 0);
+ ark_udm_configure(ark->udm.v, RTE_PKTMBUF_HEADROOM,
+ RTE_MBUF_DEFAULT_DATAROOM, ARK_RX_WRITE_TIME_NS);
+ ark_udm_stats_reset(ark->udm.v);
+ ark_udm_stop(ark->udm.v, 0);
+
+ /* TX -- DDM */
+ if (ark_ddm_stop(ark->ddm.v, 1)) {
+ PMD_DRV_LOG(ERR, "Unable to stop DDM \n");
+ };
+
+ mpu = ark->mputx.v;
+ numQ = ark_api_num_queues(mpu);
+ ark->txQueues = numQ;
+ for (i = 0; i < numQ; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_ddm_reset(ark->ddm.v);
+ ark_ddm_stats_reset(ark->ddm.v);
+ /* ark_ddm_dump(ark->ddm.v, "Config"); */
+ /* ark_ddm_dump_stats(ark->ddm.v, "Config"); */
+
+ /* MPU reset */
+ ark_ddm_stop(ark->ddm.v, 0);
+ ark_rqp_stats_reset(ark->rqpacing);
+
+ return 0;
+}
+
+/* Tear down a device: notify the extension, stop PG/chk helpers, and
+ * release the data-plane hooks. Only runs in the primary process.
+ */
+static int
+eth_ark_dev_uninit(struct rte_eth_dev *dev)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	if (ark->user_ext.dev_uninit)
+		ark->user_ext.dev_uninit(dev, ark->user_data);
+
+	ark_pmd_pktgen_uninit(ark->pg);
+	ark_pmd_pktchkr_uninit(ark->pc);
+
+	dev->dev_ops = NULL;
+	dev->rx_pkt_burst = NULL;
+	dev->tx_pkt_burst = NULL;
+	/* rte_free(NULL) is a no-op; the original's NULL guards were
+	 * redundant. */
+	rte_free(dev->data->mac_addrs);
+	rte_free(dev->data);
+
+	return 0;
+}
+
+/* dev_configure hook: bring the link up and delegate to the extension.
+ * The original marked dev __rte_unused even though it is used below.
+ */
+static int
+eth_ark_dev_configure(struct rte_eth_dev *dev)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+
+	ARK_DEBUG_TRACE
+		("ARKP: In eth_ark_dev_configure(struct rte_eth_dev *dev)\n");
+
+	eth_ark_dev_set_link_up(dev);
+	if (ark->user_ext.dev_configure)
+		return ark->user_ext.dev_configure(dev, ark->user_data);
+	return 0;
+}
+
+/* pthread entry point: sleep briefly, then kick the packet generator.
+ * Used exclusively for regression testing -- the blind sleep gives the
+ * external test application time to set up before packets flow.
+ */
+static void *
+delay_pg_start(void *arg)
+{
+	struct ark_adapter *adapter = (struct ark_adapter *) arg;
+
+	usleep(100000);
+	ark_pmd_pktgen_run(adapter->pg);
+	return NULL;
+}
+
+/* dev_start hook: start UDM/DDM and all queues, then install the real
+ * burst functions (the originals were mangled to "ð_", i.e. the HTML
+ * entity of "&eth_", and would not compile).
+ */
+static int
+eth_ark_dev_start(struct rte_eth_dev *dev)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+	int i;
+
+	ARK_DEBUG_TRACE("ARKP: In eth_ark_dev_start\n");
+
+	/* RX Side */
+	/* start UDM */
+	ark_udm_start(ark->udm.v);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		eth_ark_rx_start_queue(dev, i);
+
+	/* TX Side */
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		eth_ark_tx_queue_start(dev, i);
+
+	/* start DDM */
+	ark_ddm_start(ark->ddm.v);
+
+	ark->started = 1;
+	/* set xmit and receive function */
+	dev->rx_pkt_burst = &eth_ark_recv_pkts;
+	dev->tx_pkt_burst = &eth_ark_xmit_pkts;
+
+	if (ark->start_pg)
+		ark_pmd_pktchkr_run(ark->pc);
+
+	if (ark->start_pg && (ark_get_port_id(dev, ark) == 0)) {
+		pthread_t thread;
+
+		/* Delay packet generator start so the regression harness
+		 * can finish its setup (see delay_pg_start). */
+		if (pthread_create(&thread, NULL, delay_pg_start, ark))
+			PMD_DRV_LOG(ERR, "Could not create pktgen starter thread\n");
+	}
+
+	if (ark->user_ext.dev_start)
+		ark->user_ext.dev_start(dev, ark->user_data);
+
+	return 0;
+}
+
+/* dev_stop hook: quiesce the extension, PG, TX path, DDM, then UDM,
+ * dumping diagnostic state on any stop anomaly. Idempotent via
+ * ark->started.
+ */
+static void
+eth_ark_dev_stop(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+	int status;
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+	struct ark_mpu_t *mpu;
+
+	ARK_DEBUG_TRACE("ARKP: In eth_ark_dev_stop\n");
+
+	if (ark->started == 0)
+		return;
+	ark->started = 0;
+
+	/* Stop the extension first */
+	if (ark->user_ext.dev_stop)
+		ark->user_ext.dev_stop(dev, ark->user_data);
+
+	/* Stop the packet generator */
+	if (ark->start_pg)
+		ark_pmd_pktgen_pause(ark->pg);
+
+	/* Swap back to noop burst functions (original text was mangled to
+	 * "ð_", the HTML entity of "&eth_"). */
+	dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
+	dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
+
+	/* STOP TX Side */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		status = eth_ark_tx_queue_stop(dev, i);
+		if (status != 0) {
+			uint8_t port = dev->data->port_id;
+
+			fprintf(stderr, "ARKP tx_queue stop anomaly port %u, queue %u\n",
+				port, i);
+		}
+	}
+
+	/* Stop DDM */
+	/* Wait up to 0.1 second. each stop is upto 1000 * 10 useconds */
+	for (i = 0; i < 10; i++) {
+		status = ark_ddm_stop(ark->ddm.v, 1);
+		if (status == 0)
+			break;
+	}
+	if (status || i != 0) {
+		PMD_DRV_LOG(ERR, "DDM stop anomaly. status: %d iter: %u. (%s)\n",
+			status, i, __func__);
+		ark_ddm_dump(ark->ddm.v, "Stop anomaly");
+
+		mpu = ark->mputx.v;
+		for (i = 0; i < ark->txQueues; i++) {
+			ark_mpu_dump(mpu, "DDM failure dump", i);
+			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+		}
+	}
+
+	/* STOP RX Side */
+	/* Stop UDM */
+	for (i = 0; i < 10; i++) {
+		status = ark_udm_stop(ark->udm.v, 1);
+		if (status == 0)
+			break;
+	}
+	if (status || i != 0) {
+		PMD_DRV_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n",
+			status, i, __func__);
+		ark_udm_dump(ark->udm.v, "Stop anomaly");
+
+		mpu = ark->mpurx.v;
+		for (i = 0; i < ark->rxQueues; i++) {
+			ark_mpu_dump(mpu, "UDM Stop anomaly", i);
+			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+		}
+	}
+
+	ark_udm_dump_stats(ark->udm.v, "Post stop");
+	ark_udm_dump_perf(ark->udm.v, "Post stop");
+
+	/* Dump the RX queues; the original iterated nb_tx_queues here */
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		eth_ark_rx_dump_queue(dev, i, __func__);
+
+	/* Stop the packet checker if it is running */
+	if (ark->start_pg) {
+		ark_pmd_pktchkr_dump_stats(ark->pc);
+		ark_pmd_pktchkr_stop(ark->pc);
+	}
+}
+
+/* dev_close hook: notify the extension, stop the device, force-close the
+ * UDM, and release every RX/TX queue.
+ */
+static void
+eth_ark_dev_close(struct rte_eth_dev *dev)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+	uint16_t q;
+
+	if (ark->user_ext.dev_close)
+		ark->user_ext.dev_close(dev, ark->user_data);
+
+	eth_ark_dev_stop(dev);
+	eth_ark_udm_force_close(dev);
+
+	/* TODO This should only be called once for the device during shutdown */
+	ark_rqp_dump(ark->rqpacing);
+
+	for (q = 0; q < dev->data->nb_tx_queues; q++) {
+		eth_ark_tx_queue_release(dev->data->tx_queues[q]);
+		dev->data->tx_queues[q] = 0;
+	}
+
+	for (q = 0; q < dev->data->nb_rx_queues; q++) {
+		eth_ark_dev_rx_queue_release(dev->data->rx_queues[q]);
+		dev->data->rx_queues[q] = 0;
+	}
+
+}
+
+/* dev_infos_get hook: report queue counts from the MPU HW, fixed
+ * descriptor limits, and supported link speeds. The original assigned
+ * rx/tx_offload_capa twice; the duplicates are removed.
+ */
+static void
+eth_ark_dev_info_get(struct rte_eth_dev *dev,
+	struct rte_eth_dev_info *dev_info)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+	struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPUTx_BASE);
+	struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPURx_BASE);
+
+	uint16_t ports = ark->num_ports;
+
+	/* device specific configuration */
+	memset(dev_info, 0, sizeof(*dev_info));
+
+	dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports);
+	dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports);
+	/* NOTE(review): max_mac_addrs of 0 looks suspicious given the
+	 * driver allocates one MAC slot per port -- confirm. */
+	dev_info->max_mac_addrs = 0;
+	dev_info->if_index = 0;
+	dev_info->max_rx_pktlen = (16 * 1024) - 128;
+	dev_info->min_rx_bufsize = 1024;
+	dev_info->rx_offload_capa = 0;
+	dev_info->tx_offload_capa = 0;
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = 4096 * 4,
+		.nb_min = 512,	/* HW Q size for RX */
+		.nb_align = 2,};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = 4096 * 4,
+		.nb_min = 256,	/* HW Q size for TX */
+		.nb_align = 2,};
+
+	/* ARK PMD supports all line rates, how do we indicate that here ?? */
+	dev_info->speed_capa =
+		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
+		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+	dev_info->pci_dev = ARK_DEV_TO_PCI(dev);
+	dev_info->driver_name = dev->data->drv_name;
+
+}
+
+/* link_update hook: the core PMD has no link state of its own; delegate
+ * entirely to the user extension when one is loaded.
+ */
+static int
+eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+
+	ARK_DEBUG_TRACE("ARKP: link status = %d\n",
+		dev->data->dev_link.link_status);
+
+	if (!ark->user_ext.link_update)
+		return 0;
+	return ark->user_ext.link_update(dev, wait_to_complete,
+		ark->user_data);
+}
+
+/* dev_set_link_up hook: record link up, then let the extension act */
+static int
+eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct ark_adapter *ark;
+
+	dev->data->dev_link.link_status = 1;
+	ark = (struct ark_adapter *) dev->data->dev_private;
+	if (!ark->user_ext.dev_set_link_up)
+		return 0;
+	return ark->user_ext.dev_set_link_up(dev, ark->user_data);
+}
+
+/* dev_set_link_down hook: record link down, then let the extension act */
+static int
+eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct ark_adapter *ark;
+
+	dev->data->dev_link.link_status = 0;
+	ark = (struct ark_adapter *) dev->data->dev_private;
+	if (!ark->user_ext.dev_set_link_down)
+		return 0;
+	return ark->user_ext.dev_set_link_down(dev, ark->user_data);
+}
+
+/* stats_get hook: zero the counters, accumulate per-queue TX then RX
+ * statistics, and finally let the extension add its own.
+ */
+static void
+eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+	uint16_t q;
+
+	stats->ipackets = 0;
+	stats->ibytes = 0;
+	stats->opackets = 0;
+	stats->obytes = 0;
+	stats->imissed = 0;
+	stats->oerrors = 0;
+
+	for (q = 0; q < dev->data->nb_tx_queues; q++)
+		eth_tx_queue_stats_get(dev->data->tx_queues[q], stats);
+
+	for (q = 0; q < dev->data->nb_rx_queues; q++)
+		eth_rx_queue_stats_get(dev->data->rx_queues[q], stats);
+
+	if (ark->user_ext.stats_get)
+		ark->user_ext.stats_get(dev, stats, ark->user_data);
+}
+
+/* stats_reset hook: clear per-queue TX and RX statistics, then the
+ * extension's. The original passed rx_queues[i] to the *TX* reset --
+ * resetting the wrong queues and indexing RX storage with a TX count.
+ */
+static void
+eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
+{
+	uint16_t i;
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++)
+		eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
+
+	if (ark->user_ext.stats_reset)
+		ark->user_ext.stats_reset(dev, ark->user_data);
+}
+
+/* mac_addr_add hook: pure pass-through to the user extension */
+static void
+eth_ark_macaddr_add(struct rte_eth_dev *dev,
+	struct ether_addr *mac_addr, uint32_t index, uint32_t pool)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+
+	if (!ark->user_ext.mac_addr_add)
+		return;
+	ark->user_ext.mac_addr_add(dev, mac_addr, index, pool, ark->user_data);
+}
+
+/* mac_addr_remove hook: pure pass-through to the user extension */
+static void
+eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+
+	if (!ark->user_ext.mac_addr_remove)
+		return;
+	ark->user_ext.mac_addr_remove(dev, index, ark->user_data);
+}
+
+/* mac_addr_set hook: pure pass-through to the user extension */
+static void
+eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+	struct ether_addr *mac_addr)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+
+	if (!ark->user_ext.mac_addr_set)
+		return;
+	ark->user_ext.mac_addr_set(dev, mac_addr, ark->user_data);
+}
+
+/* kvargs callback for the PktDir key: parse the hex value into the
+ * file-scope pktDirV. Always returns 0 (parse failures yield 0 via
+ * strtol's error convention).
+ */
+static inline int
+process_pktdir_arg(const char *key, const char *value,
+	void *extra_args __rte_unused)
+{
+	ARK_DEBUG_TRACE("**** IN process_pktdir_arg, key = %s, value = %s\n", key,
+		value);
+
+	pktDirV = strtol(value, NULL, 16);
+	ARK_DEBUG_TRACE("pktDirV = 0x%x\n", pktDirV);
+	return 0;
+}
+
+/* kvargs callback for PktGen/PktChkr keys: read the named config file
+ * into extra_args (an ARK_MAX_ARG_LEN buffer). Fixes over the original:
+ * the fopen() result is checked (it crashed on a missing file), strncpy
+ * is NUL-terminated, and the strncat bound is the *remaining* space, not
+ * the total buffer size (overflow).
+ */
+static inline int
+process_file_args(const char *key, const char *value, void *extra_args)
+{
+	char *args = (char *) extra_args;
+	FILE *file;
+	char line[256];
+	int first = 1;
+
+	ARK_DEBUG_TRACE("**** IN process_pktgen_arg, key = %s, value = %s\n", key,
+		value);
+
+	/* Open the configuration file */
+	file = fopen(value, "r");
+	if (file == NULL) {
+		PMD_DRV_LOG(ERR, "Unable to open config file %s\n", value);
+		return -1;
+	}
+
+	while (fgets(line, sizeof(line), file)) {
+		if (first) {
+			strncpy(args, line, ARK_MAX_ARG_LEN - 1);
+			args[ARK_MAX_ARG_LEN - 1] = '\0';
+			first = 0;
+		} else {
+			strncat(args, line, ARK_MAX_ARG_LEN - strlen(args) - 1);
+		}
+	}
+	ARK_DEBUG_TRACE("file = %s\n", args);
+	fclose(file);
+	return 0;
+}
+
+/* Parse devargs and configure the packet director, generator and checker
+ * accordingly. The original leaked the kvlist returned by
+ * rte_kvargs_parse(); it is now freed on every path. Returns 1 when
+ * arguments were processed, 0 when there were none.
+ */
+static int
+eth_ark_check_args(const char *params)
+{
+	struct rte_kvargs *kvlist;
+	unsigned k_idx;
+	struct rte_kvargs_pair *pair = NULL;
+
+	/* TODO: the index of gark[index] should be associated with phy dev map */
+	struct ark_adapter *ark = gark[0];
+
+	kvlist = rte_kvargs_parse(params, valid_arguments);
+	if (kvlist == NULL)
+		return 0;
+
+	pktGenArgs[0] = 0;
+	pktChkrArgs[0] = 0;
+
+	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+		pair = &kvlist->pairs[k_idx];
+		ARK_DEBUG_TRACE("**** Arg passed to PMD = %s:%s\n", pair->key,
+			pair->value);
+	}
+
+	if (rte_kvargs_process(kvlist, ARK_PKTDIR_ARG,
+			&process_pktdir_arg, NULL) != 0) {
+		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
+	}
+
+	if (rte_kvargs_process(kvlist, ARK_PKTGEN_ARG,
+			&process_file_args, pktGenArgs) != 0) {
+		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
+	}
+
+	if (rte_kvargs_process(kvlist, ARK_PKTCHKR_ARG,
+			&process_file_args, pktChkrArgs) != 0) {
+		PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
+	}
+
+	/* The kvlist is fully consumed; free it (was leaked) */
+	rte_kvargs_free(kvlist);
+
+	/* Setup the packet director */
+	ark_pmd_pktdir_setup(ark->pd, pktDirV);
+	ARK_DEBUG_TRACE("INFO: packet director set to 0x%x\n", pktDirV);
+
+	/* Setup the packet generator */
+	if (pktGenArgs[0]) {
+		PMD_DRV_LOG(INFO, "Setting up the packet generator\n");
+		ark_pmd_pktgen_parse(pktGenArgs);
+		ark_pmd_pktgen_reset(ark->pg);
+		ark_pmd_pktgen_setup(ark->pg);
+		ark->start_pg = 1;
+	}
+
+	/* Setup the packet checker */
+	if (pktChkrArgs[0]) {
+		ark_pmd_pktchkr_parse(pktChkrArgs);
+		ark_pmd_pktchkr_setup(ark->pc);
+	}
+
+	return 1;
+}
+
+/* vdev probe entry: log, forward the devargs string to the common
+ * argument parser, and report success.
+ */
+static int
+pmd_ark_probe(const char *name, const char *params)
+{
+	RTE_LOG(INFO, PMD, "Initializing pmd_ark for %s params = %s\n", name,
+		params);
+
+	/* Parse off the v index */
+	eth_ark_check_args(params);
+	return 0;
+}
+
+/* vdev remove entry: log only.
+ * NOTE(review): returns 1, unlike probe's 0 -- confirm intended.
+ */
+static int
+pmd_ark_remove(const char *name)
+{
+	RTE_LOG(INFO, PMD, "Closing ark %s ethdev on numa socket %u\n", name,
+		rte_socket_id());
+	return 1;
+}
+
+/* Virtual-device shim so the PMD can also be instantiated via --vdev */
+static struct rte_vdev_driver pmd_ark_drv = {
+ .probe = pmd_ark_probe,
+ .remove = pmd_ark_remove,
+};
+
+/* NOTE(review): registration mixes "net_ark" (vdev/kmod-dep) with
+ * "eth_ark" (PCI driver and PCI table) -- confirm the intended canonical
+ * driver name; DPDK convention is net_<name> for both.
+ */
+RTE_PMD_REGISTER_VDEV(net_ark, pmd_ark_drv);
+RTE_PMD_REGISTER_ALIAS(net_ark, eth_ark);
+RTE_PMD_REGISTER_PCI(eth_ark, rte_ark_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic ");
+RTE_PMD_REGISTER_PCI_TABLE(eth_ark, pci_id_ark_map);
new file mode 100644
@@ -0,0 +1,75 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_ETHDEV_H_
+#define _ARK_ETHDEV_H_
+
+/* Map an ethdev back to its port index within the adapter; -1 if absent */
+int ark_get_port_id(struct rte_eth_dev *dev, struct ark_adapter *ark);
+
+/* RX functions */
+int eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);
+uint32_t eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id);
+int eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id);
+/* noop burst handlers are installed while queues are not yet configured */
+uint16_t eth_ark_recv_pkts_noop(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t eth_ark_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void eth_ark_dev_rx_queue_release(void *rx_queue);
+void eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats);
+void eth_rx_queue_stats_reset(void *vqueue);
+void eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
+ const char *msg);
+
+/* Force the UDM out of any pending DMA during dev_close */
+void eth_ark_udm_force_close(struct rte_eth_dev *dev);
+
+/* TX functions */
+uint16_t eth_ark_xmit_pkts_noop(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t eth_ark_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int eth_ark_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void eth_ark_tx_queue_release(void *tx_queue);
+int eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
+int eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
+void eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats);
+void eth_tx_queue_stats_reset(void *vqueue);
+
+#endif
new file mode 100644
@@ -0,0 +1,2 @@
+/* Auto-generated PMD information strings (embedded for pmdinfogen-style
+ * tooling); presumably produced from the REGISTER macros -- do not edit
+ * by hand.
+ */
+const char net_ark_pmd_info[] __attribute__((used)) = "PMD_INFO_STRING= {\"name\" : \"net_ark\", \"kmod\" : \"* igb_uio | uio_pci_generic \", \"pci_ids\" : []}";
+const char eth_ark_pmd_info[] __attribute__((used)) = "PMD_INFO_STRING= {\"name\" : \"eth_ark\", \"pci_ids\" : [[7532, 4109, 65535, 65535],[7532, 4110, 65535, 65535] ]}";
new file mode 100644
@@ -0,0 +1,671 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_global.h"
+#include "ark_debug.h"
+#include "ark_ethdev.h"
+#include "ark_mpu.h"
+#include "ark_udm.h"
+
+#define ARK_RX_META_SIZE 32
+#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
+#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+
+#ifdef RTE_LIBRTE_ARK_DEBUG_RX
+#define ARK_RX_DEBUG 1
+#define ARK_FULL_DEBUG 1
+#else
+#define ARK_RX_DEBUG 0
+#define ARK_FULL_DEBUG 0
+#endif
+
+/* Forward declarations */
+struct ark_rx_queue;
+struct ark_rx_meta;
+
+static void dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi);
+static void ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue);
+static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue,
+ struct ark_rx_meta *meta, struct rte_mbuf *mbuf0, uint32_t consIndex);
+static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);
+
+/* ************************************************************************* */
+struct ark_rx_queue {
+
+	/* array of mbufs currently loaned to the hardware */
+	struct rte_mbuf **reserveQ;
+	/* array of physical addresses of the mbuf data pointers */
+	/* This pointer is a virtual address */
+	phys_addr_t *paddressQ;
+	struct rte_mempool *mb_pool;
+
+	/* per-queue hardware register windows */
+	struct ark_udm_t *udm;
+	struct ark_mpu_t *mpu;
+
+	uint32_t queueSize;	/* ring entries; power of two */
+	uint32_t queueMask;	/* queueSize - 1, for cheap modulo */
+
+	/* free-running ring indexes; the numbers give pipeline order */
+	uint32_t seedIndex;	/* 1 set with an empty mbuf */
+	uint32_t consIndex;	/* 3 consumed by the driver */
+
+	/* The queue Id is used to identify the HW Q */
+	uint16_t phys_qid;
+
+	/* The queue Index is used within the dpdk device structures */
+	uint16_t queueIndex;
+
+	uint32_t pad1;
+
+	/* separate cache line */
+	/* second cache line - fields only used in slow path */
+	MARKER cacheline1 __rte_cache_min_aligned;
+
+	/* 2 filled by the HW -- DMA-written, hence volatile */
+	volatile uint32_t prodIndex;
+
+} __rte_cache_aligned;
+
+/* ************************************************************************* */
+
+/* MATCHES struct in UDMDefines.bsv */
+
+/* TODO move to ark_udm.h */
+struct ark_rx_meta {
+	uint64_t timestamp;	/* HW-supplied arrival timestamp */
+	uint64_t userData;	/* opaque field, copied into mbuf->udata64 */
+	uint8_t port;		/* ingress port, copied into mbuf->port */
+	uint8_t dstQueue;	/* destination queue chosen by the HW */
+	uint16_t pktLen;	/* total packet length in bytes */
+};
+
+/* ************************************************************************* */
+
+/* TODO pick a better function name */
+/*
+ * Program the MPU and UDM hardware for one RX queue: hand the HW the
+ * physical address of the paddressQ ring and of the prodIndex field it
+ * DMA-writes, then publish the initial producer (seed) index.
+ * Leaves the queue in the STOPPED state.  Returns 0 on success, -1 if
+ * the MPU rejects the configuration.
+ */
+static int
+eth_ark_rx_queue_setup(struct rte_eth_dev *dev,
+	struct ark_rx_queue *queue,
+	uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx)
+{
+	phys_addr_t queueBase;
+	phys_addr_t physAddrQBase;
+	phys_addr_t physAddrProdIndex;
+
+	/* HW completion writes land directly in queue->prodIndex */
+	queueBase = rte_malloc_virt2phy(queue);
+	physAddrProdIndex = queueBase +
+		offsetof(struct ark_rx_queue, prodIndex);
+
+	physAddrQBase = rte_malloc_virt2phy(queue->paddressQ);
+
+	/* Verify HW */
+	if (ark_mpu_verify(queue->mpu, sizeof(phys_addr_t))) {
+		PMD_DRV_LOG(ERR, "ARKP: Illegal configuration rx queue\n");
+		return -1;
+	}
+
+	/* Stop and Reset and configure MPU */
+	ark_mpu_configure(queue->mpu, physAddrQBase, queue->queueSize, 0);
+
+	ark_udm_write_addr(queue->udm, physAddrProdIndex);
+
+	/* advance the valid pointer, but don't start until the queue starts */
+	ark_mpu_reset_stats(queue->mpu);
+
+	/* The seed is the producer index for the HW */
+	ark_mpu_set_producer(queue->mpu, queue->seedIndex);
+	dev->data->rx_queue_state[rx_queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+/*
+ * Record the new consumer index, replenish the mbuf reserve and publish
+ * the updated seed (producer) index to the MPU so the HW can keep filling.
+ * NOTE(review): a failure of eth_ark_rx_seed_mbufs() is silently ignored
+ * here; the previously-seeded index is still published -- confirm intended.
+ */
+static inline void
+eth_ark_rx_update_consIndex(struct ark_rx_queue *queue, uint32_t consIndex)
+{
+	queue->consIndex = consIndex;
+	eth_ark_rx_seed_mbufs(queue);
+	ark_mpu_set_producer(queue->mpu, queue->seedIndex);
+}
+
+/* ************************************************************************* */
+/*
+ * Public RX queue setup entry point (dev_ops.rx_queue_setup).
+ * Allocates the queue structure plus the mbuf reserve ring and the
+ * physical-address ring, seeds the reserve from the mempool and programs
+ * the MPU/UDM hardware.  Returns 0 on success or a negative error code.
+ */
+int
+eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
+	uint16_t queue_idx,
+	uint16_t nb_desc,
+	unsigned int socket_id,
+	const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+	static int warning1;		/* = 0 */
+
+	struct ark_rx_queue *queue;
+	uint32_t i;
+	int status;
+
+	int port = ark_get_port_id(dev, ark);
+	int qidx = port + queue_idx;	/* TODO FIXME */
+
+	/* TODO: We may already be setup, check here if there is nothing to do */
+	/* Free memory prior to re-allocation if needed */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		/* TODO: release any allocated queues */
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
+	if (rx_conf != NULL && warning1 == 0) {
+		warning1 = 1;
+		PMD_DRV_LOG(INFO,
+			"ARKP: Arkville PMD ignores rte_eth_rxconf argument.\n");
+	}
+
+	/* The 32-byte meta block is written in front of the packet data */
+	if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
+		PMD_DRV_LOG(ERR,
+			"Error: DPDK Arkville requires head room > %d bytes (%s)\n",
+			ARK_RX_META_SIZE, __func__);
+		return -1;		/* ERROR CODE */
+	}
+
+	/* queueMask arithmetic requires a power-of-two ring size */
+	if (!rte_is_power_of_2(nb_desc)) {
+		PMD_DRV_LOG(ERR,
+			"DPDK Arkville configuration queue size must be power of two %u (%s)\n",
+			nb_desc, __func__);
+		return -1;		/* ERROR CODE */
+	}
+
+	/* Allocate queue struct */
+	queue =
+		rte_zmalloc_socket("ArkRXQueue", sizeof(struct ark_rx_queue), 64,
+			socket_id);
+	if (queue == 0) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory in %s\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* NOTE zmalloc is used, no need to 0 indexes, etc. */
+	queue->mb_pool = mb_pool;
+	queue->phys_qid = qidx;
+	queue->queueIndex = queue_idx;
+	queue->queueSize = nb_desc;
+	queue->queueMask = nb_desc - 1;
+
+	queue->reserveQ =
+		rte_zmalloc_socket("ArkRXQueue mbuf",
+			nb_desc * sizeof(struct rte_mbuf *), 64, socket_id);
+	queue->paddressQ =
+		rte_zmalloc_socket("ArkRXQueue paddr", nb_desc * sizeof(phys_addr_t),
+			64, socket_id);
+	if (queue->reserveQ == 0 || queue->paddressQ == 0) {
+		PMD_DRV_LOG(ERR, "Failed to allocate queue memory in %s\n", __func__);
+		rte_free(queue->reserveQ);
+		rte_free(queue->paddressQ);
+		rte_free(queue);
+		return -ENOMEM;
+	}
+
+	dev->data->rx_queues[queue_idx] = queue;
+	queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
+	queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);
+
+	/* populate mbuf reserve */
+	status = eth_ark_rx_seed_mbufs(queue);
+
+	/* MPU Setup */
+	if (status == 0)
+		status = eth_ark_rx_queue_setup(dev, queue, qidx, queue_idx);
+
+	if (unlikely(status != 0)) {
+		struct rte_mbuf *mbuf;
+
+		PMD_DRV_LOG(ERR, "ARKP Failed to initialize RX queue %d %s\n", qidx,
+			__func__);
+		/* Free the mbufs allocated.  Walk the reserve ring by index;
+		 * the previous code incremented the mbuf pointer itself
+		 * (mbuf++) instead of indexing the array, freeing bogus
+		 * addresses.
+		 */
+		for (i = 0; i < nb_desc; ++i) {
+			mbuf = queue->reserveQ[i];
+			if (mbuf != 0)
+				rte_pktmbuf_free(mbuf);
+		}
+		/* don't leave a dangling pointer in the device structure */
+		dev->data->rx_queues[queue_idx] = NULL;
+		rte_free(queue->reserveQ);
+		rte_free(queue->paddressQ);
+		rte_free(queue);
+		return -1;		/* ERROR CODE */
+	}
+
+	return 0;
+}
+
+/* ************************************************************************* */
+/* No-op receive burst: installed while the queue is stopped so the
+ * application can keep polling safely; always reports zero packets.
+ */
+uint16_t
+eth_ark_recv_pkts_noop(void *rx_queue __rte_unused,
+	struct rte_mbuf **rx_pkts __rte_unused, uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/* ************************************************************************* */
+/*
+ * Main receive burst function (installed as dev->rx_pkt_burst).
+ * Walks the reserve ring from consIndex up to the HW-written prodIndex,
+ * converting the meta block the UDM buried in each mbuf's headroom into
+ * mbuf fields.  Returns the number of packets stored in rx_pkts.
+ */
+uint16_t
+eth_ark_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct ark_rx_queue *queue;
+	register uint32_t consIndex, prodIndex;
+	uint16_t nb;
+	uint64_t rx_bytes = 0;
+	struct rte_mbuf *mbuf;
+	struct ark_rx_meta *meta;
+
+	queue = (struct ark_rx_queue *) rx_queue;
+	if (unlikely(queue == 0))
+		return 0;
+	if (unlikely(nb_pkts == 0))
+		return 0;
+	/* snapshot the volatile HW-written producer index once per burst */
+	prodIndex = queue->prodIndex;
+	consIndex = queue->consIndex;
+	nb = 0;
+
+	while (prodIndex != consIndex) {
+		mbuf = queue->reserveQ[consIndex & queue->queueMask];
+		/* prefetch mbuf ? */
+		rte_mbuf_prefetch_part1(mbuf);
+		rte_mbuf_prefetch_part2(mbuf);
+
+		/* META DATA burried in buffer */
+		meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
+
+		mbuf->port = meta->port;
+		mbuf->pkt_len = meta->pktLen;
+		mbuf->data_len = meta->pktLen;
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->udata64 = meta->userData;
+		if (ARK_RX_DEBUG) {	/* debug use */
+			/* sanity-check the meta block; compiled out unless
+			 * RTE_LIBRTE_ARK_DEBUG_RX is set
+			 */
+			if ((meta->pktLen > (1024 * 16)) ||
+				(meta->pktLen == 0)) {
+				PMD_DRV_LOG(INFO,
+					"ARKP RX: Bad Meta Q: %u cons: %u prod: %u\n",
+					queue->phys_qid,
+					consIndex,
+					queue->prodIndex);
+
+				PMD_DRV_LOG(INFO, " : cons: %u prod: %u seedIndex %u\n",
+					consIndex,
+					queue->prodIndex,
+					queue->seedIndex);
+
+				PMD_DRV_LOG(INFO, " : UDM prod: %u len: %u\n",
+					queue->udm->rt_cfg.prodIdx,
+					meta->pktLen);
+				ark_mpu_dump(queue->mpu,
+					" ",
+					queue->phys_qid);
+
+				dump_mbuf_data(mbuf, 0, 256);
+				/* its FUBAR so fix it */
+				mbuf->pkt_len = 63;
+				meta->pktLen = 63;
+			}
+			mbuf->seqn = consIndex;
+		}
+
+		rx_bytes += meta->pktLen;	/* TEMP stats */
+
+		/* a packet larger than one data room spans several ring
+		 * entries; chain them into a multi-segment mbuf
+		 */
+		if (unlikely(meta->pktLen > ARK_RX_MAX_NOCHAIN))
+			consIndex = eth_ark_rx_jumbo
+				(queue, meta, mbuf, consIndex + 1);
+		else
+			consIndex += 1;
+
+		rx_pkts[nb] = mbuf;
+		nb++;
+		if (nb >= nb_pkts)
+			break;
+	}
+
+	/* NOTE(review): 'unlikely' on a successful receive looks inverted;
+	 * confirm whether likely() was intended here.
+	 */
+	if (unlikely(nb != 0))
+		/* report next free to FPGA */
+		eth_ark_rx_update_consIndex(queue, consIndex);
+
+	return nb;
+}
+
+/* ************************************************************************* */
+/*
+ * Chain additional ring mbufs onto mbuf0 for a packet larger than one
+ * data room.  mbuf0 (already holding the first data room's worth) was
+ * filled by the caller; consIndex points at the first continuation entry.
+ * Returns the consumer index just past the last segment consumed.
+ */
+static uint32_t
+eth_ark_rx_jumbo(struct ark_rx_queue *queue,
+	struct ark_rx_meta *meta, struct rte_mbuf *mbuf0, uint32_t consIndex)
+{
+	struct rte_mbuf *mbuf_prev;
+	struct rte_mbuf *mbuf;
+
+	uint16_t remaining;
+	uint16_t data_len;
+	uint8_t segments;
+
+	/* first buf populated by caller */
+	mbuf_prev = mbuf0;
+	segments = 1;
+	data_len = RTE_MIN(meta->pktLen, RTE_MBUF_DEFAULT_DATAROOM);
+	remaining = meta->pktLen - data_len;
+	mbuf0->data_len = data_len;
+
+	/* TODO check that the data does not exceed prodIndex! */
+	while (remaining != 0) {
+		/* continuation buffers use the entire buffer, headroom
+		 * included -- hence data_off = 0 below
+		 */
+		data_len =
+			RTE_MIN(remaining,
+				RTE_MBUF_DEFAULT_DATAROOM +
+				RTE_PKTMBUF_HEADROOM);
+
+		remaining -= data_len;
+		segments += 1;
+
+		mbuf = queue->reserveQ[consIndex & queue->queueMask];
+		mbuf_prev->next = mbuf;
+		mbuf_prev = mbuf;
+		mbuf->data_len = data_len;
+		mbuf->data_off = 0;
+		if (ARK_RX_DEBUG)
+			mbuf->seqn = consIndex;	/* for debug only */
+
+		consIndex += 1;
+	}
+
+	mbuf0->nb_segs = segments;
+	return consIndex;
+}
+
+/* Drain the internal queue allowing hw to clear out. */
+/*
+ * NOTE(review): the loop condition masks the index difference with
+ * queueMask, so a ring whose fill level equals queueSize would appear
+ * empty and exit immediately -- confirm the HW cannot be in that state
+ * when this runs.  Each freed buffer is reported back through
+ * eth_ark_rx_update_consIndex(), which also re-seeds the reserve ring.
+ */
+static void
+eth_ark_rx_queue_drain(struct ark_rx_queue *queue)
+{
+	register uint32_t consIndex;
+	struct rte_mbuf *mbuf;
+
+	consIndex = queue->consIndex;
+
+	/* NOT performance optimized, since this is a one-shot call */
+	while ((consIndex ^ queue->prodIndex) & queue->queueMask) {
+		mbuf = queue->reserveQ[consIndex & queue->queueMask];
+		rte_pktmbuf_free(mbuf);
+		consIndex++;
+		eth_ark_rx_update_consIndex(queue, consIndex);
+	}
+}
+
+/* Number of packets the HW has completed but the driver has not yet
+ * consumed for the given queue.
+ */
+uint32_t
+eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct ark_rx_queue *queue = dev->data->rx_queues[queue_id];
+
+	/* free-running counters; unsigned subtraction is modulo 2^32 */
+	return queue->prodIndex - queue->consIndex;
+}
+
+/* ************************************************************************* */
+/* Enable packet flow on a stopped RX queue: publish the current seed
+ * index, start the MPU engine, then enable the UDM for the queue.
+ * Returns -1 if the queue was never set up.
+ */
+int
+eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct ark_rx_queue *queue = dev->data->rx_queues[queue_id];
+
+	if (queue == NULL)
+		return -1;
+
+	dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	ark_mpu_set_producer(queue->mpu, queue->seedIndex);
+	ark_mpu_start(queue->mpu);
+	ark_udm_queue_enable(queue->udm, 1);
+
+	return 0;
+}
+
+/* ************************************************************************* */
+
+/* Queue can be restarted. data remains
+ */
+int
+eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct ark_rx_queue *queue = dev->data->rx_queues[queue_id];
+
+	if (queue == NULL)
+		return -1;
+
+	/* quiesce the UDM, then record the new state */
+	ark_udm_queue_enable(queue->udm, 0);
+	dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+/* ************************************************************************* */
+/*
+ * Refill the reserve ring from the mempool, up to one full ring ahead of
+ * the consumer, and record each new buffer's physical address in
+ * paddressQ for the MPU.  On a ring wrap only the tail portion is filled;
+ * the remainder is seeded on the next call.  Returns 0 on success, -1 if
+ * the bulk mbuf allocation fails (nothing is seeded in that case).
+ */
+static inline int
+eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
+{
+	uint32_t limit = queue->consIndex + queue->queueSize;
+	uint32_t seedIndex = queue->seedIndex;
+
+	uint32_t count = 0;
+	uint32_t seedM = queue->seedIndex & queue->queueMask;
+
+	uint32_t nb = limit - seedIndex;
+
+	/* Handle wrap around -- remainder is filled on the next call */
+	if (unlikely(seedM + nb > queue->queueSize))
+		nb = queue->queueSize - seedM;
+
+	struct rte_mbuf **mbufs = &queue->reserveQ[seedM];
+	int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);
+
+	if (unlikely(status != 0))
+		return -1;
+
+	if (ARK_RX_DEBUG) {	/* DEBUG */
+		/* poison each fresh buffer and stamp it with its seed index
+		 * and queue id so misrouted buffers are recognizable
+		 */
+		while (count != nb) {
+			struct rte_mbuf *mbuf_init =
+				queue->reserveQ[seedM + count];
+
+			memset(mbuf_init->buf_addr, -1, 512);
+			*((uint32_t *) mbuf_init->buf_addr) = seedIndex + count;
+			*(uint16_t *) RTE_PTR_ADD(mbuf_init->buf_addr, 4) =
+				queue->phys_qid;
+			count++;
+		}
+		count = 0;
+	}
+	/* DEBUG */
+	queue->seedIndex += nb;
+
+	/* Duff's device https://en.wikipedia.org/wiki/Duff's_device */
+	/* x4-unrolled copy of each buf_physaddr into the HW address ring;
+	 * the switch jumps into the loop body to absorb nb % 4 leftovers.
+	 */
+	switch (nb % 4) {
+	case 0:
+		while (count != nb) {
+			queue->paddressQ[seedM++] = (*mbufs++)->buf_physaddr;
+			count++;
+		/* FALLTHROUGH */
+	case 3:
+			queue->paddressQ[seedM++] = (*mbufs++)->buf_physaddr;
+			count++;
+		/* FALLTHROUGH */
+	case 2:
+			queue->paddressQ[seedM++] = (*mbufs++)->buf_physaddr;
+			count++;
+		/* FALLTHROUGH */
+	case 1:
+			queue->paddressQ[seedM++] = (*mbufs++)->buf_physaddr;
+			count++;
+		/* FALLTHROUGH */
+
+		}		/* while (count != nb) */
+	}			/* switch */
+
+	return 0;
+}
+
+/* Debug aid: dump the state of one RX queue under a caller-supplied tag. */
+void
+eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
+	const char *msg)
+{
+	ark_ethdev_rx_dump(msg, dev->data->rx_queues[queue_id]);
+}
+
+/* ************************************************************************* */
+
+/* Call on device closed no user API, queue is stopped */
+/*
+ * Tear down one RX queue: disable the HW, drain and free every mbuf still
+ * loaned to the hardware, then free the rings and the queue itself.
+ */
+void
+eth_ark_dev_rx_queue_release(void *vqueue)
+{
+	struct ark_rx_queue *queue;
+	uint32_t i;
+
+	queue = (struct ark_rx_queue *) vqueue;
+	if (queue == 0)
+		return;
+
+	ark_udm_queue_enable(queue->udm, 0);
+	/* Stop the MPU since pointer are going away */
+	ark_mpu_stop(queue->mpu);
+
+	/* Need to clear out mbufs here, dropping packets along the way */
+	eth_ark_rx_queue_drain(queue);
+
+	/* everything still in the reserve ring belongs to us -- free it */
+	for (i = 0; i < queue->queueSize; ++i)
+		rte_pktmbuf_free(queue->reserveQ[i]);
+
+	rte_free(queue->reserveQ);
+	rte_free(queue->paddressQ);
+	rte_free(queue);
+}
+
+/* Accumulate the UDM's HW-maintained RX counters for one queue into the
+ * device-level stats structure; no-op for an unconfigured queue.
+ */
+void
+eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
+{
+	struct ark_rx_queue *queue = vqueue;
+	struct ark_udm_t *udm;
+	uint64_t ibytes;
+	uint64_t ipackets;
+	uint64_t idropped;
+
+	if (queue == NULL)
+		return;
+
+	udm = queue->udm;
+	ibytes = ark_udm_bytes(udm);
+	ipackets = ark_udm_packets(udm);
+	idropped = ark_udm_dropped(udm);
+
+	stats->q_ipackets[queue->queueIndex] = ipackets;
+	stats->q_ibytes[queue->queueIndex] = ibytes;
+	stats->q_errors[queue->queueIndex] = idropped;
+	stats->ipackets += ipackets;
+	stats->ibytes += ibytes;
+	stats->imissed += idropped;
+}
+
+/* Clear the HW statistics counters (MPU and UDM) for one RX queue. */
+void
+eth_rx_queue_stats_reset(void *vqueue)
+{
+	struct ark_rx_queue *queue = vqueue;
+
+	if (queue == NULL)
+		return;
+
+	ark_mpu_reset_stats(queue->mpu);
+	ark_udm_queue_stats_reset(queue->udm);
+}
+
+/*
+ * Device-close helper: make sure the UDM has flushed all in-flight data.
+ * If not, briefly restart each queue's MPU with extra buffer credits so
+ * stuck data can drain, then reset the UDM unconditionally.
+ */
+void
+eth_ark_udm_force_close(struct rte_eth_dev *dev)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+	struct ark_rx_queue *queue;
+	uint32_t index;
+	uint16_t i;
+
+	if (!ark_udm_is_flushed(ark->udm.v)) {
+		/* restart the MPUs */
+		fprintf(stderr, "ARK: %s UDM not flushed\n", __func__);
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			queue = (struct ark_rx_queue *) dev->data->rx_queues[i];
+			if (queue == 0)
+				continue;
+
+			ark_mpu_start(queue->mpu);
+			/* Add some buffers */
+			index = 100000 + queue->seedIndex;
+			ark_mpu_set_producer(queue->mpu, index);
+		}
+		/* Wait to allow data to pass */
+		usleep(100);
+
+		ARK_DEBUG_TRACE("UDM forced flush attempt, stopped = %d\n",
+			ark_udm_is_flushed(ark->udm.v));
+	}
+	ark_udm_reset(ark->udm.v);
+
+}
+
+/* Dump one RX queue's software ring indexes plus the MPU/UDM HW state. */
+static void
+ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue)
+{
+	if (queue == NULL)
+		return;
+	ARK_DEBUG_TRACE("RX QUEUE %d -- %s", queue->phys_qid, name);
+	ARK_DEBUG_TRACE(FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32 "\n",
+		"queueSize", queue->queueSize,
+		"seedIndex", queue->seedIndex,
+		"prodIndex", queue->prodIndex, "consIndex", queue->consIndex);
+
+	ark_mpu_dump(queue->mpu, name, queue->phys_qid);
+	ark_mpu_dump_setup(queue->mpu, queue->phys_qid);
+	ark_udm_dump(queue->udm, name);
+	ark_udm_dump_setup(queue->udm, queue->phys_qid);
+
+}
+
+/* Hex-dump bytes [lo, hi) of an mbuf's buffer to stderr, 16 per row. */
+static void
+dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi)
+{
+	uint16_t i, j;
+
+	fprintf(stderr, " MBUF: %p len %d, off: %d, seq: %u\n", mbuf,
+		mbuf->pkt_len, mbuf->data_off, mbuf->seqn);
+	for (i = lo; i < hi; i += 16) {
+		const uint8_t *row = RTE_PTR_ADD(mbuf->buf_addr, i);
+
+		fprintf(stderr, " %6d: ", i);
+		for (j = 0; j < 16; j++)
+			fprintf(stderr, " %02x", row[j]);
+		fprintf(stderr, "\n");
+	}
+}
new file mode 100644
@@ -0,0 +1,479 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_global.h"
+#include "ark_mpu.h"
+#include "ark_ddm.h"
+#include "ark_ethdev.h"
+#include "ark_debug.h"
+
+#define ARK_TX_META_SIZE 32
+#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
+#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+#define ARK_TX_PAD_TO_60 1
+
+#ifdef RTE_LIBRTE_ARK_DEBUG_TX
+#define ARK_TX_DEBUG 1
+#define ARK_TX_DEBUG_JUMBO 1
+#else
+#define ARK_TX_DEBUG 0
+#define ARK_TX_DEBUG_JUMBO 0
+#endif
+
+/* ************************************************************************* */
+
+/* struct fixed in FPGA -- 16 bytes */
+
+/* TODO move to ark_ddm.h */
+struct ark_tx_meta {
+	uint64_t physaddr;	/* physical address of this segment's data */
+	uint32_t delta_ns;	/* presumably a launch-time delta in ns; always 0 here -- confirm */
+	uint16_t data_len;	/* of this MBUF */
+#define ARK_DDM_EOP 0x01
+#define ARK_DDM_SOP 0x02
+	uint8_t flags;		/* SOP/EOP framing; EOP marks last mbuf in chain. */
+	uint8_t reserved[1];	/* pad to the fixed 16-byte FPGA layout */
+};
+
+/* ************************************************************************* */
+struct ark_tx_queue {
+
+	struct ark_tx_meta *metaQ;	/* descriptor ring read by the DDM */
+	struct rte_mbuf **bufs;		/* in-flight mbufs, parallel to metaQ */
+
+	/* handles for hw objects */
+	struct ark_mpu_t *mpu;
+	struct ark_ddm_t *ddm;
+
+	/* Stats HW tracks bytes and packets, need to count send errors */
+	uint64_t tx_errors;
+
+	uint32_t queueSize;	/* ring entries; power of two */
+	uint32_t queueMask;	/* queueSize - 1 */
+
+	/* 3 indexs to the paired data rings. */
+	uint32_t prodIndex;	/* where to put the next one */
+	uint32_t freeIndex;	/* mbuf has been freed */
+
+	/* The queue Id is used to identify the HW Q */
+	uint16_t phys_qid;
+	/* The queue Index within the dpdk device structures */
+	uint16_t queueIndex;
+
+	uint32_t pad[1];
+
+	/* second cache line - fields only used in slow path */
+	MARKER cacheline1 __rte_cache_min_aligned;
+	/* hw is done, can be freed.  DMA-written by the DDM (its address is
+	 * handed to ark_ddm_setup in eth_ark_tx_hw_queue_config), so it is
+	 * volatile -- matching the RX queue's HW-written prodIndex --
+	 * ensuring polling loops always reload the hardware's update.
+	 */
+	volatile uint32_t consIndex;
+} __rte_cache_aligned;
+
+/* Forward declarations */
+static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue,
+ struct rte_mbuf *mbuf);
+static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
+static void free_completed_tx(struct ark_tx_queue *queue);
+
+/* Halt the TX MPU engine; no mbufs are reclaimed here. */
+static inline void
+ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
+{
+	ark_mpu_stop(queue->mpu);
+}
+
+/* ************************************************************************* */
+/* Fill one 16-byte DDM descriptor from a single mbuf segment.
+ * flags carries the SOP/EOP framing bits for this segment.
+ */
+static inline void
+eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
+	const struct rte_mbuf *mbuf, uint8_t flags)
+{
+	meta->physaddr = rte_mbuf_data_dma_addr(mbuf);
+	meta->delta_ns = 0;	/* no transmit delay requested */
+	meta->data_len = rte_pktmbuf_data_len(mbuf);
+	meta->flags = flags;
+}
+
+/* ************************************************************************* */
+/* No-op transmit burst installed while the queue is stopped; accepts no
+ * packets, so callers retain ownership of every mbuf.
+ */
+uint16_t
+eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
+	struct rte_mbuf **tx_pkts __rte_unused, uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
+/* ************************************************************************* */
+/*
+ * Main transmit burst function (installed as dev->tx_pkt_burst).
+ * Reclaims completed descriptors first, then enqueues up to nb_pkts mbufs
+ * (padding runts to 60 bytes, splitting multi-segment chains) and finally
+ * publishes the new producer index to the MPU.  Returns the number of
+ * packets accepted; a runt that cannot be padded is freed, counted in
+ * tx_errors, and still included in the returned count.
+ */
+uint16_t
+eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct ark_tx_queue *queue;
+	struct rte_mbuf *mbuf;
+	struct ark_tx_meta *meta;
+
+	uint32_t idx;
+	uint32_t prodIndexLimit;
+	int stat;
+	uint16_t nb;
+
+	queue = (struct ark_tx_queue *) vtxq;
+
+	/* free any packets after the HW is done with them */
+	free_completed_tx(queue);
+
+	/* producer may run at most one full ring ahead of freeIndex */
+	prodIndexLimit = queue->queueSize + queue->freeIndex;
+
+	for (nb = 0;
+		(nb < nb_pkts) && (queue->prodIndex != prodIndexLimit);
+		++nb) {
+		mbuf = tx_pkts[nb];
+
+		if (ARK_TX_PAD_TO_60) {
+			if (unlikely(rte_pktmbuf_pkt_len(mbuf) < 60)) {
+				/* this packet even if it is small can be split,
+				 * be sure to add to the end
+				 */
+				uint16_t toAdd = 60 - rte_pktmbuf_pkt_len(mbuf);
+				char *appended = rte_pktmbuf_append(mbuf, toAdd);
+
+				if (appended == 0) {
+					/* This packet is in error, we cannot send it so just
+					 * count it and delete it.
+					 */
+					queue->tx_errors += 1;
+					rte_pktmbuf_free(mbuf);
+					continue;
+				}
+				memset(appended, 0, toAdd);
+			}
+		}
+
+		if (unlikely(mbuf->nb_segs != 1)) {
+			stat = eth_ark_tx_jumbo(queue, mbuf);
+			if (unlikely(stat != 0))
+				break;		/* Queue is full */
+		} else {
+			/* common case: single-segment packet, one descriptor */
+			idx = queue->prodIndex & queue->queueMask;
+			queue->bufs[idx] = mbuf;
+			meta = &queue->metaQ[idx];
+			eth_ark_tx_meta_from_mbuf(meta, mbuf,
+				ARK_DDM_SOP | ARK_DDM_EOP);
+			queue->prodIndex++;
+		}
+	}
+
+	if (ARK_TX_DEBUG) {
+		if (nb != nb_pkts) {
+			PMD_DRV_LOG(ERR,
+				"ARKP TX: Failure to send: req: %u sent: %u prod: %u cons: %u free: %u\n",
+				nb_pkts, nb, queue->prodIndex, queue->consIndex,
+				queue->freeIndex);
+			ark_mpu_dump(queue->mpu, "TX Failure MPU: ", queue->phys_qid);
+		}
+	}
+
+	/* let fpga know producer index. */
+	if (likely(nb != 0))
+		ark_mpu_set_producer(queue->mpu, queue->prodIndex);
+
+	return nb;
+}
+
+/* ************************************************************************* */
+/*
+ * Enqueue a multi-segment mbuf chain: one descriptor per segment, SOP on
+ * the first and EOP on the last.  Returns 0 on success, nonzero (the
+ * uint32_t wrap of -1) if the ring lacks room for the whole chain --
+ * callers only test for != 0.
+ */
+static uint32_t
+eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf)
+{
+	struct rte_mbuf *next;
+	struct ark_tx_meta *meta;
+	uint32_t freeQueueSpace;
+	uint32_t idx;
+	uint8_t flags = ARK_DDM_SOP;
+
+	/* NOTE(review): queueMask (size - 1) leaves one slot in reserve;
+	 * confirm this off-by-one is intentional.
+	 */
+	freeQueueSpace = queue->queueMask - (queue->prodIndex - queue->freeIndex);
+	if (unlikely(freeQueueSpace < mbuf->nb_segs)) {
+		return -1;
+	}
+
+	if (ARK_TX_DEBUG_JUMBO) {
+		PMD_DRV_LOG(ERR,
+			"ARKP JUMBO TX len: %u segs: %u prod: %u cons: %u free: %u freeSpace: %u\n",
+			mbuf->pkt_len, mbuf->nb_segs, queue->prodIndex, queue->consIndex,
+			queue->freeIndex, freeQueueSpace);
+	}
+
+	while (mbuf != NULL) {
+		next = mbuf->next;
+
+		idx = queue->prodIndex & queue->queueMask;
+		queue->bufs[idx] = mbuf;
+		meta = &queue->metaQ[idx];
+
+		/* EOP only on the final segment of the chain */
+		flags |= (next == NULL) ? ARK_DDM_EOP : 0;
+		eth_ark_tx_meta_from_mbuf(meta, mbuf, flags);
+		queue->prodIndex++;
+
+		flags &= ~ARK_DDM_SOP;	/* drop SOP flags */
+		mbuf = next;
+	}
+
+	return 0;
+}
+
+/* ************************************************************************* */
+/*
+ * Public TX queue setup entry point (dev_ops.tx_queue_setup).
+ * Allocates the queue structure, the meta (descriptor) ring and the
+ * parallel mbuf ring, then programs the MPU/DDM hardware.
+ * Returns 0 on success or a negative error code.
+ */
+int
+eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
+	uint16_t queue_idx,
+	uint16_t nb_desc,
+	unsigned int socket_id, const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+	struct ark_adapter *ark = (struct ark_adapter *) dev->data->dev_private;
+	struct ark_tx_queue *queue;
+	int status;
+
+	/* TODO: divide the Q's evenly with the Vports */
+	int port = ark_get_port_id(dev, ark);
+	int qidx = port + queue_idx;	/* FIXME for multi queue */
+
+	/* queueMask arithmetic requires a power-of-two ring size */
+	if (!rte_is_power_of_2(nb_desc)) {
+		PMD_DRV_LOG(ERR,
+			"DPDK Arkville configuration queue size must be power of two %u (%s)\n",
+			nb_desc, __func__);
+		return -1;
+	}
+
+	/* TODO: We may already be setup, check here if there is to do return */
+
+	/* Allocate queue struct */
+	queue =
+		rte_zmalloc_socket("ArkTXQueue", sizeof(struct ark_tx_queue), 64,
+			socket_id);
+	if (queue == 0) {
+		PMD_DRV_LOG(ERR, "ARKP Failed to allocate tx queue memory in %s\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* we use zmalloc no need to initialize fields */
+	queue->queueSize = nb_desc;
+	queue->queueMask = nb_desc - 1;
+	queue->phys_qid = qidx;
+	queue->queueIndex = queue_idx;
+
+	queue->metaQ =
+		rte_zmalloc_socket("ArkTXQueue meta",
+			nb_desc * sizeof(struct ark_tx_meta), 64, socket_id);
+	queue->bufs =
+		rte_zmalloc_socket("ArkTXQueue bufs",
+			nb_desc * sizeof(struct rte_mbuf *), 64, socket_id);
+
+	if (queue->metaQ == 0 || queue->bufs == 0) {
+		PMD_DRV_LOG(ERR, "Failed to allocate queue memory in %s\n", __func__);
+		rte_free(queue->metaQ);
+		rte_free(queue->bufs);
+		rte_free(queue);
+		return -ENOMEM;
+	}
+
+	queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET);
+	queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET);
+
+	status = eth_ark_tx_hw_queue_config(queue);
+
+	if (unlikely(status != 0)) {
+		rte_free(queue->metaQ);
+		rte_free(queue->bufs);
+		rte_free(queue);
+		return -1;		/* ERROR CODE */
+	}
+
+	/* Publish the queue only after it is fully initialized; the
+	 * previous code assigned it before the allocations and HW config,
+	 * leaving a dangling pointer in the device structure on failure.
+	 */
+	dev->data->tx_queues[queue_idx] = queue;
+
+	return 0;
+}
+
+/* ************************************************************************* */
+/* Program the MPU/DDM hardware for one TX queue.  Hands the HW the
+ * physical address of the meta ring and of the consIndex field that the
+ * DDM writes back, and picks a completion write interval based on ring
+ * size.  Returns 0 on success, -1 if MPU verification fails.
+ */
+static int
+eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
+{
+	phys_addr_t queueBase, ringBase, prodIndexAddr;
+	uint32_t writeInterval_ns;
+
+	/* Verify HW -- MPU */
+	if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
+		return -1;
+
+	queueBase = rte_malloc_virt2phy(queue);
+	ringBase = rte_malloc_virt2phy(queue->metaQ);
+	prodIndexAddr = queueBase + offsetof(struct ark_tx_queue, consIndex);
+
+	ark_mpu_stop(queue->mpu);
+	ark_mpu_reset(queue->mpu);
+
+	/* Stop and Reset and configure MPU */
+	ark_mpu_configure(queue->mpu, ringBase, queue->queueSize, 1);
+
+	/* Adjust the write interval based on queue size --
+	 * increase pcie traffic when low mbuf count
+	 */
+	switch (queue->queueSize) {
+	case 128:
+		/* FALLTHROUGH -- same interval as 256 */
+	case 256:
+		writeInterval_ns = 500;
+		break;
+	case 512:
+		writeInterval_ns = 1000;
+		break;
+	default:
+		writeInterval_ns = 2000;
+		break;
+	}
+
+	/* Completion address in UDM */
+	ark_ddm_setup(queue->ddm, prodIndexAddr, writeInterval_ns);
+
+	return 0;
+}
+
+/* ************************************************************************* */
+/* Free all resources of a TX queue; the queue must already be stopped.
+ * A NULL guard is added to match eth_ark_dev_rx_queue_release(), so a
+ * queue slot that failed setup (or was never created) releases safely.
+ */
+void
+eth_ark_tx_queue_release(void *vtx_queue)
+{
+	struct ark_tx_queue *queue;
+
+	queue = (struct ark_tx_queue *) vtx_queue;
+	if (queue == 0)
+		return;
+
+	ark_tx_hw_queue_stop(queue);
+
+	/* declare everything still in flight complete, then reclaim it */
+	queue->consIndex = queue->prodIndex;
+	free_completed_tx(queue);
+
+	rte_free(queue->metaQ);
+	rte_free(queue->bufs);
+	rte_free(queue);
+}
+
+/* ************************************************************************* */
+/* Stop a TX queue: wait (bounded, ~1 s) for the DDM to drain all
+ * in-flight packets, then halt the MPU and reclaim completed mbufs.
+ * Returns -1 if the hardware does not drain in time.
+ */
+int
+eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct ark_tx_queue *queue = dev->data->tx_queues[queue_id];
+	int cnt = 0;
+
+	/* Wait for DDM to send out all packets. */
+	while (queue->consIndex != queue->prodIndex) {
+		usleep(100);
+		if (cnt++ > 10000)
+			return -1;
+	}
+
+	ark_mpu_stop(queue->mpu);
+	free_completed_tx(queue);
+
+	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+/* Start (or restart) a TX queue; a no-op if it is already started. */
+int
+eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct ark_tx_queue *queue = dev->data->tx_queues[queue_id];
+
+	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
+		return 0;
+
+	ark_mpu_start(queue->mpu);
+	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/* ************************************************************************* */
+/*
+ * Walk from freeIndex up to the DDM-written consIndex, returning each
+ * completed packet to its pool.  Only the SOP entry of a chain is freed:
+ * rte_pktmbuf_free() releases the whole chain, so continuation-segment
+ * entries are skipped.
+ */
+static void
+free_completed_tx(struct ark_tx_queue *queue)
+{
+	struct rte_mbuf *mbuf;
+	struct ark_tx_meta *meta;
+	uint32_t topIndex;
+
+	topIndex = queue->consIndex;	/* read once */
+	while (queue->freeIndex != topIndex) {
+		meta = &queue->metaQ[queue->freeIndex & queue->queueMask];
+		mbuf = queue->bufs[queue->freeIndex & queue->queueMask];
+
+		if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
+			/* ref count of the mbuf is checked in this call. */
+			rte_pktmbuf_free(mbuf);
+		}
+		queue->freeIndex++;
+	}
+}
+
+/* ************************************************************************* */
+/* Accumulate the DDM's HW-maintained TX counters for one queue into the
+ * device-level stats structure.  A NULL guard is added for consistency
+ * with eth_rx_queue_stats_get(), which checks before dereferencing.
+ */
+void
+eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
+{
+	struct ark_tx_queue *queue;
+	struct ark_ddm_t *ddm;
+	uint64_t bytes, pkts;
+
+	queue = vqueue;
+	if (queue == 0)
+		return;
+	ddm = queue->ddm;
+
+	bytes = ark_ddm_queue_byte_count(ddm);
+	pkts = ark_ddm_queue_pkt_count(ddm);
+
+	stats->q_opackets[queue->queueIndex] = pkts;
+	stats->q_obytes[queue->queueIndex] = bytes;
+	stats->opackets += pkts;
+	stats->obytes += bytes;
+	stats->oerrors += queue->tx_errors;
+}
+
+void
+eth_tx_queue_stats_reset(void *vqueue)
+{
+ struct ark_tx_queue *queue;
+ struct ark_ddm_t *ddm;
+
+ queue = vqueue;
+ ddm = queue->ddm;
+
+ ark_ddm_queue_reset_stats(ddm);
+ queue->tx_errors = 0;
+}
new file mode 100644
@@ -0,0 +1,71 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_EXT_H_
+#define _ARK_EXT_H_
+
+/*
+ Called post PMD init. The implementation returns its private data that gets passed into
+ all other functions as user_data
+ The ARK extension implementation MUST implement this function
+*/
+void *dev_init(struct rte_eth_dev *dev, void *Abar, int port_id);
+
+/* Called during device shutdown */
+void dev_uninit(struct rte_eth_dev *dev, void *user_data);
+
+/* This call is optional and allows the extension to specify the number of supported ports. */
+uint8_t dev_get_port_count(struct rte_eth_dev *dev, void *user_data);
+
+/*
+ The following functions are optional and are directly mapped from the DPDK PMD ops
+ structure. Each function if implemented is called after the ARK PMD implementation executes.
+*/
+int dev_configure(struct rte_eth_dev *dev, void *user_data);
+int dev_start(struct rte_eth_dev *dev, void *user_data);
+void dev_stop(struct rte_eth_dev *dev, void *user_data);
+void dev_close(struct rte_eth_dev *dev, void *user_data);
+int link_update(struct rte_eth_dev *dev, int wait_to_complete,
+ void *user_data);
+int dev_set_link_up(struct rte_eth_dev *dev, void *user_data);
+int dev_set_link_down(struct rte_eth_dev *dev, void *user_data);
+void stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats,
+ void *user_data);
+void stats_reset(struct rte_eth_dev *dev, void *user_data);
+void mac_addr_add(struct rte_eth_dev *dev,
+ struct ether_addr *macadr, uint32_t index, uint32_t pool, void *user_data);
+void mac_addr_remove(struct rte_eth_dev *dev, uint32_t index, void *user_data);
+void mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ void *user_data);
+
+#endif
new file mode 100644
@@ -0,0 +1,164 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_GLOBAL_H_
+#define _ARK_GLOBAL_H_
+
+#include <time.h>
+#include <assert.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_version.h>
+
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+#include "ark_pktchkr.h"
+
+#define ETH_ARK_ARG_MAXLEN 64
+#define ARK_SYSCTRL_BASE 0x0
+#define ARK_PKTGEN_BASE 0x10000
+#define ARK_MPURx_BASE 0x20000
+#define ARK_UDM_BASE 0x30000
+#define ARK_MPUTx_BASE 0x40000
+#define ARK_DDM_BASE 0x60000
+#define ARK_CMAC_BASE 0x80000
+#define ARK_PKTDIR_BASE 0xA0000
+#define ARK_PKTCHKR_BASE 0x90000
+#define ARK_RCPACING_BASE 0xB0000
+#define ARK_EXTERNAL_BASE 0x100000
+#define ARK_MPU_QOFFSET 0x00100
+#define ARK_MAX_PORTS 8
+
+#define Offset8(n) (n)
+#define Offset16(n) ((n) / 2)
+#define Offset32(n) ((n) / 4)
+#define Offset64(n) ((n) / 8)
+
+/*
+ * Structure to store private data for each PF/VF instance.
+ */
+#define DefPtr(type, name) \
+ union type { \
+ uint64_t *t64; \
+ uint32_t *t32; \
+ uint16_t *t16; \
+ uint8_t *t8; \
+ void *v; \
+ } name
+
+#define SetPtr(bar, ark, mem, off) { \
+ ark->mem.t64 = (uint64_t *)&ark->bar[off]; \
+ ark->mem.t32 = (uint32_t *)&ark->bar[off]; \
+ ark->mem.t16 = (uint16_t *)&ark->bar[off]; \
+ ark->mem.t8 = (uint8_t *)&ark->bar[off]; \
+ }
+
+struct ark_port {
+ struct rte_eth_dev *eth_dev;
+ int id;
+};
+
+struct ark_user_ext {
+ void *(*dev_init) (struct rte_eth_dev *, void *abar, int port_id);
+ void (*dev_uninit) (struct rte_eth_dev *, void *);
+ int (*dev_get_port_count) (struct rte_eth_dev *, void *);
+ int (*dev_configure) (struct rte_eth_dev *, void *);
+ int (*dev_start) (struct rte_eth_dev *, void *);
+ void (*dev_stop) (struct rte_eth_dev *, void *);
+ void (*dev_close) (struct rte_eth_dev *, void *);
+ int (*link_update) (struct rte_eth_dev *, int wait_to_complete, void *);
+ int (*dev_set_link_up) (struct rte_eth_dev *, void *);
+ int (*dev_set_link_down) (struct rte_eth_dev *, void *);
+ void (*stats_get) (struct rte_eth_dev *, struct rte_eth_stats *, void *);
+ void (*stats_reset) (struct rte_eth_dev *, void *);
+ void (*mac_addr_add) (struct rte_eth_dev *,
+ struct ether_addr *, uint32_t, uint32_t, void *);
+ void (*mac_addr_remove) (struct rte_eth_dev *, uint32_t, void *);
+ void (*mac_addr_set) (struct rte_eth_dev *, struct ether_addr *, void *);
+};
+
+struct ark_adapter {
+
+ /* User extension private data */
+ void *user_data;
+
+ /* Pointers to packet generator and checker */
+ int start_pg;
+ ArkPktGen_t pg;
+ ArkPktChkr_t pc;
+ ArkPktDir_t pd;
+
+ struct ark_port port[ARK_MAX_PORTS];
+ int num_ports;
+
+ /* Common for both PF and VF */
+ struct rte_eth_dev *eth_dev;
+
+ void *dHandle;
+ struct ark_user_ext user_ext;
+
+ /* Our Bar 0 */
+ uint8_t *bar0;
+
+ /* A Bar */
+ uint8_t *Abar;
+
+ /* Arkville demo block offsets */
+ DefPtr(SysCtrl, sysctrl);
+ DefPtr(PktGen, pktgen);
+ DefPtr(MpuRx, mpurx);
+ DefPtr(UDM, udm);
+ DefPtr(MpuTx, mputx);
+ DefPtr(DDM, ddm);
+ DefPtr(CMAC, cmac);
+ DefPtr(External, external);
+ DefPtr(PktDir, pktdir);
+ DefPtr(PktChkr, pktchkr);
+
+ int started;
+ uint16_t rxQueues;
+ uint16_t txQueues;
+
+ struct ark_rqpace_t *rqpacing;
+};
+
+typedef uint32_t *ark_t;
+
+#endif
new file mode 100644
@@ -0,0 +1,168 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_debug.h"
+#include "ark_mpu.h"
+
+uint16_t
+ark_api_num_queues(struct ark_mpu_t *mpu)
+{
+ return mpu->hw.numQueues;
+}
+
+uint16_t
+ark_api_num_queues_per_port(struct ark_mpu_t *mpu, uint16_t ark_ports)
+{
+ return mpu->hw.numQueues / ark_ports;
+}
+
+int
+ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t objSize)
+{
+ uint32_t version;
+
+ version = mpu->id.vernum & 0x0000FF00;
+ if ((mpu->id.idnum != 0x2055504d) || (mpu->hw.objSize != objSize)
+ || version != 0x00003100) {
+ fprintf(stderr,
+ " MPU module not found as expected %08x \"%c%c%c%c"
+ "%c%c%c%c\"\n", mpu->id.idnum, mpu->id.id[0], mpu->id.id[1],
+ mpu->id.id[2], mpu->id.id[3], mpu->id.ver[0], mpu->id.ver[1],
+ mpu->id.ver[2], mpu->id.ver[3]);
+ fprintf(stderr,
+ " MPU HW numQueues: %u hwDepth %u, objSize: %u, objPerMRR: %u Expected size %u\n",
+ mpu->hw.numQueues, mpu->hw.hwDepth, mpu->hw.objSize,
+ mpu->hw.objPerMRR, objSize);
+ return -1;
+ }
+ return 0;
+}
+
+void
+ark_mpu_stop(struct ark_mpu_t *mpu)
+{
+ mpu->cfg.command = MPU_CMD_Stop;
+}
+
+void
+ark_mpu_start(struct ark_mpu_t *mpu)
+{
+ mpu->cfg.command = MPU_CMD_Run; /* run state */
+}
+
+int
+ark_mpu_reset(struct ark_mpu_t *mpu)
+{
+
+ int cnt = 0;
+
+ mpu->cfg.command = MPU_CMD_Reset; /* reset */
+
+ while (mpu->cfg.command != MPU_CMD_Idle) {
+ if (cnt++ > 1000)
+ break;
+ usleep(10);
+ }
+ if (mpu->cfg.command != MPU_CMD_Idle) {
+ mpu->cfg.command = MPU_CMD_ForceReset; /* forced reset */
+ usleep(10);
+ }
+ ark_mpu_reset_stats(mpu);
+ return mpu->cfg.command != MPU_CMD_Idle;
+}
+
+void
+ark_mpu_reset_stats(struct ark_mpu_t *mpu)
+{
+ mpu->stats.pciRequest = 1; /* reset stats */
+}
+
+int
+ark_mpu_configure(struct ark_mpu_t *mpu, phys_addr_t ring, uint32_t ringSize,
+ int isTx)
+{
+ ark_mpu_reset(mpu);
+
+ if (!rte_is_power_of_2(ringSize)) {
+ fprintf(stderr, "ARKP Invalid ring size for MPU %d\n", ringSize);
+ return -1;
+ }
+
+ mpu->cfg.ringBase = ring;
+ mpu->cfg.ringSize = ringSize;
+ mpu->cfg.ringMask = ringSize - 1;
+ mpu->cfg.minHostMove = isTx ? 1 : mpu->hw.objPerMRR;
+ mpu->cfg.minHWMove = mpu->hw.objPerMRR;
+ mpu->cfg.swProdIndex = 0;
+ mpu->cfg.hwConsIndex = 0;
+ return 0;
+}
+
+void
+ark_mpu_dump(struct ark_mpu_t *mpu, const char *code, uint16_t qid)
+{
+ /* DUMP to see that we have started */
+ ARK_DEBUG_TRACE
+ ("ARKP MPU: %s Q: %3u swProd %u, hwCons: %u\n", code, qid,
+ mpu->cfg.swProdIndex, mpu->cfg.hwConsIndex);
+ ARK_DEBUG_TRACE
+ ("ARKP MPU: %s state: %d count %d, reserved %d data 0x%08x_%08x 0x%08x_%08x\n",
+ code, mpu->debug.state, mpu->debug.count, mpu->debug.reserved,
+ mpu->debug.peek[1], mpu->debug.peek[0], mpu->debug.peek[3],
+ mpu->debug.peek[2]
+ );
+ ARK_DEBUG_STATS
+ ("ARKP MPU: %s Q: %3u" FMT_SU64 FMT_SU64 FMT_SU64 FMT_SU64
+ FMT_SU64 FMT_SU64 FMT_SU64 "\n", code, qid,
+ "PCI Request:", mpu->stats.pciRequest,
+ "QueueEmpty", mpu->stats.qEmpty,
+ "QueueQ1", mpu->stats.qQ1,
+ "QueueQ2", mpu->stats.qQ2,
+ "QueueQ3", mpu->stats.qQ3,
+ "QueueQ4", mpu->stats.qQ4,
+ "QueueFull", mpu->stats.qFull
+ );
+}
+
+void
+ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t qId)
+{
+ ARK_DEBUG_TRACE
+ ("MPU Setup Q: %u"
+ FMT_SPTR "\n", qId,
+ "ringBase", (void *) mpu->cfg.ringBase
+ );
+
+}
new file mode 100644
@@ -0,0 +1,143 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_MPU_H_
+#define _ARK_MPU_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/*
+ * MPU hardware structures
+ */
+
+#define ARK_MPU_ID 0x00
+struct ark_mpu_id_t {
+ union {
+ char id[4];
+ uint32_t idnum;
+ };
+ union {
+ char ver[4];
+ uint32_t vernum;
+ };
+ uint32_t physId;
+ uint32_t mrrCode;
+};
+
+#define ARK_MPU_HW 0x010
+struct ark_mpu_hw_t {
+ uint16_t numQueues;
+ uint16_t reserved;
+ uint32_t hwDepth;
+ uint32_t objSize;
+ uint32_t objPerMRR;
+};
+
+#define ARK_MPU_CFG 0x040
+struct ark_mpu_cfg_t {
+ phys_addr_t ringBase; /* phys_addr_t is a uint64_t */
+ uint32_t ringSize;
+ uint32_t ringMask;
+ uint32_t minHostMove;
+ uint32_t minHWMove;
+ volatile uint32_t swProdIndex;
+ volatile uint32_t hwConsIndex;
+ volatile uint32_t command;
+};
+enum ARK_MPU_COMMAND {
+ MPU_CMD_Idle = 1, MPU_CMD_Run = 2, MPU_CMD_Stop = 4, MPU_CMD_Reset =
+ 8, MPU_CMD_ForceReset = 16, MPU_COMMAND_LIMIT = 0xFFFFFFFF
+};
+
+#define ARK_MPU_STATS 0x080
+struct ark_mpu_stats_t {
+ volatile uint64_t pciRequest;
+ volatile uint64_t qEmpty;
+ volatile uint64_t qQ1;
+ volatile uint64_t qQ2;
+ volatile uint64_t qQ3;
+ volatile uint64_t qQ4;
+ volatile uint64_t qFull;
+};
+
+#define ARK_MPU_DEBUG 0x0C0
+struct ark_mpu_debug_t {
+ volatile uint32_t state;
+ uint32_t reserved;
+ volatile uint32_t count;
+ volatile uint32_t take;
+ volatile uint32_t peek[4];
+};
+
+/* Consolidated structure */
+struct ark_mpu_t {
+ struct ark_mpu_id_t id;
+ uint8_t reserved0[(ARK_MPU_HW - ARK_MPU_ID)
+ - sizeof(struct ark_mpu_id_t)];
+ struct ark_mpu_hw_t hw;
+ uint8_t reserved1[(ARK_MPU_CFG - ARK_MPU_HW) -
+ sizeof(struct ark_mpu_hw_t)];
+ struct ark_mpu_cfg_t cfg;
+ uint8_t reserved2[(ARK_MPU_STATS - ARK_MPU_CFG) -
+ sizeof(struct ark_mpu_cfg_t)];
+ struct ark_mpu_stats_t stats;
+ uint8_t reserved3[(ARK_MPU_DEBUG - ARK_MPU_STATS) -
+ sizeof(struct ark_mpu_stats_t)];
+ struct ark_mpu_debug_t debug;
+};
+
+uint16_t ark_api_num_queues(struct ark_mpu_t *mpu);
+uint16_t ark_api_num_queues_per_port(struct ark_mpu_t *mpu,
+ uint16_t ark_ports);
+int ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t objSize);
+void ark_mpu_stop(struct ark_mpu_t *mpu);
+void ark_mpu_start(struct ark_mpu_t *mpu);
+int ark_mpu_reset(struct ark_mpu_t *mpu);
+int ark_mpu_configure(struct ark_mpu_t *mpu, phys_addr_t ring,
+ uint32_t ringSize, int isTx);
+
+void ark_mpu_dump(struct ark_mpu_t *mpu, const char *msg, uint16_t idx);
+void ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t qid);
+void ark_mpu_reset_stats(struct ark_mpu_t *mpu);
+
+static inline void
+ark_mpu_set_producer(struct ark_mpu_t *mpu, uint32_t idx)
+{
+ mpu->cfg.swProdIndex = idx;
+}
+
+// #define ark_mpu_set_producer(MPU, IDX) {(MPU)->cfg.swProdIndex = (IDX);}
+
+#endif
new file mode 100644
@@ -0,0 +1,445 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <getopt.h>
+#include <sys/time.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include "ark_pktchkr.h"
+#include "ark_debug.h"
+
+static int setArg(char *arg, char *val);
+static int ark_pmd_pktchkr_isGenForever(ArkPktChkr_t handle);
+
+#define ARK_MAX_STR_LEN 64
+union OptV {
+ int Int;
+ int Bool;
+ uint64_t Long;
+ char Str[ARK_MAX_STR_LEN];
+};
+
+enum OPType {
+ OTInt,
+ OTLong,
+ OTBool,
+ OTString
+};
+
+struct Options {
+ char opt[ARK_MAX_STR_LEN];
+ enum OPType t;
+ union OptV v;
+};
+
+static struct Options toptions[] = {
+ {{"configure"}, OTBool, {1} },
+ {{"port"}, OTInt, {0} },
+ {{"mac-dump"}, OTBool, {0} },
+ {{"dg-mode"}, OTBool, {1} },
+ {{"run"}, OTBool, {0} },
+ {{"stop"}, OTBool, {0} },
+ {{"dump"}, OTBool, {0} },
+ {{"enResync"}, OTBool, {0} },
+ {{"tuserErrVal"}, OTInt, {1} },
+ {{"genForever"}, OTBool, {0} },
+ {{"enSlavedStart"}, OTBool, {0} },
+ {{"varyLength"}, OTBool, {0} },
+ {{"incrPayload"}, OTInt, {0} },
+ {{"incrFirstByte"}, OTBool, {0} },
+ {{"insSeqNum"}, OTBool, {0} },
+ {{"insTimeStamp"}, OTBool, {1} },
+ {{"insUDPHdr"}, OTBool, {0} },
+ {{"numPkts"}, OTLong, .v.Long = 10000000000000L},
+ {{"payloadByte"}, OTInt, {0x55} },
+ {{"pktSpacing"}, OTInt, {60} },
+ {{"pktSizeMin"}, OTInt, {2005} },
+ {{"pktSizeMax"}, OTInt, {1514} },
+ {{"pktSizeIncr"}, OTInt, {1} },
+ {{"ethType"}, OTInt, {0x0800} },
+ {{"srcMACAddr"}, OTLong, .v.Long = 0xDC3CF6425060L},
+ {{"dstMACAddr"}, OTLong, .v.Long = 0x112233445566L},
+ {{"hdrDW0"}, OTInt, {0x0016e319} },
+ {{"hdrDW1"}, OTInt, {0x27150004} },
+ {{"hdrDW2"}, OTInt, {0x76967bda} },
+ {{"hdrDW3"}, OTInt, {0x08004500} },
+ {{"hdrDW4"}, OTInt, {0x005276ed} },
+ {{"hdrDW5"}, OTInt, {0x40004006} },
+ {{"hdrDW6"}, OTInt, {0x56cfc0a8} },
+ {{"startOffset"}, OTInt, {0} },
+ {{"dstIP"}, OTString, .v.Str = "169.254.10.240"},
+ {{"dstPort"}, OTInt, {65536} },
+ {{"srcPort"}, OTInt, {65536} },
+};
+
+ArkPktChkr_t
+ark_pmd_pktchkr_init(void *addr, int ord, int l2_mode)
+{
+ struct ArkPktChkrInst *inst =
+ rte_malloc("ArkPktChkrInst", sizeof(struct ArkPktChkrInst), 0);
+ inst->sregs = (struct ArkPktChkrStatRegs *) addr;
+ inst->cregs = (struct ArkPktChkrCtlRegs *) (((uint8_t *) addr) + 0x100);
+ inst->ordinal = ord;
+ inst->l2_mode = l2_mode;
+ return inst;
+}
+
+void
+ark_pmd_pktchkr_uninit(ArkPktChkr_t handle)
+{
+ rte_free(handle);
+}
+
+void
+ark_pmd_pktchkr_run(ArkPktChkr_t handle)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ inst->sregs->pktStartStop = 0;
+ inst->sregs->pktStartStop = 0x1;
+}
+
+int
+ark_pmd_pktchkr_stopped(ArkPktChkr_t handle)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+ uint32_t r = inst->sregs->pktStartStop;
+
+ return (((r >> 16) & 1) == 1);
+}
+
+void
+ark_pmd_pktchkr_stop(ArkPktChkr_t handle)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+ int waitCycle = 10;
+
+ inst->sregs->pktStartStop = 0;
+ while (!ark_pmd_pktchkr_stopped(handle) && (waitCycle > 0)) {
+ usleep(1000);
+ waitCycle--;
+ ARK_DEBUG_TRACE("Waiting for pktchk %d to stop...\n", inst->ordinal);
+ }
+ ARK_DEBUG_TRACE("pktchk %d stopped.\n", inst->ordinal);
+}
+
+int
+ark_pmd_pktchkr_isRunning(ArkPktChkr_t handle)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+ uint32_t r = inst->sregs->pktStartStop;
+
+ return ((r & 1) == 1);
+}
+
+static void
+ark_pmd_pktchkr_setPktCtrl(ArkPktChkr_t handle, uint32_t genForever,
+ uint32_t varyLength, uint32_t incrPayload, uint32_t incrFirstByte,
+ uint32_t insSeqNum, uint32_t insUDPHdr, uint32_t enResync,
+ uint32_t tuserErrVal, uint32_t insTimeStamp)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+ uint32_t r = (tuserErrVal << 16) | (enResync << 0);
+
+ inst->sregs->pktCtrl = r;
+ if (!inst->l2_mode) {
+ insUDPHdr = 0;
+ }
+ r = (genForever << 24) | (varyLength << 16) |
+ (incrPayload << 12) | (incrFirstByte << 8) |
+ (insTimeStamp << 5) | (insSeqNum << 4) | insUDPHdr;
+ inst->cregs->pktCtrl = r;
+}
+
+static
+ int
+ark_pmd_pktchkr_isGenForever(ArkPktChkr_t handle)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+ uint32_t r = inst->cregs->pktCtrl;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+int
+ark_pmd_pktchkr_waitDone(ArkPktChkr_t handle)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ if (ark_pmd_pktchkr_isGenForever(handle)) {
+ ARK_DEBUG_TRACE
+ ("Error: waitDone will not terminate because genForever=1\n");
+ return -1;
+ }
+ int waitCycle = 10;
+
+	while (!ark_pmd_pktchkr_stopped(handle) && (waitCycle > 0)) {
+		usleep(1000);
+		waitCycle--;
+		ARK_DEBUG_TRACE
+			("Waiting for packet checker %d's internal pktgen to finish sending...\n",
+			inst->ordinal);
+	}
+	ARK_DEBUG_TRACE("pktchk %d's pktgen done.\n", inst->ordinal);
+	return 0;
+}
+
+int
+ark_pmd_pktchkr_getPktsSent(ArkPktChkr_t handle)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ return inst->cregs->pktsSent;
+}
+
+void
+ark_pmd_pktchkr_setPayloadByte(ArkPktChkr_t handle, uint32_t b)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ inst->cregs->pktPayload = b;
+}
+
+void
+ark_pmd_pktchkr_setPktSizeMin(ArkPktChkr_t handle, uint32_t x)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ inst->cregs->pktSizeMin = x;
+}
+
+void
+ark_pmd_pktchkr_setPktSizeMax(ArkPktChkr_t handle, uint32_t x)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ inst->cregs->pktSizeMax = x;
+}
+
+void
+ark_pmd_pktchkr_setPktSizeIncr(ArkPktChkr_t handle, uint32_t x)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ inst->cregs->pktSizeIncr = x;
+}
+
+void
+ark_pmd_pktchkr_setNumPkts(ArkPktChkr_t handle, uint32_t x)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ inst->cregs->numPkts = x;
+}
+
+void
+ark_pmd_pktchkr_setSrcMACAddr(ArkPktChkr_t handle, uint64_t macAddr)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ inst->cregs->srcMACAddrH = (macAddr >> 32) & 0xffff;
+ inst->cregs->srcMACAddrL = macAddr & 0xffffffff;
+}
+
+void
+ark_pmd_pktchkr_setDstMACAddr(ArkPktChkr_t handle, uint64_t macAddr)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ inst->cregs->dstMACAddrH = (macAddr >> 32) & 0xffff;
+ inst->cregs->dstMACAddrL = macAddr & 0xffffffff;
+}
+
+void
+ark_pmd_pktchkr_setEthType(ArkPktChkr_t handle, uint32_t x)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ inst->cregs->ethType = x;
+}
+
+void
+ark_pmd_pktchkr_setHdrDW(ArkPktChkr_t handle, uint32_t *hdr)
+{
+ uint32_t i;
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ for (i = 0; i < 7; i++) {
+ inst->cregs->hdrDW[i] = hdr[i];
+ }
+}
+
+void
+ark_pmd_pktchkr_dump_stats(ArkPktChkr_t handle)
+{
+ struct ArkPktChkrInst *inst = (struct ArkPktChkrInst *) handle;
+
+ fprintf(stderr, "pktsRcvd = (%'u)\n", inst->sregs->pktsRcvd);
+ fprintf(stderr, "bytesRcvd = (%'" PRIu64 ")\n",
+ inst->sregs->bytesRcvd);
+ fprintf(stderr, "pktsOK = (%'u)\n", inst->sregs->pktsOK);
+ fprintf(stderr, "pktsMismatch = (%'u)\n", inst->sregs->pktsMismatch);
+ fprintf(stderr, "pktsErr = (%'u)\n", inst->sregs->pktsErr);
+ fprintf(stderr, "firstMismatch = (%'u)\n", inst->sregs->firstMismatch);
+ fprintf(stderr, "resyncEvents = (%'u)\n", inst->sregs->resyncEvents);
+ fprintf(stderr, "pktsMissing = (%'u)\n", inst->sregs->pktsMissing);
+ fprintf(stderr, "minLatency = (%'u)\n", inst->sregs->minLatency);
+ fprintf(stderr, "maxLatency = (%'u)\n", inst->sregs->maxLatency);
+}
+
+static struct Options *
+OPTIONS(const char *id)
+{
+ unsigned i;
+
+ for (i = 0; i < sizeof(toptions) / sizeof(struct Options); i++) {
+ if (strcmp(id, toptions[i].opt) == 0) {
+ return &toptions[i];
+ }
+ }
+ PMD_DRV_LOG(ERR,
+ "pktgen: Could not find requested option !!, option = %s\n", id);
+ return NULL;
+}
+
+static int
+setArg(char *arg, char *val)
+{
+ struct Options *o = OPTIONS(arg);
+
+ if (o) {
+ switch (o->t) {
+ case OTInt:
+ case OTBool:
+ o->v.Int = atoi(val);
+ break;
+ case OTLong:
+			o->v.Long = atoll(val);
+ break;
+ case OTString:
+			snprintf(o->v.Str, ARK_MAX_STR_LEN, "%s", val);
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/******
+ * Arg format = "opt0=v,optN=v ..."
+ ******/
+void
+ark_pmd_pktchkr_parse(char *args)
+{
+ char *argv, *v;
+ const char toks[] = "= \n\t\v\f\r";
+
+ argv = strtok(args, toks);
+ v = strtok(NULL, toks);
+ setArg(argv, v);
+ while (argv && v) {
+ argv = strtok(NULL, toks);
+ v = strtok(NULL, toks);
+ if (argv && v)
+ setArg(argv, v);
+ }
+}
+
+static int32_t parseIPV4string(char const *ipAddress);
+static int32_t
+parseIPV4string(char const *ipAddress)
+{
+ unsigned int ip[4];
+
+ if (4 != sscanf(ipAddress, "%u.%u.%u.%u", &ip[0], &ip[1], &ip[2], &ip[3]))
+ return 0;
+ return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul;
+}
+
+void
+ark_pmd_pktchkr_setup(ArkPktChkr_t handle)
+{
+ uint32_t hdr[7];
+ int32_t dstIp = parseIPV4string(OPTIONS("dstIP")->v.Str);
+
+ if (!OPTIONS("stop")->v.Bool && OPTIONS("configure")->v.Bool) {
+
+ ark_pmd_pktchkr_setPayloadByte(handle, OPTIONS("payloadByte")->v.Int);
+		ark_pmd_pktchkr_setSrcMACAddr(handle, OPTIONS("srcMACAddr")->v.Long);
+ ark_pmd_pktchkr_setDstMACAddr(handle, OPTIONS("dstMACAddr")->v.Long);
+
+ ark_pmd_pktchkr_setEthType(handle, OPTIONS("ethType")->v.Int);
+ if (OPTIONS("dg-mode")->v.Bool) {
+ hdr[0] = OPTIONS("hdrDW0")->v.Int;
+ hdr[1] = OPTIONS("hdrDW1")->v.Int;
+ hdr[2] = OPTIONS("hdrDW2")->v.Int;
+ hdr[3] = OPTIONS("hdrDW3")->v.Int;
+ hdr[4] = OPTIONS("hdrDW4")->v.Int;
+ hdr[5] = OPTIONS("hdrDW5")->v.Int;
+ hdr[6] = OPTIONS("hdrDW6")->v.Int;
+ } else {
+ hdr[0] = dstIp;
+ hdr[1] = OPTIONS("dstPort")->v.Int;
+ hdr[2] = OPTIONS("srcPort")->v.Int;
+ hdr[3] = 0;
+ hdr[4] = 0;
+ hdr[5] = 0;
+ hdr[6] = 0;
+ }
+ ark_pmd_pktchkr_setHdrDW(handle, hdr);
+		ark_pmd_pktchkr_setNumPkts(handle, OPTIONS("numPkts")->v.Long);
+ ark_pmd_pktchkr_setPktSizeMin(handle, OPTIONS("pktSizeMin")->v.Int);
+ ark_pmd_pktchkr_setPktSizeMax(handle, OPTIONS("pktSizeMax")->v.Int);
+ ark_pmd_pktchkr_setPktSizeIncr(handle, OPTIONS("pktSizeIncr")->v.Int);
+ ark_pmd_pktchkr_setPktCtrl(handle,
+ OPTIONS("genForever")->v.Bool,
+ OPTIONS("varyLength")->v.Bool,
+ OPTIONS("incrPayload")->v.Bool,
+ OPTIONS("incrFirstByte")->v.Bool,
+ OPTIONS("insSeqNum")->v.Int,
+ OPTIONS("insUDPHdr")->v.Bool,
+ OPTIONS("enResync")->v.Bool,
+ OPTIONS("tuserErrVal")->v.Int, OPTIONS("insTimeStamp")->v.Int);
+ }
+
+ if (OPTIONS("stop")->v.Bool)
+ ark_pmd_pktchkr_stop(handle);
+
+ if (OPTIONS("run")->v.Bool) {
+ ARK_DEBUG_TRACE("Starting packet checker on port %d\n",
+ OPTIONS("port")->v.Int);
+ ark_pmd_pktchkr_run(handle);
+ }
+
+}
new file mode 100644
@@ -0,0 +1,114 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_PKTCHKR_H_
+#define _ARK_PKTCHKR_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_eal.h>
+
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+
+/* BAR offset of the packet-checker hardware module. */
+#define ARK_PKTCHKR_BASE_ADR 0x90000
+
+/* Opaque handle to one packet-checker instance. */
+typedef void *ArkPktChkr_t;
+
+/*
+ * Statistics register file read back from hardware.  The layout mirrors
+ * the FPGA register map exactly, hence the packed attribute.
+ */
+struct ArkPktChkrStatRegs {
+	uint32_t r0;
+	uint32_t pktStartStop;
+	uint32_t pktCtrl;
+	uint32_t pktsRcvd;
+	uint64_t bytesRcvd;
+	uint32_t pktsOK;
+	uint32_t pktsMismatch;
+	uint32_t pktsErr;
+	uint32_t firstMismatch;
+	uint32_t resyncEvents;
+	uint32_t pktsMissing;
+	uint32_t minLatency;
+	uint32_t maxLatency;
+} __attribute__ ((packed));
+
+/*
+ * Control register file used to program the expected traffic pattern.
+ * Layout matches the FPGA register map (packed).
+ */
+struct ArkPktChkrCtlRegs {
+	uint32_t pktCtrl;
+	uint32_t pktPayload;
+	uint32_t pktSizeMin;
+	uint32_t pktSizeMax;
+	uint32_t pktSizeIncr;
+	uint32_t numPkts;
+	uint32_t pktsSent;
+	uint32_t srcMACAddrL;
+	uint32_t srcMACAddrH;
+	uint32_t dstMACAddrL;
+	uint32_t dstMACAddrH;
+	uint32_t ethType;
+	uint32_t hdrDW[7];
+} __attribute__ ((packed));
+
+/* Software instance state; sregs/cregs point into the mapped device BAR. */
+struct ArkPktChkrInst {
+	struct rte_eth_dev_info *dev_info;
+	volatile struct ArkPktChkrStatRegs *sregs;
+	volatile struct ArkPktChkrCtlRegs *cregs;
+	int l2_mode;
+	int ordinal;
+};
+
+/* packet checker functions */
+ArkPktChkr_t ark_pmd_pktchkr_init(void *addr, int ord, int l2_mode);
+void ark_pmd_pktchkr_uninit(ArkPktChkr_t handle);
+void ark_pmd_pktchkr_run(ArkPktChkr_t handle);
+int ark_pmd_pktchkr_stopped(ArkPktChkr_t handle);
+void ark_pmd_pktchkr_stop(ArkPktChkr_t handle);
+int ark_pmd_pktchkr_isRunning(ArkPktChkr_t handle);
+int ark_pmd_pktchkr_getPktsSent(ArkPktChkr_t handle);
+void ark_pmd_pktchkr_setPayloadByte(ArkPktChkr_t handle, uint32_t b);
+void ark_pmd_pktchkr_setPktSizeMin(ArkPktChkr_t handle, uint32_t x);
+void ark_pmd_pktchkr_setPktSizeMax(ArkPktChkr_t handle, uint32_t x);
+void ark_pmd_pktchkr_setPktSizeIncr(ArkPktChkr_t handle, uint32_t x);
+void ark_pmd_pktchkr_setNumPkts(ArkPktChkr_t handle, uint32_t x);
+void ark_pmd_pktchkr_setSrcMACAddr(ArkPktChkr_t handle, uint64_t macAddr);
+void ark_pmd_pktchkr_setDstMACAddr(ArkPktChkr_t handle, uint64_t macAddr);
+void ark_pmd_pktchkr_setEthType(ArkPktChkr_t handle, uint32_t x);
+void ark_pmd_pktchkr_setHdrDW(ArkPktChkr_t handle, uint32_t *hdr);
+void ark_pmd_pktchkr_parse(char *args);
+void ark_pmd_pktchkr_setup(ArkPktChkr_t handle);
+void ark_pmd_pktchkr_dump_stats(ArkPktChkr_t handle);
+int ark_pmd_pktchkr_waitDone(ArkPktChkr_t handle);
+
+#endif
new file mode 100644
@@ -0,0 +1,79 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ark_global.h"
+
+/*
+ * Allocate a packet-director instance bound to the register block at @base
+ * and put the hardware in its power-on-reset control state.
+ * Returns NULL on allocation failure (fix: the rte_malloc result was
+ * previously dereferenced without a check).
+ */
+ArkPktDir_t
+ark_pmd_pktdir_init(void *base)
+{
+	struct ArkPktDirInst *inst =
+	    rte_malloc("ArkPktDirInst", sizeof(struct ArkPktDirInst), 0);
+	if (inst == NULL)
+		return NULL;
+	inst->regs = (struct ArkPktDirRegs *) base;
+	inst->regs->ctrl = 0x00110110; /* POR state */
+	return inst;
+}
+
+/* Release an instance previously returned by ark_pmd_pktdir_init(). */
+void
+ark_pmd_pktdir_uninit(ArkPktDir_t handle)
+{
+	/* The handle is the allocation itself; just free it. */
+	rte_free(handle);
+}
+
+/* Program the packet-director control register with @v. */
+void
+ark_pmd_pktdir_setup(ArkPktDir_t handle, uint32_t v)
+{
+	struct ArkPktDirInst *pd = (struct ArkPktDirInst *) handle;
+
+	pd->regs->ctrl = v;
+}
+
+/* Read back the current control register value. */
+uint32_t
+ark_pmd_pktdir_status(ArkPktDir_t handle)
+{
+	struct ArkPktDirInst *pd = (struct ArkPktDirInst *) handle;
+
+	return pd->regs->ctrl;
+}
+
+/* Read the hardware stall counter. */
+uint32_t
+ark_pmd_pktdir_stallCnt(ArkPktDir_t handle)
+{
+	struct ArkPktDirInst *pd = (struct ArkPktDirInst *) handle;
+
+	return pd->regs->stallCnt;
+}
new file mode 100644
@@ -0,0 +1,68 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_PKTDIR_H_
+#define _ARK_PKTDIR_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_eal.h>
+
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+
+/* BAR offset of the packet-director hardware module. */
+#define ARK_PKTDIR_BASE_ADR  0xA0000
+
+/* Opaque handle to one packet-director instance. */
+typedef void *ArkPktDir_t;
+
+/* Hardware register layout; must match the FPGA map (packed). */
+struct ArkPktDirRegs {
+	uint32_t ctrl;
+	uint32_t status;
+	uint32_t stallCnt;
+} __attribute__ ((packed));
+
+/* Software instance state; regs points into the mapped device BAR. */
+struct ArkPktDirInst {
+	volatile struct ArkPktDirRegs *regs;
+};
+
+ArkPktDir_t ark_pmd_pktdir_init(void *base);
+void ark_pmd_pktdir_uninit(ArkPktDir_t handle);
+void ark_pmd_pktdir_setup(ArkPktDir_t handle, uint32_t v);
+uint32_t ark_pmd_pktdir_stallCnt(ArkPktDir_t handle);
+uint32_t ark_pmd_pktdir_status(ArkPktDir_t handle);
+
+#endif
new file mode 100644
@@ -0,0 +1,477 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <getopt.h>
+#include <sys/time.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include "ark_pktgen.h"
+#include "ark_debug.h"
+
+/* Maximum length of an option name or string value (incl. terminator). */
+#define ARK_MAX_STR_LEN 64
+/* Variant value storage for one option. */
+union OptV {
+	int Int;
+	int Bool;
+	uint64_t Long;
+	char Str[ARK_MAX_STR_LEN];
+};
+
+/* Which member of union OptV is valid for a given option. */
+enum OPType {
+	OTInt,
+	OTLong,
+	OTBool,
+	OTString
+};
+
+/* One named option: name, value type, and current value. */
+struct Options {
+	char opt[ARK_MAX_STR_LEN];
+	enum OPType t;
+	union OptV v;
+};
+
+/*
+ * Default option table for the packet generator.  Mutated in place by
+ * pmd_setArg() when ark_pmd_pktgen_parse() processes devargs.
+ */
+static struct Options toptions[] = {
+	{{"configure"}, OTBool, {1} },
+	{{"dg-mode"}, OTBool, {1} },
+	{{"run"}, OTBool, {0} },
+	{{"pause"}, OTBool, {0} },
+	{{"reset"}, OTBool, {0} },
+	{{"dump"}, OTBool, {0} },
+	{{"genForever"}, OTBool, {0} },
+	{{"enSlavedStart"}, OTBool, {0} },
+	{{"varyLength"}, OTBool, {0} },
+	{{"incrPayload"}, OTBool, {0} },
+	{{"incrFirstByte"}, OTBool, {0} },
+	{{"insSeqNum"}, OTBool, {0} },
+	{{"insTimeStamp"}, OTBool, {1} },
+	{{"insUDPHdr"}, OTBool, {0} },
+	{{"numPkts"}, OTLong, .v.Long = 100000000},
+	{{"payloadByte"}, OTInt, {0x55} },
+	{{"pktSpacing"}, OTInt, {130} },
+	/* NOTE(review): min (2006) is larger than max (1514) below -- confirm
+	 * these defaults are intended and not swapped. */
+	{{"pktSizeMin"}, OTInt, {2006} },
+	{{"pktSizeMax"}, OTInt, {1514} },
+	{{"pktSizeIncr"}, OTInt, {1} },
+	{{"ethType"}, OTInt, {0x0800} },
+	{{"srcMACAddr"}, OTLong, .v.Long = 0xDC3CF6425060L},
+	{{"dstMACAddr"}, OTLong, .v.Long = 0x112233445566L},
+	{{"hdrDW0"}, OTInt, {0x0016e319} },
+	{{"hdrDW1"}, OTInt, {0x27150004} },
+	{{"hdrDW2"}, OTInt, {0x76967bda} },
+	{{"hdrDW3"}, OTInt, {0x08004500} },
+	{{"hdrDW4"}, OTInt, {0x005276ed} },
+	{{"hdrDW5"}, OTInt, {0x40004006} },
+	{{"hdrDW6"}, OTInt, {0x56cfc0a8} },
+	{{"startOffset"}, OTInt, {0} },
+	{{"bytesPerCycle"}, OTInt, {10} },
+	{{"shaping"}, OTBool, {0} },
+	{{"dstIP"}, OTString, .v.Str = "169.254.10.240"},
+	/* NOTE(review): 65536 exceeds the valid 16-bit port range (0-65535)
+	 * -- confirm whether this is a deliberate "unset" sentinel. */
+	{{"dstPort"}, OTInt, {65536} },
+	{{"srcPort"}, OTInt, {65536} },
+};
+
+/*
+ * Allocate a packet-generator instance bound to the register block at @adr.
+ * @ord is the instance ordinal (used in log messages); @l2_mode selects
+ * raw-L2 operation.  Returns NULL on allocation failure (fix: the
+ * rte_malloc result was previously dereferenced without a check).
+ */
+ArkPktGen_t
+ark_pmd_pktgen_init(void *adr, int ord, int l2_mode)
+{
+	struct ArkPktGenInst *inst =
+	    rte_malloc("ArkPktGenInstPMD", sizeof(struct ArkPktGenInst), 0);
+	if (inst == NULL)
+		return NULL;
+	inst->regs = (struct ArkPktGenRegs *) adr;
+	inst->ordinal = ord;
+	inst->l2_mode = l2_mode;
+	return inst;
+}
+
+/* Release an instance previously returned by ark_pmd_pktgen_init(). */
+void
+ark_pmd_pktgen_uninit(ArkPktGen_t handle)
+{
+	/* The handle is the allocation itself. */
+	rte_free(handle);
+}
+
+/* Start packet generation: write 1 to the start/stop register. */
+void
+ark_pmd_pktgen_run(ArkPktGen_t handle)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->pktStartStop = 1;
+}
+
+/* Return non-zero when the generator reports the paused state (bit 16). */
+uint32_t
+ark_pmd_pktgen_paused(ArkPktGen_t handle)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	return ((pg->regs->pktStartStop >> 16) & 1) == 1;
+}
+
+/*
+ * Request a pause and poll (1 ms intervals, ~100 ms budget) until hardware
+ * reports the paused state; log an error and give up on timeout.
+ */
+void
+ark_pmd_pktgen_pause(ArkPktGen_t handle)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+	int tries = 0;
+
+	pg->regs->pktStartStop = 0;
+
+	while (!ark_pmd_pktgen_paused(handle)) {
+		usleep(1000);
+		if (tries++ > 100) {
+			PMD_DRV_LOG(ERR, "pktgen %d failed to pause.\n", pg->ordinal);
+			break;
+		}
+	}
+	ARK_DEBUG_TRACE("pktgen %d paused.\n", pg->ordinal);
+}
+
+/*
+ * Reset the generator.  The sequence matters: a running generator must be
+ * paused before the reset bit (bit 8 of pktStartStop) is written; an idle,
+ * unpaused generator needs no reset at all.
+ */
+void
+ark_pmd_pktgen_reset(ArkPktGen_t handle)
+{
+	struct ArkPktGenInst *inst = (struct ArkPktGenInst *) handle;
+
+	/* Nothing to do if the device is already idle. */
+	if (!ark_pmd_pktgen_isRunning(handle) && !ark_pmd_pktgen_paused(handle)) {
+		ARK_DEBUG_TRACE
+			("pktgen %d is not running and is not paused. No need to reset.\n",
+			inst->ordinal);
+		return;
+	}
+
+	/* Running but not yet paused: pause first so reset is safe. */
+	if (ark_pmd_pktgen_isRunning(handle) && !ark_pmd_pktgen_paused(handle)) {
+		ARK_DEBUG_TRACE("pktgen %d is not paused. Pausing first.\n",
+			inst->ordinal);
+		ark_pmd_pktgen_pause(handle);
+	}
+
+	ARK_DEBUG_TRACE("Resetting pktgen %d.\n", inst->ordinal);
+	inst->regs->pktStartStop = (1 << 8);
+}
+
+/* Return non-zero when transmission has completed (bit 24). */
+uint32_t
+ark_pmd_pktgen_txDone(ArkPktGen_t handle)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	return ((pg->regs->pktStartStop >> 24) & 1) == 1;
+}
+
+/* Return non-zero when the generator is running (bit 0). */
+uint32_t
+ark_pmd_pktgen_isRunning(ArkPktGen_t handle)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	return (pg->regs->pktStartStop & 1) == 1;
+}
+
+/* Return non-zero when continuous generation is enabled (pktCtrl bit 24). */
+uint32_t
+ark_pmd_pktgen_isGenForever(ArkPktGen_t handle)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	return ((pg->regs->pktCtrl >> 24) & 1) == 1;
+}
+
+/*
+ * Poll (1 ms intervals, up to 10 tries) until the generator reports
+ * transmit-done.  A forever-generator never asserts done, so warn rather
+ * than hang in that case.
+ */
+void
+ark_pmd_pktgen_waitDone(ArkPktGen_t handle)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+	int remaining;
+
+	if (ark_pmd_pktgen_isGenForever(handle)) {
+		PMD_DRV_LOG(ERR, "waitDone will not terminate because genForever=1\n");
+	}
+
+	for (remaining = 10;
+	     !ark_pmd_pktgen_txDone(handle) && remaining > 0; remaining--) {
+		usleep(1000);
+		ARK_DEBUG_TRACE("Waiting for pktgen %d to finish sending...\n",
+			pg->ordinal);
+	}
+	ARK_DEBUG_TRACE("pktgen %d done.\n", pg->ordinal);
+}
+
+/* Read the hardware count of packets sent so far. */
+uint32_t
+ark_pmd_pktgen_getPktsSent(ArkPktGen_t handle)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	return pg->regs->pktsSent;
+}
+
+/* Program the payload fill-byte register. */
+void
+ark_pmd_pktgen_setPayloadByte(ArkPktGen_t handle, uint32_t b)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->pktPayload = b;
+}
+
+/* Program the inter-packet spacing register. */
+void
+ark_pmd_pktgen_setPktSpacing(ArkPktGen_t handle, uint32_t x)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->pktSpacing = x;
+}
+
+/* Program the minimum generated packet size. */
+void
+ark_pmd_pktgen_setPktSizeMin(ArkPktGen_t handle, uint32_t x)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->pktSizeMin = x;
+}
+
+/* Program the maximum generated packet size. */
+void
+ark_pmd_pktgen_setPktSizeMax(ArkPktGen_t handle, uint32_t x)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->pktSizeMax = x;
+}
+
+/* Program the packet-size increment used when length varies. */
+void
+ark_pmd_pktgen_setPktSizeIncr(ArkPktGen_t handle, uint32_t x)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->pktSizeIncr = x;
+}
+
+/* Program the total number of packets to generate. */
+void
+ark_pmd_pktgen_setNumPkts(ArkPktGen_t handle, uint32_t x)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->numPkts = x;
+}
+
+/* Split the 48-bit source MAC across the high/low register pair. */
+void
+ark_pmd_pktgen_setSrcMACAddr(ArkPktGen_t handle, uint64_t macAddr)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->srcMACAddrH = (macAddr >> 32) & 0xffff;
+	pg->regs->srcMACAddrL = macAddr & 0xffffffff;
+}
+
+/* Split the 48-bit destination MAC across the high/low register pair. */
+void
+ark_pmd_pktgen_setDstMACAddr(ArkPktGen_t handle, uint64_t macAddr)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->dstMACAddrH = (macAddr >> 32) & 0xffff;
+	pg->regs->dstMACAddrL = macAddr & 0xffffffff;
+}
+
+/* Program the Ethernet type field register. */
+void
+ark_pmd_pktgen_setEthType(ArkPktGen_t handle, uint32_t x)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->ethType = x;
+}
+
+/* Copy all 7 header double-words from @hdr into the device registers. */
+void
+ark_pmd_pktgen_setHdrDW(ArkPktGen_t handle, uint32_t *hdr)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+	uint32_t dw;
+
+	for (dw = 0; dw < 7; dw++)
+		pg->regs->hdrDW[dw] = hdr[dw];
+}
+
+/* Program the start-offset register. */
+void
+ark_pmd_pktgen_setStartOffset(ArkPktGen_t handle, uint32_t x)
+{
+	struct ArkPktGenInst *pg = (struct ArkPktGenInst *) handle;
+
+	pg->regs->startOffset = x;
+}
+
+/*
+ * Look up an option by name in the static table.  Returns a pointer into
+ * toptions[], or NULL (after logging an error) when @id is unknown.
+ */
+static struct Options *
+OPTIONS(const char *id)
+{
+	const unsigned numOpts = sizeof(toptions) / sizeof(toptions[0]);
+	unsigned idx;
+
+	for (idx = 0; idx < numOpts; idx++) {
+		if (!strcmp(id, toptions[idx].opt))
+			return &toptions[idx];
+	}
+
+	PMD_DRV_LOG
+		(ERR,
+		"pktgen: Could not find requested option !!, option = %s\n", id
+		);
+	return NULL;
+}
+
+static int pmd_setArg(char *arg, char *val);
+/*
+ * Parse @val according to the declared type of option @arg and store it in
+ * the option table.  Returns 1 on success, 0 when @arg is unknown.
+ * Fixes: OTLong values were stored through v.Int, truncating 64-bit values
+ * such as MAC addresses; strncpy could leave v.Str unterminated.
+ */
+static int
+pmd_setArg(char *arg, char *val)
+{
+	struct Options *o = OPTIONS(arg);
+
+	if (o) {
+		switch (o->t) {
+		case OTInt:
+		case OTBool:
+			o->v.Int = atoi(val);
+			break;
+		case OTLong:
+			o->v.Long = atoll(val);
+			break;
+		case OTString:
+			strncpy(o->v.Str, val, ARK_MAX_STR_LEN - 1);
+			o->v.Str[ARK_MAX_STR_LEN - 1] = 0;
+			break;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+/******
+ * Arg format = "opt0=v,optN=v ..."
+ *
+ * Tokenize the argument string into name/value pairs and apply each one.
+ * Fix: the first pmd_setArg() call was unguarded, so an empty or malformed
+ * string passed NULL tokens down to strcmp() (undefined behavior).
+ ******/
+void
+ark_pmd_pktgen_parse(char *args)
+{
+	char *argv, *v;
+	const char toks[] = " =\n\t\v\f\r";
+
+	argv = strtok(args, toks);
+	v = strtok(NULL, toks);
+	if (argv && v)
+		pmd_setArg(argv, v);
+	while (argv && v) {
+		argv = strtok(NULL, toks);
+		v = strtok(NULL, toks);
+		if (argv && v)
+			pmd_setArg(argv, v);
+	}
+}
+
+static int32_t parseIPV4string(char const *ipAddress);
+/*
+ * Convert a dotted-quad IPv4 string to a host-order 32-bit value.
+ * Returns 0 when the string does not parse as four octets or when any
+ * octet exceeds 255 (fix: out-of-range octets previously produced a
+ * garbage value instead of being rejected).
+ */
+static int32_t
+parseIPV4string(char const *ipAddress)
+{
+	unsigned int ip[4];
+
+	if (4 != sscanf(ipAddress, "%u.%u.%u.%u", &ip[0], &ip[1], &ip[2], &ip[3]))
+		return 0;
+	if (ip[0] > 255 || ip[1] > 255 || ip[2] > 255 || ip[3] > 255)
+		return 0;
+	return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul;
+}
+
+/*
+ * Assemble the pktCtrl register from the individual feature flags and
+ * write it to hardware.  Bit positions: genForever=24, enSlavedStart=20,
+ * varyLength=16, incrPayload=12, incrFirstByte=8, insTimeStamp=5,
+ * insSeqNum=4, insUDPHdr=0, shaping-enable=28.
+ */
+static void
+ark_pmd_pktgen_setPktCtrl(ArkPktGen_t handle, uint32_t genForever,
+	uint32_t enSlavedStart, uint32_t varyLength, uint32_t incrPayload,
+	uint32_t incrFirstByte, uint32_t insSeqNum, uint32_t insUDPHdr,
+	uint32_t insTimeStamp)
+{
+	uint32_t r;
+	struct ArkPktGenInst *inst = (struct ArkPktGenInst *) handle;
+
+	/* UDP header insertion is only meaningful in L2 mode. */
+	if (!inst->l2_mode)
+		insUDPHdr = 0;
+
+	r = (genForever << 24) | (enSlavedStart << 20) | (varyLength << 16) |
+		(incrPayload << 12) | (incrFirstByte << 8) |
+		(insTimeStamp << 5) | (insSeqNum << 4) | insUDPHdr;
+
+	inst->regs->bytesPerCycle = OPTIONS("bytesPerCycle")->v.Int;
+	if (OPTIONS("shaping")->v.Bool)
+		r = r | (1 << 28);	/* enable shaping */
+
+
+	inst->regs->pktCtrl = r;
+}
+
+/*
+ * Apply the parsed option table to a generator instance: configure (unless
+ * "pause"/"reset" is requested), then honor "pause", "reset" and "run".
+ * Fixes: srcMACAddr was read via v.Int although the option is declared
+ * OTLong (truncated the 48-bit MAC); "port" is not present in toptions,
+ * so the unchecked OPTIONS("port") lookup dereferenced NULL in the trace.
+ */
+void
+ark_pmd_pktgen_setup(ArkPktGen_t handle)
+{
+	uint32_t hdr[7];
+	int32_t dstIp = parseIPV4string(OPTIONS("dstIP")->v.Str);
+
+	if (!OPTIONS("pause")->v.Bool && (!OPTIONS("reset")->v.Bool
+		&& (OPTIONS("configure")->v.Bool))) {
+
+		ark_pmd_pktgen_setPayloadByte(handle, OPTIONS("payloadByte")->v.Int);
+		ark_pmd_pktgen_setSrcMACAddr(handle, OPTIONS("srcMACAddr")->v.Long);
+		ark_pmd_pktgen_setDstMACAddr(handle, OPTIONS("dstMACAddr")->v.Long);
+		ark_pmd_pktgen_setEthType(handle, OPTIONS("ethType")->v.Int);
+
+		/* dg-mode: raw 7-DW header; else build from IP/port options. */
+		if (OPTIONS("dg-mode")->v.Bool) {
+			hdr[0] = OPTIONS("hdrDW0")->v.Int;
+			hdr[1] = OPTIONS("hdrDW1")->v.Int;
+			hdr[2] = OPTIONS("hdrDW2")->v.Int;
+			hdr[3] = OPTIONS("hdrDW3")->v.Int;
+			hdr[4] = OPTIONS("hdrDW4")->v.Int;
+			hdr[5] = OPTIONS("hdrDW5")->v.Int;
+			hdr[6] = OPTIONS("hdrDW6")->v.Int;
+		} else {
+			hdr[0] = dstIp;
+			hdr[1] = OPTIONS("dstPort")->v.Int;
+			hdr[2] = OPTIONS("srcPort")->v.Int;
+			hdr[3] = 0;
+			hdr[4] = 0;
+			hdr[5] = 0;
+			hdr[6] = 0;
+		}
+		ark_pmd_pktgen_setHdrDW(handle, hdr);
+		ark_pmd_pktgen_setNumPkts(handle, OPTIONS("numPkts")->v.Int);
+		ark_pmd_pktgen_setPktSizeMin(handle, OPTIONS("pktSizeMin")->v.Int);
+		ark_pmd_pktgen_setPktSizeMax(handle, OPTIONS("pktSizeMax")->v.Int);
+		ark_pmd_pktgen_setPktSizeIncr(handle, OPTIONS("pktSizeIncr")->v.Int);
+		ark_pmd_pktgen_setPktSpacing(handle, OPTIONS("pktSpacing")->v.Int);
+		ark_pmd_pktgen_setStartOffset(handle, OPTIONS("startOffset")->v.Int);
+		ark_pmd_pktgen_setPktCtrl(handle,
+			OPTIONS("genForever")->v.Bool,
+			OPTIONS("enSlavedStart")->v.Bool,
+			OPTIONS("varyLength")->v.Bool,
+			OPTIONS("incrPayload")->v.Bool,
+			OPTIONS("incrFirstByte")->v.Bool,
+			OPTIONS("insSeqNum")->v.Int,
+			OPTIONS("insUDPHdr")->v.Bool, OPTIONS("insTimeStamp")->v.Int);
+	}
+
+	if (OPTIONS("pause")->v.Bool)
+		ark_pmd_pktgen_pause(handle);
+
+	if (OPTIONS("reset")->v.Bool)
+		ark_pmd_pktgen_reset(handle);
+
+	if (OPTIONS("run")->v.Bool) {
+		struct Options *port = OPTIONS("port");
+
+		/* "port" may be absent from the table; don't deref NULL. */
+		ARK_DEBUG_TRACE("Starting packet generator on port %d\n",
+			port ? port->v.Int : -1);
+		ark_pmd_pktgen_run(handle);
+	}
+}
new file mode 100644
@@ -0,0 +1,106 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_PKTGEN_H_
+#define _ARK_PKTGEN_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_eal.h>
+
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+
+/* BAR offset of the packet-generator hardware module. */
+#define ARK_PKTGEN_BASE_ADR 0x10000
+
+/* Opaque handle to one packet-generator instance. */
+typedef void *ArkPktGen_t;
+
+/*
+ * Hardware register layout; must match the FPGA map exactly (packed).
+ * Registers the hardware itself updates are marked volatile.
+ */
+struct ArkPktGenRegs {
+	uint32_t r0;
+	volatile uint32_t pktStartStop;
+	volatile uint32_t pktCtrl;
+	uint32_t pktPayload;
+	uint32_t pktSpacing;
+	uint32_t pktSizeMin;
+	uint32_t pktSizeMax;
+	uint32_t pktSizeIncr;
+	volatile uint32_t numPkts;
+	volatile uint32_t pktsSent;
+	uint32_t srcMACAddrL;
+	uint32_t srcMACAddrH;
+	uint32_t dstMACAddrL;
+	uint32_t dstMACAddrH;
+	uint32_t ethType;
+	uint32_t hdrDW[7];
+	uint32_t startOffset;
+	uint32_t bytesPerCycle;
+} __attribute__ ((packed));
+
+/* Software instance state; regs points into the mapped device BAR. */
+struct ArkPktGenInst {
+	struct rte_eth_dev_info *dev_info;
+	struct ArkPktGenRegs *regs;
+	int l2_mode;
+	int ordinal;
+};
+
+/* packet generator functions */
+ArkPktGen_t ark_pmd_pktgen_init(void *, int ord, int l2_mode);
+void ark_pmd_pktgen_uninit(ArkPktGen_t handle);
+void ark_pmd_pktgen_run(ArkPktGen_t handle);
+void ark_pmd_pktgen_pause(ArkPktGen_t handle);
+uint32_t ark_pmd_pktgen_paused(ArkPktGen_t handle);
+uint32_t ark_pmd_pktgen_isGenForever(ArkPktGen_t handle);
+uint32_t ark_pmd_pktgen_isRunning(ArkPktGen_t handle);
+uint32_t ark_pmd_pktgen_txDone(ArkPktGen_t handle);
+void ark_pmd_pktgen_reset(ArkPktGen_t handle);
+void ark_pmd_pktgen_waitDone(ArkPktGen_t handle);
+uint32_t ark_pmd_pktgen_getPktsSent(ArkPktGen_t handle);
+void ark_pmd_pktgen_setPayloadByte(ArkPktGen_t handle, uint32_t b);
+void ark_pmd_pktgen_setPktSpacing(ArkPktGen_t handle, uint32_t x);
+void ark_pmd_pktgen_setPktSizeMin(ArkPktGen_t handle, uint32_t x);
+void ark_pmd_pktgen_setPktSizeMax(ArkPktGen_t handle, uint32_t x);
+void ark_pmd_pktgen_setPktSizeIncr(ArkPktGen_t handle, uint32_t x);
+void ark_pmd_pktgen_setNumPkts(ArkPktGen_t handle, uint32_t x);
+void ark_pmd_pktgen_setSrcMACAddr(ArkPktGen_t handle, uint64_t macAddr);
+void ark_pmd_pktgen_setDstMACAddr(ArkPktGen_t handle, uint64_t macAddr);
+void ark_pmd_pktgen_setEthType(ArkPktGen_t handle, uint32_t x);
+void ark_pmd_pktgen_setHdrDW(ArkPktGen_t handle, uint32_t *hdr);
+void ark_pmd_pktgen_setStartOffset(ArkPktGen_t handle, uint32_t x);
+void ark_pmd_pktgen_parse(char *argv);
+void ark_pmd_pktgen_setup(ArkPktGen_t handle);
+
+#endif
new file mode 100644
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_rqp.h"
+#include "ark_debug.h"
+
+/* ************************************************************************* */
+/* Clear the RQ pacing statistics counters (write-1-to-clear). */
+void
+ark_rqp_stats_reset(struct ark_rqpace_t *rqp)
+{
+	rqp->statsClear = 1;
+	/* POR defaults left untouched: cpld_max = 992, cplh_max = 64 */
+}
+
+/* ************************************************************************* */
+/*
+ * Dump the RQ pacing counters.  Errors (non-zero errCountOther) go to
+ * stderr unconditionally; the full counter set goes through the
+ * ARK_DEBUG_STATS macro.  FMT_SU32 pairs a label string with a uint32_t
+ * value in the format string, so arguments below alternate label/value.
+ */
+void
+ark_rqp_dump(struct ark_rqpace_t *rqp)
+{
+	if (rqp->errCountOther != 0)
+		fprintf
+			(stderr,
+			"ARKP RQP Errors noted: ctrl: %d cplh_hmax %d cpld_max %d"
+			FMT_SU32
+			FMT_SU32 "\n",
+			rqp->ctrl, rqp->cplh_max, rqp->cpld_max,
+			"Error Count", rqp->errCnt,
+			"Error General", rqp->errCountOther);
+
+	ARK_DEBUG_STATS
+		("ARKP RQP Dump: ctrl: %d cplh_hmax %d cpld_max %d" FMT_SU32
+		FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32
+		FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32
+		FMT_SU32 FMT_SU32 FMT_SU32
+		FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32 "\n",
+		rqp->ctrl, rqp->cplh_max, rqp->cpld_max,
+		"Error Count", rqp->errCnt,
+		"Error General", rqp->errCountOther,
+		"stallPS", rqp->stallPS,
+		"stallPS Min", rqp->stallPSMin,
+		"stallPS Max", rqp->stallPSMax,
+		"reqPS", rqp->reqPS,
+		"reqPS Min", rqp->reqPSMin,
+		"reqPS Max", rqp->reqPSMax,
+		"reqDWPS", rqp->reqDWPS,
+		"reqDWPS Min", rqp->reqDWPSMin,
+		"reqDWPS Max", rqp->reqDWPSMax,
+		"cplPS", rqp->cplPS,
+		"cplPS Min", rqp->cplPSMin,
+		"cplPS Max", rqp->cplPSMax,
+		"cplDWPS", rqp->cplDWPS,
+		"cplDWPS Min", rqp->cplDWPSMin,
+		"cplDWPS Max", rqp->cplDWPSMax,
+		"cplh pending", rqp->cplh_pending,
+		"cpld pending", rqp->cpld_pending,
+		"cplh pending max", rqp->cplh_pending_max,
+		"cpld pending max", rqp->cpld_pending_max);
+}
new file mode 100644
@@ -0,0 +1,75 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_RQP_H_
+#define _ARK_RQP_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/*
+ * RQ Pacing core hardware structure.  The field order mirrors the FPGA
+ * register map; all fields are hardware-updated, hence volatile.
+ */
+struct ark_rqpace_t {
+	volatile uint32_t ctrl;
+	volatile uint32_t statsClear;
+	volatile uint32_t cplh_max;
+	volatile uint32_t cpld_max;
+	volatile uint32_t errCnt;
+	volatile uint32_t stallPS;
+	volatile uint32_t stallPSMin;
+	volatile uint32_t stallPSMax;
+	volatile uint32_t reqPS;
+	volatile uint32_t reqPSMin;
+	volatile uint32_t reqPSMax;
+	volatile uint32_t reqDWPS;
+	volatile uint32_t reqDWPSMin;
+	volatile uint32_t reqDWPSMax;
+	volatile uint32_t cplPS;
+	volatile uint32_t cplPSMin;
+	volatile uint32_t cplPSMax;
+	volatile uint32_t cplDWPS;
+	volatile uint32_t cplDWPSMin;
+	volatile uint32_t cplDWPSMax;
+	volatile uint32_t cplh_pending;
+	volatile uint32_t cpld_pending;
+	volatile uint32_t cplh_pending_max;
+	volatile uint32_t cpld_pending_max;
+	volatile uint32_t errCountOther;
+};
+
+void ark_rqp_dump(struct ark_rqpace_t *rqp);
+void ark_rqp_stats_reset(struct ark_rqpace_t *rqp);
+
+#endif
new file mode 100644
@@ -0,0 +1,221 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_debug.h"
+#include "ark_udm.h"
+
+/*
+ * Sanity-check the UDM hardware module: the compiled register layout
+ * must match the expected hardware footprint, and the module's constant
+ * register must read back the expected signature.
+ * Returns 0 on success, -1 on any mismatch.
+ */
+int
+ark_udm_verify(struct ark_udm_t *udm)
+{
+	if (sizeof(struct ark_udm_t) != ARK_UDM_EXPECT_SIZE) {
+		/* sizeof yields size_t; %zx is its portable C99 specifier
+		 * (%lx is wrong where size_t is not unsigned long). */
+		fprintf(stderr, " UDM structure looks incorrect %#x vs %#zx\n",
+			ARK_UDM_EXPECT_SIZE, sizeof(struct ark_udm_t));
+		return -1;
+	}
+
+	if (udm->setup.const0 != ARK_UDM_CONST) {
+		fprintf(stderr, " UDM module not found as expected 0x%08x\n",
+			udm->setup.const0);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Issue a stop command to the UDM.  When @wait is non-zero, poll until
+ * the hardware reports its pipeline flushed, giving up after 1000
+ * iterations of 10 us (~10 ms).
+ * Returns 0 on success, 1 on flush timeout.
+ */
+int
+ark_udm_stop(struct ark_udm_t *udm, const int wait)
+{
+	int cnt = 0;
+
+	/* Use the named command instead of the magic constant 2. */
+	udm->cfg.command = ARK_UDM_STOP;
+
+	while (wait && (udm->cfg.stopFlushed & 0x01) == 0) {
+		if (cnt++ > 1000)
+			return 1;
+
+		usleep(10);
+	}
+	return 0;
+}
+
+/*
+ * Reset the UDM.  Try an orderly stop first; if the flush times out,
+ * force an abrupt stop before issuing the reset command.
+ * Returns the status of the final stop attempt (0 on success).
+ */
+int
+ark_udm_reset(struct ark_udm_t *udm)
+{
+	int status;
+
+	status = ark_udm_stop(udm, 1);
+	if (status != 0) {
+		ARK_DEBUG_TRACE("ARKP: %s stop failed doing forced reset\n",
+			__func__);
+		/* Command 4 appears to be a forced/hard stop; it has no
+		 * name in ArkUdmCommands -- confirm against the HW spec. */
+		udm->cfg.command = 4;
+		usleep(10);
+		udm->cfg.command = ARK_UDM_RESET;
+		status = ark_udm_stop(udm, 0);
+		ARK_DEBUG_TRACE
+			("ARKP: %s stop status %d post failure and forced reset\n",
+			__func__, status);
+	} else {
+		udm->cfg.command = ARK_UDM_RESET;
+	}
+
+	return status;
+}
+
+/* Start the UDM (named command instead of the magic constant 1). */
+void
+ark_udm_start(struct ark_udm_t *udm)
+{
+	udm->cfg.command = ARK_UDM_START;
+}
+
+/*
+ * Reset the PCI backpressure and TLP-rate statistics blocks.
+ * Writing 1 to the *_clear registers looks like a write-to-clear
+ * trigger -- TODO confirm against the hardware spec.
+ */
+void
+ark_udm_stats_reset(struct ark_udm_t *udm)
+{
+	udm->pcibp.pci_clear = 1;
+	udm->tlp_ps.tlp_clear = 1;
+}
+
+/*
+ * Program mbuf geometry and the producer-index write interval.
+ *
+ * @headroom and @dataroom are byte counts; the hardware registers take
+ * 32-bit double-words, hence the divide by 4.  @write_interval_ns is
+ * converted into the hardware's 4 ns clock ticks.
+ */
+void
+ark_udm_configure(struct ark_udm_t *udm, uint32_t headroom, uint32_t dataroom,
+	uint32_t write_interval_ns)
+{
+	/* headroom and data room are in DWs in the UDM */
+	udm->cfg.dataroom = dataroom / 4;
+	udm->cfg.headroom = headroom / 4;
+
+	/* interval register counts 4 ns periods */
+	udm->rt_cfg.writeInterval = write_interval_ns / 4;
+}
+
+/*
+ * Set the physical address the hardware uses as its producer-index
+ * target (rt_cfg.hwProdAddr) -- presumably where the UDM DMAs the
+ * producer index for the host to poll; confirm against callers.
+ */
+void
+ark_udm_write_addr(struct ark_udm_t *udm, phys_addr_t addr)
+{
+	udm->rt_cfg.hwProdAddr = addr;
+}
+
+/* Return non-zero once the hardware reports its pipeline flushed. */
+int
+ark_udm_is_flushed(struct ark_udm_t *udm)
+{
+	return (udm->cfg.stopFlushed & 0x01) != 0;
+}
+
+/* Per-queue counter accessors: single volatile reads of qstats registers. */
+
+/* Packets dropped on the current queue. */
+uint64_t
+ark_udm_dropped(struct ark_udm_t *udm)
+{
+	return udm->qstats.qPktDrop;
+}
+
+/* Bytes received on the current queue. */
+uint64_t
+ark_udm_bytes(struct ark_udm_t *udm)
+{
+	return udm->qstats.qByteCount;
+}
+
+/* Finalized ("FF") packet count for the current queue. */
+uint64_t
+ark_udm_packets(struct ark_udm_t *udm)
+{
+	return udm->qstats.qFFPacketCount;
+}
+
+/*
+ * Debug dump of the global UDM statistics.  Note the drop count is read
+ * from the global TLP block (tlp.pkt_drop), not the stats block.
+ */
+void
+ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg)
+{
+	ARK_DEBUG_STATS("ARKP UDM Stats: %s" FMT_SU64 FMT_SU64 FMT_SU64 FMT_SU64
+		FMT_SU64 "\n", msg, "Pkts Received", udm->stats.rxPacketCount,
+		"Pkts Finalized", udm->stats.rxSentPackets, "Pkts Dropped",
+		udm->tlp.pkt_drop, "Bytes Count", udm->stats.rxByteCount, "MBuf Count",
+		udm->stats.rxMBufCount);
+}
+
+/* Debug dump of the per-queue statistics block for queue @qid. */
+void
+ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg, uint16_t qid)
+{
+	ARK_DEBUG_STATS
+		("ARKP UDM Queue %3u Stats: %s"
+		FMT_SU64 FMT_SU64
+		FMT_SU64 FMT_SU64
+		FMT_SU64 "\n",
+		qid, msg,
+		"Pkts Received", udm->qstats.qPacketCount,
+		"Pkts Finalized", udm->qstats.qFFPacketCount,
+		"Pkts Dropped", udm->qstats.qPktDrop,
+		"Bytes Count", udm->qstats.qByteCount,
+		"MBuf Count", udm->qstats.qMbufCount);
+}
+
+/* Minimal state dump: only the stopped/flushed flag. */
+void
+ark_udm_dump(struct ark_udm_t *udm, const char *msg)
+{
+	ARK_DEBUG_TRACE("ARKP UDM Dump: %s Stopped: %d\n", msg,
+		udm->cfg.stopFlushed);
+}
+
+/* Dump the runtime configuration (producer address/index) for queue @qId. */
+void
+ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t qId)
+{
+	ARK_DEBUG_TRACE
+		("UDM Setup Q: %u"
+		FMT_SPTR FMT_SU32 "\n",
+		qId,
+		"hwProdAddr", (void *) udm->rt_cfg.hwProdAddr,
+		"prodIdx", udm->rt_cfg.prodIdx);
+}
+
+/* Dump the PCI backpressure histogram registers. */
+void
+ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg)
+{
+	struct ark_udm_pcibp_t *bp = &udm->pcibp;
+
+	ARK_DEBUG_STATS
+		("ARKP UDM Performance %s"
+		FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32 FMT_SU32 "\n",
+		msg,
+		"PCI Empty", bp->pci_empty,
+		"PCI Q1", bp->pci_q1,
+		"PCI Q2", bp->pci_q2,
+		"PCI Q3", bp->pci_q3,
+		"PCI Q4", bp->pci_q4,
+		"PCI Full", bp->pci_full);
+}
+
+/*
+ * Reset the per-queue statistics.  Writing 1 to qByteCount appears to
+ * trigger a hardware-side clear of the whole qstats group rather than
+ * literally setting the counter to one -- confirm against the HW spec.
+ */
+void
+ark_udm_queue_stats_reset(struct ark_udm_t *udm)
+{
+	udm->qstats.qByteCount = 1;
+}
+
+/*
+ * Enable or disable the current UDM queue.  Any non-zero @enable value
+ * turns the queue on; the register is written as a clean 0/1 flag.
+ */
+void
+ark_udm_queue_enable(struct ark_udm_t *udm, int enable)
+{
+	udm->qstats.qEnable = !!enable;
+}
new file mode 100644
@@ -0,0 +1,175 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_UDM_H_
+#define _ARK_UDM_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/*
+ * UDM (Upstream Data Mover) hardware structures.
+ * Each struct below mirrors a register group; the ARK_UDM_* macros are
+ * the byte offsets of those groups within the consolidated ark_udm_t
+ * (see the reserved padding arithmetic there).
+ */
+
+#define ARK_RX_WRITE_TIME_NS 2500
+#define ARK_UDM_SETUP 0
+/* Signature value read back from setup.const0 by ark_udm_verify(). */
+#define ARK_UDM_CONST 0xBACECACE
+struct ark_udm_setup_t {
+	uint32_t r0;
+	uint32_t r4;
+	volatile uint32_t cycleCount;
+	uint32_t const0;	/* must read ARK_UDM_CONST */
+};
+
+#define ARK_UDM_CFG 0x010
+struct ark_udm_cfg_t {
+	volatile uint32_t stopFlushed;	/* RO: bit 0 set once pipeline is flushed */
+	volatile uint32_t command;	/* takes ArkUdmCommands values */
+	uint32_t dataroom;	/* mbuf data room, in 32-bit DWs */
+	uint32_t headroom;	/* mbuf headroom, in 32-bit DWs */
+};
+
+/* Values written to ark_udm_cfg_t.command. */
+typedef enum {
+	ARK_UDM_START = 0x1,
+	ARK_UDM_STOP = 0x2,
+	ARK_UDM_RESET = 0x3
+} ArkUdmCommands;
+
+#define ARK_UDM_STATS 0x020
+struct ark_udm_stats_t {
+	volatile uint64_t rxByteCount;
+	volatile uint64_t rxPacketCount;
+	volatile uint64_t rxMBufCount;
+	volatile uint64_t rxSentPackets;
+};
+
+#define ARK_UDM_PQ 0x040
+struct ark_udm_queue_stats_t {
+	volatile uint64_t qByteCount;	/* writing 1 appears to clear the group */
+	volatile uint64_t qPacketCount;	/* includes drops */
+	volatile uint64_t qMbufCount;
+	volatile uint64_t qFFPacketCount;	/* finalized packets */
+	volatile uint64_t qPktDrop;
+	uint32_t qEnable;	/* 0/1 queue enable flag */
+};
+
+#define ARK_UDM_TLP 0x0070
+struct ark_udm_tlp_t {
+	volatile uint64_t pkt_drop;	/* global */
+	volatile uint32_t tlp_q1;
+	volatile uint32_t tlp_q2;
+	volatile uint32_t tlp_q3;
+	volatile uint32_t tlp_q4;
+	volatile uint32_t tlp_full;
+};
+
+#define ARK_UDM_PCIBP 0x00a0
+/* PCI backpressure histogram; pci_clear is a write-to-clear trigger. */
+struct ark_udm_pcibp_t {
+	volatile uint32_t pci_clear;
+	volatile uint32_t pci_empty;
+	volatile uint32_t pci_q1;
+	volatile uint32_t pci_q2;
+	volatile uint32_t pci_q3;
+	volatile uint32_t pci_q4;
+	volatile uint32_t pci_full;
+};
+
+#define ARK_UDM_TLP_PS 0x00bc
+/* TLP per-second rate samples; tlp_clear is a write-to-clear trigger. */
+struct ark_udm_tlp_ps_t {
+	volatile uint32_t tlp_clear;
+	volatile uint32_t tlp_ps_min;
+	volatile uint32_t tlp_ps_max;
+	volatile uint32_t tlp_full_ps_min;
+	volatile uint32_t tlp_full_ps_max;
+	volatile uint32_t tlp_dw_ps_min;
+	volatile uint32_t tlp_dw_ps_max;
+	volatile uint32_t tlp_pldw_ps_min;
+	volatile uint32_t tlp_pldw_ps_max;
+};
+
+#define ARK_UDM_RT_CFG 0x00E0
+struct ark_udm_rt_cfg_t {
+	phys_addr_t hwProdAddr;	/* host physical address for the producer index */
+	uint32_t writeInterval;	/* 4ns cycles */
+	volatile uint32_t prodIdx;	/* RO */
+};
+
+/*
+ * Consolidated structure covering the whole 0x100-byte UDM register
+ * space.  The reservedN pads force each group to its ARK_UDM_* byte
+ * offset; ark_udm_verify() checks sizeof() against ARK_UDM_EXPECT_SIZE.
+ */
+struct ark_udm_t {
+	struct ark_udm_setup_t setup;
+	struct ark_udm_cfg_t cfg;
+	struct ark_udm_stats_t stats;
+	struct ark_udm_queue_stats_t qstats;
+	uint8_t reserved1[(ARK_UDM_TLP - ARK_UDM_PQ) -
+		sizeof(struct ark_udm_queue_stats_t)];
+	struct ark_udm_tlp_t tlp;
+	uint8_t reserved2[(ARK_UDM_PCIBP - ARK_UDM_TLP) -
+		sizeof(struct ark_udm_tlp_t)];
+	struct ark_udm_pcibp_t pcibp;
+	struct ark_udm_tlp_ps_t tlp_ps;
+	struct ark_udm_rt_cfg_t rt_cfg;
+	int8_t reserved3[(0x100 - ARK_UDM_RT_CFG) -
+		sizeof(struct ark_udm_rt_cfg_t)];
+};
+
+/* 0x100 bytes total: last register at 0x00fc plus its 4-byte width. */
+#define ARK_UDM_EXPECT_SIZE (0x00fc + 4)
+#define ARK_UDM_QOFFSET ARK_UDM_EXPECT_SIZE
+
+/* Check register-layout size and module signature; 0 on success, -1 on error. */
+int ark_udm_verify(struct ark_udm_t *udm);
+/* Stop the UDM; if wait, poll for flush. Returns 1 on timeout, else 0. */
+int ark_udm_stop(struct ark_udm_t *udm, int wait);
+void ark_udm_start(struct ark_udm_t *udm);
+/* Orderly stop then reset; forces a hard stop if the flush times out. */
+int ark_udm_reset(struct ark_udm_t *udm);
+/* headroom/dataroom in bytes; write_interval_ns converted to 4 ns ticks. */
+void ark_udm_configure(struct ark_udm_t *udm,
+	uint32_t headroom,
+	uint32_t dataroom,
+	uint32_t write_interval_ns);
+void ark_udm_write_addr(struct ark_udm_t *udm, phys_addr_t addr);
+void ark_udm_stats_reset(struct ark_udm_t *udm);
+/* Debug dump helpers (compiled to no-ops unless debug stats/trace enabled). */
+void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
+	uint16_t qid);
+void ark_udm_dump(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t qId);
+int ark_udm_is_flushed(struct ark_udm_t *udm);
+
+/* Per queue data */
+uint64_t ark_udm_dropped(struct ark_udm_t *udm);
+uint64_t ark_udm_bytes(struct ark_udm_t *udm);
+uint64_t ark_udm_packets(struct ark_udm_t *udm);
+
+void ark_udm_queue_stats_reset(struct ark_udm_t *udm);
+void ark_udm_queue_enable(struct ark_udm_t *udm, int enable);
+#endif
new file mode 100644
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+	local: *;
+
+};
@@ -104,6 +104,7 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# plugins (link only if static libraries)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += -lrte_pmd_af_packet
+_LDLIBS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += -lrte_pmd_ark
_LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += -lrte_pmd_bnx2x -lz
_LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += -lrte_pmd_bnxt
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond