@@ -882,6 +882,13 @@ F: drivers/net/vmxnet3/
F: doc/guides/nics/vmxnet3.rst
F: doc/guides/nics/features/vmxnet3.ini
+Wangxun txgbe
+M: Jiawen Wu <jiawenwu@trustnetic.com>
+M: Jian Wang <jianwang@trustnetic.com>
+F: drivers/net/txgbe/
+F: doc/guides/nics/txgbe.rst
+F: doc/guides/nics/features/txgbe.ini
+
Vhost-user
M: Maxime Coquelin <maxime.coquelin@redhat.com>
M: Chenbo Xia <chenbo.xia@intel.com>
@@ -389,6 +389,16 @@ CONFIG_RTE_LIBRTE_MLX5_VDPA_PMD=n
CONFIG_RTE_IBVERBS_LINK_DLOPEN=n
CONFIG_RTE_IBVERBS_LINK_STATIC=n
+#
+# Compile burst-oriented TXGBE PMD driver
+#
+CONFIG_RTE_LIBRTE_TXGBE_PMD=y
+CONFIG_RTE_LIBRTE_TXGBE_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_TXGBE_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_TXGBE_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC=n
+CONFIG_RTE_LIBRTE_TXGBE_BYPASS=n
+
#
# Compile burst-oriented Netronome NFP PMD driver
#
new file mode 100644
@@ -0,0 +1,52 @@
+;
+; Supported features of the 'txgbe' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities = Y
+Link status = Y
+Link status event = Y
+Rx interrupt = Y
+Queue start/stop = Y
+MTU update = Y
+Jumbo frame = Y
+Scattered Rx = Y
+LRO = Y
+TSO = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+Unicast MAC filter = Y
+Multicast MAC filter = Y
+RSS hash = Y
+RSS key update = Y
+RSS reta update = Y
+DCB = Y
+VLAN filter = Y
+Flow control = Y
+Flow API = Y
+Rate limitation = Y
+Traffic mirroring = Y
+Inline crypto = Y
+CRC offload = P
+VLAN offload = P
+QinQ offload = P
+L3 checksum offload = P
+L4 checksum offload = P
+MACsec offload = P
+Inner L3 checksum = P
+Inner L4 checksum = P
+Packet type parsing = Y
+Timesync = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
+Basic stats = Y
+Extended stats = Y
+Stats per queue = Y
+FW version = Y
+EEPROM dump = Y
+Module EEPROM dump = Y
+Multiprocess aware = Y
+BSD nic_uio = Y
+Linux UIO = Y
+Linux VFIO = Y
new file mode 100644
@@ -0,0 +1,67 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2015-2020
+
+TXGBE Poll Mode Driver
+======================
+
+The TXGBE PMD (librte_pmd_txgbe) provides poll mode driver support
+for Wangxun 10 Gigabit Ethernet NICs.
+
+Features
+--------
+
+- Multiple queues for TX and RX
+- Receiver Side Scaling (RSS)
+- MAC/VLAN filtering
+- Packet type information
+- Checksum offload
+- VLAN/QinQ stripping and inserting
+- TSO offload
+- Promiscuous mode
+- Multicast mode
+- Port hardware statistics
+- Jumbo frames
+- Link state information
+- Link flow control
+- Interrupt mode for RX
+- Scatter and gather for TX and RX
+- DCB
+- IEEE 1588
+- FW version
+- LRO
+- Generic flow API
+
+Prerequisites
+-------------
+
+- Learn about Wangxun 10 Gigabit Ethernet NICs at
+  `<https://www.net-swift.com/c/product.html>`_.
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``config`` file.
+
+- ``CONFIG_RTE_LIBRTE_TXGBE_PMD`` (default ``y``)
+
+ Toggle compilation of the ``librte_pmd_txgbe`` driver.
+
+- ``CONFIG_RTE_LIBRTE_TXGBE_DEBUG_*`` (default ``n``)
+
+ Toggle display of generic debugging messages.
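+
+  For example, to trace the receive path while bringing a port up, one
+  might set (illustrative values)::
+
+     CONFIG_RTE_LIBRTE_TXGBE_PMD=y
+     CONFIG_RTE_LIBRTE_TXGBE_DEBUG_RX=y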
+
+Driver compilation and testing
+------------------------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
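+
+As a quick smoke test, one can bind a port and start ``testpmd`` roughly as
+follows (the PCI address and paths here are only illustrative)::
+
+   usertools/dpdk-devbind.py --bind=vfio-pci 0000:01:00.0
+   ./build/app/testpmd -l 0-3 -n 4 -- -i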
+
+Limitations or Known issues
+---------------------------
+
+Building with ICC is not supported yet.
+X86-32, Power8, ARMv7 and BSD are not supported yet.
@@ -59,6 +59,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
DIRS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap
DIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += thunderx
+DIRS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe
DIRS-$(CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD) += vdev_netvsc
DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
@@ -50,6 +50,7 @@ drivers = ['af_packet',
'szedata2',
'tap',
'thunderx',
+ 'txgbe',
'vdev_netvsc',
'vhost',
'virtio',
new file mode 100644
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_txgbe.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_txgbe_version.map
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
+
+CFLAGS_txgbe_rxtx.o += -diag-disable 3656
+
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+else
+#
+# CFLAGS for gcc
+#
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+endif
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+ifeq ($(shell test $(GCC_VERSION) -ge 50 && echo 1), 1)
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
+endif
+
+endif
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lpthread
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
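+# (The wildcard/patsubst above builds the object list for every base/*.c;
+# the eval then appends CFLAGS_BASE_DRIVER to each per-object
+# CFLAGS_<obj>.o variable that the DPDK make system honours.)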
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_eeprom.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_hv_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_dcb.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_dcb_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_ethdev_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_flow.c
+
+ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_ipsec.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += rte_pmd_txgbe.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_vf_representor.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_ptypes.c
+SRCS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += txgbe_mng.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_TXGBE_PMD)-include := rte_pmd_txgbe.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
new file mode 100644
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2020
+
+sources = [
+ 'txgbe_dcb_hw.c',
+ 'txgbe_dcb.c',
+ 'txgbe_eeprom.c',
+ 'txgbe_hv_vf.c',
+ 'txgbe_hw.c',
+ 'txgbe_mbx.c',
+ 'txgbe_mng.c',
+ 'txgbe_phy.c',
+ 'txgbe_vf.c',
+]
+
+error_cflags = ['-Wno-unused-value',
+ '-Wno-unused-parameter',
+ '-Wno-unused-but-set-variable']
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
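+# Building the base ("shared") code as its own static library keeps the
+# relaxed warning flags above scoped to these files; the PMD then links
+# the extracted objects instead of inheriting the looser c_args.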
+base_lib = static_library('txgbe_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
new file mode 100644
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_H_
+#define _TXGBE_H_
+
+#include "txgbe_type.h"
+#include "txgbe_mng.h"
+#include "txgbe_mbx.h"
+#include "txgbe_eeprom.h"
+#include "txgbe_phy.h"
+#include "txgbe_hw.h"
+#include "txgbe_vf.h"
+#include "txgbe_dcb.h"
+
+#endif /* _TXGBE_H_ */
new file mode 100644
@@ -0,0 +1,700 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_type.h"
+#include "txgbe_hw.h"
+#include "txgbe_dcb.h"
+#include "txgbe_dcb_hw.h"
+
+/**
+ * txgbe_dcb_pfc_enable - Enable priority flow control
+ * @hw: pointer to hardware structure
+ * @tc_num: traffic class number
+ *
+ * Enable priority flow control according to the current settings.
+ */
+int
+txgbe_dcb_pfc_enable(struct txgbe_hw *hw, uint8_t tc_num)
+{
+ int ret_val = 0;
+ uint32_t mflcn_reg, fccfg_reg;
+ uint32_t pause_time;
+ uint32_t fcrtl, fcrth;
+ uint8_t i;
+ uint8_t nb_rx_en;
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ if (hw->fc.current_mode & txgbe_fc_tx_pause) {
+ /* High/Low water can not be 0 */
+ if (!hw->fc.high_water[tc_num] ||
+ !hw->fc.low_water[tc_num]) {
+ PMD_INIT_LOG(ERR, "Invalid water mark configuration");
+ ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+ PMD_INIT_LOG(ERR, "Invalid water mark configuration");
+ ret_val = TXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ /* Negotiate the fc mode to use */
+ txgbe_fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
+ mflcn_reg &= ~(TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC);
+
+ fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
+ fccfg_reg &= ~(TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC);
+
+ switch (hw->fc.current_mode) {
+ case txgbe_fc_none:
+ /*
+		 * If more than one RX priority flow control is enabled,
+		 * TX pause cannot be disabled.
+ */
+ nb_rx_en = 0;
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ uint32_t reg = rd32(hw, TXGBE_FCWTRHI(i));
+ if (reg & TXGBE_FCWTRHI_XOFF)
+ nb_rx_en++;
+ }
+ if (nb_rx_en > 1)
+ fccfg_reg |= TXGBE_TXFCCFG_PFC;
+ break;
+ case txgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ mflcn_reg |= TXGBE_RXFCCFG_PFC;
+ /*
+		 * If more than one RX priority flow control is enabled,
+		 * TX pause cannot be disabled.
+ */
+ nb_rx_en = 0;
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ uint32_t reg = rd32(hw, TXGBE_FCWTRHI(i));
+ if (reg & TXGBE_FCWTRHI_XOFF)
+ nb_rx_en++;
+ }
+ if (nb_rx_en > 1)
+ fccfg_reg |= TXGBE_TXFCCFG_PFC;
+ break;
+ case txgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ fccfg_reg |= TXGBE_TXFCCFG_PFC;
+ break;
+ case txgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ mflcn_reg |= TXGBE_RXFCCFG_PFC;
+ fccfg_reg |= TXGBE_TXFCCFG_PFC;
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
+ ret_val = TXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ wr32(hw, TXGBE_RXFCCFG, mflcn_reg);
+ wr32(hw, TXGBE_TXFCCFG, fccfg_reg);
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ if ((hw->fc.current_mode & txgbe_fc_tx_pause) &&
+ hw->fc.high_water[tc_num]) {
+ fcrtl = TXGBE_FCWTRLO_TH(hw->fc.low_water[tc_num]) |
+ TXGBE_FCWTRLO_XON;
+ fcrth = TXGBE_FCWTRHI_TH(hw->fc.high_water[tc_num]) |
+ TXGBE_FCWTRHI_XOFF;
+ } else {
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the maximum FCRTH value. This allows the Tx
+ * switch to function even under heavy Rx workloads.
+ */
+ fcrtl = 0;
+ fcrth = rd32(hw, TXGBE_PBRXSIZE(tc_num)) - 32;
+ }
+ wr32(hw, TXGBE_FCWTRLO(tc_num), fcrtl);
+ wr32(hw, TXGBE_FCWTRHI(tc_num), fcrth);
+
+	/* Configure pause time: each FCXOFFTM register holds the 16-bit
+	 * pause time for two TCs, so the multiply below replicates the
+	 * value into both halves.
+	 */
+ pause_time = TXGBE_RXFCFSH_TIME(hw->fc.pause_time);
+ for (i = 0; i < (TXGBE_DCB_TC_MAX / 2); i++)
+ wr32(hw, TXGBE_FCXOFFTM(i), pause_time * 0x00010001);
+
+ /* Configure flow control refresh threshold value */
+ wr32(hw, TXGBE_RXFCRFSH, pause_time / 2);
+
+out:
+ return ret_val;
+}
+
+/**
+ * txgbe_dcb_calculate_tc_credits - Calculate traffic class credits
+ * @bw: bandwidth index by traffic class
+ * @refill: refill credits index by traffic class
+ * @max: max credits by traffic class
+ * @max_frame_size: maximum frame size
+ *
+ * Calculate the IEEE traffic class credits from the configured bandwidth
+ * percentages. Credits are the smallest unit programmable into the
+ * underlying hardware. The IEEE 802.1Qaz specification does not use
+ * bandwidth groups, so this is much simplified from the CEE case.
+ */
+s32 txgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
+ int max_frame_size)
+{
+ int min_percent = 100;
+ int min_credit, multiplier;
+ int i;
+
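+	/* min_credit is the 64B-quantum credit cost of half a maximum
+	 * sized frame, rounded up; e.g. a 1518 byte frame needs
+	 * (759 + 63) / 64 = 12 credits (illustrative arithmetic).
+	 */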
+ min_credit = ((max_frame_size / 2) + TXGBE_DCB_CREDIT_QUANTUM - 1) /
+ TXGBE_DCB_CREDIT_QUANTUM;
+
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ if (bw[i] < min_percent && bw[i])
+ min_percent = bw[i];
+ }
+
+ multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the hw credits for each TC */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ int val = min(bw[i] * multiplier, TXGBE_DCB_MAX_CREDIT_REFILL);
+
+ if (val < min_credit)
+ val = min_credit;
+ refill[i] = (u16)val;
+
+ max[i] = bw[i] ? (bw[i] * TXGBE_DCB_MAX_CREDIT) / 100 : min_credit;
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame_size: Maximum frame size
+ * @direction: Configuring either Tx or Rx
+ *
+ * This function calculates the credits allocated to each traffic class.
+ * It should be called only after the rules are checked by
+ * txgbe_dcb_check_config_cee().
+ */
+s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config,
+ u32 max_frame_size, u8 direction)
+{
+ struct txgbe_dcb_tc_path *p;
+ u32 min_multiplier = 0;
+ u16 min_percent = 100;
+ s32 ret_val = 0;
+ /* Initialization values default for Tx settings */
+ u32 min_credit = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u16 link_percentage = 0;
+ u8 bw_percent = 0;
+ u8 i;
+
+ UNREFERENCED_PARAMETER(hw);
+
+ if (dcb_config == NULL) {
+ ret_val = TXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ min_credit = ((max_frame_size / 2) + TXGBE_DCB_CREDIT_QUANTUM - 1) /
+ TXGBE_DCB_CREDIT_QUANTUM;
+
+ /* Find smallest link percentage */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[p->bwg_id][direction];
+ link_percentage = p->bwg_percent;
+
+ link_percentage = (link_percentage * bw_percent) / 100;
+
+ if (link_percentage && link_percentage < min_percent)
+ min_percent = link_percentage;
+ }
+
+ /*
+ * The ratio between traffic classes will control the bandwidth
+ * percentages seen on the wire. To calculate this ratio we use
+	 * a multiplier. The refill credits must cover at least a maximum
+	 * sized frame, so here we find the smallest multiplier that brings
+	 * every enabled TC's refill credits up to min_credit.
+ */
+ min_multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the link percentage for each TC first */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[p->bwg_id][direction];
+
+ link_percentage = p->bwg_percent;
+ /* Must be careful of integer division for very small nums */
+ link_percentage = (link_percentage * bw_percent) / 100;
+ if (p->bwg_percent > 0 && link_percentage == 0)
+ link_percentage = 1;
+
+ /* Save link_percentage for reference */
+ p->link_percent = (u8)link_percentage;
+
+ /* Calculate credit refill ratio using multiplier */
+ credit_refill = min(link_percentage * min_multiplier,
+ (u32)TXGBE_DCB_MAX_CREDIT_REFILL);
+
+ /* Refill at least minimum credit */
+ if (credit_refill < min_credit)
+ credit_refill = min_credit;
+
+ p->data_credits_refill = (u16)credit_refill;
+
+ /* Calculate maximum credit for the TC */
+ credit_max = (link_percentage * TXGBE_DCB_MAX_CREDIT) / 100;
+
+ /*
+ * Adjustment based on rule checking, if the percentage
+ * of a TC is too small, the maximum credit may not be
+ * enough to send out a jumbo frame in data plane arbitration.
+ */
+ if (credit_max < min_credit)
+ credit_max = min_credit;
+
+ if (direction == TXGBE_DCB_TX_CONFIG) {
+ dcb_config->tc_config[i].desc_credits_max =
+ (u16)credit_max;
+ }
+
+ p->data_credits_max = (u16)credit_max;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * txgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
+ * @cfg: dcb configuration to unpack into hardware consumable fields
+ * @map: user priority to traffic class map
+ * @pfc_up: u8 to store user priority PFC bitmask
+ *
+ * This unpacks the dcb configuration PFC info which is stored per
+ * traffic class into an 8-bit user priority bitmask that can be
+ * consumed by hardware routines. The priority to tc map must be
+ * updated before calling this routine so the current UP-to-TC maps
+ * are used.
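+ *
+ * For example, if user priorities 6 and 7 map to a TC with PFC enabled,
+ * bits 6 and 7 are set and *pfc_up ends up as 0xC0.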
+ */
+void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int up;
+
+ /*
+ * If the TC for this user priority has PFC enabled then set the
+ * matching bit in 'pfc_up' to reflect that PFC is enabled.
+ */
+ for (*pfc_up = 0, up = 0; up < TXGBE_DCB_UP_MAX; up++) {
+ if (tc_config[map[up]].pfc != txgbe_dcb_pfc_disabled)
+ *pfc_up |= 1 << up;
+ }
+}
+
+void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *cfg, int direction,
+ u16 *refill)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+ refill[tc] = tc_config[tc].path[direction].data_credits_refill;
+}
+
+void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *cfg, u16 *max)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+ max[tc] = tc_config[tc].desc_credits_max;
+}
+
+void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *cfg, int direction,
+ u8 *bwgid)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+ bwgid[tc] = tc_config[tc].path[direction].bwg_id;
+}
+
+void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *cfg, int direction,
+ u8 *tsa)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ int tc;
+
+ for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+ tsa[tc] = tc_config[tc].path[direction].tsa;
+}
+
+u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *cfg, int direction, u8 up)
+{
+ struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+ u8 prio_mask = 1 << up;
+ u8 tc = cfg->num_tcs.pg_tcs;
+
+ /* If tc is 0 then DCB is likely not enabled or supported */
+ if (!tc)
+ goto out;
+
+ /*
+ * Test from maximum TC to 1 and report the first match we find. If
+ * we find no match we can assume that the TC is 0 since the TC must
+ * be set for all user priorities
+ */
+ for (tc--; tc; tc--) {
+ if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+ break;
+ }
+out:
+ return tc;
+}
+
+void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *cfg, int direction,
+ u8 *map)
+{
+ u8 up;
+
+ for (up = 0; up < TXGBE_DCB_UP_MAX; up++)
+ map[up] = txgbe_dcb_get_tc_from_up(cfg, direction, up);
+}
+
+/**
+ * txgbe_dcb_check_config_cee - Check DCB configuration rules
+ * @dcb_config: Pointer to DCB config structure
+ *
+ * This function checks DCB rules for DCB settings.
+ * The following rules are checked:
+ * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
+ * 2. The sum of bandwidth percentages of all Traffic Classes within a
+ *    Bandwidth Group must total 100%.
+ * 3. A Traffic Class should not be set to both Link Strict Priority
+ * and Group Strict Priority.
+ * 4. Link strict Bandwidth Groups can only have link strict traffic classes
+ * with zero bandwidth.
+ */
+s32 txgbe_dcb_check_config_cee(struct txgbe_dcb_config *dcb_config)
+{
+ struct txgbe_dcb_tc_path *p;
+ s32 ret_val = 0;
+ u8 i, j, bw = 0, bw_id;
+ u8 bw_sum[2][TXGBE_DCB_BWG_MAX];
+ bool link_strict[2][TXGBE_DCB_BWG_MAX];
+
+ memset(bw_sum, 0, sizeof(bw_sum));
+ memset(link_strict, 0, sizeof(link_strict));
+
+ /* First Tx, then Rx */
+ for (i = 0; i < 2; i++) {
+ /* Check each traffic class for rule violation */
+ for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+ p = &dcb_config->tc_config[j].path[i];
+
+ bw = p->bwg_percent;
+ bw_id = p->bwg_id;
+
+ if (bw_id >= TXGBE_DCB_BWG_MAX) {
+ ret_val = TXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ if (p->tsa == txgbe_dcb_tsa_strict) {
+ link_strict[i][bw_id] = true;
+ /* Link strict should have zero bandwidth */
+ if (bw) {
+ ret_val = TXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ } else if (!bw) {
+ /*
+ * Traffic classes without link strict
+ * should have non-zero bandwidth.
+ */
+ ret_val = TXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ bw_sum[i][bw_id] += bw;
+ }
+
+ bw = 0;
+
+ /* Check each bandwidth group for rule violation */
+ for (j = 0; j < TXGBE_DCB_BWG_MAX; j++) {
+ bw += dcb_config->bw_percentage[j][i];
+ /*
+ * Sum of bandwidth percentages of all traffic classes
+ * within a Bandwidth Group must total 100 except for
+ * link strict group (zero bandwidth).
+ */
+ if (link_strict[i][j]) {
+ if (bw_sum[i][j]) {
+ /*
+ * Link strict group should have zero
+ * bandwidth.
+ */
+ ret_val = TXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ } else if (bw_sum[i][j] != TXGBE_DCB_BW_PERCENT &&
+ bw_sum[i][j] != 0) {
+ ret_val = TXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ }
+
+ if (bw != TXGBE_DCB_BW_PERCENT) {
+ ret_val = TXGBE_ERR_CONFIG;
+ goto err_config;
+ }
+ }
+
+err_config:
+
+ return ret_val;
+}
+
+/**
+ * txgbe_dcb_get_tc_stats - Returns status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 txgbe_dcb_get_tc_stats(struct txgbe_hw *hw, struct txgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ s32 ret = TXGBE_NOT_IMPLEMENTED;
+ ret = txgbe_dcb_get_tc_stats_raptor(hw, stats, tc_count);
+ return ret;
+}
+
+/**
+ * txgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 txgbe_dcb_get_pfc_stats(struct txgbe_hw *hw, struct txgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ s32 ret = TXGBE_NOT_IMPLEMENTED;
+ ret = txgbe_dcb_get_pfc_stats_raptor(hw, stats, tc_count);
+ return ret;
+}
+
+/**
+ * txgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure Rx Data Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_rx_arbiter_cee(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config)
+{
+ s32 ret = TXGBE_NOT_IMPLEMENTED;
+ u8 tsa[TXGBE_DCB_TC_MAX] = { 0 };
+ u8 bwgid[TXGBE_DCB_TC_MAX] = { 0 };
+ u8 map[TXGBE_DCB_UP_MAX] = { 0 };
+ u16 refill[TXGBE_DCB_TC_MAX] = { 0 };
+ u16 max[TXGBE_DCB_TC_MAX] = { 0 };
+
+ txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_TX_CONFIG, refill);
+ txgbe_dcb_unpack_max_cee(dcb_config, max);
+ txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_TX_CONFIG, bwgid);
+ txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_TX_CONFIG, tsa);
+ txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_TX_CONFIG, map);
+
+ ret = txgbe_dcb_config_rx_arbiter_raptor(hw, refill, max, bwgid,
+ tsa, map);
+ return ret;
+}
+
+/**
+ * txgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_tx_desc_arbiter_cee(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config)
+{
+ s32 ret = TXGBE_NOT_IMPLEMENTED;
+ u8 tsa[TXGBE_DCB_TC_MAX];
+ u8 bwgid[TXGBE_DCB_TC_MAX];
+ u16 refill[TXGBE_DCB_TC_MAX];
+ u16 max[TXGBE_DCB_TC_MAX];
+
+ txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_TX_CONFIG, refill);
+ txgbe_dcb_unpack_max_cee(dcb_config, max);
+ txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_TX_CONFIG, bwgid);
+ txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_TX_CONFIG, tsa);
+
+ ret = txgbe_dcb_config_tx_desc_arbiter_raptor(hw, refill, max,
+ bwgid, tsa);
+
+ return ret;
+}
+
+/**
+ * txgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_tx_data_arbiter_cee(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config)
+{
+ s32 ret = TXGBE_NOT_IMPLEMENTED;
+ u8 tsa[TXGBE_DCB_TC_MAX];
+ u8 bwgid[TXGBE_DCB_TC_MAX];
+ u8 map[TXGBE_DCB_UP_MAX] = { 0 };
+ u16 refill[TXGBE_DCB_TC_MAX];
+ u16 max[TXGBE_DCB_TC_MAX];
+
+ txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_TX_CONFIG, refill);
+ txgbe_dcb_unpack_max_cee(dcb_config, max);
+ txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_TX_CONFIG, bwgid);
+ txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_TX_CONFIG, tsa);
+ txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_TX_CONFIG, map);
+
+ ret = txgbe_dcb_config_tx_data_arbiter_raptor(hw, refill, max,
+ bwgid, tsa,
+ map);
+
+ return ret;
+}
+
+/**
+ * txgbe_dcb_config_pfc_cee - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 txgbe_dcb_config_pfc_cee(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config)
+{
+ s32 ret = TXGBE_NOT_IMPLEMENTED;
+ u8 pfc_en;
+ u8 map[TXGBE_DCB_UP_MAX] = { 0 };
+
+ txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_TX_CONFIG, map);
+ txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+
+ ret = txgbe_dcb_config_pfc_raptor(hw, pfc_en, map);
+
+ return ret;
+}
+
+/**
+ * txgbe_dcb_config_tc_stats - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers, all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 txgbe_dcb_config_tc_stats(struct txgbe_hw *hw)
+{
+ s32 ret = TXGBE_NOT_IMPLEMENTED;
+ ret = txgbe_dcb_config_tc_stats_raptor(hw, NULL);
+
+ return ret;
+}
+
+/**
+ * txgbe_dcb_hw_config_cee - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 txgbe_dcb_hw_config_cee(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config)
+{
+ s32 ret = TXGBE_NOT_IMPLEMENTED;
+ u8 pfc_en;
+ u8 tsa[TXGBE_DCB_TC_MAX];
+ u8 bwgid[TXGBE_DCB_TC_MAX];
+ u8 map[TXGBE_DCB_UP_MAX] = { 0 };
+ u16 refill[TXGBE_DCB_TC_MAX];
+ u16 max[TXGBE_DCB_TC_MAX];
+
+ /* Unpack CEE standard containers */
+ txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_TX_CONFIG, refill);
+ txgbe_dcb_unpack_max_cee(dcb_config, max);
+ txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_TX_CONFIG, bwgid);
+ txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_TX_CONFIG, tsa);
+ txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_TX_CONFIG, map);
+
+ txgbe_dcb_config_raptor(hw, dcb_config);
+ ret = txgbe_dcb_hw_config_raptor(hw, dcb_config->link_speed,
+ refill, max, bwgid,
+ tsa, map);
+
+ txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
+
+ if (!ret && dcb_config->pfc_mode_enable) {
+ txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+ ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
+ }
+
+ return ret;
+}
+
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 txgbe_dcb_config_pfc(struct txgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+ int ret = TXGBE_ERR_PARAM;
+ ret = txgbe_dcb_config_pfc_raptor(hw, pfc_en, map);
+ return ret;
+}
+
+s32 txgbe_dcb_hw_config(struct txgbe_hw *hw, u16 *refill, u16 *max,
+ u8 *bwg_id, u8 *tsa, u8 *map)
+{
+ txgbe_dcb_config_rx_arbiter_raptor(hw, refill, max, bwg_id,
+ tsa, map);
+ txgbe_dcb_config_tx_desc_arbiter_raptor(hw, refill, max, bwg_id,
+ tsa);
+ txgbe_dcb_config_tx_data_arbiter_raptor(hw, refill, max, bwg_id,
+ tsa, map);
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_DCB_H_
+#define _TXGBE_DCB_H_
+
+#include "txgbe_type.h"
+
+/* DCB defines */
+/* DCB credit calculation defines */
+#define TXGBE_DCB_CREDIT_QUANTUM 64
+#define TXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
+#define TXGBE_DCB_MAX_TSO_SIZE  (32 * 1024) /* Max TSO pkt size in DCB */
+#define TXGBE_DCB_MAX_CREDIT (2 * TXGBE_DCB_MAX_CREDIT_REFILL)
+
+/* 513 for 32KB TSO packet */
+#define TXGBE_DCB_MIN_TSO_CREDIT \
+ ((TXGBE_DCB_MAX_TSO_SIZE / TXGBE_DCB_CREDIT_QUANTUM) + 1)
+
+#define TXGBE_DCB_TX_CONFIG 0
+#define TXGBE_DCB_RX_CONFIG 1
+
+/* DCB capability defines */
+#define TXGBE_DCB_PG_SUPPORT 0x00000001
+#define TXGBE_DCB_PFC_SUPPORT 0x00000002
+#define TXGBE_DCB_BCN_SUPPORT 0x00000004
+#define TXGBE_DCB_UP2TC_SUPPORT 0x00000008
+#define TXGBE_DCB_GSP_SUPPORT 0x00000010
+
+struct txgbe_dcb_support {
+ u32 capabilities; /* DCB capabilities */
+
+	/* Each bit represents a number of TCs configurable in the hw.
+	 * If 8 traffic classes can be configured, the value is 0x80.
+	 */
+ u8 traffic_classes;
+ u8 pfc_traffic_classes;
+};
+
+enum txgbe_dcb_tsa {
+ txgbe_dcb_tsa_ets = 0,
+ txgbe_dcb_tsa_group_strict_cee,
+ txgbe_dcb_tsa_strict
+};
+
+/* Traffic class bandwidth allocation per direction */
+struct txgbe_dcb_tc_path {
+ u8 bwg_id; /* Bandwidth Group (BWG) ID */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 link_percent; /* % of link bandwidth */
+ u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+ u16 data_credits_refill; /* Credit refill amount in 64B granularity */
+	u16 data_credits_max; /* Max credits for a configured packet buffer
+			       * in 64B granularity. */
+ enum txgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
+};
+
+enum txgbe_dcb_pfc {
+ txgbe_dcb_pfc_disabled = 0,
+ txgbe_dcb_pfc_enabled,
+ txgbe_dcb_pfc_enabled_txonly,
+ txgbe_dcb_pfc_enabled_rxonly
+};
+
+/* Traffic class configuration */
+struct txgbe_dcb_tc_config {
+ struct txgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
+ enum txgbe_dcb_pfc pfc; /* Class based flow control setting */
+
+ u16 desc_credits_max; /* For Tx Descriptor arbitration */
+ u8 tc; /* Traffic class (TC) */
+};
+
+enum txgbe_dcb_pba {
+ /* PBA[0-7] each use 64KB FIFO */
+ txgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
+ /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
+ txgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+struct txgbe_dcb_num_tcs {
+ u8 pg_tcs;
+ u8 pfc_tcs;
+};
+
+struct txgbe_dcb_config {
+ struct txgbe_dcb_tc_config tc_config[TXGBE_DCB_TC_MAX];
+ struct txgbe_dcb_support support;
+ struct txgbe_dcb_num_tcs num_tcs;
+ u8 bw_percentage[TXGBE_DCB_BWG_MAX][2]; /* One each for Tx/Rx */
+ bool pfc_mode_enable;
+ bool round_robin_enable;
+
+ enum txgbe_dcb_pba rx_pba_cfg;
+
+ u32 link_speed; /* For bandwidth allocation validation purpose */
+ bool vt_mode;
+};
+
+int txgbe_dcb_pfc_enable(struct txgbe_hw *hw, u8 tc_num);
+/* DCB rule checking */
+s32 txgbe_dcb_check_config_cee(struct txgbe_dcb_config *);
+
+/* DCB credits calculation */
+s32 txgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
+s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw *,
+ struct txgbe_dcb_config *, u32, u8);
+
+/* DCB PFC */
+s32 txgbe_dcb_config_pfc(struct txgbe_hw *, u8, u8 *);
+s32 txgbe_dcb_config_pfc_cee(struct txgbe_hw *, struct txgbe_dcb_config *);
+
+/* DCB stats */
+s32 txgbe_dcb_config_tc_stats(struct txgbe_hw *);
+s32 txgbe_dcb_get_tc_stats(struct txgbe_hw *, struct txgbe_hw_stats *, u8);
+s32 txgbe_dcb_get_pfc_stats(struct txgbe_hw *, struct txgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 txgbe_dcb_config_tx_desc_arbiter_cee(struct txgbe_hw *,
+ struct txgbe_dcb_config *);
+s32 txgbe_dcb_config_tx_data_arbiter_cee(struct txgbe_hw *,
+ struct txgbe_dcb_config *);
+s32 txgbe_dcb_config_rx_arbiter_cee(struct txgbe_hw *,
+ struct txgbe_dcb_config *);
+
+/* DCB unpack routines */
+void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *, u8 *, u8 *);
+void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *, int, u16 *);
+void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *, u16 *);
+void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *, int, u8 *);
+void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *, int, u8 *);
+void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *, int, u8 *);
+u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *, int, u8);
+
+/* DCB initialization */
+s32 txgbe_dcb_hw_config(struct txgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
+s32 txgbe_dcb_hw_config_cee(struct txgbe_hw *, struct txgbe_dcb_config *);
+
+#include "txgbe_dcb_hw.h"
+
+#endif /* _TXGBE_DCB_H_ */
new file mode 100644
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_type.h"
+
+#include "txgbe_dcb.h"
+
+/**
+ * txgbe_dcb_get_tc_stats_raptor - Returns status for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 txgbe_dcb_get_tc_stats_raptor(struct txgbe_hw *hw,
+ struct txgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ int tc;
+
+ UNREFERENCED_PARAMETER(hw);
+
+ DEBUGFUNC("dcb_get_tc_stats");
+
+ if (tc_count > TXGBE_DCB_TC_MAX)
+ return TXGBE_ERR_PARAM;
+
+ /* Statistics pertaining to each traffic class */
+ for (tc = 0; tc < tc_count; tc++) {
+ /* Transmitted Packets */
+ stats->up[tc].tx_up_packets += 0;
+ /* Transmitted Bytes (read low first to prevent missed carry) */
+ stats->up[tc].tx_up_bytes += 0;
+ /* Received Packets */
+ stats->up[tc].rx_up_packets += 0;
+ /* Received Bytes (read low first to prevent missed carry) */
+ stats->up[tc].rx_up_bytes += 0;
+ /* Received Dropped Packet */
+ stats->up[tc].rx_up_drop_packets += 0;
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_get_pfc_stats_raptor - Return CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 txgbe_dcb_get_pfc_stats_raptor(struct txgbe_hw *hw,
+ struct txgbe_hw_stats *stats,
+ u8 tc_count)
+{
+ int tc;
+
+ DEBUGFUNC("dcb_get_pfc_stats");
+
+ if (tc_count > TXGBE_DCB_TC_MAX)
+ return TXGBE_ERR_PARAM;
+
+ for (tc = 0; tc < tc_count; tc++) {
+ /* Priority XOFF Transmitted */
+ stats->up[tc].tx_up_xoff_packets +=
+ rd32(hw, TXGBE_PBTXUPXOFF(tc));
+ /* Priority XOFF Received */
+ stats->up[tc].rx_up_xoff_packets +=
+ rd32(hw, TXGBE_PBRXUPXOFF(tc));
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_config_rx_arbiter_raptor - Config Rx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Rx Packet Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_rx_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ u32 reg = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u8 i = 0;
+
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; WSP)
+ */
+ reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP |
+ TXGBE_ARBRXCTL_DIA;
+ wr32(hw, TXGBE_ARBRXCTL, reg);
+
+ /*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
+	 * corresponding bits set for the UPs that need to be mapped to
+	 * that TC, e.g. if priorities 6 and 7 are to be mapped to a TC, the
+ * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+ */
+ reg = 0;
+ for (i = 0; i < TXGBE_DCB_UP_MAX; i++)
+ reg |= (map[i] << (i * TXGBE_RPUP2TC_UP_SHIFT));
+
+ wr32(hw, TXGBE_RPUP2TC, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ credit_refill = refill[i];
+ credit_max = max[i];
+ reg = TXGBE_QARBRXCFG_CRQ(credit_refill) |
+ TXGBE_QARBRXCFG_MCL(credit_max) |
+ TXGBE_QARBRXCFG_BWG(bwg_id[i]);
+
+ if (tsa[i] == txgbe_dcb_tsa_strict)
+ reg |= TXGBE_QARBRXCFG_LSP;
+
+ wr32(hw, TXGBE_QARBRXCFG(i), reg);
+ }
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP;
+ wr32(hw, TXGBE_ARBRXCTL, reg);
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_config_tx_desc_arbiter_raptor - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_tx_desc_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa)
+{
+ u32 reg, max_credits;
+ u8 i;
+
+ /* Clear the per-Tx queue credits; we use per-TC instead */
+	for (i = 0; i < 128; i++)
+		wr32(hw, TXGBE_QARBTXCRED(i), 0);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ max_credits = max[i];
+ reg = TXGBE_QARBTXCFG_MCL(max_credits) |
+ TXGBE_QARBTXCFG_CRQ(refill[i]) |
+ TXGBE_QARBTXCFG_BWG(bwg_id[i]);
+
+ if (tsa[i] == txgbe_dcb_tsa_group_strict_cee)
+ reg |= TXGBE_QARBTXCFG_GSP;
+
+ if (tsa[i] == txgbe_dcb_tsa_strict)
+ reg |= TXGBE_QARBTXCFG_LSP;
+
+ wr32(hw, TXGBE_QARBTXCFG(i), reg);
+ }
+
+ /*
+ * Configure Tx descriptor plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = TXGBE_ARBTXCTL_WSP | TXGBE_ARBTXCTL_RRM;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_config_tx_data_arbiter_raptor - Config Tx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Tx Packet Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_tx_data_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ u32 reg;
+ u8 i;
+
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; SP; arb delay)
+ */
+ reg = TXGBE_PARBTXCTL_SP |
+ TXGBE_PARBTXCTL_RECYC |
+ TXGBE_PARBTXCTL_DA;
+ wr32(hw, TXGBE_PARBTXCTL, reg);
+
+ /*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
+	 * corresponding bits set for the UPs that need to be mapped to
+	 * that TC, e.g. if priorities 6 and 7 are to be mapped to a TC, the
+ * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+ */
+ reg = 0;
+ for (i = 0; i < TXGBE_DCB_UP_MAX; i++)
+ reg |= TXGBE_DCBUP2TC_MAP(i, map[i]);
+
+ wr32(hw, TXGBE_PBRXUP2TC, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ reg = TXGBE_PARBTXCFG_CRQ(refill[i]) |
+ TXGBE_PARBTXCFG_MCL(max[i]) |
+ TXGBE_PARBTXCFG_BWG(bwg_id[i]);
+
+ if (tsa[i] == txgbe_dcb_tsa_group_strict_cee)
+ reg |= TXGBE_PARBTXCFG_GSP;
+
+ if (tsa[i] == txgbe_dcb_tsa_strict)
+ reg |= TXGBE_PARBTXCFG_LSP;
+
+ wr32(hw, TXGBE_PARBTXCFG(i), reg);
+ }
+
+ /*
+ * Configure Tx packet plane (recycle mode; SP; arb delay) and
+ * enable arbiter
+ */
+ reg = TXGBE_PARBTXCTL_SP | TXGBE_PARBTXCTL_RECYC;
+ wr32(hw, TXGBE_PARBTXCTL, reg);
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_config_pfc_raptor - Configure priority flow control
+ * @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Priority Flow Control (PFC) for each traffic class.
+ */
+s32 txgbe_dcb_config_pfc_raptor(struct txgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+ u32 i, j, fcrtl, reg;
+ u8 max_tc = 0;
+
+ /* Enable Transmit Priority Flow Control */
+ wr32(hw, TXGBE_TXFCCFG, TXGBE_TXFCCFG_PFC);
+
+ /* Enable Receive Priority Flow Control */
+ wr32m(hw, TXGBE_RXFCCFG, TXGBE_RXFCCFG_PFC,
+ pfc_en ? TXGBE_RXFCCFG_PFC : 0);
+
+ for (i = 0; i < TXGBE_DCB_UP_MAX; i++) {
+ if (map[i] > max_tc)
+ max_tc = map[i];
+ }
+
+ /* Configure PFC Tx thresholds per TC */
+ for (i = 0; i <= max_tc; i++) {
+ int enabled = 0;
+
+ for (j = 0; j < TXGBE_DCB_UP_MAX; j++) {
+ if ((map[j] == i) && (pfc_en & (1 << j))) {
+ enabled = 1;
+ break;
+ }
+ }
+
+ if (enabled) {
+ reg = TXGBE_FCWTRHI_TH(hw->fc.high_water[i]) |
+ TXGBE_FCWTRHI_XOFF;
+ fcrtl = TXGBE_FCWTRLO_TH(hw->fc.low_water[i]) |
+ TXGBE_FCWTRLO_XON;
+ wr32(hw, TXGBE_FCWTRLO(i), fcrtl);
+ } else {
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ reg = rd32(hw, TXGBE_PBRXSIZE(i)) - 24576;
+ wr32(hw, TXGBE_FCWTRLO(i), 0);
+ }
+
+ wr32(hw, TXGBE_FCWTRHI(i), reg);
+ }
+
+ for (; i < TXGBE_DCB_TC_MAX; i++) {
+ wr32(hw, TXGBE_FCWTRLO(i), 0);
+ wr32(hw, TXGBE_FCWTRHI(i), 0);
+ }
+
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+ for (i = 0; i < (TXGBE_DCB_TC_MAX / 2); i++)
+ wr32(hw, TXGBE_FCXOFFTM(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ wr32(hw, TXGBE_RXFCRFSH, hw->fc.pause_time / 2);
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_config_tc_stats_raptor - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure queue statistics registers, all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 txgbe_dcb_config_tc_stats_raptor(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config)
+{
+ u8 tc_count = 8;
+ bool vt_mode = false;
+
+ UNREFERENCED_PARAMETER(hw);
+
+ if (dcb_config != NULL) {
+ tc_count = dcb_config->num_tcs.pg_tcs;
+ vt_mode = dcb_config->vt_mode;
+ }
+
+ if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
+ return TXGBE_ERR_PARAM;
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_config_raptor - Configure general DCB parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure general DCB parameters.
+ */
+s32 txgbe_dcb_config_raptor(struct txgbe_hw *hw,
+ struct txgbe_dcb_config *dcb_config)
+{
+ u32 reg;
+ u32 q;
+
+ /* Disable the Tx desc arbiter so that MTQC can be changed */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg |= TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+ reg = rd32(hw, TXGBE_PORTCTL);
+ reg &= ~TXGBE_PORTCTL_NUMTC_MASK;
+ reg &= ~TXGBE_PORTCTL_NUMVT_MASK;
+ reg |= TXGBE_PORTCTL_DCB;
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ /* Enable DCB for Rx with 8 TCs */
+ reg |= TXGBE_PORTCTL_NUMTC_8;
+ if (dcb_config->vt_mode)
+ reg |= TXGBE_PORTCTL_NUMVT_16;
+ }
+ if (dcb_config->num_tcs.pg_tcs == 4) {
+ /* We support both VT-on and VT-off with 4 TCs. */
+ reg |= TXGBE_PORTCTL_NUMTC_4;
+ if (dcb_config->vt_mode)
+ reg |= TXGBE_PORTCTL_NUMVT_32;
+ }
+ wr32(hw, TXGBE_PORTCTL, reg);
+
+ /* Disable drop for all queues */
+ for (q = 0; q < 128; q++) {
+ u32 val = 1 << (q % 32);
+ wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+ }
+
+ /* Enable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg &= ~TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+ return 0;
+}
+
+/**
+ * txgbe_dcb_hw_config_raptor - Configure and enable DCB
+ * @hw: pointer to hardware structure
+ * @link_speed: unused
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 txgbe_dcb_hw_config_raptor(struct txgbe_hw *hw, int link_speed,
+ u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
+ u8 *map)
+{
+ UNREFERENCED_PARAMETER(link_speed);
+
+ txgbe_dcb_config_rx_arbiter_raptor(hw, refill, max, bwg_id, tsa,
+ map);
+ txgbe_dcb_config_tx_desc_arbiter_raptor(hw, refill, max, bwg_id,
+ tsa);
+ txgbe_dcb_config_tx_data_arbiter_raptor(hw, refill, max, bwg_id,
+ tsa, map);
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_DCB_HW_H_
+#define _TXGBE_DCB_HW_H_
+
+/* DCB PFC */
+s32 txgbe_dcb_config_pfc_raptor(struct txgbe_hw *, u8, u8 *);
+
+/* DCB stats */
+s32 txgbe_dcb_config_tc_stats_raptor(struct txgbe_hw *,
+ struct txgbe_dcb_config *);
+s32 txgbe_dcb_get_tc_stats_raptor(struct txgbe_hw *,
+ struct txgbe_hw_stats *, u8);
+s32 txgbe_dcb_get_pfc_stats_raptor(struct txgbe_hw *,
+ struct txgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 txgbe_dcb_config_tx_desc_arbiter_raptor(struct txgbe_hw *, u16 *, u16 *,
+ u8 *, u8 *);
+s32 txgbe_dcb_config_tx_data_arbiter_raptor(struct txgbe_hw *, u16 *, u16 *,
+ u8 *, u8 *, u8 *);
+s32 txgbe_dcb_config_rx_arbiter_raptor(struct txgbe_hw *, u16 *, u16 *, u8 *,
+ u8 *, u8 *);
+
+/* DCB initialization */
+s32 txgbe_dcb_config_raptor(struct txgbe_hw *,
+ struct txgbe_dcb_config *);
+
+s32 txgbe_dcb_hw_config_raptor(struct txgbe_hw *, int, u16 *, u16 *, u8 *,
+ u8 *, u8 *);
+#endif /* _TXGBE_DCB_HW_H_ */
new file mode 100644
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_DEVIDS_H_
+#define _TXGBE_DEVIDS_H_
+
+/*
+ * Vendor ID
+ */
+#ifndef PCI_VENDOR_ID_WANGXUN
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+#endif
+
+/*
+ * Device IDs
+ */
+#define TXGBE_DEV_ID_RAPTOR_VF 0x1000
+#define TXGBE_DEV_ID_RAPTOR_SFP 0x1001 /* fiber */
+#define TXGBE_DEV_ID_RAPTOR_KR_KX_KX4 0x1002 /* backplane */
+#define TXGBE_DEV_ID_RAPTOR_XAUI 0x1003 /* copper */
+#define TXGBE_DEV_ID_RAPTOR_SGMII 0x1004 /* copper */
+#define TXGBE_DEV_ID_RAPTOR_QSFP 0x1011 /* fiber */
+#define TXGBE_DEV_ID_RAPTOR_VF_HV 0x2000
+#define TXGBE_DEV_ID_RAPTOR_T3_LOM 0x2001
+
+#define TXGBE_DEV_ID_WX1820_SFP 0x2001
+
+/*
+ * Subdevice IDs
+ */
+#define TXGBE_SUBDEV_ID_RAPTOR 0x0000
+#define TXGBE_SUBDEV_ID_MPW 0x0001
+
+#define TXGBE_ETHERTYPE_FLOW_CTRL 0x8808
+#define TXGBE_ETHERTYPE_IEEE_VLAN 0x8100 /* 802.1q protocol */
+
+#define TXGBE_VXLAN_PORT 4789
+
+#endif /* _TXGBE_DEVIDS_H_ */
new file mode 100644
@@ -0,0 +1,739 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_TYPE_DUMMY_H_
+#define _TXGBE_TYPE_DUMMY_H_
+
+#ifdef TUP
+#elif defined(__GNUC__)
+#define TUP(x) x##_unused __attribute__((unused))
+#elif defined(__LCLINT__)
+#define TUP(x) x /*@unused@*/
+#else
+#define TUP(x) x
+#endif /*TUP*/
+#define TUP0 TUP(p0)
+#define TUP1 TUP(p1)
+#define TUP2 TUP(p2)
+#define TUP3 TUP(p3)
+#define TUP4 TUP(p4)
+#define TUP5 TUP(p5)
+#define TUP6 TUP(p6)
+#define TUP7 TUP(p7)
+#define TUP8 TUP(p8)
+#define TUP9 TUP(p9)
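+
+/*
+ * Under GCC, e.g., "struct txgbe_hw *TUP0" expands to
+ * "struct txgbe_hw *p0_unused __attribute__((unused))", which keeps
+ * these stub parameters from triggering unused-parameter warnings.
+ */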
+
+/* struct txgbe_bus_operations */
+static inline s32 txgbe_bus_get_bus_info_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline void txgbe_bus_set_lan_id_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+/* struct txgbe_rom_operations */
+static inline s32 txgbe_rom_init_params_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_read16_dummy(struct txgbe_hw *TUP0, u32 TUP1, u16 *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_readw_buffer_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, void *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_readw_sw_dummy(struct txgbe_hw *TUP0, u32 TUP1, u16 *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_read32_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_read_buffer_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, void *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_write16_dummy(struct txgbe_hw *TUP0, u32 TUP1, u16 TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_writew_buffer_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, void *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_writew_sw_dummy(struct txgbe_hw *TUP0, u32 TUP1, u16 TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_write32_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_write_buffer_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, void *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_validate_checksum_dummy(struct txgbe_hw *TUP0, u16 *TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_update_checksum_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_rom_calc_checksum_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+
+/* struct txgbe_mac_operations */
+static inline s32 txgbe_mac_init_hw_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_reset_hw_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_start_hw_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_stop_hw_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_clear_hw_cntrs_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline void txgbe_mac_enable_relaxed_ordering_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline u64 txgbe_mac_get_supported_physical_layer_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_get_mac_addr_dummy(struct txgbe_hw *TUP0, u8 *TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_get_san_mac_addr_dummy(struct txgbe_hw *TUP0, u8 *TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_set_san_mac_addr_dummy(struct txgbe_hw *TUP0, u8 *TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_get_device_caps_dummy(struct txgbe_hw *TUP0, u16 *TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_get_wwn_prefix_dummy(struct txgbe_hw *TUP0, u16 *TUP1, u16 *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_get_fcoe_boot_status_dummy(struct txgbe_hw *TUP0, u16 *TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_read_analog_reg8_dummy(struct txgbe_hw *TUP0, u32 TUP1, u8 *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_write_analog_reg8_dummy(struct txgbe_hw *TUP0, u32 TUP1, u8 TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_setup_sfp_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_enable_rx_dma_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_disable_sec_rx_path_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_enable_sec_rx_path_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_disable_sec_tx_path_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_enable_sec_tx_path_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_acquire_swfw_sync_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline void txgbe_mac_release_swfw_sync_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return;
+}
+static inline void txgbe_mac_init_swfw_sync_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline u64 txgbe_mac_autoc_read_dummy(struct txgbe_hw *TUP0)
+{
+ return 0;
+}
+static inline void txgbe_mac_autoc_write_dummy(struct txgbe_hw *TUP0, u64 TUP1)
+{
+ return;
+}
+static inline s32 txgbe_mac_prot_autoc_read_dummy(struct txgbe_hw *TUP0, bool *TUP1, u64 *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_prot_autoc_write_dummy(struct txgbe_hw *TUP0, bool TUP1, u64 TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_negotiate_api_version_dummy(struct txgbe_hw *TUP0, int TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline void txgbe_mac_disable_tx_laser_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline void txgbe_mac_enable_tx_laser_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline void txgbe_mac_flap_tx_laser_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline s32 txgbe_mac_setup_link_dummy(struct txgbe_hw *TUP0, u32 TUP1, bool TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_setup_mac_link_dummy(struct txgbe_hw *TUP0, u32 TUP1, bool TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_check_link_dummy(struct txgbe_hw *TUP0, u32 *TUP1, bool *TUP2, bool TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_get_link_capabilities_dummy(struct txgbe_hw *TUP0, u32 *TUP1, bool *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline void txgbe_mac_set_rate_select_speed_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return;
+}
+static inline void txgbe_mac_setup_pba_dummy(struct txgbe_hw *TUP0, int TUP1, u32 TUP2, int TUP3)
+{
+ return;
+}
+static inline s32 txgbe_mac_led_on_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_led_off_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_blink_led_start_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_blink_led_stop_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_init_led_link_act_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_set_rar_dummy(struct txgbe_hw *TUP0, u32 TUP1, u8 *TUP2, u32 TUP3, u32 TUP4)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_set_uc_addr_dummy(struct txgbe_hw *TUP0, u32 TUP1, u8 *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_clear_rar_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_insert_mac_addr_dummy(struct txgbe_hw *TUP0, u8 *TUP1, u32 TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_set_vmdq_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_set_vmdq_san_mac_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_clear_vmdq_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_init_rx_addrs_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_update_uc_addr_list_dummy(struct txgbe_hw *TUP0, u8 *TUP1, u32 TUP2, txgbe_mc_addr_itr TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_update_mc_addr_list_dummy(struct txgbe_hw *TUP0, u8 *TUP1, u32 TUP2, txgbe_mc_addr_itr TUP3, bool TUP4)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_enable_mc_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_disable_mc_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_clear_vfta_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_set_vfta_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, bool TUP3, bool TUP4)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_set_vlvf_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, bool TUP3, u32 *TUP4, u32 TUP5, bool TUP6)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_init_uta_tables_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline void txgbe_mac_set_mac_anti_spoofing_dummy(struct txgbe_hw *TUP0, bool TUP1, int TUP2)
+{
+ return;
+}
+static inline void txgbe_mac_set_vlan_anti_spoofing_dummy(struct txgbe_hw *TUP0, bool TUP1, int TUP2)
+{
+ return;
+}
+static inline s32 txgbe_mac_update_xcast_mode_dummy(struct txgbe_hw *TUP0, int TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_set_rlpml_dummy(struct txgbe_hw *TUP0, u16 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_fc_enable_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_setup_fc_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline void txgbe_mac_fc_autoneg_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline s32 txgbe_mac_set_fw_drv_ver_dummy(struct txgbe_hw *TUP0, u8 TUP1, u8 TUP2, u8 TUP3, u8 TUP4, u16 TUP5, const char *TUP6)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_get_thermal_sensor_data_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_init_thermal_sensor_thresh_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline void txgbe_mac_get_rtrup2tc_dummy(struct txgbe_hw *TUP0, u8 *TUP1)
+{
+ return;
+}
+static inline void txgbe_mac_disable_rx_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline void txgbe_mac_enable_rx_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline void txgbe_mac_set_source_address_pruning_dummy(struct txgbe_hw *TUP0, bool TUP1, unsigned int TUP2)
+{
+ return;
+}
+static inline void txgbe_mac_set_ethertype_anti_spoofing_dummy(struct txgbe_hw *TUP0, bool TUP1, int TUP2)
+{
+ return;
+}
+static inline s32 txgbe_mac_dmac_update_tcs_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_dmac_config_tcs_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_dmac_config_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_setup_eee_dummy(struct txgbe_hw *TUP0, bool TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_read_iosf_sb_reg_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, u32 *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mac_write_iosf_sb_reg_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, u32 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline void txgbe_mac_disable_mdd_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline void txgbe_mac_enable_mdd_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline void txgbe_mac_mdd_event_dummy(struct txgbe_hw *TUP0, u32 *TUP1)
+{
+ return;
+}
+static inline void txgbe_mac_restore_mdd_vf_dummy(struct txgbe_hw *TUP0, u32 TUP1)
+{
+ return;
+}
+static inline bool txgbe_mac_fw_recovery_mode_dummy(struct txgbe_hw *TUP0)
+{
+ return false;
+}
+
+/* struct txgbe_phy_operations */
+static inline u32 txgbe_phy_get_media_type_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_identify_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_identify_sfp_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_init_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_reset_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_read_reg_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, u16 *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_write_reg_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, u16 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_read_reg_mdi_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, u16 *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_write_reg_mdi_dummy(struct txgbe_hw *TUP0, u32 TUP1, u32 TUP2, u16 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_setup_link_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_setup_internal_link_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_setup_link_speed_dummy(struct txgbe_hw *TUP0, u32 TUP1, bool TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_check_link_dummy(struct txgbe_hw *TUP0, u32 *TUP1, bool *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_get_firmware_version_dummy(struct txgbe_hw *TUP0, u32 *TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_read_i2c_byte_dummy(struct txgbe_hw *TUP0, u8 TUP1, u8 TUP2, u8 *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_write_i2c_byte_dummy(struct txgbe_hw *TUP0, u8 TUP1, u8 TUP2, u8 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_read_i2c_sff8472_dummy(struct txgbe_hw *TUP0, u8 TUP1, u8 *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_read_i2c_eeprom_dummy(struct txgbe_hw *TUP0, u8 TUP1, u8 *TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_write_i2c_eeprom_dummy(struct txgbe_hw *TUP0, u8 TUP1, u8 TUP2)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline void txgbe_phy_i2c_bus_clear_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline s32 txgbe_phy_check_overtemp_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_set_phy_power_dummy(struct txgbe_hw *TUP0, bool TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_enter_lplu_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_handle_lasi_dummy(struct txgbe_hw *TUP0)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_read_i2c_byte_unlocked_dummy(struct txgbe_hw *TUP0, u8 TUP1, u8 TUP2, u8 *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_phy_write_i2c_byte_unlocked_dummy(struct txgbe_hw *TUP0, u8 TUP1, u8 TUP2, u8 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+
+/* struct txgbe_link_operations */
+static inline s32 txgbe_link_read_link_dummy(struct txgbe_hw *TUP0, u8 TUP1, u16 TUP2, u16 *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_link_read_link_unlocked_dummy(struct txgbe_hw *TUP0, u8 TUP1, u16 TUP2, u16 *TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_link_write_link_dummy(struct txgbe_hw *TUP0, u8 TUP1, u16 TUP2, u16 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_link_write_link_unlocked_dummy(struct txgbe_hw *TUP0, u8 TUP1, u16 TUP2, u16 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+
+/* struct txgbe_mbx_operations */
+static inline void txgbe_mbx_init_params_dummy(struct txgbe_hw *TUP0)
+{
+ return;
+}
+static inline s32 txgbe_mbx_read_dummy(struct txgbe_hw *TUP0, u32 *TUP1, u16 TUP2, u16 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mbx_write_dummy(struct txgbe_hw *TUP0, u32 *TUP1, u16 TUP2, u16 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mbx_read_posted_dummy(struct txgbe_hw *TUP0, u32 *TUP1, u16 TUP2, u16 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mbx_write_posted_dummy(struct txgbe_hw *TUP0, u32 *TUP1, u16 TUP2, u16 TUP3)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mbx_check_for_msg_dummy(struct txgbe_hw *TUP0, u16 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mbx_check_for_ack_dummy(struct txgbe_hw *TUP0, u16 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+static inline s32 txgbe_mbx_check_for_rst_dummy(struct txgbe_hw *TUP0, u16 TUP1)
+{
+ return TXGBE_ERR_OPS_DUMMY;
+}
+
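+/*
+ * txgbe_init_ops_dummy - Point every hardware op at its dummy handler
+ *
+ * Pre-loading the ops tables with these stubs means a call through an
+ * op that the MAC/PHY init code did not override fails gracefully with
+ * TXGBE_ERR_OPS_DUMMY instead of dereferencing a NULL pointer.
+ */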
+static inline void txgbe_init_ops_dummy(struct txgbe_hw *hw)
+{
+ hw->bus.get_bus_info = txgbe_bus_get_bus_info_dummy;
+ hw->bus.set_lan_id = txgbe_bus_set_lan_id_dummy;
+ hw->rom.init_params = txgbe_rom_init_params_dummy;
+ hw->rom.read16 = txgbe_rom_read16_dummy;
+ hw->rom.readw_buffer = txgbe_rom_readw_buffer_dummy;
+ hw->rom.readw_sw = txgbe_rom_readw_sw_dummy;
+ hw->rom.read32 = txgbe_rom_read32_dummy;
+ hw->rom.read_buffer = txgbe_rom_read_buffer_dummy;
+ hw->rom.write16 = txgbe_rom_write16_dummy;
+ hw->rom.writew_buffer = txgbe_rom_writew_buffer_dummy;
+ hw->rom.writew_sw = txgbe_rom_writew_sw_dummy;
+ hw->rom.write32 = txgbe_rom_write32_dummy;
+ hw->rom.write_buffer = txgbe_rom_write_buffer_dummy;
+ hw->rom.validate_checksum = txgbe_rom_validate_checksum_dummy;
+ hw->rom.update_checksum = txgbe_rom_update_checksum_dummy;
+ hw->rom.calc_checksum = txgbe_rom_calc_checksum_dummy;
+ hw->mac.init_hw = txgbe_mac_init_hw_dummy;
+ hw->mac.reset_hw = txgbe_mac_reset_hw_dummy;
+ hw->mac.start_hw = txgbe_mac_start_hw_dummy;
+ hw->mac.stop_hw = txgbe_mac_stop_hw_dummy;
+ hw->mac.clear_hw_cntrs = txgbe_mac_clear_hw_cntrs_dummy;
+ hw->mac.enable_relaxed_ordering = txgbe_mac_enable_relaxed_ordering_dummy;
+ hw->mac.get_supported_physical_layer = txgbe_mac_get_supported_physical_layer_dummy;
+ hw->mac.get_mac_addr = txgbe_mac_get_mac_addr_dummy;
+ hw->mac.get_san_mac_addr = txgbe_mac_get_san_mac_addr_dummy;
+ hw->mac.set_san_mac_addr = txgbe_mac_set_san_mac_addr_dummy;
+ hw->mac.get_device_caps = txgbe_mac_get_device_caps_dummy;
+ hw->mac.get_wwn_prefix = txgbe_mac_get_wwn_prefix_dummy;
+ hw->mac.get_fcoe_boot_status = txgbe_mac_get_fcoe_boot_status_dummy;
+ hw->mac.read_analog_reg8 = txgbe_mac_read_analog_reg8_dummy;
+ hw->mac.write_analog_reg8 = txgbe_mac_write_analog_reg8_dummy;
+ hw->mac.setup_sfp = txgbe_mac_setup_sfp_dummy;
+ hw->mac.enable_rx_dma = txgbe_mac_enable_rx_dma_dummy;
+ hw->mac.disable_sec_rx_path = txgbe_mac_disable_sec_rx_path_dummy;
+ hw->mac.enable_sec_rx_path = txgbe_mac_enable_sec_rx_path_dummy;
+ hw->mac.disable_sec_tx_path = txgbe_mac_disable_sec_tx_path_dummy;
+ hw->mac.enable_sec_tx_path = txgbe_mac_enable_sec_tx_path_dummy;
+ hw->mac.acquire_swfw_sync = txgbe_mac_acquire_swfw_sync_dummy;
+ hw->mac.release_swfw_sync = txgbe_mac_release_swfw_sync_dummy;
+ hw->mac.init_swfw_sync = txgbe_mac_init_swfw_sync_dummy;
+ hw->mac.autoc_read = txgbe_mac_autoc_read_dummy;
+ hw->mac.autoc_write = txgbe_mac_autoc_write_dummy;
+ hw->mac.prot_autoc_read = txgbe_mac_prot_autoc_read_dummy;
+ hw->mac.prot_autoc_write = txgbe_mac_prot_autoc_write_dummy;
+ hw->mac.negotiate_api_version = txgbe_mac_negotiate_api_version_dummy;
+ hw->mac.disable_tx_laser = txgbe_mac_disable_tx_laser_dummy;
+ hw->mac.enable_tx_laser = txgbe_mac_enable_tx_laser_dummy;
+ hw->mac.flap_tx_laser = txgbe_mac_flap_tx_laser_dummy;
+ hw->mac.setup_link = txgbe_mac_setup_link_dummy;
+ hw->mac.setup_mac_link = txgbe_mac_setup_mac_link_dummy;
+ hw->mac.check_link = txgbe_mac_check_link_dummy;
+ hw->mac.get_link_capabilities = txgbe_mac_get_link_capabilities_dummy;
+ hw->mac.set_rate_select_speed = txgbe_mac_set_rate_select_speed_dummy;
+ hw->mac.setup_pba = txgbe_mac_setup_pba_dummy;
+ hw->mac.led_on = txgbe_mac_led_on_dummy;
+ hw->mac.led_off = txgbe_mac_led_off_dummy;
+ hw->mac.blink_led_start = txgbe_mac_blink_led_start_dummy;
+ hw->mac.blink_led_stop = txgbe_mac_blink_led_stop_dummy;
+ hw->mac.init_led_link_act = txgbe_mac_init_led_link_act_dummy;
+ hw->mac.set_rar = txgbe_mac_set_rar_dummy;
+ hw->mac.set_uc_addr = txgbe_mac_set_uc_addr_dummy;
+ hw->mac.clear_rar = txgbe_mac_clear_rar_dummy;
+ hw->mac.insert_mac_addr = txgbe_mac_insert_mac_addr_dummy;
+ hw->mac.set_vmdq = txgbe_mac_set_vmdq_dummy;
+ hw->mac.set_vmdq_san_mac = txgbe_mac_set_vmdq_san_mac_dummy;
+ hw->mac.clear_vmdq = txgbe_mac_clear_vmdq_dummy;
+ hw->mac.init_rx_addrs = txgbe_mac_init_rx_addrs_dummy;
+ hw->mac.update_uc_addr_list = txgbe_mac_update_uc_addr_list_dummy;
+ hw->mac.update_mc_addr_list = txgbe_mac_update_mc_addr_list_dummy;
+ hw->mac.enable_mc = txgbe_mac_enable_mc_dummy;
+ hw->mac.disable_mc = txgbe_mac_disable_mc_dummy;
+ hw->mac.clear_vfta = txgbe_mac_clear_vfta_dummy;
+ hw->mac.set_vfta = txgbe_mac_set_vfta_dummy;
+ hw->mac.set_vlvf = txgbe_mac_set_vlvf_dummy;
+ hw->mac.init_uta_tables = txgbe_mac_init_uta_tables_dummy;
+ hw->mac.set_mac_anti_spoofing = txgbe_mac_set_mac_anti_spoofing_dummy;
+ hw->mac.set_vlan_anti_spoofing = txgbe_mac_set_vlan_anti_spoofing_dummy;
+ hw->mac.update_xcast_mode = txgbe_mac_update_xcast_mode_dummy;
+ hw->mac.set_rlpml = txgbe_mac_set_rlpml_dummy;
+ hw->mac.fc_enable = txgbe_mac_fc_enable_dummy;
+ hw->mac.setup_fc = txgbe_mac_setup_fc_dummy;
+ hw->mac.fc_autoneg = txgbe_mac_fc_autoneg_dummy;
+ hw->mac.set_fw_drv_ver = txgbe_mac_set_fw_drv_ver_dummy;
+ hw->mac.get_thermal_sensor_data = txgbe_mac_get_thermal_sensor_data_dummy;
+ hw->mac.init_thermal_sensor_thresh = txgbe_mac_init_thermal_sensor_thresh_dummy;
+ hw->mac.get_rtrup2tc = txgbe_mac_get_rtrup2tc_dummy;
+ hw->mac.disable_rx = txgbe_mac_disable_rx_dummy;
+ hw->mac.enable_rx = txgbe_mac_enable_rx_dummy;
+ hw->mac.set_source_address_pruning = txgbe_mac_set_source_address_pruning_dummy;
+ hw->mac.set_ethertype_anti_spoofing = txgbe_mac_set_ethertype_anti_spoofing_dummy;
+ hw->mac.dmac_update_tcs = txgbe_mac_dmac_update_tcs_dummy;
+ hw->mac.dmac_config_tcs = txgbe_mac_dmac_config_tcs_dummy;
+ hw->mac.dmac_config = txgbe_mac_dmac_config_dummy;
+ hw->mac.setup_eee = txgbe_mac_setup_eee_dummy;
+ hw->mac.read_iosf_sb_reg = txgbe_mac_read_iosf_sb_reg_dummy;
+ hw->mac.write_iosf_sb_reg = txgbe_mac_write_iosf_sb_reg_dummy;
+ hw->mac.disable_mdd = txgbe_mac_disable_mdd_dummy;
+ hw->mac.enable_mdd = txgbe_mac_enable_mdd_dummy;
+ hw->mac.mdd_event = txgbe_mac_mdd_event_dummy;
+ hw->mac.restore_mdd_vf = txgbe_mac_restore_mdd_vf_dummy;
+ hw->mac.fw_recovery_mode = txgbe_mac_fw_recovery_mode_dummy;
+ hw->phy.get_media_type = txgbe_phy_get_media_type_dummy;
+ hw->phy.identify = txgbe_phy_identify_dummy;
+ hw->phy.identify_sfp = txgbe_phy_identify_sfp_dummy;
+ hw->phy.init = txgbe_phy_init_dummy;
+ hw->phy.reset = txgbe_phy_reset_dummy;
+ hw->phy.read_reg = txgbe_phy_read_reg_dummy;
+ hw->phy.write_reg = txgbe_phy_write_reg_dummy;
+ hw->phy.read_reg_mdi = txgbe_phy_read_reg_mdi_dummy;
+ hw->phy.write_reg_mdi = txgbe_phy_write_reg_mdi_dummy;
+ hw->phy.setup_link = txgbe_phy_setup_link_dummy;
+ hw->phy.setup_internal_link = txgbe_phy_setup_internal_link_dummy;
+ hw->phy.setup_link_speed = txgbe_phy_setup_link_speed_dummy;
+ hw->phy.check_link = txgbe_phy_check_link_dummy;
+ hw->phy.get_firmware_version = txgbe_phy_get_firmware_version_dummy;
+ hw->phy.read_i2c_byte = txgbe_phy_read_i2c_byte_dummy;
+ hw->phy.write_i2c_byte = txgbe_phy_write_i2c_byte_dummy;
+ hw->phy.read_i2c_sff8472 = txgbe_phy_read_i2c_sff8472_dummy;
+ hw->phy.read_i2c_eeprom = txgbe_phy_read_i2c_eeprom_dummy;
+ hw->phy.write_i2c_eeprom = txgbe_phy_write_i2c_eeprom_dummy;
+ hw->phy.i2c_bus_clear = txgbe_phy_i2c_bus_clear_dummy;
+ hw->phy.check_overtemp = txgbe_phy_check_overtemp_dummy;
+ hw->phy.set_phy_power = txgbe_phy_set_phy_power_dummy;
+ hw->phy.enter_lplu = txgbe_phy_enter_lplu_dummy;
+ hw->phy.handle_lasi = txgbe_phy_handle_lasi_dummy;
+ hw->phy.read_i2c_byte_unlocked = txgbe_phy_read_i2c_byte_unlocked_dummy;
+ hw->phy.write_i2c_byte_unlocked = txgbe_phy_write_i2c_byte_unlocked_dummy;
+ hw->link.read_link = txgbe_link_read_link_dummy;
+ hw->link.read_link_unlocked = txgbe_link_read_link_unlocked_dummy;
+ hw->link.write_link = txgbe_link_write_link_dummy;
+ hw->link.write_link_unlocked = txgbe_link_write_link_unlocked_dummy;
+ hw->mbx.init_params = txgbe_mbx_init_params_dummy;
+ hw->mbx.read = txgbe_mbx_read_dummy;
+ hw->mbx.write = txgbe_mbx_write_dummy;
+ hw->mbx.read_posted = txgbe_mbx_read_posted_dummy;
+ hw->mbx.write_posted = txgbe_mbx_write_posted_dummy;
+ hw->mbx.check_for_msg = txgbe_mbx_check_for_msg_dummy;
+ hw->mbx.check_for_ack = txgbe_mbx_check_for_ack_dummy;
+ hw->mbx.check_for_rst = txgbe_mbx_check_for_rst_dummy;
+}
+
+#endif /* _TXGBE_TYPE_DUMMY_H_ */
new file mode 100644
@@ -0,0 +1,585 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_phy.h"
+#include "txgbe_dcb.h"
+#include "txgbe_vf.h"
+#include "txgbe_hw.h"
+#include "txgbe_mng.h"
+#include "txgbe_eeprom.h"
+
+/**
+ * txgbe_init_eeprom_params - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters txgbe_rom_info within the
+ * txgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 txgbe_init_eeprom_params(struct txgbe_hw *hw)
+{
+ struct txgbe_rom_info *eeprom = &hw->rom;
+ u32 eec;
+ u16 eeprom_size;
+ int err = 0;
+
+ DEBUGFUNC("txgbe_init_eeprom_params");
+
+ /* params have already been initialized */
+ if (eeprom->type != txgbe_eeprom_unknown)
+ return 0;
+
+ eeprom->type = txgbe_eeprom_none;
+ /* Set default semaphore delay to 10ms which is a well
+ * tested value */
+ eeprom->semaphore_delay = 10; /*ms*/
+ /* Clear EEPROM page size, it will be initialized as needed */
+ eeprom->word_page_size = 0;
+
+ /*
+ * Check for EEPROM present first.
+ * If not present leave as none
+ */
+ eec = rd32(hw, TXGBE_SPISTAT);
+ if (!(eec & TXGBE_SPISTAT_BPFLASH)) {
+ eeprom->type = txgbe_eeprom_flash;
+
+ /*
+ * SPI EEPROM is assumed here. This code would need to
+ * change if a future EEPROM is not SPI.
+ */
+ eeprom_size = 4096;
+ eeprom->word_size = eeprom_size >> 1;
+ }
+
+ eeprom->address_bits = 16;
+
+ err = eeprom->read32(hw, TXGBE_SW_REGION_PTR << 1, &eeprom->sw_addr);
+ if (err) {
+ DEBUGOUT("EEPROM read failed.\n");
+ return err;
+ }
+
+ DEBUGOUT("eeprom params: type = %d, size = %d, address bits: "
+ "%d %d\n", eeprom->type, eeprom->word_size,
+ eeprom->address_bits, eeprom->sw_addr);
+
+ return 0;
+}
+
+/**
+ * txgbe_get_eeprom_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
+ **/
+s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw)
+{
+ s32 status = TXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ DEBUGFUNC("txgbe_get_eeprom_semaphore");
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = rd32(hw, TXGBE_SWSEM);
+ if (!(swsm & TXGBE_SWSEM_PF)) {
+ status = 0;
+ break;
+ }
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access the eeprom - SMBI Semaphore "
+ "not granted.\n");
+ /*
+ * this release is particularly important because our attempts
+ * above to get the semaphore may have succeeded, and if there
+ * was a timeout, we should unconditionally clear the semaphore
+ * bits to free the driver to make progress
+ */
+ txgbe_release_eeprom_semaphore(hw);
+
+ usec_delay(50);
+ /*
+ * one last try
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = rd32(hw, TXGBE_SWSEM);
+ if (!(swsm & TXGBE_SWSEM_PF))
+ status = 0;
+ }
+
+ /* Now get the semaphore between SW/FW through the SWESMBI bit */
+ if (status == 0) {
+ for (i = 0; i < timeout; i++) {
+ /* Set the SW EEPROM semaphore bit to request access */
+ wr32m(hw, TXGBE_MNGSWSYNC,
+ TXGBE_MNGSWSYNC_REQ, TXGBE_MNGSWSYNC_REQ);
+
+ /*
+ * If we set the bit successfully then we got the
+ * semaphore.
+ */
+ swsm = rd32(hw, TXGBE_MNGSWSYNC);
+ if (swsm & TXGBE_MNGSWSYNC_REQ)
+ break;
+
+ usec_delay(50);
+ }
+
+ /*
+ * Release semaphores and return error if SW EEPROM semaphore
+ * was not granted because we don't have access to the EEPROM
+ */
+ if (i >= timeout) {
+ DEBUGOUT("SWESMBI Software EEPROM semaphore not granted.\n");
+ txgbe_release_eeprom_semaphore(hw);
+ status = TXGBE_ERR_EEPROM;
+ }
+ } else {
+ DEBUGOUT("Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * txgbe_release_eeprom_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw)
+{
+ DEBUGFUNC("txgbe_release_eeprom_semaphore");
+
+ wr32m(hw, TXGBE_MNGSWSYNC, TXGBE_MNGSWSYNC_REQ, 0);
+ wr32m(hw, TXGBE_SWSEM, TXGBE_SWSEM_PF, 0);
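+ /* flush the writes so both releases take effect immediately */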
+ txgbe_flush(hw);
+}
+
+/**
+ * txgbe_ee_read16 - Read EEPROM word using a host interface cmd
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the hostif.
+ **/
+s32 txgbe_ee_read16(struct txgbe_hw *hw, u32 offset,
+ u16 *data)
+{
+ const u32 mask = TXGBE_MNGSEM_SWMBX | TXGBE_MNGSEM_SWFLASH;
+ u32 addr = (offset << 1);
+ int err;
+
+ err = hw->mac.acquire_swfw_sync(hw, mask);
+ if (err)
+ return err;
+
+ err = txgbe_hic_sr_read(hw, addr, (u8 *)data, 2);
+
+ hw->mac.release_swfw_sync(hw, mask);
+
+ return err;
+}
+
+/**
+ * txgbe_ee_readw_buffer - Read EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the hostif.
+ **/
+s32 txgbe_ee_readw_buffer(struct txgbe_hw *hw,
+ u32 offset, u32 words, void *data)
+{
+ const u32 mask = TXGBE_MNGSEM_SWMBX | TXGBE_MNGSEM_SWFLASH;
+ u32 addr = (offset << 1);
+ u32 len = (words << 1);
+ u8 *buf = (u8 *)data;
+ int err;
+
+ err = hw->mac.acquire_swfw_sync(hw, mask);
+ if (err)
+ return err;
+
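+ /* split the transfer into chunks of at most TXGBE_PMMBX_DATA_SIZE bytes */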
+ while (len) {
+ u32 seg = (len <= TXGBE_PMMBX_DATA_SIZE
+ ? len : TXGBE_PMMBX_DATA_SIZE);
+
+ err = txgbe_hic_sr_read(hw, addr, buf, seg);
+ if (err)
+ break;
+
+ len -= seg;
+ addr += seg;
+ buf += seg;
+ }
+
+ hw->mac.release_swfw_sync(hw, mask);
+ return err;
+}
+
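+/**
+ * txgbe_ee_readw_sw - Read EEPROM word in the software region
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the software region to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word relative to rom.sw_addr using the hostif.
+ **/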
+s32 txgbe_ee_readw_sw(struct txgbe_hw *hw, u32 offset,
+ u16 *data)
+{
+ const u32 mask = TXGBE_MNGSEM_SWMBX | TXGBE_MNGSEM_SWFLASH;
+ u32 addr = hw->rom.sw_addr + (offset << 1);
+ int err;
+
+ err = hw->mac.acquire_swfw_sync(hw, mask);
+ if (err)
+ return err;
+
+ err = txgbe_hic_sr_read(hw, addr, (u8 *)data, 2);
+
+ hw->mac.release_swfw_sync(hw, mask);
+
+ return err;
+}
+
+/**
+ * txgbe_ee_read32 - Read EEPROM word using a host interface cmd
+ * @hw: pointer to hardware structure
+ * @addr: address of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 32 bit word from the EEPROM using the hostif.
+ **/
+s32 txgbe_ee_read32(struct txgbe_hw *hw, u32 addr, u32 *data)
+{
+ const u32 mask = TXGBE_MNGSEM_SWMBX | TXGBE_MNGSEM_SWFLASH;
+ int err;
+
+ err = hw->mac.acquire_swfw_sync(hw, mask);
+ if (err)
+ return err;
+
+ err = txgbe_hic_sr_read(hw, addr, (u8 *)data, 4);
+
+ hw->mac.release_swfw_sync(hw, mask);
+
+ return err;
+}
+
+/**
+ * txgbe_ee_read_buffer - Read EEPROM byte(s) using hostif
+ * @hw: pointer to hardware structure
+ * @addr: offset of bytes in the EEPROM to read
+ * @len: number of bytes
+ * @data: byte(s) read from the EEPROM
+ *
+ * Reads 8 bit byte(s) from the EEPROM using the hostif.
+ **/
+s32 txgbe_ee_read_buffer(struct txgbe_hw *hw,
+ u32 addr, u32 len, void *data)
+{
+ const u32 mask = TXGBE_MNGSEM_SWMBX | TXGBE_MNGSEM_SWFLASH;
+ u8 *buf = (u8 *)data;
+ int err;
+
+ err = hw->mac.acquire_swfw_sync(hw, mask);
+ if (err)
+ return err;
+
+ while (len) {
+ u32 seg = (len <= TXGBE_PMMBX_DATA_SIZE
+ ? len : TXGBE_PMMBX_DATA_SIZE);
+
+ err = txgbe_hic_sr_read(hw, addr, buf, seg);
+ if (err)
+ break;
+
+ len -= seg;
+ buf += seg;
+ }
+
+ hw->mac.release_swfw_sync(hw, mask);
+ return err;
+}
+
+/**
+ * txgbe_ee_write16 - Write EEPROM word using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the hostif.
+ **/
+s32 txgbe_ee_write16(struct txgbe_hw *hw, u32 offset,
+ u16 data)
+{
+ const u32 mask = TXGBE_MNGSEM_SWMBX | TXGBE_MNGSEM_SWFLASH;
+ u32 addr = (offset << 1);
+ int err;
+
+ DEBUGFUNC("\n");
+
+ err = hw->mac.acquire_swfw_sync(hw, mask);
+ if (err)
+ return err;
+
+ err = txgbe_hic_sr_write(hw, addr, (u8 *)&data, 2);
+
+ hw->mac.release_swfw_sync(hw, mask);
+
+ return err;
+}
+
+/**
+ * txgbe_ee_writew_buffer - Write EEPROM word(s) using hostif
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the hostif.
+ **/
+s32 txgbe_ee_writew_buffer(struct txgbe_hw *hw,
+ u32 offset, u32 words, void *data)
+{
+ const u32 mask = TXGBE_MNGSEM_SWMBX | TXGBE_MNGSEM_SWFLASH;
+ u32 addr = (offset << 1);
+ u32 len = (words << 1);
+ u8 *buf = (u8 *)data;
+ int err;
+
+ err = hw->mac.acquire_swfw_sync(hw, mask);
+ if (err)
+ return err;
+
+ while (len) {
+ u32 seg = (len <= TXGBE_PMMBX_DATA_SIZE
+ ? len : TXGBE_PMMBX_DATA_SIZE);
+
+ err = txgbe_hic_sr_write(hw, addr, buf, seg);
+ if (err)
+ break;
+
+ len -= seg;
+ buf += seg;
+ }
+
+ hw->mac.release_swfw_sync(hw, mask);
+ return err;
+}
+
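+/**
+ * txgbe_ee_writew_sw - Write EEPROM word in the software region
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the software region to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word relative to rom.sw_addr using the hostif.
+ **/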
+s32 txgbe_ee_writew_sw(struct txgbe_hw *hw, u32 offset,
+ u16 data)
+{
+ const u32 mask = TXGBE_MNGSEM_SWMBX | TXGBE_MNGSEM_SWFLASH;
+ u32 addr = hw->rom.sw_addr + (offset << 1);
+ int err;
+
+ DEBUGFUNC("\n");
+
+ err = hw->mac.acquire_swfw_sync(hw, mask);
+ if (err)
+ return err;
+
+ err = txgbe_hic_sr_write(hw, addr, (u8 *)&data, 2);
+
+ hw->mac.release_swfw_sync(hw, mask);
+
+ return err;
+}
+
+/**
+ * txgbe_ee_write32 - Write EEPROM word using a host interface cmd
+ * @hw: pointer to hardware structure
+ * @addr: address of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 32 bit word to the EEPROM using the hostif.
+ **/
+s32 txgbe_ee_write32(struct txgbe_hw *hw, u32 addr, u32 data)
+{
+ const u32 mask = TXGBE_MNGSEM_SWMBX | TXGBE_MNGSEM_SWFLASH;
+ int err;
+
+ err = hw->mac.acquire_swfw_sync(hw, mask);
+ if (err)
+ return err;
+
+ err = txgbe_hic_sr_write(hw, addr, (u8 *)&data, 4);
+
+ hw->mac.release_swfw_sync(hw, mask);
+
+ return err;
+}
+
+/**
+ * txgbe_ee_write_buffer - Write EEPROM byte(s) using hostif
+ * @hw: pointer to hardware structure
+ * @addr: offset of bytes in the EEPROM to write
+ * @len: number of bytes
+ * @data: byte(s) to write to the EEPROM
+ *
+ * Writes 8 bit byte(s) to the EEPROM using the hostif.
+ **/
+s32 txgbe_ee_write_buffer(struct txgbe_hw *hw,
+ u32 addr, u32 len, void *data)
+{
+ const u32 mask = TXGBE_MNGSEM_SWMBX | TXGBE_MNGSEM_SWFLASH;
+ u8 *buf = (u8 *)data;
+ int err;
+
+ err = hw->mac.acquire_swfw_sync(hw, mask);
+ if (err)
+ return err;
+
+ while (len) {
+ u32 seg = (len <= TXGBE_PMMBX_DATA_SIZE
+ ? len : TXGBE_PMMBX_DATA_SIZE);
+
+ err = txgbe_hic_sr_write(hw, addr, buf, seg);
+ if (err)
+ break;
+
+ len -= seg;
+ buf += seg;
+ }
+
+ hw->mac.release_swfw_sync(hw, mask);
+ return err;
+}
+
+#define BUFF_SIZE 64
+/**
+ * txgbe_calc_eeprom_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Returns a negative error code on error, or the 16-bit checksum
+ **/
+s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw)
+{
+ u16 checksum = 0, read_checksum = 0;
+ int i, j, seg;
+ int err;
+ u16 buffer[BUFF_SIZE];
+
+ DEBUGFUNC("txgbe_calc_eeprom_checksum");
+
+ err = hw->rom.readw_sw(hw, TXGBE_EEPROM_CHECKSUM, &read_checksum);
+ if (err) {
+ DEBUGOUT("EEPROM read failed\n");
+ return err;
+ }
+
+ for (i = 0; i < TXGBE_EE_CSUM_MAX; i += seg) {
+ seg = (i + BUFF_SIZE < TXGBE_EE_CSUM_MAX
+ ? BUFF_SIZE : TXGBE_EE_CSUM_MAX - i);
+ err = hw->rom.readw_buffer(hw, i, seg, buffer);
+ if (err)
+ return err;
+ for (j = 0; j < seg; j++)
+ checksum += buffer[j];
+ }
+
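+ /*
+ * The loop above also summed the stored checksum word, so adding
+ * read_checksum back cancels it out; the result is chosen so that
+ * every other word plus the new checksum sums to TXGBE_EEPROM_SUM.
+ */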
+ checksum = (u16)TXGBE_EEPROM_SUM - checksum + read_checksum;
+
+ return (s32)checksum;
+}
+
+/**
+ * txgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw,
+ u16 *checksum_val)
+{
+ u16 checksum;
+ u16 read_checksum = 0;
+ int err;
+
+ DEBUGFUNC("txgbe_validate_eeprom_checksum");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ err = hw->rom.read16(hw, 0, &checksum);
+ if (err) {
+ DEBUGOUT("EEPROM read failed\n");
+ return err;
+ }
+
+ err = hw->rom.calc_checksum(hw);
+ if (err < 0)
+ return err;
+
+ checksum = (u16)(err & 0xffff);
+
+ err = hw->rom.readw_sw(hw, TXGBE_EEPROM_CHECKSUM, &read_checksum);
+ if (err) {
+ DEBUGOUT("EEPROM read failed\n");
+ return err;
+ }
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum) {
+ err = TXGBE_ERR_EEPROM_CHECKSUM;
+ DEBUGOUT("EEPROM checksum error\n");
+ }
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+
+ return err;
+}
+
+/**
+ * txgbe_update_eeprom_checksum - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ **/
+s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum;
+
+ DEBUGFUNC("txgbe_update_eeprom_checksum");
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->rom.read16(hw, 0, &checksum);
+ if (status) {
+ DEBUGOUT("EEPROM read failed\n");
+ return status;
+ }
+
+ status = hw->rom.calc_checksum(hw);
+ if (status < 0)
+ return status;
+
+ checksum = (u16)(status & 0xffff);
+
+ status = hw->rom.writew_sw(hw, TXGBE_EEPROM_CHECKSUM, checksum);
+
+ return status;
+}
+
new file mode 100644
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_EEPROM_H_
+#define _TXGBE_EEPROM_H_
+
+/* Checksum and EEPROM pointers */
+#define TXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define TXGBE_EEPROM_SUM 0xBABA
+
+#define TXGBE_FW_PTR 0x0F
+#define TXGBE_PBANUM0_PTR 0x05
+#define TXGBE_PBANUM1_PTR 0x06
+#define TXGBE_SW_REGION_PTR 0x1C
+
+#define TXGBE_EE_CSUM_MAX 0x800
+#define TXGBE_EEPROM_CHECKSUM 0x2F
+
+#define TXGBE_SAN_MAC_ADDR_PTR 0x18
+#define TXGBE_DEVICE_CAPS 0x1C
+#define TXGBE_EEPROM_VERSION_L 0x1D
+#define TXGBE_EEPROM_VERSION_H 0x1E
+#define TXGBE_ISCSI_BOOT_CONFIG 0x07
+
+/* Special PHY Init Routine */
+#define TXGBE_EE_PHY_INIT_OFFSET_NL 0x002B
+#define TXGBE_EE_PHY_INIT_END_NL 0xFFFF
+#define TXGBE_EE_CONTROL_MASK_NL 0xF000
+#define TXGBE_EE_DATA_MASK_NL 0x0FFF
+#define TXGBE_EE_CONTROL_SHIFT_NL 12
+#define TXGBE_EE_DELAY_NL 0
+#define TXGBE_EE_DATA_NL 1
+#define TXGBE_EE_CONTROL_NL 0x000F
+#define TXGBE_EE_CONTROL_EOL_NL 0x0FFF
+#define TXGBE_EE_CONTROL_SOL_NL 0x0000
+
+#define NVM_INIT_CTRL_3 0x38
+#define NVM_INIT_CTRL_3_LPLU 0x8
+#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40
+#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100
+
+#define TXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
+#define TXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
+#define TXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
+#define TXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7)
+#define TXGBE_FW_LESM_PARAMETERS_PTR 0x2
+#define TXGBE_FW_LESM_STATE_1 0x1
+#define TXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
+#define TXGBE_FW_LESM_2_STATES_ENABLED_MASK 0x1F
+#define TXGBE_FW_LESM_2_STATES_ENABLED 0x12
+#define TXGBE_FW_LESM_STATE0_10G_ENABLED 0x6FFF
+#define TXGBE_FW_LESM_STATE1_10G_ENABLED 0x4FFF
+#define TXGBE_FW_LESM_STATE0_10G_DISABLED 0x0FFF
+#define TXGBE_FW_LESM_STATE1_10G_DISABLED 0x2FFF
+#define TXGBE_FW_LESM_PORT0_STATE0_OFFSET 0x2
+#define TXGBE_FW_LESM_PORT0_STATE1_OFFSET 0x3
+#define TXGBE_FW_LESM_PORT1_STATE0_OFFSET 0x6
+#define TXGBE_FW_LESM_PORT1_STATE1_OFFSET 0x7
+#define TXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define TXGBE_FW_PATCH_VERSION_4 0x7
+#define TXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
+#define TXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
+#define TXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
+#define TXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
+#define TXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
+#define TXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
+#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */
+#define TXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */
+#define TXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */
+#define TXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */
+#define TXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */
+#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */
+#define TXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */
+
+#define TXGBE_ISCSI_BOOT_CAPS 0x0033
+#define TXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define TXGBE_ISCSI_SETUP_PORT_1 0x0034
+
+s32 txgbe_init_eeprom_params(struct txgbe_hw *hw);
+s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw);
+s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val);
+s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw);
+s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw);
+void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw);
+
+s32 txgbe_ee_read16(struct txgbe_hw *hw, u32 offset, u16 *data);
+s32 txgbe_ee_readw_sw(struct txgbe_hw *hw, u32 offset, u16 *data);
+s32 txgbe_ee_readw_buffer(struct txgbe_hw *hw, u32 offset, u32 words, void *data);
+s32 txgbe_ee_read32(struct txgbe_hw *hw, u32 addr, u32 *data);
+s32 txgbe_ee_read_buffer(struct txgbe_hw *hw, u32 addr, u32 len, void *data);
+
+s32 txgbe_ee_write16(struct txgbe_hw *hw, u32 offset, u16 data);
+s32 txgbe_ee_writew_sw(struct txgbe_hw *hw, u32 offset, u16 data);
+s32 txgbe_ee_writew_buffer(struct txgbe_hw *hw, u32 offset, u32 words, void *data);
+s32 txgbe_ee_write32(struct txgbe_hw *hw, u32 addr, u32 data);
+s32 txgbe_ee_write_buffer(struct txgbe_hw *hw, u32 addr, u32 len, void *data);
+
+#endif /* _TXGBE_EEPROM_H_ */
new file mode 100644
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_mbx.h"
+#include "txgbe_vf.h"
+#include "txgbe_hv_vf.h"
+
+/**
+ * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @mc_addr_list: unused
+ * @mc_addr_count: unused
+ * @next: unused
+ * @clear: unused
+ */
+static s32 txgbevf_hv_update_mc_addr_list_vf(struct txgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, txgbe_mc_addr_itr next,
+ bool clear)
+{
+ UNREFERENCED_PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear);
+
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @xcast_mode: unused
+ */
+static s32 txgbevf_hv_update_xcast_mode(struct txgbe_hw *hw, int xcast_mode)
+{
+ UNREFERENCED_PARAMETER(hw, xcast_mode);
+
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @vlan: unused
+ * @vind: unused
+ * @vlan_on: unused
+ * @vlvf_bypass: unused
+ */
+static s32 txgbevf_hv_set_vfta_vf(struct txgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ UNREFERENCED_PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass);
+
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
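+/**
+ * Hyper-V variant - just a stub.
+ * @hw: unused
+ * @index: unused
+ * @addr: unused
+ */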
+static s32 txgbevf_hv_set_uc_addr_vf(struct txgbe_hw *hw, u32 index, u8 *addr)
+{
+ UNREFERENCED_PARAMETER(hw, index, addr);
+
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 txgbevf_hv_reset_hw_vf(struct txgbe_hw *hw)
+{
+ UNREFERENCED_PARAMETER(hw);
+
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 txgbevf_hv_set_rar_vf(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vlan, u32 vind)
+{
+ UNREFERENCED_PARAMETER(hw, index, addr, vlan, vind);
+
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant; there is no mailbox communication.
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: unused
+ *
+ */
+static s32 txgbevf_hv_check_mac_link_vf(struct txgbe_hw *hw,
+ u32 *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ struct txgbe_mac_info *mac = &hw->mac;
+ u32 links_reg;
+ UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = rd32(hw, TXGBE_VFSTATUS);
+ if (!(links_reg & TXGBE_VFSTATUS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == txgbe_mac_raptor_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ usec_delay(100);
+ links_reg = rd32(hw, TXGBE_VFSTATUS);
+
+ if (!(links_reg & TXGBE_VFSTATUS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & TXGBE_VFSTATUS_BW_MASK) {
+ case TXGBE_VFSTATUS_BW_10G:
+ *speed = TXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case TXGBE_VFSTATUS_BW_1G:
+ *speed = TXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case TXGBE_VFSTATUS_BW_100M:
+ *speed = TXGBE_LINK_SPEED_100M_FULL;
+ break;
+ default:
+ *speed = TXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return 0;
+}
+
+/**
+ * txgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static s32 txgbevf_hv_set_rlpml_vf(struct txgbe_hw *hw, u16 max_size)
+{
+ /* On Hyper-V there is no mailbox, so program the max frame size
+ * register directly; max_size excludes the 4-byte CRC, hence the
+ * ETH_FCS_LEN adjustment.
+ */
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(max_size + ETH_FCS_LEN));
+
+ return 0;
+}
+
+/**
+ * txgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ * Hyper-V version - only txgbe_mbox_api_10 supported.
+ **/
+static int txgbevf_hv_negotiate_api_version_vf(struct txgbe_hw *hw, int api)
+{
+ UNREFERENCED_PARAMETER(hw);
+
+ /* Hyper-V only supports api version txgbe_mbox_api_10 */
+ if (api != txgbe_mbox_api_10)
+ return TXGBE_ERR_INVALID_ARGUMENT;
+
+ return 0;
+}
+
+/**
+ * txgbe_init_ops_hvf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers, adapter-specific functions can
+ * override the assignment of generic function pointers by assigning
+ * their own adapter-specific function pointers.
+ * Does not touch the hardware.
+ **/
+s32 txgbe_init_ops_hvf(struct txgbe_hw *hw)
+{
+ /* Set defaults for VF then override applicable Hyper-V
+ * specific functions
+ */
+ txgbe_init_ops_vf(hw);
+
+ hw->mac.reset_hw = txgbevf_hv_reset_hw_vf;
+ hw->mac.check_link = txgbevf_hv_check_mac_link_vf;
+ hw->mac.negotiate_api_version = txgbevf_hv_negotiate_api_version_vf;
+ hw->mac.set_rar = txgbevf_hv_set_rar_vf;
+ hw->mac.update_mc_addr_list = txgbevf_hv_update_mc_addr_list_vf;
+ hw->mac.update_xcast_mode = txgbevf_hv_update_xcast_mode;
+ hw->mac.set_uc_addr = txgbevf_hv_set_uc_addr_vf;
+ hw->mac.set_vfta = txgbevf_hv_set_vfta_vf;
+ hw->mac.set_rlpml = txgbevf_hv_set_rlpml_vf;
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_HV_VF_H_
+#define _TXGBE_HV_VF_H_
+
+#include "txgbe_type.h"
+
+s32 txgbe_init_ops_hvf(struct txgbe_hw *hw);
+
+#endif /* _TXGBE_HV_VF_H_ */
new file mode 100644
@@ -0,0 +1,5453 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_type.h"
+#include "txgbe_mbx.h"
+#include "txgbe_phy.h"
+#include "txgbe_dcb.h"
+#include "txgbe_vf.h"
+#include "txgbe_eeprom.h"
+#include "txgbe_mng.h"
+#include "txgbe_hw.h"
+
+#define TXGBE_RAPTOR_MAX_TX_QUEUES 128
+#define TXGBE_RAPTOR_MAX_RX_QUEUES 128
+#define TXGBE_RAPTOR_RAR_ENTRIES 128
+#define TXGBE_RAPTOR_MC_TBL_SIZE 128
+#define TXGBE_RAPTOR_VFT_TBL_SIZE 128
+#define TXGBE_RAPTOR_RX_PB_SIZE 512 /*KB*/
+#define TXGBE_RAPTOR_MAX_MSIX_VECTORS 0x40
+
+STATIC s32 txgbe_setup_copper_link_raptor(struct txgbe_hw *hw,
+ u32 speed,
+ bool autoneg_wait_to_complete);
+
+STATIC s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr);
+STATIC s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw,
+ u16 *san_mac_offset);
+
+/**
+ * txgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
+ * of flow control
+ * @hw: pointer to hardware structure
+ *
+ * This function returns true if the device supports flow control
+ * autonegotiation, and false if it does not.
+ *
+ **/
+bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw)
+{
+ bool supported = false;
+ u32 speed;
+ bool link_up;
+
+ DEBUGFUNC("txgbe_device_supports_autoneg_fc");
+
+ switch (hw->phy.media_type) {
+ case txgbe_media_type_fiber_qsfp:
+ case txgbe_media_type_fiber:
+ hw->mac.check_link(hw, &speed, &link_up, false);
+ /* if link is down, assume supported */
+ if (link_up)
+ supported = (speed == TXGBE_LINK_SPEED_1GB_FULL);
+ else
+ supported = true;
+
+ break;
+ case txgbe_media_type_backplane:
+ supported = true;
+ break;
+ case txgbe_media_type_copper:
+ /* only some copper devices support flow control autoneg */
+ switch (hw->device_id) {
+ case TXGBE_DEV_ID_RAPTOR_XAUI:
+ case TXGBE_DEV_ID_RAPTOR_SGMII:
+ supported = true;
+ break;
+ default:
+ supported = false;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (!supported)
+ DEBUGOUT("Device %x does not support flow control autoneg",
+ hw->device_id);
+ return supported;
+}
+
+/**
+ * txgbe_setup_fc - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 txgbe_setup_fc(struct txgbe_hw *hw)
+{
+ s32 err = 0;
+ u32 reg = 0;
+ u16 reg_cu = 0;
+ u32 value = 0;
+ u64 reg_bp = 0;
+ bool locked = false;
+
+ DEBUGFUNC("txgbe_setup_fc");
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == txgbe_fc_rx_pause) {
+ DEBUGOUT("txgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ err = TXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == txgbe_fc_default)
+ hw->fc.requested_mode = txgbe_fc_full;
+
+ /*
+ * Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do fc autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ switch (hw->phy.media_type) {
+ case txgbe_media_type_backplane:
+ /* some MAC's need RMW protection on AUTOC */
+ err = hw->mac.prot_autoc_read(hw, &locked, &reg_bp);
+ if (err != 0)
+ goto out;
+
+ /* fall through - only backplane uses autoc */
+ case txgbe_media_type_fiber_qsfp:
+ case txgbe_media_type_fiber:
+ case txgbe_media_type_copper:
+ hw->phy.read_reg(hw, TXGBE_MD_AUTO_NEG_ADVT,
+ TXGBE_MD_DEV_AUTO_NEG, &reg_cu);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case txgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(SR_MII_MMD_AN_ADV_PAUSE_SYM |
+ SR_MII_MMD_AN_ADV_PAUSE_ASM);
+ if (hw->phy.media_type == txgbe_media_type_backplane)
+ reg_bp &= ~(TXGBE_AUTOC_SYM_PAUSE |
+ TXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == txgbe_media_type_copper)
+ reg_cu &= ~(TXGBE_TAF_SYM_PAUSE | TXGBE_TAF_ASM_PAUSE);
+ break;
+ case txgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= SR_MII_MMD_AN_ADV_PAUSE_ASM;
+ reg &= ~SR_MII_MMD_AN_ADV_PAUSE_SYM;
+ if (hw->phy.media_type == txgbe_media_type_backplane) {
+ reg_bp |= TXGBE_AUTOC_ASM_PAUSE;
+ reg_bp &= ~TXGBE_AUTOC_SYM_PAUSE;
+ } else if (hw->phy.media_type == txgbe_media_type_copper) {
+ reg_cu |= TXGBE_TAF_ASM_PAUSE;
+ reg_cu &= ~TXGBE_TAF_SYM_PAUSE;
+ }
+ reg_bp |= SR_AN_MMD_ADV_REG1_PAUSE_ASM;
+ break;
+ case txgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case txgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= SR_MII_MMD_AN_ADV_PAUSE_SYM |
+ SR_MII_MMD_AN_ADV_PAUSE_ASM;
+ if (hw->phy.media_type == txgbe_media_type_backplane)
+ reg_bp |= TXGBE_AUTOC_SYM_PAUSE |
+ TXGBE_AUTOC_ASM_PAUSE;
+ else if (hw->phy.media_type == txgbe_media_type_copper)
+ reg_cu |= TXGBE_TAF_SYM_PAUSE | TXGBE_TAF_ASM_PAUSE;
+ reg_bp |= SR_AN_MMD_ADV_REG1_PAUSE_SYM |
+ SR_AN_MMD_ADV_REG1_PAUSE_ASM;
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ err = TXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
+ value = rd32_epcs(hw, SR_MII_MMD_AN_ADV);
+ value = (value & ~(SR_MII_MMD_AN_ADV_PAUSE_ASM |
+ SR_MII_MMD_AN_ADV_PAUSE_SYM)) | reg;
+ wr32_epcs(hw, SR_MII_MMD_AN_ADV, value);
+
+ /*
+ * AUTOC restart handles negotiation of 1G and 10G on backplane
+ * and copper. There is no need to set the PCS1GCTL register.
+ *
+ */
+ if (hw->phy.media_type == txgbe_media_type_backplane) {
+ value = rd32_epcs(hw, SR_AN_MMD_ADV_REG1);
+ value = (value & ~(SR_AN_MMD_ADV_REG1_PAUSE_ASM |
+ SR_AN_MMD_ADV_REG1_PAUSE_SYM)) |
+ reg_bp;
+ wr32_epcs(hw, SR_AN_MMD_ADV_REG1, value);
+ } else if ((hw->phy.media_type == txgbe_media_type_copper) &&
+ (txgbe_device_supports_autoneg_fc(hw))) {
+ hw->phy.write_reg(hw, TXGBE_MD_AUTO_NEG_ADVT,
+ TXGBE_MD_DEV_AUTO_NEG, reg_cu);
+ }
+
+ DEBUGOUT("Set up FC; reg = 0x%08X\n", reg);
+out:
+ return err;
+}
+
+/**
+ * txgbe_start_hw - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type, clears
+ * all on chip counters, initializes receive address registers, multicast
+ * table, VLAN filter table, calls routine to set up link and flow control
+ * settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+s32 txgbe_start_hw(struct txgbe_hw *hw)
+{
+ s32 err;
+ u16 device_caps;
+
+ DEBUGFUNC("txgbe_start_hw");
+
+ /* Set the media type */
+ hw->phy.media_type = hw->phy.get_media_type(hw);
+
+ /* Clear the VLAN filter table */
+ hw->mac.clear_vfta(hw);
+
+ /* Clear statistics registers */
+ hw->mac.clear_hw_cntrs(hw);
+
+ /* Setup flow control */
+ err = txgbe_setup_fc(hw);
+ if (err != 0 && err != TXGBE_NOT_IMPLEMENTED) {
+ DEBUGOUT("Flow control setup failed, returning %d\n", err);
+ return err;
+ }
+
+ /* Cache bit indicating need for crosstalk fix */
+ switch (hw->mac.type) {
+ case txgbe_mac_raptor:
+ hw->mac.get_device_caps(hw, &device_caps);
+ if (device_caps & TXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+ hw->need_crosstalk_fix = false;
+ else
+ hw->need_crosstalk_fix = true;
+ break;
+ default:
+ hw->need_crosstalk_fix = false;
+ break;
+ }
+
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+/**
+ * txgbe_start_hw_gen2 - Init sequence for common device family
+ * @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ **/
+s32 txgbe_start_hw_gen2(struct txgbe_hw *hw)
+{
+ u32 i;
+
+ /* Clear the rate limiters */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ wr32(hw, TXGBE_ARBPOOLIDX, i);
+ wr32(hw, TXGBE_ARBTXRATE, 0);
+ }
+ txgbe_flush(hw);
+
+ /* We need to run link autotry after the driver loads */
+ hw->mac.autotry_restart = true;
+
+ return 0;
+}
+
+/**
+ * txgbe_init_hw - Generic hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware, filling the bus info
+ * structure and media type, clears all on chip counters, initializes receive
+ * address registers, multicast table, VLAN filter table, calls routine to set
+ * up link and flow control settings, and leaves transmit and receive units
+ * disabled and uninitialized
+ **/
+s32 txgbe_init_hw(struct txgbe_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("txgbe_init_hw");
+
+ /* Reset the hardware */
+ status = hw->mac.reset_hw(hw);
+ if (status == 0 || status == TXGBE_ERR_SFP_NOT_PRESENT) {
+ /* Start the HW */
+ status = hw->mac.start_hw(hw);
+ }
+
+ /* Initialize the LED link active for LED blink support */
+ hw->mac.init_led_link_act(hw);
+
+ if (status != 0)
+ DEBUGOUT("Failed to initialize HW, STATUS = %d\n", status);
+
+ return status;
+}
+
+/**
+ * txgbe_clear_hw_cntrs - Generic clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware.
+ * Statistics counters are clear on read.
+ **/
+s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw)
+{
+ u16 i = 0;
+
+ DEBUGFUNC("txgbe_clear_hw_cntrs");
+
+ /* QP Stats */
+ /* don't write clear queue stats */
+ for (i = 0; i < TXGBE_MAX_QP; i++) {
+ hw->qp_last[i].rx_qp_packets = 0;
+ hw->qp_last[i].tx_qp_packets = 0;
+ hw->qp_last[i].rx_qp_bytes = 0;
+ hw->qp_last[i].tx_qp_bytes = 0;
+ hw->qp_last[i].rx_qp_mc_packets = 0;
+ }
+
+ /* PB Stats */
+ for (i = 0; i < TXGBE_MAX_UP; i++) {
+ rd32(hw, TXGBE_PBRXUPXON(i));
+ rd32(hw, TXGBE_PBRXUPXOFF(i));
+ rd32(hw, TXGBE_PBTXUPXON(i));
+ rd32(hw, TXGBE_PBTXUPXOFF(i));
+ rd32(hw, TXGBE_PBTXUPOFF(i));
+
+ rd32(hw, TXGBE_PBRXMISS(i));
+ }
+ rd32(hw, TXGBE_PBRXLNKXON);
+ rd32(hw, TXGBE_PBRXLNKXOFF);
+ rd32(hw, TXGBE_PBTXLNKXON);
+ rd32(hw, TXGBE_PBTXLNKXOFF);
+
+ /* DMA Stats */
+ rd32(hw, TXGBE_DMARXPKT);
+ rd32(hw, TXGBE_DMATXPKT);
+
+ rd64(hw, TXGBE_DMARXOCTL);
+ rd64(hw, TXGBE_DMATXOCTL);
+
+ /* MAC Stats */
+ rd64(hw, TXGBE_MACRXERRCRCL);
+ rd64(hw, TXGBE_MACRXMPKTL);
+ rd64(hw, TXGBE_MACTXMPKTL);
+
+ rd64(hw, TXGBE_MACRXPKTL);
+ rd64(hw, TXGBE_MACTXPKTL);
+ rd64(hw, TXGBE_MACRXGBOCTL);
+
+ rd64(hw, TXGBE_MACRXOCTL);
+ rd32(hw, TXGBE_MACTXOCTL);
+
+ rd64(hw, TXGBE_MACRX1to64L);
+ rd64(hw, TXGBE_MACRX65to127L);
+ rd64(hw, TXGBE_MACRX128to255L);
+ rd64(hw, TXGBE_MACRX256to511L);
+ rd64(hw, TXGBE_MACRX512to1023L);
+ rd64(hw, TXGBE_MACRX1024toMAXL);
+ rd64(hw, TXGBE_MACTX1to64L);
+ rd64(hw, TXGBE_MACTX65to127L);
+ rd64(hw, TXGBE_MACTX128to255L);
+ rd64(hw, TXGBE_MACTX256to511L);
+ rd64(hw, TXGBE_MACTX512to1023L);
+ rd64(hw, TXGBE_MACTX1024toMAXL);
+
+ rd64(hw, TXGBE_MACRXERRLENL);
+ rd32(hw, TXGBE_MACRXOVERSIZE);
+ rd32(hw, TXGBE_MACRXJABBER);
+
+ /* FCoE Stats */
+ rd32(hw, TXGBE_FCOECRC);
+ rd32(hw, TXGBE_FCOELAST);
+ rd32(hw, TXGBE_FCOERPDC);
+ rd32(hw, TXGBE_FCOEPRC);
+ rd32(hw, TXGBE_FCOEPTC);
+ rd32(hw, TXGBE_FCOEDWRC);
+ rd32(hw, TXGBE_FCOEDWTC);
+
+ /* Flow Director Stats */
+ rd32(hw, TXGBE_FDIRMATCH);
+ rd32(hw, TXGBE_FDIRMISS);
+ rd32(hw, TXGBE_FDIRUSED);
+ rd32(hw, TXGBE_FDIRUSED);
+ rd32(hw, TXGBE_FDIRFAIL);
+ rd32(hw, TXGBE_FDIRFAIL);
+
+ /* MACsec Stats */
+ rd32(hw, TXGBE_LSECTX_UTPKT);
+ rd32(hw, TXGBE_LSECTX_ENCPKT);
+ rd32(hw, TXGBE_LSECTX_PROTPKT);
+ rd32(hw, TXGBE_LSECTX_ENCOCT);
+ rd32(hw, TXGBE_LSECTX_PROTOCT);
+ rd32(hw, TXGBE_LSECRX_UTPKT);
+ rd32(hw, TXGBE_LSECRX_BTPKT);
+ rd32(hw, TXGBE_LSECRX_NOSCIPKT);
+ rd32(hw, TXGBE_LSECRX_UNSCIPKT);
+ rd32(hw, TXGBE_LSECRX_DECOCT);
+ rd32(hw, TXGBE_LSECRX_VLDOCT);
+ rd32(hw, TXGBE_LSECRX_UNCHKPKT);
+ rd32(hw, TXGBE_LSECRX_DLYPKT);
+ rd32(hw, TXGBE_LSECRX_LATEPKT);
+ for (i = 0; i < 2; i++) {
+ rd32(hw, TXGBE_LSECRX_OKPKT(i));
+ rd32(hw, TXGBE_LSECRX_INVPKT(i));
+ rd32(hw, TXGBE_LSECRX_BADPKT(i));
+ }
+ rd32(hw, TXGBE_LSECRX_INVSAPKT);
+ rd32(hw, TXGBE_LSECRX_BADSAPKT);
+
+ return 0;
+}
+
+/**
+ * txgbe_read_pba_string - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 err;
+ u16 data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ DEBUGFUNC("txgbe_read_pba_string");
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ return TXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ err = hw->rom.readw_sw(hw, TXGBE_PBANUM0_PTR, &data);
+ if (err) {
+ DEBUGOUT("NVM Read Error\n");
+ return err;
+ }
+
+ err = hw->rom.readw_sw(hw, TXGBE_PBANUM1_PTR, &pba_ptr);
+ if (err) {
+ DEBUGOUT("NVM Read Error\n");
+ return err;
+ }
+
+ /*
+ * if data is not ptr guard the PBA must be in legacy format which
+ * means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ascii string
+ */
+ if (data != TXGBE_PBANUM_PTR_GUARD) {
+ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return TXGBE_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr; the legacy PBA
+ * decodes to "xxxxxx-0xx", so digit 7 is always 0
+ */
+ pba_num[0] = (data >> 12) & 0xF;
+ pba_num[1] = (data >> 8) & 0xF;
+ pba_num[2] = (data >> 4) & 0xF;
+ pba_num[3] = data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return 0;
+ }
+
+ err = hw->rom.read16(hw, pba_ptr, &length);
+ if (err) {
+ DEBUGOUT("NVM Read Error\n");
+ return err;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return TXGBE_ERR_PBA_SECTION;
+ }
+
+ /* check if pba_num buffer is big enough: the string needs
+ * (length - 1) words of two characters each, plus a NUL
+ */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return TXGBE_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ err = hw->rom.read16(hw, pba_ptr + offset, &data);
+ if (err) {
+ DEBUGOUT("NVM Read Error\n");
+ return err;
+ }
+ pba_num[offset * 2] = (u8)(data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return 0;
+}
+
+/**
+ * txgbe_read_pba_num - Reads part number from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number from the EEPROM
+ *
+ * Reads the part number from the EEPROM.
+ **/
+s32 txgbe_read_pba_num(struct txgbe_hw *hw, u32 *pba_num)
+{
+ s32 err;
+ u16 data;
+
+ DEBUGFUNC("txgbe_read_pba_num");
+
+ err = hw->rom.readw_sw(hw, TXGBE_PBANUM0_PTR, &data);
+ if (err) {
+ DEBUGOUT("NVM Read Error\n");
+ return err;
+ } else if (data == TXGBE_PBANUM_PTR_GUARD) {
+ DEBUGOUT("NVM Not supported\n");
+ return TXGBE_NOT_IMPLEMENTED;
+ }
+ *pba_num = (u32)(data << 16);
+
+ err = hw->rom.readw_sw(hw, TXGBE_PBANUM1_PTR, &data);
+ if (err) {
+ DEBUGOUT("NVM Read Error\n");
+ return err;
+ }
+ *pba_num |= data;
+
+ return 0;
+}
+
+/**
+ * txgbe_read_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @max_pba_block_size: PBA block size limit
+ * @pba: pointer to output PBA structure
+ *
+ * Reads PBA from EEPROM image when eeprom_buf is not NULL.
+ * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 txgbe_read_pba_raw(struct txgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct txgbe_pba *pba)
+{
+ s32 err;
+ u16 pba_block_size;
+
+ if (pba == NULL)
+ return TXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ err = hw->rom.readw_buffer(hw, TXGBE_PBANUM0_PTR, 2,
+ &pba->word[0]);
+ if (err)
+ return err;
+ } else {
+ if (eeprom_buf_size > TXGBE_PBANUM1_PTR) {
+ pba->word[0] = eeprom_buf[TXGBE_PBANUM0_PTR];
+ pba->word[1] = eeprom_buf[TXGBE_PBANUM1_PTR];
+ } else {
+ return TXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == TXGBE_PBANUM_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return TXGBE_ERR_PARAM;
+
+ err = txgbe_get_pba_block_size(hw, eeprom_buf,
+ eeprom_buf_size,
+ &pba_block_size);
+ if (err)
+ return err;
+
+ if (pba_block_size > max_pba_block_size)
+ return TXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ err = hw->rom.readw_buffer(hw, pba->word[1],
+ pba_block_size,
+ pba->pba_block);
+ if (err)
+ return err;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba_block_size)) {
+ memcpy(pba->pba_block,
+ &eeprom_buf[pba->word[1]],
+ pba_block_size * sizeof(u16));
+ } else {
+ return TXGBE_ERR_PARAM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_write_pba_raw
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba: pointer to PBA structure
+ *
+ * Writes PBA to EEPROM image when eeprom_buf is not NULL.
+ * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 txgbe_write_pba_raw(struct txgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct txgbe_pba *pba)
+{
+ s32 err;
+
+ if (pba == NULL)
+ return TXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ err = hw->rom.writew_buffer(hw, TXGBE_PBANUM0_PTR, 2,
+ &pba->word[0]);
+ if (err)
+ return err;
+ } else {
+ if (eeprom_buf_size > TXGBE_PBANUM1_PTR) {
+ eeprom_buf[TXGBE_PBANUM0_PTR] = pba->word[0];
+ eeprom_buf[TXGBE_PBANUM1_PTR] = pba->word[1];
+ } else {
+ return TXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba->word[0] == TXGBE_PBANUM_PTR_GUARD) {
+ if (pba->pba_block == NULL)
+ return TXGBE_ERR_PARAM;
+
+ if (eeprom_buf == NULL) {
+ err = hw->rom.writew_buffer(hw, pba->word[1],
+ pba->pba_block[0],
+ pba->pba_block);
+ if (err)
+ return err;
+ } else {
+ if (eeprom_buf_size > (u32)(pba->word[1] +
+ pba->pba_block[0])) {
+ memcpy(&eeprom_buf[pba->word[1]],
+ pba->pba_block,
+ pba->pba_block[0] * sizeof(u16));
+ } else {
+ return TXGBE_ERR_PARAM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_get_pba_block_size
+ * @hw: pointer to the HW structure
+ * @eeprom_buf: optional pointer to EEPROM image
+ * @eeprom_buf_size: size of EEPROM image in words
+ * @pba_block_size: pointer to output variable
+ *
+ * Returns the size of the PBA block in words. The function operates on the
+ * EEPROM image if the eeprom_buf pointer is not NULL, otherwise it accesses
+ * the physical EEPROM device.
+ *
+ **/
+s32 txgbe_get_pba_block_size(struct txgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size)
+{
+ s32 err;
+ u16 pba_word[2];
+ u16 length;
+
+ DEBUGFUNC("txgbe_get_pba_block_size");
+
+ if (eeprom_buf == NULL) {
+ err = hw->rom.readw_buffer(hw, TXGBE_PBANUM0_PTR, 2,
+ &pba_word[0]);
+ if (err)
+ return err;
+ } else {
+ if (eeprom_buf_size > TXGBE_PBANUM1_PTR) {
+ pba_word[0] = eeprom_buf[TXGBE_PBANUM0_PTR];
+ pba_word[1] = eeprom_buf[TXGBE_PBANUM1_PTR];
+ } else {
+ return TXGBE_ERR_PARAM;
+ }
+ }
+
+ if (pba_word[0] == TXGBE_PBANUM_PTR_GUARD) {
+ if (eeprom_buf == NULL) {
+ err = hw->rom.read16(hw, pba_word[1] + 0,
+ &length);
+ if (err)
+ return err;
+ } else {
+ if (eeprom_buf_size > pba_word[1])
+ length = eeprom_buf[pba_word[1] + 0];
+ else
+ return TXGBE_ERR_PARAM;
+ }
+
+ if (length == 0xFFFF || length == 0)
+ return TXGBE_ERR_PBA_SECTION;
+ } else {
+ /* PBA number in legacy format, there is no PBA Block. */
+ length = 0;
+ }
+
+ if (pba_block_size != NULL)
+ *pba_block_size = length;
+
+ return 0;
+}
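+
+/*
+ * Illustrative usage of the raw PBA helpers (a sketch, not driver code;
+ * the 32-word buffer size is an assumption): size the block first, then
+ * read it from the physical EEPROM.
+ *
+ *	s32 err;
+ *	u16 size, words[32];
+ *	struct txgbe_pba pba = { .pba_block = words };
+ *
+ *	err = txgbe_get_pba_block_size(hw, NULL, 0, &size);
+ *	if (err == 0 && size <= 32)
+ *		err = txgbe_read_pba_raw(hw, NULL, 0, 32, &pba);
+ */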
+
+/**
+ * txgbe_get_mac_addr - Generic get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from first Receive Address Register (RAR0)
+ * A reset of the adapter must be performed prior to calling this function
+ * in order for the MAC address to have been loaded from the EEPROM into RAR0
+ **/
+s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ DEBUGFUNC("txgbe_get_mac_addr");
+
+ wr32(hw, TXGBE_ETHADDRIDX, 0);
+ rar_high = rd32(hw, TXGBE_ETHADDRH);
+ rar_low = rd32(hw, TXGBE_ETHADDRL);
+
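+ /* RAR0 layout: ETHADDRH bits [15:8] and [7:0] hold address bytes 0-1,
+ * ETHADDRL bits [31:24]..[7:0] hold bytes 2-5, so the loops below
+ * restore network byte order.
+ */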
+ for (i = 0; i < 2; i++)
+ mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);
+
+ for (i = 0; i < 4; i++)
+ mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8);
+
+ return 0;
+}
+
+/**
+ * txgbe_set_lan_id_multi_port - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers and
+ * swaps the port value if requested.
+ **/
+void txgbe_set_lan_id_multi_port(struct txgbe_hw *hw)
+{
+ struct txgbe_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ DEBUGFUNC("txgbe_set_lan_id_multi_port_pcie");
+
+ reg = rd32(hw, TXGBE_PORTSTAT);
+ bus->lan_id = TXGBE_PORTSTAT_ID(reg);
+
+ /* check for a port swap */
+ reg = rd32(hw, TXGBE_PWR);
+ if (TXGBE_PWR_LANID(reg) == TXGBE_PWR_LANID_SWAP)
+ bus->func = 0;
+ else
+ bus->func = bus->lan_id;
+}
+
+/**
+ * txgbe_stop_hw - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within txgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 txgbe_stop_hw(struct txgbe_hw *hw)
+{
+ u32 reg_val;
+ u16 i;
+
+ DEBUGFUNC("txgbe_stop_hw");
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit */
+ txgbe_disable_rx(hw);
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ wr32(hw, TXGBE_IENMISC, 0);
+ wr32(hw, TXGBE_IMS(0), TXGBE_IMS_MASK);
+ wr32(hw, TXGBE_IMS(1), TXGBE_IMS_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ wr32(hw, TXGBE_ICRMISC, TXGBE_ICRMISC_MASK);
+ wr32(hw, TXGBE_ICR(0), TXGBE_ICR_MASK);
+ wr32(hw, TXGBE_ICR(1), TXGBE_ICR_MASK);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ wr32(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_FLUSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = rd32(hw, TXGBE_RXCFG(i));
+ reg_val &= ~TXGBE_RXCFG_ENA;
+ wr32(hw, TXGBE_RXCFG(i), reg_val);
+ }
+
+ /* flush all queues disables */
+ txgbe_flush(hw);
+ msec_delay(2);
+
+ return 0;
+}
+
+/**
+ * txgbe_led_on - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ **/
+s32 txgbe_led_on(struct txgbe_hw *hw, u32 index)
+{
+ u32 led_reg = rd32(hw, TXGBE_LEDCTL);
+
+ DEBUGFUNC("txgbe_led_on");
+
+ if (index > 4)
+ return TXGBE_ERR_PARAM;
+
+ /* To turn on the LED, set mode to ON. */
+ led_reg |= TXGBE_LEDCTL_SEL(index);
+ led_reg |= TXGBE_LEDCTL_OD(index);
+ wr32(hw, TXGBE_LEDCTL, led_reg);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+/**
+ * txgbe_led_off - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ **/
+s32 txgbe_led_off(struct txgbe_hw *hw, u32 index)
+{
+ u32 led_reg = rd32(hw, TXGBE_LEDCTL);
+
+ DEBUGFUNC("txgbe_led_off");
+
+ if (index > 4)
+ return TXGBE_ERR_PARAM;
+
+ /* To turn off the LED, set mode to OFF. */
+ led_reg &= ~(TXGBE_LEDCTL_SEL(index));
+ led_reg &= ~(TXGBE_LEDCTL_OD(index));
+ wr32(hw, TXGBE_LEDCTL, led_reg);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+/**
+ * txgbe_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address.
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address.
+ **/
+s32 txgbe_validate_mac_addr(u8 *mac_addr)
+{
+ s32 status = 0;
+
+ DEBUGFUNC("txgbe_validate_mac_addr");
+
+ /* Make sure it is not a multicast address */
+ if (TXGBE_IS_MULTICAST(mac_addr)) {
+ status = TXGBE_ERR_INVALID_MAC_ADDR;
+ /* Not a broadcast address */
+ } else if (TXGBE_IS_BROADCAST(mac_addr)) {
+ status = TXGBE_ERR_INVALID_MAC_ADDR;
+ /* Reject the zero address */
+ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+ status = TXGBE_ERR_INVALID_MAC_ADDR;
+ }
+ return status;
+}
+
+/**
+ * txgbe_set_rar - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ u32 rar_low, rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("txgbe_set_rar");
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ DEBUGOUT("RAR index %d is out of range.\n", index);
+ return TXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ /* setup VMDq pool selection before this RAR gets enabled */
+ hw->mac.set_vmdq(hw, index, vmdq);
+
+ /*
+ * HW expects these in little endian so we reverse the byte
+ * order from network order (big endian) to little endian
+ */
+ rar_low = TXGBE_ETHADDRL_AD0(addr[5]) |
+ TXGBE_ETHADDRL_AD1(addr[4]) |
+ TXGBE_ETHADDRL_AD2(addr[3]) |
+ TXGBE_ETHADDRL_AD3(addr[2]);
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = rd32(hw, TXGBE_ETHADDRH);
+ rar_high &= ~TXGBE_ETHADDRH_AD_MASK;
+ rar_high |= (TXGBE_ETHADDRH_AD4(addr[1]) |
+ TXGBE_ETHADDRH_AD5(addr[0]));
+
+ rar_high &= ~TXGBE_ETHADDRH_VLD;
+ if (enable_addr != 0)
+ rar_high |= TXGBE_ETHADDRH_VLD;
+
+ wr32(hw, TXGBE_ETHADDRIDX, index);
+ wr32(hw, TXGBE_ETHADDRL, rar_low);
+ wr32(hw, TXGBE_ETHADDRH, rar_high);
+
+ return 0;
+}
+
+/**
+ * txgbe_clear_rar - Remove Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Clears an ethernet address from a receive address register.
+ **/
+s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("txgbe_clear_rar");
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ DEBUGOUT("RAR index %d is out of range.\n", index);
+ return TXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ wr32(hw, TXGBE_ETHADDRIDX, index);
+ rar_high = rd32(hw, TXGBE_ETHADDRH);
+ rar_high &= ~(TXGBE_ETHADDRH_AD_MASK | TXGBE_ETHADDRH_VLD);
+
+ wr32(hw, TXGBE_ETHADDRL, 0);
+ wr32(hw, TXGBE_ETHADDRH, rar_high);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.clear_vmdq(hw, index, BIT_MASK32);
+
+ return 0;
+}
+
+/**
+ * txgbe_init_rx_addrs - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+s32 txgbe_init_rx_addrs(struct txgbe_hw *hw)
+{
+ u32 i;
+ u32 psrctl;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("txgbe_init_rx_addrs");
+
+ /*
+ * If the current mac address is valid, assume it is a software override
+ * to the permanent address.
+ * Otherwise, use the permanent address from the eeprom.
+ */
+ if (txgbe_validate_mac_addr(hw->mac.addr) ==
+ TXGBE_ERR_INVALID_MAC_ADDR) {
+ /* Get the MAC address from the RAR0 for later reference */
+ hw->mac.get_mac_addr(hw, hw->mac.addr);
+
+ DEBUGOUT(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
+ DEBUGOUT("%.2X %.2X %.2X\n", hw->mac.addr[3],
+ hw->mac.addr[4], hw->mac.addr[5]);
+ } else {
+ /* Setup the receive address. */
+ DEBUGOUT("Overriding MAC Address in RAR[0]\n");
+ DEBUGOUT(" New MAC Addr =%.2X %.2X %.2X ",
+ hw->mac.addr[0], hw->mac.addr[1],
+ hw->mac.addr[2]);
+ DEBUGOUT("%.2X %.2X %.2X\n", hw->mac.addr[3],
+ hw->mac.addr[4], hw->mac.addr[5]);
+
+ hw->mac.set_rar(hw, 0, hw->mac.addr, 0, true);
+ }
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.clear_vmdq(hw, 0, BIT_MASK32);
+
+ hw->addr_ctrl.overflow_promisc = 0;
+
+ hw->addr_ctrl.rar_used_count = 1;
+
+ /* Zero out the other receive addresses. */
+ DEBUGOUT("Clearing RAR[1-%d]\n", rar_entries - 1);
+ for (i = 1; i < rar_entries; i++) {
+ wr32(hw, TXGBE_ETHADDRIDX, i);
+ wr32(hw, TXGBE_ETHADDRL, 0);
+ wr32(hw, TXGBE_ETHADDRH, 0);
+ }
+
+ /* Clear the MTA */
+ hw->addr_ctrl.mta_in_use = 0;
+ psrctl = rd32(hw, TXGBE_PSRCTL);
+ psrctl &= ~(TXGBE_PSRCTL_ADHF12_MASK | TXGBE_PSRCTL_MCHFENA);
+ psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ wr32(hw, TXGBE_PSRCTL, psrctl);
+
+ DEBUGOUT(" Clearing MTA\n");
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ wr32(hw, TXGBE_MCADDRTBL(i), 0);
+
+ txgbe_init_uta_tables(hw);
+
+ return 0;
+}
+
+/**
+ * txgbe_add_uc_addr - Adds a secondary unicast address.
+ * @hw: pointer to hardware structure
+ * @addr: new address
+ * @vmdq: VMDq "set" or "pool" index
+ *
+ * Adds it to unused receive address register or goes into promiscuous mode.
+ **/
+void txgbe_add_uc_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ u32 rar_entries = hw->mac.num_rar_entries;
+ u32 rar;
+
+ DEBUGFUNC("txgbe_add_uc_addr");
+
+ DEBUGOUT(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+ /*
+ * Place this address in the RAR if there is room,
+ * else put the controller into promiscuous mode
+ */
+ if (hw->addr_ctrl.rar_used_count < rar_entries) {
+ rar = hw->addr_ctrl.rar_used_count;
+ hw->mac.set_rar(hw, rar, addr, vmdq, true);
+ DEBUGOUT("Added a secondary address to RAR[%d]\n", rar);
+ hw->addr_ctrl.rar_used_count++;
+ } else {
+ hw->addr_ctrl.overflow_promisc++;
+ }
+
+ DEBUGOUT("txgbe_add_uc_addr Complete\n");
+}
+
+/**
+ * txgbe_update_uc_addr_list - Updates MAC list of secondary addresses
+ * @hw: pointer to hardware structure
+ * @addr_list: the list of new addresses
+ * @addr_count: number of addresses
+ * @next: iterator function to walk the address list
+ *
+ * The given list replaces any existing list. Clears the secondary addrs from
+ * receive address registers. Uses unused receive address registers for the
+ * first secondary addresses, and falls back to promiscuous mode as needed.
+ *
+ * Drivers using secondary unicast addresses must set user_set_promisc when
+ * manually putting the device into promiscuous mode.
+ **/
+s32 txgbe_update_uc_addr_list(struct txgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, txgbe_mc_addr_itr next)
+{
+ u8 *addr;
+ u32 i;
+ u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
+ u32 uc_addr_in_use;
+ u32 fctrl;
+ u32 vmdq;
+
+ DEBUGFUNC("txgbe_update_uc_addr_list");
+
+ /*
+ * Clear accounting of old secondary address list,
+ * don't count RAR[0]
+ */
+ uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
+ hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
+ hw->addr_ctrl.overflow_promisc = 0;
+
+ /* Zero out the other receive addresses */
+ DEBUGOUT("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
+ for (i = 0; i < uc_addr_in_use; i++) {
+ wr32(hw, TXGBE_ETHADDRIDX, i + 1);
+ wr32(hw, TXGBE_ETHADDRL, 0);
+ wr32(hw, TXGBE_ETHADDRH, 0);
+ }
+
+ /* Add the new addresses */
+ for (i = 0; i < addr_count; i++) {
+ DEBUGOUT(" Adding the secondary addresses:\n");
+ addr = next(hw, &addr_list, &vmdq);
+ txgbe_add_uc_addr(hw, addr, vmdq);
+ }
+
+ if (hw->addr_ctrl.overflow_promisc) {
+ /* enable promisc if not already in overflow or set by user */
+ if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ DEBUGOUT(" Entering address overflow promisc mode\n");
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl |= TXGBE_PSRCTL_UCP;
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+ }
+ } else {
+ /* only disable if set by overflow, not by user */
+ if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ DEBUGOUT(" Leaving address overflow promisc mode\n");
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl &= ~TXGBE_PSRCTL_UCP;
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+ }
+ }
+
+ DEBUGOUT("txgbe_update_uc_addr_list Complete\n");
+ return 0;
+}
+
+/**
+ * txgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits from a multicast address that determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits of
+ * each incoming rx multicast address to determine the bit-vector to check
+ * in the MTA. Which of the 4 combinations of 12 bits the hardware uses is
+ * set by the MO field of the PSRCTL register. The MO field is set during
+ * initialization to mc_filter_type.
+ **/
+STATIC s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ DEBUGFUNC("txgbe_mta_vector");
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ DEBUGOUT("MC filter type param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
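+
+ /*
+ * Example (illustrative): for filter type 0 and an address ending
+ * in 00:01, vector = (0x00 >> 4) | (0x01 << 4) = 0x010, i.e. bit 16
+ * of the 4096-bit MTA (register 0, bit 16).
+ */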
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * txgbe_set_mta - Set bit-vector in multicast table
+ * @hw: pointer to hardware structure
+ * @mc_addr: Multicast address
+ *
+ * Sets the bit-vector in the multicast table.
+ **/
+void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector;
+ u32 vector_bit;
+ u32 vector_reg;
+
+ DEBUGFUNC("txgbe_set_mta");
+
+ hw->addr_ctrl.mta_in_use++;
+
+ vector = txgbe_mta_vector(hw, mc_addr);
+ DEBUGOUT(" bit-vector = 0x%03X\n", vector);
+
+ /*
+ * The MTA is a register array of 128 32-bit registers. It is treated
+ * like an array of 4096 bits. We want to set bit
+ * BitArray[vector_value]. So we figure out what register the bit is
+ * in, read it, OR in the new bit, then write back the new value. The
+ * register is determined by the upper 7 bits of the vector value and
+ * the bit within that register is determined by the lower 5 bits of
+ * the value.
+ */
+ vector_reg = (vector >> 5) & 0x7F;
+ vector_bit = vector & 0x1F;
+ hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+/**
+ * txgbe_update_mc_addr_list - Updates MAC list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @mc_addr_list: the list of new multicast addresses
+ * @mc_addr_count: number of addresses
+ * @next: iterator function to walk the multicast address list
+ * @clear: flag, when set clears the table beforehand
+ *
+ * When the clear flag is set, the given list replaces any existing list.
+ * Hashes the given addresses into the multicast table.
+ **/
+s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, txgbe_mc_addr_itr next,
+ bool clear)
+{
+ u32 i;
+ u32 vmdq;
+
+ DEBUGFUNC("txgbe_update_mc_addr_list");
+
+ /*
+ * Set the new number of MC addresses that we are being requested to
+ * use.
+ */
+ hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+ hw->addr_ctrl.mta_in_use = 0;
+
+ /* Clear mta_shadow */
+ if (clear) {
+ DEBUGOUT(" Clearing MTA\n");
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+ }
+
+ /* Update mta_shadow */
+ for (i = 0; i < mc_addr_count; i++) {
+ DEBUGOUT(" Adding the multicast addresses:\n");
+ txgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+ }
+
+ /* Enable mta */
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ wr32a(hw, TXGBE_MCADDRTBL(0), i,
+ hw->mac.mta_shadow[i]);
+
+ if (hw->addr_ctrl.mta_in_use > 0) {
+ u32 psrctl = rd32(hw, TXGBE_PSRCTL);
+ psrctl &= ~(TXGBE_PSRCTL_ADHF12_MASK | TXGBE_PSRCTL_MCHFENA);
+ psrctl |= TXGBE_PSRCTL_MCHFENA |
+ TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ wr32(hw, TXGBE_PSRCTL, psrctl);
+ }
+
+ DEBUGOUT("txgbe_update_mc_addr_list Complete\n");
+ return 0;
+}
+
+/**
+ * txgbe_enable_mc - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 txgbe_enable_mc(struct txgbe_hw *hw)
+{
+ struct txgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+ DEBUGFUNC("txgbe_enable_mc");
+
+ if (a->mta_in_use > 0) {
+ u32 psrctl = rd32(hw, TXGBE_PSRCTL);
+ psrctl &= ~(TXGBE_PSRCTL_ADHF12_MASK | TXGBE_PSRCTL_MCHFENA);
+ psrctl |= TXGBE_PSRCTL_MCHFENA |
+ TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ wr32(hw, TXGBE_PSRCTL, psrctl);
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_disable_mc - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 txgbe_disable_mc(struct txgbe_hw *hw)
+{
+ struct txgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+ DEBUGFUNC("txgbe_disable_mc");
+
+ if (a->mta_in_use > 0) {
+ u32 psrctl = rd32(hw, TXGBE_PSRCTL);
+ psrctl &= ~(TXGBE_PSRCTL_ADHF12_MASK | TXGBE_PSRCTL_MCHFENA);
+ psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ wr32(hw, TXGBE_PSRCTL, psrctl);
+ }
+ return 0;
+}
+
+/**
+ * txgbe_fc_enable - Enable flow control
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to the current settings.
+ **/
+s32 txgbe_fc_enable(struct txgbe_hw *hw)
+{
+ s32 err = 0;
+ u32 mflcn_reg, fccfg_reg;
+ u32 pause_time;
+ u32 fcrtl, fcrth;
+ int i;
+
+ DEBUGFUNC("txgbe_fc_enable");
+
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ err = TXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ if ((hw->fc.current_mode & txgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ DEBUGOUT("Invalid water mark configuration\n");
+ err = TXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
+ /* Negotiate the fc mode to use */
+ hw->mac.fc_autoneg(hw);
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
+ mflcn_reg &= ~(TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC);
+
+ fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
+ fccfg_reg &= ~(TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC);
+
+ /*
+ * The possible values of fc.current_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.current_mode) {
+ case txgbe_fc_none:
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
+ break;
+ case txgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ mflcn_reg |= TXGBE_RXFCCFG_FC;
+ break;
+ case txgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ fccfg_reg |= TXGBE_TXFCCFG_FC;
+ break;
+ case txgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ mflcn_reg |= TXGBE_RXFCCFG_FC;
+ fccfg_reg |= TXGBE_TXFCCFG_FC;
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ err = TXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ wr32(hw, TXGBE_RXFCCFG, mflcn_reg);
+ wr32(hw, TXGBE_TXFCCFG, fccfg_reg);
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ if ((hw->fc.current_mode & txgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ fcrtl = TXGBE_FCWTRLO_TH(hw->fc.low_water[i]) |
+ TXGBE_FCWTRLO_XON;
+ fcrth = TXGBE_FCWTRHI_TH(hw->fc.high_water[i]) |
+ TXGBE_FCWTRHI_XOFF;
+ } else {
+ /*
+ * In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ fcrtl = 0;
+ fcrth = rd32(hw, TXGBE_PBRXSIZE(i)) - 24576;
+ }
+ wr32(hw, TXGBE_FCWTRLO(i), fcrtl);
+ wr32(hw, TXGBE_FCWTRHI(i), fcrth);
+ }
+
+ /* Configure pause time (2 TCs per register) */
+ pause_time = TXGBE_RXFCFSH_TIME(hw->fc.pause_time);
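+ /* Multiplying by 0x00010001 replicates the 16-bit pause time into
+ * both halves of the 32-bit register, so one write covers two TCs.
+ */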
+ for (i = 0; i < (TXGBE_DCB_TC_MAX / 2); i++)
+ wr32(hw, TXGBE_FCXOFFTM(i), pause_time * 0x00010001);
+
+ /* Configure flow control refresh threshold value */
+ wr32(hw, TXGBE_RXFCRFSH, hw->fc.pause_time / 2);
+
+out:
+ return err;
+}
+
+/**
+ * txgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
+ **/
+s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+ if (!adv_reg || !lp_reg) {
+ DEBUGOUT("Local or link partner's advertised flow control "
+ "settings are NULL. Local: %x, link partner: %x\n",
+ adv_reg, lp_reg);
+ return TXGBE_ERR_FC_NOT_NEGOTIATED;
+ }
+
+ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+ /*
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == txgbe_fc_full) {
+ hw->fc.current_mode = txgbe_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = txgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control=RX PAUSE frames only\n");
+ }
+ } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = txgbe_fc_tx_pause;
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+ } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = txgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = txgbe_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
+ }
+ return 0;
+}
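+
+/*
+ * Pause resolution implemented above (local adv vs. link partner adv):
+ *
+ *   loc_sym  loc_asm  lp_sym  lp_asm   resolved mode
+ *      1        -       1       -      full, or rx_pause if only rx requested
+ *      0        1       1       1      tx_pause
+ *      1        1       0       1      rx_pause
+ *           anything else              none
+ */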
+
+/**
+ * txgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37 on 1 gig fiber.
+ **/
+STATIC s32 txgbe_fc_autoneg_fiber(struct txgbe_hw *hw)
+{
+ u32 pcs_anadv_reg, pcs_lpab_reg;
+ s32 err = TXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ /*
+ * On multispeed fiber at 1g, bail out if
+ * - link is up but AN did not complete, or if
+ * - link is up and AN completed but timed out
+ */
+
+ pcs_anadv_reg = rd32_epcs(hw, SR_MII_MMD_AN_ADV);
+ pcs_lpab_reg = rd32_epcs(hw, SR_MII_MMD_LP_BABL);
+
+ err = txgbe_negotiate_fc(hw, pcs_anadv_reg,
+ pcs_lpab_reg,
+ SR_MII_MMD_AN_ADV_PAUSE_SYM,
+ SR_MII_MMD_AN_ADV_PAUSE_ASM,
+ SR_MII_MMD_AN_ADV_PAUSE_SYM,
+ SR_MII_MMD_AN_ADV_PAUSE_ASM);
+
+ return err;
+}
+
+/**
+ * txgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+STATIC s32 txgbe_fc_autoneg_backplane(struct txgbe_hw *hw)
+{
+ u32 anlp1_reg, autoc_reg;
+ s32 err = TXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ /*
+ * Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ autoc_reg = rd32_epcs(hw, SR_AN_MMD_ADV_REG1);
+ anlp1_reg = rd32_epcs(hw, SR_AN_MMD_LP_ABL1);
+
+ err = txgbe_negotiate_fc(hw, autoc_reg,
+ anlp1_reg,
+ SR_AN_MMD_ADV_REG1_PAUSE_SYM,
+ SR_AN_MMD_ADV_REG1_PAUSE_ASM,
+ SR_AN_MMD_ADV_REG1_PAUSE_SYM,
+ SR_AN_MMD_ADV_REG1_PAUSE_ASM);
+
+ return err;
+}
+
+/**
+ * txgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+STATIC s32 txgbe_fc_autoneg_copper(struct txgbe_hw *hw)
+{
+ u16 technology_ability_reg = 0;
+ u16 lp_technology_ability_reg = 0;
+
+ hw->phy.read_reg(hw, TXGBE_MD_AUTO_NEG_ADVT,
+ TXGBE_MD_DEV_AUTO_NEG,
+ &technology_ability_reg);
+ hw->phy.read_reg(hw, TXGBE_MD_AUTO_NEG_LP,
+ TXGBE_MD_DEV_AUTO_NEG,
+ &lp_technology_ability_reg);
+
+ return txgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+ (u32)lp_technology_ability_reg,
+ TXGBE_TAF_SYM_PAUSE, TXGBE_TAF_ASM_PAUSE,
+ TXGBE_TAF_SYM_PAUSE, TXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ * txgbe_fc_autoneg - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Compares our advertised flow control capabilities to those advertised by
+ * our link partner, and determines the proper flow control mode to use.
+ **/
+void txgbe_fc_autoneg(struct txgbe_hw *hw)
+{
+ s32 err = TXGBE_ERR_FC_NOT_NEGOTIATED;
+ u32 speed;
+ bool link_up;
+
+ DEBUGFUNC("txgbe_fc_autoneg");
+
+ /*
+ * AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ DEBUGOUT("Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ DEBUGOUT("The link is down");
+ goto out;
+ }
+
+ switch (hw->phy.media_type) {
+ /* Autoneg flow control on fiber adapters */
+ case txgbe_media_type_fiber_qsfp:
+ case txgbe_media_type_fiber:
+ if (speed == TXGBE_LINK_SPEED_1GB_FULL)
+ err = txgbe_fc_autoneg_fiber(hw);
+ break;
+
+ /* Autoneg flow control on backplane adapters */
+ case txgbe_media_type_backplane:
+ err = txgbe_fc_autoneg_backplane(hw);
+ break;
+
+ /* Autoneg flow control on copper adapters */
+ case txgbe_media_type_copper:
+ if (txgbe_device_supports_autoneg_fc(hw))
+ err = txgbe_fc_autoneg_copper(hw);
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ if (err == 0) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * txgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the MNGSEM register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask)
+{
+ u32 mngsem = 0;
+ u32 swmask = TXGBE_MNGSEM_SW(mask);
+ u32 fwmask = TXGBE_MNGSEM_FW(mask);
+ u32 timeout = 200;
+ u32 i;
+
+ DEBUGFUNC("txgbe_acquire_swfw_sync");
+
+ for (i = 0; i < timeout; i++) {
+ /*
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (txgbe_get_eeprom_semaphore(hw))
+ return TXGBE_ERR_SWFW_SYNC;
+
+ mngsem = rd32(hw, TXGBE_MNGSEM);
+ if (!(mngsem & (fwmask | swmask))) {
+ mngsem |= swmask;
+ wr32(hw, TXGBE_MNGSEM, mngsem);
+ txgbe_release_eeprom_semaphore(hw);
+ return 0;
+ } else {
+ /* Resource is currently in use by FW or SW */
+ txgbe_release_eeprom_semaphore(hw);
+ msec_delay(5);
+ }
+ }
+
+ /* If time expired clear the bits holding the lock and retry */
+ if (mngsem & (fwmask | swmask))
+ txgbe_release_swfw_sync(hw, mngsem & (fwmask | swmask));
+
+ msec_delay(5);
+ return TXGBE_ERR_SWFW_SYNC;
+}
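+
+/*
+ * Typical acquire/release bracket (illustrative sketch; the exact mask
+ * macro, e.g. TXGBE_MNGSEM_SWPHY, is an assumption):
+ *
+ *	if (txgbe_acquire_swfw_sync(hw, TXGBE_MNGSEM_SWPHY) == 0) {
+ *		... access the shared PHY resource ...
+ *		txgbe_release_swfw_sync(hw, TXGBE_MNGSEM_SWPHY);
+ *	}
+ */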
+
+/**
+ * txgbe_release_swfw_sync - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the MNGSEM register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask)
+{
+ u32 mngsem;
+ u32 swmask = mask;
+
+ DEBUGFUNC("txgbe_release_swfw_sync");
+
+ txgbe_get_eeprom_semaphore(hw);
+
+ mngsem = rd32(hw, TXGBE_MNGSEM);
+ mngsem &= ~swmask;
+ wr32(hw, TXGBE_MNGSEM, mngsem);
+
+ txgbe_release_eeprom_semaphore(hw);
+}
+
+/**
+ * txgbe_disable_sec_rx_path - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw)
+{
+#define TXGBE_MAX_SECRX_POLL 4000
+
+ int i;
+ int secrxreg;
+
+ DEBUGFUNC("txgbe_disable_sec_rx_path");
+
+ secrxreg = rd32(hw, TXGBE_SECRXCTL);
+ secrxreg |= TXGBE_SECRXCTL_XDSA;
+ wr32(hw, TXGBE_SECRXCTL, secrxreg);
+ for (i = 0; i < TXGBE_MAX_SECRX_POLL; i++) {
+ secrxreg = rd32(hw, TXGBE_SECRXSTAT);
+ if (secrxreg & TXGBE_SECRXSTAT_RDY)
+ break;
+ else
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(10);
+ }
+
+ /* For informational purposes only */
+ if (i >= TXGBE_MAX_SECRX_POLL)
+ DEBUGOUT("Rx unit being enabled before security "
+ "path fully disabled. Continuing with init.\n");
+
+ return 0;
+}
+
+/**
+ * txgbe_enable_sec_rx_path - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw)
+{
+ u32 secrxreg;
+
+ DEBUGFUNC("txgbe_enable_sec_rx_path");
+
+ secrxreg = rd32(hw, TXGBE_SECRXCTL);
+ secrxreg &= ~TXGBE_SECRXCTL_XDSA;
+ wr32(hw, TXGBE_SECRXCTL, secrxreg);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+/**
+ * txgbe_disable_sec_tx_path - Stops the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the transmit data path and waits for the HW to internally empty
+ * the Tx security block
+ **/
+int txgbe_disable_sec_tx_path(struct txgbe_hw *hw)
+{
+#define TXGBE_MAX_SECTX_POLL 40
+
+ int i;
+ int sectxreg;
+
+ sectxreg = rd32(hw, TXGBE_SECTXCTL);
+ sectxreg |= TXGBE_SECTXCTL_XDSA;
+ wr32(hw, TXGBE_SECTXCTL, sectxreg);
+ for (i = 0; i < TXGBE_MAX_SECTX_POLL; i++) {
+ sectxreg = rd32(hw, TXGBE_SECTXSTAT);
+ if (sectxreg & TXGBE_SECTXSTAT_RDY)
+ break;
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(1000);
+ }
+
+ /* For informational purposes only */
+ if (i >= TXGBE_MAX_SECTX_POLL)
+ PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+ "path fully disabled. Continuing with init.");
+
+ return 0;
+}
+
+/**
+ * txgbe_enable_sec_tx_path - Enables the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the transmit data path.
+ **/
+int txgbe_enable_sec_tx_path(struct txgbe_hw *hw)
+{
+ u32 sectxreg;
+
+ sectxreg = rd32(hw, TXGBE_SECTXCTL);
+ sectxreg &= ~TXGBE_SECTXCTL_XDSA;
+ wr32(hw, TXGBE_SECTXCTL, sectxreg);
+ txgbe_flush(hw);
+
+ return 0;
+}
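+
+/*
+ * The disable/enable pairs above are meant to bracket reconfiguration of
+ * the security blocks, e.g. (illustrative sketch):
+ *
+ *	txgbe_disable_sec_tx_path(hw);
+ *	... program the Tx security block ...
+ *	txgbe_enable_sec_tx_path(hw);
+ */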
+
+/**
+ * txgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_offset: SAN MAC address offset
+ *
+ * This function reads the EEPROM location for the SAN MAC address
+ * pointer and returns the value at that location. This is used in both
+ * get and set mac_addr routines.
+ **/
+STATIC s32 txgbe_get_san_mac_addr_offset(struct txgbe_hw *hw,
+ u16 *san_mac_offset)
+{
+ s32 err;
+
+ DEBUGFUNC("txgbe_get_san_mac_addr_offset");
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available.
+ */
+ err = hw->rom.readw_sw(hw, TXGBE_SAN_MAC_ADDR_PTR,
+ san_mac_offset);
+ if (err) {
+ DEBUGOUT("eeprom at offset %d failed",
+ TXGBE_SAN_MAC_ADDR_PTR);
+ }
+
+ return err;
+}
+
+/**
+ * txgbe_get_san_mac_addr - SAN MAC address retrieval from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ * set_lan_id() is called by identify_sfp(), but this cannot be relied
+ * upon for non-SFP connections, so we must call it here.
+ **/
+s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr)
+{
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+ s32 err;
+
+ DEBUGFUNC("txgbe_get_san_mac_addr");
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available. If they're not, no point in calling set_lan_id() here.
+ */
+ err = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ if (err || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+ goto san_mac_addr_out;
+
+ /* apply the port offset to the address offset */
+ if (hw->bus.func)
+ san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT1_OFFSET;
+ else
+ san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT0_OFFSET;
+ for (i = 0; i < 3; i++) {
+ err = hw->rom.read16(hw, san_mac_offset,
+ &san_mac_data);
+ if (err) {
+ DEBUGOUT("eeprom read at offset %d failed",
+ san_mac_offset);
+ goto san_mac_addr_out;
+ }
+ san_mac_addr[i * 2] = (u8)(san_mac_data);
+ san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+ san_mac_offset++;
+ }
+ return 0;
+
+san_mac_addr_out:
+ /*
+ * No addresses available in this EEPROM. It's not an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+ return 0;
+}
+
+/**
+ * txgbe_set_san_mac_addr - Write the SAN MAC address to the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Write a SAN MAC address to the EEPROM.
+ **/
+s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr)
+{
+ s32 err;
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+
+ DEBUGFUNC("txgbe_set_san_mac_addr");
+
+ /* Look for SAN mac address pointer. If not defined, return */
+ err = txgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ if (err || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+ return TXGBE_ERR_NO_SAN_ADDR_PTR;
+
+ /* Apply the port offset to the address offset */
+ if (hw->bus.func)
+ san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT1_OFFSET;
+ else
+ san_mac_offset += TXGBE_SAN_MAC_ADDR_PORT0_OFFSET;
+
+ for (i = 0; i < 3; i++) {
+ san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
+ san_mac_data |= (u16)(san_mac_addr[i * 2]);
+ hw->rom.write16(hw, san_mac_offset, san_mac_data);
+ san_mac_offset++;
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_insert_mac_addr - Find a RAR for this mac address
+ * @hw: pointer to hardware structure
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq pool to assign
+ *
+ * Puts an ethernet address into a receive address register, or
+ * finds the rar that it is already in; adds to the pool list
+ **/
+s32 txgbe_insert_mac_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+ static const u32 NO_EMPTY_RAR_FOUND = BIT_MASK32;
+ u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
+ u32 rar;
+ u32 rar_low, rar_high;
+ u32 addr_low, addr_high;
+
+ DEBUGFUNC("txgbe_insert_mac_addr");
+
+ /* swap bytes for HW little endian */
+ addr_low = addr[0] | (addr[1] << 8)
+ | (addr[2] << 16)
+ | (addr[3] << 24);
+ addr_high = addr[4] | (addr[5] << 8);
+
+ /*
+ * Either find the mac_id in rar or find the first empty space.
+ * rar_highwater points to just after the highest currently used
+ * rar in order to shorten the search. It grows when we add a new
+ * rar to the top.
+ */
+ for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
+ wr32(hw, TXGBE_ETHADDRIDX, rar);
+ rar_high = rd32(hw, TXGBE_ETHADDRH);
+
+ if (((TXGBE_ETHADDRH_VLD & rar_high) == 0)
+ && first_empty_rar == NO_EMPTY_RAR_FOUND) {
+ first_empty_rar = rar;
+ } else if ((rar_high & 0xFFFF) == addr_high) {
+ rar_low = rd32(hw, TXGBE_ETHADDRL);
+ if (rar_low == addr_low)
+ break; /* found it already in the rars */
+ }
+ }
+
+ if (rar < hw->mac.rar_highwater) {
+ /* already there so just add to the pool bits */
+ txgbe_set_vmdq(hw, rar, vmdq);
+ } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
+ /* stick it into first empty RAR slot we found */
+ rar = first_empty_rar;
+ txgbe_set_rar(hw, rar, addr, vmdq, true);
+ } else if (rar == hw->mac.rar_highwater) {
+ /* add it to the top of the list and inc the highwater mark */
+ txgbe_set_rar(hw, rar, addr, vmdq, true);
+ hw->mac.rar_highwater++;
+ } else if (rar >= hw->mac.num_rar_entries) {
+ return TXGBE_ERR_INVALID_MAC_ADDR;
+ }
+
+ /*
+ * If we found rar[0], make sure the default pool bit (we use pool 0)
+ * remains cleared to be sure default pool packets will get delivered
+ */
+ if (rar == 0)
+ txgbe_clear_vmdq(hw, rar, 0);
+
+ return rar;
+}
+
+/**
+ * txgbe_clear_vmdq - Disassociate a VMDq pool index from a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to disassociate
+ * @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar_lo, mpsar_hi;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("txgbe_clear_vmdq");
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ DEBUGOUT("RAR index %d is out of range.\n", rar);
+ return TXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ wr32(hw, TXGBE_ETHADDRIDX, rar);
+ mpsar_lo = rd32(hw, TXGBE_ETHADDRASSL);
+ mpsar_hi = rd32(hw, TXGBE_ETHADDRASSH);
+
+ if (TXGBE_REMOVED(hw->hw_addr))
+ goto done;
+
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
+
+ if (vmdq == BIT_MASK32) {
+ if (mpsar_lo) {
+ wr32(hw, TXGBE_ETHADDRASSL, 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ wr32(hw, TXGBE_ETHADDRASSH, 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ wr32(hw, TXGBE_ETHADDRASSL, mpsar_lo);
+ } else {
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ wr32(hw, TXGBE_ETHADDRASSH, mpsar_hi);
+ }
+
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 &&
+ rar != 0 && rar != hw->mac.san_mac_rar_index)
+ hw->mac.clear_rar(hw, rar);
+done:
+ return 0;
+}
+
+/**
+ * txgbe_set_vmdq - Associate a VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq pool index
+ **/
+s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ DEBUGFUNC("txgbe_set_vmdq");
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ DEBUGOUT("RAR index %d is out of range.\n", rar);
+ return TXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ wr32(hw, TXGBE_ETHADDRIDX, rar);
+ if (vmdq < 32) {
+ mpsar = rd32(hw, TXGBE_ETHADDRASSL);
+ mpsar |= 1 << vmdq;
+ wr32(hw, TXGBE_ETHADDRASSL, mpsar);
+ } else {
+ mpsar = rd32(hw, TXGBE_ETHADDRASSH);
+ mpsar |= 1 << (vmdq - 32);
+ wr32(hw, TXGBE_ETHADDRASSH, mpsar);
+ }
+ return 0;
+}
+
+/**
+ * txgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
+ * This function should only be invoked in IOV mode. In IOV mode, the
+ * default pool is the next pool after the number of VFs advertised,
+ * not pool 0. The MPSAR table needs to be updated for the SAN_MAC RAR
+ * [hw->mac.san_mac_rar_index].
+ **/
+s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq)
+{
+ u32 rar = hw->mac.san_mac_rar_index;
+
+ DEBUGFUNC("txgbe_set_vmdq_san_mac");
+
+ wr32(hw, TXGBE_ETHADDRIDX, rar);
+ if (vmdq < 32) {
+ wr32(hw, TXGBE_ETHADDRASSL, 1 << vmdq);
+ wr32(hw, TXGBE_ETHADDRASSH, 0);
+ } else {
+ wr32(hw, TXGBE_ETHADDRASSL, 0);
+ wr32(hw, TXGBE_ETHADDRASSH, 1 << (vmdq - 32));
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_init_uta_tables - Initialize the Unicast Table Array
+ * @hw: pointer to hardware structure
+ **/
+s32 txgbe_init_uta_tables(struct txgbe_hw *hw)
+{
+ int i;
+
+ DEBUGFUNC("txgbe_init_uta_tables");
+ DEBUGOUT(" Clearing UTA\n");
+
+ for (i = 0; i < 128; i++)
+ wr32(hw, TXGBE_UCADDRTBL(i), 0);
+
+ return 0;
+}
+
+/**
+ * txgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
+ * vlanid not found
+ *
+ * Return the VLVF index where this VLAN id should be placed.
+ **/
+s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+{
+ s32 regindex, first_empty_slot;
+ u32 bits;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* if vlvf_bypass is set we don't want to use an empty slot, we
+ * will simply bypass the VLVF if there are no entries present in the
+ * VLVF that contain our VLAN
+ */
+ first_empty_slot = vlvf_bypass ? TXGBE_ERR_NO_SPACE : 0;
+
+ /* add VLAN enable bit for comparison */
+ vlan |= TXGBE_PSRVLAN_EA;
+
+ /* Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way.
+ *
+ * pre-decrement loop covering (TXGBE_NUM_POOL - 1) .. 1
+ */
+ for (regindex = TXGBE_NUM_POOL; --regindex;) {
+ wr32(hw, TXGBE_PSRVLANIDX, regindex);
+ bits = rd32(hw, TXGBE_PSRVLAN);
+ if (bits == vlan)
+ return regindex;
+ if (!first_empty_slot && !bits)
+ first_empty_slot = regindex;
+ }
+
+ /* If we are here then we didn't find the VLAN. Return first empty
+ * slot we found during our search, else error.
+ */
+ if (!first_empty_slot)
+ DEBUGOUT("No space in VLVF.\n");
+
+ return first_empty_slot ? first_empty_slot : TXGBE_ERR_NO_SPACE;
+}
+
+/**
+ * txgbe_set_vfta - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ u32 regidx, vfta_delta, vfta;
+ s32 err;
+
+ DEBUGFUNC("txgbe_set_vfta");
+
+ if (vlan > 4095 || vind > 63)
+ return TXGBE_ERR_PARAM;
+
+ /*
+ * this is a 2 part operation - first the VFTA, then the
+ * VLVF and VLVFB if VT Mode is set
+ * We don't write the VFTA until we know the VLVF part succeeded.
+ */
+
+ /* Part 1
+ * The VFTA is a bitstring made up of 128 32-bit registers
+ * that enable the particular VLAN id, much like the MTA:
+ * bits[11-5]: which register
+ * bits[4-0]: which bit in the register
+ */
+ regidx = vlan / 32;
+ vfta_delta = 1 << (vlan % 32);
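+ /* e.g. VLAN 100: regidx = 3, vfta_delta = 1 << 4 */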
+ vfta = rd32(hw, TXGBE_VLANTBL(regidx));
+
+ /*
+ * vfta_delta represents the difference between the current value
+ * of vfta and the value we want in the register. Since the diff
+ * is an XOR mask we can just update the vfta using an XOR
+ */
+ vfta_delta &= vlan_on ? ~vfta : vfta;
+ vfta ^= vfta_delta;
+
+ /* Part 2
+ * Call txgbe_set_vlvf to set VLVFB and VLVF
+ */
+ err = txgbe_set_vlvf(hw, vlan, vind, vlan_on, &vfta_delta,
+ vfta, vlvf_bypass);
+ if (err != 0) {
+ if (vlvf_bypass)
+ goto vfta_update;
+ return err;
+ }
+
+vfta_update:
+ /* Update VFTA now that we are ready for traffic */
+ if (vfta_delta)
+ wr32(hw, TXGBE_VLANTBL(regidx), vfta);
+
+ return 0;
+}
+
+/**
+ * txgbe_set_vlvf - Set VLAN Pool Filter
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in PSRVLANPLM
+ * @vlan_on: boolean flag to turn on/off VLAN in PSRVLAN
+ * @vfta_delta: pointer to the difference between the current value
+ * of the VFTA and the desired value
+ * @vfta: the desired value of the VFTA
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *
+ * Turn on/off specified bit in VLVF table.
+ **/
+s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass)
+{
+ u32 bits;
+ u32 portctl;
+ s32 vlvf_index;
+
+ DEBUGFUNC("txgbe_set_vlvf");
+
+ if (vlan > 4095 || vind > 63)
+ return TXGBE_ERR_PARAM;
+
+ /* If VT Mode is set
+ * Either vlan_on
+ * make sure the vlan is in PSRVLAN
+ * set the vind bit in the matching PSRVLANPLM
+ * Or !vlan_on
+ * clear the pool bit and possibly the vind
+ */
+ portctl = rd32(hw, TXGBE_PORTCTL);
+ if (!(portctl & TXGBE_PORTCTL_NUMVT_MASK))
+ return 0;
+
+ vlvf_index = txgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
+ if (vlvf_index < 0)
+ return vlvf_index;
+
+ wr32(hw, TXGBE_PSRVLANIDX, vlvf_index);
+ bits = rd32(hw, TXGBE_PSRVLANPLM(vind / 32));
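+ /* two PLM registers cover 64 pools: e.g. vind 40 selects
+ * PSRVLANPLM(1), bit 8
+ */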
+
+ /* set the pool bit */
+ bits |= 1 << (vind % 32);
+ if (vlan_on)
+ goto vlvf_update;
+
+ /* clear the pool bit */
+ bits ^= 1 << (vind % 32);
+
+ if (!bits &&
+ !rd32(hw, TXGBE_PSRVLANPLM(1 - vind / 32))) {
+ /* Clear the VFTA first, then disable PSRVLAN. Otherwise
+ * we run the risk of stray packets leaking into
+ * the PF via the default pool
+ */
+ if (*vfta_delta)
+ wr32(hw, TXGBE_VLANTBL(vlan / 32), vfta);
+
+ /* disable VLVF and clear remaining bit from pool */
+ wr32(hw, TXGBE_PSRVLAN, 0);
+ wr32(hw, TXGBE_PSRVLANPLM(vind / 32), 0);
+
+ return 0;
+ }
+
+ /* If there are still bits set in the PSRVLANPLM registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the PSRVLANPLM entry bit.
+ * If the caller has requested that we clear the PSRVLANPLM
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the PSRVLANPLM VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * PSRVLANPLM bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ *vfta_delta = 0;
+
+vlvf_update:
+ /* record pool change and enable VLAN ID if not already enabled */
+ wr32(hw, TXGBE_PSRVLANPLM(vind / 32), bits);
+ wr32(hw, TXGBE_PSRVLAN, TXGBE_PSRVLAN_EA | vlan);
+
+ return 0;
+}
+
+/**
+ * txgbe_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+s32 txgbe_clear_vfta(struct txgbe_hw *hw)
+{
+ u32 offset;
+
+ DEBUGFUNC("txgbe_clear_vfta");
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ wr32(hw, TXGBE_VLANTBL(offset), 0);
+
+ for (offset = 0; offset < TXGBE_NUM_POOL; offset++) {
+ wr32(hw, TXGBE_PSRVLANIDX, offset);
+ wr32(hw, TXGBE_PSRVLAN, 0);
+ wr32(hw, TXGBE_PSRVLANPLM(0), 0);
+ wr32(hw, TXGBE_PSRVLANPLM(1), 0);
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
+ * @hw: pointer to hardware structure
+ *
+ * Contains the logic to identify if we need to verify link for the
+ * crosstalk fix
+ **/
+static bool txgbe_need_crosstalk_fix(struct txgbe_hw *hw)
+{
+ /* Does FW say we need the fix */
+ if (!hw->need_crosstalk_fix)
+ return false;
+
+ /* Only consider SFP+ PHYs i.e. media type fiber */
+ switch (hw->phy.media_type) {
+ case txgbe_media_type_fiber:
+ case txgbe_media_type_fiber_qsfp:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * txgbe_check_mac_link - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 txgbe_check_mac_link(struct txgbe_hw *hw, u32 *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 links_reg, links_orig;
+ u32 i;
+
+ DEBUGFUNC("txgbe_check_mac_link");
+
+ /* If the crosstalk fix is enabled, do the sanity check of making
+ * sure the SFP+ cage is full.
+ */
+ if (txgbe_need_crosstalk_fix(hw)) {
+ u32 sfp_cage_full;
+
+ switch (hw->mac.type) {
+ case txgbe_mac_raptor:
+ sfp_cage_full = !rd32m(hw, TXGBE_GPIODATA,
+ TXGBE_GPIOBIT_2);
+ break;
+ default:
+ /* sanity check - No SFP+ devices here */
+ sfp_cage_full = false;
+ break;
+ }
+
+ if (!sfp_cage_full) {
+ *link_up = false;
+ *speed = TXGBE_LINK_SPEED_UNKNOWN;
+ return 0;
+ }
+ }
+
+ /* clear the old state */
+ links_orig = rd32(hw, TXGBE_PORTSTAT);
+
+ links_reg = rd32(hw, TXGBE_PORTSTAT);
+
+ if (links_orig != links_reg) {
+ DEBUGOUT("LINKS changed from %08X to %08X\n",
+ links_orig, links_reg);
+ }
+
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ if (links_reg & TXGBE_PORTSTAT_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msec_delay(100);
+ links_reg = rd32(hw, TXGBE_PORTSTAT);
+ }
+ } else {
+ if (links_reg & TXGBE_PORTSTAT_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ switch (links_reg & TXGBE_PORTSTAT_BW_MASK) {
+ case TXGBE_PORTSTAT_BW_10G:
+ *speed = TXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case TXGBE_PORTSTAT_BW_1G:
+ *speed = TXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case TXGBE_PORTSTAT_BW_100M:
+ *speed = TXGBE_LINK_SPEED_100M_FULL;
+ break;
+ default:
+ *speed = TXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function reads the EEPROM from the alternative SAN MAC address
+ * block to check support for the alternative WWNN/WWPN prefix.
+ **/
+s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ DEBUGFUNC("txgbe_get_wwn_prefix");
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ offset = TXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+ if (hw->rom.readw_sw(hw, offset, &alt_san_mac_blk_offset))
+ goto wwn_prefix_err;
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ if (hw->rom.read16(hw, offset, &caps))
+ goto wwn_prefix_err;
+ if (!(caps & TXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ if (hw->rom.read16(hw, offset, wwnn_prefix)) {
+ DEBUGOUT("eeprom read at offset %d failed", offset);
+ }
+
+ offset = alt_san_mac_blk_offset + TXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ if (hw->rom.read16(hw, offset, wwpn_prefix))
+ goto wwn_prefix_err;
+
+wwn_prefix_out:
+ return 0;
+
+wwn_prefix_err:
+ DEBUGOUT("eeprom read at offset %d failed", offset);
+ return 0;
+}
+
+/**
+ * txgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM
+ * @hw: pointer to hardware structure
+ * @bs: the fcoe boot status
+ *
+ * This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 txgbe_get_fcoe_boot_status(struct txgbe_hw *hw, u16 *bs)
+{
+ u16 offset, caps, flags;
+ s32 status;
+
+ DEBUGFUNC("txgbe_get_fcoe_boot_status");
+
+ /* clear output first */
+ *bs = txgbe_fcoe_bootstatus_unavailable;
+
+ /* check if FCOE IBA block is present */
+ offset = TXGBE_FCOE_IBA_CAPS_BLK_PTR;
+ status = hw->rom.read16(hw, offset, &caps);
+ if (status != 0)
+ goto out;
+
+ if (!(caps & TXGBE_FCOE_IBA_CAPS_FCOE))
+ goto out;
+
+ /* check if iSCSI FCOE block is populated */
+ status = hw->rom.read16(hw, TXGBE_ISCSI_FCOE_BLK_PTR, &offset);
+ if (status != 0)
+ goto out;
+
+ if ((offset == 0) || (offset == 0xFFFF))
+ goto out;
+
+ /* read fcoe flags in iSCSI FCOE block */
+ offset = offset + TXGBE_ISCSI_FCOE_FLAGS_OFFSET;
+ status = hw->rom.read16(hw, offset, &flags);
+ if (status != 0)
+ goto out;
+
+ if (flags & TXGBE_ISCSI_FCOE_FLAGS_ENABLE)
+ *bs = txgbe_fcoe_bootstatus_enabled;
+ else
+ *bs = txgbe_fcoe_bootstatus_disabled;
+
+out:
+ return status;
+}
+
+/**
+ * txgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for MAC anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
+ *
+ **/
+void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8;
+ u32 pfvfspoof;
+
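+ /* 8 VFs per register: e.g. VF 10 maps to register 1, bit 2 */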
+ pfvfspoof = rd32(hw, TXGBE_POOLTXASMAC(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ wr32(hw, TXGBE_POOLTXASMAC(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * txgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ *
+ **/
+void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8;
+ u32 pfvfspoof;
+
+ pfvfspoof = rd32(hw, TXGBE_POOLTXASVLAN(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ wr32(hw, TXGBE_POOLTXASVLAN(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * txgbe_get_device_caps - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word with the extra device capabilities
+ *
+ * This function will read the EEPROM location for the device capabilities,
+ * and return the word through device_caps.
+ **/
+s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps)
+{
+ DEBUGFUNC("txgbe_get_device_caps");
+
+ hw->rom.readw_sw(hw, TXGBE_DEVICE_CAPS, device_caps);
+
+ return 0;
+}
+
+/**
+ * txgbe_set_pba - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void txgbe_set_pba(struct txgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy)
+{
+ u32 pbsize = hw->mac.rx_pb_size;
+ int i = 0;
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ /* Reserve headroom */
+ pbsize -= headroom;
+
+ if (!num_pb)
+ num_pb = 1;
+
+ /* Divide remaining packet buffer space amongst the number of packet
+ * buffers requested using supplied strategy.
+ */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* txgbe_dcb_pba_80_48 strategy weights the first half of the
+ * packet buffers with 5/8 of the packet buffer space.
+ */
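+ /* Example: pbsize = 512KB with num_pb = 8 gives the first four
+ * buffers 80KB each (320KB = 5/8 of 512KB), leaving 192KB to be
+ * split equally (48KB each) by the fall-through below.
+ */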
+ rxpktsize = (pbsize * 5) / (num_pb * 4);
+ pbsize -= rxpktsize * (num_pb / 2);
+ rxpktsize <<= 10;
+ for (; i < (num_pb / 2); i++)
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpktsize);
+ /* fall through - configure remaining packet buffers */
+ case PBA_STRATEGY_EQUAL:
+ rxpktsize = (pbsize / (num_pb - i));
+ rxpktsize <<= 10;
+ for (; i < num_pb; i++)
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpktsize);
+ break;
+ default:
+ break;
+ }
+
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ txpktsize = TXGBE_PBTXSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - TXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ wr32(hw, TXGBE_PBTXSIZE(i), txpktsize);
+ wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size */
+ for (; i < TXGBE_MAX_UP; i++) {
+ wr32(hw, TXGBE_PBRXSIZE(i), 0);
+ wr32(hw, TXGBE_PBTXSIZE(i), 0);
+ wr32(hw, TXGBE_PBTXDMATH(i), 0);
+ }
+}
+
+/**
+ * txgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The MACs can experience issues if TX work is still pending
+ * when a reset occurs. This function prevents this by flushing the PCIe
+ * buffers on the system.
+ **/
+void txgbe_clear_tx_pending(struct txgbe_hw *hw)
+{
+ u32 hlreg0, i, poll;
+
+ /*
+ * If double reset is not requested then all transactions should
+ * already be clear and as such there is no work to do
+ */
+ if (!(hw->mac.flags & TXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+ return;
+
+ hlreg0 = rd32(hw, TXGBE_PSRCTL);
+ wr32(hw, TXGBE_PSRCTL, hlreg0 | TXGBE_PSRCTL_LBENA);
+
+ /* Wait for a last completion before clearing buffers */
+ txgbe_flush(hw);
+ msec_delay(3);
+
+ /*
+ * Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+ poll = (800 * 11) / 10;
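+ /* 880 iterations x 100us gives a total wait of ~88ms */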
+ for (i = 0; i < poll; i++)
+ usec_delay(100);
+
+ /* Flush all writes and allow 20usec for all transactions to clear */
+ txgbe_flush(hw);
+ usec_delay(20);
+
+ /* restore previous register values */
+ wr32(hw, TXGBE_PSRCTL, hlreg0);
+}
+
+/**
+ * txgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Returns the thermal sensor data structure
+ **/
+s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw)
+{
+ struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+ s64 tsv;
+ u32 ts_stat;
+
+ DEBUGFUNC("txgbe_get_thermal_sensor_data");
+
+ /* Only support thermal sensors attached to physical port 0 */
+ if (hw->bus.lan_id != 0)
+ return TXGBE_NOT_IMPLEMENTED;
+
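+ /* Convert the raw sensor code (clamped to a minimum of 1200) to
+ * degrees Celsius with the Q8 fixed-point polynomial below:
+ * temp = -48.380 + 3.1020e-1*t - 1.8201e-4*t^2
+ * + 8.1542e-8*t^3 - 1.6743e-11*t^4
+ */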
+ ts_stat = rd32(hw, TXGBE_TSSTAT);
+ tsv = (s64)TXGBE_TSSTAT_DATA(ts_stat);
+ tsv = tsv > 1200 ? tsv : 1200;
+ tsv = -(48380 << 8) / 1000
+ + tsv * (31020 << 8) / 100000
+ - tsv * tsv * (18201 << 8) / 100000000
+ + tsv * tsv * tsv * (81542 << 8) / 1000000000000
+ - tsv * tsv * tsv * tsv * (16743 << 8) / 1000000000000000;
+ tsv >>= 8;
+
+ data->sensor[0].temp = (s16)tsv;
+
+ return 0;
+}
+
+/**
+ * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map
+ * and save off the threshold and location values into mac.thermal_sensor_data
+ **/
+s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw)
+{
+ struct txgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ DEBUGFUNC("txgbe_init_thermal_sensor_thresh");
+
+ memset(data, 0, sizeof(struct txgbe_thermal_sensor_data));
+
+ if (hw->bus.lan_id != 0)
+ return TXGBE_NOT_IMPLEMENTED;
+
+ wr32(hw, TXGBE_TSCTRL, TXGBE_TSCTRL_EVALMD);
+ wr32(hw, TXGBE_TSINTR,
+ TXGBE_TSINTR_AEN | TXGBE_TSINTR_DEN);
+ wr32(hw, TXGBE_TSEN, TXGBE_TSEN_ENA);
+
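+ /* The raw threshold codes written below map to the Celsius values
+ * stored in thermal_sensor_data through the polynomial in
+ * txgbe_get_thermal_sensor_data(): 677 -> ~100C, 614 -> ~90C.
+ */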
+ data->sensor[0].alarm_thresh = 100;
+ wr32(hw, TXGBE_TSATHRE, 677);
+ data->sensor[0].dalarm_thresh = 90;
+ wr32(hw, TXGBE_TSDTHRE, 614);
+
+ return 0;
+}
+
+/**
+ * txgbe_get_orom_version - Return option ROM version from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If the option ROM version is valid, nvm_ver->or_valid is set to true;
+ * otherwise nvm_ver->or_valid is false.
+ **/
+void txgbe_get_orom_version(struct txgbe_hw *hw,
+ struct txgbe_nvm_version *nvm_ver)
+{
+ u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+ nvm_ver->or_valid = false;
+ /* Option Rom may or may not be present. Start with pointer */
+ hw->rom.read16(hw, NVM_OROM_OFFSET, &offset);
+
+ /* make sure offset is valid */
+ if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
+ return;
+
+ hw->rom.read16(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+ hw->rom.read16(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+ /* bail if the option ROM block is empty or invalid */
+ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+ eeprom_cfg_blkl == NVM_VER_INVALID ||
+ eeprom_cfg_blkh == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->or_valid = true;
+ nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+ nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+ (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+ nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
+
+/**
+ * txgbe_get_oem_prod_version - Return OEM Product version
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If the OEM product version is valid, nvm_ver->oem_valid is set to true;
+ * otherwise nvm_ver->oem_valid is false.
+ **/
+void txgbe_get_oem_prod_version(struct txgbe_hw *hw,
+ struct txgbe_nvm_version *nvm_ver)
+{
+ u16 rel_num, prod_ver, mod_len, cap, offset;
+
+ nvm_ver->oem_valid = false;
+ hw->rom.read16(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+ /* Return if the offset to the OEM Product Version block is invalid */
+ if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ /* Read product version block */
+ hw->rom.read16(hw, offset, &mod_len);
+ hw->rom.read16(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+ /* Return if OEM product version block is invalid */
+ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+ (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+ return;
+
+ hw->rom.read16(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+ hw->rom.read16(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+ /* Return if version is invalid */
+ if ((rel_num | prod_ver) == 0x0 ||
+ rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+ nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+ nvm_ver->oem_release = rel_num;
+ nvm_ver->oem_valid = true;
+}
+
+/**
+ * txgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * word read errors will return 0xFFFF
+ **/
+void txgbe_get_etk_id(struct txgbe_hw *hw, struct txgbe_nvm_version *nvm_ver)
+{
+ u16 etk_id_l, etk_id_h;
+
+ if (hw->rom.read16(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+ etk_id_l = NVM_VER_INVALID;
+ if (hw->rom.read16(hw, NVM_ETK_OFF_HI, &etk_id_h))
+ etk_id_h = NVM_VER_INVALID;
+
+ /* The word order for the version format is determined by high order
+ * word bit 15.
+ */
+ if ((etk_id_h & NVM_ETK_VALID) == 0) {
+ nvm_ver->etk_id = etk_id_h;
+ nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+ } else {
+ nvm_ver->etk_id = etk_id_l;
+ nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+ }
+}
+
+/**
+ * txgbe_dcb_get_rtrup2tc - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void txgbe_dcb_get_rtrup2tc(struct txgbe_hw *hw, u8 *map)
+{
+ u32 reg, i;
+
+ reg = rd32(hw, TXGBE_RPUP2TC);
+ for (i = 0; i < TXGBE_DCB_UP_MAX; i++)
+ map[i] = TXGBE_RPUP2TC_UP_MASK &
+ (reg >> (i * TXGBE_RPUP2TC_UP_SHIFT));
+}
+
+void txgbe_disable_rx(struct txgbe_hw *hw)
+{
+ u32 pfdtxgswc;
+
+ pfdtxgswc = rd32(hw, TXGBE_PSRCTL);
+ if (pfdtxgswc & TXGBE_PSRCTL_LBENA) {
+ pfdtxgswc &= ~TXGBE_PSRCTL_LBENA;
+ wr32(hw, TXGBE_PSRCTL, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
+ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
+}
+
+void txgbe_enable_rx(struct txgbe_hw *hw)
+{
+ u32 pfdtxgswc;
+
+ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, TXGBE_MACRXCFG_ENA);
+ wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, TXGBE_PBRXCTL_ENA);
+
+ if (hw->mac.set_lben) {
+ pfdtxgswc = rd32(hw, TXGBE_PSRCTL);
+ pfdtxgswc |= TXGBE_PSRCTL_LBENA;
+ wr32(hw, TXGBE_PSRCTL, pfdtxgswc);
+ hw->mac.set_lben = false;
+ }
+}
+
+/**
+ * txgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the MAC and/or PHY register and restarts link.
+ **/
+s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw,
+ u32 speed,
+ bool autoneg_wait_to_complete)
+{
+ u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+ u32 highest_link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+ s32 status = 0;
+ u32 speedcnt = 0;
+ u32 i = 0;
+ bool autoneg, link_up = false;
+
+ DEBUGFUNC("txgbe_setup_mac_link_multispeed_fiber");
+
+ /* Mask off requested but non-supported speeds */
+ status = hw->mac.get_link_capabilities(hw, &link_speed, &autoneg);
+ if (status != 0)
+ return status;
+
+ speed &= link_speed;
+
+ /* Try each speed one by one, highest priority first. We do this in
+ * software because 10Gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & TXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = TXGBE_LINK_SPEED_10GB_FULL;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case txgbe_media_type_fiber:
+ hw->mac.set_rate_select_speed(hw,
+ TXGBE_LINK_SPEED_10GB_FULL);
+ break;
+ case txgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ DEBUGOUT("Unexpected media type.\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msec_delay(40);
+
+ status = hw->mac.setup_mac_link(hw,
+ TXGBE_LINK_SPEED_10GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ hw->mac.flap_tx_laser(hw);
+
+ /* Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted; the same timing is used for 10G SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != 0)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & TXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = TXGBE_LINK_SPEED_1GB_FULL;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case txgbe_media_type_fiber:
+ hw->mac.set_rate_select_speed(hw,
+ TXGBE_LINK_SPEED_1GB_FULL);
+ break;
+ case txgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects link speed */
+ break;
+ default:
+ DEBUGOUT("Unexpected media type.\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msec_delay(40);
+
+ status = hw->mac.setup_mac_link(hw,
+ TXGBE_LINK_SPEED_1GB_FULL,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ hw->mac.flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.check_link(hw, &link_speed, &link_up, false);
+ if (status != 0)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Configure back to the highest speed we tried,
+ * (if there was more than one). We call ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = txgbe_setup_mac_link_multispeed_fiber(hw,
+ highest_link_speed,
+ autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & TXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & TXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
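+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): request the
+ * 10G/1G speed walk and wait for autoneg to complete:
+ *
+ * err = txgbe_setup_mac_link_multispeed_fiber(hw,
+ * TXGBE_LINK_SPEED_10GB_FULL |
+ * TXGBE_LINK_SPEED_1GB_FULL, true);
+ */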
+
+/**
+ * txgbe_set_soft_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via the soft rate select.
+ */
+void txgbe_set_soft_rate_select_speed(struct txgbe_hw *hw,
+ u32 speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case TXGBE_LINK_SPEED_10GB_FULL:
+ /* one bit mask same as setting on */
+ rs = TXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case TXGBE_LINK_SPEED_1GB_FULL:
+ rs = TXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ DEBUGOUT("Invalid fixed module speed\n");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.read_i2c_byte(hw, TXGBE_SFF_SFF_8472_OSCB,
+ TXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to read Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~TXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.write_i2c_byte(hw, TXGBE_SFF_SFF_8472_OSCB,
+ TXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to write Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ /* Set RS1 */
+ status = hw->phy.read_i2c_byte(hw, TXGBE_SFF_SFF_8472_ESCB,
+ TXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to read Rx Rate Select RS1\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~TXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.write_i2c_byte(hw, TXGBE_SFF_SFF_8472_ESCB,
+ TXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ DEBUGOUT("Failed to write Rx Rate Select RS1\n");
+ goto out;
+ }
+out:
+ return;
+}
+
+/**
+ * txgbe_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers and assign the MAC type and PHY code.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The txgbe_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+s32 txgbe_init_shared_code(struct txgbe_hw *hw)
+{
+ s32 status;
+
+ DEBUGFUNC("txgbe_init_shared_code");
+
+ /* Set the mac type */
+ txgbe_set_mac_type(hw);
+
+ txgbe_init_ops_dummy(hw);
+ switch (hw->mac.type) {
+ case txgbe_mac_raptor:
+ status = txgbe_init_ops_pf(hw);
+ break;
+ case txgbe_mac_raptor_vf:
+ status = txgbe_init_ops_vf(hw);
+ break;
+ default:
+ status = TXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
+ hw->mac.max_link_up_time = TXGBE_LINK_UP_TIME;
+
+ hw->bus.set_lan_id(hw);
+
+ return status;
+}
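+
+/*
+ * Usage sketch (hypothetical probe path; bar0 stands in for the mapped
+ * BAR and the device ID shown is one example):
+ *
+ * struct txgbe_hw hw;
+ *
+ * memset(&hw, 0, sizeof(hw));
+ * hw.hw_addr = bar0;
+ * hw.vendor_id = PCI_VENDOR_ID_WANGXUN;
+ * hw.device_id = TXGBE_DEV_ID_RAPTOR_SFP;
+ * (the remaining subsystem/revision ID fields are filled likewise)
+ * if (txgbe_init_shared_code(&hw) != 0)
+ * return -EIO;
+ */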
+
+/**
+ * txgbe_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+s32 txgbe_set_mac_type(struct txgbe_hw *hw)
+{
+ s32 err = 0;
+
+ DEBUGFUNC("txgbe_set_mac_type\n");
+
+ if (hw->vendor_id != PCI_VENDOR_ID_WANGXUN) {
+ DEBUGOUT("Unsupported vendor id: %x", hw->vendor_id);
+ return TXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ switch (hw->device_id) {
+ case TXGBE_DEV_ID_RAPTOR_KR_KX_KX4:
+ hw->phy.media_type = txgbe_media_type_backplane;
+ hw->mac.type = txgbe_mac_raptor;
+ break;
+ case TXGBE_DEV_ID_RAPTOR_XAUI:
+ case TXGBE_DEV_ID_RAPTOR_SGMII:
+ hw->phy.media_type = txgbe_media_type_copper;
+ hw->mac.type = txgbe_mac_raptor;
+ break;
+ case TXGBE_DEV_ID_RAPTOR_SFP:
+ case TXGBE_DEV_ID_WX1820_SFP:
+ hw->phy.media_type = txgbe_media_type_fiber;
+ hw->mac.type = txgbe_mac_raptor;
+ break;
+ case TXGBE_DEV_ID_RAPTOR_QSFP:
+ hw->phy.media_type = txgbe_media_type_fiber_qsfp;
+ hw->mac.type = txgbe_mac_raptor;
+ break;
+ case TXGBE_DEV_ID_RAPTOR_VF:
+ case TXGBE_DEV_ID_RAPTOR_VF_HV:
+ hw->phy.media_type = txgbe_media_type_virtual;
+ hw->mac.type = txgbe_mac_raptor_vf;
+ break;
+ default:
+ err = TXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ DEBUGOUT("Unsupported device id: %x", hw->device_id);
+ break;
+ }
+
+ DEBUGOUT("txgbe_set_mac_type found mac: %d media: %d, returns: %d\n",
+ hw->mac.type, hw->phy.media_type, err);
+ return err;
+}
+
+void txgbe_init_mac_link_ops(struct txgbe_hw *hw)
+{
+ struct txgbe_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("txgbe_init_mac_link_ops");
+
+ /*
+ * Enable the laser control functions for SFP+ fiber
+ * when MNG is not enabled.
+ */
+ if ((hw->phy.media_type == txgbe_media_type_fiber) &&
+ !txgbe_mng_enabled(hw)) {
+ mac->disable_tx_laser =
+ txgbe_disable_tx_laser_multispeed_fiber;
+ mac->enable_tx_laser =
+ txgbe_enable_tx_laser_multispeed_fiber;
+ mac->flap_tx_laser =
+ txgbe_flap_tx_laser_multispeed_fiber;
+ }
+
+ if ((hw->phy.media_type == txgbe_media_type_fiber ||
+ hw->phy.media_type == txgbe_media_type_fiber_qsfp) &&
+ hw->phy.multispeed_fiber) {
+ /* Set up dual speed SFP+ support */
+ mac->setup_link = txgbe_setup_mac_link_multispeed_fiber;
+ mac->setup_mac_link = txgbe_setup_mac_link;
+ mac->set_rate_select_speed = txgbe_set_hard_rate_select_speed;
+ } else if ((hw->phy.media_type == txgbe_media_type_backplane) &&
+ (hw->phy.smart_speed == txgbe_smart_speed_auto ||
+ hw->phy.smart_speed == txgbe_smart_speed_on) &&
+ !txgbe_verify_lesm_fw_enabled_raptor(hw)) {
+ mac->setup_link = txgbe_setup_mac_link_smartspeed;
+ } else {
+ mac->setup_link = txgbe_setup_mac_link;
+ }
+}
+
+/**
+ * txgbe_init_phy_raptor - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+s32 txgbe_init_phy_raptor(struct txgbe_hw *hw)
+{
+ struct txgbe_mac_info *mac = &hw->mac;
+ struct txgbe_phy_info *phy = &hw->phy;
+ s32 err = 0;
+
+ DEBUGFUNC("txgbe_init_phy_raptor");
+
+ if (hw->device_id == TXGBE_DEV_ID_RAPTOR_QSFP) {
+ /* Store flag indicating I2C bus access control unit. */
+ hw->phy.qsfp_shared_i2c_bus = TRUE;
+
+ /* Initialize access to QSFP+ I2C bus */
+ txgbe_flush(hw);
+ }
+
+ /* Identify the PHY or SFP module */
+ err = phy->identify(hw);
+ if (err == TXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto init_phy_ops_out;
+
+ /* Setup function pointers based on detected SFP module and speeds */
+ txgbe_init_mac_link_ops(hw);
+
+ /* If copper media, overwrite with copper function pointers */
+ if (phy->media_type == txgbe_media_type_copper) {
+ mac->setup_link = txgbe_setup_copper_link_raptor;
+ mac->get_link_capabilities =
+ txgbe_get_copper_link_capabilities;
+ }
+
+ /* Set necessary function pointers based on PHY type */
+ switch (hw->phy.type) {
+ case txgbe_phy_tn:
+ phy->setup_link = txgbe_setup_phy_link_tnx;
+ phy->check_link = txgbe_check_phy_link_tnx;
+ phy->get_firmware_version =
+ txgbe_get_phy_firmware_version_tnx;
+ break;
+ default:
+ break;
+ }
+init_phy_ops_out:
+ return err;
+}
+
+s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw)
+{
+ s32 err = 0;
+
+ DEBUGFUNC("txgbe_setup_sfp_modules");
+
+ if (hw->phy.sfp_type == txgbe_sfp_type_unknown)
+ return 0;
+
+ txgbe_init_mac_link_ops(hw);
+
+ /* PHY config will finish before releasing the semaphore */
+ err = hw->mac.acquire_swfw_sync(hw, TXGBE_MNGSEM_SWPHY);
+ if (err != 0)
+ return TXGBE_ERR_SWFW_SYNC;
+
+ /* Release the semaphore */
+ hw->mac.release_swfw_sync(hw, TXGBE_MNGSEM_SWPHY);
+
+ /* Delay obtaining the semaphore again to allow FW access;
+ * prot_autoc_write uses the semaphore too.
+ */
+ msec_delay(hw->rom.semaphore_delay);
+
+ if (err) {
+ DEBUGOUT("sfp module setup not complete\n");
+ return TXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ }
+
+ return err;
+}
+
+/**
+ * txgbe_prot_autoc_read_raptor - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: Returns whether the SW/FW lock was taken for this read.
+ * @value: Value we read from AUTOC
+ *
+ * For this part we need to wrap read-modify-writes with a possible
+ * FW/SW lock. It is assumed this lock will be freed with the next
+ * prot_autoc_write_raptor().
+ */
+s32 txgbe_prot_autoc_read_raptor(struct txgbe_hw *hw, bool *locked, u64 *value)
+{
+ s32 err;
+ bool lock_state = false;
+
+ /* If LESM is on then we need to hold the SW/FW semaphore. */
+ if (txgbe_verify_lesm_fw_enabled_raptor(hw)) {
+ err = hw->mac.acquire_swfw_sync(hw,
+ TXGBE_MNGSEM_SWPHY);
+ if (err != 0)
+ return TXGBE_ERR_SWFW_SYNC;
+
+ lock_state = true;
+ }
+
+ if (locked)
+ *locked = lock_state;
+
+ *value = txgbe_autoc_read(hw);
+ return 0;
+}
+
+/**
+ * txgbe_prot_autoc_write_raptor - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @autoc: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous prot_autoc_read_raptor.
+ *
+ * This part may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ */
+s32 txgbe_prot_autoc_write_raptor(struct txgbe_hw *hw, bool locked, u64 autoc)
+{
+ int err = 0;
+
+ /* Blocked by MNG FW so bail */
+ if (txgbe_check_reset_blocked(hw))
+ goto out;
+
+ /* We only need to get the lock if:
+ * - We didn't do it already (in the read part of a read-modify-write)
+ * - LESM is enabled.
+ */
+ if (!locked && txgbe_verify_lesm_fw_enabled_raptor(hw)) {
+ err = hw->mac.acquire_swfw_sync(hw,
+ TXGBE_MNGSEM_SWPHY);
+ if (err != 0)
+ return TXGBE_ERR_SWFW_SYNC;
+
+ locked = true;
+ }
+
+ txgbe_autoc_write(hw, autoc);
+ err = txgbe_reset_pipeline_raptor(hw);
+
+out:
+ /* Free the SW/FW semaphore as we either grabbed it here or
+ * already had it when this function was called.
+ */
+ if (locked)
+ hw->mac.release_swfw_sync(hw, TXGBE_MNGSEM_SWPHY);
+
+ return err;
+}
+
+/**
+ * txgbe_init_ops_pf - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type.
+ * Does not touch the hardware.
+ **/
+s32 txgbe_init_ops_pf(struct txgbe_hw *hw)
+{
+ struct txgbe_bus_info *bus = &hw->bus;
+ struct txgbe_mac_info *mac = &hw->mac;
+ struct txgbe_phy_info *phy = &hw->phy;
+ struct txgbe_rom_info *rom = &hw->rom;
+ struct txgbe_flash_info *flash = &hw->flash;
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+
+ UNREFERENCED_PARAMETER(flash);
+
+ DEBUGFUNC("txgbe_init_ops_pf");
+
+ /* BUS */
+ bus->set_lan_id = txgbe_set_lan_id_multi_port;
+
+ /* PHY */
+ phy->get_media_type = txgbe_get_media_type_raptor;
+ phy->identify = txgbe_identify_phy;
+ phy->read_reg = txgbe_read_phy_reg;
+ phy->write_reg = txgbe_write_phy_reg;
+ phy->read_reg_mdi = txgbe_read_phy_reg_mdi;
+ phy->write_reg_mdi = txgbe_write_phy_reg_mdi;
+ phy->setup_link = txgbe_setup_phy_link;
+ phy->setup_link_speed = txgbe_setup_phy_link_speed;
+ phy->get_firmware_version = txgbe_get_phy_firmware_version;
+ phy->read_i2c_byte = txgbe_read_i2c_byte;
+ phy->write_i2c_byte = txgbe_write_i2c_byte;
+ phy->read_i2c_sff8472 = txgbe_read_i2c_sff8472;
+ phy->read_i2c_eeprom = txgbe_read_i2c_eeprom;
+ phy->write_i2c_eeprom = txgbe_write_i2c_eeprom;
+ phy->identify_sfp = txgbe_identify_module;
+ phy->read_i2c_byte_unlocked = txgbe_read_i2c_byte_unlocked;
+ phy->write_i2c_byte_unlocked = txgbe_write_i2c_byte_unlocked;
+ phy->reset = txgbe_reset_phy;
+ phy->init = txgbe_init_phy_raptor;
+
+ /* MAC */
+ mac->init_hw = txgbe_init_hw;
+ mac->start_hw = txgbe_start_hw_raptor;
+ mac->clear_hw_cntrs = txgbe_clear_hw_cntrs;
+ mac->enable_rx_dma = txgbe_enable_rx_dma_raptor;
+ mac->get_mac_addr = txgbe_get_mac_addr;
+ mac->stop_hw = txgbe_stop_hw;
+ mac->acquire_swfw_sync = txgbe_acquire_swfw_sync;
+ mac->release_swfw_sync = txgbe_release_swfw_sync;
+
+ mac->reset_hw = txgbe_reset_hw;
+ mac->get_supported_physical_layer =
+ txgbe_get_supported_physical_layer_raptor;
+ mac->disable_sec_rx_path = txgbe_disable_sec_rx_path;
+ mac->enable_sec_rx_path = txgbe_enable_sec_rx_path;
+ mac->disable_sec_tx_path = txgbe_disable_sec_tx_path;
+ mac->enable_sec_tx_path = txgbe_enable_sec_tx_path;
+ mac->get_san_mac_addr = txgbe_get_san_mac_addr;
+ mac->set_san_mac_addr = txgbe_set_san_mac_addr;
+ mac->get_device_caps = txgbe_get_device_caps;
+ mac->get_wwn_prefix = txgbe_get_wwn_prefix;
+ mac->get_fcoe_boot_status = txgbe_get_fcoe_boot_status;
+ mac->autoc_read = txgbe_autoc_read;
+ mac->autoc_write = txgbe_autoc_write;
+ mac->prot_autoc_read = txgbe_prot_autoc_read_raptor;
+ mac->prot_autoc_write = txgbe_prot_autoc_write_raptor;
+
+ /* LEDs */
+ mac->led_on = txgbe_led_on;
+ mac->led_off = txgbe_led_off;
+
+ /* RAR, Multicast, VLAN */
+ mac->set_rar = txgbe_set_rar;
+ mac->clear_rar = txgbe_clear_rar;
+ mac->init_rx_addrs = txgbe_init_rx_addrs;
+ mac->update_uc_addr_list = txgbe_update_uc_addr_list;
+ mac->update_mc_addr_list = txgbe_update_mc_addr_list;
+ mac->enable_mc = txgbe_enable_mc;
+ mac->disable_mc = txgbe_disable_mc;
+ mac->enable_rx = txgbe_enable_rx;
+ mac->disable_rx = txgbe_disable_rx;
+ mac->set_vmdq = txgbe_set_vmdq;
+ mac->set_vmdq_san_mac = txgbe_set_vmdq_san_mac;
+ mac->clear_vmdq = txgbe_clear_vmdq;
+ mac->insert_mac_addr = txgbe_insert_mac_addr;
+ mac->set_vfta = txgbe_set_vfta;
+ mac->set_vlvf = txgbe_set_vlvf;
+ mac->clear_vfta = txgbe_clear_vfta;
+ mac->init_uta_tables = txgbe_init_uta_tables;
+ mac->setup_sfp = txgbe_setup_sfp_modules;
+ mac->set_mac_anti_spoofing = txgbe_set_mac_anti_spoofing;
+ mac->set_vlan_anti_spoofing = txgbe_set_vlan_anti_spoofing;
+
+ /* Flow Control */
+ mac->fc_enable = txgbe_fc_enable;
+ mac->setup_fc = txgbe_setup_fc;
+ mac->fc_autoneg = txgbe_fc_autoneg;
+
+ /* Link */
+ mac->get_link_capabilities = txgbe_get_link_capabilities_raptor;
+ mac->check_link = txgbe_check_mac_link;
+ mac->setup_pba = txgbe_set_pba;
+
+ /* Manageability interface */
+ mac->set_fw_drv_ver = txgbe_hic_set_drv_ver;
+ mac->get_thermal_sensor_data = txgbe_get_thermal_sensor_data;
+ mac->init_thermal_sensor_thresh = txgbe_init_thermal_sensor_thresh;
+ mac->get_rtrup2tc = txgbe_dcb_get_rtrup2tc;
+
+ mbx->init_params = txgbe_init_mbx_params_pf;
+ mbx->read = txgbe_read_mbx_pf;
+ mbx->write = txgbe_write_mbx_pf;
+ mbx->read_posted = txgbe_read_posted_mbx;
+ mbx->write_posted = txgbe_write_posted_mbx;
+ mbx->check_for_msg = txgbe_check_for_msg_pf;
+ mbx->check_for_ack = txgbe_check_for_ack_pf;
+ mbx->check_for_rst = txgbe_check_for_rst_pf;
+
+ /* EEPROM */
+ rom->init_params = txgbe_init_eeprom_params;
+ rom->read16 = txgbe_ee_read16;
+ rom->readw_buffer = txgbe_ee_readw_buffer;
+ rom->readw_sw = txgbe_ee_readw_sw;
+ rom->read32 = txgbe_ee_read32;
+ rom->write16 = txgbe_ee_write16;
+ rom->writew_buffer = txgbe_ee_writew_buffer;
+ rom->writew_sw = txgbe_ee_writew_sw;
+ rom->write32 = txgbe_ee_write32;
+ rom->validate_checksum = txgbe_validate_eeprom_checksum;
+ rom->update_checksum = txgbe_update_eeprom_checksum;
+ rom->calc_checksum = txgbe_calc_eeprom_checksum;
+
+ mac->rar_highwater = 1;
+ mac->mcft_size = TXGBE_RAPTOR_MC_TBL_SIZE;
+ mac->vft_size = TXGBE_RAPTOR_VFT_TBL_SIZE;
+ mac->num_rar_entries = TXGBE_RAPTOR_RAR_ENTRIES;
+ mac->rx_pb_size = TXGBE_RAPTOR_RX_PB_SIZE;
+ mac->max_rx_queues = TXGBE_RAPTOR_MAX_RX_QUEUES;
+ mac->max_tx_queues = TXGBE_RAPTOR_MAX_TX_QUEUES;
+
+ return 0;
+}
+
+/**
+ * txgbe_get_link_capabilities_raptor - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 txgbe_get_link_capabilities_raptor(struct txgbe_hw *hw,
+ u32 *speed,
+ bool *autoneg)
+{
+ s32 status = 0;
+ u32 autoc = 0;
+
+ DEBUGFUNC("txgbe_get_link_capabilities_raptor");
+
+ /* Check if 1G SFP module. */
+ if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1) {
+ *speed = TXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ return 0;
+ }
+
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults. If AUTOC value has not
+ * been stored, use the current register values.
+ */
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = hw->mac.autoc_read(hw);
+
+ switch (autoc & TXGBE_AUTOC_LMS_MASK) {
+ case TXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ *speed = TXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = false;
+ break;
+
+ case TXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ *speed = TXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ break;
+
+ case TXGBE_AUTOC_LMS_1G_AN:
+ *speed = TXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case TXGBE_AUTOC_LMS_10Gs:
+ *speed = TXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ break;
+
+ case TXGBE_AUTOC_LMS_KX4_KX_KR:
+ case TXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ *speed = TXGBE_LINK_SPEED_UNKNOWN;
+ if (autoc & TXGBE_AUTOC_KR_SUPP)
+ *speed |= TXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & TXGBE_AUTOC_KX4_SUPP)
+ *speed |= TXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & TXGBE_AUTOC_KX_SUPP)
+ *speed |= TXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case TXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
+ *speed = TXGBE_LINK_SPEED_100M_FULL;
+ if (autoc & TXGBE_AUTOC_KR_SUPP)
+ *speed |= TXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & TXGBE_AUTOC_KX4_SUPP)
+ *speed |= TXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & TXGBE_AUTOC_KX_SUPP)
+ *speed |= TXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case TXGBE_AUTOC_LMS_SGMII_1G_100M:
+ *speed = TXGBE_LINK_SPEED_1GB_FULL |
+ TXGBE_LINK_SPEED_100M_FULL |
+ TXGBE_LINK_SPEED_10M_FULL;
+ *autoneg = false;
+ break;
+
+ default:
+ return TXGBE_ERR_LINK_SETUP;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ *speed |= TXGBE_LINK_SPEED_10GB_FULL |
+ TXGBE_LINK_SPEED_1GB_FULL;
+
+ /* QSFP must not enable full auto-negotiation
+ * Limited autoneg is enabled at 1G
+ */
+ if (hw->phy.media_type == txgbe_media_type_fiber_qsfp)
+ *autoneg = false;
+ else
+ *autoneg = true;
+ }
+
+ return status;
+}
+
+/**
+ * txgbe_get_media_type_raptor - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+u32 txgbe_get_media_type_raptor(struct txgbe_hw *hw)
+{
+ u32 media_type;
+
+ DEBUGFUNC("txgbe_get_media_type_raptor");
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->phy.type) {
+ case txgbe_phy_cu_unknown:
+ case txgbe_phy_tn:
+ media_type = txgbe_media_type_copper;
+ return media_type;
+ default:
+ break;
+ }
+
+ switch (hw->device_id) {
+ case TXGBE_DEV_ID_RAPTOR_KR_KX_KX4:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = txgbe_media_type_backplane;
+ break;
+ case TXGBE_DEV_ID_RAPTOR_SFP:
+ case TXGBE_DEV_ID_WX1820_SFP:
+ media_type = txgbe_media_type_fiber;
+ break;
+ case TXGBE_DEV_ID_RAPTOR_QSFP:
+ media_type = txgbe_media_type_fiber_qsfp;
+ break;
+ case TXGBE_DEV_ID_RAPTOR_XAUI:
+ case TXGBE_DEV_ID_RAPTOR_SGMII:
+ media_type = txgbe_media_type_copper;
+ break;
+ default:
+ media_type = txgbe_media_type_unknown;
+ break;
+ }
+
+ return media_type;
+}
+
+/**
+ * txgbe_start_mac_link_raptor - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Configures link settings based on values in the txgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ **/
+s32 txgbe_start_mac_link_raptor(struct txgbe_hw *hw,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ bool got_lock = false;
+
+ DEBUGFUNC("txgbe_start_mac_link_raptor");
+
+ /* reset_pipeline requires us to hold this lock as it writes to
+ * AUTOC.
+ */
+ if (txgbe_verify_lesm_fw_enabled_raptor(hw)) {
+ status = hw->mac.acquire_swfw_sync(hw, TXGBE_MNGSEM_SWPHY);
+ if (status != 0)
+ goto out;
+
+ got_lock = true;
+ }
+
+ /* Restart link */
+ txgbe_reset_pipeline_raptor(hw);
+
+ if (got_lock)
+ hw->mac.release_swfw_sync(hw, TXGBE_MNGSEM_SWPHY);
+
+ /* Add delay to filter out noise during initial link setup */
+ msec_delay(50);
+
+out:
+ return status;
+}
+
+/**
+ * txgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting physical link.
+ **/
+void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+{
+ u32 esdp_reg = rd32(hw, TXGBE_GPIODATA);
+
+ /* Blocked by MNG FW so bail */
+ if (txgbe_check_reset_blocked(hw))
+ return;
+
+ /* Disable Tx laser; allow 100us to go dark per spec */
+ esdp_reg |= (TXGBE_GPIOBIT_0 | TXGBE_GPIOBIT_1);
+ wr32(hw, TXGBE_GPIODATA, esdp_reg);
+ txgbe_flush(hw);
+ usec_delay(100);
+}
+
+/**
+ * txgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively turning on the Tx
+ * laser on the PHY, effectively starting physical link.
+ **/
+void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+{
+ u32 esdp_reg = rd32(hw, TXGBE_GPIODATA);
+
+ /* Enable Tx laser; allow 100ms to light up */
+ esdp_reg &= ~(TXGBE_GPIOBIT_0 | TXGBE_GPIOBIT_1);
+ wr32(hw, TXGBE_GPIODATA, esdp_reg);
+ txgbe_flush(hw);
+ msec_delay(100);
+}
+
+/**
+ * txgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to true to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the Tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with true clause 37 autoneg, which also
+ * involves a loss of signal.
+ **/
+void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw)
+{
+ DEBUGFUNC("txgbe_flap_tx_laser_multispeed_fiber");
+
+ /* Blocked by MNG FW so bail */
+ if (txgbe_check_reset_blocked(hw))
+ return;
+
+ if (hw->mac.autotry_restart) {
+ txgbe_disable_tx_laser_multispeed_fiber(hw);
+ txgbe_enable_tx_laser_multispeed_fiber(hw);
+ hw->mac.autotry_restart = false;
+ }
+}
+
+/**
+ * txgbe_set_hard_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via RS0/RS1 rate select pins.
+ */
+void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw,
+ u32 speed)
+{
+ u32 esdp_reg = rd32(hw, TXGBE_GPIODATA);
+
+ switch (speed) {
+ case TXGBE_LINK_SPEED_10GB_FULL:
+ esdp_reg |= (TXGBE_GPIOBIT_4 | TXGBE_GPIOBIT_5);
+ break;
+ case TXGBE_LINK_SPEED_1GB_FULL:
+ esdp_reg &= ~(TXGBE_GPIOBIT_4 | TXGBE_GPIOBIT_5);
+ break;
+ default:
+ DEBUGOUT("Invalid fixed module speed\n");
+ return;
+ }
+
+ wr32(hw, TXGBE_GPIODATA, esdp_reg);
+ txgbe_flush(hw);
+}
+
+/**
+ * txgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Implements the Intel SmartSpeed algorithm.
+ **/
+s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw,
+ u32 speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+ s32 i, j;
+ bool link_up = false;
+ u32 autoc_reg = rd32_epcs(hw, SR_AN_MMD_ADV_REG1);
+
+ DEBUGFUNC("txgbe_setup_mac_link_smartspeed");
+
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & TXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & TXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & TXGBE_LINK_SPEED_100M_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_100M_FULL;
+
+ /*
+ * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
+ * autoneg advertisement if link is unable to be established at the
+ * highest negotiated rate. This can sometimes happen due to integrity
+ * issues with the physical media connection.
+ */
+
+ /* First, try to get link with full advertisement */
+ hw->phy.smart_speed_active = false;
+ for (j = 0; j < TXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+ status = txgbe_setup_mac_link(hw, speed,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+ * Table 9 in the AN MAS.
+ */
+ for (i = 0; i < 5; i++) {
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status != 0)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ /*
+ * We didn't get link. If we advertised KR plus one of KX4/KX
+ * (or BX4/BX), then disable KR and try again.
+ */
+ if (((autoc_reg & TXGBE_AUTOC_KR_SUPP) == 0) ||
+ ((autoc_reg & TXGBE_AUTOC_KX_SUPP) == 0 &&
+ (autoc_reg & TXGBE_AUTOC_KX4_SUPP) == 0))
+ goto out;
+
+ /* Turn SmartSpeed on to disable KR support */
+ hw->phy.smart_speed_active = true;
+ status = txgbe_setup_mac_link(hw, speed,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. 600ms will allow for
+ * the AN link_fail_inhibit_timer as well as for multiple cycles of
+ * parallel detect, both 10g and 1g. This allows for the maximum
+ * connect attempts as defined in the AN MAS table 73-7.
+ */
+ for (i = 0; i < 6; i++) {
+ msec_delay(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.check_link(hw, &link_speed, &link_up, false);
+ if (status != 0)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Turn SmartSpeed back off. */
+ hw->phy.smart_speed_active = false;
+ status = txgbe_setup_mac_link(hw, speed,
+ autoneg_wait_to_complete);
+
+out:
+ if (link_up && (link_speed == TXGBE_LINK_SPEED_1GB_FULL))
+ DEBUGOUT("Smartspeed has downgraded the link speed "
+ "from the maximum advertised\n");
+ return status;
+}
+
+/**
+ * txgbe_setup_mac_link - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+s32 txgbe_setup_mac_link(struct txgbe_hw *hw,
+ u32 speed,
+ bool autoneg_wait_to_complete)
+{
+ bool autoneg = false;
+ s32 status = 0;
+
+ u64 autoc = hw->mac.autoc_read(hw);
+ u64 pma_pmd_10gs = autoc & TXGBE_AUTOC_10Gs_PMA_PMD_MASK;
+ u64 pma_pmd_1g = autoc & TXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u64 link_mode = autoc & TXGBE_AUTOC_LMS_MASK;
+ u64 current_autoc = autoc;
+ u64 orig_autoc = 0;
+ u32 links_reg;
+ u32 i;
+ u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN;
+
+ DEBUGFUNC("txgbe_setup_mac_link");
+
+ /* Check to see if speed passed in is supported. */
+ status = hw->mac.get_link_capabilities(hw,
+ &link_capabilities, &autoneg);
+ if (status)
+ return status;
+
+ speed &= link_capabilities;
+ if (speed == TXGBE_LINK_SPEED_UNKNOWN)
+ return TXGBE_ERR_LINK_SETUP;
+
+ /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
+ if (hw->mac.orig_link_settings_stored)
+ orig_autoc = hw->mac.orig_autoc;
+ else
+ orig_autoc = autoc;
+
+ if (link_mode == TXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == TXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == TXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ /* Set KX4/KX/KR support according to speed requested */
+ autoc &= ~(TXGBE_AUTOC_KX_SUPP |
+ TXGBE_AUTOC_KX4_SUPP |
+ TXGBE_AUTOC_KR_SUPP);
+ if (speed & TXGBE_LINK_SPEED_10GB_FULL) {
+ if (orig_autoc & TXGBE_AUTOC_KX4_SUPP)
+ autoc |= TXGBE_AUTOC_KX4_SUPP;
+ if ((orig_autoc & TXGBE_AUTOC_KR_SUPP) &&
+ (hw->phy.smart_speed_active == false))
+ autoc |= TXGBE_AUTOC_KR_SUPP;
+ }
+ if (speed & TXGBE_LINK_SPEED_1GB_FULL)
+ autoc |= TXGBE_AUTOC_KX_SUPP;
+ } else if ((pma_pmd_1g == TXGBE_AUTOC_1G_SFI) &&
+ (link_mode == TXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == TXGBE_AUTOC_LMS_1G_AN)) {
+ /* Switch from 1G SFI to 10G SFI if requested */
+ if ((speed == TXGBE_LINK_SPEED_10GB_FULL) &&
+ (pma_pmd_10gs == TXGBE_AUTOC_10Gs_SFI)) {
+ autoc &= ~TXGBE_AUTOC_LMS_MASK;
+ autoc |= TXGBE_AUTOC_LMS_10Gs;
+ }
+ } else if ((pma_pmd_10gs == TXGBE_AUTOC_10Gs_SFI) &&
+ (link_mode == TXGBE_AUTOC_LMS_10Gs)) {
+ /* Switch from 10G SFI to 1G SFI if requested */
+ if ((speed == TXGBE_LINK_SPEED_1GB_FULL) &&
+ (pma_pmd_1g == TXGBE_AUTOC_1G_SFI)) {
+ autoc &= ~TXGBE_AUTOC_LMS_MASK;
+ if (autoneg || hw->phy.type == txgbe_phy_qsfp_intel)
+ autoc |= TXGBE_AUTOC_LMS_1G_AN;
+ else
+ autoc |= TXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+ }
+ }
+
+ if (autoc == current_autoc)
+ return status;
+
+ autoc &= ~TXGBE_AUTOC_SPEED_MASK;
+ autoc |= TXGBE_AUTOC_SPEED(speed);
+ autoc |= (autoneg ? TXGBE_AUTOC_AUTONEG : 0);
+
+ /* Restart link */
+ hw->mac.autoc_write(hw, autoc);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if (link_mode == TXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == TXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == TXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ links_reg = 0; /* Just in case autoneg time = 0 */
+ for (i = 0; i < TXGBE_AUTO_NEG_TIME; i++) {
+ links_reg = rd32(hw, TXGBE_PORTSTAT);
+ if (links_reg & TXGBE_PORTSTAT_UP)
+ break;
+ msec_delay(100);
+ }
+ if (!(links_reg & TXGBE_PORTSTAT_UP)) {
+ status = TXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ DEBUGOUT("Autoneg did not complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noise during initial link setup */
+ msec_delay(50);
+
+ return status;
+}
+
+/**
+ * txgbe_setup_copper_link_raptor - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ * Restarts link on PHY and MAC based on settings passed in.
+ **/
+STATIC s32 txgbe_setup_copper_link_raptor(struct txgbe_hw *hw,
+ u32 speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+
+ DEBUGFUNC("txgbe_setup_copper_link_raptor");
+
+ /* Setup the PHY according to input speed */
+ status = hw->phy.setup_link_speed(hw, speed,
+ autoneg_wait_to_complete);
+ /* Set up MAC */
+ txgbe_start_mac_link_raptor(hw, autoneg_wait_to_complete);
+
+ return status;
+}
+
+static int
+txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
+{
+ u32 reg = 0;
+ u32 i;
+ int err = 0;
+ /* if flash is present */
+ if (!(rd32(hw, TXGBE_SPISTAT) & TXGBE_SPISTAT_BPFLASH)) {
+ /* wait for hw to finish loading flash */
+ for (i = 0; i < 10; i++) {
+ reg = rd32(hw, TXGBE_ILDRSTAT);
+ if (!(reg & check_bit)) {
+ /* done */
+ break;
+ }
+ msec_delay(100);
+ }
+ if (i == 10)
+ err = TXGBE_ERR_FLASH_LOADING_FAILED;
+ }
+ return err;
+}
+
+static void
+txgbe_reset_misc(struct txgbe_hw *hw)
+{
+ int i;
+ u32 value;
+
+ wr32(hw, TXGBE_ISBADDRL, hw->isb_dma & 0x00000000FFFFFFFF);
+ wr32(hw, TXGBE_ISBADDRH, hw->isb_dma >> 32);
+
+ value = rd32_epcs(hw, SR_XS_PCS_CTRL2);
+ if ((value & 0x3) != SR_PCS_CTRL2_TYPE_SEL_X)
+ hw->link_status = TXGBE_LINK_STATUS_NONE;
+
+ /* receive packets that size > 2048 */
+ wr32m(hw, TXGBE_MACRXCFG,
+ TXGBE_MACRXCFG_JUMBO, TXGBE_MACRXCFG_JUMBO);
+
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
+
+ /* clear counters on read */
+ wr32m(hw, TXGBE_MACCNTCTL,
+ TXGBE_MACCNTCTL_RC, TXGBE_MACCNTCTL_RC);
+
+ wr32m(hw, TXGBE_RXFCCFG,
+ TXGBE_RXFCCFG_FC, TXGBE_RXFCCFG_FC);
+ wr32m(hw, TXGBE_TXFCCFG,
+ TXGBE_TXFCCFG_FC, TXGBE_TXFCCFG_FC);
+
+ wr32m(hw, TXGBE_MACRXFLT,
+ TXGBE_MACRXFLT_PROMISC, TXGBE_MACRXFLT_PROMISC);
+
+ wr32m(hw, TXGBE_RSTSTAT,
+ TXGBE_RSTSTAT_TMRINIT_MASK, TXGBE_RSTSTAT_TMRINIT(30));
+
+ /* errata 4: initialize mng flex tbl and wakeup flex tbl*/
+ wr32(hw, TXGBE_MNGFLEXSEL, 0);
+ for (i = 0; i < 16; i++) {
+ wr32(hw, TXGBE_MNGFLEXDWL(i), 0);
+ wr32(hw, TXGBE_MNGFLEXDWH(i), 0);
+ wr32(hw, TXGBE_MNGFLEXMSK(i), 0);
+ }
+ wr32(hw, TXGBE_LANFLEXSEL, 0);
+ for (i = 0; i < 16; i++) {
+ wr32(hw, TXGBE_LANFLEXDWL(i), 0);
+ wr32(hw, TXGBE_LANFLEXDWH(i), 0);
+ wr32(hw, TXGBE_LANFLEXMSK(i), 0);
+ }
+
+ /* set pause frame dst mac addr */
+ wr32(hw, TXGBE_RXPBPFCDMACL, 0xC2000001);
+ wr32(hw, TXGBE_RXPBPFCDMACH, 0x0180);
+
+ txgbe_init_thermal_sensor_thresh(hw);
+
+ /* enable mac transmitter */
+ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TE, TXGBE_MACTXCFG_TE);
+
+ for (i = 0; i < 4; i++)
+ wr32m(hw, TXGBE_IVAR(i), 0x80808080, 0);
+}
+
+/**
+ * txgbe_reset_hw - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link
+ * (MAC) reset.
+ **/
+s32 txgbe_reset_hw(struct txgbe_hw *hw)
+{
+ s32 status;
+ u32 autoc;
+
+ DEBUGFUNC("txgbe_reset_hw");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.stop_hw(hw);
+ if (status != 0)
+ return status;
+
+ /* flush pending Tx transactions */
+ txgbe_clear_tx_pending(hw);
+
+ /* Identify PHY and related function pointers */
+ status = hw->phy.init(hw);
+ if (status == TXGBE_ERR_SFP_NOT_SUPPORTED)
+ return status;
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+ if (status == TXGBE_ERR_SFP_NOT_SUPPORTED)
+ return status;
+
+ /* Reset PHY */
+ if (hw->phy.reset_disable == false)
+ hw->phy.reset(hw);
+
+ /* remember AUTOC from before we reset */
+ autoc = hw->mac.autoc_read(hw);
+
+mac_reset_top:
+ /*
+ * Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ if (txgbe_mng_present(hw)) {
+ txgbe_hic_reset(hw);
+ } else {
+ wr32(hw, TXGBE_RST, TXGBE_RST_LAN(hw->bus.lan_id));
+ txgbe_flush(hw);
+ }
+ usec_delay(10);
+
+ txgbe_reset_misc(hw);
+
+ if (hw->bus.lan_id == 0) {
+ status = txgbe_check_flash_load(hw,
+ TXGBE_ILDRSTAT_SWRST_LAN0);
+ } else {
+ status = txgbe_check_flash_load(hw,
+ TXGBE_ILDRSTAT_SWRST_LAN1);
+ }
+ if (status != 0)
+ return status;
+
+ msec_delay(50);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to
+ * allow time for any pending HW events to complete.
+ */
+ if (hw->mac.flags & TXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~TXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /*
+ * Store the original AUTOC/AUTOC2 values if they have not been
+ * stored off yet. Otherwise restore the stored original
+ * values since the reset operation sets back to defaults.
+ */
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = hw->mac.autoc_read(hw);
+ hw->mac.autoc_write(hw, hw->mac.orig_autoc);
+ hw->mac.orig_link_settings_stored = true;
+ } else {
+ hw->mac.orig_autoc = autoc;
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.init_rx_addrs(hw);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (txgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
+ hw->mac.set_rar(hw, hw->mac.san_mac_rar_index,
+ hw->mac.san_addr, 0, true);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.clear_vmdq(hw, hw->mac.san_mac_rar_index,
+ BIT_MASK32);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+ return status;
+}
+
+/**
+ * txgbe_fdir_check_cmd_complete - poll to check whether FDIRPICMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
+ */
+STATIC s32 txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, u32 *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < TXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = rd32(hw, TXGBE_FDIRPICMD);
+ if (!(*fdircmd & TXGBE_FDIRPICMD_OP_MASK))
+ return 0;
+ usec_delay(10);
+ }
+
+ return TXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
+/**
+ * txgbe_reinit_fdir_tables - Reinitialize Flow Director tables.
+ * @hw: pointer to hardware structure
+ **/
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw)
+{
+ s32 err;
+ int i;
+ u32 fdirctrl = rd32(hw, TXGBE_FDIRCTL);
+ u32 fdircmd;
+
+ DEBUGFUNC("txgbe_reinit_fdir_tables");
+
+ fdirctrl &= ~TXGBE_FDIRCTL_INITDONE;
+
+ /*
+ * Before starting reinitialization process,
+ * FDIRPICMD.OP must be zero.
+ */
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
+ return err;
+ }
+
+ wr32(hw, TXGBE_FDIRFREE, 0);
+ txgbe_flush(hw);
+ /*
+ * The adapter's flow director init flow cannot be restarted, so
+ * work around the silicon errata by performing the following steps
+ * before re-writing the FDIRCTL control register with the same value:
+ * - write 1 to bit 8 of the FDIRPICMD register, then
+ * - write 0 to bit 8 of the FDIRPICMD register.
+ */
+ wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, TXGBE_FDIRPICMD_CLR);
+ txgbe_flush(hw);
+ wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, 0);
+ txgbe_flush(hw);
+ /*
+ * Clear FDIR Hash register to clear any leftover hashes
+ * waiting to be programmed.
+ */
+ wr32(hw, TXGBE_FDIRPIHASH, 0x00);
+ txgbe_flush(hw);
+
+ wr32(hw, TXGBE_FDIRCTL, fdirctrl);
+ txgbe_flush(hw);
+
+ /* Poll init-done after we write FDIRCTL register */
+ for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (rd32m(hw, TXGBE_FDIRCTL, TXGBE_FDIRCTL_INITDONE))
+ break;
+ msec_delay(1);
+ }
+ if (i >= TXGBE_FDIR_INIT_DONE_POLL) {
+ DEBUGOUT("Flow Director Signature poll time exceeded!\n");
+ return TXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+ rd32(hw, TXGBE_FDIRUSED);
+ rd32(hw, TXGBE_FDIRFAIL);
+ rd32(hw, TXGBE_FDIRMATCH);
+ rd32(hw, TXGBE_FDIRMISS);
+ rd32(hw, TXGBE_FDIRLEN);
+
+ return 0;
+}
+
+/**
+ * txgbe_fdir_enable_raptor - Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+STATIC void txgbe_fdir_enable_raptor(struct txgbe_hw *hw, u32 fdirctrl)
+{
+ int i;
+
+ DEBUGFUNC("txgbe_fdir_enable_raptor");
+
+ /* Prime the keys for hashing */
+ wr32(hw, TXGBE_FDIRBKTHKEY, TXGBE_ATR_BUCKET_HASH_KEY);
+ wr32(hw, TXGBE_FDIRSIGHKEY, TXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiply these timings by 4 if under full Rx load
+ *
+ * So we'll poll for TXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ wr32(hw, TXGBE_FDIRCTL, fdirctrl);
+ txgbe_flush(hw);
+ for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (rd32(hw, TXGBE_FDIRCTL) &
+ TXGBE_FDIRCTL_INITDONE)
+ break;
+ msec_delay(1);
+ }
+
+ if (i >= TXGBE_FDIR_INIT_DONE_POLL)
+ DEBUGOUT("Flow Director poll time exceeded!\n");
+}
+
+/**
+ * txgbe_init_fdir_signature_raptor - Initialize Flow Director signature filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ **/
+s32 txgbe_init_fdir_signature_raptor(struct txgbe_hw *hw, u32 fdirctrl)
+{
+ int i;
+
+ DEBUGFUNC("txgbe_init_fdir_signature_raptor");
+
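+ /* Configure the flexible-bytes match for all 64 flows: each
+ * FDIRFLEXCFG register packs four flows (i / 4 selects the register,
+ * i % 4 the lane), and the match is based at the MAC header with a
+ * 6-word (12-byte) offset, which lands on the EtherType field.
+ */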
+ for (i = 0; i < 64; i++) {
+ uint32_t flexreg, flex;
+ flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
+ flex = TXGBE_FDIRFLEXCFG_BASE_MAC;
+ flex |= TXGBE_FDIRFLEXCFG_OFST(0x6);
+ flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
+ flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
+ wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
+ }
+
+ fdirctrl |= TXGBE_FDIRCTL_HASHBITS(0xF) |
+ TXGBE_FDIRCTL_MAXLEN(0xA) |
+ TXGBE_FDIRCTL_FULLTHR(4);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ txgbe_fdir_enable_raptor(hw, fdirctrl);
+
+ return 0;
+}
+
+/**
+ * txgbe_init_fdir_perfect - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ * @cloud_mode: true - cloud mode, false - other mode
+ **/
+s32 txgbe_init_fdir_perfect(struct txgbe_hw *hw, u32 fdirctrl,
+ bool cloud_mode)
+{
+ int i;
+
+ UNREFERENCED_PARAMETER(cloud_mode);
+ DEBUGFUNC("txgbe_init_fdir_perfect");
+
+ for (i = 0; i < 64; i++) {
+ uint32_t flexreg, flex;
+ flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
+ flex = TXGBE_FDIRFLEXCFG_BASE_MAC;
+ flex |= TXGBE_FDIRFLEXCFG_OFST(0x6);
+ flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
+ flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
+ wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
+ }
+
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Turn perfect match filtering on
+ * Report hash in RSS field of Rx wb descriptor
+ * Initialize the drop queue to queue 127
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 (0x4 * 16) filters are left
+ */
+ fdirctrl |= TXGBE_FDIRCTL_REPORT_MATCH;
+ fdirctrl |= TXGBE_FDIRCTL_PERFECT |
+ TXGBE_FDIRCTL_DROPQP(TXGBE_FDIR_DROP_QUEUE) |
+ TXGBE_FDIRCTL_MAXLEN(0xA) |
+ TXGBE_FDIRCTL_FULLTHR(4);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ txgbe_fdir_enable_raptor(hw, fdirctrl);
+
+ return 0;
+}
+
+/**
+ * txgbe_set_fdir_drop_queue_raptor - Set Flow Director drop queue
+ * @hw: pointer to hardware structure
+ * @dropqueue: Rx queue index used for the dropped packets
+ **/
+void txgbe_set_fdir_drop_queue_raptor(struct txgbe_hw *hw, u8 dropqueue)
+{
+ u32 fdirctrl;
+
+ DEBUGFUNC("txgbe_set_fdir_drop_queue_raptor");
+ /* Clear init done bit and drop queue field */
+ fdirctrl = rd32(hw, TXGBE_FDIRCTL);
+ fdirctrl &= ~(TXGBE_FDIRCTL_DROPQP_MASK | TXGBE_FDIRCTL_INITDONE);
+
+ /* Set drop queue */
+ fdirctrl |= TXGBE_FDIRCTL_DROPQP(dropqueue);
+
+ wr32(hw, TXGBE_FDIRPICMD,
+ (rd32(hw, TXGBE_FDIRPICMD) |
+ TXGBE_FDIRPICMD_CLR));
+ txgbe_flush(hw);
+ wr32(hw, TXGBE_FDIRPICMD,
+ (rd32(hw, TXGBE_FDIRPICMD) &
+ ~TXGBE_FDIRPICMD_CLR));
+ txgbe_flush(hw);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ txgbe_fdir_enable_raptor(hw, fdirctrl);
+}
+
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out TXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define TXGBE_ATR_COMMON_HASH_KEY \
+ (TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY)
+#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
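+
+/*
+ * For a given n, the macro tests key bit n against lo_hash_dword and key
+ * bit n + 16 against hi_hash_dword, folding each selected dword into the
+ * common, bucket, or signature accumulator; the 16 invocations below
+ * cover all 32 key bits.
+ */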
+
+/**
+ * txgbe_atr_compute_sig_hash_raptor - Compute the signature hash
+ * @input: input bitstream to compute the hash on
+ * @common: compressed common input dword
+ *
+ * This function is a heavily optimized version of the ATR hash
+ * computation: it unwinds all of the loops, lets the compiler resolve
+ * the conditionals (since the keys are static defines), and computes two
+ * keys at once, since the hashed dword stream is the same for both keys.
+ **/
+u32 txgbe_atr_compute_sig_hash_raptor(union txgbe_atr_hash_dword input,
+ union txgbe_atr_hash_dword common)
+{
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = be_to_cpu32(input.dword);
+
+ /* generate common hash dword */
+ hi_hash_dword = be_to_cpu32(common.dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the VLAN until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= TXGBE_ATR_HASH_MASK;
+
+ sig_hash ^= common_hash << 16;
+ sig_hash &= TXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ return sig_hash ^ bucket_hash;
+}
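+
+/*
+ * The returned dword packs the signature hash into its upper 16 bits and
+ * the bucket hash (already folded with the common hash) into its lower
+ * bits, which is the layout the FDIRPIHASH write below expects.
+ */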
+
+/**
+ * txgbe_fdir_add_signature_filter_raptor - Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @common: compressed common input dword
+ * @queue: queue index to direct traffic to
+ *
+ * Note that the tunnel bit in input must not be set when the hardware
+ * tunneling support does not exist.
+ **/
+void txgbe_fdir_add_signature_filter_raptor(struct txgbe_hw *hw,
+ union txgbe_atr_hash_dword input,
+ union txgbe_atr_hash_dword common,
+ u8 queue)
+{
+ u64 fdirhashcmd;
+ u8 flow_type;
+ u32 fdircmd;
+
+ DEBUGFUNC("txgbe_fdir_add_signature_filter_raptor");
+
+ flow_type = input.formatted.flow_type;
+ switch (flow_type) {
+ case TXGBE_ATR_FLOW_TYPE_TCPV4:
+ case TXGBE_ATR_FLOW_TYPE_UDPV4:
+ case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case TXGBE_ATR_FLOW_TYPE_TCPV6:
+ case TXGBE_ATR_FLOW_TYPE_UDPV6:
+ case TXGBE_ATR_FLOW_TYPE_SCTPV6:
+ break;
+ default:
+ DEBUGOUT(" Error on flow type input\n");
+ return;
+ }
+
+ /* configure FDIRPICMD register */
+ fdircmd = TXGBE_FDIRPICMD_OP_ADD | TXGBE_FDIRPICMD_UPD |
+ TXGBE_FDIRPICMD_LAST | TXGBE_FDIRPICMD_QPENA;
+ fdircmd |= TXGBE_FDIRPICMD_FT(flow_type);
+ fdircmd |= TXGBE_FDIRPICMD_QP(queue);
+
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRPICMD. Then do a 64-bit register write from FDIRPIHASH.
+ */
+ fdirhashcmd = (u64)fdircmd << 32;
+ fdirhashcmd |= txgbe_atr_compute_sig_hash_raptor(input, common);
+ wr64(hw, TXGBE_FDIRPIHASH, fdirhashcmd);
+
+ DEBUGOUT("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
+ return;
+}
+
+#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ * txgbe_atr_compute_perfect_hash_raptor - Compute the perfect filter hash
+ * @input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First it applies the input_mask
+ * to the atr_input resulting in a cleaned up atr_input data stream.
+ * Secondly it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void txgbe_atr_compute_perfect_hash_raptor(struct txgbe_atr_input *input,
+ struct txgbe_atr_input *input_mask)
+{
+ __be32 *dword_stream = (__be32 *)input;
+ __be32 *mask_stream = (__be32 *)input_mask;
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 bucket_hash = 0;
+ u32 hi_dword = 0;
+ u32 i = 0;
+
+ /* Apply masks to input data */
+ for (i = 0; i < 14; i++)
+ dword_stream[i] &= mask_stream[i];
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = be_to_cpu32(dword_stream[0]);
+
+ /* generate common hash dword */
+ for (i = 1; i <= 13; i++)
+ hi_dword ^= dword_stream[i];
+ hi_hash_dword = be_to_cpu32(hi_dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ TXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the VLAN until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ for (i = 1; i <= 15; i++)
+ TXGBE_COMPUTE_BKT_HASH_ITERATION(i);
+
+ /*
+ * Limit hash to 13 bits since max bucket count is 8K.
+ * Store result at the end of the input stream.
+ */
+ input->bkt_hash = bucket_hash & 0x1FFF;
+}
+
+/**
+ * txgbe_get_fdirtcpm_raptor - generate a TCP port from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 affects bit 0, bit 14 affects bit 1, and so on. To
+ * generate a correctly swapped value we need to bit swap the mask, and
+ * that is what this function accomplishes.
+ **/
+STATIC u32 txgbe_get_fdirtcpm_raptor(struct txgbe_atr_input *input_mask)
+{
+ u32 mask = be_to_cpu16(input_mask->dst_port);
+ mask <<= 16;
+ mask |= be_to_cpu16(input_mask->src_port);
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
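+
+/*
+ * The four swap stages above (1-, 2-, 4- and 8-bit groups, with no final
+ * 16-bit swap) reverse the bit order within each 16-bit half, leaving the
+ * bit-reversed dst_port mask in the upper half and the bit-reversed
+ * src_port mask in the lower half.
+ */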
+
+s32 txgbe_fdir_set_input_mask_raptor(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input_mask, bool cloud_mode)
+{
+ /* mask IPv6 since it is currently not supported */
+ u32 fdirm = 0;
+ u32 fdirtcpm;
+
+ UNREFERENCED_PARAMETER(cloud_mode);
+ DEBUGFUNC("txgbe_fdir_set_atr_input_mask_raptor");
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->bkt_hash)
+ DEBUGOUT(" bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRMSK and verify partial masks */
+ switch (input_mask->vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= TXGBE_FDIRMSK_POOL;
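+ /* fall through */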
+ case 0x7F:
+ break;
+ default:
+ DEBUGOUT(" Error on vm pool mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->flow_type & TXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= TXGBE_FDIRMSK_L4P;
+ if (input_mask->dst_port ||
+ input_mask->src_port) {
+ DEBUGOUT(" Error on src/dst port mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
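+ /* fall through */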
+ case TXGBE_ATR_L4TYPE_MASK:
+ break;
+ default:
+ DEBUGOUT(" Error on flow type mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+
+ switch (be_to_cpu16(input_mask->pkt_type) & 0xFF) {
+ case 0xFF:
+ /* no PTYPE fields masked */
+ break;
+ default:
+ DEBUGOUT(" Error on PTYPE mask\n");
+ return TXGBE_ERR_CONFIG;
+ }
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ wr32(hw, TXGBE_FDIRMSK, fdirm);
+
+ if (!cloud_mode) {
+ /* store the TCP/UDP port masks, bit reversed from port
+ * layout */
+ fdirtcpm = txgbe_get_fdirtcpm_raptor(input_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ wr32(hw, TXGBE_FDIRTCPMSK, ~fdirtcpm);
+ wr32(hw, TXGBE_FDIRUDPMSK, ~fdirtcpm);
+ /* also use it for SCTP */
+ wr32(hw, TXGBE_FDIRSCTPMSK, ~fdirtcpm);
+
+ /* store source and destination IP masks (big-endian) */
+ wr32(hw, TXGBE_FDIRSIP4MSK,
+ be_to_cpu32(~input_mask->src_ip[0]));
+ wr32(hw, TXGBE_FDIRDIP4MSK,
+ be_to_cpu32(~input_mask->dst_ip[0]));
+ }
+ return 0;
+}
+
+s32 txgbe_fdir_write_perfect_filter_raptor(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input,
+ u16 soft_id, u8 queue, bool cloud_mode)
+{
+ u32 fdirport, fdirvlan, fdirhash, fdircmd;
+ s32 err;
+ UNREFERENCED_PARAMETER(cloud_mode);
+
+ DEBUGFUNC("txgbe_fdir_write_perfect_filter_raptor");
+ if (!cloud_mode) {
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ wr32(hw, TXGBE_FDIRPISIP6(0), be_to_cpu32(input->src_ip[0]));
+ wr32(hw, TXGBE_FDIRPISIP6(1), be_to_cpu32(input->src_ip[1]));
+ wr32(hw, TXGBE_FDIRPISIP6(2), be_to_cpu32(input->src_ip[2]));
+
+ /* record the source address (big-endian) */
+ wr32(hw, TXGBE_FDIRPISIP4, be_to_cpu32(input->src_ip[0]));
+
+ /* record the first 32 bits of the destination address
+ * (big-endian) */
+ wr32(hw, TXGBE_FDIRPIDIP4, be_to_cpu32(input->dst_ip[0]));
+
+ /* record source and destination port (little-endian)*/
+ fdirport = TXGBE_FDIRPIPORT_DST(be_to_cpu16(input->dst_port));
+ fdirport |= be_to_cpu16(input->src_port);
+ wr32(hw, TXGBE_FDIRPIPORT, fdirport);
+ }
+
+ /* record VLAN (little-endian) and flex_bytes(big-endian) */
+ fdirvlan = TXGBE_FDIRPIFLEX_FLEX(be_to_cpu16(input->flex_bytes));
+ fdirvlan |= TXGBE_FDIRPIFLEX_PTYPE(be_to_cpu16(input->pkt_type));
+ wr32(hw, TXGBE_FDIRPIFLEX, fdirvlan);
+
+ /* configure FDIRPIHASH register */
+ fdirhash = input->bkt_hash | TXGBE_FDIRPIHASH_VLD;
+ fdirhash |= TXGBE_FDIRPIHASH_IDX(soft_id);
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ txgbe_flush(hw);
+
+ /* configure FDIRPICMD register */
+ fdircmd = TXGBE_FDIRPICMD_OP_ADD | TXGBE_FDIRPICMD_UPD |
+ TXGBE_FDIRPICMD_LAST | TXGBE_FDIRPICMD_QPENA;
+ if (queue == TXGBE_FDIR_DROP_QUEUE)
+ fdircmd |= TXGBE_FDIRPICMD_DROP;
+
+ fdircmd |= TXGBE_FDIRPICMD_FT(input->flow_type);
+ fdircmd |= TXGBE_FDIRPICMD_QP(queue);
+ fdircmd |= TXGBE_FDIRPICMD_POOL(input->vm_pool);
+
+ wr32(hw, TXGBE_FDIRPICMD, fdircmd);
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director command did not complete!\n");
+ return err;
+ }
+
+ return 0;
+}
+
+s32 txgbe_fdir_erase_perfect_filter_raptor(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input,
+ u16 soft_id)
+{
+ u32 fdirhash;
+ u32 fdircmd;
+ s32 err;
+
+ /* configure FDIRPIHASH register */
+ fdirhash = input->bkt_hash;
+ fdirhash |= TXGBE_FDIRPIHASH_IDX(soft_id);
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+
+ /* flush hash to HW */
+ txgbe_flush(hw);
+
+ /* Query if filter is present */
+ wr32(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_OP_QRY);
+
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director command did not complete!\n");
+ return err;
+ }
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & TXGBE_FDIRPICMD_VLD) {
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+ txgbe_flush(hw);
+ wr32(hw, TXGBE_FDIRPICMD,
+ TXGBE_FDIRPICMD_OP_REM);
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_fdir_add_perfect_filter_raptor - Adds a perfect filter
+ * @hw: pointer to hardware structure
+ * @input: input bitstream
+ * @input_mask: mask for the input bitstream
+ * @soft_id: software index for the filters
+ * @queue: queue index to direct traffic to
+ * @cloud_mode: unused
+ *
+ * Note that the caller to this function must lock before calling, since the
+ * hardware writes must be protected from one another.
+ **/
+s32 txgbe_fdir_add_perfect_filter_raptor(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input,
+ struct txgbe_atr_input *input_mask,
+ u16 soft_id, u8 queue, bool cloud_mode)
+{
+ s32 err = TXGBE_ERR_CONFIG;
+ UNREFERENCED_PARAMETER(cloud_mode);
+
+ DEBUGFUNC("txgbe_fdir_add_perfect_filter_raptor");
+
+ /*
+ * Check flow_type formatting, and bail out before we touch the hardware
+ * if there's a configuration issue
+ */
+ switch (input->flow_type) {
+ case TXGBE_ATR_FLOW_TYPE_IPV4:
+ case TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
+ input_mask->flow_type = TXGBE_ATR_L3TYPE_MASK;
+ if (input->dst_port || input->src_port) {
+ DEBUGOUT(" Error on src/dst port\n");
+ return TXGBE_ERR_CONFIG;
+ }
+ break;
+ case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
+ if (input->dst_port || input->src_port) {
+ DEBUGOUT(" Error on src/dst port\n");
+ return TXGBE_ERR_CONFIG;
+ }
+ /* fall through */
+ case TXGBE_ATR_FLOW_TYPE_TCPV4:
+ case TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
+ case TXGBE_ATR_FLOW_TYPE_UDPV4:
+ case TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
+ input_mask->flow_type = TXGBE_ATR_L3TYPE_MASK |
+ TXGBE_ATR_L4TYPE_MASK;
+ break;
+ default:
+ DEBUGOUT(" Error on flow type input\n");
+ return err;
+ }
+
+ /* program input mask into the HW */
+ err = txgbe_fdir_set_input_mask_raptor(hw, input_mask, cloud_mode);
+ if (err)
+ return err;
+
+ /* apply mask and compute/store hash */
+ txgbe_atr_compute_perfect_hash_raptor(input, input_mask);
+
+ /* program filters to filter memory */
+ return txgbe_fdir_write_perfect_filter_raptor(hw, input,
+ soft_id, queue, cloud_mode);
+}
+
+/**
+ * txgbe_start_hw_raptor - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function
+ * and the generation-specific start_hw_gen2 function.
+ * Then performs revision-specific operations, if any.
+ **/
+s32 txgbe_start_hw_raptor(struct txgbe_hw *hw)
+{
+ s32 err = 0;
+
+ DEBUGFUNC("txgbe_start_hw_raptor");
+
+ err = txgbe_start_hw(hw);
+ if (err != 0)
+ goto out;
+
+ err = txgbe_start_hw_gen2(hw);
+ if (err != 0)
+ goto out;
+
+ /* We need to run link autotry after the driver loads */
+ hw->mac.autotry_restart = true;
+
+out:
+ return err;
+}
+
+/**
+ * txgbe_get_supported_physical_layer_raptor - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+u64 txgbe_get_supported_physical_layer_raptor(struct txgbe_hw *hw)
+{
+ u64 physical_layer = TXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 autoc = hw->mac.autoc_read(hw);
+ u64 pma_pmd_10gs = autoc & TXGBE_AUTOC_10Gs_PMA_PMD_MASK;
+ u64 pma_pmd_10gp = autoc & TXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u64 pma_pmd_1g = autoc & TXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
+
+ DEBUGFUNC("txgbe_get_support_physical_layer_raptor");
+
+ hw->phy.identify(hw);
+
+ switch (hw->phy.type) {
+ case txgbe_phy_tn:
+ case txgbe_phy_cu_unknown:
+ hw->phy.read_reg(hw, TXGBE_MD_PHY_EXT_ABILITY,
+ TXGBE_MD_DEV_PMA_PMD, &ext_ability);
+ if (ext_ability & TXGBE_MD_PHY_10GBASET_ABILITY)
+ physical_layer |= TXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & TXGBE_MD_PHY_1000BASET_ABILITY)
+ physical_layer |= TXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & TXGBE_MD_PHY_100BASETX_ABILITY)
+ physical_layer |= TXGBE_PHYSICAL_LAYER_100BASE_TX;
+ return physical_layer;
+ default:
+ break;
+ }
+
+ switch (autoc & TXGBE_AUTOC_LMS_MASK) {
+ case TXGBE_AUTOC_LMS_1G_AN:
+ case TXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == TXGBE_AUTOC_1G_KX_BX) {
+ physical_layer = TXGBE_PHYSICAL_LAYER_1000BASE_KX |
+ TXGBE_PHYSICAL_LAYER_1000BASE_BX;
+ } else
+ /* SFI mode so read SFP module */
+ goto sfp_check;
+ break;
+ case TXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10gp == TXGBE_AUTOC_10G_CX4)
+ physical_layer = TXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10gp == TXGBE_AUTOC_10G_KX4)
+ physical_layer = TXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else if (pma_pmd_10gp == TXGBE_AUTOC_10G_XAUI)
+ physical_layer = TXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
+ break;
+ case TXGBE_AUTOC_LMS_10Gs:
+ if (pma_pmd_10gs == TXGBE_AUTOC_10Gs_KR) {
+ physical_layer = TXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ } else if (pma_pmd_10gs == TXGBE_AUTOC_10Gs_SFI)
+ goto sfp_check;
+ break;
+ case TXGBE_AUTOC_LMS_KX4_KX_KR:
+ case TXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ if (autoc & TXGBE_AUTOC_KX_SUPP)
+ physical_layer |= TXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & TXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= TXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ if (autoc & TXGBE_AUTOC_KR_SUPP)
+ physical_layer |= TXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ break;
+ default:
+ break;
+ }
+
+ return physical_layer;
+
+sfp_check:
+ /* SFP check must be done last since DA modules are sometimes used to
+ * test KR mode - we need to id KR mode correctly before SFP module.
+ * Call identify_sfp because the pluggable module may have changed */
+ return txgbe_get_supported_phy_sfp_layer(hw);
+}
+
+/**
+ * txgbe_enable_rx_dma_raptor - Enable the Rx DMA unit
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit
+ **/
+s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval)
+{
+ DEBUGFUNC("txgbe_enable_rx_dma_raptor");
+
+ /*
+ * Workaround silicon errata when enabling the Rx datapath.
+ * If traffic is incoming before we enable the Rx unit, it could hang
+ * the Rx DMA unit. Therefore, make sure the security engine is
+ * completely disabled prior to enabling the Rx unit.
+ */
+
+ hw->mac.disable_sec_rx_path(hw);
+
+ if (regval & TXGBE_PBRXCTL_ENA)
+ txgbe_enable_rx(hw);
+ else
+ txgbe_disable_rx(hw);
+
+ hw->mac.enable_sec_rx_path(hw);
+
+ return 0;
+}
+
+/**
+ * txgbe_verify_lesm_fw_enabled_raptor - Checks LESM FW module state.
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the LESM FW module is present and enabled. Otherwise
+ * returns false. Smart Speed must be disabled if LESM FW module is enabled.
+ **/
+bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw)
+{
+ bool lesm_enabled = false;
+ u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
+ s32 status;
+
+ DEBUGFUNC("txgbe_verify_lesm_fw_enabled_raptor");
+
+ /* get the offset to the Firmware Module block */
+ status = hw->rom.read16(hw, TXGBE_FW_PTR, &fw_offset);
+
+ if ((status != 0) ||
+ (fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto out;
+
+ /* get the offset to the LESM Parameters block */
+ status = hw->rom.read16(hw, (fw_offset +
+ TXGBE_FW_LESM_PARAMETERS_PTR),
+ &fw_lesm_param_offset);
+
+ if ((status != 0) ||
+ (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
+ goto out;
+
+ /* get the LESM state word */
+ status = hw->rom.read16(hw, (fw_lesm_param_offset +
+ TXGBE_FW_LESM_STATE_1),
+ &fw_lesm_state);
+
+ if ((status == 0) &&
+ (fw_lesm_state & TXGBE_FW_LESM_STATE_ENABLED))
+ lesm_enabled = true;
+
+out:
+ return lesm_enabled;
+}
+
+/**
+ * txgbe_reset_pipeline_raptor - perform pipeline reset
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset. This function assumes the SW/FW lock is held.
+ **/
+s32 txgbe_reset_pipeline_raptor(struct txgbe_hw *hw)
+{
+ s32 err = 0;
+ u64 autoc;
+
+ autoc = hw->mac.autoc_read(hw);
+
+ /* Enable link if disabled in NVM */
+ if (autoc & TXGBE_AUTOC_LINK_DIA_MASK)
+ autoc &= ~TXGBE_AUTOC_LINK_DIA_MASK;
+
+ autoc |= TXGBE_AUTOC_AN_RESTART;
+ /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+ hw->mac.autoc_write(hw, autoc ^ TXGBE_AUTOC_LMS_AN);
+
+ /* Write AUTOC register with original LMS field and Restart_AN */
+ hw->mac.autoc_write(hw, autoc);
+ txgbe_flush(hw);
+
+ return err;
+}
+
new file mode 100644
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_HW_H_
+#define _TXGBE_HW_H_
+
+#include "txgbe_type.h"
+
+struct txgbe_pba {
+ u16 word[2];
+ u16 *pba_block;
+};
+
+void txgbe_dcb_get_rtrup2tc(struct txgbe_hw *hw, u8 *map);
+
+u16 txgbe_get_pcie_msix_count(struct txgbe_hw *hw);
+s32 txgbe_init_hw(struct txgbe_hw *hw);
+s32 txgbe_start_hw(struct txgbe_hw *hw);
+s32 txgbe_stop_hw(struct txgbe_hw *hw);
+s32 txgbe_start_hw_gen2(struct txgbe_hw *hw);
+s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw);
+s32 txgbe_read_pba_num(struct txgbe_hw *hw, u32 *pba_num);
+s32 txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 txgbe_read_pba_raw(struct txgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 max_pba_block_size,
+ struct txgbe_pba *pba);
+s32 txgbe_write_pba_raw(struct txgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, struct txgbe_pba *pba);
+s32 txgbe_get_pba_block_size(struct txgbe_hw *hw, u16 *eeprom_buf,
+ u32 eeprom_buf_size, u16 *pba_block_size);
+s32 txgbe_get_mac_addr(struct txgbe_hw *hw, u8 *mac_addr);
+
+void txgbe_set_lan_id_multi_port(struct txgbe_hw *hw);
+
+s32 txgbe_led_on(struct txgbe_hw *hw, u32 index);
+s32 txgbe_led_off(struct txgbe_hw *hw, u32 index);
+
+s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 txgbe_clear_rar(struct txgbe_hw *hw, u32 index);
+s32 txgbe_init_rx_addrs(struct txgbe_hw *hw);
+s32 txgbe_update_mc_addr_list(struct txgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count,
+ txgbe_mc_addr_itr func, bool clear);
+s32 txgbe_update_uc_addr_list(struct txgbe_hw *hw, u8 *addr_list,
+ u32 addr_count, txgbe_mc_addr_itr func);
+s32 txgbe_enable_mc(struct txgbe_hw *hw);
+s32 txgbe_disable_mc(struct txgbe_hw *hw);
+s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw);
+s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw);
+s32 txgbe_disable_sec_tx_path(struct txgbe_hw *hw);
+s32 txgbe_enable_sec_tx_path(struct txgbe_hw *hw);
+
+s32 txgbe_fc_enable(struct txgbe_hw *hw);
+bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw);
+void txgbe_fc_autoneg(struct txgbe_hw *hw);
+s32 txgbe_setup_fc(struct txgbe_hw *hw);
+
+s32 txgbe_validate_mac_addr(u8 *mac_addr);
+s32 txgbe_acquire_swfw_sync(struct txgbe_hw *hw, u32 mask);
+void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask);
+
+s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
+s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
+
+s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq);
+s32 txgbe_set_vmdq_san_mac(struct txgbe_hw *hw, u32 vmdq);
+s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq);
+s32 txgbe_insert_mac_addr(struct txgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 txgbe_init_uta_tables(struct txgbe_hw *hw);
+s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on, bool vlvf_bypass);
+s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass);
+s32 txgbe_clear_vfta(struct txgbe_hw *hw);
+s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan, bool vlvf_bypass);
+
+s32 txgbe_check_mac_link(struct txgbe_hw *hw,
+ u32 *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+
+s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
+
+s32 txgbe_get_fcoe_boot_status(struct txgbe_hw *hw, u16 *bs);
+void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf);
+void txgbe_set_vlan_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf);
+s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps);
+void txgbe_set_pba(struct txgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy);
+void txgbe_enable_relaxed_ordering_gen2(struct txgbe_hw *hw);
+s32 txgbe_shutdown_fw_phy(struct txgbe_hw *);
+void txgbe_clear_tx_pending(struct txgbe_hw *hw);
+
+extern s32 txgbe_reset_pipeline_raptor(struct txgbe_hw *hw);
+extern void txgbe_stop_mac_link_on_d3_raptor(struct txgbe_hw *hw);
+
+s32 txgbe_get_thermal_sensor_data(struct txgbe_hw *hw);
+s32 txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw);
+
+void txgbe_get_etk_id(struct txgbe_hw *hw, struct txgbe_nvm_version *nvm_ver);
+void txgbe_get_oem_prod_version(struct txgbe_hw *hw,
+ struct txgbe_nvm_version *nvm_ver);
+void txgbe_get_orom_version(struct txgbe_hw *hw,
+ struct txgbe_nvm_version *nvm_ver);
+void txgbe_disable_rx(struct txgbe_hw *hw);
+void txgbe_enable_rx(struct txgbe_hw *hw);
+s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw,
+ u32 speed,
+ bool autoneg_wait_to_complete);
+void txgbe_set_soft_rate_select_speed(struct txgbe_hw *hw,
+ u32 speed);
+void txgbe_add_uc_addr(struct txgbe_hw *hw, u8 *addr_list, u32 vmdq);
+void txgbe_set_mta(struct txgbe_hw *hw, u8 *mc_addr);
+s32 txgbe_negotiate_fc(struct txgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
+s32 txgbe_init_shared_code(struct txgbe_hw *hw);
+s32 txgbe_set_mac_type(struct txgbe_hw *hw);
+s32 txgbe_init_ops_pf(struct txgbe_hw *hw);
+s32 txgbe_get_link_capabilities_raptor(struct txgbe_hw *hw,
+ u32 *speed, bool *autoneg);
+u32 txgbe_get_media_type_raptor(struct txgbe_hw *hw);
+void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw);
+void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw);
+void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw);
+void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw,
+ u32 speed);
+s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw,
+ u32 speed,
+ bool autoneg_wait_to_complete);
+s32 txgbe_start_mac_link_raptor(struct txgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+s32 txgbe_setup_mac_link(struct txgbe_hw *hw, u32 speed,
+ bool autoneg_wait_to_complete);
+s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw);
+void txgbe_init_mac_link_ops(struct txgbe_hw *hw);
+s32 txgbe_reset_hw(struct txgbe_hw *hw);
+s32 txgbe_start_hw_raptor(struct txgbe_hw *hw);
+s32 txgbe_init_phy_raptor(struct txgbe_hw *hw);
+u64 txgbe_get_supported_physical_layer_raptor(struct txgbe_hw *hw);
+s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval);
+s32 txgbe_prot_autoc_read_raptor(struct txgbe_hw *hw, bool *locked, u64 *value);
+s32 txgbe_prot_autoc_write_raptor(struct txgbe_hw *hw, bool locked, u64 value);
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw);
+s32 txgbe_init_fdir_signature_raptor(struct txgbe_hw *hw, u32 fdirctrl);
+s32 txgbe_init_fdir_perfect(struct txgbe_hw *hw, u32 fdirctrl,
+ bool cloud_mode);
+void txgbe_fdir_add_signature_filter_raptor(struct txgbe_hw *hw,
+ union txgbe_atr_hash_dword input,
+ union txgbe_atr_hash_dword common,
+ u8 queue);
+s32 txgbe_fdir_set_input_mask_raptor(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input_mask, bool cloud_mode);
+s32 txgbe_fdir_write_perfect_filter_raptor(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input,
+ u16 soft_id, u8 queue, bool cloud_mode);
+s32 txgbe_fdir_erase_perfect_filter_raptor(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input,
+ u16 soft_id);
+s32 txgbe_fdir_add_perfect_filter_raptor(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input,
+ struct txgbe_atr_input *mask,
+ u16 soft_id,
+ u8 queue,
+ bool cloud_mode);
+void txgbe_atr_compute_perfect_hash_raptor(struct txgbe_atr_input *input,
+ struct txgbe_atr_input *mask);
+u32 txgbe_atr_compute_sig_hash_raptor(union txgbe_atr_hash_dword input,
+ union txgbe_atr_hash_dword common);
+bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw);
+void txgbe_set_fdir_drop_queue_raptor(struct txgbe_hw *hw, u8 dropqueue);
+#endif /* _TXGBE_HW_H_ */
new file mode 100644
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_type.h"
+
+#include "txgbe_mbx.h"
+
+/**
+ * txgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 txgbe_read_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_read_mbx");
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->read)
+ ret_val = mbx->read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * txgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 txgbe_write_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ DEBUGFUNC("txgbe_write_mbx");
+
+ if (size > mbx->size) {
+ ret_val = TXGBE_ERR_MBX;
+ DEBUGOUT("Invalid mailbox message size %d", size);
+ } else if (mbx->write)
+ ret_val = mbx->write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 txgbe_check_for_msg(struct txgbe_hw *hw, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_check_for_msg");
+
+ if (mbx->check_for_msg)
+ ret_val = mbx->check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 txgbe_check_for_ack(struct txgbe_hw *hw, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_check_for_ack");
+
+ if (mbx->check_for_ack)
+ ret_val = mbx->check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 txgbe_check_for_rst(struct txgbe_hw *hw, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_check_for_rst");
+
+ if (mbx->check_for_rst)
+ ret_val = mbx->check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * txgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+STATIC s32 txgbe_poll_for_msg(struct txgbe_hw *hw, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("txgbe_poll_for_msg");
+
+ if (!countdown || !mbx->check_for_msg)
+ goto out;
+
+ while (countdown && mbx->check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ if (countdown == 0)
+ DEBUGOUT("Polling for VF%d mailbox message timedout", mbx_id);
+
+out:
+ return countdown ? 0 : TXGBE_ERR_MBX;
+}
+
+/**
+ * txgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+STATIC s32 txgbe_poll_for_ack(struct txgbe_hw *hw, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("txgbe_poll_for_ack");
+
+ if (!countdown || !mbx->check_for_ack)
+ goto out;
+
+ while (countdown && mbx->check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+ if (countdown == 0)
+ DEBUGOUT("Polling for VF%d mailbox ack timedout", mbx_id);
+
+out:
+ return countdown ? 0 : TXGBE_ERR_MBX;
+}
+
+/**
+ * txgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 txgbe_read_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_read_posted_mbx");
+
+ if (!mbx->read)
+ goto out;
+
+ ret_val = txgbe_poll_for_msg(hw, mbx_id);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * txgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 txgbe_write_posted_mbx(struct txgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_write_posted_mbx");
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = txgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * txgbe_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+STATIC u32 txgbe_read_v2p_mailbox(struct txgbe_hw *hw)
+{
+ u32 v2p_mailbox = rd32(hw, TXGBE_VFMBCTL);
+
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & TXGBE_VFMBCTL_R2C_BITS;
+
+ return v2p_mailbox;
+}
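+
+/*
+ * The TXGBE_VFMBCTL_R2C_BITS cached in hw->mbx.v2p_mailbox above are
+ * read-to-clear in hardware; latching them lets txgbe_check_for_bit_vf()
+ * still observe a status bit that an earlier read has already cleared.
+ */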
+
+/**
+ * txgbe_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+STATIC s32 txgbe_check_for_bit_vf(struct txgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = txgbe_read_v2p_mailbox(hw);
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = 0;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+s32 txgbe_check_for_msg_vf(struct txgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+ DEBUGFUNC("txgbe_check_for_msg_vf");
+
+ if (!txgbe_check_for_bit_vf(hw, TXGBE_VFMBCTL_PFSTS)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+s32 txgbe_check_for_ack_vf(struct txgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+ DEBUGFUNC("txgbe_check_for_ack_vf");
+
+ if (!txgbe_check_for_bit_vf(hw, TXGBE_VFMBCTL_PFACK)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the reset done bit or else ERR_MBX
+ **/
+s32 txgbe_check_for_rst_vf(struct txgbe_hw *hw, u16 mbx_id)
+{
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+ DEBUGFUNC("txgbe_check_for_rst_vf");
+
+ if (!txgbe_check_for_bit_vf(hw, (TXGBE_VFMBCTL_RSTD |
+ TXGBE_VFMBCTL_RSTI))) {
+ ret_val = 0;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 txgbe_obtain_mbx_lock_vf(struct txgbe_hw *hw)
+{
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_obtain_mbx_lock_vf");
+
+ /* Take ownership of the buffer */
+ wr32(hw, TXGBE_VFMBCTL, TXGBE_VFMBCTL_VFU);
+
+ /* reserve mailbox for vf use */
+ if (txgbe_read_v2p_mailbox(hw) & TXGBE_VFMBCTL_VFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * txgbe_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 txgbe_write_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+
+ DEBUGFUNC("txgbe_write_mbx_vf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = txgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ txgbe_check_for_msg_vf(hw, 0);
+ txgbe_check_for_ack_vf(hw, 0);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(hw, TXGBE_VFMBX, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ wr32(hw, TXGBE_VFMBCTL, TXGBE_VFMBCTL_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * txgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 txgbe_read_mbx_vf(struct txgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ s32 ret_val = 0;
+ u16 i;
+
+ DEBUGFUNC("txgbe_read_mbx_vf");
+ UNREFERENCED_PARAMETER(mbx_id);
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = txgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(hw, TXGBE_VFMBX, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ wr32(hw, TXGBE_VFMBCTL, TXGBE_VFMBCTL_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * txgbe_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+void txgbe_init_mbx_params_vf(struct txgbe_hw *hw)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->usec_delay = TXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = TXGBE_P2VMBX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+STATIC s32 txgbe_check_for_bit_pf(struct txgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = rd32(hw, TXGBE_MBVFICR(index));
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = 0;
+ wr32(hw, TXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+s32 txgbe_check_for_msg_pf(struct txgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = TXGBE_ERR_MBX;
+ s32 index = TXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ DEBUGFUNC("txgbe_check_for_msg_pf");
+
+ if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+s32 txgbe_check_for_ack_pf(struct txgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = TXGBE_ERR_MBX;
+ s32 index = TXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ DEBUGFUNC("txgbe_check_for_ack_pf");
+
+ if (!txgbe_check_for_bit_pf(hw, TXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+s32 txgbe_check_for_rst_pf(struct txgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = TXGBE_ERR_MBX;
+
+ DEBUGFUNC("txgbe_check_for_rst_pf");
+
+ vflre = rd32(hw, TXGBE_FLRVFE(reg_offset));
+ if (vflre & (1 << vf_shift)) {
+ ret_val = 0;
+ wr32(hw, TXGBE_FLRVFEC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+STATIC s32 txgbe_obtain_mbx_lock_pf(struct txgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = TXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ DEBUGFUNC("txgbe_obtain_mbx_lock_pf");
+
+ /* Take ownership of the buffer */
+ wr32(hw, TXGBE_MBCTL(vf_number), TXGBE_MBCTL_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = rd32(hw, TXGBE_MBCTL(vf_number));
+ if (p2v_mailbox & TXGBE_MBCTL_PFU)
+ ret_val = 0;
+ else
+ DEBUGOUT("Failed to obtain mailbox lock for VF%d", vf_number);
+
+ return ret_val;
+}
+
+/**
+ * txgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 txgbe_write_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("txgbe_write_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = txgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ txgbe_check_for_msg_pf(hw, vf_number);
+ txgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(hw, TXGBE_MBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+ wr32(hw, TXGBE_MBCTL(vf_number), TXGBE_MBCTL_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * txgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+s32 txgbe_read_mbx_pf(struct txgbe_hw *hw, u32 *msg, u16 size, u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("txgbe_read_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = txgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(hw, TXGBE_MBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ wr32(hw, TXGBE_MBCTL(vf_number), TXGBE_MBCTL_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * txgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void txgbe_init_mbx_params_pf(struct txgbe_hw *hw)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = TXGBE_P2VMBX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
new file mode 100644
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_MBX_H_
+#define _TXGBE_MBX_H_
+
+#include "txgbe_type.h"
+
+#define TXGBE_ERR_MBX -100
+
+
+/* If it's a TXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is TXGBE_PF_*.
+ * Message results are the original message value OR'd with one of the
+ * flag bits defined below (the 0xF0000000 nibble).
+ */
+#define TXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define TXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define TXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define TXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define TXGBE_VT_MSGINFO_MASK (0xFF << TXGBE_VT_MSGINFO_SHIFT)
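+/*
+ * For example (hypothetical composition, the exact encoding is
+ * command-specific): msg = TXGBE_VF_SET_MACVLAN |
+ * (filter_index << TXGBE_VT_MSGINFO_SHIFT);
+ */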
+
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum txgbe_pfvf_api_rev {
+ txgbe_mbox_api_null,
+ txgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ txgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ txgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ txgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ txgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ /* This value should always be last */
+ txgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
+#define TXGBE_VF_RESET 0x01 /* VF requests reset */
+#define TXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define TXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define TXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
+#define TXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define TXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* mailbox API, version 1.2 VF requests */
+#define TXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
+#define TXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define TXGBE_VF_UPDATE_XCAST_MODE 0x0c
+
+#define TXGBE_VF_BACKUP 0x8001 /* VF requests backup */
+
+/* mode choices for TXGBE_VF_UPDATE_XCAST_MODE */
+enum txgbevf_xcast_modes {
+ TXGBEVF_XCAST_MODE_NONE = 0,
+ TXGBEVF_XCAST_MODE_MULTI,
+ TXGBEVF_XCAST_MODE_ALLMULTI,
+ TXGBEVF_XCAST_MODE_PROMISC,
+};
+
+/* GET_QUEUES return data indices within the mailbox */
+#define TXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define TXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define TXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define TXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+
+/* length of permanent address message returned from PF */
+#define TXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define TXGBE_VF_MC_TYPE_WORD 3
+
+#define TXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+/* mailbox API, version 2.0 VF requests */
+#define TXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+#define TXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+#define TXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */
+#define TXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */
+#define TXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */
+#define TXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */
+#define TXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */
+#define TXGBE_VF_SET_MTU 0x0F /* set a specific MTU */
+
+/* mailbox API, version 2.0 PF requests */
+#define TXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */
+
+#define TXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define TXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
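+/*
+ * Worst case, a posted mailbox operation therefore retries for
+ * TXGBE_VF_MBX_INIT_TIMEOUT * TXGBE_VF_MBX_INIT_DELAY = 2000 * 500 us,
+ * i.e. about one second, before timing out.
+ */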
+
+s32 txgbe_read_mbx(struct txgbe_hw *, u32 *, u16, u16);
+s32 txgbe_write_mbx(struct txgbe_hw *, u32 *, u16, u16);
+s32 txgbe_read_posted_mbx(struct txgbe_hw *, u32 *, u16, u16);
+s32 txgbe_write_posted_mbx(struct txgbe_hw *, u32 *, u16, u16);
+s32 txgbe_check_for_msg(struct txgbe_hw *, u16);
+s32 txgbe_check_for_ack(struct txgbe_hw *, u16);
+s32 txgbe_check_for_rst(struct txgbe_hw *, u16);
+void txgbe_init_mbx_params_vf(struct txgbe_hw *);
+void txgbe_init_mbx_params_pf(struct txgbe_hw *);
+
+s32 txgbe_read_mbx_pf(struct txgbe_hw *, u32 *, u16, u16);
+s32 txgbe_write_mbx_pf(struct txgbe_hw *, u32 *, u16, u16);
+s32 txgbe_check_for_msg_pf(struct txgbe_hw *, u16);
+s32 txgbe_check_for_ack_pf(struct txgbe_hw *, u16);
+s32 txgbe_check_for_rst_pf(struct txgbe_hw *, u16);
+
+s32 txgbe_read_mbx_vf(struct txgbe_hw *, u32 *, u16, u16);
+s32 txgbe_write_mbx_vf(struct txgbe_hw *, u32 *, u16, u16);
+s32 txgbe_check_for_msg_vf(struct txgbe_hw *, u16);
+s32 txgbe_check_for_ack_vf(struct txgbe_hw *, u16);
+s32 txgbe_check_for_rst_vf(struct txgbe_hw *, u16);
+
+#endif /* _TXGBE_MBX_H_ */
new file mode 100644
@@ -0,0 +1,400 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_type.h"
+#include "txgbe_phy.h"
+#include "txgbe_mng.h"
+
+/**
+ * txgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ * Calculates the checksum for some buffer on a specified length. The
+ * checksum calculated is returned.
+ **/
+static u8
+txgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
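+
+/*
+ * The result is the two's complement of the byte sum, so appending it
+ * makes the whole buffer sum to zero modulo 256: e.g. bytes {0x10, 0x20}
+ * yield 0xD0, and 0x10 + 0x20 + 0xD0 == 0x100 == 0 (mod 256).
+ */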
+
+/**
+ * txgbe_hic_unlocked - Issue command to manageability block unlocked
+ * @hw: pointer to the HW structure
+ * @buffer: command to write and where the return status will be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ *
+ * Communicates with the manageability block. On success return 0
+ * else returns semaphore error when encountering an error acquiring
+ * semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ *
+ * This function assumes that the TXGBE_MNGSEM_SWMBX semaphore is held
+ * by the caller.
+ **/
+static s32
+txgbe_hic_unlocked(struct txgbe_hw *hw, u32 *buffer, u32 length, u32 timeout)
+{
+ u32 value, loop;
+ u16 i, dword_len;
+
+ DEBUGFUNC("txgbe_hic_unlocked");
+
+ if (!length || length > TXGBE_PMMBX_BSIZE) {
+ DEBUGOUT("Buffer length failure buffersize=%d.\n", length);
+ return TXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if (length % sizeof(u32)) {
+ DEBUGOUT("Buffer length failure, not aligned to dword");
+ return TXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ dword_len = length >> 2;
+
+ /* The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < dword_len; i++) {
+ wr32a(hw, TXGBE_MNGMBX, i, cpu_to_le32(buffer[i]));
+ buffer[i] = rd32a(hw, TXGBE_MNGMBX, i);
+ }
+ txgbe_flush(hw);
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ wr32m(hw, TXGBE_MNGMBXCTL,
+ TXGBE_MNGMBXCTL_SWRDY, TXGBE_MNGMBXCTL_SWRDY);
+
+ /* Check command completion */
+ loop = po32m(hw, TXGBE_MNGMBXCTL,
+ TXGBE_MNGMBXCTL_FWRDY, TXGBE_MNGMBXCTL_FWRDY,
+ &value, timeout, 1000);
+ if (!loop || !(value & TXGBE_MNGMBXCTL_FWACK)) {
+ DEBUGOUT("Command has failed with no status valid.\n");
+ return TXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ *
+ * Communicates with the manageability block. On success return 0
+ * else returns semaphore error when encountering an error acquiring
+ * semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+static s32
+txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ u32 hdr_size = sizeof(struct txgbe_hic_hdr);
+ struct txgbe_hic_hdr *resp = (struct txgbe_hic_hdr *)buffer;
+ u16 buf_len;
+ s32 err;
+ u32 bi;
+ u32 dword_len;
+
+ DEBUGFUNC("txgbe_host_interface_command");
+
+ if (length == 0 || length > TXGBE_PMMBX_BSIZE) {
+ DEBUGOUT("Buffer length failure buffersize=%d.\n", length);
+ return TXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Take management host interface semaphore */
+ err = hw->mac.acquire_swfw_sync(hw, TXGBE_MNGSEM_SWMBX);
+ if (err)
+ return err;
+
+ err = txgbe_hic_unlocked(hw, buffer, length, timeout);
+ if (err)
+ goto rel_out;
+
+ if (!return_data)
+ goto rel_out;
+
+ /* Calculate length in DWORDs */
+ dword_len = hdr_size >> 2;
+
+ /* first pull in the header so we know the buffer length */
+ for (bi = 0; bi < dword_len; bi++) {
+ buffer[bi] = rd32a(hw, TXGBE_MNGMBX, bi);
+ }
+
+ /*
+ * If there is anything in the data position, pull it in.
+ * The Read Flash command requires reading the buffer length
+ * from two bytes instead of one byte.
+ */
+ if (resp->cmd == 0x30) {
+ for (; bi < dword_len + 2; bi++) {
+ buffer[bi] = rd32a(hw, TXGBE_MNGMBX, bi);
+ }
+ buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
+ & 0xF00) | resp->buf_len;
+ hdr_size += (2 << 2);
+ } else {
+ buf_len = resp->buf_len;
+ }
+ if (!buf_len)
+ goto rel_out;
+
+ if (length < buf_len + hdr_size) {
+ DEBUGOUT("Buffer not large enough for reply message.\n");
+ err = TXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+
+ /* Pull in the rest of the buffer (bi is where we left off) */
+ for (; bi <= dword_len; bi++) {
+ buffer[bi] = rd32a(hw, TXGBE_MNGMBX, bi);
+ }
+
+rel_out:
+ hw->mac.release_swfw_sync(hw, TXGBE_MNGSEM_SWMBX);
+
+ return err;
+}
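+
+/*
+ * Usage sketch (illustrative only, not part of the driver): issuing a
+ * command through txgbe_host_interface_command(). The zeroed buffer here
+ * is hypothetical; real callers build one of the txgbe_hic_* structures
+ * defined in txgbe_mng.h.
+ *
+ *	u32 cmd_buf[4] = { 0 };	 (dword-aligned command block)
+ *	s32 err;
+ *
+ *	err = txgbe_host_interface_command(hw, cmd_buf, sizeof(cmd_buf),
+ *					   TXGBE_HI_COMMAND_TIMEOUT, true);
+ *	if (err != 0)
+ *		DEBUGOUT("host interface command failed\n");
+ */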
+
+/**
+ * txgbe_hic_sr_read - Read bytes from the shadow RAM using a host
+ * interface command, assuming that the semaphore is already obtained.
+ * @hw: pointer to hardware structure
+ * @addr: byte offset in the shadow RAM to read from
+ * @buf: buffer to hold the data read
+ * @len: number of bytes to read, up to TXGBE_PMMBX_DATA_SIZE
+ *
+ * Reads data from the shadow RAM using the hostif.
+ **/
+s32 txgbe_hic_sr_read(struct txgbe_hw *hw, u32 addr, u8 *buf, int len)
+{
+ struct txgbe_hic_read_shadow_ram command;
+ u32 value;
+ int err, i = 0, j = 0;
+
+ if (len > TXGBE_PMMBX_DATA_SIZE)
+ return TXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ memset(&command, 0, sizeof(command));
+ command.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+ command.hdr.req.buf_lenh = 0;
+ command.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+ command.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+ command.address = cpu_to_be32(addr);
+ command.length = cpu_to_be16(len);
+
+ err = txgbe_hic_unlocked(hw, (u32 *)&command,
+ sizeof(command), TXGBE_HI_COMMAND_TIMEOUT);
+ if (err)
+ return err;
+
+ while (i < (len >> 2)) {
+ value = rd32a(hw, TXGBE_MNGMBX, FW_NVM_DATA_OFFSET + i);
+ ((u32 *)buf)[i] = value;
+ i++;
+ }
+
+ value = rd32a(hw, TXGBE_MNGMBX, FW_NVM_DATA_OFFSET + i);
+ for (i <<= 2; i < len; i++) {
+ ((u8 *)buf)[i] = ((u8 *)&value)[j++];
+ }
+
+ return 0;
+}
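+
+/*
+ * Usage sketch (illustrative only): reading the first 16 bytes of the
+ * shadow RAM. Assumes the caller already owns the TXGBE_MNGSEM_SWMBX
+ * semaphore, since txgbe_hic_sr_read() issues the command unlocked.
+ *
+ *	u8 data[16];
+ *
+ *	if (txgbe_hic_sr_read(hw, 0x0, data, sizeof(data)) == 0)
+ *		DEBUGOUT("shadow RAM[0] = 0x%02x\n", data[0]);
+ */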
+
+/**
+ * txgbe_hic_sr_write - Write bytes to the shadow RAM using hostif
+ * @hw: pointer to hardware structure
+ * @addr: byte offset in the shadow RAM to write to
+ * @buf: buffer holding the data to write
+ * @len: number of bytes to write, up to TXGBE_PMMBX_DATA_SIZE
+ *
+ * Writes data to the shadow RAM using the hostif.
+ **/
+s32 txgbe_hic_sr_write(struct txgbe_hw *hw, u32 addr, u8 *buf, int len)
+{
+ struct txgbe_hic_write_shadow_ram command;
+ u32 value;
+ int err = 0, i = 0, j = 0;
+
+ if (len > TXGBE_PMMBX_DATA_SIZE)
+ return TXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ memset(&command, 0, sizeof(command));
+ command.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
+ command.hdr.req.buf_lenh = 0;
+ command.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
+ command.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+ command.address = cpu_to_be32(addr);
+ command.length = cpu_to_be16(len);
+
+ while (i < (len >> 2)) {
+ value = ((u32 *)buf)[i];
+ wr32a(hw, TXGBE_MNGMBX, FW_NVM_DATA_OFFSET + i, value);
+ i++;
+ }
+
+ for (i <<= 2; i < len; i++) {
+ ((u8 *)&value)[j++] = ((u8 *)buf)[i];
+ }
+ wr32a(hw, TXGBE_MNGMBX, FW_NVM_DATA_OFFSET + (i >> 2), value);
+
+ UNREFERENCED_PARAMETER(&command);
+
+ return err;
+}
+
+/**
+ * txgbe_hic_set_drv_ver - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: unused
+ * @driver_ver: unused
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. On success return 0
+ * else returns TXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ * semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 txgbe_hic_set_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
+{
+ struct txgbe_hic_drv_info fw_cmd;
+ int i;
+ s32 ret_val = 0;
+
+ DEBUGFUNC("txgbe_hic_set_drv_ver");
+ UNREFERENCED_PARAMETER(len, driver_ver);
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.pad = 0;
+ fw_cmd.pad2 = 0;
+ fw_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = txgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd),
+ TXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (ret_val != 0)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = 0;
+ else
+ ret_val = TXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ return ret_val;
+}
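+
+/*
+ * Usage sketch (illustrative only): reporting a hypothetical driver
+ * version 1.0.0.0 to the firmware. The last two arguments are unused
+ * by this implementation.
+ *
+ *	s32 err = txgbe_hic_set_drv_ver(hw, 1, 0, 0, 0, 0, NULL);
+ *	if (err != 0)
+ *		DEBUGOUT("failed to report driver version\n");
+ */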
+
+/**
+ * txgbe_hic_reset - send reset cmd to fw
+ * @hw: pointer to hardware structure
+ *
+ * Sends reset cmd to firmware through the manageability
+ * block. On success return 0
+ * else returns TXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ * semaphore or TXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32
+txgbe_hic_reset(struct txgbe_hw *hw)
+{
+ struct txgbe_hic_reset reset_cmd;
+ int i;
+ s32 err = 0;
+
+ DEBUGFUNC("\n");
+
+ reset_cmd.hdr.cmd = FW_RESET_CMD;
+ reset_cmd.hdr.buf_len = FW_RESET_LEN;
+ reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ reset_cmd.lan_id = hw->bus.lan_id;
+ reset_cmd.reset_type = (u16)hw->reset_type;
+ reset_cmd.hdr.checksum = 0;
+ reset_cmd.hdr.checksum = txgbe_calculate_checksum((u8 *)&reset_cmd,
+ (FW_CEM_HDR_LEN + reset_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ err = txgbe_host_interface_command(hw, (u32 *)&reset_cmd,
+ sizeof(reset_cmd),
+ TXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (err != 0)
+ continue;
+
+ if (reset_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ err = 0;
+ else
+ err = TXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * txgbe_mng_present - returns true when management capability is present
+ * @hw: pointer to hardware structure
+ */
+bool
+txgbe_mng_present(struct txgbe_hw *hw)
+{
+ if (hw->mac.type == txgbe_mac_unknown)
+ return false;
+
+ return !!rd32m(hw, TXGBE_STAT, TXGBE_STAT_MNGINIT);
+}
+
+/**
+ * txgbe_mng_enabled - Is the manageability engine enabled?
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the manageability engine is enabled.
+ **/
+bool
+txgbe_mng_enabled(struct txgbe_hw *hw)
+{
+ UNREFERENCED_PARAMETER(hw);
+ /* firmware doesn't control laser */
+ return false;
+}
new file mode 100644
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_MNG_H_
+#define _TXGBE_MNG_H_
+
+#include "txgbe_type.h"
+
+
+#define TXGBE_PMMBX_QSIZE 64 /* Num of dwords in range */
+#define TXGBE_PMMBX_BSIZE (TXGBE_PMMBX_QSIZE * 4)
+#define TXGBE_PMMBX_DATA_SIZE (TXGBE_PMMBX_BSIZE - FW_NVM_DATA_OFFSET * 4)
+#define TXGBE_HI_COMMAND_TIMEOUT 5000 /* Process HI command limit */
+#define TXGBE_HI_FLASH_ERASE_TIMEOUT 5000 /* Process Erase command limit */
+#define TXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
+#define TXGBE_HI_FLASH_VERIFY_TIMEOUT 60000 /* Process Apply command limit */
+#define TXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_READ_SHADOW_RAM_CMD 0x31
+#define FW_READ_SHADOW_RAM_LEN 0x6
+#define FW_WRITE_SHADOW_RAM_CMD 0x33
+#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
+#define FW_SHADOW_RAM_DUMP_CMD 0x36
+#define FW_SHADOW_RAM_DUMP_LEN 0
+#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
+#define FW_NVM_DATA_OFFSET 3
+#define FW_MAX_READ_BUFFER_SIZE 244
+#define FW_DISABLE_RXEN_CMD 0xDE
+#define FW_DISABLE_RXEN_LEN 0x1
+#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_RESET_CMD 0xDF
+#define FW_RESET_LEN 0x2
+#define FW_SETUP_MAC_LINK_CMD 0xE0
+#define FW_SETUP_MAC_LINK_LEN 0x2
+#define FW_FLASH_UPGRADE_START_CMD 0xE3
+#define FW_FLASH_UPGRADE_START_LEN 0x1
+#define FW_FLASH_UPGRADE_WRITE_CMD 0xE4
+#define FW_FLASH_UPGRADE_VERIFY_CMD 0xE5
+#define FW_FLASH_UPGRADE_VERIFY_LEN 0x4
+#define FW_PHY_ACT_DATA_COUNT 4
+#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */
+#define FW_PHY_TOKEN_WAIT 5 /* seconds */
+#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY)
+
+/* Host Interface Command Structures */
+struct txgbe_hic_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
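+
+/*
+ * Example layout (illustrative only): the header of a driver-info
+ * command as built by txgbe_hic_set_drv_ver() is, byte by byte,
+ *
+ *	cmd      = 0xDD (FW_CEM_CMD_DRIVER_INFO)
+ *	buf_len  = 0x05 (FW_CEM_CMD_DRIVER_INFO_LEN)
+ *	cmd_resv = 0x00 (FW_CEM_CMD_RESERVED)
+ *	checksum = txgbe_calculate_checksum() over header plus payload
+ */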
+
+struct txgbe_hic_hdr2_req {
+ u8 cmd;
+ u8 buf_lenh;
+ u8 buf_lenl;
+ u8 checksum;
+};
+
+struct txgbe_hic_hdr2_rsp {
+ u8 cmd;
+ u8 buf_lenl;
+ u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+ u8 checksum;
+};
+
+union txgbe_hic_hdr2 {
+ struct txgbe_hic_hdr2_req req;
+ struct txgbe_hic_hdr2_rsp rsp;
+};
+
+struct txgbe_hic_drv_info {
+ struct txgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ u8 pad; /* end spacing to ensure length is a multiple of dword */
+ u16 pad2; /* end spacing to ensure length is a multiple of dword */
+};
+
+/* These need to be dword aligned */
+struct txgbe_hic_read_shadow_ram {
+ union txgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct txgbe_hic_write_shadow_ram {
+ union txgbe_hic_hdr2 hdr;
+ u32 address;
+ u16 length;
+ u16 pad2;
+ u16 data;
+ u16 pad3;
+};
+
+struct txgbe_hic_disable_rxen {
+ struct txgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad2;
+ u16 pad3;
+};
+
+struct txgbe_hic_reset {
+ struct txgbe_hic_hdr hdr;
+ u16 lan_id;
+ u16 reset_type;
+};
+
+struct txgbe_hic_phy_cfg {
+ struct txgbe_hic_hdr hdr;
+ u8 lan_id;
+ u8 phy_mode;
+ u16 phy_speed;
+};
+
+enum txgbe_module_id {
+ TXGBE_MODULE_EEPROM = 0,
+ TXGBE_MODULE_FIRMWARE,
+ TXGBE_MODULE_HARDWARE,
+ TXGBE_MODULE_PCIE
+};
+
+struct txgbe_hic_upg_start {
+ struct txgbe_hic_hdr hdr;
+ u8 module_id;
+ u8 pad2;
+ u16 pad3;
+};
+
+struct txgbe_hic_upg_write {
+ struct txgbe_hic_hdr hdr;
+ u8 data_len;
+ u8 eof_flag;
+ u16 check_sum;
+ u32 data[62];
+};
+
+enum txgbe_upg_flag {
+ TXGBE_RESET_NONE = 0,
+ TXGBE_RESET_FIRMWARE,
+ TXGBE_RELOAD_EEPROM,
+ TXGBE_RESET_LAN
+};
+
+struct txgbe_hic_upg_verify {
+ struct txgbe_hic_hdr hdr;
+ u32 action_flag;
+};
+
+s32 txgbe_hic_sr_read(struct txgbe_hw *hw, u32 addr, u8 *buf, int len);
+s32 txgbe_hic_sr_write(struct txgbe_hw *hw, u32 addr, u8 *buf, int len);
+
+s32 txgbe_hic_set_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver, u16 len, const char *str);
+s32 txgbe_hic_reset(struct txgbe_hw *hw);
+bool txgbe_mng_present(struct txgbe_hw *hw);
+bool txgbe_mng_enabled(struct txgbe_hw *hw);
+#endif /* _TXGBE_MNG_H_ */
new file mode 100644
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_OS_H_
+#define _TXGBE_OS_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <rte_version.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_byteorder.h>
+#include <rte_config.h>
+#include <rte_io.h>
+
+#include "../txgbe_logs.h"
+
+#define RTE_LIBRTE_TXGBE_TM DCPV(1, 0)
+#define TMZ_PADDR(mz) ((mz)->iova)
+#define TMZ_VADDR(mz) ((mz)->addr)
+#define TDEV_NAME(eth_dev) ((eth_dev)->device->name)
+
+#define ASSERT(x) do { \
+ if (!(x)) \
+ rte_panic("TXGBE: %s", #x); \
+} while (0)
+
+#define usec_delay(x) rte_delay_us(x)
+#define msec_delay(x) rte_delay_ms(x)
+#define usleep(x) rte_delay_us(x)
+#define msleep(x) rte_delay_ms(x)
+
+#define FALSE 0
+#define TRUE 1
+
+#define false 0
+#define true 1
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+/* Bunch of defines for shared code bogosity */
+
+static inline void UNREFERENCED(const char *a __rte_unused, ...) {}
+#define UNREFERENCED_PARAMETER(args...) UNREFERENCED("", ##args)
+
+#define STATIC static
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef int64_t s64;
+
+/* Little Endian defines */
+#ifndef __le16
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+#endif
+
+/* Bit shift and mask */
+#define BIT_MASK4 (0x0000000FU)
+#define BIT_MASK8 (0x000000FFU)
+#define BIT_MASK16 (0x0000FFFFU)
+#define BIT_MASK32 (0xFFFFFFFFU)
+#define BIT_MASK64 (0xFFFFFFFFFFFFFFFFUL)
+
+#ifndef cpu_to_le32
+#define cpu_to_le16(v) rte_cpu_to_le_16((u16)(v))
+#define cpu_to_le32(v) rte_cpu_to_le_32((u32)(v))
+#define cpu_to_le64(v) rte_cpu_to_le_64((u64)(v))
+#define le_to_cpu16(v) rte_le_to_cpu_16((u16)(v))
+#define le_to_cpu32(v) rte_le_to_cpu_32((u32)(v))
+#define le_to_cpu64(v) rte_le_to_cpu_64((u64)(v))
+
+#define cpu_to_be16(v) rte_cpu_to_be_16((u16)(v))
+#define cpu_to_be32(v) rte_cpu_to_be_32((u32)(v))
+#define cpu_to_be64(v) rte_cpu_to_be_64((u64)(v))
+#define be_to_cpu16(v) rte_be_to_cpu_16((u16)(v))
+#define be_to_cpu32(v) rte_be_to_cpu_32((u32)(v))
+#define be_to_cpu64(v) rte_be_to_cpu_64((u64)(v))
+
+#define le_to_be16(v) rte_bswap16((u16)(v))
+#define le_to_be32(v) rte_bswap32((u32)(v))
+#define le_to_be64(v) rte_bswap64((u64)(v))
+#define be_to_le16(v) rte_bswap16((u16)(v))
+#define be_to_le32(v) rte_bswap32((u32)(v))
+#define be_to_le64(v) rte_bswap64((u64)(v))
+
+#define npu_to_le16(v) (v)
+#define npu_to_le32(v) (v)
+#define npu_to_le64(v) (v)
+#define le_to_npu16(v) (v)
+#define le_to_npu32(v) (v)
+#define le_to_npu64(v) (v)
+
+#define npu_to_be16(v) le_to_be16((u16)(v))
+#define npu_to_be32(v) le_to_be32((u32)(v))
+#define npu_to_be64(v) le_to_be64((u64)(v))
+#define be_to_npu16(v) be_to_le16((u16)(v))
+#define be_to_npu32(v) be_to_le32((u32)(v))
+#define be_to_npu64(v) be_to_le64((u64)(v))
+#endif /* !cpu_to_le32 */
+
+static inline u16 REVERT_BIT_MASK16(u16 mask)
+{
+ mask = ((mask & 0x5555) << 1) | ((mask & 0xAAAA) >> 1);
+ mask = ((mask & 0x3333) << 2) | ((mask & 0xCCCC) >> 2);
+ mask = ((mask & 0x0F0F) << 4) | ((mask & 0xF0F0) >> 4);
+ return ((mask & 0x00FF) << 8) | ((mask & 0xFF00) >> 8);
+}
+
+static inline u32 REVERT_BIT_MASK32(u32 mask)
+{
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ mask = ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+ return ((mask & 0x0000FFFF) << 16) | ((mask & 0xFFFF0000) >> 16);
+}
+
+static inline u64 REVERT_BIT_MASK64(u64 mask)
+{
+ mask = ((mask & 0x5555555555555555) << 1) |
+ ((mask & 0xAAAAAAAAAAAAAAAA) >> 1);
+ mask = ((mask & 0x3333333333333333) << 2) |
+ ((mask & 0xCCCCCCCCCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F0F0F0F0F) << 4) |
+ ((mask & 0xF0F0F0F0F0F0F0F0) >> 4);
+ mask = ((mask & 0x00FF00FF00FF00FF) << 8) |
+ ((mask & 0xFF00FF00FF00FF00) >> 8);
+ mask = ((mask & 0x0000FFFF0000FFFF) << 16) |
+ ((mask & 0xFFFF0000FFFF0000) >> 16);
+ return ((mask & 0x00000000FFFFFFFF) << 32) |
+ ((mask & 0xFFFFFFFF00000000) >> 32);
+}
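+
+/*
+ * Usage sketch (illustrative only): REVERT_BIT_MASK16() mirrors the bit
+ * order of its argument, so bit 15 moves to bit 0 and vice versa:
+ *
+ *	REVERT_BIT_MASK16(0x8000) == 0x0001
+ *	REVERT_BIT_MASK16(0x0003) == 0xC000
+ */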
+
+#define mb() rte_mb()
+#define wmb() rte_wmb()
+#define rmb() rte_rmb()
+
+#ifndef __rte_weak
+#define __rte_weak __attribute__((__weak__))
+#endif
+
+#define IOMEM
+
+#define prefetch(x) rte_prefetch0(x)
+
+#define ARRAY_SIZE(x) ((int32_t)RTE_DIM(x))
+
+#ifndef MAX_UDELAY_MS
+#define MAX_UDELAY_MS 5
+#endif
+
+#define ETH_ADDR_LEN 6
+#define ETH_FCS_LEN 4
+
+/* Check whether an address is multicast. This is a little-endian specific check. */
+#define TXGBE_IS_MULTICAST(Address) \
+ (bool)(((u8 *)(Address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define TXGBE_IS_BROADCAST(Address) \
+ ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+ (((u8 *)(Address))[1] == ((u8)0xff)))
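+
+/*
+ * Usage sketch (illustrative only): both checks operate on a raw byte
+ * array. Note that TXGBE_IS_BROADCAST() only samples the first two bytes.
+ *
+ *	u8 mac[ETH_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ *
+ *	TXGBE_IS_MULTICAST(mac);	 (true: bit 0 of byte 0 is set)
+ *	TXGBE_IS_BROADCAST(mac);	 (true: first two bytes are 0xff)
+ */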
+
+#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8
+
+#endif /* _TXGBE_OS_H_ */
new file mode 100644
@@ -0,0 +1,2619 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_hw.h"
+#include "txgbe_eeprom.h"
+#include "txgbe_mng.h"
+#include "txgbe_phy.h"
+
+STATIC void txgbe_i2c_start(struct txgbe_hw *hw);
+STATIC void txgbe_i2c_stop(struct txgbe_hw *hw);
+
+/**
+ * txgbe_identify_extphy - Identify the external PHY at the default address
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if a PHY is found.
+ */
+static bool txgbe_identify_extphy(struct txgbe_hw *hw)
+{
+ u16 phy_addr = 0;
+
+ if (!txgbe_validate_phy_addr(hw, phy_addr)) {
+ DEBUGOUT("Unable to validate PHY address 0x%04X\n",
+ phy_addr);
+ return false;
+ }
+
+ if (txgbe_get_phy_id(hw))
+ return false;
+
+ hw->phy.type = txgbe_get_phy_type_from_id(hw->phy.id);
+ if (hw->phy.type == txgbe_phy_unknown) {
+ u16 ext_ability = 0;
+ hw->phy.read_reg(hw, TXGBE_MD_PHY_EXT_ABILITY,
+ TXGBE_MD_DEV_PMA_PMD,
+ &ext_ability);
+
+ if (ext_ability & (TXGBE_MD_PHY_10GBASET_ABILITY |
+ TXGBE_MD_PHY_1000BASET_ABILITY))
+ hw->phy.type = txgbe_phy_cu_unknown;
+ else
+ hw->phy.type = txgbe_phy_generic;
+ }
+
+ return true;
+}
+
+/**
+ * txgbe_read_phy_if - Read TXGBE_ETHPHYIF register
+ * @hw: pointer to hardware structure
+ *
+ * Reads the TXGBE_ETHPHYIF register and saves the management interface
+ * and PHY address fields.
+ **/
+static s32 txgbe_read_phy_if(struct txgbe_hw *hw)
+{
+ hw->phy.media_type = hw->phy.get_media_type(hw);
+
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ hw->phy.nw_mng_if_sel = rd32(hw, TXGBE_ETHPHYIF);
+
+ /* If MDIO is connected to external PHY, then set PHY address. */
+ if (hw->phy.nw_mng_if_sel & TXGBE_ETHPHYIF_MDIO_ACT) {
+ hw->phy.addr = TXGBE_ETHPHYIF_MDIO_BASE(hw->phy.nw_mng_if_sel);
+ }
+
+ /* Both LAN IDs use the same PHY semaphore. */
+ if (!hw->phy.phy_semaphore_mask)
+ hw->phy.phy_semaphore_mask = TXGBE_MNGSEM_SWPHY;
+
+ return 0;
+}
+
+/**
+ * txgbe_identify_phy - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ **/
+s32 txgbe_identify_phy(struct txgbe_hw *hw)
+{
+ s32 err = TXGBE_ERR_PHY_ADDR_INVALID;
+
+ DEBUGFUNC("txgbe_identify_phy");
+
+ txgbe_read_phy_if(hw);
+
+ if (hw->phy.type != txgbe_phy_unknown)
+ return 0;
+
+ /* Raptor 10GBASE-T requires an external PHY */
+ if (hw->phy.media_type == txgbe_media_type_copper) {
+ err = txgbe_identify_extphy(hw);
+ } else if (hw->phy.media_type == txgbe_media_type_fiber) {
+ err = txgbe_identify_module(hw);
+ } else {
+ hw->phy.type = txgbe_phy_none;
+ return 0;
+ }
+
+ /* Return error if SFP module has been detected but is not supported */
+ if (hw->phy.type == txgbe_phy_sfp_unsupported)
+ return TXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ return err;
+}
+
+/**
+ * txgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the STAT.MNGVETO bit to see if there are
+ * any constraints on link from manageability. For MACs that don't
+ * have this bit just return false since the link cannot be blocked
+ * via this method.
+ **/
+s32 txgbe_check_reset_blocked(struct txgbe_hw *hw)
+{
+ u32 mmngc;
+
+ DEBUGFUNC("txgbe_check_reset_blocked");
+
+ mmngc = rd32(hw, TXGBE_STAT);
+ if (mmngc & TXGBE_STAT_MNGVETO) {
+ DEBUGOUT("MNG_VETO bit detected.\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * txgbe_validate_phy_addr - Determines phy address is valid
+ * @hw: pointer to hardware structure
+ * @phy_addr: PHY address
+ *
+ **/
+bool txgbe_validate_phy_addr(struct txgbe_hw *hw, u32 phy_addr)
+{
+ u16 phy_id = 0;
+ bool valid = false;
+
+ DEBUGFUNC("txgbe_validate_phy_addr");
+
+ hw->phy.addr = phy_addr;
+ hw->phy.read_reg(hw, TXGBE_MD_PHY_ID_HIGH,
+ TXGBE_MD_DEV_PMA_PMD, &phy_id);
+
+ if (phy_id != 0xFFFF && phy_id != 0x0)
+ valid = true;
+
+ DEBUGOUT("PHY ID HIGH is 0x%04X\n", phy_id);
+
+ return valid;
+}
+
+/**
+ * txgbe_get_phy_id - Get the phy type
+ * @hw: pointer to hardware structure
+ *
+ **/
+s32 txgbe_get_phy_id(struct txgbe_hw *hw)
+{
+ u32 err;
+ u16 phy_id_high = 0;
+ u16 phy_id_low = 0;
+
+ DEBUGFUNC("txgbe_get_phy_id");
+
+ err = hw->phy.read_reg(hw, TXGBE_MD_PHY_ID_HIGH,
+ TXGBE_MD_DEV_PMA_PMD,
+ &phy_id_high);
+
+ if (err == 0) {
+ hw->phy.id = (u32)(phy_id_high << 16);
+ err = hw->phy.read_reg(hw, TXGBE_MD_PHY_ID_LOW,
+ TXGBE_MD_DEV_PMA_PMD,
+ &phy_id_low);
+ hw->phy.id |= (u32)(phy_id_low & TXGBE_PHY_REVISION_MASK);
+ hw->phy.revision = (u32)(phy_id_low & ~TXGBE_PHY_REVISION_MASK);
+ }
+ DEBUGOUT("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n",
+ phy_id_high, phy_id_low);
+
+ return err;
+}
+
+/**
+ * txgbe_get_phy_type_from_id - Get the phy type
+ * @phy_id: PHY ID information
+ *
+ **/
+enum txgbe_phy_type txgbe_get_phy_type_from_id(u32 phy_id)
+{
+ enum txgbe_phy_type phy_type;
+
+ DEBUGFUNC("txgbe_get_phy_type_from_id");
+
+ switch (phy_id) {
+ case TXGBE_PHYID_TN1010:
+ phy_type = txgbe_phy_tn;
+ break;
+ case TXGBE_PHYID_QT2022:
+ phy_type = txgbe_phy_qt;
+ break;
+ case TXGBE_PHYID_ATH:
+ phy_type = txgbe_phy_nl;
+ break;
+ case TXGBE_PHYID_MTD3310:
+ phy_type = txgbe_phy_cu_mtd;
+ break;
+ default:
+ phy_type = txgbe_phy_unknown;
+ break;
+ }
+
+ return phy_type;
+}
+
+static s32
+txgbe_reset_extphy(struct txgbe_hw *hw)
+{
+ u16 ctrl = 0;
+ int err, i;
+
+ err = hw->phy.read_reg(hw, TXGBE_MD_PORT_CTRL,
+ TXGBE_MD_DEV_GENERAL, &ctrl);
+ if (err != 0)
+ return err;
+ ctrl |= TXGBE_MD_PORT_CTRL_RESET;
+ err = hw->phy.write_reg(hw, TXGBE_MD_PORT_CTRL,
+ TXGBE_MD_DEV_GENERAL, ctrl);
+ if (err != 0)
+ return err;
+
+ /*
+ * Poll for reset bit to self-clear indicating reset is complete.
+ * Some PHYs could take up to 3 seconds to complete and need about
+ * 1.7 usec delay after the reset is complete.
+ */
+ for (i = 0; i < 30; i++) {
+ msec_delay(100);
+ err = hw->phy.read_reg(hw, TXGBE_MD_PORT_CTRL,
+ TXGBE_MD_DEV_GENERAL, &ctrl);
+ if (err != 0)
+ return err;
+
+ if (!(ctrl & TXGBE_MD_PORT_CTRL_RESET)) {
+ usec_delay(2);
+ break;
+ }
+ }
+
+ if (ctrl & TXGBE_MD_PORT_CTRL_RESET) {
+ err = TXGBE_ERR_RESET_FAILED;
+ DEBUGOUT("PHY reset polling failed to complete.\n");
+ }
+
+ return err;
+}
+
+/**
+ * txgbe_reset_phy - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 txgbe_reset_phy(struct txgbe_hw *hw)
+{
+ s32 err = 0;
+
+ DEBUGFUNC("txgbe_reset_phy");
+
+ if (hw->phy.type == txgbe_phy_unknown)
+ err = txgbe_identify_phy(hw);
+
+ if (err != 0 || hw->phy.type == txgbe_phy_none)
+ return err;
+
+ /* Don't reset PHY if it's shut down due to overtemp. */
+ if (hw->phy.check_overtemp(hw) == TXGBE_ERR_OVERTEMP)
+ return err;
+
+ /* Blocked by MNG FW so bail */
+ if (txgbe_check_reset_blocked(hw))
+ return err;
+
+ switch (hw->phy.type) {
+ case txgbe_phy_cu_mtd:
+ err = txgbe_reset_extphy(hw);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * txgbe_read_phy_mdi - Reads a value from a specified PHY register without
+ * the SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 txgbe_read_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ u32 command, data;
+
+ /* Setup and write the address cycle command */
+ command = TXGBE_MDIOSCA_REG(reg_addr) |
+ TXGBE_MDIOSCA_DEV(device_type) |
+ TXGBE_MDIOSCA_PORT(hw->phy.addr);
+ wr32(hw, TXGBE_MDIOSCA, command);
+
+ command = TXGBE_MDIOSCD_CMD_READ |
+ TXGBE_MDIOSCD_BUSY;
+ wr32(hw, TXGBE_MDIOSCD, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ if (!po32m(hw, TXGBE_MDIOSCD, TXGBE_MDIOSCD_BUSY,
+ 0, NULL, 100, 100)) {
+ DEBUGOUT("PHY address command did not complete\n");
+ return TXGBE_ERR_PHY;
+ }
+
+ data = rd32(hw, TXGBE_MDIOSCD);
+ *phy_data = (u16)TXGBE_MDIOSCD_DAT(data);
+
+ return 0;
+}
+
+/**
+ * txgbe_read_phy_reg - Reads a value from a specified PHY register
+ * using the SWFW lock - this function is needed in most cases
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 txgbe_read_phy_reg(struct txgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ s32 err;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ DEBUGFUNC("txgbe_read_phy_reg");
+
+ if (hw->mac.acquire_swfw_sync(hw, gssr))
+ return TXGBE_ERR_SWFW_SYNC;
+
+ err = hw->phy.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.release_swfw_sync(hw, gssr);
+
+ return err;
+}
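+
+/*
+ * Usage sketch (illustrative only): reading the high word of the PHY ID
+ * from the PMA/PMD device over MDIO, with the SWFW lock taken
+ * internally.
+ *
+ *	u16 phy_id_high = 0;
+ *
+ *	if (txgbe_read_phy_reg(hw, TXGBE_MD_PHY_ID_HIGH,
+ *			       TXGBE_MD_DEV_PMA_PMD, &phy_id_high) == 0)
+ *		DEBUGOUT("PHY ID HIGH = 0x%04x\n", phy_id_high);
+ */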
+
+/**
+ * txgbe_write_phy_reg_mdi - Writes a value to specified PHY register
+ * without SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 txgbe_write_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 command;
+
+ /* write command */
+ command = TXGBE_MDIOSCA_REG(reg_addr) |
+ TXGBE_MDIOSCA_DEV(device_type) |
+ TXGBE_MDIOSCA_PORT(hw->phy.addr);
+ wr32(hw, TXGBE_MDIOSCA, command);
+
+ command = TXGBE_MDIOSCD_CMD_WRITE |
+ TXGBE_MDIOSCD_DAT(phy_data) |
+ TXGBE_MDIOSCD_BUSY;
+ wr32(hw, TXGBE_MDIOSCD, command);
+
+ /* wait for completion */
+ if (!po32m(hw, TXGBE_MDIOSCD, TXGBE_MDIOSCD_BUSY,
+ 0, NULL, 100, 100)) {
+ TLOG_DEBUG("PHY write cmd didn't complete\n");
+ return -TERR_PHY;
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_write_phy_reg - Writes a value to specified PHY register
+ * using SWFW lock- this function is needed in most cases
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 txgbe_write_phy_reg(struct txgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ s32 err;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ DEBUGFUNC("txgbe_write_phy_reg");
+
+ if (hw->mac.acquire_swfw_sync(hw, gssr))
+ return TXGBE_ERR_SWFW_SYNC;
+
+ err = hw->phy.write_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.release_swfw_sync(hw, gssr);
+
+ return err;
+}
+
+/**
+ * txgbe_setup_phy_link - Set and restart auto-neg
+ * @hw: pointer to hardware structure
+ *
+ * Restart auto-negotiation and PHY and waits for completion.
+ **/
+s32 txgbe_setup_phy_link(struct txgbe_hw *hw)
+{
+ s32 err = 0;
+ u16 autoneg_reg = TXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ u32 speed;
+
+ DEBUGFUNC("txgbe_setup_phy_link");
+
+ txgbe_get_copper_link_capabilities(hw, &speed, &autoneg);
+
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.read_reg(hw, TXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ &autoneg_reg);
+
+ autoneg_reg &= ~TXGBE_MII_10GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL) &&
+ (speed & TXGBE_LINK_SPEED_10GB_FULL))
+ autoneg_reg |= TXGBE_MII_10GBASE_T_ADVERTISE;
+
+ hw->phy.write_reg(hw, TXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ autoneg_reg);
+
+ hw->phy.read_reg(hw, TXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ &autoneg_reg);
+
+ /* Set or unset auto-negotiation 5G advertisement */
+ autoneg_reg &= ~TXGBE_MII_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_5GB_FULL) &&
+ (speed & TXGBE_LINK_SPEED_5GB_FULL))
+ autoneg_reg |= TXGBE_MII_5GBASE_T_ADVERTISE;
+
+ /* Set or unset auto-negotiation 2.5G advertisement */
+ autoneg_reg &= ~TXGBE_MII_2_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised &
+ TXGBE_LINK_SPEED_2_5GB_FULL) &&
+ (speed & TXGBE_LINK_SPEED_2_5GB_FULL))
+ autoneg_reg |= TXGBE_MII_2_5GBASE_T_ADVERTISE;
+ /* Set or unset auto-negotiation 1G advertisement */
+ autoneg_reg &= ~TXGBE_MII_1GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL) &&
+ (speed & TXGBE_LINK_SPEED_1GB_FULL))
+ autoneg_reg |= TXGBE_MII_1GBASE_T_ADVERTISE;
+
+ hw->phy.write_reg(hw, TXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ autoneg_reg);
+
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.read_reg(hw, TXGBE_MII_AUTONEG_ADVERTISE_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ &autoneg_reg);
+
+ autoneg_reg &= ~(TXGBE_MII_100BASE_T_ADVERTISE |
+ TXGBE_MII_100BASE_T_ADVERTISE_HALF);
+ if ((hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100M_FULL) &&
+ (speed & TXGBE_LINK_SPEED_100M_FULL))
+ autoneg_reg |= TXGBE_MII_100BASE_T_ADVERTISE;
+
+ hw->phy.write_reg(hw, TXGBE_MII_AUTONEG_ADVERTISE_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ autoneg_reg);
+
+ /* Blocked by MNG FW so don't reset PHY */
+ if (txgbe_check_reset_blocked(hw))
+ return err;
+
+ /* Restart PHY auto-negotiation. */
+ hw->phy.read_reg(hw, TXGBE_MD_AUTO_NEG_CONTROL,
+ TXGBE_MD_DEV_AUTO_NEG, &autoneg_reg);
+
+ autoneg_reg |= TXGBE_MII_RESTART;
+
+ hw->phy.write_reg(hw, TXGBE_MD_AUTO_NEG_CONTROL,
+ TXGBE_MD_DEV_AUTO_NEG, autoneg_reg);
+
+ return err;
+}
+
+/**
+ * txgbe_setup_phy_link_speed - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: unused
+ **/
+s32 txgbe_setup_phy_link_speed(struct txgbe_hw *hw,
+ u32 speed,
+ bool autoneg_wait_to_complete)
+{
+ UNREFERENCED_PARAMETER(autoneg_wait_to_complete);
+
+ DEBUGFUNC("txgbe_setup_phy_link_speed");
+
+ /*
+ * Clear autoneg_advertised and set new values based on input link
+ * speed.
+ */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & TXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & TXGBE_LINK_SPEED_5GB_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_5GB_FULL;
+
+ if (speed & TXGBE_LINK_SPEED_2_5GB_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (speed & TXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & TXGBE_LINK_SPEED_100M_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_100M_FULL;
+
+ if (speed & TXGBE_LINK_SPEED_10M_FULL)
+ hw->phy.autoneg_advertised |= TXGBE_LINK_SPEED_10M_FULL;
+
+ /* Setup link based on the new speed settings */
+ hw->phy.setup_link(hw);
+
+ return 0;
+}
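+
+/*
+ * Usage sketch (illustrative only): restricting auto-negotiation to
+ * 10G and 1G full duplex. The second argument is a speed bitmask; the
+ * third is unused by this implementation.
+ *
+ *	txgbe_setup_phy_link_speed(hw,
+ *			TXGBE_LINK_SPEED_10GB_FULL |
+ *			TXGBE_LINK_SPEED_1GB_FULL,
+ *			false);
+ */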
+
+/**
+ * txgbe_get_copper_speeds_supported - Get copper link speeds from phy
+ * @hw: pointer to hardware structure
+ *
+ * Determines the supported link capabilities by reading the PHY auto
+ * negotiation register.
+ **/
+static s32 txgbe_get_copper_speeds_supported(struct txgbe_hw *hw)
+{
+ s32 err;
+ u16 speed_ability;
+
+ err = hw->phy.read_reg(hw, TXGBE_MD_PHY_SPEED_ABILITY,
+ TXGBE_MD_DEV_PMA_PMD,
+ &speed_ability);
+ if (err)
+ return err;
+
+ if (speed_ability & TXGBE_MD_PHY_SPEED_10G)
+ hw->phy.speeds_supported |= TXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & TXGBE_MD_PHY_SPEED_1G)
+ hw->phy.speeds_supported |= TXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & TXGBE_MD_PHY_SPEED_100M)
+ hw->phy.speeds_supported |= TXGBE_LINK_SPEED_100M_FULL;
+
+ return err;
+}
+
+/**
+ * txgbe_get_copper_link_capabilities - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ **/
+s32 txgbe_get_copper_link_capabilities(struct txgbe_hw *hw,
+ u32 *speed,
+ bool *autoneg)
+{
+ s32 err = 0;
+
+ DEBUGFUNC("txgbe_get_copper_link_capabilities");
+
+ *autoneg = true;
+ if (!hw->phy.speeds_supported)
+ err = txgbe_get_copper_speeds_supported(hw);
+
+ *speed = hw->phy.speeds_supported;
+ return err;
+}
+
+/**
+ * txgbe_check_phy_link_tnx - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: current link speed
+ * @link_up: true if link is up, false otherwise
+ *
+ * Reads the VS1 register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 txgbe_check_phy_link_tnx(struct txgbe_hw *hw, u32 *speed,
+ bool *link_up)
+{
+ s32 err = 0;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 phy_link = 0;
+ u16 phy_speed = 0;
+ u16 phy_data = 0;
+
+ DEBUGFUNC("txgbe_check_phy_link_tnx");
+
+ /* Initialize speed and link to default case */
+ *link_up = false;
+ *speed = TXGBE_LINK_SPEED_10GB_FULL;
+
+ /*
+ * Check current speed and link status of the PHY register.
+ * This is a vendor specific register and may have to
+ * be changed for other copper PHYs.
+ */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ usec_delay(10);
+ err = hw->phy.read_reg(hw,
+ TXGBE_MD_VENDOR_SPECIFIC_1_STATUS,
+ TXGBE_MD_DEV_VENDOR_1,
+ &phy_data);
+ phy_link = phy_data & TXGBE_MD_VENDOR_SPECIFIC_1_LINK_STATUS;
+ phy_speed = phy_data &
+ TXGBE_MD_VENDOR_SPECIFIC_1_SPEED_STATUS;
+ if (phy_link == TXGBE_MD_VENDOR_SPECIFIC_1_LINK_STATUS) {
+ *link_up = true;
+ if (phy_speed ==
+ TXGBE_MD_VENDOR_SPECIFIC_1_SPEED_STATUS)
+ *speed = TXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * txgbe_setup_phy_link_tnx - Set and restart auto-neg
+ * @hw: pointer to hardware structure
+ *
+ * Restart auto-negotiation and PHY and waits for completion.
+ **/
+s32 txgbe_setup_phy_link_tnx(struct txgbe_hw *hw)
+{
+ s32 err = 0;
+ u16 autoneg_reg = TXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ u32 speed;
+
+ DEBUGFUNC("txgbe_setup_phy_link_tnx");
+
+ txgbe_get_copper_link_capabilities(hw, &speed, &autoneg);
+
+ if (speed & TXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.read_reg(hw, TXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ &autoneg_reg);
+
+ autoneg_reg &= ~TXGBE_MII_10GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_10GB_FULL)
+ autoneg_reg |= TXGBE_MII_10GBASE_T_ADVERTISE;
+
+ hw->phy.write_reg(hw, TXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ autoneg_reg);
+ }
+
+ if (speed & TXGBE_LINK_SPEED_1GB_FULL) {
+ /* Set or unset auto-negotiation 1G advertisement */
+ hw->phy.read_reg(hw, TXGBE_MII_AUTONEG_XNP_TX_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ &autoneg_reg);
+
+ autoneg_reg &= ~TXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+ if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg |= TXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+
+ hw->phy.write_reg(hw, TXGBE_MII_AUTONEG_XNP_TX_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ autoneg_reg);
+ }
+
+ if (speed & TXGBE_LINK_SPEED_100M_FULL) {
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.read_reg(hw, TXGBE_MII_AUTONEG_ADVERTISE_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ &autoneg_reg);
+
+ autoneg_reg &= ~TXGBE_MII_100BASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & TXGBE_LINK_SPEED_100M_FULL)
+ autoneg_reg |= TXGBE_MII_100BASE_T_ADVERTISE;
+
+ hw->phy.write_reg(hw, TXGBE_MII_AUTONEG_ADVERTISE_REG,
+ TXGBE_MD_DEV_AUTO_NEG,
+ autoneg_reg);
+ }
+
+ /* Blocked by MNG FW so don't reset PHY */
+ if (txgbe_check_reset_blocked(hw))
+ return err;
+
+ /* Restart PHY auto-negotiation. */
+ hw->phy.read_reg(hw, TXGBE_MD_AUTO_NEG_CONTROL,
+ TXGBE_MD_DEV_AUTO_NEG, &autoneg_reg);
+
+ autoneg_reg |= TXGBE_MII_RESTART;
+
+ hw->phy.write_reg(hw, TXGBE_MD_AUTO_NEG_CONTROL,
+ TXGBE_MD_DEV_AUTO_NEG, autoneg_reg);
+
+ return err;
+}
+
+/**
+ * txgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 txgbe_get_phy_firmware_version_tnx(struct txgbe_hw *hw,
+ u32 *firmware_version)
+{
+ u16 fw_rev_lo;
+ s32 err;
+
+ DEBUGFUNC("txgbe_get_phy_firmware_version_tnx");
+
+ err = hw->phy.read_reg(hw, TNX_FW_REV,
+ TXGBE_MD_DEV_VENDOR_1,
+ &fw_rev_lo);
+
+ *firmware_version = fw_rev_lo;
+
+ return err;
+}
+
+/**
+ * txgbe_get_phy_firmware_version - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 txgbe_get_phy_firmware_version(struct txgbe_hw *hw,
+ u32 *firmware_version)
+{
+ u16 fw_rev_lo, fw_rev_hi;
+ s32 err;
+
+ DEBUGFUNC("txgbe_get_phy_firmware_version");
+
+ err = hw->phy.read_reg(hw, TXGBE_MD_FW_REV_LO,
+ TXGBE_MD_DEV_PMA_PMD,
+ &fw_rev_lo);
+ if (err)
+ return err;
+
+ err = hw->phy.read_reg(hw, TXGBE_MD_FW_REV_HI,
+ TXGBE_MD_DEV_PMA_PMD,
+ &fw_rev_hi);
+ if (err)
+ return err;
+
+ *firmware_version = ((u32)fw_rev_hi << 16) | fw_rev_lo;
+ return 0;
+}
+
+/**
+ * txgbe_reset_phy_nl - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 txgbe_reset_phy_nl(struct txgbe_hw *hw)
+{
+ u16 phy_offset, control, eword, edata, block_crc;
+ bool end_data = false;
+ u16 list_offset, data_offset;
+ u16 phy_data = 0;
+ s32 ret_val = 0;
+ u32 i;
+
+ DEBUGFUNC("txgbe_reset_phy_nl");
+
+ /* Blocked by MNG FW so bail */
+ if (txgbe_check_reset_blocked(hw))
+ goto out;
+
+ hw->phy.read_reg(hw, TXGBE_MD_PHY_XS_CONTROL,
+ TXGBE_MD_DEV_PHY_XS, &phy_data);
+
+ /* reset the PHY and poll for completion */
+ hw->phy.write_reg(hw, TXGBE_MD_PHY_XS_CONTROL,
+ TXGBE_MD_DEV_PHY_XS,
+ (phy_data | TXGBE_MD_PHY_XS_RESET));
+
+ for (i = 0; i < 100; i++) {
+ hw->phy.read_reg(hw, TXGBE_MD_PHY_XS_CONTROL,
+ TXGBE_MD_DEV_PHY_XS, &phy_data);
+ if ((phy_data & TXGBE_MD_PHY_XS_RESET) == 0)
+ break;
+ msec_delay(10);
+ }
+
+ if ((phy_data & TXGBE_MD_PHY_XS_RESET) != 0) {
+ DEBUGOUT("PHY reset did not complete.\n");
+ ret_val = TXGBE_ERR_PHY;
+ goto out;
+ }
+
+ /* Get init offsets */
+ ret_val = txgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+ &data_offset);
+ if (ret_val != 0)
+ goto out;
+
+ ret_val = hw->rom.read16(hw, data_offset, &block_crc);
+ data_offset++;
+ while (!end_data) {
+ /*
+ * Read control word from PHY init contents offset
+ */
+ ret_val = hw->rom.read16(hw, data_offset, &eword);
+ if (ret_val)
+ goto err_eeprom;
+ control = (eword & TXGBE_EE_CONTROL_MASK_NL) >>
+ TXGBE_EE_CONTROL_SHIFT_NL;
+ edata = eword & TXGBE_EE_DATA_MASK_NL;
+ switch (control) {
+ case TXGBE_EE_DELAY_NL:
+ data_offset++;
+ DEBUGOUT("DELAY: %d MS\n", edata);
+ msec_delay(edata);
+ break;
+ case TXGBE_EE_DATA_NL:
+ DEBUGOUT("DATA:\n");
+ data_offset++;
+ ret_val = hw->rom.read16(hw, data_offset,
+ &phy_offset);
+ if (ret_val)
+ goto err_eeprom;
+ data_offset++;
+ for (i = 0; i < edata; i++) {
+ ret_val = hw->rom.read16(hw, data_offset,
+ &eword);
+ if (ret_val)
+ goto err_eeprom;
+ hw->phy.write_reg(hw, phy_offset,
+ TXGBE_MD_DEV_PMA_PMD, eword);
+ DEBUGOUT("Wrote %4.4x to %4.4x\n", eword,
+ phy_offset);
+ data_offset++;
+ phy_offset++;
+ }
+ break;
+ case TXGBE_EE_CONTROL_NL:
+ data_offset++;
+ DEBUGOUT("CONTROL:\n");
+ if (edata == TXGBE_EE_CONTROL_EOL_NL) {
+ DEBUGOUT("EOL\n");
+ end_data = true;
+ } else if (edata == TXGBE_EE_CONTROL_SOL_NL) {
+ DEBUGOUT("SOL\n");
+ } else {
+ DEBUGOUT("Bad control value\n");
+ ret_val = TXGBE_ERR_PHY;
+ goto out;
+ }
+ break;
+ default:
+ DEBUGOUT("Bad control type\n");
+ ret_val = TXGBE_ERR_PHY;
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+
+err_eeprom:
+ DEBUGOUT("eeprom read at offset %d failed", data_offset);
+ return TXGBE_ERR_PHY;
+}
+
+/**
+ * txgbe_identify_module - Identifies module type
+ * @hw: pointer to hardware structure
+ *
+ * Determines the module media type and calls the matching identify routine.
+ **/
+s32 txgbe_identify_module(struct txgbe_hw *hw)
+{
+ s32 err = TXGBE_ERR_SFP_NOT_PRESENT;
+
+ DEBUGFUNC("txgbe_identify_module");
+
+ switch (hw->phy.media_type) {
+ case txgbe_media_type_fiber:
+ err = txgbe_identify_sfp_module(hw);
+ break;
+
+ case txgbe_media_type_fiber_qsfp:
+ err = txgbe_identify_qsfp_module(hw);
+ break;
+
+ default:
+ hw->phy.sfp_type = txgbe_sfp_type_not_present;
+ err = TXGBE_ERR_SFP_NOT_PRESENT;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * txgbe_identify_sfp_module - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the SFP module and assigns appropriate PHY type.
+ **/
+s32 txgbe_identify_sfp_module(struct txgbe_hw *hw)
+{
+ s32 err = TXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum txgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u8 cable_tech = 0;
+ u8 cable_spec = 0;
+ u16 enforce_sfp = 0;
+
+ DEBUGFUNC("txgbe_identify_sfp_module");
+
+ if (hw->phy.media_type != txgbe_media_type_fiber) {
+ hw->phy.sfp_type = txgbe_sfp_type_not_present;
+ return TXGBE_ERR_SFP_NOT_PRESENT;
+ }
+
+ err = hw->phy.read_i2c_eeprom(hw, TXGBE_SFF_IDENTIFIER,
+ &identifier);
+ if (err != 0) {
+ERR_I2C:
+ hw->phy.sfp_type = txgbe_sfp_type_not_present;
+ if (hw->phy.type != txgbe_phy_nl) {
+ hw->phy.id = 0;
+ hw->phy.type = txgbe_phy_unknown;
+ }
+ return TXGBE_ERR_SFP_NOT_PRESENT;
+ }
+
+ if (identifier != TXGBE_SFF_IDENTIFIER_SFP) {
+ hw->phy.type = txgbe_phy_sfp_unsupported;
+ return TXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ err = hw->phy.read_i2c_eeprom(hw, TXGBE_SFF_1GBE_COMP_CODES,
+ &comp_codes_1g);
+ if (err != 0)
+ goto ERR_I2C;
+
+ err = hw->phy.read_i2c_eeprom(hw, TXGBE_SFF_10GBE_COMP_CODES,
+ &comp_codes_10g);
+ if (err != 0)
+ goto ERR_I2C;
+
+ err = hw->phy.read_i2c_eeprom(hw, TXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
+ if (err != 0)
+ goto ERR_I2C;
+
+ /* ID Module
+ * =========
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CORE0 - chip-specific
+ * 4 SFP_DA_CORE1 - chip-specific
+ * 5 SFP_SR/LR_CORE0 - chip-specific
+ * 6 SFP_SR/LR_CORE1 - chip-specific
+ * 7 SFP_act_lmt_DA_CORE0 - chip-specific
+ * 8 SFP_act_lmt_DA_CORE1 - chip-specific
+ * 9 SFP_1g_cu_CORE0 - chip-specific
+ * 10 SFP_1g_cu_CORE1 - chip-specific
+ * 11 SFP_1g_sx_CORE0 - chip-specific
+ * 12 SFP_1g_sx_CORE1 - chip-specific
+ */
+ if (cable_tech & TXGBE_SFF_CABLE_DA_PASSIVE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = txgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type = txgbe_sfp_type_da_cu_core1;
+ } else if (cable_tech & TXGBE_SFF_CABLE_DA_ACTIVE) {
+ err = hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_CABLE_SPEC_COMP, &cable_spec);
+ if (err != 0)
+ goto ERR_I2C;
+ if (cable_spec & TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+ hw->phy.sfp_type = (hw->bus.lan_id == 0
+ ? txgbe_sfp_type_da_act_lmt_core0
+ : txgbe_sfp_type_da_act_lmt_core1);
+ } else {
+ hw->phy.sfp_type = txgbe_sfp_type_unknown;
+ }
+ } else if (comp_codes_10g &
+ (TXGBE_SFF_10GBASESR_CAPABLE |
+ TXGBE_SFF_10GBASELR_CAPABLE)) {
+ hw->phy.sfp_type = (hw->bus.lan_id == 0
+ ? txgbe_sfp_type_srlr_core0
+ : txgbe_sfp_type_srlr_core1);
+ } else if (comp_codes_1g & TXGBE_SFF_1GBASET_CAPABLE) {
+ hw->phy.sfp_type = (hw->bus.lan_id == 0
+ ? txgbe_sfp_type_1g_cu_core0
+ : txgbe_sfp_type_1g_cu_core1);
+ } else if (comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) {
+ hw->phy.sfp_type = (hw->bus.lan_id == 0
+ ? txgbe_sfp_type_1g_sx_core0
+ : txgbe_sfp_type_1g_sx_core1);
+ } else if (comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) {
+ hw->phy.sfp_type = (hw->bus.lan_id == 0
+ ? txgbe_sfp_type_1g_lx_core0
+ : txgbe_sfp_type_1g_lx_core1);
+ } else {
+ hw->phy.sfp_type = txgbe_sfp_type_unknown;
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the SFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor */
+ if (hw->phy.type != txgbe_phy_nl) {
+ hw->phy.id = identifier;
+ err = hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_VENDOR_OUI_BYTE0, &oui_bytes[0]);
+ if (err != 0)
+ goto ERR_I2C;
+
+ err = hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_VENDOR_OUI_BYTE1, &oui_bytes[1]);
+ if (err != 0)
+ goto ERR_I2C;
+
+ err = hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_VENDOR_OUI_BYTE2, &oui_bytes[2]);
+ if (err != 0)
+ goto ERR_I2C;
+
+ vendor_oui = ((u32)oui_bytes[0] << 24) |
+ ((u32)oui_bytes[1] << 16) |
+ ((u32)oui_bytes[2] << 8);
+ switch (vendor_oui) {
+ case TXGBE_SFF_VENDOR_OUI_TYCO:
+ if (cable_tech & TXGBE_SFF_CABLE_DA_PASSIVE)
+ hw->phy.type = txgbe_phy_sfp_tyco_passive;
+ break;
+ case TXGBE_SFF_VENDOR_OUI_FTL:
+ if (cable_tech & TXGBE_SFF_CABLE_DA_ACTIVE)
+ hw->phy.type = txgbe_phy_sfp_ftl_active;
+ else
+ hw->phy.type = txgbe_phy_sfp_ftl;
+ break;
+ case TXGBE_SFF_VENDOR_OUI_AVAGO:
+ hw->phy.type = txgbe_phy_sfp_avago;
+ break;
+ case TXGBE_SFF_VENDOR_OUI_INTEL:
+ hw->phy.type = txgbe_phy_sfp_intel;
+ break;
+ default:
+ if (cable_tech & TXGBE_SFF_CABLE_DA_PASSIVE)
+ hw->phy.type = txgbe_phy_sfp_unknown_passive;
+ else if (cable_tech & TXGBE_SFF_CABLE_DA_ACTIVE)
+ hw->phy.type = txgbe_phy_sfp_unknown_active;
+ else
+ hw->phy.type = txgbe_phy_sfp_unknown;
+ break;
+ }
+ }
+
+ /* Allow any DA cable vendor */
+ if (cable_tech & (TXGBE_SFF_CABLE_DA_PASSIVE |
+ TXGBE_SFF_CABLE_DA_ACTIVE)) {
+ return 0;
+ }
+
+ /* Verify supported 1G SFP modules */
+ if (comp_codes_10g == 0 &&
+ !(hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.type = txgbe_phy_sfp_unsupported;
+ return TXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ hw->mac.get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & TXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+ !hw->allow_unsupported_sfp &&
+ !(hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_1g_sx_core1)) {
+ DEBUGOUT("SFP+ module not supported\n");
+ hw->phy.type = txgbe_phy_sfp_unsupported;
+ return TXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return err;
+}
+
+/**
+ * txgbe_get_supported_phy_sfp_layer - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current SFP.
+ */
+u64 txgbe_get_supported_phy_sfp_layer(struct txgbe_hw *hw)
+{
+ u64 physical_layer = TXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+
+ DEBUGFUNC("txgbe_get_supported_phy_sfp_layer");
+
+ hw->phy.identify_sfp(hw);
+ if (hw->phy.sfp_type == txgbe_sfp_type_not_present)
+ return physical_layer;
+
+ switch (hw->phy.type) {
+ case txgbe_phy_sfp_tyco_passive:
+ case txgbe_phy_sfp_unknown_passive:
+ case txgbe_phy_qsfp_unknown_passive:
+ physical_layer = TXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case txgbe_phy_sfp_ftl_active:
+ case txgbe_phy_sfp_unknown_active:
+ case txgbe_phy_qsfp_unknown_active:
+ physical_layer = TXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+ break;
+ case txgbe_phy_sfp_avago:
+ case txgbe_phy_sfp_ftl:
+ case txgbe_phy_sfp_intel:
+ case txgbe_phy_sfp_unknown:
+ hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+ hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
+ if (comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = TXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = TXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ else if (comp_codes_1g & TXGBE_SFF_1GBASET_CAPABLE)
+ physical_layer = TXGBE_PHYSICAL_LAYER_1000BASE_T;
+ else if (comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE)
+ physical_layer = TXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ break;
+ case txgbe_phy_qsfp_intel:
+ case txgbe_phy_qsfp_unknown:
+ hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+ if (comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = TXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = TXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ break;
+ }
+
+ return physical_layer;
+}
+
+/**
+ * txgbe_identify_qsfp_module - Identifies QSFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw)
+{
+ s32 err = TXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum txgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u16 enforce_sfp = 0;
+ u8 connector = 0;
+ u8 cable_length = 0;
+ u8 device_tech = 0;
+ bool active_cable = false;
+
+ DEBUGFUNC("txgbe_identify_qsfp_module");
+
+ if (hw->phy.media_type != txgbe_media_type_fiber_qsfp) {
+ hw->phy.sfp_type = txgbe_sfp_type_not_present;
+ err = TXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ err = hw->phy.read_i2c_eeprom(hw, TXGBE_SFF_IDENTIFIER,
+ &identifier);
+ERR_I2C:
+ if (err != 0) {
+ hw->phy.sfp_type = txgbe_sfp_type_not_present;
+ hw->phy.id = 0;
+ hw->phy.type = txgbe_phy_unknown;
+ return TXGBE_ERR_SFP_NOT_PRESENT;
+ }
+ if (identifier != TXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+ hw->phy.type = txgbe_phy_sfp_unsupported;
+ err = TXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ hw->phy.id = identifier;
+
+ err = hw->phy.read_i2c_eeprom(hw, TXGBE_SFF_QSFP_10GBE_COMP,
+ &comp_codes_10g);
+
+ if (err != 0)
+ goto ERR_I2C;
+
+ err = hw->phy.read_i2c_eeprom(hw, TXGBE_SFF_QSFP_1GBE_COMP,
+ &comp_codes_1g);
+
+ if (err != 0)
+ goto ERR_I2C;
+
+ if (comp_codes_10g & TXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
+ hw->phy.type = txgbe_phy_qsfp_unknown_passive;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = txgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type = txgbe_sfp_type_da_cu_core1;
+ } else if (comp_codes_10g & (TXGBE_SFF_10GBASESR_CAPABLE |
+ TXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = txgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type = txgbe_sfp_type_srlr_core1;
+ } else {
+ if (comp_codes_10g & TXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
+ active_cable = true;
+
+ if (!active_cable) {
+ hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_QSFP_CONNECTOR,
+ &connector);
+
+ hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_QSFP_CABLE_LENGTH,
+ &cable_length);
+
+ hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_QSFP_DEVICE_TECH,
+ &device_tech);
+
+ if ((connector ==
+ TXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
+ (cable_length > 0) &&
+ ((device_tech >> 4) ==
+ TXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
+ active_cable = true;
+ }
+
+ if (active_cable) {
+ hw->phy.type = txgbe_phy_qsfp_unknown_active;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ txgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ txgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ /* unsupported module type */
+ hw->phy.type = txgbe_phy_sfp_unsupported;
+ err = TXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the QSFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & TXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & TXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & TXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & TXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor for optical modules */
+ if (comp_codes_10g & (TXGBE_SFF_10GBASESR_CAPABLE |
+ TXGBE_SFF_10GBASELR_CAPABLE)) {
+ err = hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (err != 0)
+ goto ERR_I2C;
+
+ err = hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (err != 0)
+ goto ERR_I2C;
+
+ err = hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (err != 0)
+ goto ERR_I2C;
+
+ vendor_oui =
+ ((oui_bytes[0] << 24) |
+ (oui_bytes[1] << 16) |
+ (oui_bytes[2] << 8));
+
+ if (vendor_oui == TXGBE_SFF_VENDOR_OUI_INTEL)
+ hw->phy.type = txgbe_phy_qsfp_intel;
+ else
+ hw->phy.type = txgbe_phy_qsfp_unknown;
+
+ hw->mac.get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & TXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == txgbe_phy_qsfp_intel) {
+ err = 0;
+ } else {
+ if (hw->allow_unsupported_sfp == true) {
+ DEBUGOUT(
+ "WARNING: Wangxun (R) Network Connections are quality tested using Wangxun (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Wangxun Corporation is not responsible for any harm caused by using untested modules.\n");
+ err = 0;
+ } else {
+ DEBUGOUT("QSFP module not supported\n");
+ hw->phy.type =
+ txgbe_phy_sfp_unsupported;
+ err = TXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ err = 0;
+ }
+ }
+
+out:
+ return err;
+}
+
+/**
+ * txgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
+ * @hw: pointer to hardware structure
+ * @list_offset: offset to the SFP ID list
+ * @data_offset: offset to the SFP data block
+ *
+ * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
+ * so it returns the offsets to the phy init sequence block.
+ **/
+s32 txgbe_get_sfp_init_sequence_offsets(struct txgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset)
+{
+ u16 sfp_id;
+ u16 sfp_type = hw->phy.sfp_type;
+
+ DEBUGFUNC("txgbe_get_sfp_init_sequence_offsets");
+
+ if (hw->phy.sfp_type == txgbe_sfp_type_unknown)
+ return TXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ if (hw->phy.sfp_type == txgbe_sfp_type_not_present)
+ return TXGBE_ERR_SFP_NOT_PRESENT;
+
+ /*
+ * Limiting active cables and 1G Phys must be initialized as
+ * SR modules
+ */
+ if (sfp_type == txgbe_sfp_type_da_act_lmt_core0 ||
+ sfp_type == txgbe_sfp_type_1g_lx_core0 ||
+ sfp_type == txgbe_sfp_type_1g_cu_core0 ||
+ sfp_type == txgbe_sfp_type_1g_sx_core0)
+ sfp_type = txgbe_sfp_type_srlr_core0;
+ else if (sfp_type == txgbe_sfp_type_da_act_lmt_core1 ||
+ sfp_type == txgbe_sfp_type_1g_lx_core1 ||
+ sfp_type == txgbe_sfp_type_1g_cu_core1 ||
+ sfp_type == txgbe_sfp_type_1g_sx_core1)
+ sfp_type = txgbe_sfp_type_srlr_core1;
+
+ /* Read offset to PHY init contents */
+ if (hw->rom.read16(hw, TXGBE_EE_PHY_INIT_OFFSET_NL, list_offset)) {
+ DEBUGOUT("eeprom read at offset %d failed",
+ TXGBE_EE_PHY_INIT_OFFSET_NL);
+ return TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ }
+
+ if ((!*list_offset) || (*list_offset == 0xFFFF))
+ return TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+
+ /* Shift offset to first ID word */
+ (*list_offset)++;
+
+ /*
+ * Find the matching SFP ID in the EEPROM
+ * and program the init sequence
+ */
+ if (hw->rom.read16(hw, *list_offset, &sfp_id))
+ goto err_phy;
+
+ while (sfp_id != TXGBE_EE_PHY_INIT_END_NL) {
+ if (sfp_id == sfp_type) {
+ (*list_offset)++;
+ if (hw->rom.read16(hw, *list_offset, data_offset))
+ goto err_phy;
+ if ((!*data_offset) || (*data_offset == 0xFFFF)) {
+ DEBUGOUT("SFP+ module not supported\n");
+ return TXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ break;
+ }
+ } else {
+ (*list_offset) += 2;
+ if (hw->rom.read16(hw, *list_offset, &sfp_id))
+ goto err_phy;
+ }
+ }
+
+ if (sfp_id == TXGBE_EE_PHY_INIT_END_NL) {
+ DEBUGOUT("No matching SFP+ module found\n");
+ return TXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return 0;
+
+err_phy:
+ DEBUGOUT("eeprom read at offset %d failed", *list_offset);
+ return TXGBE_ERR_PHY;
+}
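+
+/*
+ * Example EEPROM layout (illustrative only) as walked by the function
+ * above: TXGBE_EE_PHY_INIT_OFFSET_NL points at a list of
+ * (sfp_id, data_offset) word pairs terminated by TXGBE_EE_PHY_INIT_END_NL;
+ * data_offset in turn points at a block_crc word followed by the
+ * control/data words consumed by txgbe_reset_phy_nl().
+ */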
+
+/**
+ * txgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ DEBUGFUNC("txgbe_read_i2c_eeprom");
+
+ return hw->phy.read_i2c_byte(hw, byte_offset,
+ TXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
+}
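+
+/*
+ * Usage sketch (illustrative only): reading the SFF identifier byte of a
+ * module's EEPROM, as done by txgbe_identify_sfp_module().
+ *
+ *	u8 identifier = 0;
+ *
+ *	if (txgbe_read_i2c_eeprom(hw, TXGBE_SFF_IDENTIFIER,
+ *				  &identifier) == 0 &&
+ *	    identifier == TXGBE_SFF_IDENTIFIER_SFP)
+ *		DEBUGOUT("SFP module present\n");
+ */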
+
+/**
+ * txgbe_read_i2c_sff8472 - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return hw->phy.read_i2c_byte(hw, byte_offset,
+ TXGBE_I2C_EEPROM_DEV_ADDR2,
+ sff8472_data);
+}
+
+/**
+ * txgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data)
+{
+ DEBUGFUNC("txgbe_write_i2c_eeprom");
+
+ return hw->phy.write_i2c_byte(hw, byte_offset,
+ TXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
+}
+
+/**
+ * txgbe_read_i2c_byte_unlocked - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 txgbe_read_i2c_byte_unlocked(struct txgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
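+ /* dev_addr is unused: the target address is fixed to
+ * TXGBE_I2C_SLAVEADDR via TXGBE_I2CTAR in txgbe_i2c_start()
+ */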
+ UNREFERENCED_PARAMETER(dev_addr);
+
+ DEBUGFUNC("txgbe_read_i2c_byte");
+
+ txgbe_i2c_start(hw);
+
+ /* wait tx empty */
+ if (!po32m(hw, TXGBE_I2CICR, TXGBE_I2CICR_TXEMPTY,
+ TXGBE_I2CICR_TXEMPTY, NULL, 100, 100)) {
+ return -TERR_TIMEOUT;
+ }
+
+ /* read data */
+ wr32(hw, TXGBE_I2CDATA,
+ byte_offset | TXGBE_I2CDATA_STOP);
+ wr32(hw, TXGBE_I2CDATA, TXGBE_I2CDATA_READ);
+
+ /* wait for read complete */
+ if (!po32m(hw, TXGBE_I2CICR, TXGBE_I2CICR_RXFULL,
+ TXGBE_I2CICR_RXFULL, NULL, 100, 100)) {
+ return -TERR_TIMEOUT;
+ }
+
+ txgbe_i2c_stop(hw);
+
+ *data = 0xFF & rd32(hw, TXGBE_I2CDATA);
+
+ return 0;
+}
+
+/**
+ * txgbe_read_i2c_byte - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int err = 0;
+
+ if (hw->mac.acquire_swfw_sync(hw, swfw_mask))
+ return TXGBE_ERR_SWFW_SYNC;
+ err = txgbe_read_i2c_byte_unlocked(hw, byte_offset, dev_addr, data);
+ hw->mac.release_swfw_sync(hw, swfw_mask);
+ return err;
+}
+
+/**
+ * txgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 txgbe_write_i2c_byte_unlocked(struct txgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ UNREFERENCED_PARAMETER(dev_addr);
+
+ DEBUGFUNC("txgbe_write_i2c_byte");
+
+ txgbe_i2c_start(hw);
+
+ /* wait tx empty */
+ if (!po32m(hw, TXGBE_I2CICR, TXGBE_I2CICR_TXEMPTY,
+ TXGBE_I2CICR_TXEMPTY, NULL, 100, 100)) {
+ return -TERR_TIMEOUT;
+ }
+
+ wr32(hw, TXGBE_I2CDATA, byte_offset | TXGBE_I2CDATA_STOP);
+ wr32(hw, TXGBE_I2CDATA, data | TXGBE_I2CDATA_WRITE);
+
+ /* wait for write complete */
+ if (!po32m(hw, TXGBE_I2CICR, TXGBE_I2CICR_RXFULL,
+ TXGBE_I2CICR_RXFULL, NULL, 100, 100)) {
+ return -TERR_TIMEOUT;
+ }
+ txgbe_i2c_stop(hw);
+
+ return 0;
+}
+
+/**
+ * txgbe_write_i2c_byte - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int err = 0;
+
+ if (hw->mac.acquire_swfw_sync(hw, swfw_mask))
+ return TXGBE_ERR_SWFW_SYNC;
+ err = txgbe_write_i2c_byte_unlocked(hw, byte_offset, dev_addr, data);
+ hw->mac.release_swfw_sync(hw, swfw_mask);
+
+ return err;
+}
+
+/**
+ * txgbe_i2c_start - Starts an I2C transaction
+ * @hw: pointer to hardware structure
+ *
+ * Configures and enables the I2C master to begin a new transfer
+ **/
+STATIC void txgbe_i2c_start(struct txgbe_hw *hw)
+{
+ DEBUGFUNC("txgbe_i2c_start");
+
+ wr32(hw, TXGBE_I2CENA, 0);
+
+ wr32(hw, TXGBE_I2CCON,
+ (TXGBE_I2CCON_MENA |
+ TXGBE_I2CCON_SPEED(1) |
+ TXGBE_I2CCON_RESTART |
+ TXGBE_I2CCON_SDIA));
+ wr32(hw, TXGBE_I2CTAR, TXGBE_I2C_SLAVEADDR);
+ wr32(hw, TXGBE_I2CSSSCLHCNT, 600);
+ wr32(hw, TXGBE_I2CSSSCLLCNT, 600);
+ wr32(hw, TXGBE_I2CRXTL, 0); /* 1byte for rx full signal */
+ wr32(hw, TXGBE_I2CTXTL, 4);
+ wr32(hw, TXGBE_I2CSCLTMOUT, 0xFFFFFF);
+ wr32(hw, TXGBE_I2CSDATMOUT, 0xFFFFFF);
+
+ wr32(hw, TXGBE_I2CICM, 0);
+ wr32(hw, TXGBE_I2CENA, 1);
+}
+
+/**
+ * txgbe_i2c_stop - Ends an I2C transaction
+ * @hw: pointer to hardware structure
+ *
+ * Waits for the current transfer to complete, then disables the I2C master
+ **/
+STATIC void txgbe_i2c_stop(struct txgbe_hw *hw)
+{
+ DEBUGFUNC("txgbe_i2c_stop");
+
+ /* wait for completion */
+ if (!po32m(hw, TXGBE_I2CSTAT, TXGBE_I2CSTAT_MST,
+ 0, NULL, 100, 100)) {
+ DEBUGFUNC("i2c stop timeout.");
+ }
+
+ wr32(hw, TXGBE_I2CENA, 0);
+}
+
+/**
+ * txgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ * @hw: pointer to hardware structure
+ *
+ * Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw)
+{
+ s32 err = 0;
+ u16 phy_data = 0;
+
+ DEBUGFUNC("txgbe_tn_check_overtemp");
+
+ if (hw->device_id != TXGBE_DEV_ID_RAPTOR_T3_LOM)
+ goto out;
+
+ /* Check that the LASI temp alarm status was triggered */
+ hw->phy.read_reg(hw, TXGBE_TN_LASI_STATUS_REG,
+ TXGBE_MD_DEV_PMA_PMD, &phy_data);
+
+ if (!(phy_data & TXGBE_TN_LASI_STATUS_TEMP_ALARM))
+ goto out;
+
+ err = TXGBE_ERR_OVERTEMP;
+ DEBUGOUT("Device over temperature");
+out:
+ return err;
+}
+
+/**
+ * txgbe_set_copper_phy_power - Control power for copper phy
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ */
+s32 txgbe_set_copper_phy_power(struct txgbe_hw *hw, bool on)
+{
+ u32 err;
+ u16 reg;
+
+ if (!on && txgbe_mng_present(hw))
+ return 0;
+
+ err = hw->phy.read_reg(hw, TXGBE_MD_VENDOR_SPECIFIC_1_CONTROL,
+ TXGBE_MD_DEV_VENDOR_1,
+ &reg);
+ if (err)
+ return err;
+
+ if (on) {
+ reg &= ~TXGBE_MD_PHY_SET_LOW_POWER_MODE;
+ } else {
+ if (txgbe_check_reset_blocked(hw))
+ return 0;
+ reg |= TXGBE_MD_PHY_SET_LOW_POWER_MODE;
+ }
+
+ err = hw->phy.write_reg(hw, TXGBE_MD_VENDOR_SPECIFIC_1_CONTROL,
+ TXGBE_MD_DEV_VENDOR_1,
+ reg);
+ return err;
+}
+
+static s32
+txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw)
+{
+ u32 value;
+
+ wr32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_CTL1, 0x3002);
+ wr32_epcs(hw, SR_MII_MMD_AN_CTL, 0x0105);
+ wr32_epcs(hw, SR_MII_MMD_DIGI_CTL, 0x0200);
+ value = rd32_epcs(hw, SR_MII_MMD_CTL);
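+ /* enable AN (bit 12) and restart AN (bit 9) in the MII control word */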
+ value = (value & ~0x1200) | (0x1 << 12) | (0x1 << 9);
+ wr32_epcs(hw, SR_MII_MMD_CTL, value);
+ return 0;
+}
+
+static s32
+txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg)
+{
+ u32 i;
+ s32 err = 0;
+
+ /* 1. Wait xpcs power-up good */
+ for (i = 0; i < 100; i++) {
+ if ((rd32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_STATUS) &
+ VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) ==
+ VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD)
+ break;
+ msleep(10);
+ }
+ if (i == 100) {
+ err = TXGBE_ERR_XPCS_POWER_UP_FAILED;
+ goto out;
+ }
+
+ if (!autoneg) {
+ /* 2. Disable xpcs AN-73 */
+ wr32_epcs(hw, SR_AN_CTRL, 0x0);
+ /* Disable PHY MPLLA for eth mode change(after ECO) */
+ wr32_ephy(hw, 0x4, 0x243A);
+ txgbe_flush(hw);
+ msleep(1);
+ /* Set the eth change_mode bit first in mis_rst register
+ * for corresponding LAN port */
+ wr32(hw, TXGBE_RST, TXGBE_RST_ETH(hw->bus.lan_id));
+
+ /* 3. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register
+ * Bit[10:0](MPLLA_BANDWIDTH) = 11'd123 (default: 11'd16)
+ */
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3,
+ TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_10GBASER_KR);
+
+ /* 4. Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register
+ * Bit[12:8](RX_VREF_CTRL) = 5'hF (default: 5'h11)
+ */
+ wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00);
+
+ /* 5. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register
+ * Bit[15:8](VGA1/2_GAIN_0) = 8'h77
+ * Bit[7:5](CTLE_POLE_0) = 3'h2
+ * Bit[4:0](CTLE_BOOST_0) = 4'hA
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774A);
+
+ /* 6. Set VR_MII_Gen5_12G_RX_GENCTRL3 Register
+ * Bit[2:0](LOS_TRSHLD_0) = 3'h4 (default: 3)
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, 0x0004);
+
+ /* 7. Initialize the mode by setting VR XS or PCS MMD Digital
+ * Control1 Register Bit[15](VR_RST) */
+ wr32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000);
+
+ /* Wait phy initialization done */
+ for (i = 0; i < 100; i++) {
+ if ((rd32_epcs(hw,
+ VR_XS_OR_PCS_MMD_DIGI_CTL1) &
+ VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0)
+ break;
+ msleep(100);
+ }
+ if (i == 100) {
+ err = TXGBE_ERR_PHY_INIT_NOT_DONE;
+ goto out;
+ }
+ } else {
+ wr32_epcs(hw, VR_AN_KR_MODE_CL, 0x1);
+ }
+out:
+ return err;
+}
+
+static s32
+txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg)
+{
+ u32 i;
+ s32 err = 0;
+ u32 value;
+
+ /* If the link is already up in KX4 mode, skip setting it again */
+ if (hw->link_status == TXGBE_LINK_STATUS_KX4)
+ goto out;
+
+ /* 1. Wait xpcs power-up good */
+ for (i = 0; i < 100; i++) {
+ if ((rd32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_STATUS) &
+ VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) ==
+ VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD)
+ break;
+ msleep(10);
+ }
+ if (i == 100) {
+ err = TXGBE_ERR_XPCS_POWER_UP_FAILED;
+ goto out;
+ }
+
+ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TE,
+ ~TXGBE_MACTXCFG_TE);
+
+ /* 2. Disable xpcs AN-73 */
+ if (!autoneg)
+ wr32_epcs(hw, SR_AN_CTRL, 0x0);
+ else
+ wr32_epcs(hw, SR_AN_CTRL, 0x3000);
+
+ /* Disable PHY MPLLA for eth mode change(after ECO) */
+ wr32_ephy(hw, 0x4, 0x250A);
+ txgbe_flush(hw);
+ msleep(1);
+
+ /* Set the eth change_mode bit first in mis_rst register
+ * for corresponding LAN port */
+ wr32(hw, TXGBE_RST, TXGBE_RST_ETH(hw->bus.lan_id));
+
+ /* Set SR PCS Control2 Register Bits[1:0] = 2'b01
+ * PCS_TYPE_SEL: non KR
+ */
+ wr32_epcs(hw, SR_XS_PCS_CTRL2,
+ SR_PCS_CTRL2_TYPE_SEL_X);
+
+ /* Set SR PMA MMD Control1 Register Bit[13] = 1'b1
+ * SS13: 10G speed
+ */
+ wr32_epcs(hw, SR_PMA_CTRL1,
+ SR_PMA_CTRL1_SS13_KX4);
+
+ value = (0xf5f0 & ~0x7F0) | (0x5 << 8) | (0x7 << 5) | 0x10;
+ wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value);
+
+ wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0x4F00);
+
+ value = (0x1804 & ~0x3F3F);
+ wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+
+ value = (0x50 & ~0x7F) | 40 | (1 << 6);
+ wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+
+ for (i = 0; i < 4; i++) {
+ if (i == 0)
+ value = (0x45 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6;
+ else
+ value = (0xff06 & ~0xFFFF) | (0x7 << 12) | (0x7 << 8) | 0x6;
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0 + i, value);
+ }
+
+ value = 0x0 & ~0x7777;
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value);
+
+ wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0);
+
+ value = (0x6db & ~0xFFF) | (0x1 << 9) | (0x1 << 6) | (0x1 << 3) | 0x1;
+ wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA
+ * Control 0 Register Bit[7:0] = 8'd40 //MPLLA_MULTIPLIER
+ */
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0,
+ TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER);
+
+ /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA
+ * Control 3 Register Bit[10:0] = 11'd86 //MPLLA_BANDWIDTH
+ */
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3,
+ TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+ * Calibration Load 0 Register Bit[12:0] = 13'd1360 //VCO_LD_VAL_0
+ */
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0,
+ TXGBE_PHY_VCO_CAL_LD0_OTHER);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+ * Calibration Load 1 Register Bit[12:0] = 13'd1360 //VCO_LD_VAL_1
+ */
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD1,
+ TXGBE_PHY_VCO_CAL_LD0_OTHER);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+ * Calibration Load 2 Register Bit[12:0] = 13'd1360 //VCO_LD_VAL_2
+ */
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD2,
+ TXGBE_PHY_VCO_CAL_LD0_OTHER);
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+ * Calibration Load 3 Register Bit[12:0] = 13'd1360 //VCO_LD_VAL_3
+ */
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD3,
+ TXGBE_PHY_VCO_CAL_LD0_OTHER);
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+ * Calibration Reference 0 Register Bit[5:0] = 6'd34 //VCO_REF_LD_0/1
+ */
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x2222);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+ * Calibration Reference 1 Register Bit[5:0] = 6'd34 //VCO_REF_LD_2/3
+ */
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF1, 0x2222);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE
+ * Enable Register Bit[7:0] = 8'd0 //AFE_EN_0/3_1, DFE_EN_0/3_1
+ */
+ wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, 0x0);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx
+ * Equalization Control 4 Register Bit[3:0] = 4'd0 //CONT_ADAPT_0/3_1
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, 0x00F0);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate
+ * Control Register Bit[14:12], Bit[10:8], Bit[6:4], Bit[2:0],
+ * all rates to 3'b010 //TX0/1/2/3_RATE
+ */
+ wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0x2222);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate
+ * Control Register Bit[13:12], Bit[9:8], Bit[5:4], Bit[1:0],
+ * all rates to 2'b10 //RX0/1/2/3_RATE
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0x2222);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General
+ * Control 2 Register Bit[15:8] = 2'b01 //TX0/1/2/3_WIDTH: 10bits
+ */
+ wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x5500);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General
+ * Control 2 Register Bit[15:8] = 2'b01 //RX0/1/2/3_WIDTH: 10bits
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x5500);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control
+ * 2 Register Bit[10:8] = 3'b010
+ * MPLLA_DIV16P5_CLK_EN=0, MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0
+ */
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2,
+ TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10);
+
+ wr32_epcs(hw, 0x1f0000, 0x0);
+ wr32_epcs(hw, 0x1f8001, 0x0);
+ wr32_epcs(hw, SR_MII_MMD_DIGI_CTL, 0x0);
+
+ /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1
+ * Register Bit[15](VR_RST) */
+ wr32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000);
+
+ /* Wait phy initialization done */
+ for (i = 0; i < 100; i++) {
+ if ((rd32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_CTL1) &
+ VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0)
+ break;
+ msleep(100);
+ }
+
+ if (i == 100) {
+ err = TXGBE_ERR_PHY_INIT_NOT_DONE;
+ goto out;
+ }
+
+ /* Initialization completed, record the link status */
+ hw->link_status = TXGBE_LINK_STATUS_KX4;
+
+out:
+ return err;
+}
+
+static s32
+txgbe_set_link_to_kx(struct txgbe_hw *hw,
+ u32 speed,
+ bool autoneg)
+{
+ u32 i;
+ s32 err = 0;
+ u32 wdata = 0;
+ u32 value;
+
+ /* If the link is already up in KX mode, skip setting it again */
+ if (hw->link_status == TXGBE_LINK_STATUS_KX)
+ goto out;
+
+ /* 1. Wait xpcs power-up good */
+ for (i = 0; i < 100; i++) {
+ if ((rd32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_STATUS) &
+ VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) ==
+ VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD)
+ break;
+ msleep(10);
+ }
+ if (i == 100) {
+ err = TXGBE_ERR_XPCS_POWER_UP_FAILED;
+ goto out;
+ }
+
+ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TE,
+ ~TXGBE_MACTXCFG_TE);
+
+ /* 2. Disable xpcs AN-73 */
+ if (!autoneg)
+ wr32_epcs(hw, SR_AN_CTRL, 0x0);
+ else
+ wr32_epcs(hw, SR_AN_CTRL, 0x3000);
+
+ /* Disable PHY MPLLA for eth mode change(after ECO) */
+ wr32_ephy(hw, 0x4, 0x240A);
+ txgbe_flush(hw);
+ msleep(1);
+
+ /* Set the eth change_mode bit first in mis_rst register
+ * for corresponding LAN port */
+ wr32(hw, TXGBE_RST, TXGBE_RST_ETH(hw->bus.lan_id));
+
+ /* Set SR PCS Control2 Register Bits[1:0] = 2'b01
+ * PCS_TYPE_SEL: non KR
+ */
+ wr32_epcs(hw, SR_XS_PCS_CTRL2,
+ SR_PCS_CTRL2_TYPE_SEL_X);
+
+ /* Set SR PMA MMD Control1 Register Bit[13] = 1'b0
+ * SS13: 1G speed
+ */
+ wr32_epcs(hw, SR_PMA_CTRL1,
+ SR_PMA_CTRL1_SS13_KX);
+
+ /* Set SR MII MMD Control Register to corresponding speed: {Bit[6],
+ * Bit[13]}=[2'b00,2'b01,2'b10]->[10M,100M,1G]
+ */
+ if (speed == TXGBE_LINK_SPEED_100M_FULL)
+ wdata = 0x2100;
+ else if (speed == TXGBE_LINK_SPEED_1GB_FULL)
+ wdata = 0x0140;
+ else if (speed == TXGBE_LINK_SPEED_10M_FULL)
+ wdata = 0x0100;
+ wr32_epcs(hw, SR_MII_MMD_CTL,
+ wdata);
+
+ value = (0xf5f0 & ~0x710) | (0x5 << 8);
+ wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value);
+
+ wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0x4F00);
+
+ value = (0x1804 & ~0x3F3F) | (24 << 8) | 4;
+ wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+
+ value = (0x50 & ~0x7F) | 16 | (1 << 6);
+ wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+
+ for (i = 0; i < 4; i++) {
+ if (i) {
+ value = 0xff06;
+ } else {
+ value = (0x45 & ~0xFFFF) | (0x7 << 12) |
+ (0x7 << 8) | 0x6;
+ }
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0 + i, value);
+ }
+
+ value = 0x0 & ~0x7;
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value);
+
+ wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0);
+
+ value = (0x6db & ~0x7) | 0x4;
+ wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control
+ * 0 Register Bit[7:0] = 8'd32 //MPLLA_MULTIPLIER
+ */
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0,
+ TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX);
+
+ /* Set VR XS, PMA or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control
+ * 3 Register Bit[10:0] = 11'd70 //MPLLA_BANDWIDTH
+ */
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3,
+ TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+ * Calibration Load 0 Register Bit[12:0] = 13'd1344 //VCO_LD_VAL_0
+ */
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0,
+ TXGBE_PHY_VCO_CAL_LD0_1GBASEX_KX);
+
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD1, 0x549);
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD2, 0x549);
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD3, 0x549);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY VCO
+ * Calibration Reference 0 Register Bit[5:0] = 6'd42 //VCO_REF_LD_0
+ */
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0,
+ TXGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX);
+
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF1, 0x2929);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY AFE-DFE
+ * Enable Register Bit[4], Bit[0] = 1'b0 //AFE_EN_0, DFE_EN_0
+ */
+ wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE,
+ 0x0);
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx
+ * Equalization Control 4 Register Bit[0] = 1'b0 //CONT_ADAPT_0
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL,
+ 0x0010);
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx Rate
+ * Control Register Bit[2:0] = 3'b011 //TX0_RATE
+ */
+ wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL,
+ TXGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx Rate
+ * Control Register Bit[2:0] = 3'b011 //RX0_RATE
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL,
+ TXGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX);
+
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Tx General
+ * Control 2 Register Bit[9:8] = 2'b01 //TX0_WIDTH: 10bits
+ */
+ wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2,
+ TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER);
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY Rx General
+ * Control 2 Register Bit[9:8] = 2'b01 //RX0_WIDTH: 10bits
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2,
+ TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER);
+ /* Set VR XS, PMA, or MII Synopsys Enterprise Gen5 12G PHY MPLLA Control
+ * 2 Register Bit[10:8] = 3'b010 //MPLLA_DIV16P5_CLK_EN=0,
+ * MPLLA_DIV10_CLK_EN=1, MPLLA_DIV8_CLK_EN=0
+ */
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2,
+ TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10);
+
+ /* VR MII MMD AN Control Register Bit[8] = 1'b1 //MII_CTRL
+ * Set to 8bit MII (required in 10M/100M SGMII)
+ */
+ wr32_epcs(hw, SR_MII_MMD_AN_CTL,
+ 0x0100);
+
+ /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1
+ * Register Bit[15](VR_RST)
+ */
+ wr32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000);
+
+ /* Wait phy initialization done */
+ for (i = 0; i < 100; i++) {
+ if ((rd32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_CTL1) &
+ VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0)
+ break;
+ msleep(100);
+ }
+
+ if (i == 100) {
+ err = TXGBE_ERR_PHY_INIT_NOT_DONE;
+ goto out;
+ }
+
+ /* Initialization completed, record the link status */
+ hw->link_status = TXGBE_LINK_STATUS_KX;
+
+out:
+ return err;
+}
+
+static s32
+txgbe_set_link_to_sfi(struct txgbe_hw *hw,
+ u32 speed)
+{
+ u32 i;
+ s32 err = 0;
+ u32 value = 0;
+
+ /* Set the module link speed */
+ hw->mac.set_rate_select_speed(hw, speed);
+ /* 1. Wait xpcs power-up good */
+ for (i = 0; i < 100; i++) {
+ if ((rd32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_STATUS) &
+ VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK) ==
+ VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD)
+ break;
+ msleep(10);
+ }
+ if (i == 100) {
+ err = TXGBE_ERR_XPCS_POWER_UP_FAILED;
+ goto out;
+ }
+
+ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TE,
+ ~TXGBE_MACTXCFG_TE);
+
+ /* 2. Disable xpcs AN-73 */
+ wr32_epcs(hw, SR_AN_CTRL, 0x0);
+
+ /* Disable PHY MPLLA for eth mode change(after ECO) */
+ wr32_ephy(hw, 0x4, 0x243A);
+ txgbe_flush(hw);
+ msleep(1);
+ /* Set the eth change_mode bit first in mis_rst register
+ * for corresponding LAN port */
+ wr32(hw, TXGBE_RST, TXGBE_RST_ETH(hw->bus.lan_id));
+
+ if (speed == TXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set SR PCS Control2 Register Bits[1:0] = 2'b00
+ * PCS_TYPE_SEL: KR
+ */
+ wr32_epcs(hw, SR_XS_PCS_CTRL2, 0);
+ value = rd32_epcs(hw, SR_PMA_CTRL1);
+ value = value | 0x2000;
+ wr32_epcs(hw, SR_PMA_CTRL1, value);
+ /* Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL0 Register Bit[7:0] = 8'd33
+ * MPLLA_MULTIPLIER
+ */
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, 0x0021);
+ /* 3. Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL3 Register
+ * Bit[10:0](MPLLA_BANDWIDTH) = 11'd0
+ */
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, 0);
+ value = rd32_epcs(hw, TXGBE_PHY_TX_GENCTRL1);
+ value = (value & ~0x700) | 0x500;
+ wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value);
+ /* 4. Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register
+ * Bit[12:8](RX_VREF_CTRL) = 5'hF
+ */
+ wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00);
+ /* Set VR_XS_PMA_Gen5_12G_VCO_CAL_LD0 Register
+ * Bit[12:0] = 13'd1353 //VCO_LD_VAL_0
+ */
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, 0x0549);
+ /* Set VR_XS_PMA_Gen5_12G_VCO_CAL_REF0 Register
+ * Bit[5:0] = 6'd41 //VCO_REF_LD_0
+ */
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x0029);
+ /* Set VR_XS_PMA_Gen5_12G_TX_RATE_CTRL Register
+ * Bit[2:0] = 3'b000 //TX0_RATE
+ */
+ wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0);
+ /* Set VR_XS_PMA_Gen5_12G_RX_RATE_CTRL Register
+ * Bit[2:0] = 3'b000 //RX0_RATE
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0);
+ /* Set VR_XS_PMA_Gen5_12G_TX_GENCTRL2 Register Bit[9:8] = 2'b11
+ * TX0_WIDTH: 20bits
+ */
+ wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x0300);
+ /* Set VR_XS_PMA_Gen5_12G_RX_GENCTRL2 Register Bit[9:8] = 2'b11
+ * RX0_WIDTH: 20bits
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x0300);
+ /* Set VR_XS_PMA_Gen5_12G_MPLLA_CTRL2 Register
+ * Bit[10:8] = 3'b110
+ * MPLLA_DIV16P5_CLK_EN=1
+ * MPLLA_DIV10_CLK_EN=1
+ * MPLLA_DIV8_CLK_EN=0
+ */
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, 0x0600);
+ /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register
+ * Bit[13:8](TX_EQ_MAIN) = 6'd24, Bit[5:0](TX_EQ_PRE) = 6'd4
+ */
+ value = rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0);
+ value = (value & ~0x3F3F) | (24 << 8) | 4;
+ wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+ /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register
+ * Bit[6](TX_EQ_OVR_RIDE) = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd16
+ */
+ value = rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1);
+ value = (value & ~0x7F) | 16 | (1 << 6);
+ wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+ if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) {
+ /* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register
+ * Bit[15:8](VGA1/2_GAIN_0) = 8'h77
+ * Bit[7:5](CTLE_POLE_0) = 3'h2
+ * Bit[4:0](CTLE_BOOST_0) = 4'hF
+ */
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774F);
+
+ } else {
+ /* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register
+ * Bit[15:8](VGA1/2_GAIN_0) = 8'h00
+ * Bit[7:5](CTLE_POLE_0) = 3'h2
+ * Bit[4:0](CTLE_BOOST_0) = 5'h5
+ */
+ value = rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0);
+ value = (value & ~0xFFFF) | (2 << 5) | 0x05;
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, value);
+ }
+ value = rd32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0);
+ value = (value & ~0x7) | 0x0;
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value);
+
+ if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) {
+ /* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register
+ * Bit[7:0](DFE_TAP1_0) = 8'd20 */
+ wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0014);
+ value = rd32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE);
+ value = (value & ~0x11) | 0x11;
+ wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, value);
+ } else {
+ /* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register
+ * Bit[7:0](DFE_TAP1_0) = 8'd190 */
+ wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0xBE);
+ /* 9. Set VR_MII_Gen5_12G_AFE_DFE_EN_CTRL Register
+ * Bit[4](DFE_EN_0) = 1'b0, Bit[0](AFE_EN_0) = 1'b0 */
+ value = rd32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE);
+ value = (value & ~0x11) | 0x0;
+ wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, value);
+ }
+ value = rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL);
+ value = value & ~0x1;
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, value);
+ } else {
+ /* Set SR PCS Control2 Register Bits[1:0] = 2'b01
+ * PCS_TYPE_SEL: non KR
+ */
+ wr32_epcs(hw, SR_XS_PCS_CTRL2, 0x1);
+ /* Set SR PMA MMD Control1 Register Bit[13] = 1'b0
+ * SS13: 1G speed
+ */
+ wr32_epcs(hw, SR_PMA_CTRL1, 0x0000);
+ /* Set SR MII MMD Control Register to corresponding speed */
+ wr32_epcs(hw, SR_MII_MMD_CTL, 0x0140);
+
+ value = rd32_epcs(hw, TXGBE_PHY_TX_GENCTRL1);
+ value = (value & ~0x710) | 0x500;
+ wr32_epcs(hw, TXGBE_PHY_TX_GENCTRL1, value);
+ /* 4. Set VR_XS_PMA_Gen5_12G_MISC_CTRL0 Register
+ * Bit[12:8](RX_VREF_CTRL) = 5'hF */
+ wr32_epcs(hw, TXGBE_PHY_MISC_CTL0, 0xCF00);
+ /* 5. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL0 Register
+ * Bit[13:8](TX_EQ_MAIN) = 6'd24, Bit[5:0](TX_EQ_PRE) = 6'd4 */
+ value = rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0);
+ value = (value & ~0x3F3F) | (24 << 8) | 4;
+ wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value);
+ /* 6. Set VR_XS_PMA_Gen5_12G_TX_EQ_CTRL1 Register Bit[6]
+ * (TX_EQ_OVR_RIDE) = 1'b1, Bit[5:0](TX_EQ_POST) = 6'd16 */
+ value = rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1);
+ value = (value & ~0x7F) | 16 | (1 << 6);
+ wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value);
+ if (hw->phy.sfp_type == txgbe_sfp_type_da_cu_core0 ||
+ hw->phy.sfp_type == txgbe_sfp_type_da_cu_core1) {
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, 0x774F);
+ } else {
+ /* 7. Set VR_XS_PMA_Gen5_12G_RX_EQ_CTRL0 Register
+ * Bit[15:8](VGA1/2_GAIN_0) = 8'h77
+ * Bit[7:5](CTLE_POLE_0) = 3'h0
+ * Bit[4:0](CTLE_BOOST_0) = 5'h6
+ */
+ value = rd32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0);
+ value = (value & ~0xFFFF) | 0x7706;
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL0, value);
+ }
+ value = rd32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0);
+ value = (value & ~0x7) | 0x0;
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_ATT_LVL0, value);
+ /* 8. Set VR_XS_PMA_Gen5_12G_DFE_TAP_CTRL0 Register
+ * Bit[7:0](DFE_TAP1_0) = 8'd00 */
+ wr32_epcs(hw, TXGBE_PHY_DFE_TAP_CTL0, 0x0);
+ /* 9. Set VR_MII_Gen5_12G_RX_GENCTRL3 Register
+ * Bit[2:0](LOS_TRSHLD_0) = 3'h4 */
+ value = rd32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3);
+ value = (value & ~0x7) | 0x4;
+ wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL3, value);
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL0, 0x0020);
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL3, 0x0046);
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_LD0, 0x0540);
+ wr32_epcs(hw, TXGBE_PHY_VCO_CAL_REF0, 0x002A);
+ wr32_epcs(hw, TXGBE_PHY_AFE_DFE_ENABLE, 0x0);
+ wr32_epcs(hw, TXGBE_PHY_RX_EQ_CTL, 0x0010);
+ wr32_epcs(hw, TXGBE_PHY_TX_RATE_CTL, 0x0003);
+ wr32_epcs(hw, TXGBE_PHY_RX_RATE_CTL, 0x0003);
+ wr32_epcs(hw, TXGBE_PHY_TX_GEN_CTL2, 0x0100);
+ wr32_epcs(hw, TXGBE_PHY_RX_GEN_CTL2, 0x0100);
+ wr32_epcs(hw, TXGBE_PHY_MPLLA_CTL2, 0x0200);
+ wr32_epcs(hw, SR_MII_MMD_AN_CTL, 0x0100);
+ }
+ /* 10. Initialize the mode by setting VR XS or PCS MMD Digital Control1
+ * Register Bit[15](VR_RST) */
+ wr32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_CTL1, 0xA000);
+
+ /* Wait phy initialization done */
+ for (i = 0; i < 100; i++) {
+ if ((rd32_epcs(hw, VR_XS_OR_PCS_MMD_DIGI_CTL1) &
+ VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST) == 0)
+ break;
+ msleep(100);
+ }
+ if (i == 100) {
+ err = TXGBE_ERR_PHY_INIT_NOT_DONE;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * txgbe_autoc_read - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ */
+u64 txgbe_autoc_read(struct txgbe_hw *hw)
+{
+ u64 autoc = 0;
+ u32 sr_pcs_ctl;
+ u32 sr_pma_ctl1;
+ u32 sr_an_ctl;
+ u32 sr_an_adv_reg2;
+
+ if (hw->phy.multispeed_fiber) {
+ autoc |= TXGBE_AUTOC_LMS_10Gs;
+ } else if (hw->device_id == TXGBE_DEV_ID_RAPTOR_SFP ||
+ hw->device_id == TXGBE_DEV_ID_WX1820_SFP) {
+ autoc |= TXGBE_AUTOC_LMS_10Gs |
+ TXGBE_AUTOC_10Gs_SFI;
+ } else if (hw->device_id == TXGBE_DEV_ID_RAPTOR_QSFP) {
+ autoc = 0; /*TBD*/
+ } else if (hw->device_id == TXGBE_DEV_ID_RAPTOR_XAUI) {
+ autoc |= TXGBE_AUTOC_LMS_10G_LINK_NO_AN |
+ TXGBE_AUTOC_10G_XAUI;
+ hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_T;
+ } else if (hw->device_id == TXGBE_DEV_ID_RAPTOR_SGMII) {
+ autoc |= TXGBE_AUTOC_LMS_SGMII_1G_100M;
+ hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_T |
+ TXGBE_PHYSICAL_LAYER_100BASE_TX;
+ }
+
+ if (hw->device_id != TXGBE_DEV_ID_RAPTOR_SGMII)
+ return autoc;
+
+ sr_pcs_ctl = rd32_epcs(hw, SR_XS_PCS_CTRL2);
+ sr_pma_ctl1 = rd32_epcs(hw, SR_PMA_CTRL1);
+ sr_an_ctl = rd32_epcs(hw, SR_AN_CTRL);
+ sr_an_adv_reg2 = rd32_epcs(hw, SR_AN_MMD_ADV_REG2);
+
+ if ((sr_pcs_ctl & SR_PCS_CTRL2_TYPE_SEL) == SR_PCS_CTRL2_TYPE_SEL_X &&
+ (sr_pma_ctl1 & SR_PMA_CTRL1_SS13) == SR_PMA_CTRL1_SS13_KX &&
+ (sr_an_ctl & SR_AN_CTRL_AN_EN) == 0) {
+ /* 1G or KX - no backplane auto-negotiation */
+ autoc |= TXGBE_AUTOC_LMS_1G_LINK_NO_AN |
+ TXGBE_AUTOC_1G_KX;
+ hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ } else if ((sr_pcs_ctl & SR_PCS_CTRL2_TYPE_SEL) ==
+ SR_PCS_CTRL2_TYPE_SEL_X &&
+ (sr_pma_ctl1 & SR_PMA_CTRL1_SS13) == SR_PMA_CTRL1_SS13_KX4 &&
+ (sr_an_ctl & SR_AN_CTRL_AN_EN) == 0) {
+ autoc |= TXGBE_AUTOC_LMS_10Gs |
+ TXGBE_AUTOC_10G_KX4;
+ hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ } else if ((sr_pcs_ctl & SR_PCS_CTRL2_TYPE_SEL) ==
+ SR_PCS_CTRL2_TYPE_SEL_R &&
+ (sr_an_ctl & SR_AN_CTRL_AN_EN) == 0) {
+ /* 10 GbE serial link (KR -no backplane auto-negotiation) */
+ autoc |= TXGBE_AUTOC_LMS_10Gs |
+ TXGBE_AUTOC_10Gs_KR;
+ hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ } else if ((sr_an_ctl & SR_AN_CTRL_AN_EN)) {
+ /* KX/KX4/KR backplane auto-negotiation enable */
+ if (sr_an_adv_reg2 & SR_AN_MMD_ADV_REG2_BP_TYPE_KR)
+ autoc |= TXGBE_AUTOC_10G_KR;
+ if (sr_an_adv_reg2 & SR_AN_MMD_ADV_REG2_BP_TYPE_KX4)
+ autoc |= TXGBE_AUTOC_10G_KX4;
+ if (sr_an_adv_reg2 & SR_AN_MMD_ADV_REG2_BP_TYPE_KX)
+ autoc |= TXGBE_AUTOC_1G_KX;
+ autoc |= TXGBE_AUTOC_LMS_KX4_KX_KR;
+ hw->phy.link_mode = TXGBE_PHYSICAL_LAYER_10GBASE_KR |
+ TXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+ TXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ }
+
+ return autoc;
+}
+
+/**
+ * txgbe_autoc_write - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @autoc: value to write to AUTOC
+ */
+void txgbe_autoc_write(struct txgbe_hw *hw, u64 autoc)
+{
+ bool autoneg;
+ u32 speed;
+ u32 mactxcfg = 0;
+
+ speed = TXGBE_AUTOC_SPEED(autoc);
+ autoc &= ~TXGBE_AUTOC_SPEED_MASK;
+ autoneg = (autoc & TXGBE_AUTOC_AUTONEG ? true : false);
+ autoc &= ~TXGBE_AUTOC_AUTONEG;
+
+ if (hw->device_id == TXGBE_DEV_ID_RAPTOR_KR_KX_KX4) {
+ if (!autoneg) {
+ switch (hw->phy.link_mode) {
+ case TXGBE_PHYSICAL_LAYER_10GBASE_KR:
+ txgbe_set_link_to_kr(hw, autoneg);
+ break;
+ case TXGBE_PHYSICAL_LAYER_10GBASE_KX4:
+ txgbe_set_link_to_kx4(hw, autoneg);
+ break;
+ case TXGBE_PHYSICAL_LAYER_1000BASE_KX:
+ txgbe_set_link_to_kx(hw, speed, autoneg);
+ break;
+ default:
+ return;
+ }
+ }
+ } else if (hw->device_id == TXGBE_DEV_ID_RAPTOR_XAUI ||
+ hw->device_id == TXGBE_DEV_ID_RAPTOR_SGMII) {
+ if (speed == TXGBE_LINK_SPEED_10GB_FULL) {
+ txgbe_set_link_to_kx4(hw, autoneg);
+ } else {
+ txgbe_set_link_to_kx(hw, speed, false);
+ txgbe_set_sgmii_an37_ability(hw);
+ }
+ } else if (hw->device_id == TXGBE_DEV_ID_RAPTOR_SFP ||
+ hw->device_id == TXGBE_DEV_ID_WX1820_SFP) {
+ txgbe_set_link_to_sfi(hw, speed);
+ }
+
+ if (speed == TXGBE_LINK_SPEED_10GB_FULL)
+ mactxcfg = TXGBE_MACTXCFG_SPEED_10G;
+ else if (speed == TXGBE_LINK_SPEED_1GB_FULL)
+ mactxcfg = TXGBE_MACTXCFG_SPEED_1G;
+ /* enable mac transmitter */
+ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_SPEED_MASK, mactxcfg);
+}
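+
+/* Illustrative round trip (a sketch, not driver code): force a 10G link
+ * without autoneg by re-encoding what txgbe_autoc_read() reported:
+ *
+ *	u64 autoc = txgbe_autoc_read(hw);
+ *
+ *	autoc &= ~(TXGBE_AUTOC_SPEED_MASK | TXGBE_AUTOC_AUTONEG);
+ *	autoc |= TXGBE_AUTOC_SPEED(TXGBE_LINK_SPEED_10GB_FULL);
+ *	txgbe_autoc_write(hw, autoc);
+ */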
new file mode 100644
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_PHY_H_
+#define _TXGBE_PHY_H_
+
+#include "txgbe_type.h"
+
+#define TXGBE_SFP_DETECT_RETRIES 10
+#define TXGBE_MD_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
+
+/* ETH PHY Registers */
+#define SR_XS_PCS_MMD_STATUS1 0x030001
+#define SR_XS_PCS_CTRL2 0x030007
+#define SR_PCS_CTRL2_TYPE_SEL MS16(0, 0x3)
+#define SR_PCS_CTRL2_TYPE_SEL_R LS16(0, 0, 0x3)
+#define SR_PCS_CTRL2_TYPE_SEL_X LS16(1, 0, 0x3)
+#define SR_PCS_CTRL2_TYPE_SEL_W LS16(2, 0, 0x3)
+#define SR_PMA_CTRL1 0x010000
+#define SR_PMA_CTRL1_SS13 MS16(13, 0x1)
+#define SR_PMA_CTRL1_SS13_KX LS16(0, 13, 0x1)
+#define SR_PMA_CTRL1_SS13_KX4 LS16(1, 13, 0x1)
+#define SR_PMA_CTRL1_LB MS16(0, 0x1)
+#define SR_MII_MMD_CTL 0x1F0000
+#define SR_MII_MMD_CTL_AN_EN 0x1000
+#define SR_MII_MMD_CTL_RESTART_AN 0x0200
+#define SR_MII_MMD_DIGI_CTL 0x1F8000
+#define SR_MII_MMD_AN_CTL 0x1F8001
+#define SR_MII_MMD_AN_ADV 0x1F0004
+#define SR_MII_MMD_AN_ADV_PAUSE(v) ((0x3 & (v)) << 7)
+#define SR_MII_MMD_AN_ADV_PAUSE_ASM 0x80
+#define SR_MII_MMD_AN_ADV_PAUSE_SYM 0x100
+#define SR_MII_MMD_LP_BABL 0x1F0005
+#define SR_AN_CTRL 0x070000
+#define SR_AN_CTRL_RSTRT_AN MS16(9, 0x1)
+#define SR_AN_CTRL_AN_EN MS16(12, 0x1)
+#define SR_AN_MMD_ADV_REG1 0x070010
+#define SR_AN_MMD_ADV_REG1_PAUSE(v) ((0x3 & (v)) << 10)
+#define SR_AN_MMD_ADV_REG1_PAUSE_SYM 0x400
+#define SR_AN_MMD_ADV_REG1_PAUSE_ASM 0x800
+#define SR_AN_MMD_ADV_REG2 0x070011
+#define SR_AN_MMD_ADV_REG2_BP_TYPE_KX4 0x40
+#define SR_AN_MMD_ADV_REG2_BP_TYPE_KX 0x20
+#define SR_AN_MMD_ADV_REG2_BP_TYPE_KR 0x80
+#define SR_AN_MMD_ADV_REG2_BP_TYPE_MASK 0xFFFF
+#define SR_AN_MMD_LP_ABL1 0x070013
+#define VR_AN_KR_MODE_CL 0x078003
+#define VR_XS_OR_PCS_MMD_DIGI_CTL1 0x038000
+#define VR_XS_OR_PCS_MMD_DIGI_CTL1_ENABLE 0x1000
+#define VR_XS_OR_PCS_MMD_DIGI_CTL1_VR_RST 0x8000
+#define VR_XS_OR_PCS_MMD_DIGI_STATUS 0x038010
+#define VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_MASK 0x1C
+#define VR_XS_OR_PCS_MMD_DIGI_STATUS_PSEQ_POWER_GOOD 0x10
+
+#define TXGBE_PHY_MPLLA_CTL0 0x018071
+#define TXGBE_PHY_MPLLA_CTL3 0x018077
+#define TXGBE_PHY_MISC_CTL0 0x018090
+#define TXGBE_PHY_VCO_CAL_LD0 0x018092
+#define TXGBE_PHY_VCO_CAL_LD1 0x018093
+#define TXGBE_PHY_VCO_CAL_LD2 0x018094
+#define TXGBE_PHY_VCO_CAL_LD3 0x018095
+#define TXGBE_PHY_VCO_CAL_REF0 0x018096
+#define TXGBE_PHY_VCO_CAL_REF1 0x018097
+#define TXGBE_PHY_RX_AD_ACK 0x018098
+#define TXGBE_PHY_AFE_DFE_ENABLE 0x01805D
+#define TXGBE_PHY_DFE_TAP_CTL0 0x01805E
+#define TXGBE_PHY_RX_EQ_ATT_LVL0 0x018057
+#define TXGBE_PHY_RX_EQ_CTL0 0x018058
+#define TXGBE_PHY_RX_EQ_CTL 0x01805C
+#define TXGBE_PHY_TX_EQ_CTL0 0x018036
+#define TXGBE_PHY_TX_EQ_CTL1 0x018037
+#define TXGBE_PHY_TX_RATE_CTL 0x018034
+#define TXGBE_PHY_RX_RATE_CTL 0x018054
+#define TXGBE_PHY_TX_GEN_CTL2 0x018032
+#define TXGBE_PHY_RX_GEN_CTL2 0x018052
+#define TXGBE_PHY_RX_GEN_CTL3 0x018053
+#define TXGBE_PHY_MPLLA_CTL2 0x018073
+#define TXGBE_PHY_RX_POWER_ST_CTL 0x018055
+#define TXGBE_PHY_TX_POWER_ST_CTL 0x018035
+#define TXGBE_PHY_TX_GENCTRL1 0x018031
+
+#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_1GBASEX_KX 32
+#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_10GBASER_KR 33
+#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_OTHER 40
+#define TXGBE_PHY_MPLLA_CTL0_MULTIPLIER_MASK 0xFF
+#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_1GBASEX_KX 0x46
+#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_10GBASER_KR 0x7B
+#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_OTHER 0x56
+#define TXGBE_PHY_MPLLA_CTL3_MULTIPLIER_BW_MASK 0x7FF
+#define TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_0 0x1
+#define TXGBE_PHY_MISC_CTL0_TX2RX_LB_EN_3_1 0xE
+#define TXGBE_PHY_MISC_CTL0_RX_VREF_CTRL 0x1F00
+#define TXGBE_PHY_VCO_CAL_LD0_1GBASEX_KX 1344
+#define TXGBE_PHY_VCO_CAL_LD0_10GBASER_KR 1353
+#define TXGBE_PHY_VCO_CAL_LD0_OTHER 1360
+#define TXGBE_PHY_VCO_CAL_LD0_MASK 0x1000
+#define TXGBE_PHY_VCO_CAL_REF0_LD0_1GBASEX_KX 42
+#define TXGBE_PHY_VCO_CAL_REF0_LD0_10GBASER_KR 41
+#define TXGBE_PHY_VCO_CAL_REF0_LD0_OTHER 34
+#define TXGBE_PHY_VCO_CAL_REF0_LD0_MASK 0x3F
+#define TXGBE_PHY_AFE_DFE_ENABLE_DFE_EN0 0x10
+#define TXGBE_PHY_AFE_DFE_ENABLE_AFE_EN0 0x1
+#define TXGBE_PHY_AFE_DFE_ENABLE_MASK 0xFF
+#define TXGBE_PHY_RX_EQ_CTL_CONT_ADAPT0 0x1
+#define TXGBE_PHY_RX_EQ_CTL_CONT_ADAPT_MASK 0xF
+#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_10GBASER_KR 0x0
+#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_RXAUI 0x1
+#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_1GBASEX_KX 0x3
+#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_OTHER 0x2
+#define TXGBE_PHY_TX_RATE_CTL_TX1_RATE_OTHER 0x20
+#define TXGBE_PHY_TX_RATE_CTL_TX2_RATE_OTHER 0x200
+#define TXGBE_PHY_TX_RATE_CTL_TX3_RATE_OTHER 0x2000
+#define TXGBE_PHY_TX_RATE_CTL_TX0_RATE_MASK 0x7
+#define TXGBE_PHY_TX_RATE_CTL_TX1_RATE_MASK 0x70
+#define TXGBE_PHY_TX_RATE_CTL_TX2_RATE_MASK 0x700
+#define TXGBE_PHY_TX_RATE_CTL_TX3_RATE_MASK 0x7000
+#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_10GBASER_KR 0x0
+#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_RXAUI 0x1
+#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_1GBASEX_KX 0x3
+#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_OTHER 0x2
+#define TXGBE_PHY_RX_RATE_CTL_RX1_RATE_OTHER 0x20
+#define TXGBE_PHY_RX_RATE_CTL_RX2_RATE_OTHER 0x200
+#define TXGBE_PHY_RX_RATE_CTL_RX3_RATE_OTHER 0x2000
+#define TXGBE_PHY_RX_RATE_CTL_RX0_RATE_MASK 0x7
+#define TXGBE_PHY_RX_RATE_CTL_RX1_RATE_MASK 0x70
+#define TXGBE_PHY_RX_RATE_CTL_RX2_RATE_MASK 0x700
+#define TXGBE_PHY_RX_RATE_CTL_RX3_RATE_MASK 0x7000
+#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_10GBASER_KR 0x200
+#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_10GBASER_KR_RXAUI 0x300
+#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_OTHER 0x100
+#define TXGBE_PHY_TX_GEN_CTL2_TX0_WIDTH_MASK 0x300
+#define TXGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_OTHER 0x400
+#define TXGBE_PHY_TX_GEN_CTL2_TX1_WIDTH_MASK 0xC00
+#define TXGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_OTHER 0x1000
+#define TXGBE_PHY_TX_GEN_CTL2_TX2_WIDTH_MASK 0x3000
+#define TXGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_OTHER 0x4000
+#define TXGBE_PHY_TX_GEN_CTL2_TX3_WIDTH_MASK 0xC000
+#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_10GBASER_KR 0x200
+#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_10GBASER_KR_RXAUI 0x300
+#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_OTHER 0x100
+#define TXGBE_PHY_RX_GEN_CTL2_RX0_WIDTH_MASK 0x300
+#define TXGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_OTHER 0x400
+#define TXGBE_PHY_RX_GEN_CTL2_RX1_WIDTH_MASK 0xC00
+#define TXGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_OTHER 0x1000
+#define TXGBE_PHY_RX_GEN_CTL2_RX2_WIDTH_MASK 0x3000
+#define TXGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_OTHER 0x4000
+#define TXGBE_PHY_RX_GEN_CTL2_RX3_WIDTH_MASK 0xC000
+#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_8 0x100
+#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_10 0x200
+#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_16P5 0x400
+#define TXGBE_PHY_MPLLA_CTL2_DIV_CLK_EN_MASK 0x700
+
+/******************************************************************************
+ * SFP I2C Registers:
+ ******************************************************************************/
+/* SFP IDs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define TXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
+#define TXGBE_SFF_VENDOR_OUI_FTL 0x00906500
+#define TXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define TXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
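+/* e.g. the FTL OUI 0x00906500 packs ID bytes 0x00/0x90/0x65, read from
+ * EEPROM offsets TXGBE_SFF_VENDOR_OUI_BYTE0..BYTE2 below, into bits 31:8
+ * with bits 7:0 left zero.
+ */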
+
+/* EEPROM (dev_addr = 0xA0) */
+#define TXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+#define TXGBE_SFF_IDENTIFIER 0x00
+#define TXGBE_SFF_IDENTIFIER_SFP 0x03
+#define TXGBE_SFF_VENDOR_OUI_BYTE0 0x25
+#define TXGBE_SFF_VENDOR_OUI_BYTE1 0x26
+#define TXGBE_SFF_VENDOR_OUI_BYTE2 0x27
+#define TXGBE_SFF_1GBE_COMP_CODES 0x06
+#define TXGBE_SFF_10GBE_COMP_CODES 0x03
+#define TXGBE_SFF_CABLE_TECHNOLOGY 0x08
+#define TXGBE_SFF_CABLE_DA_PASSIVE 0x4
+#define TXGBE_SFF_CABLE_DA_ACTIVE 0x8
+#define TXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define TXGBE_SFF_SFF_8472_SWAP 0x5C
+#define TXGBE_SFF_SFF_8472_COMP 0x5E
+#define TXGBE_SFF_SFF_8472_OSCB 0x6E
+#define TXGBE_SFF_SFF_8472_ESCB 0x76
+
+#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0x0D
+#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
+#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
+#define TXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
+#define TXGBE_SFF_QSFP_CONNECTOR 0x82
+#define TXGBE_SFF_QSFP_10GBE_COMP 0x83
+#define TXGBE_SFF_QSFP_1GBE_COMP 0x86
+#define TXGBE_SFF_QSFP_CABLE_LENGTH 0x92
+#define TXGBE_SFF_QSFP_DEVICE_TECH 0x93
+
+/* Bitmasks */
+#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define TXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define TXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define TXGBE_SFF_1GBASET_CAPABLE 0x8
+#define TXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define TXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define TXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define TXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define TXGBE_SFF_SOFT_RS_SELECT_1G 0x0
+#define TXGBE_SFF_ADDRESSING_MODE 0x4
+#define TXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define TXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define TXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
+#define TXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
+#define TXGBE_I2C_EEPROM_READ_MASK 0x100
+#define TXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define TXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define TXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define TXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define TXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+
+/* EEPROM for SFF-8472 (dev_addr = 0xA2) */
+#define TXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
+
+/* SFP+ SFF-8472 Compliance */
+#define TXGBE_SFF_SFF_8472_UNSUP 0x00
+
+/******************************************************************************
+ * PHY MDIO Registers:
+ ******************************************************************************/
+#define TXGBE_MAX_PHY_ADDR 32
+/* PHY IDs*/
+#define TXGBE_PHYID_MTD3310 0x00000000U
+#define TXGBE_PHYID_TN1010 0x00A19410U
+#define TXGBE_PHYID_QT2022 0x0043A400U
+#define TXGBE_PHYID_ATH 0x03429050U
+
+/* (dev_type = 1) */
+#define TXGBE_MD_DEV_PMA_PMD 0x1
+#define TXGBE_MD_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
+#define TXGBE_MD_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
+#define TXGBE_PHY_REVISION_MASK 0xFFFFFFF0
+#define TXGBE_MD_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
+#define TXGBE_MD_PHY_SPEED_10G 0x0001 /* 10G capable */
+#define TXGBE_MD_PHY_SPEED_1G 0x0010 /* 1G capable */
+#define TXGBE_MD_PHY_SPEED_100M 0x0020 /* 100M capable */
+#define TXGBE_MD_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */
+#define TXGBE_MD_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
+#define TXGBE_MD_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
+#define TXGBE_MD_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
+#define TXGBE_MD_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+
+#define TXGBE_MD_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
+#define TXGBE_MD_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define TXGBE_MD_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define TXGBE_MD_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+
+#define TXGBE_MD_FW_REV_LO 0xC011
+#define TXGBE_MD_FW_REV_HI 0xC012
+
+#define TXGBE_TN_LASI_STATUS_REG 0x9005
+#define TXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
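+/* polled by txgbe_tn_check_overtemp() to map the LASI temperature alarm
+ * onto TXGBE_ERR_OVERTEMP
+ */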
+
+/* (dev_type = 3) */
+#define TXGBE_MD_DEV_PCS 0x3
+#define TXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */
+#define TXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */
+#define TXGBE_PCRC8ECH_MASK 0x1F
+#define TXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */
+#define TXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */
+
+/* (dev_type = 4) */
+#define TXGBE_MD_DEV_PHY_XS 0x4
+#define TXGBE_MD_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
+#define TXGBE_MD_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
+
+/* (dev_type = 7) */
+#define TXGBE_MD_DEV_AUTO_NEG 0x7
+
+#define TXGBE_MD_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
+#define TXGBE_MD_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define TXGBE_MD_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define TXGBE_MD_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */
+#define TXGBE_MD_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
+#define TXGBE_MD_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */
+#define TXGBE_MD_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
+#define TXGBE_TAF_SYM_PAUSE MS16(10, 0x3)
+#define TXGBE_TAF_ASM_PAUSE MS16(11, 0x3)
+
+#define TXGBE_MD_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
+#define TXGBE_MD_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
+/* PHY address definitions for new protocol MDIO commands */
+#define TXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
+#define TXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define TXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
+#define TXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
+#define TXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/
+#define TXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
+#define TXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define TXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400
+#define TXGBE_MII_5GBASE_T_ADVERTISE 0x0800
+#define TXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
+#define TXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
+#define TXGBE_MII_RESTART 0x200
+#define TXGBE_MII_AUTONEG_COMPLETE 0x20
+#define TXGBE_MII_AUTONEG_LINK_UP 0x04
+#define TXGBE_MII_AUTONEG_REG 0x0
+#define TXGBE_MD_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */
+#define TXGBE_MD_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */
+#define TXGBE_MD_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */
+#define TXGBE_MD_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */
+
+/* (dev_type = 30) */
+#define TXGBE_MD_DEV_VENDOR_1 30
+#define TXGBE_MD_DEV_XFI_DSP 30
+#define TNX_FW_REV 0xB
+#define TXGBE_MD_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */
+#define TXGBE_MD_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
+#define TXGBE_MD_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
+#define TXGBE_MD_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */
+#define TXGBE_MD_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
+#define TXGBE_MD_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
+
+/* (dev_type = 31) */
+#define TXGBE_MD_DEV_GENERAL 31
+#define TXGBE_MD_PORT_CTRL 0xF001
+#define TXGBE_MD_PORT_CTRL_RESET MS16(14, 0x1)
+
+/******************************************************************************
+ * SFP I2C Registers:
+ ******************************************************************************/
+#define TXGBE_I2C_SLAVEADDR (0x50)
+
+bool txgbe_validate_phy_addr(struct txgbe_hw *hw, u32 phy_addr);
+enum txgbe_phy_type txgbe_get_phy_type_from_id(u32 phy_id);
+s32 txgbe_get_phy_id(struct txgbe_hw *hw);
+s32 txgbe_identify_phy(struct txgbe_hw *hw);
+s32 txgbe_reset_phy(struct txgbe_hw *hw);
+s32 txgbe_read_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data);
+s32 txgbe_write_phy_reg_mdi(struct txgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data);
+s32 txgbe_read_phy_reg(struct txgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 txgbe_write_phy_reg(struct txgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
+s32 txgbe_setup_phy_link(struct txgbe_hw *hw);
+s32 txgbe_setup_phy_link_speed(struct txgbe_hw *hw,
+ u32 speed,
+ bool autoneg_wait_to_complete);
+s32 txgbe_get_copper_link_capabilities(struct txgbe_hw *hw,
+ u32 *speed,
+ bool *autoneg);
+s32 txgbe_check_reset_blocked(struct txgbe_hw *hw);
+
+/* PHY specific */
+s32 txgbe_check_phy_link_tnx(struct txgbe_hw *hw,
+ u32 *speed,
+ bool *link_up);
+s32 txgbe_setup_phy_link_tnx(struct txgbe_hw *hw);
+s32 txgbe_get_phy_firmware_version_tnx(struct txgbe_hw *hw,
+ u32 *firmware_version);
+s32 txgbe_get_phy_firmware_version(struct txgbe_hw *hw,
+ u32 *firmware_version);
+
+s32 txgbe_reset_phy_nl(struct txgbe_hw *hw);
+s32 txgbe_set_copper_phy_power(struct txgbe_hw *hw, bool on);
+s32 txgbe_identify_module(struct txgbe_hw *hw);
+s32 txgbe_identify_sfp_module(struct txgbe_hw *hw);
+u64 txgbe_get_supported_phy_sfp_layer(struct txgbe_hw *hw);
+s32 txgbe_identify_qsfp_module(struct txgbe_hw *hw);
+s32 txgbe_get_sfp_init_sequence_offsets(struct txgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset);
+s32 txgbe_tn_check_overtemp(struct txgbe_hw *hw);
+s32 txgbe_read_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 txgbe_read_i2c_byte_unlocked(struct txgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 txgbe_write_i2c_byte(struct txgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 txgbe_write_i2c_byte_unlocked(struct txgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 txgbe_read_i2c_sff8472(struct txgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
+s32 txgbe_read_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+s32 txgbe_write_i2c_eeprom(struct txgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data);
+s32 txgbe_read_i2c_combined_int(struct txgbe_hw *, u8 addr, u16 reg,
+ u16 *val, bool lock);
+s32 txgbe_write_i2c_combined_int(struct txgbe_hw *, u8 addr, u16 reg,
+ u16 val, bool lock);
+u64 txgbe_autoc_read(struct txgbe_hw *hw);
+void txgbe_autoc_write(struct txgbe_hw *hw, u64 value);
+
+#endif /* _TXGBE_PHY_H_ */
new file mode 100644
@@ -0,0 +1,1939 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_REGS_H_
+#define _TXGBE_REGS_H_
+
+#define TXGBE_PVMBX_QSIZE (16) /* 16*4B */
+#define TXGBE_PVMBX_BSIZE (TXGBE_PVMBX_QSIZE * 4)
+
+#define TXGBE_REMOVED(a) (0)
+
+#define TXGBE_REG_DUMMY 0xFFFFFF
+
+#define MS8(shift, mask) (((u8)(mask)) << (shift))
+#define LS8(val, shift, mask) (((u8)(val) & (u8)(mask)) << (shift))
+#define RS8(reg, shift, mask) (((u8)(reg) >> (shift)) & (u8)(mask))
+
+#define MS16(shift, mask) (((u16)(mask)) << (shift))
+#define LS16(val, shift, mask) (((u16)(val) & (u16)(mask)) << (shift))
+#define RS16(reg, shift, mask) (((u16)(reg) >> (shift)) & (u16)(mask))
+
+#define MS32(shift, mask) (((u32)(mask)) << (shift))
+#define LS32(val, shift, mask) (((u32)(val) & (u32)(mask)) << (shift))
+#define RS32(reg, shift, mask) (((u32)(reg) >> (shift)) & (u32)(mask))
+
+#define MS64(shift, mask) (((u64)(mask)) << (shift))
+#define LS64(val, shift, mask) (((u64)(val) & (u64)(mask)) << (shift))
+#define RS64(reg, shift, mask) (((u64)(reg) >> (shift)) & (u64)(mask))
+
+#define MS(shift, mask) MS32(shift, mask)
+#define LS(val, shift, mask) LS32(val, shift, mask)
+#define RS(reg, shift, mask) RS32(reg, shift, mask)
+
+#define ROUND_UP(x, y) (((x) + (y) - 1) / (y) * (y))
+#define ROUND_DOWN(x, y) ((x) / (y) * (y))
+#define ROUND_OVER(x, maxbits, unitbits) \
+ ((x) >= 1 << (maxbits) ? 0 : (x) >> (unitbits))
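+
+/* Worked examples for the helpers above (illustrative):
+ *   MS(16, 0x7)             == 0x00070000: mask of a 3-bit field at bit 16
+ *   LS(4, 16, 0x7)          == 0x00040000: value 4 encoded into that field
+ *   RS(0x00040000, 16, 0x7) == 4:          the value decoded back out
+ *   ROUND_UP(1000, 1024)    == 1024, ROUND_DOWN(1000, 1024) == 0
+ */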
+
+/* autoc bits definition */
+#define TXGBE_AUTOC TXGBE_REG_DUMMY
+#define TXGBE_AUTOC_FLU MS64(0, 0x1)
+#define TXGBE_AUTOC_10G_PMA_PMD_MASK MS64(7, 0x3) /* parallel */
+#define TXGBE_AUTOC_10G_XAUI LS64(0, 7, 0x3)
+#define TXGBE_AUTOC_10G_KX4 LS64(1, 7, 0x3)
+#define TXGBE_AUTOC_10G_CX4 LS64(2, 7, 0x3)
+#define TXGBE_AUTOC_10G_KR LS64(3, 7, 0x3) /* fixme */
+#define TXGBE_AUTOC_1G_PMA_PMD_MASK MS64(9, 0x7)
+#define TXGBE_AUTOC_1G_BX LS64(0, 9, 0x7)
+#define TXGBE_AUTOC_1G_KX LS64(1, 9, 0x7)
+#define TXGBE_AUTOC_1G_SFI LS64(0, 9, 0x7)
+#define TXGBE_AUTOC_1G_KX_BX LS64(1, 9, 0x7)
+#define TXGBE_AUTOC_AN_RESTART MS64(12, 0x1)
+#define TXGBE_AUTOC_LMS_MASK MS64(13, 0x7)
+#define TXGBE_AUTOC_LMS_10Gs LS64(3, 13, 0x7)
+#define TXGBE_AUTOC_LMS_KX4_KX_KR LS64(4, 13, 0x7)
+#define TXGBE_AUTOC_LMS_SGMII_1G_100M LS64(5, 13, 0x7)
+#define TXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN LS64(6, 13, 0x7)
+#define TXGBE_AUTOC_LMS_KX4_KX_KR_SGMII LS64(7, 13, 0x7)
+#define TXGBE_AUTOC_LMS_1G_LINK_NO_AN LS64(0, 13, 0x7)
+#define TXGBE_AUTOC_LMS_10G_LINK_NO_AN LS64(1, 13, 0x7)
+#define TXGBE_AUTOC_LMS_1G_AN LS64(2, 13, 0x7)
+#define TXGBE_AUTOC_LMS_KX4_AN LS64(4, 13, 0x7)
+#define TXGBE_AUTOC_LMS_KX4_AN_1G_AN LS64(6, 13, 0x7)
+#define TXGBE_AUTOC_LMS_ATTACH_TYPE LS64(7, 13, 0x7)
+#define TXGBE_AUTOC_LMS_AN MS64(15, 0x7)
+
+#define TXGBE_AUTOC_KR_SUPP MS64(16, 0x1)
+#define TXGBE_AUTOC_FECR MS64(17, 0x1)
+#define TXGBE_AUTOC_FECA MS64(18, 0x1)
+#define TXGBE_AUTOC_AN_RX_ALIGN MS64(18, 0x1F) /* fixme */
+#define TXGBE_AUTOC_AN_RX_DRIFT MS64(23, 0x3)
+#define TXGBE_AUTOC_AN_RX_LOOSE MS64(24, 0x3)
+#define TXGBE_AUTOC_PD_TMR MS64(25, 0x3)
+#define TXGBE_AUTOC_RF MS64(27, 0x1)
+#define TXGBE_AUTOC_ASM_PAUSE MS64(29, 0x1)
+#define TXGBE_AUTOC_SYM_PAUSE MS64(28, 0x1)
+#define TXGBE_AUTOC_PAUSE MS64(28, 0x3)
+#define TXGBE_AUTOC_KX_SUPP MS64(30, 0x1)
+#define TXGBE_AUTOC_KX4_SUPP MS64(31, 0x1)
+
+#define TXGBE_AUTOC_10Gs_PMA_PMD_MASK MS64(48, 0x3) /* serial */
+#define TXGBE_AUTOC_10Gs_KR LS64(0, 48, 0x3)
+#define TXGBE_AUTOC_10Gs_XFI LS64(1, 48, 0x3)
+#define TXGBE_AUTOC_10Gs_SFI LS64(2, 48, 0x3)
+#define TXGBE_AUTOC_LINK_DIA_MASK MS64(60, 0x7)
+#define TXGBE_AUTOC_LINK_DIA_D3_MASK LS64(5, 60, 0x7)
+
+#define TXGBE_AUTOC_SPEED_MASK MS64(32, 0xFFFF)
+#define TXGBD_AUTOC_SPEED(r) RS64(r, 32, 0xFFFF)
+#define TXGBE_AUTOC_SPEED(v) LS64(v, 32, 0xFFFF)
+#define TXGBE_LINK_SPEED_UNKNOWN 0
+#define TXGBE_LINK_SPEED_10M_FULL 0x0002
+#define TXGBE_LINK_SPEED_100M_FULL 0x0008
+#define TXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define TXGBE_LINK_SPEED_2_5GB_FULL 0x0400
+#define TXGBE_LINK_SPEED_5GB_FULL 0x0800
+#define TXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define TXGBE_LINK_SPEED_40GB_FULL 0x0100
+#define TXGBE_AUTOC_AUTONEG MS64(63, 0x1)
+
+/* Hardware Datapath:
+ * RX: / Queue <- Filter \
+ * Host | TC <=> SEC <=> MAC <=> PHY
+ * TX: \ Queue -> Filter /
+ *
+ * Packet Filter:
+ * RX: RSS < FDIR < Filter < Encrypt
+ *
+ * Macro Argument Naming:
+ * rp = ring pair [0,127]
+ * tc = traffic class [0,7]
+ * up = user priority [0,7]
+ * pi = pool index [0,63]
+ * r = register
+ * v = value
+ * s = shift
+ * m = mask
+ * i,j,k = array index
+ * H,L = high/low bits
+ * HI,LO = high/low state
+ */
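+
+/* Examples of the conventions above (illustrative):
+ *   TXGBE_TAGTPID(i)         - i indexes an array of registers
+ *   TXGBE_RSTSTAT_TYPE(r)    - r is a register value being decoded
+ *   TXGBE_RSTSTAT_TMRINIT(v) - v is a field value being encoded
+ */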
+
+#define TXGBE_ETHPHYIF TXGBE_REG_DUMMY
+#define TXGBE_ETHPHYIF_MDIO_ACT MS(1, 0x1)
+#define TXGBE_ETHPHYIF_MDIO_MODE MS(2, 0x1)
+#define TXGBE_ETHPHYIF_MDIO_BASE(r) RS(r, 3, 0x1F)
+#define TXGBE_ETHPHYIF_MDIO_SHARED MS(13, 0x1)
+#define TXGBE_ETHPHYIF_SPEED_10M MS(17, 0x1)
+#define TXGBE_ETHPHYIF_SPEED_100M MS(18, 0x1)
+#define TXGBE_ETHPHYIF_SPEED_1G MS(19, 0x1)
+#define TXGBE_ETHPHYIF_SPEED_2_5G MS(20, 0x1)
+#define TXGBE_ETHPHYIF_SPEED_10G MS(21, 0x1)
+#define TXGBE_ETHPHYIF_SGMII_ENABLE MS(25, 0x1)
+#define TXGBE_ETHPHYIF_INT_PHY_MODE MS(24, 0x1)
+#define TXGBE_ETHPHYIF_IO_XPCS MS(30, 0x1)
+#define TXGBE_ETHPHYIF_IO_EPHY MS(31, 0x1)
+
+/******************************************************************************
+ * Chip Registers
+ ******************************************************************************/
+/**
+ * Chip Status
+ **/
+#define TXGBE_PWR 0x010000
+#define TXGBE_PWR_LAN(r) RS(r, 30, 0x3)
+#define TXGBE_PWR_LAN_0 (1)
+#define TXGBE_PWR_LAN_1 (2)
+#define TXGBE_PWR_LAN_A (3)
+#define TXGBE_CTL 0x010004
+#define TXGBE_LOCKPF 0x010008
+#define TXGBE_RST 0x01000C
+#define TXGBE_RST_SW MS(0, 0x1)
+#define TXGBE_RST_LAN(i) MS(((i)+1), 0x1)
+#define TXGBE_RST_FW MS(3, 0x1)
+#define TXGBE_RST_ETH(i) MS(((i)+29), 0x1)
+#define TXGBE_RST_GLB MS(31, 0x1)
+#define TXGBE_RST_DEFAULT (TXGBE_RST_SW | \
+ TXGBE_RST_LAN(0) | \
+ TXGBE_RST_LAN(1))
+
+#define TXGBE_STAT 0x010028
+#define TXGBE_STAT_MNGINIT MS(0, 0x1)
+#define TXGBE_STAT_MNGVETO MS(8, 0x1)
+#define TXGBE_STAT_ECCLAN0 MS(16, 0x1)
+#define TXGBE_STAT_ECCLAN1 MS(17, 0x1)
+#define TXGBE_STAT_ECCMNG MS(18, 0x1)
+#define TXGBE_STAT_ECCPCIE MS(19, 0x1)
+#define TXGBE_STAT_ECCPCIW MS(20, 0x1)
+#define TXGBE_RSTSTAT 0x010030
+#define TXGBE_RSTSTAT_PROG MS(20, 0x1)
+#define TXGBE_RSTSTAT_PREP MS(19, 0x1)
+#define TXGBE_RSTSTAT_TYPE_MASK MS(16, 0x7)
+#define TXGBE_RSTSTAT_TYPE(r) RS(r, 16, 0x7)
+#define TXGBE_RSTSTAT_TYPE_PE LS(0, 16, 0x7)
+#define TXGBE_RSTSTAT_TYPE_PWR LS(1, 16, 0x7)
+#define TXGBE_RSTSTAT_TYPE_HOT LS(2, 16, 0x7)
+#define TXGBE_RSTSTAT_TYPE_SW LS(3, 16, 0x7)
+#define TXGBE_RSTSTAT_TYPE_FW LS(4, 16, 0x7)
+#define TXGBE_RSTSTAT_TMRINIT_MASK MS(8, 0xFF)
+#define TXGBE_RSTSTAT_TMRINIT(v) LS(v, 8, 0xFF)
+#define TXGBE_RSTSTAT_TMRCNT_MASK MS(0, 0xFF)
+#define TXGBE_RSTSTAT_TMRCNT(v) LS(v, 0, 0xFF)
+#define TXGBE_PWRTMR 0x010034
+
+/**
+ * SPI(Flash)
+ **/
+#define TXGBE_SPICMD 0x010104
+#define TXGBE_SPICMD_ADDR(v) LS(v, 0, 0xFFFFFF)
+#define TXGBE_SPICMD_CLK(v) LS(v, 25, 0x7)
+#define TXGBE_SPICMD_CMD(v) LS(v, 28, 0x7)
+#define TXGBE_SPIDAT 0x010108
+#define TXGBE_SPIDAT_BYPASS MS(31, 0x1)
+#define TXGBE_SPIDAT_STATUS(v) LS(v, 16, 0xFF)
+#define TXGBE_SPIDAT_OPDONE MS(0, 0x1)
+#define TXGBE_SPISTATUS 0x01010C
+#define TXGBE_SPISTATUS_OPDONE MS(0, 0x1)
+#define TXGBE_SPISTATUS_BYPASS MS(31, 0x1)
+#define TXGBE_SPIUSRCMD 0x010110
+#define TXGBE_SPICFG0 0x010114
+#define TXGBE_SPICFG1 0x010118
+#define TXGBE_FLASH 0x010120
+#define TXGBE_FLASH_PERSTD MS(0, 0x1)
+#define TXGBE_FLASH_PWRRSTD MS(1, 0x1)
+#define TXGBE_FLASH_SWRSTD MS(7, 0x1)
+#define TXGBE_FLASH_LANRSTD(i) MS(((i)+9), 0x1)
+#define TXGBE_SRAM 0x010124
+#define TXGBE_SRAM_SZ(v) LS(v, 28, 0x7)
+#define TXGBE_SRAMCTLECC 0x010130
+#define TXGBE_SRAMINJECC 0x010134
+#define TXGBE_SRAMECC 0x010138
+
+/**
+ * Thermal Sensor
+ **/
+#define TXGBE_TSCTL 0x010300
+#define TXGBE_TSCTL_MODE MS(31, 0x1)
+#define TXGBE_TSREVAL 0x010304
+#define TXGBE_TSREVAL_EA MS(0, 0x1)
+#define TXGBE_TSDAT 0x010308
+#define TXGBE_TSDAT_TMP(r) ((r) & 0x3FF)
+#define TXGBE_TSDAT_VLD MS(16, 0x1)
+#define TXGBE_TSALMWTRHI 0x01030C
+#define TXGBE_TSALMWTRHI_VAL(v) (((v) & 0x3FF))
+#define TXGBE_TSALMWTRLO 0x010310
+#define TXGBE_TSALMWTRLO_VAL(v) (((v) & 0x3FF))
+#define TXGBE_TSINTWTR 0x010314
+#define TXGBE_TSINTWTR_HI MS(0, 0x1)
+#define TXGBE_TSINTWTR_LO MS(1, 0x1)
+#define TXGBE_TSALM 0x010318
+#define TXGBE_TSALM_LO MS(0, 0x1)
+#define TXGBE_TSALM_HI MS(1, 0x1)
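+
+/*
+ * Illustrative use (editor's sketch, not from the driver): read the
+ * thermal sensor, honoring the valid bit before trusting the sample:
+ *
+ *   u32 ts = rd32(hw, TXGBE_TSDAT);
+ *   if (ts & TXGBE_TSDAT_VLD)
+ *       temp = TXGBE_TSDAT_TMP(ts);
+ */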
+
+/**
+ * Management
+ **/
+#define TXGBE_MNGTC 0x01CD10
+#define TXGBE_MNGFWSYNC 0x01E000
+#define TXGBE_MNGFWSYNC_REQ MS(0, 0x1)
+#define TXGBE_MNGSWSYNC 0x01E004
+#define TXGBE_MNGSWSYNC_REQ MS(0, 0x1)
+#define TXGBE_SWSEM 0x01002C
+#define TXGBE_SWSEM_PF MS(0, 0x1)
+#define TXGBE_MNGSEM 0x01E008
+#define TXGBE_MNGSEM_SW(v) LS(v, 0, 0xFFFF)
+#define TXGBE_MNGSEM_SWPHY MS(0, 0x1)
+#define TXGBE_MNGSEM_SWMBX MS(2, 0x1)
+#define TXGBE_MNGSEM_SWFLASH MS(3, 0x1)
+#define TXGBE_MNGSEM_FW(v) LS(v, 16, 0xFFFF)
+#define TXGBE_MNGSEM_FWPHY MS(16, 0x1)
+#define TXGBE_MNGSEM_FWMBX MS(18, 0x1)
+#define TXGBE_MNGSEM_FWFLASH MS(19, 0x1)
+#define TXGBE_MNGMBXCTL 0x01E044
+#define TXGBE_MNGMBXCTL_SWRDY MS(0, 0x1)
+#define TXGBE_MNGMBXCTL_SWACK MS(1, 0x1)
+#define TXGBE_MNGMBXCTL_FWRDY MS(2, 0x1)
+#define TXGBE_MNGMBXCTL_FWACK MS(3, 0x1)
+#define TXGBE_MNGMBX 0x01E100
+
+/******************************************************************************
+ * Port Registers
+ ******************************************************************************/
+/* Port Control */
+#define TXGBE_PORTCTL 0x014400
+#define TXGBE_PORTCTL_VLANEXT MS(0, 0x1)
+#define TXGBE_PORTCTL_ETAG MS(1, 0x1)
+#define TXGBE_PORTCTL_QINQ MS(2, 0x1)
+#define TXGBE_PORTCTL_DRVLOAD MS(3, 0x1)
+#define TXGBE_PORTCTL_UPLNK MS(4, 0x1)
+#define TXGBE_PORTCTL_DCB MS(10, 0x1)
+#define TXGBE_PORTCTL_NUMTC_MASK MS(11, 0x1)
+#define TXGBE_PORTCTL_NUMTC_4 LS(0, 11, 0x1)
+#define TXGBE_PORTCTL_NUMTC_8 LS(1, 11, 0x1)
+#define TXGBE_PORTCTL_NUMVT_MASK MS(12, 0x3)
+#define TXGBE_PORTCTL_NUMVT_16 LS(1, 12, 0x3)
+#define TXGBE_PORTCTL_NUMVT_32 LS(2, 12, 0x3)
+#define TXGBE_PORTCTL_NUMVT_64 LS(3, 12, 0x3)
+#define TXGBE_PORTCTL_RSTDONE MS(14, 0x1)
+#define TXGBE_PORTCTL_TEREDODIA MS(27, 0x1)
+#define TXGBE_PORTCTL_GENEVEDIA MS(28, 0x1)
+#define TXGBE_PORTCTL_VXLANGPEDIA MS(30, 0x1)
+#define TXGBE_PORTCTL_VXLANDIA MS(31, 0x1)
+
+#define TXGBE_PORT 0x014404
+#define TXGBE_PORT_LINKUP MS(0, 0x1)
+#define TXGBE_PORT_LINK10G MS(1, 0x1)
+#define TXGBE_PORT_LINK1000M MS(2, 0x1)
+#define TXGBE_PORT_LINK100M MS(3, 0x1)
+#define TXGBE_PORT_LANID(r) RS(r, 8, 0x1)
+#define TXGBE_EXTAG 0x014408
+#define TXGBE_EXTAG_ETAG_MASK MS(0, 0xFFFF)
+#define TXGBE_EXTAG_ETAG(v) LS(v, 0, 0xFFFF)
+#define TXGBE_EXTAG_VLAN_MASK MS(16, 0xFFFF)
+#define TXGBE_EXTAG_VLAN(v) LS(v, 16, 0xFFFF)
+#define TXGBE_VXLANPORT 0x014410
+#define TXGBE_VXLANPORTGPE 0x014414
+#define TXGBE_GENEVEPORT 0x014418
+#define TXGBE_TEREDOPORT 0x01441C
+#define TXGBE_LEDCTL 0x014424
+#define TXGBE_LEDCTL_SEL_MASK MS(0, 0xFFFF)
+#define TXGBE_LEDCTL_SEL(s) MS((s), 0x1)
+#define TXGBE_LEDCTL_OD_MASK MS(16, 0xFFFF)
+#define TXGBE_LEDCTL_OD(s) MS(((s)+16), 0x1)
+ /* s=UP(0),10G(1),1G(2),100M(3),BSY(4) */
+#define TXGBE_LEDCTL_ACTIVE (TXGBE_LEDCTL_SEL(4) | TXGBE_LEDCTL_OD(4))
+#define TXGBE_TAGTPID(i) (0x014430 + (i) * 4) /* 0-3 */
+#define TXGBE_TAGTPID_LSB_MASK MS(0, 0xFFFF)
+#define TXGBE_TAGTPID_LSB(v) LS(v, 0, 0xFFFF)
+#define TXGBE_TAGTPID_MSB_MASK MS(16, 0xFFFF)
+#define TXGBE_TAGTPID_MSB(v) LS(v, 16, 0xFFFF)
+
+/**
+ * GPIO Control
+ * P0: link speed change
+ * P1:
+ * P2:
+ * P3: optical laser disable
+ * P4:
+ * P5: link speed selection
+ * P6:
+ * P7: external phy event
+ **/
+#define TXGBE_SDP 0x014800
+#define TXGBE_SDP_0 MS(0, 0x1)
+#define TXGBE_SDP_1 MS(1, 0x1)
+#define TXGBE_SDP_2 MS(2, 0x1)
+#define TXGBE_SDP_3 MS(3, 0x1)
+#define TXGBE_SDP_4 MS(4, 0x1)
+#define TXGBE_SDP_5 MS(5, 0x1)
+#define TXGBE_SDP_6 MS(6, 0x1)
+#define TXGBE_SDP_7 MS(7, 0x1)
+#define TXGBE_SDPDIR 0x014804
+#define TXGBE_SDPCTL 0x014808
+#define TXGBE_SDPINTEA 0x014830
+#define TXGBE_SDPINTMSK 0x014834
+#define TXGBE_SDPINTTYP 0x014838
+#define TXGBE_SDPINTPOL 0x01483C
+#define TXGBE_SDPINT 0x014840
+#define TXGBE_SDPINTDB 0x014848
+#define TXGBE_SDPINTEND 0x01484C
+#define TXGBE_SDPDAT 0x014850
+#define TXGBE_SDPLVLSYN 0x014854
+
+/**
+ * MDIO(PHY)
+ **/
+#define TXGBE_MDIOSCA 0x011200
+#define TXGBE_MDIOSCA_REG(v) LS(v, 0, 0xFFFF)
+#define TXGBE_MDIOSCA_PORT(v) LS(v, 16, 0x1F)
+#define TXGBE_MDIOSCA_DEV(v) LS(v, 21, 0x1F)
+#define TXGBE_MDIOSCD 0x011204
+#define TXGBD_MDIOSCD_DAT(r) RS(r, 0, 0xFFFF)
+#define TXGBE_MDIOSCD_DAT(v) LS(v, 0, 0xFFFF)
+#define TXGBE_MDIOSCD_CMD_PREAD LS(1, 16, 0x3)
+#define TXGBE_MDIOSCD_CMD_WRITE LS(2, 16, 0x3)
+#define TXGBE_MDIOSCD_CMD_READ LS(3, 16, 0x3)
+#define TXGBE_MDIOSCD_SADDR MS(18, 0x1)
+#define TXGBE_MDIOSCD_CLOCK(v) LS(v, 19, 0x7)
+#define TXGBE_MDIOSCD_BUSY MS(22, 0x1)
+
+/**
+ * I2C (SFP)
+ **/
+#define TXGBE_I2CCTL 0x014900
+#define TXGBE_I2CCTL_MAEA MS(0, 0x1)
+#define TXGBE_I2CCTL_SPEED(v) LS(v, 1, 0x3)
+#define TXGBE_I2CCTL_RESTART MS(5, 0x1)
+#define TXGBE_I2CCTL_SLDA MS(6, 0x1)
+#define TXGBE_I2CTGT 0x014904
+#define TXGBE_I2CTGT_ADDR(v) LS(v, 0, 0x3FF)
+#define TXGBE_I2CCMD 0x014910
+#define TXGBE_I2CCMD_READ (MS(9, 0x1) | 0x100)
+#define TXGBE_I2CCMD_WRITE (MS(9, 0x1))
+#define TXGBE_I2CSCLHITM 0x014914
+#define TXGBE_I2CSCLLOTM 0x014918
+#define TXGBE_I2CINT 0x014934
+#define TXGBE_I2CINT_RXFULL MS(2, 0x1)
+#define TXGBE_I2CINT_TXEMPTY MS(4, 0x1)
+#define TXGBE_I2CINTMSK 0x014930
+#define TXGBE_I2CRXFIFO 0x014938
+#define TXGBE_I2CTXFIFO 0x01493C
+#define TXGBE_I2CEA 0x01496C
+#define TXGBE_I2CST 0x014970
+#define TXGBE_I2CST_ACT MS(5, 0x1)
+#define TXGBE_I2CSCLTM 0x0149AC
+#define TXGBE_I2CSDATM 0x0149B0
+
+/**
+ * TPH
+ **/
+#define TXGBE_TPHCFG 0x014F00
+
+/******************************************************************************
+ * Pool Registers
+ ******************************************************************************/
+#define TXGBE_POOLETHCTL(pl) (0x015600 + (pl) * 4)
+#define TXGBE_POOLETHCTL_LBDIA MS(0, 0x1)
+#define TXGBE_POOLETHCTL_LLBDIA MS(1, 0x1)
+#define TXGBE_POOLETHCTL_LLB MS(2, 0x1)
+#define TXGBE_POOLETHCTL_UCP MS(4, 0x1)
+#define TXGBE_POOLETHCTL_ETP MS(5, 0x1)
+#define TXGBE_POOLETHCTL_VLA MS(6, 0x1)
+#define TXGBE_POOLETHCTL_VLP MS(7, 0x1)
+#define TXGBE_POOLETHCTL_UTA MS(8, 0x1)
+#define TXGBE_POOLETHCTL_MCHA MS(9, 0x1)
+#define TXGBE_POOLETHCTL_UCHA MS(10, 0x1)
+#define TXGBE_POOLETHCTL_BCA MS(11, 0x1)
+#define TXGBE_POOLETHCTL_MCP MS(12, 0x1)
+
+/* DMA Control */
+#define TXGBE_POOLRXENA(i) (0x012004 + (i) * 4) /* 0-1 */
+#define TXGBE_POOLRXDNA(i) (0x012060 + (i) * 4) /* 0-1 */
+#define TXGBE_POOLTXENA(i) (0x018004 + (i) * 4) /* 0-1 */
+#define TXGBE_POOLTXDSA(i) (0x0180A0 + (i) * 4) /* 0-1 */
+#define TXGBE_POOLTXLBET(i) (0x018050 + (i) * 4) /* 0-1 */
+#define TXGBE_POOLTXASET(i) (0x018058 + (i) * 4) /* 0-1 */
+#define TXGBE_POOLTXASMAC(i) (0x018060 + (i) * 4) /* 0-1 */
+#define TXGBE_POOLTXASVLAN(i) (0x018070 + (i) * 4) /* 0-1 */
+#define TXGBE_POOLDROPSWBK(i) (0x0151C8 + (i) * 4) /* 0-1 */
+
+#define TXGBE_POOLTAG(pl) (0x018100 + (pl) * 4)
+#define TXGBE_POOLTAG_VTAG(v) LS(v, 0, 0xFFFF)
+#define TXGBE_POOLTAG_VTAG_MASK MS(0, 0xFFFF)
+#define TXGBD_POOLTAG_VTAG_UP(r) RS(r, 13, 0x7)
+#define TXGBE_POOLTAG_TPIDSEL(v) LS(v, 24, 0x7)
+#define TXGBE_POOLTAG_ETAG_MASK MS(27, 0x3)
+#define TXGBE_POOLTAG_ETAG LS(2, 27, 0x3)
+#define TXGBE_POOLTAG_ACT_MASK MS(30, 0x3)
+#define TXGBE_POOLTAG_ACT_ALWAYS LS(1, 30, 0x3)
+#define TXGBE_POOLTAG_ACT_NEVER LS(2, 30, 0x3)
+#define TXGBE_POOLTXARB 0x018204
+#define TXGBE_POOLTXARB_WRR MS(1, 0x1)
+#define TXGBE_POOLETAG(pl) (0x018700 + (pl) * 4)
+
+/* RSS Hash */
+#define TXGBE_POOLRSS(pl) (0x019300 + (pl) * 4)
+#define TXGBE_POOLRSS_L4HDR MS(1, 0x1)
+#define TXGBE_POOLRSS_L3HDR MS(2, 0x1)
+#define TXGBE_POOLRSS_L2HDR MS(3, 0x1)
+#define TXGBE_POOLRSS_L2TUN MS(4, 0x1)
+#define TXGBE_POOLRSS_TUNHDR MS(5, 0x1)
+#define TXGBE_POOLRSSKEY(pl, i) (0x01A000 + (pl) * 0x40 + (i) * 4)
+#define TXGBE_POOLRSSMAP(pl, i) (0x01B000 + (pl) * 0x40 + (i) * 4)
+
+/******************************************************************************
+ * Packet Buffer
+ ******************************************************************************/
+/* Flow Control */
+#define TXGBE_FCXOFFTM(i) (0x019200 + (i) * 4) /* 0-3 */
+#define TXGBE_FCWTRLO(tc) (0x019220 + (tc) * 4)
+#define TXGBE_FCWTRLO_TH(v) LS(v, 10, 0x1FF) /* KB */
+#define TXGBE_FCWTRLO_XON MS(31, 0x1)
+#define TXGBE_FCWTRHI(tc) (0x019260 + (tc) * 4)
+#define TXGBE_FCWTRHI_TH(v) LS(v, 10, 0x1FF) /* KB */
+#define TXGBE_FCWTRHI_XOFF MS(31, 0x1)
+#define TXGBE_RXFCRFSH 0x0192A0
+#define TXGBE_RXFCFSH_TIME(v) LS(v, 0, 0xFFFF)
+#define TXGBE_FCSTAT 0x01CE00
+#define TXGBE_FCSTAT_DLNK(tc) MS((tc), 0x1)
+#define TXGBE_FCSTAT_ULNK(tc) MS((tc) + 8, 0x1)
+
+#define TXGBE_RXFCCFG 0x011090
+#define TXGBE_RXFCCFG_FC MS(0, 0x1)
+#define TXGBE_RXFCCFG_PFC MS(8, 0x1)
+#define TXGBE_TXFCCFG 0x0192A4
+#define TXGBE_TXFCCFG_FC MS(3, 0x1)
+#define TXGBE_TXFCCFG_PFC MS(4, 0x1)
+
+/* Data Buffer */
+#define TXGBE_PBRXCTL 0x019000
+#define TXGBE_PBRXCTL_ST MS(0, 0x1)
+#define TXGBE_PBRXCTL_ENA MS(31, 0x1)
+#define TXGBE_PBRXUP2TC 0x019008
+#define TXGBE_PBTXUP2TC 0x01C800
+#define TXGBE_DCBUP2TC_MAP(tc, v) LS(v, 3 * (tc), 0x7)
+#define TXGBE_DCBUP2TC_DEC(tc, r) RS(r, 3 * (tc), 0x7)
+#define TXGBE_PBRXSIZE(tc) (0x019020 + (tc) * 4)
+#define TXGBE_PBRXSIZE_KB(v) LS(v, 10, 0x3FF)
+
+#define TXGBE_PBRXOFTMR 0x019094
+#define TXGBE_PBRXDBGCMD 0x019090
+#define TXGBE_PBRXDBGDAT(tc) (0x0190A0 + (tc) * 4)
+#define TXGBE_PBTXDMATH(tc) (0x018020 + (tc) * 4)
+#define TXGBE_PBTXSIZE(tc) (0x01CC00 + (tc) * 4)
+
+/* LLI */
+#define TXGBE_PBRXLLI 0x019080
+#define TXGBE_PBRXLLI_SZLT(v) LS(v, 0, 0xFFF)
+#define TXGBE_PBRXLLI_UPLT(v) LS(v, 16, 0x7)
+#define TXGBE_PBRXLLI_UPEA MS(19, 0x1)
+#define TXGBE_PBRXLLI_CNM MS(20, 0x1)
+
+/* Port Arbiter(QoS) */
+#define TXGBE_PARBTXCTL 0x01CD00
+#define TXGBE_PARBTXCTL_SP MS(5, 0x1)
+#define TXGBE_PARBTXCTL_DA MS(6, 0x1)
+#define TXGBE_PARBTXCTL_RECYC MS(8, 0x1)
+#define TXGBE_PARBTXCFG(tc) (0x01CD20 + (tc) * 4)
+#define TXGBE_PARBTXCFG_CRQ(v) LS(v, 0, 0x1FF)
+#define TXGBE_PARBTXCFG_BWG(v) LS(v, 9, 0x7)
+#define TXGBE_PARBTXCFG_MCL(v) LS(v, 12, 0xFFF)
+#define TXGBE_PARBTXCFG_GSP MS(30, 0x1)
+#define TXGBE_PARBTXCFG_LSP MS(31, 0x1)
+
+/******************************************************************************
+ * Queue Registers
+ ******************************************************************************/
+/* Queue Control */
+#define TXGBE_QPRXDROP(i) (0x012080 + (i) * 4) /* 0-3 */
+#define TXGBE_QPRXSTRPVLAN(i) (0x012090 + (i) * 4) /* 0-3 */
+#define TXGBE_QPTXLLI(i) (0x018040 + (i) * 4) /* 0-3 */
+
+/* Queue Arbiter(QoS) */
+#define TXGBE_QARBRXCTL 0x012000
+#define TXGBE_QARBRXCTL_RC MS(1, 0x1)
+#define TXGBE_QARBRXCTL_WSP MS(2, 0x1)
+#define TXGBE_QARBRXCTL_DA MS(6, 0x1)
+#define TXGBE_QARBRXCFG(tc) (0x012040 + (tc) * 4)
+#define TXGBE_QARBRXCFG_CRQ(v) LS(v, 0, 0x1FF)
+#define TXGBE_QARBRXCFG_BWG(v) LS(v, 9, 0x7)
+#define TXGBE_QARBRXCFG_MCL(v) LS(v, 12, 0xFFF)
+#define TXGBE_QARBRXCFG_GSP MS(30, 0x1)
+#define TXGBE_QARBRXCFG_LSP MS(31, 0x1)
+#define TXGBE_QARBRXTC 0x0194F8
+#define TXGBE_QARBRXTC_RR MS(0, 0x1)
+
+#define TXGBE_QARBTXCTL 0x018200
+#define TXGBE_QARBTXCTL_WSP MS(1, 0x1)
+#define TXGBE_QARBTXCTL_RECYC MS(4, 0x1)
+#define TXGBE_QARBTXCTL_DA MS(6, 0x1)
+#define TXGBE_QARBTXCFG(tc) (0x018220 + (tc) * 4)
+#define TXGBE_QARBTXCFG_CRQ(v) LS(v, 0, 0x1FF)
+#define TXGBE_QARBTXCFG_BWG(v) LS(v, 9, 0x7)
+#define TXGBE_QARBTXCFG_MCL(v) LS(v, 12, 0xFFF)
+#define TXGBE_QARBTXCFG_GSP MS(30, 0x1)
+#define TXGBE_QARBTXCFG_LSP MS(31, 0x1)
+#define TXGBE_QARBTXMMW 0x018208
+#define TXGBE_QARBTXMMW_DEF (4)
+#define TXGBE_QARBTXMMW_JF (20)
+#define TXGBE_QARBTXRATEI 0x01820C
+#define TXGBE_QARBTXRATE 0x018404
+#define TXGBE_QARBTXRATE_MIN(v) LS(v, 0, 0x3FFF)
+#define TXGBE_QARBTXRATE_MAX(v) LS(v, 16, 0x3FFF)
+#define TXGBE_QARBTXCRED(rp) (0x018500 + (rp) * 4)
+
+/* QCN */
+#define TXGBE_QCNADJ 0x018210
+#define TXGBE_QCNRP 0x018400
+#define TXGBE_QCNRPRATE 0x018404
+#define TXGBE_QCNRPADJ 0x018408
+#define TXGBE_QCNRPRLD 0x01840C
+
+/* Misc Control */
+#define TXGBE_RSECCTL 0x01200C
+#define TXGBE_RSECCTL_TSRSC MS(0, 0x1)
+#define TXGBE_DMATXCTRL 0x018000
+#define TXGBE_DMATXCTRL_ENA MS(0, 0x1)
+#define TXGBE_DMATXCTRL_TPID_MASK MS(16, 0xFFFF)
+#define TXGBE_DMATXCTRL_TPID(v) LS(v, 16, 0xFFFF)
+
+/******************************************************************************
+ * Packet Filter (L2-7)
+ ******************************************************************************/
+/**
+ * Receive Scaling
+ **/
+#define TXGBE_RSSTBL(i) (0x019400 + (i) * 4) /* 32 */
+#define TXGBE_RSSKEY(i) (0x019480 + (i) * 4) /* 10 */
+#define TXGBE_RSSPBHASH 0x0194F0
+#define TXGBE_RSSPBHASH_BITS(tc, v) LS(v, 3 * (tc), 0x7)
+#define TXGBE_RACTL 0x0194F4
+#define TXGBE_RACTL_RSSMKEY MS(0, 0x1)
+#define TXGBE_RACTL_RSSENA MS(2, 0x1)
+#define TXGBE_RACTL_RSSMASK MS(16, 0xFFFF)
+#define TXGBE_RACTL_RSSIPV4TCP MS(16, 0x1)
+#define TXGBE_RACTL_RSSIPV4 MS(17, 0x1)
+#define TXGBE_RACTL_RSSIPV6 MS(20, 0x1)
+#define TXGBE_RACTL_RSSIPV6TCP MS(21, 0x1)
+#define TXGBE_RACTL_RSSIPV4UDP MS(22, 0x1)
+#define TXGBE_RACTL_RSSIPV6UDP MS(23, 0x1)
+
+/**
+ * Flow Director
+ **/
+#define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */
+#define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */
+#define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */
+#define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */
+#define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */
+#define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */
+
+#define TXGBE_FDIRCTL 0x019500
+#define TXGBE_FDIRCTL_BUF_MASK MS(0, 0x3)
+#define TXGBE_FDIRCTL_BUF_64K LS(1, 0, 0x3)
+#define TXGBE_FDIRCTL_BUF_128K LS(2, 0, 0x3)
+#define TXGBE_FDIRCTL_BUF_256K LS(3, 0, 0x3)
+#define TXGBD_FDIRCTL_BUF_BYTE(r) (1 << (15 + RS(r, 0, 0x3)))
+#define TXGBE_FDIRCTL_INITDONE MS(3, 0x1)
+#define TXGBE_FDIRCTL_PERFECT MS(4, 0x1)
+#define TXGBE_FDIRCTL_REPORT_MASK MS(5, 0x7)
+#define TXGBE_FDIRCTL_REPORT_MATCH LS(1, 5, 0x7)
+#define TXGBE_FDIRCTL_REPORT_ALWAYS LS(5, 5, 0x7)
+#define TXGBE_FDIRCTL_DROPQP_MASK MS(8, 0x7F)
+#define TXGBE_FDIRCTL_DROPQP(v) LS(v, 8, 0x7F)
+#define TXGBE_FDIRCTL_HASHBITS_MASK MS(20, 0xF)
+#define TXGBE_FDIRCTL_HASHBITS(v) LS(v, 20, 0xF)
+#define TXGBE_FDIRCTL_MAXLEN(v) LS(v, 24, 0xF)
+#define TXGBE_FDIRCTL_FULLTHR(v) LS(v, 28, 0xF)
+#define TXGBE_FDIRFLEXCFG(i) (0x019580 + (i) * 4) /* 0-15 */
+#define TXGBD_FDIRFLEXCFG_ALL(r, i) RS(r, (i) << 3, 0xFF)
+#define TXGBE_FDIRFLEXCFG_ALL(v, i) LS(v, (i) << 3, 0xFF)
+#define TXGBE_FDIRFLEXCFG_BASE_MAC LS(0, 0, 0x3)
+#define TXGBE_FDIRFLEXCFG_BASE_L2 LS(1, 0, 0x3)
+#define TXGBE_FDIRFLEXCFG_BASE_L3 LS(2, 0, 0x3)
+#define TXGBE_FDIRFLEXCFG_BASE_PAY LS(3, 0, 0x3)
+#define TXGBE_FDIRFLEXCFG_DIA MS(2, 0x1)
+#define TXGBE_FDIRFLEXCFG_OFST_MASK MS(3, 0x1F)
+#define TXGBD_FDIRFLEXCFG_OFST(r) RS(r, 3, 0x1F)
+#define TXGBE_FDIRFLEXCFG_OFST(v) LS(v, 3, 0x1F)
+#define TXGBE_FDIRBKTHKEY 0x019568
+#define TXGBE_FDIRSIGHKEY 0x01956C
+
+/* Common Mask */
+#define TXGBE_FDIRDIP4MSK 0x01953C
+#define TXGBE_FDIRSIP4MSK 0x019540
+#define TXGBE_FDIRIP6MSK 0x019574
+#define TXGBE_FDIRIP6MSK_SRC(v) LS(v, 0, 0xFFFF)
+#define TXGBE_FDIRIP6MSK_DST(v) LS(v, 16, 0xFFFF)
+#define TXGBE_FDIRTCPMSK 0x019544
+#define TXGBE_FDIRTCPMSK_SRC(v) LS(v, 0, 0xFFFF)
+#define TXGBE_FDIRTCPMSK_DST(v) LS(v, 16, 0xFFFF)
+#define TXGBE_FDIRUDPMSK 0x019548
+#define TXGBE_FDIRUDPMSK_SRC(v) LS(v, 0, 0xFFFF)
+#define TXGBE_FDIRUDPMSK_DST(v) LS(v, 16, 0xFFFF)
+#define TXGBE_FDIRSCTPMSK 0x019560
+#define TXGBE_FDIRSCTPMSK_SRC(v) LS(v, 0, 0xFFFF)
+#define TXGBE_FDIRSCTPMSK_DST(v) LS(v, 16, 0xFFFF)
+#define TXGBE_FDIRMSK 0x019570
+#define TXGBE_FDIRMSK_POOL MS(2, 0x1)
+#define TXGBE_FDIRMSK_L4P MS(3, 0x1)
+#define TXGBE_FDIRMSK_L3P MS(4, 0x1)
+#define TXGBE_FDIRMSK_TUNTYPE MS(5, 0x1)
+#define TXGBE_FDIRMSK_TUNIP MS(6, 0x1)
+#define TXGBE_FDIRMSK_TUNPKT MS(7, 0x1)
+
+/* Programming Interface */
+#define TXGBE_FDIRPIPORT 0x019520
+#define TXGBE_FDIRPIPORT_SRC(v) LS(v, 0, 0xFFFF)
+#define TXGBE_FDIRPIPORT_DST(v) LS(v, 16, 0xFFFF)
+#define TXGBE_FDIRPISIP6(i) (0x01950C + (i) * 4) /* [0,2] */
+#define TXGBE_FDIRPISIP4 0x019518
+#define TXGBE_FDIRPIDIP4 0x01951C
+#define TXGBE_FDIRPIFLEX 0x019524
+#define TXGBE_FDIRPIFLEX_PTYPE(v) LS(v, 0, 0xFF)
+#define TXGBE_FDIRPIFLEX_FLEX(v) LS(v, 16, 0xFFFF)
+#define TXGBE_FDIRPIHASH 0x019528
+#define TXGBE_FDIRPIHASH_BKT(v) LS(v, 0, 0x7FFF)
+#define TXGBE_FDIRPIHASH_VLD MS(15, 0x1)
+#define TXGBE_FDIRPIHASH_SIG(v) LS(v, 16, 0x7FFF)
+#define TXGBE_FDIRPIHASH_IDX(v) LS(v, 16, 0xFFFF)
+#define TXGBE_FDIRPICMD 0x01952C
+#define TXGBE_FDIRPICMD_OP_MASK MS(0, 0x3)
+#define TXGBE_FDIRPICMD_OP_ADD LS(1, 0, 0x3)
+#define TXGBE_FDIRPICMD_OP_REM LS(2, 0, 0x3)
+#define TXGBE_FDIRPICMD_OP_QRY LS(3, 0, 0x3)
+#define TXGBE_FDIRPICMD_VLD MS(2, 0x1)
+#define TXGBE_FDIRPICMD_UPD MS(3, 0x1)
+#define TXGBE_FDIRPICMD_DIP6 MS(4, 0x1)
+#define TXGBE_FDIRPICMD_FT(v) LS(v, 5, 0x3)
+#define TXGBE_FDIRPICMD_FT_MASK MS(5, 0x3)
+#define TXGBE_FDIRPICMD_FT_UDP LS(1, 5, 0x3)
+#define TXGBE_FDIRPICMD_FT_TCP LS(2, 5, 0x3)
+#define TXGBE_FDIRPICMD_FT_SCTP LS(3, 5, 0x3)
+#define TXGBE_FDIRPICMD_IP6 MS(7, 0x1)
+#define TXGBE_FDIRPICMD_CLR MS(8, 0x1)
+#define TXGBE_FDIRPICMD_DROP MS(9, 0x1)
+#define TXGBE_FDIRPICMD_LLI MS(10, 0x1)
+#define TXGBE_FDIRPICMD_LAST MS(11, 0x1)
+#define TXGBE_FDIRPICMD_COLLI MS(12, 0x1)
+#define TXGBE_FDIRPICMD_QPENA MS(15, 0x1)
+#define TXGBE_FDIRPICMD_QP(v) LS(v, 16, 0x7F)
+#define TXGBE_FDIRPICMD_POOL(v) LS(v, 24, 0x3F)
+
+/**
+ * 5-tuple Filter
+ **/
+#define TXGBE_5TFSADDR(i) (0x019600 + (i) * 4) /* 0-127 */
+#define TXGBE_5TFDADDR(i) (0x019800 + (i) * 4) /* 0-127 */
+#define TXGBE_5TFPORT(i) (0x019A00 + (i) * 4) /* 0-127 */
+#define TXGBE_5TFPORT_SRC(v) LS(v, 0, 0xFFFF)
+#define TXGBE_5TFPORT_DST(v) LS(v, 16, 0xFFFF)
+#define TXGBE_5TFCTL0(i) (0x019C00 + (i) * 4) /* 0-127 */
+#define TXGBE_5TFCTL0_PROTO(v) LS(v, 0, 0x3)
+enum txgbe_5tuple_protocol {
+ TXGBE_5TF_PROT_TCP = 0,
+ TXGBE_5TF_PROT_UDP,
+ TXGBE_5TF_PROT_SCTP,
+ TXGBE_5TF_PROT_NONE,
+};
+#define TXGBE_5TFCTL0_PRI(v) LS(v, 2, 0x7)
+#define TXGBE_5TFCTL0_POOL(v) LS(v, 8, 0x3F)
+#define TXGBE_5TFCTL0_MASK MS(25, 0x3F)
+#define TXGBE_5TFCTL0_MSADDR MS(25, 0x1)
+#define TXGBE_5TFCTL0_MDADDR MS(26, 0x1)
+#define TXGBE_5TFCTL0_MSPORT MS(27, 0x1)
+#define TXGBE_5TFCTL0_MDPORT MS(28, 0x1)
+#define TXGBE_5TFCTL0_MPROTO MS(29, 0x1)
+#define TXGBE_5TFCTL0_MPOOL MS(30, 0x1)
+#define TXGBE_5TFCTL0_ENA MS(31, 0x1)
+#define TXGBE_5TFCTL1(i) (0x019E00 + (i) * 4) /* 0-127 */
+#define TXGBE_5TFCTL1_CHKSZ MS(12, 0x1)
+#define TXGBE_5TFCTL1_LLI MS(20, 0x1)
+#define TXGBE_5TFCTL1_QP(v) LS(v, 21, 0x7F)
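+
+/*
+ * Editor's sketch of composing a 5-tuple filter from the fields above
+ * (illustrative only; 'idx', 'pool' and 'queue' are hypothetical
+ * variables, not driver state):
+ *
+ *   wr32(hw, TXGBE_5TFCTL0(idx),
+ *        TXGBE_5TFCTL0_PROTO(TXGBE_5TF_PROT_TCP) |
+ *        TXGBE_5TFCTL0_PRI(1) | TXGBE_5TFCTL0_POOL(pool) |
+ *        TXGBE_5TFCTL0_ENA);
+ *   wr32(hw, TXGBE_5TFCTL1(idx), TXGBE_5TFCTL1_QP(queue));
+ */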
+
+/**
+ * Storm Control
+ **/
+#define TXGBE_STRMCTL 0x015004
+#define TXGBE_STRMCTL_MCPNSH MS(0, 0x1)
+#define TXGBE_STRMCTL_MCDROP MS(1, 0x1)
+#define TXGBE_STRMCTL_BCPNSH MS(2, 0x1)
+#define TXGBE_STRMCTL_BCDROP MS(3, 0x1)
+#define TXGBE_STRMCTL_DFTPOOL MS(4, 0x1)
+#define TXGBE_STRMCTL_ITVL(v) LS(v, 8, 0x3FF)
+#define TXGBE_STRMTH 0x015008
+#define TXGBE_STRMTH_MC(v) LS(v, 0, 0xFFFF)
+#define TXGBE_STRMTH_BC(v) LS(v, 16, 0xFFFF)
+
+/******************************************************************************
+ * Ether Flow
+ ******************************************************************************/
+#define TXGBE_PSRCTL 0x015000
+#define TXGBE_PSRCTL_TPE MS(4, 0x1)
+#define TXGBE_PSRCTL_ADHF12_MASK MS(5, 0x3)
+#define TXGBE_PSRCTL_ADHF12(v) LS(v, 5, 0x3)
+#define TXGBE_PSRCTL_UCHFENA MS(7, 0x1)
+#define TXGBE_PSRCTL_MCHFENA MS(7, 0x1)
+#define TXGBE_PSRCTL_MCP MS(8, 0x1)
+#define TXGBE_PSRCTL_UCP MS(9, 0x1)
+#define TXGBE_PSRCTL_BCA MS(10, 0x1)
+#define TXGBE_PSRCTL_L4CSUM MS(12, 0x1)
+#define TXGBE_PSRCTL_PCSD MS(13, 0x1)
+#define TXGBE_PSRCTL_RSCPUSH MS(15, 0x1)
+#define TXGBE_PSRCTL_RSCDIA MS(16, 0x1)
+#define TXGBE_PSRCTL_RSCACK MS(17, 0x1)
+#define TXGBE_PSRCTL_LBENA MS(18, 0x1)
+#define TXGBE_FRMSZ 0x015020
+#define TXGBE_FRMSZ_MAX_MASK MS(0, 0xFFFF)
+#define TXGBE_FRMSZ_MAX(v) LS((v) + 4, 0, 0xFFFF)
+#define TXGBE_VLANCTL 0x015088
+#define TXGBE_VLANCTL_TPID_MASK MS(0, 0xFFFF)
+#define TXGBE_VLANCTL_TPID(v) LS(v, 0, 0xFFFF)
+#define TXGBE_VLANCTL_CFI MS(28, 0x1)
+#define TXGBE_VLANCTL_CFIENA MS(29, 0x1)
+#define TXGBE_VLANCTL_VFE MS(30, 0x1)
+#define TXGBE_POOLCTL 0x0151B0
+#define TXGBE_POOLCTL_DEFDSA MS(29, 0x1)
+#define TXGBE_POOLCTL_RPLEN MS(30, 0x1)
+#define TXGBE_POOLCTL_MODE_MASK MS(16, 0x3)
+#define TXGBE_PSRPOOL_MODE_MAC LS(0, 16, 0x3)
+#define TXGBE_PSRPOOL_MODE_ETAG LS(1, 16, 0x3)
+#define TXGBE_POOLCTL_DEFPL(v) LS(v, 7, 0x3F)
+#define TXGBE_POOLCTL_DEFPL_MASK MS(7, 0x3F)
+
+#define TXGBE_ETFLT(i) (0x015128 + (i) * 4) /* 0-7 */
+#define TXGBE_ETFLT_ETID(v) LS(v, 0, 0xFFFF)
+#define TXGBE_ETFLT_ETID_MASK MS(0, 0xFFFF)
+#define TXGBE_ETFLT_POOL(v) LS(v, 20, 0x3FF)
+#define TXGBE_ETFLT_POOLENA MS(26, 0x1)
+#define TXGBE_ETFLT_FCOE MS(27, 0x1)
+#define TXGBE_ETFLT_TXAS MS(29, 0x1)
+#define TXGBE_ETFLT_1588 MS(30, 0x1)
+#define TXGBE_ETFLT_ENA MS(31, 0x1)
+#define TXGBE_ETCLS(i) (0x019100 + (i) * 4) /* 0-7 */
+#define TXGBE_ETCLS_QPID(v) LS(v, 16, 0x7F)
+#define TXGBD_ETCLS_QPID(r) RS(r, 16, 0x7F)
+#define TXGBE_ETCLS_LLI MS(29, 0x1)
+#define TXGBE_ETCLS_QENA MS(31, 0x1)
+#define TXGBE_SYNCLS 0x019130
+#define TXGBE_SYNCLS_ENA MS(0, 0x1)
+#define TXGBE_SYNCLS_QPID(v) LS(v, 1, 0x7F)
+#define TXGBD_SYNCLS_QPID(r) RS(r, 1, 0x7F)
+#define TXGBE_SYNCLS_QPID_MASK MS(1, 0x7F)
+#define TXGBE_SYNCLS_HIPRIO MS(31, 0x1)
+
+/* MAC & VLAN & NVE */
+#define TXGBE_PSRVLANIDX 0x016230 /* 0-63 */
+#define TXGBE_PSRVLAN 0x016220
+#define TXGBE_PSRVLAN_VID(v) LS(v, 0, 0xFFF)
+#define TXGBE_PSRVLAN_EA MS(31, 0x1)
+#define TXGBE_PSRVLANPLM(i) (0x016224 + (i) * 4) /* 0-1 */
+
+#define TXGBE_PSRNVEI 0x016260 /* 256 */
+#define TXGBE_PSRNVEADDR(i) (0x016240 + (i) * 4) /* 0-3 */
+#define TXGBE_PSRNVE 0x016250
+#define TXGBE_PSRNVE_KEY(v) LS(v, 0, 0xFFFFFF)
+#define TXGBE_PSRNVE_TYPE(v) LS(v, 24, 0x3)
+#define TXGBE_PSRNVECTL 0x016254
+#define TXGBE_PSRNVECTL_MKEY MS(0, 0x1)
+#define TXGBE_PSRNVECTL_MADDR MS(1, 0x1)
+#define TXGBE_PSRNVECTL_SEL(v) LS(v, 8, 0x3)
+#define TXGBE_PSRNVECTL_SEL_ODIP (0)
+#define TXGBE_PSRNVECTL_SEL_IDMAC (1)
+#define TXGBE_PSRNVECTL_SEL_IDIP (2)
+#define TXGBE_PSRNVECTL_EA MS(31, 0x1)
+#define TXGBE_PSRNVEPM(i) (0x016258 + (i) * 4) /* 0-1 */
+
+/**
+ * FCoE
+ **/
+#define TXGBE_FCCTL 0x015100
+#define TXGBE_FCCTL_LLI MS(0, 0x1)
+#define TXGBE_FCCTL_SAVBAD MS(1, 0x1)
+#define TXGBE_FCCTL_FRSTRDH MS(2, 0x1)
+#define TXGBE_FCCTL_LSEQH MS(3, 0x1)
+#define TXGBE_FCCTL_ALLH MS(4, 0x1)
+#define TXGBE_FCCTL_FSEQH MS(5, 0x1)
+#define TXGBE_FCCTL_ICRC MS(6, 0x1)
+#define TXGBE_FCCTL_CRCBO MS(7, 0x1)
+#define TXGBE_FCCTL_VER(v) LS(v, 8, 0xF)
+#define TXGBE_FCRSSCTL 0x019140
+#define TXGBE_FCRSSCTL_EA MS(0, 0x1)
+#define TXGBE_FCRSSTBL(i) (0x019160 + (i) * 4) /* 0-7 */
+#define TXGBE_FCRSSTBL_QUE(v) LS(v, 0, 0x7F)
+
+#define TXGBE_FCRXEOF 0x015158
+#define TXGBE_FCRXSOF 0x0151F8
+#define TXGBE_FCTXEOF 0x018384
+#define TXGBE_FCTXSOF 0x018380
+#define TXGBE_FCRXFCDESC(i) (0x012410 + (i) * 4) /* 0-1 */
+#define TXGBE_FCRXFCBUF 0x012418
+#define TXGBE_FCRXFCDDP 0x012420
+#define TXGBE_FCRXCTXINVL(i) (0x0190C0 + (i) * 4) /* 0-15 */
+
+/* Programming Interface */
+#define TXGBE_FCCTXT 0x015110
+#define TXGBE_FCCTXT_ID(v) (((v) & 0x1FF)) /* 512 */
+#define TXGBE_FCCTXT_REVA LS(0x1, 13, 0x1)
+#define TXGBE_FCCTXT_WREA LS(0x1, 14, 0x1)
+#define TXGBE_FCCTXT_RDEA LS(0x1, 15, 0x1)
+#define TXGBE_FCCTXTCTL 0x015108
+#define TXGBE_FCCTXTCTL_EA MS(0, 0x1)
+#define TXGBE_FCCTXTCTL_FIRST MS(1, 0x1)
+#define TXGBE_FCCTXTCTL_WR MS(2, 0x1)
+#define TXGBE_FCCTXTCTL_SEQID(v) LS(v, 8, 0xFF)
+#define TXGBE_FCCTXTCTL_SEQNR(v) LS(v, 16, 0xFFFF)
+#define TXGBE_FCCTXTPARM 0x0151D8
+
+/**
+ * Mirror Rules
+ **/
+#define TXGBE_MIRRCTL(i) (0x015B00 + (i) * 4)
+#define TXGBE_MIRRCTL_POOL MS(0, 0x1)
+#define TXGBE_MIRRCTL_UPLINK MS(1, 0x1)
+#define TXGBE_MIRRCTL_DNLINK MS(2, 0x1)
+#define TXGBE_MIRRCTL_VLAN MS(3, 0x1)
+#define TXGBE_MIRRCTL_DESTP(v) LS(v, 8, 0x3F)
+#define TXGBE_MIRRVLANL(i) (0x015B10 + (i) * 8)
+#define TXGBE_MIRRVLANH(i) (0x015B14 + (i) * 8)
+#define TXGBE_MIRRPOOLL(i) (0x015B30 + (i) * 8)
+#define TXGBE_MIRRPOOLH(i) (0x015B34 + (i) * 8)
+
+/**
+ * Time Stamp
+ **/
+#define TXGBE_TSRXCTL 0x015188
+#define TXGBE_TSRXCTL_VLD MS(0, 0x1)
+#define TXGBE_TSRXCTL_TYPE(v) LS(v, 1, 0x7)
+#define TXGBE_TSRXCTL_TYPE_V2L2 (0)
+#define TXGBE_TSRXCTL_TYPE_V1L4 (1)
+#define TXGBE_TSRXCTL_TYPE_V2L24 (2)
+#define TXGBE_TSRXCTL_TYPE_V2EVENT (5)
+#define TXGBE_TSRXCTL_ENA MS(4, 0x1)
+#define TXGBE_TSRXSTMPL 0x0151E8
+#define TXGBE_TSRXSTMPH 0x0151A4
+#define TXGBE_TSTXCTL 0x01D400
+#define TXGBE_TSTXCTL_VLD MS(0, 0x1)
+#define TXGBE_TSTXCTL_ENA MS(4, 0x1)
+#define TXGBE_TSTXSTMPL 0x01D404
+#define TXGBE_TSTXSTMPH 0x01D408
+#define TXGBE_TSTIMEL 0x01D40C
+#define TXGBE_TSTIMEH 0x01D410
+#define TXGBE_TSTIMEINC 0x01D414
+#define TXGBE_TSTIMEINC_IV(v) LS(v, 0, 0xFFFFFF)
+#define TXGBE_TSTIMEINC_IP(v) LS(v, 24, 0xFF)
+#define TXGBE_TSTIMEINC_VP(v, p) \
+ (((v) & MS(0, 0xFFFFFF)) | TXGBE_TSTIMEINC_IP(p))
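+
+/*
+ * Illustrative sketch (editor's note, not from the driver): program the
+ * 1588 clock with a 24-bit increment value and an 8-bit period, where
+ * 'incval' and 'period' are hypothetical variables:
+ *
+ *   wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, period));
+ */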
+
+/**
+ * Wake on LAN
+ **/
+#define TXGBE_WOLCTL 0x015B80
+#define TXGBE_WOLIPCTL 0x015B84
+#define TXGBE_WOLIP4(i) (0x015BC0 + (i) * 4) /* 0-3 */
+#define TXGBE_WOLIP6(i) (0x015BE0 + (i) * 4) /* 0-3 */
+
+#define TXGBE_WOLFLEXCTL 0x015CFC
+#define TXGBE_WOLFLEXI 0x015B8C
+#define TXGBE_WOLFLEXDAT(i) (0x015C00 + (i) * 16) /* 0-15 */
+#define TXGBE_WOLFLEXMSK(i) (0x015C08 + (i) * 16) /* 0-15 */
+
+/******************************************************************************
+ * Security Registers
+ ******************************************************************************/
+#define TXGBE_SECRXCTL 0x017000
+#define TXGBE_SECRXCTL_ODSA MS(0, 0x1)
+#define TXGBE_SECRXCTL_XDSA MS(1, 0x1)
+#define TXGBE_SECRXCTL_CRCSTRIP MS(2, 0x1)
+#define TXGBE_SECRXCTL_SAVEBAD MS(6, 0x1)
+#define TXGBE_SECRXSTAT 0x017004
+#define TXGBE_SECRXSTAT_RDY MS(0, 0x1)
+#define TXGBE_SECRXSTAT_ECC MS(1, 0x1)
+
+#define TXGBE_SECTXCTL 0x01D000
+#define TXGBE_SECTXCTL_ODSA MS(0, 0x1)
+#define TXGBE_SECTXCTL_XDSA MS(1, 0x1)
+#define TXGBE_SECTXCTL_STFWD MS(2, 0x1)
+#define TXGBE_SECTXCTL_MSKIV MS(3, 0x1)
+#define TXGBE_SECTXSTAT 0x01D004
+#define TXGBE_SECTXSTAT_RDY MS(0, 0x1)
+#define TXGBE_SECTXSTAT_ECC MS(1, 0x1)
+#define TXGBE_SECTXBUFAF 0x01D008
+#define TXGBE_SECTXBUFAE 0x01D00C
+#define TXGBE_SECTXIFG 0x01D020
+#define TXGBE_SECTXIFG_MIN(v) LS(v, 0, 0xF)
+#define TXGBE_SECTXIFG_MIN_MASK MS(0, 0xF)
+
+/**
+ * LinkSec
+ **/
+#define TXGBE_LSECRXCAP 0x017200
+#define TXGBE_LSECRXCTL 0x017204
+ /* disabled(0),check(1),strict(2),drop(3) */
+#define TXGBE_LSECRXCTL_MODE_MASK MS(2, 0x3)
+#define TXGBE_LSECRXCTL_MODE_STRICT LS(2, 2, 0x3)
+#define TXGBE_LSECRXCTL_POSTHDR MS(6, 0x1)
+#define TXGBE_LSECRXCTL_REPLAY MS(7, 0x1)
+#define TXGBE_LSECRXSCIL 0x017208
+#define TXGBE_LSECRXSCIH 0x01720C
+#define TXGBE_LSECRXSA(i) (0x017210 + (i) * 4) /* 0-1 */
+#define TXGBE_LSECRXPN(i) (0x017218 + (i) * 4) /* 0-1 */
+#define TXGBE_LSECRXKEY(n, i) (0x017220 + 0x10 * (n) + 4 * (i)) /* 0-3 */
+#define TXGBE_LSECTXCAP 0x01D200
+#define TXGBE_LSECTXCTL 0x01D204
+ /* disabled(0), auth(1), auth+encrypt(2) */
+#define TXGBE_LSECTXCTL_MODE_MASK MS(0, 0x3)
+#define TXGBE_LSECTXCTL_MODE_AUTH LS(1, 0, 0x3)
+#define TXGBE_LSECTXCTL_MODE_AENC LS(2, 0, 0x3)
+#define TXGBE_LSECTXCTL_PNTRH_MASK MS(8, 0xFFFFFF)
+#define TXGBE_LSECTXCTL_PNTRH(v) LS(v, 8, 0xFFFFFF)
+#define TXGBE_LSECTXSCIL 0x01D208
+#define TXGBE_LSECTXSCIH 0x01D20C
+#define TXGBE_LSECTXSA 0x01D210
+#define TXGBE_LSECTXPN0 0x01D214
+#define TXGBE_LSECTXPN1 0x01D218
+#define TXGBE_LSECTXKEY0(i) (0x01D21C + (i) * 4) /* 0-3 */
+#define TXGBE_LSECTXKEY1(i) (0x01D22C + (i) * 4) /* 0-3 */
+
+#define TXGBE_LSECRX_UTPKT 0x017240
+#define TXGBE_LSECRX_DECOCT 0x017244
+#define TXGBE_LSECRX_VLDOCT 0x017248
+#define TXGBE_LSECRX_BTPKT 0x01724C
+#define TXGBE_LSECRX_NOSCIPKT 0x017250
+#define TXGBE_LSECRX_UNSCIPKT 0x017254
+#define TXGBE_LSECRX_UNCHKPKT 0x017258
+#define TXGBE_LSECRX_DLYPKT 0x01725C
+#define TXGBE_LSECRX_LATEPKT 0x017260
+#define TXGBE_LSECRX_OKPKT(i) (0x017264 + (i) * 4) /* 0-1 */
+#define TXGBE_LSECRX_BADPKT(i) (0x01726C + (i) * 4) /* 0-1 */
+#define TXGBE_LSECRX_INVPKT(i) (0x017274 + (i) * 4) /* 0-1 */
+#define TXGBE_LSECRX_BADSAPKT 0x01727C
+#define TXGBE_LSECRX_INVSAPKT 0x017280
+#define TXGBE_LSECTX_UTPKT 0x01D23C
+#define TXGBE_LSECTX_ENCPKT 0x01D240
+#define TXGBE_LSECTX_PROTPKT 0x01D244
+#define TXGBE_LSECTX_ENCOCT 0x01D248
+#define TXGBE_LSECTX_PROTOCT 0x01D24C
+
+/**
+ * IpSec
+ **/
+#define TXGBE_ISECRXIDX 0x017100
+#define TXGBE_ISECRXADDR(i) (0x017104 + (i) * 4) /*0-3*/
+#define TXGBE_ISECRXSPI 0x017114
+#define TXGBE_ISECRXIPIDX 0x017118
+#define TXGBE_ISECRXKEY(i) (0x01711C + (i) * 4) /*0-3*/
+#define TXGBE_ISECRXSALT 0x01712C
+#define TXGBE_ISECRXMODE 0x017130
+
+#define TXGBE_ISECTXIDX 0x01D100
+#define TXGBE_ISECTXIDX_WT 0x80000000U
+#define TXGBE_ISECTXIDX_RD 0x40000000U
+#define TXGBE_ISECTXIDX_SDIDX 0x0U
+#define TXGBE_ISECTXIDX_ENA 0x00000001U
+
+#define TXGBE_ISECTXSALT 0x01D104
+#define TXGBE_ISECTXKEY(i) (0x01D108 + (i) * 4) /* 0-3 */
+
+/******************************************************************************
+ * MAC Registers
+ ******************************************************************************/
+#define TXGBE_MACRXCFG 0x011004
+#define TXGBE_MACRXCFG_ENA MS(0, 0x1)
+#define TXGBE_MACRXCFG_JUMBO MS(8, 0x1)
+#define TXGBE_MACRXCFG_LB MS(10, 0x1)
+#define TXGBE_MACCNTCTL 0x011800
+#define TXGBE_MACCNTCTL_RC MS(2, 0x1)
+
+#define TXGBE_MACRXFLT 0x011008
+#define TXGBE_MACRXFLT_PROMISC MS(0, 0x1)
+#define TXGBE_MACRXFLT_CTL_MASK MS(6, 0x3)
+#define TXGBE_MACRXFLT_CTL_DROP LS(0, 6, 0x3)
+#define TXGBE_MACRXFLT_CTL_NOPS LS(1, 6, 0x3)
+#define TXGBE_MACRXFLT_CTL_NOFT LS(2, 6, 0x3)
+#define TXGBE_MACRXFLT_CTL_PASS LS(3, 6, 0x3)
+#define TXGBE_MACRXFLT_RXALL MS(31, 0x1)
+
+/******************************************************************************
+ * Statistic Registers
+ ******************************************************************************/
+/* Ring Counter */
+#define TXGBE_QPRXPKT(rp) (0x001014 + 0x40 * (rp))
+#define TXGBE_QPRXOCTL(rp) (0x001018 + 0x40 * (rp))
+#define TXGBE_QPRXOCTH(rp) (0x00101C + 0x40 * (rp))
+#define TXGBE_QPTXPKT(rp) (0x003014 + 0x40 * (rp))
+#define TXGBE_QPTXOCTL(rp) (0x003018 + 0x40 * (rp))
+#define TXGBE_QPTXOCTH(rp) (0x00301C + 0x40 * (rp))
+#define TXGBE_QPRXMPKT(rp) (0x001020 + 0x40 * (rp))
+
+/* Host DMA Counter */
+#define TXGBE_DMATXDROP 0x018300
+#define TXGBE_DMATXSECDROP 0x018304
+#define TXGBE_DMATXPKT 0x018308
+#define TXGBE_DMATXOCTL 0x01830C
+#define TXGBE_DMATXOCTH 0x018310
+#define TXGBE_DMATXMNG 0x018314
+#define TXGBE_DMARXDROP 0x012500
+#define TXGBE_DMARXPKT 0x012504
+#define TXGBE_DMARXOCTL 0x012508
+#define TXGBE_DMARXOCTH 0x01250C
+#define TXGBE_DMARXMNG 0x012510
+
+/* Packet Buffer Counter */
+#define TXGBE_PBRXMISS(tc) (0x019040 + (tc) * 4)
+#define TXGBE_PBRXPKT 0x019060
+#define TXGBE_PBRXREP 0x019064
+#define TXGBE_PBRXDROP 0x019068
+#define TXGBE_PBRXLNKXOFF 0x011988
+#define TXGBE_PBRXLNKXON 0x011E0C
+#define TXGBE_PBRXUPXON(up) (0x011E30 + (up) * 4)
+#define TXGBE_PBRXUPXOFF(up) (0x011E10 + (up) * 4)
+
+#define TXGBE_PBTXLNKXOFF 0x019218
+#define TXGBE_PBTXLNKXON 0x01921C
+#define TXGBE_PBTXUPXON(up) (0x0192E0 + (up) * 4)
+#define TXGBE_PBTXUPXOFF(up) (0x0192C0 + (up) * 4)
+#define TXGBE_PBTXUPOFF(up) (0x019280 + (up) * 4)
+
+#define TXGBE_PBLPBK 0x01CF08
+
+/* Ether Flow Counter */
+#define TXGBE_LANPKTDROP 0x0151C0
+#define TXGBE_MNGPKTDROP 0x0151C4
+
+/* MAC Counter */
+#define TXGBE_MACRXERRCRCL 0x011928
+#define TXGBE_MACRXERRCRCH 0x01192C
+#define TXGBE_MACRXERRLENL 0x011978
+#define TXGBE_MACRXERRLENH 0x01197C
+#define TXGBE_MACRX1to64L 0x001940
+#define TXGBE_MACRX1to64H 0x001944
+#define TXGBE_MACRX65to127L 0x001948
+#define TXGBE_MACRX65to127H 0x00194C
+#define TXGBE_MACRX128to255L 0x001950
+#define TXGBE_MACRX128to255H 0x001954
+#define TXGBE_MACRX256to511L 0x001958
+#define TXGBE_MACRX256to511H 0x00195C
+#define TXGBE_MACRX512to1023L 0x001960
+#define TXGBE_MACRX512to1023H 0x001964
+#define TXGBE_MACRX1024toMAXL 0x001968
+#define TXGBE_MACRX1024toMAXH 0x00196C
+#define TXGBE_MACTX1to64L 0x001834
+#define TXGBE_MACTX1to64H 0x001838
+#define TXGBE_MACTX65to127L 0x00183C
+#define TXGBE_MACTX65to127H 0x001840
+#define TXGBE_MACTX128to255L 0x001844
+#define TXGBE_MACTX128to255H 0x001848
+#define TXGBE_MACTX256to511L 0x00184C
+#define TXGBE_MACTX256to511H 0x001850
+#define TXGBE_MACTX512to1023L 0x001854
+#define TXGBE_MACTX512to1023H 0x001858
+#define TXGBE_MACTX1024toMAXL 0x00185C
+#define TXGBE_MACTX1024toMAXH 0x001860
+
+#define TXGBE_MACRXUNDERSIZE 0x011938
+#define TXGBE_MACRXOVERSIZE 0x01193C
+#define TXGBE_MACRXJABBER 0x011934
+
+#define TXGBE_MACRXPKTL 0x011900
+#define TXGBE_MACRXPKTH 0x011904
+#define TXGBE_MACTXPKTL 0x01181C
+#define TXGBE_MACTXPKTH 0x011820
+#define TXGBE_MACRXGBOCTL 0x011908
+#define TXGBE_MACRXGBOCTH 0x01190C
+#define TXGBE_MACTXGBOCTL 0x011814
+#define TXGBE_MACTXGBOCTH 0x011818
+
+#define TXGBE_MACRXOCTL 0x011918
+#define TXGBE_MACRXOCTH 0x01191C
+#define TXGBE_MACRXMPKTL 0x011920
+#define TXGBE_MACRXMPKTH 0x011924
+#define TXGBE_MACTXOCTL 0x011824
+#define TXGBE_MACTXOCTH 0x011828
+#define TXGBE_MACTXMPKTL 0x01182C
+#define TXGBE_MACTXMPKTH 0x011830
+
+/* Management Counter */
+#define TXGBE_MNGOUT 0x01CF00
+#define TXGBE_MNGIN 0x01CF04
+
+/* MAC SEC Counter */
+#define TXGBE_LSECRXUNTAG 0x017240
+#define TXGBE_LSECRXDECOCT 0x017244
+#define TXGBE_LSECRXVLDOCT 0x017248
+#define TXGBE_LSECRXBADTAG 0x01724C
+#define TXGBE_LSECRXNOSCI 0x017250
+#define TXGBE_LSECRXUKSCI 0x017254
+#define TXGBE_LSECRXUNCHK 0x017258
+#define TXGBE_LSECRXDLY 0x01725C
+#define TXGBE_LSECRXLATE 0x017260
+#define TXGBE_LSECRXGOOD 0x017264
+#define TXGBE_LSECRXBAD 0x01726C
+#define TXGBE_LSECRXUK 0x017274
+#define TXGBE_LSECRXBADSA 0x01727C
+#define TXGBE_LSECRXUKSA 0x017280
+#define TXGBE_LSECTXUNTAG 0x01D23C
+#define TXGBE_LSECTXENC 0x01D240
+#define TXGBE_LSECTXPTT 0x01D244
+#define TXGBE_LSECTXENCOCT 0x01D248
+#define TXGBE_LSECTXPTTOCT 0x01D24C
+
+/* IP SEC Counter */
+
+/* FDIR Counter */
+#define TXGBE_FDIRFREE 0x019538
+#define TXGBE_FDIRFREE_FLT(r) RS(r, 0, 0xFFFF)
+#define TXGBE_FDIRLEN 0x01954C
+#define TXGBE_FDIRLEN_BKTLEN(r) RS(r, 0, 0x3F)
+#define TXGBE_FDIRLEN_MAXLEN(r) RS(r, 8, 0x3F)
+#define TXGBE_FDIRUSED 0x019550
+#define TXGBE_FDIRUSED_ADD(r) RS(r, 0, 0xFFFF)
+#define TXGBE_FDIRUSED_REM(r) RS(r, 16, 0xFFFF)
+#define TXGBE_FDIRFAIL 0x019554
+#define TXGBE_FDIRFAIL_ADD(r) RS(r, 0, 0xFF)
+#define TXGBE_FDIRFAIL_REM(r) RS(r, 8, 0xFF)
+#define TXGBE_FDIRMATCH 0x019558
+#define TXGBE_FDIRMISS 0x01955C
+
+/* FCOE Counter */
+#define TXGBE_FCOECRC 0x015160
+#define TXGBE_FCOERPDC 0x012514
+#define TXGBE_FCOELAST 0x012518
+#define TXGBE_FCOEPRC 0x015164
+#define TXGBE_FCOEDWRC 0x015168
+#define TXGBE_FCOEPTC 0x018318
+#define TXGBE_FCOEDWTC 0x01831C
+
+/* Management Counter */
+#define TXGBE_MNGOS2BMC 0x01E094
+#define TXGBE_MNGBMC2OS 0x01E090
+
+/******************************************************************************
+ * PF(Physical Function) Registers
+ ******************************************************************************/
+/* Interrupt */
+#define TXGBE_ICRMISC 0x000100
+#define TXGBE_ICRMISC_MASK MS(8, 0xFFFFFF)
+#define TXGBE_ICRMISC_LNKDN MS(8, 0x1) /* eth link down */
+#define TXGBE_ICRMISC_RST MS(10, 0x1) /* device reset event */
+#define TXGBE_ICRMISC_TS MS(11, 0x1) /* time sync */
+#define TXGBE_ICRMISC_STALL MS(12, 0x1) /* Tx or Rx path is stalled */
+#define TXGBE_ICRMISC_LNKSEC MS(13, 0x1) /* Tx LinkSec requires key exchange */
+#define TXGBE_ICRMISC_ERRBUF MS(14, 0x1) /* Packet Buffer Overrun */
+#define TXGBE_ICRMISC_FDIR MS(15, 0x1) /* FDir Exception */
+#define TXGBE_ICRMISC_I2C MS(16, 0x1) /* I2C interrupt */
+#define TXGBE_ICRMISC_ERRMAC MS(17, 0x1) /* err reported by MAC */
+#define TXGBE_ICRMISC_LNKUP MS(18, 0x1) /* link up */
+#define TXGBE_ICRMISC_ANDONE MS(19, 0x1) /* link auto-nego done */
+#define TXGBE_ICRMISC_ERRIG MS(20, 0x1) /* integrity error */
+#define TXGBE_ICRMISC_SPI MS(21, 0x1) /* SPI interface */
+#define TXGBE_ICRMISC_VFMBX MS(22, 0x1) /* VF-PF message box */
+#define TXGBE_ICRMISC_GPIO MS(26, 0x1) /* GPIO interrupt */
+#define TXGBE_ICRMISC_ERRPCI MS(27, 0x1) /* pcie request error */
+#define TXGBE_ICRMISC_HEAT MS(28, 0x1) /* overheat detection */
+#define TXGBE_ICRMISC_PROBE MS(29, 0x1) /* probe match */
+#define TXGBE_ICRMISC_MNGMBX MS(30, 0x1) /* mng mailbox */
+#define TXGBE_ICRMISC_TIMER MS(31, 0x1) /* tcp timer */
+#define TXGBE_ICRMISC_DEFAULT ( \
+ TXGBE_ICRMISC_LNKDN | \
+ TXGBE_ICRMISC_RST | \
+ TXGBE_ICRMISC_ERRMAC | \
+ TXGBE_ICRMISC_LNKUP | \
+ TXGBE_ICRMISC_ANDONE | \
+ TXGBE_ICRMISC_ERRIG | \
+ TXGBE_ICRMISC_VFMBX | \
+ TXGBE_ICRMISC_MNGMBX | \
+ TXGBE_ICRMISC_STALL | \
+ TXGBE_ICRMISC_TIMER)
+#define TXGBE_ICRMISC_LSC ( \
+ TXGBE_ICRMISC_LNKDN | \
+ TXGBE_ICRMISC_LNKUP)
+#define TXGBE_ICSMISC 0x000104
+#define TXGBE_IENMISC 0x000108
+#define TXGBE_IVARMISC 0x0004FC
+#define TXGBE_IVARMISC_VEC(v) LS(v, 0, 0x7)
+#define TXGBE_IVARMISC_VLD MS(7, 0x1)
+#define TXGBE_ICR(i) (0x000120 + (i) * 4) /* 0-1 */
+#define TXGBE_ICR_MASK MS(0, 0xFFFFFFFF)
+#define TXGBE_ICS(i) (0x000130 + (i) * 4) /* 0-1 */
+#define TXGBE_ICS_MASK TXGBE_ICR_MASK
+#define TXGBE_IMS(i) (0x000140 + (i) * 4) /* 0-1 */
+#define TXGBE_IMS_MASK TXGBE_ICR_MASK
+#define TXGBE_IMC(i) (0x000150 + (i) * 4) /* 0-1 */
+#define TXGBE_IMC_MASK TXGBE_ICR_MASK
+#define TXGBE_IVAR(i) (0x000500 + (i) * 4) /* 0-3 */
+#define TXGBE_IVAR_VEC(v) LS(v, 0, 0x7)
+#define TXGBE_IVAR_VLD MS(7, 0x1)
+#define TXGBE_TCPTMR 0x000170
+#define TXGBE_ITRSEL 0x000180
+
+/* P2V Mailbox */
+#define TXGBE_MBMEM(i) (0x005000 + 0x40 * (i)) /* 0-63 */
+#define TXGBE_MBCTL(i) (0x000600 + 4 * (i)) /* 0-63 */
+#define TXGBE_MBCTL_STS MS(0, 0x1) /* Initiate message send to VF */
+#define TXGBE_MBCTL_ACK MS(1, 0x1) /* Ack message recv'd from VF */
+#define TXGBE_MBCTL_VFU MS(2, 0x1) /* VF owns the mailbox buffer */
+#define TXGBE_MBCTL_PFU MS(3, 0x1) /* PF owns the mailbox buffer */
+#define TXGBE_MBCTL_RVFU MS(4, 0x1) /* Reset VFU - used when VF stuck */
+#define TXGBE_MBVFICR(i) (0x000480 + 4 * (i)) /* 0-3 */
+#define TXGBE_MBVFICR_INDEX(vf) ((vf) >> 4)
+#define TXGBE_MBVFICR_VFREQ_MASK (0x0000FFFF) /* bits for VF messages */
+#define TXGBE_MBVFICR_VFREQ_VF1 (0x00000001) /* bit for VF 1 message */
+#define TXGBE_MBVFICR_VFACK_MASK (0xFFFF0000) /* bits for VF acks */
+#define TXGBE_MBVFICR_VFACK_VF1 (0x00010000) /* bit for VF 1 ack */
+#define TXGBE_FLRVFP(i) (0x000490 + 4 * (i)) /* 0-1 */
+#define TXGBE_FLRVFE(i) (0x0004A0 + 4 * (i)) /* 0-1 */
+#define TXGBE_FLRVFEC(i) (0x0004A8 + 4 * (i)) /* 0-1 */
+
+/******************************************************************************
+ * VF(Virtual Function) Registers
+ ******************************************************************************/
+#define TXGBE_VFPBWRAP 0x000000
+#define TXGBE_VFPBWRAP_WRAP(r, tc) (((r) & (0x7 << (4 * (tc)))) >> (4 * (tc)))
+#define TXGBE_VFPBWRAP_EMPT(r, tc) (((r) & (0x8 << (4 * (tc)))) >> (4 * (tc)))
+#define TXGBE_VFSTATUS 0x000004
+#define TXGBE_VFSTATUS_UP MS(0, 0x1)
+#define TXGBE_VFSTATUS_BW_MASK MS(1, 0x7)
+#define TXGBE_VFSTATUS_BW_10G LS(0x1, 1, 0x7)
+#define TXGBE_VFSTATUS_BW_1G LS(0x2, 1, 0x7)
+#define TXGBE_VFSTATUS_BW_100M LS(0x4, 1, 0x7)
+#define TXGBE_VFSTATUS_BUSY MS(4, 0x1)
+#define TXGBE_VFSTATUS_LANID MS(8, 0x1)
+#define TXGBE_VFRST 0x000008
+#define TXGBE_VFRST_SET MS(0, 0x1)
+#define TXGBE_VFPLCFG 0x000078
+#define TXGBE_VFPLCFG_RSV MS(0, 0x1)
+#define TXGBE_VFPLCFG_PSR(v) LS(v, 1, 0x1F)
+#define TXGBE_VFPLCFG_PSRL4HDR (0x1)
+#define TXGBE_VFPLCFG_PSRL3HDR (0x2)
+#define TXGBE_VFPLCFG_PSRL2HDR (0x4)
+#define TXGBE_VFPLCFG_PSRTUNHDR (0x8)
+#define TXGBE_VFPLCFG_PSRTUNMAC (0x10)
+#define TXGBE_VFPLCFG_RSSMASK MS(16, 0xFF)
+#define TXGBE_VFPLCFG_RSSIPV4TCP MS(16, 0x1)
+#define TXGBE_VFPLCFG_RSSIPV4 MS(17, 0x1)
+#define TXGBE_VFPLCFG_RSSIPV6 MS(20, 0x1)
+#define TXGBE_VFPLCFG_RSSIPV6TCP MS(21, 0x1)
+#define TXGBE_VFPLCFG_RSSIPV4UDP MS(22, 0x1)
+#define TXGBE_VFPLCFG_RSSIPV6UDP MS(23, 0x1)
+#define TXGBE_VFPLCFG_RSSENA MS(24, 0x1)
+#define TXGBE_VFPLCFG_RSSHASH(v) LS(v, 29, 0x7)
+#define TXGBE_VFRSSKEY(i) (0x000080 + (i) * 4) /* 0-9 */
+#define TXGBE_VFRSSTBL(i) (0x0000C0 + (i) * 4) /* 0-15 */
+#define TXGBE_VFICR 0x000100
+#define TXGBE_VFICR_MASK LS(7, 0, 0x7)
+#define TXGBE_VFICR_MBX MS(0, 0x1)
+#define TXGBE_VFICR_DONE1 MS(1, 0x1)
+#define TXGBE_VFICR_DONE2 MS(2, 0x1)
+#define TXGBE_VFICS 0x000104
+#define TXGBE_VFICS_MASK TXGBE_VFICR_MASK
+#define TXGBE_VFIMS 0x000108
+#define TXGBE_VFIMS_MASK TXGBE_VFICR_MASK
+#define TXGBE_VFIMC 0x00010C
+#define TXGBE_VFIMC_MASK TXGBE_VFICR_MASK
+#define TXGBE_VFGPIE 0x000118
+#define TXGBE_VFIVAR(i) (0x000240 + 4 * (i)) /* 0-3 */
+#define TXGBE_VFIVARMISC 0x000260
+#define TXGBE_VFIVAR_ALLOC(v) LS(v, 0, 0x3)
+#define TXGBE_VFIVAR_VLD MS(7, 0x1)
+
+#define TXGBE_VFMBCTL 0x000600
+#define TXGBE_VFMBCTL_REQ MS(0, 0x1) /* Request for PF Ready bit */
+#define TXGBE_VFMBCTL_ACK MS(1, 0x1) /* Ack PF message received */
+#define TXGBE_VFMBCTL_VFU MS(2, 0x1) /* VF owns the mailbox buffer */
+#define TXGBE_VFMBCTL_PFU MS(3, 0x1) /* PF owns the mailbox buffer */
+#define TXGBE_VFMBCTL_PFSTS MS(4, 0x1) /* PF wrote a message in the MB */
+#define TXGBE_VFMBCTL_PFACK MS(5, 0x1) /* PF ack the previous VF msg */
+#define TXGBE_VFMBCTL_RSTI MS(6, 0x1) /* PF has reset indication */
+#define TXGBE_VFMBCTL_RSTD MS(7, 0x1) /* PF has indicated reset done */
+#define TXGBE_VFMBCTL_R2C_BITS (TXGBE_VFMBCTL_RSTD | \
+ TXGBE_VFMBCTL_PFSTS | \
+ TXGBE_VFMBCTL_PFACK)
+#define TXGBE_VFMBX 0x000C00 /* 0-15 */
+#define TXGBE_VFTPHCTL(i) (0x000D00 + 4 * (i)) /* 0-7 */
+
+/******************************************************************************
+ * PF&VF TxRx Interface
+ ******************************************************************************/
+#define RNGLEN(v) ROUND_OVER(v, 13, 7)
+#define HDRLEN(v) ROUND_OVER(v, 10, 6)
+#define PKTLEN(v) ROUND_OVER(v, 14, 10)
+#define INTTHR(v) ROUND_OVER(v, 4, 0)
+
+#define TXGBE_RING_DESC_ALIGN 128
+#define TXGBE_RING_DESC_MIN 128
+#define TXGBE_RING_DESC_MAX 8192
+#define TXGBE_RXD_ALIGN TXGBE_RING_DESC_ALIGN
+#define TXGBE_TXD_ALIGN TXGBE_RING_DESC_ALIGN
+
+/* receive ring */
+#define TXGBE_RXBAL(rp) (0x001000 + 0x40 * (rp))
+#define TXGBE_RXBAH(rp) (0x001004 + 0x40 * (rp))
+#define TXGBE_RXRP(rp) (0x00100C + 0x40 * (rp))
+#define TXGBE_RXWP(rp) (0x001008 + 0x40 * (rp))
+#define TXGBE_RXCFG(rp) (0x001010 + 0x40 * (rp))
+#define TXGBE_RXCFG_ENA MS(0, 0x1)
+#define TXGBE_RXCFG_RNGLEN(v) LS(RNGLEN(v), 1, 0x3F)
+#define TXGBE_RXCFG_PKTLEN(v) LS(PKTLEN(v), 8, 0xF)
+#define TXGBE_RXCFG_PKTLEN_MASK MS(8, 0xF)
+#define TXGBE_RXCFG_HDRLEN(v) LS(HDRLEN(v), 12, 0xF)
+#define TXGBE_RXCFG_HDRLEN_MASK MS(12, 0xF)
+#define TXGBE_RXCFG_WTHRESH(v) LS(v, 16, 0x7)
+#define TXGBE_RXCFG_ETAG MS(22, 0x1)
+#define TXGBE_RXCFG_RSCMAX_MASK MS(23, 0x3)
+#define TXGBE_RXCFG_RSCMAX_1 LS(0, 23, 0x3)
+#define TXGBE_RXCFG_RSCMAX_4 LS(1, 23, 0x3)
+#define TXGBE_RXCFG_RSCMAX_8 LS(2, 23, 0x3)
+#define TXGBE_RXCFG_RSCMAX_16 LS(3, 23, 0x3)
+#define TXGBE_RXCFG_STALL MS(25, 0x1)
+#define TXGBE_RXCFG_SPLIT MS(26, 0x1)
+#define TXGBE_RXCFG_RSCMODE MS(27, 0x1)
+#define TXGBE_RXCFG_CNTAG MS(28, 0x1)
+#define TXGBE_RXCFG_RSCENA MS(29, 0x1)
+#define TXGBE_RXCFG_DROP MS(30, 0x1)
+#define TXGBE_RXCFG_VLAN MS(31, 0x1)
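+
+/*
+ * Editor's sketch of a minimal RX ring setup using the fields above
+ * (illustrative only; 'dma', 'nb_desc' and 'buf_len' are hypothetical
+ * variables):
+ *
+ *   wr32(hw, TXGBE_RXBAL(rp), (u32)dma);
+ *   wr32(hw, TXGBE_RXBAH(rp), (u32)(dma >> 32));
+ *   wr32(hw, TXGBE_RXCFG(rp), TXGBE_RXCFG_RNGLEN(nb_desc) |
+ *        TXGBE_RXCFG_PKTLEN(buf_len) | TXGBE_RXCFG_ENA);
+ */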
+
+/* transmit ring */
+#define TXGBE_TXBAL(rp) (0x003000 + 0x40 * (rp))
+#define TXGBE_TXBAH(rp) (0x003004 + 0x40 * (rp))
+#define TXGBE_TXWP(rp) (0x003008 + 0x40 * (rp))
+#define TXGBE_TXRP(rp) (0x00300C + 0x40 * (rp))
+#define TXGBE_TXCFG(rp) (0x003010 + 0x40 * (rp))
+#define TXGBE_TXCFG_ENA MS(0, 0x1)
+#define TXGBE_TXCFG_BUFLEN_MASK MS(1, 0x3F)
+#define TXGBE_TXCFG_BUFLEN(v) LS(RNGLEN(v), 1, 0x3F)
+#define TXGBE_TXCFG_HTHRESH_MASK MS(8, 0xF)
+#define TXGBE_TXCFG_HTHRESH(v) LS(v, 8, 0xF)
+#define TXGBE_TXCFG_WTHRESH_MASK MS(16, 0x7F)
+#define TXGBE_TXCFG_WTHRESH(v) LS(v, 16, 0x7F)
+#define TXGBE_TXCFG_FLUSH MS(26, 0x1)
+
+/* interrupt registers */
+#define TXGBE_ITRI 0x000180
+#define TXGBE_ITR(i) (0x000200 + 4 * (i))
+#define TXGBE_ITR_IVAL_MASK MS(2, 0x3FE)
+#define TXGBE_ITR_IVAL(v) LS(v, 2, 0x3FE)
+#define TXGBE_ITR_IVAL_1G(us) TXGBE_ITR_IVAL((us) / 2)
+#define TXGBE_ITR_IVAL_10G(us) TXGBE_ITR_IVAL((us) / 20)
+#define TXGBE_ITR_LLIEA MS(15, 0x1)
+#define TXGBE_ITR_LLICREDIT(v) LS(v, 16, 0x1F)
+#define TXGBE_ITR_CNT(v) LS(v, 21, 0x7F)
+#define TXGBE_ITR_WRDSA MS(31, 0x1)
+#define TXGBE_GPIE 0x000118
+#define TXGBE_GPIE_MSIX MS(0, 0x1)
+#define TXGBE_GPIE_LLIEA MS(1, 0x1)
+#define TXGBE_GPIE_LLIVAL(v) LS(v, 4, 0xF)
+#define TXGBE_GPIE_RSCDLY(v) LS(v, 8, 0x7)
+
+/******************************************************************************
+ * Debug Registers
+ ******************************************************************************/
+/**
+ * Probe
+ **/
+#define TXGBE_PROB 0x010010
+#define TXGBE_IODRV 0x010024
+
+#define TXGBE_PRBCTL 0x010200
+#define TXGBE_PRBSTA 0x010204
+#define TXGBE_PRBDAT 0x010220
+#define TXGBE_PRBPTN 0x010224
+#define TXGBE_PRBCNT 0x010228
+#define TXGBE_PRBMSK 0x01022C
+
+#define TXGBE_PRBPCI 0x01F010
+#define TXGBE_PRBRDMA 0x012010
+#define TXGBE_PRBTDMA 0x018010
+#define TXGBE_PRBPSR 0x015010
+#define TXGBE_PRBRDB 0x019010
+#define TXGBE_PRBTDB 0x01C010
+#define TXGBE_PRBRSEC 0x017010
+#define TXGBE_PRBTSEC 0x01D010
+#define TXGBE_PRBMNG 0x01E010
+#define TXGBE_PRBRMAC 0x011014
+#define TXGBE_PRBTMAC 0x011010
+#define TXGBE_PRBREMAC 0x011E04
+#define TXGBE_PRBTEMAC 0x011E00
+
+/**
+ * ECC
+ **/
+#define TXGBE_ECCRXDMACTL 0x012014
+#define TXGBE_ECCRXDMAINJ 0x012018
+#define TXGBE_ECCRXDMA 0x01201C
+#define TXGBE_ECCTXDMACTL 0x018014
+#define TXGBE_ECCTXDMAINJ 0x018018
+#define TXGBE_ECCTXDMA 0x01801C
+
+#define TXGBE_ECCRXPBCTL 0x019014
+#define TXGBE_ECCRXPBINJ 0x019018
+#define TXGBE_ECCRXPB 0x01901C
+#define TXGBE_ECCTXPBCTL 0x01C014
+#define TXGBE_ECCTXPBINJ 0x01C018
+#define TXGBE_ECCTXPB 0x01C01C
+
+#define TXGBE_ECCRXETHCTL 0x015014
+#define TXGBE_ECCRXETHINJ 0x015018
+#define TXGBE_ECCRXETH 0x01401C
+
+#define TXGBE_ECCRXSECCTL 0x017014
+#define TXGBE_ECCRXSECINJ 0x017018
+#define TXGBE_ECCRXSEC 0x01701C
+#define TXGBE_ECCTXSECCTL 0x01D014
+#define TXGBE_ECCTXSECINJ 0x01D018
+#define TXGBE_ECCTXSEC 0x01D01C
+
+/**
+ * Inspection
+ **/
+#define TXGBE_PBLBSTAT 0x01906C
+#define TXGBE_PBLBSTAT_FREE(r) RS(r, 0, 0x3FF)
+#define TXGBE_PBLBSTAT_FULL MS(11, 0x1)
+#define TXGBE_PBRXSTAT 0x019004
+#define TXGBE_PBRXSTAT_WRAP(tc, r) (((r) & (7u << (4 * (tc)))) >> (4 * (tc)))
+#define TXGBE_PBRXSTAT_EMPT(tc, r) (((r) & (8u << (4 * (tc)))) >> (4 * (tc)))
+#define TXGBE_PBRXSTAT2(tc) (0x019180 + (tc) * 4)
+#define TXGBE_PBRXSTAT2_USED(r) RS(r, 0, 0xFFFF)
+#define TXGBE_PBRXWRPTR(tc) (0x019180 + (tc) * 4)
+#define TXGBE_PBRXWRPTR_HEAD(r) RS(r, 0, 0xFFFF)
+#define TXGBE_PBRXWRPTR_TAIL(r) RS(r, 16, 0xFFFF)
+#define TXGBE_PBRXRDPTR(tc) (0x0191A0 + (tc) * 4)
+#define TXGBE_PBRXRDPTR_HEAD(r) RS(r, 0, 0xFFFF)
+#define TXGBE_PBRXRDPTR_TAIL(r) RS(r, 16, 0xFFFF)
+#define TXGBE_PBRXDATA(tc) (0x0191C0 + (tc) * 4)
+#define TXGBE_PBRXDATA_RDPTR(r) RS(r, 0, 0xFFFF)
+#define TXGBE_PBRXDATA_WRPTR(r) RS(r, 16, 0xFFFF)
+#define TXGBE_PBTXSTAT 0x01C004
+#define TXGBE_PBTXSTAT_EMPT(tc, r) (((r) & (1 << (tc))) >> (tc))
+
+#define TXGBE_RXPBPFCDMACL 0x019210
+#define TXGBE_RXPBPFCDMACH 0x019214
+
+#define TXGBE_PSRLANPKTCNT 0x0151B8
+#define TXGBE_PSRMNGPKTCNT 0x0151BC
+
+#define TXGBE_P2VMBX_SIZE (16) /* 16*4B */
+#define TXGBE_P2MMBX_SIZE (64) /* 64*4B */
+
+/**************** Global Registers ****************************/
+/* chip control Registers */
+#define TXGBE_PWR 0x010000
+#define TXGBE_PWR_LANID(r) RS(r, 30, 0x3)
+#define TXGBE_PWR_LANID_SWAP LS(2, 30, 0x3)
+
+/* Sensors for PVT (Process, Voltage, Temperature) */
+#define TXGBE_TSCTRL 0x010300
+#define TXGBE_TSCTRL_EVALMD MS(31, 0x1)
+#define TXGBE_TSEN 0x010304
+#define TXGBE_TSEN_ENA MS(0, 0x1)
+#define TXGBE_TSSTAT 0x010308
+#define TXGBE_TSSTAT_VLD MS(16, 0x1)
+#define TXGBE_TSSTAT_DATA(r) RS(r, 0, 0x3FF)
+
+#define TXGBE_TSATHRE 0x01030C
+#define TXGBE_TSDTHRE 0x010310
+#define TXGBE_TSINTR 0x010314
+#define TXGBE_TSINTR_AEN MS(0, 0x1)
+#define TXGBE_TSINTR_DEN MS(1, 0x1)
+#define TXGBE_TS_ALARM_ST 0x010318
+#define TXGBE_TS_ALARM_ST_DALARM 0x00000002U
+#define TXGBE_TS_ALARM_ST_ALARM 0x00000001U
+
+/* FMGR Registers */
+#define TXGBE_ILDRSTAT 0x010120
+#define TXGBE_ILDRSTAT_PCIRST MS(0, 0x1)
+#define TXGBE_ILDRSTAT_PWRRST MS(1, 0x1)
+#define TXGBE_ILDRSTAT_SWRST MS(7, 0x1)
+#define TXGBE_ILDRSTAT_SWRST_LAN0 MS(9, 0x1)
+#define TXGBE_ILDRSTAT_SWRST_LAN1 MS(10, 0x1)
+
+#define TXGBE_SPISTAT 0x01010C
+#define TXGBE_SPISTAT_OPDONE MS(0, 0x1)
+#define TXGBE_SPISTAT_BPFLASH MS(31, 0x1)
+
+/************************* Port Registers ************************************/
+/* I2C registers */
+#define TXGBE_I2CCON 0x014900 /* I2C Control */
+#define TXGBE_I2CCON_SDIA ((1 << 6))
+#define TXGBE_I2CCON_RESTART ((1 << 5))
+#define TXGBE_I2CCON_M10BITADDR ((1 << 4))
+#define TXGBE_I2CCON_S10BITADDR ((1 << 3))
+#define TXGBE_I2CCON_SPEED(v) (((v) & 0x3) << 1)
+#define TXGBE_I2CCON_MENA ((1 << 0))
+#define TXGBE_I2CTAR 0x014904 /* I2C Target Address */
+#define TXGBE_I2CDATA 0x014910 /* I2C Rx/Tx Data Buf and Cmd */
+#define TXGBE_I2CDATA_STOP ((1 << 9))
+#define TXGBE_I2CDATA_READ ((1 << 8) | TXGBE_I2CDATA_STOP)
+#define TXGBE_I2CDATA_WRITE ((0 << 8) | TXGBE_I2CDATA_STOP)
+#define TXGBE_I2CSSSCLHCNT 0x014914 /* Standard speed I2C Clock SCL High Count */
+#define TXGBE_I2CSSSCLLCNT 0x014918 /* Standard speed I2C Clock SCL Low Count */
+#define TXGBE_I2CICR 0x014934 /* I2C Raw Interrupt Status */
+#define TXGBE_I2CICR_RXFULL ((0x1) << 2)
+#define TXGBE_I2CICR_TXEMPTY ((0x1) << 4)
+#define TXGBE_I2CICM 0x014930 /* I2C Interrupt Mask */
+#define TXGBE_I2CRXTL 0x014938 /* I2C Receive FIFO Threshold */
+#define TXGBE_I2CTXTL 0x01493C /* I2C TX FIFO Threshold */
+#define TXGBE_I2CENA 0x01496C /* I2C Enable */
+#define TXGBE_I2CSTAT 0x014970 /* I2C Status register */
+#define TXGBE_I2CSTAT_MST ((1U << 5))
+#define TXGBE_I2CSCLTMOUT 0x0149AC /* I2C SCL stuck at low timeout register */
+#define TXGBE_I2CSDATMOUT 0x0149B0 /* I2C SDA stuck at low timeout */
+
+/* port cfg Registers */
+#define TXGBE_PORTSTAT 0x014404
+#define TXGBE_PORTSTAT_UP MS(0, 0x1)
+#define TXGBE_PORTSTAT_BW_MASK MS(1, 0x7)
+#define TXGBE_PORTSTAT_BW_10G MS(1, 0x1)
+#define TXGBE_PORTSTAT_BW_1G MS(2, 0x1)
+#define TXGBE_PORTSTAT_BW_100M MS(3, 0x1)
+#define TXGBE_PORTSTAT_ID(r) RS(r, 8, 0x1)
+
+#define TXGBE_VXLAN 0x014410
+#define TXGBE_VXLAN_GPE 0x014414
+#define TXGBE_GENEVE 0x014418
+#define TXGBE_TEREDO 0x01441C
+#define TXGBE_TCPTIME 0x014420
+
+/* GPIO Registers */
+#define TXGBE_GPIODATA 0x014800
+#define TXGBE_GPIOBIT_0 MS(0, 0x1) /* O:tx fault */
+#define TXGBE_GPIOBIT_1 MS(1, 0x1) /* O:tx disabled */
+#define TXGBE_GPIOBIT_2 MS(2, 0x1) /* I:sfp module absent */
+#define TXGBE_GPIOBIT_3 MS(3, 0x1) /* I:rx signal lost */
+#define TXGBE_GPIOBIT_4 MS(4, 0x1) /* O:rate select, 1G(0) 10G(1) */
+#define TXGBE_GPIOBIT_5 MS(5, 0x1) /* O:rate select, 1G(0) 10G(1) */
+#define TXGBE_GPIOBIT_6 MS(6, 0x1) /* I:ext phy interrupt */
+#define TXGBE_GPIOBIT_7 MS(7, 0x1) /* I:fan speed alarm */
+#define TXGBE_GPIODIR 0x014804
+#define TXGBE_GPIOCTL 0x014808
+#define TXGBE_GPIOINTEN 0x014830
+#define TXGBE_GPIOINTMASK 0x014834
+#define TXGBE_GPIOINTTYPE 0x014838
+#define TXGBE_GPIOINTSTAT 0x014840
+#define TXGBE_GPIOEOI 0x01484C
+
+#define TXGBE_ARBPOOLIDX 0x01820C
+#define TXGBE_ARBTXRATE 0x018404
+#define TXGBE_ARBTXRATE_MIN(v) LS(v, 0, 0x3FFF)
+#define TXGBE_ARBTXRATE_MAX(v) LS(v, 16, 0x3FFF)
+
+/* qos */
+#define TXGBE_ARBTXCTL 0x018200
+#define TXGBE_ARBTXCTL_RRM MS(1, 0x1)
+#define TXGBE_ARBTXCTL_WSP MS(2, 0x1)
+#define TXGBE_ARBTXCTL_DIA MS(6, 0x1)
+#define TXGBE_ARBTXMMW 0x018208
+
+/**************************** Receive DMA registers **************************/
+/* receive control */
+#define TXGBE_ARBRXCTL 0x012000
+#define TXGBE_ARBRXCTL_RRM MS(1, 0x1)
+#define TXGBE_ARBRXCTL_WSP MS(2, 0x1)
+#define TXGBE_ARBRXCTL_DIA MS(6, 0x1)
+
+#define TXGBE_RPUP2TC 0x019008
+#define TXGBE_RPUP2TC_UP_SHIFT 3
+#define TXGBE_RPUP2TC_UP_MASK 0x7
+
+/* mac switcher */
+#define TXGBE_ETHADDRL 0x016200
+#define TXGBE_ETHADDRL_AD0(v) LS(v, 0, 0xFF)
+#define TXGBE_ETHADDRL_AD1(v) LS(v, 8, 0xFF)
+#define TXGBE_ETHADDRL_AD2(v) LS(v, 16, 0xFF)
+#define TXGBE_ETHADDRL_AD3(v) LS(v, 24, 0xFF)
+#define TXGBE_ETHADDRL_ETAG(r) RS(r, 0, 0x3FFF)
+#define TXGBE_ETHADDRH 0x016204
+#define TXGBE_ETHADDRH_AD4(v) LS(v, 0, 0xFF)
+#define TXGBE_ETHADDRH_AD5(v) LS(v, 8, 0xFF)
+#define TXGBE_ETHADDRH_AD_MASK MS(0, 0xFFFF)
+#define TXGBE_ETHADDRH_ETAG MS(30, 0x1)
+#define TXGBE_ETHADDRH_VLD MS(31, 0x1)
+#define TXGBE_ETHADDRASSL 0x016208
+#define TXGBE_ETHADDRASSH 0x01620C
+#define TXGBE_ETHADDRIDX 0x016210
+
+/* Outmost Barrier Filters */
+#define TXGBE_MCADDRTBL(i) (0x015200 + (i) * 4) /* 0-127 */
+#define TXGBE_UCADDRTBL(i) (0x015400 + (i) * 4) /* 0-127 */
+#define TXGBE_VLANTBL(i) (0x016000 + (i) * 4) /* 0-127 */
+
+#define TXGBE_MNGFLEXSEL 0x1582C
+#define TXGBE_MNGFLEXDWL(i) (0x15A00 + ((i) * 16))
+#define TXGBE_MNGFLEXDWH(i) (0x15A04 + ((i) * 16))
+#define TXGBE_MNGFLEXMSK(i) (0x15A08 + ((i) * 16))
+
+#define TXGBE_LANFLEXSEL 0x15B8C
+#define TXGBE_LANFLEXDWL(i) (0x15C00 + ((i) * 16))
+#define TXGBE_LANFLEXDWH(i) (0x15C04 + ((i) * 16))
+#define TXGBE_LANFLEXMSK(i) (0x15C08 + ((i) * 16))
+#define TXGBE_LANFLEXCTL 0x15CFC
+
+/* ipsec */
+#define TXGBE_IPSRXIDX 0x017100
+#define TXGBE_IPSRXIDX_ENA MS(0, 0x1)
+#define TXGBE_IPSRXIDX_TB_MASK MS(1, 0x3)
+#define TXGBE_IPSRXIDX_TB_IP LS(1, 1, 0x3)
+#define TXGBE_IPSRXIDX_TB_SPI LS(2, 1, 0x3)
+#define TXGBE_IPSRXIDX_TB_KEY LS(3, 1, 0x3)
+#define TXGBE_IPSRXIDX_TBIDX(v) LS(v, 3, 0x3FF)
+#define TXGBE_IPSRXIDX_READ MS(30, 0x1)
+#define TXGBE_IPSRXIDX_WRITE MS(31, 0x1)
+#define TXGBE_IPSRXADDR(i) (0x017104 + (i) * 4)
+
+#define TXGBE_IPSRXSPI 0x017114
+#define TXGBE_IPSRXADDRIDX 0x017118
+#define TXGBE_IPSRXKEY(i) (0x01711C + (i) * 4)
+#define TXGBE_IPSRXSALT 0x01712C
+#define TXGBE_IPSRXMODE 0x017130
+#define TXGBE_IPSRXMODE_IPV6 0x00000010
+#define TXGBE_IPSRXMODE_DEC 0x00000008
+#define TXGBE_IPSRXMODE_ESP 0x00000004
+#define TXGBE_IPSRXMODE_AH 0x00000002
+#define TXGBE_IPSRXMODE_VLD 0x00000001
+#define TXGBE_IPSTXIDX 0x01D100
+#define TXGBE_IPSTXIDX_ENA MS(0, 0x1)
+#define TXGBE_IPSTXIDX_SAIDX(v) LS(v, 3, 0x3FF)
+#define TXGBE_IPSTXIDX_READ MS(30, 0x1)
+#define TXGBE_IPSTXIDX_WRITE MS(31, 0x1)
+#define TXGBE_IPSTXSALT 0x01D104
+#define TXGBE_IPSTXKEY(i) (0x01D108 + (i) * 4)
+
+#define TXGBE_MACTXCFG 0x011000
+#define TXGBE_MACTXCFG_TE MS(0, 0x1)
+#define TXGBE_MACTXCFG_SPEED_MASK MS(29, 0x3)
+#define TXGBE_MACTXCFG_SPEED(v) LS(v, 29, 0x3)
+#define TXGBE_MACTXCFG_SPEED_10G LS(0, 29, 0x3)
+#define TXGBE_MACTXCFG_SPEED_1G LS(3, 29, 0x3)
+
+#define TXGBE_ISBADDRL 0x000160
+#define TXGBE_ISBADDRH 0x000164
+
+#define NVM_OROM_OFFSET 0x17
+#define NVM_OROM_BLK_LOW 0x83
+#define NVM_OROM_BLK_HI 0x84
+#define NVM_OROM_PATCH_MASK 0xFF
+#define NVM_OROM_SHIFT 8
+#define NVM_VER_MASK 0x00FF /* version mask */
+#define NVM_VER_SHIFT 8 /* version bit shift */
+#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
+#define NVM_ETK_OFF_HI 0x2E /* version high order word */
+#define NVM_ETK_SHIFT 16 /* high version word shift */
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETK_VALID 0x8000
+#define NVM_INVALID_PTR 0xFFFF
+#define NVM_VER_SIZE 32 /* version string size */
+
+#define TXGBE_REG_RSSTBL TXGBE_RSSTBL(0)
+#define TXGBE_REG_RSSKEY TXGBE_RSSKEY(0)
+static inline u32
+txgbe_map_reg(struct txgbe_hw *hw, u32 reg)
+{
+ switch (reg) {
+ case TXGBE_REG_RSSTBL:
+ if (hw->mac.type == txgbe_mac_raptor_vf)
+ reg = TXGBE_VFRSSTBL(0);
+ break;
+ case TXGBE_REG_RSSKEY:
+ if (hw->mac.type == txgbe_mac_raptor_vf)
+ reg = TXGBE_VFRSSKEY(0);
+ break;
+ default:
+ /* you should never reach here */
+ reg = TXGBE_REG_DUMMY;
+ break;
+ }
+
+ return reg;
+}
+
+/*
+ * Update counters that are not read-clear (RC): accumulate the delta
+ * since the last snapshot, with 32-bit (and split 36-bit) wrap-around
+ * absorbed by the modular arithmetic.
+ */
+#define TXGBE_UPDCNT32(reg, last, cur) \
+do { \
+	uint32_t latest = rd32(hw, reg); \
+	if (hw->offset_loaded || hw->rx_loaded) \
+		last = 0; \
+	cur += (latest - last) & UINT_MAX; \
+	last = latest; \
+} while (0)
+
+#define TXGBE_UPDCNT36(regl, last, cur) \
+do { \
+	uint64_t new_lsb = rd32(hw, regl); \
+	uint64_t new_msb = rd32(hw, regl + 4); \
+	uint64_t latest = ((new_msb << 32) | new_lsb); \
+	if (hw->offset_loaded || hw->rx_loaded) \
+		last = 0; \
+	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
+	last = latest; \
+} while (0)
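+
+/*
+ * Usage sketch (the register and variable names here are hypothetical):
+ * fold a free-running 32-bit counter and a split 36-bit counter into the
+ * driver statistics; wrap-around is handled by the macros above.
+ *
+ *	TXGBE_UPDCNT32(TXGBE_SOME_CNT, last_cnt, stats->some_packets);
+ *	TXGBE_UPDCNT36(TXGBE_SOME_CNTL, last_octets, stats->some_bytes);
+ */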
+
+/**
+ * register operations
+ **/
+#define TXGBE_REG_READ32(addr) rte_read32(addr)
+#define TXGBE_REG_READ32_RELAXED(addr) rte_read32_relaxed(addr)
+#define TXGBE_REG_WRITE32(addr, val) rte_write32(val, addr)
+#define TXGBE_REG_WRITE32_RELAXED(addr, val) rte_write32_relaxed(val, addr)
+
+#define TXGBE_DEAD_READ_REG 0xdeadbeefU
+#define TXGBE_FAILED_READ_REG 0xffffffffU
+#define TXGBE_REG_ADDR(hw, reg) \
+ ((volatile u32 *)((char *)(hw)->hw_addr + (reg)))
+
+static inline u32
+txgbe_get32(volatile u32 *addr)
+{
+ u32 val = TXGBE_REG_READ32(addr);
+ return rte_le_to_cpu_32(val);
+}
+
+static inline void
+txgbe_set32(volatile u32 *addr, u32 val)
+{
+ val = rte_cpu_to_le_32(val);
+ TXGBE_REG_WRITE32(addr, val);
+}
+
+static inline u32
+txgbe_get32_masked(volatile u32 *addr, u32 mask)
+{
+ u32 val = txgbe_get32(addr);
+ val &= mask;
+ return val;
+}
+
+static inline void
+txgbe_set32_masked(volatile u32 *addr, u32 mask, u32 field)
+{
+ u32 val = txgbe_get32(addr);
+ val = ((val & ~mask) | (field & mask));
+ txgbe_set32(addr, val);
+}
+
+static inline u32
+txgbe_get32_relaxed(volatile u32 *addr)
+{
+ u32 val = TXGBE_REG_READ32_RELAXED(addr);
+ return rte_le_to_cpu_32(val);
+}
+
+static inline void
+txgbe_set32_relaxed(volatile u32 *addr, u32 val)
+{
+	val = rte_cpu_to_le_32(val);
+	TXGBE_REG_WRITE32_RELAXED(addr, val);
+}
+
+static inline u32
+rd32(struct txgbe_hw *hw, u32 reg)
+{
+ if (reg == TXGBE_REG_DUMMY)
+ return 0;
+ return txgbe_get32(TXGBE_REG_ADDR(hw, reg));
+}
+
+static inline void
+wr32(struct txgbe_hw *hw, u32 reg, u32 val)
+{
+ if (reg == TXGBE_REG_DUMMY)
+ return;
+ txgbe_set32(TXGBE_REG_ADDR(hw, reg), val);
+}
+
+static inline u32
+rd32m(struct txgbe_hw *hw, u32 reg, u32 mask)
+{
+ u32 val = rd32(hw, reg);
+ val &= mask;
+ return val;
+}
+
+static inline void
+wr32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 field)
+{
+ u32 val = rd32(hw, reg);
+ val = ((val & ~mask) | (field & mask));
+ wr32(hw, reg, val);
+}
+
+static inline u64
+rd64(struct txgbe_hw *hw, u32 reg)
+{
+ u64 lsb = rd32(hw, reg);
+ u64 msb = rd32(hw, reg + 4);
+ return (lsb | msb << 32);
+}
+
+static inline void
+wr64(struct txgbe_hw *hw, u32 reg, u64 val)
+{
+ wr32(hw, reg, (u32)val);
+ wr32(hw, reg + 4, (u32)(val >> 32));
+}
+
+/* poll register */
+static inline u32
+po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual,
+ u32 loop, u32 slice)
+{
+ bool usec = true;
+ u32 value = 0, all = 0;
+
+ if (slice > 1000 * MAX_UDELAY_MS) {
+ usec = false;
+ slice = (slice + 500) / 1000;
+ }
+
+	do {
+		all |= rd32(hw, reg);
+		value |= mask & all;
+		if (value == expect)
+			break;
+
+		usec ? usec_delay(slice) : msec_delay(slice);
+	} while (--loop > 0);
+
+	if (actual)
+		*actual = all;
+
+	return loop;
+}
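+
+/*
+ * Example (sketch; TXGBE_SOME_STS and its DONE bit are hypothetical):
+ * poll a status bit for up to 10 slices of 100 us each. po32m() returns
+ * the remaining loop count, so 0 means the poll timed out.
+ *
+ *	u32 sts;
+ *	if (po32m(hw, TXGBE_SOME_STS, TXGBE_SOME_STS_DONE,
+ *		  TXGBE_SOME_STS_DONE, &sts, 10, 100) == 0)
+ *		DEBUGOUT("poll timeout, sts=%08x\n", sts);
+ */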
+
+/* flush all write operations */
+#define txgbe_flush(hw) rd32(hw, 0x00100C)
+
+#define rd32a(hw, reg, idx) ( \
+ rd32((hw), (reg) + ((idx) << 2)))
+#define wr32a(hw, reg, idx, val) \
+ wr32((hw), (reg) + ((idx) << 2), (val))
+
+#define rd32at(hw, reg, idx) \
+ rd32a(hw, txgbe_map_reg(hw, reg), idx)
+#define wr32at(hw, reg, idx, val) \
+ wr32a(hw, txgbe_map_reg(hw, reg), idx, val)
+
+#define rd32w(hw, reg, mask, slice) do { \
+ rd32((hw), reg); \
+ po32m((hw), reg, mask, mask, NULL, 5, slice); \
+} while (0)
+
+#define wr32w(hw, reg, val, mask, slice) do { \
+ wr32((hw), reg, val); \
+ po32m((hw), reg, mask, mask, NULL, 5, slice); \
+} while (0)
+
+#define TXGBE_XPCS_IDAADDR 0x13000
+#define TXGBE_XPCS_IDADATA 0x13004
+#define TXGBE_EPHY_IDAADDR 0x13008
+#define TXGBE_EPHY_IDADATA 0x1300C
+static inline u32
+rd32_epcs(struct txgbe_hw *hw, u32 addr)
+{
+ u32 data;
+ wr32(hw, TXGBE_XPCS_IDAADDR, addr);
+ data = rd32(hw, TXGBE_XPCS_IDADATA);
+ return data;
+}
+
+static inline void
+wr32_epcs(struct txgbe_hw *hw, u32 addr, u32 data)
+{
+ wr32(hw, TXGBE_XPCS_IDAADDR, addr);
+ wr32(hw, TXGBE_XPCS_IDADATA, data);
+}
+
+static inline u32
+rd32_ephy(struct txgbe_hw *hw, u32 addr)
+{
+ u32 data;
+ wr32(hw, TXGBE_EPHY_IDAADDR, addr);
+ data = rd32(hw, TXGBE_EPHY_IDADATA);
+ return data;
+}
+
+static inline void
+wr32_ephy(struct txgbe_hw *hw, u32 addr, u32 data)
+{
+ wr32(hw, TXGBE_EPHY_IDAADDR, addr);
+ wr32(hw, TXGBE_EPHY_IDADATA, data);
+}
+
+#endif /* _TXGBE_REGS_H_ */
new file mode 100644
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_STATUS_H_
+#define _TXGBE_STATUS_H_
+
+/* Error Codes:
+ * common errors
+ * simple module errors
+ * detailed module errors
+ *
+ * The range (-256, 256) is reserved for error codes not defined by txgbe.
+ */
+#define TERR_BASE (0x100)
+enum txgbe_error {
+ TERR_NULL = TERR_BASE,
+ TERR_ANY,
+ TERR_NOSUPP,
+ TERR_NOIMPL,
+ TERR_NOMEM,
+ TERR_NOSPACE,
+ TERR_NOENTRY,
+ TERR_CONFIG,
+ TERR_ARGS,
+ TERR_PARAM,
+ TERR_INVALID,
+ TERR_TIMEOUT,
+ TERR_VERSION,
+ TERR_REGISTER,
+ TERR_FEATURE,
+ TERR_RESET,
+ TERR_AUTONEG,
+ TERR_MBX,
+ TERR_I2C,
+ TERR_FC,
+ TERR_FLASH,
+ TERR_DEVICE,
+ TERR_HOSTIF,
+ TERR_SRAM,
+ TERR_EEPROM,
+ TERR_EEPROM_CHECKSUM,
+ TERR_EEPROM_PROTECT,
+ TERR_EEPROM_VERSION,
+ TERR_MAC,
+ TERR_MAC_ADDR,
+ TERR_SFP,
+ TERR_SFP_INITSEQ,
+ TERR_SFP_PRESENT,
+ TERR_SFP_SUPPORT,
+ TERR_SFP_SETUP,
+ TERR_PHY,
+ TERR_PHY_ADDR,
+ TERR_PHY_INIT,
+ TERR_FDIR_CMD,
+ TERR_FDIR_REINIT,
+ TERR_SWFW_SYNC,
+ TERR_SWFW_COMMAND,
+ TERR_FC_CFG,
+ TERR_FC_NEGO,
+ TERR_LINK_SETUP,
+ TERR_PCIE_PENDING,
+ TERR_PBA_SECTION,
+ TERR_OVERTEMP,
+ TERR_UNDERTEMP,
+ TERR_XPCS_POWERUP,
+};
+
+/* WARNING: just for legacy compatibility */
+#define TXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+#define TXGBE_ERR_OPS_DUMMY 0x3FFFFFFF
+
+/* Error Codes */
+#define TXGBE_ERR_EEPROM -(TERR_BASE + 1)
+#define TXGBE_ERR_EEPROM_CHECKSUM -(TERR_BASE + 2)
+#define TXGBE_ERR_PHY -(TERR_BASE + 3)
+#define TXGBE_ERR_CONFIG -(TERR_BASE + 4)
+#define TXGBE_ERR_PARAM -(TERR_BASE + 5)
+#define TXGBE_ERR_MAC_TYPE -(TERR_BASE + 6)
+#define TXGBE_ERR_UNKNOWN_PHY -(TERR_BASE + 7)
+#define TXGBE_ERR_LINK_SETUP -(TERR_BASE + 8)
+#define TXGBE_ERR_ADAPTER_STOPPED -(TERR_BASE + 9)
+#define TXGBE_ERR_INVALID_MAC_ADDR -(TERR_BASE + 10)
+#define TXGBE_ERR_DEVICE_NOT_SUPPORTED -(TERR_BASE + 11)
+#define TXGBE_ERR_MASTER_REQUESTS_PENDING -(TERR_BASE + 12)
+#define TXGBE_ERR_INVALID_LINK_SETTINGS -(TERR_BASE + 13)
+#define TXGBE_ERR_AUTONEG_NOT_COMPLETE -(TERR_BASE + 14)
+#define TXGBE_ERR_RESET_FAILED -(TERR_BASE + 15)
+#define TXGBE_ERR_SWFW_SYNC -(TERR_BASE + 16)
+#define TXGBE_ERR_PHY_ADDR_INVALID -(TERR_BASE + 17)
+#define TXGBE_ERR_I2C -(TERR_BASE + 18)
+#define TXGBE_ERR_SFP_NOT_SUPPORTED -(TERR_BASE + 19)
+#define TXGBE_ERR_SFP_NOT_PRESENT -(TERR_BASE + 20)
+#define TXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -(TERR_BASE + 21)
+#define TXGBE_ERR_NO_SAN_ADDR_PTR -(TERR_BASE + 22)
+#define TXGBE_ERR_FDIR_REINIT_FAILED -(TERR_BASE + 23)
+#define TXGBE_ERR_EEPROM_VERSION -(TERR_BASE + 24)
+#define TXGBE_ERR_NO_SPACE -(TERR_BASE + 25)
+#define TXGBE_ERR_OVERTEMP -(TERR_BASE + 26)
+#define TXGBE_ERR_FC_NOT_NEGOTIATED -(TERR_BASE + 27)
+#define TXGBE_ERR_FC_NOT_SUPPORTED -(TERR_BASE + 28)
+#define TXGBE_ERR_SFP_SETUP_NOT_COMPLETE -(TERR_BASE + 30)
+#define TXGBE_ERR_PBA_SECTION -(TERR_BASE + 31)
+#define TXGBE_ERR_INVALID_ARGUMENT -(TERR_BASE + 32)
+#define TXGBE_ERR_HOST_INTERFACE_COMMAND -(TERR_BASE + 33)
+#define TXGBE_ERR_OUT_OF_MEM -(TERR_BASE + 34)
+#define TXGBE_ERR_FEATURE_NOT_SUPPORTED -(TERR_BASE + 36)
+#define TXGBE_ERR_EEPROM_PROTECTED_REGION -(TERR_BASE + 37)
+#define TXGBE_ERR_FDIR_CMD_INCOMPLETE -(TERR_BASE + 38)
+#define TXGBE_ERR_FW_RESP_INVALID -(TERR_BASE + 39)
+#define TXGBE_ERR_TOKEN_RETRY -(TERR_BASE + 40)
+#define TXGBE_ERR_FLASH_LOADING_FAILED -(TERR_BASE + 41)
+
+#define TXGBE_ERR_NOSUPP -(TERR_BASE + 42)
+#define TXGBE_ERR_UNDERTEMP -(TERR_BASE + 43)
+#define TXGBE_ERR_XPCS_POWER_UP_FAILED -(TERR_BASE + 44)
+#define TXGBE_ERR_PHY_INIT_NOT_DONE -(TERR_BASE + 45)
+#define TXGBE_ERR_TIMEOUT -(TERR_BASE + 46)
+#define TXGBE_ERR_REGISTER -(TERR_BASE + 47)
+#define TXGBE_ERR_MNG_ACCESS_FAILED -(TERR_BASE + 49)
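+
+/*
+ * All TXGBE_ERR_* codes are negative, so callers can keep the usual
+ * zero-is-success convention (sketch):
+ *
+ *	s32 err = hw->mac.reset_hw(hw);
+ *	if (err != 0)
+ *		return err;
+ */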
+
+#endif /* _TXGBE_STATUS_H_ */
new file mode 100644
@@ -0,0 +1,868 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_TYPE_H_
+#define _TXGBE_TYPE_H_
+
+/*
+ * Driver Configuration
+ */
+/* DCB configuration defines */
+#define TXGBE_DCB_TC_MAX TXGBE_MAX_UP
+#define TXGBE_DCB_UP_MAX TXGBE_MAX_UP
+#define TXGBE_DCB_BWG_MAX TXGBE_MAX_UP
+#define TXGBE_DCB_BW_PERCENT 100
+
+#define TXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
+#define TXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+
+#define TXGBE_RX_HDR_SIZE 256
+#define TXGBE_RX_BUF_SIZE 2048
+
+#define TXGBE_FRAME_SIZE_MAX (9728) /* Maximum frame size, +FCS */
+#define TXGBE_FRAME_SIZE_DFT (1518) /* Default frame size, +FCS */
+#define TXGBE_NUM_POOL (64)
+#define TXGBE_PBRXSIZE_MAX 0x00080000 /* 512KB Packet Buffer */
+#define TXGBE_TXPKTSIZE_MAX (10)
+#define TXGBE_PBTXSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
+#define TXGBE_FDIR_DROP_QUEUE 127
+#define TXGBE_MAX_FTQF_FILTERS 128
+#define TXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+#define TXGBE_MAX_UP 8
+#define TXGBE_MAX_QP (128)
+
+#define TXGBE_MAX_UTA 128
+
+#define TXGBE_FDIR_INIT_DONE_POLL 10
+#define TXGBE_FDIRCMD_CMD_POLL 10
+#define TXGBE_MD_TIMEOUT 1000
+#define TXGBE_SPI_TIMEOUT 1000
+#define TXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+#define TXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+
+#define TXGBE_MAX_MSIX_VECTORS_RAPTOR 0x40
+
+#define TXGBE_ALIGN 128 /* as Intel drivers do */
+
+#include "txgbe_status.h"
+#include "txgbe_osdep.h"
+#include "txgbe_devids.h"
+
+struct txgbe_thermal_diode_data {
+ s16 temp;
+ s16 alarm_thresh;
+ s16 dalarm_thresh;
+};
+
+struct txgbe_thermal_sensor_data {
+ struct txgbe_thermal_diode_data sensor[1];
+};
+
+struct txgbe_nvm_version {
+ u32 etk_id;
+ u8 nvm_major;
+ u16 nvm_minor;
+ u8 nvm_id;
+
+ bool oem_valid;
+ u8 oem_major;
+ u8 oem_minor;
+ u16 oem_release;
+
+ bool or_valid;
+ u8 or_major;
+ u16 or_build;
+ u8 or_patch;
+};
+
+/* Power Management */
+/* DMA Coalescing configuration */
+struct txgbe_dmac_config {
+ u16 watchdog_timer; /* usec units */
+ bool fcoe_en;
+ u32 link_speed;
+ u8 fcoe_tc;
+ u8 num_tcs;
+};
+
+/* Packet buffer allocation strategies */
+enum {
+ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
+ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
+};
+
+enum txgbe_fdir_pballoc_type {
+ TXGBE_FDIR_PBALLOC_NONE = 0,
+ TXGBE_FDIR_PBALLOC_64K = 1,
+ TXGBE_FDIR_PBALLOC_128K = 2,
+ TXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+/* Physical layer type */
+#define TXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define TXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define TXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define TXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define TXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define TXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define TXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define TXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define TXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define TXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define TXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define TXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define TXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define TXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define TXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define TXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define TXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define TXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+
+/* Software ATR hash keys */
+#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+
+/* Software ATR input stream values and masks */
+#define TXGBE_ATR_HASH_MASK 0x7fff
+#define TXGBE_ATR_L3TYPE_MASK 0x4
+#define TXGBE_ATR_L3TYPE_IPV4 0x0
+#define TXGBE_ATR_L3TYPE_IPV6 0x4
+#define TXGBE_ATR_L4TYPE_MASK 0x3
+#define TXGBE_ATR_L4TYPE_UDP 0x1
+#define TXGBE_ATR_L4TYPE_TCP 0x2
+#define TXGBE_ATR_L4TYPE_SCTP 0x3
+#define TXGBE_ATR_TUNNEL_MASK 0x10
+#define TXGBE_ATR_TUNNEL_ANY 0x10
+enum txgbe_atr_flow_type {
+ TXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ TXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ TXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ TXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ TXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
+};
+
+/* Flow Director ATR input struct. */
+struct txgbe_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool    - 1 byte
+	 * flow_type  - 1 byte
+	 * pkt_type   - 2 bytes
+	 * dst_ip     - 16 bytes
+	 * src_ip     - 16 bytes
+	 * src_port   - 2 bytes
+	 * dst_port   - 2 bytes
+	 * flex_bytes - 2 bytes
+	 * bkt_hash   - 2 bytes
+	 */
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 pkt_type;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 bkt_hash;
+};
+
+/* Flow Director compressed ATR hash input struct */
+union txgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
+};
+
+/*
+ * Unavailable: The FCoE Boot Option ROM is not present in the flash.
+ * Disabled: Present; boot order is not set for any targets on the port.
+ * Enabled: Present; boot order is set for at least one target on the port.
+ */
+enum txgbe_fcoe_boot_status {
+ txgbe_fcoe_bootstatus_disabled = 0,
+ txgbe_fcoe_bootstatus_enabled = 1,
+ txgbe_fcoe_bootstatus_unavailable = 0xFFFF
+};
+
+enum txgbe_eeprom_type {
+ txgbe_eeprom_unknown = 0,
+ txgbe_eeprom_spi,
+ txgbe_eeprom_flash,
+ txgbe_eeprom_none /* No NVM support */
+};
+
+enum txgbe_mac_type {
+ txgbe_mac_unknown = 0,
+ txgbe_mac_raptor,
+ txgbe_mac_raptor_vf,
+ txgbe_num_macs
+};
+
+enum txgbe_phy_type {
+ txgbe_phy_unknown = 0,
+ txgbe_phy_none,
+ txgbe_phy_tn,
+ txgbe_phy_aq,
+ txgbe_phy_ext_1g_t,
+ txgbe_phy_cu_mtd,
+ txgbe_phy_cu_unknown,
+ txgbe_phy_qt,
+ txgbe_phy_xaui,
+ txgbe_phy_nl,
+ txgbe_phy_sfp_tyco_passive,
+ txgbe_phy_sfp_unknown_passive,
+ txgbe_phy_sfp_unknown_active,
+ txgbe_phy_sfp_avago,
+ txgbe_phy_sfp_ftl,
+ txgbe_phy_sfp_ftl_active,
+ txgbe_phy_sfp_unknown,
+ txgbe_phy_sfp_intel,
+ txgbe_phy_qsfp_unknown_passive,
+ txgbe_phy_qsfp_unknown_active,
+ txgbe_phy_qsfp_intel,
+ txgbe_phy_qsfp_unknown,
+ txgbe_phy_sfp_unsupported, /* Enforce bit set with unsupported module */
+ txgbe_phy_sgmii,
+ txgbe_phy_fw,
+ txgbe_phy_generic
+};
+
+/*
+ * SFP+ module type IDs:
+ *
+ * ID Module Type
+ * =============
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CU_CORE0 - chip-specific
+ * 4 SFP_DA_CU_CORE1 - chip-specific
+ * 5 SFP_SR/LR_CORE0 - chip-specific
+ * 6 SFP_SR/LR_CORE1 - chip-specific
+ */
+enum txgbe_sfp_type {
+ txgbe_sfp_type_unknown = 0,
+ txgbe_sfp_type_da_cu,
+ txgbe_sfp_type_sr,
+ txgbe_sfp_type_lr,
+ txgbe_sfp_type_da_cu_core0,
+ txgbe_sfp_type_da_cu_core1,
+ txgbe_sfp_type_srlr_core0,
+ txgbe_sfp_type_srlr_core1,
+ txgbe_sfp_type_da_act_lmt_core0,
+ txgbe_sfp_type_da_act_lmt_core1,
+ txgbe_sfp_type_1g_cu_core0,
+ txgbe_sfp_type_1g_cu_core1,
+ txgbe_sfp_type_1g_sx_core0,
+ txgbe_sfp_type_1g_sx_core1,
+ txgbe_sfp_type_1g_lx_core0,
+ txgbe_sfp_type_1g_lx_core1,
+ txgbe_sfp_type_not_present = 0xFFFE,
+ txgbe_sfp_type_not_known = 0xFFFF
+};
+
+enum txgbe_media_type {
+ txgbe_media_type_unknown = 0,
+ txgbe_media_type_fiber,
+ txgbe_media_type_fiber_qsfp,
+ txgbe_media_type_copper,
+ txgbe_media_type_backplane,
+ txgbe_media_type_cx4,
+ txgbe_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum txgbe_fc_mode {
+ txgbe_fc_none = 0,
+ txgbe_fc_rx_pause,
+ txgbe_fc_tx_pause,
+ txgbe_fc_full,
+ txgbe_fc_default
+};
+
+/* Smart Speed Settings */
+#define TXGBE_SMARTSPEED_MAX_RETRIES 3
+enum txgbe_smart_speed {
+ txgbe_smart_speed_auto = 0,
+ txgbe_smart_speed_on,
+ txgbe_smart_speed_off
+};
+
+/* PCI bus types */
+enum txgbe_bus_type {
+ txgbe_bus_type_unknown = 0,
+ txgbe_bus_type_pci,
+ txgbe_bus_type_pcix,
+ txgbe_bus_type_pci_express,
+ txgbe_bus_type_internal,
+ txgbe_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum txgbe_bus_speed {
+ txgbe_bus_speed_unknown = 0,
+ txgbe_bus_speed_33 = 33,
+ txgbe_bus_speed_66 = 66,
+ txgbe_bus_speed_100 = 100,
+ txgbe_bus_speed_120 = 120,
+ txgbe_bus_speed_133 = 133,
+ txgbe_bus_speed_2500 = 2500,
+ txgbe_bus_speed_5000 = 5000,
+ txgbe_bus_speed_8000 = 8000,
+ txgbe_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum txgbe_bus_width {
+ txgbe_bus_width_unknown = 0,
+ txgbe_bus_width_pcie_x1 = 1,
+ txgbe_bus_width_pcie_x2 = 2,
+ txgbe_bus_width_pcie_x4 = 4,
+ txgbe_bus_width_pcie_x8 = 8,
+ txgbe_bus_width_32 = 32,
+ txgbe_bus_width_64 = 64,
+ txgbe_bus_width_reserved
+};
+
+struct txgbe_hw;
+
+struct txgbe_addr_filter_info {
+ u32 num_mc_addrs;
+ u32 rar_used_count;
+ u32 mta_in_use;
+ u32 overflow_promisc;
+ bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct txgbe_bus_info {
+ s32 (*get_bus_info)(struct txgbe_hw *);
+ void (*set_lan_id)(struct txgbe_hw *);
+
+ enum txgbe_bus_speed speed;
+ enum txgbe_bus_width width;
+ enum txgbe_bus_type type;
+
+ u16 func;
+ u8 lan_id;
+ u16 instance_id;
+};
+
+/* Flow control parameters */
+struct txgbe_fc_info {
+ u32 high_water[TXGBE_DCB_TC_MAX]; /* Flow Ctrl High-water */
+ u32 low_water[TXGBE_DCB_TC_MAX]; /* Flow Ctrl Low-water */
+ u16 pause_time; /* Flow Control Pause timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ bool disable_fc_autoneg; /* Do not autonegotiate FC */
+ bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+ enum txgbe_fc_mode current_mode; /* FC mode in effect */
+ enum txgbe_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+/* PB[] RxTx */
+struct txgbe_pb_stats {
+ u64 tx_pb_xon_packets;
+ u64 rx_pb_xon_packets;
+ u64 tx_pb_xoff_packets;
+ u64 rx_pb_xoff_packets;
+ u64 rx_pb_dropped;
+ u64 rx_pb_mbuf_alloc_errors;
+ u64 tx_pb_xon2off_packets;
+};
+
+/* QP[] RxTx */
+struct txgbe_qp_stats {
+ u64 rx_qp_packets;
+ u64 tx_qp_packets;
+ u64 rx_qp_bytes;
+ u64 tx_qp_bytes;
+ u64 rx_qp_mc_packets;
+};
+
+struct txgbe_hw_stats {
+ /* MNG RxTx */
+ u64 mng_bmc2host_packets;
+ u64 mng_host2bmc_packets;
+	/* Basic RxTx */
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u64 rx_total_bytes;
+ u64 rx_total_packets;
+ u64 tx_total_packets;
+ u64 rx_total_missed_packets;
+ u64 rx_broadcast_packets;
+ u64 tx_broadcast_packets;
+ u64 rx_multicast_packets;
+ u64 tx_multicast_packets;
+ u64 rx_management_packets;
+ u64 tx_management_packets;
+ u64 rx_management_dropped;
+ u64 rx_drop_packets;
+
+ /* Basic Error */
+ u64 rx_crc_errors;
+ u64 rx_illegal_byte_errors;
+ u64 rx_error_bytes;
+ u64 rx_mac_short_packet_dropped;
+ u64 rx_length_errors;
+ u64 rx_undersize_errors;
+ u64 rx_fragment_errors;
+ u64 rx_oversize_errors;
+ u64 rx_jabber_errors;
+ u64 rx_l3_l4_xsum_error;
+ u64 mac_local_errors;
+ u64 mac_remote_errors;
+
+ /* Flow Director */
+ u64 flow_director_added_filters;
+ u64 flow_director_removed_filters;
+ u64 flow_director_filter_add_errors;
+ u64 flow_director_filter_remove_errors;
+ u64 flow_director_matched_filters;
+ u64 flow_director_missed_filters;
+
+ /* FCoE */
+ u64 rx_fcoe_crc_errors;
+ u64 rx_fcoe_mbuf_allocation_errors;
+ u64 rx_fcoe_dropped;
+ u64 rx_fcoe_packets;
+ u64 tx_fcoe_packets;
+ u64 rx_fcoe_bytes;
+ u64 tx_fcoe_bytes;
+ u64 rx_fcoe_no_ddp;
+ u64 rx_fcoe_no_ddp_ext_buff;
+
+ /* MACSEC */
+ u64 tx_macsec_pkts_untagged;
+ u64 tx_macsec_pkts_encrypted;
+ u64 tx_macsec_pkts_protected;
+ u64 tx_macsec_octets_encrypted;
+ u64 tx_macsec_octets_protected;
+ u64 rx_macsec_pkts_untagged;
+ u64 rx_macsec_pkts_badtag;
+ u64 rx_macsec_pkts_nosci;
+ u64 rx_macsec_pkts_unknownsci;
+ u64 rx_macsec_octets_decrypted;
+ u64 rx_macsec_octets_validated;
+ u64 rx_macsec_sc_pkts_unchecked;
+ u64 rx_macsec_sc_pkts_delayed;
+ u64 rx_macsec_sc_pkts_late;
+ u64 rx_macsec_sa_pkts_ok;
+ u64 rx_macsec_sa_pkts_invalid;
+ u64 rx_macsec_sa_pkts_notvalid;
+ u64 rx_macsec_sa_pkts_unusedsa;
+ u64 rx_macsec_sa_pkts_notusingsa;
+
+ /* MAC RxTx */
+ u64 rx_size_64_packets;
+ u64 rx_size_65_to_127_packets;
+ u64 rx_size_128_to_255_packets;
+ u64 rx_size_256_to_511_packets;
+ u64 rx_size_512_to_1023_packets;
+ u64 rx_size_1024_to_max_packets;
+ u64 tx_size_64_packets;
+ u64 tx_size_65_to_127_packets;
+ u64 tx_size_128_to_255_packets;
+ u64 tx_size_256_to_511_packets;
+ u64 tx_size_512_to_1023_packets;
+ u64 tx_size_1024_to_max_packets;
+
+ /* Flow Control */
+ u64 tx_xon_packets;
+ u64 rx_xon_packets;
+ u64 tx_xoff_packets;
+ u64 rx_xoff_packets;
+
+ /* PB[] RxTx */
+ struct {
+ u64 rx_up_packets;
+ u64 tx_up_packets;
+ u64 rx_up_bytes;
+ u64 tx_up_bytes;
+ u64 rx_up_drop_packets;
+
+ u64 tx_up_xon_packets;
+ u64 rx_up_xon_packets;
+ u64 tx_up_xoff_packets;
+ u64 rx_up_xoff_packets;
+ u64 rx_up_dropped;
+ u64 rx_up_mbuf_alloc_errors;
+ u64 tx_up_xon2off_packets;
+ } up[TXGBE_MAX_UP];
+
+ /* QP[] RxTx */
+ struct {
+ u64 rx_qp_packets;
+ u64 tx_qp_packets;
+ u64 rx_qp_bytes;
+ u64 tx_qp_bytes;
+ u64 rx_qp_mc_packets;
+ } qp[TXGBE_MAX_QP];
+};
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*txgbe_mc_addr_itr) (struct txgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+
+struct txgbe_link_info {
+ s32 (*read_link)(struct txgbe_hw *, u8 addr, u16 reg, u16 *val);
+ s32 (*read_link_unlocked)(struct txgbe_hw *, u8 addr, u16 reg,
+ u16 *val);
+ s32 (*write_link)(struct txgbe_hw *, u8 addr, u16 reg, u16 val);
+ s32 (*write_link_unlocked)(struct txgbe_hw *, u8 addr, u16 reg,
+ u16 val);
+
+ u8 addr;
+};
+
+struct txgbe_rom_info {
+ s32 (*init_params)(struct txgbe_hw *);
+ s32 (*read16)(struct txgbe_hw *, u32, u16 *);
+ s32 (*readw_sw)(struct txgbe_hw *, u32, u16 *);
+ s32 (*readw_buffer)(struct txgbe_hw *, u32, u32, void *);
+ s32 (*read32)(struct txgbe_hw *, u32, u32 *);
+ s32 (*read_buffer)(struct txgbe_hw *, u32, u32, void *);
+ s32 (*write16)(struct txgbe_hw *, u32, u16);
+ s32 (*writew_sw)(struct txgbe_hw *, u32, u16);
+ s32 (*writew_buffer)(struct txgbe_hw *, u32, u32, void *);
+ s32 (*write32)(struct txgbe_hw *, u32, u32);
+ s32 (*write_buffer)(struct txgbe_hw *, u32, u32, void *);
+ s32 (*validate_checksum)(struct txgbe_hw *, u16 *);
+ s32 (*update_checksum)(struct txgbe_hw *);
+ s32 (*calc_checksum)(struct txgbe_hw *);
+
+ enum txgbe_eeprom_type type;
+ u32 semaphore_delay;
+ u16 word_size;
+ u16 address_bits;
+ u16 word_page_size;
+ u16 ctrl_word_3;
+
+ u32 sw_addr;
+};
+
+struct txgbe_flash_info {
+ s32 (*init_params)(struct txgbe_hw *);
+ s32 (*read_buffer)(struct txgbe_hw *, u32, u32, u32 *);
+ s32 (*write_buffer)(struct txgbe_hw *, u32, u32, u32 *);
+ u32 semaphore_delay;
+ u32 dword_size;
+ u16 address_bits;
+};
+
+#define TXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+struct txgbe_mac_info {
+ s32 (*init_hw)(struct txgbe_hw *);
+ s32 (*reset_hw)(struct txgbe_hw *);
+ s32 (*start_hw)(struct txgbe_hw *);
+ s32 (*stop_hw)(struct txgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct txgbe_hw *);
+ void (*enable_relaxed_ordering)(struct txgbe_hw *);
+ u64 (*get_supported_physical_layer)(struct txgbe_hw *);
+ s32 (*get_mac_addr)(struct txgbe_hw *, u8 *);
+ s32 (*get_san_mac_addr)(struct txgbe_hw *, u8 *);
+ s32 (*set_san_mac_addr)(struct txgbe_hw *, u8 *);
+ s32 (*get_device_caps)(struct txgbe_hw *, u16 *);
+ s32 (*get_wwn_prefix)(struct txgbe_hw *, u16 *, u16 *);
+ s32 (*get_fcoe_boot_status)(struct txgbe_hw *, u16 *);
+ s32 (*read_analog_reg8)(struct txgbe_hw*, u32, u8*);
+ s32 (*write_analog_reg8)(struct txgbe_hw*, u32, u8);
+ s32 (*setup_sfp)(struct txgbe_hw *);
+ s32 (*enable_rx_dma)(struct txgbe_hw *, u32);
+ s32 (*disable_sec_rx_path)(struct txgbe_hw *);
+ s32 (*enable_sec_rx_path)(struct txgbe_hw *);
+ s32 (*disable_sec_tx_path)(struct txgbe_hw *);
+ s32 (*enable_sec_tx_path)(struct txgbe_hw *);
+ s32 (*acquire_swfw_sync)(struct txgbe_hw *, u32);
+ void (*release_swfw_sync)(struct txgbe_hw *, u32);
+ void (*init_swfw_sync)(struct txgbe_hw *);
+ u64 (*autoc_read)(struct txgbe_hw *);
+ void (*autoc_write)(struct txgbe_hw *, u64);
+ s32 (*prot_autoc_read)(struct txgbe_hw *, bool *, u64 *);
+ s32 (*prot_autoc_write)(struct txgbe_hw *, bool, u64);
+ s32 (*negotiate_api_version)(struct txgbe_hw *hw, int api);
+
+ /* Link */
+ void (*disable_tx_laser)(struct txgbe_hw *);
+ void (*enable_tx_laser)(struct txgbe_hw *);
+ void (*flap_tx_laser)(struct txgbe_hw *);
+ s32 (*setup_link)(struct txgbe_hw *, u32, bool);
+ s32 (*setup_mac_link)(struct txgbe_hw *, u32, bool);
+ s32 (*check_link)(struct txgbe_hw *, u32 *, bool *, bool);
+ s32 (*get_link_capabilities)(struct txgbe_hw *, u32 *,
+ bool *);
+ void (*set_rate_select_speed)(struct txgbe_hw *, u32);
+
+ /* Packet Buffer manipulation */
+ void (*setup_pba)(struct txgbe_hw *, int, u32, int);
+
+ /* LED */
+ s32 (*led_on)(struct txgbe_hw *, u32);
+ s32 (*led_off)(struct txgbe_hw *, u32);
+ s32 (*blink_led_start)(struct txgbe_hw *, u32);
+ s32 (*blink_led_stop)(struct txgbe_hw *, u32);
+ s32 (*init_led_link_act)(struct txgbe_hw *);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct txgbe_hw *, u32, u8 *, u32, u32);
+ s32 (*set_uc_addr)(struct txgbe_hw *, u32, u8 *);
+ s32 (*clear_rar)(struct txgbe_hw *, u32);
+ s32 (*insert_mac_addr)(struct txgbe_hw *, u8 *, u32);
+ s32 (*set_vmdq)(struct txgbe_hw *, u32, u32);
+ s32 (*set_vmdq_san_mac)(struct txgbe_hw *, u32);
+ s32 (*clear_vmdq)(struct txgbe_hw *, u32, u32);
+ s32 (*init_rx_addrs)(struct txgbe_hw *);
+ s32 (*update_uc_addr_list)(struct txgbe_hw *, u8 *, u32,
+ txgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct txgbe_hw *, u8 *, u32,
+ txgbe_mc_addr_itr, bool clear);
+ s32 (*enable_mc)(struct txgbe_hw *);
+ s32 (*disable_mc)(struct txgbe_hw *);
+ s32 (*clear_vfta)(struct txgbe_hw *);
+ s32 (*set_vfta)(struct txgbe_hw *, u32, u32, bool, bool);
+ s32 (*set_vlvf)(struct txgbe_hw *, u32, u32, bool, u32 *, u32,
+ bool);
+ s32 (*init_uta_tables)(struct txgbe_hw *);
+ void (*set_mac_anti_spoofing)(struct txgbe_hw *, bool, int);
+ void (*set_vlan_anti_spoofing)(struct txgbe_hw *, bool, int);
+ s32 (*update_xcast_mode)(struct txgbe_hw *, int);
+ s32 (*set_rlpml)(struct txgbe_hw *, u16);
+
+ /* Flow Control */
+ s32 (*fc_enable)(struct txgbe_hw *);
+ s32 (*setup_fc)(struct txgbe_hw *);
+ void (*fc_autoneg)(struct txgbe_hw *);
+
+ /* Manageability interface */
+ s32 (*set_fw_drv_ver)(struct txgbe_hw *, u8, u8, u8, u8, u16,
+ const char *);
+ s32 (*get_thermal_sensor_data)(struct txgbe_hw *);
+ s32 (*init_thermal_sensor_thresh)(struct txgbe_hw *hw);
+ void (*get_rtrup2tc)(struct txgbe_hw *hw, u8 *map);
+ void (*disable_rx)(struct txgbe_hw *hw);
+ void (*enable_rx)(struct txgbe_hw *hw);
+ void (*set_source_address_pruning)(struct txgbe_hw *, bool,
+ unsigned int);
+ void (*set_ethertype_anti_spoofing)(struct txgbe_hw *, bool, int);
+ s32 (*dmac_update_tcs)(struct txgbe_hw *hw);
+ s32 (*dmac_config_tcs)(struct txgbe_hw *hw);
+ s32 (*dmac_config)(struct txgbe_hw *hw);
+ s32 (*setup_eee)(struct txgbe_hw *hw, bool enable_eee);
+ s32 (*read_iosf_sb_reg)(struct txgbe_hw *, u32, u32, u32 *);
+ s32 (*write_iosf_sb_reg)(struct txgbe_hw *, u32, u32, u32);
+ void (*disable_mdd)(struct txgbe_hw *hw);
+ void (*enable_mdd)(struct txgbe_hw *hw);
+ void (*mdd_event)(struct txgbe_hw *hw, u32 *vf_bitmap);
+ void (*restore_mdd_vf)(struct txgbe_hw *hw, u32 vf);
+ bool (*fw_recovery_mode)(struct txgbe_hw *hw);
+
+ enum txgbe_mac_type type;
+ u8 addr[ETH_ADDR_LEN];
+ u8 perm_addr[ETH_ADDR_LEN];
+ u8 san_addr[ETH_ADDR_LEN];
+ /* prefix for World Wide Node Name (WWNN) */
+ u16 wwnn_prefix;
+ /* prefix for World Wide Port Name (WWPN) */
+ u16 wwpn_prefix;
+#define TXGBE_MAX_MTA 128
+ u32 mta_shadow[TXGBE_MAX_MTA];
+ s32 mc_filter_type;
+ u32 mcft_size;
+ u32 vft_size;
+ u32 num_rar_entries;
+ u32 rar_highwater;
+ u32 rx_pb_size;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u64 orig_autoc; /* cached value of AUTOC */
+ u8 san_mac_rar_index;
+ bool get_link_status;
+ bool orig_link_settings_stored;
+ bool autotry_restart;
+ u8 flags;
+ struct txgbe_thermal_sensor_data thermal_sensor_data;
+ bool thermal_sensor_enabled;
+ struct txgbe_dmac_config dmac_cfg;
+ bool set_lben;
+ u32 max_link_up_time;
+};
+
+struct txgbe_phy_info {
+ u32 (*get_media_type)(struct txgbe_hw *);
+ s32 (*identify)(struct txgbe_hw *);
+ s32 (*identify_sfp)(struct txgbe_hw *);
+ s32 (*init)(struct txgbe_hw *);
+ s32 (*reset)(struct txgbe_hw *);
+ s32 (*read_reg)(struct txgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg)(struct txgbe_hw *, u32, u32, u16);
+ s32 (*read_reg_mdi)(struct txgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg_mdi)(struct txgbe_hw *, u32, u32, u16);
+ s32 (*setup_link)(struct txgbe_hw *);
+ s32 (*setup_internal_link)(struct txgbe_hw *);
+ s32 (*setup_link_speed)(struct txgbe_hw *, u32, bool);
+ s32 (*check_link)(struct txgbe_hw *, u32 *, bool *);
+ s32 (*get_firmware_version)(struct txgbe_hw *, u32 *);
+ s32 (*read_i2c_byte)(struct txgbe_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct txgbe_hw *, u8, u8, u8);
+ s32 (*read_i2c_sff8472)(struct txgbe_hw *, u8, u8 *);
+ s32 (*read_i2c_eeprom)(struct txgbe_hw *, u8, u8 *);
+ s32 (*write_i2c_eeprom)(struct txgbe_hw *, u8, u8);
+ void (*i2c_bus_clear)(struct txgbe_hw *);
+ s32 (*check_overtemp)(struct txgbe_hw *);
+ s32 (*set_phy_power)(struct txgbe_hw *, bool on);
+ s32 (*enter_lplu)(struct txgbe_hw *);
+ s32 (*handle_lasi)(struct txgbe_hw *hw);
+ s32 (*read_i2c_byte_unlocked)(struct txgbe_hw *, u8 offset, u8 addr,
+ u8 *value);
+ s32 (*write_i2c_byte_unlocked)(struct txgbe_hw *, u8 offset, u8 addr,
+ u8 value);
+
+ enum txgbe_phy_type type;
+ u32 addr;
+ u32 id;
+ enum txgbe_sfp_type sfp_type;
+ bool sfp_setup_needed;
+ u32 revision;
+ u32 media_type;
+ u32 phy_semaphore_mask;
+ bool reset_disable;
+ u32 autoneg_advertised;
+ u32 speeds_supported;
+ u32 eee_speeds_supported;
+ u32 eee_speeds_advertised;
+ enum txgbe_smart_speed smart_speed;
+ bool smart_speed_active;
+ bool multispeed_fiber;
+ bool reset_if_overtemp;
+ bool qsfp_shared_i2c_bus;
+ u32 nw_mng_if_sel;
+ u32 link_mode;
+};
+
+struct txgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct txgbe_mbx_info {
+ void (*init_params)(struct txgbe_hw *hw);
+ s32 (*read)(struct txgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct txgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct txgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct txgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct txgbe_hw *, u16);
+ s32 (*check_for_ack)(struct txgbe_hw *, u16);
+ s32 (*check_for_rst)(struct txgbe_hw *, u16);
+
+ struct txgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+enum txgbe_isb_idx {
+ TXGBE_ISB_HEADER,
+ TXGBE_ISB_MISC,
+ TXGBE_ISB_VEC0,
+ TXGBE_ISB_VEC1,
+ TXGBE_ISB_MAX
+};
+
+struct txgbe_hw {
+ void IOMEM *hw_addr;
+ void *back;
+ struct txgbe_mac_info mac;
+ struct txgbe_addr_filter_info addr_ctrl;
+ struct txgbe_fc_info fc;
+ struct txgbe_phy_info phy;
+ struct txgbe_link_info link;
+ struct txgbe_rom_info rom;
+ struct txgbe_flash_info flash;
+ struct txgbe_bus_info bus;
+ struct txgbe_mbx_info mbx;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ bool adapter_stopped;
+ int api_version;
+ bool force_full_reset;
+ bool allow_unsupported_sfp;
+ bool wol_enabled;
+ bool need_crosstalk_fix;
+
+ u32 b4_buf[16];
+ uint64_t isb_dma;
+ void IOMEM *isb_mem;
+ u16 nb_rx_queues;
+ u16 nb_tx_queues;
+
+ u32 mode;
+ enum txgbe_link_status {
+ TXGBE_LINK_STATUS_NONE = 0,
+ TXGBE_LINK_STATUS_KX,
+ TXGBE_LINK_STATUS_KX4
+ } link_status;
+ enum txgbe_reset_type {
+ TXGBE_LAN_RESET = 0,
+ TXGBE_SW_RESET,
+ TXGBE_GLOBAL_RESET
+ } reset_type;
+
+ u32 q_rx_regs[128 * 4];
+ u32 q_tx_regs[128 * 4];
+ bool offset_loaded;
+ bool rx_loaded;
+ struct {
+ u64 rx_qp_packets;
+ u64 tx_qp_packets;
+ u64 rx_qp_bytes;
+ u64 tx_qp_bytes;
+ u64 rx_qp_mc_packets;
+ } qp_last[TXGBE_MAX_QP];
+};
+
+#include "txgbe_regs.h"
+#include "txgbe_dummy.h"
+
+#endif /* _TXGBE_TYPE_H_ */
new file mode 100644
@@ -0,0 +1,719 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_type.h"
+#include "txgbe_mbx.h"
+#include "txgbe_vf.h"
+
+/**
+ * txgbe_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the generic function pointers; adapter-specific code can
+ * override any of them afterwards by installing its own function
+ * pointers.
+ * Does not touch the hardware.
+ **/
+s32 txgbe_init_ops_vf(struct txgbe_hw *hw)
+{
+ struct txgbe_mac_info *mac = &hw->mac;
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+
+ /* MAC */
+ mac->init_hw = txgbe_init_hw_vf;
+ mac->reset_hw = txgbe_reset_hw_vf;
+ mac->start_hw = txgbe_start_hw_vf;
+ /* Cannot clear stats on VF */
+ mac->get_mac_addr = txgbe_get_mac_addr_vf;
+ mac->stop_hw = txgbe_stop_hw_vf;
+ mac->negotiate_api_version = txgbevf_negotiate_api_version;
+
+ /* Link */
+ mac->setup_link = txgbe_setup_mac_link_vf;
+ mac->check_link = txgbe_check_mac_link_vf;
+
+ /* RAR, Multicast, VLAN */
+ mac->set_rar = txgbe_set_rar_vf;
+ mac->set_uc_addr = txgbevf_set_uc_addr_vf;
+ mac->update_mc_addr_list = txgbe_update_mc_addr_list_vf;
+ mac->update_xcast_mode = txgbevf_update_xcast_mode;
+ mac->set_vfta = txgbe_set_vfta_vf;
+ mac->set_rlpml = txgbevf_rlpml_set_vf;
+
+ mac->max_tx_queues = 1;
+ mac->max_rx_queues = 1;
+
+ mbx->init_params = txgbe_init_mbx_params_vf;
+ mbx->read = txgbe_read_mbx_vf;
+ mbx->write = txgbe_write_mbx_vf;
+ mbx->read_posted = txgbe_read_posted_mbx;
+ mbx->write_posted = txgbe_write_posted_mbx;
+ mbx->check_for_msg = txgbe_check_for_msg_vf;
+ mbx->check_for_ack = txgbe_check_for_ack_vf;
+ mbx->check_for_rst = txgbe_check_for_rst_vf;
+
+ return 0;
+}
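+
+/*
+ * Typical VF bring-up (a sketch, not the only valid order): bind the VF
+ * ops, then drive the device through the freshly assigned pointers:
+ *
+ *	txgbe_init_ops_vf(hw);
+ *	hw->mac.reset_hw(hw);
+ *	hw->mac.init_hw(hw);
+ */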
+
+/* txgbe_virt_clr_reg - Set registers to their default (power-on) state.
+ * @hw: pointer to hardware structure
+ */
+static void txgbe_virt_clr_reg(struct txgbe_hw *hw)
+{
+ int i;
+ u32 vfsrrctl;
+
+ /* default values (BUF_SIZE = 2048, HDR_SIZE = 256) */
+ vfsrrctl = TXGBE_RXCFG_HDRLEN(TXGBE_RX_HDR_SIZE);
+ vfsrrctl |= TXGBE_RXCFG_PKTLEN(TXGBE_RX_BUF_SIZE);
+
+ for (i = 0; i < 7; i++) {
+ wr32m(hw, TXGBE_RXCFG(i),
+ (TXGBE_RXCFG_HDRLEN_MASK | TXGBE_RXCFG_PKTLEN_MASK),
+ vfsrrctl);
+ }
+
+ txgbe_flush(hw);
+}
+
+/**
+ * txgbe_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by clearing the adapter_stopped flag. Bus info,
+ * filters, link and flow control setup are owned by the PF, so the VF
+ * has nothing else to do here.
+ **/
+s32 txgbe_start_hw_vf(struct txgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+/**
+ * txgbe_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+s32 txgbe_init_hw_vf(struct txgbe_hw *hw)
+{
+ s32 status = hw->mac.start_hw(hw);
+
+ hw->mac.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * txgbe_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ **/
+s32 txgbe_reset_hw_vf(struct txgbe_hw *hw)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = TXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = TXGBE_ERR_INVALID_MAC_ADDR;
+ u32 msgbuf[TXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+	DEBUGFUNC("txgbe_reset_hw_vf");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.stop_hw(hw);
+
+ /* reset the api version */
+ hw->api_version = txgbe_mbox_api_10;
+
+ /* backup msix vectors */
+ mbx->timeout = TXGBE_VF_MBX_INIT_TIMEOUT;
+ msgbuf[0] = TXGBE_VF_BACKUP;
+ mbx->write_posted(hw, msgbuf, 1, 0);
+ msec_delay(10);
+
+ DEBUGOUT("Issuing a function level reset to MAC\n");
+ wr32(hw, TXGBE_VFRST, TXGBE_VFRST_SET);
+ txgbe_flush(hw);
+ msec_delay(50);
+
+ hw->offset_loaded = 1;
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->check_for_rst(hw, 0) && timeout) {
+ timeout--;
+		/* wait 5 us, then check again */
+ usec_delay(5);
+ }
+
+ if (!timeout)
+ return TXGBE_ERR_RESET_FAILED;
+
+ /* Reset VF registers to initial values */
+ txgbe_virt_clr_reg(hw);
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = TXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = TXGBE_VF_RESET;
+ mbx->write_posted(hw, msgbuf, 1, 0);
+
+ msec_delay(10);
+
+ /*
+ * set our "perm_addr" based on info provided by PF
+ * also set up the mc_filter_type which is piggy backed
+ * on the mac address in word 3
+ */
+ ret_val = mbx->read_posted(hw, msgbuf,
+ TXGBE_VF_PERMADDR_MSG_LEN, 0);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK) &&
+ msgbuf[0] != (TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_NACK))
+ return TXGBE_ERR_INVALID_MAC_ADDR;
+
+ if (msgbuf[0] == (TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK))
+ memcpy(hw->mac.perm_addr, addr, ETH_ADDR_LEN);
+
+ hw->mac.mc_filter_type = msgbuf[TXGBE_VF_MC_TYPE_WORD];
+
+ return ret_val;
+}
+
+/**
+ * txgbe_stop_hw_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within txgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 txgbe_stop_hw_vf(struct txgbe_hw *hw)
+{
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ wr32(hw, TXGBE_VFIMC, TXGBE_VFIMC_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ wr32(hw, TXGBE_VFICR, TXGBE_VFICR_MASK);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ wr32(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_FLUSH);
+
+ /* Disable the receive unit by stopping each queue */
+	for (i = 0; i < hw->mac.max_rx_queues; i++)
+		wr32m(hw, TXGBE_RXCFG(i), TXGBE_RXCFG_ENA, 0);
+ /* Clear packet split and pool config */
+ wr32(hw, TXGBE_VFPLCFG, 0);
+ hw->rx_loaded = 1;
+
+	/* flush all queue disables */
+ txgbe_flush(hw);
+ msec_delay(2);
+
+ return 0;
+}
+
+/**
+ * txgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ **/
+STATIC s32 txgbe_mta_vector(struct txgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ DEBUGOUT("MC filter type param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
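+
+/*
+ * Worked example: for mc_filter_type 0 and the multicast address
+ * 01:00:5e:00:00:01, mc_addr[4] = 0x00 and mc_addr[5] = 0x01, so
+ * vector = (0x00 >> 4) | (0x01 << 4) = 0x010. In the conventional MTA
+ * layout (128 32-bit words, cf. TXGBE_MCADDRTBL) this selects bit
+ * (vector & 0x1F) = 16 of word (vector >> 5) = 0; for a VF the PF
+ * performs the actual table write.
+ */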
+
+STATIC s32 txgbevf_write_msg_read_ack(struct txgbe_hw *hw, u32 *msg,
+ u32 *retmsg, u16 size)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ s32 retval = mbx->write_posted(hw, msg, size, 0);
+
+ if (retval)
+ return retval;
+
+ return mbx->read_posted(hw, retmsg, size, 0);
+}
+
+/**
+ * txgbe_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ **/
+s32 txgbe_set_rar_vf(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+ UNREFERENCED_PARAMETER(vmdq, enable_addr, index);
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = TXGBE_VF_SET_MAC_ADDR;
+	memcpy(msg_addr, addr, ETH_ADDR_LEN);
+ ret_val = txgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+
+ msgbuf[0] &= ~TXGBE_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (TXGBE_VF_SET_MAC_ADDR | TXGBE_VT_MSGTYPE_NACK))) {
+ txgbe_get_mac_addr_vf(hw, hw->mac.addr);
+ return TXGBE_ERR_MBX;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ * @clear: unused
+ *
+ * Updates the Multicast Table Array.
+ **/
+s32 txgbe_update_mc_addr_list_vf(struct txgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, txgbe_mc_addr_itr next,
+ bool clear)
+{
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[TXGBE_P2VMBX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 vector;
+ u32 cnt, i;
+ u32 vmdq;
+
+ UNREFERENCED_PARAMETER(clear);
+
+ DEBUGFUNC("txgbe_update_mc_addr_list_vf");
+
+	/* Each entry in the list uses one 16-bit word. We have 30
+	 * 16-bit words available in our HW msg buffer (minus 1 for the
+	 * msg type). That's 30 hash values if we pack them right. If
+	 * there are more than 30 MC addresses to add then punt the
+	 * extras for now and then add code to handle more than 30 later.
+	 * It would be unusual for a server to request that many multi-cast
+	 * addresses except for in large enterprise network environments.
+	 */
+
+ DEBUGOUT("MC Addr Count = %d\n", mc_addr_count);
+
+ cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ msgbuf[0] = TXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << TXGBE_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ vector = txgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+ DEBUGOUT("Hash value = 0x%03X\n", vector);
+ vector_list[i] = (u16)vector;
+ }
+
+ return mbx->write_posted(hw, msgbuf, TXGBE_P2VMBX_SIZE, 0);
+}
+
+/**
+ * txgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.
+ **/
+s32 txgbevf_update_xcast_mode(struct txgbe_hw *hw, int xcast_mode)
+{
+ u32 msgbuf[2];
+ s32 err;
+
+ switch (hw->api_version) {
+ case txgbe_mbox_api_12:
+		/* Modes beyond ALLMULTI were introduced in mbox API 1.3 */
+ if (xcast_mode > TXGBEVF_XCAST_MODE_ALLMULTI)
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ /* Fall through */
+ case txgbe_mbox_api_13:
+ break;
+ default:
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ }
+
+ msgbuf[0] = TXGBE_VF_UPDATE_XCAST_MODE;
+ msgbuf[1] = xcast_mode;
+
+ err = txgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (err)
+ return err;
+
+ msgbuf[0] &= ~TXGBE_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (TXGBE_VF_UPDATE_XCAST_MODE | TXGBE_VT_MSGTYPE_NACK))
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ return 0;
+}
+
+/**
+ * txgbe_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 txgbe_set_vfta_vf(struct txgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ u32 msgbuf[2];
+ s32 ret_val;
+ UNREFERENCED_PARAMETER(vind, vlvf_bypass);
+
+ msgbuf[0] = TXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << TXGBE_VT_MSGINFO_SHIFT;
+
+ ret_val = txgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (!ret_val && (msgbuf[0] & TXGBE_VT_MSGTYPE_ACK))
+ return 0;
+
+ return ret_val | (msgbuf[0] & TXGBE_VT_MSGTYPE_NACK);
+}
+
+/**
+ * txgbe_get_num_of_tx_queues_vf - Get number of TX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ **/
+u32 txgbe_get_num_of_tx_queues_vf(struct txgbe_hw *hw)
+{
+ UNREFERENCED_PARAMETER(hw);
+ return TXGBE_VF_MAX_TX_QUEUES;
+}
+
+/**
+ * txgbe_get_num_of_rx_queues_vf - Get number of RX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
+ **/
+u32 txgbe_get_num_of_rx_queues_vf(struct txgbe_hw *hw)
+{
+ UNREFERENCED_PARAMETER(hw);
+ return TXGBE_VF_MAX_RX_QUEUES;
+}
+
+/**
+ * txgbe_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: the MAC address
+ **/
+s32 txgbe_get_mac_addr_vf(struct txgbe_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ mac_addr[i] = hw->mac.perm_addr[i];
+
+ return 0;
+}
+
+s32 txgbevf_set_uc_addr_vf(struct txgbe_hw *hw, u32 index, u8 *addr)
+{
+ u32 msgbuf[3], msgbuf_chk;
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+	/*
+	 * If index is one then this is the start of a new list and needs
+	 * indication to the PF so it can do its own list management.
+	 * If it is zero then that tells the PF to just clear all of
+	 * this VF's macvlans and there is no new list.
+	 */
+ msgbuf[0] |= index << TXGBE_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= TXGBE_VF_SET_MACVLAN;
+ msgbuf_chk = msgbuf[0];
+	if (addr)
+		memcpy(msg_addr, addr, ETH_ADDR_LEN);
+
+ ret_val = txgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+ if (!ret_val) {
+ msgbuf[0] &= ~TXGBE_VT_MSGTYPE_CTS;
+
+ if (msgbuf[0] == (msgbuf_chk | TXGBE_VT_MSGTYPE_NACK))
+ return TXGBE_ERR_OUT_OF_MEM;
+ }
+
+ return ret_val;
+}
+
+/**
+ * txgbe_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the AUTOC register and restarts link.
+ **/
+s32 txgbe_setup_mac_link_vf(struct txgbe_hw *hw, u32 speed,
+ bool autoneg_wait_to_complete)
+{
+ UNREFERENCED_PARAMETER(hw, speed, autoneg_wait_to_complete);
+ return 0;
+}
+
+/**
+ * txgbe_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 txgbe_check_mac_link_vf(struct txgbe_hw *hw, u32 *speed,
+ bool *link_up, bool wait_to_complete)
+{
+	/*
+	 * For a quick link status check (wait_to_complete == 0), skip the
+	 * PF link status check.
+	 */
+	bool no_pflink_check = wait_to_complete == 0;
+ struct txgbe_mbx_info *mbx = &hw->mbx;
+ struct txgbe_mac_info *mac = &hw->mac;
+ s32 ret_val = 0;
+ u32 links_reg;
+ u32 in_msg = 0;
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = rd32(hw, TXGBE_VFSTATUS);
+ if (!(links_reg & TXGBE_VFSTATUS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == txgbe_mac_raptor_vf && wait_to_complete) {
+ if (po32m(hw, TXGBE_VFSTATUS, TXGBE_VFSTATUS_UP,
+ 0, NULL, 5, 100))
+ goto out;
+ }
+
+ switch (links_reg & TXGBE_VFSTATUS_BW_MASK) {
+ case TXGBE_VFSTATUS_BW_10G:
+ *speed = TXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case TXGBE_VFSTATUS_BW_1G:
+ *speed = TXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case TXGBE_VFSTATUS_BW_100M:
+ *speed = TXGBE_LINK_SPEED_100M_FULL;
+ break;
+ default:
+ *speed = TXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ if (no_pflink_check) {
+ if (*speed == TXGBE_LINK_SPEED_UNKNOWN)
+ mac->get_link_status = true;
+ else
+ mac->get_link_status = false;
+
+ goto out;
+ }
+
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error
+ */
+ if (mbx->read(hw, &in_msg, 1, 0))
+ goto out;
+
+ if (!(in_msg & TXGBE_VT_MSGTYPE_CTS)) {
+		/* msg is not CTS; if it is a NACK we must have lost CTS status */
+ if (in_msg & TXGBE_VT_MSGTYPE_NACK)
+ ret_val = -1;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret_val = -1;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
+
+/**
+ * txgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+s32 txgbevf_rlpml_set_vf(struct txgbe_hw *hw, u16 max_size)
+{
+ u32 msgbuf[2];
+ s32 retval;
+
+ msgbuf[0] = TXGBE_VF_SET_LPE;
+ msgbuf[1] = max_size;
+
+ retval = txgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (retval)
+ return retval;
+ if ((msgbuf[0] & TXGBE_VF_SET_LPE) &&
+ (msgbuf[0] & TXGBE_VT_MSGTYPE_NACK))
+ return TXGBE_ERR_MBX;
+
+ return 0;
+}
+
+/**
+ * txgbevf_negotiate_api_version - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ **/
+int txgbevf_negotiate_api_version(struct txgbe_hw *hw, int api)
+{
+ int err;
+ u32 msg[3];
+
+ /* Negotiate the mailbox API version */
+ msg[0] = TXGBE_VF_API_NEGOTIATE;
+ msg[1] = api;
+ msg[2] = 0;
+
+ err = txgbevf_write_msg_read_ack(hw, msg, msg, 3);
+ if (!err) {
+ msg[0] &= ~TXGBE_VT_MSGTYPE_CTS;
+
+ /* Store value and return 0 on success */
+ if (msg[0] == (TXGBE_VF_API_NEGOTIATE | TXGBE_VT_MSGTYPE_ACK)) {
+ hw->api_version = api;
+ return 0;
+ }
+
+ err = TXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ return err;
+}
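+
+/*
+ * Callers typically try the newest mailbox API first and fall back on
+ * failure (sketch):
+ *
+ *	if (txgbevf_negotiate_api_version(hw, txgbe_mbox_api_13))
+ *		txgbevf_negotiate_api_version(hw, txgbe_mbox_api_12);
+ */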
+
+int txgbevf_get_queues(struct txgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc)
+{
+ int err;
+ u32 msg[5];
+
+ /* do nothing if API doesn't support txgbevf_get_queues */
+ switch (hw->api_version) {
+ case txgbe_mbox_api_11:
+ case txgbe_mbox_api_12:
+ case txgbe_mbox_api_13:
+ break;
+ default:
+ return 0;
+ }
+
+ /* Fetch queue configuration from the PF */
+ msg[0] = TXGBE_VF_GET_QUEUES;
+ msg[1] = msg[2] = msg[3] = msg[4] = 0;
+
+ err = txgbevf_write_msg_read_ack(hw, msg, msg, 5);
+ if (!err) {
+ msg[0] &= ~TXGBE_VT_MSGTYPE_CTS;
+
+		/*
+		 * if we didn't get an ACK there must have been
+		 * some sort of mailbox error so we should treat it
+		 * as such
+		 */
+ if (msg[0] != (TXGBE_VF_GET_QUEUES | TXGBE_VT_MSGTYPE_ACK))
+ return TXGBE_ERR_MBX;
+
+ /* record and validate values from message */
+ hw->mac.max_tx_queues = msg[TXGBE_VF_TX_QUEUES];
+ if (hw->mac.max_tx_queues == 0 ||
+ hw->mac.max_tx_queues > TXGBE_VF_MAX_TX_QUEUES)
+ hw->mac.max_tx_queues = TXGBE_VF_MAX_TX_QUEUES;
+
+ hw->mac.max_rx_queues = msg[TXGBE_VF_RX_QUEUES];
+ if (hw->mac.max_rx_queues == 0 ||
+ hw->mac.max_rx_queues > TXGBE_VF_MAX_RX_QUEUES)
+ hw->mac.max_rx_queues = TXGBE_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msg[TXGBE_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > hw->mac.max_rx_queues)
+ *num_tcs = 1;
+
+ *default_tc = msg[TXGBE_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= hw->mac.max_tx_queues)
+ *default_tc = 0;
+ }
+
+ return err;
+}
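+
+/*
+ * Usage sketch: after a successful API negotiation, fetch the PF-granted
+ * queue limits before configuring the rings:
+ *
+ *	unsigned int num_tcs, default_tc;
+ *	if (txgbevf_get_queues(hw, &num_tcs, &default_tc) != 0)
+ *		return TXGBE_ERR_MBX;
+ */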
new file mode 100644
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_VF_H_
+#define _TXGBE_VF_H_
+
+#include "txgbe_type.h"
+
+#define TXGBE_VF_MAX_TX_QUEUES 8
+#define TXGBE_VF_MAX_RX_QUEUES 8
+
+/* DCB define */
+#define TXGBE_VF_MAX_TRAFFIC_CLASS 8
+
+struct txgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+	struct {
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+ } qp[8];
+
+ u64 saved_reset_vfgprc;
+ u64 saved_reset_vfgptc;
+ u64 saved_reset_vfgorc;
+ u64 saved_reset_vfgotc;
+ u64 saved_reset_vfmprc;
+};
+
+s32 txgbe_init_ops_vf(struct txgbe_hw *hw);
+s32 txgbe_init_hw_vf(struct txgbe_hw *hw);
+s32 txgbe_start_hw_vf(struct txgbe_hw *hw);
+s32 txgbe_reset_hw_vf(struct txgbe_hw *hw);
+s32 txgbe_stop_hw_vf(struct txgbe_hw *hw);
+u32 txgbe_get_num_of_tx_queues_vf(struct txgbe_hw *hw);
+u32 txgbe_get_num_of_rx_queues_vf(struct txgbe_hw *hw);
+s32 txgbe_get_mac_addr_vf(struct txgbe_hw *hw, u8 *mac_addr);
+s32 txgbe_setup_mac_link_vf(struct txgbe_hw *hw, u32 speed,
+ bool autoneg_wait_to_complete);
+s32 txgbe_check_mac_link_vf(struct txgbe_hw *hw, u32 *speed,
+ bool *link_up, bool autoneg_wait_to_complete);
+s32 txgbe_set_rar_vf(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 txgbevf_set_uc_addr_vf(struct txgbe_hw *hw, u32 index, u8 *addr);
+s32 txgbe_update_mc_addr_list_vf(struct txgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, txgbe_mc_addr_itr,
+ bool clear);
+s32 txgbevf_update_xcast_mode(struct txgbe_hw *hw, int xcast_mode);
+s32 txgbe_set_vfta_vf(struct txgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass);
+s32 txgbevf_rlpml_set_vf(struct txgbe_hw *hw, u16 max_size);
+int txgbevf_negotiate_api_version(struct txgbe_hw *hw, int api);
+int txgbevf_get_queues(struct txgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc);
+
+#endif /* _TXGBE_VF_H_ */
new file mode 100644
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2020
+
+cflags += ['-DRTE_LIBRTE_TXGBE_BYPASS']
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'txgbe_ethdev.c',
+ 'txgbe_ethdev_vf.c',
+ 'txgbe_fdir.c',
+ 'txgbe_flow.c',
+ 'txgbe_ipsec.c',
+ 'txgbe_ptypes.c',
+ 'txgbe_pf.c',
+ 'txgbe_rxtx.c',
+ 'txgbe_tm.c',
+ 'txgbe_vf_representor.c',
+ 'rte_pmd_txgbe.c'
+)
+
+deps += ['hash', 'security']
+
+includes += include_directories('base')
+
+install_headers('rte_pmd_txgbe.h')
new file mode 100644
@@ -0,0 +1,992 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <rte_ethdev_driver.h>
+
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "rte_pmd_txgbe.h"
+
+int
+rte_pmd_txgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
+ struct rte_ether_addr *mac_addr)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_vf_info *vfinfo;
+ int rar_entry;
+ uint8_t *new_mac = (uint8_t *)(mac_addr);
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+ vfinfo = *(TXGBE_DEV_VFDATA(dev));
+ rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ if (rte_is_valid_assigned_ether_addr((struct rte_ether_addr *)new_mac)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+ RTE_ETHER_ADDR_LEN);
+ return hw->mac.set_rar(hw, rar_entry, new_mac, vf,
+ true);
+ }
+ return -EINVAL;
+}
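+
+/*
+ * Example (sketch; the address below is a made-up locally administered
+ * MAC): pin VF 0 of port_id to a fixed address from the application:
+ *
+ *	struct rte_ether_addr mac = {{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }};
+ *	rte_pmd_txgbe_set_vf_mac_addr(port_id, 0, &mac);
+ */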
+
+int
+rte_pmd_txgbe_ping_vf(uint16_t port, uint16_t vf)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_vf_info *vfinfo;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+ vfinfo = *(TXGBE_DEV_VFDATA(dev));
+
+ ctrl = TXGBE_PF_CONTROL_MSG;
+ if (vfinfo[vf].clear_to_send)
+ ctrl |= TXGBE_VT_MSGTYPE_CTS;
+
+ txgbe_write_mbx(hw, &ctrl, 1, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+ mac = &hw->mac;
+
+ mac->set_vlan_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+ mac = &hw->mac;
+ mac->set_mac_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
+{
+ struct txgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+ ctrl = rd32(hw, TXGBE_POOLTAG(vf));
+ if (vlan_id) {
+ ctrl &= ~TXGBE_POOLTAG_ACT_MASK;
+ ctrl |= TXGBE_POOLTAG_ACT_ALWAYS;
+
+ ctrl &= ~TXGBE_POOLTAG_VTAG_MASK;
+ ctrl |= TXGBE_POOLTAG_VTAG(vlan_id);
+ } else {
+ ctrl = 0;
+ }
+
+ wr32(hw, TXGBE_POOLTAG(vf), ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_tx_loopback(uint16_t port, uint8_t on)
+{
+ struct txgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+ ctrl = rd32(hw, TXGBE_PSRCTL);
+ /* enable or disable VMDQ loopback */
+ if (on)
+ ctrl |= TXGBE_PSRCTL_LBENA;
+ else
+ ctrl &= ~TXGBE_PSRCTL_LBENA;
+
+ wr32(hw, TXGBE_PSRCTL, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_all_queues_drop_en(uint16_t port, uint8_t on)
+{
+ struct txgbe_hw *hw;
+ int i;
+ int num_queues = 128;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+	for (i = 0; i < num_queues; i++) {
+ u32 val = 1 << (i % 32);
+ wr32m(hw, TXGBE_QPRXDROP(i / 32), val, val);
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct txgbe_hw *hw;
+ uint32_t reg_value;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+	/* only VFs 0 to 63 are supported */
+ if ((vf >= pci_dev->max_vfs) || (vf > 63))
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+ reg_value = rd32(hw, TXGBE_RXCFG(vf * 2));
+ if (on)
+ reg_value |= TXGBE_RXCFG_DROP;
+ else
+ reg_value &= ~TXGBE_RXCFG_DROP;
+
+ wr32(hw, TXGBE_RXCFG(vf * 2), reg_value);
+
+ reg_value = rd32(hw, TXGBE_RXCFG(vf * 2 + 1));
+ if (on)
+ reg_value |= TXGBE_RXCFG_DROP;
+ else
+ reg_value &= ~TXGBE_RXCFG_DROP;
+
+ wr32(hw, TXGBE_RXCFG(vf * 2 + 1), reg_value);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ struct txgbe_hw *hw;
+ uint16_t queues_per_pool;
+ uint32_t q;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ hw = TXGBE_DEV_HW(dev);
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+
+ /* The PF has 128 queue pairs and in SRIOV configuration
+ * those queues will be assigned to VF's, so RXDCTL
+ * registers will be dealing with queues which will be
+ * assigned to VF's.
+	 * Let's say we have SRIOV configured with 31 VFs: then the
+	 * first 124 queues 0-123 will be allocated to the VFs and only
+	 * the last 4 queues 124-127 will be assigned to the PF.
+ */
+ queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+ ETH_64_POOLS;
+
+ for (q = 0; q < queues_per_pool; q++)
+ (*dev->dev_ops->vlan_strip_queue_set)(dev,
+ q + vf * queues_per_pool, on);
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_vf_rxmode(uint16_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on)
+{
+ int val = 0;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ struct txgbe_hw *hw;
+ uint32_t vmolr;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+ vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
+
+ if (txgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ val = txgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+ if (on)
+ vmolr |= val;
+ else
+ vmolr &= ~val;
+
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+ struct txgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+
+ if (txgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+ if (vf >= 32) {
+ addr = TXGBE_POOLRXENA(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = TXGBE_POOLRXENA(0);
+ val = bit1 << vf;
+ }
+
+ reg = rd32(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ wr32(hw, addr, reg);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct txgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+ if (txgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+ if (vf >= 32) {
+ addr = TXGBE_POOLTXENA(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = TXGBE_POOLTXENA(0);
+ val = bit1 << vf;
+ }
+
+ reg = rd32(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ wr32(hw, addr, reg);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on)
+{
+ struct rte_eth_dev *dev;
+ int ret = 0;
+ uint16_t vf_idx;
+ struct txgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if ((vlan > RTE_ETHER_MAX_VLAN_ID) || (vf_mask == 0))
+ return -EINVAL;
+
+ hw = TXGBE_DEV_HW(dev);
+ if (txgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ for (vf_idx = 0; vf_idx < 64; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ ret = hw->mac.set_vfta(hw, vlan, vf_idx,
+ vlan_on, false);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
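
For illustration, vf_mask is a 64-bit bitmap with bit n selecting VF n, so enabling filtering of VLAN 100 on VFs 0 and 3 of a hypothetical port 0 looks like:

    uint64_t vf_mask = (1ULL << 0) | (1ULL << 3);
    int ret = rte_pmd_txgbe_set_vf_vlan_filter(0, 100, vf_mask, 1);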
+
+int
+rte_pmd_txgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ return txgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
+}
+
+int
+rte_pmd_txgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp)
+{
+ struct rte_eth_dev *dev;
+ struct txgbe_macsec_setting macsec_setting;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ macsec_setting.offload_en = 1;
+ macsec_setting.encrypt_en = en;
+ macsec_setting.replayprotect_en = rp;
+
+ txgbe_dev_macsec_setting_save(dev, &macsec_setting);
+
+ txgbe_dev_macsec_register_enable(dev, &macsec_setting);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_macsec_disable(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ txgbe_dev_macsec_setting_reset(dev);
+
+ txgbe_dev_macsec_register_disable(dev);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_macsec_config_txsc(uint16_t port, uint8_t *mac)
+{
+ struct txgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = TXGBE_DEV_HW(dev);
+
+ ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ wr32(hw, TXGBE_LSECTXSCIL, ctrl);
+
+ ctrl = mac[4] | (mac[5] << 8);
+ wr32(hw, TXGBE_LSECTXSCIH, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi)
+{
+ struct txgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = TXGBE_DEV_HW(dev);
+
+ ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ wr32(hw, TXGBE_LSECRXSCIL, ctrl);
+
+ pi = rte_cpu_to_be_16(pi);
+ ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+ wr32(hw, TXGBE_LSECRXSCIH, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key)
+{
+ struct txgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl, i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = TXGBE_DEV_HW(dev);
+
+ if (idx != 0 && idx != 1)
+ return -EINVAL;
+
+ if (an >= 4)
+ return -EINVAL;
+
+ /* Set the PN and key */
+ pn = rte_cpu_to_be_32(pn);
+ if (idx == 0) {
+ wr32(hw, TXGBE_LSECTXPN0, pn);
+
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ wr32(hw, TXGBE_LSECTXKEY0(i), ctrl);
+ }
+ } else {
+ wr32(hw, TXGBE_LSECTXPN1, pn);
+
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ wr32(hw, TXGBE_LSECTXKEY1(i), ctrl);
+ }
+ }
+
+ /* Set AN and select the SA */
+ ctrl = (an << idx * 2) | (idx << 4);
+ wr32(hw, TXGBE_LSECTXSA, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_txgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key)
+{
+ struct txgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl, i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = TXGBE_DEV_HW(dev);
+
+ if (idx != 0 && idx != 1)
+ return -EINVAL;
+
+ if (an >= 4)
+ return -EINVAL;
+
+ /* Set the PN */
+ pn = rte_cpu_to_be_32(pn);
+ wr32(hw, TXGBE_LSECRXPN(idx), pn);
+
+ /* Set the key */
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ wr32(hw, TXGBE_LSECRXKEY(idx, i), ctrl);
+ }
+
+ /* Set the AN and validate the SA */
+ ctrl = an | (1 << 2);
+ wr32(hw, TXGBE_LSECRXSA(idx), ctrl);
+
+ return 0;
+}
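
The MACsec entry points above are normally driven in sequence. A minimal sketch, assuming both link partners share the same 16-byte key, SA index 0, and a remote PI of 1 (all values illustrative):

    static int setup_macsec(uint16_t port, uint8_t *local_mac,
                            uint8_t *peer_mac, uint8_t *key)
    {
            int ret;

            /* Enable offload with encryption and replay protection. */
            ret = rte_pmd_txgbe_macsec_enable(port, 1, 1);
            if (ret)
                    return ret;
            ret = rte_pmd_txgbe_macsec_config_txsc(port, local_mac);
            if (ret)
                    return ret;
            ret = rte_pmd_txgbe_macsec_config_rxsc(port, peer_mac, 1);
            if (ret)
                    return ret;
            /* SA index 0, association number 0, starting packet number 1. */
            ret = rte_pmd_txgbe_macsec_select_txsa(port, 0, 0, 1, key);
            if (ret)
                    return ret;
            return rte_pmd_txgbe_macsec_select_rxsa(port, 0, 0, 1, key);
    }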
+
+int
+rte_pmd_txgbe_set_tc_bw_alloc(uint16_t port,
+ uint8_t tc_num,
+ uint8_t *bw_weight)
+{
+ struct rte_eth_dev *dev;
+ struct txgbe_dcb_config *dcb_config;
+ struct txgbe_dcb_tc_config *tc;
+ struct rte_eth_conf *eth_conf;
+ struct txgbe_bw_conf *bw_conf;
+ uint8_t i;
+ uint8_t nb_tcs;
+ uint16_t sum;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (tc_num > TXGBE_DCB_TC_MAX) {
+ PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+ TXGBE_DCB_TC_MAX);
+ return -EINVAL;
+ }
+
+ dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
+ bw_conf = TXGBE_DEV_BW_CONF(dev);
+ eth_conf = &dev->data->dev_conf;
+
+ if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+ } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+ ETH_32_POOLS)
+ nb_tcs = ETH_4_TCS;
+ else
+ nb_tcs = ETH_8_TCS;
+ } else {
+ nb_tcs = 1;
+ }
+
+ if (nb_tcs != tc_num) {
+ PMD_DRV_LOG(ERR,
+ "Weight should be set for all %d enabled TCs.",
+ nb_tcs);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < nb_tcs; i++)
+ sum += bw_weight[i];
+ if (sum != 100) {
+ PMD_DRV_LOG(ERR,
+ "The summary of the TC weight should be 100.");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
+ }
+ for (; i < TXGBE_DCB_TC_MAX; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ }
+
+ bw_conf->tc_num = nb_tcs;
+
+ return 0;
+}
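
For example, with 4 TCs enabled the weight array must cover every enabled TC and sum to exactly 100 (the port id is hypothetical):

    uint8_t bw_weight[4] = { 40, 20, 20, 20 };  /* TC0 gets 40%, the rest 20% */
    int ret = rte_pmd_txgbe_set_tc_bw_alloc(0, 4, bw_weight);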
+
+int __rte_experimental
+rte_pmd_txgbe_upd_fctrl_sbp(uint16_t port, int enable)
+{
+ struct txgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t fctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = TXGBE_DEV_HW(dev);
+ if (!hw)
+ return -ENOTSUP;
+
+ fctrl = rd32(hw, TXGBE_SECRXCTL);
+
+ /* If 'enable' set the SBP bit else clear it */
+ if (enable) {
+ fctrl |= TXGBE_SECRXCTL_SAVEBAD;
+ hw->mode = 1;
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, TXGBE_FRAME_SIZE_MAX);
+ } else {
+ fctrl &= ~(TXGBE_SECRXCTL_SAVEBAD);
+ hw->mode = 0;
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(dev->data->dev_conf.rxmode.max_rx_pkt_len));
+ }
+ wr32(hw, TXGBE_SECRXCTL, fctrl);
+ return 0;
+}
+
+/**
+ * rte_pmd_txgbe_acquire_swfw - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and gets the shared PHY token as needed
+ */
+STATIC s32 rte_pmd_txgbe_acquire_swfw(struct txgbe_hw *hw, u32 mask)
+{
+ int retries = FW_PHY_TOKEN_RETRIES;
+ s32 status = 0;
+
+	while (--retries) {
+		status = hw->mac.acquire_swfw_sync(hw, mask);
+		if (status == 0)
+			return 0;
+
+		if (status != TXGBE_ERR_TOKEN_RETRY) {
+			PMD_DRV_LOG(ERR,
+				    "Get SWFW sem failed, Status = %d\n",
+				    status);
+			return status;
+		}
+
+		/* Only TXGBE_ERR_TOKEN_RETRY is retryable: release the
+		 * semaphore and try again.
+		 */
+		PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n",
+			    status);
+		hw->mac.release_swfw_sync(hw, mask);
+	}
+ PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n",
+ hw->phy.id);
+ return status;
+}
+
+/**
+ * rte_pmd_txgbe_release_swfw - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and the shared PHY token as needed
+ */
+STATIC void rte_pmd_txgbe_release_swfw(struct txgbe_hw *hw, u32 mask)
+{
+ hw->mac.release_swfw_sync(hw, mask);
+}
+
+int __rte_experimental
+rte_pmd_txgbe_mdio_lock(uint16_t port)
+{
+ struct txgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ u32 swfw_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = TXGBE_DEV_HW(dev);
+ if (!hw)
+ return -ENOTSUP;
+
+	/* the PHY semaphore is shared by both LAN ports */
+	swfw_mask = TXGBE_MNGSEM_SWPHY;
+
+ if (rte_pmd_txgbe_acquire_swfw(hw, swfw_mask))
+ return TXGBE_ERR_SWFW_SYNC;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_pmd_txgbe_mdio_unlock(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+ struct txgbe_hw *hw;
+ u32 swfw_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = TXGBE_DEV_HW(dev);
+ if (!hw)
+ return -ENOTSUP;
+
+	/* the PHY semaphore is shared by both LAN ports */
+	swfw_mask = TXGBE_MNGSEM_SWPHY;
+
+ rte_pmd_txgbe_release_swfw(hw, swfw_mask);
+
+ return 0;
+}
+
+int __rte_experimental
+rte_pmd_txgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t *phy_data)
+{
+ struct txgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ u32 command;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = TXGBE_DEV_HW(dev);
+ if (!hw)
+ return -ENOTSUP;
+
+ /* write command */
+ command = TXGBE_MDIOSCA_REG(reg_addr) |
+ TXGBE_MDIOSCA_DEV(dev_type) |
+ TXGBE_MDIOSCA_PORT(hw->phy.addr);
+ wr32(hw, TXGBE_MDIOSCA, command);
+
+ command = TXGBE_MDIOSCD_CMD_READ |
+ TXGBE_MDIOSCD_BUSY;
+ wr32(hw, TXGBE_MDIOSCD, command);
+
+ /* wait for completion */
+ if (!po32m(hw, TXGBE_MDIOSCD, TXGBE_MDIOSCD_BUSY,
+ 0, NULL, 100, 100)) {
+ TLOG_DEBUG("PHY read command did not complete.\n");
+ return -TERR_PHY;
+ }
+
+	*phy_data = (uint16_t)rd32m(hw, TXGBE_MDIOSCD, TXGBE_MDIOSCD_DAT(~0));
+
+ return 0;
+}
+
+int __rte_experimental
+rte_pmd_txgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t phy_data)
+{
+ struct txgbe_hw *hw;
+ u32 command;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_txgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = TXGBE_DEV_HW(dev);
+ if (!hw)
+ return -ENOTSUP;
+
+	/* write the PHY register address and command */
+ command = TXGBE_MDIOSCA_REG(reg_addr) |
+ TXGBE_MDIOSCA_DEV(dev_type) |
+ TXGBE_MDIOSCA_PORT(hw->phy.addr);
+ wr32(hw, TXGBE_MDIOSCA, command);
+
+ command = TXGBE_MDIOSCD_CMD_WRITE |
+ TXGBE_MDIOSCD_DAT(phy_data) |
+ TXGBE_MDIOSCD_BUSY;
+ wr32(hw, TXGBE_MDIOSCD, command);
+
+ /* wait for completion */
+ if (!po32m(hw, TXGBE_MDIOSCD, TXGBE_MDIOSCD_BUSY,
+ 0, NULL, 100, 100)) {
+ TLOG_DEBUG("PHY write cmd didn't complete\n");
+ return -TERR_PHY;
+ }
+
+ return 0;
+}
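
The unlocked read/write helpers assume the caller already holds the SWFW semaphore, so a typical caller pairs them with the lock APIs; a sketch (the wrapper name is illustrative):

    static int read_phy_reg(uint16_t port, uint32_t reg, uint32_t dev_type,
                            uint16_t *val)
    {
            int ret;

            ret = rte_pmd_txgbe_mdio_lock(port);
            if (ret)
                    return ret;
            ret = rte_pmd_txgbe_mdio_unlocked_read(port, reg, dev_type, val);
            rte_pmd_txgbe_mdio_unlock(port);
            return ret;
    }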
new file mode 100644
@@ -0,0 +1,726 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+/**
+ * @file rte_pmd_txgbe.h
+ * txgbe PMD specific functions.
+ *
+ **/
+
+#ifndef _PMD_TXGBE_H_
+#define _PMD_TXGBE_H_
+
+#include <rte_compat.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+
+/**
+ * Notify VF when PF link status changes.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* invalid.
+ */
+int rte_pmd_txgbe_ping_vf(uint16_t port, uint16_t vf);
+
+/**
+ * Set the VF MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_txgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
+ struct rte_ether_addr *mac_addr);
+
+/**
+ * Enable/Disable VF VLAN anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF on which to set VLAN anti spoofing.
+ * @param on
+ * 1 - Enable VFs VLAN anti spoofing.
+ * 0 - Disable VFs VLAN anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF MAC anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF on which to set MAC anti spoofing.
+ * @param on
+ * 1 - Enable VFs MAC anti spoofing.
+ * 0 - Disable VFs MAC anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan insert
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable tx loopback
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable tx loopback.
+ * 0 - Disable tx loopback.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_set_tx_loopback(uint16_t port, uint8_t on);
+
+/**
+ * set all queues drop enable bit
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - set the queue drop enable bit for all pools.
+ * 0 - reset the queue drop enable bit for all pools.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_set_all_queues_drop_en(uint16_t port, uint8_t on);
+
+/**
+ * set drop enable bit in the VF split rx control register
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - set the drop enable bit in the split rx control register.
+ * 0 - reset the drop enable bit in the split rx control register.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan strip for all queues in a pool
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_txgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable MACsec offload.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param en
+ * 1 - Enable encryption (encrypt and add integrity signature).
+ * 0 - Disable encryption (only add integrity signature).
+ * @param rp
+ * 1 - Enable replay protection.
+ * 0 - Disable replay protection.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_txgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offload.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_txgbe_macsec_disable(uint16_t port);
+
+/**
+ * Configure Tx SC (Secure Connection).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac
+ * The MAC address on the local side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_txgbe_macsec_config_txsc(uint16_t port, uint8_t *mac);
+
+/**
+ * Configure Rx SC (Secure Connection).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac
+ * The MAC address on the remote side.
+ * @param pi
+ * The PI (port identifier) on the remote side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_txgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable Tx SA (Secure Association).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param idx
+ * The SA to be enabled (0 or 1).
+ * @param an
+ * The association number on the local side.
+ * @param pn
+ * The packet number on the local side.
+ * @param key
+ * The key on the local side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key);
+
+/**
+ * Enable Rx SA (Secure Association).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param idx
+ * The SA to be enabled (0 or 1)
+ * @param an
+ * The association number on the remote side.
+ * @param pn
+ * The packet number on the remote side.
+ * @param key
+ * The key on the remote side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key);
+
+/**
+* Set RX L2 Filtering mode of a VF of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param rx_mask
+* The RX mode mask, which is one or more of accepting Untagged Packets,
+* packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
+* ETH_VMDQ_ACCEPT_UNTAG, ETH_VMDQ_ACCEPT_HASH_UC,
+* ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
+* in rx_mode.
+* @param on
+* 1 - Enable a VF RX mode.
+* 0 - Disable a VF RX mode.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_txgbe_set_vf_rxmode(uint16_t port, uint16_t vf, uint16_t rx_mask,
+ uint8_t on);
+
+/**
+* Enable or disable a VF traffic receive of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic receive.
+* 0 - Disable a VF traffic receive.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_txgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable or disable a VF traffic transmit of the Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic transmit.
+* 0 - Disable a VF traffic transmit.
+* @return
+* - (0) if successful.
+* - (-ENODEV) if *port_id* invalid.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_txgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable/Disable hardware VF VLAN filtering by an Ethernet device of
+* received VLAN packets tagged with a given VLAN Tag Identifier.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vlan
+* The VLAN Tag Identifier whose filtering must be enabled or disabled.
+* @param vf_mask
+* Bitmap listing which VFs participate in the VLAN filtering.
+* @param vlan_on
+* 1 - Enable VFs VLAN filtering.
+* 0 - Disable VFs VLAN filtering.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_txgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on);
+
+/**
+ * Set the rate limitation for a vf on an Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param tx_rate
+ * The tx rate allocated from the total link speed for this VF id.
+ * @param q_msk
+ * The queue mask which need to set the rate.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+
+/**
+ * Set all the TCs' bandwidth weight.
+ *
+ * The bw_weight means the percentage occupied by the TC.
+ * It can be taken as the relative min bandwidth setting.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param tc_num
+ * Number of TCs.
+ * @param bw_weight
+ * An array of relative bandwidth weight for all the TCs.
+ * The summary of the bw_weight should be 100.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_txgbe_set_tc_bw_alloc(uint16_t port,
+ uint8_t tc_num,
+ uint8_t *bw_weight);
+
+/**
+ * Initialize bypass logic. This function needs to be called before
+ * executing any other bypass API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_bypass_init(uint16_t port);
+
+/**
+ * Return bypass state.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param state
+ * The return bypass state.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_bypass_state_show(uint16_t port, uint32_t *state);
+
+/**
+ * Set bypass state
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param new_state
+ * The current bypass state.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_bypass_state_set(uint16_t port, uint32_t *new_state);
+
+/**
+ * Return bypass state when given event occurs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param event
+ * The bypass event
+ * - (1) Main power on (power button is pushed)
+ * - (2) Auxiliary power on (power supply is being plugged)
+ * - (3) Main power off (system shutdown and power supply is left plugged in)
+ * - (4) Auxiliary power off (power supply is being unplugged)
+ * - (5) Display or set the watchdog timer
+ * @param state
+ * The bypass state when given event occurred.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_bypass_event_show(uint16_t port,
+ uint32_t event,
+ uint32_t *state);
+
+/**
+ * Set bypass state when given event occurs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param event
+ * The bypass event
+ * - (1) Main power on (power button is pushed)
+ * - (2) Auxiliary power on (power supply is being plugged)
+ * - (3) Main power off (system shutdown and power supply is left plugged in)
+ * - (4) Auxiliary power off (power supply is being unplugged)
+ * - (5) Display or set the watchdog timer
+ * @param state
+ * The assigned state when given event occurs.
+ * - (1) Normal mode
+ * - (2) Bypass mode
+ * - (3) Isolate mode
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_bypass_event_store(uint16_t port,
+ uint32_t event,
+ uint32_t state);
+
+/**
+ * Set bypass watchdog timeout count.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param timeout
+ * The timeout to be set.
+ * - (0) 0 seconds (timer is off)
+ * - (1) 1.5 seconds
+ * - (2) 2 seconds
+ * - (3) 3 seconds
+ * - (4) 4 seconds
+ * - (5) 8 seconds
+ * - (6) 16 seconds
+ * - (7) 32 seconds
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_bypass_wd_timeout_store(uint16_t port, uint32_t timeout);
+
+/**
+ * Get bypass firmware version.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param ver
+ * The firmware version
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_bypass_ver_show(uint16_t port, uint32_t *ver);
+
+/**
+ * Return bypass watchdog timeout in seconds
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param wd_timeout
+ *   The returned watchdog timeout; "0" represents an expired timer.
+ * - (0) 0 seconds (timer is off)
+ * - (1) 1.5 seconds
+ * - (2) 2 seconds
+ * - (3) 3 seconds
+ * - (4) 4 seconds
+ * - (5) 8 seconds
+ * - (6) 16 seconds
+ * - (7) 32 seconds
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout);
+
+/**
+ * Reset bypass watchdog timer
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_txgbe_bypass_wd_reset(uint16_t port);
+
+/**
+ * Acquire swfw semaphore lock for MDIO access
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (TXGBE_ERR_SWFW_SYNC) If sw/fw semaphore acquisition failed
+ */
+int __rte_experimental
+rte_pmd_txgbe_mdio_lock(uint16_t port);
+
+/**
+ * Release swfw semaphore lock used for MDIO access
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ */
+int __rte_experimental
+rte_pmd_txgbe_mdio_unlock(uint16_t port);
+
+/**
+ * Read a PHY register using MDIO without taking the MDIO lock.
+ * The lock must be taken separately before calling this API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reg_addr
+ * 32 bit PHY Register
+ * @param dev_type
+ * Used to define device base address
+ * @param phy_data
+ * Pointer for reading PHY register data
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (TXGBE_ERR_PHY) If PHY read command failed
+ */
+int __rte_experimental
+rte_pmd_txgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t *phy_data);
+
+/**
+ * Write data to a PHY register using MDIO without taking the MDIO lock.
+ * The lock must be taken separately before calling this API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reg_addr
+ * 32 bit PHY Register
+ * @param dev_type
+ * Used to define device base address
+ * @param phy_data
+ * Data to write to PHY register
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ *   - (TXGBE_ERR_PHY) If PHY write command failed
+ */
+int __rte_experimental
+rte_pmd_txgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t phy_data);
+
+/**
+ * Response sent back to txgbe driver from user app after callback
+ */
+enum rte_pmd_txgbe_mb_event_rsp {
+ RTE_PMD_TXGBE_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_TXGBE_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_TXGBE_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_TXGBE_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/**
+ * Data sent to the user application when the callback is executed.
+ */
+struct rte_pmd_txgbe_mb_event_param {
+ uint16_t vfid; /**< Virtual Function number */
+ uint16_t msg_type; /**< VF to PF message type, defined in txgbe_mbx.h */
+ uint16_t retval; /**< return value */
+ void *msg; /**< pointer to message */
+};
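
An application would typically receive this structure as the ret_param of an RTE_ETH_EVENT_VF_MBOX callback and set retval to steer the PF driver; a minimal sketch, assuming the standard rte_eth_dev_cb_fn signature:

    static int vf_mbox_cb(uint16_t port_id, enum rte_eth_event_type type,
                          void *cb_arg, void *ret_param)
    {
            struct rte_pmd_txgbe_mb_event_param *p = ret_param;

            RTE_SET_USED(port_id);
            RTE_SET_USED(type);
            RTE_SET_USED(cb_arg);

            /* Let the PF driver handle the VF request itself. */
            p->retval = RTE_PMD_TXGBE_MB_EVENT_PROCEED;
            return 0;
    }

It would be registered with rte_eth_dev_callback_register(port, RTE_ETH_EVENT_VF_MBOX, vf_mbox_cb, NULL).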
+enum {
+ RTE_PMD_TXGBE_BYPASS_MODE_NONE,
+ RTE_PMD_TXGBE_BYPASS_MODE_NORMAL,
+ RTE_PMD_TXGBE_BYPASS_MODE_BYPASS,
+ RTE_PMD_TXGBE_BYPASS_MODE_ISOLATE,
+ RTE_PMD_TXGBE_BYPASS_MODE_NUM,
+};
+
+#define RTE_PMD_TXGBE_BYPASS_MODE_VALID(x) \
+ ((x) > RTE_PMD_TXGBE_BYPASS_MODE_NONE && \
+ (x) < RTE_PMD_TXGBE_BYPASS_MODE_NUM)
+
+enum {
+ RTE_PMD_TXGBE_BYPASS_EVENT_NONE,
+ RTE_PMD_TXGBE_BYPASS_EVENT_START,
+ RTE_PMD_TXGBE_BYPASS_EVENT_OS_ON = RTE_PMD_TXGBE_BYPASS_EVENT_START,
+ RTE_PMD_TXGBE_BYPASS_EVENT_POWER_ON,
+ RTE_PMD_TXGBE_BYPASS_EVENT_OS_OFF,
+ RTE_PMD_TXGBE_BYPASS_EVENT_POWER_OFF,
+ RTE_PMD_TXGBE_BYPASS_EVENT_TIMEOUT,
+ RTE_PMD_TXGBE_BYPASS_EVENT_NUM
+};
+
+#define RTE_PMD_TXGBE_BYPASS_EVENT_VALID(x) \
+ ((x) > RTE_PMD_TXGBE_BYPASS_EVENT_NONE && \
+	 (x) < RTE_PMD_TXGBE_BYPASS_EVENT_NUM)
+
+enum {
+ RTE_PMD_TXGBE_BYPASS_TMT_OFF, /* timeout disabled. */
+ RTE_PMD_TXGBE_BYPASS_TMT_1_5_SEC, /* timeout for 1.5 seconds */
+ RTE_PMD_TXGBE_BYPASS_TMT_2_SEC, /* timeout for 2 seconds */
+ RTE_PMD_TXGBE_BYPASS_TMT_3_SEC, /* timeout for 3 seconds */
+ RTE_PMD_TXGBE_BYPASS_TMT_4_SEC, /* timeout for 4 seconds */
+ RTE_PMD_TXGBE_BYPASS_TMT_8_SEC, /* timeout for 8 seconds */
+ RTE_PMD_TXGBE_BYPASS_TMT_16_SEC, /* timeout for 16 seconds */
+ RTE_PMD_TXGBE_BYPASS_TMT_32_SEC, /* timeout for 32 seconds */
+ RTE_PMD_TXGBE_BYPASS_TMT_NUM
+};
+
+#define RTE_PMD_TXGBE_BYPASS_TMT_VALID(x) \
+ ((x) == RTE_PMD_TXGBE_BYPASS_TMT_OFF || \
+ ((x) > RTE_PMD_TXGBE_BYPASS_TMT_OFF && \
+ (x) < RTE_PMD_TXGBE_BYPASS_TMT_NUM))
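
As an illustration, arming the bypass watchdog with an 8 second timeout on a hypothetical port 0 validates the value first:

    int ret = -EINVAL;
    uint32_t to = RTE_PMD_TXGBE_BYPASS_TMT_8_SEC;

    if (RTE_PMD_TXGBE_BYPASS_TMT_VALID(to))
            ret = rte_pmd_txgbe_bypass_wd_timeout_store(0, to);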
+
+/**
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param enable
+ * 0 to disable and nonzero to enable 'SBP' bit in FCTRL register
+ * to receive all packets
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int __rte_experimental
+rte_pmd_txgbe_upd_fctrl_sbp(uint16_t port, int enable);
+#endif /* _PMD_TXGBE_H_ */
new file mode 100644
@@ -0,0 +1,46 @@
+DPDK_21 {
+ global:
+
+ rte_pmd_txgbe_bypass_event_show;
+ rte_pmd_txgbe_bypass_event_store;
+ rte_pmd_txgbe_bypass_init;
+ rte_pmd_txgbe_bypass_state_set;
+ rte_pmd_txgbe_bypass_state_show;
+ rte_pmd_txgbe_bypass_ver_show;
+ rte_pmd_txgbe_bypass_wd_reset;
+ rte_pmd_txgbe_bypass_wd_timeout_show;
+ rte_pmd_txgbe_bypass_wd_timeout_store;
+ rte_pmd_txgbe_macsec_config_rxsc;
+ rte_pmd_txgbe_macsec_config_txsc;
+ rte_pmd_txgbe_macsec_disable;
+ rte_pmd_txgbe_macsec_enable;
+ rte_pmd_txgbe_macsec_select_rxsa;
+ rte_pmd_txgbe_macsec_select_txsa;
+ rte_pmd_txgbe_ping_vf;
+ rte_pmd_txgbe_set_all_queues_drop_en;
+ rte_pmd_txgbe_set_tc_bw_alloc;
+ rte_pmd_txgbe_set_tx_loopback;
+ rte_pmd_txgbe_set_vf_mac_addr;
+ rte_pmd_txgbe_set_vf_mac_anti_spoof;
+ rte_pmd_txgbe_set_vf_rate_limit;
+ rte_pmd_txgbe_set_vf_rx;
+ rte_pmd_txgbe_set_vf_rxmode;
+ rte_pmd_txgbe_set_vf_split_drop_en;
+ rte_pmd_txgbe_set_vf_tx;
+ rte_pmd_txgbe_set_vf_vlan_anti_spoof;
+ rte_pmd_txgbe_set_vf_vlan_filter;
+ rte_pmd_txgbe_set_vf_vlan_insert;
+ rte_pmd_txgbe_set_vf_vlan_stripq;
+
+ local: *;
+};
+
+EXPERIMENTAL {
+ global:
+
+ rte_pmd_txgbe_mdio_lock;
+ rte_pmd_txgbe_mdio_unlock;
+ rte_pmd_txgbe_mdio_unlocked_read;
+ rte_pmd_txgbe_mdio_unlocked_write;
+ rte_pmd_txgbe_upd_fctrl_sbp;
+};
new file mode 100644
@@ -0,0 +1,6264 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_hash_crc.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security_driver.h>
+#endif
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_rxtx.h"
+#include "txgbe_regs_group.h"
+
+static const struct reg_info txgbe_regs_general[] = {
+ {TXGBE_RST, 1, 1, "TXGBE_RST"},
+ {TXGBE_STAT, 1, 1, "TXGBE_STAT"},
+ {TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
+ {TXGBE_SDP, 1, 1, "TXGBE_SDP"},
+ {TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
+ {TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_nvm[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_interrupt[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_fctl_others[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_rxdma[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_rx[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_tx[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_wakeup[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_dcb[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_mac[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_diagnostic[] = {
+ {0, 0, 0, ""},
+};
+
+/* PF registers */
+static const struct reg_info *txgbe_regs_others[] = {
+ txgbe_regs_general,
+ txgbe_regs_nvm,
+ txgbe_regs_interrupt,
+ txgbe_regs_fctl_others,
+ txgbe_regs_rxdma,
+ txgbe_regs_rx,
+ txgbe_regs_tx,
+ txgbe_regs_wakeup,
+ txgbe_regs_dcb,
+ txgbe_regs_mac,
+ txgbe_regs_diagnostic,
+ NULL};
+
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
+static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
+static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
+static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
+static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
+static void txgbe_dev_close(struct rte_eth_dev *dev);
+static int txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int txgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static int txgbe_dev_xstats_reset(struct rte_eth_dev *dev);
+static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
+static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
+
+static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
+static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
+static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
+static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
+static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void txgbe_dev_interrupt_handler(void *param);
+static void txgbe_dev_interrupt_delayed_handler(void *param);
+static int txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
+static void txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
+
+/* For Eth VMDQ APIs support */
+static int txgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
+ rte_ether_addr * mac_addr, uint8_t on);
+static int txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
+static int txgbe_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on);
+static int txgbe_mirror_rule_reset(struct rte_eth_dev *dev,
+ uint8_t rule_id);
+static int txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static void txgbe_configure_msix(struct rte_eth_dev *dev);
+
+static int txgbe_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter);
+static int txgbe_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+static int txgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg);
+
+static int txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+static int txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel);
+static int txgbe_filter_restore(struct rte_eth_dev *dev);
+static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
+
+#define TXGBE_SET_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (h)->bitmap[idx] |= 1 << bit;\
+ } while (0)
+
+#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (h)->bitmap[idx] &= ~(1 << bit);\
+ } while (0)
+
+#define TXGBE_GET_HWSTRIP(h, q, r) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+ (r) = (h)->bitmap[idx] >> bit & 1;\
+ } while (0)
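+
+/* Example (assuming 32-bit bitmap words): queue 37 maps to bitmap[1]
+ * bit 5 (idx = 37 / 32, bit = 37 % 32), so TXGBE_SET_HWSTRIP(h, 37)
+ * sets h->bitmap[1] |= 1 << 5.
+ */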
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_txgbe_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = TXGBE_RING_DESC_MAX,
+ .nb_min = TXGBE_RING_DESC_MIN,
+ .nb_align = TXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = TXGBE_RING_DESC_MAX,
+ .nb_min = TXGBE_RING_DESC_MIN,
+ .nb_align = TXGBE_TXD_ALIGN,
+ .nb_seg_max = TXGBE_TX_MAX_SEG,
+ .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
+};
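+
+/* These limits bound rte_eth_rx/tx_queue_setup(): nb_desc must lie in
+ * [TXGBE_RING_DESC_MIN, TXGBE_RING_DESC_MAX] and be a multiple of the
+ * descriptor alignment, with TX further capped at TXGBE_TX_MAX_SEG
+ * segments per packet.
+ */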
+
+static const struct eth_dev_ops txgbe_eth_dev_ops;
+
+#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
+#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
+ /* MNG RxTx */
+ HW_XSTAT(mng_bmc2host_packets),
+ HW_XSTAT(mng_host2bmc_packets),
+ /* Basic RxTx */
+ HW_XSTAT(rx_packets),
+ HW_XSTAT(tx_packets),
+ HW_XSTAT(rx_bytes),
+ HW_XSTAT(tx_bytes),
+ HW_XSTAT(rx_total_bytes),
+ HW_XSTAT(rx_total_packets),
+ HW_XSTAT(tx_total_packets),
+ HW_XSTAT(rx_total_missed_packets),
+ HW_XSTAT(rx_broadcast_packets),
+ HW_XSTAT(rx_multicast_packets),
+ HW_XSTAT(rx_management_packets),
+ HW_XSTAT(tx_management_packets),
+ HW_XSTAT(rx_management_dropped),
+
+ /* Basic Error */
+ HW_XSTAT(rx_crc_errors),
+ HW_XSTAT(rx_illegal_byte_errors),
+ HW_XSTAT(rx_error_bytes),
+ HW_XSTAT(rx_mac_short_packet_dropped),
+ HW_XSTAT(rx_length_errors),
+ HW_XSTAT(rx_undersize_errors),
+ HW_XSTAT(rx_fragment_errors),
+ HW_XSTAT(rx_oversize_errors),
+ HW_XSTAT(rx_jabber_errors),
+ HW_XSTAT(rx_l3_l4_xsum_error),
+ HW_XSTAT(mac_local_errors),
+ HW_XSTAT(mac_remote_errors),
+
+ /* Flow Director */
+ HW_XSTAT(flow_director_added_filters),
+ HW_XSTAT(flow_director_removed_filters),
+ HW_XSTAT(flow_director_filter_add_errors),
+ HW_XSTAT(flow_director_filter_remove_errors),
+ HW_XSTAT(flow_director_matched_filters),
+ HW_XSTAT(flow_director_missed_filters),
+
+ /* FCoE */
+ HW_XSTAT(rx_fcoe_crc_errors),
+ HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
+ HW_XSTAT(rx_fcoe_dropped),
+ HW_XSTAT(rx_fcoe_packets),
+ HW_XSTAT(tx_fcoe_packets),
+ HW_XSTAT(rx_fcoe_bytes),
+ HW_XSTAT(tx_fcoe_bytes),
+ HW_XSTAT(rx_fcoe_no_ddp),
+ HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
+
+ /* MACSEC */
+ HW_XSTAT(tx_macsec_pkts_untagged),
+ HW_XSTAT(tx_macsec_pkts_encrypted),
+ HW_XSTAT(tx_macsec_pkts_protected),
+ HW_XSTAT(tx_macsec_octets_encrypted),
+ HW_XSTAT(tx_macsec_octets_protected),
+ HW_XSTAT(rx_macsec_pkts_untagged),
+ HW_XSTAT(rx_macsec_pkts_badtag),
+ HW_XSTAT(rx_macsec_pkts_nosci),
+ HW_XSTAT(rx_macsec_pkts_unknownsci),
+ HW_XSTAT(rx_macsec_octets_decrypted),
+ HW_XSTAT(rx_macsec_octets_validated),
+ HW_XSTAT(rx_macsec_sc_pkts_unchecked),
+ HW_XSTAT(rx_macsec_sc_pkts_delayed),
+ HW_XSTAT(rx_macsec_sc_pkts_late),
+ HW_XSTAT(rx_macsec_sa_pkts_ok),
+ HW_XSTAT(rx_macsec_sa_pkts_invalid),
+ HW_XSTAT(rx_macsec_sa_pkts_notvalid),
+ HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
+ HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
+
+ /* MAC RxTx */
+ HW_XSTAT(rx_size_64_packets),
+ HW_XSTAT(rx_size_65_to_127_packets),
+ HW_XSTAT(rx_size_128_to_255_packets),
+ HW_XSTAT(rx_size_256_to_511_packets),
+ HW_XSTAT(rx_size_512_to_1023_packets),
+ HW_XSTAT(rx_size_1024_to_max_packets),
+ HW_XSTAT(tx_size_64_packets),
+ HW_XSTAT(tx_size_65_to_127_packets),
+ HW_XSTAT(tx_size_128_to_255_packets),
+ HW_XSTAT(tx_size_256_to_511_packets),
+ HW_XSTAT(tx_size_512_to_1023_packets),
+ HW_XSTAT(tx_size_1024_to_max_packets),
+
+ /* Flow Control */
+ HW_XSTAT(tx_xon_packets),
+ HW_XSTAT(rx_xon_packets),
+ HW_XSTAT(tx_xoff_packets),
+ HW_XSTAT(rx_xoff_packets),
+
+ HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
+ HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
+ HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
+ HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
+};
+
+#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
+ sizeof(rte_txgbe_stats_strings[0]))
+
+/* Per-priority statistics */
+#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
+ UP_XSTAT(rx_up_packets),
+ UP_XSTAT(tx_up_packets),
+ UP_XSTAT(rx_up_bytes),
+ UP_XSTAT(tx_up_bytes),
+ UP_XSTAT(rx_up_drop_packets),
+
+ UP_XSTAT(tx_up_xon_packets),
+ UP_XSTAT(rx_up_xon_packets),
+ UP_XSTAT(tx_up_xoff_packets),
+ UP_XSTAT(rx_up_xoff_packets),
+ UP_XSTAT(rx_up_dropped),
+ UP_XSTAT(rx_up_mbuf_alloc_errors),
+ UP_XSTAT(tx_up_xon2off_packets),
+};
+
+#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
+ sizeof(rte_txgbe_up_strings[0]))
+
+/* Per-queue statistics */
+#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
+static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
+ QP_XSTAT(rx_qp_packets),
+ QP_XSTAT(tx_qp_packets),
+ QP_XSTAT(rx_qp_bytes),
+ QP_XSTAT(tx_qp_bytes),
+ QP_XSTAT(rx_qp_mc_packets),
+};
+
+#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
+ sizeof(rte_txgbe_qp_strings[0]))
+
+static inline int
+txgbe_is_sfp(struct txgbe_hw *hw)
+{
+ switch (hw->phy.type) {
+ case txgbe_phy_sfp_avago:
+ case txgbe_phy_sfp_ftl:
+ case txgbe_phy_sfp_intel:
+ case txgbe_phy_sfp_unknown:
+ case txgbe_phy_sfp_tyco_passive:
+ case txgbe_phy_sfp_unknown_passive:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static inline int32_t
+txgbe_pf_reset_hw(struct txgbe_hw *hw)
+{
+ uint32_t ctrl_ext;
+ int32_t status;
+
+ status = hw->mac.reset_hw(hw);
+
+ ctrl_ext = rd32(hw, TXGBE_PORTCTL);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
+ wr32(hw, TXGBE_PORTCTL, ctrl_ext);
+ txgbe_flush(hw);
+
+ if (status == TXGBE_ERR_SFP_NOT_PRESENT)
+ status = 0;
+ return status;
+}
+
+static inline void
+txgbe_enable_intr(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ wr32(hw, TXGBE_IENMISC, intr->mask_misc);
+ wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
+ wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
+ txgbe_flush(hw);
+}
+
+static void
+txgbe_disable_intr(struct txgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
+ wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
+ wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
+ txgbe_flush(hw);
+}
+
+static int
+txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_stat_mappings *stat_mappings =
+ TXGBE_DEV_STAT_MAPPINGS(eth_dev);
+ uint32_t qsmr_mask = 0;
+ uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
+ uint32_t q_map;
+ uint8_t n, offset;
+
+ if (hw->mac.type != txgbe_mac_raptor)
+ return -ENOSYS;
+
+ PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
+ (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+ queue_id, stat_idx);
+
+ n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
+ if (n >= TXGBE_NB_STAT_MAPPING) {
+ PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
+ return -EIO;
+ }
+ offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
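+	/* Assuming the ixgbe-style layout of 4 fields of 8 bits per QSM
+	 * register: mapping RX queue 13 to stat index 3 gives
+	 * n = 13 / 4 = 3, offset = 13 % 4 = 1, so rqsm[3] |= 3 << 8.
+	 */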
+
+ /* Now clear any previous stat_idx set */
+ clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+ if (!is_rx)
+ stat_mappings->tqsm[n] &= ~clearing_mask;
+ else
+ stat_mappings->rqsm[n] &= ~clearing_mask;
+
+ q_map = (uint32_t)stat_idx;
+ q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
+ qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+ if (!is_rx)
+ stat_mappings->tqsm[n] |= qsmr_mask;
+ else
+ stat_mappings->rqsm[n] |= qsmr_mask;
+
+ PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
+ (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+ queue_id, stat_idx);
+ PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+ is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
+ return 0;
+}
+
+static void
+txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
+{
+ int i;
+ u8 bwgp;
+ struct txgbe_dcb_tc_config *tc;
+
+ UNREFERENCED_PARAMETER(hw);
+
+ dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
+ dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
+ bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
+ tc->pfc = txgbe_dcb_pfc_disabled;
+ }
+
+ /* Initialize default user to priority mapping, UPx->TC0 */
+ tc = &dcb_config->tc_config[0];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+ for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
+ dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
+ dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
+ }
+ dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
+ dcb_config->pfc_mode_enable = false;
+ dcb_config->vt_mode = true;
+ dcb_config->round_robin_enable = false;
+ /* support all DCB capabilities */
+ dcb_config->support.capabilities = 0xFF;
+}
+
+/*
+ * Ensure that all locks are released before first NVM or PHY access
+ */
+static void
+txgbe_swfw_lock_reset(struct txgbe_hw *hw)
+{
+ uint16_t mask;
+
+ /*
+ * These ones are more tricky since they are common to all ports; but
+ * swfw_sync retries last long enough (1s) to be almost sure that if
+ * lock can not be taken it is due to an improper lock of the
+ * semaphore.
+ */
+ mask = TXGBE_MNGSEM_SWPHY |
+ TXGBE_MNGSEM_SWMBX |
+ TXGBE_MNGSEM_SWFLASH;
+	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
+		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
+ hw->mac.release_swfw_sync(hw, mask);
+}
+
+static int
+eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
+{
+ struct txgbe_adapter *ad = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
+ struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
+ struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
+ struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ const struct rte_memzone *mz;
+ uint32_t ctrl_ext;
+ uint16_t csum;
+ int err, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txgbe_dev_macsec_setting_reset(eth_dev);
+
+ eth_dev->dev_ops = &txgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX and TX function.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ struct txgbe_tx_queue *txq;
+		/* The TX queue function in the primary process was set by the
+		 * last queue initialized; Tx queues may not have been
+		 * initialized by the primary process yet.
+		 */
+ if (eth_dev->data->tx_queues) {
+ txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
+ txgbe_set_tx_function(eth_dev, txq);
+ } else {
+ /* Use default TX function if we get here */
+ PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
+ "Using default TX function.");
+ }
+
+ txgbe_set_rx_function(eth_dev);
+
+ return 0;
+ }
+
+ rte_atomic32_clear(&ad->link_thread_running);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ /* Vendor and Device ID need to be set before init of shared code */
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->allow_unsupported_sfp = 1;
+
+ /* Reserve memory for interrupt status block */
+ mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
+ 16, TXGBE_ALIGN, SOCKET_ID_ANY);
+ if (mz == NULL) {
+ return -ENOMEM;
+ }
+ hw->isb_dma = TMZ_PADDR(mz);
+ hw->isb_mem = TMZ_VADDR(mz);
+
+ /* Initialize the shared code (base driver) */
+ err = txgbe_init_shared_code(hw);
+ if (err != 0) {
+ PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
+ return -EIO;
+ }
+
+ if (hw->mac.fw_recovery_mode && hw->mac.fw_recovery_mode(hw)) {
+ PMD_INIT_LOG(ERR, "\nERROR: "
+ "Firmware recovery mode detected. Limiting functionality.\n"
+ "Refer to the WangXun(R) Ethernet Adapters and Devices "
+ "User Guide for details on firmware recovery mode.");
+ return -EIO;
+ }
+
+ /* Unlock any pending hardware semaphore */
+ txgbe_swfw_lock_reset(hw);
+
+#ifdef RTE_LIBRTE_SECURITY
+ /* Initialize security_ctx only for primary process*/
+ if (txgbe_ipsec_ctx_create(eth_dev))
+ return -ENOMEM;
+#endif
+
+ /* Initialize DCB configuration*/
+ memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
+ txgbe_dcb_init(hw, dcb_config);
+
+ /* Get Hardware Flow Control setting */
+ hw->fc.requested_mode = txgbe_fc_full;
+ hw->fc.current_mode = txgbe_fc_full;
+ hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
+ hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
+ }
+ hw->fc.send_xon = 1;
+
+ err = hw->rom.init_params(hw);
+ if (err != 0) {
+ PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
+ return -EIO;
+ }
+
+ /* Make sure we have a good EEPROM before we read from it */
+ err = hw->rom.validate_checksum(hw, &csum);
+ if (err != 0) {
+ PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
+ return -EIO;
+ }
+
+ err = hw->mac.init_hw(hw);
+
+	/*
+	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
+	 * is called too soon after the kernel driver unbinding/binding occurs.
+	 * The failure occurs in txgbe_identify_phy() for all devices,
+	 * but for non-copper devices, txgbe_identify_sfp_module() is
+	 * also called. See txgbe_identify_phy(). The reason for the
+	 * failure is not known, and only occurs when virtualisation features
+	 * are disabled in the BIOS. A delay of 200ms was found to be enough by
+	 * trial-and-error, and is doubled to be safe.
+	 */
+ if (err && (hw->phy.media_type == txgbe_media_type_copper)) {
+ rte_delay_ms(200);
+ err = hw->mac.init_hw(hw);
+ }
+
+ if (err == TXGBE_ERR_SFP_NOT_PRESENT)
+ err = 0;
+
+ if (err == TXGBE_ERR_EEPROM_VERSION) {
+ PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
+ "LOM. Please be aware there may be issues associated "
+ "with your hardware.");
+ PMD_INIT_LOG(ERR, "If you are experiencing problems "
+ "please contact your hardware representative "
+ "who provided you with this hardware.");
+ } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED)
+ PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
+ if (err) {
+ PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
+ return -EIO;
+ }
+
+ /* Reset the hw statistics */
+ txgbe_dev_stats_reset(eth_dev);
+
+ /* disable interrupt */
+ txgbe_disable_intr(hw);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
+ hw->mac.num_rar_entries, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ return -ENOMEM;
+ }
+
+ /* Copy the permanent MAC address */
+ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
+			&eth_dev->data->mac_addrs[0]);
+
+ /* Allocate memory for storing hash filter MAC addresses */
+ eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
+ TXGBE_VMDQ_NUM_UC_MAC, 0);
+ if (eth_dev->data->hash_mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
+ return -ENOMEM;
+ }
+
+ /* Pass the information to the rte_eth_dev_close() that it should also
+ * release the private port resources.
+ */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* initialize the hw strip bitmap*/
+ memset(hwstrip, 0, sizeof(*hwstrip));
+
+ /* initialize PF if max_vfs not zero */
+ txgbe_pf_host_init(eth_dev);
+
+ ctrl_ext = rd32(hw, TXGBE_PORTCTL);
+ /* let hardware know driver is loaded */
+ ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
+ wr32(hw, TXGBE_PORTCTL, ctrl_ext);
+ txgbe_flush(hw);
+
+ if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
+ PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
+ (int)hw->mac.type, (int)hw->phy.type,
+ (int)hw->phy.sfp_type);
+ else
+ PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
+ (int)hw->mac.type, (int) hw->phy.type);
+
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ rte_intr_callback_register(intr_handle,
+ txgbe_dev_interrupt_handler, eth_dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* enable support intr */
+ txgbe_enable_intr(eth_dev);
+
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct txgbe_filter_info));
+
+ /* initialize 5tuple filter list */
+ TAILQ_INIT(&filter_info->fivetuple_list);
+
+ /* initialize flow director filter list & hash */
+ txgbe_fdir_filter_init(eth_dev);
+
+ /* initialize l2 tunnel filter list & hash */
+ txgbe_l2_tn_filter_init(eth_dev);
+
+ /* initialize flow filter lists */
+ txgbe_filterlist_init();
+
+ /* initialize bandwidth configuration info */
+ memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
+
+ /* initialize Traffic Manager configuration */
+ txgbe_tm_conf_init(eth_dev);
+
+ return 0;
+}
+
+static int
+eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ txgbe_dev_close(eth_dev);
+
+ return 0;
+}
+
+static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
+ struct txgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple,
+ entries);
+ rte_free(p_5tuple);
+ }
+ memset(filter_info->fivetuple_mask, 0,
+ sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
+
+ return 0;
+}
+
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+ struct txgbe_fdir_filter *fdir_filter;
+
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_handle)
+ rte_hash_free(fdir_info->hash_handle);
+
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ return 0;
+}
+
+static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
+ struct txgbe_l2_tn_filter *l2_tn_filter;
+
+ if (l2_tn_info->hash_map)
+ rte_free(l2_tn_info->hash_map);
+ if (l2_tn_info->hash_handle)
+ rte_hash_free(l2_tn_info->hash_handle);
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+ l2_tn_filter,
+ entries);
+ rte_free(l2_tn_filter);
+ }
+
+ return 0;
+}
+
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = TXGBE_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct txgbe_atr_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", TDEV_NAME(eth_dev));
+ fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("txgbe",
+ sizeof(struct txgbe_fdir_filter *) *
+ TXGBE_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ return -ENOMEM;
+ }
+ fdir_info->mask_added = FALSE;
+
+ return 0;
+}
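+
+/*
+ * The flow director state built above pairs an rte_hash (keyed by the full
+ * struct txgbe_atr_input and hashed with rte_hash_crc) with a flat hash_map
+ * array of filter pointers sized to the hash capacity: the position
+ * returned by a hash add/lookup can index hash_map directly for O(1)
+ * access to a filter, while the TAILQ keeps an iterable list of all
+ * installed filters for flush/uninit.
+ */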
+
+static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
+ char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters l2_tn_hash_params = {
+ .name = l2_tn_hash_name,
+ .entries = TXGBE_MAX_L2_TN_FILTER_NUM,
+ .key_len = sizeof(struct txgbe_l2_tn_key),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&l2_tn_info->l2_tn_list);
+ snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+ "l2_tn_%s", TDEV_NAME(eth_dev));
+ l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+ if (!l2_tn_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+ return -EINVAL;
+ }
+ l2_tn_info->hash_map = rte_zmalloc("txgbe",
+ sizeof(struct txgbe_l2_tn_filter *) *
+ TXGBE_MAX_L2_TN_FILTER_NUM,
+ 0);
+ if (!l2_tn_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for L2 TN hash map!");
+ return -ENOMEM;
+ }
+ l2_tn_info->e_tag_en = FALSE;
+ l2_tn_info->e_tag_fwd_en = FALSE;
+ l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
+
+ return 0;
+}
+
+static int
+eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *pf_ethdev;
+ struct rte_eth_devargs eth_da;
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+						&eth_da);
+ if (retval)
+ return retval;
+ } else
+		memset(&eth_da, 0, sizeof(eth_da));
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct txgbe_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_txgbe_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ /* probe VF representor ports */
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct txgbe_vf_info *vfinfo;
+ struct txgbe_vf_representor representor;
+
+ vfinfo = *TXGBE_DEV_VFDATA(pf_ethdev);
+ if (vfinfo == NULL) {
+ PMD_DRV_LOG(ERR,
+ "no virtual functions supported by PF");
+ break;
+ }
+
+ representor.vf_id = eth_da.representor_ports[i];
+ representor.switch_domain_id = vfinfo->switch_domain_id;
+ representor.pf_ethdev = pf_ethdev;
+
+ /* representor port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name,
+ eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct txgbe_vf_representor), NULL, NULL,
+ txgbe_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create txgbe vf "
+ "representor %s.", name);
+ }
+
+ return 0;
+}
+
+static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_destroy(ethdev, txgbe_vf_representor_uninit);
+ else
+ return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_txgbe_pmd = {
+ .id_table = pci_id_txgbe_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
+ RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_txgbe_pci_probe,
+ .remove = eth_txgbe_pci_remove,
+};
+
+static int
+txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
+ vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
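+
+/*
+ * VFTA lookup example for the function above: each 32-bit VLANTBL entry
+ * tracks 32 VLAN IDs, so for vlan_id = 100:
+ *   vid_idx = (100 >> 5) & 0x7F = 3    -> VLANTBL[3]
+ *   vid_bit = 1 << (100 & 0x1F) = 0x10 -> bit 4
+ * "on" ORs bit 4 into VLANTBL[3]; "off" ANDs it out, and the shadow
+ * copy is kept in sync so the table can be replayed after a reset.
+ */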
+
+static void
+txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_rx_queue *rxq;
+ bool restart;
+ uint32_t rxcfg, rxbal, rxbah;
+
+ if (on)
+ txgbe_vlan_hw_strip_enable(dev, queue);
+ else
+ txgbe_vlan_hw_strip_disable(dev, queue);
+
+ rxq = dev->data->rx_queues[queue];
+ rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
+ rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
+ rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ restart = (rxcfg & TXGBE_RXCFG_ENA) &&
+ !(rxcfg & TXGBE_RXCFG_VLAN);
+ rxcfg |= TXGBE_RXCFG_VLAN;
+ } else {
+ restart = (rxcfg & TXGBE_RXCFG_ENA) &&
+ (rxcfg & TXGBE_RXCFG_VLAN);
+ rxcfg &= ~TXGBE_RXCFG_VLAN;
+ }
+ rxcfg &= ~TXGBE_RXCFG_ENA;
+
+ if (restart) {
+ /* set vlan strip for ring */
+ txgbe_dev_rx_queue_stop(dev, queue);
+ wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
+ wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
+ wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
+ txgbe_dev_rx_queue_start(dev, queue);
+ }
+}
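+
+/*
+ * Note on the restart logic above: the new strip setting is applied by
+ * stopping the ring, restoring its base-address registers, writing the
+ * updated RXCFG (with the enable bit cleared), and starting the queue
+ * again -- and this round trip is taken only when the strip bit actually
+ * changes on a ring that is currently enabled.
+ */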
+
+static int
+txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret = 0;
+ uint32_t portctrl, vlan_ext, qinq;
+
+ portctrl = rd32(hw, TXGBE_PORTCTL);
+
+ vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
+ qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
+ switch (vlan_type) {
+ case ETH_VLAN_TYPE_INNER:
+ if (vlan_ext) {
+ wr32m(hw, TXGBE_VLANCTL,
+ TXGBE_VLANCTL_TPID_MASK,
+ TXGBE_VLANCTL_TPID(tpid));
+ wr32m(hw, TXGBE_DMATXCTRL,
+ TXGBE_DMATXCTRL_TPID_MASK,
+ TXGBE_DMATXCTRL_TPID(tpid));
+ } else {
+ ret = -ENOTSUP;
+			PMD_DRV_LOG(ERR, "Inner type is not supported"
+				" in single VLAN mode");
+ }
+
+ if (qinq) {
+ wr32m(hw, TXGBE_TAGTPID(0),
+ TXGBE_TAGTPID_LSB_MASK,
+ TXGBE_TAGTPID_LSB(tpid));
+ }
+ break;
+ case ETH_VLAN_TYPE_OUTER:
+ if (vlan_ext) {
+			/* Only the high 16 bits are valid */
+ wr32m(hw, TXGBE_EXTAG,
+ TXGBE_EXTAG_VLAN_MASK,
+ TXGBE_EXTAG_VLAN(tpid));
+ } else {
+ wr32m(hw, TXGBE_VLANCTL,
+ TXGBE_VLANCTL_TPID_MASK,
+ TXGBE_VLANCTL_TPID(tpid));
+ wr32m(hw, TXGBE_DMATXCTRL,
+ TXGBE_DMATXCTRL_TPID_MASK,
+ TXGBE_DMATXCTRL_TPID(tpid));
+ }
+
+ if (qinq) {
+ wr32m(hw, TXGBE_TAGTPID(0),
+ TXGBE_TAGTPID_MSB_MASK,
+ TXGBE_TAGTPID_MSB(tpid));
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+void
+txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t vlnctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Disable */
+ vlnctrl = rd32(hw, TXGBE_VLANCTL);
+ vlnctrl &= ~TXGBE_VLANCTL_VFE;
+ wr32(hw, TXGBE_VLANCTL, vlnctrl);
+}
+
+void
+txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
+ uint32_t vlnctrl;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Filter Table Enable */
+ vlnctrl = rd32(hw, TXGBE_VLANCTL);
+ vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
+ vlnctrl |= TXGBE_VLANCTL_VFE;
+ wr32(hw, TXGBE_VLANCTL, vlnctrl);
+
+ /* write whatever is in local vfta copy */
+ for (i = 0; i < TXGBE_VFTA_SIZE; i++)
+ wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
+}
+
+void
+txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
+{
+ struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
+ struct txgbe_rx_queue *rxq;
+
+ if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
+ return;
+
+ if (on)
+ TXGBE_SET_HWSTRIP(hwstrip, queue);
+ else
+ TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+ if (queue >= dev->data->nb_rx_queues)
+ return;
+
+ rxq = dev->data->rx_queues[queue];
+
+ if (on) {
+ rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ } else {
+ rxq->vlan_flags = PKT_RX_VLAN;
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+}
+
+static void
+txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, TXGBE_RXCFG(queue));
+ ctrl &= ~TXGBE_RXCFG_VLAN;
+ wr32(hw, TXGBE_RXCFG(queue), ctrl);
+
+	/* record this setting in the per-queue HW strip bitmap */
+ txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
+}
+
+static void
+txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, TXGBE_RXCFG(queue));
+ ctrl |= TXGBE_RXCFG_VLAN;
+ wr32(hw, TXGBE_RXCFG(queue), ctrl);
+
+	/* record this setting in the per-queue HW strip bitmap */
+ txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
+}
+
+static void
+txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, TXGBE_PORTCTL);
+ ctrl &= ~TXGBE_PORTCTL_VLANEXT;
+ ctrl &= ~TXGBE_PORTCTL_QINQ;
+ wr32(hw, TXGBE_PORTCTL, ctrl);
+}
+
+static void
+txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ctrl = rd32(hw, TXGBE_PORTCTL);
+ ctrl |= TXGBE_PORTCTL_VLANEXT;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
+ txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
+ ctrl |= TXGBE_PORTCTL_QINQ;
+ wr32(hw, TXGBE_PORTCTL, ctrl);
+}
+
+void
+txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+ struct txgbe_rx_queue *rxq;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ txgbe_vlan_strip_queue_set(dev, i, 1);
+ } else {
+ txgbe_vlan_strip_queue_set(dev, i, 0);
+ }
+ }
+}
+
+void
+txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
+{
+ uint16_t i;
+ struct rte_eth_rxmode *rxmode;
+ struct txgbe_rx_queue *rxq;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ else
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ }
+}
+
+static int
+txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ txgbe_vlan_hw_strip_config(dev);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ txgbe_vlan_hw_filter_enable(dev);
+ else
+ txgbe_vlan_hw_filter_disable(dev);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ txgbe_vlan_hw_extend_enable(dev);
+ else
+ txgbe_vlan_hw_extend_disable(dev);
+ }
+
+ return 0;
+}
+
+static int
+txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ txgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ txgbe_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
+static void
+txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	/* VLANCTL: enable vlan filtering and allow all vlan tags through */
+ uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
+
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+}
+
+static int
+txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ switch (nb_rx_q) {
+ case 1:
+ case 2:
+ RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+ break;
+ case 4:
+ RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+ TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+ pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ return 0;
+}
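+
+/*
+ * Sizing example for the pool split above (illustrative; assuming
+ * TXGBE_MAX_RX_QUEUE_NUM == 128, as implied by the 64/32 pool choices):
+ *   nb_rx_q = 1 or 2 -> 64 pools, nb_q_per_pool = 128 / 64 = 2
+ *   nb_rx_q = 4      -> 32 pools, nb_q_per_pool = 128 / 32 = 4
+ * With max_vfs = 7 and nb_q_per_pool = 2, the PF default pool then
+ * starts at queue index 7 * 2 = 14.
+ */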
+
+static int
+txgbe_check_mq_mode(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+ /* check multi-queue mode */
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+ break;
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
+ PMD_INIT_LOG(ERR, "SRIOV active,"
+ " unsupported mq_mode rx %d.",
+ dev_conf->rxmode.mq_mode);
+ return -EINVAL;
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
+ if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " invalid queue number"
+ " for VMDQ RSS, allowed"
+					" values are 1, 2 or 4.");
+ return -EINVAL;
+ }
+ break;
+ case ETH_MQ_RX_VMDQ_ONLY:
+ case ETH_MQ_RX_NONE:
+			/* if no mq mode is configured, use the default scheme */
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ break;
+ default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+ /* SRIOV only works in VMDq enable mode */
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " wrong mq_mode rx %d.",
+ dev_conf->rxmode.mq_mode);
+ return -EINVAL;
+ }
+
+ switch (dev_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ break;
+ default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+ break;
+ }
+
+ /* check valid queue number */
+ if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+ (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+ PMD_INIT_LOG(ERR, "SRIOV is active,"
+ " nb_rx_q=%d nb_tx_q=%d queue number"
+ " must be less than or equal to %d.",
+ nb_rx_q, nb_tx_q,
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+ return -EINVAL;
+ }
+ } else {
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
+ " not supported.");
+ return -EINVAL;
+ }
+		/* check configuration for VMDq+DCB mode */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_conf *conf;
+
+ if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
+ TXGBE_VMDQ_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+ " nb_queue_pools must be %d or %d.",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return -EINVAL;
+ }
+ }
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+ if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
+ TXGBE_VMDQ_DCB_NB_QUEUES);
+ return -EINVAL;
+ }
+ conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+ if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+ " nb_queue_pools != %d and"
+ " nb_queue_pools != %d.",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return -EINVAL;
+ }
+ }
+
+ /* For DCB mode check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ const struct rte_eth_dcb_rx_conf *conf;
+
+ conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+ " and nb_tcs != %d.",
+ ETH_4_TCS, ETH_8_TCS);
+ return -EINVAL;
+ }
+ }
+
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ const struct rte_eth_dcb_tx_conf *conf;
+
+ conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
+ if (!(conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+ " and nb_tcs != %d.",
+ ETH_4_TCS, ETH_8_TCS);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
+static int
+txgbe_dev_configure(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+ /* multiple queue mode checking */
+ ret = txgbe_check_mq_mode(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
+ ret);
+ return ret;
+ }
+
+ /* set flag to update link status after init */
+ intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+
+	/*
+	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
+	 * allocation preconditions, this flag will be reset.
+	 */
+ adapter->rx_bulk_alloc_allowed = true;
+
+ return 0;
+}
+
+static void
+txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ uint32_t gpie;
+
+ gpie = rd32(hw, TXGBE_GPIOINTEN);
+ gpie |= TXGBE_GPIOBIT_6;
+ wr32(hw, TXGBE_GPIOINTEN, gpie);
+ intr->mask_misc |= TXGBE_ICRMISC_GPIO;
+}
+
+int
+txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_vf_info *vfinfo;
+ struct rte_eth_link link;
+ uint8_t nb_q_per_pool;
+ uint32_t queue_stride;
+ uint32_t queue_idx, idx = 0, vf_idx;
+ uint32_t queue_end;
+ uint16_t total_rate = 0;
+ struct rte_pci_device *pci_dev;
+ int ret;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
+ if (ret < 0)
+ return ret;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (tx_rate > link.link_speed)
+ return -EINVAL;
+
+ if (q_msk == 0)
+ return 0;
+
+ hw = TXGBE_DEV_HW(dev);
+ vfinfo = *(TXGBE_DEV_VFDATA(dev));
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ queue_idx = vf * queue_stride;
+ queue_end = queue_idx + nb_q_per_pool - 1;
+ if (queue_end >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (vfinfo) {
+ for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
+ if (vf_idx == vf)
+ continue;
+ for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+ idx++)
+ total_rate += vfinfo[vf_idx].tx_rate[idx];
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ /* Store tx_rate for this vf. */
+ for (idx = 0; idx < nb_q_per_pool; idx++) {
+ if (((uint64_t)0x1 << idx) & q_msk) {
+ if (vfinfo[vf].tx_rate[idx] != tx_rate)
+ vfinfo[vf].tx_rate[idx] = tx_rate;
+ total_rate += tx_rate;
+ }
+ }
+
+ if (total_rate > dev->data->dev_link.link_speed) {
+		/* Reset the stored TX rate of the VF if it would cause the
+		 * link speed to be exceeded.
+		 */
+ memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+ return -EINVAL;
+ }
+
+ /* Set ARBTXRATE of each queue/pool for vf X */
+ for (; queue_idx <= queue_end; queue_idx++) {
+ if (0x1 & q_msk)
+ txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+ q_msk = q_msk >> 1;
+ }
+
+ return 0;
+}
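+
+/*
+ * Example of the per-VF queue window computed above (illustrative;
+ * assuming a 128-queue device with 64 active pools, so queue_stride = 2
+ * and nb_q_per_pool = 2): vf = 3 gives queue_idx = 6 and queue_end = 7,
+ * and q_msk = 0x3 then applies tx_rate to both queues 6 and 7 via
+ * txgbe_set_queue_rate_limit().
+ */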
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+txgbe_dev_start(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
+ int err;
+ bool link_up = false, negotiate = 0;
+ uint32_t speed = 0;
+ uint32_t allowed_speeds = 0;
+ int mask = 0;
+ int status;
+ uint16_t vf, idx;
+ uint32_t *link_speeds;
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_macsec_setting *macsec_setting = TXGBE_DEV_MACSEC_SETTING(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* TXGBE devices don't support:
+ * - half duplex (checked afterwards for valid speeds)
+ * - fixed speed: TODO implement
+ */
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+ PMD_INIT_LOG(ERR,
+			"Invalid link_speeds for port %u, fixed speed not supported",
+ dev->data->port_id);
+ return -EINVAL;
+ }
+
+ /* Stop the link setup handler before resetting the HW. */
+ rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* stop adapter */
+ hw->adapter_stopped = 0;
+ txgbe_stop_hw(hw);
+
+ /* reinitialize adapter
+ * this calls reset and start
+ */
+ hw->nb_rx_queues = dev->data->nb_rx_queues;
+ hw->nb_tx_queues = dev->data->nb_tx_queues;
+ status = txgbe_pf_reset_hw(hw);
+ if (status != 0)
+ return -1;
+ hw->mac.start_hw(hw);
+ hw->mac.get_link_status = true;
+
+ /* configure PF module if SRIOV enabled */
+ txgbe_pf_host_configure(dev);
+
+ txgbe_dev_phy_intr_setup(dev);
+
+ /* check and configure queue intr-vector mapping */
+ if ((rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+	/* configure MSI-X for sleep until Rx interrupt */
+ txgbe_configure_msix(dev);
+
+ /* initialize transmission unit */
+ txgbe_dev_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ err = txgbe_dev_rx_init(dev);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+ goto error;
+ }
+
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ err = txgbe_vlan_offload_config(dev, mask);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+ goto error;
+ }
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ /* Enable vlan filtering for VMDq */
+ txgbe_vmdq_vlan_hw_filter_enable(dev);
+ }
+
+ /* Configure DCB hw */
+ txgbe_configure_pb(dev);
+ txgbe_configure_port(dev);
+ txgbe_configure_dcb(dev);
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ err = txgbe_fdir_configure(dev);
+ if (err)
+ goto error;
+ }
+
+ /* Restore vf rate limit */
+ if (vfinfo != NULL) {
+ for (vf = 0; vf < pci_dev->max_vfs; vf++)
+ for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+ if (vfinfo[vf].tx_rate[idx] != 0)
+ txgbe_set_vf_rate_limit(dev, vf,
+ vfinfo[vf].tx_rate[idx],
+ 1 << idx);
+ }
+
+ err = txgbe_dev_rxtx_start(dev);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
+ goto error;
+ }
+
+ /* Skip link setup if loopback mode is enabled. */
+ if (hw->mac.type == txgbe_mac_raptor &&
+ dev->data->dev_conf.lpbk_mode)
+ goto skip_link_setup;
+
+ if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
+ err = hw->mac.setup_sfp(hw);
+ if (err)
+ goto error;
+ }
+
+ if (hw->phy.media_type == txgbe_media_type_copper) {
+ /* Turn on the copper */
+ hw->phy.set_phy_power(hw, true);
+ } else {
+ /* Turn on the laser */
+ hw->mac.enable_tx_laser(hw);
+ }
+
+ err = hw->mac.check_link(hw, &speed, &link_up, 0);
+ if (err)
+ goto error;
+ dev->data->dev_link.link_status = link_up;
+
+ err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
+ if (err)
+ goto error;
+
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G;
+
+ link_speeds = &dev->data->dev_conf.link_speeds;
+ if (*link_speeds & ~allowed_speeds) {
+ PMD_INIT_LOG(ERR, "Invalid link setting");
+ goto error;
+ }
+
+ speed = 0x0;
+ if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ speed = (TXGBE_LINK_SPEED_100M_FULL |
+ TXGBE_LINK_SPEED_1GB_FULL |
+ TXGBE_LINK_SPEED_10GB_FULL);
+ } else {
+ if (*link_speeds & ETH_LINK_SPEED_10G)
+ speed |= TXGBE_LINK_SPEED_10GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_5G)
+ speed |= TXGBE_LINK_SPEED_5GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_1G)
+ speed |= TXGBE_LINK_SPEED_1GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_100M)
+ speed |= TXGBE_LINK_SPEED_100M_FULL;
+ }
+
+ err = hw->mac.setup_link(hw, speed, link_up);
+ if (err)
+ goto error;
+
+skip_link_setup:
+
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+ txgbe_dev_lsc_interrupt_setup(dev, TRUE);
+ else
+ txgbe_dev_lsc_interrupt_setup(dev, FALSE);
+ txgbe_dev_macsec_interrupt_setup(dev);
+ txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ txgbe_dev_interrupt_handler, dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0)
+			PMD_INIT_LOG(INFO, "lsc won't be enabled because"
+				     " intr multiplexing is unavailable");
+ }
+
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+ rte_intr_dp_is_en(intr_handle))
+ txgbe_dev_rxq_interrupt_setup(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* resume enabled intr since hw reset */
+ txgbe_enable_intr(dev);
+ txgbe_l2_tunnel_conf(dev);
+ txgbe_filter_restore(dev);
+
+ if (tm_conf->root && !tm_conf->committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
+	/*
+	 * Update link status right before return, because it may
+	 * start the link configuration process in a separate thread.
+	 */
+ txgbe_dev_link_update(dev, 0);
+
+ /* setup the macsec ctrl register */
+ if (macsec_setting->offload_en)
+ txgbe_dev_macsec_register_enable(dev, macsec_setting);
+
+ wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_OD_MASK);
+
+ txgbe_read_stats_registers(hw, hw_stats);
+ hw->offset_loaded = 1;
+
+ return 0;
+
+error:
+ PMD_INIT_LOG(ERR, "failure in txgbe_dev_start(): %d", err);
+ txgbe_dev_clear_queues(dev);
+ return -EIO;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+txgbe_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int vf;
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+
+ if (hw->adapter_stopped)
+ return;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
+
+ /* disable interrupts */
+ txgbe_disable_intr(hw);
+
+ /* reset the NIC */
+ txgbe_pf_reset_hw(hw);
+ hw->adapter_stopped = 0;
+
+ /* stop adapter */
+ txgbe_stop_hw(hw);
+
+ for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
+ vfinfo[vf].clear_to_send = false;
+
+ if (hw->phy.media_type == txgbe_media_type_copper) {
+ /* Turn off the copper */
+ hw->phy.set_phy_power(hw, false);
+ } else {
+ /* Turn off the laser */
+ hw->mac.disable_tx_laser(hw);
+ }
+
+ txgbe_dev_clear_queues(dev);
+
+ /* Clear stored conf */
+ dev->data->scattered_rx = 0;
+ dev->data->lro = 0;
+
+ /* Clear recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(dev, &link);
+
+ if (!rte_intr_allow_others(intr_handle))
+ /* resume to the default handler */
+ rte_intr_callback_register(intr_handle,
+ txgbe_dev_interrupt_handler,
+ (void *)dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ /* reset hierarchy commit */
+ tm_conf->committed = false;
+
+ adapter->rss_reta_updated = 0;
+ wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
+
+ hw->adapter_stopped = true;
+}
+
+/*
+ * Set device link up: enable tx.
+ */
+static int
+txgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (hw->phy.media_type == txgbe_media_type_copper) {
+ /* Turn on the copper */
+ hw->phy.set_phy_power(hw, true);
+ } else {
+ /* Turn on the laser */
+ hw->mac.enable_tx_laser(hw);
+ txgbe_dev_link_update(dev, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Set device link down: disable tx.
+ */
+static int
+txgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (hw->phy.media_type == txgbe_media_type_copper) {
+ /* Turn off the copper */
+ hw->phy.set_phy_power(hw, false);
+ } else {
+ /* Turn off the laser */
+ hw->mac.disable_tx_laser(hw);
+ txgbe_dev_link_update(dev, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Reset and stop device.
+ */
+static void
+txgbe_dev_close(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ int retries = 0;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txgbe_pf_reset_hw(hw);
+
+ txgbe_dev_stop(dev);
+
+ txgbe_dev_free_queues(dev);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ /* Unlock any pending hardware semaphore */
+ txgbe_swfw_lock_reset(hw);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ txgbe_dev_interrupt_handler, dev);
+ if (ret >= 0 || ret == -ENOENT) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ }
+ rte_delay_ms(100);
+ } while (retries++ < (10 + TXGBE_LINK_UP_TIME));
+
+ /* cancel the delay handler before remove dev */
+ rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
+
+ /* uninitialize PF if max_vfs not zero */
+ txgbe_pf_host_uninit(dev);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ rte_free(dev->data->hash_mac_addrs);
+ dev->data->hash_mac_addrs = NULL;
+
+ /* remove all the fdir filters & hash */
+ txgbe_fdir_filter_uninit(dev);
+
+ /* remove all the L2 tunnel filters & hash */
+ txgbe_l2_tn_filter_uninit(dev);
+
+ /* Remove all ntuple filters of the device */
+ txgbe_ntuple_filter_uninit(dev);
+
+ /* clear all the filters list */
+ txgbe_filterlist_flush();
+
+ /* Remove all Traffic Manager configuration */
+ txgbe_tm_conf_uninit(dev);
+
+#ifdef RTE_LIBRTE_SECURITY
+ rte_free(dev->security_ctx);
+#endif
+}
+
+/*
+ * Reset PF device.
+ */
+static int
+txgbe_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
+	 * its VFs to make them align with it. The detailed notification
+	 * mechanism is PMD specific. As for the txgbe PF, it is rather
+	 * complex. To avoid unexpected behavior in VFs, currently reset of
+	 * a PF with SR-IOV activated is not supported. It might be
+	 * supported later.
+	 */
+ if (dev->data->sriov.active)
+ return -ENOTSUP;
+
+ ret = eth_txgbe_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_txgbe_dev_init(dev, NULL);
+
+ return ret;
+}
+
+#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ uint32_t current_counter = rd32(hw, reg); \
+ if (current_counter < last_counter) \
+ current_counter += 0x100000000LL; \
+ if (!hw->offset_loaded) \
+ last_counter = current_counter; \
+ counter = current_counter - last_counter; \
+ counter &= 0xFFFFFFFFLL; \
+ }
+
+#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
+ uint64_t current_counter_msb = rd32(hw, reg_msb); \
+ uint64_t current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ current_counter += 0x1000000000LL; \
+ if (!hw->offset_loaded) \
+ last_counter = current_counter; \
+ counter = current_counter - last_counter; \
+ counter &= 0xFFFFFFFFFLL; \
+ }
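+
+/*
+ * The helpers above turn free-running, wrapping hardware counters into
+ * monotonic software totals. Wrap handling in the 32-bit case, e.g.:
+ * last_counter = 0xFFFFFFF0 and the register now reads 0x00000010;
+ * since 0x10 < 0xFFFFFFF0 the current value is first biased by 2^32,
+ * giving a delta of 0x100000010 - 0xFFFFFFF0 = 0x20 (32 packets).
+ * The 36-bit variant does the same with a 2^36 bias for the split
+ * LSB/MSB register pair.
+ */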
+
+
+void
+txgbe_read_stats_registers(struct txgbe_hw *hw,
+ struct txgbe_hw_stats *hw_stats)
+{
+ unsigned i;
+
+ /* QP Stats */
+ for (i = 0; i < hw->nb_rx_queues; i++) {
+ UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
+ hw->qp_last[i].rx_qp_packets,
+ hw_stats->qp[i].rx_qp_packets);
+ UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
+ hw->qp_last[i].rx_qp_bytes,
+ hw_stats->qp[i].rx_qp_bytes);
+ UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
+ hw->qp_last[i].rx_qp_mc_packets,
+ hw_stats->qp[i].rx_qp_mc_packets);
+ }
+
+ for (i = 0; i < hw->nb_tx_queues; i++) {
+ UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
+ hw->qp_last[i].tx_qp_packets,
+ hw_stats->qp[i].tx_qp_packets);
+ UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
+ hw->qp_last[i].tx_qp_bytes,
+ hw_stats->qp[i].tx_qp_bytes);
+ }
+ /* PB Stats */
+ for (i = 0; i < TXGBE_MAX_UP; i++) {
+ hw_stats->up[i].rx_up_xon_packets +=
+ rd32(hw, TXGBE_PBRXUPXON(i));
+ hw_stats->up[i].rx_up_xoff_packets +=
+ rd32(hw, TXGBE_PBRXUPXOFF(i));
+ hw_stats->up[i].tx_up_xon_packets +=
+ rd32(hw, TXGBE_PBTXUPXON(i));
+ hw_stats->up[i].tx_up_xoff_packets +=
+ rd32(hw, TXGBE_PBTXUPXOFF(i));
+ hw_stats->up[i].tx_up_xon2off_packets +=
+ rd32(hw, TXGBE_PBTXUPOFF(i));
+ hw_stats->up[i].rx_up_dropped +=
+ rd32(hw, TXGBE_PBRXMISS(i));
+ }
+ hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
+ hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
+ hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
+ hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
+
+ /* DMA Stats */
+ hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
+ hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
+
+ hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
+ hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
+ hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
+
+ /* MAC Stats */
+ hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
+ hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
+ hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
+
+ hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
+ hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
+ hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
+
+ hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
+ hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
+
+ hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1to64L);
+ hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65to127L);
+ hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128to255L);
+ hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256to511L);
+ hw_stats->rx_size_512_to_1023_packets += rd64(hw, TXGBE_MACRX512to1023L);
+ hw_stats->rx_size_1024_to_max_packets += rd64(hw, TXGBE_MACRX1024toMAXL);
+ hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1to64L);
+ hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65to127L);
+ hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128to255L);
+ hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256to511L);
+ hw_stats->tx_size_512_to_1023_packets += rd64(hw, TXGBE_MACTX512to1023L);
+ hw_stats->tx_size_1024_to_max_packets += rd64(hw, TXGBE_MACTX1024toMAXL);
+
+ hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
+ hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
+ hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
+
+ /* MNG Stats */
+ hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
+ hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
+ hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
+ hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
+
+ /* FCoE Stats */
+ hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
+ hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
+ hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
+ hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
+ hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
+ hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
+ hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
+
+ /* Flow Director Stats */
+ hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
+ hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
+ hw_stats->flow_director_added_filters +=
+ TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
+ hw_stats->flow_director_removed_filters +=
+ TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
+ hw_stats->flow_director_filter_add_errors +=
+ TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
+ hw_stats->flow_director_filter_remove_errors +=
+ TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
+
+ /* MACsec Stats */
+ hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
+ hw_stats->tx_macsec_pkts_encrypted +=
+ rd32(hw, TXGBE_LSECTX_ENCPKT);
+ hw_stats->tx_macsec_pkts_protected +=
+ rd32(hw, TXGBE_LSECTX_PROTPKT);
+ hw_stats->tx_macsec_octets_encrypted +=
+ rd32(hw, TXGBE_LSECTX_ENCOCT);
+ hw_stats->tx_macsec_octets_protected +=
+ rd32(hw, TXGBE_LSECTX_PROTOCT);
+ hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
+ hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
+ hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
+ hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
+ hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
+ hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
+ hw_stats->rx_macsec_sc_pkts_unchecked += rd32(hw, TXGBE_LSECRX_UNCHKPKT);
+ hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
+ hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
+ for (i = 0; i < 2; i++) {
+ hw_stats->rx_macsec_sa_pkts_ok +=
+ rd32(hw, TXGBE_LSECRX_OKPKT(i));
+ hw_stats->rx_macsec_sa_pkts_invalid +=
+ rd32(hw, TXGBE_LSECRX_INVPKT(i));
+ hw_stats->rx_macsec_sa_pkts_notvalid +=
+ rd32(hw, TXGBE_LSECRX_BADPKT(i));
+ }
+ hw_stats->rx_macsec_sa_pkts_unusedsa +=
+ rd32(hw, TXGBE_LSECRX_INVSAPKT);
+ hw_stats->rx_macsec_sa_pkts_notusingsa +=
+ rd32(hw, TXGBE_LSECRX_BADSAPKT);
+
+ hw_stats->rx_total_missed_packets = 0;
+ for (i = 0; i < TXGBE_MAX_UP; i++) {
+ hw_stats->rx_total_missed_packets +=
+ hw_stats->up[i].rx_up_dropped;
+ }
+}
+
+static int
+txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ struct txgbe_stat_mappings *stat_mappings =
+ TXGBE_DEV_STAT_MAPPINGS(dev);
+ uint32_t i, j;
+
+ txgbe_read_stats_registers(hw, hw_stats);
+
+ if (stats == NULL)
+ return -EINVAL;
+
+ /* Fill out the rte_eth_stats statistics structure */
+ stats->ipackets = hw_stats->rx_packets;
+ stats->ibytes = hw_stats->rx_bytes;
+ stats->opackets = hw_stats->tx_packets;
+ stats->obytes = hw_stats->tx_bytes;
+
+ memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
+ memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
+ memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
+ memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
+ memset(&stats->q_errors, 0, sizeof(stats->q_errors));
+ for (i = 0; i < TXGBE_MAX_QP; i++) {
+ uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
+ uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
+ uint32_t q_map;
+
+ q_map = (stat_mappings->rqsm[n] >> offset)
+ & QMAP_FIELD_RESERVED_BITS_MASK;
+ j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+ ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
+ stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
+
+ q_map = (stat_mappings->tqsm[n] >> offset)
+ & QMAP_FIELD_RESERVED_BITS_MASK;
+ j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
+ ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
+ stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
+ }
+
+ /* Rx Errors */
+ stats->imissed = hw_stats->rx_total_missed_packets;
+ stats->ierrors = hw_stats->rx_crc_errors +
+ hw_stats->rx_mac_short_packet_dropped +
+ hw_stats->rx_length_errors +
+ hw_stats->rx_undersize_errors +
+ hw_stats->rx_oversize_errors +
+ hw_stats->rx_drop_packets +
+ hw_stats->rx_illegal_byte_errors +
+ hw_stats->rx_error_bytes +
+ hw_stats->rx_fragment_errors +
+ hw_stats->rx_fcoe_crc_errors +
+ hw_stats->rx_fcoe_mbuf_allocation_errors;
+
+ /* Tx Errors */
+ stats->oerrors = 0;
+ return 0;
+}
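+
+/*
+ * Note on the queue loop above: per-queue counters are accumulated into
+ * the stats slot programmed earlier through the RQSMR/TQSM mapping, so a
+ * queue mapped to stat index 3 adds into stats->q_ipackets[3] and
+ * stats->q_ibytes[3]; out-of-range indices are folded modulo
+ * RTE_ETHDEV_QUEUE_STAT_CNTRS.
+ */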
+
+static int
+txgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+
+ /* HW registers are cleared on read */
+ hw->offset_loaded = 0;
+ txgbe_dev_stats_get(dev, NULL);
+ hw->offset_loaded = 1;
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+
+ return 0;
+}
+
+/* This function calculates the number of xstats based on the current config */
+static unsigned
+txgbe_xstats_calc_num(struct rte_eth_dev *dev)
+{
+ int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+ return TXGBE_NB_HW_STATS +
+ TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
+ TXGBE_NB_QP_STATS * nb_queues;
+}
+
+static inline int
+txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
+{
+ int nb, st;
+
+ /* Extended stats from txgbe_hw_stats */
+ if (id < TXGBE_NB_HW_STATS) {
+ snprintf(name, size, "[hw]%s",
+ rte_txgbe_stats_strings[id].name);
+ return 0;
+ }
+ id -= TXGBE_NB_HW_STATS;
+
+ /* Priority Stats */
+ if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
+ nb = id / TXGBE_NB_UP_STATS;
+ st = id % TXGBE_NB_UP_STATS;
+ snprintf(name, size, "[p%u]%s", nb,
+ rte_txgbe_up_strings[st].name);
+ return 0;
+ }
+ id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
+
+ /* Queue Stats */
+ if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
+ nb = id / TXGBE_NB_QP_STATS;
+ st = id % TXGBE_NB_QP_STATS;
+ snprintf(name, size, "[q%u]%s", nb,
+ rte_txgbe_qp_strings[st].name);
+ return 0;
+ }
+ id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
+
+ return -(int)(id + 1);
+}
+
+static inline int
+txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
+{
+ int nb, st;
+
+ /* Extended stats from txgbe_hw_stats */
+ if (id < TXGBE_NB_HW_STATS) {
+ *offset = rte_txgbe_stats_strings[id].offset;
+ return 0;
+ }
+ id -= TXGBE_NB_HW_STATS;
+
+ /* Priority Stats */
+ if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
+ nb = id / TXGBE_NB_UP_STATS;
+ st = id % TXGBE_NB_UP_STATS;
+ *offset = rte_txgbe_up_strings[st].offset +
+ nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
+ return 0;
+ }
+ id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
+
+ /* Queue Stats */
+ if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
+ nb = id / TXGBE_NB_QP_STATS;
+ st = id % TXGBE_NB_QP_STATS;
+ *offset = rte_txgbe_qp_strings[st].offset +
+ nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
+ return 0;
+ }
+ id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
+
+ return -(int)(id + 1);
+}
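+
+/*
+ * The xstats ID space decoded by the two helpers above is laid out as
+ * three consecutive blocks:
+ *   [0, TXGBE_NB_HW_STATS)                      device-wide [hw] stats
+ *   [.., +TXGBE_NB_UP_STATS * TXGBE_MAX_UP)     per-priority [p%u] stats
+ *   [.., +TXGBE_NB_QP_STATS * TXGBE_MAX_QP)     per-queue [q%u] stats
+ * Each helper peels blocks off the front of the ID until it falls inside
+ * one of them; an ID beyond the last block yields a negative return.
+ */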
+
+static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned int limit)
+{
+ unsigned i, count;
+
+ count = txgbe_xstats_calc_num(dev);
+ if (xstats_names == NULL) {
+ return count;
+ }
+
+ /* Note: limit >= cnt_stats checked upstream
+ * in rte_eth_xstats_names()
+ */
+ limit = min(limit, count);
+
+ /* Extended stats from txgbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ if (txgbe_get_name_by_id(i, xstats_names[i].name,
+ sizeof(xstats_names[i].name))) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned i;
+
+ if (ids == NULL) {
+ return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
+ }
+
+ for (i = 0; i < limit; i++) {
+ if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
+ sizeof(xstats_names[i].name))) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ return -1;
+ }
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned limit)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ unsigned i, count;
+
+ txgbe_read_stats_registers(hw, hw_stats);
+
+	/* If this is a reset, xstats is NULL and we have cleared the
+	 * registers by reading them.
+	 */
+ count = txgbe_xstats_calc_num(dev);
+ if (xstats == NULL) {
+ return count;
+ }
+
+ limit = min(limit, txgbe_xstats_calc_num(dev));
+
+ /* Extended stats from txgbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ uint32_t offset = 0;
+
+ if (txgbe_get_offset_by_id(i, &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
+ xstats[i].id = i;
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
+ unsigned limit)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ unsigned i, count;
+
+ txgbe_read_stats_registers(hw, hw_stats);
+
+	/* If this is a reset, values is NULL and we have cleared the
+	 * registers by reading them.
+	 */
+ count = txgbe_xstats_calc_num(dev);
+ if (values == NULL) {
+ return count;
+ }
+
+ limit = min(limit, txgbe_xstats_calc_num(dev));
+
+ /* Extended stats from txgbe_hw_stats */
+ for (i = 0; i < limit; i++) {
+ uint32_t offset;
+
+ if (txgbe_get_offset_by_id(i, &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int limit)
+{
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ unsigned i;
+
+ if (ids == NULL) {
+ return txgbe_dev_xstats_get_(dev, values, limit);
+ }
+
+ for (i = 0; i < limit; i++) {
+ uint32_t offset;
+
+ if (txgbe_get_offset_by_id(ids[i], &offset)) {
+ PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
+ break;
+ }
+ values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
+ }
+
+ return i;
+}
+
+static int
+txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+
+ /* HW registers are cleared on read */
+ txgbe_read_stats_registers(hw, hw_stats);
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+
+ return 0;
+}
+
+static int
+txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ u16 eeprom_verh, eeprom_verl;
+ u32 etrack_id;
+ int ret;
+
+ hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
+ hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
+
+ etrack_id = (eeprom_verh << 16) | eeprom_verl;
+ ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+
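+	/* snprintf() returns the length it would have written, excluding
+	 * the terminating '\0'; add one so a too-small buffer makes us
+	 * report the required size back to the caller.
+	 */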
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static int
+txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+ dev_info->min_rx_bufsize = 1024;
+ dev_info->max_rx_pktlen = 15872;
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
+ dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+ dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
+ .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
+ .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
+ .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
+ .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+
+ /* Driver-preferred Rx/Tx parameters */
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+
+ return 0;
+}
+
+const uint32_t *
+txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ if (dev->rx_pkt_burst == txgbe_recv_pkts ||
+ dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
+ dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
+ dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
+ return txgbe_get_supported_ptypes();
+
+ return NULL;
+}
+
+void
+txgbe_dev_setup_link_alarm_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ u32 speed;
+ bool autoneg = false;
+
+ speed = hw->phy.autoneg_advertised;
+ if (!speed)
+ hw->mac.get_link_capabilities(hw, &speed, &autoneg);
+
+ hw->mac.setup_link(hw, speed, true);
+
+ intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+int
+txgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_eth_link link;
+ u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ bool link_up;
+ int err;
+ int wait = 1;
+
+ memset(&link, 0, sizeof(link));
+ link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
+
+ hw->mac.get_link_status = true;
+
+ if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
+ return rte_eth_linkstatus_set(dev, &link);
+
+	/* check if it needs to wait for completion, if lsc interrupt is enabled */
+ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
+ wait = 0;
+
+ err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
+
+ if (err != 0) {
+ link.link_speed = ETH_SPEED_NUM_100M;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ return rte_eth_linkstatus_set(dev, &link);
+ }
+
+ if (link_up == 0) {
+ if (hw->phy.media_type == txgbe_media_type_fiber) {
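+			/* Defer the (potentially slow) link setup to an
+			 * alarm callback rather than blocking the caller;
+			 * the handler clears TXGBE_FLAG_NEED_LINK_CONFIG
+			 * once setup_link has run.
+			 */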
+ intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
+ rte_eal_alarm_set(10,
+ txgbe_dev_setup_link_alarm_handler, dev);
+ }
+ return rte_eth_linkstatus_set(dev, &link);
+ }
+
+ intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
+ link.link_status = ETH_LINK_UP;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ switch (link_speed) {
+ default:
+ case TXGBE_LINK_SPEED_UNKNOWN:
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+
+ case TXGBE_LINK_SPEED_100M_FULL:
+ link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+
+ case TXGBE_LINK_SPEED_1GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+
+ case TXGBE_LINK_SPEED_2_5GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+
+ case TXGBE_LINK_SPEED_5GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_5G;
+ break;
+
+ case TXGBE_LINK_SPEED_10GB_FULL:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ }
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static int
+txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return txgbe_dev_link_update_share(dev, wait_to_complete);
+}
+
+static int
+txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t fctrl;
+
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+static int
+txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t fctrl;
+
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl &= (~TXGBE_PSRCTL_UCP);
+ if (dev->data->all_multicast == 1)
+ fctrl |= TXGBE_PSRCTL_MCP;
+ else
+ fctrl &= (~TXGBE_PSRCTL_MCP);
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+static int
+txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t fctrl;
+
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl |= TXGBE_PSRCTL_MCP;
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+static int
+txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t fctrl;
+
+ if (dev->data->promiscuous == 1)
+ return 0; /* must remain in all_multicast mode */
+
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl &= (~TXGBE_PSRCTL_MCP);
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ return 0;
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+ txgbe_dev_link_status_print(dev);
+ if (on)
+ intr->mask_misc |= TXGBE_ICRMISC_LSC;
+ else
+ intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
+
+ return 0;
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+ intr->mask[0] |= TXGBE_ICR_MASK;
+ intr->mask[1] |= TXGBE_ICR_MASK;
+
+ return 0;
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+ intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
+
+ return 0;
+}
+
+/*
+ * It reads the ICR and sets the flag (TXGBE_ICRMISC_LSC) for link_update.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+ /* clear all cause mask */
+ txgbe_disable_intr(hw);
+
+ /* read-on-clear nic registers here */
+ eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
+ PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
+
+ intr->flags = 0;
+
+ /* set flag for async link update */
+ if (eicr & TXGBE_ICRMISC_LSC)
+ intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+
+ if (eicr & TXGBE_ICRMISC_VFMBX)
+ intr->flags |= TXGBE_FLAG_MAILBOX;
+
+ if (eicr & TXGBE_ICRMISC_LNKSEC)
+ intr->flags |= TXGBE_FLAG_MACSEC;
+
+ if (eicr & TXGBE_ICRMISC_GPIO)
+ intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
+
+ return 0;
+}
+
+/**
+ * It gets and then prints the link status.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ */
+static void
+txgbe_dev_link_status_print(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_eth_link link;
+
+ rte_eth_linkstatus_get(dev, &link);
+
+ if (link.link_status) {
+ PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+ (int)(dev->data->port_id),
+			(unsigned int)link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ } else {
+ PMD_INIT_LOG(INFO, " Port %d: Link Down",
+ (int)(dev->data->port_id));
+ }
+ PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+}
+
+/*
+ * It executes link_update after an interrupt has occurred.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ int64_t timeout;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
+
+ if (intr->flags & TXGBE_FLAG_MAILBOX) {
+ txgbe_pf_mbx_process(dev);
+ intr->flags &= ~TXGBE_FLAG_MAILBOX;
+ }
+
+ if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
+ hw->phy.handle_lasi(hw);
+ intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
+ }
+
+ if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
+ struct rte_eth_link link;
+
+		/* get the link status before the update, to predict the new state */
+ rte_eth_linkstatus_get(dev, &link);
+
+ txgbe_dev_link_update(dev, 0);
+
+		/* link is likely to come up */
+		if (!link.link_status)
+			/* handle it 1 sec later, wait for it to become stable */
+			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
+		/* link is likely to go down */
+		else
+			/* handle it 4 sec later, wait for it to become stable */
+			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+ txgbe_dev_link_status_print(dev);
+ if (rte_eal_alarm_set(timeout * 1000,
+ txgbe_dev_interrupt_delayed_handler,
+ (void *)dev) < 0)
+ PMD_DRV_LOG(ERR, "Error setting alarm");
+ else {
+ /* remember original mask */
+ intr->mask_misc_orig = intr->mask_misc;
+ /* only disable lsc interrupt */
+ intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
+ }
+ }
+
+ PMD_DRV_LOG(DEBUG, "enable intr immediately");
+ txgbe_enable_intr(dev);
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+/**
+ * Interrupt handler registered as an alarm callback for delayed handling of
+ * a specific interrupt, waiting for the NIC state to become stable. As the
+ * txgbe interrupt state is not stable right after the link goes down,
+ * it needs to wait 4 seconds for the status to stabilize.
+ *
+ * @param param
+ *  The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+txgbe_dev_interrupt_delayed_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t eicr;
+
+ txgbe_disable_intr(hw);
+
+ eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
+ if (eicr & TXGBE_ICRMISC_VFMBX)
+ txgbe_pf_mbx_process(dev);
+
+ if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
+ hw->phy.handle_lasi(hw);
+ intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
+ }
+
+ if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
+ txgbe_dev_link_update(dev, 0);
+ intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
+ txgbe_dev_link_status_print(dev);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+
+ if (intr->flags & TXGBE_FLAG_MACSEC) {
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+ NULL);
+ intr->flags &= ~TXGBE_FLAG_MACSEC;
+ }
+
+ /* restore original mask */
+ intr->mask_misc = intr->mask_misc_orig;
+ intr->mask_misc_orig = 0;
+
+ PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
+ txgbe_enable_intr(dev);
+ rte_intr_enable(intr_handle);
+}
+
+/**
+ * Interrupt handler triggered by the NIC for handling a
+ * specific interrupt.
+ *
+ * @param param
+ *  The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+txgbe_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ txgbe_dev_interrupt_get_status(dev);
+ txgbe_dev_interrupt_action(dev, dev->intr_handle);
+}
+
+static int
+txgbe_dev_led_on(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+
+ hw = TXGBE_DEV_HW(dev);
+ return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
+}
+
+static int
+txgbe_dev_led_off(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+
+ hw = TXGBE_DEV_HW(dev);
+ return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
+}
+
+static int
+txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct txgbe_hw *hw;
+ uint32_t mflcn_reg;
+ uint32_t fccfg_reg;
+ int rx_pause;
+ int tx_pause;
+
+ hw = TXGBE_DEV_HW(dev);
+
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water[0];
+ fc_conf->low_water = hw->fc.low_water[0];
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+ /*
+ * Return rx_pause status according to actual setting of
+ * RXFCCFG register.
+ */
+ mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
+ if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ /*
+ * Return tx_pause status according to actual setting of
+ * TXFCCFG register.
+ */
+ fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
+ if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int
+txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct txgbe_hw *hw;
+ int err;
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
+ txgbe_fc_none,
+ txgbe_fc_rx_pause,
+ txgbe_fc_tx_pause,
+ txgbe_fc_full
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw = TXGBE_DEV_HW(dev);
+ rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+	/*
+	 * Reserve at least one Ethernet frame for the watermark;
+	 * high_water/low_water are in kilobytes for txgbe
+	 */
+ max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
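+	/* e.g. a 128 KB (0x20000) Rx packet buffer allows at most
+	 * (131072 - 1518) >> 10 = 126 KB as the high watermark
+	 */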
+ if ((fc_conf->high_water > max_high_water) ||
+ (fc_conf->high_water < fc_conf->low_water)) {
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
+ return -EINVAL;
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water[0] = fc_conf->high_water;
+ hw->fc.low_water[0] = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+ hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
+
+ err = txgbe_fc_enable(hw);
+
+ /* Not negotiated is not an error case */
+ if ((err == 0) || (err == TXGBE_ERR_FC_NOT_NEGOTIATED)) {
+ wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
+ (fc_conf->mac_ctrl_frame_fwd
+ ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
+ txgbe_flush(hw);
+
+ return 0;
+ }
+
+ PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
+ return -EIO;
+}
+
+static int
+txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
+{
+ int err;
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint8_t tc_num;
+ uint8_t map[TXGBE_DCB_UP_MAX] = { 0 };
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
+
+ enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
+ txgbe_fc_none,
+ txgbe_fc_rx_pause,
+ txgbe_fc_tx_pause,
+ txgbe_fc_full
+ };
+
+ PMD_INIT_FUNC_TRACE();
+
+ txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
+ tc_num = map[pfc_conf->priority];
+ rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+	/*
+	 * Reserve at least one Ethernet frame for the watermark;
+	 * high_water/low_water are in kilobytes for txgbe
+	 */
+ max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
+ if ((pfc_conf->fc.high_water > max_high_water) ||
+ (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
+ return -EINVAL;
+ }
+
+ hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
+ hw->fc.pause_time = pfc_conf->fc.pause_time;
+ hw->fc.send_xon = pfc_conf->fc.send_xon;
+ hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
+ hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
+
+ err = txgbe_dcb_pfc_enable(hw, tc_num);
+
+ /* Not negotiated is not an error case */
+ if ((err == 0) || (err == TXGBE_ERR_FC_NOT_NEGOTIATED))
+ return 0;
+
+ PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
+ return -EIO;
+}
+
+int
+txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint8_t i, j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!txgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
+			"(%d) doesn't match the number supported by hardware "
+			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
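+	/* Each 32-bit RSSTBL register packs four 8-bit RETA entries, so
+	 * walk the table four entries at a time and rewrite only the
+	 * bytes whose mask bit is set.
+	 */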
+ for (i = 0; i < reta_size; i += 4) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+ if (!mask)
+ continue;
+
+ reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
+ for (j = 0; j < 4; j++) {
+ if (RS8(mask, j, 0x1)) {
+ reta &= ~(MS32(8 * j, 0xFF));
+ reta |= LS32(reta_conf[idx].reta[shift + j],
+ 8 * j, 0xFF);
+ }
+ }
+ wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+ }
+ adapter->rss_reta_updated = 1;
+
+ return 0;
+}
+
+int
+txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint8_t i, j, mask;
+ uint32_t reta;
+ uint16_t idx, shift;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
+			"(%d) doesn't match the number supported by hardware "
+			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i += 4) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+ if (!mask)
+ continue;
+
+ reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
+ for (j = 0; j < 4; j++) {
+ if (RS8(mask, j, 0x1))
+ reta_conf[idx].reta[shift + j] =
+ (uint16_t)RS32(reta, 8 * j, 0xFF);
+ }
+ }
+
+ return 0;
+}
+
+static int
+txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t enable_addr = 1;
+
+ return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
+ pool, enable_addr);
+}
+
+static void
+txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ txgbe_clear_rar(hw, index);
+}
+
+static int
+txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ txgbe_remove_rar(dev, 0);
+ txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
+
+ return 0;
+}
+
+static int
+txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_eth_dev_info dev_info;
+ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ struct rte_eth_dev_data *dev_data = dev->data;
+ int ret;
+
+ ret = txgbe_dev_info_get(dev, &dev_info);
+ if (ret != 0)
+ return ret;
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < RTE_ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ /* If device is started, refuse mtu that requires the support of
+ * scattered packets when this feature has not been enabled before.
+ */
+ if (dev_data->dev_started && !dev_data->scattered_rx &&
+ (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ PMD_INIT_LOG(ERR, "Stop port first.");
+ return -EINVAL;
+ }
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ if (hw->mode)
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRAME_SIZE_MAX);
+ else
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(dev->data->dev_conf.rxmode.max_rx_pkt_len));
+
+ return 0;
+}
+
+int
+txgbe_vt_check(struct txgbe_hw *hw)
+{
+ uint32_t reg_val;
+
+ /* if Virtualization Technology is enabled */
+ reg_val = rd32(hw, TXGBE_PORTCTL);
+ if (!(reg_val & TXGBE_PORTCTL_NUMVT_MASK)) {
+ PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
+ return -1;
+ }
+
+ return 0;
+}
+
+static uint32_t
+txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
+{
+ uint32_t vector = 0;
+
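+	/* Illustrative example for mc_filter_type 0: addr_bytes[4] = 0x12
+	 * and addr_bytes[5] = 0x34 give (0x12 >> 4) | (0x34 << 4) = 0x341.
+	 */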
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 4) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 3) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 2) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((uc_addr->addr_bytes[4]) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+	/* the vector can only be 12 bits, or the boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+static int
+txgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+ uint8_t on)
+{
+ uint32_t vector;
+ uint32_t uta_idx;
+ uint32_t reg_val;
+ uint32_t uta_mask;
+ uint32_t psrctl;
+
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
+
+	/* The UTA table only exists on PF hardware */
+ if (hw->mac.type < txgbe_mac_raptor)
+ return -ENOTSUP;
+
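+	/* The 12-bit vector addresses one bit in a 128-entry table of
+	 * 32-bit registers: bits [11:5] select the register and
+	 * bits [4:0] the bit within it.
+	 */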
+ vector = txgbe_uta_vector(hw, mac_addr);
+ uta_idx = (vector >> 5) & 0x7F;
+ uta_mask = 0x1UL << (vector & 0x1F);
+
+ if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
+ return 0;
+
+ reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
+ if (on) {
+ uta_info->uta_in_use++;
+ reg_val |= uta_mask;
+ uta_info->uta_shadow[uta_idx] |= uta_mask;
+ } else {
+ uta_info->uta_in_use--;
+ reg_val &= ~uta_mask;
+ uta_info->uta_shadow[uta_idx] &= ~uta_mask;
+ }
+
+ wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
+
+ psrctl = rd32(hw, TXGBE_PSRCTL);
+	if (uta_info->uta_in_use > 0)
+		psrctl |= TXGBE_PSRCTL_UCHFENA;
+	else
+		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
+ psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
+ psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ wr32(hw, TXGBE_PSRCTL, psrctl);
+
+ return 0;
+}
+
+static int
+txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
+ uint32_t psrctl;
+ int i;
+
+	/* The UTA table only exists on PF hardware */
+ if (hw->mac.type < txgbe_mac_raptor)
+ return -ENOTSUP;
+
+ if (on) {
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = ~0;
+ wr32(hw, TXGBE_UCADDRTBL(i), ~0);
+ }
+ } else {
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = 0;
+ wr32(hw, TXGBE_UCADDRTBL(i), 0);
+ }
+ }
+
+ psrctl = rd32(hw, TXGBE_PSRCTL);
+	if (on)
+		psrctl |= TXGBE_PSRCTL_UCHFENA;
+	else
+		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
+ psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
+ psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
+ wr32(hw, TXGBE_PSRCTL, psrctl);
+
+ return 0;
+}
+
+uint32_t
+txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+ uint32_t new_val = orig_val;
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+ new_val |= TXGBE_POOLETHCTL_UTA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ new_val |= TXGBE_POOLETHCTL_MCHA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ new_val |= TXGBE_POOLETHCTL_UCHA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ new_val |= TXGBE_POOLETHCTL_BCA;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ new_val |= TXGBE_POOLETHCTL_MCP;
+
+ return new_val;
+}
+
+#define TXGBE_INVALID_MIRROR_TYPE(mirror_type) \
+ ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
+ ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+
+static int
+txgbe_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
+{
+ uint32_t mr_ctl, vlvf;
+ uint32_t mp_lsb = 0;
+ uint32_t mv_msb = 0;
+ uint32_t mv_lsb = 0;
+ uint32_t mp_msb = 0;
+ uint8_t i = 0;
+ int reg_index = 0;
+ uint64_t vlan_mask = 0;
+
+ const uint8_t pool_mask_offset = 32;
+ const uint8_t vlan_mask_offset = 32;
+ const uint8_t dst_pool_offset = 8;
+ const uint8_t rule_mr_offset = 4;
+ const uint8_t mirror_rule_mask = 0x0F;
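+	/* The 64-bit pool and VLAN masks are programmed as 32-bit low/high
+	 * register pairs (MIRRPOOLL/H, MIRRVLANL/H); the *_offset constants
+	 * above split the masks and index the high-half registers.
+	 */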
+
+ struct txgbe_mirror_info *mr_info = TXGBE_DEV_MR_INFO(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint8_t mirror_type = 0;
+
+ if (txgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ if (rule_id >= TXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
+
+ if (TXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
+ PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
+ mirror_conf->rule_type);
+ return -EINVAL;
+ }
+
+ if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+ mirror_type |= TXGBE_MIRRCTL_VLAN;
+		/* Check if the VLAN id is valid and find the corresponding
+		 * VLAN ID index in PSRVLAN
+		 */
+ for (i = 0; i < TXGBE_NUM_POOL; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+ /* search vlan id related pool vlan filter
+ * index
+ */
+ reg_index = txgbe_find_vlvf_slot(hw,
+ mirror_conf->vlan.vlan_id[i],
+ false);
+ if (reg_index < 0)
+ return -EINVAL;
+ wr32(hw, TXGBE_PSRVLANIDX, reg_index);
+ vlvf = rd32(hw, TXGBE_PSRVLAN);
+ if ((TXGBE_PSRVLAN_VID(vlvf) ==
+ mirror_conf->vlan.vlan_id[i]))
+ vlan_mask |= (1ULL << reg_index);
+ else
+ return -EINVAL;
+ }
+ }
+
+ if (on) {
+ mv_lsb = vlan_mask & BIT_MASK32;
+ mv_msb = vlan_mask >> vlan_mask_offset;
+
+ mr_info->mr_conf[rule_id].vlan.vlan_mask =
+ mirror_conf->vlan.vlan_mask;
+ for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i))
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+ mirror_conf->vlan.vlan_id[i];
+ }
+ } else {
+ mv_lsb = 0;
+ mv_msb = 0;
+ mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+ for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+ }
+ }
+
+ if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+ mirror_type |= TXGBE_MIRRCTL_POOL;
+ if (on) {
+ mp_lsb = mirror_conf->pool_mask & BIT_MASK32;
+ mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+ mr_info->mr_conf[rule_id].pool_mask =
+ mirror_conf->pool_mask;
+ } else {
+ mp_lsb = 0;
+ mp_msb = 0;
+ mr_info->mr_conf[rule_id].pool_mask = 0;
+ }
+ }
+ if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
+ mirror_type |= TXGBE_MIRRCTL_UPLINK;
+ if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
+ mirror_type |= TXGBE_MIRRCTL_DNLINK;
+
+ /* read mirror control register and recalculate it */
+ mr_ctl = rd32(hw, TXGBE_MIRRCTL(rule_id));
+
+ if (on) {
+ mr_ctl |= mirror_type;
+ mr_ctl &= mirror_rule_mask;
+ mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+ } else {
+ mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
+ }
+
+ mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
+ mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+	/* write mirror control register */
+ wr32(hw, TXGBE_MIRRCTL(rule_id), mr_ctl);
+
+	/* write pool mirror control register */
+ if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+ wr32(hw, TXGBE_MIRRPOOLL(rule_id), mp_lsb);
+ wr32(hw, TXGBE_MIRRPOOLH(rule_id + rule_mr_offset),
+ mp_msb);
+ }
+	/* write VLAN mirror control register */
+ if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+ wr32(hw, TXGBE_MIRRVLANL(rule_id), mv_lsb);
+ wr32(hw, TXGBE_MIRRVLANH(rule_id + rule_mr_offset),
+ mv_msb);
+ }
+
+ return 0;
+}
+
+static int
+txgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
+{
+ int mr_ctl = 0;
+ uint32_t lsb_val = 0;
+ uint32_t msb_val = 0;
+ const uint8_t rule_mr_offset = 4;
+
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_mirror_info *mr_info = TXGBE_DEV_MR_INFO(dev);
+
+ if (txgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ if (rule_id >= TXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
+
+ memset(&mr_info->mr_conf[rule_id], 0,
+ sizeof(struct rte_eth_mirror_conf));
+
+ /* clear MIRRCTL register */
+ wr32(hw, TXGBE_MIRRCTL(rule_id), mr_ctl);
+
+ /* clear pool mask register */
+ wr32(hw, TXGBE_MIRRPOOLL(rule_id), lsb_val);
+ wr32(hw, TXGBE_MIRRPOOLH(rule_id + rule_mr_offset), msb_val);
+
+ /* clear vlan mask register */
+ wr32(hw, TXGBE_MIRRVLANL(rule_id), lsb_val);
+ wr32(hw, TXGBE_MIRRVLANH(rule_id + rule_mr_offset), msb_val);
+
+ return 0;
+}
+
+static int
+txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t mask;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (queue_id < 32) {
+ mask = rd32(hw, TXGBE_IMS(0));
+ mask &= (1 << queue_id);
+ wr32(hw, TXGBE_IMS(0), mask);
+ } else if (queue_id < 64) {
+ mask = rd32(hw, TXGBE_IMS(1));
+ mask &= (1 << (queue_id - 32));
+ wr32(hw, TXGBE_IMS(1), mask);
+ }
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (queue_id < 32) {
+ mask = rd32(hw, TXGBE_IMS(0));
+ mask &= ~(1 << queue_id);
+ wr32(hw, TXGBE_IMS(0), mask);
+ } else if (queue_id < 64) {
+ mask = rd32(hw, TXGBE_IMS(1));
+ mask &= ~(1 << (queue_id - 32));
+ wr32(hw, TXGBE_IMS(1), mask);
+ }
+
+ return 0;
+}
+
+/**
+ * set the IVAR registers, mapping interrupt causes to vectors
+ * @param hw
+ * pointer to txgbe_hw struct
+ * @param direction
+ *  0 for Rx, 1 for Tx, -1 for other causes
+ * @param queue
+ *  queue to map the corresponding interrupt to
+ * @param msix_vector
+ * the vector to map to the corresponding queue
+ */
+void
+txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= TXGBE_IVARMISC_VLD;
+ idx = 0;
+ tmp = rd32(hw, TXGBE_IVARMISC);
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ wr32(hw, TXGBE_IVARMISC, tmp);
+ } else {
+ /* rx or tx causes */
+		/* Workaround for ICR lost */
+ idx = ((16 * (queue & 1)) + (8 * direction));
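+		/* Each 32-bit IVAR register holds four 8-bit entries: the
+		 * Rx and Tx causes of an even/odd queue pair. E.g. queue 5,
+		 * direction 1 (Tx): idx = 16 * 1 + 8 * 1 = 24, in IVAR(2).
+		 */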
+ tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
+ }
+}
+
+/**
+ * Sets up the hardware to properly generate MSI-X interrupts
+ * @param dev
+ *  pointer to the rte_eth_dev structure
+ */
+static void
+txgbe_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
+ uint32_t vec = TXGBE_MISC_VEC_ID;
+ uint32_t gpie;
+
+	/* Won't configure the MSI-X register if no mapping is done
+	 * between intr vector and event fd.
+	 * But if MSI-X has been enabled already, auto clean, auto mask
+	 * and throttling still need to be configured.
+	 */
+ gpie = rd32(hw, TXGBE_GPIE);
+ if (!rte_intr_dp_is_en(intr_handle) &&
+ !(gpie & TXGBE_GPIE_MSIX))
+ return;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = base = TXGBE_RX_VEC_START;
+
+ /* setup GPIE for MSI-x mode */
+ gpie = rd32(hw, TXGBE_GPIE);
+ gpie |= TXGBE_GPIE_MSIX;
+ wr32(hw, TXGBE_GPIE, gpie);
+
+ /* Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ /* by default, 1:1 mapping */
+ txgbe_set_ivar_map(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
+ }
+ wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
+ TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | TXGBE_ITR_WRDSA);
+}
+
+int
+txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t tx_rate)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t bcnrc_val;
+
+ if (queue_idx >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
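+	/* tx_rate is in Mbps (see rte_eth_set_queue_rate_limit); use it as
+	 * the arbiter's maximum rate and half of it as the minimum, or
+	 * clear both to remove the limit when tx_rate is 0.
+	 */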
+ if (tx_rate != 0) {
+ bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
+ bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
+ * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
+ */
+ wr32(hw, TXGBE_ARBTXMMW, 0x14);
+
+ /* Set ARBTXRATE of queue X */
+ wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
+ wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+int
+txgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint32_t syn_info;
+ uint32_t synqf;
+
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ syn_info = filter_info->syn_info;
+
+ if (add) {
+ if (syn_info & TXGBE_SYNCLS_ENA)
+ return -EINVAL;
+ synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
+ synqf |= TXGBE_SYNCLS_ENA;
+
+ if (filter->hig_pri)
+ synqf |= TXGBE_SYNCLS_HIPRIO;
+ else
+ synqf &= ~TXGBE_SYNCLS_HIPRIO;
+ } else {
+ synqf = rd32(hw, TXGBE_SYNCLS);
+ if (!(syn_info & TXGBE_SYNCLS_ENA))
+ return -ENOENT;
+ synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
+ }
+
+ filter_info->syn_info = synqf;
+ wr32(hw, TXGBE_SYNCLS, synqf);
+ txgbe_flush(hw);
+ return 0;
+}
+
+static int
+txgbe_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t synqf = rd32(hw, TXGBE_SYNCLS);
+
+ if (synqf & TXGBE_SYNCLS_ENA) {
+ filter->hig_pri = (synqf & TXGBE_SYNCLS_HIPRIO) ? 1 : 0;
+ filter->queue = (uint16_t)TXGBD_SYNCLS_QPID(synqf);
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int
+txgbe_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = txgbe_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = txgbe_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = txgbe_syn_filter_get(dev,
+ (struct rte_eth_syn_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static inline enum txgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+ if (protocol_value == IPPROTO_TCP)
+ return TXGBE_5TF_PROT_TCP;
+ else if (protocol_value == IPPROTO_UDP)
+ return TXGBE_5TF_PROT_UDP;
+ else if (protocol_value == IPPROTO_SCTP)
+ return TXGBE_5TF_PROT_SCTP;
+ else
+ return TXGBE_5TF_PROT_NONE;
+}
+
+/* inject a 5-tuple filter to HW */
+static inline void
+txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+ struct txgbe_5tuple_filter *filter)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int i;
+ uint32_t ftqf, sdpqf;
+ uint32_t l34timir = 0;
+ uint32_t mask = TXGBE_5TFCTL0_MASK;
+
+ i = filter->index;
+ sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
+ sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
+
+ ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
+ ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
+ if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+ mask &= ~TXGBE_5TFCTL0_MSADDR;
+ if (filter->filter_info.dst_ip_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MDADDR;
+ if (filter->filter_info.src_port_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MSPORT;
+ if (filter->filter_info.dst_port_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MDPORT;
+ if (filter->filter_info.proto_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MPROTO;
+ ftqf |= mask;
+ ftqf |= TXGBE_5TFCTL0_MPOOL;
+ ftqf |= TXGBE_5TFCTL0_ENA;
+
+ wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
+ wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
+ wr32(hw, TXGBE_5TFPORT(i), sdpqf);
+ wr32(hw, TXGBE_5TFCTL0(i), ftqf);
+
+ l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
+ wr32(hw, TXGBE_5TFCTL1(i), l34timir);
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added; its index field is
+ *  filled in with the allocated filter slot.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+ struct txgbe_5tuple_filter *filter)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ int i, idx, shift;
+
+ /*
+ * look for an unused 5tuple filter index,
+ * and insert the filter to list.
+ */
+ for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
+ idx = i / (sizeof(uint32_t) * NBBY);
+ shift = i % (sizeof(uint32_t) * NBBY);
+ if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+ filter_info->fivetuple_mask[idx] |= 1 << shift;
+ filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+ filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= TXGBE_MAX_FTQF_FILTERS) {
+ PMD_DRV_LOG(ERR, "5tuple filters are full.");
+ return -ENOSYS;
+ }
+
+ txgbe_inject_5tuple_filter(dev, filter);
+
+ return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be removed.
+ */
+static void
+txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ struct txgbe_5tuple_filter *filter)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint16_t index = filter->index;
+
+ filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+ ~(1 << (index % (sizeof(uint32_t) * NBBY)));
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ rte_free(filter);
+
+ wr32(hw, TXGBE_5TFDADDR(index), 0);
+ wr32(hw, TXGBE_5TFSADDR(index), 0);
+ wr32(hw, TXGBE_5TFPORT(index), 0);
+ wr32(hw, TXGBE_5TFCTL0(index), 0);
+ wr32(hw, TXGBE_5TFCTL1(index), 0);
+}
+
+static inline struct txgbe_5tuple_filter *
+txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
+ struct txgbe_5tuple_filter_info *key)
+{
+ struct txgbe_5tuple_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct txgbe_5tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+ return NULL;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter
+ * to struct txgbe_5tuple_filter_info
+ */
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+ struct txgbe_5tuple_filter_info *filter_info)
+{
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > TXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < TXGBE_5TUPLE_MIN_PRI)
+ return -EINVAL;
+
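+	/* Note the inverted mask convention: an all-ones field mask in
+	 * rte_eth_ntuple_filter means "compare this field", while a set
+	 * *_mask flag in txgbe_5tuple_filter_info tells the hardware to
+	 * ignore the field.
+	 */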
+ switch (filter->dst_ip_mask) {
+ case UINT32_MAX:
+ filter_info->dst_ip_mask = 0;
+ filter_info->dst_ip = filter->dst_ip;
+ break;
+ case 0:
+ filter_info->dst_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_ip_mask) {
+ case UINT32_MAX:
+ filter_info->src_ip_mask = 0;
+ filter_info->src_ip = filter->src_ip;
+ break;
+ case 0:
+ filter_info->src_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_port_mask) {
+ case UINT16_MAX:
+ filter_info->src_port_mask = 0;
+ filter_info->src_port = filter->src_port;
+ break;
+ case 0:
+ filter_info->src_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->proto =
+ convert_protocol_type(filter->proto);
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ filter_info->priority = (uint8_t)filter->priority;
+ return 0;
+}
+
+/*
+ * add or delete an ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add the filter; if false, remove the filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter,
+ bool add)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ struct txgbe_5tuple_filter_info filter_5tuple;
+ struct txgbe_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter != NULL && add) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ return -EEXIST;
+ }
+ if (filter == NULL && !add) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ if (add) {
+ filter = rte_zmalloc("txgbe_5tuple_filter",
+ sizeof(struct txgbe_5tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+ rte_memcpy(&filter->filter_info,
+ &filter_5tuple,
+ sizeof(struct txgbe_5tuple_filter_info));
+ filter->queue = ntuple_filter->queue;
+ ret = txgbe_add_5tuple_filter(dev, filter);
+ if (ret < 0) {
+ rte_free(filter);
+ return ret;
+ }
+ } else {
+ txgbe_remove_5tuple_filter(dev, filter);
+ }
+
+ return 0;
+}
+
+/*
+ * get an ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_get_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ struct txgbe_5tuple_filter_info filter_5tuple;
+ struct txgbe_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+ ntuple_filter->queue = filter->queue;
+ return 0;
+}
+
+/*
+ * txgbe_ntuple_filter_handle - Handle operations for ntuple filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: the operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = txgbe_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = txgbe_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = txgbe_get_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+int
+txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint32_t etqf = 0;
+ uint32_t etqs = 0;
+ int ret;
+ struct txgbe_ethertype_filter ethertype_filter;
+
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+ " ethertype filter.", filter->ether_type);
+ return -EINVAL;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+ return -EINVAL;
+ }
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ PMD_DRV_LOG(ERR, "drop option is unsupported.");
+ return -EINVAL;
+ }
+
+ ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret >= 0 && add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+ filter->ether_type);
+ return -EEXIST;
+ }
+ if (ret < 0 && !add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ if (add) {
+ etqf = TXGBE_ETFLT_ENA;
+ etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
+ etqs |= TXGBE_ETCLS_QPID(filter->queue);
+ etqs |= TXGBE_ETCLS_QENA;
+
+ ethertype_filter.ethertype = filter->ether_type;
+ ethertype_filter.etqf = etqf;
+ ethertype_filter.etqs = etqs;
+ ethertype_filter.conf = FALSE;
+ ret = txgbe_ethertype_filter_insert(filter_info,
+				&ethertype_filter);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSPC;
+ }
+ } else {
+ ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+ if (ret < 0)
+ return -ENOSYS;
+ }
+ wr32(hw, TXGBE_ETFLT(ret), etqf);
+ wr32(hw, TXGBE_ETCLS(ret), etqs);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+static int
+txgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint32_t etqf, etqs;
+ int ret;
+
+ ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ etqf = rd32(hw, TXGBE_ETFLT(ret));
+ if (etqf & TXGBE_ETFLT_ENA) {
+ etqs = rd32(hw, TXGBE_ETCLS(ret));
+ filter->ether_type = etqf & TXGBE_ETFLT_ETID_MASK;
+ filter->flags = 0;
+ filter->queue = TXGBD_ETCLS_QPID(etqs);
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/*
+ * txgbe_ethertype_filter_handle - Handle operations for ethertype filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: the operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+txgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = txgbe_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = txgbe_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = txgbe_get_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+txgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ret = txgbe_ntuple_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = txgbe_ethertype_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_SYN:
+ ret = txgbe_syn_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = txgbe_fdir_ctrl_func(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ ret = txgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &txgbe_flow_ops;
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static u8 *
+txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
+ u8 **mc_addr_ptr, u32 *vmdq)
+{
+ u8 *mc_addr;
+
+ *vmdq = 0;
+ mc_addr = *mc_addr_ptr;
+ *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
+ return mc_addr;
+}
+
+int
+txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct txgbe_hw *hw;
+ u8 *mc_addr_list;
+
+ hw = TXGBE_DEV_HW(dev);
+ mc_addr_list = (u8 *)mc_addr_set;
+ return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+ txgbe_dev_addr_list_itr, TRUE);
+}
+
+static uint64_t
+txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint64_t systime_cycles;
+
+ systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
+ systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
+
+ return systime_cycles;
+}
+
+static uint64_t
+txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint64_t rx_tstamp_cycles;
+
+ /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
+ rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
+ rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
+
+ return rx_tstamp_cycles;
+}
+
+static uint64_t
+txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint64_t tx_tstamp_cycles;
+
+ /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
+ tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
+ tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
+
+ return tx_tstamp_cycles;
+}
+
+static void
+txgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ struct rte_eth_link link;
+ uint32_t incval = 0;
+ uint32_t shift = 0;
+
+ /* Get current link speed. */
+ txgbe_dev_link_update(dev, 1);
+ rte_eth_linkstatus_get(dev, &link);
+
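+	/* Pick the SYSTIME increment value and the matching cyclecounter
+	 * shift for the current line rate, so the free-running counter
+	 * can be converted to nanoseconds at any supported speed.
+	 */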
+ switch (link.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ incval = TXGBE_INCVAL_100;
+ shift = TXGBE_INCVAL_SHIFT_100;
+ break;
+ case ETH_SPEED_NUM_1G:
+ incval = TXGBE_INCVAL_1GB;
+ shift = TXGBE_INCVAL_SHIFT_1GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ default:
+ incval = TXGBE_INCVAL_10GB;
+ shift = TXGBE_INCVAL_SHIFT_10GB;
+ break;
+ }
+
+ wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
+
+ memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+ adapter->systime_tc.cc_shift = shift;
+ adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+ adapter->rx_tstamp_tc.cc_shift = shift;
+ adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+ adapter->tx_tstamp_tc.cc_shift = shift;
+ adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
+
+static int
+txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+ adapter->systime_tc.nsec += delta;
+ adapter->rx_tstamp_tc.nsec += delta;
+ adapter->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+ ns = rte_timespec_to_ns(ts);
+ /* Set the timecounters to a new value. */
+ adapter->systime_tc.nsec = ns;
+ adapter->rx_tstamp_tc.nsec = ns;
+ adapter->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+ systime_cycles = txgbe_read_systime_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+txgbe_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t tsync_ctl;
+
+ /* Stop the timesync system time. */
+ wr32(hw, TXGBE_TSTIMEINC, 0x0);
+ /* Reset the timesync system time value. */
+ wr32(hw, TXGBE_TSTIMEL, 0x0);
+ wr32(hw, TXGBE_TSTIMEH, 0x0);
+
+ txgbe_start_timecounters(dev);
+
+ /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
+ RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
+
+ /* Enable timestamping of received PTP packets. */
+ tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
+ tsync_ctl |= TXGBE_TSRXCTL_ENA;
+ wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
+
+ /* Enable timestamping of transmitted PTP packets. */
+ tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
+ tsync_ctl |= TXGBE_TSTXCTL_ENA;
+ wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
+
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+static int
+txgbe_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t tsync_ctl;
+
+ /* Disable timestamping of transmitted PTP packets. */
+ tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
+ tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
+ wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
+
+ /* Disable timestamping of received PTP packets. */
+ tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
+ tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
+ wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
+
+ /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
+
+ /* Stop incrementing the System Time registers. */
+ wr32(hw, TXGBE_TSTIMEINC, 0);
+
+ return 0;
+}
+
+static int
+txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ uint32_t tsync_rxctl;
+ uint64_t rx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
+ if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
+ return -EINVAL;
+
+ rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ uint32_t tsync_txctl;
+ uint64_t tx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
+ if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
+ return -EINVAL;
+
+ tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+ const struct reg_info **reg_set = txgbe_regs_others;
+
+ while ((reg_group = reg_set[g_ind++]))
+ count += txgbe_regs_group_count(reg_group);
+
+ return count;
+}
+
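+/*
+ * Register dump follows the usual ethdev two-call protocol (a sketch of
+ * the expected caller flow, not a txgbe-specific contract): a first call
+ * with regs->data == NULL only reports regs->length and regs->width;
+ * the caller then allocates length * width bytes and calls again to
+ * receive the actual dump.
+ */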
+static int
+txgbe_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+ const struct reg_info **reg_set = txgbe_regs_others;
+
+ if (data == NULL) {
+ regs->length = txgbe_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)txgbe_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 |
+ hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = reg_set[g_ind++]))
+ count += txgbe_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
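+/*
+ * The ethdev EEPROM API below is byte-oriented, while the txgbe ROM
+ * helpers operate on 16-bit words; hence the '* 2' on the reported
+ * length and the '>> 1' on the offsets and lengths in the accessors
+ * that follow.
+ */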
+static int
+txgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ /* Return unit is byte count */
+ return hw->rom.word_size * 2;
+}
+
+static int
+txgbe_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_rom_info *eeprom = &hw->rom;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first > hw->rom.word_size) ||
+ ((first + length) > hw->rom.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ return eeprom->readw_buffer(hw, first, length, data);
+}
+
+static int
+txgbe_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_rom_info *eeprom = &hw->rom;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first > hw->rom.word_size) ||
+ ((first + length) > hw->rom.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ return eeprom->writew_buffer(hw, first, length, data);
+}
+
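+/*
+ * SFP module EEPROM layout, summarized from the SFF specifications: the
+ * first RTE_ETH_MODULE_SFF_8079_LEN bytes live on the base I2C page
+ * (0xA0); the diagnostic page defined by SFF-8472 (0xA2) is reported
+ * only when the module advertises SFF-8472 support.
+ */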
+static int
+txgbe_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t status;
+ uint8_t sff8472_rev, addr_mode;
+ bool page_swap = false;
+
+ /* Check whether we support SFF-8472 or not */
+ status = hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_SFF_8472_COMP,
+ &sff8472_rev);
+ if (status != 0)
+ return -EIO;
+
+ /* Check whether the module uses an unsupported addressing mode. */
+ status = hw->phy.read_i2c_eeprom(hw,
+ TXGBE_SFF_SFF_8472_SWAP,
+ &addr_mode);
+ if (status != 0)
+ return -EIO;
+
+ if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
+ PMD_DRV_LOG(ERR,
+ "Address change required to access page 0xA2, "
+ "but not supported. Please report the module "
+ "type to the driver maintainers.");
+ page_swap = true;
+ }
+
+ if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ /* We have an SFP, but it does not support SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have an SFP that supports a revision of SFF-8472. */
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
+ uint8_t databyte = 0xFF;
+ uint8_t *data = info->data;
+ uint32_t i = 0;
+
+ if (info->length == 0)
+ return -EINVAL;
+
+ for (i = info->offset; i < info->offset + info->length; i++) {
+ if (i < RTE_ETH_MODULE_SFF_8079_LEN)
+ status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
+ else
+ status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
+
+ if (status != 0)
+ return -EIO;
+
+ data[i - info->offset] = databyte;
+ }
+
+ return 0;
+}
+
+bool
+txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
+{
+ switch (mac_type) {
+ case txgbe_mac_raptor:
+ case txgbe_mac_raptor_vf:
+ return true;
+ default:
+ return false;
+ }
+}
+
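+/*
+ * Without virtualization, DCB uses fixed queue mappings (an informal
+ * summary of the tables programmed below): with 4 TCs each TC gets
+ * 16 Rx queues at 32-queue strides and the Tx queues are split
+ * 64/32/16/16; with 8 TCs each TC gets 16 Rx queues and the Tx queues
+ * are split 32/32/16/16/8/8/8/8.
+ */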
+static int
+txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
+ struct txgbe_dcb_tc_config *tc;
+ struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+ uint8_t nb_tcs;
+ uint8_t i, j;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+ else
+ dcb_info->nb_tcs = 1;
+
+ tc_queue = &dcb_info->tc_queue;
+ nb_tcs = dcb_info->nb_tcs;
+
+ if (dcb_config->vt_mode) { /* vt is enabled */
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+ if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+ for (j = 0; j < nb_tcs; j++) {
+ tc_queue->tc_rxq[0][j].base = j;
+ tc_queue->tc_rxq[0][j].nb_queue = 1;
+ tc_queue->tc_txq[0][j].base = j;
+ tc_queue->tc_txq[0][j].nb_queue = 1;
+ }
+ } else {
+ for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+ for (j = 0; j < nb_tcs; j++) {
+ tc_queue->tc_rxq[i][j].base =
+ i * nb_tcs + j;
+ tc_queue->tc_rxq[i][j].nb_queue = 1;
+ tc_queue->tc_txq[i][j].base =
+ i * nb_tcs + j;
+ tc_queue->tc_txq[i][j].nb_queue = 1;
+ }
+ }
+ }
+ } else { /* vt is disabled */
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+ if (dcb_info->nb_tcs == ETH_4_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 64;
+ dcb_info->tc_queue.tc_txq[0][2].base = 96;
+ dcb_info->tc_queue.tc_txq[0][3].base = 112;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 32;
+ dcb_info->tc_queue.tc_txq[0][2].base = 64;
+ dcb_info->tc_queue.tc_txq[0][3].base = 80;
+ dcb_info->tc_queue.tc_txq[0][4].base = 96;
+ dcb_info->tc_queue.tc_txq[0][5].base = 104;
+ dcb_info->tc_queue.tc_txq[0][6].base = 112;
+ dcb_info->tc_queue.tc_txq[0][7].base = 120;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+ }
+ }
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
+ }
+ return 0;
+}
+
+/* Update e-tag ether type */
+static int
+txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
+ uint16_t ether_type)
+{
+ uint32_t etag_etype;
+
+ etag_etype = rd32(hw, TXGBE_EXTAG);
+ etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
+ etag_etype |= ether_type;
+ wr32(hw, TXGBE_EXTAG, etag_etype);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+/* Config l2 tunnel ether type */
+static int
+txgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+
+ if (l2_tunnel == NULL)
+ return -EINVAL;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
+ ret = txgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Enable e-tag tunnel */
+static int
+txgbe_e_tag_enable(struct txgbe_hw *hw)
+{
+ uint32_t etag_etype;
+
+ etag_etype = rd32(hw, TXGBE_PORTCTL);
+ etag_etype |= TXGBE_PORTCTL_ETAG;
+ wr32(hw, TXGBE_PORTCTL, etag_etype);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+/* Enable l2 tunnel */
+static int
+txgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = TRUE;
+ ret = txgbe_e_tag_enable(hw);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable e-tag tunnel */
+static int
+txgbe_e_tag_disable(struct txgbe_hw *hw)
+{
+ uint32_t etag_etype;
+
+ etag_etype = rd32(hw, TXGBE_PORTCTL);
+ etag_etype &= ~TXGBE_PORTCTL_ETAG;
+ wr32(hw, TXGBE_PORTCTL, etag_etype);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+/* Disable l2 tunnel */
+static int
+txgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = FALSE;
+ ret = txgbe_e_tag_disable(hw);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t i, rar_entries;
+ uint32_t rar_low, rar_high;
+
+ rar_entries = hw->mac.num_rar_entries;
+
+ for (i = 1; i < rar_entries; i++) {
+ wr32(hw, TXGBE_ETHADDRIDX, i);
+ rar_high = rd32(hw, TXGBE_ETHADDRH);
+ rar_low = rd32(hw, TXGBE_ETHADDRL);
+ if ((rar_high & TXGBE_ETHADDRH_VLD) &&
+ (rar_high & TXGBE_ETHADDRH_ETAG) &&
+ (TXGBE_ETHADDRL_ETAG(rar_low) ==
+ l2_tunnel->tunnel_id)) {
+ wr32(hw, TXGBE_ETHADDRL, 0);
+ wr32(hw, TXGBE_ETHADDRH, 0);
+
+ txgbe_clear_vmdq(hw, i, BIT_MASK32);
+
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int
+txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t i, rar_entries;
+ uint32_t rar_low, rar_high;
+
+ /* One entry per tunnel: remove any existing entry for this tunnel first. */
+ txgbe_e_tag_filter_del(dev, l2_tunnel);
+
+ rar_entries = hw->mac.num_rar_entries;
+
+ for (i = 1; i < rar_entries; i++) {
+ wr32(hw, TXGBE_ETHADDRIDX, i);
+ rar_high = rd32(hw, TXGBE_ETHADDRH);
+ if (rar_high & TXGBE_ETHADDRH_VLD)
+ continue;
+
+ txgbe_set_vmdq(hw, i, l2_tunnel->pool);
+ rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
+ rar_low = l2_tunnel->tunnel_id;
+
+ wr32(hw, TXGBE_ETHADDRL, rar_low);
+ wr32(hw, TXGBE_ETHADDRH, rar_high);
+
+ return ret;
+ }
+
+ PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
+ " Please remove a rule before adding a new one.");
+ return -EINVAL;
+}
+
+static inline struct txgbe_l2_tn_filter *
+txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
+ struct txgbe_l2_tn_key *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return l2_tn_info->hash_map[ret];
+}
+
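+/*
+ * On success rte_hash_add_key() returns the internal index reserved for
+ * the key; the helpers below reuse that index for hash_map[], so a
+ * later lookup or delete can translate the returned index straight back
+ * into the filter pointer.
+ */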
+static inline int
+txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+ struct txgbe_l2_tn_filter *l2_tn_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(l2_tn_info->hash_handle,
+ &l2_tn_filter->key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert L2 tunnel filter"
+ " into hash table %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+ TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+ return 0;
+}
+
+static inline int
+txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+ struct txgbe_l2_tn_key *key)
+{
+ int ret;
+ struct txgbe_l2_tn_filter *l2_tn_filter;
+
+ ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "No such L2 tunnel filter to delete %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_filter = l2_tn_info->hash_map[ret];
+ l2_tn_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+ rte_free(l2_tn_filter);
+
+ return 0;
+}
+
+/* Add l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore)
+{
+ int ret;
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_l2_tn_key key;
+ struct txgbe_l2_tn_filter *node;
+
+ if (!restore) {
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+
+ node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+ if (node) {
+ PMD_DRV_LOG(ERR,
+ "The L2 tunnel filter already exists!");
+ return -EINVAL;
+ }
+
+ node = rte_zmalloc("txgbe_l2_tn",
+ sizeof(struct txgbe_l2_tn_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ rte_memcpy(&node->key,
+ &key,
+ sizeof(struct txgbe_l2_tn_key));
+ node->pool = l2_tunnel->pool;
+ ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
+ if (ret < 0) {
+ rte_free(node);
+ return ret;
+ }
+ }
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ if ((!restore) && (ret < 0))
+ (void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
+ return ret;
+}
+
+/* Delete l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret;
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_l2_tn_key key;
+
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+ ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+ if (ret < 0)
+ return ret;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * txgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+txgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = txgbe_dev_l2_tunnel_filter_add
+ (dev,
+ (struct rte_eth_l2_tunnel_conf *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = txgbe_dev_l2_tunnel_filter_del
+ (dev,
+ (struct rte_eth_l2_tunnel_conf *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+ int ret = 0;
+ uint32_t ctrl;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ ctrl = rd32(hw, TXGBE_POOLCTL);
+ ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
+ if (en)
+ ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
+ wr32(hw, TXGBE_POOLCTL, ctrl);
+
+ return ret;
+}
+
+/* Enable l2 tunnel forwarding */
+static int
+txgbe_dev_l2_tunnel_forwarding_enable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = TRUE;
+ ret = txgbe_e_tag_forwarding_en_dis(dev, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable l2 tunnel forwarding */
+static int
+txgbe_dev_l2_tunnel_forwarding_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = FALSE;
+ ret = txgbe_e_tag_forwarding_en_dis(dev, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+txgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool en)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ int ret = 0;
+ uint32_t vmtir, vmvir;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
+ PMD_DRV_LOG(ERR,
+ "VF id %u should be less than %u",
+ l2_tunnel->vf_id,
+ pci_dev->max_vfs);
+ return -EINVAL;
+ }
+
+ if (en)
+ vmtir = l2_tunnel->tunnel_id;
+ else
+ vmtir = 0;
+
+ wr32(hw, TXGBE_POOLETAG(l2_tunnel->vf_id), vmtir);
+
+ vmvir = rd32(hw, TXGBE_POOLTAG(l2_tunnel->vf_id));
+ vmvir &= ~TXGBE_POOLTAG_ETAG_MASK;
+ if (en)
+ vmvir |= TXGBE_POOLTAG_ETAG;
+ wr32(hw, TXGBE_POOLTAG(l2_tunnel->vf_id), vmvir);
+
+ return ret;
+}
+
+/* Enable l2 tunnel tag insertion */
+static int
+txgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = txgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable l2 tunnel tag insertion */
+static int
+txgbe_dev_l2_tunnel_insertion_disable
+ (struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = txgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+txgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
+ bool en)
+{
+ int ret = 0;
+ int i;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ for (i = 0; i < TXGBE_MAX_RX_QUEUE_NUM; ++i) {
+ wr32m(hw, TXGBE_RXCFG(i), TXGBE_RXCFG_ETAG,
+ en ? TXGBE_RXCFG_ETAG : 0);
+ }
+
+ return ret;
+}
+
+/* Enable l2 tunnel tag stripping */
+static int
+txgbe_dev_l2_tunnel_stripping_enable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = txgbe_e_tag_stripping_en_dis(dev, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable l2 tunnel tag stripping */
+static int
+txgbe_dev_l2_tunnel_stripping_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = txgbe_e_tag_stripping_en_dis(dev, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Enable/disable l2 tunnel offload functions */
+static int
+txgbe_dev_l2_tunnel_offload_set
+ (struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ uint32_t mask,
+ uint8_t en)
+{
+ int ret = 0;
+
+ if (l2_tunnel == NULL)
+ return -EINVAL;
+
+ ret = -EINVAL;
+ if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
+ if (en)
+ ret = txgbe_dev_l2_tunnel_enable(dev,
+ l2_tunnel->l2_tunnel_type);
+ else
+ ret = txgbe_dev_l2_tunnel_disable(dev,
+ l2_tunnel->l2_tunnel_type);
+ }
+
+ if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
+ if (en)
+ ret = txgbe_dev_l2_tunnel_insertion_enable(dev,
+ l2_tunnel);
+ else
+ ret = txgbe_dev_l2_tunnel_insertion_disable(dev,
+ l2_tunnel);
+ }
+
+ if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
+ if (en)
+ ret = txgbe_dev_l2_tunnel_stripping_enable(dev,
+ l2_tunnel->l2_tunnel_type);
+ else
+ ret = txgbe_dev_l2_tunnel_stripping_disable(dev,
+ l2_tunnel->l2_tunnel_type);
+ }
+
+ if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
+ if (en)
+ ret = txgbe_dev_l2_tunnel_forwarding_enable(dev,
+ l2_tunnel->l2_tunnel_type);
+ else
+ ret = txgbe_dev_l2_tunnel_forwarding_disable(dev,
+ l2_tunnel->l2_tunnel_type);
+ }
+
+ return ret;
+}
+
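+/*
+ * The device exposes a single UDP port register per tunnel protocol, so
+ * only one VxLAN/GENEVE/Teredo port can be offloaded at a time; the
+ * delete path below therefore verifies the requested port against the
+ * currently programmed one before clearing it.
+ */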
+/* Add UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret = 0;
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (udp_tunnel->udp_port == 0) {
+ PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
+ wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (udp_tunnel->udp_port == 0) {
+ PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
+ break;
+ case RTE_TUNNEL_TYPE_TEREDO:
+ if (udp_tunnel->udp_port == 0) {
+ PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ txgbe_flush(hw);
+
+ return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret = 0;
+ uint16_t cur_port;
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
+ if (cur_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Port %u does not exist.",
+ udp_tunnel->udp_port);
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_VXLANPORT, 0);
+ wr32(hw, TXGBE_VXLANPORTGPE, 0);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
+ if (cur_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Port %u does not exist.",
+ udp_tunnel->udp_port);
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_GENEVEPORT, 0);
+ break;
+ case RTE_TUNNEL_TYPE_TEREDO:
+ cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
+ if (cur_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Port %u does not exist.",
+ udp_tunnel->udp_port);
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_TEREDOPORT, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ txgbe_flush(hw);
+
+ return ret;
+}
+
+/* restore n-tuple filter */
+static inline void
+txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ struct txgbe_5tuple_filter *node;
+
+ TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+ txgbe_inject_5tuple_filter(dev, node);
+ }
+}
+
+/* restore ethernet type filter */
+static inline void
+txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ int i;
+
+ for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ wr32(hw, TXGBE_ETFLT(i),
+ filter_info->ethertype_filters[i].etqf);
+ wr32(hw, TXGBE_ETCLS(i),
+ filter_info->ethertype_filters[i].etqs);
+ txgbe_flush(hw);
+ }
+ }
+}
+
+/* restore SYN filter */
+static inline void
+txgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & TXGBE_SYNCLS_ENA) {
+ wr32(hw, TXGBE_SYNCLS, synqf);
+ txgbe_flush(hw);
+ }
+}
+
+/* restore L2 tunnel filter */
+static inline void
+txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_l2_tn_filter *node;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+
+ TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+ l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = node->key.tn_id;
+ l2_tn_conf.pool = node->pool;
+ (void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+ }
+}
+
+/* restore rss filter */
+static inline void
+txgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ if (filter_info->rss_info.conf.queue_num)
+ txgbe_config_rss_filter(dev,
+ &filter_info->rss_info, TRUE);
+}
+
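+/*
+ * Re-program every software-tracked filter into hardware; intended for
+ * the (re)start path, when the hardware tables are clean but the
+ * driver's lists still hold the configured rules.
+ */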
+static int
+txgbe_filter_restore(struct rte_eth_dev *dev)
+{
+ txgbe_ntuple_filter_restore(dev);
+ txgbe_ethertype_filter_restore(dev);
+ txgbe_syn_filter_restore(dev);
+ txgbe_fdir_filter_restore(dev);
+ txgbe_l2_tn_filter_restore(dev);
+ txgbe_rss_filter_restore(dev);
+
+ return 0;
+}
+
+static void
+txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (l2_tn_info->e_tag_en)
+ (void)txgbe_e_tag_enable(hw);
+
+ if (l2_tn_info->e_tag_fwd_en)
+ (void)txgbe_e_tag_forwarding_en_dis(dev, 1);
+
+ (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters */
+void
+txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ struct txgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ txgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ int i;
+
+ for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+ if (filter_info->ethertype_mask & (1 << i) &&
+ !filter_info->ethertype_filters[i].conf) {
+ (void)txgbe_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ wr32(hw, TXGBE_ETFLT(i), 0);
+ wr32(hw, TXGBE_ETCLS(i), 0);
+ txgbe_flush(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+void
+txgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
+ filter_info->syn_info = 0;
+
+ wr32(hw, TXGBE_SYNCLS, 0);
+ txgbe_flush(hw);
+ }
+}
+
+/* remove all the L2 tunnel filters */
+int
+txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_l2_tn_filter *l2_tn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+ int ret = 0;
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+ l2_tn_conf.pool = l2_tn_filter->pool;
+ ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+bool
+is_txgbe_supported(struct rte_eth_dev *dev)
+{
+ return strcmp(dev->device->driver->name,
+ rte_txgbe_pmd.driver.name) == 0;
+}
+
+void
+txgbe_dev_macsec_setting_save(struct rte_eth_dev *dev,
+ struct txgbe_macsec_setting *macsec_setting)
+{
+ struct txgbe_macsec_setting *macsec = TXGBE_DEV_MACSEC_SETTING(dev);
+
+ macsec->offload_en = macsec_setting->offload_en;
+ macsec->encrypt_en = macsec_setting->encrypt_en;
+ macsec->replayprotect_en = macsec_setting->replayprotect_en;
+}
+
+void
+txgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev)
+{
+ struct txgbe_macsec_setting *macsec = TXGBE_DEV_MACSEC_SETTING(dev);
+
+ macsec->offload_en = 0;
+ macsec->encrypt_en = 0;
+ macsec->replayprotect_en = 0;
+}
+
+void
+txgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
+ struct txgbe_macsec_setting *macsec_setting)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+ uint8_t en = macsec_setting->encrypt_en;
+ uint8_t rp = macsec_setting->replayprotect_en;
+
+ /**
+ * Workaround:
+ * The base code implements no Tx equivalent of
+ * txgbe_disable_sec_rx_path(), and the base code must not be
+ * modified in DPDK, so call the hand-written
+ * txgbe_disable_sec_tx_path() directly for now.
+ * The hardware support has been checked by
+ * txgbe_disable_sec_rx_path().
+ */
+ txgbe_disable_sec_tx_path(hw);
+
+ /* Enable Ethernet CRC (required by MACsec offload) */
+ ctrl = rd32(hw, TXGBE_SECRXCTL);
+ ctrl |= TXGBE_SECRXCTL_CRCSTRIP;
+ wr32(hw, TXGBE_SECRXCTL, ctrl);
+
+ /* Enable the TX and RX crypto engines */
+ ctrl = rd32(hw, TXGBE_SECTXCTL);
+ ctrl &= ~TXGBE_SECTXCTL_XDSA;
+ wr32(hw, TXGBE_SECTXCTL, ctrl);
+
+ ctrl = rd32(hw, TXGBE_SECRXCTL);
+ ctrl &= ~TXGBE_SECRXCTL_XDSA;
+ wr32(hw, TXGBE_SECRXCTL, ctrl);
+
+ ctrl = rd32(hw, TXGBE_SECTXIFG);
+ ctrl &= ~TXGBE_SECTXIFG_MIN_MASK;
+ ctrl |= TXGBE_SECTXIFG_MIN(0x3);
+ wr32(hw, TXGBE_SECTXIFG, ctrl);
+
+ /* Enable SA lookup */
+ ctrl = rd32(hw, TXGBE_LSECTXCTL);
+ ctrl &= ~TXGBE_LSECTXCTL_MODE_MASK;
+ ctrl |= en ? TXGBE_LSECTXCTL_MODE_AENC : TXGBE_LSECTXCTL_MODE_AUTH;
+ ctrl &= ~TXGBE_LSECTXCTL_PNTRH_MASK;
+ ctrl |= TXGBE_LSECTXCTL_PNTRH(TXGBE_MACSEC_PNTHRSH);
+ wr32(hw, TXGBE_LSECTXCTL, ctrl);
+
+ ctrl = rd32(hw, TXGBE_LSECRXCTL);
+ ctrl &= ~TXGBE_LSECRXCTL_MODE_MASK;
+ ctrl |= TXGBE_LSECRXCTL_MODE_STRICT;
+ ctrl &= ~TXGBE_LSECRXCTL_POSTHDR;
+ if (rp)
+ ctrl |= TXGBE_LSECRXCTL_REPLAY;
+ else
+ ctrl &= ~TXGBE_LSECRXCTL_REPLAY;
+ wr32(hw, TXGBE_LSECRXCTL, ctrl);
+
+ /* Start the data paths */
+ txgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * The base code implements no Tx equivalent of
+ * txgbe_enable_sec_rx_path(), and the base code must not be
+ * modified in DPDK, so call the hand-written
+ * txgbe_enable_sec_tx_path() directly for now.
+ */
+ txgbe_enable_sec_tx_path(hw);
+}
+
+void
+txgbe_dev_macsec_register_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+
+ /**
+ * Workaround:
+ * The base code implements no Tx equivalent of
+ * txgbe_disable_sec_rx_path(), and the base code must not be
+ * modified in DPDK, so call the hand-written
+ * txgbe_disable_sec_tx_path() directly for now.
+ * The hardware support has been checked by
+ * txgbe_disable_sec_rx_path().
+ */
+ txgbe_disable_sec_tx_path(hw);
+
+ /* Disable the TX and RX crypto engines */
+ ctrl = rd32(hw, TXGBE_SECTXCTL);
+ ctrl |= TXGBE_SECTXCTL_XDSA;
+ wr32(hw, TXGBE_SECTXCTL, ctrl);
+
+ ctrl = rd32(hw, TXGBE_SECRXCTL);
+ ctrl |= TXGBE_SECRXCTL_XDSA;
+ wr32(hw, TXGBE_SECRXCTL, ctrl);
+
+ /* Disable SA lookup */
+ ctrl = rd32(hw, TXGBE_LSECTXCTL);
+ ctrl &= ~TXGBE_LSECTXCTL_MODE_MASK;
+ wr32(hw, TXGBE_LSECTXCTL, ctrl);
+
+ ctrl = rd32(hw, TXGBE_LSECRXCTL);
+ ctrl &= ~TXGBE_LSECRXCTL_MODE_MASK;
+ wr32(hw, TXGBE_LSECRXCTL, ctrl);
+
+ /* Start the data paths */
+ txgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * The base code implements no Tx equivalent of
+ * txgbe_enable_sec_rx_path(), and the base code must not be
+ * modified in DPDK, so call the hand-written
+ * txgbe_enable_sec_tx_path() directly for now.
+ */
+ txgbe_enable_sec_tx_path(hw);
+}
+
+static const struct eth_dev_ops txgbe_eth_dev_ops = {
+ .dev_configure = txgbe_dev_configure,
+ .dev_start = txgbe_dev_start,
+ .dev_stop = txgbe_dev_stop,
+ .dev_set_link_up = txgbe_dev_set_link_up,
+ .dev_set_link_down = txgbe_dev_set_link_down,
+ .dev_close = txgbe_dev_close,
+ .dev_reset = txgbe_dev_reset,
+ .promiscuous_enable = txgbe_dev_promiscuous_enable,
+ .promiscuous_disable = txgbe_dev_promiscuous_disable,
+ .allmulticast_enable = txgbe_dev_allmulticast_enable,
+ .allmulticast_disable = txgbe_dev_allmulticast_disable,
+ .link_update = txgbe_dev_link_update,
+ .stats_get = txgbe_dev_stats_get,
+ .xstats_get = txgbe_dev_xstats_get,
+ .xstats_get_by_id = txgbe_dev_xstats_get_by_id,
+ .stats_reset = txgbe_dev_stats_reset,
+ .xstats_reset = txgbe_dev_xstats_reset,
+ .xstats_get_names = txgbe_dev_xstats_get_names,
+ .xstats_get_names_by_id = txgbe_dev_xstats_get_names_by_id,
+ .queue_stats_mapping_set = txgbe_dev_queue_stats_mapping_set,
+ .fw_version_get = txgbe_fw_version_get,
+ .dev_infos_get = txgbe_dev_info_get,
+ .dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
+ .mtu_set = txgbe_dev_mtu_set,
+ .vlan_filter_set = txgbe_vlan_filter_set,
+ .vlan_tpid_set = txgbe_vlan_tpid_set,
+ .vlan_offload_set = txgbe_vlan_offload_set,
+ .vlan_strip_queue_set = txgbe_vlan_strip_queue_set,
+ .rx_queue_start = txgbe_dev_rx_queue_start,
+ .rx_queue_stop = txgbe_dev_rx_queue_stop,
+ .tx_queue_start = txgbe_dev_tx_queue_start,
+ .tx_queue_stop = txgbe_dev_tx_queue_stop,
+ .rx_queue_setup = txgbe_dev_rx_queue_setup,
+ .rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable,
+ .rx_queue_release = txgbe_dev_rx_queue_release,
+ .rx_queue_count = txgbe_dev_rx_queue_count,
+ .rx_descriptor_done = txgbe_dev_rx_descriptor_done,
+ .rx_descriptor_status = txgbe_dev_rx_descriptor_status,
+ .tx_descriptor_status = txgbe_dev_tx_descriptor_status,
+ .tx_queue_setup = txgbe_dev_tx_queue_setup,
+ .tx_queue_release = txgbe_dev_tx_queue_release,
+ .dev_led_on = txgbe_dev_led_on,
+ .dev_led_off = txgbe_dev_led_off,
+ .flow_ctrl_get = txgbe_flow_ctrl_get,
+ .flow_ctrl_set = txgbe_flow_ctrl_set,
+ .priority_flow_ctrl_set = txgbe_priority_flow_ctrl_set,
+ .mac_addr_add = txgbe_add_rar,
+ .mac_addr_remove = txgbe_remove_rar,
+ .mac_addr_set = txgbe_set_default_mac_addr,
+ .uc_hash_table_set = txgbe_uc_hash_table_set,
+ .uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
+ .mirror_rule_set = txgbe_mirror_rule_set,
+ .mirror_rule_reset = txgbe_mirror_rule_reset,
+ .set_queue_rate_limit = txgbe_set_queue_rate_limit,
+ .reta_update = txgbe_dev_rss_reta_update,
+ .reta_query = txgbe_dev_rss_reta_query,
+ .rss_hash_update = txgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = txgbe_dev_rss_hash_conf_get,
+ .filter_ctrl = txgbe_dev_filter_ctrl,
+ .set_mc_addr_list = txgbe_dev_set_mc_addr_list,
+ .rxq_info_get = txgbe_rxq_info_get,
+ .txq_info_get = txgbe_txq_info_get,
+ .timesync_enable = txgbe_timesync_enable,
+ .timesync_disable = txgbe_timesync_disable,
+ .timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
+ .get_reg = txgbe_get_regs,
+ .get_eeprom_length = txgbe_get_eeprom_length,
+ .get_eeprom = txgbe_get_eeprom,
+ .set_eeprom = txgbe_set_eeprom,
+ .get_module_info = txgbe_get_module_info,
+ .get_module_eeprom = txgbe_get_module_eeprom,
+ .get_dcb_info = txgbe_dev_get_dcb_info,
+ .timesync_adjust_time = txgbe_timesync_adjust_time,
+ .timesync_read_time = txgbe_timesync_read_time,
+ .timesync_write_time = txgbe_timesync_write_time,
+ .l2_tunnel_eth_type_conf = txgbe_dev_l2_tunnel_eth_type_conf,
+ .l2_tunnel_offload_set = txgbe_dev_l2_tunnel_offload_set,
+ .udp_tunnel_port_add = txgbe_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = txgbe_dev_udp_tunnel_port_del,
+ .tm_ops_get = txgbe_tm_ops_get,
+ .tx_done_cleanup = txgbe_dev_tx_done_cleanup,
+};
+
+RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
+RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
+
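+/*
+ * Datapath log types are compiled in only when the corresponding
+ * RTE_LIBRTE_TXGBE_DEBUG_* option is enabled, keeping the hot paths
+ * free of logging overhead by default.
+ */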
+#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
+ RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
+#endif
+#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
+ RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
+#endif
+
+#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
+ RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
+#endif
new file mode 100644
@@ -0,0 +1,813 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_ETHDEV_H_
+#define _TXGBE_ETHDEV_H_
+
+#include <stdint.h>
+
+#include "base/txgbe.h"
+#include "txgbe_ptypes.h"
+#ifdef RTE_LIBRTE_SECURITY
+#include "txgbe_ipsec.h"
+#endif
+#include <rte_flow.h>
+#include <rte_time.h>
+#include <rte_hash.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_tm_driver.h>
+
+/* Interrupt flag bits, e.g. a pending link update. */
+#define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
+#define TXGBE_FLAG_MAILBOX (uint32_t)(1 << 1)
+#define TXGBE_FLAG_PHY_INTERRUPT (uint32_t)(1 << 2)
+#define TXGBE_FLAG_MACSEC (uint32_t)(1 << 3)
+#define TXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)
+
+/*
+ * Defines that were not part of txgbe_type.h as they are not used by the
+ * FreeBSD driver.
+ */
+#define TXGBE_VFTA_SIZE 128
+#define TXGBE_VLAN_TAG_SIZE 4
+#define TXGBE_HKEY_MAX_INDEX 10
+#define TXGBE_MAX_RX_QUEUE_NUM 128
+#define TXGBE_VMDQ_DCB_NB_QUEUES TXGBE_MAX_RX_QUEUE_NUM
+#define TXGBE_DCB_NB_QUEUES TXGBE_MAX_RX_QUEUE_NUM
+
+#ifndef NBBY
+#define NBBY 8 /* number of bits in a byte */
+#endif
+#define TXGBE_HWSTRIP_BITMAP_SIZE (TXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
+
+#define TXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */
+
+#define TXGBE_MAX_QUEUE_NUM_PER_VF 8
+
+#define TXGBE_5TUPLE_MAX_PRI 7
+#define TXGBE_5TUPLE_MIN_PRI 1
+
+/* The overhead from MTU to max frame size. */
+#define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+
+/* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros */
+#define TXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000
+/* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros */
+#define TXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0
+
+#define TXGBE_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV6_UDP_EX)
+
+#define TXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define TXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+#define TXGBE_MACSEC_PNTHRSH 0xFFFFFE00
+
+#define TXGBE_MAX_FDIR_FILTER_NUM (1024 * 32)
+#define TXGBE_MAX_L2_TN_FILTER_NUM 128
+
+/*
+ * Information about the fdir mode.
+ */
+struct txgbe_hw_fdir_mask {
+ uint16_t vlan_tci_mask;
+ uint32_t src_ipv4_mask;
+ uint32_t dst_ipv4_mask;
+ uint16_t src_ipv6_mask;
+ uint16_t dst_ipv6_mask;
+ uint16_t src_port_mask;
+ uint16_t dst_port_mask;
+ uint16_t flex_bytes_mask;
+ uint8_t mac_addr_byte_mask;
+ uint32_t tunnel_id_mask;
+ uint8_t tunnel_type_mask;
+};
+
+struct txgbe_fdir_filter {
+ TAILQ_ENTRY(txgbe_fdir_filter) entries;
+ struct txgbe_atr_input input; /* key of fdir filter */
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t fdirhash; /* hash value for fdir */
+ uint8_t queue; /* assigned rx queue */
+};
+
+/* list of fdir filters */
+TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter);
+
+struct txgbe_fdir_rule {
+ struct txgbe_hw_fdir_mask mask;
+ struct txgbe_atr_input input; /* key of fdir filter */
+ bool b_spec; /* If TRUE, input, fdirflags, queue have meaning. */
+ bool b_mask; /* If TRUE, mask has meaning. */
+ enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t soft_id; /* a unique value for this rule */
+ uint8_t queue; /* assigned rx queue */
+ uint8_t flex_bytes_offset;
+};
+
+struct txgbe_hw_fdir_info {
+ struct txgbe_hw_fdir_mask mask;
+ uint8_t flex_bytes_offset;
+ uint16_t collision;
+ uint16_t free;
+ uint16_t maxhash;
+ uint8_t maxlen;
+ uint64_t add;
+ uint64_t remove;
+ uint64_t f_add;
+ uint64_t f_remove;
+ struct txgbe_fdir_filter_list fdir_list; /* filter list */
+ /* store the pointers of the filters, index is the hash value. */
+ struct txgbe_fdir_filter **hash_map;
+ struct rte_hash *hash_handle; /* cuckoo hash handler */
+ bool mask_added; /* If already got mask from consistent filter */
+};
+
+struct txgbe_rte_flow_rss_conf {
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint8_t key[TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
+ uint16_t queue[TXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
+};
+
+/* structure for interrupt relative data */
+struct txgbe_interrupt {
+ uint32_t flags;
+ uint32_t mask_misc;
+ /* to save original mask during delayed handler */
+ uint32_t mask_misc_orig;
+ uint32_t mask[2];
+};
+
+#define TXGBE_NB_STAT_MAPPING 32
+#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
+#define NB_QMAP_FIELDS_PER_QSM_REG 4
+#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
+struct txgbe_stat_mappings {
+ uint32_t tqsm[TXGBE_NB_STAT_MAPPING];
+ uint32_t rqsm[TXGBE_NB_STAT_MAPPING];
+};
+
+struct txgbe_vfta {
+ uint32_t vfta[TXGBE_VFTA_SIZE];
+};
+
+struct txgbe_hwstrip {
+ uint32_t bitmap[TXGBE_HWSTRIP_BITMAP_SIZE];
+};
+
+/*
+ * VF data used by the PF host only
+ */
+#define TXGBE_MAX_VF_MC_ENTRIES 30
+#define TXGBE_MAX_MR_RULE_ENTRIES 4 /* number of mirroring rules supported */
+
+struct txgbe_uta_info {
+ uint8_t uc_filter_type;
+ uint16_t uta_in_use;
+ uint32_t uta_shadow[TXGBE_MAX_UTA];
+};
+
+#define TXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */
+
+struct txgbe_mirror_info {
+ struct rte_eth_mirror_conf mr_conf[TXGBE_MAX_MIRROR_RULES];
+ /* store PF mirror rules configuration */
+};
+
+struct txgbe_vf_info {
+ uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
+ uint16_t vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES];
+ uint16_t num_vf_mc_hashes;
+ uint16_t default_vf_vlan_id;
+ uint16_t vlans_enabled;
+ bool clear_to_send;
+ uint16_t tx_rate[TXGBE_MAX_QUEUE_NUM_PER_VF];
+ uint16_t vlan_count;
+ uint8_t spoofchk_enabled;
+ uint8_t api_version;
+ uint16_t switch_domain_id;
+ uint16_t xcast_mode;
+ uint16_t mac_count;
+};
+
+TAILQ_HEAD(txgbe_5tuple_filter_list, txgbe_5tuple_filter);
+
+struct txgbe_5tuple_filter_info {
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t dst_port;
+ uint16_t src_port;
+ enum txgbe_5tuple_protocol proto; /* l4 protocol. */
+ uint8_t priority; /* seven levels (001b-111b), 111b is highest,
+ used when more than one filter matches. */
+ uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */
+ src_ip_mask:1, /* if mask is 1b, do not compare src ip. */
+ dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
+ src_port_mask:1, /* if mask is 1b, do not compare src port. */
+ proto_mask:1; /* if mask is 1b, do not compare protocol. */
+};
+
+/* 5tuple filter structure */
+struct txgbe_5tuple_filter {
+ TAILQ_ENTRY(txgbe_5tuple_filter) entries;
+ uint16_t index; /* the index of 5tuple filter */
+ struct txgbe_5tuple_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+#define TXGBE_5TUPLE_ARRAY_SIZE \
+ (RTE_ALIGN(TXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
+ (sizeof(uint32_t) * NBBY))
+
+struct txgbe_ethertype_filter {
+ uint16_t ethertype;
+ uint32_t etqf;
+ uint32_t etqs;
+ /**
+ * If this filter is added by configuration,
+ * it should not be removed.
+ */
+ bool conf;
+};
+
+/*
+ * Structure to store filters' info.
+ */
+struct txgbe_filter_info {
+ uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
+ /* store used ethertype filters */
+ struct txgbe_ethertype_filter ethertype_filters[TXGBE_ETF_ID_MAX];
+ /* Bit mask for every used 5tuple filter */
+ uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE];
+ struct txgbe_5tuple_filter_list fivetuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
+ /* store the rss filter info */
+ struct txgbe_rte_flow_rss_conf rss_info;
+};
+
+struct txgbe_l2_tn_key {
+ enum rte_eth_tunnel_type l2_tn_type;
+ uint32_t tn_id;
+};
+
+struct txgbe_l2_tn_filter {
+ TAILQ_ENTRY(txgbe_l2_tn_filter) entries;
+ struct txgbe_l2_tn_key key;
+ uint32_t pool;
+};
+
+TAILQ_HEAD(txgbe_l2_tn_filter_list, txgbe_l2_tn_filter);
+
+struct txgbe_l2_tn_info {
+ struct txgbe_l2_tn_filter_list l2_tn_list;
+ struct txgbe_l2_tn_filter **hash_map;
+ struct rte_hash *hash_handle;
+ bool e_tag_en; /* e-tag enabled */
+ bool e_tag_fwd_en; /* e-tag based forwarding enabled */
+ uint16_t e_tag_ether_type; /* ether type for e-tag */
+};
+
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+struct txgbe_macsec_setting {
+ uint8_t offload_en;
+ uint8_t encrypt_en;
+ uint8_t replayprotect_en;
+};
+
+/*
+ * Statistics counters collected by the MACsec
+ */
+struct txgbe_macsec_stats {
+ /* TX port statistics */
+ uint64_t out_pkts_untagged;
+ uint64_t out_pkts_encrypted;
+ uint64_t out_pkts_protected;
+ uint64_t out_octets_encrypted;
+ uint64_t out_octets_protected;
+
+ /* RX port statistics */
+ uint64_t in_pkts_untagged;
+ uint64_t in_pkts_badtag;
+ uint64_t in_pkts_nosci;
+ uint64_t in_pkts_unknownsci;
+ uint64_t in_octets_decrypted;
+ uint64_t in_octets_validated;
+
+ /* RX SC statistics */
+ uint64_t in_pkts_unchecked;
+ uint64_t in_pkts_delayed;
+ uint64_t in_pkts_late;
+
+ /* RX SA statistics */
+ uint64_t in_pkts_ok;
+ uint64_t in_pkts_invalid;
+ uint64_t in_pkts_notvalid;
+ uint64_t in_pkts_unusedsa;
+ uint64_t in_pkts_notusingsa;
+};
+
+/* The configuration of bandwidth */
+struct txgbe_bw_conf {
+ uint8_t tc_num; /* Number of TCs. */
+};
+
+/* Struct to store Traffic Manager shaper profile. */
+struct txgbe_tm_shaper_profile {
+ TAILQ_ENTRY(txgbe_tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);
+
+/* node type of Traffic Manager */
+enum txgbe_tm_node_type {
+ TXGBE_TM_NODE_TYPE_PORT,
+ TXGBE_TM_NODE_TYPE_TC,
+ TXGBE_TM_NODE_TYPE_QUEUE,
+ TXGBE_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct txgbe_tm_node {
+ TAILQ_ENTRY(txgbe_tm_node) node;
+ uint32_t id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t reference_count;
+ uint16_t no;
+ struct txgbe_tm_node *parent;
+ struct txgbe_tm_shaper_profile *shaper_profile;
+ struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(txgbe_tm_node_list, txgbe_tm_node);
+
+/* The configuration of Traffic Manager */
+struct txgbe_tm_conf {
+ struct txgbe_shaper_profile_list shaper_profile_list;
+ struct txgbe_tm_node *root; /* root node - port */
+ struct txgbe_tm_node_list tc_list; /* node list for all the TCs */
+ struct txgbe_tm_node_list queue_list; /* node list for all the queues */
+ /**
+ * The number of added TC nodes.
+ * It should be no more than the TC number of this port.
+ */
+ uint32_t nb_tc_node;
+ /**
+ * The number of added queue nodes.
+ * It should be no more than the queue number of this port.
+ */
+ uint32_t nb_queue_node;
+ /**
+ * This flag indicates whether the application may change the TM
+ * node configuration. When true, the configuration has already
+ * been applied to hardware and must not be changed.
+ * On-the-fly configuration is not supported, so the application
+ * should call the hierarchy_commit API when starting the port to
+ * set this flag to true, and the flag should be set back to false
+ * when the port is stopped.
+ */
+ bool committed;
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct txgbe_adapter {
+ struct txgbe_hw hw;
+ struct txgbe_hw_stats stats;
+ struct txgbe_macsec_stats macsec_stats;
+ struct txgbe_macsec_setting macsec_setting;
+ struct txgbe_hw_fdir_info fdir;
+ struct txgbe_interrupt intr;
+ struct txgbe_stat_mappings stat_mappings;
+ struct txgbe_vfta shadow_vfta;
+ struct txgbe_hwstrip hwstrip;
+ struct txgbe_dcb_config dcb_config;
+ struct txgbe_mirror_info mr_data;
+ struct txgbe_vf_info *vfdata;
+ struct txgbe_uta_info uta_info;
+ struct txgbe_filter_info filter;
+ struct txgbe_l2_tn_info l2_tn;
+ struct txgbe_bw_conf bw_conf;
+#ifdef RTE_LIBRTE_SECURITY
+ struct txgbe_ipsec ipsec;
+#endif
+ bool rx_bulk_alloc_allowed;
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
+ struct txgbe_tm_conf tm_conf;
+
+ /* For RSS reta table update */
+ uint8_t rss_reta_updated;
+
+ /* Used for VF link sync with PF's physical and logical (by checking
+ * mailbox status) link status.
+ */
+ uint8_t pflink_fullchk;
+ uint8_t mac_ctrl_frame_fwd;
+ rte_atomic32_t link_thread_running;
+ pthread_t link_thread_tid;
+};
+
+struct txgbe_vf_representor {
+ uint16_t vf_id;
+ uint16_t switch_domain_id;
+ struct rte_eth_dev *pf_ethdev;
+};
+
+int txgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int txgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
+
+#define TXGBE_DEV_REPRESENTOR(dev) \
+ ((struct txgbe_vf_representor *)(dev)->data->dev_private)
+
+#define TXGBE_DEV_ADAPTER(dev) \
+ ((struct txgbe_adapter *)(dev)->data->dev_private)
+
+#define TXGBE_DEV_HW(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->hw)
+
+#define TXGBE_DEV_STATS(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->stats)
+
+#define TXGBE_DEV_MACSEC_STATS(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->macsec_stats)
+
+#define TXGBE_DEV_MACSEC_SETTING(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->macsec_setting)
+
+#define TXGBE_DEV_INTR(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->intr)
+
+#define TXGBE_DEV_FDIR(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->fdir)
+
+#define TXGBE_DEV_STAT_MAPPINGS(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->stat_mappings)
+
+#define TXGBE_DEV_VFTA(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->shadow_vfta)
+
+#define TXGBE_DEV_HWSTRIP(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->hwstrip)
+
+#define TXGBE_DEV_DCB_CONFIG(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->dcb_config)
+
+#define TXGBE_DEV_VFDATA(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->vfdata)
+
+#define TXGBE_DEV_MR_INFO(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->mr_data)
+
+#define TXGBE_DEV_UTA_INFO(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->uta_info)
+
+#define TXGBE_DEV_FILTER(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->filter)
+
+#define TXGBE_DEV_L2_TN(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->l2_tn)
+
+#define TXGBE_DEV_BW_CONF(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf)
+
+#define TXGBE_DEV_TM_CONF(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf)
+
+#define TXGBE_DEV_IPSEC(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->ipsec)
+
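+/*
+ * Illustrative use of the accessors above (hypothetical snippet): each
+ * macro resolves into the single txgbe_adapter instance stored in
+ * dev->data->dev_private, e.g.
+ *
+ *   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ *   struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ */
+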
+/*
+ * RX/TX function prototypes
+ */
+void txgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
+void txgbe_dev_free_queues(struct rte_eth_dev *dev);
+
+void txgbe_dev_rx_queue_release(void *rxq);
+
+void txgbe_dev_tx_queue_release(void *txq);
+
+int txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+
+int txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+uint32_t txgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
+int txgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
+int txgbe_dev_rx_init(struct rte_eth_dev *dev);
+
+void txgbe_dev_tx_init(struct rte_eth_dev *dev);
+
+int txgbe_dev_rxtx_start(struct rte_eth_dev *dev);
+
+void txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+
+int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int txgbe_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+
+void txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+
+int txgbevf_dev_rx_init(struct rte_eth_dev *dev);
+
+void txgbevf_dev_tx_init(struct rte_eth_dev *dev);
+
+void txgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
+
+uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t txgbe_recv_pkts_lro_single_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+int txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+bool txgbe_rss_update_sp(enum txgbe_mac_type mac_type);
+
+int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter,
+ bool add);
+int txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int txgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+int
+txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore);
+int
+txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel);
+void txgbe_filterlist_init(void);
+void txgbe_filterlist_flush(void);
+
+void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector);
+
+/*
+ * Flow director function prototypes
+ */
+int txgbe_fdir_configure(struct rte_eth_dev *dev);
+int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+ uint16_t offset);
+int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct txgbe_fdir_rule *rule,
+ bool del, bool update);
+
+void txgbe_configure_pb(struct rte_eth_dev *dev);
+void txgbe_configure_port(struct rte_eth_dev *dev);
+void txgbe_configure_dcb(struct rte_eth_dev *dev);
+
+int
+txgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete);
+
+/*
+ * misc function prototypes
+ */
+void txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+
+void txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+
+void txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
+
+void txgbe_pf_host_init(struct rte_eth_dev *eth_dev);
+
+void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);
+
+void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
+
+int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
+
+uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+
+int txgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op, void *arg);
+void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
+
+extern const struct rte_flow_ops txgbe_flow_ops;
+
+void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
+void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
+int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
+
+int txgbe_vt_check(struct txgbe_hw *hw);
+int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void txgbe_tm_conf_init(struct rte_eth_dev *dev);
+void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
+int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t tx_rate);
+int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
+int txgbe_config_rss_filter(struct rte_eth_dev *dev,
+ struct txgbe_rte_flow_rss_conf *conf, bool add);
+
+void txgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
+ struct txgbe_macsec_setting *macsec_setting);
+
+void txgbe_dev_macsec_register_disable(struct rte_eth_dev *dev);
+
+void txgbe_dev_macsec_setting_save(struct rte_eth_dev *dev,
+ struct txgbe_macsec_setting *macsec_setting);
+
+void txgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev);
+
+static inline int
+txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
+ uint16_t ethertype)
+{
+ int i;
+
+ for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+ if (filter_info->ethertype_filters[i].ethertype == ethertype &&
+ (filter_info->ethertype_mask & (1 << i)))
+ return i;
+ }
+ return -1;
+}
+
+static inline int
+txgbe_ethertype_filter_insert(struct txgbe_filter_info *filter_info,
+ struct txgbe_ethertype_filter *ethertype_filter)
+{
+ int i;
+
+ for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+ if (filter_info->ethertype_mask & (1 << i))
+ continue;
+ filter_info->ethertype_mask |= 1 << i;
+ filter_info->ethertype_filters[i].ethertype =
+ ethertype_filter->ethertype;
+ filter_info->ethertype_filters[i].etqf =
+ ethertype_filter->etqf;
+ filter_info->ethertype_filters[i].etqs =
+ ethertype_filter->etqs;
+ filter_info->ethertype_filters[i].conf =
+ ethertype_filter->conf;
+ break;
+ }
+ return (i < TXGBE_ETF_ID_MAX ? i : -1);
+}
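+
+/*
+ * Worked example (illustrative only): with ethertype_mask == 0x5
+ * (slots 0 and 2 in use), the loop above skips indexes 0 and 2,
+ * claims slot 1 and returns 1. When all TXGBE_ETF_ID_MAX slots are
+ * taken, the loop falls through and -1 is returned.
+ */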
+
+static inline int
+txgbe_ethertype_filter_remove(struct txgbe_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= TXGBE_ETF_ID_MAX)
+ return -1;
+ filter_info->ethertype_mask &= ~(1 << idx);
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
+ filter_info->ethertype_filters[idx].etqs = 0;
+ filter_info->ethertype_filters[idx].conf = FALSE;
+ return idx;
+}
+
+/* High threshold controlling when to start sending XOFF frames. */
+#define TXGBE_FC_XOFF_HITH 128 /* KB */
+/* Low threshold controlling when to start sending XON frames. */
+#define TXGBE_FC_XON_LOTH 64 /* KB */
+
+/* Timer value included in XOFF frames. */
+#define TXGBE_FC_PAUSE_TIME 0x680
+
+/* Default value of Max Rx Queue */
+#define TXGBE_MAX_RX_QUEUE_NUM 128
+
+#define TXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
+#define TXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
+#define TXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */
+
+#define TXGBE_MMW_SIZE_DEFAULT 0x4
+#define TXGBE_MMW_SIZE_JUMBO_FRAME 0x14
+
+/*
+ * Default values for RX/TX configuration
+ */
+#define TXGBE_DEFAULT_RX_FREE_THRESH 32
+#define TXGBE_DEFAULT_RX_PTHRESH 8
+#define TXGBE_DEFAULT_RX_HTHRESH 8
+#define TXGBE_DEFAULT_RX_WTHRESH 0
+
+#define TXGBE_DEFAULT_TX_FREE_THRESH 32
+#define TXGBE_DEFAULT_TX_PTHRESH 32
+#define TXGBE_DEFAULT_TX_HTHRESH 0
+#define TXGBE_DEFAULT_TX_WTHRESH 0
+
+/* Additional timesync values. */
+#define NSEC_PER_SEC 1000000000L
+#define TXGBE_INCVAL_10GB 0xCCCCCC
+#define TXGBE_INCVAL_1GB 0x800000
+#define TXGBE_INCVAL_100 0xA00000
+#define TXGBE_INCVAL_10 0xC7F380
+#define TXGBE_INCVAL_FPGA 0x800000
+#define TXGBE_INCVAL_SHIFT_10GB 20
+#define TXGBE_INCVAL_SHIFT_1GB 18
+#define TXGBE_INCVAL_SHIFT_100 15
+#define TXGBE_INCVAL_SHIFT_10 12
+#define TXGBE_INCVAL_SHIFT_FPGA 17
+
+#define TXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+
+/* Store a statistics name and its offset in the stats structure */
+struct rte_txgbe_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned offset;
+};
+
+const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+int txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+int txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+void txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
+ uint16_t queue, bool on);
+void txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+ int mask);
+void txgbe_dev_setup_link_alarm_handler(void *param);
+void txgbe_read_stats_registers(struct txgbe_hw *hw,
+ struct txgbe_hw_stats *hw_stats);
+
+bool is_txgbe_supported(struct rte_eth_dev *dev);
+
+#endif /* _TXGBE_ETHDEV_H_ */
new file mode 100644
@@ -0,0 +1,1396 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_hash_crc.h>
+#ifdef RTE_LIBRTE_SECURITY
+#include <rte_security_driver.h>
+#endif
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_rxtx.h"
+#include "txgbe_regs_group.h"
+
+static const struct reg_info txgbevf_regs_general[] = {
+ {TXGBE_VFRST, 1, 1, "TXGBE_VFRST"},
+ {TXGBE_VFSTATUS, 1, 1, "TXGBE_VFSTATUS"},
+ {TXGBE_VFMBCTL, 1, 1, "TXGBE_VFMAILBOX"},
+ {TXGBE_VFMBX, 16, 4, "TXGBE_VFMBX"},
+ {TXGBE_VFPBWRAP, 1, 1, "TXGBE_VFPBWRAP"},
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbevf_regs_interrupt[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbevf_regs_rxdma[] = {
+ {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbevf_regs_tx[] = {
+ {0, 0, 0, ""}
+};
+
+/* VF registers */
+static const struct reg_info *txgbevf_regs[] = {
+ txgbevf_regs_general,
+ txgbevf_regs_interrupt,
+ txgbevf_regs_rxdma,
+ txgbevf_regs_tx,
+ NULL};
+
+#define TXGBEVF_PMD_NAME "rte_txgbevf_pmd" /* PMD name */
+static int txgbevf_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned n);
+static int txgbevf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+/* For Virtual Function support */
+static int txgbevf_dev_configure(struct rte_eth_dev *dev);
+static int txgbevf_dev_start(struct rte_eth_dev *dev);
+static int txgbevf_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static void txgbevf_dev_stop(struct rte_eth_dev *dev);
+static void txgbevf_dev_close(struct rte_eth_dev *dev);
+static void txgbevf_intr_disable(struct rte_eth_dev *dev);
+static void txgbevf_intr_enable(struct rte_eth_dev *dev);
+static int txgbevf_dev_stats_reset(struct rte_eth_dev *dev);
+static int txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static void txgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static void txgbevf_configure_msix(struct rte_eth_dev *dev);
+static int txgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int txgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static void txgbevf_dev_interrupt_handler(void *param);
+
+/*
+ * The set of PCI devices this driver supports (for VF)
+ */
+static const struct rte_pci_id pci_id_txgbevf_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF_HV) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = TXGBE_RING_DESC_MAX,
+ .nb_min = TXGBE_RING_DESC_MIN,
+ .nb_align = TXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = TXGBE_RING_DESC_MAX,
+ .nb_min = TXGBE_RING_DESC_MIN,
+ .nb_align = TXGBE_TXD_ALIGN,
+ .nb_seg_max = TXGBE_TX_MAX_SEG,
+ .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
+};
+
+static const struct eth_dev_ops txgbevf_eth_dev_ops;
+
+static const struct rte_txgbe_xstats_name_off rte_txgbevf_stats_strings[] = {
+ {"rx_multicast_packets_0", offsetof(struct txgbevf_hw_stats, qp[0].vfmprc)},
+ {"rx_multicast_packets_1", offsetof(struct txgbevf_hw_stats, qp[1].vfmprc)},
+ {"rx_multicast_packets_2", offsetof(struct txgbevf_hw_stats, qp[2].vfmprc)},
+ {"rx_multicast_packets_3", offsetof(struct txgbevf_hw_stats, qp[3].vfmprc)},
+ {"rx_multicast_packets_4", offsetof(struct txgbevf_hw_stats, qp[4].vfmprc)},
+ {"rx_multicast_packets_5", offsetof(struct txgbevf_hw_stats, qp[5].vfmprc)},
+ {"rx_multicast_packets_6", offsetof(struct txgbevf_hw_stats, qp[6].vfmprc)},
+ {"rx_multicast_packets_7", offsetof(struct txgbevf_hw_stats, qp[7].vfmprc)}
+};
+
+#define TXGBEVF_NB_XSTATS (sizeof(rte_txgbevf_stats_strings) / \
+ sizeof(rte_txgbevf_stats_strings[0]))
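+
+/*
+ * Illustrative usage sketch (not part of the driver): an application
+ * reads these per-queue counters through the generic xstats API, e.g.
+ *
+ *   struct rte_eth_xstat xs[TXGBEVF_NB_XSTATS];
+ *   int n = rte_eth_xstats_get(port_id, xs, TXGBEVF_NB_XSTATS);
+ *
+ * where each xs[i].id indexes rte_txgbevf_stats_strings[] above and
+ * xs[i].value is filled in by txgbevf_dev_xstats_get() below.
+ */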
+
+/*
+ * Negotiate mailbox API version with the PF.
+ * After reset, the API version is always set to the basic one (txgbe_mbox_api_10).
+ * Then we try to negotiate, starting with the most recent one.
+ * If all negotiation attempts fail, then we will proceed with
+ * the default one (txgbe_mbox_api_10).
+ */
+static void
+txgbevf_negotiate_api(struct txgbe_hw *hw)
+{
+ int32_t i;
+
+ /* start with highest supported, proceed down */
+ static const int sup_ver[] = {
+ txgbe_mbox_api_13,
+ txgbe_mbox_api_12,
+ txgbe_mbox_api_11,
+ txgbe_mbox_api_10,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(sup_ver); i++) {
+ if (txgbevf_negotiate_api_version(hw, sup_ver[i]) == 0)
+ break;
+ }
+}
+
+static void
+generate_random_mac_addr(struct rte_ether_addr *mac_addr)
+{
+ uint64_t random;
+
+ /* Set Organizationally Unique Identifier (OUI) prefix. */
+ mac_addr->addr_bytes[0] = 0x00;
+ mac_addr->addr_bytes[1] = 0x09;
+ mac_addr->addr_bytes[2] = 0xC0;
+ /* Force indication of locally assigned MAC address. */
+ mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
+ /* Generate the last 3 bytes of the MAC address with a random number. */
+ random = rte_rand();
+ memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
+/*
+ * Virtual Function device init
+ */
+static int
+eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int err;
+ uint32_t tc, tcs;
+ struct txgbe_adapter *ad = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
+ struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
+ struct rte_ether_addr *perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &txgbevf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ struct txgbe_tx_queue *txq;
+ /* TX queue function in primary, set by the last queue initialized.
+ * The Tx queue may not have been initialized by the primary process.
+ */
+ if (eth_dev->data->tx_queues) {
+ txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
+ txgbe_set_tx_function(eth_dev, txq);
+ } else {
+ /* Use default TX function if we get here */
+ PMD_INIT_LOG(NOTICE,
+ "No TX queues configured yet. Using default TX function.");
+ }
+
+ txgbe_set_rx_function(eth_dev);
+
+ return 0;
+ }
+
+ rte_atomic32_clear(&ad->link_thread_running);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* initialize the hw strip bitmap*/
+ memset(hwstrip, 0, sizeof(*hwstrip));
+
+ /* Initialize the shared code (base driver) */
+ err = txgbe_init_shared_code(hw);
+ if (err != 0) {
+ PMD_INIT_LOG(ERR, "Shared code init failed for txgbevf: %d", err);
+ return -EIO;
+ }
+
+ /* init_mailbox_params */
+ hw->mbx.init_params(hw);
+
+ /* Reset the hw statistics */
+ txgbevf_dev_stats_reset(eth_dev);
+
+ /* Disable the interrupts for VF */
+ txgbevf_intr_disable(eth_dev);
+
+ hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
+ err = hw->mac.reset_hw(hw);
+
+ /*
+ * The VF reset operation returns the TXGBE_ERR_INVALID_MAC_ADDR when
+ * the underlying PF driver has not assigned a MAC address to the VF.
+ * In this case, assign a random MAC address.
+ */
+ if ((err != 0) && (err != TXGBE_ERR_INVALID_MAC_ADDR)) {
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
+ /*
+ * This error code will be propagated to the app by
+ * rte_eth_dev_reset, so use a public error code rather than
+ * the internal-only TXGBE_ERR_RESET_FAILED
+ */
+ return -EAGAIN;
+ }
+
+ /* negotiate mailbox API version to use with the PF. */
+ txgbevf_negotiate_api(hw);
+
+ /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
+ txgbevf_get_queues(hw, &tcs, &tc);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("txgbevf", RTE_ETHER_ADDR_LEN *
+ hw->mac.num_rar_entries, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ return -ENOMEM;
+ }
+
+ /* Tell rte_eth_dev_close() that it should also release
+ * the private port resources.
+ */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
+ /* Generate a random MAC address, if none was assigned by PF. */
+ if (rte_is_zero_ether_addr(perm_addr)) {
+ generate_random_mac_addr(perm_addr);
+ err = txgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
+ if (err) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ return err;
+ }
+ PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+ PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ perm_addr->addr_bytes[0],
+ perm_addr->addr_bytes[1],
+ perm_addr->addr_bytes[2],
+ perm_addr->addr_bytes[3],
+ perm_addr->addr_bytes[4],
+ perm_addr->addr_bytes[5]);
+ }
+
+ /* Copy the permanent MAC address */
+ rte_ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]);
+
+ /* reset the hardware with the new settings */
+ err = hw->mac.start_hw(hw);
+ switch (err) {
+ case 0:
+ break;
+
+ default:
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
+ return -EIO;
+ }
+
+ /* enter promiscuous mode */
+ txgbevf_dev_promiscuous_enable(eth_dev);
+
+ rte_intr_callback_register(intr_handle,
+ txgbevf_dev_interrupt_handler, eth_dev);
+ rte_intr_enable(intr_handle);
+ txgbevf_intr_enable(eth_dev);
+
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id, "txgbe_mac_raptor_vf");
+
+ return 0;
+}
+
+/* Virtual Function device uninit */
+static int
+eth_txgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ txgbevf_dev_close(eth_dev);
+
+ return 0;
+}
+
+static int eth_txgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct txgbe_adapter), eth_txgbevf_dev_init);
+}
+
+static int eth_txgbevf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_txgbevf_dev_uninit);
+}
+
+/*
+ * virtual function driver struct
+ */
+static struct rte_pci_driver rte_txgbevf_pmd = {
+ .id_table = pci_id_txgbevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_txgbevf_pci_probe,
+ .remove = eth_txgbevf_pci_remove,
+};
+
+static int txgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned limit)
+{
+ unsigned i;
+
+ if (limit < TXGBEVF_NB_XSTATS && xstats_names != NULL)
+ return -ENOMEM;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < TXGBEVF_NB_XSTATS; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_txgbevf_stats_strings[i].name);
+ return TXGBEVF_NB_XSTATS;
+}
+
+static void
+txgbevf_update_stats(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
+ TXGBE_DEV_STATS(dev);
+ unsigned i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ /* Good Rx packet, include VF loopback */
+ TXGBE_UPDCNT32(TXGBE_QPRXPKT(i),
+ hw_stats->qp[i].last_vfgprc, hw_stats->qp[i].vfgprc);
+
+ /* Good Rx octets, include VF loopback */
+ TXGBE_UPDCNT36(TXGBE_QPRXOCTL(i),
+ hw_stats->qp[i].last_vfgorc, hw_stats->qp[i].vfgorc);
+
+ /* Rx Multicast Packets */
+ TXGBE_UPDCNT32(TXGBE_QPRXMPKT(i),
+ hw_stats->qp[i].last_vfmprc, hw_stats->qp[i].vfmprc);
+ }
+ hw->rx_loaded = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ /* Good Tx packet, include VF loopback */
+ TXGBE_UPDCNT32(TXGBE_QPTXPKT(i),
+ hw_stats->qp[i].last_vfgptc, hw_stats->qp[i].vfgptc);
+
+ /* Good Tx octets, include VF loopback */
+ TXGBE_UPDCNT36(TXGBE_QPTXOCTL(i),
+ hw_stats->qp[i].last_vfgotc, hw_stats->qp[i].vfgotc);
+ }
+ hw->offset_loaded = 0;
+}
+
+static int
+txgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned n)
+{
+ struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
+ TXGBE_DEV_STATS(dev);
+ unsigned i;
+
+ if (n < TXGBEVF_NB_XSTATS)
+ return TXGBEVF_NB_XSTATS;
+
+ txgbevf_update_stats(dev);
+
+ if (!xstats)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < TXGBEVF_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_txgbevf_stats_strings[i].offset);
+ }
+
+ return TXGBEVF_NB_XSTATS;
+}
+
+static int
+txgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
+ TXGBE_DEV_STATS(dev);
+ uint32_t i;
+
+ txgbevf_update_stats(dev);
+
+ if (stats == NULL)
+ return -EINVAL;
+
+ stats->ipackets = 0;
+ stats->ibytes = 0;
+ stats->opackets = 0;
+ stats->obytes = 0;
+
+ for (i = 0; i < 8; i++) {
+ stats->ipackets += hw_stats->qp[i].vfgprc;
+ stats->ibytes += hw_stats->qp[i].vfgorc;
+ stats->opackets += hw_stats->qp[i].vfgptc;
+ stats->obytes += hw_stats->qp[i].vfgotc;
+ }
+
+ return 0;
+}
+
+static int
+txgbevf_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
+ TXGBE_DEV_STATS(dev);
+ uint32_t i;
+
+ /* Sync HW register to the last stats */
+ txgbevf_dev_stats_get(dev, NULL);
+
+ /* reset HW current stats*/
+ for (i = 0; i < 8; i++) {
+ hw_stats->qp[i].vfgprc = 0;
+ hw_stats->qp[i].vfgorc = 0;
+ hw_stats->qp[i].vfgptc = 0;
+ hw_stats->qp[i].vfgotc = 0;
+ }
+
+ return 0;
+}
+
+static int
+txgbevf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+ dev_info->min_rx_bufsize = 1024;
+ dev_info->max_rx_pktlen = TXGBE_FRAME_SIZE_MAX;
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
+ dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->max_vmdq_pools = ETH_64_POOLS;
+ dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
+ dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
+ .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
+ .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
+ .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
+ .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ return 0;
+}
+
+static int
+txgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return txgbe_dev_link_update_share(dev, wait_to_complete);
+}
+
+/*
+ * Virtual Function operations
+ */
+static void
+txgbevf_intr_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ wr32(hw, TXGBE_VFIMS, TXGBE_VFIMS_MASK);
+
+ txgbe_flush(hw);
+
+ /* Clear mask value. */
+ intr->mask_misc = TXGBE_VFIMS_MASK;
+}
+
+static void
+txgbevf_intr_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* VF enable interrupt autoclean */
+ wr32(hw, TXGBE_VFIMC, TXGBE_VFIMC_MASK);
+
+ txgbe_flush(hw);
+
+ intr->mask_misc = 0;
+}
+
+static int
+txgbevf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+ PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+ dev->data->port_id);
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+ /*
+ * The VF has no ability to enable/disable HW CRC stripping.
+ * Keep the behavior consistent with the host PF.
+ */
+#ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
+ if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+ }
+#else
+ if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+ PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ }
+#endif
+
+ /*
+ * Initialize to TRUE. If any Rx queue fails to meet the bulk
+ * allocation or vector Rx preconditions, it will be reset.
+ */
+ adapter->rx_bulk_alloc_allowed = true;
+
+ return 0;
+}
+
+static int
+txgbevf_dev_start(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t intr_vector = 0;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ int err, mask = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Stop the link setup handler before resetting the HW. */
+ rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
+
+ err = hw->mac.reset_hw(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+ return err;
+ }
+ hw->mac.get_link_status = true;
+
+ /* negotiate mailbox API version to use with the PF. */
+ txgbevf_negotiate_api(hw);
+
+ txgbevf_dev_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ err = txgbevf_dev_rx_init(dev);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
+ txgbe_dev_clear_queues(dev);
+ return err;
+ }
+
+ /* Set vfta */
+ txgbevf_set_vfta_all(dev, 1);
+
+ /* Set HW strip */
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ err = txgbevf_vlan_offload_config(dev, mask);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
+ txgbe_dev_clear_queues(dev);
+ return err;
+ }
+
+ txgbevf_dev_rxtx_start(dev);
+
+ /* check and configure queue intr-vector mapping */
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
+ /* According to the datasheet, only vectors 0/1/2 can be used;
+ * for now a single vector is used for the Rx queues.
+ */
+ intr_vector = 1;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+ txgbevf_configure_msix(dev);
+
+ /* When a VF port is bound to VFIO-PCI, only the miscellaneous interrupt
+ * is mapped to VFIO vector 0 in eth_txgbevf_dev_init().
+ * If the previous VFIO interrupt mapping set in eth_txgbevf_dev_init()
+ * is not cleared, the following rte_intr_enable() will fail when it
+ * tries to map Rx queue interrupts to other VFIO vectors.
+ * So clear the uio/vfio intr/eventfd first to avoid failure.
+ */
+ rte_intr_disable(intr_handle);
+
+ rte_intr_enable(intr_handle);
+
+ /* Re-enable interrupt for VF */
+ txgbevf_intr_enable(dev);
+
+ /*
+ * Update link status right before return, because it may
+ * start link configuration process in a separate thread.
+ */
+ txgbevf_dev_link_update(dev, 0);
+
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+static void
+txgbevf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ if (hw->adapter_stopped)
+ return;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
+
+ txgbevf_intr_disable(dev);
+
+ hw->adapter_stopped = 1;
+ hw->mac.stop_hw(hw);
+
+ /*
+ * Clear what we set, but keep shadow_vfta so it can be
+ * restored after the device starts again.
+ */
+ txgbevf_set_vfta_all(dev, 0);
+
+ /* Clear stored conf */
+ dev->data->scattered_rx = 0;
+
+ txgbe_dev_clear_queues(dev);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ adapter->rss_reta_updated = 0;
+}
+
+static void
+txgbevf_dev_close(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->mac.reset_hw(hw);
+
+ txgbevf_dev_stop(dev);
+
+ txgbe_dev_free_queues(dev);
+
+ /*
+ * Remove the VF MAC address to ensure
+ * that the VF traffic goes to the PF
+ * after stop, close and detach of the VF.
+ */
+ txgbevf_remove_mac_addr(dev, 0);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ /* Disable the interrupts for VF */
+ txgbevf_intr_disable(dev);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ txgbevf_dev_interrupt_handler, dev);
+}
+
+/*
+ * Reset VF device
+ */
+static int
+txgbevf_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = eth_txgbevf_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_txgbevf_dev_init(dev);
+
+ return ret;
+}
+
+static void txgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
+ int i = 0, j = 0, vfta = 0, mask = 1;
+
+ for (i = 0; i < TXGBE_VFTA_SIZE; i++) {
+ vfta = shadow_vfta->vfta[i];
+ if (vfta) {
+ mask = 1;
+ for (j = 0; j < 32; j++) {
+ if (vfta & mask)
+ txgbe_set_vfta(hw, (i << 5) + j, 0,
+ on, false);
+ mask <<= 1;
+ }
+ }
+ }
+}
+
+static int
+txgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
+ uint32_t vid_idx = 0;
+ uint32_t vid_bit = 0;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* vind is not used in the VF driver; set it to 0 (see txgbe_set_vfta_vf) */
+ ret = hw->mac.set_vfta(hw, vlan_id, 0, !!on, false);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to set VF vlan");
+ return ret;
+ }
+ vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
+
+ /* Save what we set and restore it after device reset */
+ if (on)
+ shadow_vfta->vfta[vid_idx] |= vid_bit;
+ else
+ shadow_vfta->vfta[vid_idx] &= ~vid_bit;
+
+ return 0;
+}
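+
+/*
+ * Worked example (illustrative only): vlan_id 100 maps to
+ * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F)
+ * = 1 << 4, i.e. bit 4 of shadow_vfta->vfta[3]. The shadow copy is
+ * what txgbevf_set_vfta_all() replays across device start/stop.
+ */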
+
+static void
+txgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue >= hw->mac.max_rx_queues)
+ return;
+
+ ctrl = rd32(hw, TXGBE_RXCFG(queue));
+ txgbe_dev_save_rx_queue(hw, queue);
+ if (on)
+ ctrl |= TXGBE_RXCFG_VLAN;
+ else
+ ctrl &= ~TXGBE_RXCFG_VLAN;
+ wr32(hw, TXGBE_RXCFG(queue), 0);
+ msec_delay(100);
+ txgbe_dev_store_rx_queue(hw, queue);
+ wr32m(hw, TXGBE_RXCFG(queue),
+ TXGBE_RXCFG_VLAN | TXGBE_RXCFG_ENA, ctrl);
+
+ txgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
+}
+
+static int
+txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct txgbe_rx_queue *rxq;
+ uint16_t i;
+ int on = 0;
+
+ /* The VF only supports the HW VLAN strip feature; others are not supported */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ txgbevf_vlan_strip_queue_set(dev, i, on);
+ }
+ }
+
+ return 0;
+}
+
+static int
+txgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ txgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ txgbevf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
+static int
+txgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t vec = TXGBE_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = TXGBE_RX_VEC_START;
+ intr->mask_misc &= ~(1 << vec);
+ RTE_SET_USED(queue_id);
+ wr32(hw, TXGBE_VFIMC, ~intr->mask_misc);
+
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+txgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = TXGBE_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = TXGBE_RX_VEC_START;
+ intr->mask_misc |= (1 << vec);
+ RTE_SET_USED(queue_id);
+ wr32(hw, TXGBE_VFIMS, intr->mask_misc);
+
+ return 0;
+}
+
+static void
+txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= TXGBE_VFIVAR_VLD;
+ tmp = rd32(hw, TXGBE_VFIVARMISC);
+ tmp &= ~0xFF;
+ tmp |= msix_vector;
+ wr32(hw, TXGBE_VFIVARMISC, tmp);
+ } else {
+ /* rx or tx cause */
+ /* Workaround for lost ICR */
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ wr32(hw, TXGBE_VFIVAR(queue >> 1), tmp);
+ }
+}
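+
+/*
+ * Worked example (illustrative only): each VFIVAR register packs four
+ * 8-bit entries, Rx/Tx for an even/odd queue pair. For Rx queue 3
+ * (direction 0), idx = 16 * (3 & 1) + 8 * 0 = 16, so the vector is
+ * written to bits 23:16 of TXGBE_VFIVAR(1); Tx queue 2 (direction 1)
+ * uses bits 15:8 of the same register.
+ */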
+
+static void
+txgbevf_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t q_idx;
+ uint32_t vector_idx = TXGBE_MISC_VEC_ID;
+ uint32_t base = TXGBE_MISC_VEC_ID;
+
+ /* Configure VF other cause ivar */
+ txgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+
+ /* Don't configure the MSI-X register if no mapping has been done
+ * between the intr vector and the event fd.
+ */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ if (rte_intr_allow_others(intr_handle)) {
+ base = TXGBE_RX_VEC_START;
+ vector_idx = TXGBE_RX_VEC_START;
+ }
+
+ /* Configure all RX queues of VF */
+ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
+ /* Force all queues to use vector 0,
+ * as TXGBE_VF_MAXMSIVECOTR = 1
+ */
+ txgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
+ intr_handle->intr_vec[q_idx] = vector_idx;
+ if (vector_idx < base + intr_handle->nb_efd - 1)
+ vector_idx++;
+ }
+
+ /* As the RX queue setting above shows, all queues use vector 0.
+ * Set only the ITR value of TXGBE_MISC_VEC_ID.
+ */
+ wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
+ TXGBE_ITR_IVAL(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | TXGBE_ITR_WRDSA);
+}
+
+static int
+txgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int err;
+
+ /*
+ * On a VF, adding the same MAC address again is not an idempotent
+ * operation. Trap this case to avoid exhausting the [very limited]
+ * set of PF resources used to store VF MAC addresses.
+ */
+ if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct rte_ether_addr)) == 0)
+ return -1;
+ err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (err != 0)
+ PMD_DRV_LOG(ERR, "Unable to add MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x - err=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ err);
+ return err;
+}
+
+static void
+txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_ether_addr *perm_addr = (struct rte_ether_addr *)hw->mac.perm_addr;
+ struct rte_ether_addr *mac_addr;
+ uint32_t i;
+ int err;
+
+ /*
+ * The TXGBE_VF_SET_MACVLAN command of the txgbe-pf driver does
+ * not support the deletion of a given MAC address.
+ * Instead, it requires deleting all MAC addresses, then adding
+ * them all back, except the one to be deleted.
+ */
+ (void)txgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+ /*
+ * Add back all MAC addresses, except the deleted one
+ * and the permanent MAC address.
+ */
+ for (i = 0, mac_addr = dev->data->mac_addrs;
+ i < hw->mac.num_rar_entries; i++, mac_addr++) {
+ /* Skip the deleted MAC address */
+ if (i == index)
+ continue;
+ /* Skip NULL MAC addresses */
+ if (rte_is_zero_ether_addr(mac_addr))
+ continue;
+ /* Skip the permanent MAC address */
+ if (memcmp(perm_addr, mac_addr, sizeof(struct rte_ether_addr)) == 0)
+ continue;
+ err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (err != 0)
+ PMD_DRV_LOG(ERR,
+ "Adding again MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x failed "
+ "err=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ err);
+ }
+}
+
+static int
+txgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ hw->mac.set_rar(hw, 0, (void *)addr, 0, 0);
+
+ return 0;
+}
+
+static int
+txgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct txgbe_hw *hw;
+ uint32_t max_frame = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+
+ hw = TXGBE_DEV_HW(dev);
+
+ if ((mtu < RTE_ETHER_MIN_MTU) || (max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN))
+ return -EINVAL;
+
+ /* Refuse an MTU that requires the support of scattered packets
+ * when this feature has not been enabled before.
+ */
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
+ (max_frame + 2 * TXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ return -EINVAL;
+
+ /*
+ * When supported by the underlying PF driver, use the TXGBE_VF_SET_MTU
+ * request of version 2.0 of the mailbox API.
+ * For now, use the TXGBE_VF_SET_LPE request of version 1.0
+ * of the mailbox API.
+ */
+ txgbevf_rlpml_set_vf(hw, max_frame);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+ return 0;
+}
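+
+/*
+ * Worked example (illustrative only): a standard MTU of 1500 gives
+ * max_frame = 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes,
+ * which txgbevf_rlpml_set_vf() passes to the PF as the new receive
+ * packet length limit.
+ */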
+
+static int
+txgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = txgbevf_regs[g_ind++]))
+ count += txgbe_regs_group_count(reg_group);
+
+ return count;
+}
+
+static int
+txgbevf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ if (data == NULL) {
+ regs->length = txgbevf_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)txgbevf_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = txgbevf_regs[g_ind++]))
+ count += txgbe_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+txgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret;
+
+ switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_PROMISC)) {
+ case 0:
+ ret = 0;
+ break;
+ case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
+ ret = -ENOTSUP;
+ break;
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+txgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret;
+
+ switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_NONE)) {
+ case 0:
+ ret = 0;
+ break;
+ case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
+ ret = -ENOTSUP;
+ break;
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+txgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret;
+ int mode = TXGBEVF_XCAST_MODE_ALLMULTI;
+
+ switch (hw->mac.update_xcast_mode(hw, mode)) {
+ case 0:
+ ret = 0;
+ break;
+ case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
+ ret = -ENOTSUP;
+ break;
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+txgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret;
+
+ switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_MULTI)) {
+ case 0:
+ ret = 0;
+ break;
+ case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
+ ret = -ENOTSUP;
+ break;
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+
+ return ret;
+}
+
+static void txgbevf_mbx_process(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ u32 in_msg = 0;
+
+ /* peek the message first */
+ in_msg = rd32(hw, TXGBE_VFMBX);
+
+ /* PF reset VF event */
+ if (in_msg == TXGBE_PF_CONTROL_MSG) {
+ /* dummy mbx read to ack pf */
+ if (txgbe_read_mbx(hw, &in_msg, 1, 0))
+ return;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
+}
+
+static int
+txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ txgbevf_intr_disable(dev);
+
+ /* read-on-clear nic registers here */
+ eicr = rd32(hw, TXGBE_VFICR);
+ intr->flags = 0;
+
+ /* only one misc vector supported - mailbox */
+ eicr &= TXGBE_VFICR_MASK;
+ /* Workaround for lost ICR */
+ intr->flags |= TXGBE_FLAG_MAILBOX;
+
+ return 0;
+}
+
+static int
+txgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+ if (intr->flags & TXGBE_FLAG_MAILBOX) {
+ txgbevf_mbx_process(dev);
+ intr->flags &= ~TXGBE_FLAG_MAILBOX;
+ }
+
+ txgbevf_intr_enable(dev);
+
+ return 0;
+}
+
+static void
+txgbevf_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ txgbevf_dev_interrupt_get_status(dev);
+ txgbevf_dev_interrupt_action(dev);
+}
+
+/*
+ * dev_ops for the virtual function; only the bare necessities
+ * for basic VF operation are implemented.
+ */
+static const struct eth_dev_ops txgbevf_eth_dev_ops = {
+ .dev_configure = txgbevf_dev_configure,
+ .dev_start = txgbevf_dev_start,
+ .dev_stop = txgbevf_dev_stop,
+ .link_update = txgbevf_dev_link_update,
+ .stats_get = txgbevf_dev_stats_get,
+ .xstats_get = txgbevf_dev_xstats_get,
+ .stats_reset = txgbevf_dev_stats_reset,
+ .xstats_reset = txgbevf_dev_stats_reset,
+ .xstats_get_names = txgbevf_dev_xstats_get_names,
+ .dev_close = txgbevf_dev_close,
+ .dev_reset = txgbevf_dev_reset,
+ .promiscuous_enable = txgbevf_dev_promiscuous_enable,
+ .promiscuous_disable = txgbevf_dev_promiscuous_disable,
+ .allmulticast_enable = txgbevf_dev_allmulticast_enable,
+ .allmulticast_disable = txgbevf_dev_allmulticast_disable,
+ .dev_infos_get = txgbevf_dev_info_get,
+ .dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
+ .mtu_set = txgbevf_dev_set_mtu,
+ .vlan_filter_set = txgbevf_vlan_filter_set,
+ .vlan_strip_queue_set = txgbevf_vlan_strip_queue_set,
+ .vlan_offload_set = txgbevf_vlan_offload_set,
+ .rx_queue_setup = txgbe_dev_rx_queue_setup,
+ .rx_queue_release = txgbe_dev_rx_queue_release,
+ .rx_descriptor_done = txgbe_dev_rx_descriptor_done,
+ .rx_descriptor_status = txgbe_dev_rx_descriptor_status,
+ .tx_descriptor_status = txgbe_dev_tx_descriptor_status,
+ .tx_queue_setup = txgbe_dev_tx_queue_setup,
+ .tx_queue_release = txgbe_dev_tx_queue_release,
+ .rx_queue_intr_enable = txgbevf_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = txgbevf_dev_rx_queue_intr_disable,
+ .mac_addr_add = txgbevf_add_mac_addr,
+ .mac_addr_remove = txgbevf_remove_mac_addr,
+ .set_mc_addr_list = txgbe_dev_set_mc_addr_list,
+ .rxq_info_get = txgbe_rxq_info_get,
+ .txq_info_get = txgbe_txq_info_get,
+ .mac_addr_set = txgbevf_set_default_mac_addr,
+ .get_reg = txgbevf_get_regs,
+ .reta_update = txgbe_dev_rss_reta_update,
+ .reta_query = txgbe_dev_rss_reta_query,
+ .rss_hash_update = txgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = txgbe_dev_rss_hash_conf_get,
+ .tx_done_cleanup = txgbe_dev_tx_done_cleanup,
+};
+
+RTE_PMD_REGISTER_PCI(net_txgbe_vf, rte_txgbevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_txgbe_vf, pci_id_txgbevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_txgbe_vf, "* igb_uio | vfio-pci");
new file mode 100644
@@ -0,0 +1,1341 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_malloc.h>
+#include <rte_vxlan.h>
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+
+#define TXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /* default flexbytes offset in bytes */
+#define TXGBE_FDIR_MAX_FLEX_LEN 2 /* len in bytes of flexbytes */
+#define TXGBE_MAX_FLX_SOURCE_OFF 62
+#define TXGBE_FDIRCMD_CMD_INTERVAL_US 10
+
+#define TXGBE_FDIR_FLOW_TYPES ( \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
+
+#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
+ uint8_t ipv6_addr[16]; \
+ uint8_t i; \
+ rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
+ (ipv6m) = 0; \
+ for (i = 0; i < sizeof(ipv6_addr); i++) { \
+ if (ipv6_addr[i] == UINT8_MAX) \
+ (ipv6m) |= 1 << i; \
+ else if (ipv6_addr[i] != 0) { \
+ PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
+ return -EINVAL; \
+ } \
+ } \
+} while (0)
+
+#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
+ uint8_t ipv6_addr[16]; \
+ uint8_t i; \
+ for (i = 0; i < sizeof(ipv6_addr); i++) { \
+ if ((ipv6m) & (1 << i)) \
+ ipv6_addr[i] = UINT8_MAX; \
+ else \
+ ipv6_addr[i] = 0; \
+ } \
+ rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
+} while (0)
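+
+/*
+ * Worked example (illustrative only): for an IPv6 mask of
+ * ffff:ffff:ffff:ffff:: (bytes 0-7 are 0xFF, the rest 0),
+ * IPV6_ADDR_TO_MASK sets bits 0-7 of the 16-bit mask, i.e. 0x00FF,
+ * and IPV6_MASK_TO_ADDR reverses the mapping. Any byte that is
+ * neither 0x00 nor 0xFF is rejected, since the mask only has byte
+ * granularity.
+ */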
+
+#define TXGBE_FDIRIP6M_INNER_MAC_SHIFT 4
+
+static int fdir_erase_filter_raptor(struct txgbe_hw *hw, uint32_t fdirhash);
+static int txgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, uint32_t flex);
+static int txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl);
+static int txgbe_fdir_filter_to_atr_input(
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct txgbe_atr_input *input,
+ enum rte_fdir_mode mode);
+static uint32_t txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
+ uint32_t key);
+static uint32_t atr_compute_signature_hash(struct txgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc);
+static uint32_t atr_compute_perfect_hash(struct txgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc);
+static int fdir_write_perfect_filter(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input, uint8_t queue,
+ uint32_t fdircmd, uint32_t fdirhash,
+ enum rte_fdir_mode mode);
+static int fdir_add_signature_filter(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input, uint8_t queue, uint32_t fdircmd,
+ uint32_t fdirhash);
+static int txgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update);
+static int txgbe_fdir_flush(struct rte_eth_dev *dev);
+static void txgbe_fdir_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_info *fdir_info);
+static void txgbe_fdir_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_fdir_stats *fdir_stats);
+
+/**
+ * Initialize Flow Director control registers.
+ * Based on txgbe_fdir_enable_raptor() in base/txgbe_hw.c, this adds extra
+ * configuration of fdirctrl that is common to all filter types.
+ *
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to the flow director control register
+ **/
+static int
+txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Prime the keys for hashing */
+ wr32(hw, TXGBE_FDIRBKTHKEY, TXGBE_ATR_BUCKET_HASH_KEY);
+ wr32(hw, TXGBE_FDIRSIGHKEY, TXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /*
+ * Continue setup of the fdirctrl register bits:
+ * - set the maximum length per hash bucket to 0xA filters
+ * - send an interrupt when 64 filters are left
+ */
+ fdirctrl |= TXGBE_FDIRCTL_MAXLEN(0xA) |
+ TXGBE_FDIRCTL_FULLTHR(4);
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiply these timings by 4 if under full Rx load.
+ *
+ * So we'll poll for TXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ wr32(hw, TXGBE_FDIRCTL, fdirctrl);
+ txgbe_flush(hw);
+ for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (rd32(hw, TXGBE_FDIRCTL) & TXGBE_FDIRCTL_INITDONE)
+ break;
+ msec_delay(1);
+ }
+
+ if (i >= TXGBE_FDIR_INIT_DONE_POLL) {
+ PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * Set appropriate bits in fdirctrl for: variable reporting levels, moving
+ * flexbytes matching field, and drop queue (only for perfect matching mode).
+ */
+static inline int
+configure_fdir_flags(const struct rte_fdir_conf *conf,
+ uint32_t *fdirctrl, uint32_t *flex)
+{
+ *fdirctrl = 0;
+ *flex = 0;
+
+ switch (conf->pballoc) {
+ case RTE_FDIR_PBALLOC_64K:
+ /* 8k - 1 signature filters */
+ *fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
+ break;
+ case RTE_FDIR_PBALLOC_128K:
+ /* 16k - 1 signature filters */
+ *fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
+ break;
+ case RTE_FDIR_PBALLOC_256K:
+ /* 32k - 1 signature filters */
+ *fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
+ return -EINVAL;
+ }
+
+ /* status flags: write hash & swindex in the rx descriptor */
+ switch (conf->status) {
+ case RTE_FDIR_NO_REPORT_STATUS:
+ /* do nothing, default mode */
+ break;
+ case RTE_FDIR_REPORT_STATUS:
+ /* report status when the packet matches a fdir rule */
+ *fdirctrl |= TXGBE_FDIRCTL_REPORT_MATCH;
+ break;
+ case RTE_FDIR_REPORT_STATUS_ALWAYS:
+ /* always report status */
+ *fdirctrl |= TXGBE_FDIRCTL_REPORT_ALWAYS;
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
+ return -EINVAL;
+ }
+
+ *flex |= TXGBE_FDIRFLEXCFG_BASE_MAC;
+ *flex |= TXGBE_FDIRFLEXCFG_OFST(TXGBE_DEFAULT_FLEXBYTES_OFFSET / 2);
+
+ switch (conf->mode) {
+ case RTE_FDIR_MODE_SIGNATURE:
+ break;
+ case RTE_FDIR_MODE_PERFECT:
+ *fdirctrl |= TXGBE_FDIRCTL_PERFECT;
+ *fdirctrl |= TXGBE_FDIRCTL_DROPQP(conf->drop_queue);
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->mode value");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline uint32_t
+reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
+{
+ uint32_t mask = hi_dword << 16;
+
+ mask |= lo_dword;
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
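+
+/*
+ * Worked example (illustrative only): reverse_fdir_bitmasks(0xFFFF, 0)
+ * first forms 0xFFFF0000, then the four swap steps above perform a
+ * full 32-bit bit reversal, yielding 0x0000FFFF, which is the bit
+ * order the FDIR port-mask registers expect (see the "bit reversed
+ * from port layout" note below).
+ */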
+
+/*
+ * This references txgbe_fdir_set_input_mask_raptor() in base/txgbe_hw.c,
+ * but makes use of the rte_fdir_masks structure to see which bits to set.
+ */
+int
+txgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ /*
+ * Mask VM pool and DIPv6 since they are currently not supported.
+ * The FLEX byte mask is set later in flex_conf.
+ */
+ uint32_t fdirm = TXGBE_FDIRMSK_POOL;
+ uint32_t fdirtcpm; /* TCP source and destination port masks. */
+ uint32_t fdiripv6m; /* IPv6 source and destination masks. */
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mode != RTE_FDIR_MODE_SIGNATURE &&
+ mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+ }
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ */
+ if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0) {
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= TXGBE_FDIRMSK_L4P;
+ }
+
+ /* TBD: encapsulation is not supported yet */
+ wr32(hw, TXGBE_FDIRMSK, fdirm);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = reverse_fdir_bitmasks(
+ rte_be_to_cpu_16(info->mask.dst_port_mask),
+ rte_be_to_cpu_16(info->mask.src_port_mask));
+
+ /* Write the same value to all three so that UDP, TCP and SCTP
+ * use the same mask (little-endian).
+ */
+ wr32(hw, TXGBE_FDIRTCPMSK, ~fdirtcpm);
+ wr32(hw, TXGBE_FDIRUDPMSK, ~fdirtcpm);
+ wr32(hw, TXGBE_FDIRSCTPMSK, ~fdirtcpm);
+
+ /* Store source and destination IPv4 masks (big-endian) */
+ wr32(hw, TXGBE_FDIRSIP4MSK, ~info->mask.src_ipv4_mask);
+ wr32(hw, TXGBE_FDIRDIP4MSK, ~info->mask.dst_ipv4_mask);
+
+ if (mode == RTE_FDIR_MODE_SIGNATURE) {
+ /*
+ * Store source and destination IPv6 masks (bit reversed)
+ */
+ fdiripv6m = TXGBE_FDIRIP6MSK_DST(info->mask.dst_ipv6_mask) |
+ TXGBE_FDIRIP6MSK_SRC(info->mask.src_ipv6_mask);
+
+ wr32(hw, TXGBE_FDIRIP6MSK, ~fdiripv6m);
+ }
+
+ return 0;
+}
+
+static int
+txgbe_fdir_store_input_mask(struct rte_eth_dev *dev)
+{
+ struct rte_eth_fdir_masks *input_mask =
+ &dev->data->dev_conf.fdir_conf.mask;
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ uint16_t dst_ipv6m = 0;
+ uint16_t src_ipv6m = 0;
+
+ if (mode != RTE_FDIR_MODE_SIGNATURE &&
+ mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+ }
+
+ memset(&info->mask, 0, sizeof(struct txgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.src_port_mask = input_mask->src_port_mask;
+ info->mask.dst_port_mask = input_mask->dst_port_mask;
+ info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+ info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+ info->mask.src_ipv6_mask = src_ipv6m;
+ info->mask.dst_ipv6_mask = dst_ipv6m;
+
+ return 0;
+}
+
+int
+txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+ uint16_t offset)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ uint32_t flexreg, flex;
+ flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
+ flex = TXGBE_FDIRFLEXCFG_BASE_MAC;
+ flex |= TXGBE_FDIRFLEXCFG_OFST(offset / 2);
+ flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
+ flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
+ wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
+ }
+
+ txgbe_flush(hw);
+ for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (rd32(hw, TXGBE_FDIRCTL) &
+ TXGBE_FDIRCTL_INITDONE)
+ break;
+ msec_delay(1);
+ }
+ return 0;
+}
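+
+/*
+ * Worked example (illustrative only): the OFST field counts 16-bit
+ * words, so a byte offset of 12 (TXGBE_DEFAULT_FLEXBYTES_OFFSET) is
+ * programmed as OFST(6), selecting the two flex bytes that start 12
+ * bytes into the packet.
+ */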
+
+/*
+ * txgbe_set_fdir_flex_conf - check that the flex payload and mask
+ * configuration arguments are valid, then program them
+ */
+static int
+txgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, uint32_t flex)
+{
+ const struct rte_eth_fdir_flex_conf *conf =
+ &dev->data->dev_conf.fdir_conf.flex_conf;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ const struct rte_eth_flex_payload_cfg *flex_cfg;
+ const struct rte_eth_fdir_flex_mask *flex_mask;
+ uint16_t flexbytes = 0;
+ uint16_t i;
+
+ if (conf == NULL) {
+ PMD_DRV_LOG(ERR, "NULL pointer.");
+ return -EINVAL;
+ }
+
+ flex |= TXGBE_FDIRFLEXCFG_DIA;
+
+ for (i = 0; i < conf->nb_payloads; i++) {
+ flex_cfg = &conf->flex_set[i];
+ if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "unsupported payload type.");
+ return -EINVAL;
+ }
+ if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
+ (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
+ (flex_cfg->src_offset[0] <= TXGBE_MAX_FLX_SOURCE_OFF)) {
+ flex &= ~TXGBE_FDIRFLEXCFG_OFST_MASK;
+ flex |= TXGBE_FDIRFLEXCFG_OFST(
+ flex_cfg->src_offset[0] / 2);
+ } else {
+ PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ flex_mask = &conf->flex_mask[i];
+ if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
+ PMD_DRV_LOG(ERR, "flexmask should be set globally.");
+ return -EINVAL;
+ }
+ flexbytes = (uint16_t)(((flex_mask->mask[1] << 8) & 0xFF00) |
+ ((flex_mask->mask[0]) & 0xFF));
+ if (flexbytes == UINT16_MAX) {
+ flex &= ~TXGBE_FDIRFLEXCFG_DIA;
+ } else if (flexbytes != 0) {
+ /* DIA is set by default; only an all-ones or all-zero mask is supported */
+ PMD_DRV_LOG(ERR, "invalid flexbytes mask arguments.");
+ return -EINVAL;
+ }
+ }
+
+ info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
+ info->flex_bytes_offset = (uint8_t)(TXGBD_FDIRFLEXCFG_OFST(flex) * 2);
+
+ for (i = 0; i < 64; i++) {
+ uint32_t flexreg;
+ flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
+ flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
+ flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
+ wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
+ }
+ return 0;
+}
+
+int
+txgbe_fdir_configure(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int err;
+ uint32_t fdirctrl, flex, pbsize;
+ int i;
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* only signature and perfect modes are supported */
+ if (mode != RTE_FDIR_MODE_SIGNATURE &&
+ mode != RTE_FDIR_MODE_PERFECT)
+ return -ENOSYS;
+
+ err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf,
+ &fdirctrl, &flex);
+ if (err)
+ return err;
+
+ /*
+ * Before enabling Flow Director, the Rx Packet Buffer size
+ * must be reduced. The new value is the current size minus
+ * flow director memory usage size.
+ */
+ pbsize = rd32(hw, TXGBE_PBRXSIZE(0));
+ pbsize -= TXGBD_FDIRCTL_BUF_BYTE(fdirctrl);
+ wr32(hw, TXGBE_PBRXSIZE(0), pbsize);
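+
+	/*
+	 * Illustrative numbers (assumed, not taken from the datasheet): if
+	 * the PBALLOC setting reserves a 64 KB filter region,
+	 * TXGBD_FDIRCTL_BUF_BYTE(fdirctrl) decodes to 65536 and the Rx
+	 * packet buffer above shrinks by exactly that amount.
+	 */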
+
+ /*
+ * The defaults in the HW for RX PB 1-7 are not zero and so should be
+ * initialized to zero for non DCB mode otherwise actual total RX PB
+ * would be bigger than programmed and filter space would run into
+ * the PB 0 region.
+ */
+ for (i = 1; i < 8; i++)
+ wr32(hw, TXGBE_PBRXSIZE(i), 0);
+
+ err = txgbe_fdir_store_input_mask(dev);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on setting FD mask");
+ return err;
+ }
+
+ err = txgbe_fdir_set_input_mask(dev);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on setting FD mask");
+ return err;
+ }
+
+ err = txgbe_set_fdir_flex_conf(dev, flex);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
+ return err;
+ }
+
+ err = txgbe_fdir_enable(hw, fdirctrl);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on enabling FD.");
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Convert the DPDK rte_eth_fdir_filter struct to the txgbe_atr_input
+ * struct used by the TXGBE driver code.
+ */
+static int
+txgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
+ struct txgbe_atr_input *input, enum rte_fdir_mode mode)
+{
+ u32 ptype;
+ UNREFERENCED_PARAMETER(mode);
+
+ input->flex_bytes = (uint16_t)(
+ (fdir_filter->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
+ (fdir_filter->input.flow_ext.flexbytes[0] & 0xFF));
+
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ input->flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ input->flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ input->flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ input->flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ input->flow_type = TXGBE_ATR_FLOW_TYPE_UDPV6;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV6_UDP];
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ input->flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV6_TCP];
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ input->flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV6;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV6_SCTP];
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ input->flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
+ break;
+ default:
+ ptype = 0;
+ break;
+ }
+
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ input->src_port =
+ fdir_filter->input.flow.udp4_flow.src_port;
+ input->dst_port =
+ fdir_filter->input.flow.udp4_flow.dst_port;
+ /* fall-through */
+ /* for the SCTP flow type, ports and verify_tag are meaningless in txgbe */
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ input->src_ip[0] =
+ fdir_filter->input.flow.ip4_flow.src_ip;
+ input->dst_ip[0] =
+ fdir_filter->input.flow.ip4_flow.dst_ip;
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ input->src_port =
+ fdir_filter->input.flow.udp6_flow.src_port;
+ input->dst_port =
+ fdir_filter->input.flow.udp6_flow.dst_port;
+ /* fall-through */
+ /* for the SCTP flow type, ports and verify_tag are meaningless in txgbe */
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ rte_memcpy(input->src_ip,
+ fdir_filter->input.flow.ipv6_flow.src_ip,
+ sizeof(input->src_ip));
+ rte_memcpy(input->dst_ip,
+ fdir_filter->input.flow.ipv6_flow.dst_ip,
+ sizeof(input->dst_ip));
+ break;
+ default:
+ break;
+ }
+
+ input->pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
+
+ return 0;
+}
+
+/**
+ * Note that the bkt_hash field in the txgbe_atr_input structure is never
+ * set here.
+ *
+ * Compute the hash for the software ATR implementation.
+ * @atr_input: input bitstream to compute the hash on
+ * @key: 32-bit hash key
+ **/
+static uint32_t
+txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
+ uint32_t key)
+{
+ /*
+ * The algorithm is as follows:
+ * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
+ * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
+ * and A[n] x B[n] is bitwise AND between same length strings
+ *
+ * K[n] is 16 bits, defined as:
+ * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
+ * for n modulo 32 < 15, K[n] =
+ * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
+ *
+ * S[n] is 16 bits, defined as:
+ * for n >= 15, S[n] = S[n:n - 15]
+ * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
+ *
+ * To simplify for programming, the algorithm is implemented
+ * in software this way:
+ *
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ * hi_hash_dword[31:0] ^= Stream[(i+31):i];
+ *
+ * lo_hash_dword[15:0] ^= Stream[15:0];
+ * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+ *
+ * hi_hash_dword[31:0] ^= Stream[351:320];
+ *
+ * if (key[0])
+ * hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ * if (key[i])
+ * hash[15:0] ^= lo_hash_dword[(i+15):i];
+ * if (key[i + 16])
+ * hash[15:0] ^= hi_hash_dword[(i+15):i];
+ * }
+ *
+ */
+ __be32 *dword_stream = (__be32 *)atr_input;
+ __be32 common_hash_dword = 0;
+ u32 hi_hash_dword, lo_hash_dword, flow_pool_ptid;
+ u32 hash_result = 0;
+ u8 i;
+
+ /* record the flow/pool/ptype bits as they are a key part of the hash */
+ flow_pool_ptid = be_to_cpu32(dword_stream[0]);
+
+ /* generate common hash dword */
+ for (i = 1; i <= 10; i++)
+ common_hash_dword ^= dword_stream[i];
+
+ hi_hash_dword = be_to_cpu32(common_hash_dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply (Flow ID/VM Pool/Packet Type) bits to hash words */
+ hi_hash_dword ^= flow_pool_ptid ^ (flow_pool_ptid >> 16);
+
+ /* Process bits 0 and 16 */
+ if (key & 0x0001)
+ hash_result ^= lo_hash_dword;
+ if (key & 0x00010000)
+ hash_result ^= hi_hash_dword;
+
+ /*
+ * Apply the flow/pool/ptype bits to the lo hash dword only now:
+ * bit 0 of the stream must not be affected by them, so they are
+ * mixed in after bit 0 has been processed.
+ */
+ lo_hash_dword ^= flow_pool_ptid ^ (flow_pool_ptid << 16);
+
+ /* process the remaining 30 bits in the key 2 bits at a time */
+ for (i = 15; i; i--) {
+ if (key & (0x0001 << i))
+ hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i))
+ hash_result ^= hi_hash_dword >> i;
+ }
+
+ return hash_result;
+}
+
+static uint32_t
+atr_compute_perfect_hash(struct txgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ uint32_t bucket_hash;
+
+ bucket_hash = txgbe_atr_compute_hash(input,
+ TXGBE_ATR_BUCKET_HASH_KEY);
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK;
+ else
+ bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK;
+
+ return TXGBE_FDIRPIHASH_BKT(bucket_hash);
+}
+
+/**
+ * txgbe_fdir_check_cmd_complete - poll to check whether the FDIRPICMD command has completed
+ * @hw: pointer to hardware structure
+ */
+static inline int
+txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, uint32_t *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < TXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = rd32(hw, TXGBE_FDIRPICMD);
+ if (!(*fdircmd & TXGBE_FDIRPICMD_OP_MASK))
+ return 0;
+ rte_delay_us(TXGBE_FDIRCMD_CMD_INTERVAL_US);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Calculate the hash value needed for signature-match filters. In the FreeBSD
+ * driver, this is done by the optimised function
+ * txgbe_atr_compute_sig_hash_raptor(). However, that can't be used here,
+ * as it doesn't support calculating a hash for an IPv6 filter.
+ */
+static uint32_t
+atr_compute_signature_hash(struct txgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ uint32_t bucket_hash, sig_hash;
+
+ bucket_hash = txgbe_atr_compute_hash(input,
+ TXGBE_ATR_BUCKET_HASH_KEY);
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ bucket_hash &= SIG_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ bucket_hash &= SIG_BUCKET_128KB_HASH_MASK;
+ else
+ bucket_hash &= SIG_BUCKET_64KB_HASH_MASK;
+
+ sig_hash = txgbe_atr_compute_hash(input,
+ TXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ return TXGBE_FDIRPIHASH_SIG(sig_hash) |
+ TXGBE_FDIRPIHASH_BKT(bucket_hash);
+}
+
+/*
+ * This is based on txgbe_fdir_write_perfect_filter_raptor() in
+ * base/txgbe_hw.c, with the ability to set extra flags in FDIRPICMD register
+ * added, and IPv6 support also added. The hash value is also pre-calculated
+ * as the pballoc value is needed to do it.
+ */
+static int
+fdir_write_perfect_filter(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input, uint8_t queue,
+ uint32_t fdircmd, uint32_t fdirhash,
+ enum rte_fdir_mode mode)
+{
+ uint32_t fdirport, fdirflex;
+ int err = 0;
+
+ UNREFERENCED_PARAMETER(mode);
+
+ /* record the IPv4 address (little-endian) */
+ wr32(hw, TXGBE_FDIRPISIP4, be_to_le32(input->src_ip[0]));
+ wr32(hw, TXGBE_FDIRPIDIP4, be_to_le32(input->dst_ip[0]));
+
+ /* record source and destination port (little-endian)*/
+ fdirport = TXGBE_FDIRPIPORT_DST(be_to_le16(input->dst_port));
+ fdirport |= TXGBE_FDIRPIPORT_SRC(be_to_le16(input->src_port));
+ wr32(hw, TXGBE_FDIRPIPORT, fdirport);
+
+ /* record pkt_type (little-endian) and flex_bytes(big-endian) */
+ fdirflex = TXGBE_FDIRPIFLEX_FLEX(be_to_npu16(input->flex_bytes));
+ fdirflex |= TXGBE_FDIRPIFLEX_PTYPE(be_to_le16(input->pkt_type));
+ wr32(hw, TXGBE_FDIRPIFLEX, fdirflex);
+
+ /* configure FDIRHASH register */
+ fdirhash |= TXGBE_FDIRPIHASH_VLD;
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ txgbe_flush(hw);
+
+ /* configure FDIRPICMD register */
+ fdircmd |= TXGBE_FDIRPICMD_OP_ADD |
+ TXGBE_FDIRPICMD_UPD |
+ TXGBE_FDIRPICMD_LAST |
+ TXGBE_FDIRPICMD_QPENA;
+ fdircmd |= TXGBE_FDIRPICMD_FT(input->flow_type);
+ fdircmd |= TXGBE_FDIRPICMD_QP(queue);
+ fdircmd |= TXGBE_FDIRPICMD_POOL(input->vm_pool);
+
+ wr32(hw, TXGBE_FDIRPICMD, fdircmd);
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
+
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
+
+ return err;
+}
+
+/**
+ * This function is based on txgbe_fdir_add_signature_filter_raptor() in
+ * base/txgbe_hw.c, but uses a pre-calculated hash value. It also supports
+ * setting extra fields in the FDIRPICMD register, and removes the code that was
+ * verifying the flow_type field. According to the documentation, a flow type
+ * of 00 (i.e. not TCP, UDP, or SCTP) is not supported; in practice, however,
+ * it appears to work correctly.
+ *
+ * Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: input bitstream for the filter
+ * @queue: queue index to direct traffic to
+ * @fdircmd: any extra flags to set in fdircmd register
+ * @fdirhash: pre-calculated hash value for the filter
+ **/
+static int
+fdir_add_signature_filter(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input, uint8_t queue, uint32_t fdircmd,
+ uint32_t fdirhash)
+{
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* configure FDIRPICMD register */
+ fdircmd |= TXGBE_FDIRPICMD_OP_ADD |
+ TXGBE_FDIRPICMD_UPD |
+ TXGBE_FDIRPICMD_LAST |
+ TXGBE_FDIRPICMD_QPENA;
+ fdircmd |= TXGBE_FDIRPICMD_FT(input->flow_type);
+ fdircmd |= TXGBE_FDIRPICMD_QP(queue);
+
+ fdirhash |= TXGBE_FDIRPIHASH_VLD;
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+ wr32(hw, TXGBE_FDIRPICMD, fdircmd);
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
+
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
+
+ return err;
+}
+
+/*
+ * This is based on txgbe_fdir_erase_perfect_filter_raptor() in
+ * base/txgbe_hw.c. It is modified to take in the hash as a parameter so
+ * that it can be used for removing signature and perfect filters.
+ */
+static int
+fdir_erase_filter_raptor(struct txgbe_hw *hw, uint32_t fdirhash)
+{
+ uint32_t fdircmd = 0;
+ int err = 0;
+
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+
+ /* flush hash to HW */
+ txgbe_flush(hw);
+
+ /* Query if filter is present */
+ wr32(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_OP_QRY);
+
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
+ return err;
+ }
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & TXGBE_FDIRPICMD_VLD) {
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+ txgbe_flush(hw);
+ wr32(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_OP_REM);
+ }
+
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
+
+ return err;
+}
+
+static inline struct txgbe_fdir_filter *
+txgbe_fdir_filter_lookup(struct txgbe_hw_fdir_info *fdir_info,
+ struct txgbe_atr_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+static inline int
+txgbe_insert_fdir_filter(struct txgbe_hw_fdir_info *fdir_info,
+ struct txgbe_fdir_filter *fdir_filter)
+{
+ int ret;
+
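+	/*
+	 * rte_hash_add_key() returns the key index (>= 0) on success; the
+	 * same index is reused below as the slot in hash_map, so a later
+	 * lookup can map the key straight back to its filter node.
+	 */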
+ ret = rte_hash_add_key(fdir_info->hash_handle, &fdir_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ fdir_info->hash_map[ret] = fdir_filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
+
+ return 0;
+}
+
+static inline int
+txgbe_remove_fdir_filter(struct txgbe_hw_fdir_info *fdir_info,
+ struct txgbe_atr_input *input)
+{
+ int ret;
+ struct txgbe_fdir_filter *fdir_filter;
+
+ ret = rte_hash_del_key(fdir_info->hash_handle, input);
+ if (ret < 0)
+ return ret;
+
+ fdir_filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+ rte_free(fdir_filter);
+
+ return 0;
+}
+
+static int
+txgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct txgbe_fdir_rule *rule)
+{
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ int err;
+
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+
+ err = txgbe_fdir_filter_to_atr_input(fdir_filter,
+ &rule->input,
+ fdir_mode);
+ if (err)
+ return err;
+
+ rule->mode = fdir_mode;
+ if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
+ rule->fdirflags = TXGBE_FDIRPICMD_DROP;
+ rule->queue = fdir_filter->action.rx_queue;
+ rule->soft_id = fdir_filter->soft_id;
+
+ return 0;
+}
+
+int
+txgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct txgbe_fdir_rule *rule,
+ bool del,
+ bool update)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t fdirhash;
+ uint8_t queue;
+ bool is_perfect = FALSE;
+ int err;
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ struct txgbe_fdir_filter *node;
+
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
+ return -ENOTSUP;
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT)
+ is_perfect = TRUE;
+
+ if (is_perfect) {
+ if (rule->input.flow_type & TXGBE_ATR_L3TYPE_IPV6) {
+ PMD_DRV_LOG(ERR, "IPv6 is not supported in"
+ " perfect mode!");
+ return -ENOTSUP;
+ }
+ fdirhash = atr_compute_perfect_hash(&rule->input,
+ dev->data->dev_conf.fdir_conf.pballoc);
+ fdirhash |= TXGBE_FDIRPIHASH_IDX(rule->soft_id);
+ } else {
+ fdirhash = atr_compute_signature_hash(&rule->input,
+ dev->data->dev_conf.fdir_conf.pballoc);
+ }
+
+ if (del) {
+ err = txgbe_remove_fdir_filter(info, &rule->input);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", err);
+ return err;
+ }
+
+ err = fdir_erase_filter_raptor(hw, fdirhash);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
+ else
+ PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
+ return err;
+ }
+
+ /* add or update an fdir filter */
+ if (rule->fdirflags & TXGBE_FDIRPICMD_DROP) {
+ if (!is_perfect) {
+ PMD_DRV_LOG(ERR, "Drop option is not supported in"
+ " signature mode.");
+ return -EINVAL;
+ }
+ queue = dev->data->dev_conf.fdir_conf.drop_queue;
+ } else if (rule->queue < TXGBE_MAX_RX_QUEUE_NUM) {
+ queue = rule->queue;
+ } else {
+ return -EINVAL;
+ }
+
+ node = txgbe_fdir_filter_lookup(info, &rule->input);
+ if (node) {
+ if (!update) {
+ PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
+ return -EINVAL;
+ }
+ node->fdirflags = rule->fdirflags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+ } else {
+ node = rte_zmalloc("txgbe_fdir",
+ sizeof(struct txgbe_fdir_filter), 0);
+ if (!node)
+ return -ENOMEM;
+ rte_memcpy(&node->input, &rule->input,
+ sizeof(struct txgbe_atr_input));
+ node->fdirflags = rule->fdirflags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+
+ err = txgbe_insert_fdir_filter(info, node);
+ if (err < 0) {
+ rte_free(node);
+ return err;
+ }
+ }
+
+ if (is_perfect)
+ err = fdir_write_perfect_filter(hw, &node->input,
+ node->queue, node->fdirflags,
+ node->fdirhash, fdir_mode);
+ else
+ err = fdir_add_signature_filter(hw, &node->input,
+ node->queue, node->fdirflags,
+ node->fdirhash);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
+ txgbe_remove_fdir_filter(info, &rule->input);
+ } else {
+ PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
+ }
+
+ return err;
+}
+
+/* txgbe_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the structure rte_eth_dev
+ * @fdir_filter: fdir filter entry
+ * @del: 1 - delete, 0 - add
+ * @update: 1 - update
+ */
+static int
+txgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update)
+{
+ struct txgbe_fdir_rule rule;
+ int err;
+
+ err = txgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
+ if (err)
+ return err;
+
+ return txgbe_fdir_filter_program(dev, &rule, del, update);
+}
+
+static int
+txgbe_fdir_flush(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ int ret;
+
+ ret = txgbe_reinit_fdir_tables(hw);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
+ return ret;
+ }
+
+ info->f_add = 0;
+ info->f_remove = 0;
+ info->add = 0;
+ info->remove = 0;
+
+ return ret;
+}
+
+static void
+txgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ uint32_t fdirctrl, max_num, i;
+ uint8_t offset;
+
+ fdirctrl = rd32(hw, TXGBE_FDIRFLEXCFG(0));
+ offset = TXGBD_FDIRFLEXCFG_OFST(fdirctrl) * 2;
+
+ fdirctrl = rd32(hw, TXGBE_FDIRCTL);
+ fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
+ max_num = TXGBD_FDIRCTL_BUF_BYTE(fdirctrl) / 32;
+ if (fdir_info->mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_info->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ fdir_info->guarant_spc = max_num;
+ else if (fdir_info->mode == RTE_FDIR_MODE_SIGNATURE)
+ fdir_info->guarant_spc = max_num * 4;
+
+ fdir_info->mask.vlan_tci_mask = info->mask.vlan_tci_mask;
+ fdir_info->mask.ipv4_mask.src_ip = info->mask.src_ipv4_mask;
+ fdir_info->mask.ipv4_mask.dst_ip = info->mask.dst_ipv4_mask;
+ IPV6_MASK_TO_ADDR(info->mask.src_ipv6_mask,
+ fdir_info->mask.ipv6_mask.src_ip);
+ IPV6_MASK_TO_ADDR(info->mask.dst_ipv6_mask,
+ fdir_info->mask.ipv6_mask.dst_ip);
+ fdir_info->mask.src_port_mask = info->mask.src_port_mask;
+ fdir_info->mask.dst_port_mask = info->mask.dst_port_mask;
+ fdir_info->mask.mac_addr_byte_mask = info->mask.mac_addr_byte_mask;
+ fdir_info->mask.tunnel_id_mask = info->mask.tunnel_id_mask;
+ fdir_info->mask.tunnel_type_mask = info->mask.tunnel_type_mask;
+ fdir_info->max_flexpayload = TXGBE_FDIR_MAX_FLEX_LEN;
+
+ if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
+ fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ fdir_info->flow_types_mask[0] = 0ULL;
+ else
+ fdir_info->flow_types_mask[0] = TXGBE_FDIR_FLOW_TYPES;
+ for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+ fdir_info->flow_types_mask[i] = 0ULL;
+
+ fdir_info->flex_payload_unit = 2;
+ fdir_info->max_flex_payload_segment_num = 1;
+ fdir_info->flex_payload_limit = TXGBE_MAX_FLX_SOURCE_OFF;
+ fdir_info->flex_conf.nb_payloads = 1;
+ fdir_info->flex_conf.flex_set[0].type = RTE_ETH_RAW_PAYLOAD;
+ fdir_info->flex_conf.flex_set[0].src_offset[0] = offset;
+ fdir_info->flex_conf.flex_set[0].src_offset[1] = offset + 1;
+ fdir_info->flex_conf.nb_flexmasks = 1;
+ fdir_info->flex_conf.flex_mask[0].flow_type = RTE_ETH_FLOW_UNKNOWN;
+ fdir_info->flex_conf.flex_mask[0].mask[0] =
+ (uint8_t)(info->mask.flex_bytes_mask & 0x00FF);
+ fdir_info->flex_conf.flex_mask[0].mask[1] =
+ (uint8_t)((info->mask.flex_bytes_mask & 0xFF00) >> 8);
+}
+
+static void
+txgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ uint32_t reg, max_num;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ /* Get the information from registers */
+ reg = rd32(hw, TXGBE_FDIRFREE);
+ info->collision = 0;
+ info->free = (uint16_t)TXGBE_FDIRFREE_FLT(reg);
+
+ reg = rd32(hw, TXGBE_FDIRLEN);
+ info->maxhash = (uint16_t)TXGBE_FDIRLEN_BKTLEN(reg);
+ info->maxlen = (uint8_t)TXGBE_FDIRLEN_MAXLEN(reg);
+
+ reg = rd32(hw, TXGBE_FDIRUSED);
+ info->remove += TXGBE_FDIRUSED_REM(reg);
+ info->add += TXGBE_FDIRUSED_ADD(reg);
+
+ reg = rd32(hw, TXGBE_FDIRFAIL);
+ info->f_remove += TXGBE_FDIRFAIL_REM(reg);
+ info->f_add += TXGBE_FDIRFAIL_ADD(reg);
+
+ /* Copy the new information in the fdir parameter */
+ fdir_stats->collision = info->collision;
+ fdir_stats->free = info->free;
+ fdir_stats->maxhash = info->maxhash;
+ fdir_stats->maxlen = info->maxlen;
+ fdir_stats->remove = info->remove;
+ fdir_stats->add = info->add;
+ fdir_stats->f_remove = info->f_remove;
+ fdir_stats->f_add = info->f_add;
+
+ reg = rd32(hw, TXGBE_FDIRCTL);
+ max_num = TXGBD_FDIRCTL_BUF_BYTE(reg) / 32;
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ fdir_stats->guarant_cnt = max_num - fdir_stats->free;
+ else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
+ fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
+}
+
+/*
+ * txgbe_fdir_ctrl_func - deal with all operations on flow director.
+ * @dev: pointer to the structure rte_eth_dev
+ * @filter_op: operation to be taken
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+int
+txgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op, void *arg)
+{
+ int ret = 0;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+ return -EINVAL;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = txgbe_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg, FALSE, FALSE);
+ break;
+ case RTE_ETH_FILTER_UPDATE:
+ ret = txgbe_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg, FALSE, TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = txgbe_add_del_fdir_filter(dev,
+ (struct rte_eth_fdir_filter *)arg, TRUE, FALSE);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ ret = txgbe_fdir_flush(dev);
+ break;
+ case RTE_ETH_FILTER_INFO:
+ txgbe_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
+ break;
+ case RTE_ETH_FILTER_STATS:
+ txgbe_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
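+
+/*
+ * Hedged usage sketch (application side, illustrative only, assuming the
+ * driver wires this handler to RTE_ETH_FILTER_FDIR in its filter_ctrl op):
+ * a flush request could reach it through the legacy filter API as
+ *
+ *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
+ *				RTE_ETH_FILTER_FLUSH, NULL);
+ *
+ * New applications should prefer the generic rte_flow API instead.
+ */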
+
+/* restore flow director filter */
+void
+txgbe_fdir_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_fdir_filter *node;
+ bool is_perfect = FALSE;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ is_perfect = TRUE;
+
+ if (is_perfect) {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_write_perfect_filter(hw,
+ &node->input,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash,
+ fdir_mode);
+ }
+ } else {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_add_signature_filter(hw,
+ &node->input,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash);
+ }
+ }
+}
+
+/* remove all the flow director filters */
+int
+txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_fdir_filter *fdir_filter;
+ struct txgbe_fdir_filter *filter_flag;
+ int ret = 0;
+
+ /* flush flow director */
+ rte_hash_reset(fdir_info->hash_handle);
+ memset(fdir_info->hash_map, 0,
+ sizeof(struct txgbe_fdir_filter *) * TXGBE_MAX_FDIR_FILTER_NUM);
+ filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ if (filter_flag != NULL)
+ ret = txgbe_fdir_flush(dev);
+
+ return ret;
+}
new file mode 100644
@@ -0,0 +1,3192 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_hash_crc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_rxtx.h"
+#include "rte_pmd_txgbe.h"
+
+#define TXGBE_MIN_N_TUPLE_PRIO 1
+#define TXGBE_MAX_N_TUPLE_PRIO 7
+#define TXGBE_MAX_FLX_SOURCE_OFF 62
+
+/* ntuple filter list structure */
+struct txgbe_ntuple_filter_ele {
+ TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct txgbe_ethertype_filter_ele {
+ TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct txgbe_eth_syn_filter_ele {
+ TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct txgbe_fdir_rule_ele {
+ TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
+ struct txgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct txgbe_eth_l2_tunnel_conf_ele {
+ TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
+ struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* rss filter list structure */
+struct txgbe_rss_conf_ele {
+ TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
+ struct txgbe_rte_flow_rss_conf filter_info;
+};
+/* txgbe_flow memory list structure */
+struct txgbe_flow_mem {
+ TAILQ_ENTRY(txgbe_flow_mem) entries;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
+TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
+TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
+TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
+TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
+TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);
+
+static struct txgbe_ntuple_filter_list filter_ntuple_list;
+static struct txgbe_ethertype_filter_list filter_ethertype_list;
+static struct txgbe_syn_filter_list filter_syn_list;
+static struct txgbe_fdir_rule_filter_list filter_fdir_list;
+static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct txgbe_rss_filter_list filter_rss_list;
+static struct txgbe_flow_mem_list txgbe_flow_list;
+
+/**
+ * An endless loop cannot happen given the assumptions below:
+ * 1. there is at least one non-void item (END);
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ cur ? cur + 1 : &pattern[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action *cur)
+{
+ const struct rte_flow_action *next =
+ cur ? cur + 1 : &actions[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
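+
+/*
+ * Usage sketch (illustrative only): the helpers above let a parser walk a
+ * pattern or action list while transparently skipping VOID entries, e.g.
+ *
+ *	const struct rte_flow_item *it = next_no_void_pattern(pattern, NULL);
+ *	while (it->type != RTE_FLOW_ITEM_TYPE_END)
+ *		it = next_no_void_pattern(pattern, it);
+ */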
+
+/**
+ * Please be aware of the assumption shared by all the parsers:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order.
+ * Because a pattern describes packets, and packets normally travel
+ * in network (big-endian) order, this is the natural choice.
+ */
+
+/**
+ * Parse the rule to see if it is an n-tuple rule,
+ * and fill in the n-tuple filter info along the way.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
+ */
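+/*
+ * Illustrative application-side rule matching the pattern example above
+ * (a hedged sketch, not part of the driver). Note the big-endian values
+ * in spec and mask, per the endianness note earlier in this file:
+ *
+ *	struct rte_flow_item_ipv4 ip_spec = {
+ *		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
+ *		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
+ *		.hdr.next_proto_id = IPPROTO_UDP,
+ *	};
+ *	struct rte_flow_item_ipv4 ip_mask = {
+ *		.hdr.src_addr = UINT32_MAX,
+ *		.hdr.dst_addr = UINT32_MAX,
+ *		.hdr.next_proto_id = UINT8_MAX,
+ *	};
+ *	struct rte_flow_item_udp udp_spec = {
+ *		.hdr.src_port = rte_cpu_to_be_16(80),
+ *		.hdr.dst_port = rte_cpu_to_be_16(80),
+ *	};
+ *	struct rte_flow_item_udp udp_mask = {
+ *		.hdr.src_port = UINT16_MAX,
+ *		.hdr.dst_port = UINT16_MAX,
+ *	};
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ *		  .spec = &ip_spec, .mask = &ip_mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ *		  .spec = &udp_spec, .mask = &udp_mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ */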
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ struct rte_flow_item_eth eth_null;
+ struct rte_flow_item_vlan vlan_null;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+ memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
+#ifdef RTE_LIBRTE_SECURITY
+ /**
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+ const void *conf = act->conf;
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* get the IP pattern*/
+ item = next_no_void_pattern(pattern, NULL);
+ while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ if (item->last ||
+ item->type == RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "IP pattern missing.");
+ return -rte_errno;
+ }
+ item = next_no_void_pattern(pattern, item);
+ }
+
+ filter->proto = IPPROTO_ESP;
+ return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+ }
+#endif
+
+ /* the first not void item can be MAC or IPv4 */
+ item = next_no_void_pattern(pattern, NULL);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if ((item->spec || item->mask) &&
+ (memcmp(eth_spec, &eth_null,
+ sizeof(struct rte_flow_item_eth)) ||
+ memcmp(eth_mask, &eth_null,
+ sizeof(struct rte_flow_item_eth)))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 or Vlan */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* the content should be NULL */
+ if ((item->spec || item->mask) &&
+ (memcmp(vlan_spec, &vlan_null,
+ sizeof(struct rte_flow_item_vlan)) ||
+ memcmp(vlan_mask, &vlan_null,
+ sizeof(struct rte_flow_item_vlan)))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->mask) {
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ ipv4_mask = item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ if ((ipv4_mask->hdr.src_addr != 0 &&
+ ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.dst_addr != 0 &&
+ ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
+ ipv4_mask->hdr.next_proto_id != 0)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+ }
+
+ /* check if the next not void item is TCP or UDP */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+ (!item->spec && !item->mask)) {
+ goto action;
+ }
+
+ /* get the TCP/UDP/SCTP info */
+ if (item->type != RTE_FLOW_ITEM_TYPE_END &&
+ (!item->spec || !item->mask)) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ tcp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ if ((tcp_mask->hdr.src_port != 0 &&
+ tcp_mask->hdr.src_port != UINT16_MAX) ||
+ (tcp_mask->hdr.dst_port != 0 &&
+ tcp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ udp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ if ((udp_mask->hdr.src_port != 0 &&
+ udp_mask->hdr.src_port != UINT16_MAX) ||
+ (udp_mask->hdr.dst_port != 0 &&
+ udp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ sctp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ } else {
+ goto action;
+ }
+
+ /* check if the next not void item is END */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+action:
+
+ /**
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ item, "Not supported action.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+ filter->priority = (uint16_t)attr->priority;
+ if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
+ attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
+ filter->priority = 1;
+
+ return 0;
+}
+
+/* a txgbe-specific parser, because the supported flags are specific */
+static int
+txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+ if (ret)
+ return ret;
+
+#ifdef RTE_LIBRTE_SECURITY
+ /* an ESP flow is not really a flow */
+ if (filter->proto == IPPROTO_ESP)
+ return 0;
+#endif
+
+ /* txgbe doesn't support tcp flags. */
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* txgbe doesn't support many priorities. */
+ if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
+ filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
+ /* fixed value for txgbe */
+ filter->flags = RTE_5TUPLE_FLAGS;
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is an ethertype rule,
+ * and fill in the ethertype filter info along the way.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
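+/*
+ * Illustrative rule accepted by this parser (a hedged sketch, not part of
+ * the driver); the ethertype value is an example only:
+ *
+ *	struct rte_flow_item_eth eth_spec = {
+ *		.type = rte_cpu_to_be_16(0x0807),
+ *	};
+ *	struct rte_flow_item_eth eth_mask = {
+ *		.type = UINT16_MAX,
+ *	};
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ *		  .spec = &eth_spec, .mask = &eth_mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ */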
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ item = next_no_void_pattern(pattern, NULL);
+ /* The first non-void item should be MAC. */
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+ (!rte_is_zero_ether_addr(&eth_mask->dst) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* txgbe doesn't support MAC address. */
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= dev->data->nb_rx_queues) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue index much too big");
+ return -rte_errno;
+ }
+
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a TCP SYN rule,
+ * and fill in the TCP SYN filter info along the way.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * TCP tcp_flags 0x02 0x02
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
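+/*
+ * Illustrative rule accepted by this parser (a hedged sketch, not part of
+ * the driver); only the SYN bit is set in both spec and mask, as the code
+ * below requires:
+ *
+ *	struct rte_flow_item_tcp tcp_spec = {
+ *		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
+ *	};
+ *	struct rte_flow_item_tcp tcp_mask = {
+ *		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
+ *	};
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ *		  .spec = &tcp_spec, .mask = &tcp_mask },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ */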
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_action_queue *act_q;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+ item = next_no_void_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* if the item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN address mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is IPv4 or IPv6 */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* if the item is IP, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is TCP */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. Only support SYN. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
+ tcp_mask->hdr.src_port ||
+ tcp_mask->hdr.dst_port ||
+ tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the first not void action is QUEUE. */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Support 2 priorities, the lowest or highest. */
+ if (!attr->priority) {
+ filter->hig_pri = 0;
+ } else if (attr->priority == (uint32_t)~0U) {
+ filter->hig_pri = 1;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
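+
+#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard: examples are not built */
+/*
+ * A minimal sketch (illustrative only, not driver code) of the
+ * pattern/action arrays cons_parse_syn_filter() accepts: steer TCP SYN
+ * packets to a queue. The queue index is arbitrary and must be below
+ * the port's nb_rx_queues; note the TCP flags mask has to be exactly
+ * RTE_TCP_SYN_FLAG, as checked above.
+ */
+static int
+syn_rule_example(uint16_t port_id, struct rte_flow_error *err)
+{
+	static const struct rte_flow_attr attr = { .ingress = 1 };
+	static const struct rte_flow_item_tcp tcp_spec = {
+		.hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
+	};
+	static const struct rte_flow_item_tcp tcp_mask = {
+		.hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
+	};
+	const struct rte_flow_item pattern[] = {
+		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
+		  .spec = &tcp_spec, .mask = &tcp_mask },
+		{ .type = RTE_FLOW_ITEM_TYPE_END },
+	};
+	const struct rte_flow_action_queue queue = { .index = 3 };
+	const struct rte_flow_action actions[] = {
+		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+		{ .type = RTE_FLOW_ACTION_TYPE_END },
+	};
+
+	return rte_flow_create(port_id, &attr, pattern, actions, err)
+			? 0 : -rte_errno;
+}
+#endif /* TXGBE_FLOW_DOC_EXAMPLES */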
+
+static int
+txgbe_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cons_parse_syn_filter(attr, pattern,
+					actions, filter, error);
+	if (ret)
+		return ret;
+
+	if (filter->queue >= dev->data->nb_rx_queues) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			NULL, "queue index out of range");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/**
+ * Parse the rule to see if it is an L2 tunnel rule.
+ * And get the L2 tunnel filter info as well.
+ * Only E-tag is supported now.
+ * pattern:
+ * The first not void item can be E_TAG.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be VF or PF.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM		Spec			Mask
+ * E_TAG	grp		0x1	0x3
+ *		e_cid_base	0x309	0xFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_e_tag *e_tag_spec;
+ const struct rte_flow_item_e_tag *e_tag_mask;
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_vf *act_vf;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* The first not void item should be e-tag. */
+ item = next_no_void_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ e_tag_spec = item->spec;
+ e_tag_mask = item->mask;
+
+ /* Only care about GRP and E cid base. */
+ if (e_tag_mask->epcp_edei_in_ecid_b ||
+ e_tag_mask->in_ecid_e ||
+ e_tag_mask->ecid_e ||
+ e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ /**
+ * grp and e_cid_base are bit fields and only use 14 bits.
+ * e-tag id is taken as little endian by HW.
+ */
+ filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
+
+ /* check if the next not void item is END */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* check if the first not void action is VF or PF. */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
+ act->type != RTE_FLOW_ACTION_TYPE_PF) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ filter->pool = act_vf->id;
+ } else {
+ filter->pool = pci_dev->max_vfs;
+ }
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
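+
+#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard: examples are not built */
+/*
+ * Sketch only: an E-tag pattern mirroring the doc example above -
+ * GRP 0x1 and e_cid_base 0x309 packed into the 14-bit rsvd_grp_ecid_b
+ * field (mask 0x3FFF), forwarded to the PF pool.
+ */
+static const struct rte_flow_item_e_tag l2_tn_e_tag_spec = {
+	.rsvd_grp_ecid_b = RTE_BE16((0x1 << 12) | 0x309),
+};
+static const struct rte_flow_item_e_tag l2_tn_e_tag_mask = {
+	.rsvd_grp_ecid_b = RTE_BE16(0x3FFF),
+};
+static const struct rte_flow_item l2_tn_pattern[] = {
+	{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
+	  .spec = &l2_tn_e_tag_spec, .mask = &l2_tn_e_tag_mask },
+	{ .type = RTE_FLOW_ITEM_TYPE_END },
+};
+static const struct rte_flow_action l2_tn_actions[] = {
+	{ .type = RTE_FLOW_ACTION_TYPE_PF },
+	{ .type = RTE_FLOW_ACTION_TYPE_END },
+};
+#endif /* TXGBE_FLOW_DOC_EXAMPLES */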
+
+static int
+txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+ struct rte_flow_error *error)
+{
+	return cons_parse_l2_tn_filter(dev, attr, pattern,
+				actions, l2_tn_filter, error);
+}
+
+/* Parse to get the attr and action info of flow director rule. */
+static int
+txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct txgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark;
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* check if the first not void action is QUEUE or DROP. */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ rule->queue = act_q->index;
+ } else { /* drop */
+ /* signature mode does not support drop action. */
+ if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+ rule->fdirflags = TXGBE_FDIRPICMD_DROP;
+ }
+
+ /* check if the next not void item is MARK */
+ act = next_no_void_action(actions, act);
+ if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
+ (act->type != RTE_FLOW_ACTION_TYPE_END)) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rule->soft_id = 0;
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark = (const struct rte_flow_action_mark *)act->conf;
+ rule->soft_id = mark->id;
+ act = next_no_void_action(actions, act);
+ }
+
+ /* check if the next not void item is END */
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
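+
+#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard: examples are not built */
+/*
+ * Sketch only: the action-list shape accepted above - QUEUE (or DROP),
+ * an optional MARK, then END. The queue index and mark id are
+ * arbitrary values.
+ */
+static const struct rte_flow_action_queue fdir_act_queue = { .index = 1 };
+static const struct rte_flow_action_mark fdir_act_mark = { .id = 0x1234 };
+static const struct rte_flow_action fdir_act_list[] = {
+	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &fdir_act_queue },
+	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &fdir_act_mark },
+	{ .type = RTE_FLOW_ACTION_TYPE_END },
+};
+#endif /* TXGBE_FLOW_DOC_EXAMPLES */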
+
+/* search next no void pattern and skip fuzzy */
+static inline
+const struct rte_flow_item *next_no_fuzzy_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ next_no_void_pattern(pattern, cur);
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
+ return next;
+ next = next_no_void_pattern(pattern, next);
+ }
+}
+
+static inline uint8_t signature_match(const struct rte_flow_item pattern[])
+{
+ const struct rte_flow_item_fuzzy *spec, *last, *mask;
+ const struct rte_flow_item *item;
+ uint32_t sh, lh, mh;
+ int i = 0;
+
+ while (1) {
+ item = pattern + i;
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
+ spec = item->spec;
+ last = item->last;
+ mask = item->mask;
+
+ if (!spec || !mask)
+ return 0;
+
+ sh = spec->thresh;
+
+ if (!last)
+ lh = sh;
+ else
+ lh = last->thresh;
+
+ mh = mask->thresh;
+ sh = sh & mh;
+ lh = lh & mh;
+
+ if (!sh || sh > lh)
+ return 0;
+
+ return 1;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
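+
+#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard: examples are not built */
+/*
+ * Sketch only: a FUZZY item placed anywhere before END switches the
+ * rule into signature mode. A non-zero threshold under the mask is
+ * required; the values here are arbitrary.
+ */
+static const struct rte_flow_item_fuzzy sig_fuzzy_spec = { .thresh = 1 };
+static const struct rte_flow_item_fuzzy sig_fuzzy_mask = {
+	.thresh = UINT32_MAX,
+};
+static const struct rte_flow_item sig_fuzzy_item = {
+	.type = RTE_FLOW_ITEM_TYPE_FUZZY,
+	.spec = &sig_fuzzy_spec,
+	.mask = &sig_fuzzy_mask,
+};
+#endif /* TXGBE_FLOW_DOC_EXAMPLES */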
+
+/**
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
+ * And get the flow director filter info as well.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item could be UDP or TCP or SCTP (optional)
+ * The next not void item could be RAW (for flexbyte, optional)
+ * The next not void item must be END.
+ * A Fuzzy Match pattern can appear at any place before END.
+ * Fuzzy Match is optional for IPV4 but is required for IPV6
+ * MAC VLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be MAC VLAN.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP/SCTP pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * UDP/TCP/SCTP src_port 80 0xFFFF
+ * dst_port 80 0xFFFF
+ * FLEX relative 0 0x1
+ * search 0 0x1
+ * reserved 0 0
+ * offset 12 0xFFFFFFFF
+ * limit 0 0xFFFF
+ * length 2 0xFFFF
+ * pattern[0] 0x86 0xFF
+ * pattern[1] 0xDD 0xFF
+ * END
+ * MAC VLAN pattern example:
+ * ITEM Spec Mask
+ * ETH		dst_addr
+ *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
+ *		 0x2C, 0x6D, 0x36}	 0xFF, 0xFF, 0xFF}
+ * MAC VLAN	tci	0x2016		0xEFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct txgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ const struct rte_flow_item_ipv6 *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_item_raw *raw_spec;
+ u32 ptype = 0;
+ uint8_t j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+	/**
+	 * Some fields may not be provided. Set spec to 0 and mask to default
+	 * value, so nothing more needs to be done later for the fields that
+	 * are not provided.
+	 */
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+ rule->mask.flex_bytes_mask = 0;
+
+	/**
+	 * The first not void item should be
+	 * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
+	 */
+ item = next_no_fuzzy_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ if (signature_match(pattern))
+ rule->mode = RTE_FDIR_MODE_SIGNATURE;
+ else
+ rule->mode = RTE_FDIR_MODE_PERFECT;
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+ if (item->spec && !item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+		if (item->mask) {
+ rule->b_mask = TRUE;
+ eth_mask = item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type ||
+ rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* If ethernet has meaning, it means MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
+			/**
+			 * The src MAC address must be fully masked out,
+			 * and the dst MAC address mask must be all 0xFF.
+			 */
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j] ||
+ eth_mask->dst.addr_bytes[j] != 0xFF) {
+ memset(rule, 0,
+ sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* When no VLAN, considered as full mask. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+ }
+		/**
+		 * If both spec and mask are NULL, it means don't care
+		 * about the ETH item. Do nothing.
+		 */
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ } else {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ }
+
+ /* Get the IPV4 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst addresses,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ ipv4_mask = item->mask;
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
+ rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv4_spec = item->spec;
+ rule->input.dst_ip[0] =
+ ipv4_spec->hdr.dst_addr;
+ rule->input.src_ip[0] =
+ ipv4_spec->hdr.src_addr;
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the IPV6 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
+
+		/**
+		 * 1. must be a signature match
+		 * 2. "last" is not supported
+		 * 3. mask must not be NULL
+		 */
+ if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+ item->last ||
+ !item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ rule->b_mask = TRUE;
+ ipv6_mask = item->mask;
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check src addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+ rule->mask.src_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.src_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* check dst addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+ rule->mask.dst_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv6_spec = item->spec;
+ rte_memcpy(rule->input.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(rule->input.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ tcp_mask = item->mask;
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ tcp_spec = item->spec;
+ rule->input.src_port =
+ tcp_spec->hdr.src_port;
+ rule->input.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+	}
+
+ /* Get the UDP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ udp_mask = item->mask;
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = udp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ udp_spec = item->spec;
+ rule->input.src_port =
+ udp_spec->hdr.src_port;
+ rule->input.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+	}
+
+ /* Get the SCTP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ sctp_mask = item->mask;
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ sctp_spec = item->spec;
+ rule->input.src_port =
+ sctp_spec->hdr.src_port;
+ rule->input.dst_port =
+ sctp_spec->hdr.dst_port;
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the flex byte info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* mask should not be null */
+ if (!item->mask || !item->spec) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ raw_mask = item->mask;
+
+ /* check mask */
+ if (raw_mask->relative != 0x1 ||
+ raw_mask->search != 0x1 ||
+ raw_mask->reserved != 0x0 ||
+ (uint32_t)raw_mask->offset != 0xffffffff ||
+ raw_mask->limit != 0xffff ||
+ raw_mask->length != 0xffff) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ raw_spec = item->spec;
+
+ /* check spec */
+ if (raw_spec->relative != 0 ||
+ raw_spec->search != 0 ||
+ raw_spec->reserved != 0 ||
+ raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
+ raw_spec->offset % 2 ||
+ raw_spec->limit != 0 ||
+ raw_spec->length != 2 ||
+ /* pattern can't be 0xffff */
+ (raw_spec->pattern[0] == 0xff &&
+ raw_spec->pattern[1] == 0xff)) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check pattern mask */
+ if (raw_mask->pattern[0] != 0xff ||
+ raw_mask->pattern[1] != 0xff) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mask.flex_bytes_mask = 0xffff;
+ rule->input.flex_bytes =
+ (((uint16_t)raw_spec->pattern[1]) << 8) |
+ raw_spec->pattern[0];
+ rule->flex_bytes_offset = raw_spec->offset;
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* check if the next not void item is END */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
+
+ return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
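+
+#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard: examples are not built */
+/*
+ * A minimal sketch (not driver code) of a flow that the parser above
+ * accepts in perfect mode: fully masked IPv4 src/dst addresses and UDP
+ * ports, steered to a queue. It assumes the port was configured with
+ * fdir_conf.mode = RTE_FDIR_MODE_PERFECT; addresses, ports and the
+ * queue index are arbitrary.
+ */
+static int
+fdir_ipv4_udp_example(uint16_t port_id, struct rte_flow_error *err)
+{
+	static const struct rte_flow_attr attr = { .ingress = 1 };
+	static const struct rte_flow_item_ipv4 ip_spec = {
+		.hdr = {
+			.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
+			.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
+		},
+	};
+	static const struct rte_flow_item_ipv4 ip_mask = {
+		.hdr = { .src_addr = RTE_BE32(0xFFFFFFFF),
+			 .dst_addr = RTE_BE32(0xFFFFFFFF) },
+	};
+	static const struct rte_flow_item_udp udp_spec = {
+		.hdr = { .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) },
+	};
+	static const struct rte_flow_item_udp udp_mask = {
+		.hdr = { .src_port = RTE_BE16(0xFFFF),
+			 .dst_port = RTE_BE16(0xFFFF) },
+	};
+	const struct rte_flow_item pattern[] = {
+		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+		  .spec = &ip_spec, .mask = &ip_mask },
+		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
+		  .spec = &udp_spec, .mask = &udp_mask },
+		{ .type = RTE_FLOW_ITEM_TYPE_END },
+	};
+	const struct rte_flow_action_queue queue = { .index = 1 };
+	const struct rte_flow_action actions[] = {
+		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+		{ .type = RTE_FLOW_ACTION_TYPE_END },
+	};
+
+	return rte_flow_create(port_id, &attr, pattern, actions, err)
+			? 0 : -rte_errno;
+}
+#endif /* TXGBE_FLOW_DOC_EXAMPLES */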
+
+#define NVGRE_PROTOCOL 0x6558
+
+/**
+ * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
+ * And get the flow director filter info as well.
+ * VxLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be UDP.
+ * The fourth not void item must be VxLAN.
+ * The next not void item must be the inner ETH.
+ * The next not void item must be VLAN or IPV4.
+ * The next not void item must be END.
+ * NVGRE PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be NVGRE.
+ * The next not void item must be the inner ETH.
+ * The next not void item must be VLAN or IPV4.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * VxLAN pattern example:
+ * ITEM		Spec			Mask
+ * ETH		NULL			NULL
+ * IPV4/IPV6	NULL			NULL
+ * UDP		NULL			NULL
+ * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
+ * MAC VLAN	tci	0x2016		0xEFFF
+ * END
+ * NVGRE pattern example:
+ * ITEM		Spec			Mask
+ * ETH		NULL			NULL
+ * IPV4/IPV6	NULL			NULL
+ * NVGRE	protocol	0x6558	0xFFFF
+ *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
+ * MAC VLAN	tci	0x2016		0xEFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct txgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_mask;
+ uint32_t j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+	/**
+	 * Some fields may not be provided. Set spec to 0 and mask to default
+	 * value, so nothing more needs to be done later for the fields that
+	 * are not provided.
+	 */
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+
+	/**
+	 * The first not void item should be
+	 * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
+	 */
+ item = next_no_void_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
+
+ /* Skip MAC. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is IPv4 or IPv6. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is UDP or NVGRE. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip UDP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is VxLAN. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* check if the next not void item is MAC */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ eth_mask = item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* src MAC address should be masked. */
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j]) {
+ memset(rule, 0,
+ sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ rule->mask.mac_addr_byte_mask = 0;
+ for (j = 0; j < ETH_ADDR_LEN; j++) {
+ /* It's a per byte mask. */
+ if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+ rule->mask.mac_addr_byte_mask |= 0x1 << j;
+ } else if (eth_mask->dst.addr_bytes[j]) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* When no vlan, considered as full mask. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ item = next_no_void_pattern(pattern, item);
+ if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
+ (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+	/**
+	 * If the tags are 0, it means we don't care about the VLAN.
+	 * Do nothing.
+	 */
+
+ return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
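+
+#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard: examples are not built */
+/*
+ * Sketch only: the pattern shape accepted by the tunnel parser above -
+ * the outer ETH/IPV4/UDP/VXLAN items carry no spec or mask, the inner
+ * ETH matches a destination MAC (per-byte full mask), and a VLAN item
+ * follows it. The MAC address and queue index are arbitrary.
+ */
+static int
+fdir_vxlan_example(uint16_t port_id, struct rte_flow_error *err)
+{
+	static const struct rte_flow_attr attr = { .ingress = 1 };
+	static const struct rte_flow_item_eth inner_eth_spec = {
+		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
+	};
+	static const struct rte_flow_item_eth inner_eth_mask = {
+		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+	};
+	const struct rte_flow_item pattern[] = {
+		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
+		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
+		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
+		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
+		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
+		{ .type = RTE_FLOW_ITEM_TYPE_END },
+	};
+	const struct rte_flow_action_queue queue = { .index = 1 };
+	const struct rte_flow_action actions[] = {
+		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+		{ .type = RTE_FLOW_ACTION_TYPE_END },
+	};
+
+	return rte_flow_create(port_id, &attr, pattern, actions, err)
+			? 0 : -rte_errno;
+}
+#endif /* TXGBE_FLOW_DOC_EXAMPLES */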
+
+static int
+txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct txgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
+ actions, rule, error);
+ if (!ret)
+ goto step_next;
+
+ ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
+ actions, rule, error);
+ if (ret)
+ return ret;
+
+step_next:
+
+ if (hw->mac.type == txgbe_mac_raptor &&
+ rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
+ (rule->input.src_port != 0 || rule->input.dst_port != 0))
+ return -ENOTSUP;
+
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
+ return -ENOTSUP;
+
+ if (rule->queue >= dev->data->nb_rx_queues)
+ return -ENOTSUP;
+
+ return ret;
+}
+
+static int
+txgbe_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct txgbe_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ uint16_t n;
+
+ /**
+ * rss only supports forwarding,
+ * check if the first not void action is RSS.
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ if (!rss || !rss->queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->queue_num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (txgbe_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
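+
+#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard: examples are not built */
+/*
+ * Sketch only: the single RSS action this parser accepts - default
+ * hash function, level 0, default key, and queues that must all be
+ * below nb_rx_queues. The queue list and hash types are arbitrary.
+ */
+static const uint16_t rss_example_queues[] = { 0, 1, 2, 3 };
+static const struct rte_flow_action_rss rss_example_conf = {
+	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+	.level = 0,
+	.types = ETH_RSS_IP,
+	.queue_num = RTE_DIM(rss_example_queues),
+	.queue = rss_example_queues,
+};
+static const struct rte_flow_action rss_example_actions[] = {
+	{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_example_conf },
+	{ .type = RTE_FLOW_ACTION_TYPE_END },
+};
+#endif /* TXGBE_FLOW_DOC_EXAMPLES */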
+
+/* remove the rss filter */
+static void
+txgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ if (filter_info->rss_info.conf.queue_num)
+ txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
+void
+txgbe_filterlist_init(void)
+{
+ TAILQ_INIT(&filter_ntuple_list);
+ TAILQ_INIT(&filter_ethertype_list);
+ TAILQ_INIT(&filter_syn_list);
+ TAILQ_INIT(&filter_fdir_list);
+ TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&filter_rss_list);
+ TAILQ_INIT(&txgbe_flow_list);
+}
+
+void
+txgbe_filterlist_flush(void)
+{
+ struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+ struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+ while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr,
+ entries);
+ rte_free(ntuple_filter_ptr);
+ }
+
+ while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr,
+ entries);
+ rte_free(ethertype_filter_ptr);
+ }
+
+ while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ rte_free(syn_filter_ptr);
+ }
+
+ while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr,
+ entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+
+ while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr,
+ entries);
+ rte_free(fdir_rule_ptr);
+ }
+
+ while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr,
+ entries);
+ rte_free(rss_filter_ptr);
+ }
+
+ while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
+ TAILQ_REMOVE(&txgbe_flow_list,
+ txgbe_flow_mem_ptr,
+ entries);
+ rte_free(txgbe_flow_mem_ptr->flow);
+ rte_free(txgbe_flow_mem_ptr);
+ }
+}
+
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter type.
+ * We will let it use the filter type which it hits first.
+ * So, the sequence of the parsers below matters.
+ */
+static struct rte_flow *
+txgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct txgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_rte_flow_rss_conf rss_conf;
+ struct rte_flow *flow = NULL;
+ struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct txgbe_rss_conf_ele *rss_filter_ptr;
+ struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+ uint8_t first_mask = FALSE;
+
+ flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+		return NULL;
+	}
+ txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
+ sizeof(struct txgbe_flow_mem), 0);
+ if (!txgbe_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ txgbe_flow_mem_ptr->flow = flow;
+ TAILQ_INSERT_TAIL(&txgbe_flow_list,
+ txgbe_flow_mem_ptr, entries);
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+
+#ifdef RTE_LIBRTE_SECURITY
+	/* An ESP flow is not really a flow */
+ if (ntuple_filter.proto == IPPROTO_ESP)
+ return flow;
+#endif
+
+ if (!ret) {
+ ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
+ sizeof(struct txgbe_ntuple_filter_ele), 0);
+ if (!ntuple_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, ðertype_filter, error);
+ if (!ret) {
+ ret = txgbe_add_del_ethertype_filter(dev,
+ ðertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr = rte_zmalloc(
+ "txgbe_ethertype_filter",
+ sizeof(struct txgbe_ethertype_filter_ele), 0);
+ if (!ethertype_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(ðertype_filter_ptr->filter_info,
+ ðertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = txgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
+ sizeof(struct txgbe_eth_syn_filter_ele), 0);
+ if (!syn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&syn_filter_ptr->filter_info,
+ &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
+ TAILQ_INSERT_TAIL(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ flow->rule = syn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_SYN;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
+ ret = txgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret) {
+ /* A mask cannot be deleted. */
+ if (fdir_rule.b_mask) {
+ if (!fdir_info->mask_added) {
+ /* It's the first time the mask is set. */
+ rte_memcpy(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct txgbe_hw_fdir_mask));
+ fdir_info->flex_bytes_offset =
+ fdir_rule.flex_bytes_offset;
+
+ if (fdir_rule.mask.flex_bytes_mask)
+ txgbe_fdir_set_flexbytes_offset(dev,
+ fdir_rule.flex_bytes_offset);
+
+ ret = txgbe_fdir_set_input_mask(dev);
+ if (ret)
+ goto out;
+
+ fdir_info->mask_added = TRUE;
+ first_mask = TRUE;
+ } else {
+ /**
+ * Only support one global mask,
+ * all the masks should be the same.
+ */
+ ret = memcmp(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct txgbe_hw_fdir_mask));
+ if (ret)
+ goto out;
+
+ if (fdir_info->flex_bytes_offset !=
+ fdir_rule.flex_bytes_offset)
+ goto out;
+ }
+ }
+
+ if (fdir_rule.b_spec) {
+ ret = txgbe_fdir_filter_program(dev, &fdir_rule,
+ FALSE, FALSE);
+ if (!ret) {
+ fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
+ sizeof(struct txgbe_fdir_rule_ele), 0);
+ if (!fdir_rule_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&fdir_rule_ptr->filter_info,
+ &fdir_rule,
+ sizeof(struct txgbe_fdir_rule));
+ TAILQ_INSERT_TAIL(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ flow->rule = fdir_rule_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+ return flow;
+ }
+
+			/* clear the mask_added flag if programming failed */
+			if (first_mask)
+				fdir_info->mask_added = FALSE;
+			goto out;
+ }
+
+ goto out;
+ }
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+ if (!ret) {
+ ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
+ if (!ret) {
+ l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
+ sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
+ if (!l2_tn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&l2_tn_filter_ptr->filter_info,
+ &l2_tn_filter,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ flow->rule = l2_tn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+ return flow;
+ }
+ }
+
+ memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ ret = txgbe_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+ if (!ret) {
+ ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
+ if (!ret) {
+ rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
+ sizeof(struct txgbe_rss_conf_ele), 0);
+ if (!rss_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
+ TAILQ_INSERT_TAIL(&filter_rss_list,
+ rss_filter_ptr, entries);
+ flow->rule = rss_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_HASH;
+ return flow;
+ }
+ }
+
+out:
+ TAILQ_REMOVE(&txgbe_flow_list,
+ txgbe_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(txgbe_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Check if the flow rule is supported by txgbe.
+ * It only checks the format. It doesn't guarantee that the rule can be
+ * programmed into the HW, because there may not be enough room for it.
+ */
+static int
+txgbe_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct txgbe_fdir_rule fdir_rule;
+ struct txgbe_rte_flow_rss_conf rss_conf;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, ðertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = txgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
+ ret = txgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret)
+ return 0;
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ ret = txgbe_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+
+ return ret;
+}
+
+/* Destroy a flow rule on txgbe. */
+static int
+txgbe_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct txgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&ntuple_filter,
+ &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
+ ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(ðertype_filter,
+ ðertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
+ ret = txgbe_add_del_ethertype_filter(dev,
+ ðertype_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&syn_filter,
+ &syn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_syn_filter));
+ ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
+ rte_memcpy(&fdir_rule,
+ &fdir_rule_ptr->filter_info,
+ sizeof(struct txgbe_fdir_rule));
+ ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ rte_free(fdir_rule_ptr);
+ if (TAILQ_EMPTY(&filter_fdir_list))
+ fdir_info->mask_added = false;
+ }
+ break;
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_HASH:
+ rss_filter_ptr = (struct txgbe_rss_conf_ele *)
+ pmd_flow->rule;
+ ret = txgbe_config_rss_filter(dev,
+ &rss_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr, entries);
+ rte_free(rss_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
+ if (txgbe_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&txgbe_flow_list,
+ txgbe_flow_mem_ptr, entries);
+ rte_free(txgbe_flow_mem_ptr);
+ /* each flow is tracked once; stop before the freed
+ * node's next pointer is dereferenced
+ */
+ break;
+ }
+ }
+ rte_free(flow);
+
+ return ret;
+}
+
+/* Destroy all flow rules associated with a port on txgbe. */
+static int
+txgbe_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ txgbe_clear_all_ntuple_filter(dev);
+ txgbe_clear_all_ethertype_filter(dev);
+ txgbe_clear_syn_filter(dev);
+
+ ret = txgbe_clear_all_fdir_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ret = txgbe_clear_all_l2_tn_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ txgbe_clear_rss_filter(dev);
+
+ txgbe_filterlist_flush();
+
+ return 0;
+}
+
+const struct rte_flow_ops txgbe_flow_ops = {
+ .validate = txgbe_flow_validate,
+ .create = txgbe_flow_create,
+ .destroy = txgbe_flow_destroy,
+ .flush = txgbe_flow_flush,
+};
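+/* These callbacks are reached through the generic rte_flow API, e.g. a call
+ * to rte_flow_validate(port_id, &attr, pattern, actions, &err) on a txgbe
+ * port ends up in txgbe_flow_validate() above.
+ */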
new file mode 100644
@@ -0,0 +1,736 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <rte_ethdev_pci.h>
+#include <rte_ip.h>
+#include <rte_jhash.h>
+#include <rte_security_driver.h>
+#include <rte_cryptodev.h>
+#include <rte_flow.h>
+
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_ipsec.h"
+
+#define RTE_TXGBE_REGISTER_POLL_WAIT_5_MS 5
+
+#define CMP_IP(a, b) (\
+ (a).ipv6[0] == (b).ipv6[0] && \
+ (a).ipv6[1] == (b).ipv6[1] && \
+ (a).ipv6[2] == (b).ipv6[2] && \
+ (a).ipv6[3] == (b).ipv6[3])
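+/* Compare all four 32-bit words so the macro works for both IPv4 and IPv6;
+ * for IPv4 entries the 'ipv4' union member aliases ipv6[0] and the remaining
+ * words are expected to be zero.
+ */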
+
+static void
+txgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
+ int i = 0;
+
+ /* clear Rx IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ uint16_t index = i << 3;
+ uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_IP | index;
+ wr32(hw, TXGBE_IPSRXADDR(0), 0);
+ wr32(hw, TXGBE_IPSRXADDR(1), 0);
+ wr32(hw, TXGBE_IPSRXADDR(2), 0);
+ wr32(hw, TXGBE_IPSRXADDR(3), 0);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+ }
+
+ /* clear Rx SPI and Rx/Tx SA tables*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ uint32_t index = i << 3;
+ uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_SPI | index;
+ wr32(hw, TXGBE_IPSRXSPI, 0);
+ wr32(hw, TXGBE_IPSRXADDRIDX, 0);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+ reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_KEY | index;
+ wr32(hw, TXGBE_IPSRXKEY(0), 0);
+ wr32(hw, TXGBE_IPSRXKEY(1), 0);
+ wr32(hw, TXGBE_IPSRXKEY(2), 0);
+ wr32(hw, TXGBE_IPSRXKEY(3), 0);
+ wr32(hw, TXGBE_IPSRXSALT, 0);
+ wr32(hw, TXGBE_IPSRXMODE, 0);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+ reg_val = TXGBE_IPSTXIDX_WRITE | index;
+ wr32(hw, TXGBE_IPSTXKEY(0), 0);
+ wr32(hw, TXGBE_IPSTXKEY(1), 0);
+ wr32(hw, TXGBE_IPSTXKEY(2), 0);
+ wr32(hw, TXGBE_IPSTXKEY(3), 0);
+ wr32(hw, TXGBE_IPSTXSALT, 0);
+ wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
+ }
+
+ memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
+ memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
+ memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
+}
+
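+/* Program a session's SA into the hardware tables. For ingress (decryption)
+ * SAs, the destination IP is matched against or added to the Rx IP table,
+ * then the SPI, key and mode are written to the Rx SA tables. For egress
+ * (encryption) SAs, only the Tx key/salt table is written and the chosen
+ * index is saved in the session so the Tx path can reference it.
+ */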
+static int
+txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
+{
+ struct rte_eth_dev *dev = ic_session->dev;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
+ uint32_t reg_val;
+ int sa_index = -1;
+
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ int i, ip_index = -1;
+ uint8_t *key;
+
+ /* Find a match in the IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (CMP_IP(priv->rx_ip_tbl[i].ip,
+ ic_session->dst_ip)) {
+ ip_index = i;
+ break;
+ }
+ }
+ /* If no match, find a free entry in the IP table*/
+ if (ip_index < 0) {
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (priv->rx_ip_tbl[i].ref_count == 0) {
+ ip_index = i;
+ break;
+ }
+ }
+ }
+
+ /* Fail if no match and no free entries*/
+ if (ip_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Rx IP table\n");
+ return -1;
+ }
+
+ /* Find a free entry in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->rx_sa_tbl[i].used == 0) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no free entries*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Rx SA table\n");
+ return -1;
+ }
+
+ priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
+ ic_session->dst_ip.ipv6[0];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
+ ic_session->dst_ip.ipv6[1];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
+ ic_session->dst_ip.ipv6[2];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
+ ic_session->dst_ip.ipv6[3];
+ priv->rx_ip_tbl[ip_index].ref_count++;
+
+ priv->rx_sa_tbl[sa_index].spi = ic_session->spi;
+ priv->rx_sa_tbl[sa_index].ip_index = ip_index;
+ priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION)
+ priv->rx_sa_tbl[sa_index].mode |=
+ (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
+ if (ic_session->dst_ip.type == IPv6) {
+ priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
+ priv->rx_ip_tbl[ip_index].ip.type = IPv6;
+ } else if (ic_session->dst_ip.type == IPv4) {
+ priv->rx_ip_tbl[ip_index].ip.type = IPv4;
+ }
+ priv->rx_sa_tbl[sa_index].used = 1;
+
+ /* write IP table entry*/
+ reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_IP | (ip_index << 3);
+ if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
+ wr32(hw, TXGBE_IPSRXADDR(0), 0);
+ wr32(hw, TXGBE_IPSRXADDR(1), 0);
+ wr32(hw, TXGBE_IPSRXADDR(2), 0);
+ wr32(hw, TXGBE_IPSRXADDR(3),
+ priv->rx_ip_tbl[ip_index].ip.ipv4);
+ } else {
+ wr32(hw, TXGBE_IPSRXADDR(0),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
+ wr32(hw, TXGBE_IPSRXADDR(1),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
+ wr32(hw, TXGBE_IPSRXADDR(2),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
+ wr32(hw, TXGBE_IPSRXADDR(3),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
+ }
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+
+ /* write SPI table entry*/
+ reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
+ wr32(hw, TXGBE_IPSRXSPI,
+ priv->rx_sa_tbl[sa_index].spi);
+ wr32(hw, TXGBE_IPSRXADDRIDX,
+ priv->rx_sa_tbl[sa_index].ip_index);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+
+ /* write Key table entry*/
+ key = malloc(ic_session->key_len);
+ if (!key)
+ return -ENOMEM;
+
+ memcpy(key, ic_session->key, ic_session->key_len);
+
+ reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3);
+ wr32(hw, TXGBE_IPSRXKEY(0),
+ rte_cpu_to_be_32(*(uint32_t *)&key[12]));
+ wr32(hw, TXGBE_IPSRXKEY(1),
+ rte_cpu_to_be_32(*(uint32_t *)&key[8]));
+ wr32(hw, TXGBE_IPSRXKEY(2),
+ rte_cpu_to_be_32(*(uint32_t *)&key[4]));
+ wr32(hw, TXGBE_IPSRXKEY(3),
+ rte_cpu_to_be_32(*(uint32_t *)&key[0]));
+ wr32(hw, TXGBE_IPSRXSALT,
+ rte_cpu_to_be_32(ic_session->salt));
+ wr32(hw, TXGBE_IPSRXMODE,
+ priv->rx_sa_tbl[sa_index].mode);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+
+ free(key);
+ } else { /* ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION */
+ uint8_t *key;
+ int i;
+
+ /* Find a free entry in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->tx_sa_tbl[i].used == 0) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no free entries*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Tx SA table\n");
+ return -1;
+ }
+
+ priv->tx_sa_tbl[sa_index].spi =
+ rte_cpu_to_be_32(ic_session->spi);
+ priv->tx_sa_tbl[sa_index].used = 1;
+ ic_session->sa_index = sa_index;
+
+ key = malloc(ic_session->key_len);
+ if (!key)
+ return -ENOMEM;
+
+ memcpy(key, ic_session->key, ic_session->key_len);
+
+ /* write Key table entry*/
+ reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE | (sa_index << 3);
+ wr32(hw, TXGBE_IPSTXKEY(0),
+ rte_cpu_to_be_32(*(uint32_t *)&key[12]));
+ wr32(hw, TXGBE_IPSTXKEY(1),
+ rte_cpu_to_be_32(*(uint32_t *)&key[8]));
+ wr32(hw, TXGBE_IPSTXKEY(2),
+ rte_cpu_to_be_32(*(uint32_t *)&key[4]));
+ wr32(hw, TXGBE_IPSTXKEY(3),
+ rte_cpu_to_be_32(*(uint32_t *)&key[0]));
+ wr32(hw, TXGBE_IPSTXSALT,
+ rte_cpu_to_be_32(ic_session->salt));
+ wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
+
+ free(key);
+ }
+
+ return 0;
+}
+
+static int
+txgbe_crypto_remove_sa(struct rte_eth_dev *dev,
+ struct txgbe_crypto_session *ic_session)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
+ uint32_t reg_val;
+ int sa_index = -1;
+
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ int i, ip_index = -1;
+
+ /* Find a match in the IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
+ ip_index = i;
+ break;
+ }
+ }
+
+ /* Fail if no match*/
+ if (ip_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Rx IP table\n");
+ return -1;
+ }
+
+ /* Find the matching entry in the Rx SA table */
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->rx_sa_tbl[i].spi ==
+ rte_cpu_to_be_32(ic_session->spi)) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no match*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Rx SA table\n");
+ return -1;
+ }
+
+ /* Disable and clear Rx SPI and key table entries */
+ reg_val = TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
+ wr32(hw, TXGBE_IPSRXSPI, 0);
+ wr32(hw, TXGBE_IPSRXADDRIDX, 0);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+ reg_val = TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3);
+ wr32(hw, TXGBE_IPSRXKEY(0), 0);
+ wr32(hw, TXGBE_IPSRXKEY(1), 0);
+ wr32(hw, TXGBE_IPSRXKEY(2), 0);
+ wr32(hw, TXGBE_IPSRXKEY(3), 0);
+ wr32(hw, TXGBE_IPSRXSALT, 0);
+ wr32(hw, TXGBE_IPSRXMODE, 0);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+ priv->rx_sa_tbl[sa_index].used = 0;
+
+ /* If last used then clear the IP table entry*/
+ priv->rx_ip_tbl[ip_index].ref_count--;
+ if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
+ reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_IP |
+ (ip_index << 3);
+ wr32(hw, TXGBE_IPSRXADDR(0), 0);
+ wr32(hw, TXGBE_IPSRXADDR(1), 0);
+ wr32(hw, TXGBE_IPSRXADDR(2), 0);
+ wr32(hw, TXGBE_IPSRXADDR(3), 0);
+ }
+ } else { /* ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION */
+ int i;
+
+ /* Find a match in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->tx_sa_tbl[i].spi ==
+ rte_cpu_to_be_32(ic_session->spi)) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no matching entry */
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Tx SA table\n");
+ return -1;
+ }
+ reg_val = TXGBE_IPSRXIDX_WRITE | (sa_index << 3);
+ wr32(hw, TXGBE_IPSTXKEY(0), 0);
+ wr32(hw, TXGBE_IPSTXKEY(1), 0);
+ wr32(hw, TXGBE_IPSTXKEY(2), 0);
+ wr32(hw, TXGBE_IPSTXKEY(3), 0);
+ wr32(hw, TXGBE_IPSTXSALT, 0);
+ wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
+
+ priv->tx_sa_tbl[sa_index].used = 0;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_crypto_create_session(void *device,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *session,
+ struct rte_mempool *mempool)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ struct txgbe_crypto_session *ic_session = NULL;
+ struct rte_crypto_aead_xform *aead_xform;
+ struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf;
+
+ if (rte_mempool_get(mempool, (void **)&ic_session)) {
+ PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
+ return -ENOMEM;
+ }
+
+ if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
+ conf->crypto_xform->aead.algo !=
+ RTE_CRYPTO_AEAD_AES_GCM) {
+ PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
+ rte_mempool_put(mempool, (void *)ic_session);
+ return -ENOTSUP;
+ }
+ aead_xform = &conf->crypto_xform->aead;
+
+ if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
+ } else {
+ PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
+ rte_mempool_put(mempool, (void *)ic_session);
+ return -ENOTSUP;
+ }
+ } else {
+ if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
+ } else {
+ PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
+ rte_mempool_put(mempool, (void *)ic_session);
+ return -ENOTSUP;
+ }
+ }
+
+ ic_session->key = aead_xform->key.data;
+ ic_session->key_len = aead_xform->key.length;
+ memcpy(&ic_session->salt,
+ &aead_xform->key.data[aead_xform->key.length], 4);
+ ic_session->spi = conf->ipsec.spi;
+ ic_session->dev = eth_dev;
+
+ set_sec_session_private_data(session, ic_session);
+
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+ if (txgbe_crypto_add_sa(ic_session)) {
+ PMD_DRV_LOG(ERR, "Failed to add SA\n");
+ rte_mempool_put(mempool, (void *)ic_session);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int
+txgbe_crypto_session_get_size(__rte_unused void *device)
+{
+ return sizeof(struct txgbe_crypto_session);
+}
+
+static int
+txgbe_crypto_remove_session(void *device,
+ struct rte_security_session *session)
+{
+ struct rte_eth_dev *eth_dev = device;
+ struct txgbe_crypto_session *ic_session =
+ (struct txgbe_crypto_session *)
+ get_sec_session_private_data(session);
+ struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
+
+ if (eth_dev != ic_session->dev) {
+ PMD_DRV_LOG(ERR, "Session not bound to this device\n");
+ return -ENODEV;
+ }
+
+ if (txgbe_crypto_remove_sa(eth_dev, ic_session)) {
+ PMD_DRV_LOG(ERR, "Failed to remove session\n");
+ return -EFAULT;
+ }
+
+ rte_mempool_put(mempool, (void *)ic_session);
+
+ return 0;
+}
+
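+/* Sketch of the math below: with ESP_ICV_SIZE 16 and ESP_TRAILER_SIZE 2, a
+ * packet whose ESP pad-length byte is 2 yields 2 + 2 + 16 = 20 bytes to be
+ * accounted for past the payload.
+ */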
+static inline uint8_t
+txgbe_crypto_compute_pad_len(struct rte_mbuf *m)
+{
+ if (m->nb_segs == 1) {
+ /* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
+ * payload padding size is stored at <pkt_len - 18>
+ */
+ uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
+ rte_pktmbuf_pkt_len(m) -
+ (ESP_TRAILER_SIZE + ESP_ICV_SIZE));
+ return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
+ }
+ return 0;
+}
+
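+/* rte_security set_pkt_metadata() hook: for egress sessions, pack the SA
+ * index, pad length and an 'encrypt' flag into the mbuf's udata64 (see
+ * union txgbe_crypto_tx_desc_md) so the Tx path can fill the descriptor.
+ */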
+static int
+txgbe_crypto_update_mb(void *device __rte_unused,
+ struct rte_security_session *session,
+ struct rte_mbuf *m, void *params __rte_unused)
+{
+ struct txgbe_crypto_session *ic_session =
+ get_sec_session_private_data(session);
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+ union txgbe_crypto_tx_desc_md *mdata =
+ (union txgbe_crypto_tx_desc_md *)&m->udata64;
+ mdata->enc = 1;
+ mdata->sa_idx = ic_session->sa_index;
+ mdata->pad_len = txgbe_crypto_compute_pad_len(m);
+ }
+ return 0;
+}
+
+static const struct rte_security_capability *
+txgbe_crypto_capabilities_get(void *device __rte_unused)
+{
+ static const struct rte_cryptodev_capabilities
+ aes_gcm_gmac_crypto_capabilities[] = {
+ { /* AES GMAC (128-bit) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (128-bit) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ {
+ .op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
+ }, }
+ },
+ };
+
+ static const struct rte_security_capability
+ txgbe_security_capabilities[] = {
+ { /* IPsec Inline Crypto ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Crypto ESP Transport Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = 0
+ },
+ { /* IPsec Inline Crypto ESP Tunnel Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Crypto ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = 0
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+ };
+
+ return txgbe_security_capabilities;
+}
+
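+/* Sanity-check the port configuration (LRO and keeping the CRC conflict
+ * with inline IPsec), then program the security block: Tx buffer
+ * almost-full threshold, minimum IFG, CRC stripping, and the Rx/Tx crypto
+ * enables, verifying each enable by reading the register back.
+ */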
+int
+txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t reg;
+ uint64_t rx_offloads;
+ uint64_t tx_offloads;
+
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+ /* sanity checks */
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+ return -1;
+ }
+ if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+ return -1;
+ }
+
+ /* Set TXGBE_SECTXBUFAF to 0x14 as required in the datasheet */
+ wr32(hw, TXGBE_SECTXBUFAF, 0x14);
+
+ /* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+ * hang will occur with heavy traffic.
+ */
+ reg = rd32(hw, TXGBE_SECTXIFG);
+ reg = (reg & ~TXGBE_SECTXIFG_MIN_MASK) | TXGBE_SECTXIFG_MIN(0x3);
+ wr32(hw, TXGBE_SECTXIFG, reg);
+
+ reg = rd32(hw, TXGBE_SECRXCTL);
+ reg |= TXGBE_SECRXCTL_CRCSTRIP;
+ wr32(hw, TXGBE_SECRXCTL, reg);
+
+ if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
+ reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
+ if (reg != 0) {
+ PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+ return -1;
+ }
+ }
+ if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+ wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
+ reg = rd32(hw, TXGBE_SECTXCTL);
+ if (reg != TXGBE_SECTXCTL_STFWD) {
+ PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+ return -1;
+ }
+ }
+
+ txgbe_crypto_clear_ipsec_tables(dev);
+
+ return 0;
+}
+
+int
+txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+ const void *ip_spec,
+ uint8_t is_ipv6)
+{
+ struct txgbe_crypto_session *ic_session
+ = get_sec_session_private_data(sess);
+
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ if (is_ipv6) {
+ const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
+ ic_session->src_ip.type = IPv6;
+ ic_session->dst_ip.type = IPv6;
+ rte_memcpy(ic_session->src_ip.ipv6,
+ ipv6->hdr.src_addr, 16);
+ rte_memcpy(ic_session->dst_ip.ipv6,
+ ipv6->hdr.dst_addr, 16);
+ } else {
+ const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
+ ic_session->src_ip.type = IPv4;
+ ic_session->dst_ip.type = IPv4;
+ ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
+ ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
+ }
+ return txgbe_crypto_add_sa(ic_session);
+ }
+
+ return 0;
+}
+
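+/* rte_security driver hooks; session_update and session_stats_get are left
+ * NULL as they are not implemented for this device.
+ */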
+static struct rte_security_ops txgbe_security_ops = {
+ .session_create = txgbe_crypto_create_session,
+ .session_update = NULL,
+ .session_get_size = txgbe_crypto_session_get_size,
+ .session_stats_get = NULL,
+ .session_destroy = txgbe_crypto_remove_session,
+ .set_pkt_metadata = txgbe_crypto_update_mb,
+ .capabilities_get = txgbe_crypto_capabilities_get
+};
+
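+/* Probe for the security engine by trying to clear the ODSA bit and checking
+ * that the write sticks; the original register value is restored afterwards.
+ */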
+static int
+txgbe_crypto_capable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t reg_i, reg, capable = 1;
+ /* test if rx crypto can be enabled and then write back initial value*/
+ reg_i = rd32(hw, TXGBE_SECRXCTL);
+ wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
+ reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
+ if (reg != 0)
+ capable = 0;
+ wr32(hw, TXGBE_SECRXCTL, reg_i);
+ return capable;
+}
+
+int
+txgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
+{
+ struct rte_security_ctx *ctx = NULL;
+
+ if (txgbe_crypto_capable(dev)) {
+ ctx = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (ctx) {
+ ctx->device = (void *)dev;
+ ctx->ops = &txgbe_security_ops;
+ ctx->sess_cnt = 0;
+ dev->security_ctx = ctx;
+ } else {
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef TXGBE_IPSEC_H_
+#define TXGBE_IPSEC_H_
+
+#include <rte_security.h>
+
+#define IPSRXMOD_VALID 0x00000001
+#define IPSRXMOD_PROTO 0x00000004
+#define IPSRXMOD_DECRYPT 0x00000008
+#define IPSRXMOD_IPV6 0x00000010
+
+#define IPSEC_MAX_RX_IP_COUNT 128
+#define IPSEC_MAX_SA_COUNT 1024
+
+#define ESP_ICV_SIZE 16
+#define ESP_TRAILER_SIZE 2
+
+enum txgbe_operation {
+ TXGBE_OP_AUTHENTICATED_ENCRYPTION,
+ TXGBE_OP_AUTHENTICATED_DECRYPTION
+};
+
+enum txgbe_gcm_key {
+ TXGBE_GCM_KEY_128,
+ TXGBE_GCM_KEY_256
+};
+
+/**
+ * Generic IP address structure
+ * TODO: Find a better location for this, possibly rte_net.h.
+ */
+struct ipaddr {
+ enum ipaddr_type {
+ IPv4,
+ IPv6
+ } type; /**< IP Address Type - IPv4/IPv6 */
+
+ union {
+ uint32_t ipv4;
+ uint32_t ipv6[4];
+ };
+};
+
+/** inline crypto private session structure */
+struct txgbe_crypto_session {
+ enum txgbe_operation op;
+ const uint8_t *key;
+ uint32_t key_len;
+ uint32_t salt;
+ uint32_t sa_index;
+ uint32_t spi;
+ struct ipaddr src_ip;
+ struct ipaddr dst_ip;
+ struct rte_eth_dev *dev;
+} __rte_cache_aligned;
+
+struct txgbe_crypto_rx_ip_table {
+ struct ipaddr ip;
+ uint16_t ref_count;
+};
+struct txgbe_crypto_rx_sa_table {
+ uint32_t spi;
+ uint32_t ip_index;
+ uint8_t mode;
+ uint8_t used;
+};
+
+struct txgbe_crypto_tx_sa_table {
+ uint32_t spi;
+ uint8_t used;
+};
+
+union txgbe_crypto_tx_desc_md {
+ uint64_t data;
+ struct {
+ uint32_t sa_idx; /**< SA table index */
+ uint8_t pad_len; /**< ICV and ESP trailer length */
+ uint8_t enc; /**< enable encryption */
+ };
+};
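+/* The metadata above is carried per packet in the 64-bit mbuf udata64 field;
+ * see txgbe_crypto_update_mb(), which packs it for the Tx path.
+ */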
+
+struct txgbe_ipsec {
+ struct txgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
+ struct txgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
+ struct txgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
+};
+
+int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
+int txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
+int txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+ const void *ip_spec,
+ uint8_t is_ipv6);
+
+#endif /*TXGBE_IPSEC_H_*/
new file mode 100644
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_LOGS_H_
+#define _TXGBE_LOGS_H_
+
+/*
+ * PMD_USER_LOG: for user
+ */
+extern int txgbe_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, txgbe_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+extern int txgbe_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, txgbe_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
+extern int txgbe_logtype_rx;
+#define PMD_RX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, txgbe_logtype_rx, \
+ "%s(): " fmt "\n", __func__, ##args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
+extern int txgbe_logtype_tx;
+#define PMD_TX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, txgbe_logtype_tx, \
+ "%s(): " fmt "\n", __func__, ##args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
+extern int txgbe_logtype_tx_free;
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, txgbe_logtype_tx_free, \
+ "%s(): " fmt "\n", __func__, ##args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_TXGBE_DEBUG_INIT
+#define PMD_TLOG_INIT(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, txgbe_logtype_init, \
+ "%s(): " fmt, __func__, ##args)
+#else
+#define PMD_TLOG_INIT(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_TXGBE_DEBUG_DRIVER
+#define PMD_TLOG_DRIVER(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, txgbe_logtype_driver, \
+ "%s(): " fmt, __func__, ##args)
+#else
+#define PMD_TLOG_DRIVER(level, fmt, args...) do { } while (0)
+#endif
+
+/*
+ * PMD_DEBUG_LOG: for debugger
+ */
+#define TLOG_EMERG(fmt, args...) PMD_TLOG_DRIVER(EMERG, fmt, ##args)
+#define TLOG_ALERT(fmt, args...) PMD_TLOG_DRIVER(ALERT, fmt, ##args)
+#define TLOG_CRIT(fmt, args...) PMD_TLOG_DRIVER(CRIT, fmt, ##args)
+#define TLOG_ERR(fmt, args...) PMD_TLOG_DRIVER(ERR, fmt, ##args)
+#define TLOG_WARN(fmt, args...) PMD_TLOG_DRIVER(WARNING, fmt, ##args)
+#define TLOG_NOTICE(fmt, args...) PMD_TLOG_DRIVER(NOTICE, fmt, ##args)
+#define TLOG_INFO(fmt, args...) PMD_TLOG_DRIVER(INFO, fmt, ##args)
+#define TLOG_DEBUG(fmt, args...) PMD_TLOG_DRIVER(DEBUG, fmt, ##args)
+
+/* to be deleted */
+#define DEBUGOUT(fmt, args...) TLOG_DEBUG(fmt, ##args)
+#define PMD_INIT_FUNC_TRACE() TLOG_DEBUG(" >>")
+#define DEBUGFUNC(fmt) TLOG_DEBUG(fmt)
+
+/*
+ * PMD_TEMP_LOG: for tester
+ */
+#ifdef RTE_LIBRTE_TXGBE_DEBUG
+#define wjmsg_line(fmt, ...) \
+ do { \
+ RTE_LOG(CRIT, PMD, "%s(%d): " fmt, \
+ __FUNCTION__, __LINE__, ## __VA_ARGS__); \
+ } while (0)
+#define wjmsg_stack(fmt, ...) \
+ do { \
+ wjmsg_line(fmt, ## __VA_ARGS__); \
+ rte_dump_stack(); \
+ } while (0)
+#define wjmsg wjmsg_line
+/* also defined here so code using wjmsg_limit still builds when debugging is
+ * enabled; it falls back to plain line logging
+ */
+#define wjmsg_limit wjmsg_line
+
+#define wjdump(mb) { \
+ int j; char buf[128] = ""; \
+ wjmsg("data_len=%d pkt_len=%d vlan_tci=%d " \
+ "packet_type=0x%08x ol_flags=0x%016lx " \
+ "hash.rss=0x%08x hash.fdir.hash=0x%04x hash.fdir.id=%d\n", \
+ mb->data_len, mb->pkt_len, mb->vlan_tci, \
+ mb->packet_type, mb->ol_flags, \
+ mb->hash.rss, mb->hash.fdir.hash, mb->hash.fdir.id); \
+ for (j = 0; j < mb->data_len; j++) { \
+ sprintf(buf + strlen(buf), "%02x ", \
+ ((uint8_t *)(mb->buf_addr) + mb->data_off)[j]); \
+ if (j % 8 == 7) {\
+ wjmsg("%s\n", buf); \
+ buf[0] = '\0'; \
+ } \
+ } \
+ wjmsg("%s\n", buf); \
+}
+#else /* RTE_LIBRTE_TXGBE_DEBUG */
+#define wjmsg_line(fmt, args...) do {} while (0)
+#define wjmsg_limit(fmt, args...) do {} while (0)
+#define wjmsg_stack(fmt, args...) do {} while (0)
+#define wjmsg(fmt, args...) do {} while (0)
+#define wjdump(fmt, args...) do {} while (0)
+#endif /* RTE_LIBRTE_TXGBE_DEBUG */
+
+#endif /* _TXGBE_LOGS_H_ */
new file mode 100644
@@ -0,0 +1,879 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "rte_pmd_txgbe.h"
+
+#define TXGBE_MAX_VFTA (128)
+#define TXGBE_VF_MSG_SIZE_DEFAULT 1
+#define TXGBE_VF_GET_QUEUE_MSG_SIZE 5
+
+static inline uint16_t
+dev_num_vf(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ return pci_dev->max_vfs;
+}
+
+static inline int
+txgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
+{
+ unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
+ uint16_t vfn;
+
+ for (vfn = 0; vfn < vf_num; vfn++) {
+ rte_eth_random_addr(vf_mac_addr);
+ /* keep the random address as default */
+ memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
+ RTE_ETHER_ADDR_LEN);
+ }
+
+ return 0;
+}
+
+static inline int
+txgbe_mb_intr_setup(struct rte_eth_dev *dev)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+
+ intr->mask_misc |= TXGBE_ICRMISC_VFMBX;
+
+ return 0;
+}
+
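+/* Allocate the per-VF private data, derive the pool layout from the VF count
+ * (>= 32 VFs: 64 pools x 2 queues, >= 16: 32 pools x 4 queues, otherwise
+ * 16 pools x 8 queues), generate permanent MAC addresses and set up the
+ * mailbox and its interrupt mask.
+ */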
+void txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_vf_info **vfinfo = TXGBE_DEV_VFDATA(eth_dev);
+ struct txgbe_mirror_info *mirror_info = TXGBE_DEV_MR_INFO(eth_dev);
+ struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(eth_dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ uint16_t vf_num;
+ uint8_t nb_queue;
+
+ PMD_INIT_FUNC_TRACE();
+
+ RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
+ return;
+
+ *vfinfo = rte_zmalloc("vf_info", sizeof(struct txgbe_vf_info) * vf_num, 0);
+ if (*vfinfo == NULL)
+ rte_panic("Cannot allocate memory for private VF data\n");
+
+ rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
+
+ memset(mirror_info, 0, sizeof(struct txgbe_mirror_info));
+ memset(uta_info, 0, sizeof(struct txgbe_uta_info));
+ hw->mac.mc_filter_type = 0;
+
+ if (vf_num >= ETH_32_POOLS) {
+ nb_queue = 2;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
+ } else if (vf_num >= ETH_16_POOLS) {
+ nb_queue = 4;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+ } else {
+ nb_queue = 8;
+ RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+ }
+
+ RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
+
+ txgbe_vf_perm_addr_gen(eth_dev, vf_num);
+
+ /* init_mailbox_params */
+ hw->mbx.init_params(hw);
+
+ /* set mb interrupt mask */
+ txgbe_mb_intr_setup(eth_dev);
+}
+
+void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_vf_info **vfinfo;
+ uint16_t vf_num;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
+ RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
+ RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
+
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
+ return;
+
+ vfinfo = TXGBE_DEV_VFDATA(eth_dev);
+ if (*vfinfo == NULL)
+ return;
+
+ ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
+ rte_free(*vfinfo);
+ *vfinfo = NULL;
+}
+
+static void
+txgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
+ uint16_t vf_num;
+ int i;
+ struct txgbe_ethertype_filter ethertype_filter;
+
+ if (!hw->mac.set_ethertype_anti_spoofing) {
+ PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
+ return;
+ }
+
+ i = txgbe_ethertype_filter_lookup(filter_info,
+ TXGBE_ETHERTYPE_FLOW_CTRL);
+ if (i >= 0) {
+ PMD_DRV_LOG(ERR, "A ether type filter entity for flow control already exists!\n");
+ return;
+ }
+
+ ethertype_filter.ethertype = TXGBE_ETHERTYPE_FLOW_CTRL;
+ ethertype_filter.etqf = TXGBE_ETFLT_ENA |
+ TXGBE_ETFLT_TXAS |
+ TXGBE_ETHERTYPE_FLOW_CTRL;
+ ethertype_filter.etqs = 0;
+ ethertype_filter.conf = TRUE;
+ i = txgbe_ethertype_filter_insert(filter_info,
+ ðertype_filter);
+ if (i < 0) {
+ PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
+ return;
+ }
+
+ wr32(hw, TXGBE_ETFLT(i),
+ (TXGBE_ETFLT_ENA |
+ TXGBE_ETFLT_TXAS |
+ TXGBE_ETHERTYPE_FLOW_CTRL));
+
+ vf_num = dev_num_vf(eth_dev);
+ for (i = 0; i < vf_num; i++)
+ hw->mac.set_ethertype_anti_spoofing(hw, true, i);
+}
+
+int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
+{
+ uint32_t vtctl, fcrth;
+ uint32_t vfre_slot, vfre_offset;
+ uint16_t vf_num;
+ const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
+ const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ uint32_t gpie;
+ uint32_t gcr_ext;
+ uint32_t vlanctrl;
+ int i;
+
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
+ return -1;
+
+ /* enable VMDq and set the default pool for PF */
+ vtctl = rd32(hw, TXGBE_POOLCTL);
+ vtctl &= ~TXGBE_POOLCTL_DEFPL_MASK;
+ vtctl |= TXGBE_POOLCTL_DEFPL(RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
+ vtctl |= TXGBE_POOLCTL_RPLEN;
+ wr32(hw, TXGBE_POOLCTL, vtctl);
+
+ vfre_offset = vf_num & VFRE_MASK;
+ vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;
+
+ /* Enable pools reserved to PF only */
+ wr32(hw, TXGBE_POOLRXENA(vfre_slot), (~0U) << vfre_offset);
+ wr32(hw, TXGBE_POOLRXENA(vfre_slot ^ 1), vfre_slot - 1);
+ wr32(hw, TXGBE_POOLTXENA(vfre_slot), (~0U) << vfre_offset);
+ wr32(hw, TXGBE_POOLTXENA(vfre_slot ^ 1), vfre_slot - 1);
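+ /* The four writes above give the PF every pool from vf_num upward:
+ * e.g. with 8 VFs, slot 0 gets ~0U << 8 = 0xFFFFFF00 and, since
+ * vfre_slot is 0, 'vfre_slot - 1' wraps to 0xFFFFFFFF for slot 1.
+ */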
+
+ wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
+
+ /* clear VMDq map to permanent RAR 0 */
+ hw->mac.clear_vmdq(hw, 0, BIT_MASK32);
+
+ /* clear VMDq map to scan rar 127 */
+ wr32(hw, TXGBE_ETHADDRIDX, hw->mac.num_rar_entries);
+ wr32(hw, TXGBE_ETHADDRASSL, 0);
+ wr32(hw, TXGBE_ETHADDRASSH, 0);
+
+ /* set VMDq map to default PF pool */
+ hw->mac.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
+
+ /*
+ * SW must set PORTCTL.VT_Mode the same as GPIE.VT_Mode
+ */
+ gpie = rd32(hw, TXGBE_GPIE);
+ gpie |= TXGBE_GPIE_MSIX;
+ gcr_ext = rd32(hw, TXGBE_PORTCTL);
+ gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;
+
+ switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
+ case ETH_64_POOLS:
+ gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
+ break;
+ case ETH_32_POOLS:
+ gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
+ break;
+ case ETH_16_POOLS:
+ gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
+ break;
+ }
+
+ wr32(hw, TXGBE_PORTCTL, gcr_ext);
+ wr32(hw, TXGBE_GPIE, gpie);
+
+ /*
+ * enable vlan filtering and allow all vlan tags through
+ */
+ vlanctrl = rd32(hw, TXGBE_VLANCTL);
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+ /* enable all vlan filters */
+ for (i = 0; i < TXGBE_MAX_VFTA; i++)
+ wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
+
+ /* Enable MAC Anti-Spoofing */
+ hw->mac.set_mac_anti_spoofing(hw, FALSE, vf_num);
+
+ /* set flow control threshold to max to avoid tx switch hang */
+ for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+ wr32(hw, TXGBE_FCWTRLO(i), 0);
+ fcrth = rd32(hw, TXGBE_PBRXSIZE(i)) - 32;
+ wr32(hw, TXGBE_FCWTRHI(i), fcrth);
+ }
+
+ txgbe_add_tx_flow_control_drop_filter(eth_dev);
+
+ return 0;
+}
+
+static void
+txgbe_set_rx_mode(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_dev_data *dev_data = eth_dev->data;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ u32 fctrl, vmolr;
+ uint16_t vfn = dev_num_vf(eth_dev);
+
+ /* disable store-bad-packets */
+ wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_SAVEBAD, 0);
+
+ /* Check for Promiscuous and All Multicast modes */
+ fctrl = rd32m(hw, TXGBE_PSRCTL,
+ ~(TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP));
+ fctrl |= TXGBE_PSRCTL_BCA |
+ TXGBE_PSRCTL_MCHFENA;
+
+ vmolr = rd32m(hw, TXGBE_POOLETHCTL(vfn),
+ ~(TXGBE_POOLETHCTL_UCP |
+ TXGBE_POOLETHCTL_MCP |
+ TXGBE_POOLETHCTL_UCHA |
+ TXGBE_POOLETHCTL_MCHA));
+ vmolr |= TXGBE_POOLETHCTL_BCA |
+ TXGBE_POOLETHCTL_UTA |
+ TXGBE_POOLETHCTL_VLA;
+
+ if (dev_data->promiscuous) {
+ fctrl |= TXGBE_PSRCTL_UCP |
+ TXGBE_PSRCTL_MCP;
+ /* the PF does not want packets routed to the VFs, so clear UPE */
+ vmolr |= TXGBE_POOLETHCTL_MCP;
+ } else if (dev_data->all_multicast) {
+ fctrl |= TXGBE_PSRCTL_MCP;
+ vmolr |= TXGBE_POOLETHCTL_MCP;
+ } else {
+ vmolr |= TXGBE_POOLETHCTL_UCHA;
+ vmolr |= TXGBE_POOLETHCTL_MCHA;
+ }
+
+ wr32(hw, TXGBE_POOLETHCTL(vfn), vmolr);
+
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ txgbe_vlan_hw_strip_config(eth_dev);
+}
+
+static inline void
+txgbe_vf_reset_event(struct rte_eth_dev *eth_dev, uint16_t vf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint32_t vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
+
+ vmolr |= (TXGBE_POOLETHCTL_UCHA |
+ TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_UTA);
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+
+ wr32(hw, TXGBE_POOLTAG(vf), 0);
+
+ /* reset multicast table array for vf */
+ vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* reset rx mode */
+ txgbe_set_rx_mode(eth_dev);
+
+ hw->mac.clear_rar(hw, rar_entry);
+}
+
+static inline void
+txgbe_vf_reset_msg(struct rte_eth_dev *eth_dev, uint16_t vf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ uint32_t reg;
+ uint32_t reg_offset, vf_shift;
+ const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
+ const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ uint8_t nb_q_per_pool;
+ int i;
+
+ vf_shift = vf & VFRE_MASK;
+ reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
+
+ /* enable transmit for vf */
+ reg = rd32(hw, TXGBE_POOLTXENA(reg_offset));
+ reg |= (1 << vf_shift);
+ wr32(hw, TXGBE_POOLTXENA(reg_offset), reg);
+
+ /* enable all queue drop for IOV */
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
+ for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
+ txgbe_flush(hw);
+ reg = 1 << (i % 32);
+ wr32m(hw, TXGBE_QPRXDROP(i / 32), reg, reg);
+ }
+
+ /* enable receive for vf */
+ reg = rd32(hw, TXGBE_POOLRXENA(reg_offset));
+ reg |= (1 << vf_shift);
+ wr32(hw, TXGBE_POOLRXENA(reg_offset), reg);
+
+ txgbe_vf_reset_event(eth_dev, vf);
+}
+
+static int
+txgbe_disable_vf_mc_promisc(struct rte_eth_dev *eth_dev, uint32_t vf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ uint32_t vmolr;
+
+ vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
+
+ PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf);
+
+ vmolr &= ~TXGBE_POOLETHCTL_MCP;
+
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+
+ return 0;
+}
+
+static int
+txgbe_vf_reset(struct rte_eth_dev *eth_dev, uint16_t vf, uint32_t *msgbuf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+ unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+
+ txgbe_vf_reset_msg(eth_dev, vf);
+
+ hw->mac.set_rar(hw, rar_entry, vf_mac, vf, true);
+
+ /* Disable multicast promiscuous at reset */
+ txgbe_disable_vf_mc_promisc(eth_dev, vf);
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = TXGBE_VF_RESET | TXGBE_VT_MSGTYPE_ACK;
+ rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ txgbe_write_mbx(hw, msgbuf, TXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return 0;
+}
+
+static int
+txgbe_vf_set_mac_addr(struct rte_eth_dev *eth_dev, uint32_t vf,
+ uint32_t *msgbuf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+
+ if (rte_is_valid_assigned_ether_addr((struct rte_ether_addr *)new_mac)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
+ return hw->mac.set_rar(hw, rar_entry, new_mac, vf, true);
+ }
+ return -1;
+}
+
+static int
+txgbe_vf_set_multicast(struct rte_eth_dev *eth_dev, uint32_t vf,
+ uint32_t *msgbuf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+ int nb_entries = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >>
+ TXGBE_VT_MSGINFO_SHIFT;
+ uint16_t *hash_list = (uint16_t *)&msgbuf[1];
+ uint32_t mta_idx;
+ uint32_t mta_shift;
+ const uint32_t TXGBE_MTA_INDEX_MASK = 0x7F;
+ const uint32_t TXGBE_MTA_BIT_SHIFT = 5;
+ const uint32_t TXGBE_MTA_BIT_MASK = (0x1 << TXGBE_MTA_BIT_SHIFT) - 1;
+ uint32_t reg_val;
+ int i;
+ u32 vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
+
+ /* Disable multicast promiscuous first */
+ txgbe_disable_vf_mc_promisc(eth_dev, vf);
+
+ /* only so many hash values supported */
+ nb_entries = RTE_MIN(nb_entries, TXGBE_MAX_VF_MC_ENTRIES);
+
+ /* store the mc entries */
+ vfinfo[vf].num_vf_mc_hashes = (uint16_t)nb_entries;
+ for (i = 0; i < nb_entries; i++)
+ vfinfo[vf].vf_mc_hashes[i] = hash_list[i];
+
+ if (nb_entries == 0) {
+ vmolr &= ~TXGBE_POOLETHCTL_MCHA;
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+ return 0;
+ }
+
+ for (i = 0; i < vfinfo[vf].num_vf_mc_hashes; i++) {
+ mta_idx = (vfinfo[vf].vf_mc_hashes[i] >> TXGBE_MTA_BIT_SHIFT)
+ & TXGBE_MTA_INDEX_MASK;
+ mta_shift = vfinfo[vf].vf_mc_hashes[i] & TXGBE_MTA_BIT_MASK;
+ reg_val = rd32(hw, TXGBE_MCADDRTBL(mta_idx));
+ reg_val |= (1 << mta_shift);
+ wr32(hw, TXGBE_MCADDRTBL(mta_idx), reg_val);
+ }
+
+ vmolr |= TXGBE_POOLETHCTL_MCHA;
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+
+ return 0;
+}
+
+static int
+txgbe_vf_set_vlan(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
+{
+ int add, vid;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+
+ add = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK)
+ >> TXGBE_VT_MSGINFO_SHIFT;
+ vid = TXGBE_PSRVLAN_VID(msgbuf[1]);
+
+ if (add)
+ vfinfo[vf].vlan_count++;
+ else if (vfinfo[vf].vlan_count)
+ vfinfo[vf].vlan_count--;
+ return hw->mac.set_vfta(hw, vid, vf, (bool)add, false);
+}
+
+static int
+txgbe_set_vf_lpe(struct rte_eth_dev *eth_dev, __rte_unused uint32_t vf,
+ uint32_t *msgbuf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ uint32_t max_frame = msgbuf[1];
+ uint32_t max_frs;
+
+ if ((max_frame < RTE_ETHER_MIN_LEN) ||
+ (max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN))
+ return -1;
+
+ max_frs = rd32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK);
+ if (max_frs < max_frame) {
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(max_frame));
+ }
+
+ return 0;
+}
+
+static int
+txgbe_negotiate_vf_api(struct rte_eth_dev *eth_dev, uint32_t vf,
+ uint32_t *msgbuf)
+{
+ uint32_t api_version = msgbuf[1];
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
+
+ switch (api_version) {
+ case txgbe_mbox_api_10:
+ case txgbe_mbox_api_11:
+ case txgbe_mbox_api_12:
+ case txgbe_mbox_api_13:
+ vfinfo[vf].api_version = (uint8_t)api_version;
+ return 0;
+ default:
+ break;
+ }
+
+ PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n",
+ api_version, vf);
+
+ return -1;
+}
+
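+/* Reply to a TXGBE_VF_GET_QUEUES request: report the VF's Rx/Tx queue count
+ * and default queue, and derive the number of transparent VLAN/DCB traffic
+ * classes from the PF's Tx multi-queue mode.
+ */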
+static int
+txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
+ uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
+ struct rte_eth_conf *eth_conf;
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
+ u8 num_tcs;
+ struct txgbe_hw *hw;
+ u32 vmvir;
+ u32 vlana;
+ u32 vid;
+ u32 user_priority;
+
+ /* Verify if the PF supports the mbox APIs version or not */
+ switch (vfinfo[vf].api_version) {
+ case txgbe_mbox_api_20:
+ case txgbe_mbox_api_11:
+ case txgbe_mbox_api_12:
+ case txgbe_mbox_api_13:
+ break;
+ default:
+ return -1;
+ }
+
+ /* Notify VF of Rx and Tx queue number */
+ msgbuf[TXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
+ msgbuf[TXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool;
+
+ /* Notify VF of default queue */
+ msgbuf[TXGBE_VF_DEF_QUEUE] = default_q;
+
+ /* Notify VF of number of DCB traffic classes */
+ eth_conf = ð_dev->data->dev_conf;
+ switch (eth_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_NONE:
+ case ETH_MQ_TX_DCB:
+ PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
+ ", but its tx mode = %d\n", vf,
+ eth_conf->txmode.mq_mode);
+ return -1;
+
+ case ETH_MQ_TX_VMDQ_DCB:
+ vmdq_dcb_tx_conf = ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+ switch (vmdq_dcb_tx_conf->nb_queue_pools) {
+ case ETH_16_POOLS:
+ num_tcs = ETH_8_TCS;
+ break;
+ case ETH_32_POOLS:
+ num_tcs = ETH_4_TCS;
+ break;
+ default:
+ return -1;
+ }
+ break;
+
+ /* ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
+ case ETH_MQ_TX_VMDQ_ONLY:
+ hw = TXGBE_DEV_HW(eth_dev);
+ vmvir = rd32(hw, TXGBE_POOLTAG(vf));
+ vlana = vmvir & TXGBE_POOLTAG_ACT_MASK;
+ vid = vmvir & TXGBE_POOLTAG_VTAG_MASK;
+ user_priority =
+ TXGBD_POOLTAG_VTAG_UP(vmvir);
+ if ((vlana == TXGBE_POOLTAG_ACT_ALWAYS) &&
+ ((vid != 0) || (user_priority != 0)))
+ num_tcs = 1;
+ else
+ num_tcs = 0;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "PF work with invalid mode = %d\n",
+ eth_conf->txmode.mq_mode);
+ return -1;
+ }
+ msgbuf[TXGBE_VF_TRANS_VLAN] = num_tcs;
+
+ return 0;
+}
+
+static int
+txgbe_set_vf_mc_promisc(struct rte_eth_dev *eth_dev, uint32_t vf,
+ uint32_t *msgbuf)
+{
+ struct txgbe_vf_info *vfinfo = *(TXGBE_DEV_VFDATA(eth_dev));
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ int xcast_mode = msgbuf[1]; /* msgbuf contains the flag to enable */
+ u32 vmolr, fctrl, disable, enable;
+
+ switch (vfinfo[vf].api_version) {
+ case txgbe_mbox_api_12:
+ /* promisc was introduced with API version 1.3 */
+ if (xcast_mode == TXGBEVF_XCAST_MODE_PROMISC)
+ return -EOPNOTSUPP;
+ break;
+ case txgbe_mbox_api_13:
+ break;
+ default:
+ return -1;
+ }
+
+ if (vfinfo[vf].xcast_mode == xcast_mode)
+ goto out;
+
+ switch (xcast_mode) {
+ case TXGBEVF_XCAST_MODE_NONE:
+ disable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
+ TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
+ TXGBE_POOLETHCTL_VLP;
+ enable = 0;
+ break;
+ case TXGBEVF_XCAST_MODE_MULTI:
+ disable = TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
+ TXGBE_POOLETHCTL_VLP;
+ enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA;
+ break;
+ case TXGBEVF_XCAST_MODE_ALLMULTI:
+ disable = TXGBE_POOLETHCTL_UCP | TXGBE_POOLETHCTL_VLP;
+ enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
+ TXGBE_POOLETHCTL_MCP;
+ break;
+ case TXGBEVF_XCAST_MODE_PROMISC:
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ if (!(fctrl & TXGBE_PSRCTL_UCP)) {
+ /* VF promisc requires PF in promisc */
+ PMD_DRV_LOG(ERR,
+ "Enabling VF promisc requires PF in promisc\n");
+ return -1;
+ }
+
+ disable = 0;
+ enable = TXGBE_POOLETHCTL_BCA | TXGBE_POOLETHCTL_MCHA |
+ TXGBE_POOLETHCTL_MCP | TXGBE_POOLETHCTL_UCP |
+ TXGBE_POOLETHCTL_VLP;
+ break;
+ default:
+ return -1;
+ }
+
+ vmolr = rd32(hw, TXGBE_POOLETHCTL(vf));
+ vmolr &= ~disable;
+ vmolr |= enable;
+ wr32(hw, TXGBE_POOLETHCTL(vf), vmolr);
+ vfinfo[vf].xcast_mode = xcast_mode;
+
+out:
+ msgbuf[1] = xcast_mode;
+
+ return 0;
+}
+
+static int
+txgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vf_info *vf_info = *(TXGBE_DEV_VFDATA(dev));
+ uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+ int index = (msgbuf[0] & TXGBE_VT_MSGINFO_MASK) >>
+ TXGBE_VT_MSGINFO_SHIFT;
+
+ if (index) {
+ if (!rte_is_valid_assigned_ether_addr(
+ (struct rte_ether_addr *)new_mac)) {
+ PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf);
+ return -1;
+ }
+
+ vf_info[vf].mac_count++;
+
+ hw->mac.set_rar(hw, vf_info[vf].mac_count,
+ new_mac, vf, true);
+ } else {
+ if (vf_info[vf].mac_count) {
+ hw->mac.clear_rar(hw, vf_info[vf].mac_count);
+ vf_info[vf].mac_count = 0;
+ }
+ }
+ return 0;
+}
+
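+/* Read one mailbox message from the given VF, give the application a chance
+ * to filter it via the RTE_ETH_EVENT_VF_MBOX callback, dispatch it to the
+ * matching handler and write the reply back with an ACK/NACK flag set.
+ */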
+static int
+txgbe_rcv_msg_from_vf(struct rte_eth_dev *eth_dev, uint16_t vf)
+{
+ uint16_t mbx_size = TXGBE_P2VMBX_SIZE;
+ uint16_t msg_size = TXGBE_VF_MSG_SIZE_DEFAULT;
+ uint32_t msgbuf[TXGBE_P2VMBX_SIZE];
+ int32_t retval;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
+ struct rte_pmd_txgbe_mb_event_param ret_param;
+
+ retval = txgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+ if (retval) {
+ PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
+ return retval;
+ }
+
+ /* do nothing if the message has already been processed */
+ if (msgbuf[0] & (TXGBE_VT_MSGTYPE_ACK | TXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /* flush the ack before we write any messages back */
+ txgbe_flush(hw);
+
+ /*
+ * Initialize the structure to send to the user application;
+ * the user's response will be returned in the retval field.
+ */
+ ret_param.retval = RTE_PMD_TXGBE_MB_EVENT_PROCEED;
+ ret_param.vfid = vf;
+ ret_param.msg_type = msgbuf[0] & 0xFFFF;
+ ret_param.msg = (void *)msgbuf;
+
+ /* perform VF reset */
+ if (msgbuf[0] == TXGBE_VF_RESET) {
+ int ret = txgbe_vf_reset(eth_dev, vf, msgbuf);
+
+ vfinfo[vf].clear_to_send = true;
+
+ /* notify application about VF reset */
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_VF_MBOX,
+ &ret_param);
+ return ret;
+ }
+
+ /*
+ * Ask the user application whether we are allowed to perform
+ * these functions:
+ * if ret_param.retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED,
+ * business as usual;
+ * if it is 0, do nothing and send an ACK to the VF;
+ * if ret_param.retval > 1, do nothing and send a NACK to the VF.
+ */
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_VF_MBOX,
+ &ret_param);
+
+ retval = ret_param.retval;
+
+ /* check & process VF to PF mailbox message */
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case TXGBE_VF_SET_MAC_ADDR:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_vf_set_mac_addr(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_SET_MULTICAST:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_vf_set_multicast(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_SET_LPE:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_set_vf_lpe(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_SET_VLAN:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_vf_set_vlan(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_API_NEGOTIATE:
+ retval = txgbe_negotiate_vf_api(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_GET_QUEUES:
+ retval = txgbe_get_vf_queues(eth_dev, vf, msgbuf);
+ msg_size = TXGBE_VF_GET_QUEUE_MSG_SIZE;
+ break;
+ case TXGBE_VF_UPDATE_XCAST_MODE:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_set_vf_mc_promisc(eth_dev, vf, msgbuf);
+ break;
+ case TXGBE_VF_SET_MACVLAN:
+ if (retval == RTE_PMD_TXGBE_MB_EVENT_PROCEED)
+ retval = txgbe_set_vf_macvlan_msg(eth_dev, vf, msgbuf);
+ break;
+ default:
+ PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
+ retval = TXGBE_ERR_MBX;
+ break;
+ }
+
+ /* respond to the VF according to the message processing result */
+ if (retval)
+ msgbuf[0] |= TXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= TXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= TXGBE_VT_MSGTYPE_CTS;
+
+ txgbe_write_mbx(hw, msgbuf, msg_size, vf);
+
+ return retval;
+}
+
+static inline void
+txgbe_rcv_ack_from_vf(struct rte_eth_dev *eth_dev, uint16_t vf)
+{
+ uint32_t msg = TXGBE_VT_MSGTYPE_NACK;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+ struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(eth_dev);
+
+ if (!vfinfo[vf].clear_to_send)
+ txgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
+{
+ uint16_t vf;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+
+ for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
+ /* check & process vf function level reset */
+ if (!txgbe_check_for_rst(hw, vf))
+ txgbe_vf_reset_event(eth_dev, vf);
+
+ /* check & process vf mailbox messages */
+ if (!txgbe_check_for_msg(hw, vf))
+ txgbe_rcv_msg_from_vf(eth_dev, vf);
+
+ /* check & process acks from vf */
+ if (!txgbe_check_for_ack(hw, vf))
+ txgbe_rcv_ack_from_vf(eth_dev, vf);
+ }
+}
new file mode 100644
@@ -0,0 +1,676 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+
+#include "base/txgbe_type.h"
+#include "txgbe_ptypes.h"
+
+/* The txgbe_ptype_lookup table is used to convert the 8-bit ptid given by
+ * the hardware to a bit-field that can be used by SW to more easily determine
+ * the packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * The index into the table is the ptid itself, so a row left at zero simply
+ * means the hardware reported a ptid the driver does not know.
+ *
+ * Typical work flow:
+ *
+ * IF txgbe_ptype_lookup[ptid] == 0
+ * THEN
+ * Packet is unknown
+ * ELSE
+ * Use the RTE_PTYPE_L2/L3/L4, TUNNEL and INNER_* fields of the entry to
+ * decode the outer headers, the tunnel type and the inner protocols
+ * ENDIF
+ */
+#define TPTE(ptid, l2, l3, l4, tun, el2, el3, el4) \
+ [ptid] = (RTE_PTYPE_L2_##l2 | \
+ RTE_PTYPE_L3_##l3 | \
+ RTE_PTYPE_L4_##l4 | \
+ RTE_PTYPE_TUNNEL_##tun | \
+ RTE_PTYPE_INNER_L2_##el2 | \
+ RTE_PTYPE_INNER_L3_##el3 | \
+ RTE_PTYPE_INNER_L4_##el4)
+
+#define RTE_PTYPE_L2_NONE 0
+#define RTE_PTYPE_L3_NONE 0
+#define RTE_PTYPE_L4_NONE 0
+#define RTE_PTYPE_TUNNEL_NONE 0
+#define RTE_PTYPE_INNER_L2_NONE 0
+#define RTE_PTYPE_INNER_L3_NONE 0
+#define RTE_PTYPE_INNER_L4_NONE 0
+
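+/* As an example of the macro expansion, the 0x24 row below,
+ * TPTE(0x24, ETHER, IPV4, TCP, NONE, NONE, NONE, NONE), becomes
+ * txgbe_ptype_lookup[0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
+ * RTE_PTYPE_L4_TCP, with the NONE placeholders contributing 0.
+ */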
+static u32 txgbe_ptype_lookup[TXGBE_PTID_MAX] __rte_cache_aligned = {
+ /* L2:0-3 L3:4-7 L4:8-11 TUN:12-15 EL2:16-19 EL3:20-23 EL4:24-27 */
+ /* L2: ETH */
+ TPTE(0x11, ETHER, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x12, ETHER_TIMESYNC, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x13, ETHER_FIP, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x14, ETHER_LLDP, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x15, ETHER_CNM, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x16, ETHER_EAPOL, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x17, ETHER_ARP, NONE, NONE, NONE, NONE, NONE, NONE),
+ /* L2: Ethertype Filter */
+ TPTE(0x18, ETHER_FILTER, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x19, ETHER_FILTER, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x1A, ETHER_FILTER, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x1B, ETHER_FILTER, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x1C, ETHER_FILTER, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x1D, ETHER_FILTER, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x1E, ETHER_FILTER, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x1F, ETHER_FILTER, NONE, NONE, NONE, NONE, NONE, NONE),
+ /* L3: IP */
+ TPTE(0x21, ETHER, IPV4, FRAG, NONE, NONE, NONE, NONE),
+ TPTE(0x22, ETHER, IPV4, NONFRAG, NONE, NONE, NONE, NONE),
+ TPTE(0x23, ETHER, IPV4, UDP, NONE, NONE, NONE, NONE),
+ TPTE(0x24, ETHER, IPV4, TCP, NONE, NONE, NONE, NONE),
+ TPTE(0x25, ETHER, IPV4, SCTP, NONE, NONE, NONE, NONE),
+ TPTE(0x29, ETHER, IPV6, FRAG, NONE, NONE, NONE, NONE),
+ TPTE(0x2A, ETHER, IPV6, NONFRAG, NONE, NONE, NONE, NONE),
+ TPTE(0x2B, ETHER, IPV6, UDP, NONE, NONE, NONE, NONE),
+ TPTE(0x2C, ETHER, IPV6, TCP, NONE, NONE, NONE, NONE),
+ TPTE(0x2D, ETHER, IPV6, SCTP, NONE, NONE, NONE, NONE),
+ /* L2: FCoE */
+ TPTE(0x30, ETHER_FCOE, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x31, ETHER_FCOE, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x32, ETHER_FCOE, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x33, ETHER_FCOE, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x34, ETHER_FCOE, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x35, ETHER_FCOE, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x36, ETHER_FCOE, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x37, ETHER_FCOE, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x38, ETHER_FCOE, NONE, NONE, NONE, NONE, NONE, NONE),
+ TPTE(0x39, ETHER_FCOE, NONE, NONE, NONE, NONE, NONE, NONE),
+ /* IPv4 -> IPv4/IPv6 */
+ TPTE(0x81, ETHER, IPV4, NONE, IP, NONE, IPV4, FRAG),
+ TPTE(0x82, ETHER, IPV4, NONE, IP, NONE, IPV4, NONFRAG),
+ TPTE(0x83, ETHER, IPV4, NONE, IP, NONE, IPV4, UDP),
+ TPTE(0x84, ETHER, IPV4, NONE, IP, NONE, IPV4, TCP),
+ TPTE(0x85, ETHER, IPV4, NONE, IP, NONE, IPV4, SCTP),
+ TPTE(0x89, ETHER, IPV4, NONE, IP, NONE, IPV6, FRAG),
+ TPTE(0x8A, ETHER, IPV4, NONE, IP, NONE, IPV6, NONFRAG),
+ TPTE(0x8B, ETHER, IPV4, NONE, IP, NONE, IPV6, UDP),
+ TPTE(0x8C, ETHER, IPV4, NONE, IP, NONE, IPV6, TCP),
+ TPTE(0x8D, ETHER, IPV4, NONE, IP, NONE, IPV6, SCTP),
+ /* IPv4 -> GRE/Teredo/VXLAN -> NONE/IPv4/IPv6 */
+ TPTE(0x90, ETHER, IPV4, NONE, GRENAT, NONE, NONE, NONE),
+ TPTE(0x91, ETHER, IPV4, NONE, GRENAT, NONE, IPV4, FRAG),
+ TPTE(0x92, ETHER, IPV4, NONE, GRENAT, NONE, IPV4, NONFRAG),
+ TPTE(0x93, ETHER, IPV4, NONE, GRENAT, NONE, IPV4, UDP),
+ TPTE(0x94, ETHER, IPV4, NONE, GRENAT, NONE, IPV4, TCP),
+ TPTE(0x95, ETHER, IPV4, NONE, GRENAT, NONE, IPV4, SCTP),
+ TPTE(0x99, ETHER, IPV4, NONE, GRENAT, NONE, IPV6, FRAG),
+ TPTE(0x9A, ETHER, IPV4, NONE, GRENAT, NONE, IPV6, NONFRAG),
+ TPTE(0x9B, ETHER, IPV4, NONE, GRENAT, NONE, IPV6, UDP),
+ TPTE(0x9C, ETHER, IPV4, NONE, GRENAT, NONE, IPV6, TCP),
+ TPTE(0x9D, ETHER, IPV4, NONE, GRENAT, NONE, IPV6, SCTP),
+ /* IPv4 -> GRE/Teredo/VXLAN -> MAC -> NONE/IPv4/IPv6 */
+ TPTE(0xA0, ETHER, IPV4, NONE, GRENAT, ETHER, NONE, NONE),
+ TPTE(0xA1, ETHER, IPV4, NONE, GRENAT, ETHER, IPV4, FRAG),
+ TPTE(0xA2, ETHER, IPV4, NONE, GRENAT, ETHER, IPV4, NONFRAG),
+ TPTE(0xA3, ETHER, IPV4, NONE, GRENAT, ETHER, IPV4, UDP),
+ TPTE(0xA4, ETHER, IPV4, NONE, GRENAT, ETHER, IPV4, TCP),
+ TPTE(0xA5, ETHER, IPV4, NONE, GRENAT, ETHER, IPV4, SCTP),
+ TPTE(0xA9, ETHER, IPV4, NONE, GRENAT, ETHER, IPV6, FRAG),
+ TPTE(0xAA, ETHER, IPV4, NONE, GRENAT, ETHER, IPV6, NONFRAG),
+ TPTE(0xAB, ETHER, IPV4, NONE, GRENAT, ETHER, IPV6, UDP),
+ TPTE(0xAC, ETHER, IPV4, NONE, GRENAT, ETHER, IPV6, TCP),
+ TPTE(0xAD, ETHER, IPV4, NONE, GRENAT, ETHER, IPV6, SCTP),
+ /* IPv4 -> GRE/Teredo/VXLAN -> MAC+VLAN -> NONE/IPv4/IPv6 */
+ TPTE(0xB0, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, NONE, NONE),
+ TPTE(0xB1, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, IPV4, FRAG),
+ TPTE(0xB2, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, IPV4, NONFRAG),
+ TPTE(0xB3, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, IPV4, UDP),
+ TPTE(0xB4, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, IPV4, TCP),
+ TPTE(0xB5, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, IPV4, SCTP),
+ TPTE(0xB9, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, IPV6, FRAG),
+ TPTE(0xBA, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, IPV6, NONFRAG),
+ TPTE(0xBB, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, IPV6, UDP),
+ TPTE(0xBC, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, IPV6, TCP),
+ TPTE(0xBD, ETHER, IPV4, NONE, GRENAT, ETHER_VLAN, IPV6, SCTP),
+ /* IPv6 -> IPv4/IPv6 */
+ TPTE(0xC1, ETHER, IPV6, NONE, IP, NONE, IPV4, FRAG),
+ TPTE(0xC2, ETHER, IPV6, NONE, IP, NONE, IPV4, NONFRAG),
+ TPTE(0xC3, ETHER, IPV6, NONE, IP, NONE, IPV4, UDP),
+ TPTE(0xC4, ETHER, IPV6, NONE, IP, NONE, IPV4, TCP),
+ TPTE(0xC5, ETHER, IPV6, NONE, IP, NONE, IPV4, SCTP),
+ TPTE(0xC9, ETHER, IPV6, NONE, IP, NONE, IPV6, FRAG),
+ TPTE(0xCA, ETHER, IPV6, NONE, IP, NONE, IPV6, NONFRAG),
+ TPTE(0xCB, ETHER, IPV6, NONE, IP, NONE, IPV6, UDP),
+ TPTE(0xCC, ETHER, IPV6, NONE, IP, NONE, IPV6, TCP),
+ TPTE(0xCD, ETHER, IPV6, NONE, IP, NONE, IPV6, SCTP),
+ /* IPv6 -> GRE/Teredo/VXLAN -> NONE/IPv4/IPv6 */
+ TPTE(0xD0, ETHER, IPV6, NONE, GRENAT, NONE, NONE, NONE),
+ TPTE(0xD1, ETHER, IPV6, NONE, GRENAT, NONE, IPV4, FRAG),
+ TPTE(0xD2, ETHER, IPV6, NONE, GRENAT, NONE, IPV4, NONFRAG),
+ TPTE(0xD3, ETHER, IPV6, NONE, GRENAT, NONE, IPV4, UDP),
+ TPTE(0xD4, ETHER, IPV6, NONE, GRENAT, NONE, IPV4, TCP),
+ TPTE(0xD5, ETHER, IPV6, NONE, GRENAT, NONE, IPV4, SCTP),
+ TPTE(0xD9, ETHER, IPV6, NONE, GRENAT, NONE, IPV6, FRAG),
+ TPTE(0xDA, ETHER, IPV6, NONE, GRENAT, NONE, IPV6, NONFRAG),
+ TPTE(0xDB, ETHER, IPV6, NONE, GRENAT, NONE, IPV6, UDP),
+ TPTE(0xDC, ETHER, IPV6, NONE, GRENAT, NONE, IPV6, TCP),
+ TPTE(0xDD, ETHER, IPV6, NONE, GRENAT, NONE, IPV6, SCTP),
+ /* IPv6 -> GRE/Teredo/VXLAN -> MAC -> NONE/IPv4/IPv6 */
+ TPTE(0xE0, ETHER, IPV6, NONE, GRENAT, ETHER, NONE, NONE),
+ TPTE(0xE1, ETHER, IPV6, NONE, GRENAT, ETHER, IPV4, FRAG),
+ TPTE(0xE2, ETHER, IPV6, NONE, GRENAT, ETHER, IPV4, NONFRAG),
+ TPTE(0xE3, ETHER, IPV6, NONE, GRENAT, ETHER, IPV4, UDP),
+ TPTE(0xE4, ETHER, IPV6, NONE, GRENAT, ETHER, IPV4, TCP),
+ TPTE(0xE5, ETHER, IPV6, NONE, GRENAT, ETHER, IPV4, SCTP),
+ TPTE(0xE9, ETHER, IPV6, NONE, GRENAT, ETHER, IPV6, FRAG),
+ TPTE(0xEA, ETHER, IPV6, NONE, GRENAT, ETHER, IPV6, NONFRAG),
+ TPTE(0xEB, ETHER, IPV6, NONE, GRENAT, ETHER, IPV6, UDP),
+ TPTE(0xEC, ETHER, IPV6, NONE, GRENAT, ETHER, IPV6, TCP),
+ TPTE(0xED, ETHER, IPV6, NONE, GRENAT, ETHER, IPV6, SCTP),
+ /* IPv6 -> GRE/Teredo/VXLAN -> MAC+VLAN -> NONE/IPv4/IPv6 */
+ TPTE(0xF0, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, NONE, NONE),
+ TPTE(0xF1, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV4, FRAG),
+ TPTE(0xF2, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV4, NONFRAG),
+ TPTE(0xF3, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV4, UDP),
+ TPTE(0xF4, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV4, TCP),
+ TPTE(0xF5, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV4, SCTP),
+ TPTE(0xF9, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV6, FRAG),
+ TPTE(0xFA, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV6, NONFRAG),
+ TPTE(0xFB, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV6, UDP),
+ TPTE(0xFC, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV6, TCP),
+ TPTE(0xFD, ETHER, IPV6, NONE, GRENAT, ETHER_VLAN, IPV6, SCTP),
+};
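
As a sanity check on the TPTE() macro above, the following minimal sketch (not part of the patch; the RTE_PTYPE_* values are copied from rte_mbuf_ptype.h) shows what the 0x24 row reduces to once the NONE fields collapse to zero:

    #include <stdint.h>
    #include <stdio.h>

    #define RTE_PTYPE_L2_ETHER 0x00000001
    #define RTE_PTYPE_L3_IPV4  0x00000010
    #define RTE_PTYPE_L4_TCP   0x00000100

    int main(void)
    {
            /* TPTE(0x24, ETHER, IPV4, TCP, NONE, NONE, NONE, NONE) ORs
             * only the outer L2/L3/L4 bits; all tunnel/inner fields are 0.
             */
            uint32_t ptype = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
                             RTE_PTYPE_L4_TCP;

            printf("ptid 0x24 -> ptype 0x%08x\n", ptype); /* 0x00000111 */
            return 0;
    }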
+
+u32 *txgbe_get_supported_ptypes(void)
+{
+ static u32 ptypes[] = {
+ /* For non-vec functions,
+ * refers to txgbe_rxd_pkt_info_to_pkt_type();
+ * for vec functions,
+ * refers to _recv_raw_pkts_vec().
+ */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+}
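
The returned list is terminated by RTE_PTYPE_UNKNOWN (0), so callers can walk it without knowing its length. A hedged caller sketch (count_supported_ptypes() is hypothetical, not part of the driver):

    /* Count entries in the RTE_PTYPE_UNKNOWN-terminated list. */
    static unsigned int
    count_supported_ptypes(void)
    {
            u32 *p = txgbe_get_supported_ptypes();
            unsigned int n = 0;

            while (p[n] != RTE_PTYPE_UNKNOWN)
                    n++;
            return n; /* 13 for the list above */
    }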
+
+static inline u8
+txgbe_encode_ptype_fcoe(u32 ptype)
+{
+ u8 ptid;
+
+ UNREFERENCED_PARAMETER(ptype);
+ ptid = TXGBE_PTID_PKT_FCOE;
+
+ return ptid;
+}
+
+static inline u8
+txgbe_encode_ptype_mac(u32 ptype)
+{
+ u8 ptid;
+
+ ptid = TXGBE_PTID_PKT_MAC;
+
+ switch (ptype & RTE_PTYPE_L2_MASK) {
+ case RTE_PTYPE_L2_ETHER_FCOE:
+ ptid = txgbe_encode_ptype_fcoe(ptype);
+ break;
+ case RTE_PTYPE_UNKNOWN:
+ break;
+ case RTE_PTYPE_L2_ETHER_TIMESYNC:
+ ptid |= TXGBE_PTID_TYP_TS;
+ break;
+ case RTE_PTYPE_L2_ETHER_ARP:
+ ptid |= TXGBE_PTID_TYP_ARP;
+ break;
+ case RTE_PTYPE_L2_ETHER_LLDP:
+ ptid |= TXGBE_PTID_TYP_LLDP;
+ break;
+ default:
+ ptid |= TXGBE_PTID_TYP_MAC;
+ break;
+ }
+
+ return ptid;
+}
+
+static inline u8
+txgbe_encode_ptype_ip(u32 ptype)
+{
+ u8 ptid;
+
+ ptid = TXGBE_PTID_PKT_IP;
+
+ switch (ptype & RTE_PTYPE_L3_MASK) {
+ case RTE_PTYPE_L3_IPV4:
+ case RTE_PTYPE_L3_IPV4_EXT:
+ case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
+ break;
+ case RTE_PTYPE_L3_IPV6:
+ case RTE_PTYPE_L3_IPV6_EXT:
+ case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
+ ptid |= TXGBE_PTID_PKT_IPV6;
+ break;
+ default:
+ return txgbe_encode_ptype_mac(ptype);
+ }
+
+ switch (ptype & RTE_PTYPE_L4_MASK) {
+ case RTE_PTYPE_L4_TCP:
+ ptid |= TXGBE_PTID_TYP_TCP;
+ break;
+ case RTE_PTYPE_L4_UDP:
+ ptid |= TXGBE_PTID_TYP_UDP;
+ break;
+ case RTE_PTYPE_L4_SCTP:
+ ptid |= TXGBE_PTID_TYP_SCTP;
+ break;
+ case RTE_PTYPE_L4_FRAG:
+ ptid |= TXGBE_PTID_TYP_IPFRAG;
+ break;
+ default:
+ ptid |= TXGBE_PTID_TYP_IPDATA;
+ break;
+ }
+
+ return ptid;
+}
+
+static inline u8
+txgbe_encode_ptype_tunnel(u32 ptype)
+{
+ u8 ptid;
+
+ ptid = TXGBE_PTID_PKT_TUN;
+
+ switch (ptype & RTE_PTYPE_L3_MASK) {
+ case RTE_PTYPE_L3_IPV4:
+ case RTE_PTYPE_L3_IPV4_EXT:
+ case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
+ break;
+ case RTE_PTYPE_L3_IPV6:
+ case RTE_PTYPE_L3_IPV6_EXT:
+ case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
+ ptid |= TXGBE_PTID_TUN_IPV6;
+ break;
+ default:
+ return txgbe_encode_ptype_ip(ptype);
+ }
+
+ switch (ptype & RTE_PTYPE_TUNNEL_MASK) {
+ case RTE_PTYPE_TUNNEL_IP:
+ ptid |= TXGBE_PTID_TUN_EI;
+ break;
+ case RTE_PTYPE_TUNNEL_GRE:
+ ptid |= TXGBE_PTID_TUN_EIG;
+ break;
+ case RTE_PTYPE_TUNNEL_VXLAN:
+ case RTE_PTYPE_TUNNEL_VXLAN_GPE:
+ case RTE_PTYPE_TUNNEL_NVGRE:
+ case RTE_PTYPE_TUNNEL_GENEVE:
+ case RTE_PTYPE_TUNNEL_GRENAT:
+ break;
+ default:
+ return ptid;
+ }
+
+ switch (ptype & RTE_PTYPE_INNER_L2_MASK) {
+ case RTE_PTYPE_INNER_L2_ETHER:
+ ptid |= TXGBE_PTID_TUN_EIGM;
+ break;
+ case RTE_PTYPE_INNER_L2_ETHER_VLAN:
+ ptid |= TXGBE_PTID_TUN_EIGMV;
+ break;
+ case RTE_PTYPE_INNER_L2_ETHER_QINQ:
+ ptid |= TXGBE_PTID_TUN_EIGMV;
+ return ptid;
+ default:
+ break;
+ }
+
+ switch (ptype & RTE_PTYPE_INNER_L3_MASK) {
+ case RTE_PTYPE_INNER_L3_IPV4:
+ case RTE_PTYPE_INNER_L3_IPV4_EXT:
+ case RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN:
+ break;
+ case RTE_PTYPE_INNER_L3_IPV6:
+ case RTE_PTYPE_INNER_L3_IPV6_EXT:
+ case RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN:
+ ptid |= TXGBE_PTID_PKT_IPV6;
+ break;
+ default:
+ return ptid;
+ }
+
+ switch (ptype & RTE_PTYPE_INNER_L4_MASK) {
+ case RTE_PTYPE_INNER_L4_TCP:
+ ptid |= TXGBE_PTID_TYP_TCP;
+ break;
+ case RTE_PTYPE_INNER_L4_UDP:
+ ptid |= TXGBE_PTID_TYP_UDP;
+ break;
+ case RTE_PTYPE_INNER_L4_SCTP:
+ ptid |= TXGBE_PTID_TYP_SCTP;
+ break;
+ case RTE_PTYPE_INNER_L4_FRAG:
+ ptid |= TXGBE_PTID_TYP_IPFRAG;
+ break;
+ default:
+ ptid |= TXGBE_PTID_TYP_IPDATA;
+ break;
+ }
+
+ return ptid;
+}
+
+u32 txgbe_decode_ptype(u8 ptid)
+{
+	if (txgbe_etflt_id(ptid) != -1)
+ return RTE_PTYPE_UNKNOWN;
+
+ return txgbe_ptype_lookup[ptid];
+}
+
+u8 txgbe_encode_ptype(u32 ptype)
+{
+ u8 ptid = 0;
+
+ if (ptype & RTE_PTYPE_TUNNEL_MASK) {
+ ptid = txgbe_encode_ptype_tunnel(ptype);
+ } else if (ptype & RTE_PTYPE_L3_MASK) {
+ ptid = txgbe_encode_ptype_ip(ptype);
+ } else if (ptype & RTE_PTYPE_L2_MASK) {
+ ptid = txgbe_encode_ptype_mac(ptype);
+ } else {
+ ptid = TXGBE_PTID_NULL;
+ }
+
+ return ptid;
+}
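
For ptids backed by the lookup table, encode and decode are inverses of each other. An illustrative round trip under the definitions above (snippet only, not part of the patch):

    u32 ptype = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
    u8 ptid = txgbe_encode_ptype(ptype); /* TXGBE_PTID_PKT_IP | TYP_TCP = 0x24 */

    /* 0x24 is not an ethertype-filter ptid, so decode hits the table row */
    RTE_ASSERT(txgbe_decode_ptype(ptid) == ptype);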
+
+/**
+ * Use two different tables for normal packets and tunnel packets
+ * to save space.
+ */
+const u32
+txgbe_ptype_table[TXGBE_PTID_MAX] __rte_cache_aligned = {
+ [TXGBE_PT_ETHER] = RTE_PTYPE_L2_ETHER,
+ [TXGBE_PT_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4,
+ [TXGBE_PT_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [TXGBE_PT_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [TXGBE_PT_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+ [TXGBE_PT_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [TXGBE_PT_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [TXGBE_PT_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [TXGBE_PT_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
+ [TXGBE_PT_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6,
+ [TXGBE_PT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [TXGBE_PT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [TXGBE_PT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
+ [TXGBE_PT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [TXGBE_PT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [TXGBE_PT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [TXGBE_PT_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
+ [TXGBE_PT_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [TXGBE_PT_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [TXGBE_PT_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [TXGBE_PT_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [TXGBE_PT_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [TXGBE_PT_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [TXGBE_PT_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [TXGBE_PT_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [TXGBE_PT_IPV4_EXT_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+};
+
+const u32
+txgbe_ptype_table_tn[TXGBE_PTID_MAX] __rte_cache_aligned = {
+ [TXGBE_PT_NVGRE] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER,
+ [TXGBE_PT_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [TXGBE_PT_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
+ [TXGBE_PT_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [TXGBE_PT_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_NVGRE_IPV4_IPV6_EXT_TCP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_UDP,
+ [TXGBE_PT_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_UDP,
+ [TXGBE_PT_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+ [TXGBE_PT_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_NVGRE_IPV4_IPV6_EXT_UDP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
+
+ [TXGBE_PT_VXLAN] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
+ [TXGBE_PT_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ [TXGBE_PT_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [TXGBE_PT_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [TXGBE_PT_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_VXLAN_IPV4_IPV6_EXT_TCP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
+ [TXGBE_PT_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [TXGBE_PT_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [TXGBE_PT_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_VXLAN_IPV4_IPV6_EXT_UDP] =
+ RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
+ [TXGBE_PT_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [TXGBE_PT_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [TXGBE_PT_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+};
+
new file mode 100644
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_PTYPE_H_
+#define _TXGBE_PTYPE_H_
+
+/**
+ * PTID(Packet Type Identifier, 8bits)
+ * - Bit 3:0 detailed types.
+ * - Bit 5:4 basic types.
+ * - Bit 7:6 tunnel types.
+ **/
+#define TXGBE_PTID_NULL 0
+#define TXGBE_PTID_MAX 256
+#define TXGBE_PTID_MASK 0xFF
+#define TXGBE_PTID_MASK_TUNNEL 0x7F
+
+/* TUN */
+#define TXGBE_PTID_TUN_IPV6 0x40
+#define TXGBE_PTID_TUN_EI 0x00 /* IP */
+#define TXGBE_PTID_TUN_EIG 0x10 /* IP+GRE */
+#define TXGBE_PTID_TUN_EIGM 0x20 /* IP+GRE+MAC */
+#define TXGBE_PTID_TUN_EIGMV 0x30 /* IP+GRE+MAC+VLAN */
+
+/* PKT for !TUN */
+#define TXGBE_PTID_PKT_TUN (0x80)
+#define TXGBE_PTID_PKT_MAC (0x10)
+#define TXGBE_PTID_PKT_IP (0x20)
+#define TXGBE_PTID_PKT_FCOE (0x30)
+
+/* TYP for PKT=mac */
+#define TXGBE_PTID_TYP_MAC (0x01)
+#define TXGBE_PTID_TYP_TS (0x02) /* time sync */
+#define TXGBE_PTID_TYP_FIP (0x03)
+#define TXGBE_PTID_TYP_LLDP (0x04)
+#define TXGBE_PTID_TYP_CNM (0x05)
+#define TXGBE_PTID_TYP_EAPOL (0x06)
+#define TXGBE_PTID_TYP_ARP (0x07)
+#define TXGBE_PTID_TYP_ETF (0x08)
+
+/* TYP for PKT=ip */
+#define TXGBE_PTID_PKT_IPV6 (0x08)
+#define TXGBE_PTID_TYP_IPFRAG (0x01)
+#define TXGBE_PTID_TYP_IPDATA (0x02)
+#define TXGBE_PTID_TYP_UDP (0x03)
+#define TXGBE_PTID_TYP_TCP (0x04)
+#define TXGBE_PTID_TYP_SCTP (0x05)
+
+/* TYP for PKT=fcoe */
+#define TXGBE_PTID_PKT_VFT (0x08)
+#define TXGBE_PTID_TYP_FCOE (0x00)
+#define TXGBE_PTID_TYP_FCDATA (0x01)
+#define TXGBE_PTID_TYP_FCRDY (0x02)
+#define TXGBE_PTID_TYP_FCRSP (0x03)
+#define TXGBE_PTID_TYP_FCOTHER (0x04)
+
+/* packet type non-ip values */
+enum txgbe_l2_ptids {
+ TXGBE_PTID_L2_ABORTED = (TXGBE_PTID_PKT_MAC),
+ TXGBE_PTID_L2_MAC = (TXGBE_PTID_PKT_MAC | TXGBE_PTID_TYP_MAC),
+ TXGBE_PTID_L2_TMST = (TXGBE_PTID_PKT_MAC | TXGBE_PTID_TYP_TS),
+ TXGBE_PTID_L2_FIP = (TXGBE_PTID_PKT_MAC | TXGBE_PTID_TYP_FIP),
+ TXGBE_PTID_L2_LLDP = (TXGBE_PTID_PKT_MAC | TXGBE_PTID_TYP_LLDP),
+ TXGBE_PTID_L2_CNM = (TXGBE_PTID_PKT_MAC | TXGBE_PTID_TYP_CNM),
+ TXGBE_PTID_L2_EAPOL = (TXGBE_PTID_PKT_MAC | TXGBE_PTID_TYP_EAPOL),
+ TXGBE_PTID_L2_ARP = (TXGBE_PTID_PKT_MAC | TXGBE_PTID_TYP_ARP),
+
+ TXGBE_PTID_L2_IPV4_FRAG = (TXGBE_PTID_PKT_IP | TXGBE_PTID_TYP_IPFRAG),
+ TXGBE_PTID_L2_IPV4 = (TXGBE_PTID_PKT_IP | TXGBE_PTID_TYP_IPDATA),
+ TXGBE_PTID_L2_IPV4_UDP = (TXGBE_PTID_PKT_IP | TXGBE_PTID_TYP_UDP),
+ TXGBE_PTID_L2_IPV4_TCP = (TXGBE_PTID_PKT_IP | TXGBE_PTID_TYP_TCP),
+ TXGBE_PTID_L2_IPV4_SCTP = (TXGBE_PTID_PKT_IP | TXGBE_PTID_TYP_SCTP),
+ TXGBE_PTID_L2_IPV6_FRAG = (TXGBE_PTID_PKT_IP | TXGBE_PTID_PKT_IPV6 |
+ TXGBE_PTID_TYP_IPFRAG),
+ TXGBE_PTID_L2_IPV6 = (TXGBE_PTID_PKT_IP | TXGBE_PTID_PKT_IPV6 |
+ TXGBE_PTID_TYP_IPDATA),
+ TXGBE_PTID_L2_IPV6_UDP = (TXGBE_PTID_PKT_IP | TXGBE_PTID_PKT_IPV6 |
+ TXGBE_PTID_TYP_UDP),
+ TXGBE_PTID_L2_IPV6_TCP = (TXGBE_PTID_PKT_IP | TXGBE_PTID_PKT_IPV6 |
+ TXGBE_PTID_TYP_TCP),
+ TXGBE_PTID_L2_IPV6_SCTP = (TXGBE_PTID_PKT_IP | TXGBE_PTID_PKT_IPV6 |
+ TXGBE_PTID_TYP_SCTP),
+
+ TXGBE_PTID_L2_FCOE = (TXGBE_PTID_PKT_FCOE |
+ TXGBE_PTID_TYP_FCOE),
+ TXGBE_PTID_L2_FCOE_FCDATA = (TXGBE_PTID_PKT_FCOE |
+ TXGBE_PTID_TYP_FCDATA),
+ TXGBE_PTID_L2_FCOE_FCRDY = (TXGBE_PTID_PKT_FCOE |
+ TXGBE_PTID_TYP_FCRDY),
+ TXGBE_PTID_L2_FCOE_FCRSP = (TXGBE_PTID_PKT_FCOE |
+ TXGBE_PTID_TYP_FCRSP),
+ TXGBE_PTID_L2_FCOE_FCOTHER = (TXGBE_PTID_PKT_FCOE |
+ TXGBE_PTID_TYP_FCOTHER),
+ TXGBE_PTID_L2_FCOE_VFT = (TXGBE_PTID_PKT_FCOE |
+ TXGBE_PTID_PKT_VFT),
+ TXGBE_PTID_L2_FCOE_VFT_FCDATA = (TXGBE_PTID_PKT_FCOE |
+ TXGBE_PTID_PKT_VFT | TXGBE_PTID_TYP_FCDATA),
+ TXGBE_PTID_L2_FCOE_VFT_FCRDY = (TXGBE_PTID_PKT_FCOE |
+ TXGBE_PTID_PKT_VFT | TXGBE_PTID_TYP_FCRDY),
+ TXGBE_PTID_L2_FCOE_VFT_FCRSP = (TXGBE_PTID_PKT_FCOE |
+ TXGBE_PTID_PKT_VFT | TXGBE_PTID_TYP_FCRSP),
+ TXGBE_PTID_L2_FCOE_VFT_FCOTHER = (TXGBE_PTID_PKT_FCOE |
+ TXGBE_PTID_PKT_VFT | TXGBE_PTID_TYP_FCOTHER),
+
+ TXGBE_PTID_L2_TUN4_MAC = (TXGBE_PTID_PKT_TUN |
+ TXGBE_PTID_TUN_EIGM),
+ TXGBE_PTID_L2_TUN6_MAC = (TXGBE_PTID_PKT_TUN |
+ TXGBE_PTID_TUN_IPV6 | TXGBE_PTID_TUN_EIGM),
+};
+
+
+/*
+ * PTYPE(Packet Type, 32bits)
+ * - Bit 3:0 is for L2 types.
+ * - Bit 7:4 is for L3 or outer L3 (for tunneling case) types.
+ * - Bit 11:8 is for L4 or outer L4 (for tunneling case) types.
+ * - Bit 15:12 is for tunnel types.
+ * - Bit 19:16 is for inner L2 types.
+ * - Bit 23:20 is for inner L3 types.
+ * - Bit 27:24 is for inner L4 types.
+ * - Bit 31:28 is reserved.
+ * Please refer to rte_mbuf.h: rte_mbuf.packet_type.
+ */
+struct rte_txgbe_ptype {
+ u32 l2:4; /* outer mac */
+ u32 l3:4; /* outer internet protocol */
+ u32 l4:4; /* outer transport protocol */
+ u32 tun:4; /* tunnel protocol */
+
+ u32 el2:4; /* inner mac */
+ u32 el3:4; /* inner internet protocol */
+ u32 el4:4; /* inner transport protocol */
+ u32 rsv:3;
+ u32 known:1;
+};
+
+#ifndef RTE_PTYPE_UNKNOWN
+#define RTE_PTYPE_UNKNOWN 0x00000000
+#define RTE_PTYPE_L2_ETHER 0x00000001
+#define RTE_PTYPE_L2_ETHER_TIMESYNC 0x00000002
+#define RTE_PTYPE_L2_ETHER_ARP 0x00000003
+#define RTE_PTYPE_L2_ETHER_LLDP 0x00000004
+#define RTE_PTYPE_L2_ETHER_NSH 0x00000005
+#define RTE_PTYPE_L2_ETHER_FCOE 0x00000009
+#define RTE_PTYPE_L3_IPV4 0x00000010
+#define RTE_PTYPE_L3_IPV4_EXT 0x00000030
+#define RTE_PTYPE_L3_IPV6 0x00000040
+#define RTE_PTYPE_L3_IPV4_EXT_UNKNOWN 0x00000090
+#define RTE_PTYPE_L3_IPV6_EXT 0x000000c0
+#define RTE_PTYPE_L3_IPV6_EXT_UNKNOWN 0x000000e0
+#define RTE_PTYPE_L4_TCP 0x00000100
+#define RTE_PTYPE_L4_UDP 0x00000200
+#define RTE_PTYPE_L4_FRAG 0x00000300
+#define RTE_PTYPE_L4_SCTP 0x00000400
+#define RTE_PTYPE_L4_ICMP 0x00000500
+#define RTE_PTYPE_L4_NONFRAG 0x00000600
+#define RTE_PTYPE_TUNNEL_IP 0x00001000
+#define RTE_PTYPE_TUNNEL_GRE 0x00002000
+#define RTE_PTYPE_TUNNEL_VXLAN 0x00003000
+#define RTE_PTYPE_TUNNEL_NVGRE 0x00004000
+#define RTE_PTYPE_TUNNEL_GENEVE 0x00005000
+#define RTE_PTYPE_TUNNEL_GRENAT 0x00006000
+#define RTE_PTYPE_INNER_L2_ETHER 0x00010000
+#define RTE_PTYPE_INNER_L2_ETHER_VLAN 0x00020000
+#define RTE_PTYPE_INNER_L3_IPV4 0x00100000
+#define RTE_PTYPE_INNER_L3_IPV4_EXT 0x00200000
+#define RTE_PTYPE_INNER_L3_IPV6 0x00300000
+#define RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN 0x00400000
+#define RTE_PTYPE_INNER_L3_IPV6_EXT 0x00500000
+#define RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN 0x00600000
+#define RTE_PTYPE_INNER_L4_TCP 0x01000000
+#define RTE_PTYPE_INNER_L4_UDP 0x02000000
+#define RTE_PTYPE_INNER_L4_FRAG 0x03000000
+#define RTE_PTYPE_INNER_L4_SCTP 0x04000000
+#define RTE_PTYPE_INNER_L4_ICMP 0x05000000
+#define RTE_PTYPE_INNER_L4_NONFRAG 0x06000000
+#endif /* !RTE_PTYPE_UNKNOWN */
+#define RTE_PTYPE_L3_IPV4u RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+#define RTE_PTYPE_L3_IPV6u RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+#define RTE_PTYPE_INNER_L3_IPV4u RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN
+#define RTE_PTYPE_INNER_L3_IPV6u RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN
+#define RTE_PTYPE_L2_ETHER_FIP RTE_PTYPE_L2_ETHER
+#define RTE_PTYPE_L2_ETHER_CNM RTE_PTYPE_L2_ETHER
+#define RTE_PTYPE_L2_ETHER_EAPOL RTE_PTYPE_L2_ETHER
+#define RTE_PTYPE_L2_ETHER_FILTER RTE_PTYPE_L2_ETHER
+
+u32 *txgbe_get_supported_ptypes(void);
+u32 txgbe_decode_ptype(u8 ptid);
+u8 txgbe_encode_ptype(u32 ptype);
+
+/**
+ * PT(Packet Type, 32bits)
+ * - Bit 3:0 is for L2 types.
+ * - Bit 7:4 is for L3 or outer L3 (for tunneling case) types.
+ * - Bit 11:8 is for L4 or outer L4 (for tunneling case) types.
+ * - Bit 15:12 is for tunnel types.
+ * - Bit 19:16 is for inner L2 types.
+ * - Bit 23:20 is for inner L3 types.
+ * - Bit 27:24 is for inner L4 types.
+ * - Bit 31:28 is reserved.
+ * PT is a more accurate version of PTYPE
+ **/
+#define TXGBE_PT_ETHER 0x00
+#define TXGBE_PT_IPV4 0x01
+#define TXGBE_PT_IPV4_TCP 0x11
+#define TXGBE_PT_IPV4_UDP 0x21
+#define TXGBE_PT_IPV4_SCTP 0x41
+#define TXGBE_PT_IPV4_EXT 0x03
+#define TXGBE_PT_IPV4_EXT_TCP 0x13
+#define TXGBE_PT_IPV4_EXT_UDP 0x23
+#define TXGBE_PT_IPV4_EXT_SCTP 0x43
+#define TXGBE_PT_IPV6 0x04
+#define TXGBE_PT_IPV6_TCP 0x14
+#define TXGBE_PT_IPV6_UDP 0x24
+#define TXGBE_PT_IPV6_SCTP 0x44
+#define TXGBE_PT_IPV6_EXT 0x0C
+#define TXGBE_PT_IPV6_EXT_TCP 0x1C
+#define TXGBE_PT_IPV6_EXT_UDP 0x2C
+#define TXGBE_PT_IPV6_EXT_SCTP 0x4C
+#define TXGBE_PT_IPV4_IPV6 0x05
+#define TXGBE_PT_IPV4_IPV6_TCP 0x15
+#define TXGBE_PT_IPV4_IPV6_UDP 0x25
+#define TXGBE_PT_IPV4_IPV6_SCTP 0x45
+#define TXGBE_PT_IPV4_EXT_IPV6 0x07
+#define TXGBE_PT_IPV4_EXT_IPV6_TCP 0x17
+#define TXGBE_PT_IPV4_EXT_IPV6_UDP 0x27
+#define TXGBE_PT_IPV4_EXT_IPV6_SCTP 0x47
+#define TXGBE_PT_IPV4_IPV6_EXT 0x0D
+#define TXGBE_PT_IPV4_IPV6_EXT_TCP 0x1D
+#define TXGBE_PT_IPV4_IPV6_EXT_UDP 0x2D
+#define TXGBE_PT_IPV4_IPV6_EXT_SCTP 0x4D
+#define TXGBE_PT_IPV4_EXT_IPV6_EXT 0x0F
+#define TXGBE_PT_IPV4_EXT_IPV6_EXT_TCP 0x1F
+#define TXGBE_PT_IPV4_EXT_IPV6_EXT_UDP 0x2F
+#define TXGBE_PT_IPV4_EXT_IPV6_EXT_SCTP 0x4F
+
+#define TXGBE_PT_NVGRE 0x00
+#define TXGBE_PT_NVGRE_IPV4 0x01
+#define TXGBE_PT_NVGRE_IPV4_TCP 0x11
+#define TXGBE_PT_NVGRE_IPV4_UDP 0x21
+#define TXGBE_PT_NVGRE_IPV4_SCTP 0x41
+#define TXGBE_PT_NVGRE_IPV4_EXT 0x03
+#define TXGBE_PT_NVGRE_IPV4_EXT_TCP 0x13
+#define TXGBE_PT_NVGRE_IPV4_EXT_UDP 0x23
+#define TXGBE_PT_NVGRE_IPV4_EXT_SCTP 0x43
+#define TXGBE_PT_NVGRE_IPV6 0x04
+#define TXGBE_PT_NVGRE_IPV6_TCP 0x14
+#define TXGBE_PT_NVGRE_IPV6_UDP 0x24
+#define TXGBE_PT_NVGRE_IPV6_SCTP 0x44
+#define TXGBE_PT_NVGRE_IPV6_EXT 0x0C
+#define TXGBE_PT_NVGRE_IPV6_EXT_TCP 0x1C
+#define TXGBE_PT_NVGRE_IPV6_EXT_UDP 0x2C
+#define TXGBE_PT_NVGRE_IPV6_EXT_SCTP 0x4C
+#define TXGBE_PT_NVGRE_IPV4_IPV6 0x05
+#define TXGBE_PT_NVGRE_IPV4_IPV6_TCP 0x15
+#define TXGBE_PT_NVGRE_IPV4_IPV6_UDP 0x25
+#define TXGBE_PT_NVGRE_IPV4_IPV6_EXT 0x0D
+#define TXGBE_PT_NVGRE_IPV4_IPV6_EXT_TCP 0x1D
+#define TXGBE_PT_NVGRE_IPV4_IPV6_EXT_UDP 0x2D
+
+#define TXGBE_PT_VXLAN 0x80
+#define TXGBE_PT_VXLAN_IPV4 0x81
+#define TXGBE_PT_VXLAN_IPV4_TCP 0x91
+#define TXGBE_PT_VXLAN_IPV4_UDP 0xA1
+#define TXGBE_PT_VXLAN_IPV4_SCTP 0xC1
+#define TXGBE_PT_VXLAN_IPV4_EXT 0x83
+#define TXGBE_PT_VXLAN_IPV4_EXT_TCP 0x93
+#define TXGBE_PT_VXLAN_IPV4_EXT_UDP 0xA3
+#define TXGBE_PT_VXLAN_IPV4_EXT_SCTP 0xC3
+#define TXGBE_PT_VXLAN_IPV6 0x84
+#define TXGBE_PT_VXLAN_IPV6_TCP 0x94
+#define TXGBE_PT_VXLAN_IPV6_UDP 0xA4
+#define TXGBE_PT_VXLAN_IPV6_SCTP 0xC4
+#define TXGBE_PT_VXLAN_IPV6_EXT 0x8C
+#define TXGBE_PT_VXLAN_IPV6_EXT_TCP 0x9C
+#define TXGBE_PT_VXLAN_IPV6_EXT_UDP 0xAC
+#define TXGBE_PT_VXLAN_IPV6_EXT_SCTP 0xCC
+#define TXGBE_PT_VXLAN_IPV4_IPV6 0x85
+#define TXGBE_PT_VXLAN_IPV4_IPV6_TCP 0x95
+#define TXGBE_PT_VXLAN_IPV4_IPV6_UDP 0xA5
+#define TXGBE_PT_VXLAN_IPV4_IPV6_EXT 0x8D
+#define TXGBE_PT_VXLAN_IPV4_IPV6_EXT_TCP 0x9D
+#define TXGBE_PT_VXLAN_IPV4_IPV6_EXT_UDP 0xAD
+
+#define TXGBE_PT_MAX 256
+extern const u32 txgbe_ptype_table[TXGBE_PT_MAX];
+extern const u32 txgbe_ptype_table_tn[TXGBE_PT_MAX];
+
+
+/* Ethertype filter list: one static filter per filter consumer. This is
+ * to avoid filter collisions later. Add new filters here.
+ * EAPOL 802.1x (0x888e): Filter 0
+ * FCoE (0x8906): Filter 2
+ * 1588 (0x88f7): Filter 3
+ * FIP (0x8914): Filter 4
+ * LLDP (0x88CC): Filter 5
+ * LACP (0x8809): Filter 6
+ * FC (0x8808): Filter 7
+ */
+#define TXGBE_ETF_ID_EAPOL 0
+#define TXGBE_ETF_ID_FCOE 2
+#define TXGBE_ETF_ID_1588 3
+#define TXGBE_ETF_ID_FIP 4
+#define TXGBE_ETF_ID_LLDP 5
+#define TXGBE_ETF_ID_LACP 6
+#define TXGBE_ETF_ID_FC 7
+#define TXGBE_ETF_ID_MAX 8
+
+#define TXGBE_PTID_ETF_MIN 0x18
+#define TXGBE_PTID_ETF_MAX 0x1F
+static inline int txgbe_etflt_id(u8 ptid)
+{
+ if (ptid >= TXGBE_PTID_ETF_MIN && ptid <= TXGBE_PTID_ETF_MAX)
+ return ptid - TXGBE_PTID_ETF_MIN;
+ else
+ return -1;
+}
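
For example, a frame matched by the 1588 ethertype filter (index 3) arrives with ptid 0x18 + 3 = 0x1B, and anything outside the 0x18..0x1F window maps to -1. A small check under the assignments above (illustration only):

    u8 ptid = TXGBE_PTID_ETF_MIN + TXGBE_ETF_ID_1588; /* 0x1B */

    RTE_ASSERT(txgbe_etflt_id(ptid) == TXGBE_ETF_ID_1588);
    RTE_ASSERT(txgbe_etflt_id(0x24) == -1); /* not an ethertype-filter ptid */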
+
+struct txgbe_udphdr {
+ __be16 source;
+ __be16 dest;
+ __be16 len;
+ __be16 check;
+};
+
+struct txgbe_vxlanhdr {
+ __be32 vx_flags;
+ __be32 vx_vni;
+};
+
+struct txgbe_genevehdr {
+ u8 opt_len:6;
+ u8 ver:2;
+ u8 rsvd1:6;
+ u8 critical:1;
+ u8 oam:1;
+ __be16 proto_type;
+
+ u8 vni[3];
+ u8 rsvd2;
+};
+
+struct txgbe_nvgrehdr {
+ __be16 flags;
+ __be16 proto;
+ __be32 tni;
+};
+
+#endif /* _TXGBE_PTYPE_H_ */
new file mode 100644
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_REGS_GROUP_H_
+#define _TXGBE_REGS_GROUP_H_
+
+#include "txgbe_ethdev.h"
+
+struct txgbe_hw;
+struct reg_info {
+ uint32_t base_addr;
+ uint32_t count;
+ uint32_t stride;
+ const char *name;
+};
+
+static inline int
+txgbe_read_regs(struct txgbe_hw *hw, const struct reg_info *reg,
+ uint32_t *reg_buf)
+{
+ unsigned int i;
+
+ for (i = 0; i < reg->count; i++)
+ reg_buf[i] = rd32(hw,
+ reg->base_addr + i * reg->stride);
+ return reg->count;
+}
+
+static inline int
+txgbe_regs_group_count(const struct reg_info *regs)
+{
+ int count = 0;
+ int i = 0;
+
+ while (regs[i].count)
+ count += regs[i++].count;
+ return count;
+}
+
+static inline int
+txgbe_read_regs_group(struct rte_eth_dev *dev, uint32_t *reg_buf,
+ const struct reg_info *regs)
+{
+ int count = 0;
+ int i = 0;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ while (regs[i].count)
+		count += txgbe_read_regs(hw, &regs[i++], &reg_buf[count]);
+	return count;
+}
+
+#endif /* _TXGBE_REGS_GROUP_H_ */
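
A usage sketch for these helpers (the offsets and names below are placeholders, not real txgbe registers): a zero-count entry terminates the group list, txgbe_regs_group_count() sizes the output buffer, and txgbe_read_regs_group() fills it:

    static const struct reg_info example_regs[] = {
            {0x00000, 2, 4, "CTRL"}, /* 2 registers, 4-byte stride */
            {0x01000, 4, 4, "RXQ"},  /* 4 registers, 4-byte stride */
            {0, 0, 0, ""}            /* count == 0 ends the list */
    };

    static int
    dump_example_regs(struct rte_eth_dev *dev, uint32_t *buf)
    {
            /* buf must hold txgbe_regs_group_count(example_regs) == 6 words */
            return txgbe_read_regs_group(dev, buf, example_regs);
    }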
new file mode 100644
@@ -0,0 +1,5158 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_ip.h>
+#include <rte_net.h>
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_rxtx.h"
+
+#ifdef RTE_LIBRTE_IEEE1588
+#define TXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define TXGBE_TX_IEEE1588_TMST 0
+#endif
+/* Bit mask indicating which bits are required to build the TX context */
+static const u64 TXGBE_TX_OFFLOAD_MASK = (
+ PKT_TX_OUTER_IPV6 |
+ PKT_TX_OUTER_IPV4 |
+ PKT_TX_IPV6 |
+ PKT_TX_IPV4 |
+ PKT_TX_VLAN_PKT |
+ PKT_TX_IP_CKSUM |
+ PKT_TX_L4_MASK |
+ PKT_TX_TCP_SEG |
+ PKT_TX_TUNNEL_MASK |
+#ifdef RTE_LIBRTE_MACSEC
+ PKT_TX_MACSEC |
+#endif
+ PKT_TX_OUTER_IP_CKSUM |
+#ifdef RTE_LIBRTE_SECURITY
+ PKT_TX_SEC_OFFLOAD |
+#endif
+ TXGBE_TX_IEEE1588_TMST);
+
+#define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK)
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+/*
+ * Prefetch a cache line into all cache levels.
+ */
+#define rte_txgbe_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_txgbe_prefetch(p) do {} while (0)
+#endif
+
+static int
+txgbe_is_vf(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ switch (hw->mac.type) {
+ case txgbe_mac_raptor_vf:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*********************************************************************
+ *
+ * TX functions
+ *
+ **********************************************************************/
+
+/*
+ * Check for descriptors with their DD bit set and free mbufs.
+ * Return the total number of buffers freed.
+ */
+static __rte_always_inline int
+txgbe_tx_free_bufs(struct txgbe_tx_queue *txq)
+{
+ struct txgbe_tx_entry *txep;
+ uint32_t status;
+ int i, nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_TXGBE_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].dw3;
+ if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
+		if ((txq->nb_tx_free >> 1) < txq->tx_free_thresh)
+ txgbe_set32_masked(txq->tdc_reg_addr,
+ TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
+ return 0;
+ }
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_free_thresh-1)
+ */
+ txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)]);
+ for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
+ /* free buffers one at a time */
+ m = rte_pktmbuf_prefree_seg(txep->mbuf);
+ txep->mbuf = NULL;
+
+ if (unlikely(m == NULL))
+ continue;
+
+ if (nb_free >= RTE_TXGBE_TX_MAX_FREE_BUF_SZ ||
+ (nb_free > 0 && m->pool != free[0]->pool)) {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void **)free, nb_free);
+ nb_free = 0;
+ }
+
+ free[nb_free++] = m;
+ }
+
+ if (nb_free > 0)
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
+
+ return txq->tx_free_thresh;
+}
+
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+tx4(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+ int i;
+
+ for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
+ pkt_len = (*pkts)->data_len;
+
+ /* write data to descriptor */
+ txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
+ txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
+ TXGBE_TXD_DATLEN(pkt_len));
+ txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
+
+ rte_prefetch0(&(*pkts)->pool);
+ }
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+tx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
+ pkt_len = (*pkts)->data_len;
+
+ /* write data to descriptor */
+ txdp->qw0 = cpu_to_le64(buf_dma_addr);
+ txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
+ TXGBE_TXD_DATLEN(pkt_len));
+ txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
+
+ rte_prefetch0(&(*pkts)->pool);
+}
+
+/*
+ * Fill H/W descriptor ring with mbuf data.
+ * Copy mbuf pointers to the S/W ring.
+ */
+static inline void
+txgbe_tx_fill_hw_ring(struct txgbe_tx_queue *txq, struct rte_mbuf **pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct txgbe_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
+ struct txgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ const int N_PER_LOOP = 4;
+	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
+ int mainpart, leftover;
+ int i, j;
+
+ /*
+ * Process most of the packets in chunks of N pkts. Any
+ * leftover packets will get processed one at a time.
+ */
+ mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
+ leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
+ for (i = 0; i < mainpart; i += N_PER_LOOP) {
+ /* Copy N mbuf pointers to the S/W ring */
+ for (j = 0; j < N_PER_LOOP; ++j) {
+ (txep + i + j)->mbuf = *(pkts + i + j);
+ }
+ tx4(txdp + i, pkts + i);
+ }
+
+ if (unlikely(leftover > 0)) {
+ for (i = 0; i < leftover; ++i) {
+ (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+ tx1(txdp + mainpart + i, pkts + mainpart + i);
+ }
+ }
+}
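
A worked instance of the mainpart/leftover split above: with nb_pkts == 7 and N_PER_LOOP == 4, the masks yield one tx4() chunk plus three tx1() calls (illustrative values only):

    uint16_t nb_pkts = 7;
    int mainpart = nb_pkts & ~(4 - 1); /* 7 & ~3 == 4: one tx4() chunk */
    int leftover = nb_pkts & (4 - 1);  /* 7 &  3 == 3: three tx1() calls */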
+
+static inline uint16_t
+tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+ uint16_t n = 0;
+
+ /*
+ * Begin scanning the H/W ring for done descriptors when the
+ * number of available descriptors drops below tx_free_thresh. For
+ * each done descriptor, free the associated buffer.
+ */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ txgbe_tx_free_bufs(txq);
+
+ /* Only use descriptors that are available */
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ /* Use exactly nb_pkts descriptors */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ /*
+ * At this point, we know there are enough descriptors in the
+ * ring to transmit all the packets. This assumes that each
+ * mbuf contains a single segment, and that no new offloads
+ * are expected, which would require a new context descriptor.
+ */
+
+ /*
+ * See if we're going to wrap-around. If so, handle the top
+ * of the descriptor ring first, then do the bottom. If not,
+ * the processing looks just like the "bottom" part anyway...
+ */
+ if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
+ n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
+ txgbe_tx_fill_hw_ring(txq, tx_pkts, n);
+ txq->tx_tail = 0;
+ }
+
+ /* Fill H/W descriptor ring with mbuf data */
+ txgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+
+ /*
+ * Check for wrap-around. This would only happen if we used
+ * up to the last descriptor in the ring, no more, no less.
+ */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) txq->tx_tail, (unsigned) nb_pkts);
+
+ /* update tail pointer */
+ rte_wmb();
+ txgbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
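
To make the wrap-around handling concrete, assume a 512-descriptor ring with tx_tail == 510 and a 4-packet burst (the numbers are illustrative):

    uint16_t nb_tx_desc = 512, tx_tail = 510, nb_pkts = 4;
    uint16_t n = nb_tx_desc - tx_tail; /* 2 slots left before the wrap */
    /* the first fill writes slots 510..511 and resets tx_tail to 0;
     * the second fill writes slots 0..1, leaving tx_tail == 2
     */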
+
+uint16_t
+txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+
+	/* Transmit the burst directly if it fits within TX_MAX_BURST pkts */
+ if (likely(nb_pkts <= RTE_PMD_TXGBE_TX_MAX_BURST))
+ return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
+
+ /* transmit more than the max burst, in chunks of TX_MAX_BURST */
+ nb_tx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_TX_MAX_BURST);
+ ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
+ nb_tx = (uint16_t)(nb_tx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < n)
+ break;
+ }
+
+ return nb_tx;
+}
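
From the application side this path is reached through the normal burst API once the driver installs txgbe_xmit_pkts_simple as its tx_pkt_burst handler. A hedged usage sketch, with port_id, queue_id and pkts assumed to be set up elsewhere:

    uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

    if (sent < nb_pkts) {
            /* ring was full: retry or free the unsent tail of pkts[] */
    }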
+
+static inline void
+txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
+ volatile struct txgbe_tx_ctx_desc *ctx_txd,
+ uint64_t ol_flags, union txgbe_tx_offload tx_offload,
+ __rte_unused uint64_t *mdata)
+{
+ union txgbe_tx_offload tx_offload_mask;
+ uint32_t type_tucmd_mlhl;
+ uint32_t mss_l4len_idx;
+ uint32_t ctx_idx;
+ uint32_t vlan_macip_lens;
+ uint32_t tunnel_seed;
+
+ ctx_idx = txq->ctx_curr;
+ tx_offload_mask.data[0] = 0;
+ tx_offload_mask.data[1] = 0;
+
+ /* Specify which HW CTX to upload. */
+ mss_l4len_idx = TXGBE_TXD_IDX(ctx_idx);
+ type_tucmd_mlhl = TXGBE_TXD_CTXT;
+
+ tx_offload_mask.ptid |= ~0;
+ type_tucmd_mlhl |= TXGBE_TXD_PTID(tx_offload.ptid);
+
+ /* check if TCP segmentation required for this packet */
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ tx_offload_mask.l4_len |= ~0;
+ tx_offload_mask.tso_segsz |= ~0;
+ mss_l4len_idx |= TXGBE_TXD_MSS(tx_offload.tso_segsz);
+ mss_l4len_idx |= TXGBE_TXD_L4LEN(tx_offload.l4_len);
+ } else { /* no TSO, check if hardware checksum is needed */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ }
+
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ mss_l4len_idx |=
+ TXGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ mss_l4len_idx |=
+ TXGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ mss_l4len_idx |=
+ TXGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ default:
+ break;
+ }
+ }
+
+ vlan_macip_lens = TXGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
+
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ tx_offload_mask.outer_tun_len |= ~0;
+ tx_offload_mask.outer_l2_len |= ~0;
+ tx_offload_mask.outer_l3_len |= ~0;
+ tx_offload_mask.l2_len |= ~0;
+ tunnel_seed = TXGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
+ tunnel_seed |= TXGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
+
+ switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+		case PKT_TX_TUNNEL_IPIP:
+			/* for non-UDP/GRE tunneling, set to 0b */
+ break;
+ case PKT_TX_TUNNEL_VXLAN:
+ case PKT_TX_TUNNEL_GENEVE:
+ tunnel_seed |= TXGBE_TXD_ETYPE_UDP;
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ tunnel_seed |= TXGBE_TXD_ETYPE_GRE;
+ break;
+ default:
+ PMD_TX_LOG(ERR, "Tunnel type not supported");
+ return;
+ }
+ vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.outer_l2_len);
+ } else {
+ tunnel_seed = 0;
+ vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.l2_len);
+ }
+
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ tx_offload_mask.vlan_tci |= ~0;
+ vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
+ }
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+ union txgbe_crypto_tx_desc_md *md =
+ (union txgbe_crypto_tx_desc_md *)mdata;
+ tunnel_seed |= TXGBE_TXD_IPSEC_SAIDX(md->sa_idx);
+ type_tucmd_mlhl |= md->enc ?
+ (TXGBE_TXD_IPSEC_ESP | TXGBE_TXD_IPSEC_ESPENC) : 0;
+ type_tucmd_mlhl |= TXGBE_TXD_IPSEC_ESPLEN(md->pad_len);
+ tx_offload_mask.sa_idx |= ~0;
+ tx_offload_mask.sec_pad_len |= ~0;
+ }
+#endif
+
+ txq->ctx_cache[ctx_idx].flags = ol_flags;
+ txq->ctx_cache[ctx_idx].tx_offload.data[0] =
+ tx_offload_mask.data[0] & tx_offload.data[0];
+ txq->ctx_cache[ctx_idx].tx_offload.data[1] =
+ tx_offload_mask.data[1] & tx_offload.data[1];
+ txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
+
+ ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
+ ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
+ ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
+ ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_ctx_update(struct txgbe_tx_queue *txq, uint64_t flags,
+ union txgbe_tx_offload tx_offload)
+{
+	/* If it matches the context currently in use */
+ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
+
+	/* Otherwise, check whether the other cached context matches */
+ txq->ctx_curr ^= 1;
+ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
+
+	/* No match: the caller must build a new context descriptor */
+ return TXGBE_CTX_NUM;
+}
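
The two-slot cache means a queue that alternates between two offload profiles never rebuilds a context descriptor. An illustrative call sequence (profiles A and B are hypothetical):

    ctx = what_ctx_update(txq, flags_a, offload_a); /* miss: TXGBE_CTX_NUM */
    /* caller writes context A into slot txq->ctx_curr */
    ctx = what_ctx_update(txq, flags_a, offload_a); /* hit in current slot */
    ctx = what_ctx_update(txq, flags_b, offload_b); /* miss: slot B is built */
    ctx = what_ctx_update(txq, flags_a, offload_a); /* hit after ctx_curr flips */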
+
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
+{
+ uint32_t tmp = 0;
+
+ if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) {
+ tmp |= TXGBE_TXD_CC;
+ tmp |= TXGBE_TXD_L4CS;
+ }
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ tmp |= TXGBE_TXD_CC;
+ tmp |= TXGBE_TXD_IPCS;
+ }
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ tmp |= TXGBE_TXD_CC;
+ tmp |= TXGBE_TXD_EIPCS;
+ }
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ tmp |= TXGBE_TXD_CC;
+ /* implies IPv4 cksum */
+ if (ol_flags & PKT_TX_IPV4)
+ tmp |= TXGBE_TXD_IPCS;
+ tmp |= TXGBE_TXD_L4CS;
+ }
+	if (ol_flags & PKT_TX_VLAN_PKT)
+		tmp |= TXGBE_TXD_CC;
+
+ return tmp;
+}
+
+static inline uint32_t
+tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
+{
+ uint32_t cmdtype = 0;
+
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ cmdtype |= TXGBE_TXD_VLE;
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cmdtype |= TXGBE_TXD_TSE;
+ if (ol_flags & PKT_TX_MACSEC)
+ cmdtype |= TXGBE_TXD_LINKSEC;
+ return cmdtype;
+}
+
+static inline uint8_t
+tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
+{
+ bool tun;
+
+ if (ptype)
+ return txgbe_encode_ptype(ptype);
+
+	/* Only support flags in TXGBE_TX_OFFLOAD_MASK */
+ tun = !!(oflags & PKT_TX_TUNNEL_MASK);
+
+ /* L2 level */
+ ptype = RTE_PTYPE_L2_ETHER;
+ if (oflags & PKT_TX_VLAN) {
+ ptype |= RTE_PTYPE_L2_ETHER_VLAN;
+ }
+
+ /* L3 level */
+ if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM)) {
+ ptype |= RTE_PTYPE_L3_IPV4;
+ } else if (oflags & (PKT_TX_OUTER_IPV6)) {
+ ptype |= RTE_PTYPE_L3_IPV6;
+ }
+
+ if (oflags & (PKT_TX_IPV4 | PKT_TX_IP_CKSUM)) {
+ ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
+ } else if (oflags & (PKT_TX_IPV6)) {
+ ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
+ }
+
+ /* L4 level */
+ switch (oflags & (PKT_TX_L4_MASK)) {
+ case PKT_TX_TCP_CKSUM:
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
+ break;
+ case PKT_TX_UDP_CKSUM:
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
+ break;
+ }
+
+ if (oflags & PKT_TX_TCP_SEG) {
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
+ }
+
+ /* Tunnel */
+ switch (oflags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_VXLAN:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_VXLAN;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_GRE;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_GENEVE:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_GENEVE;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_VXLAN_GPE:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_VXLAN_GPE;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_IPIP:
+ case PKT_TX_TUNNEL_IP:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_IP;
+ break;
+ }
+
+ return txgbe_encode_ptype(ptype);
+}
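
A worked example of the fallback path (no packet_type hint, so ptype == 0): plain IPv4/TCP checksum offload resolves to the same 0x24 ptid as the lookup-table row shown earlier (snippet only, not part of the patch):

    uint64_t oflags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
    uint8_t ptid = tx_desc_ol_flags_to_ptid(oflags, 0);

    /* ptid == TXGBE_PTID_PKT_IP | TXGBE_PTID_TYP_TCP == 0x24 */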
+
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/* Reset transmit descriptors after they have been used */
+static inline int
+txgbe_xmit_cleanup(struct txgbe_tx_queue *txq)
+{
+ struct txgbe_tx_entry *sw_ring = txq->sw_ring;
+ volatile struct txgbe_tx_desc *txr = txq->tx_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+ uint32_t status;
+
+ /* Determine the last descriptor needing to be cleaned */
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ /* Check to make sure the last descriptor to clean is done */
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ status = txr[desc_to_clean_to].dw3;
+ if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "TX descriptor %4u is not done"
+ "(port=%d queue=%d)",
+ desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+		if ((txq->nb_tx_free >> 1) < txq->tx_free_thresh)
+ txgbe_set32_masked(txq->tdc_reg_addr,
+ TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
+ /* Failed to clean any descriptors, better luck next time */
+		return -1;
+ }
+
+ /* Figure out how many descriptors will be cleaned */
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ PMD_TX_FREE_LOG(DEBUG,
+ "Cleaning %4u TX descriptors: %4u to %4u "
+ "(port=%d queue=%d)",
+ nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+
+ /*
+ * The last descriptor to clean is done, so that means all the
+ * descriptors from the last descriptor that was cleaned
+ * up to the last descriptor with the RS bit set
+ * are done. Only reset the threshold descriptor.
+ */
+ txr[desc_to_clean_to].dw3 = 0;
+
+ /* Update the txq to reflect the last descriptor that was cleaned */
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+ /* No Error */
+ return 0;
+}
+
+static inline uint8_t
+txgbe_get_tun_len(struct rte_mbuf *mbuf)
+{
+ struct txgbe_genevehdr genevehdr;
+ const struct txgbe_genevehdr *gh;
+ uint8_t tun_len;
+
+ switch (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_IPIP:
+ tun_len = 0;
+ break;
+ case PKT_TX_TUNNEL_VXLAN:
+ case PKT_TX_TUNNEL_VXLAN_GPE:
+ tun_len = sizeof(struct txgbe_udphdr)
+ + sizeof(struct txgbe_vxlanhdr);
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ tun_len = sizeof(struct txgbe_nvgrehdr);
+ break;
+ case PKT_TX_TUNNEL_GENEVE:
+ gh = rte_pktmbuf_read(mbuf,
+ mbuf->outer_l2_len + mbuf->outer_l3_len,
+ sizeof(genevehdr), &genevehdr);
+ tun_len = sizeof(struct txgbe_udphdr)
+ + sizeof(struct txgbe_genevehdr)
+ + (gh->opt_len << 2);
+ break;
+ default:
+ tun_len = 0;
+ }
+
+ return tun_len;
+}
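
For the fixed-size cases the result follows directly from the header structs: VXLAN is 8 (UDP) + 8 (VXLAN) = 16 bytes, and GRE uses the 8-byte NVGRE header. A sketch, assuming m is a VXLAN-encapsulated mbuf prepared by the application:

    m->ol_flags |= PKT_TX_TUNNEL_VXLAN;
    RTE_ASSERT(txgbe_get_tun_len(m) ==
               sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr));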
+
+uint16_t
+txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_tx_queue *txq;
+ struct txgbe_tx_entry *sw_ring;
+ struct txgbe_tx_entry *txe, *txn;
+ volatile struct txgbe_tx_desc *txr;
+ volatile struct txgbe_tx_desc *txd;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint64_t buf_dma_addr;
+ uint32_t olinfo_status;
+ uint32_t cmd_type_len;
+ uint32_t pkt_len;
+ uint16_t slen;
+ uint64_t ol_flags;
+ uint16_t tx_id;
+ uint16_t tx_last;
+ uint16_t nb_tx;
+ uint16_t nb_used;
+ uint64_t tx_ol_req;
+ uint32_t ctx = 0;
+ uint32_t new_ctx;
+ union txgbe_tx_offload tx_offload;
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t use_ipsec;
+#endif
+
+ tx_offload.data[0] = 0;
+ tx_offload.data[1] = 0;
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Determine if the descriptor ring needs to be cleaned. */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ txgbe_xmit_cleanup(txq);
+
+ rte_prefetch0(&txe->mbuf->pool);
+
+ /* TX loop */
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ new_ctx = 0;
+ tx_pkt = *tx_pkts++;
+ pkt_len = tx_pkt->pkt_len;
+
+ /*
+ * Determine how many (if any) context descriptors
+ * are needed for offload functionality.
+ */
+ ol_flags = tx_pkt->ol_flags;
+#ifdef RTE_LIBRTE_SECURITY
+ use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+#endif
+
+ /* If hardware offload required */
+ tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
+ if (tx_ol_req) {
+ tx_offload.ptid = tx_desc_ol_flags_to_ptid(
+ tx_ol_req, tx_pkt->packet_type);
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+ tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+ tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+ tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (use_ipsec) {
+ union txgbe_crypto_tx_desc_md *ipsec_mdata =
+ (union txgbe_crypto_tx_desc_md *)
+ &tx_pkt->udata64;
+ tx_offload.sa_idx = ipsec_mdata->sa_idx;
+ tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+ }
+#endif
+
+ /* Decide whether to build a new context descriptor or reuse an existing one */
+ ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
+ /* Only allocate a context descriptor if required */
+ new_ctx = (ctx == TXGBE_CTX_NUM);
+ ctx = txq->ctx_curr;
+ }
+
+ /*
+ * Keep track of how many descriptors are used this loop
+ * This will always be the number of segments + the number of
+ * Context descriptors required to transmit the packet
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+
+ /*
+ * The number of descriptors that must be allocated for a
+ * packet is the number of segments of that packet, plus 1
+ * Context Descriptor for the hardware offload, if any.
+ * Determine the last TX descriptor to allocate in the TX ring
+ * for the packet, starting from the current position (tx_id)
+ * in the ring.
+ */
+ tx_last = (uint16_t) (tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+ " tx_first=%u tx_last=%u",
+ (unsigned) txq->port_id,
+ (unsigned) txq->queue_id,
+ (unsigned) pkt_len,
+ (unsigned) tx_id,
+ (unsigned) tx_last);
+
+ /*
+ * Make sure there are enough TX descriptors available to
+ * transmit the entire packet.
+ * nb_used better be less than or equal to txq->tx_free_thresh
+ */
+ if (nb_used > txq->nb_tx_free) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Not enough free TX descriptors "
+ "nb_used=%4u nb_free=%4u "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->port_id, txq->queue_id);
+
+ if (txgbe_xmit_cleanup(txq) != 0) {
+ /* Could not clean any descriptors */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+
+ /* nb_used better be <= txq->tx_free_thresh */
+ if (unlikely(nb_used > txq->tx_free_thresh)) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "The number of descriptors needed to "
+ "transmit the packet exceeds the "
+ "RS bit threshold. This will impact "
+ "performance."
+ "nb_used=%4u nb_free=%4u "
+ "tx_free_thresh=%4u. "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->tx_free_thresh,
+ txq->port_id, txq->queue_id);
+ /*
+ * Loop here until there are enough TX
+ * descriptors or until the ring cannot be
+ * cleaned.
+ */
+ while (nb_used > txq->nb_tx_free) {
+ if (txgbe_xmit_cleanup(txq) != 0) {
+ /*
+ * Could not clean any
+ * descriptors
+ */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /*
+ * By now there are enough free TX descriptors to transmit
+ * the packet.
+ */
+
+ /*
+ * Set common flags of all TX Data Descriptors.
+ *
+ * The following bits must be set in all Data Descriptors:
+ * - TXGBE_TXD_DTYP_DATA
+ * - TXGBE_TXD_DCMD_DEXT
+ *
+ * The following bits must be set in the first Data Descriptor
+ * and are ignored in the other ones:
+ * - TXGBE_TXD_DCMD_IFCS
+ * - TXGBE_TXD_MAC_1588
+ * - TXGBE_TXD_DCMD_VLE
+ *
+ * The following bits must only be set in the last Data
+ * Descriptor:
+ * - TXGBE_TXD_CMD_EOP
+ *
+ * The following bits can be set in any Data Descriptor, but
+ * are only set in the last Data Descriptor:
+ * - TXGBE_TXD_CMD_RS
+ */
+ cmd_type_len = TXGBE_TXD_FCS;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cmd_type_len |= TXGBE_TXD_1588;
+#endif
+
+ olinfo_status = 0;
+ if (tx_ol_req) {
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* When TSO is on, the paylen in the descriptor is
+ * not the packet length but the TCP payload length */
+ pkt_len -= (tx_offload.l2_len +
+ tx_offload.l3_len + tx_offload.l4_len);
+ pkt_len -=
+ (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK)
+ ? tx_offload.outer_l2_len +
+ tx_offload.outer_l3_len : 0;
+ }
+
+ /*
+ * Setup the TX Advanced Context Descriptor if required
+ */
+ if (new_ctx) {
+ volatile struct txgbe_tx_ctx_desc *ctx_txd;
+
+ ctx_txd = (volatile struct txgbe_tx_ctx_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+ tx_offload, &tx_pkt->udata64);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ /*
+ * Set up the TX Advanced Data Descriptor.
+ * This path is taken whether a new context
+ * descriptor was built or an existing one is reused.
+ */
+ cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
+ olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
+ olinfo_status |= TXGBE_TXD_IDX(ctx);
+ }
+
+ olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
+#ifdef RTE_LIBRTE_SECURITY
+ if (use_ipsec)
+ olinfo_status |= TXGBE_TXD_IPSEC;
+#endif
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /*
+ * Set up Transmit Data Descriptor.
+ */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
+ txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
+ txd->dw3 = rte_cpu_to_le_32(olinfo_status);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /*
+ * The last packet data descriptor needs End Of Packet (EOP)
+ */
+ cmd_type_len |= TXGBE_TXD_EOP;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
+ }
+
+end_of_tx:
+
+ rte_wmb();
+
+ /*
+ * Set the Transmit Descriptor Tail (TDT)
+ */
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) tx_id, (unsigned) nb_tx);
+ txgbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
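+
+/*
+ * Usage sketch, illustrative and not part of the driver: applications
+ * reach txgbe_xmit_pkts() through the generic burst API once the
+ * ethdev layer has bound it as dev->tx_pkt_burst. port_id/queue_id
+ * are assumed to be configured elsewhere.
+ *
+ *   uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
+ *   // mbufs in pkts[sent..n-1] were not enqueued; the caller still
+ *   // owns them and may retry or free them.
+ */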
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * Check if packet meets requirements for number of segments
+ *
+ * NOTE: for txgbe it's always (40 - WTHRESH) for both TSO and
+ * non-TSO
+ */
+
+ if (m->nb_segs > TXGBE_TX_MAX_SEG - txq->wthresh) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & TXGBE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
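+
+/*
+ * Usage sketch, illustrative only: txgbe_prep_pkts() is bound as
+ * dev->tx_pkt_prepare, so an application using checksum/TSO offloads
+ * would typically call it via the generic API before the burst send.
+ * handle_bad_pkt() is a hypothetical application helper.
+ *
+ *   uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, n);
+ *   if (nb_prep < n)  // pkts[nb_prep] failed; reason is in rte_errno
+ *       handle_bad_pkt(pkts[nb_prep], rte_errno);
+ *   rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
+ */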
+
+/*********************************************************************
+ *
+ * RX functions
+ *
+ **********************************************************************/
+/* @note: keep txgbe_dev_supported_ptypes_get() in sync with any change here. */
+static inline uint32_t
+txgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
+{
+ uint16_t ptid = TXGBE_RXD_PTID(pkt_info);
+
+ ptid &= ptid_mask;
+
+ return txgbe_decode_ptype(ptid);
+}
+
+static inline uint64_t
+txgbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
+{
+ static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
+ 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, PKT_RX_FDIR,
+ };
+#ifdef RTE_LIBRTE_IEEE1588
+ static uint64_t ip_pkt_etqf_map[8] = {
+ 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, 0,
+ };
+ int etfid = txgbe_etflt_id(TXGBE_RXD_PTID(pkt_info));
+ if (likely(-1 != etfid))
+ return ip_pkt_etqf_map[etfid] |
+ ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+ else
+ return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+#else
+ return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+#endif
+}
+
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
+{
+ uint64_t pkt_flags;
+
+ /*
+ * Check only whether a VLAN is present.
+ * Do not check whether the L3/L4 Rx checksum was done by the NIC;
+ * that can be found from the rte_eth_rxmode.offloads flag.
+ */
+ pkt_flags = (rx_status & TXGBE_RXD_STAT_VLAN &&
+ vlan_flags & PKT_RX_VLAN_STRIPPED)
+ ? vlan_flags : 0;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ if (rx_status & TXGBE_RXD_STAT_1588)
+ pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
+ return pkt_flags;
+}
+
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+ uint64_t pkt_flags = 0;
+
+ /* checksum offload can't be disabled */
+ if (rx_status & TXGBE_RXD_STAT_IPCS) {
+ pkt_flags |= (rx_status & TXGBE_RXD_ERR_IPCS
+ ? PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ }
+
+ if (rx_status & TXGBE_RXD_STAT_L4CS) {
+ pkt_flags |= (rx_status & TXGBE_RXD_ERR_L4CS
+ ? PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD);
+ }
+
+ if (rx_status & TXGBE_RXD_STAT_EIPCS &&
+ rx_status & TXGBE_RXD_ERR_EIPCS) {
+ pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
+ }
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (rx_status & TXGBE_RXD_STAT_SECP) {
+ pkt_flags |= PKT_RX_SEC_OFFLOAD;
+ if (rx_status & TXGBE_RXD_ERR_SECERR)
+ pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ }
+#endif
+
+ return pkt_flags;
+}
+
+/*
+ * LOOK_AHEAD defines how many desc statuses to check beyond the
+ * current descriptor.
+ * It must be a pound define for optimal performance.
+ * Do not change the value of LOOK_AHEAD, as the txgbe_rx_scan_hw_ring
+ * function only works with LOOK_AHEAD=8.
+ */
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD TXGBE: LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+txgbe_rx_scan_hw_ring(struct txgbe_rx_queue *rxq)
+{
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t pkt_flags;
+ int nb_dd;
+ uint32_t s[LOOK_AHEAD];
+ uint32_t pkt_info[LOOK_AHEAD];
+ int i, j, nb_rx = 0;
+ uint32_t status;
+
+ /* get references to current descriptor and S/W ring entry */
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ status = rxdp->qw1.lo.status;
+ /* check to make sure there is at least 1 packet to receive */
+ if (!(status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
+ return 0;
+
+ /*
+ * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
+ * reference packets that are ready to be received.
+ */
+ for (i = 0; i < RTE_PMD_TXGBE_RX_MAX_BURST;
+ i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
+ /* Read desc statuses; the barrier below orders these loads before the other descriptor field reads */
+ for (j = 0; j < LOOK_AHEAD; j++)
+ s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
+
+ rte_smp_rmb();
+
+ /* Compute how many status bits were set */
+ for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
+ (s[nb_dd] & TXGBE_RXD_STAT_DD); nb_dd++)
+ ;
+
+ for (j = 0; j < nb_dd; j++)
+ pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf format */
+ for (j = 0; j < nb_dd; ++j) {
+ mb = rxep[j].mbuf;
+ pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
+ rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
+
+ /* convert descriptor fields to rte mbuf flags */
+ pkt_flags = rx_desc_status_to_pkt_flags(s[j],
+ rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(
+ pkt_info[j]);
+ mb->ol_flags = pkt_flags;
+ mb->packet_type = txgbe_rxd_pkt_info_to_pkt_type(
+ pkt_info[j], rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ mb->hash.rss = rte_le_to_cpu_32(
+ rxdp[j].qw0.dw1);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ mb->hash.fdir.hash = rte_le_to_cpu_16(
+ rxdp[j].qw0.hi.csum) &
+ TXGBE_ATR_HASH_MASK;
+ mb->hash.fdir.id = rte_le_to_cpu_16(
+ rxdp[j].qw0.hi.ipid);
+ }
+ }
+
+ /* Move mbuf pointers from the S/W ring to the stage */
+ for (j = 0; j < LOOK_AHEAD; ++j) {
+ rxq->rx_stage[i + j] = rxep[j].mbuf;
+ }
+
+ /* stop if all requested packets could not be received */
+ if (nb_dd != LOOK_AHEAD)
+ break;
+ }
+
+ /* clear software ring entries so we can cleanup correctly */
+ for (i = 0; i < nb_rx; ++i) {
+ rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+ }
+
+ return nb_rx;
+}
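+
+/*
+ * Illustrative trace with hypothetical values: assuming a burst limit
+ * of 32, the scan above inspects up to four groups of LOOK_AHEAD = 8
+ * descriptors. If the DD bits come back 8, 8 and 5 set in the first
+ * three groups, 8 + 8 + 5 = 21 mbufs are staged and the loop stops at
+ * the first incomplete group; the remaining descriptors are picked up
+ * by the next invocation.
+ */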
+
+static inline int
+txgbe_rx_alloc_bufs(struct txgbe_rx_queue *rxq, bool reset_mbuf)
+{
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx;
+ __le64 dma_addr;
+ int diag, i;
+
+ /* allocate buffers in bulk directly into the S/W ring */
+ alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
+ rxep = &rxq->sw_ring[alloc_idx];
+ diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0))
+ return -ENOMEM;
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; ++i) {
+ /* populate the static rte mbuf fields */
+ mb = rxep[i].mbuf;
+ if (reset_mbuf) {
+ mb->port = rxq->port_id;
+ }
+
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /* populate the descriptors */
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
+ TXGBE_RXD_HDRADDR(&rxdp[i], 0);
+ TXGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
+ }
+
+ /* update state of internal queue structure */
+ rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
+
+ /* no errors */
+ return 0;
+}
+
+static inline uint16_t
+txgbe_rx_fill_from_stage(struct txgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+ int i;
+
+ /* how many packets are ready to return? */
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ /* copy mbuf pointers to the application's packet list */
+ for (i = 0; i < nb_pkts; ++i)
+ rx_pkts[i] = stage[i];
+
+ /* update internal queue state */
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
+static inline uint16_t
+txgbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_rx_queue *rxq = (struct txgbe_rx_queue *)rx_queue;
+ uint16_t nb_rx = 0;
+
+ /* Any previously recv'd pkts will be returned from the Rx stage */
+ if (rxq->rx_nb_avail)
+ return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ /* Scan the H/W ring for packets to receive */
+ nb_rx = (uint16_t)txgbe_rx_scan_hw_ring(rxq);
+
+ /* update internal queue state */
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ /* if required, allocate new buffers to replenish descriptors */
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ uint16_t cur_free_trigger = rxq->rx_free_trigger;
+
+ if (txgbe_rx_alloc_bufs(rxq, true) != 0) {
+ int i, j;
+
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
+ /*
+ * Need to rewind any previous receives if we cannot
+ * allocate new buffers to replenish the old ones.
+ */
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
+ rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+ return 0;
+ }
+
+ /* update tail pointer */
+ rte_wmb();
+ txgbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ /* received any packets this loop? */
+ if (rxq->rx_nb_avail)
+ return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+/* split requests into chunks of size RTE_PMD_TXGBE_RX_MAX_BURST */
+uint16_t
+txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= RTE_PMD_TXGBE_RX_MAX_BURST))
+ return txgbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ /* request is relatively large, chunk it up */
+ nb_rx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_RX_MAX_BURST);
+ ret = txgbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < n)
+ break;
+ }
+
+ return nb_rx;
+}
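+
+/*
+ * Illustrative example, values assumed: with RTE_PMD_TXGBE_RX_MAX_BURST
+ * at 32, a request for 100 packets is served in chunks of 32, 32, 32
+ * and 4. The loop exits early if a chunk returns fewer packets than
+ * requested, since the ring is then known to be drained.
+ */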
+
+uint16_t
+txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_rx_queue *rxq;
+ volatile struct txgbe_rx_desc *rx_ring;
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_entry *sw_ring;
+ struct txgbe_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ struct txgbe_rx_desc rxd;
+ uint64_t dma_addr;
+ uint32_t staterr;
+ uint32_t pkt_info;
+ uint16_t pkt_len;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+ while (nb_rx < nb_pkts) {
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rxdp->qw1.lo.status;
+ if (!(staterr & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * End of packet.
+ *
+ * If the TXGBE_RXD_STAT_EOP flag is not set, the RX packet
+ * is likely to be invalid and to be dropped by the various
+ * validation checks performed by the network stack.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy does not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be retrieved later if
+ * mbufs have been freed in the meantime.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * from happening by sending specific "back-pressure" flow
+ * control frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "ext_err_stat=0x%08x pkt_len=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) staterr,
+ (unsigned) rte_le_to_cpu_16(rxd.qw1.hi.len));
+
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned) rxq->port_id,
+ (unsigned) rxq->queue_id);
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_txgbe_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_txgbe_prefetch(&rx_ring[rx_id]);
+ rte_txgbe_prefetch(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ TXGBE_RXD_HDRADDR(rxdp, 0);
+ TXGBE_RXD_PKTADDR(rxdp, dma_addr);
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.qw1.hi.len) -
+ rxq->crc_len);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->port = rxq->port_id;
+
+ pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
+
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr,
+ rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
+ rxm->ol_flags = pkt_flags;
+ rxm->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
+ rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ rxm->hash.fdir.hash = rte_le_to_cpu_16(
+ rxd.qw0.hi.csum) &
+ TXGBE_ATR_HASH_MASK;
+ rxm->hash.fdir.id = rte_le_to_cpu_16(
+ rxd.qw0.hi.ipid);
+ }
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ txgbe_set32(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
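+
+/*
+ * Illustrative note, values assumed: with rx_free_thresh = 32 the RDT
+ * write above is batched. Descriptors are handed back to the NIC only
+ * once more than 32 have been consumed, and the tail is set to the
+ * last processed descriptor minus one so that RDT never equals RDH.
+ */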
+
+/**
+ * txgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
+ *
+ * Fill the following info in the HEAD buffer of the Rx cluster:
+ * - RX port identifier
+ * - hardware offload data, if any:
+ * - RSS flag & hash
+ * - IP checksum flag
+ * - VLAN TCI, if any
+ * - error flags
+ * @head HEAD of the packet cluster
+ * @desc HW descriptor to get data from
+ * @rxq Pointer to the Rx queue
+ * @staterr Status/error word read from the descriptor
+ */
+static inline void
+txgbe_fill_cluster_head_buf(
+ struct rte_mbuf *head,
+ struct txgbe_rx_desc *desc,
+ struct txgbe_rx_queue *rxq,
+ uint32_t staterr)
+{
+ uint32_t pkt_info;
+ uint64_t pkt_flags;
+
+ head->port = rxq->port_id;
+
+ /* The vlan_tci field is only valid when PKT_RX_VLAN is
+ * set in the pkt_flags field.
+ */
+ head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
+ pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
+ head->ol_flags = pkt_flags;
+ head->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
+ rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ head->hash.fdir.hash = rte_le_to_cpu_16(desc->qw0.hi.csum)
+ & TXGBE_ATR_HASH_MASK;
+ head->hash.fdir.id = rte_le_to_cpu_16(desc->qw0.hi.ipid);
+ }
+}
+
+/**
+ * txgbe_recv_pkts_lro - receive handler for the LRO case.
+ *
+ * @rx_queue Rx queue handle
+ * @rx_pkts table of received packets
+ * @nb_pkts size of rx_pkts table
+ * @bulk_alloc if TRUE bulk allocation is used for HW ring refilling
+ *
+ * Handles the Rx HW ring completions when RSC feature is configured. Uses an
+ * additional ring of txgbe_rsc_entry's that will hold the relevant RSC info.
+ *
+ * We use the same logic as in Linux and in FreeBSD txgbe drivers:
+ * 1) When non-EOP RSC completion arrives:
+ * a) Update the HEAD of the current RSC aggregation cluster with the new
+ * segment's data length.
+ * b) Set the "next" pointer of the current segment to point to the segment
+ * at the NEXTP index.
+ * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
+ * in the sw_rsc_ring.
+ * 2) When EOP arrives we just update the cluster's total length and offload
+ * flags and deliver the cluster up to the upper layers. In our case - put it
+ * in the rx_pkts table.
+ *
+ * Returns the number of received packets/clusters (according to the "bulk
+ * receive" interface).
+ */
+static inline uint16_t
+txgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ bool bulk_alloc)
+{
+ struct txgbe_rx_queue *rxq = rx_queue;
+ volatile struct txgbe_rx_desc *rx_ring = rxq->rx_ring;
+ struct txgbe_rx_entry *sw_ring = rxq->sw_ring;
+ struct txgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0;
+ uint16_t nb_hold = rxq->nb_rx_hold;
+ uint16_t prev_id = rxq->rx_tail;
+
+ while (nb_rx < nb_pkts) {
+ bool eop;
+ struct txgbe_rx_entry *rxe;
+ struct txgbe_scattered_rx_entry *sc_entry;
+ struct txgbe_scattered_rx_entry *next_sc_entry = NULL;
+ struct txgbe_rx_entry *next_rxe = NULL;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb = NULL;
+ struct txgbe_rx_desc rxd;
+ uint16_t data_len;
+ uint16_t next_id;
+ volatile struct txgbe_rx_desc *rxdp;
+ uint32_t staterr;
+
+next_desc:
+ /*
+ * The code in this whole file uses the volatile pointer to
+ * ensure the read ordering of the status and the rest of the
+ * descriptor fields (on the compiler level only!!!). This is so
+ * UGLY - why not just use the compiler barrier instead? DPDK
+ * even has the rte_compiler_barrier() for that.
+ *
+ * But most importantly this is just wrong because this doesn't
+ * ensure memory ordering in a general case at all. For
+ * instance, DPDK is supposed to work on Power CPUs where
+ * compiler barrier may just not be enough!
+ *
+ * I tried to write only this function properly to have a
+ * starting point (as a part of an LRO/RSC series) but the
+ * compiler cursed at me when I tried to cast away the
+ * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
+ * keeping it the way it is for now.
+ *
+ * The code in this file is broken in so many other places and
+ * will just not work on a big endian CPU anyway therefore the
+ * lines below will have to be revisited together with the rest
+ * of the txgbe PMD.
+ *
+ * TODO:
+ * - Get rid of "volatile" and let the compiler do its job.
+ * - Use the proper memory barrier (rte_rmb()) to ensure the
+ * memory ordering below.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
+
+ if (!(staterr & TXGBE_RXD_STAT_DD))
+ break;
+
+ rxd = *rxdp;
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x data_len=%u",
+ rxq->port_id, rxq->queue_id, rx_id, staterr,
+ rte_le_to_cpu_16(rxd.qw1.hi.len));
+
+ if (!bulk_alloc) {
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->
+ rx_mbuf_alloc_failed++;
+ break;
+ }
+ } else if (nb_hold > rxq->rx_free_thresh) {
+ uint16_t next_rdt = rxq->rx_free_trigger;
+
+ if (!txgbe_rx_alloc_bufs(rxq, false)) {
+ rte_wmb();
+ txgbe_set32_relaxed(rxq->rdt_reg_addr,
+ next_rdt);
+ nb_hold -= rxq->rx_free_thresh;
+ } else {
+ PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->
+ rx_mbuf_alloc_failed++;
+ break;
+ }
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ eop = staterr & TXGBE_RXD_STAT_EOP;
+
+ next_id = rx_id + 1;
+ if (next_id == rxq->nb_rx_desc)
+ next_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_txgbe_prefetch(sw_ring[next_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 4 pointers
+ * to mbufs.
+ */
+ if ((next_id & 0x3) == 0) {
+ rte_txgbe_prefetch(&rx_ring[next_id]);
+ rte_txgbe_prefetch(&sw_ring[next_id]);
+ }
+
+ rxm = rxe->mbuf;
+
+ if (!bulk_alloc) {
+ __le64 dma =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ /*
+ * Update RX descriptor with the physical address of the
+ * new data buffer of the new allocated mbuf.
+ */
+ rxe->mbuf = nmb;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ TXGBE_RXD_HDRADDR(rxdp, 0);
+ TXGBE_RXD_PKTADDR(rxdp, dma);
+ } else
+ rxe->mbuf = NULL;
+
+ /*
+ * Set data length & data buffer address of mbuf.
+ */
+ data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
+ rxm->data_len = data_len;
+
+ if (!eop) {
+ uint16_t nextp_id;
+ /*
+ * Get next descriptor index:
+ * - For RSC it's in the NEXTP field.
+ * - For a scattered packet - it's just a following
+ * descriptor.
+ */
+ if (TXGBE_RXD_RSCCNT(rxd.qw0.dw0))
+ nextp_id = TXGBE_RXD_NEXTP(staterr);
+ else
+ nextp_id = next_id;
+
+ next_sc_entry = &sw_sc_ring[nextp_id];
+ next_rxe = &sw_ring[nextp_id];
+ rte_txgbe_prefetch(next_rxe);
+ }
+
+ sc_entry = &sw_sc_ring[rx_id];
+ first_seg = sc_entry->fbuf;
+ sc_entry->fbuf = NULL;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (first_seg == NULL) {
+ first_seg = rxm;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
+ } else {
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ }
+
+ prev_id = rx_id;
+ rx_id = next_id;
+
+ /*
+ * If this is not the last buffer of the received packet, update
+ * the pointer to the first mbuf at the NEXTP entry in the
+ * sw_sc_ring and continue to parse the RX ring.
+ */
+ if (!eop && next_rxe) {
+ rxm->next = next_rxe->mbuf;
+ next_sc_entry->fbuf = first_seg;
+ goto next_desc;
+ }
+
+ /* Initialize the first mbuf of the returned packet */
+ txgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
+
+ /*
+ * Deal with the case when HW CRC stripping is disabled.
+ * That can't happen when LRO is enabled, but it could
+ * still happen in scattered Rx mode.
+ */
+ first_seg->pkt_len -= rxq->crc_len;
+ if (unlikely(rxm->data_len <= rxq->crc_len)) {
+ struct rte_mbuf *lp;
+
+ for (lp = first_seg; lp->next != rxm; lp = lp->next)
+ ;
+
+ first_seg->nb_segs--;
+ lp->data_len -= rxq->crc_len - rxm->data_len;
+ lp->next = NULL;
+ rte_pktmbuf_free_seg(rxm);
+ } else
+ rxm->data_len -= rxq->crc_len;
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = first_seg;
+ }
+
+ /*
+ * Record index of the next RX descriptor to probe.
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
+
+ rte_wmb();
+ txgbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
+ nb_hold = 0;
+ }
+
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+uint16_t
+txgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
+}
+
+/*********************************************************************
+ *
+ * Queue management functions
+ *
+ **********************************************************************/
+
+static void __rte_cold
+txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
+{
+ unsigned i;
+
+ if (txq->sw_ring != NULL) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static int
+txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt)
+{
+ struct txgbe_tx_entry *swr_ring = txq->sw_ring;
+ uint16_t i, tx_last, tx_id;
+ uint16_t nb_tx_free_last;
+ uint16_t nb_tx_to_clean;
+ uint32_t pkt_cnt;
+
+ /* Start free mbuf from the next of tx_tail */
+ tx_last = txq->tx_tail;
+ tx_id = swr_ring[tx_last].next_id;
+
+ if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq))
+ return 0;
+
+ nb_tx_to_clean = txq->nb_tx_free;
+ nb_tx_free_last = txq->nb_tx_free;
+ if (!free_cnt)
+ free_cnt = txq->nb_tx_desc;
+
+ /* Loop through swr_ring to count the number of
+ * freeable mbufs and packets.
+ */
+ for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+ for (i = 0; i < nb_tx_to_clean &&
+ pkt_cnt < free_cnt &&
+ tx_id != tx_last; i++) {
+ if (swr_ring[tx_id].mbuf != NULL) {
+ rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+ swr_ring[tx_id].mbuf = NULL;
+
+ /*
+ * last segment in the packet,
+ * increment packet count
+ */
+ pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+ }
+
+ tx_id = swr_ring[tx_id].next_id;
+ }
+
+ if (pkt_cnt < free_cnt) {
+ if (txgbe_xmit_cleanup(txq))
+ break;
+
+ nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_tx_free;
+ }
+ }
+
+ return (int)pkt_cnt;
+}
+
+static int
+txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ int i, n, cnt;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
+ cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+ for (i = 0; i < cnt; i += n) {
+ if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+ break;
+
+ n = txgbe_tx_free_bufs(txq);
+
+ if (n == 0)
+ break;
+ }
+
+ return i;
+}
+
+int
+txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+ struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+ if (txq->offloads == 0 &&
+#ifdef RTE_LIBRTE_SECURITY
+ !(txq->using_ipsec) &&
+#endif
+ txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
+ return txgbe_tx_done_cleanup_simple(txq, free_cnt);
+ }
+
+ return txgbe_tx_done_cleanup_full(txq, free_cnt);
+}
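+
+/*
+ * Usage sketch, illustrative only: this handler backs the generic
+ * rte_eth_tx_done_cleanup() API, which applications may call to
+ * reclaim transmitted mbufs without enqueueing new packets.
+ *
+ *   int freed = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
+ *   // a free_cnt of 0 asks to clean as many packets as possible
+ */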
+
+static void __rte_cold
+txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
+{
+ if (txq != NULL &&
+ txq->sw_ring != NULL)
+ rte_free(txq->sw_ring);
+}
+
+static void __rte_cold
+txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
+{
+ if (txq != NULL && txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->free_swring(txq);
+ rte_free(txq);
+ }
+}
+
+void __rte_cold
+txgbe_dev_tx_queue_release(void *txq)
+{
+ txgbe_tx_queue_release(txq);
+}
+
+/* (Re)set dynamic txgbe_tx_queue fields to defaults */
+static void __rte_cold
+txgbe_reset_tx_queue(struct txgbe_tx_queue *txq)
+{
+ static const struct txgbe_tx_desc zeroed_desc = {0};
+ struct txgbe_tx_entry *txe = txq->sw_ring;
+ uint16_t prev, i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i] = zeroed_desc;
+ }
+
+ /* Initialize SW ring entries */
+ prev = (uint16_t) (txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile struct txgbe_tx_desc *txd = &txq->tx_ring[i];
+
+ txd->dw3 = rte_cpu_to_le_32(TXGBE_TXD_DD);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
+ txq->tx_tail = 0;
+
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->ctx_curr = 0;
+ memset((void *)&txq->ctx_cache, 0,
+ TXGBE_CTX_NUM * sizeof(struct txgbe_ctx_info));
+}
+
+static const struct txgbe_txq_ops def_txq_ops = {
+ .release_mbufs = txgbe_tx_queue_release_mbufs,
+ .free_swring = txgbe_tx_free_swring,
+ .reset = txgbe_reset_tx_queue,
+};
+
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void __rte_cold
+txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
+{
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if ((txq->offloads == 0) &&
+#ifdef RTE_LIBRTE_SECURITY
+ !(txq->using_ipsec) &&
+#endif
+ (txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+ dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
+ dev->tx_pkt_prepare = NULL;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+ PMD_INIT_LOG(DEBUG,
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
+ PMD_INIT_LOG(DEBUG,
+ " - tx_free_thresh = %lu " "[RTE_PMD_TXGBE_TX_MAX_BURST=%lu]",
+ (unsigned long)txq->tx_free_thresh,
+ (unsigned long)RTE_PMD_TXGBE_TX_MAX_BURST);
+ dev->tx_pkt_burst = txgbe_xmit_pkts;
+ dev->tx_pkt_prepare = txgbe_prep_pkts;
+ }
+}
+
+uint64_t
+txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+uint64_t
+txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_UDP_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (!txgbe_is_vf(dev))
+ tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+
+ tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+ tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+ return tx_offload_capa;
+}
+
+int __rte_cold
+txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const struct rte_memzone *tz;
+ struct txgbe_tx_queue *txq;
+ struct txgbe_hw *hw;
+ uint16_t tx_free_thresh;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ /*
+ * Validate number of transmit descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of TXGBE_ALIGN.
+ */
+ if (nb_desc % TXGBE_TXD_ALIGN != 0 ||
+ (nb_desc > TXGBE_RING_DESC_MAX) ||
+ (nb_desc < TXGBE_RING_DESC_MIN)) {
+ return -EINVAL;
+ }
+
+ /*
+ * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+ * descriptors are used or if the number of descriptors required
+ * to transmit a packet is greater than the number of free TX
+ * descriptors.
+ * One descriptor in the TX ring is used as a sentinel to avoid a
+ * H/W race condition, hence the maximum threshold constraints.
+ * When set to zero use default values.
+ */
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
+ "TX descriptors minus 3. (tx_free_thresh=%u "
+ "port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ if ((nb_desc % tx_free_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
+ "number of TX descriptors. (tx_free_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ txgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct txgbe_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ return -ENOMEM;
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
+ TXGBE_ALIGN, socket_id);
+ if (tz == NULL) {
+ txgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->pthresh = tx_conf->tx_thresh.pthresh;
+ txq->hthresh = tx_conf->tx_thresh.hthresh;
+ txq->wthresh = tx_conf->tx_thresh.wthresh;
+ txq->queue_id = queue_idx;
+ txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ txq->port_id = dev->data->port_id;
+ txq->offloads = offloads;
+ txq->ops = &def_txq_ops;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+#ifdef RTE_LIBRTE_SECURITY
+ txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_SECURITY);
+#endif
+
+ /*
+ * Use the VF register layout for the tail pointer when a virtual function is detected
+ */
+ if (hw->mac.type == txgbe_mac_raptor_vf) {
+ txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(queue_idx));
+ txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(queue_idx));
+ } else {
+ txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(txq->reg_idx));
+ txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(txq->reg_idx));
+ }
+
+ txq->tx_ring_phys_addr = TMZ_PADDR(tz);
+ txq->tx_ring = (struct txgbe_tx_desc *) TMZ_VADDR(tz);
+
+ /* Allocate software ring */
+ txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
+ sizeof(struct txgbe_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL) {
+ txgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+ /* set up scalar TX function as appropriate */
+ txgbe_set_tx_function(dev, txq);
+
+ txq->ops->reset(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
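+
+/*
+ * Usage sketch, illustrative with assumed values: an application
+ * configures a Tx queue on this driver through the generic ethdev API.
+ *
+ *   struct rte_eth_txconf txconf = dev_info.default_txconf;
+ *   txconf.tx_free_thresh = 32;  // must divide nb_desc, see above
+ *   ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
+ *                                &txconf);
+ */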
+
+/**
+ * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
+ *
+ * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
+ * in the sw_rsc_ring is not set to NULL but rather points to the next
+ * mbuf of this RSC aggregation (that has not been completed yet and still
+ * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
+ * just free the first "nb_segs" segments of the cluster explicitly by calling
+ * rte_pktmbuf_free_seg().
+ *
+ * @m scattered cluster head
+ */
+static void __rte_cold
+txgbe_free_sc_cluster(struct rte_mbuf *m)
+{
+ uint16_t i, nb_segs = m->nb_segs;
+ struct rte_mbuf *next_seg;
+
+ for (i = 0; i < nb_segs; i++) {
+ next_seg = m->next;
+ rte_pktmbuf_free_seg(m);
+ m = next_seg;
+ }
+}
+
+static void __rte_cold
+txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)
+{
+ unsigned i;
+
+ if (rxq->sw_ring != NULL) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ if (rxq->rx_nb_avail) {
+ for (i = 0; i < rxq->rx_nb_avail; ++i) {
+ struct rte_mbuf *mb;
+
+ mb = rxq->rx_stage[rxq->rx_next_avail + i];
+ rte_pktmbuf_free_seg(mb);
+ }
+ rxq->rx_nb_avail = 0;
+ }
+ }
+
+ if (rxq->sw_sc_ring)
+ for (i = 0; i < rxq->nb_rx_desc; i++)
+ if (rxq->sw_sc_ring[i].fbuf) {
+ txgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
+ rxq->sw_sc_ring[i].fbuf = NULL;
+ }
+}
+
+static void __rte_cold
+txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
+{
+ if (rxq != NULL) {
+ txgbe_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq->sw_sc_ring);
+ rte_free(rxq);
+ }
+}
+
+void __rte_cold
+txgbe_dev_rx_queue_release(void *rxq)
+{
+ txgbe_rx_queue_release(rxq);
+}
+
+/*
+ * Check if Rx Burst Bulk Alloc function can be used.
+ * Return
+ * 0: the preconditions are satisfied and the bulk allocation function
+ * can be used.
+ * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
+ * function must be used.
+ */
+static inline int __rte_cold
+check_rx_burst_bulk_alloc_preconditions(struct txgbe_rx_queue *rxq)
+{
+ int ret = 0;
+
+ /*
+ * Make sure the following pre-conditions are satisfied:
+ * rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST
+ * rxq->rx_free_thresh < rxq->nb_rx_desc
+ * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
+ * Scattered packets are not supported. This should be checked
+ * outside of this function.
+ */
+ if (!(rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "RTE_PMD_TXGBE_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, RTE_PMD_TXGBE_RX_MAX_BURST);
+ ret = -EINVAL;
+ } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "rxq->nb_rx_desc=%d",
+ rxq->rx_free_thresh, rxq->nb_rx_desc);
+ ret = -EINVAL;
+ } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
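+
+/*
+ * Worked example with hypothetical values: assuming
+ * RTE_PMD_TXGBE_RX_MAX_BURST is 32, a queue with nb_rx_desc = 512 and
+ * rx_free_thresh = 32 passes all three checks (32 >= 32, 32 < 512,
+ * 512 % 32 == 0) and may use the bulk-alloc Rx path, while
+ * rx_free_thresh = 24 fails the first check and forces the default
+ * receive function for the whole port.
+ */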
+
+/* Reset dynamic txgbe_rx_queue fields back to defaults */
+static void __rte_cold
+txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
+{
+ static const struct txgbe_rx_desc zeroed_desc = {{{0}, {0} }, {{0}, {0} } };
+ unsigned i;
+ uint16_t len = rxq->nb_rx_desc;
+
+ /*
+ * By default, the Rx queue setup function allocates enough memory for
+ * TXGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
+ * extra memory at the end of the descriptor ring to be zero'd out.
+ */
+ if (adapter->rx_bulk_alloc_allowed)
+ /* zero out extra memory */
+ len += RTE_PMD_TXGBE_RX_MAX_BURST;
+
+ /*
+ * Zero out HW ring memory. Zero out extra memory at the end of
+ * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
+ * reads extra memory as zeros.
+ */
+ for (i = 0; i < len; i++) {
+ rxq->rx_ring[i] = zeroed_desc;
+ }
+
+ /*
+ * initialize extra software ring entries. Space for these extra
+ * entries is always allocated
+ */
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+ for (i = rxq->nb_rx_desc; i < len; ++i) {
+ rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
+ }
+
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
+uint64_t
+txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
+{
+ uint64_t offloads = 0;
+
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return offloads;
+}
+
+uint64_t
+txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
+
+ offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_RSS_HASH |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ if (!txgbe_is_vf(dev))
+ offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+ /*
+ * RSC is only supported by PF devices in a non-SR-IOV
+ * mode.
+ */
+ if ((hw->mac.type == txgbe_mac_raptor) &&
+ !sriov->active)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ if (hw->mac.type == txgbe_mac_raptor)
+ offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+ offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+ return offloads;
+}
+
+int __rte_cold
+txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *rz;
+ struct txgbe_rx_queue *rxq;
+ struct txgbe_hw *hw;
+ uint16_t len;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ /*
+ * Validate number of receive descriptors.
+ * It must not exceed hardware maximum, and must be multiple
+ * of TXGBE_ALIGN.
+ */
+ if (nb_desc % TXGBE_RXD_ALIGN != 0 ||
+ (nb_desc > TXGBE_RING_DESC_MAX) ||
+ (nb_desc < TXGBE_RING_DESC_MIN)) {
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ txgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct txgbe_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ return -ENOMEM;
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
+ rxq->port_id = dev->data->port_id;
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+ rxq->drop_en = rx_conf->rx_drop_en;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
+
+ /*
+ * The packet type in RX descriptor is different for different NICs.
+ * So set different masks for different NICs.
+ */
+ rxq->pkt_type_mask = TXGBE_PTID_MASK;
+
+ /*
+ * Allocate RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ RX_RING_SZ, TXGBE_ALIGN, socket_id);
+ if (rz == NULL) {
+ txgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ /*
+ * Zero init all the descriptors in the ring.
+ */
+ memset(rz->addr, 0, RX_RING_SZ);
+
+ /*
+ * Set up the RDT/RDH register addresses, using the VF layout for a virtual function
+ */
+ if (hw->mac.type == txgbe_mac_raptor_vf) {
+ rxq->rdt_reg_addr =
+ TXGBE_REG_ADDR(hw, TXGBE_RXWP(queue_idx));
+ rxq->rdh_reg_addr =
+ TXGBE_REG_ADDR(hw, TXGBE_RXRP(queue_idx));
+ } else {
+ rxq->rdt_reg_addr =
+ TXGBE_REG_ADDR(hw, TXGBE_RXWP(rxq->reg_idx));
+ rxq->rdh_reg_addr =
+ TXGBE_REG_ADDR(hw, TXGBE_RXRP(rxq->reg_idx));
+ }
+
+ rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
+ rxq->rx_ring = (struct txgbe_rx_desc *)TMZ_VADDR(rz);
+
+ /*
+ * Certain constraints must be met in order to use the bulk buffer
+ * allocation Rx burst function. If any of Rx queues doesn't meet them
+ * the feature should be disabled for the whole port.
+ */
+ if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ rxq->queue_id, rxq->port_id);
+ adapter->rx_bulk_alloc_allowed = false;
+ }
+
+ /*
+ * Allocate software ring. Allow for space at the end of the
+ * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
+ * function does not access an invalid memory region.
+ */
+ len = nb_desc;
+ if (adapter->rx_bulk_alloc_allowed)
+ len += RTE_PMD_TXGBE_RX_MAX_BURST;
+
+ rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
+ sizeof(struct txgbe_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_ring) {
+ txgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ /*
+ * Always allocate even if it's not going to be needed in order to
+ * simplify the code.
+ *
+ * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
+ * be requested in txgbe_dev_rx_init(), which is called later from
+ * dev_start() flow.
+ */
+ rxq->sw_sc_ring =
+ rte_zmalloc_socket("rxq->sw_sc_ring",
+ sizeof(struct txgbe_scattered_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_sc_ring) {
+ txgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
+ "dma_addr=0x%"PRIx64,
+ rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
+ rxq->rx_ring_phys_addr);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ txgbe_reset_rx_queue(adapter, rxq);
+
+ return 0;
+}
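+
+/*
+ * Usage sketch, illustrative with assumed values: the Rx counterpart
+ * of the Tx setup above; the mempool must supply buffers large enough
+ * for the configured frame size.
+ *
+ *   ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
+ *                                NULL, mb_pool);
+ *   // NULL selects the default rxconf reported by the driver
+ */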
+
+uint32_t
+txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define TXGBE_RXQ_SCAN_INTERVAL 4
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_queue *rxq;
+ uint32_t desc = 0;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &(rxq->rx_ring[rxq->rx_tail]);
+
+ while ((desc < rxq->nb_rx_desc) &&
+ (rxdp->qw1.lo.status &
+ rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) {
+ desc += TXGBE_RXQ_SCAN_INTERVAL;
+ rxdp += TXGBE_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
+int
+txgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_queue *rxq = rx_queue;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return 0;
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ rxdp = &rxq->rx_ring[desc];
+ return !!(rxdp->qw1.lo.status &
+ rte_cpu_to_le_32(TXGBE_RXD_STAT_DD));
+}
+
+int
+txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct txgbe_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t nb_hold, desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ nb_hold = rxq->nb_rx_hold;
+ if (offset >= rxq->nb_rx_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].qw1.lo.status;
+ if (*status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct txgbe_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].dw3;
+ if (*status & rte_cpu_to_le_32(TXGBE_TXD_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+void __rte_cold
+txgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+	unsigned int i;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct txgbe_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (txq != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq != NULL) {
+ txgbe_rx_queue_release_mbufs(rxq);
+ txgbe_reset_rx_queue(adapter, rxq);
+ }
+ }
+}
+
+void
+txgbe_dev_free_queues(struct rte_eth_dev *dev)
+{
+	unsigned int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+/*********************************************************************
+ *
+ * Device RX/TX init functions
+ *
+ **********************************************************************/
+
+/**
+ * Receive Side Scaling (RSS)
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source
+ * and destination ports of TCP/UDP headers, if any, of received packets are
+ * hashed against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the RX queue index where to store the
+ * received packets.
+ * The following output is supplied in the RX write-back descriptor:
+ * - 32-bit result of the Microsoft RSS hash function,
+ * - 4-bit RSS type field.
+ */
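+
+/*
+ * Example: a packet whose 32-bit RSS hash is 0x12345678 uses its 7 LSBs
+ * (0x78 & 0x7F = 120) as the RETA index; if RETA[120] holds 2, the
+ * packet is steered to Rx queue 2 and the full hash is reported in
+ * mbuf->hash.rss.
+ */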
+
+/*
+ * Used as the default key.
+ */
+static uint8_t rss_intel_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static void
+txgbe_rss_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+
+ hw = TXGBE_DEV_HW(dev);
+ if (hw->mac.type == txgbe_mac_raptor_vf) {
+ wr32m(hw, TXGBE_VFPLCFG, TXGBE_VFPLCFG_RSSENA, 0);
+ } else {
+ wr32m(hw, TXGBE_RACTL, TXGBE_RACTL_RSSENA, 0);
+ }
+}
+
+int
+txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint8_t *hash_key;
+ uint32_t mrqc;
+ uint32_t rss_key;
+ uint64_t rss_hf;
+ uint16_t i;
+
+ if (!txgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+
+ hash_key = rss_conf->rss_key;
+ if (hash_key) {
+ /* Fill in RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = LS32(hash_key[(i * 4) + 0], 0, 0xFF);
+ rss_key |= LS32(hash_key[(i * 4) + 1], 8, 0xFF);
+ rss_key |= LS32(hash_key[(i * 4) + 2], 16, 0xFF);
+ rss_key |= LS32(hash_key[(i * 4) + 3], 24, 0xFF);
+ wr32at(hw, TXGBE_REG_RSSKEY, i, rss_key);
+ }
+ }
+
+ /* Set configured hashing protocols */
+ rss_hf = rss_conf->rss_hf & TXGBE_RSS_OFFLOAD_ALL;
+ if (hw->mac.type == txgbe_mac_raptor_vf) {
+ mrqc = rd32(hw, TXGBE_VFPLCFG);
+ mrqc &= ~TXGBE_VFPLCFG_RSSMASK;
+ if (rss_hf & ETH_RSS_IPV4)
+ mrqc |= TXGBE_VFPLCFG_RSSIPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ mrqc |= TXGBE_VFPLCFG_RSSIPV4TCP;
+ if (rss_hf & ETH_RSS_IPV6 ||
+ rss_hf & ETH_RSS_IPV6_EX)
+ mrqc |= TXGBE_VFPLCFG_RSSIPV6;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
+ rss_hf & ETH_RSS_IPV6_TCP_EX)
+ mrqc |= TXGBE_VFPLCFG_RSSIPV6TCP;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ mrqc |= TXGBE_VFPLCFG_RSSIPV4UDP;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
+ rss_hf & ETH_RSS_IPV6_UDP_EX)
+ mrqc |= TXGBE_VFPLCFG_RSSIPV6UDP;
+
+ if (rss_hf) {
+ mrqc |= TXGBE_VFPLCFG_RSSENA;
+ } else {
+ mrqc &= ~TXGBE_VFPLCFG_RSSENA;
+ }
+
+ if (dev->data->nb_rx_queues > 3) {
+ mrqc |= TXGBE_VFPLCFG_RSSHASH(2);
+ } else if (dev->data->nb_rx_queues > 1) {
+ mrqc |= TXGBE_VFPLCFG_RSSHASH(1);
+ }
+ wr32(hw, TXGBE_VFPLCFG, mrqc);
+ } else {
+ mrqc = rd32(hw, TXGBE_RACTL);
+ mrqc &= ~TXGBE_RACTL_RSSMASK;
+ if (rss_hf & ETH_RSS_IPV4)
+ mrqc |= TXGBE_RACTL_RSSIPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ mrqc |= TXGBE_RACTL_RSSIPV4TCP;
+ if (rss_hf & ETH_RSS_IPV6 ||
+ rss_hf & ETH_RSS_IPV6_EX)
+ mrqc |= TXGBE_RACTL_RSSIPV6;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP ||
+ rss_hf & ETH_RSS_IPV6_TCP_EX)
+ mrqc |= TXGBE_RACTL_RSSIPV6TCP;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ mrqc |= TXGBE_RACTL_RSSIPV4UDP;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP ||
+ rss_hf & ETH_RSS_IPV6_UDP_EX)
+ mrqc |= TXGBE_RACTL_RSSIPV6UDP;
+
+ if (rss_hf) {
+ mrqc |= TXGBE_RACTL_RSSENA;
+ } else {
+ mrqc &= ~TXGBE_RACTL_RSSENA;
+ }
+ wr32(hw, TXGBE_RACTL, mrqc);
+ }
+
+ return 0;
+}
+
+int
+txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint8_t *hash_key;
+ uint32_t mrqc;
+ uint32_t rss_key;
+ uint64_t rss_hf;
+ uint16_t i;
+
+ hash_key = rss_conf->rss_key;
+ if (hash_key) {
+ /* Return RSS hash key */
+ for (i = 0; i < 10; i++) {
+ rss_key = rd32at(hw, TXGBE_REG_RSSKEY, i);
+ hash_key[(i * 4) + 0] = RS32(rss_key, 0, 0xFF);
+ hash_key[(i * 4) + 1] = RS32(rss_key, 8, 0xFF);
+ hash_key[(i * 4) + 2] = RS32(rss_key, 16, 0xFF);
+ hash_key[(i * 4) + 3] = RS32(rss_key, 24, 0xFF);
+ }
+ }
+
+ rss_hf = 0;
+ if (hw->mac.type == txgbe_mac_raptor_vf) {
+ mrqc = rd32(hw, TXGBE_VFPLCFG);
+ if (mrqc & TXGBE_VFPLCFG_RSSIPV4)
+ rss_hf |= ETH_RSS_IPV4;
+ if (mrqc & TXGBE_VFPLCFG_RSSIPV4TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (mrqc & TXGBE_VFPLCFG_RSSIPV6)
+ rss_hf |= ETH_RSS_IPV6 |
+ ETH_RSS_IPV6_EX;
+ if (mrqc & TXGBE_VFPLCFG_RSSIPV6TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_TCP_EX;
+ if (mrqc & TXGBE_VFPLCFG_RSSIPV4UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (mrqc & TXGBE_VFPLCFG_RSSIPV6UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+ if (!(mrqc & TXGBE_VFPLCFG_RSSENA)) {
+ rss_hf = 0;
+ }
+ } else {
+ mrqc = rd32(hw, TXGBE_RACTL);
+ if (mrqc & TXGBE_RACTL_RSSIPV4)
+ rss_hf |= ETH_RSS_IPV4;
+ if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (mrqc & TXGBE_RACTL_RSSIPV6)
+ rss_hf |= ETH_RSS_IPV6 |
+ ETH_RSS_IPV6_EX;
+ if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_TCP_EX;
+ if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+ if (!(mrqc & TXGBE_RACTL_RSSENA)) {
+ rss_hf = 0;
+ }
+ }
+
+ rss_hf &= TXGBE_RSS_OFFLOAD_ALL;
+
+ rss_conf->rss_hf = rss_hf;
+ return 0;
+}
+
+static void
+txgbe_rss_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rss_conf rss_conf;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
+ if (adapter->rss_reta_updated == 0) {
+ reta = 0;
+ for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ reta = (reta >> 8) | LS32(j, 24, 0xFF);
+ if ((i & 3) == 3)
+ wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+ }
+ }
+ /*
+ * Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ txgbe_dev_rss_hash_update(dev, &rss_conf);
+}
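+
+/*
+ * The RETA fill in txgbe_rss_configure() packs four 8-bit entries into
+ * each 32-bit RSSTBL register (128 entries in 32 registers), entry 0 in
+ * the least significant byte. With nb_rx_queues = 4 the default table
+ * cycles 0,1,2,3,... so every RSSTBL register is written as 0x03020100.
+ */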
+
+#define NUM_VFTA_REGISTERS 128
+#define NIC_RX_BUFFER_SIZE 0x200
+
+static void
+txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_dcb_conf *cfg;
+ struct txgbe_hw *hw;
+ enum rte_eth_nb_pools num_pools;
+ uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
+ uint16_t pbsize;
+ uint8_t nb_tcs; /* number of traffic classes */
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ num_pools = cfg->nb_queue_pools;
+ /* Check we have a valid number of pools */
+ if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
+ txgbe_rss_disable(dev);
+ return;
+ }
+ /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
+ nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
+
+ /*
+ * split rx buffer up into sections, each for 1 traffic class
+ */
+ pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ for (i = 0; i < nb_tcs; i++) {
+ uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
+
+		rxpbsize &= (~(0x3FF << 10)); /* clear the 10-bit size field */
+		rxpbsize |= (pbsize << 10); /* set new size value */
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+ }
+ /* zero alloc all unused TCs */
+ for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
+
+		rxpbsize &= (~(0x3FF << 10)); /* clear the 10-bit size field */
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+ }
+
+ if (num_pools == ETH_16_POOLS) {
+ mrqc = TXGBE_PORTCTL_NUMTC_8;
+ mrqc |= TXGBE_PORTCTL_NUMVT_16;
+ } else {
+ mrqc = TXGBE_PORTCTL_NUMTC_4;
+ mrqc |= TXGBE_PORTCTL_NUMVT_32;
+ }
+ wr32m(hw, TXGBE_PORTCTL,
+ TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK, mrqc);
+
+ vt_ctl = TXGBE_POOLCTL_RPLEN;
+ if (cfg->enable_default_pool) {
+ vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
+ } else {
+ vt_ctl |= TXGBE_POOLCTL_DEFDSA;
+ }
+
+ wr32(hw, TXGBE_POOLCTL, vt_ctl);
+
+ queue_mapping = 0;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ /*
+ * mapping is done with 3 bits per priority,
+ * so shift by i*3 each time
+ */
+ queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
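+	/*
+	 * e.g. dcb_tc = {0, 1, 2, 3, 0, 1, 2, 3} packs to
+	 * queue_mapping = 0x00688688.
+	 */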
+
+ wr32(hw, TXGBE_RPUP2TC, queue_mapping);
+
+ wr32(hw, TXGBE_ARBRXCTL, TXGBE_ARBRXCTL_RRM);
+
+ /* enable vlan filtering and allow all vlan tags through */
+ vlanctrl = rd32(hw, TXGBE_VLANCTL);
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+ /* enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+ wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
+ }
+
+ wr32(hw, TXGBE_POOLRXENA(0),
+ num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+ wr32(hw, TXGBE_ETHADDRIDX, 0);
+ wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
+ wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
+
+ /* set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ wr32(hw, TXGBE_PSRVLANIDX, i);
+ wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
+ (cfg->pool_map[i].vlan_id & 0xFFF)));
+
+ wr32(hw, TXGBE_PSRVLANPLM(0), cfg->pool_map[i].pools);
+ }
+}
+
+/**
+ * txgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Disable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg |= TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+ /* Enable DCB for Tx with 8 TCs */
+ reg = rd32(hw, TXGBE_PORTCTL);
+	reg &= ~TXGBE_PORTCTL_NUMTC_MASK;
+ reg |= TXGBE_PORTCTL_DCB;
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ reg |= TXGBE_PORTCTL_NUMTC_8;
+ } else {
+ reg |= TXGBE_PORTCTL_NUMTC_4;
+ }
+ wr32(hw, TXGBE_PORTCTL, reg);
+
+ /* Enable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg &= ~TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+}
+
+/**
+ * txgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+	/* PF VF Transmit Enable */
+ wr32(hw, TXGBE_POOLTXENA(0),
+ vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+	/* Configure general DCB TX parameters */
+ txgbe_dcb_tx_hw_config(dev, dcb_config);
+}
+
+static void
+txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ struct txgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ /* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
+ if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
+ dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ } else {
+ dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ }
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = vmdq_rx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
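+
+/*
+ * For instance, mapping user priorities 0-3 to TC0 and 4-7 to TC1
+ * (dcb_tc = {0, 0, 0, 0, 1, 1, 1, 1}) leaves TC0 with an
+ * up_to_tc_bitmap of 0x0F and TC1 with 0xF0. The Tx direction below
+ * uses the same packing.
+ */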
+
+static void
+txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+ struct txgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ /* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
+ if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
+ dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+ } else {
+ dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+ dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+ }
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = vmdq_tx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+static void
+txgbe_dcb_rx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ struct txgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = rx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+static void
+txgbe_dcb_tx_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ struct rte_eth_dcb_tx_conf *tx_conf =
+ &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+ struct txgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
+ dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
+
+ /* Initialize User Priority to Traffic Class mapping */
+ for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+ }
+
+ /* User Priority to Traffic Class mapping */
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ j = tx_conf->dcb_tc[i];
+ tc = &dcb_config->tc_config[j];
+ tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+ (uint8_t)(1 << i);
+ }
+}
+
+/**
+ * txgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ uint32_t reg;
+ uint32_t vlanctrl;
+ uint8_t i;
+ uint32_t q;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; WSP)
+ */
+ reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP | TXGBE_ARBRXCTL_DIA;
+ wr32(hw, TXGBE_ARBRXCTL, reg);
+
+ reg = rd32(hw, TXGBE_PORTCTL);
+ reg &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+ if (dcb_config->num_tcs.pg_tcs == 4) {
+ reg |= TXGBE_PORTCTL_NUMTC_4;
+ if (dcb_config->vt_mode) {
+ reg |= TXGBE_PORTCTL_NUMVT_32;
+ } else {
+ wr32(hw, TXGBE_POOLCTL, 0);
+ }
+ }
+
+ if (dcb_config->num_tcs.pg_tcs == 8) {
+ reg |= TXGBE_PORTCTL_NUMTC_8;
+		if (dcb_config->vt_mode) {
+			reg |= TXGBE_PORTCTL_NUMVT_16;
+		} else {
+			wr32(hw, TXGBE_POOLCTL, 0);
+		}
+ }
+
+ wr32(hw, TXGBE_PORTCTL, reg);
+
+	/*
+	 * Enable drop for all queues; the SR-IOV and non-SR-IOV cases
+	 * program the hardware identically.
+	 */
+	for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
+		u32 val = 1 << (q % 32);
+		wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+	}
+
+ /* VLNCTL: enable vlan filtering and allow all vlan tags through */
+ vlanctrl = rd32(hw, TXGBE_VLANCTL);
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+ /* VLANTBL - enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+ wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
+ }
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP;
+ wr32(hw, TXGBE_ARBRXCTL, reg);
+}
+
+static void
+txgbe_dcb_hw_arbite_rx_config(struct txgbe_hw *hw, uint16_t *refill,
+ uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ txgbe_dcb_config_rx_arbiter_raptor(hw, refill, max, bwg_id,
+ tsa, map);
+}
+
+static void
+txgbe_dcb_hw_arbite_tx_config(struct txgbe_hw *hw, uint16_t *refill, uint16_t *max,
+ uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+ switch (hw->mac.type) {
+ case txgbe_mac_raptor:
+ txgbe_dcb_config_tx_desc_arbiter_raptor(hw, refill, max, bwg_id, tsa);
+ txgbe_dcb_config_tx_data_arbiter_raptor(hw, refill, max, bwg_id, tsa, map);
+ break;
+ default:
+ break;
+ }
+}
+
+#define DCB_RX_CONFIG 1
+#define DCB_TX_CONFIG 1
+#define DCB_TX_PB 1024
+/**
+ * txgbe_dcb_hw_configure - Enable DCB and configure
+ * general DCB in VT mode and non-VT mode parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static int
+txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
+ struct txgbe_dcb_config *dcb_config)
+{
+ int ret = 0;
+ uint8_t i, pfc_en, nb_tcs;
+ uint16_t pbsize, rx_buffer_size;
+ uint8_t config_dcb_rx = 0;
+ uint8_t config_dcb_tx = 0;
+ uint8_t tsa[TXGBE_DCB_TC_MAX] = {0};
+ uint8_t bwgid[TXGBE_DCB_TC_MAX] = {0};
+ uint16_t refill[TXGBE_DCB_TC_MAX] = {0};
+ uint16_t max[TXGBE_DCB_TC_MAX] = {0};
+ uint8_t map[TXGBE_DCB_TC_MAX] = {0};
+ struct txgbe_dcb_tc_config *tc;
+ uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
+
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ config_dcb_rx = DCB_RX_CONFIG;
+ /*
+ * get dcb and VT rx configuration parameters
+ * from rte_eth_conf
+ */
+ txgbe_vmdq_dcb_rx_config(dev, dcb_config);
+		/* Configure general VMDQ and DCB RX parameters */
+ txgbe_vmdq_dcb_configure(dev);
+ break;
+ case ETH_MQ_RX_DCB:
+ case ETH_MQ_RX_DCB_RSS:
+ dcb_config->vt_mode = false;
+ config_dcb_rx = DCB_RX_CONFIG;
+		/* Get DCB RX configuration parameters from rte_eth_conf */
+		txgbe_dcb_rx_config(dev, dcb_config);
+		/* Configure general DCB RX parameters */
+ txgbe_dcb_rx_hw_config(dev, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
+ break;
+ }
+ switch (dev->data->dev_conf.txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ dcb_config->vt_mode = true;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /* get DCB and VT TX configuration parameters
+ * from rte_eth_conf
+ */
+ txgbe_dcb_vt_tx_config(dev, dcb_config);
+ /* Configure general VMDQ and DCB TX parameters */
+ txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
+ break;
+
+ case ETH_MQ_TX_DCB:
+ dcb_config->vt_mode = false;
+ config_dcb_tx = DCB_TX_CONFIG;
+ /* get DCB TX configuration parameters from rte_eth_conf */
+ txgbe_dcb_tx_config(dev, dcb_config);
+ /* Configure general DCB TX parameters */
+ txgbe_dcb_tx_hw_config(dev, dcb_config);
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
+ break;
+ }
+
+ nb_tcs = dcb_config->num_tcs.pfc_tcs;
+ /* Unpack map */
+ txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
+ if (nb_tcs == ETH_4_TCS) {
+ /* Avoid un-configured priority mapping to TC0 */
+ uint8_t j = 4;
+ uint8_t mask = 0xFF;
+
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+ mask = (uint8_t)(mask & (~(1 << map[i])));
+ for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
+ if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+ map[j++] = i;
+ mask >>= 1;
+ }
+ /* Re-configure 4 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
+ }
+ for (; i < TXGBE_DCB_TC_MAX; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = 0;
+ }
+ } else {
+ /* Re-configure 8 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ }
+ }
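+	/*
+	 * In the 8-TC case above, 100 / 8 = 12 plus (i & 1) alternates
+	 * the percentages as 12, 13, 12, 13, ... which sums to exactly
+	 * 100 across the eight TCs.
+	 */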
+
+ rx_buffer_size = NIC_RX_BUFFER_SIZE;
+
+ if (config_dcb_rx) {
+ /* Set RX buffer size */
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+ uint32_t rxpbsize = pbsize << 10;
+
+ for (i = 0; i < nb_tcs; i++) {
+ wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+ }
+ /* zero alloc all unused TCs */
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ wr32(hw, TXGBE_PBRXSIZE(i), 0);
+ }
+ }
+ if (config_dcb_tx) {
+ /* Only support an equally distributed
+ * Tx packet buffer strategy.
+ */
+ uint32_t txpktsize = TXGBE_PBTXSIZE_MAX / nb_tcs;
+ uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - TXGBE_TXPKT_SIZE_MAX;
+
+ for (i = 0; i < nb_tcs; i++) {
+ wr32(hw, TXGBE_PBTXSIZE(i), txpktsize);
+ wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
+ }
+ /* Clear unused TCs, if any, to zero buffer size*/
+ for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ wr32(hw, TXGBE_PBTXSIZE(i), 0);
+ wr32(hw, TXGBE_PBTXDMATH(i), 0);
+ }
+ }
+
+	/* Calculate traffic class credits */
+ txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+ TXGBE_DCB_TX_CONFIG);
+ txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+ TXGBE_DCB_RX_CONFIG);
+
+ if (config_dcb_rx) {
+ /* Unpack CEE standard containers */
+ txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_RX_CONFIG, refill);
+ txgbe_dcb_unpack_max_cee(dcb_config, max);
+ txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_RX_CONFIG, bwgid);
+ txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_RX_CONFIG, tsa);
+ /* Configure PG(ETS) RX */
+ txgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
+ }
+
+ if (config_dcb_tx) {
+ /* Unpack CEE standard containers */
+ txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_TX_CONFIG, refill);
+ txgbe_dcb_unpack_max_cee(dcb_config, max);
+ txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_TX_CONFIG, bwgid);
+ txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_TX_CONFIG, tsa);
+ /* Configure PG(ETS) TX */
+ txgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
+ }
+
+ /* Configure queue statistics registers */
+ txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
+
+ /* Check if the PFC is supported */
+ if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+ for (i = 0; i < nb_tcs; i++) {
+ /*
+			 * With 8 TCs, pbsize is 512 / 8 = 64, so
+			 * high_water defaults to 48 and low_water to 16.
+ */
+ hw->fc.high_water[i] = (pbsize * 3) / 4;
+ hw->fc.low_water[i] = pbsize / 4;
+ /* Enable pfc for this TC */
+ tc = &dcb_config->tc_config[i];
+ tc->pfc = txgbe_dcb_pfc_enabled;
+ }
+ txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+ if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+ pfc_en &= 0x0F;
+ ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
+ }
+
+ return ret;
+}
+
+void txgbe_configure_pb(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ int hdrm;
+ int tc = dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs;
+
+ /* Reserve 256KB(/512KB) rx buffer for fdir */
+ hdrm = 256; /*KB*/
+
+ hw->mac.setup_pba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
+}
+
+void txgbe_configure_port(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int i = 0;
+ uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
+ 0x9100, 0x9200,
+ 0x0000, 0x0000,
+ 0x0000, 0x0000};
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* default outer vlan tpid */
+ wr32(hw, TXGBE_EXTAG,
+ TXGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
+ TXGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
+
+ /* default inner vlan tpid */
+ wr32m(hw, TXGBE_VLANCTL,
+ TXGBE_VLANCTL_TPID_MASK,
+ TXGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
+ wr32m(hw, TXGBE_DMATXCTRL,
+ TXGBE_DMATXCTRL_TPID_MASK,
+ TXGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
+
+ /* default vlan tpid filters */
+ for (i = 0; i < 8; i++) {
+ wr32m(hw, TXGBE_TAGTPID(i/2),
+ (i % 2 ? TXGBE_TAGTPID_MSB_MASK
+ : TXGBE_TAGTPID_LSB_MASK),
+ (i % 2 ? TXGBE_TAGTPID_MSB(tpids[i])
+ : TXGBE_TAGTPID_LSB(tpids[i])));
+ }
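+	/*
+	 * Each TAGTPID register holds two TPIDs, e.g. TAGTPID(0) gets
+	 * 0x8100 (RTE_ETHER_TYPE_VLAN) in its LSB half and 0x88A8
+	 * (RTE_ETHER_TYPE_QINQ) in its MSB half.
+	 */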
+
+ /* default vxlan port */
+ wr32(hw, TXGBE_VXLANPORT, 4789);
+}
+
+/**
+ * txgbe_configure_dcb - Configure DCB Hardware
+ * @dev: pointer to rte_eth_dev
+ */
+void txgbe_configure_dcb(struct rte_eth_dev *dev)
+{
+ struct txgbe_dcb_config *dcb_cfg = TXGBE_DEV_DCB_CONFIG(dev);
+ struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* check support mq_mode for DCB */
+ if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
+ return;
+
+ if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+ return;
+
+ /** Configure DCB hardware **/
+ txgbe_dcb_hw_configure(dev, dcb_cfg);
+}
+
+/*
+ * VMDq is only supported on 10 GbE NICs.
+ */
+static void
+txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_vmdq_rx_conf *cfg;
+ struct txgbe_hw *hw;
+ enum rte_eth_nb_pools num_pools;
+ uint32_t mrqc, vt_ctl, vlanctrl;
+ uint32_t vmolr = 0;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+ cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+ num_pools = cfg->nb_queue_pools;
+
+ txgbe_rss_disable(dev);
+
+ /* enable vmdq */
+ mrqc = TXGBE_PORTCTL_NUMVT_64;
+ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mrqc);
+
+ /* turn on virtualisation and set the default pool */
+ vt_ctl = TXGBE_POOLCTL_RPLEN;
+ if (cfg->enable_default_pool)
+ vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
+ else
+ vt_ctl |= TXGBE_POOLCTL_DEFDSA;
+
+ wr32(hw, TXGBE_POOLCTL, vt_ctl);
+
+ for (i = 0; i < (int)num_pools; i++) {
+ vmolr = txgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+ wr32(hw, TXGBE_POOLETHCTL(i), vmolr);
+ }
+
+ /* enable vlan filtering and allow all vlan tags through */
+ vlanctrl = rd32(hw, TXGBE_VLANCTL);
+ vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+ wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+ /* enable all vlan filters */
+ for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+ wr32(hw, TXGBE_VLANTBL(i), UINT32_MAX);
+
+ /* pool enabling for receive - 64 */
+ wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
+ if (num_pools == ETH_64_POOLS)
+ wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
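+	/*
+	 * Each POOLRXENA register covers 32 pools: register 0 enables
+	 * Rx for pools 0-31 and register 1 for pools 32-63, so both
+	 * are written when 64 pools are configured.
+	 */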
+
+ /*
+ * allow pools to read specific mac addresses
+ * In this case, all pools should be able to read from mac addr 0
+ */
+ wr32(hw, TXGBE_ETHADDRIDX, 0);
+ wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
+ wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
+
+ /* set up filters for vlan tags as configured */
+ for (i = 0; i < cfg->nb_pool_maps; i++) {
+ /* set vlan id in VF register and set the valid bit */
+ wr32(hw, TXGBE_PSRVLANIDX, i);
+ wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
+ TXGBE_PSRVLAN_VID(cfg->pool_map[i].vlan_id)));
+		/*
+		 * Put the allowed pools in the pool-list map registers:
+		 * PSRVLANPLM(0) covers pools 0-31 and PSRVLANPLM(1)
+		 * covers pools 32-63.
+		 */
+ if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
+ wr32(hw, TXGBE_PSRVLANPLM(0),
+ (cfg->pool_map[i].pools & UINT32_MAX));
+ else
+ wr32(hw, TXGBE_PSRVLANPLM(1),
+ ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
+	}
+
+ /* Tx General Switch Control Enables VMDQ loopback */
+ if (cfg->enable_loop_back) {
+ wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
+ for (i = 0; i < 64; i++)
+ wr32m(hw, TXGBE_POOLETHCTL(i),
+ TXGBE_POOLETHCTL_LLB, TXGBE_POOLETHCTL_LLB);
+ }
+
+ txgbe_flush(hw);
+}
+
+/*
+ * txgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ */
+static void
+txgbe_vmdq_tx_hw_configure(struct txgbe_hw *hw)
+{
+ uint32_t reg;
+ uint32_t q;
+
+ PMD_INIT_FUNC_TRACE();
+	/* PF VF Transmit Enable */
+ wr32(hw, TXGBE_POOLTXENA(0), UINT32_MAX);
+ wr32(hw, TXGBE_POOLTXENA(1), UINT32_MAX);
+
+ /* Disable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg |= TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK,
+ TXGBE_PORTCTL_NUMVT_64);
+
+	/* Enable drop for all queues */
+	for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
+ u32 val = 1 << (q % 32);
+ wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+ }
+
+ /* Enable the Tx desc arbiter */
+ reg = rd32(hw, TXGBE_ARBTXCTL);
+ reg &= ~TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, reg);
+
+ txgbe_flush(hw);
+}
+
+static int __rte_cold
+txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
+{
+ struct txgbe_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned int i;
+
+ /* Initialize software ring entries */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile struct txgbe_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+				     (unsigned int)rxq->queue_id);
+ return -ENOMEM;
+ }
+
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxd = &rxq->rx_ring[i];
+ TXGBE_RXD_HDRADDR(rxd, 0);
+ TXGBE_RXD_PKTADDR(rxd, dma_addr);
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_config_vf_rss(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ uint32_t mrqc;
+
+ txgbe_rss_configure(dev);
+
+ hw = TXGBE_DEV_HW(dev);
+
+ /* enable VF RSS */
+ mrqc = rd32(hw, TXGBE_PORTCTL);
+ mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ mrqc |= TXGBE_PORTCTL_NUMVT_64;
+ break;
+
+ case ETH_32_POOLS:
+ mrqc |= TXGBE_PORTCTL_NUMVT_32;
+ break;
+
+ default:
+ PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
+ return -EINVAL;
+ }
+
+ wr32(hw, TXGBE_PORTCTL, mrqc);
+
+ return 0;
+}
+
+static int
+txgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t mrqc;
+
+ mrqc = rd32(hw, TXGBE_PORTCTL);
+ mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ mrqc |= TXGBE_PORTCTL_NUMVT_64;
+ break;
+
+ case ETH_32_POOLS:
+ mrqc |= TXGBE_PORTCTL_NUMVT_32;
+ break;
+
+ case ETH_16_POOLS:
+ mrqc |= TXGBE_PORTCTL_NUMVT_16;
+ break;
+ default:
+ PMD_INIT_LOG(ERR,
+ "invalid pool number in IOV mode");
+ return 0;
+ }
+
+ wr32(hw, TXGBE_PORTCTL, mrqc);
+
+ return 0;
+}
+
+static int
+txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ * any DCB/RSS w/o VMDq multi-queue setting
+ */
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_DCB_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ txgbe_rss_configure(dev);
+ break;
+
+ case ETH_MQ_RX_VMDQ_DCB:
+ txgbe_vmdq_dcb_configure(dev);
+ break;
+
+ case ETH_MQ_RX_VMDQ_ONLY:
+ txgbe_vmdq_rx_hw_configure(dev);
+ break;
+
+ case ETH_MQ_RX_NONE:
+ default:
+			/* if mq_mode is none, disable RSS mode. */
+ txgbe_rss_disable(dev);
+ break;
+ }
+ } else {
+ /* SRIOV active scheme
+ * Support RSS together with SRIOV.
+ */
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ txgbe_config_vf_rss(dev);
+ break;
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_DCB:
+ /* In SRIOV, the configuration is the same as VMDq case */
+ txgbe_vmdq_dcb_configure(dev);
+ break;
+ /* DCB/RSS together with SRIOV is not supported */
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case ETH_MQ_RX_DCB_RSS:
+ PMD_INIT_LOG(ERR,
+				"DCB/RSS together with VMDq & SRIOV is not supported");
+ return -1;
+ default:
+ txgbe_config_vf_default(dev);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t mtqc;
+ uint32_t rttdcs;
+
+ /* disable arbiter */
+ rttdcs = rd32(hw, TXGBE_ARBTXCTL);
+ rttdcs |= TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, rttdcs);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /*
+ * SRIOV inactive scheme
+ * any DCB w/o VMDq multi-queue setting
+ */
+		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+			txgbe_vmdq_tx_hw_configure(hw);
+		else
+			wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
+ } else {
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+
+ /*
+ * SRIOV active scheme
+ * FIXME if support DCB together with VMDq & SRIOV
+ */
+ case ETH_64_POOLS:
+ mtqc = TXGBE_PORTCTL_NUMVT_64;
+ break;
+ case ETH_32_POOLS:
+ mtqc = TXGBE_PORTCTL_NUMVT_32;
+ break;
+ case ETH_16_POOLS:
+ mtqc = TXGBE_PORTCTL_NUMVT_16;
+ break;
+ default:
+ mtqc = 0;
+ PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+ }
+ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mtqc);
+ }
+
+ /* re-enable arbiter */
+ rttdcs &= ~TXGBE_ARBTXCTL_DIA;
+ wr32(hw, TXGBE_ARBTXCTL, rttdcs);
+
+ return 0;
+}
+
+/**
+ * txgbe_get_rscctl_maxdesc
+ *
+ * @pool: Memory pool of the Rx queue
+ */
+static inline uint32_t
+txgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
+{
+ struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
+
+ uint16_t maxdesc =
+ RTE_IPV4_MAX_PKT_LEN /
+ (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
+
+ if (maxdesc >= 16)
+ return TXGBE_RXCFG_RSCMAX_16;
+ else if (maxdesc >= 8)
+ return TXGBE_RXCFG_RSCMAX_8;
+ else if (maxdesc >= 4)
+ return TXGBE_RXCFG_RSCMAX_4;
+ else
+ return TXGBE_RXCFG_RSCMAX_1;
+}
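+
+/*
+ * For example, a pool created with RTE_MBUF_DEFAULT_BUF_SIZE has a
+ * 2176B data room; after the default 128B headroom each buffer holds
+ * 2048B, so maxdesc = 65535 / 2048 = 31 and TXGBE_RXCFG_RSCMAX_16 is
+ * selected.
+ */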
+
+void __rte_cold
+txgbe_set_rx_function(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+ /*
+ * Initialize the appropriate LRO callback.
+ *
+ * If all queues satisfy the bulk allocation preconditions
+ * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use
+ * bulk allocation. Otherwise use a single allocation version.
+ */
+ if (dev->data->lro) {
+ if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
+ "allocation version");
+ dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
+ "allocation version");
+ dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
+ }
+ } else if (dev->data->scattered_rx) {
+ /*
+ * Set the non-LRO scattered callback: there are bulk and
+ * single allocation versions.
+ */
+ if (adapter->rx_bulk_alloc_allowed) {
+			PMD_INIT_LOG(DEBUG, "Using a Scattered Rx callback "
+					    "with bulk allocation "
+					    "(port=%d).",
+					    dev->data->port_id);
+			dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
+					    "single allocation) "
+					    "Scattered Rx callback "
+					    "(port=%d).",
+					    dev->data->port_id);
+
+ dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
+ }
+ /*
+ * Below we set "simple" callbacks according to port/queues parameters.
+ * If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = txgbe_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = txgbe_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
+#ifdef RTE_LIBRTE_SECURITY
+ rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY);
+#endif
+ }
+}
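+
+/*
+ * To summarize, txgbe_set_rx_function() selects the Rx burst callback
+ * as follows:
+ *   - LRO enabled:  txgbe_recv_pkts_lro_{bulk,single}_alloc
+ *   - scattered Rx: the same LRO callbacks, which also handle
+ *                   multi-segment packets
+ *   - otherwise:    txgbe_recv_pkts_bulk_alloc or txgbe_recv_pkts
+ * with the bulk variants chosen when adapter->rx_bulk_alloc_allowed
+ * is true.
+ */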
+
+/**
+ * txgbe_set_rsc - configure RSC related port HW registers
+ *
+ * Configures the port's RSC related registers.
+ *
+ * @dev: port handle
+ *
+ * Returns 0 in case of success or a non-zero error code
+ */
+static int
+txgbe_set_rsc(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_eth_dev_info dev_info = { 0 };
+ bool rsc_capable = false;
+ uint16_t i;
+ uint32_t rdrxctl;
+ uint32_t rfctl;
+
+ /* Sanity check */
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+ rsc_capable = true;
+
+ if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+ PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
+ "support it");
+ return -EINVAL;
+ }
+
+ /* RSC global configuration */
+
+ if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
+ (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
+		PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
+				   "stripping is disabled (KEEP_CRC)");
+ return -EINVAL;
+ }
+
+ rfctl = rd32(hw, TXGBE_PSRCTL);
+ if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ rfctl &= ~TXGBE_PSRCTL_RSCDIA;
+ else
+ rfctl |= TXGBE_PSRCTL_RSCDIA;
+ wr32(hw, TXGBE_PSRCTL, rfctl);
+
+ /* If LRO hasn't been requested - we are done here. */
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
+ return 0;
+
+ /* Set PSRCTL.RSCACK bit */
+ rdrxctl = rd32(hw, TXGBE_PSRCTL);
+ rdrxctl |= TXGBE_PSRCTL_RSCACK;
+ wr32(hw, TXGBE_PSRCTL, rdrxctl);
+
+ /* Per-queue RSC configuration */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
+ uint32_t srrctl =
+ rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ uint32_t psrtype =
+ rd32(hw, TXGBE_POOLRSS(rxq->reg_idx));
+ uint32_t eitr =
+ rd32(hw, TXGBE_ITR(rxq->reg_idx));
+
+ /*
+ * txgbe PMD doesn't support header-split at the moment.
+ */
+ srrctl &= ~TXGBE_RXCFG_HDRLEN_MASK;
+ srrctl |= TXGBE_RXCFG_HDRLEN(128);
+
+ /*
+ * TODO: Consider setting the Receive Descriptor Minimum
+		 * Threshold Size for an RSC case. This is not an obviously
+		 * beneficial option, but it may be worth considering...
+ */
+
+ srrctl |= TXGBE_RXCFG_RSCENA;
+ srrctl &= ~TXGBE_RXCFG_RSCMAX_MASK;
+ srrctl |= txgbe_get_rscctl_maxdesc(rxq->mb_pool);
+ psrtype |= TXGBE_POOLRSS_L4HDR;
+
+ /*
+ * RSC: Set ITR interval corresponding to 2K ints/s.
+ *
+		 * Full-sized RSC aggregations for a 10Gb/s link will
+		 * arrive at a rate of about 20K aggregations/s.
+		 *
+		 * A 2K ints/s rate means only about 10% of the
+		 * aggregations are closed by interrupt timer
+		 * expiration when streaming at wire speed.
+ *
+ * For a sparse streaming case this setting will yield
+ * at most 500us latency for a single RSC aggregation.
+ */
+ eitr &= ~TXGBE_ITR_IVAL_MASK;
+ eitr |= TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
+ eitr |= TXGBE_ITR_WRDSA;
+
+ wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
+ wr32(hw, TXGBE_POOLRSS(rxq->reg_idx), psrtype);
+ wr32(hw, TXGBE_ITR(rxq->reg_idx), eitr);
+
+ /*
+ * RSC requires the mapping of the queue to the
+ * interrupt vector.
+ */
+ txgbe_set_ivar_map(hw, 0, rxq->reg_idx, i);
+ }
+
+ dev->data->lro = 1;
+
+ PMD_INIT_LOG(DEBUG, "enabling LRO mode");
+
+ return 0;
+}
+
+/*
+ * Initializes Receive Unit.
+ */
+int __rte_cold
+txgbe_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_rx_queue *rxq;
+ uint64_t bus_addr;
+ uint32_t fctrl;
+ uint32_t hlreg0;
+ uint32_t srrctl;
+ uint32_t rdrxctl;
+ uint32_t rxcsum;
+ uint16_t buf_size;
+ uint16_t i;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ /*
+ * Make sure receives are disabled while setting
+ * up the RX context (registers, descriptor rings, etc.).
+ */
+ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
+ wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
+
+	/* Enable receipt of broadcast frames */
+ fctrl = rd32(hw, TXGBE_PSRCTL);
+ fctrl |= TXGBE_PSRCTL_BCA;
+ wr32(hw, TXGBE_PSRCTL, fctrl);
+
+ /*
+ * Configure CRC stripping, if any.
+ */
+ hlreg0 = rd32(hw, TXGBE_SECRXCTL);
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
+ else
+ hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
+ wr32(hw, TXGBE_SECRXCTL, hlreg0);
+
+ /*
+ * Configure jumbo frame support, if any.
+ */
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
+ } else {
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
+ }
+
+ /*
+ * If loopback mode is configured, set LPBK bit.
+ */
+ hlreg0 = rd32(hw, TXGBE_PSRCTL);
+ if (hw->mac.type == txgbe_mac_raptor &&
+ dev->data->dev_conf.lpbk_mode)
+ hlreg0 |= TXGBE_PSRCTL_LBENA;
+ else
+ hlreg0 &= ~TXGBE_PSRCTL_LBENA;
+
+ wr32(hw, TXGBE_PSRCTL, hlreg0);
+
+ /*
+ * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+ */
+ rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ /* Setup RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ /*
+ * Reset crc_len in case it was changed after queue setup by a
+ * call to configure.
+ */
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+ /* Setup the Base and Length of the Rx Descriptor Rings */
+ bus_addr = rxq->rx_ring_phys_addr;
+ wr32(hw, TXGBE_RXBAL(rxq->reg_idx),
+ (uint32_t)(bus_addr & BIT_MASK32));
+ wr32(hw, TXGBE_RXBAH(rxq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
+ wr32(hw, TXGBE_RXWP(rxq->reg_idx), 0);
+
+ srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= TXGBE_RXCFG_DROP;
+
+ /*
+ * Configure the RX buffer size in the PKTLEN field of
+ * the RXCFG register of the queue.
+ * The value is in 1 KB resolution. Valid values can be from
+ * 1 KB to 16 KB.
+ */
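+		/*
+		 * e.g. a pool created with RTE_MBUF_DEFAULT_BUF_SIZE
+		 * (2176B data room, 128B default headroom) yields a
+		 * 2048B buffer, which ROUND_UP keeps at 2048 and the
+		 * PKTLEN field encodes at its 1 KB resolution.
+		 */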
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
+ buf_size = ROUND_UP(buf_size, 0x1 << 10);
+ srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
+
+ wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
+
+		/* Account for two VLAN tags when checking against buf_size */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * TXGBE_VLAN_TAG_SIZE > buf_size)
+ dev->data->scattered_rx = 1;
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+ dev->data->scattered_rx = 1;
+
+ /*
+ * Device configured with multiple RX queues.
+ */
+ txgbe_dev_mq_rx_configure(dev);
+
+ /*
+ * Setup the Checksum Register.
+ * Disable Full-Packet Checksum which is mutually exclusive with RSS.
+	 * Enable IP/L4 checksum computation by hardware if requested to do so.
+ */
+ rxcsum = rd32(hw, TXGBE_PSRCTL);
+ rxcsum |= TXGBE_PSRCTL_PCSD;
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ rxcsum |= TXGBE_PSRCTL_L4CSUM;
+ else
+ rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
+
+ wr32(hw, TXGBE_PSRCTL, rxcsum);
+
+ if (hw->mac.type == txgbe_mac_raptor) {
+ rdrxctl = rd32(hw, TXGBE_SECRXCTL);
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
+ else
+ rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
+ wr32(hw, TXGBE_SECRXCTL, rdrxctl);
+ }
+
+ rc = txgbe_set_rsc(dev);
+ if (rc)
+ return rc;
+
+ txgbe_set_rx_function(dev);
+
+ return 0;
+}
+
+/*
+ * Initializes Transmit Unit.
+ */
+void __rte_cold
+txgbe_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_tx_queue *txq;
+ uint64_t bus_addr;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ bus_addr = txq->tx_ring_phys_addr;
+ wr32(hw, TXGBE_TXBAL(txq->reg_idx),
+ (uint32_t)(bus_addr & BIT_MASK32));
+ wr32(hw, TXGBE_TXBAH(txq->reg_idx),
+ (uint32_t)(bus_addr >> 32));
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_BUFLEN_MASK,
+ TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
+ /* Setup the HW Tx Head and TX Tail descriptor pointers */
+ wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
+ wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
+ }
+
+ /* Device configured with multiple TX queues. */
+ txgbe_dev_mq_tx_configure(dev);
+}
+
+/*
+ * Set up link loopback mode Tx->Rx.
+ */
+static inline void __rte_cold
+txgbe_setup_loopback_link_raptor(struct txgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_LB, TXGBE_MACRXCFG_LB);
+
+ msec_delay(50);
+}
+
+/*
+ * Start Transmit and Receive Units.
+ */
+int __rte_cold
+txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_tx_queue *txq;
+ struct txgbe_rx_queue *rxq;
+ uint32_t dmatxctl;
+ uint32_t rxctrl;
+ uint16_t i;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Setup Transmit Threshold Registers */
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx),
+ TXGBE_TXCFG_HTHRESH_MASK |
+ TXGBE_TXCFG_WTHRESH_MASK,
+ TXGBE_TXCFG_HTHRESH(txq->hthresh) |
+ TXGBE_TXCFG_WTHRESH(txq->wthresh));
+ }
+
+ dmatxctl = rd32(hw, TXGBE_DMATXCTRL);
+ dmatxctl |= TXGBE_DMATXCTRL_ENA;
+ wr32(hw, TXGBE_DMATXCTRL, dmatxctl);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq->tx_deferred_start) {
+ ret = txgbe_dev_tx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq->rx_deferred_start) {
+ ret = txgbe_dev_rx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Enable Receive engine */
+ rxctrl = rd32(hw, TXGBE_PBRXCTL);
+ rxctrl |= TXGBE_PBRXCTL_ENA;
+ hw->mac.enable_rx_dma(hw, rxctrl);
+
+ /* If loopback mode is enabled, set up the link accordingly */
+ if (hw->mac.type == txgbe_mac_raptor &&
+ dev->data->dev_conf.lpbk_mode)
+ txgbe_setup_loopback_link_raptor(hw);
+
+#ifdef RTE_LIBRTE_SECURITY
+ if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
+ (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+ ret = txgbe_crypto_enable_ipsec(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "txgbe_crypto_enable_ipsec fails with %d.",
+ ret);
+ return ret;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+void
+txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+ u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+ *(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
+ *(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
+ *(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
+}
+
+void
+txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+ u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+ wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
+ wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
+ wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
+}
+
+void
+txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+ u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+ *(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
+ *(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
+ *(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
+}
+
+void
+txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+ u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+ wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
+ wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
+ wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ /* Allocate buffers for descriptor rings */
+ if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+ rx_queue_id);
+ return -1;
+ }
+ rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ rxdctl |= TXGBE_RXCFG_ENA;
+ wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+ rte_wmb();
+ wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
+ wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ struct txgbe_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
+ wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
+
+ /* Wait until RX Enable bit clear */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ } while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
+
+ rte_delay_us(RTE_TXGBE_WAIT_100_US);
+ txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
+
+ txgbe_rx_queue_release_mbufs(rxq);
+ txgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_tx_queue *txq;
+ uint32_t txdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
+
+ /* Wait until TX Enable ready */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
+ if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+			     tx_queue_id);
+
+ rte_wmb();
+ wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_tx_queue *txq;
+ uint32_t txdctl;
+ uint32_t txtdh, txtdt;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /* Wait until TX queue is empty */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_us(RTE_TXGBE_WAIT_100_US);
+ txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
+ txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
+ } while (--poll_ms && (txtdh != txtdt));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR,
+ "Tx Queue %d is not empty when stopping.",
+ tx_queue_id);
+
+ txgbe_dev_save_tx_queue(hw, txq->reg_idx);
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
+
+ /* Wait until TX Enable bit clear */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+ } while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+ tx_queue_id);
+
+ rte_delay_us(RTE_TXGBE_WAIT_100_US);
+ txgbe_dev_store_tx_queue(hw, txq->reg_idx);
+
+ if (txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+void
+txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct txgbe_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct txgbe_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+/*
+ * [VF] Initializes Receive Unit.
+ */
+int __rte_cold
+txgbevf_dev_rx_init(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ uint64_t bus_addr;
+ uint32_t srrctl, psrtype;
+ uint16_t buf_size;
+ uint16_t i;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
+		PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
+			"it must be a power of 2");
+ return -1;
+ }
+
+ if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
+		PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
+			"it must be equal to or less than %d",
+ hw->mac.max_rx_queues);
+ return -1;
+ }
+
+ /*
+ * When the VF driver issues a TXGBE_VF_RESET request, the PF driver
+ * disables the VF receipt of packets if the PF MTU is > 1500.
+	 * This is done to deal with a limitation that requires the PF
+	 * and all VFs to share the same MTU.
+	 * Then, the PF driver re-enables VF packet receipt when the VF
+	 * driver issues a TXGBE_VF_SET_LPE request.
+ * In the meantime, the VF device cannot be used, even if the VF driver
+ * and the Guest VM network stack are ready to accept packets with a
+ * size up to the PF MTU.
+ * As a work-around to this PF behaviour, force the call to
+ * txgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
+ * VF packets received can work in all cases.
+ */
+ txgbevf_rlpml_set_vf(hw,
+ (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ /*
+ * Assume no header split and no VLAN strip support
+	 * on any Rx queue first.
+ */
+ rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ /* Set PSR type for VF RSS according to max Rx queue */
+ psrtype = TXGBE_VFPLCFG_PSRL4HDR |
+ TXGBE_VFPLCFG_PSRL2HDR |
+ TXGBE_VFPLCFG_PSRTUNHDR |
+ TXGBE_VFPLCFG_PSRTUNMAC;
+ wr32(hw, TXGBE_VFPLCFG, TXGBE_VFPLCFG_PSR(psrtype));
+
+ /* Setup RX queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ /* Allocate buffers for descriptor rings */
+ ret = txgbe_alloc_rx_queue_mbufs(rxq);
+ if (ret)
+ return ret;
+
+ /* Setup the Base and Length of the Rx Descriptor Rings */
+ bus_addr = rxq->rx_ring_phys_addr;
+
+ wr32(hw, TXGBE_RXBAL(i),
+ (uint32_t)(bus_addr & BIT_MASK32));
+ wr32(hw, TXGBE_RXBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ wr32(hw, TXGBE_RXRP(i), 0);
+ wr32(hw, TXGBE_RXWP(i), 0);
+
+ /* Configure the RXCFG register */
+ srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
+
+ /* Set if packets are dropped when no descriptors available */
+ if (rxq->drop_en)
+ srrctl |= TXGBE_RXCFG_DROP;
+
+ /*
+ * Configure the RX buffer size in the PKTLEN field of
+ * the RXCFG register of the queue.
+ * The value is in 1 KB resolution. Valid values can be from
+ * 1 KB to 16 KB.
+ */
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
+ buf_size = ROUND_UP(buf_size, 1 << 10);
+ srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
+
+ /*
+ * VF modification to write virtual function RXCFG register
+ */
+ wr32(hw, TXGBE_RXCFG(i), srrctl);
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
+ /* It adds dual VLAN length for supporting dual VLAN */
+ (rxmode->max_rx_pkt_len +
+ 2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ dev->data->scattered_rx = 1;
+ }
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+
+ /*
+ * Device configured with multiple RX queues.
+ */
+ txgbe_dev_mq_rx_configure(dev);
+
+ txgbe_set_rx_function(dev);
+
+ return 0;
+}
+
+/*
+ * [VF] Initializes Transmit Unit.
+ */
+void __rte_cold
+txgbevf_dev_tx_init(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_tx_queue *txq;
+ uint64_t bus_addr;
+ uint16_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ /* Setup the Base and Length of the Tx Descriptor Rings */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ bus_addr = txq->tx_ring_phys_addr;
+ wr32(hw, TXGBE_TXBAL(i),
+ (uint32_t)(bus_addr & BIT_MASK32));
+ wr32(hw, TXGBE_TXBAH(i),
+ (uint32_t)(bus_addr >> 32));
+ wr32m(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_BUFLEN_MASK,
+ TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
+ /* Setup the HW Tx Head and TX Tail descriptor pointers */
+ wr32(hw, TXGBE_TXRP(i), 0);
+ wr32(hw, TXGBE_TXWP(i), 0);
+ }
+}
+
+/*
+ * [VF] Start Transmit and Receive Units.
+ */
+void __rte_cold
+txgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_tx_queue *txq;
+ struct txgbe_rx_queue *rxq;
+ uint32_t txdctl;
+ uint32_t rxdctl;
+ uint16_t i;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Setup Transmit Threshold Registers */
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx),
+ TXGBE_TXCFG_HTHRESH_MASK |
+ TXGBE_TXCFG_WTHRESH_MASK,
+ TXGBE_TXCFG_HTHRESH(txq->hthresh) |
+ TXGBE_TXCFG_WTHRESH(txq->wthresh));
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+
+ wr32m(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
+
+ poll_ms = 10;
+ /* Wait until TX Enable ready */
+ do {
+ rte_delay_ms(1);
+ txdctl = rd32(hw, TXGBE_TXCFG(i));
+ } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+
+ rxq = dev->data->rx_queues[i];
+
+ wr32m(hw, TXGBE_RXCFG(i), TXGBE_RXCFG_ENA, TXGBE_RXCFG_ENA);
+
+ /* Wait until RX Enable ready */
+ poll_ms = 10;
+ do {
+ rte_delay_ms(1);
+ rxdctl = rd32(hw, TXGBE_RXCFG(i));
+ } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+ rte_wmb();
+ wr32(hw, TXGBE_RXWP(i), rxq->nb_rx_desc - 1);
+
+ }
+}
+
+int
+txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
+int
+txgbe_config_rss_filter(struct rte_eth_dev *dev,
+ struct txgbe_rte_flow_rss_conf *conf, bool add)
+{
+ struct txgbe_hw *hw;
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ if (!add) {
+ if (txgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
+ txgbe_rss_disable(dev);
+ memset(&filter_info->rss_info, 0,
+ sizeof(struct txgbe_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (filter_info->rss_info.conf.queue_num)
+ return -EINVAL;
+ /* Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
+ reta = 0;
+ for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+ if (j == conf->conf.queue_num)
+ j = 0;
+ reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
+ if ((i & 3) == 3)
+ wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+ }
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ if ((rss_conf.rss_hf & TXGBE_RSS_OFFLOAD_ALL) == 0) {
+ txgbe_rss_disable(dev);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ txgbe_dev_rss_hash_update(dev, &rss_conf);
+
+ if (txgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
+
+ return 0;
+}
+
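For reference, the RSS filter path above (txgbe_rss_conf_init() and
txgbe_config_rss_filter()) is exercised through the generic flow API.
Below is a minimal usage sketch, not part of the patch; the queue list,
hash types and helper name are illustrative assumptions:

    #include <rte_common.h>
    #include <rte_errno.h>
    #include <rte_flow.h>

    /* Hypothetical helper: spread all ingress traffic over queues 0-3
     * with an RSS action, which the PMD maps onto its 128-entry RETA.
     * Leaving key/key_len at zero selects the driver's default key.
     */
    static int
    setup_rss_flow(uint16_t port_id)
    {
    	static const uint16_t queues[] = { 0, 1, 2, 3 };
    	struct rte_flow_attr attr = { .ingress = 1 };
    	struct rte_flow_item pattern[] = {
    		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
    		{ .type = RTE_FLOW_ITEM_TYPE_END },
    	};
    	struct rte_flow_action_rss rss = {
    		.types = ETH_RSS_IP | ETH_RSS_TCP,
    		.queue_num = RTE_DIM(queues),
    		.queue = queues,
    	};
    	struct rte_flow_action actions[] = {
    		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
    		{ .type = RTE_FLOW_ACTION_TYPE_END },
    	};
    	struct rte_flow_error err;

    	if (rte_flow_create(port_id, &attr, pattern, actions, &err) == NULL)
    		return -rte_errno;
    	return 0;
    }
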
new file mode 100644
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_RXTX_H_
+#define _TXGBE_RXTX_H_
+
+/*****************************************************************************
+ * Receive Descriptor
+ *****************************************************************************/
+struct txgbe_rx_desc {
+ struct {
+ union {
+ __le32 dw0;
+ struct {
+ __le16 pkt;
+ __le16 hdr;
+ } lo;
+ };
+ union {
+ __le32 dw1;
+ struct {
+ __le16 ipid;
+ __le16 csum;
+ } hi;
+ };
+ } qw0; /* also as r.pkt_addr */
+ struct {
+ union {
+ __le32 dw2;
+ struct {
+ __le32 status;
+ } lo;
+ };
+ union {
+ __le32 dw3;
+ struct {
+ __le16 len;
+ __le16 tag;
+ } hi;
+ };
+ } qw1; /* also as r.hdr_addr */
+};
+
+/* @txgbe_rx_desc.qw0 */
+#define TXGBE_RXD_PKTADDR(rxd, v) \
+ (((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))
+
+/* @txgbe_rx_desc.qw1 */
+#define TXGBE_RXD_HDRADDR(rxd, v) \
+ (((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
+
+/* @txgbe_rx_desc.dw0 */
+#define TXGBE_RXD_RSSTYPE(dw) RS(dw, 0, 0xF)
+#define TXGBE_RSSTYPE_NONE 0
+#define TXGBE_RSSTYPE_IPV4TCP 1
+#define TXGBE_RSSTYPE_IPV4 2
+#define TXGBE_RSSTYPE_IPV6TCP 3
+#define TXGBE_RSSTYPE_IPV4SCTP 4
+#define TXGBE_RSSTYPE_IPV6 5
+#define TXGBE_RSSTYPE_IPV6SCTP 6
+#define TXGBE_RSSTYPE_IPV4UDP 7
+#define TXGBE_RSSTYPE_IPV6UDP 8
+#define TXGBE_RSSTYPE_FDIR 15
+#define TXGBE_RXD_SECTYPE(dw) RS(dw, 4, 0x3)
+#define TXGBE_RXD_SECTYPE_NONE LS(0, 4, 0x3)
+#define TXGBE_RXD_SECTYPE_LINKSEC LS(1, 4, 0x3)
+#define TXGBE_RXD_SECTYPE_IPSECESP LS(2, 4, 0x3)
+#define TXGBE_RXD_SECTYPE_IPSECAH LS(3, 4, 0x3)
+#define TXGBE_RXD_TPIDSEL(dw) RS(dw, 6, 0x7)
+#define TXGBE_RXD_PTID(dw) RS(dw, 9, 0xFF)
+#define TXGBE_RXD_RSCCNT(dw) RS(dw, 17, 0xF)
+#define TXGBE_RXD_HDRLEN(dw) RS(dw, 21, 0x3FF)
+#define TXGBE_RXD_SPH MS(31, 0x1)
+
+/* @txgbe_rx_desc.dw1 */
+/** bit 0-31, as RSS hash when packet class is RSS **/
+#define TXGBE_RXD_RSSHASH(rxd) ((rxd)->qw0.dw1)
+
+/** bit 0-31, as IP ID and csum when not used as RSS hash or FDIR ID **/
+#define TXGBE_RXD_IPID(rxd) ((rxd)->qw0.hi.ipid)
+#define TXGBE_RXD_CSUM(rxd) ((rxd)->qw0.hi.csum)
+
+/** bit 0-31, as FDIR ID when packet class is FDir match **/
+#define TXGBE_RXD_FDIRID(rxd) ((rxd)->qw0.dw1)
+
+/* @txgbe_rx_desc.dw2 */
+#define TXGBE_RXD_STATUS(rxd) ((rxd)->qw1.lo.status)
+/** bit 0-1 **/
+#define TXGBE_RXD_STAT_DD MS(0, 0x1) /* Descriptor Done */
+#define TXGBE_RXD_STAT_EOP MS(1, 0x1) /* End of Packet */
+/** bit 2-31, when EOP=0 **/
+#define TXGBE_RXD_NEXTP_RESV(v) LS(v, 2, 0x3)
+#define TXGBE_RXD_NEXTP(dw) RS(dw, 4, 0xFFFF) /* Next Descriptor */
+/** bit 2-31, when EOP=1 **/
+#define TXGBE_RXD_PKT_CLS_MASK MS(2, 0x7) /* Packet Class */
+#define TXGBE_RXD_PKT_CLS_TC_RSS LS(0, 2, 0x7) /* RSS Hash */
+#define TXGBE_RXD_PKT_CLS_FLM LS(1, 2, 0x7) /* FDir Match */
+#define TXGBE_RXD_PKT_CLS_SYN LS(2, 2, 0x7) /* TCP Sync */
+#define TXGBE_RXD_PKT_CLS_5TUPLE LS(3, 2, 0x7) /* 5 Tuple */
+#define TXGBE_RXD_PKT_CLS_ETF LS(4, 2, 0x7) /* Ethertype Filter */
+#define TXGBE_RXD_STAT_VLAN MS(5, 0x1) /* IEEE VLAN Packet */
+#define TXGBE_RXD_STAT_UDPCS MS(6, 0x1) /* UDP xsum calculated */
+#define TXGBE_RXD_STAT_L4CS MS(7, 0x1) /* L4 xsum calculated */
+#define TXGBE_RXD_STAT_IPCS MS(8, 0x1) /* IP xsum calculated */
+#define TXGBE_RXD_STAT_PIF MS(9, 0x1) /* Non-unicast address */
+#define TXGBE_RXD_STAT_EIPCS MS(10, 0x1) /* Encap IP xsum calculated */
+#define TXGBE_RXD_STAT_VEXT MS(11, 0x1) /* Multi-VLAN */
+#define TXGBE_RXD_STAT_IPV6EX MS(12, 0x1) /* IPv6 with option header */
+#define TXGBE_RXD_STAT_LLINT MS(13, 0x1) /* Pkt caused LLI */
+#define TXGBE_RXD_STAT_1588 MS(14, 0x1) /* IEEE1588 Time Stamp */
+#define TXGBE_RXD_STAT_SECP MS(15, 0x1) /* Security Processing */
+#define TXGBE_RXD_STAT_LB MS(16, 0x1) /* Loopback Status */
+/*** bit 17-30, when PTYPE=IP ***/
+#define TXGBE_RXD_STAT_BMC MS(17, 0x1) /* PTYPE=IP, BMC status */
+#define TXGBE_RXD_ERR_FDIR_LEN MS(20, 0x1) /* FDIR Length error */
+#define TXGBE_RXD_ERR_FDIR_DROP MS(21, 0x1) /* FDIR Drop error */
+#define TXGBE_RXD_ERR_FDIR_COLL MS(22, 0x1) /* FDIR Collision error */
+#define TXGBE_RXD_ERR_HBO MS(23, 0x1) /* Header Buffer Overflow */
+#define TXGBE_RXD_ERR_EIPCS MS(26, 0x1) /* Encap IP header error */
+#define TXGBE_RXD_ERR_SECERR MS(27, 0x1) /* macsec or ipsec error */
+#define TXGBE_RXD_ERR_RXE MS(29, 0x1) /* Any MAC Error */
+#define TXGBE_RXD_ERR_L4CS MS(30, 0x1) /* TCP/UDP xsum error */
+#define TXGBE_RXD_ERR_IPCS MS(31, 0x1) /* IP xsum error */
+#define TXGBE_RXD_ERR_CSUM(dw) RS(dw, 30, 0x3)
+/*** bit 17-30, when PTYPE=FCOE ***/
+#define TXGBE_RXD_STAT_FCOEFS MS(17, 0x1) /* PTYPE=FCOE, FCoE EOF/SOF */
+#define TXGBE_RXD_FCSTAT_MASK MS(18, 0x3) /* FCoE Pkt Stat */
+#define TXGBE_RXD_FCSTAT_NOMTCH LS(0, 18, 0x3) /* No Ctxt Match */
+#define TXGBE_RXD_FCSTAT_NODDP LS(1, 18, 0x3) /* Ctxt w/o DDP */
+#define TXGBE_RXD_FCSTAT_FCPRSP LS(2, 18, 0x3) /* Recv. FCP_RSP */
+#define TXGBE_RXD_FCSTAT_DDP LS(3, 18, 0x3) /* Ctxt w/ DDP */
+#define TXGBE_RXD_FCERR_MASK MS(20, 0x7) /* FCERR */
+#define TXGBE_RXD_FCERR_0 LS(0, 20, 0x7)
+#define TXGBE_RXD_FCERR_1 LS(1, 20, 0x7)
+#define TXGBE_RXD_FCERR_2 LS(2, 20, 0x7)
+#define TXGBE_RXD_FCERR_3 LS(3, 20, 0x7)
+#define TXGBE_RXD_FCERR_4 LS(4, 20, 0x7)
+#define TXGBE_RXD_FCERR_5 LS(5, 20, 0x7)
+#define TXGBE_RXD_FCERR_6 LS(6, 20, 0x7)
+#define TXGBE_RXD_FCERR_7 LS(7, 20, 0x7)
+
+/* @txgbe_rx_desc.dw3 */
+#define TXGBE_RXD_LENGTH(rxd) ((rxd)->qw1.hi.len)
+#define TXGBE_RXD_VLAN(rxd) ((rxd)->qw1.hi.tag)
+
+/******************************************************************************
+ * Transmit Descriptor
+ ******************************************************************************/
+/**
+ * Transmit Context Descriptor (TXGBE_TXD_TYP=CTXT)
+ **/
+struct txgbe_tx_ctx_desc {
+ __le32 dw0; /* w.vlan_macip_lens */
+ __le32 dw1; /* w.seqnum_seed */
+ __le32 dw2; /* w.type_tucmd_mlhl */
+ __le32 dw3; /* w.mss_l4len_idx */
+};
+
+/* @txgbe_tx_ctx_desc.dw0 */
+#define TXGBE_TXD_IPLEN(v) LS(v, 0, 0x1FF) /* ip/fcoe header end */
+#define TXGBE_TXD_MACLEN(v) LS(v, 9, 0x7F) /* desc mac len */
+#define TXGBE_TXD_VLAN(v) LS(v, 16, 0xFFFF) /* vlan tag */
+
+/* @txgbe_tx_ctx_desc.dw1 */
+/*** bit 0-31, when TXGBE_TXD_DTYP_FCOE=0 ***/
+#define TXGBE_TXD_IPSEC_SAIDX(v) LS(v, 0, 0x3FF) /* ipsec SA index */
+#define TXGBE_TXD_ETYPE(v) LS(v, 11, 0x1) /* tunnel type */
+#define TXGBE_TXD_ETYPE_UDP LS(0, 11, 0x1)
+#define TXGBE_TXD_ETYPE_GRE LS(1, 11, 0x1)
+#define TXGBE_TXD_EIPLEN(v) LS(v, 12, 0x7F) /* tunnel ip header */
+#define TXGBE_TXD_DTYP_FCOE MS(16, 0x1) /* FCoE/IP descriptor */
+#define TXGBE_TXD_ETUNLEN(v) LS(v, 21, 0xFF) /* tunnel header */
+#define TXGBE_TXD_DECTTL(v) LS(v, 29, 0xF) /* decrease ip TTL */
+/*** bit 0-31, when TXGBE_TXD_DTYP_FCOE=1 ***/
+#define TXGBE_TXD_FCOEF_EOF_MASK MS(10, 0x3) /* FC EOF index */
+#define TXGBE_TXD_FCOEF_EOF_N LS(0, 10, 0x3) /* EOFn */
+#define TXGBE_TXD_FCOEF_EOF_T LS(1, 10, 0x3) /* EOFt */
+#define TXGBE_TXD_FCOEF_EOF_NI LS(2, 10, 0x3) /* EOFni */
+#define TXGBE_TXD_FCOEF_EOF_A LS(3, 10, 0x3) /* EOFa */
+#define TXGBE_TXD_FCOEF_SOF MS(12, 0x1) /* FC SOF index */
+#define TXGBE_TXD_FCOEF_PARINC MS(13, 0x1) /* Rel_Off in F_CTL */
+#define TXGBE_TXD_FCOEF_ORIE MS(14, 0x1) /* orientation end */
+#define TXGBE_TXD_FCOEF_ORIS MS(15, 0x1) /* orientation start */
+
+/* @txgbe_tx_ctx_desc.dw2 */
+#define TXGBE_TXD_IPSEC_ESPLEN(v) LS(v, 1, 0x1FF) /* ipsec ESP length */
+#define TXGBE_TXD_SNAP MS(10, 0x1) /* SNAP indication */
+#define TXGBE_TXD_TPID_SEL(v) LS(v, 11, 0x7) /* vlan tag index */
+#define TXGBE_TXD_IPSEC_ESP MS(14, 0x1) /* ipsec type: esp=1 ah=0 */
+#define TXGBE_TXD_IPSEC_ESPENC MS(15, 0x1) /* ESP encrypt */
+#define TXGBE_TXD_CTXT MS(20, 0x1) /* context descriptor */
+#define TXGBE_TXD_PTID(v) LS(v, 24, 0xFF) /* packet type */
+/* @txgbe_tx_ctx_desc.dw3 */
+#define TXGBE_TXD_DD MS(0, 0x1) /* descriptor done */
+#define TXGBE_TXD_IDX(v) LS(v, 4, 0x1) /* ctxt desc index */
+#define TXGBE_TXD_L4LEN(v) LS(v, 8, 0xFF) /* l4 header length */
+#define TXGBE_TXD_MSS(v) LS(v, 16, 0xFFFF) /* l4 MSS */
+
+/**
+ * Transmit Data Descriptor (TXGBE_TXD_TYP=DATA)
+ **/
+struct txgbe_tx_desc {
+ __le64 qw0; /* r.buffer_addr , w.reserved */
+ __le32 dw2; /* r.cmd_type_len, w.nxtseq_seed */
+ __le32 dw3; /* r.olinfo_status, w.status */
+};
+/* @txgbe_tx_desc.qw0 */
+
+/* @txgbe_tx_desc.dw2 */
+#define TXGBE_TXD_DATLEN(v) ((0xFFFF & (v))) /* data buffer length */
+#define TXGBE_TXD_1588 ((0x1) << 19) /* IEEE1588 time stamp */
+#define TXGBE_TXD_DATA ((0x0) << 20) /* data descriptor */
+#define TXGBE_TXD_EOP ((0x1) << 24) /* End of Packet */
+#define TXGBE_TXD_FCS ((0x1) << 25) /* Insert FCS */
+#define TXGBE_TXD_LINKSEC ((0x1) << 26) /* Insert LinkSec */
+#define TXGBE_TXD_ECU ((0x1) << 28) /* forward to ECU */
+#define TXGBE_TXD_CNTAG ((0x1) << 29) /* insert CN tag */
+#define TXGBE_TXD_VLE ((0x1) << 30) /* insert VLAN tag */
+#define TXGBE_TXD_TSE ((0x1) << 31) /* transmit segmentation */
+
+#define TXGBE_TXD_FLAGS (TXGBE_TXD_FCS | TXGBE_TXD_EOP)
+
+/* @txgbe_tx_desc.dw3 */
+#define TXGBE_TXD_DD_UNUSED TXGBE_TXD_DD
+#define TXGBE_TXD_IDX_UNUSED(v) TXGBE_TXD_IDX(v)
+#define TXGBE_TXD_CC ((0x1) << 7) /* check context */
+#define TXGBE_TXD_IPSEC ((0x1) << 8) /* request ipsec offload */
+#define TXGBE_TXD_L4CS ((0x1) << 9) /* insert TCP/UDP/SCTP csum */
+#define TXGBE_TXD_IPCS ((0x1) << 10) /* insert IPv4 csum */
+#define TXGBE_TXD_EIPCS ((0x1) << 11) /* insert outer IP csum */
+#define TXGBE_TXD_MNGFLT ((0x1) << 12) /* enable management filter */
+#define TXGBE_TXD_PAYLEN(v) ((0x7FFFF & (v)) << 13) /* payload length */
+
+#define RTE_PMD_TXGBE_TX_MAX_BURST 32
+#define RTE_PMD_TXGBE_RX_MAX_BURST 32
+#define RTE_TXGBE_TX_MAX_FREE_BUF_SZ 64
+
+#define RTE_TXGBE_DESCS_PER_LOOP 4
+
+#define RX_RING_SZ ((TXGBE_RING_DESC_MAX + RTE_PMD_TXGBE_RX_MAX_BURST) * \
+ sizeof(struct txgbe_rx_desc))
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while (0)
+#endif
+
+#define RTE_TXGBE_REGISTER_POLL_WAIT_10_MS 10
+#define RTE_TXGBE_WAIT_100_US 100
+
+#define TXGBE_TX_MAX_SEG 40
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct txgbe_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+struct txgbe_scattered_rx_entry {
+ struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct txgbe_tx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+ uint16_t next_id; /**< Index of next descriptor in ring. */
+ uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct txgbe_tx_entry_v {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct txgbe_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ volatile struct txgbe_rx_desc *rx_ring; /**< RX ring virtual address. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
+ volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
+ struct txgbe_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct txgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint64_t mbuf_initializer; /**< value to init mbufs */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t rx_tail; /**< current value of RDT register. */
+ uint16_t nb_rx_hold; /**< number of held free RX desc. */
+ uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
+ uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
+ uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t using_ipsec;
+ /**< indicates that IPsec RX feature is in use */
+#endif
+ uint16_t rx_free_thresh; /**< max free RX desc to hold. */
+ uint16_t queue_id; /**< RX queue index. */
+ uint16_t reg_idx; /**< RX queue register index. */
+ uint16_t pkt_type_mask; /**< Packet type mask for different NICs. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+ uint8_t rx_deferred_start; /**< not in global dev start. */
+ /** flags to set in mbuf when a vlan is detected. */
+ uint64_t vlan_flags;
+ uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+ /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
+ struct rte_mbuf fake_mbuf;
+ /** hold packets to return to application */
+ struct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST * 2];
+};
+
+/**
+ * TXGBE CTX Constants
+ */
+enum txgbe_ctx_num {
+ TXGBE_CTX_0 = 0, /**< CTX0 */
+ TXGBE_CTX_1 = 1, /**< CTX1 */
+ TXGBE_CTX_NUM = 2, /**< CTX NUMBER */
+};
+
+/** Offload features */
+union txgbe_tx_offload {
+ uint64_t data[2];
+ struct {
+ uint64_t ptid:8; /**< Packet Type Identifier. */
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+ uint64_t vlan_tci:16;
+ /**< VLAN Tag Control Identifier (CPU order). */
+
+ /* fields for TX offloading of tunnels */
+ uint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */
+ uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
+ uint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. */
+#ifdef RTE_LIBRTE_SECURITY
+ /* inline ipsec related*/
+ uint64_t sa_idx:8; /**< TX SA database entry index */
+ uint64_t sec_pad_len:4; /**< padding length */
+#endif
+ };
+};
+
+/*
+ * Compare mask for vlan_macip_len.data,
+ * should be in sync with txgbe_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
+/**
+ * Structure to check if a new context needs to be built
+ */
+struct txgbe_ctx_info {
+ uint64_t flags; /**< ol_flags for context build. */
+ /**< tx offload: vlan, tso, l2-l3-l4 lengths. */
+ union txgbe_tx_offload tx_offload;
+ /** compare mask for tx offload. */
+ union txgbe_tx_offload tx_offload_mask;
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct txgbe_tx_queue {
+ /** TX ring virtual address. */
+ volatile struct txgbe_tx_desc *tx_ring;
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ union {
+ struct txgbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
+ struct txgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD */
+ };
+ volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
+ volatile uint32_t *tdc_reg_addr; /**< Address of TDC register. */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_tail; /**< current value of TDT reg. */
+ /** Start freeing TX buffers if there are fewer free descriptors
+ * than this value. */
+ uint16_t tx_free_thresh;
+ /** Index to last TX descriptor to have been cleaned. */
+ uint16_t last_desc_cleaned;
+ /** Total number of TX descriptors ready to be allocated. */
+ uint16_t nb_tx_free;
+ uint16_t tx_next_dd; /**< next desc to scan for DD bit */
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t reg_idx; /**< TX queue register index. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold reg. */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+ uint32_t ctx_curr; /**< Hardware context states. */
+ /** Hardware context0 history. */
+ struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
+ const struct txgbe_txq_ops *ops; /**< txq ops */
+ uint8_t tx_deferred_start; /**< not in global dev start. */
+#ifdef RTE_LIBRTE_SECURITY
+ uint8_t using_ipsec;
+ /**< indicates that IPsec TX feature is in use */
+#endif
+};
+
+struct txgbe_txq_ops {
+ void (*release_mbufs)(struct txgbe_tx_queue *txq);
+ void (*free_swring)(struct txgbe_tx_queue *txq);
+ void (*reset)(struct txgbe_tx_queue *txq);
+};
+
+/*
+ * Populate descriptors with the following info:
+ * 1.) buffer_addr = phys_addr + headroom
+ * 2.) cmd_type_len = TXGBE_TXD_FLAGS | pkt_len
+ * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
+ */
+
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq);
+
+/**
+ * Sets the rx_pkt_burst callback in the txgbe rte_eth_dev instance.
+ *
+ * Sets the callback based on the device parameters:
+ * - txgbe_hw.rx_bulk_alloc_allowed
+ * - rte_eth_dev_data.scattered_rx
+ * - rte_eth_dev_data.lro
+ *
+ * This means that the parameters above have to be configured prior to
+ * calling this function.
+ *
+ * @dev rte_eth_dev handle
+ */
+void txgbe_set_rx_function(struct rte_eth_dev *dev);
+int txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
+
+uint64_t txgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
+uint64_t txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t txgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
+
+#endif /* _TXGBE_RXTX_H_ */
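As a reading aid for the descriptor layout above, here is a minimal
sketch of the three populate steps listed in the comment before
txgbe_set_tx_function(); txgbe_fill_data_desc() is a hypothetical
helper for a simple one-mbuf, no-offload Tx path, not part of the
driver:

    #include <rte_byteorder.h>
    #include <rte_mbuf.h>

    static inline void
    txgbe_fill_data_desc(volatile struct txgbe_tx_desc *txd,
    		struct rte_mbuf *m)
    {
    	uint16_t pkt_len = m->data_len;

    	/* 1.) buffer_addr = phys_addr + headroom */
    	txd->qw0 = rte_cpu_to_le_64(rte_mbuf_data_iova(m));
    	/* 2.) cmd_type_len = TXGBE_TXD_FLAGS | pkt_len */
    	txd->dw2 = rte_cpu_to_le_32(TXGBE_TXD_FLAGS |
    			TXGBE_TXD_DATLEN(pkt_len));
    	/* 3.) olinfo_status = pkt_len << PAYLEN_SHIFT */
    	txd->dw3 = rte_cpu_to_le_32(TXGBE_TXD_PAYLEN(pkt_len));
    }
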
new file mode 100644
@@ -0,0 +1,1022 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <rte_malloc.h>
+
+#include "txgbe_ethdev.h"
+
+static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+static int txgbe_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+static int txgbe_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
+static int txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error);
+static int txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
+static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+static int txgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
+
+const struct rte_tm_ops txgbe_tm_ops = {
+ .capabilities_get = txgbe_tm_capabilities_get,
+ .shaper_profile_add = txgbe_shaper_profile_add,
+ .shaper_profile_delete = txgbe_shaper_profile_del,
+ .node_add = txgbe_node_add,
+ .node_delete = txgbe_node_delete,
+ .node_type_get = txgbe_node_type_get,
+ .level_capabilities_get = txgbe_level_capabilities_get,
+ .node_capabilities_get = txgbe_node_capabilities_get,
+ .hierarchy_commit = txgbe_hierarchy_commit,
+};
+
+int
+txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+ void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ *(const void **)arg = &txgbe_tm_ops;
+
+ return 0;
+}
+
+void
+txgbe_tm_conf_init(struct rte_eth_dev *dev)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+
+ /* initialize shaper profile list */
+ TAILQ_INIT(&tm_conf->shaper_profile_list);
+
+ /* initialize node configuration */
+ tm_conf->root = NULL;
+ TAILQ_INIT(&tm_conf->queue_list);
+ TAILQ_INIT(&tm_conf->tc_list);
+ tm_conf->nb_tc_node = 0;
+ tm_conf->nb_queue_node = 0;
+ tm_conf->committed = false;
+}
+
+void
+txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_tm_shaper_profile *shaper_profile;
+ struct txgbe_tm_node *tm_node;
+
+ /* clear node configuration */
+ while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
+ TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ tm_conf->nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
+ TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ tm_conf->nb_tc_node = 0;
+ if (tm_conf->root) {
+ rte_free(tm_conf->root);
+ tm_conf->root = NULL;
+ }
+
+ /* Remove all shaper profiles */
+ while ((shaper_profile =
+ TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
+ TAILQ_REMOVE(&tm_conf->shaper_profile_list,
+ shaper_profile, node);
+ rte_free(shaper_profile);
+ }
+}
+
+static inline uint8_t
+txgbe_tc_nb_get(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *eth_conf;
+ uint8_t nb_tcs = 0;
+
+ eth_conf = &dev->data->dev_conf;
+ if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+ } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+ ETH_32_POOLS)
+ nb_tcs = ETH_4_TCS;
+ else
+ nb_tcs = ETH_8_TCS;
+ } else {
+ nb_tcs = 1;
+ }
+
+ return nb_tcs;
+}
+
+static int
+txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint8_t tc_nb = txgbe_tc_nb_get(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (tc_nb > hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+ /**
+ * These are the max capabilities, not the current configuration.
+ */
+ /* port + TCs + queues */
+ cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX +
+ hw->mac.max_tx_queues;
+ cap->n_levels_max = 3;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+ cap->shaper_n_max = cap->n_nodes_max;
+ cap->shaper_private_n_max = cap->n_nodes_max;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->shaper_private_rate_max = 1250000000ull;
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+ cap->sched_n_children_max = hw->mac.max_tx_queues;
+ /**
+ * HW supports SP, but there is no plan to support it now,
+ * so all the nodes should have the same priority.
+ */
+ cap->sched_sp_n_priorities_max = 1;
+ cap->sched_wfq_n_children_per_group_max = 0;
+ cap->sched_wfq_n_groups_max = 0;
+ /**
+ * SW only supports fair round robin now,
+ * so all the nodes should have the same weight.
+ */
+ cap->sched_wfq_weight_max = 1;
+ cap->cman_head_drop_supported = 0;
+ cap->dynamic_update_mask = 0;
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static inline struct txgbe_tm_shaper_profile *
+txgbe_shaper_profile_search(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_shaper_profile_list *shaper_profile_list =
+ &tm_conf->shaper_profile_list;
+ struct txgbe_tm_shaper_profile *shaper_profile;
+
+ TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+ if (shaper_profile_id == shaper_profile->shaper_profile_id)
+ return shaper_profile;
+ }
+
+ return NULL;
+}
+
+static int
+txgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ /* min rate not supported */
+ if (profile->committed.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "committed rate not supported";
+ return -EINVAL;
+ }
+ /* min bucket size not supported */
+ if (profile->committed.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ error->message = "committed bucket size not supported";
+ return -EINVAL;
+ }
+ /* max bucket size not supported */
+ if (profile->peak.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ error->message = "peak bucket size not supported";
+ return -EINVAL;
+ }
+ /* length adjustment not supported */
+ if (profile->pkt_length_adjust) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "packet length adjustment not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_tm_shaper_profile *shaper_profile;
+ int ret;
+
+ if (!profile || !error)
+ return -EINVAL;
+
+ ret = txgbe_shaper_profile_param_check(profile, error);
+ if (ret)
+ return ret;
+
+ shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);
+
+ if (shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID exist";
+ return -EINVAL;
+ }
+
+ shaper_profile = rte_zmalloc("txgbe_tm_shaper_profile",
+ sizeof(struct txgbe_tm_shaper_profile),
+ 0);
+ if (!shaper_profile)
+ return -ENOMEM;
+ shaper_profile->shaper_profile_id = shaper_profile_id;
+ rte_memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
+ shaper_profile, node);
+
+ return 0;
+}
+
+static int
+txgbe_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_tm_shaper_profile *shaper_profile;
+
+ if (!error)
+ return -EINVAL;
+
+ shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);
+
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID not exist";
+ return -EINVAL;
+ }
+
+ /* don't delete a profile if it's used by one or several nodes */
+ if (shaper_profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "profile in use";
+ return -EINVAL;
+ }
+
+ TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
+ rte_free(shaper_profile);
+
+ return 0;
+}
+
+static inline struct txgbe_tm_node *
+txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
+ enum txgbe_tm_node_type *node_type)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_tm_node *tm_node;
+
+ if (tm_conf->root && tm_conf->root->id == node_id) {
+ *node_type = TXGBE_TM_NODE_TYPE_PORT;
+ return tm_conf->root;
+ }
+
+ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = TXGBE_TM_NODE_TYPE_TC;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = TXGBE_TM_NODE_TYPE_QUEUE;
+ return tm_node;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
+ uint16_t *base, uint16_t *nb)
+{
+ uint8_t nb_tcs = txgbe_tc_nb_get(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint16_t vf_num = pci_dev->max_vfs;
+
+ *base = 0;
+ *nb = 0;
+
+ /* VT on */
+ if (vf_num) {
+ /* no DCB */
+ if (nb_tcs == 1) {
+ if (vf_num >= ETH_32_POOLS) {
+ *nb = 2;
+ *base = vf_num * 2;
+ } else if (vf_num >= ETH_16_POOLS) {
+ *nb = 4;
+ *base = vf_num * 4;
+ } else {
+ *nb = 8;
+ *base = vf_num * 8;
+ }
+ } else {
+ /* DCB */
+ *nb = 1;
+ *base = vf_num * nb_tcs + tc_node_no;
+ }
+ } else {
+ /* VT off */
+ if (nb_tcs == ETH_8_TCS) {
+ switch (tc_node_no) {
+ case 0:
+ *base = 0;
+ *nb = 32;
+ break;
+ case 1:
+ *base = 32;
+ *nb = 32;
+ break;
+ case 2:
+ *base = 64;
+ *nb = 16;
+ break;
+ case 3:
+ *base = 80;
+ *nb = 16;
+ break;
+ case 4:
+ *base = 96;
+ *nb = 8;
+ break;
+ case 5:
+ *base = 104;
+ *nb = 8;
+ break;
+ case 6:
+ *base = 112;
+ *nb = 8;
+ break;
+ case 7:
+ *base = 120;
+ *nb = 8;
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (tc_node_no) {
+ /**
+ * If no VF and no DCB, only 64 queues can be used.
+ * That case is also covered by this "case 0".
+ */
+ case 0:
+ *base = 0;
+ *nb = 64;
+ break;
+ case 1:
+ *base = 64;
+ *nb = 32;
+ break;
+ case 2:
+ *base = 96;
+ *nb = 16;
+ break;
+ case 3:
+ *base = 112;
+ *nb = 16;
+ break;
+ default:
+ return;
+ }
+ }
+ }
+}
+
+static int
+txgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ if (priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority should be 0";
+ return -EINVAL;
+ }
+
+ if (weight != 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "weight must be 1";
+ return -EINVAL;
+ }
+
+ /* not support shared shaper */
+ if (params->shared_shaper_id) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+ if (params->n_shared_shapers) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+
+ /* for non-leaf node */
+ if (node_id >= dev->data->nb_tx_queues) {
+ /* check the unsupported parameters */
+ if (params->nonleaf.wfq_weight_mode) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ not supported";
+ return -EINVAL;
+ }
+ if (params->nonleaf.n_sp_priorities != 1) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "SP priority not supported";
+ return -EINVAL;
+ } else if (params->nonleaf.wfq_weight_mode &&
+ !(*params->nonleaf.wfq_weight_mode)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFP should be byte mode";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* for leaf node */
+ /* check the unsupported parameters */
+ if (params->leaf.cman) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+ error->message = "Congestion management not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.wred_profile_id !=
+ RTE_TM_WRED_PROFILE_ID_NONE) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.shared_wred_context_id) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.n_shared_wred_contexts) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Now the TC and queue configuration is controlled by DCB.
+ * We need to check whether the node configuration follows the DCB configuration.
+ * In the future, we may use TM to cover DCB.
+ */
+static int
+txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+ enum txgbe_tm_node_type parent_node_type = TXGBE_TM_NODE_TYPE_MAX;
+ struct txgbe_tm_shaper_profile *shaper_profile = NULL;
+ struct txgbe_tm_node *tm_node;
+ struct txgbe_tm_node *parent_node;
+ uint8_t nb_tcs;
+ uint16_t q_base = 0;
+ uint16_t q_nb = 0;
+ int ret;
+
+ if (!params || !error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (tm_conf->committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ ret = txgbe_node_param_check(dev, node_id, priority, weight,
+ params, error);
+ if (ret)
+ return ret;
+
+ /* check if the node ID is already used */
+ if (txgbe_tm_node_search(dev, node_id, &node_type)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node id already used";
+ return -EINVAL;
+ }
+
+ /* check the shaper profile id */
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ shaper_profile = txgbe_shaper_profile_search(
+ dev, params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile not exist";
+ return -EINVAL;
+ }
+ }
+
+ /* root node if it does not have a parent */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id > TXGBE_TM_NODE_TYPE_PORT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* obviously no more than one root */
+ if (tm_conf->root) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "already have a root";
+ return -EINVAL;
+ }
+
+ /* add the root node */
+ tm_node = rte_zmalloc("txgbe_tm_node",
+ sizeof(struct txgbe_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->no = 0;
+ tm_node->parent = NULL;
+ tm_node->shaper_profile = shaper_profile;
+ rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ tm_conf->root = tm_node;
+
+ /* increase the reference counter of the shaper profile */
+ if (shaper_profile)
+ shaper_profile->reference_count++;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ /* check the parent node */
+ parent_node = txgbe_tm_node_search(dev, parent_node_id,
+ &parent_node_type);
+ if (!parent_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent not exist";
+ return -EINVAL;
+ }
+ if (parent_node_type != TXGBE_TM_NODE_TYPE_PORT &&
+ parent_node_type != TXGBE_TM_NODE_TYPE_TC) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent is not port or TC";
+ return -EINVAL;
+ }
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id != parent_node_type + 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* check the node number */
+ if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
+ /* check TC number */
+ nb_tcs = txgbe_tc_nb_get(dev);
+ if (tm_conf->nb_tc_node >= nb_tcs) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many TCs";
+ return -EINVAL;
+ }
+ } else {
+ /* check queue number */
+ if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues";
+ return -EINVAL;
+ }
+
+ txgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
+ if (parent_node->reference_count >= q_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues than TC supported";
+ return -EINVAL;
+ }
+
+ /**
+ * check the node id.
+ * For queue, the node id means queue id.
+ */
+ if (node_id >= dev->data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too large queue id";
+ return -EINVAL;
+ }
+ }
+
+ /* add the TC or queue node */
+ tm_node = rte_zmalloc("txgbe_tm_node",
+ sizeof(struct txgbe_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = parent_node;
+ tm_node->shaper_profile = shaper_profile;
+ rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
+ tm_node->no = parent_node->reference_count;
+ TAILQ_INSERT_TAIL(&tm_conf->tc_list,
+ tm_node, node);
+ tm_conf->nb_tc_node++;
+ } else {
+ tm_node->no = q_base + parent_node->reference_count;
+ TAILQ_INSERT_TAIL(&tm_conf->queue_list,
+ tm_node, node);
+ tm_conf->nb_queue_node++;
+ }
+ tm_node->parent->reference_count++;
+
+ /* increase the reference counter of the shaper profile */
+ if (shaper_profile)
+ shaper_profile->reference_count++;
+
+ return 0;
+}
+
+static int
+txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+ struct txgbe_tm_node *tm_node;
+
+ if (!error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (tm_conf->committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* the node should have no child */
+ if (tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message =
+ "cannot delete a node which has children";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (node_type == TXGBE_TM_NODE_TYPE_PORT) {
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
+ rte_free(tm_node);
+ tm_conf->root = NULL;
+ return 0;
+ }
+
+ /* TC or queue node */
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
+ tm_node->parent->reference_count--;
+ if (node_type == TXGBE_TM_NODE_TYPE_TC) {
+ TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+ tm_conf->nb_tc_node--;
+ } else {
+ TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+ tm_conf->nb_queue_node--;
+ }
+ rte_free(tm_node);
+
+ return 0;
+}
+
+static int
+txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+ struct txgbe_tm_node *tm_node;
+
+ if (!is_leaf || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (node_type == TXGBE_TM_NODE_TYPE_QUEUE)
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+txgbe_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (level_id >= TXGBE_TM_NODE_TYPE_MAX) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "too deep level";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (level_id == TXGBE_TM_NODE_TYPE_PORT) {
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->n_nodes_leaf_max = 0;
+ } else if (level_id == TXGBE_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = TXGBE_DCB_TC_MAX;
+ cap->n_nodes_nonleaf_max = TXGBE_DCB_TC_MAX;
+ cap->n_nodes_leaf_max = 0;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->mac.max_tx_queues;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+ }
+
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+
+ if (level_id != TXGBE_TM_NODE_TYPE_QUEUE) {
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = false;
+ cap->nonleaf.shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+ cap->nonleaf.shaper_shared_n_max = 0;
+ if (level_id == TXGBE_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ TXGBE_DCB_TC_MAX;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->mac.max_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->nonleaf.stats_mask = 0;
+
+ return 0;
+ }
+
+ /* queue node */
+ cap->leaf.shaper_private_supported = true;
+ cap->leaf.shaper_private_dual_rate_supported = false;
+ cap->leaf.shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->leaf.shaper_private_rate_max = 1250000000ull;
+ cap->leaf.shaper_shared_n_max = 0;
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ cap->leaf.stats_mask = 0;
+
+ return 0;
+}
+
+static int
+txgbe_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+ struct txgbe_tm_node *tm_node;
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported = false;
+ cap->shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->shaper_private_rate_max = 1250000000ull;
+ cap->shaper_shared_n_max = 0;
+
+ if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) {
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ } else {
+ if (node_type == TXGBE_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ TXGBE_DCB_TC_MAX;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->mac.max_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ }
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static int
+txgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_tm_node *tm_node;
+ uint64_t bw;
+ int ret;
+
+ if (!error)
+ return -EINVAL;
+
+ /* check the setting */
+ if (!tm_conf->root)
+ goto done;
+
+ /* port max bandwidth not supported yet */
+ if (tm_conf->root->shaper_profile &&
+ tm_conf->root->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no port max bandwidth";
+ goto fail_clear;
+ }
+
+ /* HW does not support TC max bandwidth */
+ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+ if (tm_node->shaper_profile &&
+ tm_node->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no TC max bandwidth";
+ goto fail_clear;
+ }
+ }
+
+ /* queue max bandwidth */
+ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+ if (tm_node->shaper_profile)
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ else
+ bw = 0;
+ if (bw) {
+ /* convert bytes per second to Mbps */
+ bw = bw * 8 / 1000 / 1000;
+ ret = txgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message =
+ "failed to set queue max bandwidth";
+ goto fail_clear;
+ }
+ }
+ }
+
+done:
+ tm_conf->committed = true;
+ return 0;
+
+fail_clear:
+ /* clear all the traffic manager configuration */
+ if (clear_on_fail) {
+ txgbe_tm_conf_uninit(dev);
+ txgbe_tm_conf_init(dev);
+ }
+ return -EINVAL;
+}
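
For context, the hierarchy built above is driven through the generic
rte_tm API. A minimal usage sketch under the constraints enforced by
this file (priority 0, weight 1, committed rate/size left at zero, and
a peak rate only on queue nodes); the node IDs and the 400 Mbps figure
are illustrative assumptions:

    #include <rte_tm.h>

    /* Non-leaf node IDs must be >= nb_tx_queues; a leaf node ID is
     * the Tx queue ID itself.
     */
    #define EXAMPLE_PORT_NODE 1000
    #define EXAMPLE_TC_NODE   1001

    static int
    setup_tm(uint16_t port_id, uint16_t txq)
    {
    	struct rte_tm_error err;
    	struct rte_tm_shaper_params sp = {
    		/* peak rate in bytes/s (400 Mbps) */
    		.peak = { .rate = 400 * 1000 * 1000 / 8 },
    	};
    	struct rte_tm_node_params nonleaf = {
    		.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
    		.nonleaf = { .n_sp_priorities = 1 },
    	};
    	struct rte_tm_node_params leaf = {
    		.shaper_profile_id = 0, /* profile added below */
    		.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE,
    	};
    	int ret;

    	ret = rte_tm_shaper_profile_add(port_id, 0, &sp, &err);
    	if (ret)
    		return ret;
    	/* port -> TC -> queue, each with priority 0 and weight 1 */
    	ret = rte_tm_node_add(port_id, EXAMPLE_PORT_NODE,
    			RTE_TM_NODE_ID_NULL, 0, 1,
    			RTE_TM_NODE_LEVEL_ID_ANY, &nonleaf, &err);
    	if (ret)
    		return ret;
    	ret = rte_tm_node_add(port_id, EXAMPLE_TC_NODE,
    			EXAMPLE_PORT_NODE, 0, 1,
    			RTE_TM_NODE_LEVEL_ID_ANY, &nonleaf, &err);
    	if (ret)
    		return ret;
    	ret = rte_tm_node_add(port_id, txq, EXAMPLE_TC_NODE, 0, 1,
    			RTE_TM_NODE_LEVEL_ID_ANY, &leaf, &err);
    	if (ret)
    		return ret;
    	return rte_tm_hierarchy_commit(port_id, 1, &err);
    }
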
new file mode 100644
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/txgbe_type.h"
+#include "base/txgbe_vf.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_rxtx.h"
+#include "rte_pmd_txgbe.h"
+
+static int
+txgbe_vf_representor_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+ struct txgbe_vf_representor *representor = TXGBE_DEV_REPRESENTOR(ethdev);
+
+ return txgbe_dev_link_update_share(representor->pf_ethdev,
+ wait_to_complete);
+}
+
+static int
+txgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct txgbe_vf_representor *representor = TXGBE_DEV_REPRESENTOR(ethdev);
+
+ return rte_pmd_txgbe_set_vf_mac_addr(
+ representor->pf_ethdev->data->port_id,
+ representor->vf_id, mac_addr);
+}
+
+static int
+txgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct txgbe_vf_representor *representor = TXGBE_DEV_REPRESENTOR(ethdev);
+
+ struct txgbe_hw *hw = TXGBE_DEV_HW(representor->pf_ethdev);
+
+ dev_info->device = representor->pf_ethdev->device;
+
+ dev_info->min_rx_bufsize = 1024;
+ /**< Minimum size of RX buffer. */
+ dev_info->max_rx_pktlen = 9728;
+ /**< Maximum configurable length of RX pkt. */
+ dev_info->max_rx_queues = TXGBE_VF_MAX_RX_QUEUES;
+ /**< Maximum number of RX queues. */
+ dev_info->max_tx_queues = TXGBE_VF_MAX_TX_QUEUES;
+ /**< Maximum number of TX queues. */
+
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ /**< Maximum number of MAC addresses. */
+
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ /**< Device RX offload capabilities. */
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_MULTI_SEGS;
+ /**< Device TX offload capabilities. */
+
+ dev_info->speed_capa =
+ representor->pf_ethdev->data->dev_link.link_speed;
+ /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+
+ dev_info->switch_info.name =
+ representor->pf_ethdev->device->name;
+ dev_info->switch_info.domain_id = representor->switch_domain_id;
+ dev_info->switch_info.port_id = representor->vf_id;
+
+ return 0;
+}
+
+static int txgbe_vf_representor_dev_configure(
+ __rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int txgbe_vf_representor_rx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ return 0;
+}
+
+static int txgbe_vf_representor_tx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ return 0;
+}
+
+static int txgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static void txgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+txgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+ uint16_t vlan_id, int on)
+{
+ struct txgbe_vf_representor *representor = TXGBE_DEV_REPRESENTOR(ethdev);
+ uint64_t vf_mask = 1ULL << representor->vf_id;
+
+ return rte_pmd_txgbe_set_vf_vlan_filter(
+ representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on);
+}
+
+static void
+txgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+ __rte_unused uint16_t rx_queue_id, int on)
+{
+ struct txgbe_vf_representor *representor = TXGBE_DEV_REPRESENTOR(ethdev);
+
+ rte_pmd_txgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id,
+ representor->vf_id, on);
+}
+
+static const struct eth_dev_ops txgbe_vf_representor_dev_ops = {
+ .dev_infos_get = txgbe_vf_representor_dev_infos_get,
+
+ .dev_start = txgbe_vf_representor_dev_start,
+ .dev_configure = txgbe_vf_representor_dev_configure,
+ .dev_stop = txgbe_vf_representor_dev_stop,
+
+ .rx_queue_setup = txgbe_vf_representor_rx_queue_setup,
+ .tx_queue_setup = txgbe_vf_representor_tx_queue_setup,
+
+ .link_update = txgbe_vf_representor_link_update,
+
+ .vlan_filter_set = txgbe_vf_representor_vlan_filter_set,
+ .vlan_strip_queue_set = txgbe_vf_representor_vlan_strip_queue_set,
+
+ .mac_addr_set = txgbe_vf_representor_mac_addr_set,
+};
+
+static uint16_t
+txgbe_vf_representor_rx_burst(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+txgbe_vf_representor_tx_burst(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int
+txgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct txgbe_vf_representor *representor = TXGBE_DEV_REPRESENTOR(ethdev);
+
+ struct txgbe_vf_info *vf_data;
+ struct rte_pci_device *pci_dev;
+ struct rte_eth_link *link;
+
+ if (!representor)
+ return -ENOMEM;
+
+ representor->vf_id =
+ ((struct txgbe_vf_representor *)init_params)->vf_id;
+ representor->switch_domain_id =
+ ((struct txgbe_vf_representor *)init_params)->switch_domain_id;
+ representor->pf_ethdev =
+ ((struct txgbe_vf_representor *)init_params)->pf_ethdev;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev);
+
+ if (representor->vf_id >= pci_dev->max_vfs)
+ return -ENODEV;
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ ethdev->data->representor_id = representor->vf_id;
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &txgbe_vf_representor_dev_ops;
+
+ /* No data-path, but need stub Rx/Tx functions to avoid crash
+ * when testing with the likes of testpmd.
+ */
+ ethdev->rx_pkt_burst = txgbe_vf_representor_rx_burst;
+ ethdev->tx_pkt_burst = txgbe_vf_representor_tx_burst;
+
+ /* Set the number of queues allocated to the VF */
+ ethdev->data->nb_rx_queues = TXGBE_VF_MAX_RX_QUEUES;
+ ethdev->data->nb_tx_queues = TXGBE_VF_MAX_TX_QUEUES;
+
+ /* Reference VF mac address from PF data structure */
+ vf_data = *TXGBE_DEV_VFDATA(representor->pf_ethdev);
+
+ ethdev->data->mac_addrs = (struct rte_ether_addr *)
+ vf_data[representor->vf_id].vf_mac_addresses;
+
+ /* Link state. Inherited from PF */
+ link = &representor->pf_ethdev->data->dev_link;
+
+ ethdev->data->dev_link.link_speed = link->link_speed;
+ ethdev->data->dev_link.link_duplex = link->link_duplex;
+ ethdev->data->dev_link.link_status = link->link_status;
+ ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+ return 0;
+}
+
+int
+txgbe_vf_representor_uninit(struct rte_eth_dev *ethdev)
+{
+ /* fixme: mac_addrs must not be freed because it is part of txgbe_vf_info */
+ ethdev->data->mac_addrs = NULL;
+
+ return 0;
+}
\ No newline at end of file
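
The representor ports created above behave like ordinary ethdevs with
stub data paths. A short sketch, assuming nothing beyond the standard
ethdev API, of how an application can identify them through the
switch_info filled in by txgbe_vf_representor_dev_infos_get():

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    list_switch_ports(void)
    {
    	uint16_t pid;

    	RTE_ETH_FOREACH_DEV(pid) {
    		struct rte_eth_dev_info info;

    		if (rte_eth_dev_info_get(pid, &info) != 0)
    			continue;
    		/* For a VF representor, domain_id is the PF switch
    		 * domain and port_id is the VF id.
    		 */
    		printf("port %u: switch %s, domain %u, port %u\n",
    			pid,
    			info.switch_info.name ? info.switch_info.name : "-",
    			info.switch_info.domain_id,
    			info.switch_info.port_id);
    	}
    }
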
@@ -239,6 +239,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += -lrte_pmd_nfb
_LDLIBS-$(CONFIG_RTE_LIBRTE_NFB_PMD) += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --libs netcope-common)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += -lrte_pmd_tap
_LDLIBS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += -lrte_pmd_thunderx_nicvf
+_LDLIBS-$(CONFIG_RTE_LIBRTE_TXGBE_PMD) += -lrte_pmd_txgbe
_LDLIBS-$(CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD) += -lrte_pmd_vdev_netvsc
_LDLIBS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += -lrte_pmd_virtio
ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)