get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).
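
A minimal sketch of driving this endpoint programmatically, assuming the public patches.dpdk.org instance and the Python requests library; the field names used (name, state, mbox) appear in the JSON body shown below, and the write methods listed in the response's Allow header (PUT, PATCH) additionally require an authenticated account with permission to modify the patch.

# Fetch patch 64385 from the Patchwork REST API; without the browsable
# "?format=api" renderer the endpoint returns plain JSON.
import requests

resp = requests.get("http://patches.dpdk.org/api/patches/64385/", timeout=30)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])    # "[RFC,2/7] net/igc: igc poll mode driver"
print(patch["state"])   # current state, e.g. "changes-requested"
print(patch["mbox"])    # raw mbox URL, usable with `git am`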

GET /api/patches/64385/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 64385,
    "url": "http://patches.dpdk.org/api/patches/64385/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1578625225-110361-2-git-send-email-alvinx.zhang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1578625225-110361-2-git-send-email-alvinx.zhang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1578625225-110361-2-git-send-email-alvinx.zhang@intel.com",
    "date": "2020-01-10T03:00:20",
    "name": "[RFC,2/7] net/igc: igc poll mode driver",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "e28fb3a0ec5541e141354cbd0558b5cc276d9ca1",
    "submitter": {
        "id": 1398,
        "url": "http://patches.dpdk.org/api/people/1398/?format=api",
        "name": "Alvin Zhang",
        "email": "alvinx.zhang@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1578625225-110361-2-git-send-email-alvinx.zhang@intel.com/mbox/",
    "series": [
        {
            "id": 8043,
            "url": "http://patches.dpdk.org/api/series/8043/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=8043",
            "date": "2020-01-10T03:00:20",
            "name": "[RFC,1/7] net/igc: base driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/8043/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/64385/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/64385/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id AC369A04F9;\n\tFri, 10 Jan 2020 04:02:45 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id BDF661E86B;\n\tFri, 10 Jan 2020 04:02:44 +0100 (CET)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by dpdk.org (Postfix) with ESMTP id C75541E86A\n for <dev@dpdk.org>; Fri, 10 Jan 2020 04:02:41 +0100 (CET)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n by orsmga103.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n 09 Jan 2020 19:02:40 -0800",
            "from unknown (HELO dpdk-zhangalvin-dev.sh.intel.com)\n ([10.240.179.50])\n by orsmga002.jf.intel.com with ESMTP; 09 Jan 2020 19:02:37 -0800"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.69,415,1571727600\"; d=\"scan'208\";a=\"236723899\"",
        "From": "alvinx.zhang@intel.com",
        "To": "haiyue.wang@intel.com, qi.z.zhang@intel.com, beilei.xing@intel.com,\n xiaolong.ye@intel.com",
        "Cc": "dev@dpdk.org,\n\tAlvin Zhang <alvinx.zhang@intel.com>",
        "Date": "Fri, 10 Jan 2020 11:00:20 +0800",
        "Message-Id": "<1578625225-110361-2-git-send-email-alvinx.zhang@intel.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1578625225-110361-1-git-send-email-alvinx.zhang@intel.com>",
        "References": "<1578625225-110361-1-git-send-email-alvinx.zhang@intel.com>",
        "Subject": "[dpdk-dev] [RFC 2/7] net/igc: igc poll mode driver",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Alvin Zhang <alvinx.zhang@intel.com>\n\nlink status,\nspeed capabilities,\nTx and Rx of packets,\nflow control,\nRSS,\ntimestampping,\nVLAN filter,\nExternal VLAN,\nQinQ offload,\nbase statistics,\nextend statistics,\nper queue statistics,\n\nSigned-off-by: Alvin Zhang <alvinx.zhang@intel.com>\n---\n drivers/net/igc/igc_ethdev.c | 2819 ++++++++++++++++++++++++++++++++++++++++++\n drivers/net/igc/igc_ethdev.h |  179 +++\n drivers/net/igc/igc_logs.c   |   21 +\n drivers/net/igc/igc_logs.h   |   48 +\n drivers/net/igc/igc_txrx.c   | 2237 +++++++++++++++++++++++++++++++++\n drivers/net/igc/igc_txrx.h   |   56 +\n 6 files changed, 5360 insertions(+)\n create mode 100644 drivers/net/igc/igc_ethdev.c\n create mode 100644 drivers/net/igc/igc_ethdev.h\n create mode 100644 drivers/net/igc/igc_logs.c\n create mode 100644 drivers/net/igc/igc_logs.h\n create mode 100644 drivers/net/igc/igc_txrx.c\n create mode 100644 drivers/net/igc/igc_txrx.h",
    "diff": "diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c\nnew file mode 100644\nindex 0000000..87179cf\n--- /dev/null\n+++ b/drivers/net/igc/igc_ethdev.c\n@@ -0,0 +1,2819 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2020 Intel Corporation\n+ */\n+\n+#include <rte_string_fns.h>\n+#include <rte_pci.h>\n+#include <rte_bus_pci.h>\n+#include <rte_ethdev_driver.h>\n+#include <rte_ethdev_pci.h>\n+#include <rte_malloc.h>\n+#include <rte_alarm.h>\n+\n+#include \"igc_logs.h\"\n+#include \"igc_txrx.h\"\n+\n+/* Per Queue Good Packets Received Count */\n+#define IGC_PQGPRC(idx)\t\t(0x10010 + 0x100 * (idx))\n+/* Per Queue Good Octets Received Count */\n+#define IGC_PQGORC(idx)\t\t(0x10018 + 0x100 * (idx))\n+/* Per Queue Good Octets Transmitted Count */\n+#define IGC_PQGOTC(idx)\t\t(0x10034 + 0x100 * (idx))\n+/* Per Queue Multicast Packets Received Count */\n+#define IGC_PQMPRC(idx)\t\t(0x10038 + 0x100 * (idx))\n+/* Transmit Queue Drop Packet Count */\n+#define IGC_TQDPC(idx)\t\t(0xe030 + 0x40 * (idx))\n+\n+#define IGC_FC_PAUSE_TIME\t\t0x0680\n+#define IGC_LINK_UPDATE_CHECK_TIMEOUT\t90  /* 9s */\n+#define IGC_LINK_UPDATE_CHECK_INTERVAL\t100 /* ms */\n+#define IGC_MSIX_OTHER_INTR_VEC\t\t0   /* MSI-X other interrupt vector */\n+\n+#define IGC_DISABLE_TIMER0_MSK\t\t(1u << 31)\n+#define IGC_TIMADJ_MAX\t\t\t999999900u /* nono-seconds */\n+#define IGC_TIMADJ_SIGN_MINUS\t\t(1u << 31) /* 0b=\"+\", 1b=\"-\" */\n+/* single cycle or multi-cycle */\n+#define IGC_TIMADJ_METH_SINGLE\t\t(1u << 30)\n+/* Use the PHY sop indication or not */\n+#define IGC_TSYNCRXCTL_RXSYNSIG\t\t(1u << 10)\n+#define IGC_TSYNCTXCTL_TXSYNSIG\t\t(1u << 5)\n+\n+#define IGC_DEFAULT_RX_FREE_THRESH\t32\n+\n+#define IGC_DEFAULT_RX_PTHRESH\t\t8\n+#define IGC_DEFAULT_RX_HTHRESH\t\t8\n+#define IGC_DEFAULT_RX_WTHRESH\t\t4\n+\n+#define IGC_DEFAULT_TX_PTHRESH\t\t8\n+#define IGC_DEFAULT_TX_HTHRESH\t\t1\n+#define IGC_DEFAULT_TX_WTHRESH\t\t16\n+\n+#define IGC_ALARM_INTERVAL\t\t8000000u\n+/* us, about 13.6s some per-queue registers will wrap around back to 0. 
*/\n+\n+/* MSI-X other interrupt vector */\n+#define IGC_MSIX_OTHER_INTR_VEC\t\t0\n+\n+/* External VLAN Enable bit mask */\n+#define IGC_CTRL_EXT_EXT_VLAN      (1 << 26)\n+\n+/* External VLAN Ether Type bit mask and shift */\n+#define IGC_VET_EXT            0xFFFF0000\n+#define IGC_VET_EXT_SHIFT      16\n+\n+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n+#define U32_0_IN_U64\t0\t/* lower bytes of u64 */\n+#define U32_1_IN_U64\t1\t/* higher bytes of u64 */\n+#else\n+#define U32_0_IN_U64\t1\n+#define U32_1_IN_U64\t0\n+#endif\n+\n+static const struct rte_eth_desc_lim rx_desc_lim = {\n+\t.nb_max = IGC_MAX_RXD,\n+\t.nb_min = IGC_MIN_RXD,\n+\t.nb_align = IGC_RXD_ALIGN,\n+};\n+\n+static const struct rte_eth_desc_lim tx_desc_lim = {\n+\t.nb_max = IGC_MAX_TXD,\n+\t.nb_min = IGC_MIN_TXD,\n+\t.nb_align = IGC_TXD_ALIGN,\n+\t.nb_seg_max = IGC_TX_MAX_SEG,\n+\t.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,\n+};\n+\n+static enum igc_fc_mode igc_fc_setting = igc_fc_full;\n+\n+static const struct rte_pci_id pci_id_igc_map[] = {\n+\t{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },\n+\t{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V)  },\n+\t{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I)  },\n+\t{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V)  },\n+\t{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K)  },\n+\t{ .vendor_id = 0, /* sentinel */ },\n+};\n+\n+/* store statistics names and its offset in stats structure */\n+struct rte_igc_xstats_name_off {\n+\tchar name[RTE_ETH_XSTATS_NAME_SIZE];\n+\tunsigned int offset;\n+};\n+\n+static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {\n+\t{\"rx_crc_errors\", offsetof(struct igc_hw_stats, crcerrs)},\n+\t{\"rx_align_errors\", offsetof(struct igc_hw_stats, algnerrc)},\n+\t{\"rx_errors\", offsetof(struct igc_hw_stats, rxerrc)},\n+\t{\"rx_missed_packets\", offsetof(struct igc_hw_stats, mpc)},\n+\t{\"tx_single_collision_packets\", offsetof(struct igc_hw_stats, scc)},\n+\t{\"tx_multiple_collision_packets\", offsetof(struct igc_hw_stats, mcc)},\n+\t{\"tx_excessive_collision_packets\", offsetof(struct igc_hw_stats,\n+\t\tecol)},\n+\t{\"tx_late_collisions\", offsetof(struct igc_hw_stats, latecol)},\n+\t{\"tx_total_collisions\", offsetof(struct igc_hw_stats, colc)},\n+\t{\"tx_deferred_packets\", offsetof(struct igc_hw_stats, dc)},\n+\t{\"tx_no_carrier_sense_packets\", offsetof(struct igc_hw_stats, tncrs)},\n+\t{\"tx_discarded_packets\", offsetof(struct igc_hw_stats, htdpmc)},\n+\t{\"rx_length_errors\", offsetof(struct igc_hw_stats, rlec)},\n+\t{\"rx_xon_packets\", offsetof(struct igc_hw_stats, xonrxc)},\n+\t{\"tx_xon_packets\", offsetof(struct igc_hw_stats, xontxc)},\n+\t{\"rx_xoff_packets\", offsetof(struct igc_hw_stats, xoffrxc)},\n+\t{\"tx_xoff_packets\", offsetof(struct igc_hw_stats, xofftxc)},\n+\t{\"rx_flow_control_unsupported_packets\", offsetof(struct igc_hw_stats,\n+\t\tfcruc)},\n+\t{\"rx_size_64_packets\", offsetof(struct igc_hw_stats, prc64)},\n+\t{\"rx_size_65_to_127_packets\", offsetof(struct igc_hw_stats, prc127)},\n+\t{\"rx_size_128_to_255_packets\", offsetof(struct igc_hw_stats, prc255)},\n+\t{\"rx_size_256_to_511_packets\", offsetof(struct igc_hw_stats, prc511)},\n+\t{\"rx_size_512_to_1023_packets\", offsetof(struct igc_hw_stats,\n+\t\tprc1023)},\n+\t{\"rx_size_1024_to_max_packets\", offsetof(struct igc_hw_stats,\n+\t\tprc1522)},\n+\t{\"rx_broadcast_packets\", offsetof(struct igc_hw_stats, bprc)},\n+\t{\"rx_multicast_packets\", offsetof(struct igc_hw_stats, mprc)},\n+\t{\"rx_undersize_errors\", offsetof(struct 
igc_hw_stats, ruc)},\n+\t{\"rx_fragment_errors\", offsetof(struct igc_hw_stats, rfc)},\n+\t{\"rx_oversize_errors\", offsetof(struct igc_hw_stats, roc)},\n+\t{\"rx_jabber_errors\", offsetof(struct igc_hw_stats, rjc)},\n+\t{\"rx_no_buffers\", offsetof(struct igc_hw_stats, rnbc)},\n+\t{\"rx_management_packets\", offsetof(struct igc_hw_stats, mgprc)},\n+\t{\"rx_management_dropped\", offsetof(struct igc_hw_stats, mgpdc)},\n+\t{\"tx_management_packets\", offsetof(struct igc_hw_stats, mgptc)},\n+\t{\"rx_total_packets\", offsetof(struct igc_hw_stats, tpr)},\n+\t{\"tx_total_packets\", offsetof(struct igc_hw_stats, tpt)},\n+\t{\"rx_total_bytes\", offsetof(struct igc_hw_stats, tor)},\n+\t{\"tx_total_bytes\", offsetof(struct igc_hw_stats, tot)},\n+\t{\"tx_size_64_packets\", offsetof(struct igc_hw_stats, ptc64)},\n+\t{\"tx_size_65_to_127_packets\", offsetof(struct igc_hw_stats, ptc127)},\n+\t{\"tx_size_128_to_255_packets\", offsetof(struct igc_hw_stats, ptc255)},\n+\t{\"tx_size_256_to_511_packets\", offsetof(struct igc_hw_stats, ptc511)},\n+\t{\"tx_size_512_to_1023_packets\", offsetof(struct igc_hw_stats,\n+\t\tptc1023)},\n+\t{\"tx_size_1023_to_max_packets\", offsetof(struct igc_hw_stats,\n+\t\tptc1522)},\n+\t{\"tx_multicast_packets\", offsetof(struct igc_hw_stats, mptc)},\n+\t{\"tx_broadcast_packets\", offsetof(struct igc_hw_stats, bptc)},\n+\t{\"tx_tso_packets\", offsetof(struct igc_hw_stats, tsctc)},\n+\t{\"rx_sent_to_host_packets\", offsetof(struct igc_hw_stats, rpthc)},\n+\t{\"tx_sent_by_host_packets\", offsetof(struct igc_hw_stats, hgptc)},\n+\t{\"interrupt_assert_count\", offsetof(struct igc_hw_stats, iac)},\n+\t{\"rx_descriptor_lower_threshold\",\n+\t\toffsetof(struct igc_hw_stats, icrxdmtc)},\n+};\n+\n+#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \\\n+\t\tsizeof(rte_igc_stats_strings[0]))\n+\n+static int eth_igc_configure(struct rte_eth_dev *dev);\n+static void eth_igc_stop(struct rte_eth_dev *dev);\n+static int eth_igc_start(struct rte_eth_dev *dev);\n+static int eth_igc_set_link_up(struct rte_eth_dev *dev);\n+static int eth_igc_set_link_down(struct rte_eth_dev *dev);\n+static void eth_igc_close(struct rte_eth_dev *dev);\n+static int eth_igc_reset(struct rte_eth_dev *dev);\n+static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);\n+static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);\n+static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);\n+static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);\n+static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);\n+\n+static int eth_igc_stats_get(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_stats *rte_stats);\n+static int eth_igc_xstats_get(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_xstat *xstats, unsigned int n);\n+static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,\n+\t\t\t\tconst uint64_t *ids,\n+\t\t\t\tuint64_t *values, unsigned int n);\n+static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,\n+\t\t\t\tstruct rte_eth_xstat_name *xstats_names,\n+\t\t\t\tunsigned int size);\n+static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,\n+\t\tstruct rte_eth_xstat_name *xstats_names, const uint64_t *ids,\n+\t\tunsigned int limit);\n+static int eth_igc_xstats_reset(struct rte_eth_dev *dev);\n+static int\n+eth_igc_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,\n+\tuint16_t queue_id, uint8_t stat_idx, __rte_unused uint8_t is_rx);\n+static int eth_igc_fw_version_get(struct rte_eth_dev *dev,\n+\t\t\t\tchar *fw_version, size_t 
fw_size);\n+static int eth_igc_infos_get(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_dev_info *dev_info);\n+static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);\n+static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);\n+static int\n+eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);\n+static int\n+eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);\n+static int\n+eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);\n+static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,\n+\t\t      enum rte_vlan_type vlan_type, uint16_t tpid);\n+static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);\n+static int eth_igc_led_on(struct rte_eth_dev *dev);\n+static int eth_igc_led_off(struct rte_eth_dev *dev);\n+static int\n+eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);\n+static int\n+eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);\n+static int eth_igc_rar_set(struct rte_eth_dev *dev,\n+\t\tstruct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);\n+static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);\n+static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_ether_addr *addr);\n+static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_rss_reta_entry64 *reta_conf,\n+\t\t\tuint16_t reta_size);\n+static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,\n+\t\t       struct rte_eth_rss_reta_entry64 *reta_conf,\n+\t\t       uint16_t reta_size);\n+static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_rss_conf *rss_conf);\n+static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_rss_conf *rss_conf);\n+static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n+\t\t\t struct rte_ether_addr *mc_addr_set,\n+\t\t\t uint32_t nb_mc_addr);\n+static int eth_igc_get_eeprom_length(struct rte_eth_dev *dev);\n+\n+static int\n+eth_igc_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);\n+static int\n+eth_igc_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts);\n+static int\n+eth_igc_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts);\n+\n+static int eth_igc_timesync_enable(struct rte_eth_dev *dev);\n+static int eth_igc_timesync_disable(struct rte_eth_dev *dev);\n+\n+static int eth_igc_timesync_read_tx_timestamp(struct rte_eth_dev *dev,\n+\t\t\t       struct timespec *timestamp);\n+\n+static const struct eth_dev_ops eth_igc_ops = {\n+\t.dev_configure\t\t= eth_igc_configure,\n+\t.dev_start\t\t= eth_igc_start,\n+\t.dev_stop\t\t= eth_igc_stop,\n+\t.dev_close\t\t= eth_igc_close,\n+\t.dev_reset\t\t= eth_igc_reset,\n+\t.dev_set_link_up\t= eth_igc_set_link_up,\n+\t.dev_set_link_down\t= eth_igc_set_link_down,\n+\t.promiscuous_enable\t= eth_igc_promiscuous_enable,\n+\t.promiscuous_disable\t= eth_igc_promiscuous_disable,\n+\t.allmulticast_enable\t= eth_igc_allmulticast_enable,\n+\t.allmulticast_disable\t= eth_igc_allmulticast_disable,\n+\t.link_update\t\t= eth_igc_link_update,\n+\t.stats_get\t\t= eth_igc_stats_get,\n+\t.xstats_get\t\t= eth_igc_xstats_get,\n+\t.xstats_get_by_id\t= eth_igc_xstats_get_by_id,\n+\t.xstats_get_names_by_id\t= eth_igc_xstats_get_names_by_id,\n+\t.xstats_get_names\t= eth_igc_xstats_get_names,\n+\t.stats_reset\t\t= eth_igc_xstats_reset,\n+\t.xstats_reset\t\t= eth_igc_xstats_reset,\n+\t.queue_stats_mapping_set = 
eth_igc_queue_stats_mapping_set,\n+\t.fw_version_get\t\t= eth_igc_fw_version_get,\n+\t.dev_infos_get\t\t= eth_igc_infos_get,\n+\t.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,\n+\t.mtu_set\t\t= eth_igc_mtu_set,\n+\t.vlan_filter_set\t= eth_igc_vlan_filter_set,\n+\t.vlan_tpid_set\t\t= eth_igc_vlan_tpid_set,\n+\t.vlan_strip_queue_set\t= eth_igc_vlan_strip_queue_set,\n+\t.vlan_offload_set\t= eth_igc_vlan_offload_set,\n+\t.rx_queue_setup\t\t= eth_igc_rx_queue_setup,\n+\t.rx_queue_intr_enable\t= eth_igc_rx_queue_intr_enable,\n+\t.rx_queue_intr_disable\t= eth_igc_rx_queue_intr_disable,\n+\t.rx_queue_release\t= eth_igc_rx_queue_release,\n+\t.rx_queue_count\t\t= eth_igc_rx_queue_count,\n+\t.rx_descriptor_done\t= eth_igc_rx_descriptor_done,\n+\t.rx_descriptor_status\t= eth_igc_rx_descriptor_status,\n+\t.tx_descriptor_status\t= eth_igc_tx_descriptor_status,\n+\t.tx_queue_setup\t\t= eth_igc_tx_queue_setup,\n+\t.tx_queue_release\t= eth_igc_tx_queue_release,\n+\t.tx_done_cleanup\t= eth_igc_tx_done_cleanup,\n+\t.dev_led_on\t\t= eth_igc_led_on,\n+\t.dev_led_off\t\t= eth_igc_led_off,\n+\t.flow_ctrl_get\t\t= eth_igc_flow_ctrl_get,\n+\t.flow_ctrl_set\t\t= eth_igc_flow_ctrl_set,\n+\t.mac_addr_add\t\t= eth_igc_rar_set,\n+\t.mac_addr_remove\t= eth_igc_rar_clear,\n+\t.mac_addr_set\t\t= eth_igc_default_mac_addr_set,\n+\t.reta_update\t\t= eth_igc_rss_reta_update,\n+\t.reta_query\t\t= eth_igc_rss_reta_query,\n+\t.rss_hash_update\t= eth_igc_rss_hash_update,\n+\t.rss_hash_conf_get\t= eth_igc_rss_hash_conf_get,\n+\t.set_mc_addr_list\t= eth_igc_set_mc_addr_list,\n+\t.rxq_info_get\t\t= eth_igc_rxq_info_get,\n+\t.txq_info_get\t\t= eth_igc_txq_info_get,\n+\t.timesync_enable\t= eth_igc_timesync_enable,\n+\t.timesync_disable\t= eth_igc_timesync_disable,\n+\t.timesync_read_rx_timestamp = eth_igc_timesync_read_rx_timestamp,\n+\t.timesync_read_tx_timestamp = eth_igc_timesync_read_tx_timestamp,\n+\t.get_eeprom_length\t= eth_igc_get_eeprom_length,\n+\t.timesync_adjust_time\t= eth_igc_timesync_adjust_time,\n+\t.timesync_read_time\t= eth_igc_timesync_read_time,\n+\t.timesync_write_time\t= eth_igc_timesync_write_time,\n+};\n+\n+/*\n+ * multipe queue mode checking\n+ */\n+static int\n+igc_check_mq_mode(struct rte_eth_dev *dev)\n+{\n+\tenum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;\n+\tenum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;\n+\n+\tif ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||\n+\t    tx_mq_mode == ETH_MQ_TX_DCB ||\n+\t    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {\n+\t\tPMD_INIT_LOG(ERR, \"DCB mode is not supported.\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (RTE_ETH_DEV_SRIOV(dev).active != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"SRIOV is not supported.\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (rx_mq_mode != ETH_MQ_RX_NONE &&\n+\t\trx_mq_mode != ETH_MQ_RX_RSS) {\n+\t\t/* RSS together with VMDq not supported*/\n+\t\tPMD_INIT_LOG(ERR, \"RX mode %d is not supported.\",\n+\t\t\t\trx_mq_mode);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* To no break software that set invalid mode, only display\n+\t * warning if invalid mode is used.\n+\t */\n+\tif (tx_mq_mode != ETH_MQ_TX_NONE)\n+\t\tPMD_INIT_LOG(WARNING, \"TX mode %d is not supported.\"\n+\t\t\t\t\" Due to txmode is meaningless in this driver,\"\n+\t\t\t\t\" just ignore.\", tx_mq_mode);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_configure(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_interrupt *intr =\n+\t\tIGC_DEV_PRIVATE_TO_INTR(dev->data->dev_private);\n+\tint ret;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tret  = igc_check_mq_mode(dev);\n+\tif 
(ret != 0)\n+\t\treturn ret;\n+\n+\tintr->flags |= IGC_FLAG_NEED_LINK_UPDATE;\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_set_link_up(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\tif (hw->phy.media_type == igc_media_type_copper)\n+\t\tigc_power_up_phy(hw);\n+\telse\n+\t\tigc_power_up_fiber_serdes_link(hw);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_set_link_down(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\tif (hw->phy.media_type == igc_media_type_copper)\n+\t\tigc_power_down_phy(hw);\n+\telse\n+\t\tigc_shutdown_fiber_serdes_link(hw);\n+\treturn 0;\n+}\n+\n+/*\n+ * rx,tx enable/disable\n+ */\n+static void\n+eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t tctl, rctl;\n+\n+\ttctl = IGC_READ_REG(hw, IGC_TCTL);\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\n+\tif (enable) {\n+\t\t/* enable Tx/Rx */\n+\t\ttctl |= IGC_TCTL_EN;\n+\t\trctl |= IGC_RCTL_EN;\n+\t} else {\n+\t\t/* disable Tx/Rx */\n+\t\ttctl &= ~IGC_TCTL_EN;\n+\t\trctl &= ~IGC_RCTL_EN;\n+\t}\n+\tIGC_WRITE_REG(hw, IGC_TCTL, tctl);\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\tIGC_WRITE_FLUSH(hw);\n+}\n+\n+/*\n+ * disable other interrupt\n+ */\n+static void\n+igc_intr_other_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\n+\tif (rte_intr_allow_others(intr_handle) &&\n+\t\tdev->data->dev_conf.intr_conf.lsc != 0) {\n+\t\tIGC_WRITE_REG(hw, IGC_EIMC, 1 << IGC_MSIX_OTHER_INTR_VEC);\n+\t}\n+\n+\tIGC_WRITE_REG(hw, IGC_IMC, ~0);\n+\tIGC_WRITE_FLUSH(hw);\n+}\n+\n+/*\n+ * enable other interrupt\n+ */\n+static inline void\n+igc_intr_other_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_interrupt *intr =\n+\t\tIGC_DEV_PRIVATE_TO_INTR(dev->data->dev_private);\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\n+\tif (rte_intr_allow_others(intr_handle) &&\n+\t\tdev->data->dev_conf.intr_conf.lsc != 0) {\n+\t\tIGC_WRITE_REG(hw, IGC_EIMS, 1 << IGC_MSIX_OTHER_INTR_VEC);\n+\t}\n+\n+\tIGC_WRITE_REG(hw, IGC_IMS, intr->mask);\n+\tIGC_WRITE_FLUSH(hw);\n+}\n+\n+/*\n+ * It reads ICR and gets interrupt causes, check it and set a bit flag\n+ * to update link status.\n+ */\n+static void\n+eth_igc_interrupt_get_status(struct rte_eth_dev *dev)\n+{\n+\tuint32_t icr;\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_interrupt *intr =\n+\t\tIGC_DEV_PRIVATE_TO_INTR(dev->data->dev_private);\n+\n+\t/* read-on-clear nic registers here */\n+\ticr = IGC_READ_REG(hw, IGC_ICR);\n+\n+\tintr->flags = 0;\n+\tif (icr & IGC_ICR_LSC)\n+\t\tintr->flags |= IGC_FLAG_NEED_LINK_UPDATE;\n+}\n+\n+/* return 0 means link status changed, -1 means not changed */\n+static int\n+eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct rte_eth_link link;\n+\tint link_check, count;\n+\n+\tlink_check = 0;\n+\thw->mac.get_link_status = 1;\n+\n+\t/* possible wait-to-complete in up to 9 seconds */\n+\tfor (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {\n+\t\t/* Read the real link 
status */\n+\t\tswitch (hw->phy.media_type) {\n+\t\tcase igc_media_type_copper:\n+\t\t\t/* Do the work to read phy */\n+\t\t\tigc_check_for_link(hw);\n+\t\t\tlink_check = !hw->mac.get_link_status;\n+\t\t\tbreak;\n+\n+\t\tcase igc_media_type_fiber:\n+\t\t\tigc_check_for_link(hw);\n+\t\t\tlink_check = (IGC_READ_REG(hw, IGC_STATUS) &\n+\t\t\t\t      IGC_STATUS_LU);\n+\t\t\tbreak;\n+\n+\t\tcase igc_media_type_internal_serdes:\n+\t\t\tigc_check_for_link(hw);\n+\t\t\tlink_check = hw->mac.serdes_has_link;\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\t\tif (link_check || wait_to_complete == 0)\n+\t\t\tbreak;\n+\t\trte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);\n+\t}\n+\tmemset(&link, 0, sizeof(link));\n+\n+\t/* Now we check if a transition has happened */\n+\tif (link_check) {\n+\t\tuint16_t duplex, speed;\n+\t\thw->mac.ops.get_link_up_info(hw, &speed, &duplex);\n+\t\tlink.link_duplex = (duplex == FULL_DUPLEX) ?\n+\t\t\t\tETH_LINK_FULL_DUPLEX :\n+\t\t\t\tETH_LINK_HALF_DUPLEX;\n+\t\tlink.link_speed = speed;\n+\t\tlink.link_status = ETH_LINK_UP;\n+\t\tlink.link_autoneg = !(dev->data->dev_conf.link_speeds &\n+\t\t\t\tETH_LINK_SPEED_FIXED);\n+\n+\t\tif (speed == SPEED_2500) {\n+\t\t\tuint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);\n+\t\t\tif ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {\n+\t\t\t\ttipg &= ~IGC_TIPG_IPGT_MASK;\n+\t\t\t\ttipg |= 0x0b;\n+\t\t\t\tIGC_WRITE_REG(hw, IGC_TIPG, tipg);\n+\t\t\t}\n+\t\t}\n+\t} else if (!link_check) {\n+\t\tlink.link_speed = 0;\n+\t\tlink.link_duplex = ETH_LINK_HALF_DUPLEX;\n+\t\tlink.link_status = ETH_LINK_DOWN;\n+\t\tlink.link_autoneg = ETH_LINK_FIXED;\n+\t}\n+\n+\treturn rte_eth_linkstatus_set(dev, &link);\n+}\n+\n+/*\n+ * It executes link_update after knowing an interrupt is present.\n+ */\n+static void\n+eth_igc_interrupt_action(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_interrupt *intr =\n+\t\tIGC_DEV_PRIVATE_TO_INTR(dev->data->dev_private);\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n+\tstruct rte_eth_link link;\n+\tint ret;\n+\n+\tif (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {\n+\t\tintr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;\n+\n+\t\t/* set get_link_status to check register later */\n+\t\tret = eth_igc_link_update(dev, 0);\n+\n+\t\t/* check if link has changed */\n+\t\tif (ret < 0)\n+\t\t\treturn;\n+\n+\t\trte_eth_linkstatus_get(dev, &link);\n+\t\tif (link.link_status)\n+\t\t\tPMD_INIT_LOG(INFO,\n+\t\t\t\t\" Port %d: Link Up - speed %u Mbps - %s\",\n+\t\t\t\tdev->data->port_id,\n+\t\t\t\t(unsigned int)link.link_speed,\n+\t\t\t\tlink.link_duplex == ETH_LINK_FULL_DUPLEX ?\n+\t\t\t\t\"full-duplex\" : \"half-duplex\");\n+\t\telse\n+\t\t\tPMD_INIT_LOG(INFO, \" Port %d: Link Down\",\n+\t\t\t\tdev->data->port_id);\n+\n+\t\tPMD_INIT_LOG(DEBUG, \"PCI Address: %04d:%02d:%02d:%d\",\n+\t\t\t\tpci_dev->addr.domain,\n+\t\t\t\tpci_dev->addr.bus,\n+\t\t\t\tpci_dev->addr.devid,\n+\t\t\t\tpci_dev->addr.function);\n+\t\t_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,\n+\t\t\t\tNULL);\n+\t}\n+}\n+\n+/*\n+ * Interrupt handler which shall be registered at first.\n+ *\n+ * @handle\n+ *  Pointer to interrupt handle.\n+ * @param\n+ *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ */\n+static void\n+eth_igc_interrupt_handler(void *param)\n+{\n+\tstruct rte_eth_dev *dev = (struct rte_eth_dev *)param;\n+\n+\teth_igc_interrupt_get_status(dev);\n+\teth_igc_interrupt_action(dev);\n+}\n+\n+static void igc_read_queue_stats_register(struct rte_eth_dev *dev);\n+\n+/*\n+ * Update the queue status every IGC_ALARM_INTERVAL time.\n+ * 
@param\n+ *  The address of parameter (struct rte_eth_dev *) regsitered before.\n+ */\n+static void\n+igc_update_queue_stats_handler(void *param)\n+{\n+\tstruct rte_eth_dev *dev = param;\n+\tigc_read_queue_stats_register(dev);\n+\trte_eal_alarm_set(IGC_ALARM_INTERVAL,\n+\t\t\tigc_update_queue_stats_handler, dev);\n+}\n+\n+/*\n+ *  This routine disables all traffic on the adapter by issuing a\n+ *  global reset on the MAC.\n+ */\n+static void\n+eth_igc_stop(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_adapter *adapter =\n+\t\tIGC_DEV_PRIVATE(dev->data->dev_private);\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n+\tstruct rte_eth_link link;\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\n+\tadapter->stopped = 1;\n+\n+\t/* disable receive and transmit */\n+\teth_igc_rxtx_control(dev, false);\n+\n+\t/* disable all MSI-X interrupts */\n+\tIGC_WRITE_REG(hw, IGC_EIMC, 0x1f);\n+\tIGC_WRITE_FLUSH(hw);\n+\n+\tigc_intr_other_disable(dev);\n+\n+\trte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);\n+\n+\t/* disable intr eventfd mapping */\n+\trte_intr_disable(intr_handle);\n+\n+\tigc_reset_hw(hw);\n+\n+\t/* disable all wake up */\n+\tIGC_WRITE_REG(hw, IGC_WUC, 0);\n+\n+\t/* Set bit for Go Link disconnect */\n+\tif (hw->mac.type >= igc_82580) {\n+\t\tuint32_t phpm_reg;\n+\n+\t\tphpm_reg = IGC_READ_REG(hw, IGC_82580_PHY_POWER_MGMT);\n+\t\tphpm_reg |= IGC_82580_PM_GO_LINKD;\n+\t\tIGC_WRITE_REG(hw, IGC_82580_PHY_POWER_MGMT, phpm_reg);\n+\t}\n+\n+\t/* Power down the phy. Needed to make the link go Down */\n+\teth_igc_set_link_down(dev);\n+\n+\tigc_dev_clear_queues(dev);\n+\n+\t/* clear the recorded link status */\n+\tmemset(&link, 0, sizeof(link));\n+\trte_eth_linkstatus_set(dev, &link);\n+\n+\tif (!rte_intr_allow_others(intr_handle))\n+\t\t/* resume to the default handler */\n+\t\trte_intr_callback_register(intr_handle,\n+\t\t\t\t\t   eth_igc_interrupt_handler,\n+\t\t\t\t\t   (void *)dev);\n+\n+\t/* Clean datapath event and queue/vec mapping */\n+\trte_intr_efd_disable(intr_handle);\n+\tif (intr_handle->intr_vec != NULL) {\n+\t\trte_free(intr_handle->intr_vec);\n+\t\tintr_handle->intr_vec = NULL;\n+\t}\n+}\n+\n+/*\n+ * write interrupt vector allocation register\n+ * @hw\n+ *  board private structure\n+ * @queue_index\n+ *  queue index, valid 0,1,2,3\n+ * @tx\n+ *  tx:1, rx:0\n+ * @msix_vector\n+ *  msix-vector, valid 0,1,2,3,4\n+ */\n+static void\n+igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,\n+\t\tbool tx, uint8_t msix_vector)\n+{\n+\tuint8_t offset = 0;\n+\tuint8_t reg_index = queue_index >> 1;\n+\tuint32_t val;\n+\n+\t/*\n+\t * IVAR(0)\n+\t * bit31...24\tbit23...16\tbit15...8\tbit7...0\n+\t * TX1\t\tRX1\t\tTX0\t\tRX0\n+\t *\n+\t * IVAR(1)\n+\t * bit31...24\tbit23...16\tbit15...8\tbit7...0\n+\t * TX3\t\tRX3\t\tTX2\t\tRX2\n+\t */\n+\n+\tif (tx)\n+\t\toffset = 8;\n+\n+\tif (queue_index & 1)\n+\t\toffset += 16;\n+\n+\tval = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);\n+\n+\t/* clear bits */\n+\tval &= ~((uint32_t)0xFF << offset);\n+\n+\t/* write vector and valid bit */\n+\tval |= (msix_vector | IGC_IVAR_VALID) << offset;\n+\n+\tIGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);\n+}\n+\n+/* Sets up the hardware to generate MSI-X interrupts properly\n+ * @hw\n+ *  board private structure\n+ */\n+static void\n+eth_igc_configure_msix_intr(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct rte_pci_device *pci_dev = 
RTE_ETH_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\n+\tuint32_t regval, intr_mask;\n+\tuint32_t vec = IGC_MISC_VEC_ID;\n+\tuint32_t base = IGC_MISC_VEC_ID;\n+\tuint32_t misc_shift = 0;\n+\tint i;\n+\n+\t/* won't configure msix register if no mapping is done\n+\t * between intr vector and event fd\n+\t */\n+\tif (!rte_intr_dp_is_en(intr_handle))\n+\t\treturn;\n+\n+\tif (rte_intr_allow_others(intr_handle)) {\n+\t\tbase = IGC_RX_VEC_START;\n+\t\tvec = base;\n+\t\tmisc_shift = 1;\n+\t}\n+\n+\t/* turn on MSI-X capability first */\n+\tIGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |\n+\t\t\t\tIGC_GPIE_PBA | IGC_GPIE_EIAME |\n+\t\t\t\tIGC_GPIE_NSICR);\n+\tintr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<\n+\t\tmisc_shift;\n+\n+\tif (dev->data->dev_conf.intr_conf.lsc != 0)\n+\t\tintr_mask |= (1 << IGC_MSIX_OTHER_INTR_VEC);\n+\n+\t/* enable msix auto-clear */\n+\tregval = IGC_READ_REG(hw, IGC_EIAC);\n+\tIGC_WRITE_REG(hw, IGC_EIAC, regval | intr_mask);\n+\n+\t/* set other cause interrupt vector */\n+\tregval = (IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8;\n+\tIGC_WRITE_REG(hw, IGC_IVAR_MISC, regval);\n+\n+\t/* disable auto-mask */\n+\tIGC_WRITE_REG(hw, IGC_EIAM, 0);\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tigc_write_ivar(hw, i, 0, vec);\n+\t\tintr_handle->intr_vec[i] = vec;\n+\t\tif (vec < base + intr_handle->nb_efd - 1)\n+\t\t\tvec++;\n+\t}\n+\n+\tIGC_WRITE_FLUSH(hw);\n+}\n+\n+static int\n+eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_vfta *shadow_vfta =\n+\t\tIGC_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);\n+\tuint32_t vfta;\n+\tuint32_t vid_idx;\n+\tuint32_t vid_bit;\n+\n+\tvid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;\n+\tvid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);\n+\tvfta = IGC_READ_REG_ARRAY(hw, IGC_VFTA, vid_idx);\n+\tif (on)\n+\t\tvfta |= vid_bit;\n+\telse\n+\t\tvfta &= ~vid_bit;\n+\tIGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);\n+\n+\t/* update local VFTA copy */\n+\tshadow_vfta->vfta[vid_idx] = vfta;\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,\n+\t\t      enum rte_vlan_type vlan_type,\n+\t\t      uint16_t tpid)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t reg_val, qinq;\n+\n+\tqinq = IGC_READ_REG(hw, IGC_CTRL_EXT);\n+\tqinq &= IGC_CTRL_EXT_EXT_VLAN;\n+\n+\t/* only outer TPID of double VLAN can be configured*/\n+\tif (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {\n+\t\treg_val = IGC_READ_REG(hw, IGC_VET);\n+\t\treg_val = (reg_val & (~IGC_VET_EXT)) |\n+\t\t\t((uint32_t)tpid << IGC_VET_EXT_SHIFT);\n+\t\tIGC_WRITE_REG(hw, IGC_VET, reg_val);\n+\n+\t\treturn 0;\n+\t}\n+\n+\t/* all other TPID values are read-only*/\n+\tPMD_DRV_LOG(ERR, \"Not supported\");\n+\treturn -ENOTSUP;\n+}\n+\n+static void\n+igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t reg_val;\n+\n+\t/* Filter Table Disable */\n+\treg_val = IGC_READ_REG(hw, IGC_RCTL);\n+\treg_val &= ~(IGC_RCTL_CFIEN | IGC_RCTL_VFE);\n+\n+\tIGC_WRITE_REG(hw, IGC_RCTL, reg_val);\n+}\n+\n+static void\n+igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_vfta *shadow_vfta =\n+\t\tIGC_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);\n+\tuint32_t 
reg_val;\n+\tint i;\n+\n+\t/* Filter Table Enable, CFI not used for packet acceptance */\n+\treg_val = IGC_READ_REG(hw, IGC_RCTL);\n+\treg_val &= ~IGC_RCTL_CFIEN;\n+\treg_val |= IGC_RCTL_VFE;\n+\tIGC_WRITE_REG(hw, IGC_RCTL, reg_val);\n+\n+\t/* restore VFTA table */\n+\tfor (i = 0; i < IGC_VFTA_SIZE; i++)\n+\t\tIGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);\n+}\n+\n+static void\n+igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t reg_val;\n+\n+\t/* VLAN Mode Disable */\n+\treg_val = IGC_READ_REG(hw, IGC_CTRL);\n+\treg_val &= ~IGC_CTRL_VME;\n+\tIGC_WRITE_REG(hw, IGC_CTRL, reg_val);\n+}\n+\n+static void\n+igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t reg_val;\n+\n+\t/* VLAN Mode Enable */\n+\treg_val = IGC_READ_REG(hw, IGC_CTRL);\n+\treg_val |= IGC_CTRL_VME;\n+\tIGC_WRITE_REG(hw, IGC_CTRL, reg_val);\n+}\n+\n+static void\n+igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t reg_val;\n+\n+\t/* CTRL_EXT: Extended VLAN */\n+\treg_val = IGC_READ_REG(hw, IGC_CTRL_EXT);\n+\treg_val &= ~IGC_CTRL_EXT_EXTEND_VLAN;\n+\tIGC_WRITE_REG(hw, IGC_CTRL_EXT, reg_val);\n+\n+\t/* Update maximum packet length */\n+\tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)\n+\t\tIGC_WRITE_REG(hw, IGC_RLPML,\n+\t\t\tdev->data->dev_conf.rxmode.max_rx_pkt_len +\n+\t\t\t\t\t\tVLAN_TAG_SIZE);\n+}\n+\n+static void\n+igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t reg_val;\n+\n+\t/* CTRL_EXT: Extended VLAN */\n+\treg_val = IGC_READ_REG(hw, IGC_CTRL_EXT);\n+\treg_val |= IGC_CTRL_EXT_EXTEND_VLAN;\n+\tIGC_WRITE_REG(hw, IGC_CTRL_EXT, reg_val);\n+\n+\t/* Update maximum packet length */\n+\tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)\n+\t\tIGC_WRITE_REG(hw, IGC_RLPML,\n+\t\t\tdev->data->dev_conf.rxmode.max_rx_pkt_len +\n+\t\t\t\t\t\t2 * VLAN_TAG_SIZE);\n+}\n+\n+static int\n+eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)\n+{\n+\tstruct rte_eth_rxmode *rxmode;\n+\n+\trxmode = &dev->data->dev_conf.rxmode;\n+\tif (mask & ETH_VLAN_STRIP_MASK) {\n+\t\tif (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)\n+\t\t\tigc_vlan_hw_strip_enable(dev);\n+\t\telse\n+\t\t\tigc_vlan_hw_strip_disable(dev);\n+\t}\n+\n+\tif (mask & ETH_VLAN_FILTER_MASK) {\n+\t\tif (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)\n+\t\t\tigc_vlan_hw_filter_enable(dev);\n+\t\telse\n+\t\t\tigc_vlan_hw_filter_disable(dev);\n+\t}\n+\n+\tif (mask & ETH_VLAN_EXTEND_MASK) {\n+\t\tif (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)\n+\t\t\tigc_vlan_hw_extend_enable(dev);\n+\t\telse\n+\t\t\tigc_vlan_hw_extend_disable(dev);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * It enables the interrupt mask and then enable the interrupt.\n+ *\n+ * @dev\n+ *  Pointer to struct rte_eth_dev.\n+ * @on\n+ *  Enable or Disable\n+ */\n+static void\n+igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)\n+{\n+\tstruct igc_interrupt *intr =\n+\t\tIGC_DEV_PRIVATE_TO_INTR(dev->data->dev_private);\n+\n+\tif (on)\n+\t\tintr->mask |= IGC_ICR_LSC;\n+\telse\n+\t\tintr->mask &= ~IGC_ICR_LSC;\n+}\n+\n+/*\n+ * It enables the interrupt.\n+ * It will be called once only during nic initialized.\n+ */\n+static void\n+igc_rxq_interrupt_setup(struct rte_eth_dev 
*dev)\n+{\n+\tuint32_t mask;\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tint misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;\n+\n+\t/* won't configure msix register if no mapping is done\n+\t * between intr vector and event fd\n+\t */\n+\tif (!rte_intr_dp_is_en(intr_handle))\n+\t\treturn;\n+\n+\tmask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift;\n+\tIGC_WRITE_REG(hw, IGC_EIMS, mask);\n+}\n+\n+/*\n+ *  Get hardware rx-buffer size.\n+ */\n+static inline int\n+igc_get_rx_buffer_size(struct igc_hw *hw)\n+{\n+\treturn (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;\n+}\n+\n+/*\n+ * free all rx/tx queues.\n+ */\n+static void\n+igc_dev_free_queues(struct rte_eth_dev *dev)\n+{\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\teth_igc_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tdev->data->rx_queues[i] = NULL;\n+\t}\n+\tdev->data->nb_rx_queues = 0;\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\teth_igc_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tdev->data->tx_queues[i] = NULL;\n+\t}\n+\tdev->data->nb_tx_queues = 0;\n+}\n+\n+/*\n+ * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.\n+ * For ASF and Pass Through versions of f/w this means\n+ * that the driver is loaded.\n+ */\n+static void\n+igc_hw_control_acquire(struct igc_hw *hw)\n+{\n+\tuint32_t ctrl_ext;\n+\n+\t/* Let firmware know the driver has taken over */\n+\tctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);\n+\tIGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);\n+}\n+\n+/*\n+ * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.\n+ * For ASF and Pass Through versions of f/w this means that the\n+ * driver is no longer loaded.\n+ */\n+static void\n+igc_hw_control_release(struct igc_hw *hw)\n+{\n+\tuint32_t ctrl_ext;\n+\n+\t/* Let firmware taken over control of h/w */\n+\tctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);\n+\tIGC_WRITE_REG(hw, IGC_CTRL_EXT,\n+\t\t\tctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);\n+}\n+\n+static int\n+igc_hardware_init(struct igc_hw *hw)\n+{\n+\tuint32_t rx_buf_size;\n+\tint diag;\n+\n+\t/* Let the firmware know the OS is in control */\n+\tigc_hw_control_acquire(hw);\n+\n+\t/*\n+\t * These parameters control the automatic generation (Tx) and\n+\t * response (Rx) to Ethernet PAUSE frames.\n+\t * - High water mark should allow for at least two standard size (1518)\n+\t *   frames to be received after sending an XOFF.\n+\t * - Low water mark works best when it is very near the high water mark.\n+\t *   This allows the receiver to restart by sending XON when it has\n+\t *   drained a bit. Here we use an arbitrary value of 1500 which will\n+\t *   restart after one full frame is pulled from the buffer. 
There\n+\t *   could be several smaller frames in the buffer and if so they will\n+\t *   not trigger the XON until their total number reduces the buffer\n+\t *   by 1500.\n+\t */\n+\trx_buf_size = igc_get_rx_buffer_size(hw);\n+\thw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);\n+\thw->fc.low_water = hw->fc.high_water - 1500;\n+\thw->fc.pause_time = IGC_FC_PAUSE_TIME;\n+\thw->fc.send_xon = 1;\n+\n+\t/* Set Flow control, use the tunable location if sane */\n+\tif (igc_fc_setting != igc_fc_none && igc_fc_setting < 4)\n+\t\thw->fc.requested_mode = igc_fc_setting;\n+\telse\n+\t\thw->fc.requested_mode = igc_fc_none;\n+\n+\t/* Issue a global reset */\n+\tigc_reset_hw(hw);\n+\n+\t/* disable all wake up */\n+\tIGC_WRITE_REG(hw, IGC_WUC, 0);\n+\n+\tdiag = igc_init_hw(hw);\n+\tif (diag < 0)\n+\t\treturn diag;\n+\n+\t/* write vlan ethernet type */\n+\tIGC_WRITE_REG(hw, IGC_VET,\n+\t\t\tRTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);\n+\n+\tigc_get_phy_info(hw);\n+\tigc_check_for_link(hw);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_start(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_adapter *adapter =\n+\t\tIGC_DEV_PRIVATE(dev->data->dev_private);\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tuint32_t *speeds;\n+\tuint32_t intr_vector = 0;\n+\tint ret, mask;\n+\tint num_speeds;\n+\tbool autoneg;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\t/* disable all MSI-X interrupts */\n+\tIGC_WRITE_REG(hw, IGC_EIMC, 0x1f);\n+\tIGC_WRITE_FLUSH(hw);\n+\n+\t/* disable uio/vfio intr/eventfd mapping */\n+\trte_intr_disable(intr_handle);\n+\n+\t/* Power up the phy. Needed to make the link go Up */\n+\teth_igc_set_link_up(dev);\n+\n+\t/* Put the address into the Receive Address Array */\n+\tigc_rar_set(hw, hw->mac.addr, 0);\n+\n+\t/* Initialize the hardware */\n+\tif (igc_hardware_init(hw)) {\n+\t\tPMD_INIT_LOG(ERR, \"Unable to initialize the hardware\");\n+\t\treturn -EIO;\n+\t}\n+\tadapter->stopped = 0;\n+\n+\tIGC_WRITE_REG(hw, IGC_VET,\n+\t\t\tRTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);\n+\n+\t/* check and configure queue intr-vector mapping */\n+\tif ((rte_intr_cap_multiple(intr_handle) ||\n+\t     !RTE_ETH_DEV_SRIOV(dev).active) &&\n+\t    dev->data->dev_conf.intr_conf.rxq != 0) {\n+\t\tintr_vector = dev->data->nb_rx_queues;\n+\t\tif (rte_intr_efd_enable(intr_handle, intr_vector))\n+\t\t\treturn -1;\n+\t}\n+\n+\tif (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {\n+\t\tintr_handle->intr_vec =\n+\t\t\trte_zmalloc(\"intr_vec\",\n+\t\t\t\t    dev->data->nb_rx_queues * sizeof(int), 0);\n+\t\tif (intr_handle->intr_vec == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate %d rx_queues\"\n+\t\t\t\t     \" intr_vec\", dev->data->nb_rx_queues);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\t/* confiugre msix for rx interrupt */\n+\teth_igc_configure_msix_intr(dev);\n+\n+\tigc_tx_init(dev);\n+\n+\t/* This can fail when allocating mbufs for descriptor rings */\n+\tret = igc_rx_init(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Unable to initialize RX hardware\");\n+\t\tigc_dev_clear_queues(dev);\n+\t\treturn ret;\n+\t}\n+\n+\tigc_clear_hw_cntrs_base_generic(hw);\n+\n+\t/*\n+\t * VLAN Offload Settings\n+\t */\n+\tmask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |\n+\t\t\tETH_VLAN_EXTEND_MASK;\n+\tret = eth_igc_vlan_offload_set(dev, mask);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Unable to set vlan 
offload\");\n+\t\tigc_dev_clear_queues(dev);\n+\t\treturn ret;\n+\t}\n+\n+\t/* Setup link speed and duplex */\n+\tspeeds = &dev->data->dev_conf.link_speeds;\n+\tif (*speeds == ETH_LINK_SPEED_AUTONEG) {\n+\t\thw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;\n+\t\thw->mac.autoneg = 1;\n+\t} else {\n+\t\tnum_speeds = 0;\n+\t\tautoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;\n+\n+\t\t/* Reset */\n+\t\thw->phy.autoneg_advertised = 0;\n+\n+\t\tif (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |\n+\t\t\t\tETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |\n+\t\t\t\tETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |\n+\t\t\t\tETH_LINK_SPEED_FIXED)) {\n+\t\t\tnum_speeds = -1;\n+\t\t\tgoto error_invalid_config;\n+\t\t}\n+\t\tif (*speeds & ETH_LINK_SPEED_10M_HD) {\n+\t\t\thw->phy.autoneg_advertised |= ADVERTISE_10_HALF;\n+\t\t\tnum_speeds++;\n+\t\t}\n+\t\tif (*speeds & ETH_LINK_SPEED_10M) {\n+\t\t\thw->phy.autoneg_advertised |= ADVERTISE_10_FULL;\n+\t\t\tnum_speeds++;\n+\t\t}\n+\t\tif (*speeds & ETH_LINK_SPEED_100M_HD) {\n+\t\t\thw->phy.autoneg_advertised |= ADVERTISE_100_HALF;\n+\t\t\tnum_speeds++;\n+\t\t}\n+\t\tif (*speeds & ETH_LINK_SPEED_100M) {\n+\t\t\thw->phy.autoneg_advertised |= ADVERTISE_100_FULL;\n+\t\t\tnum_speeds++;\n+\t\t}\n+\t\tif (*speeds & ETH_LINK_SPEED_1G) {\n+\t\t\thw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;\n+\t\t\tnum_speeds++;\n+\t\t}\n+\t\tif (*speeds & ETH_LINK_SPEED_2_5G) {\n+\t\t\thw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;\n+\t\t\tnum_speeds++;\n+\t\t}\n+\t\tif (num_speeds == 0 || (!autoneg && num_speeds > 1))\n+\t\t\tgoto error_invalid_config;\n+\n+\t\t/* Set/reset the mac.autoneg based on the link speed,\n+\t\t * fixed or not\n+\t\t */\n+\t\tif (!autoneg) {\n+\t\t\thw->mac.autoneg = 0;\n+\t\t\thw->mac.forced_speed_duplex =\n+\t\t\t\t\thw->phy.autoneg_advertised;\n+\t\t} else {\n+\t\t\thw->mac.autoneg = 1;\n+\t\t}\n+\t}\n+\n+\tigc_setup_link(hw);\n+\n+\tif (rte_intr_allow_others(intr_handle)) {\n+\t\t/* check if lsc interrupt is enabled */\n+\t\tif (dev->data->dev_conf.intr_conf.lsc != 0)\n+\t\t\tigc_lsc_interrupt_setup(dev, TRUE);\n+\t\telse\n+\t\t\tigc_lsc_interrupt_setup(dev, FALSE);\n+\t} else {\n+\t\trte_intr_callback_unregister(intr_handle,\n+\t\t\t\t\t     eth_igc_interrupt_handler,\n+\t\t\t\t\t     (void *)dev);\n+\t\tif (dev->data->dev_conf.intr_conf.lsc != 0)\n+\t\t\tPMD_INIT_LOG(INFO, \"lsc won't enable because of\"\n+\t\t\t\t     \" no intr multiplex\");\n+\t}\n+\n+\t/* enable uio/vfio intr/eventfd mapping */\n+\trte_intr_enable(intr_handle);\n+\n+\trte_eal_alarm_set(IGC_ALARM_INTERVAL,\n+\t\t\tigc_update_queue_stats_handler, dev);\n+\n+\t/* check if rxq interrupt is enabled */\n+\tif (dev->data->dev_conf.intr_conf.rxq != 0 &&\n+\t\t\trte_intr_dp_is_en(intr_handle))\n+\t\tigc_rxq_interrupt_setup(dev);\n+\n+\t/* resume enabled intr since hw reset */\n+\tigc_intr_other_enable(dev);\n+\n+\teth_igc_rxtx_control(dev, true);\n+\teth_igc_link_update(dev, 0);\n+\n+\tPMD_INIT_LOG(DEBUG, \"<<\");\n+\treturn 0;\n+\n+error_invalid_config:\n+\tPMD_INIT_LOG(ERR, \"Invalid advertised speeds (%u) for port %u\",\n+\t\t     dev->data->dev_conf.link_speeds, dev->data->port_id);\n+\tigc_dev_clear_queues(dev);\n+\treturn -EINVAL;\n+}\n+\n+static int\n+igc_reset_swfw_lock(struct igc_hw *hw)\n+{\n+\tint ret_val;\n+\n+\t/*\n+\t * Do mac ops initialization manually here, since we will need\n+\t * some function pointers set by this call.\n+\t */\n+\tret_val = igc_init_mac_params(hw);\n+\tif (ret_val)\n+\t\treturn ret_val;\n+\n+\t/*\n+\t * SMBI lock should not fail in 
this early stage. If this is the case,\n+\t * it is due to an improper exit of the application.\n+\t * So force the release of the faulty lock.\n+\t */\n+\tif (igc_get_hw_semaphore_generic(hw) < 0)\n+\t\tPMD_DRV_LOG(DEBUG, \"SMBI lock released\");\n+\n+\tigc_put_hw_semaphore_generic(hw);\n+\n+\tif (hw->mac.ops.acquire_swfw_sync != NULL) {\n+\t\tuint16_t mask;\n+\n+\t\t/*\n+\t\t * Phy lock should not fail in this early stage.\n+\t\t * If this is the case, it is due to an improper exit of the\n+\t\t * application. So force the release of the faulty lock.\n+\t\t */\n+\t\tmask = IGC_SWFW_PHY0_SM;\n+\t\tif (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {\n+\t\t\tPMD_DRV_LOG(DEBUG, \"SWFW phy%d lock released\",\n+\t\t\t\t    hw->bus.func);\n+\t\t}\n+\t\thw->mac.ops.release_swfw_sync(hw, mask);\n+\n+\t\t/*\n+\t\t * This one is more tricky since it is common to all ports; but\n+\t\t * swfw_sync retries last long enough (1s) to be almost sure\n+\t\t * that if lock can not be taken it is due to an improper lock\n+\t\t * of the semaphore.\n+\t\t */\n+\t\tmask = IGC_SWFW_EEP_SM;\n+\t\tif (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)\n+\t\t\tPMD_DRV_LOG(DEBUG, \"SWFW common locks released\");\n+\n+\t\thw->mac.ops.release_swfw_sync(hw, mask);\n+\t}\n+\n+\treturn IGC_SUCCESS;\n+}\n+\n+static void\n+eth_igc_close(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_adapter *adapter =\n+\t\tIGC_DEV_PRIVATE(dev->data->dev_private);\n+\tint retry = 0;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (!adapter->stopped)\n+\t\teth_igc_stop(dev);\n+\n+\t/* disable all MSI-X interrupts */\n+\tIGC_WRITE_REG(hw, IGC_EIMC, 0x1f);\n+\tIGC_WRITE_FLUSH(hw);\n+\n+\tigc_intr_other_disable(dev);\n+\tdo {\n+\t\tint ret = rte_intr_callback_unregister(intr_handle,\n+\t\t\t\teth_igc_interrupt_handler, dev);\n+\t\tif (ret >= 0 || ret == -ENOENT || ret == -EINVAL)\n+\t\t\tbreak;\n+\n+\t\tPMD_INIT_LOG(ERR, \"intr callback unregister failed: %d\", ret);\n+\t\tDELAY(200 * 1000); /* delay 200ms */\n+\t} while (retry++ < 5);\n+\n+\tigc_phy_hw_reset(hw);\n+\tigc_hw_control_release(hw);\n+\tigc_dev_free_queues(dev);\n+\n+\tdev->dev_ops = NULL;\n+\tdev->rx_pkt_burst = NULL;\n+\tdev->tx_pkt_burst = NULL;\n+\n+\t/* Reset any pending lock */\n+\tigc_reset_swfw_lock(hw);\n+}\n+\n+static void\n+igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\thw->vendor_id = pci_dev->id.vendor_id;\n+\thw->device_id = pci_dev->id.device_id;\n+\thw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;\n+\thw->subsystem_device_id = pci_dev->id.subsystem_device_id;\n+}\n+\n+static int\n+eth_igc_dev_init(struct rte_eth_dev *eth_dev)\n+{\n+\tint error = 0;\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);\n+\tstruct igc_adapter *adapter =\n+\t\tIGC_DEV_PRIVATE(eth_dev->data->dev_private);\n+\n+\teth_dev->dev_ops = &eth_igc_ops;\n+\n+\t/*\n+\t * for secondary processes, we don't initialize any further as primary\n+\t * has already done this work. 
Only check we don't need a different\n+\t * RX function.\n+\t */\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn 0;\n+\n+\trte_eth_copy_pci_info(eth_dev, pci_dev);\n+\n+\thw->back = pci_dev;\n+\thw->hw_addr = (void *)pci_dev->mem_resource[0].addr;\n+\n+\tigc_identify_hardware(eth_dev, pci_dev);\n+\tif (igc_setup_init_funcs(hw, FALSE) != IGC_SUCCESS) {\n+\t\terror = -EIO;\n+\t\tgoto err_late;\n+\t}\n+\n+\tigc_get_bus_info(hw);\n+\n+\t/* Reset any pending lock */\n+\tif (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {\n+\t\terror = -EIO;\n+\t\tgoto err_late;\n+\t}\n+\n+\t/* Finish initialization */\n+\tif (igc_setup_init_funcs(hw, TRUE) != IGC_SUCCESS) {\n+\t\terror = -EIO;\n+\t\tgoto err_late;\n+\t}\n+\n+\thw->mac.autoneg = 1;\n+\thw->phy.autoneg_wait_to_complete = 0;\n+\thw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;\n+\n+\t/* Copper options */\n+\tif (hw->phy.media_type == igc_media_type_copper) {\n+\t\thw->phy.mdix = 0; /* AUTO_ALL_MODES */\n+\t\thw->phy.disable_polarity_correction = 0;\n+\t\thw->phy.ms_type = igc_ms_hw_default;\n+\t}\n+\n+\t/*\n+\t * Start from a known state, this is important in reading the nvm\n+\t * and mac from that.\n+\t */\n+\tigc_reset_hw(hw);\n+\n+\t/* Make sure we have a good EEPROM before we read from it */\n+\tif (igc_validate_nvm_checksum(hw) < 0) {\n+\t\t/*\n+\t\t * Some PCI-E parts fail the first check due to\n+\t\t * the link being in sleep state, call it again,\n+\t\t * if it fails a second time its a real issue.\n+\t\t */\n+\t\tif (igc_validate_nvm_checksum(hw) < 0) {\n+\t\t\tPMD_INIT_LOG(ERR, \"EEPROM checksum invalid\");\n+\t\t\terror = -EIO;\n+\t\t\tgoto err_late;\n+\t\t}\n+\t}\n+\n+\t/* Read the permanent MAC address out of the EEPROM */\n+\tif (igc_read_mac_addr(hw) != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"EEPROM error while reading MAC address\");\n+\t\terror = -EIO;\n+\t\tgoto err_late;\n+\t}\n+\n+\t/* Allocate memory for storing MAC addresses */\n+\teth_dev->data->mac_addrs = rte_zmalloc(\"e1000\",\n+\t\tRTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);\n+\tif (eth_dev->data->mac_addrs == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate %d bytes needed to \"\n+\t\t\t\t\t\t\"store MAC addresses\",\n+\t\t\t\tRTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);\n+\t\terror = -ENOMEM;\n+\t\tgoto err_late;\n+\t}\n+\n+\t/* Copy the permanent MAC address */\n+\trte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,\n+\t\t\t&eth_dev->data->mac_addrs[0]);\n+\n+\t/* Now initialize the hardware */\n+\tif (igc_hardware_init(hw) != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Hardware initialization failed\");\n+\t\trte_free(eth_dev->data->mac_addrs);\n+\t\teth_dev->data->mac_addrs = NULL;\n+\t\terror = -ENODEV;\n+\t\tgoto err_late;\n+\t}\n+\n+\t/* Pass the information to the rte_eth_dev_close() that it should also\n+\t * release the private port resources.\n+\t */\n+\teth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;\n+\n+\thw->mac.get_link_status = 1;\n+\tadapter->stopped = 0;\n+\n+\t/* Indicate SOL/IDER usage */\n+\tif (igc_check_reset_block(hw) < 0)\n+\t\tPMD_INIT_LOG(ERR, \"PHY reset is blocked due to\"\n+\t\t\t\t\" SOL/IDER session.\");\n+\n+\tPMD_INIT_LOG(DEBUG, \"port_id %d vendorID=0x%x deviceID=0x%x\",\n+\t\t\teth_dev->data->port_id, pci_dev->id.vendor_id,\n+\t\t\tpci_dev->id.device_id);\n+\n+\trte_intr_callback_register(&pci_dev->intr_handle,\n+\t\t\teth_igc_interrupt_handler, (void *)eth_dev);\n+\n+\t/* enable uio/vfio intr/eventfd mapping */\n+\trte_intr_enable(&pci_dev->intr_handle);\n+\n+\t/* enable support intr 
*/\n+\tigc_intr_other_enable(eth_dev);\n+\treturn 0;\n+\n+err_late:\n+\tigc_hw_control_release(hw);\n+\treturn error;\n+}\n+\n+static int\n+eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)\n+{\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn -EPERM;\n+\n+\teth_igc_close(eth_dev);\n+\treturn 0;\n+}\n+\n+/*\n+ * Reset PF device.\n+ */\n+static int\n+eth_igc_reset(struct rte_eth_dev *dev)\n+{\n+\tint ret;\n+\n+\tret = eth_igc_dev_uninit(dev);\n+\tif (ret)\n+\t\treturn ret;\n+\n+\tret = eth_igc_dev_init(dev);\n+\n+\treturn ret;\n+}\n+\n+static int\n+eth_igc_promiscuous_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t rctl;\n+\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\trctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_promiscuous_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t rctl;\n+\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\trctl &= (~IGC_RCTL_UPE);\n+\tif (dev->data->all_multicast == 1)\n+\t\trctl |= IGC_RCTL_MPE;\n+\telse\n+\t\trctl &= (~IGC_RCTL_MPE);\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_allmulticast_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t rctl;\n+\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\trctl |= IGC_RCTL_MPE;\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_allmulticast_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t rctl;\n+\n+\tif (dev->data->promiscuous == 1)\n+\t\treturn 0;\t/* must remain in all_multicast mode */\n+\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\trctl &= (~IGC_RCTL_MPE);\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\treturn 0;\n+}\n+\n+static void\n+igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)\n+{\n+\tint pause_frames;\n+\n+\tuint64_t old_gprc  = stats->gprc;\n+\tuint64_t old_gptc  = stats->gptc;\n+\tuint64_t old_tpr   = stats->tpr;\n+\tuint64_t old_tpt   = stats->tpt;\n+\tuint64_t old_rpthc = stats->rpthc;\n+\tuint64_t old_hgptc = stats->hgptc;\n+\n+\tstats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);\n+\tstats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);\n+\tstats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);\n+\tstats->mpc += IGC_READ_REG(hw, IGC_MPC);\n+\tstats->scc += IGC_READ_REG(hw, IGC_SCC);\n+\tstats->ecol += IGC_READ_REG(hw, IGC_ECOL);\n+\n+\tstats->mcc += IGC_READ_REG(hw, IGC_MCC);\n+\tstats->latecol += IGC_READ_REG(hw, IGC_LATECOL);\n+\tstats->colc += IGC_READ_REG(hw, IGC_COLC);\n+\n+\tstats->dc += IGC_READ_REG(hw, IGC_DC);\n+\tstats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);\n+\tstats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);\n+\tstats->rlec += IGC_READ_REG(hw, IGC_RLEC);\n+\tstats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);\n+\tstats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);\n+\n+\t/*\n+\t * For watchdog management we need to know if we have been\n+\t * paused during the last interval, so capture that here.\n+\t */\n+\tpause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);\n+\tstats->xoffrxc += pause_frames;\n+\tstats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);\n+\tstats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);\n+\tstats->prc64 += IGC_READ_REG(hw, IGC_PRC64);\n+\tstats->prc127 += IGC_READ_REG(hw, IGC_PRC127);\n+\tstats->prc255 += 
IGC_READ_REG(hw, IGC_PRC255);\n+\tstats->prc511 += IGC_READ_REG(hw, IGC_PRC511);\n+\tstats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);\n+\tstats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);\n+\tstats->gprc += IGC_READ_REG(hw, IGC_GPRC);\n+\tstats->bprc += IGC_READ_REG(hw, IGC_BPRC);\n+\tstats->mprc += IGC_READ_REG(hw, IGC_MPRC);\n+\tstats->gptc += IGC_READ_REG(hw, IGC_GPTC);\n+\n+\t/* For the 64-bit byte counters the low dword must be read first. */\n+\t/* Both registers clear on the read of the high dword */\n+\n+\t/* Workaround CRC bytes included in size, take away 4 bytes/packet */\n+\tstats->gorc += IGC_READ_REG(hw, IGC_GORCL);\n+\tstats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);\n+\tstats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;\n+\tstats->gotc += IGC_READ_REG(hw, IGC_GOTCL);\n+\tstats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);\n+\tstats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;\n+\n+\tstats->rnbc += IGC_READ_REG(hw, IGC_RNBC);\n+\tstats->ruc += IGC_READ_REG(hw, IGC_RUC);\n+\tstats->rfc += IGC_READ_REG(hw, IGC_RFC);\n+\tstats->roc += IGC_READ_REG(hw, IGC_ROC);\n+\tstats->rjc += IGC_READ_REG(hw, IGC_RJC);\n+\n+\tstats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);\n+\tstats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);\n+\tstats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);\n+\tstats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);\n+\tstats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);\n+\tstats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);\n+\tstats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);\n+\n+\tstats->tpr += IGC_READ_REG(hw, IGC_TPR);\n+\tstats->tpt += IGC_READ_REG(hw, IGC_TPT);\n+\n+\tstats->tor += IGC_READ_REG(hw, IGC_TORL);\n+\tstats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);\n+\tstats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;\n+\tstats->tot += IGC_READ_REG(hw, IGC_TOTL);\n+\tstats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);\n+\tstats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;\n+\n+\tstats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);\n+\tstats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);\n+\tstats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);\n+\tstats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);\n+\tstats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);\n+\tstats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);\n+\tstats->mptc += IGC_READ_REG(hw, IGC_MPTC);\n+\tstats->bptc += IGC_READ_REG(hw, IGC_BPTC);\n+\tstats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);\n+\n+\tstats->iac += IGC_READ_REG(hw, IGC_IAC);\n+\tstats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);\n+\tstats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);\n+\tstats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);\n+\n+\t/* Host to Card Statistics */\n+\tstats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);\n+\tstats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);\n+\tstats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;\n+\tstats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);\n+\tstats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);\n+\tstats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;\n+\tstats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);\n+}\n+\n+/*\n+ * Write 0 to all queue status registers\n+ */\n+static void\n+igc_reset_queue_stats_register(struct igc_hw *hw)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {\n+\t\tIGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_PQGORC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_RQDPC(i), 
0);\n+\t\tIGC_WRITE_REG(hw, IGC_TQDPC(i), 0);\n+\t}\n+}\n+\n+static void\n+igc_read_queue_stats_register(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_hw_queue_stats *queue_stats =\n+\t\tIGC_DEV_PRIVATE_TO_QUEUE_STATS(dev->data->dev_private);\n+\tint i;\n+\n+\t/*\n+\t * This register is not cleared on read. Furthermore, the register wraps\n+\t * around back to 0x00000000 on the next increment when reaching a value\n+\t * of 0xFFFFFFFF and then continues normal count operation.\n+\t */\n+\tfor (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {\n+\t\tunion {\n+\t\t\tu64 ddword;\n+\t\t\tu32 dword[2];\n+\t\t} value;\n+\t\tu32 tmp;\n+\n+\t\t/*\n+\t\t * Read the register first; if the value is smaller than the\n+\t\t * previous read, the register has wrapped around, so increment\n+\t\t * the high 4 bytes by 1 and replace the low 4 bytes with the\n+\t\t * new value.\n+\t\t */\n+\t\ttmp = IGC_READ_REG(hw, IGC_PQGPRC(i));\n+\t\tvalue.ddword = queue_stats->pqgprc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->pqgprc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_PQGPTC(i));\n+\t\tvalue.ddword = queue_stats->pqgptc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->pqgptc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_PQGORC(i));\n+\t\tvalue.ddword = queue_stats->pqgorc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->pqgorc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_PQGOTC(i));\n+\t\tvalue.ddword = queue_stats->pqgotc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->pqgotc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_PQMPRC(i));\n+\t\tvalue.ddword = queue_stats->pqmprc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->pqmprc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_RQDPC(i));\n+\t\tvalue.ddword = queue_stats->rqdpc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->rqdpc[i] = value.ddword;\n+\n+\t\ttmp = IGC_READ_REG(hw, IGC_TQDPC(i));\n+\t\tvalue.ddword = queue_stats->tqdpc[i];\n+\t\tif (value.dword[U32_0_IN_U64] > tmp)\n+\t\t\tvalue.dword[U32_1_IN_U64]++;\n+\t\tvalue.dword[U32_0_IN_U64] = tmp;\n+\t\tqueue_stats->tqdpc[i] = value.ddword;\n+\t}\n+}\n+\n+static int\n+eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_hw_stats *stats =\n+\t\tIGC_DEV_PRIVATE_TO_STATS(dev->data->dev_private);\n+\tstruct igc_hw_queue_stats *queue_stats =\n+\t\tIGC_DEV_PRIVATE_TO_QUEUE_STATS(dev->data->dev_private);\n+\tint i;\n+\n+\t/*\n+\t * Cancel status handler since it will read the queue status registers\n+\t */\n+\trte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);\n+\n+\t/* Read status register */\n+\tigc_read_queue_stats_register(dev);\n+\tigc_read_stats_registers(hw, stats);\n+\n+\tif (rte_stats == NULL) {\n+\t\t/* Restart queue status handler */\n+\t\trte_eal_alarm_set(IGC_ALARM_INTERVAL,\n+\t\t\t\tigc_update_queue_stats_handler, 
dev);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Rx Errors */\n+\trte_stats->imissed = stats->mpc;\n+\trte_stats->ierrors = stats->crcerrs +\n+\t\t\tstats->rlec + stats->ruc + stats->roc +\n+\t\t\tstats->rxerrc + stats->algnerrc;\n+\n+\t/* Tx Errors */\n+\trte_stats->oerrors = stats->ecol + stats->latecol;\n+\n+\trte_stats->ipackets = stats->gprc;\n+\trte_stats->opackets = stats->gptc;\n+\trte_stats->ibytes   = stats->gorc;\n+\trte_stats->obytes   = stats->gotc;\n+\n+\tRTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < IGC_QUEUE_PAIRS_NUM);\n+\n+\tfor (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {\n+\t\trte_stats->q_ipackets[i] = queue_stats->pqgprc[i];\n+\t\trte_stats->q_opackets[i] = queue_stats->pqgptc[i];\n+\t\trte_stats->q_ibytes[i] = queue_stats->pqgorc[i];\n+\t\trte_stats->q_obytes[i] = queue_stats->pqgotc[i];\n+\t\trte_stats->q_errors[i] = queue_stats->rqdpc[i];\n+\t}\n+\n+\t/* Restart queue status handler */\n+\trte_eal_alarm_set(IGC_ALARM_INTERVAL,\n+\t\t\tigc_update_queue_stats_handler, dev);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,\n+\t\t   unsigned int n)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_hw_stats *hw_stats =\n+\t\t\tIGC_DEV_PRIVATE_TO_STATS(dev->data->dev_private);\n+\tunsigned int i;\n+\n+\tigc_read_stats_registers(hw, hw_stats);\n+\n+\tif (n < IGC_NB_XSTATS)\n+\t\treturn IGC_NB_XSTATS;\n+\n+\t/* If this is a reset xstats is NULL, and we have cleared the\n+\t * registers by reading them.\n+\t */\n+\tif (!xstats)\n+\t\treturn 0;\n+\n+\t/* Extended stats */\n+\tfor (i = 0; i < IGC_NB_XSTATS; i++) {\n+\t\txstats[i].id = i;\n+\t\txstats[i].value = *(uint64_t *)(((char *)hw_stats) +\n+\t\t\trte_igc_stats_strings[i].offset);\n+\t}\n+\n+\treturn IGC_NB_XSTATS;\n+}\n+\n+static int\n+eth_igc_xstats_reset(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_hw_stats *hw_stats =\n+\t\t\tIGC_DEV_PRIVATE_TO_STATS(dev->data->dev_private);\n+\tstruct igc_hw_queue_stats *queue_stats =\n+\t\tIGC_DEV_PRIVATE_TO_QUEUE_STATS(dev->data->dev_private);\n+\n+\trte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);\n+\n+\t/* HW registers are cleared on read */\n+\tigc_reset_queue_stats_register(hw);\n+\tigc_read_stats_registers(hw, hw_stats);\n+\n+\t/* Reset software totals */\n+\tmemset(hw_stats, 0, sizeof(*hw_stats));\n+\tmemset(queue_stats, 0, sizeof(*queue_stats));\n+\trte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,\n+\t\t\tdev);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,\n+\tstruct rte_eth_xstat_name *xstats_names, unsigned int size)\n+{\n+\tunsigned int i;\n+\n+\tif (xstats_names == NULL)\n+\t\treturn IGC_NB_XSTATS;\n+\n+\tif (size < IGC_NB_XSTATS) {\n+\t\tPMD_DRV_LOG(ERR, \"not enough buffers!\");\n+\t\treturn IGC_NB_XSTATS;\n+\t}\n+\n+\tfor (i = 0; i < IGC_NB_XSTATS; i++) {\n+\t\tstrlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,\n+\t\t\tsizeof(xstats_names[i].name));\n+\t}\n+\n+\treturn IGC_NB_XSTATS;\n+}\n+\n+static int\n+eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,\n+\t\tstruct rte_eth_xstat_name *xstats_names, const uint64_t *ids,\n+\t\tunsigned int limit)\n+{\n+\tunsigned int i;\n+\n+\tif (!ids)\n+\t\treturn eth_igc_xstats_get_names(dev, xstats_names, limit);\n+\n+\tfor (i = 0; i < limit; i++) {\n+\t\tif (ids[i] >= IGC_NB_XSTATS) {\n+\t\t\tPMD_INIT_LOG(ERR, \"id value isn't valid\");\n+\t\t\treturn 
-EINVAL;\n+\t\t}\n+\t\tstrlcpy(xstats_names[i].name,\n+\t\t\t\trte_igc_stats_strings[i].name,\n+\t\t\t\tsizeof(xstats_names[i].name));\n+\t}\n+\treturn limit;\n+}\n+\n+static int\n+eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,\n+\t\tuint64_t *values, unsigned int n)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_hw_stats *hw_stats =\n+\t\tIGC_DEV_PRIVATE_TO_STATS(dev->data->dev_private);\n+\tunsigned int i;\n+\n+\tigc_read_stats_registers(hw, hw_stats);\n+\n+\tif (!ids) {\n+\t\tif (n < IGC_NB_XSTATS)\n+\t\t\treturn IGC_NB_XSTATS;\n+\n+\t\t/* If this is a reset xstats is NULL, and we have cleared the\n+\t\t * registers by reading them.\n+\t\t */\n+\t\tif (!values)\n+\t\t\treturn 0;\n+\n+\t\t/* Extended stats */\n+\t\tfor (i = 0; i < IGC_NB_XSTATS; i++)\n+\t\t\tvalues[i] = *(uint64_t *)(((char *)hw_stats) +\n+\t\t\t\t\trte_igc_stats_strings[i].offset);\n+\n+\t\treturn IGC_NB_XSTATS;\n+\n+\t} else {\n+\t\tfor (i = 0; i < n; i++) {\n+\t\t\tif (ids[i] >= IGC_NB_XSTATS) {\n+\t\t\t\tPMD_INIT_LOG(ERR, \"id value isn't valid\");\n+\t\t\t\treturn -EINVAL;\n+\t\t\t}\n+\t\t\tvalues[i] = *(uint64_t *)(((char *)hw_stats) +\n+\t\t\t\t\trte_igc_stats_strings[ids[i]].offset);\n+\t\t}\n+\t\treturn n;\n+\t}\n+}\n+\n+static int\n+eth_igc_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,\n+\t\tuint16_t queue_id, uint8_t stat_idx, __rte_unused uint8_t is_rx)\n+{\n+\tif (queue_id == stat_idx)\n+\t\treturn 0;\n+\treturn -EIO;\n+}\n+\n+static int\n+eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,\n+\t\t       size_t fw_size)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_fw_version fw;\n+\tint ret;\n+\n+\tigc_get_fw_version(hw, &fw);\n+\n+\t/* if option rom is valid, display its version too */\n+\tif (fw.or_valid) {\n+\t\tret = snprintf(fw_version, fw_size,\n+\t\t\t \"%d.%d, 0x%08x, %d.%d.%d\",\n+\t\t\t fw.eep_major, fw.eep_minor, fw.etrack_id,\n+\t\t\t fw.or_major, fw.or_build, fw.or_patch);\n+\t/* no option rom */\n+\t} else {\n+\t\tif (fw.etrack_id != 0X0000) {\n+\t\t\tret = snprintf(fw_version, fw_size,\n+\t\t\t\t \"%d.%d, 0x%08x\",\n+\t\t\t\t fw.eep_major, fw.eep_minor,\n+\t\t\t\t fw.etrack_id);\n+\t\t} else {\n+\t\t\tret = snprintf(fw_version, fw_size,\n+\t\t\t\t \"%d.%d.%d\",\n+\t\t\t\t fw.eep_major, fw.eep_minor,\n+\t\t\t\t fw.eep_build);\n+\t\t}\n+\t}\n+\n+\tret += 1; /* add the size of '\\0' */\n+\tif (fw_size < (u32)ret)\n+\t\treturn ret;\n+\telse\n+\t\treturn 0;\n+}\n+\n+static int\n+eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\tdev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */\n+\tdev_info->max_rx_pktlen  = 0x2600; /* See RLPML register. 
*/\n+\tdev_info->max_mac_addrs = hw->mac.rar_entry_count;\n+\tdev_info->rx_queue_offload_capa = IGC_RX_OFFLOAD_ALL;\n+\tdev_info->rx_offload_capa = dev_info->rx_queue_offload_capa;\n+\tdev_info->tx_queue_offload_capa = IGC_TX_OFFLOAD_ALL;\n+\tdev_info->tx_offload_capa = dev_info->tx_queue_offload_capa;\n+\n+\tdev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;\n+\tdev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;\n+\tdev_info->max_vmdq_pools = 0;\n+\n+\tdev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);\n+\tdev_info->reta_size = ETH_RSS_RETA_SIZE_128;\n+\tdev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;\n+\n+\tdev_info->default_rxconf = (struct rte_eth_rxconf) {\n+\t\t.rx_thresh = {\n+\t\t\t.pthresh = IGC_DEFAULT_RX_PTHRESH,\n+\t\t\t.hthresh = IGC_DEFAULT_RX_HTHRESH,\n+\t\t\t.wthresh = IGC_DEFAULT_RX_WTHRESH,\n+\t\t},\n+\t\t.rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,\n+\t\t.rx_drop_en = 0,\n+\t\t.offloads = 0,\n+\t};\n+\n+\tdev_info->default_txconf = (struct rte_eth_txconf) {\n+\t\t.tx_thresh = {\n+\t\t\t.pthresh = IGC_DEFAULT_TX_PTHRESH,\n+\t\t\t.hthresh = IGC_DEFAULT_TX_HTHRESH,\n+\t\t\t.wthresh = IGC_DEFAULT_TX_WTHRESH,\n+\t\t},\n+\t\t.offloads = 0,\n+\t};\n+\n+\tdev_info->rx_desc_lim = rx_desc_lim;\n+\tdev_info->tx_desc_lim = tx_desc_lim;\n+\n+\tdev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |\n+\t\t\tETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |\n+\t\t\tETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;\n+\n+\tdev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;\n+\tdev_info->min_mtu = RTE_ETHER_MIN_MTU;\n+\n+\treturn 0;\n+}\n+\n+static const uint32_t *\n+eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)\n+{\n+\tstatic const uint32_t ptypes[] = {\n+\t\t/* refers to rx_desc_pkt_info_to_pkt_type() */\n+\t\tRTE_PTYPE_L2_ETHER,\n+\t\tRTE_PTYPE_L3_IPV4,\n+\t\tRTE_PTYPE_L3_IPV4_EXT,\n+\t\tRTE_PTYPE_L3_IPV6,\n+\t\tRTE_PTYPE_L3_IPV6_EXT,\n+\t\tRTE_PTYPE_L4_TCP,\n+\t\tRTE_PTYPE_L4_UDP,\n+\t\tRTE_PTYPE_L4_SCTP,\n+\t\tRTE_PTYPE_TUNNEL_IP,\n+\t\tRTE_PTYPE_INNER_L3_IPV6,\n+\t\tRTE_PTYPE_INNER_L3_IPV6_EXT,\n+\t\tRTE_PTYPE_INNER_L4_TCP,\n+\t\tRTE_PTYPE_INNER_L4_UDP,\n+\t\tRTE_PTYPE_UNKNOWN\n+\t};\n+\n+\treturn ptypes;\n+}\n+\n+static int\n+eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n+{\n+\tuint32_t rctl;\n+\tstruct igc_hw *hw;\n+\tstruct rte_eth_dev_info dev_info;\n+\tuint32_t frame_size = mtu + IGC_ETH_OVERHEAD;\n+\n+\thw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\teth_igc_infos_get(dev, &dev_info);\n+\n+\t/* check that mtu is within the allowed range */\n+\tif (mtu < RTE_ETHER_MIN_MTU ||\n+\t\t\tframe_size > dev_info.max_rx_pktlen)\n+\t\treturn -EINVAL;\n+\n+\t/*\n+\t * refuse mtu that requires the support of scattered packets when\n+\t * this feature has not been enabled before.\n+\t */\n+\tif (!dev->data->scattered_rx &&\n+\t    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)\n+\t\treturn -EINVAL;\n+\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\n+\t/* switch to jumbo mode if needed */\n+\tif (frame_size > RTE_ETHER_MAX_LEN) {\n+\t\tdev->data->dev_conf.rxmode.offloads |=\n+\t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n+\t\trctl |= IGC_RCTL_LPE;\n+\t} else {\n+\t\tdev->data->dev_conf.rxmode.offloads &=\n+\t\t\t~DEV_RX_OFFLOAD_JUMBO_FRAME;\n+\t\trctl &= ~IGC_RCTL_LPE;\n+\t}\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\n+\t/* update max frame size */\n+\tdev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;\n+\n+\tIGC_WRITE_REG(hw, IGC_RLPML,\n+\t\t\tdev->data->dev_conf.rxmode.max_rx_pkt_len);\n+\n+\treturn 
0;\n+}\n+\n+static int\n+eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tuint32_t vec = IGC_MISC_VEC_ID;\n+\n+\tif (rte_intr_allow_others(intr_handle))\n+\t\tvec = IGC_RX_VEC_START;\n+\n+\tuint32_t mask = 1 << (queue_id + vec);\n+\n+\tIGC_WRITE_REG(hw, IGC_EIMC, mask);\n+\tIGC_WRITE_FLUSH(hw);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tuint32_t vec = IGC_MISC_VEC_ID;\n+\n+\tif (rte_intr_allow_others(intr_handle))\n+\t\tvec = IGC_RX_VEC_START;\n+\n+\tuint32_t mask = 1 << (queue_id + vec);\n+\n+\tIGC_WRITE_REG(hw, IGC_EIMS, mask);\n+\tIGC_WRITE_FLUSH(hw);\n+\n+\trte_intr_enable(intr_handle);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_led_on(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw;\n+\n+\thw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\treturn igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;\n+}\n+\n+static int\n+eth_igc_led_off(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw;\n+\n+\thw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\treturn igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;\n+}\n+\n+static int\n+eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)\n+{\n+\tstruct igc_hw *hw;\n+\tuint32_t ctrl;\n+\tint tx_pause;\n+\tint rx_pause;\n+\n+\thw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tfc_conf->pause_time = hw->fc.pause_time;\n+\tfc_conf->high_water = hw->fc.high_water;\n+\tfc_conf->low_water = hw->fc.low_water;\n+\tfc_conf->send_xon = hw->fc.send_xon;\n+\tfc_conf->autoneg = hw->mac.autoneg;\n+\n+\t/*\n+\t * Return rx_pause and tx_pause status according to actual setting of\n+\t * the TFCE and RFCE bits in the CTRL register.\n+\t */\n+\tctrl = IGC_READ_REG(hw, IGC_CTRL);\n+\tif (ctrl & IGC_CTRL_TFCE)\n+\t\ttx_pause = 1;\n+\telse\n+\t\ttx_pause = 0;\n+\n+\tif (ctrl & IGC_CTRL_RFCE)\n+\t\trx_pause = 1;\n+\telse\n+\t\trx_pause = 0;\n+\n+\tif (rx_pause && tx_pause)\n+\t\tfc_conf->mode = RTE_FC_FULL;\n+\telse if (rx_pause)\n+\t\tfc_conf->mode = RTE_FC_RX_PAUSE;\n+\telse if (tx_pause)\n+\t\tfc_conf->mode = RTE_FC_TX_PAUSE;\n+\telse\n+\t\tfc_conf->mode = RTE_FC_NONE;\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tenum igc_fc_mode rte_fcmode_2_igc_fcmode[] = {\n+\t\tigc_fc_none,\n+\t\tigc_fc_rx_pause,\n+\t\tigc_fc_tx_pause,\n+\t\tigc_fc_full\n+\t};\n+\tuint32_t rx_buf_size;\n+\tuint32_t max_high_water;\n+\tuint32_t rctl;\n+\tint err;\n+\n+\tif (fc_conf->autoneg != hw->mac.autoneg)\n+\t\treturn -ENOTSUP;\n+\n+\trx_buf_size = igc_get_rx_buffer_size(hw);\n+\tPMD_INIT_LOG(DEBUG, \"Rx packet buffer size = 0x%x\", rx_buf_size);\n+\n+\t/* At least reserve one Ethernet frame for watermark */\n+\tmax_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;\n+\tif (fc_conf->high_water > max_high_water ||\n+\t\tfc_conf->high_water < fc_conf->low_water) {\n+\t\tPMD_INIT_LOG(ERR, \"e1000 incorrect high/low water value\");\n+\t\tPMD_INIT_LOG(ERR, \"high water must <=  0x%x\", 
max_high_water);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\thw->fc.requested_mode = rte_fcmode_2_igc_fcmode[fc_conf->mode];\n+\thw->fc.pause_time     = fc_conf->pause_time;\n+\thw->fc.high_water     = fc_conf->high_water;\n+\thw->fc.low_water      = fc_conf->low_water;\n+\thw->fc.send_xon\t      = fc_conf->send_xon;\n+\n+\terr = igc_setup_link_generic(hw);\n+\tif (err == IGC_SUCCESS) {\n+\t\t/**\n+\t\t * check if we want to forward MAC frames - driver doesn't have\n+\t\t * native capability to do that, so we'll write the registers\n+\t\t * ourselves\n+\t\t **/\n+\t\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\n+\t\t/* set or clear MFLCN.PMCF bit depending on configuration */\n+\t\tif (fc_conf->mac_ctrl_frame_fwd != 0)\n+\t\t\trctl |= IGC_RCTL_PMCF;\n+\t\telse\n+\t\t\trctl &= ~IGC_RCTL_PMCF;\n+\n+\t\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\t\tIGC_WRITE_FLUSH(hw);\n+\n+\t\treturn 0;\n+\t}\n+\n+\tPMD_INIT_LOG(ERR, \"igc_setup_link_generic = 0x%x\", err);\n+\treturn -EIO;\n+}\n+\n+static int\n+eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,\n+\t\tuint32_t index, uint32_t pool)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\tigc_rar_set(hw, mac_addr->addr_bytes, index);\n+\tRTE_SET_USED(pool);\n+\treturn 0;\n+}\n+\n+static void\n+eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)\n+{\n+\tuint8_t addr[RTE_ETHER_ADDR_LEN];\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\tmemset(addr, 0, sizeof(addr));\n+\tigc_rar_set(hw, addr, index);\n+}\n+\n+static int\n+eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_ether_addr *addr)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tigc_rar_set(hw, addr->addr_bytes, 0);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_rss_reta_update(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_rss_reta_entry64 *reta_conf,\n+\t\t\tuint16_t reta_size)\n+{\n+\tuint8_t i, j, mask;\n+\tuint16_t idx, shift;\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\tif (reta_size != ETH_RSS_RETA_SIZE_128) {\n+\t\tPMD_DRV_LOG(ERR, \"The size of hash lookup table configured \"\n+\t\t\t\"(%d) doesn't match the number hardware can supported \"\n+\t\t\t\"(%d)\", reta_size, ETH_RSS_RETA_SIZE_128);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* each register 4 entry */\n+\tfor (i = 0; i < reta_size; i += 4) {\n+\t\tunion igc_reta {\n+\t\t\tuint32_t dword;\n+\t\t\tuint8_t  bytes[4];\n+\t\t} reta, r;\n+\n+\t\tidx = i / RTE_RETA_GROUP_SIZE;\n+\t\tshift = i % RTE_RETA_GROUP_SIZE;\n+\t\tmask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xf);\n+\t\tif (!mask)\n+\t\t\tcontinue;\n+\n+\t\tif (mask == 0xf)\n+\t\t\tr.dword = 0;\n+\t\telse\n+\t\t\tr.dword = IGC_READ_REG_LE_VALUE(hw,\n+\t\t\t\t\tIGC_RETA(i >> 2));\n+\n+\t\tfor (j = 0; j < 4; j++) {\n+\t\t\tif (mask & (0x1 << j))\n+\t\t\t\treta.bytes[j] =\n+\t\t\t\t\t(uint8_t)reta_conf[idx].reta[shift + j];\n+\t\t\telse\n+\t\t\t\treta.bytes[j] = r.bytes[j];\n+\t\t}\n+\t\tIGC_WRITE_REG_LE_VALUE(hw, IGC_RETA(i >> 2), reta.dword);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_rss_reta_query(struct rte_eth_dev *dev,\n+\t\t       struct rte_eth_rss_reta_entry64 *reta_conf,\n+\t\t       uint16_t reta_size)\n+{\n+\tuint8_t i, j, mask;\n+\tuint16_t idx, shift;\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\tif (reta_size != ETH_RSS_RETA_SIZE_128) {\n+\t\tPMD_DRV_LOG(ERR, \"The size of hash lookup table configured \"\n+\t\t\t\"(%d) doesn't match the number hardware can supported 
\"\n+\t\t\t\"(%d)\", reta_size, ETH_RSS_RETA_SIZE_128);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* each register 4 entry */\n+\tfor (i = 0; i < reta_size; i += 4) {\n+\t\tunion igc_reta {\n+\t\t\tuint32_t dword;\n+\t\t\tuint8_t  bytes[4];\n+\t\t} reta;\n+\n+\t\tidx = i / RTE_RETA_GROUP_SIZE;\n+\t\tshift = i % RTE_RETA_GROUP_SIZE;\n+\t\tmask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xf);\n+\t\tif (!mask)\n+\t\t\tcontinue;\n+\n+\t\treta.dword = IGC_READ_REG_LE_VALUE(hw, IGC_RETA(i >> 2));\n+\t\tfor (j = 0; j < 4; j++) {\n+\t\t\tif (mask & (0x1 << j))\n+\t\t\t\treta_conf[idx].reta[shift + j] = reta.bytes[j];\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_rss_hash_update(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_rss_conf *rss_conf)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint64_t rss_hf = rss_conf->rss_hf & IGC_RSS_OFFLOAD_ALL;\n+\tuint32_t mrqc = IGC_READ_REG(hw, IGC_MRQC);\n+\n+\t/*\n+\t * Before changing anything, first check that the update RSS operation\n+\t * does not attempt to disable RSS, if RSS was enabled at\n+\t * initialization time, or does not attempt to enable RSS, if RSS was\n+\t * disabled at initialization time.\n+\t */\n+\tif (!(mrqc & IGC_MRQC_ENABLE_MASK)) { /* RSS disabled */\n+\t\tif (rss_hf != 0)\n+\t\t\treturn -(EINVAL);\n+\t\treturn 0; /* Nothing to do */\n+\t}\n+\n+\t/* RSS enabled */\n+\tif (rss_hf == 0) /* Disable RSS */\n+\t\treturn -(EINVAL);\n+\tigc_hw_rss_hash_set(hw, rss_conf);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_eth_rss_conf *rss_conf)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint8_t *hash_key = rss_conf->rss_key;\n+\tuint32_t mrqc;\n+\tuint64_t rss_hf;\n+\n+\tif (hash_key != NULL) {\n+\t\tint i;\n+\t\tfor (i = 0; i < 10; i++) {\n+\t\t\tuint32_t rss_key =\n+\t\t\t\tIGC_READ_REG_ARRAY(hw, IGC_RSSRK(0), i);\n+\t\t\thash_key[(i * 4)] = rss_key & 0x000000FF;\n+\t\t\thash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;\n+\t\t\thash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;\n+\t\t\thash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;\n+\t\t}\n+\t}\n+\n+\t/* Get RSS functions configured in MRQC register */\n+\tmrqc = IGC_READ_REG(hw, IGC_MRQC);\n+\tif ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */\n+\t\trss_conf->rss_hf = 0;\n+\t\treturn 0;\n+\t}\n+\n+\trss_hf = 0;\n+\tif (mrqc & IGC_MRQC_RSS_FIELD_IPV4)\n+\t\trss_hf |= ETH_RSS_IPV4;\n+\tif (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)\n+\t\trss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;\n+\tif (mrqc & IGC_MRQC_RSS_FIELD_IPV6)\n+\t\trss_hf |= ETH_RSS_IPV6;\n+\tif (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)\n+\t\trss_hf |= ETH_RSS_IPV6_EX;\n+\tif (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)\n+\t\trss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;\n+\tif (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)\n+\t\trss_hf |= ETH_RSS_IPV6_TCP_EX;\n+\tif (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)\n+\t\trss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;\n+\tif (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)\n+\t\trss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;\n+\tif (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)\n+\t\trss_hf |= ETH_RSS_IPV6_UDP_EX;\n+\n+\trss_conf->rss_hf = rss_hf;\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n+\t\t\t struct rte_ether_addr *mc_addr_set,\n+\t\t\t uint32_t nb_mc_addr)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tigc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_get_eeprom_length(struct 
rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\t/* Return unit is byte count */\n+\treturn hw->nvm.word_size * 2;\n+}\n+\n+static int\n+eth_igc_timesync_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t tsync_ctl;\n+\tuint32_t tsauxc;\n+\tuint16_t i;\n+\n+\tIGC_WRITE_REG(hw, IGC_TIMINCA, 0x0);\n+\n+\t/* enable SYSTIM 0. */\n+\ttsauxc = IGC_READ_REG(hw, IGC_TSAUXC);\n+\ttsauxc &= ~IGC_DISABLE_TIMER0_MSK;\n+\tIGC_WRITE_REG(hw, IGC_TSAUXC, tsauxc);\n+\n+\t/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */\n+\tIGC_WRITE_REG(hw, IGC_ETQF(IGC_ETQF_FILTER_1588),\n+\t\t\tRTE_ETHER_TYPE_1588 | IGC_ETQF_FILTER_ENABLE |\n+\t\t\tIGC_ETQF_1588);\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++)\n+\t\tigc_enable_rx_queue_timestamp(dev, i);\n+\n+\t/* Enable timestamping of received all packets. */\n+\ttsync_ctl = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL |\n+\t\t\tIGC_TSYNCRXCTL_SYNSIG_PHY;\n+\tIGC_WRITE_REG(hw, IGC_TSYNCRXCTL, tsync_ctl);\n+\n+\t/* Enable timestamping of transmitted PTP packets. */\n+\ttsync_ctl = IGC_READ_REG(hw, IGC_TSYNCTXCTL);\n+\ttsync_ctl |= IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG;\n+\tIGC_WRITE_REG(hw, IGC_TSYNCTXCTL, tsync_ctl);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_timesync_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t tsync_ctl;\n+\n+\t/* Disable timestamping of transmitted PTP packets. */\n+\ttsync_ctl = IGC_READ_REG(hw, IGC_TSYNCTXCTL);\n+\ttsync_ctl &= ~IGC_TSYNCTXCTL_ENABLED;\n+\tIGC_WRITE_REG(hw, IGC_TSYNCTXCTL, tsync_ctl);\n+\n+\t/* Disable timestamping of received PTP packets. */\n+\ttsync_ctl = IGC_READ_REG(hw, IGC_TSYNCRXCTL);\n+\ttsync_ctl &= ~IGC_TSYNCRXCTL_ENABLED;\n+\tIGC_WRITE_REG(hw, IGC_TSYNCRXCTL, tsync_ctl);\n+\n+\t/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. 
*/\n+\tIGC_WRITE_REG(hw, IGC_ETQF(IGC_ETQF_FILTER_1588), 0);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\t/*\n+\t * SYSTIML stores ns and SYSTIMH stores seconds.\n+\t * When the SYSTIML register is read, the upper 32 bits are latched\n+\t * into the SYSTIMH shadow register for a coherent context.\n+\t */\n+\tts->tv_nsec = IGC_READ_REG(hw, IGC_SYSTIML);\n+\tts->tv_sec = IGC_READ_REG(hw, IGC_SYSTIMH);\n+\treturn 0;\n+}\n+\n+/*\n+ * Write the register and wait for it to be auto-cleared\n+ * @hw: hardware structure\n+ * @reg_val: register value (see register definition)\n+ */\n+static inline int\n+igc_timesync_adjust_reg(struct igc_hw *hw, uint32_t reg_val)\n+{\n+\tconst int try_max = 10000;\n+\tint i;\n+\n+\tIGC_WRITE_REG(hw, IGC_TIMADJL, reg_val);\n+\n+\t/*\n+\t * Every 3.25ns for 2.5G and 8ns for 1G, the TIMADJ\n+\t * register will be auto-cleared by the hardware and\n+\t * the SYSTIM registers are updated.\n+\t */\n+\tfor (i = 0; i < try_max; i++) {\n+\t\tif (IGC_READ_REG(hw, IGC_TIMADJL) == 0)\n+\t\t\tbreak;\n+\t}\n+\tif (i >= try_max) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Adjust failed, the NIC may be stopped.\");\n+\t\treturn -EIO;\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t adjust_sign = 0;\n+\n+\tif (delta == 0)\n+\t\treturn 0;\n+\n+\tif (IGC_READ_REG(hw, IGC_TIMADJL)) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Previous time adjustment has not\"\n+\t\t\t\t\" finished, try it later.\");\n+\t\treturn -EAGAIN;\n+\t}\n+\n+\tif (delta < 0) {\n+\t\tdelta = 0 - delta;\n+\t\tadjust_sign = IGC_TIMADJ_SIGN_MINUS;\n+\t}\n+\n+\tif (delta > IGC_TIMADJ_MAX) {\n+\t\t/* Adjust IGC_TIMADJ_MAX nanoseconds each time. */\n+\t\tuint32_t reg_val = IGC_TIMADJ_MAX |\n+\t\t\t\tIGC_TIMADJ_METH_SINGLE | adjust_sign;\n+\n+\t\tdo {\n+\t\t\tint ret = igc_timesync_adjust_reg(hw, reg_val);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t\tdelta -= IGC_TIMADJ_MAX;\n+\t\t} while (delta > IGC_TIMADJ_MAX);\n+\t}\n+\n+\treturn igc_timesync_adjust_reg(hw,\n+\t\t(uint32_t)delta | IGC_TIMADJ_METH_SINGLE | adjust_sign);\n+}\n+\n+static int\n+eth_igc_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct timespec now;\n+\n+\t/*\n+\t * It makes sense to set only the SYSTIMH register (the second units\n+\t * of the timer). Setting the sub-second units can be done by the\n+\t * \"Time adjust\" procedure.\n+\t */\n+\tIGC_WRITE_REG(hw, IGC_SYSTIMH, (uint32_t)ts->tv_sec);\n+\teth_igc_timesync_read_time(dev, &now);\n+\n+\treturn eth_igc_timesync_adjust_time(dev,\n+\t\t\trte_timespec_to_ns(ts) - rte_timespec_to_ns(&now));\n+}\n+\n+static int\n+eth_igc_timesync_read_tx_timestamp(struct rte_eth_dev *dev,\n+\t\t\t       struct timespec *timestamp)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t tsync_txctl;\n+\n+\ttsync_txctl = IGC_READ_REG(hw, IGC_TSYNCTXCTL);\n+\tif ((tsync_txctl & IGC_TSYNCTXCTL_VALID) == 0)\n+\t\treturn -EINVAL;\n+\n+\t/* TXSTMPL stores ns and TXSTMPH stores seconds. 
*/\n+\ttimestamp->tv_nsec = IGC_READ_REG(hw, IGC_TXSTMPL);\n+\ttimestamp->tv_sec = IGC_READ_REG(hw, IGC_TXSTMPH);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n+\tstruct rte_pci_device *pci_dev)\n+{\n+\treturn rte_eth_dev_pci_generic_probe(pci_dev,\n+\t\tsizeof(struct igc_adapter), eth_igc_dev_init);\n+}\n+\n+static int eth_igc_pci_remove(struct rte_pci_device *pci_dev __rte_unused)\n+{\n+\treturn rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);\n+}\n+\n+static struct rte_pci_driver rte_igc_pmd = {\n+\t.id_table = pci_id_igc_map,\n+\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,\n+\t.probe = eth_igc_pci_probe,\n+\t.remove = eth_igc_pci_remove,\n+};\n+\n+RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);\n+RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);\n+RTE_PMD_REGISTER_KMOD_DEP(net_igc, \"* igb_uio | uio_pci_generic | vfio-pci\");\ndiff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h\nnew file mode 100644\nindex 0000000..7f836ed\n--- /dev/null\n+++ b/drivers/net/igc/igc_ethdev.h\n@@ -0,0 +1,179 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2020 Intel Corporation\n+ */\n+\n+#ifndef _IGC_ETHDEV_H_\n+#define _IGC_ETHDEV_H_\n+\n+#include <rte_mbuf.h>\n+#include <rte_flow.h>\n+#include <rte_ethdev.h>\n+#include <rte_time.h>\n+\n+#include \"base/e1000_osdep.h\"\n+#include \"base/e1000_hw.h\"\n+#include \"base/e1000_i225.h\"\n+#include \"base/e1000_api.h\"\n+\n+#define IGC_INTEL_VENDOR_ID       0x8086\n+\n+/* need update link, bit flag */\n+#define IGC_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)\n+\n+/* VLAN filter table size */\n+#define IGC_VFTA_SIZE             128\n+\n+#define IGC_MISC_VEC_ID           RTE_INTR_VEC_ZERO_OFFSET\n+#define IGC_RX_VEC_START          RTE_INTR_VEC_RXTX_OFFSET\n+\n+/*\n+ * The overhead from MTU to max frame size.\n+ * Considering VLAN so a tag needs to be counted.\n+ */\n+#define IGC_ETH_OVERHEAD          (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \\\n+\t\t\t\t   VLAN_TAG_SIZE)\n+\n+#define IGC_ADVTXD_POPTS_TXSM      0x00000200 /* L4 Checksum offload request */\n+#define IGC_ADVTXD_POPTS_IXSM      0x00000100 /* IP Checksum offload request */\n+#define IGC_ADVTXD_TUCMD_L4T_RSV   0x00001800 /* L4 Packet TYPE of Reserved */\n+#define IGC_CTRL_EXT_EXTEND_VLAN   (1UL << 26)/* EXTENDED VLAN */\n+\n+#define IGC_TSYNCRXCTL_SYNSIG_PHY  (1UL << 10)\n+#define IGC_SRRCTL_TIMESTAMP_EN\t   (1UL << 30)\n+#define IGC_SRRCTL_TIME1_SHIFT\t   14\n+#define IGC_SRRCTL_TIME0_SHIFT\t   17\n+#define IGC_SRRCTL_TIME1_MSK\t   (3UL << IGC_SRRCTL_TIME1_SHIFT)\n+#define IGC_SRRCTL_TIME0_MSK\t   (3UL << IGC_SRRCTL_TIME0_SHIFT)\n+#define IGC_RXD_STAT_TSIP\t   (1UL << 15)\n+\n+/*\n+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be\n+ * multiple of 128 bytes. 
So we align TDBA/RDBA on 128 byte boundary.\n+ * This will also optimize cache line size effect.\n+ * H/W supports up to cache line size 128.\n+ */\n+#define\tIGC_ALIGN\t            128\n+\n+#define IGC_TX_DESCRIPTOR_MULTIPLE\t8\n+#define IGC_RX_DESCRIPTOR_MULTIPLE\t8\n+\n+#define IGC_HKEY_MAX_INDEX\t10\n+#define IGC_QUEUE_PAIRS_NUM\t4\n+\n+#define\tIGC_RXD_ALIGN\t((uint16_t)(IGC_ALIGN / \\\n+\t\tsizeof(union igc_adv_rx_desc)))\n+#define\tIGC_TXD_ALIGN\t((uint16_t)(IGC_ALIGN / \\\n+\t\tsizeof(union igc_adv_tx_desc)))\n+#define IGC_MIN_TXD\tIGC_TX_DESCRIPTOR_MULTIPLE\n+#define IGC_MAX_TXD\t((uint16_t)(0x80000 / sizeof(union igc_adv_tx_desc)))\n+#define IGC_MIN_RXD\tIGC_RX_DESCRIPTOR_MULTIPLE\n+#define IGC_MAX_RXD\t((uint16_t)(0x80000 / sizeof(union igc_adv_rx_desc)))\n+\n+#define IGC_TX_MAX_SEG\t\tUINT8_MAX\n+#define IGC_TX_MAX_MTU_SEG\tUINT8_MAX\n+\n+#define IGC_RX_OFFLOAD_ALL\t\t\\\n+\t(DEV_RX_OFFLOAD_VLAN_STRIP  | \\\n+\tDEV_RX_OFFLOAD_VLAN_FILTER | \\\n+\tDEV_RX_OFFLOAD_IPV4_CKSUM  | \\\n+\tDEV_RX_OFFLOAD_UDP_CKSUM   | \\\n+\tDEV_RX_OFFLOAD_TCP_CKSUM   | \\\n+\tDEV_RX_OFFLOAD_JUMBO_FRAME | \\\n+\tDEV_RX_OFFLOAD_KEEP_CRC    | \\\n+\tDEV_RX_OFFLOAD_SCATTER     | \\\n+\tDEV_RX_OFFLOAD_TIMESTAMP   | \\\n+\tDEV_RX_OFFLOAD_QINQ_STRIP)\n+\n+#define IGC_TX_OFFLOAD_ALL\t\\\n+\t(DEV_TX_OFFLOAD_VLAN_INSERT | \\\n+\tDEV_TX_OFFLOAD_IPV4_CKSUM  | \\\n+\tDEV_TX_OFFLOAD_UDP_CKSUM   | \\\n+\tDEV_TX_OFFLOAD_TCP_CKSUM   | \\\n+\tDEV_TX_OFFLOAD_SCTP_CKSUM  | \\\n+\tDEV_TX_OFFLOAD_TCP_TSO     | \\\n+\tDEV_TX_OFFLOAD_MULTI_SEGS  | \\\n+\tDEV_TX_OFFLOAD_QINQ_INSERT)\n+\n+#define IGC_RSS_OFFLOAD_ALL ( \\\n+\tETH_RSS_IPV4 | \\\n+\tETH_RSS_NONFRAG_IPV4_TCP | \\\n+\tETH_RSS_NONFRAG_IPV4_UDP | \\\n+\tETH_RSS_IPV6 | \\\n+\tETH_RSS_NONFRAG_IPV6_TCP | \\\n+\tETH_RSS_NONFRAG_IPV6_UDP | \\\n+\tETH_RSS_IPV6_EX | \\\n+\tETH_RSS_IPV6_TCP_EX | \\\n+\tETH_RSS_IPV6_UDP_EX)\n+\n+#define IGC_ETQF_FILTER_1588        3\n+\n+struct igc_rte_flow_rss_conf {\n+\tstruct rte_flow_action_rss conf; /**< RSS parameters. */\n+\tuint8_t key[IGC_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */\n+\t/* Queues indices to use. 
*/\n+\tuint16_t queue[IGC_QUEUE_PAIRS_NUM];\n+};\n+\n+/* Structure to per-queue statics */\n+struct igc_hw_queue_stats {\n+\tu64\tpqgprc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per queue good packets received count */\n+\tu64\tpqgptc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per queue good packets transmitted count */\n+\tu64\tpqgorc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per queue good octets received count */\n+\tu64\tpqgotc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per queue good octets transmitted count */\n+\tu64\tpqmprc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per queue multicast packets received count */\n+\tu64\trqdpc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per receive queue drop packet count */\n+\tu64\ttqdpc[IGC_QUEUE_PAIRS_NUM];\n+\t/* per transmit queue drop packet count */\n+};\n+\n+/* structure for interrupt relative data */\n+struct igc_interrupt {\n+\tuint32_t flags;\n+\tuint32_t mask;\n+};\n+\n+/* local vfta copy */\n+struct igc_vfta {\n+\tuint32_t vfta[IGC_VFTA_SIZE];\n+};\n+\n+/*\n+ * Structure to store private data for each driver instance (for each port).\n+ */\n+struct igc_adapter {\n+\tstruct igc_hw         hw;\n+\tstruct igc_hw_stats   stats;\n+\tstruct igc_hw_queue_stats queue_stats;\n+\tstruct igc_interrupt  intr;\n+\tstruct igc_vfta       shadow_vfta;\n+\tbool stopped;\n+\tstruct rte_timecounter  systime_tc;\n+\tstruct rte_timecounter  rx_tstamp_tc;\n+\tstruct rte_timecounter  tx_tstamp_tc;\n+};\n+\n+#define IGC_DEV_PRIVATE(adapter) \\\n+\t((struct igc_adapter *)adapter)\n+\n+#define IGC_DEV_PRIVATE_TO_HW(adapter) \\\n+\t(&((struct igc_adapter *)adapter)->hw)\n+\n+#define IGC_DEV_PRIVATE_TO_STATS(adapter) \\\n+\t(&((struct igc_adapter *)adapter)->stats)\n+\n+#define IGC_DEV_PRIVATE_TO_QUEUE_STATS(adapter) \\\n+\t(&((struct igc_adapter *)adapter)->queue_stats)\n+\n+#define IGC_DEV_PRIVATE_TO_INTR(adapter) \\\n+\t(&((struct igc_adapter *)adapter)->intr)\n+\n+#define IGC_DEV_PRIVATE_TO_VFTA(adapter) \\\n+\t(&((struct igc_adapter *)adapter)->shadow_vfta)\n+\n+#endif\ndiff --git a/drivers/net/igc/igc_logs.c b/drivers/net/igc/igc_logs.c\nnew file mode 100644\nindex 0000000..c653783\n--- /dev/null\n+++ b/drivers/net/igc/igc_logs.c\n@@ -0,0 +1,21 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#include \"igc_logs.h\"\n+#include \"rte_common.h\"\n+\n+/* declared as extern in igc_logs.h */\n+int igc_logtype_init = -1;\n+int igc_logtype_driver = -1;\n+\n+RTE_INIT(igc_init_log)\n+{\n+\tigc_logtype_init = rte_log_register(\"pmd.net.igc.init\");\n+\tif (igc_logtype_init >= 0)\n+\t\trte_log_set_level(igc_logtype_init, RTE_LOG_INFO);\n+\n+\tigc_logtype_driver = rte_log_register(\"pmd.net.igc.driver\");\n+\tif (igc_logtype_driver >= 0)\n+\t\trte_log_set_level(igc_logtype_driver, RTE_LOG_INFO);\n+}\ndiff --git a/drivers/net/igc/igc_logs.h b/drivers/net/igc/igc_logs.h\nnew file mode 100644\nindex 0000000..632bf4a\n--- /dev/null\n+++ b/drivers/net/igc/igc_logs.h\n@@ -0,0 +1,48 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2020 Intel Corporation\n+ */\n+\n+#ifndef _IGC_LOGS_H_\n+#define _IGC_LOGS_H_\n+\n+#include <rte_log.h>\n+\n+extern int igc_logtype_init;\n+extern int igc_logtype_driver;\n+\n+#define PMD_INIT_LOG(level, fmt, args...) \\\n+\trte_log(RTE_LOG_ ## level, igc_logtype_init, \\\n+\t\t\"%s(): \" fmt \"\\n\", __func__, ##args)\n+\n+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, \" >>\")\n+\n+#ifdef RTE_LIBRTE_IGC_DEBUG_RX\n+#define PMD_RX_LOG(level, fmt, args...) 
\\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_IGC_DEBUG_TX\n+#define PMD_TX_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_IGC_DEBUG_TX_FREE\n+#define PMD_TX_FREE_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+extern int igc_logtype_driver;\n+#define PMD_DRV_LOG_RAW(level, fmt, args...) \\\n+\trte_log(RTE_LOG_ ## level, igc_logtype_driver, \"%s(): \" fmt, \\\n+\t\t__func__, ## args)\n+\n+#define PMD_DRV_LOG(level, fmt, args...) \\\n+\tPMD_DRV_LOG_RAW(level, fmt \"\\n\", ## args)\n+\n+#endif /* _IGC_LOGS_H_ */\ndiff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c\nnew file mode 100644\nindex 0000000..2336443\n--- /dev/null\n+++ b/drivers/net/igc/igc_txrx.c\n@@ -0,0 +1,2237 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2020 Intel Corporation\n+ */\n+\n+#include <rte_config.h>\n+#include <rte_malloc.h>\n+#include <rte_ethdev_driver.h>\n+#include <rte_net.h>\n+\n+#include \"igc_logs.h\"\n+#include \"igc_txrx.h\"\n+\n+#ifdef RTE_PMD_USE_PREFETCH\n+#define rte_igc_prefetch(p)\trte_prefetch0(p)\n+#else\n+#define rte_igc_prefetch(p)\tdo {} while (0)\n+#endif\n+\n+#ifdef RTE_PMD_PACKET_PREFETCH\n+#define rte_packet_prefetch(p) rte_prefetch1(p)\n+#else\n+#define rte_packet_prefetch(p)\tdo {} while (0)\n+#endif\n+\n+/* Multicast / Unicast table offset mask. */\n+#define IGC_RCTL_MO_MSK\t\t(3 << IGC_RCTL_MO_SHIFT)\n+\n+/* Loopback mode. 
*/\n+#define IGC_RCTL_LBM_SHIFT\t\t6\n+#define IGC_RCTL_LBM_MSK\t\t(3 << IGC_RCTL_LBM_SHIFT)\n+\n+/* Hash select for MTA */\n+#define IGC_RCTL_HSEL_SHIFT\t\t8\n+#define IGC_RCTL_HSEL_MSK\t\t(3 << IGC_RCTL_HSEL_SHIFT)\n+#define IGC_RCTL_PSP\t\t\t(1 << 21)\n+\n+/* Receive buffer size for header buffer */\n+#define IGC_SRRCTL_BSIZEHEADER_SHIFT\t8\n+\n+/* RX descriptor status and error flags */\n+#define IGC_RXD_STAT_L4CS\t\t(1 << 5)\n+#define IGC_RXD_STAT_VEXT\t\t(1 << 9)\n+#define IGC_RXD_STAT_LLINT\t\t(1 << 11)\n+#define IGC_RXD_STAT_SCRC\t\t(1 << 12)\n+#define IGC_RXD_STAT_SMDT_MASK\t\t(3 << 13)\n+#define IGC_RXD_STAT_MC\t\t\t(1 << 19)\n+#define IGC_RXD_EXT_ERR_L4E\t\t(1 << 29)\n+#define IGC_RXD_EXT_ERR_IPE\t\t(1 << 30)\n+#define IGC_RXD_EXT_ERR_RXE\t\t(1 << 31)\n+#define IGC_RXD_RSS_TYPE_MASK\t\t0xf\n+#define IGC_RXD_PCTYPE_MASK\t\t(0x7f << 4)\n+#define IGC_RXD_ETQF_SHIFT\t\t12\n+#define IGC_RXD_ETQF_MSK\t\t(0xfUL << IGC_RXD_ETQF_SHIFT)\n+#define IGC_RXD_VPKT\t\t\t(1 << 16)\n+\n+/* ETQF register index for 1588 */\n+#define IGC_ETQF_FILTER_1588\t\t3\n+\n+/* TXD control bits */\n+#define IGC_TXDCTL_PTHRESH_SHIFT\t0\n+#define IGC_TXDCTL_HTHRESH_SHIFT\t8\n+#define IGC_TXDCTL_WTHRESH_SHIFT\t16\n+#define IGC_TXDCTL_PTHRESH_MSK\t\t(0x1f << IGC_TXDCTL_PTHRESH_SHIFT)\n+#define IGC_TXDCTL_HTHRESH_MSK\t\t(0x1f << IGC_TXDCTL_HTHRESH_SHIFT)\n+#define IGC_TXDCTL_WTHRESH_MSK\t\t(0x1f << IGC_TXDCTL_WTHRESH_SHIFT)\n+\n+/* RXD control bits */\n+#define IGC_RXDCTL_PTHRESH_SHIFT\t0\n+#define IGC_RXDCTL_HTHRESH_SHIFT\t8\n+#define IGC_RXDCTL_WTHRESH_SHIFT\t16\n+#define IGC_RXDCTL_PTHRESH_MSK\t\t(0x1f << IGC_RXDCTL_PTHRESH_SHIFT)\n+#define IGC_RXDCTL_HTHRESH_MSK\t\t(0x1f << IGC_RXDCTL_HTHRESH_SHIFT)\n+#define IGC_RXDCTL_WTHRESH_MSK\t\t(0x1f << IGC_RXDCTL_WTHRESH_SHIFT)\n+\n+#define IGC_TSO_MAX_HDRLEN\t\t512\n+#define IGC_TSO_MAX_MSS\t\t\t9216\n+\n+/* Header size for timestamp */\n+#define IGC_TIMHDR_SIZE\t\t\t16\n+\n+/* Bit Mask to indicate what bits required for building TX context */\n+#define IGC_TX_OFFLOAD_MASK (\t\t\\\n+\t\tPKT_TX_OUTER_IPV6 |\t\\\n+\t\tPKT_TX_OUTER_IPV4 |\t\\\n+\t\tPKT_TX_IPV6 |\t\t\\\n+\t\tPKT_TX_IPV4 |\t\t\\\n+\t\tPKT_TX_VLAN_PKT |\t\\\n+\t\tPKT_TX_IP_CKSUM |\t\\\n+\t\tPKT_TX_L4_MASK |\t\\\n+\t\tPKT_TX_TCP_SEG |\t\\\n+\t\tPKT_TX_IEEE1588_TMST)\n+\n+#define IGC_TX_OFFLOAD_NOTSUP_MASK (PKT_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)\n+\n+/**\n+ * Structure associated with each descriptor of the RX ring of a RX queue.\n+ */\n+struct igc_rx_entry {\n+\tstruct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */\n+};\n+\n+/**\n+ * Structure associated with each RX queue.\n+ */\n+struct igc_rx_queue {\n+\tstruct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */\n+\tvolatile union igc_adv_rx_desc *rx_ring;\n+\t/**< RX ring virtual address. */\n+\tuint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */\n+\tvolatile uint32_t   *rdt_reg_addr; /**< RDT register address. */\n+\tvolatile uint32_t   *rdh_reg_addr; /**< RDH register address. */\n+\tstruct igc_rx_entry *sw_ring;   /**< address of RX software ring. */\n+\tstruct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */\n+\tstruct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */\n+\tuint16_t            nb_rx_desc; /**< number of RX descriptors. */\n+\tuint16_t            rx_tail;    /**< current value of RDT register. */\n+\tuint16_t            nb_rx_hold; /**< number of held free RX desc. */\n+\tuint16_t            rx_free_thresh; /**< max free RX desc to hold. 
*/\n+\tuint16_t            queue_id;   /**< RX queue index. */\n+\tuint16_t            reg_idx;    /**< RX queue register index. */\n+\tuint16_t            port_id;    /**< Device port identifier. */\n+\tuint8_t             pthresh;    /**< Prefetch threshold register. */\n+\tuint8_t             hthresh;    /**< Host threshold register. */\n+\tuint8_t             wthresh;    /**< Write-back threshold register. */\n+\tuint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */\n+\tuint8_t             drop_en;\t/**< If not 0, set SRRCTL.Drop_En. */\n+\tuint32_t            flags;      /**< RX flags. */\n+\tuint64_t\t    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */\n+\tstruct timespec     timestamp[2];  /**< timestamp of last packet. */\n+};\n+\n+/** Offload features */\n+union igc_tx_offload {\n+\tuint64_t data;\n+\tstruct {\n+\t\tuint64_t l3_len:9; /**< L3 (IP) Header Length. */\n+\t\tuint64_t l2_len:7; /**< L2 (MAC) Header Length. */\n+\t\tuint64_t vlan_tci:16;\n+\t\t/**< VLAN Tag Control Identifier (CPU order). */\n+\t\tuint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */\n+\t\tuint64_t tso_segsz:16; /**< TCP TSO segment size. */\n+\t\t/* uint64_t unused:8; */\n+\t};\n+};\n+\n+/*\n+ * Compare mask for igc_tx_offload.data,\n+ * should be in sync with igc_tx_offload layout.\n+ */\n+#define TX_MACIP_LEN_CMP_MASK\t0x000000000000FFFFULL /**< L2L3 header mask. */\n+#define TX_VLAN_CMP_MASK\t0x00000000FFFF0000ULL /**< Vlan mask. */\n+#define TX_TCP_LEN_CMP_MASK\t0x000000FF00000000ULL /**< TCP header mask. */\n+#define TX_TSO_MSS_CMP_MASK\t0x00FFFF0000000000ULL /**< TSO segsz mask. */\n+/** Mac + IP + TCP + Mss mask. */\n+#define TX_TSO_CMP_MASK\t\\\n+\t(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)\n+\n+/**\n+ * Structure to check if a new context needs to be built\n+ */\n+struct igc_advctx_info {\n+\tuint64_t flags;           /**< ol_flags related to context build. */\n+\t/** tx offload: vlan, tso, l2-l3-l4 lengths. */\n+\tunion igc_tx_offload tx_offload;\n+\t/** compare mask for tx offload. */\n+\tunion igc_tx_offload tx_offload_mask;\n+};\n+\n+/**\n+ * Hardware context number\n+ */\n+enum {\n+\tIGC_CTX_0    = 0, /**< CTX0    */\n+\tIGC_CTX_1    = 1, /**< CTX1    */\n+\tIGC_CTX_NUM  = 2, /**< CTX_NUM */\n+};\n+\n+/**\n+ * Structure associated with each descriptor of the TX ring of a TX queue.\n+ */\n+struct igc_tx_entry {\n+\tstruct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */\n+\tuint16_t next_id; /**< Index of next descriptor in ring. */\n+\tuint16_t last_id; /**< Index of last scattered descriptor. */\n+};\n+\n+/**\n+ * Structure associated with each TX queue.\n+ */\n+struct igc_tx_queue {\n+\tvolatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */\n+\tuint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */\n+\tstruct igc_tx_entry    *sw_ring; /**< virtual address of SW ring. */\n+\tvolatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */\n+\tuint32_t               txd_type;      /**< Device-specific TXD type */\n+\tuint16_t               nb_tx_desc;    /**< number of TX descriptors. */\n+\tuint16_t               tx_tail;  /**< Current value of TDT register. */\n+\tuint16_t               tx_head;\n+\t/**< Index of first used TX descriptor. */\n+\tuint16_t               queue_id; /**< TX queue index. */\n+\tuint16_t               reg_idx;  /**< TX queue register index. */\n+\tuint16_t               port_id;  /**< Device port identifier. 
*/\n+\tuint8_t                pthresh;  /**< Prefetch threshold register. */\n+\tuint8_t                hthresh;  /**< Host threshold register. */\n+\tuint8_t                wthresh;  /**< Write-back threshold register. */\n+\tuint8_t                ctx_curr;\n+\n+\t/**< Start context position for transmit queue. */\n+\tstruct igc_advctx_info ctx_cache[IGC_CTX_NUM];\n+\t/**< Hardware context history.*/\n+\tuint64_t\t       offloads; /**< offloads of DEV_TX_OFFLOAD_* */\n+};\n+\n+static inline void\n+get_rx_pkt_timestamp(struct igc_rx_queue *rxq, struct rte_mbuf *rxm)\n+{\n+\tuint32_t *time = rte_pktmbuf_mtod(rxm, uint32_t *);\n+\n+\t/*\n+\t * Foxville supports adding an optional tailored header before\n+\t * the MAC header of the packet in the receive buffer. The 128\n+\t * bit tailored header include a timestamp composed of the\n+\t * packet reception time measured in the SYSTIML (Low DW) and\n+\t * SYSTIMH (High DW) registers of two selected 1588 timers.\n+\t */\n+\trxq->timestamp[1].tv_nsec = rte_le_to_cpu_32(time[0]);\n+\trxq->timestamp[1].tv_sec = rte_le_to_cpu_32(time[1]);\n+\trxq->timestamp[0].tv_nsec = rte_le_to_cpu_32(time[2]);\n+\trxq->timestamp[0].tv_sec = rte_le_to_cpu_32(time[3]);\n+\trxm->timestamp = rte_timespec_to_ns(&rxq->timestamp[0]);\n+\trxm->timesync = rxq->queue_id;\n+\trxm->data_off += IGC_TIMHDR_SIZE;\n+\trxm->data_len -= IGC_TIMHDR_SIZE;\n+\trxm->pkt_len -= IGC_TIMHDR_SIZE;\n+}\n+\n+static inline uint64_t\n+rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)\n+{\n+\tstatic uint64_t l4_chksum_flags[] = {0, 0, PKT_RX_L4_CKSUM_GOOD,\n+\t\t\tPKT_RX_L4_CKSUM_BAD};\n+\n+\tstatic uint64_t l3_chksum_flags[] = {0, 0, PKT_RX_IP_CKSUM_GOOD,\n+\t\t\tPKT_RX_IP_CKSUM_BAD};\n+\tuint64_t pkt_flags = 0;\n+\tuint32_t tmp;\n+\n+\tif (statuserr & IGC_RXD_STAT_VP)\n+\t\tpkt_flags |= PKT_RX_VLAN_STRIPPED;\n+\n+\ttmp = !!(statuserr & (IGC_RXD_STAT_L4CS | IGC_RXD_STAT_UDPCS));\n+\ttmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_L4E);\n+\tpkt_flags |= l4_chksum_flags[tmp];\n+\n+\ttmp = !!(statuserr & IGC_RXD_STAT_IPCS);\n+\ttmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_IPE);\n+\tpkt_flags |= l3_chksum_flags[tmp];\n+\n+\treturn pkt_flags;\n+}\n+\n+#define IGC_PACKET_TYPE_IPV4              0X01\n+#define IGC_PACKET_TYPE_IPV4_TCP          0X11\n+#define IGC_PACKET_TYPE_IPV4_UDP          0X21\n+#define IGC_PACKET_TYPE_IPV4_SCTP         0X41\n+#define IGC_PACKET_TYPE_IPV4_EXT          0X03\n+#define IGC_PACKET_TYPE_IPV4_EXT_SCTP     0X43\n+#define IGC_PACKET_TYPE_IPV6              0X04\n+#define IGC_PACKET_TYPE_IPV6_TCP          0X14\n+#define IGC_PACKET_TYPE_IPV6_UDP          0X24\n+#define IGC_PACKET_TYPE_IPV6_EXT          0X0C\n+#define IGC_PACKET_TYPE_IPV6_EXT_TCP      0X1C\n+#define IGC_PACKET_TYPE_IPV6_EXT_UDP      0X2C\n+#define IGC_PACKET_TYPE_IPV4_IPV6         0X05\n+#define IGC_PACKET_TYPE_IPV4_IPV6_TCP     0X15\n+#define IGC_PACKET_TYPE_IPV4_IPV6_UDP     0X25\n+#define IGC_PACKET_TYPE_IPV4_IPV6_EXT     0X0D\n+#define IGC_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D\n+#define IGC_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D\n+#define IGC_PACKET_TYPE_MAX               0X80\n+#define IGC_PACKET_TYPE_MASK              0X7F\n+#define IGC_PACKET_TYPE_SHIFT             0X04\n+\n+static inline uint32_t\n+rx_desc_pkt_info_to_pkt_type(uint32_t pkt_info)\n+{\n+\tstatic const uint32_t\n+\t\tptype_table[IGC_PACKET_TYPE_MAX] __rte_cache_aligned = {\n+\t\t[IGC_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4,\n+\t\t[IGC_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER 
|\n+\t\t\tRTE_PTYPE_L3_IPV4_EXT,\n+\t\t[IGC_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6,\n+\t\t[IGC_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6_EXT,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6_EXT,\n+\t\t[IGC_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,\n+\t\t[IGC_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,\n+\t\t[IGC_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,\n+\t\t[IGC_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,\n+\t\t[IGC_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,\n+\t\t[IGC_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,\n+\t\t[IGC_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,\n+\t\t[IGC_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,\n+\t};\n+\tif (unlikely(pkt_info & IGC_RXDADV_PKTTYPE_ETQF))\n+\t\treturn RTE_PTYPE_UNKNOWN;\n+\n+\tpkt_info = (pkt_info >> IGC_PACKET_TYPE_SHIFT) & IGC_PACKET_TYPE_MASK;\n+\n+\treturn ptype_table[pkt_info];\n+}\n+\n+static inline void\n+rx_desc_get_pkt_info(struct igc_rx_queue *rxq, struct rte_mbuf *rxm,\n+\t\tunion igc_adv_rx_desc *rxd, uint32_t staterr)\n+{\n+\tuint64_t pkt_flags;\n+\tuint32_t hlen_type_rss;\n+\tuint16_t pkt_info;\n+\n+\t/* Prefetch data of first segment, if configured to do so. 
*/\n+\trte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);\n+\n+\trxm->port = rxq->port_id;\n+\thlen_type_rss = rte_le_to_cpu_32(rxd->wb.lower.lo_dword.data);\n+\trxm->hash.rss = rte_le_to_cpu_32(rxd->wb.lower.hi_dword.rss);\n+\trxm->vlan_tci = rte_le_to_cpu_16(rxd->wb.upper.vlan);\n+\n+\tpkt_flags = (hlen_type_rss & IGC_RXD_RSS_TYPE_MASK) ?\n+\t\t\tPKT_RX_RSS_HASH : 0;\n+\n+\tif (hlen_type_rss & IGC_RXD_VPKT)\n+\t\tpkt_flags |= PKT_RX_VLAN;\n+\n+\tif ((hlen_type_rss & IGC_RXD_ETQF_MSK) == (IGC_RXDADV_PKTTYPE_ETQF |\n+\t\t\t(IGC_ETQF_FILTER_1588 << IGC_RXD_ETQF_SHIFT)))\n+\t\tpkt_flags |= PKT_RX_IEEE1588_PTP;\n+\n+\tpkt_flags |= rx_desc_statuserr_to_pkt_flags(staterr);\n+\n+\tif (unlikely(staterr & IGC_RXD_STAT_TSIP)) {\n+\t\tget_rx_pkt_timestamp(rxq, rxm);\n+\t\tpkt_flags |= PKT_RX_TIMESTAMP;\n+\t\tif (pkt_flags & PKT_RX_IEEE1588_PTP)\n+\t\t\tpkt_flags |= PKT_RX_IEEE1588_TMST;\n+\t}\n+\n+\trxm->ol_flags = pkt_flags;\n+\tpkt_info = rte_le_to_cpu_16(rxd->wb.lower.lo_dword.hs_rss.pkt_info);\n+\trxm->packet_type = rx_desc_pkt_info_to_pkt_type(pkt_info);\n+}\n+\n+static uint16_t\n+eth_igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct igc_rx_queue * const rxq = rx_queue;\n+\tvolatile union igc_adv_rx_desc * const rx_ring = rxq->rx_ring;\n+\tstruct igc_rx_entry * const sw_ring = rxq->sw_ring;\n+\tuint16_t rx_id = rxq->rx_tail;\n+\tuint16_t nb_rx = 0;\n+\tuint16_t nb_hold = 0;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\tvolatile union igc_adv_rx_desc *rxdp;\n+\t\tstruct igc_rx_entry *rxe;\n+\t\tstruct rte_mbuf *rxm;\n+\t\tstruct rte_mbuf *nmb;\n+\t\tunion igc_adv_rx_desc rxd;\n+\t\tuint32_t staterr;\n+\t\tuint16_t data_len;\n+\n+\t\t/*\n+\t\t * The order of operations here is important as the DD status\n+\t\t * bit must not be read after any other descriptor fields.\n+\t\t * rx_ring and rxdp are pointing to volatile data so the order\n+\t\t * of accesses cannot be reordered by the compiler. 
If they were\n+\t\t * not volatile, they could be reordered which could lead to\n+\t\t * using invalid descriptor fields when read from rxd.\n+\t\t */\n+\t\trxdp = &rx_ring[rx_id];\n+\t\tstaterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error);\n+\t\tif (!(staterr & IGC_RXD_STAT_DD))\n+\t\t\tbreak;\n+\t\trxd = *rxdp;\n+\n+\t\t/*\n+\t\t * End of packet.\n+\t\t *\n+\t\t * If the IGC_RXD_STAT_EOP flag is not set, the RX packet is\n+\t\t * likely to be invalid and to be dropped by the various\n+\t\t * validation checks performed by the network stack.\n+\t\t *\n+\t\t * Allocate a new mbuf to replenish the RX ring descriptor.\n+\t\t * If the allocation fails:\n+\t\t *    - arrange for that RX descriptor to be the first one\n+\t\t *      being parsed the next time the receive function is\n+\t\t *      invoked [on the same queue].\n+\t\t *\n+\t\t *    - Stop parsing the RX ring and return immediately.\n+\t\t *\n+\t\t * This policy does not drop the packet received in the RX\n+\t\t * descriptor for which the allocation of a new mbuf failed.\n+\t\t * Thus, it allows that packet to be later retrieved if\n+\t\t * mbuf have been freed in the mean time.\n+\t\t * As a side effect, holding RX descriptors instead of\n+\t\t * systematically giving them back to the NIC may lead to\n+\t\t * RX ring exhaustion situations.\n+\t\t * However, the NIC can gracefully prevent such situations\n+\t\t * to happen by sending specific \"back-pressure\" flow control\n+\t\t * frames to its peer(s).\n+\t\t */\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_id=%u\"\n+\t\t\t\" staterr=0x%x data_len=%u\", rxq->port_id,\n+\t\t\trxq->queue_id, rx_id, staterr,\n+\t\t\trte_le_to_cpu_16(rxd.wb.upper.length));\n+\n+\t\tnmb = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\tif (nmb == NULL) {\n+\t\t\tunsigned int id;\n+\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u\"\n+\t\t\t\t\" queue_id=%u\", rxq->port_id, rxq->queue_id);\n+\t\t\tid = rxq->port_id;\n+\t\t\trte_eth_devices[id].data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tnb_hold++;\n+\t\trxe = &sw_ring[rx_id];\n+\t\trx_id++;\n+\t\tif (rx_id >= rxq->nb_rx_desc)\n+\t\t\trx_id = 0;\n+\n+\t\t/* Prefetch next mbuf while processing current one. 
*/\n+\t\trte_igc_prefetch(sw_ring[rx_id].mbuf);\n+\n+\t\t/*\n+\t\t * When next RX descriptor is on a cache-line boundary,\n+\t\t * prefetch the next 4 RX descriptors and the next 8 pointers\n+\t\t * to mbufs.\n+\t\t */\n+\t\tif ((rx_id & 0x3) == 0) {\n+\t\t\trte_igc_prefetch(&rx_ring[rx_id]);\n+\t\t\trte_igc_prefetch(&sw_ring[rx_id]);\n+\t\t}\n+\n+\t\t/*\n+\t\t * Update RX descriptor with the physical address of the new\n+\t\t * data buffer of the new allocated mbuf.\n+\t\t */\n+\t\trxm = rxe->mbuf;\n+\t\trxe->mbuf = nmb;\n+\t\trxdp->read.hdr_addr = 0;\n+\t\trxdp->read.pkt_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n+\t\trxm->next = NULL;\n+\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tdata_len = rte_le_to_cpu_16(rxd.wb.upper.length) - rxq->crc_len;\n+\t\trxm->data_len = data_len;\n+\t\trxm->pkt_len = data_len;\n+\t\trxm->nb_segs = 1;\n+\n+\t\trx_desc_get_pkt_info(rxq, rxm, &rxd, staterr);\n+\n+\t\t/*\n+\t\t * Store the mbuf address into the next entry of the array\n+\t\t * of returned packets.\n+\t\t */\n+\t\trx_pkts[nb_rx++] = rxm;\n+\t}\n+\trxq->rx_tail = rx_id;\n+\n+\t/*\n+\t * If the number of free RX descriptors is greater than the RX free\n+\t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n+\t * register.\n+\t * Update the RDT with the value of the last processed RX descriptor\n+\t * minus 1, to guarantee that the RDT register is never equal to the\n+\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * hardware point of view...\n+\t */\n+\tnb_hold = nb_hold + rxq->nb_rx_hold;\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_tail=%u\"\n+\t\t\t\" nb_hold=%u nb_rx=%u\", rxq->port_id, rxq->queue_id,\n+\t\t\trx_id, nb_hold, nb_rx);\n+\t\trx_id = (rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1);\n+\t\tIGC_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);\n+\t\tnb_hold = 0;\n+\t}\n+\trxq->nb_rx_hold = nb_hold;\n+\treturn nb_rx;\n+}\n+\n+static uint16_t\n+eth_igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\tuint16_t nb_pkts)\n+{\n+\tstruct igc_rx_queue * const rxq = rx_queue;\n+\tvolatile union igc_adv_rx_desc * const rx_ring = rxq->rx_ring;\n+\tstruct igc_rx_entry * const sw_ring = rxq->sw_ring;\n+\tstruct rte_mbuf *first_seg = rxq->pkt_first_seg;\n+\tstruct rte_mbuf *last_seg = rxq->pkt_last_seg;\n+\n+\tuint16_t rx_id = rxq->rx_tail;\n+\tuint16_t nb_rx = 0;\n+\tuint16_t nb_hold = 0;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\tvolatile union igc_adv_rx_desc *rxdp;\n+\t\tstruct igc_rx_entry *rxe;\n+\t\tstruct rte_mbuf *rxm;\n+\t\tstruct rte_mbuf *nmb;\n+\t\tunion igc_adv_rx_desc rxd;\n+\t\tuint32_t staterr;\n+\t\tuint16_t data_len;\n+\n+next_desc:\n+\t\t/*\n+\t\t * The order of operations here is important as the DD status\n+\t\t * bit must not be read after any other descriptor fields.\n+\t\t * rx_ring and rxdp are pointing to volatile data so the order\n+\t\t * of accesses cannot be reordered by the compiler. 
If they were\n+\t\t * not volatile, they could be reordered which could lead to\n+\t\t * using invalid descriptor fields when read from rxd.\n+\t\t */\n+\t\trxdp = &rx_ring[rx_id];\n+\t\tstaterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error);\n+\t\tif (!(staterr & IGC_RXD_STAT_DD))\n+\t\t\tbreak;\n+\t\trxd = *rxdp;\n+\n+\t\t/*\n+\t\t * Descriptor done.\n+\t\t *\n+\t\t * Allocate a new mbuf to replenish the RX ring descriptor.\n+\t\t * If the allocation fails:\n+\t\t *    - arrange for that RX descriptor to be the first one\n+\t\t *      being parsed the next time the receive function is\n+\t\t *      invoked [on the same queue].\n+\t\t *\n+\t\t *    - Stop parsing the RX ring and return immediately.\n+\t\t *\n+\t\t * This policy does not drop the packet received in the RX\n+\t\t * descriptor for which the allocation of a new mbuf failed.\n+\t\t * Thus, it allows that packet to be later retrieved if\n+\t\t * mbuf have been freed in the mean time.\n+\t\t * As a side effect, holding RX descriptors instead of\n+\t\t * systematically giving them back to the NIC may lead to\n+\t\t * RX ring exhaustion situations.\n+\t\t * However, the NIC can gracefully prevent such situations\n+\t\t * to happen by sending specific \"back-pressure\" flow control\n+\t\t * frames to its peer(s).\n+\t\t */\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_id=%u\"\n+\t\t\t\" staterr=0x%x data_len=%u\", rxq->port_id,\n+\t\t\trxq->queue_id, rx_id, staterr,\n+\t\t\trte_le_to_cpu_16(rxd.wb.upper.length));\n+\n+\t\tnmb = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\tif (nmb == NULL) {\n+\t\t\tunsigned int id;\n+\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u\"\n+\t\t\t\t\" queue_id=%u\", rxq->port_id, rxq->queue_id);\n+\t\t\tid = rxq->port_id;\n+\t\t\trte_eth_devices[id].data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tnb_hold++;\n+\t\trxe = &sw_ring[rx_id];\n+\t\trx_id++;\n+\t\tif (rx_id >= rxq->nb_rx_desc)\n+\t\t\trx_id = 0;\n+\n+\t\t/* Prefetch next mbuf while processing current one. 
*/\n+\t\trte_igc_prefetch(sw_ring[rx_id].mbuf);\n+\n+\t\t/*\n+\t\t * When next RX descriptor is on a cache-line boundary,\n+\t\t * prefetch the next 4 RX descriptors and the next 8 pointers\n+\t\t * to mbufs.\n+\t\t */\n+\t\tif ((rx_id & 0x3) == 0) {\n+\t\t\trte_igc_prefetch(&rx_ring[rx_id]);\n+\t\t\trte_igc_prefetch(&sw_ring[rx_id]);\n+\t\t}\n+\n+\t\t/*\n+\t\t * Update RX descriptor with the physical address of the new\n+\t\t * data buffer of the new allocated mbuf.\n+\t\t */\n+\t\trxm = rxe->mbuf;\n+\t\trxe->mbuf = nmb;\n+\t\trxdp->read.hdr_addr = 0;\n+\t\trxdp->read.pkt_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n+\t\trxm->next = NULL;\n+\n+\t\t/*\n+\t\t * Set data length & data buffer address of mbuf.\n+\t\t */\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tdata_len = rte_le_to_cpu_16(rxd.wb.upper.length);\n+\t\trxm->data_len = data_len;\n+\n+\t\t/*\n+\t\t * If this is the first buffer of the received packet,\n+\t\t * set the pointer to the first mbuf of the packet and\n+\t\t * initialize its context.\n+\t\t * Otherwise, update the total length and the number of segments\n+\t\t * of the current scattered packet, and update the pointer to\n+\t\t * the last mbuf of the current packet.\n+\t\t */\n+\t\tif (first_seg == NULL) {\n+\t\t\tfirst_seg = rxm;\n+\t\t\tfirst_seg->pkt_len = data_len;\n+\t\t\tfirst_seg->nb_segs = 1;\n+\t\t} else {\n+\t\t\tfirst_seg->pkt_len += data_len;\n+\t\t\tfirst_seg->nb_segs++;\n+\t\t\tlast_seg->next = rxm;\n+\t\t}\n+\n+\t\t/*\n+\t\t * If this is not the last buffer of the received packet,\n+\t\t * update the pointer to the last mbuf of the current scattered\n+\t\t * packet and continue to parse the RX ring.\n+\t\t */\n+\t\tif (!(staterr & IGC_RXD_STAT_EOP)) {\n+\t\t\tlast_seg = rxm;\n+\t\t\tgoto next_desc;\n+\t\t}\n+\n+\t\t/*\n+\t\t * This is the last buffer of the received packet.\n+\t\t * If the CRC is not stripped by the hardware:\n+\t\t *   - Subtract the CRC\tlength from the total packet length.\n+\t\t *   - If the last buffer only contains the whole CRC or a part\n+\t\t *     of it, free the mbuf associated to the last buffer.\n+\t\t *     If part of the CRC is also contained in the previous\n+\t\t *     mbuf, subtract the length of that CRC part from the\n+\t\t *     data length of the previous mbuf.\n+\t\t */\n+\t\tif (unlikely(rxq->crc_len > 0)) {\n+\t\t\tfirst_seg->pkt_len -= RTE_ETHER_CRC_LEN;\n+\t\t\tif (data_len <= RTE_ETHER_CRC_LEN) {\n+\t\t\t\trte_pktmbuf_free_seg(rxm);\n+\t\t\t\tfirst_seg->nb_segs--;\n+\t\t\t\tlast_seg->data_len = last_seg->data_len -\n+\t\t\t\t\t (RTE_ETHER_CRC_LEN - data_len);\n+\t\t\t\tlast_seg->next = NULL;\n+\t\t\t} else {\n+\t\t\t\trxm->data_len = (uint16_t)\n+\t\t\t\t\t(data_len - RTE_ETHER_CRC_LEN);\n+\t\t\t}\n+\t\t}\n+\n+\t\trx_desc_get_pkt_info(rxq, first_seg, &rxd, staterr);\n+\n+\t\t/*\n+\t\t * Store the mbuf address into the next entry of the array\n+\t\t * of returned packets.\n+\t\t */\n+\t\trx_pkts[nb_rx++] = first_seg;\n+\n+\t\t/* Setup receipt context for a new packet. 
*/\n+\t\tfirst_seg = NULL;\n+\t}\n+\trxq->rx_tail = rx_id;\n+\n+\t/*\n+\t * Save receive context.\n+\t */\n+\trxq->pkt_first_seg = first_seg;\n+\trxq->pkt_last_seg = last_seg;\n+\n+\t/*\n+\t * If the number of free RX descriptors is greater than the RX free\n+\t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n+\t * register.\n+\t * Update the RDT with the value of the last processed RX descriptor\n+\t * minus 1, to guarantee that the RDT register is never equal to the\n+\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * hardware point of view...\n+\t */\n+\tnb_hold = nb_hold + rxq->nb_rx_hold;\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_tail=%u\"\n+\t\t\t\" nb_hold=%u nb_rx=%u\", rxq->port_id, rxq->queue_id,\n+\t\t\trx_id, nb_hold, nb_rx);\n+\t\trx_id = (rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1);\n+\t\tIGC_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);\n+\t\tnb_hold = 0;\n+\t}\n+\trxq->nb_rx_hold = nb_hold;\n+\treturn nb_rx;\n+}\n+\n+static void\n+igc_rx_queue_release_mbufs(struct igc_rx_queue *rxq)\n+{\n+\tunsigned int i;\n+\n+\tif (rxq->sw_ring != NULL) {\n+\t\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\t\tif (rxq->sw_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);\n+\t\t\t\trxq->sw_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+static void\n+igc_rx_queue_release(struct igc_rx_queue *rxq)\n+{\n+\tigc_rx_queue_release_mbufs(rxq);\n+\trte_free(rxq->sw_ring);\n+\trte_free(rxq);\n+}\n+\n+void eth_igc_rx_queue_release(void *rxq)\n+{\n+\tif (rxq)\n+\t\tigc_rx_queue_release(rxq);\n+}\n+\n+uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,\n+\t\tuint16_t rx_queue_id)\n+{\n+\t/**\n+\t * Check the DD bit of a rx descriptor of each 4 in a group,\n+\t * to avoid checking too frequently and downgrading performance\n+\t * too much.\n+\t */\n+#define IGC_RXQ_SCAN_INTERVAL 4\n+\n+\tvolatile union igc_adv_rx_desc *rxdp;\n+\tstruct igc_rx_queue *rxq;\n+\tuint16_t desc = 0;\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\trxdp = &rxq->rx_ring[rxq->rx_tail];\n+\n+\twhile (desc < rxq->nb_rx_desc - rxq->rx_tail) {\n+\t\tif (unlikely(!(rxdp->wb.upper.status_error &\n+\t\t\t\tIGC_RXD_STAT_DD)))\n+\t\t\treturn desc;\n+\t\tdesc += IGC_RXQ_SCAN_INTERVAL;\n+\t\trxdp += IGC_RXQ_SCAN_INTERVAL;\n+\t}\n+\trxdp = &rxq->rx_ring[rxq->rx_tail + desc - rxq->nb_rx_desc];\n+\n+\twhile (desc < rxq->nb_rx_desc &&\n+\t\t(rxdp->wb.upper.status_error & IGC_RXD_STAT_DD)) {\n+\t\tdesc += IGC_RXQ_SCAN_INTERVAL;\n+\t\trxdp += IGC_RXQ_SCAN_INTERVAL;\n+\t}\n+\n+\treturn desc;\n+}\n+\n+int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset)\n+{\n+\tvolatile union igc_adv_rx_desc *rxdp;\n+\tstruct igc_rx_queue *rxq = rx_queue;\n+\tuint32_t desc;\n+\n+\tif (unlikely(offset >= rxq->nb_rx_desc))\n+\t\treturn 0;\n+\n+\tdesc = rxq->rx_tail + offset;\n+\tif (desc >= rxq->nb_rx_desc)\n+\t\tdesc -= rxq->nb_rx_desc;\n+\n+\trxdp = &rxq->rx_ring[desc];\n+\treturn !!(rxdp->wb.upper.status_error &\n+\t\t\trte_cpu_to_le_32(IGC_RXD_STAT_DD));\n+}\n+\n+int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset)\n+{\n+\tstruct igc_rx_queue *rxq = rx_queue;\n+\tvolatile uint32_t *status;\n+\tuint32_t desc;\n+\n+\tif (unlikely(offset >= rxq->nb_rx_desc))\n+\t\treturn -EINVAL;\n+\n+\tif (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)\n+\t\treturn RTE_ETH_RX_DESC_UNAVAIL;\n+\n+\tdesc = rxq->rx_tail + offset;\n+\tif (desc >= rxq->nb_rx_desc)\n+\t\tdesc -= rxq->nb_rx_desc;\n+\n+\tstatus = 
&rxq->rx_ring[desc].wb.upper.status_error;\n+\tif (*status & rte_cpu_to_le_32(IGC_RXD_STAT_DD))\n+\t\treturn RTE_ETH_RX_DESC_DONE;\n+\n+\treturn RTE_ETH_RX_DESC_AVAIL;\n+}\n+\n+static int\n+igc_alloc_rx_queue_mbufs(struct igc_rx_queue *rxq)\n+{\n+\tstruct igc_rx_entry *rxe = rxq->sw_ring;\n+\tuint64_t dma_addr;\n+\tunsigned int i;\n+\n+\t/* Initialize software ring entries. */\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tvolatile union igc_adv_rx_desc *rxd;\n+\t\tstruct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\n+\t\tif (mbuf == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"RX mbuf alloc failed \"\n+\t\t\t     \"queue_id=%hu\", rxq->queue_id);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\t\trxd = &rxq->rx_ring[i];\n+\t\trxd->read.hdr_addr = 0;\n+\t\trxd->read.pkt_addr = dma_addr;\n+\t\trxe[i].mbuf = mbuf;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * RSS random key supplied in section 7.1.2.9.3 of the Intel I225 datasheet.\n+ * Used as the default key.\n+ */\n+static uint8_t rss_intel_key[40] = {\n+\t0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,\n+\t0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,\n+\t0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,\n+\t0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,\n+\t0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,\n+};\n+\n+static void\n+igc_rss_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw;\n+\tuint32_t mrqc;\n+\n+\thw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tmrqc = IGC_READ_REG(hw, IGC_MRQC);\n+\tmrqc &= ~IGC_MRQC_ENABLE_MASK;\n+\tIGC_WRITE_REG(hw, IGC_MRQC, mrqc);\n+}\n+\n+void\n+igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)\n+{\n+\tuint8_t *hash_key;\n+\tuint32_t rss_key;\n+\tuint32_t mrqc;\n+\tuint64_t rss_hf;\n+\tuint16_t i;\n+\n+\thash_key = rss_conf->rss_key;\n+\tif (hash_key != NULL) {\n+\t\t/* Fill in RSS hash key */\n+\t\tfor (i = 0; i < 10; i++) {\n+\t\t\trss_key  = hash_key[(i * 4)];\n+\t\t\trss_key |= hash_key[(i * 4) + 1] << 8;\n+\t\t\trss_key |= hash_key[(i * 4) + 2] << 16;\n+\t\t\trss_key |= hash_key[(i * 4) + 3] << 24;\n+\t\t\tIGC_WRITE_REG(hw, IGC_RSSRK(i), rss_key);\n+\t\t}\n+\t}\n+\n+\t/* Set configured hashing protocols in MRQC register */\n+\trss_hf = rss_conf->rss_hf;\n+\tmrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */\n+\tif (rss_hf & ETH_RSS_IPV4)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV4;\n+\tif (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;\n+\tif (rss_hf & ETH_RSS_IPV6)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6;\n+\tif (rss_hf & ETH_RSS_IPV6_EX)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;\n+\tif (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;\n+\tif (rss_hf & ETH_RSS_IPV6_TCP_EX)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;\n+\tif (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;\n+\tif (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;\n+\tif (rss_hf & ETH_RSS_IPV6_UDP_EX)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;\n+\tIGC_WRITE_REG(hw, IGC_MRQC, mrqc);\n+}\n+\n+static void\n+igc_rss_configure(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_eth_rss_conf rss_conf;\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint16_t i;\n+\n+\t/* Fill in redirection table. 
*/\n+\tfor (i = 0; i < 128; i++) {\n+\t\tunion igc_reta {\n+\t\t\tuint32_t dword;\n+\t\t\tuint8_t  bytes[4];\n+\t\t} reta;\n+\t\tuint8_t q_idx;\n+\n+\t\tq_idx = (uint8_t)((dev->data->nb_rx_queues > 1) ?\n+\t\t\t\t   i % dev->data->nb_rx_queues : 0);\n+\t\treta.bytes[i & 3] = q_idx;\n+\t\tif ((i & 3) == 3)\n+\t\t\tIGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta.dword);\n+\t}\n+\n+\t/*\n+\t * Configure the RSS key and the RSS protocols used to compute\n+\t * the RSS hash of input packets.\n+\t */\n+\trss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;\n+\tif ((rss_conf.rss_hf & IGC_RSS_OFFLOAD_ALL) == 0) {\n+\t\tigc_rss_disable(dev);\n+\t\treturn;\n+\t}\n+\n+\tif (rss_conf.rss_key == NULL)\n+\t\trss_conf.rss_key = rss_intel_key; /* Default hash key */\n+\tigc_hw_rss_hash_set(hw, &rss_conf);\n+}\n+\n+static int\n+igc_dev_mq_rx_configure(struct rte_eth_dev *dev)\n+{\n+\tif (RTE_ETH_DEV_SRIOV(dev).active) {\n+\t\tPMD_INIT_LOG(ERR, \"SRIOV unsupported!\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tswitch (dev->data->dev_conf.rxmode.mq_mode) {\n+\tcase ETH_MQ_RX_RSS:\n+\t\tigc_rss_configure(dev);\n+\t\tbreak;\n+\tcase ETH_MQ_RX_NONE:\n+\t\tigc_rss_disable(dev);\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_INIT_LOG(ERR, \"rx mode(%d) not supported!\",\n+\t\t\tdev->data->dev_conf.rxmode.mq_mode);\n+\t\treturn -EINVAL;\n+\t}\n+\treturn 0;\n+}\n+\n+int\n+igc_rx_init(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_rx_queue *rxq;\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tconst uint64_t offloads = dev->data->dev_conf.rxmode.offloads;\n+\tuint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;\n+\tuint32_t rctl;\n+\tuint32_t rxcsum;\n+\tuint16_t buf_size;\n+\tuint16_t rctl_bsize;\n+\tuint16_t i;\n+\tint ret;\n+\n+\tdev->rx_pkt_burst = eth_igc_recv_pkts;\n+\n+\t/*\n+\t * Make sure receives are disabled while setting\n+\t * up the descriptor ring.\n+\t */\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);\n+\n+\t/* Configure support of jumbo frames, if any. */\n+\tif (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {\n+\t\trctl |= IGC_RCTL_LPE;\n+\n+\t\t/*\n+\t\t * Set maximum packet length by default, and might be updated\n+\t\t * together with enabling/disabling dual VLAN.\n+\t\t */\n+\t\tIGC_WRITE_REG(hw, IGC_RLPML,\n+\t\t\t\tmax_rx_pkt_len + VLAN_TAG_SIZE);\n+\t} else {\n+\t\trctl &= ~IGC_RCTL_LPE;\n+\t}\n+\n+\t/* Configure and enable each RX queue. 
*/\n+\trctl_bsize = 0;\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tuint64_t bus_addr;\n+\t\tuint32_t rxdctl;\n+\t\tuint32_t srrctl;\n+\n+\t\trxq = dev->data->rx_queues[i];\n+\t\trxq->flags = 0;\n+\n+\t\t/* Allocate buffers for descriptor rings and set up queue */\n+\t\tret = igc_alloc_rx_queue_mbufs(rxq);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\t/*\n+\t\t * Reset crc_len in case it was changed after queue setup by a\n+\t\t * call to configure\n+\t\t */\n+\t\trxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?\n+\t\t\t\tRTE_ETHER_CRC_LEN : 0;\n+\n+\t\tbus_addr = rxq->rx_ring_phys_addr;\n+\t\tIGC_WRITE_REG(hw, IGC_RDLEN(rxq->reg_idx),\n+\t\t\t\trxq->nb_rx_desc *\n+\t\t\t\tsizeof(union igc_adv_rx_desc));\n+\t\tIGC_WRITE_REG(hw, IGC_RDBAH(rxq->reg_idx),\n+\t\t\t\t(uint32_t)(bus_addr >> 32));\n+\t\tIGC_WRITE_REG(hw, IGC_RDBAL(rxq->reg_idx),\n+\t\t\t\t(uint32_t)bus_addr);\n+\n+\t\t/* set descriptor configuration */\n+\t\tsrrctl = IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;\n+\n+\t\tsrrctl |= (RTE_PKTMBUF_HEADROOM / 64) <<\n+\t\t\t\tIGC_SRRCTL_BSIZEHEADER_SHIFT;\n+\t\t/*\n+\t\t * Configure RX buffer size.\n+\t\t */\n+\t\tbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -\n+\t\t\tRTE_PKTMBUF_HEADROOM);\n+\t\tif (buf_size >= 1024) {\n+\t\t\t/*\n+\t\t\t * Configure the BSIZEPACKET field of the SRRCTL\n+\t\t\t * register of the queue.\n+\t\t\t * Value is in 1 KB resolution, from 1 KB to 16 KB.\n+\t\t\t * If this field is equal to 0b, then RCTL.BSIZE\n+\t\t\t * determines the RX packet buffer size.\n+\t\t\t */\n+\n+\t\t\tsrrctl |= ((buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT) &\n+\t\t\t\t   IGC_SRRCTL_BSIZEPKT_MASK);\n+\t\t\tbuf_size = (uint16_t)((srrctl &\n+\t\t\t\t\t\tIGC_SRRCTL_BSIZEPKT_MASK) <<\n+\t\t\t\t\t       IGC_SRRCTL_BSIZEPKT_SHIFT);\n+\n+\t\t\t/* It adds dual VLAN length for supporting dual VLAN */\n+\t\t\tif (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size)\n+\t\t\t\tdev->data->scattered_rx = 1;\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * Use BSIZE field of the device RCTL register.\n+\t\t\t */\n+\t\t\tif (rctl_bsize == 0 || rctl_bsize > buf_size)\n+\t\t\t\trctl_bsize = buf_size;\n+\t\t\tdev->data->scattered_rx = 1;\n+\t\t}\n+\n+\t\t/* Set if packets are dropped when no descriptors available */\n+\t\tif (rxq->drop_en)\n+\t\t\tsrrctl |= IGC_SRRCTL_DROP_EN;\n+\n+\t\tIGC_WRITE_REG(hw, IGC_SRRCTL(rxq->reg_idx), srrctl);\n+\n+\t\t/* Enable this RX queue. 
*/\n+\t\trxdctl = IGC_RXDCTL_QUEUE_ENABLE;\n+\t\trxdctl |= ((u32)rxq->pthresh << IGC_RXDCTL_PTHRESH_SHIFT) &\n+\t\t\t\tIGC_RXDCTL_PTHRESH_MSK;\n+\t\trxdctl |= ((u32)rxq->hthresh << IGC_RXDCTL_HTHRESH_SHIFT) &\n+\t\t\t\tIGC_RXDCTL_HTHRESH_MSK;\n+\t\trxdctl |= ((u32)rxq->wthresh << IGC_RXDCTL_WTHRESH_SHIFT) &\n+\t\t\t\tIGC_RXDCTL_WTHRESH_MSK;\n+\t\tIGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);\n+\t}\n+\n+\tif (offloads & DEV_RX_OFFLOAD_SCATTER)\n+\t\tdev->data->scattered_rx = 1;\n+\n+\tif (dev->data->scattered_rx) {\n+\t\tPMD_INIT_LOG(DEBUG, \"forcing scatter mode\");\n+\t\tdev->rx_pkt_burst = eth_igc_recv_scattered_pkts;\n+\t}\n+\t/*\n+\t * Setup BSIZE field of RCTL register, if needed.\n+\t * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL\n+\t * register, since the code above configures the SRRCTL register of\n+\t * the RX queue in such a case.\n+\t * All configurable sizes are:\n+\t * 16384: rctl |= (IGC_RCTL_SZ_16384 | IGC_RCTL_BSEX);\n+\t *  8192: rctl |= (IGC_RCTL_SZ_8192  | IGC_RCTL_BSEX);\n+\t *  4096: rctl |= (IGC_RCTL_SZ_4096  | IGC_RCTL_BSEX);\n+\t *  2048: rctl |= IGC_RCTL_SZ_2048;\n+\t *  1024: rctl |= IGC_RCTL_SZ_1024;\n+\t *   512: rctl |= IGC_RCTL_SZ_512;\n+\t *   256: rctl |= IGC_RCTL_SZ_256;\n+\t */\n+\tif (rctl_bsize > 0) {\n+\t\tif (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */\n+\t\t\trctl |= IGC_RCTL_SZ_512;\n+\t\telse /* 256 <= buf_size < 512 - use 256 */\n+\t\t\trctl |= IGC_RCTL_SZ_256;\n+\t}\n+\n+\t/*\n+\t * Configure RSS if device configured with multiple RX queues.\n+\t */\n+\tigc_dev_mq_rx_configure(dev);\n+\n+\t/* Update the rctl since igc_dev_mq_rx_configure may change its value */\n+\trctl |= IGC_READ_REG(hw, IGC_RCTL);\n+\n+\t/*\n+\t * Setup the Checksum Register.\n+\t * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.\n+\t */\n+\trxcsum = IGC_READ_REG(hw, IGC_RXCSUM);\n+\trxcsum |= IGC_RXCSUM_PCSD;\n+\n+\t/* Enable both L3/L4 rx checksum offload */\n+\tif (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)\n+\t\trxcsum |= IGC_RXCSUM_IPOFL;\n+\telse\n+\t\trxcsum &= ~IGC_RXCSUM_IPOFL;\n+\tif (offloads &\n+\t\t(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))\n+\t\trxcsum |= IGC_RXCSUM_TUOFL;\n+\telse\n+\t\trxcsum &= ~IGC_RXCSUM_TUOFL;\n+\tif (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)\n+\t\trxcsum |= IGC_RXCSUM_CRCOFL;\n+\telse\n+\t\trxcsum &= ~IGC_RXCSUM_CRCOFL;\n+\n+\tIGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);\n+\n+\t/* Setup the Receive Control Register. */\n+\tif (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {\n+\t\trctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */\n+\n+\t\t/* clear STRCRC bit in all queues */\n+\t\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\t\trxq = dev->data->rx_queues[i];\n+\t\t\tuint32_t dvmolr = IGC_READ_REG(hw,\n+\t\t\t\tIGC_DVMOLR(rxq->reg_idx));\n+\t\t\tdvmolr &= ~IGC_DVMOLR_STRCRC;\n+\t\t\tIGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr);\n+\t\t}\n+\t} else {\n+\t\trctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. 
*/\n+\n+\t\t/* set STRCRC bit in all queues */\n+\t\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\t\trxq = dev->data->rx_queues[i];\n+\t\t\tuint32_t dvmolr = IGC_READ_REG(hw,\n+\t\t\t\tIGC_DVMOLR(rxq->reg_idx));\n+\t\t\tdvmolr |= IGC_DVMOLR_STRCRC;\n+\t\t\tIGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr);\n+\t\t}\n+\t}\n+\n+\trctl &= ~IGC_RCTL_MO_MSK;\n+\trctl &= ~IGC_RCTL_LBM_MSK;\n+\trctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO |\n+\t\t\tIGC_RCTL_DPF |\n+\t\t\t(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);\n+\n+\trctl &= ~(IGC_RCTL_HSEL_MSK | IGC_RCTL_CFIEN | IGC_RCTL_CFI |\n+\t\t\tIGC_RCTL_PSP | IGC_RCTL_PMCF);\n+\n+\t/* Make sure VLAN Filters are off. */\n+\trctl &= ~IGC_RCTL_VFE;\n+\t/* Don't store bad packets. */\n+\trctl &= ~IGC_RCTL_SBP;\n+\n+\t/* Enable Receives. */\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\n+\t/*\n+\t * Setup the HW Rx Head and Tail Descriptor Pointers.\n+\t * This needs to be done after enable.\n+\t */\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tIGC_WRITE_REG(hw, IGC_RDH(rxq->reg_idx), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx),\n+\t\t\t\trxq->nb_rx_desc - 1);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+igc_reset_rx_queue(struct igc_rx_queue *rxq)\n+{\n+\tstatic const union igc_adv_rx_desc zeroed_desc = { {0} };\n+\tunsigned int i;\n+\n+\t/* Zero out HW ring memory */\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++)\n+\t\trxq->rx_ring[i] = zeroed_desc;\n+\n+\trxq->rx_tail = 0;\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+}\n+\n+void\n+eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,\n+\t\t\tuint16_t rx_queue_id, int on)\n+{\n+\tstruct igc_hw *hw =\n+\t\tIGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct igc_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];\n+\tuint32_t reg_val;\n+\n+\tif (rx_queue_id >= IGC_QUEUE_PAIRS_NUM) {\n+\t\tPMD_DRV_LOG(ERR, \"Queue index(%u) illegal, max is %u\",\n+\t\t\trx_queue_id, IGC_QUEUE_PAIRS_NUM - 1);\n+\t\treturn;\n+\t}\n+\n+\treg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));\n+\tif (on) {\n+\t\t/* If vlan been stripped off, the CRC is meaningless. 
*/\n+\t\treg_val |= IGC_DVMOLR_STRVLAN | IGC_DVMOLR_STRCRC;\n+\t\trxq->offloads |= ETH_VLAN_STRIP_MASK;\n+\t} else {\n+\t\treg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);\n+\t\tif (dev->data->dev_conf.rxmode.offloads & ETH_VLAN_STRIP_MASK)\n+\t\t\trxq->offloads &= ~ETH_VLAN_STRIP_MASK;\n+\t}\n+\n+\tIGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);\n+}\n+\n+int\n+eth_igc_rx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\t uint16_t queue_idx,\n+\t\t\t uint16_t nb_desc,\n+\t\t\t unsigned int socket_id,\n+\t\t\t const struct rte_eth_rxconf *rx_conf,\n+\t\t\t struct rte_mempool *mp)\n+{\n+\tconst struct rte_memzone *rz;\n+\tstruct igc_rx_queue *rxq;\n+\tstruct igc_hw *hw;\n+\tunsigned int size;\n+\tuint64_t offloads;\n+\n+\toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n+\thw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\t/*\n+\t * Validate number of receive descriptors.\n+\t * It must not exceed hardware maximum, and must be multiple\n+\t * of IGC_RX_DESCRIPTOR_MULTIPLE.\n+\t */\n+\tif (nb_desc % IGC_RX_DESCRIPTOR_MULTIPLE != 0 ||\n+\t\tnb_desc > IGC_MAX_RXD || nb_desc < IGC_MIN_RXD) {\n+\t\tPMD_INIT_LOG(ERR, \"RX descriptor must be multiple of\"\n+\t\t\t\" %u(cur: %u) and between %u and %u!\",\n+\t\t\tIGC_RX_DESCRIPTOR_MULTIPLE, nb_desc,\n+\t\t\tIGC_MIN_RXD, IGC_MAX_RXD);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Free memory prior to re-allocation if needed */\n+\tif (dev->data->rx_queues[queue_idx] != NULL) {\n+\t\tigc_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* First allocate the RX queue data structure. */\n+\trxq = rte_zmalloc(\"ethdev RX queue\", sizeof(struct igc_rx_queue),\n+\t\t\t  RTE_CACHE_LINE_SIZE);\n+\tif (rxq == NULL)\n+\t\treturn -ENOMEM;\n+\trxq->offloads = offloads;\n+\trxq->mb_pool = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->pthresh = rx_conf->rx_thresh.pthresh;\n+\trxq->hthresh = rx_conf->rx_thresh.hthresh;\n+\trxq->wthresh = rx_conf->rx_thresh.wthresh;\n+\trxq->drop_en = rx_conf->rx_drop_en;\n+\trxq->rx_free_thresh = rx_conf->rx_free_thresh;\n+\trxq->queue_id = queue_idx;\n+\trxq->reg_idx = queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\n+\t/*\n+\t *  Allocate RX ring hardware descriptors. A memzone large enough to\n+\t *  handle the maximum ring size is allocated in order to allow for\n+\t *  resizing in later calls to the queue setup function.\n+\t */\n+\tsize = sizeof(union igc_adv_rx_desc) * IGC_MAX_RXD;\n+\trz = rte_eth_dma_zone_reserve(dev, \"rx_ring\", queue_idx, size,\n+\t\t\t\t      IGC_ALIGN, socket_id);\n+\tif (rz == NULL) {\n+\t\tigc_rx_queue_release(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\trxq->rdt_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_RDT(rxq->reg_idx));\n+\trxq->rdh_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_RDH(rxq->reg_idx));\n+\trxq->rx_ring_phys_addr = rz->iova;\n+\trxq->rx_ring = (union igc_adv_rx_desc *)rz->addr;\n+\n+\t/* Allocate software ring. 
*/\n+\trxq->sw_ring = rte_zmalloc(\"rxq->sw_ring\",\n+\t\t\t\t   sizeof(struct igc_rx_entry) * nb_desc,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE);\n+\tif (rxq->sw_ring == NULL) {\n+\t\tigc_rx_queue_release(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tPMD_INIT_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\" PRIx64,\n+\t\trxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);\n+\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\tigc_reset_rx_queue(rxq);\n+\n+\treturn 0;\n+}\n+\n+/* prepare packets for transmit */\n+static uint16_t\n+eth_igc_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tint i, ret;\n+\tstruct rte_mbuf *m;\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tm = tx_pkts[i];\n+\n+\t\t/* Check some limitations for TSO in hardware */\n+\t\tif (m->ol_flags & PKT_TX_TCP_SEG)\n+\t\t\tif (m->tso_segsz > IGC_TSO_MAX_MSS ||\n+\t\t\t\tm->l2_len + m->l3_len + m->l4_len >\n+\t\t\t\tIGC_TSO_MAX_HDRLEN) {\n+\t\t\t\trte_errno = EINVAL;\n+\t\t\t\treturn i;\n+\t\t\t}\n+\n+\t\tif (m->ol_flags & IGC_TX_OFFLOAD_NOTSUP_MASK) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn i;\n+\t\t}\n+\n+#ifdef RTE_LIBRTE_ETHDEV_DEBUG\n+\t\tret = rte_validate_tx_offload(m);\n+\t\tif (ret != 0) {\n+\t\t\trte_errno = -ret;\n+\t\t\treturn i;\n+\t\t}\n+#endif\n+\t\tret = rte_net_intel_cksum_prepare(m);\n+\t\tif (ret != 0) {\n+\t\t\trte_errno = -ret;\n+\t\t\treturn i;\n+\t\t}\n+\t}\n+\n+\treturn i;\n+}\n+\n+/*\n+ *There're some limitations in hardware for TCP segmentation offload. We\n+ *should check whether the parameters are valid.\n+ */\n+static inline uint64_t\n+check_tso_para(uint64_t ol_req, union igc_tx_offload ol_para)\n+{\n+\tif (!(ol_req & PKT_TX_TCP_SEG))\n+\t\treturn ol_req;\n+\tif (ol_para.tso_segsz > IGC_TSO_MAX_MSS || ol_para.l2_len +\n+\t\tol_para.l3_len + ol_para.l4_len > IGC_TSO_MAX_HDRLEN) {\n+\t\tol_req &= ~PKT_TX_TCP_SEG;\n+\t\tol_req |= PKT_TX_TCP_CKSUM;\n+\t}\n+\treturn ol_req;\n+}\n+\n+/*\n+ * Check which hardware context can be used. Use the existing match\n+ * or create a new context descriptor.\n+ */\n+static inline uint32_t\n+what_advctx_update(struct igc_tx_queue *txq, uint64_t flags,\n+\t\tunion igc_tx_offload tx_offload)\n+{\n+\tuint32_t curr = txq->ctx_curr;\n+\n+\t/* If match with the current context */\n+\tif (likely(txq->ctx_cache[curr].flags == flags &&\n+\t\ttxq->ctx_cache[curr].tx_offload.data ==\n+\t\t(txq->ctx_cache[curr].tx_offload_mask.data &\n+\t\ttx_offload.data))) {\n+\t\treturn curr;\n+\t}\n+\n+\t/* Total two context, if match with the second context */\n+\tcurr ^= 1;\n+\tif (likely(txq->ctx_cache[curr].flags == flags &&\n+\t\ttxq->ctx_cache[curr].tx_offload.data ==\n+\t\t(txq->ctx_cache[curr].tx_offload_mask.data &\n+\t\ttx_offload.data))) {\n+\t\ttxq->ctx_curr = curr;\n+\t\treturn curr;\n+\t}\n+\n+\t/* Mismatch, create new one */\n+\treturn IGC_CTX_NUM;\n+}\n+\n+/*\n+ * This is a separate function, looking for optimization opportunity here\n+ * Rework required to go with the pre-defined values.\n+ */\n+static inline void\n+igc_set_xmit_ctx(struct igc_tx_queue *txq,\n+\t\tvolatile struct igc_adv_tx_context_desc *ctx_txd,\n+\t\tuint64_t ol_flags, union igc_tx_offload tx_offload)\n+{\n+\tuint32_t type_tucmd_mlhl;\n+\tuint32_t mss_l4len_idx;\n+\tuint32_t ctx_curr;\n+\tuint32_t vlan_macip_lens;\n+\tunion igc_tx_offload tx_offload_mask;\n+\n+\t/* Use the previous context */\n+\ttxq->ctx_curr ^= 1;\n+\tctx_curr = txq->ctx_curr;\n+\n+\ttx_offload_mask.data = 0;\n+\ttype_tucmd_mlhl = 0;\n+\n+\t/* Specify which HW CTX to upload. 
*/\n+\tmss_l4len_idx = (ctx_curr << IGC_ADVTXD_IDX_SHIFT);\n+\n+\tif (ol_flags & PKT_TX_VLAN_PKT)\n+\t\ttx_offload_mask.vlan_tci = 0xffff;\n+\n+\t/* check if TCP segmentation required for this packet */\n+\tif (ol_flags & PKT_TX_TCP_SEG) {\n+\t\t/* implies IP cksum in IPv4 */\n+\t\tif (ol_flags & PKT_TX_IP_CKSUM)\n+\t\t\ttype_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4 |\n+\t\t\t\tIGC_ADVTXD_TUCMD_L4T_TCP |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\t\telse\n+\t\t\ttype_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV6 |\n+\t\t\t\tIGC_ADVTXD_TUCMD_L4T_TCP |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\n+\t\ttx_offload_mask.data |= TX_TSO_CMP_MASK;\n+\t\tmss_l4len_idx |= tx_offload.tso_segsz << IGC_ADVTXD_MSS_SHIFT;\n+\t\tmss_l4len_idx |= tx_offload.l4_len << IGC_ADVTXD_L4LEN_SHIFT;\n+\t} else { /* no TSO, check if hardware checksum is needed */\n+\t\tif (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))\n+\t\t\ttx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;\n+\n+\t\tif (ol_flags & PKT_TX_IP_CKSUM)\n+\t\t\ttype_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4;\n+\n+\t\tswitch (ol_flags & PKT_TX_L4_MASK) {\n+\t\tcase PKT_TX_TCP_CKSUM:\n+\t\t\ttype_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\t\t\tmss_l4len_idx |= sizeof(struct rte_tcp_hdr)\n+\t\t\t\t<< IGC_ADVTXD_L4LEN_SHIFT;\n+\t\t\tbreak;\n+\t\tcase PKT_TX_UDP_CKSUM:\n+\t\t\ttype_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\t\t\tmss_l4len_idx |= sizeof(struct rte_udp_hdr)\n+\t\t\t\t<< IGC_ADVTXD_L4LEN_SHIFT;\n+\t\t\tbreak;\n+\t\tcase PKT_TX_SCTP_CKSUM:\n+\t\t\ttype_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_SCTP |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\t\t\tmss_l4len_idx |= sizeof(struct rte_sctp_hdr)\n+\t\t\t\t<< IGC_ADVTXD_L4LEN_SHIFT;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\ttype_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_RSV |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\ttxq->ctx_cache[ctx_curr].flags = ol_flags;\n+\ttxq->ctx_cache[ctx_curr].tx_offload.data =\n+\t\ttx_offload_mask.data & tx_offload.data;\n+\ttxq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;\n+\n+\tctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);\n+\tvlan_macip_lens = (uint32_t)tx_offload.data;\n+\tctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);\n+\tctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);\n+\tctx_txd->u.launch_time = 0;\n+}\n+\n+static inline uint32_t\n+tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)\n+{\n+\tuint32_t cmdtype;\n+\tstatic uint32_t vlan_cmd[2] = {0, IGC_ADVTXD_DCMD_VLE};\n+\tstatic uint32_t tso_cmd[2] = {0, IGC_ADVTXD_DCMD_TSE};\n+\tcmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];\n+\tcmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];\n+\treturn cmdtype;\n+}\n+\n+static inline uint32_t\n+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)\n+{\n+\tstatic const uint32_t l4_olinfo[2] = {0, IGC_ADVTXD_POPTS_TXSM};\n+\tstatic const uint32_t l3_olinfo[2] = {0, IGC_ADVTXD_POPTS_IXSM};\n+\tuint32_t tmp;\n+\n+\ttmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];\n+\ttmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];\n+\ttmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];\n+\treturn tmp;\n+}\n+\n+static uint16_t\n+eth_igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct igc_tx_queue * const txq = tx_queue;\n+\tstruct igc_tx_entry * const sw_ring = txq->sw_ring;\n+\tstruct igc_tx_entry *txe, *txn;\n+\tvolatile 
union igc_adv_tx_desc * const txr = txq->tx_ring;\n+\tvolatile union igc_adv_tx_desc *txd;\n+\tstruct rte_mbuf *tx_pkt;\n+\tstruct rte_mbuf *m_seg;\n+\tuint64_t buf_dma_addr;\n+\tuint32_t olinfo_status;\n+\tuint32_t cmd_type_len;\n+\tuint32_t pkt_len;\n+\tuint16_t slen;\n+\tuint64_t ol_flags;\n+\tuint16_t tx_end;\n+\tuint16_t tx_id;\n+\tuint16_t tx_last;\n+\tuint16_t nb_tx;\n+\tuint64_t tx_ol_req;\n+\tuint32_t new_ctx = 0;\n+\tunion igc_tx_offload tx_offload = {0};\n+\n+\ttx_id = txq->tx_tail;\n+\ttxe = &sw_ring[tx_id];\n+\n+\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n+\t\ttx_pkt = *tx_pkts++;\n+\t\tpkt_len = tx_pkt->pkt_len;\n+\n+\t\tRTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);\n+\n+\t\t/*\n+\t\t * The number of descriptors that must be allocated for a\n+\t\t * packet is the number of segments of that packet, plus 1\n+\t\t * Context Descriptor for the VLAN Tag Identifier, if any.\n+\t\t * Determine the last TX descriptor to allocate in the TX ring\n+\t\t * for the packet, starting from the current position (tx_id)\n+\t\t * in the ring.\n+\t\t */\n+\t\ttx_last = (uint16_t)(tx_id + tx_pkt->nb_segs - 1);\n+\n+\t\tol_flags = tx_pkt->ol_flags;\n+\t\ttx_ol_req = ol_flags & IGC_TX_OFFLOAD_MASK;\n+\n+\t\t/* If a Context Descriptor need be built . */\n+\t\tif (tx_ol_req) {\n+\t\t\ttx_offload.l2_len = tx_pkt->l2_len;\n+\t\t\ttx_offload.l3_len = tx_pkt->l3_len;\n+\t\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\t\ttx_offload.vlan_tci = tx_pkt->vlan_tci;\n+\t\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n+\t\t\ttx_ol_req = check_tso_para(tx_ol_req, tx_offload);\n+\n+\t\t\tnew_ctx = what_advctx_update(txq, tx_ol_req,\n+\t\t\t\t\ttx_offload);\n+\t\t\t/* Only allocate context descriptor if required*/\n+\t\t\tnew_ctx = (new_ctx >= IGC_CTX_NUM);\n+\t\t\ttx_last = (uint16_t)(tx_last + new_ctx);\n+\t\t}\n+\t\tif (tx_last >= txq->nb_tx_desc)\n+\t\t\ttx_last = (uint16_t)(tx_last - txq->nb_tx_desc);\n+\n+\t\tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u pktlen=%u\"\n+\t\t\t\" tx_first=%u tx_last=%u\", txq->port_id, txq->queue_id,\n+\t\t\tpkt_len, tx_id, tx_last);\n+\n+\t\t/*\n+\t\t * Check if there are enough free descriptors in the TX ring\n+\t\t * to transmit the next packet.\n+\t\t * This operation is based on the two following rules:\n+\t\t *\n+\t\t *   1- Only check that the last needed TX descriptor can be\n+\t\t *      allocated (by construction, if that descriptor is free,\n+\t\t *      all intermediate ones are also free).\n+\t\t *\n+\t\t *      For this purpose, the index of the last TX descriptor\n+\t\t *      used for a packet (the \"last descriptor\" of a packet)\n+\t\t *      is recorded in the TX entries (the last one included)\n+\t\t *      that are associated with all TX descriptors allocated\n+\t\t *      for that packet.\n+\t\t *\n+\t\t *   2- Avoid to allocate the last free TX descriptor of the\n+\t\t *      ring, in order to never set the TDT register with the\n+\t\t *      same value stored in parallel by the NIC in the TDH\n+\t\t *      register, which makes the TX engine of the NIC enter\n+\t\t *      in a deadlock situation.\n+\t\t *\n+\t\t *      By extension, avoid to allocate a free descriptor that\n+\t\t *      belongs to the last set of free descriptors allocated\n+\t\t *      to the same packet previously transmitted.\n+\t\t */\n+\n+\t\t/*\n+\t\t * The \"last descriptor\" of the previously sent packet, if any,\n+\t\t * which used the last descriptor to allocate.\n+\t\t */\n+\t\ttx_end = sw_ring[tx_last].last_id;\n+\n+\t\t/*\n+\t\t * The next descriptor following that \"last descriptor\" 
in the\n+\t\t * ring.\n+\t\t */\n+\t\ttx_end = sw_ring[tx_end].next_id;\n+\n+\t\t/*\n+\t\t * The \"last descriptor\" associated with that next descriptor.\n+\t\t */\n+\t\ttx_end = sw_ring[tx_end].last_id;\n+\n+\t\t/*\n+\t\t * Check that this descriptor is free.\n+\t\t */\n+\t\tif (!(txr[tx_end].wb.status & IGC_TXD_STAT_DD)) {\n+\t\t\tif (nb_tx == 0)\n+\t\t\t\treturn 0;\n+\t\t\tgoto end_of_tx;\n+\t\t}\n+\n+\t\t/*\n+\t\t * Set common flags of all TX Data Descriptors.\n+\t\t *\n+\t\t * The following bits must be set in all Data Descriptors:\n+\t\t *   - IGC_ADVTXD_DTYP_DATA\n+\t\t *   - IGC_ADVTXD_DCMD_DEXT\n+\t\t *\n+\t\t * The following bits must be set in the first Data Descriptor\n+\t\t * and are ignored in the other ones:\n+\t\t *   - IGC_ADVTXD_DCMD_IFCS\n+\t\t *   - IGC_ADVTXD_MAC_1588\n+\t\t *   - IGC_ADVTXD_DCMD_VLE\n+\t\t *\n+\t\t * The following bits must only be set in the last Data\n+\t\t * Descriptor:\n+\t\t *   - IGC_TXD_CMD_EOP\n+\t\t *\n+\t\t * The following bits can be set in any Data Descriptor, but\n+\t\t * are only set in the last Data Descriptor:\n+\t\t *   - IGC_TXD_CMD_RS\n+\t\t */\n+\t\tcmd_type_len = txq->txd_type |\n+\t\t\tIGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_DEXT;\n+\t\tif (tx_ol_req & PKT_TX_TCP_SEG)\n+\t\t\tpkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len +\n+\t\t\t\t\ttx_pkt->l4_len);\n+\t\tolinfo_status = (pkt_len << IGC_ADVTXD_PAYLEN_SHIFT);\n+\n+\t\t/*\n+\t\t * Timer 0 should be used to for packet timestamping,\n+\t\t * sample the packet timestamp to reg 0\n+\t\t */\n+\t\tif (ol_flags & PKT_TX_IEEE1588_TMST)\n+\t\t\tcmd_type_len |= IGC_ADVTXD_MAC_TSTAMP;\n+\n+\t\tif (tx_ol_req) {\n+\t\t\t/* Setup TX Advanced context descriptor if required */\n+\t\t\tif (new_ctx) {\n+\t\t\t\tvolatile struct igc_adv_tx_context_desc *\n+\t\t\t\t\tctx_txd = (volatile struct\n+\t\t\t\t\tigc_adv_tx_context_desc *)&txr[tx_id];\n+\n+\t\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n+\n+\t\t\t\tif (txe->mbuf != NULL) {\n+\t\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\t\t\ttxe->mbuf = NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tigc_set_xmit_ctx(txq, ctx_txd, tx_ol_req,\n+\t\t\t\t\t\ttx_offload);\n+\n+\t\t\t\ttxe->last_id = tx_last;\n+\t\t\t\ttx_id = txe->next_id;\n+\t\t\t\ttxe = txn;\n+\t\t\t}\n+\n+\t\t\t/* Setup the TX Advanced Data Descriptor */\n+\t\t\tcmd_type_len |=\n+\t\t\t\ttx_desc_vlan_flags_to_cmdtype(tx_ol_req);\n+\t\t\tolinfo_status |=\n+\t\t\t\ttx_desc_cksum_flags_to_olinfo(tx_ol_req);\n+\t\t\tolinfo_status |= (txq->ctx_curr <<\n+\t\t\t\t\tIGC_ADVTXD_IDX_SHIFT);\n+\t\t}\n+\n+\t\tm_seg = tx_pkt;\n+\t\tdo {\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n+\n+\t\t\ttxd = &txr[tx_id];\n+\n+\t\t\tif (txe->mbuf != NULL)\n+\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\ttxe->mbuf = m_seg;\n+\n+\t\t\t/* Set up transmit descriptor */\n+\t\t\tslen = (uint16_t)m_seg->data_len;\n+\t\t\tbuf_dma_addr = rte_mbuf_data_iova(m_seg);\n+\t\t\ttxd->read.buffer_addr =\n+\t\t\t\trte_cpu_to_le_64(buf_dma_addr);\n+\t\t\ttxd->read.cmd_type_len =\n+\t\t\t\trte_cpu_to_le_32(cmd_type_len | slen);\n+\t\t\ttxd->read.olinfo_status =\n+\t\t\t\trte_cpu_to_le_32(olinfo_status);\n+\t\t\ttxe->last_id = tx_last;\n+\t\t\ttx_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t\tm_seg = m_seg->next;\n+\t\t} while (m_seg != NULL);\n+\n+\t\t/*\n+\t\t * The last packet data descriptor needs End Of Packet (EOP)\n+\t\t * and Report Status (RS).\n+\t\t */\n+\t\ttxd->read.cmd_type_len |=\n+\t\t\trte_cpu_to_le_32(IGC_TXD_CMD_EOP | 
IGC_TXD_CMD_RS);\n+\t}\n+end_of_tx:\n+\trte_wmb();\n+\n+\t/*\n+\t * Set the Transmit Descriptor Tail (TDT).\n+\t */\n+\tIGC_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);\n+\tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u tx_tail=%u nb_tx=%u\",\n+\t\ttxq->port_id, txq->queue_id, tx_id, nb_tx);\n+\ttxq->tx_tail = tx_id;\n+\n+\treturn nb_tx;\n+}\n+\n+int eth_igc_tx_descriptor_status(void *tx_queue, uint16_t offset)\n+{\n+\tstruct igc_tx_queue *txq = tx_queue;\n+\tvolatile uint32_t *status;\n+\tuint32_t desc;\n+\n+\tif (unlikely(offset >= txq->nb_tx_desc))\n+\t\treturn -EINVAL;\n+\n+\tdesc = txq->tx_tail + offset;\n+\tif (desc >= txq->nb_tx_desc)\n+\t\tdesc -= txq->nb_tx_desc;\n+\n+\tstatus = &txq->tx_ring[desc].wb.status;\n+\tif (*status & rte_cpu_to_le_32(IGC_TXD_STAT_DD))\n+\t\treturn RTE_ETH_TX_DESC_DONE;\n+\n+\treturn RTE_ETH_TX_DESC_FULL;\n+}\n+\n+static void\n+igc_tx_queue_release_mbufs(struct igc_tx_queue *txq)\n+{\n+\tunsigned int i;\n+\n+\tif (txq->sw_ring != NULL) {\n+\t\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\t\tif (txq->sw_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);\n+\t\t\t\ttxq->sw_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+static void\n+igc_tx_queue_release(struct igc_tx_queue *txq)\n+{\n+\tigc_tx_queue_release_mbufs(txq);\n+\trte_free(txq->sw_ring);\n+\trte_free(txq);\n+}\n+\n+void eth_igc_tx_queue_release(void *txq)\n+{\n+\tif (txq)\n+\t\tigc_tx_queue_release(txq);\n+}\n+\n+static void\n+igc_reset_tx_queue_stat(struct igc_tx_queue *txq)\n+{\n+\ttxq->tx_head = 0;\n+\ttxq->tx_tail = 0;\n+\ttxq->ctx_curr = 0;\n+\tmemset((void *)&txq->ctx_cache, 0,\n+\t\tIGC_CTX_NUM * sizeof(struct igc_advctx_info));\n+}\n+\n+static void\n+igc_reset_tx_queue(struct igc_tx_queue *txq)\n+{\n+\tstruct igc_tx_entry *txe = txq->sw_ring;\n+\tuint16_t i, prev;\n+\n+\t/* Initialize ring entries */\n+\tprev = (uint16_t)(txq->nb_tx_desc - 1);\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\tvolatile union igc_adv_tx_desc *txd = &txq->tx_ring[i];\n+\n+\t\ttxd->wb.status = IGC_TXD_STAT_DD;\n+\t\ttxe[i].mbuf = NULL;\n+\t\ttxe[i].last_id = i;\n+\t\ttxe[prev].next_id = i;\n+\t\tprev = i;\n+\t}\n+\n+\ttxq->txd_type = IGC_ADVTXD_DTYP_DATA;\n+\tigc_reset_tx_queue_stat(txq);\n+}\n+\n+/*\n+ * clear all rx/tx queue\n+ */\n+void\n+igc_dev_clear_queues(struct rte_eth_dev *dev)\n+{\n+\tuint16_t i;\n+\tstruct igc_tx_queue *txq;\n+\tstruct igc_rx_queue *rxq;\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\tif (txq != NULL) {\n+\t\t\tigc_tx_queue_release_mbufs(txq);\n+\t\t\tigc_reset_tx_queue(txq);\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tif (rxq != NULL) {\n+\t\t\tigc_rx_queue_release_mbufs(rxq);\n+\t\t\tigc_reset_rx_queue(rxq);\n+\t\t}\n+\t}\n+}\n+\n+int eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_txconf *tx_conf)\n+{\n+\tconst struct rte_memzone *tz;\n+\tstruct igc_tx_queue *txq;\n+\tstruct igc_hw *hw;\n+\tuint32_t size;\n+\n+\tif (nb_desc % IGC_TX_DESCRIPTOR_MULTIPLE != 0 ||\n+\t\tnb_desc > IGC_MAX_TXD || nb_desc < IGC_MIN_TXD) {\n+\t\tPMD_INIT_LOG(ERR, \"TX-descriptor must be a multiple of \"\n+\t\t\t\"%u and between %u and %u!, cur: %u\",\n+\t\t\tIGC_TX_DESCRIPTOR_MULTIPLE,\n+\t\t\tIGC_MAX_TXD, IGC_MIN_TXD, nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\thw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\t/*\n+\t * The tx_free_thresh and 
tx_rs_thresh values are not used in the 2.5G\n+\t * driver.\n+\t */\n+\tif (tx_conf->tx_free_thresh != 0)\n+\t\tPMD_INIT_LOG(INFO, \"The tx_free_thresh parameter is not \"\n+\t\t\t\"used for the 2.5G driver.\");\n+\tif (tx_conf->tx_rs_thresh != 0)\n+\t\tPMD_INIT_LOG(INFO, \"The tx_rs_thresh parameter is not \"\n+\t\t\t\"used for the 2.5G driver.\");\n+\tif (tx_conf->tx_thresh.wthresh == 0)\n+\t\tPMD_INIT_LOG(INFO, \"To improve 2.5G driver performance, \"\n+\t\t\t\"consider setting the TX WTHRESH value to 4, 8, or 16.\");\n+\n+\t/* Free memory prior to re-allocation if needed */\n+\tif (dev->data->tx_queues[queue_idx] != NULL) {\n+\t\tigc_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* First allocate the tx queue data structure */\n+\ttxq = rte_zmalloc(\"ethdev TX queue\", sizeof(struct igc_tx_queue),\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\tif (txq == NULL)\n+\t\treturn -ENOMEM;\n+\n+\t/*\n+\t * Allocate TX ring hardware descriptors. A memzone large enough to\n+\t * handle the maximum ring size is allocated in order to allow for\n+\t * resizing in later calls to the queue setup function.\n+\t */\n+\tsize = sizeof(union igc_adv_tx_desc) * IGC_MAX_TXD;\n+\ttz = rte_eth_dma_zone_reserve(dev, \"tx_ring\", queue_idx, size,\n+\t\t\t\t      IGC_ALIGN, socket_id);\n+\tif (tz == NULL) {\n+\t\tigc_tx_queue_release(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->pthresh = tx_conf->tx_thresh.pthresh;\n+\ttxq->hthresh = tx_conf->tx_thresh.hthresh;\n+\ttxq->wthresh = tx_conf->tx_thresh.wthresh;\n+\n+\ttxq->queue_id = queue_idx;\n+\ttxq->reg_idx = queue_idx;\n+\ttxq->port_id = dev->data->port_id;\n+\n+\ttxq->tdt_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_TDT(txq->reg_idx));\n+\ttxq->tx_ring_phys_addr = tz->iova;\n+\n+\ttxq->tx_ring = (union igc_adv_tx_desc *)tz->addr;\n+\t/* Allocate software ring */\n+\ttxq->sw_ring = rte_zmalloc(\"txq->sw_ring\",\n+\t\t\t\t   sizeof(struct igc_tx_entry) * nb_desc,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE);\n+\tif (txq->sw_ring == NULL) {\n+\t\tigc_tx_queue_release(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\tPMD_INIT_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\" PRIx64,\n+\t\ttxq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);\n+\n+\tigc_reset_tx_queue(txq);\n+\tdev->tx_pkt_burst = eth_igc_xmit_pkts;\n+\tdev->tx_pkt_prepare = &eth_igc_prep_pkts;\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\ttxq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n+\n+\treturn 0;\n+}\n+\n+int\n+eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt)\n+{\n+\tstruct igc_tx_queue *txq = txqueue;\n+\tstruct igc_tx_entry *sw_ring;\n+\tvolatile union igc_adv_tx_desc *txr;\n+\tuint16_t tx_first; /* First segment analyzed. */\n+\tuint16_t tx_id;    /* Current segment being processed. */\n+\tuint16_t tx_last;  /* Last segment in the current packet. */\n+\tuint16_t tx_next;  /* First segment of the next packet. */\n+\tuint32_t count;\n+\n+\tif (txq == NULL)\n+\t\treturn -ENODEV;\n+\n+\tcount = 0;\n+\tsw_ring = txq->sw_ring;\n+\ttxr = txq->tx_ring;\n+\n+\t/*\n+\t * tx_tail is the last sent packet on the sw_ring. Goto the end\n+\t * of that packet (the last segment in the packet chain) and\n+\t * then the next segment will be the start of the oldest segment\n+\t * in the sw_ring. This is the first packet that will be\n+\t * attempted to be freed.\n+\t */\n+\n+\t/* Get last segment in most recently added packet. 
*/\n+\ttx_first = sw_ring[txq->tx_tail].last_id;\n+\n+\t/* Get the next segment, which is the oldest segment in ring. */\n+\ttx_first = sw_ring[tx_first].next_id;\n+\n+\t/* Set the current index to the first. */\n+\ttx_id = tx_first;\n+\n+\t/*\n+\t * Loop through each packet. For each packet, verify that an\n+\t * mbuf exists and that the last segment is free. If so, free\n+\t * it and move on.\n+\t */\n+\twhile (1) {\n+\t\ttx_last = sw_ring[tx_id].last_id;\n+\n+\t\tif (sw_ring[tx_last].mbuf) {\n+\t\t\tif (!(txr[tx_last].wb.status &\n+\t\t\t\t\trte_cpu_to_le_32(IGC_TXD_STAT_DD)))\n+\t\t\t\tbreak;\n+\n+\t\t\t/* Get the start of the next packet. */\n+\t\t\ttx_next = sw_ring[tx_last].next_id;\n+\n+\t\t\t/*\n+\t\t\t * Loop through all segments in a\n+\t\t\t * packet.\n+\t\t\t */\n+\t\t\tdo {\n+\t\t\t\trte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);\n+\t\t\t\tsw_ring[tx_id].mbuf = NULL;\n+\t\t\t\tsw_ring[tx_id].last_id = tx_id;\n+\n+\t\t\t\t/* Move to next segemnt. */\n+\t\t\t\ttx_id = sw_ring[tx_id].next_id;\n+\t\t\t} while (tx_id != tx_next);\n+\n+\t\t\t/*\n+\t\t\t * Increment the number of packets\n+\t\t\t * freed.\n+\t\t\t */\n+\t\t\tcount++;\n+\t\t\tif (unlikely(count == free_cnt))\n+\t\t\t\tbreak;\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * There are multiple reasons to be here:\n+\t\t\t * 1) All the packets on the ring have been\n+\t\t\t *    freed - tx_id is equal to tx_first\n+\t\t\t *    and some packets have been freed.\n+\t\t\t *    - Done, exit\n+\t\t\t * 2) Interfaces has not sent a rings worth of\n+\t\t\t *    packets yet, so the segment after tail is\n+\t\t\t *    still empty. Or a previous call to this\n+\t\t\t *    function freed some of the segments but\n+\t\t\t *    not all so there is a hole in the list.\n+\t\t\t *    Hopefully this is a rare case.\n+\t\t\t *    - Walk the list and find the next mbuf. If\n+\t\t\t *      there isn't one, then done.\n+\t\t\t */\n+\t\t\tif (likely(tx_id == tx_first && count != 0))\n+\t\t\t\tbreak;\n+\n+\t\t\t/*\n+\t\t\t * Walk the list and find the next mbuf, if any.\n+\t\t\t */\n+\t\t\tdo {\n+\t\t\t\t/* Move to next segemnt. */\n+\t\t\t\ttx_id = sw_ring[tx_id].next_id;\n+\n+\t\t\t\tif (sw_ring[tx_id].mbuf)\n+\t\t\t\t\tbreak;\n+\n+\t\t\t} while (tx_id != tx_first);\n+\n+\t\t\t/*\n+\t\t\t * Determine why previous loop bailed. If there\n+\t\t\t * is not an mbuf, done.\n+\t\t\t */\n+\t\t\tif (sw_ring[tx_id].mbuf == NULL)\n+\t\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn count;\n+}\n+\n+void\n+igc_tx_init(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t tctl;\n+\tuint32_t txdctl;\n+\tuint16_t i;\n+\n+\t/* Setup the Base and Length of the Tx Descriptor Rings. */\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\tstruct igc_tx_queue *txq = dev->data->tx_queues[i];\n+\t\tuint64_t bus_addr = txq->tx_ring_phys_addr;\n+\n+\t\tIGC_WRITE_REG(hw, IGC_TDLEN(txq->reg_idx),\n+\t\t\t\ttxq->nb_tx_desc *\n+\t\t\t\tsizeof(union igc_adv_tx_desc));\n+\t\tIGC_WRITE_REG(hw, IGC_TDBAH(txq->reg_idx),\n+\t\t\t\t(uint32_t)(bus_addr >> 32));\n+\t\tIGC_WRITE_REG(hw, IGC_TDBAL(txq->reg_idx),\n+\t\t\t\t(uint32_t)bus_addr);\n+\n+\t\t/* Setup the HW Tx Head and Tail descriptor pointers. */\n+\t\tIGC_WRITE_REG(hw, IGC_TDT(txq->reg_idx), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_TDH(txq->reg_idx), 0);\n+\n+\t\t/* Setup Transmit threshold registers. 
*/\n+\t\ttxdctl = ((u32)txq->pthresh << IGC_TXDCTL_PTHRESH_SHIFT) &\n+\t\t\t\tIGC_TXDCTL_PTHRESH_MSK;\n+\t\ttxdctl |= ((u32)txq->hthresh << IGC_TXDCTL_HTHRESH_SHIFT) &\n+\t\t\t\tIGC_TXDCTL_HTHRESH_MSK;\n+\t\ttxdctl |= ((u32)txq->wthresh << IGC_TXDCTL_WTHRESH_SHIFT) &\n+\t\t\t\tIGC_TXDCTL_WTHRESH_MSK;\n+\t\ttxdctl |= IGC_TXDCTL_QUEUE_ENABLE;\n+\t\tIGC_WRITE_REG(hw, IGC_TXDCTL(txq->reg_idx), txdctl);\n+\t}\n+\n+\tigc_config_collision_dist(hw);\n+\n+\t/* Program the Transmit Control Register. */\n+\ttctl = IGC_READ_REG(hw, IGC_TCTL);\n+\ttctl &= ~IGC_TCTL_CT;\n+\ttctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |\n+\t\t (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));\n+\n+\t/* This write will effectively turn on the transmit unit. */\n+\tIGC_WRITE_REG(hw, IGC_TCTL, tctl);\n+}\n+\n+void\n+eth_igc_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo)\n+{\n+\tstruct igc_rx_queue *rxq;\n+\n+\trxq = dev->data->rx_queues[queue_id];\n+\n+\tqinfo->mp = rxq->mb_pool;\n+\tqinfo->scattered_rx = dev->data->scattered_rx;\n+\tqinfo->nb_desc = rxq->nb_rx_desc;\n+\n+\tqinfo->conf.rx_free_thresh = rxq->rx_free_thresh;\n+\tqinfo->conf.rx_drop_en = rxq->drop_en;\n+\tqinfo->conf.offloads = rxq->offloads;\n+}\n+\n+void\n+eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo)\n+{\n+\tstruct igc_tx_queue *txq;\n+\n+\ttxq = dev->data->tx_queues[queue_id];\n+\n+\tqinfo->nb_desc = txq->nb_tx_desc;\n+\n+\tqinfo->conf.tx_thresh.pthresh = txq->pthresh;\n+\tqinfo->conf.tx_thresh.hthresh = txq->hthresh;\n+\tqinfo->conf.tx_thresh.wthresh = txq->wthresh;\n+\tqinfo->conf.offloads = txq->offloads;\n+}\n+\n+int\n+eth_igc_timesync_read_rx_timestamp(struct rte_eth_dev *dev,\n+\t\t\t       struct timespec *timestamp,\n+\t\t\t       uint32_t queue_idx)\n+{\n+\tstruct igc_rx_queue *rxq;\n+\n+\tif (queue_idx >= IGC_QUEUE_PAIRS_NUM) {\n+\t\tPMD_DRV_LOG(ERR, \"Error queue(%u), expect it smaller than %u\",\n+\t\t\t\tqueue_idx, IGC_QUEUE_PAIRS_NUM);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trxq = dev->data->tx_queues[queue_idx];\n+\t*timestamp = rxq->timestamp[0];\n+\treturn 0;\n+}\n+\n+/*\n+ * Place timestamp at the beginning of a receive buffer.\n+ */\n+int\n+igc_enable_rx_queue_timestamp(struct rte_eth_dev *dev, uint16_t queue_idx)\n+{\n+\tstruct igc_hw *hw;\n+\tstruct igc_rx_queue *rxq;\n+\tuint32_t srrctl;\n+\n+\tif (queue_idx >= IGC_QUEUE_PAIRS_NUM) {\n+\t\tPMD_DRV_LOG(ERR, \"Error queue(%u), expect it smaller than %u\",\n+\t\t\t\tqueue_idx, IGC_QUEUE_PAIRS_NUM);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\thw = IGC_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\trxq = dev->data->rx_queues[queue_idx];\n+\tsrrctl = IGC_READ_REG(hw, IGC_SRRCTL(rxq->reg_idx));\n+\n+\t/* select timer 0 to report timestamp. 
*/\n+\tsrrctl &= ~(IGC_SRRCTL_TIME1_MSK | IGC_SRRCTL_TIME0_MSK);\n+\tsrrctl |= IGC_SRRCTL_TIMESTAMP_EN;\n+\tIGC_WRITE_REG(hw, IGC_SRRCTL(rxq->reg_idx), srrctl);\n+\treturn 0;\n+}\n+\n+\ndiff --git a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h\nnew file mode 100644\nindex 0000000..240f276\n--- /dev/null\n+++ b/drivers/net/igc/igc_txrx.h\n@@ -0,0 +1,56 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2010-2020 Intel Corporation\n+ */\n+\n+#ifndef _IGC_TXRX_H_\n+#define _IGC_TXRX_H_\n+\n+#include \"igc_ethdev.h\"\n+\n+/*\n+ * RX/TX function prototypes\n+ */\n+void eth_igc_tx_queue_release(void *txq);\n+void eth_igc_rx_queue_release(void *rxq);\n+void igc_dev_clear_queues(struct rte_eth_dev *dev);\n+void eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,\n+\t\t\tuint16_t rx_queue_id, int on);\n+int eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\t\tuint16_t nb_rx_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\tstruct rte_mempool *mb_pool);\n+\n+uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,\n+\t\tuint16_t rx_queue_id);\n+\n+int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset);\n+\n+int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset);\n+\n+int eth_igc_tx_descriptor_status(void *tx_queue, uint16_t offset);\n+\n+int eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_txconf *tx_conf);\n+\n+int eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt);\n+\n+int igc_rx_init(struct rte_eth_dev *dev);\n+void igc_tx_init(struct rte_eth_dev *dev);\n+\n+void\n+igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf);\n+void eth_igc_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo);\n+void eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo);\n+\n+int\n+eth_igc_timesync_read_rx_timestamp(struct rte_eth_dev *dev,\n+\t\t\t       struct timespec *timestamp,\n+\t\t\t       uint32_t queue_idx);\n+\n+int igc_enable_rx_queue_timestamp(struct rte_eth_dev *dev, uint16_t queue_idx);\n+\n+\n+#endif\n",
    "prefixes": [
        "RFC",
        "2/7"
    ]
}