get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch (a full update of its writable fields).
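
As a rough illustration of how these operations can be scripted, the sketch below retrieves the patch record shown under the GET example and, commented out, issues a partial update via PATCH. This is a minimal sketch assuming the Python requests library and token-based write authentication; the placeholder token and the permission to change patch state (normally limited to project maintainers) are assumptions, not something shown in the response below.

import requests

BASE = "https://patches.dpdk.org/api"

# Read access needs no authentication.
resp = requests.get(f"{BASE}/patches/68246/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])

# Partial update via PATCH: only the listed fields are changed. Write access
# needs an API token and sufficient permissions; the token is a placeholder.
# resp = requests.patch(
#     f"{BASE}/patches/68246/",
#     headers={"Authorization": "Token <your-api-token>"},
#     json={"state": "accepted"},
# )
# resp.raise_for_status()

A PUT request would instead carry the full set of writable fields rather than only the ones being changed.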

GET /api/patches/68246/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 68246,
    "url": "https://patches.dpdk.org/api/patches/68246/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20200413063037.13728-5-alvinx.zhang@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200413063037.13728-5-alvinx.zhang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200413063037.13728-5-alvinx.zhang@intel.com",
    "date": "2020-04-13T06:30:30",
    "name": "[v3,04/11] net/igc: support reception and transmission of packets",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "c4c7531507b045ba8f0516287f8010a9885af2c1",
    "submitter": {
        "id": 1398,
        "url": "https://patches.dpdk.org/api/people/1398/?format=api",
        "name": "Alvin Zhang",
        "email": "alvinx.zhang@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20200413063037.13728-5-alvinx.zhang@intel.com/mbox/",
    "series": [
        {
            "id": 9328,
            "url": "https://patches.dpdk.org/api/series/9328/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=9328",
            "date": "2020-04-13T06:30:26",
            "name": "igc pmd",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/9328/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/68246/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/68246/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 11503A0577;\n\tMon, 13 Apr 2020 08:32:31 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 37C8C1BF46;\n\tMon, 13 Apr 2020 08:31:42 +0200 (CEST)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n by dpdk.org (Postfix) with ESMTP id E46B62986\n for <dev@dpdk.org>; Mon, 13 Apr 2020 08:31:37 +0200 (CEST)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 12 Apr 2020 23:31:36 -0700",
            "from shwdenpg235.ccr.corp.intel.com ([10.240.182.60])\n by orsmga002.jf.intel.com with ESMTP; 12 Apr 2020 23:31:33 -0700"
        ],
        "IronPort-SDR": [
            "\n UmFsZ6RhXqyX7txw+FbVZI4IWythCj9dXX9krzPhizREYIZd4XmLeZ44FyHz7BLfPqJRpLsVbz\n uhJ6GXVC7HsQ==",
            "\n sjccsAV38y6a2b0LBl991HIJ7mc5yRAymcrXI9+4QXzxm4baqeHrlbqC4Rb+CjVSNqOMheiQN2\n ROepV13Q/pPg=="
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.72,377,1580803200\"; d=\"scan'208\";a=\"270987670\"",
        "From": "alvinx.zhang@intel.com",
        "To": "dev@dpdk.org",
        "Cc": "xiaolong.ye@intel.com,\n\tAlvin Zhang <alvinx.zhang@intel.com>",
        "Date": "Mon, 13 Apr 2020 14:30:30 +0800",
        "Message-Id": "<20200413063037.13728-5-alvinx.zhang@intel.com>",
        "X-Mailer": "git-send-email 2.21.0.windows.1",
        "In-Reply-To": "<20200413063037.13728-1-alvinx.zhang@intel.com>",
        "References": "<20200413063037.13728-1-alvinx.zhang@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH v3 04/11] net/igc: support reception and\n\ttransmission of packets",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Alvin Zhang <alvinx.zhang@intel.com>\n\nBelow ops are added too:\nmac_addr_add\nmac_addr_remove\nmac_addr_set\nset_mc_addr_list\nmtu_set\npromiscuous_enable\npromiscuous_disable\nallmulticast_enable\nallmulticast_disable\nrx_queue_setup\nrx_queue_release\nrx_queue_count\nrx_descriptor_done\nrx_descriptor_status\ntx_descriptor_status\ntx_queue_setup\ntx_queue_release\ntx_done_cleanup\nrxq_info_get\ntxq_info_get\ndev_supported_ptypes_get\n\nSigned-off-by: Alvin Zhang <alvinx.zhang@intel.com>\n\nv2:\n- fix a Rx offload capability fault\n- fix mtu setting fault if extend vlan has been enabled\n- modify codes according to the comments\n\nv3: fix rx queue offload fault\n---\n doc/guides/nics/features/igc.ini |   15 +\n drivers/net/igc/Makefile         |    1 +\n drivers/net/igc/igc_ethdev.c     |  326 +++++-\n drivers/net/igc/igc_ethdev.h     |   62 ++\n drivers/net/igc/igc_logs.h       |   14 +\n drivers/net/igc/igc_txrx.c       | 2107 ++++++++++++++++++++++++++++++++++++++\n drivers/net/igc/igc_txrx.h       |   50 +\n drivers/net/igc/meson.build      |    3 +-\n 8 files changed, 2532 insertions(+), 46 deletions(-)\n create mode 100644 drivers/net/igc/igc_txrx.c\n create mode 100644 drivers/net/igc/igc_txrx.h",
    "diff": "diff --git a/doc/guides/nics/features/igc.ini b/doc/guides/nics/features/igc.ini\nindex 0fbdf7c..f910483 100644\n--- a/doc/guides/nics/features/igc.ini\n+++ b/doc/guides/nics/features/igc.ini\n@@ -8,6 +8,21 @@ Link status          = Y\n Link status event    = Y\n FW version           = Y\n LED                  = Y\n+Packet type parsing  = Y\n+Rx descriptor status = Y\n+Tx descriptor status = Y\n+MTU update           = Y\n+Jumbo frame          = Y\n+Scattered Rx         = Y\n+TSO                  = Y\n+Promiscuous mode     = Y\n+Allmulticast mode    = Y\n+Unicast MAC filter   = Y\n+Multicast MAC filter = Y\n+RSS hash             = Y\n+CRC offload          = Y\n+L3 checksum offload  = Y\n+L4 checksum offload  = Y\n Linux UIO            = Y\n Linux VFIO           = Y\n x86-64               = Y\ndiff --git a/drivers/net/igc/Makefile b/drivers/net/igc/Makefile\nindex 0902811..c162c51 100644\n--- a/drivers/net/igc/Makefile\n+++ b/drivers/net/igc/Makefile\n@@ -33,5 +33,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_osdep.c\n SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_phy.c\n SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_logs.c\n SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_ethdev.c\n+SRCS-$(CONFIG_RTE_LIBRTE_IGC_PMD) += igc_txrx.c\n \n include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c\nindex f82dfb5..533dd73 100644\n--- a/drivers/net/igc/igc_ethdev.c\n+++ b/drivers/net/igc/igc_ethdev.c\n@@ -12,7 +12,7 @@\n #include <rte_malloc.h>\n \n #include \"igc_logs.h\"\n-#include \"igc_ethdev.h\"\n+#include \"igc_txrx.h\"\n \n #define IGC_INTEL_VENDOR_ID\t\t0x8086\n \n@@ -45,6 +45,23 @@\n /* MSI-X other interrupt vector */\n #define IGC_MSIX_OTHER_INTR_VEC\t\t0\n \n+/* External VLAN Enable bit mask */\n+#define IGC_CTRL_EXT_EXT_VLAN\t\t(1u << 26)\n+\n+static const struct rte_eth_desc_lim rx_desc_lim = {\n+\t.nb_max = IGC_MAX_RXD,\n+\t.nb_min = IGC_MIN_RXD,\n+\t.nb_align = IGC_RXD_ALIGN,\n+};\n+\n+static const struct rte_eth_desc_lim tx_desc_lim = {\n+\t.nb_max = IGC_MAX_TXD,\n+\t.nb_min = IGC_MIN_TXD,\n+\t.nb_align = IGC_TXD_ALIGN,\n+\t.nb_seg_max = IGC_TX_MAX_SEG,\n+\t.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,\n+};\n+\n static const struct rte_pci_id pci_id_igc_map[] = {\n \t{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },\n \t{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V)  },\n@@ -69,17 +86,18 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \t\t\tstruct rte_eth_dev_info *dev_info);\n static int eth_igc_led_on(struct rte_eth_dev *dev);\n static int eth_igc_led_off(struct rte_eth_dev *dev);\n-static void eth_igc_tx_queue_release(void *txq);\n-static void eth_igc_rx_queue_release(void *rxq);\n-static int\n-eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n-\t\tuint16_t nb_rx_desc, unsigned int socket_id,\n-\t\tconst struct rte_eth_rxconf *rx_conf,\n-\t\tstruct rte_mempool *mb_pool);\n-static int\n-eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n-\t\tuint16_t nb_desc, unsigned int socket_id,\n-\t\tconst struct rte_eth_txconf *tx_conf);\n+static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);\n+static int eth_igc_rar_set(struct rte_eth_dev *dev,\n+\t\tstruct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);\n+static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);\n+static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_ether_addr *addr);\n+static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n+\t\t\t 
struct rte_ether_addr *mc_addr_set,\n+\t\t\t uint32_t nb_mc_addr);\n+static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);\n+static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);\n+static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);\n \n static const struct eth_dev_ops eth_igc_ops = {\n \t.dev_configure\t\t= eth_igc_configure,\n@@ -92,16 +110,30 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \t.dev_set_link_down\t= eth_igc_set_link_down,\n \t.promiscuous_enable\t= eth_igc_promiscuous_enable,\n \t.promiscuous_disable\t= eth_igc_promiscuous_disable,\n-\n+\t.allmulticast_enable\t= eth_igc_allmulticast_enable,\n+\t.allmulticast_disable\t= eth_igc_allmulticast_disable,\n \t.fw_version_get\t\t= eth_igc_fw_version_get,\n \t.dev_infos_get\t\t= eth_igc_infos_get,\n \t.dev_led_on\t\t= eth_igc_led_on,\n \t.dev_led_off\t\t= eth_igc_led_off,\n+\t.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,\n+\t.mtu_set\t\t= eth_igc_mtu_set,\n+\t.mac_addr_add\t\t= eth_igc_rar_set,\n+\t.mac_addr_remove\t= eth_igc_rar_clear,\n+\t.mac_addr_set\t\t= eth_igc_default_mac_addr_set,\n+\t.set_mc_addr_list\t= eth_igc_set_mc_addr_list,\n \n \t.rx_queue_setup\t\t= eth_igc_rx_queue_setup,\n \t.rx_queue_release\t= eth_igc_rx_queue_release,\n+\t.rx_queue_count\t\t= eth_igc_rx_queue_count,\n+\t.rx_descriptor_done\t= eth_igc_rx_descriptor_done,\n+\t.rx_descriptor_status\t= eth_igc_rx_descriptor_status,\n+\t.tx_descriptor_status\t= eth_igc_tx_descriptor_status,\n \t.tx_queue_setup\t\t= eth_igc_tx_queue_setup,\n \t.tx_queue_release\t= eth_igc_tx_queue_release,\n+\t.tx_done_cleanup\t= eth_igc_tx_done_cleanup,\n+\t.rxq_info_get\t\t= eth_igc_rxq_info_get,\n+\t.txq_info_get\t\t= eth_igc_txq_info_get,\n };\n \n /*\n@@ -367,6 +399,32 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n }\n \n /*\n+ * rx,tx enable/disable\n+ */\n+static void\n+eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tuint32_t tctl, rctl;\n+\n+\ttctl = IGC_READ_REG(hw, IGC_TCTL);\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\n+\tif (enable) {\n+\t\t/* enable Tx/Rx */\n+\t\ttctl |= IGC_TCTL_EN;\n+\t\trctl |= IGC_RCTL_EN;\n+\t} else {\n+\t\t/* disable Tx/Rx */\n+\t\ttctl &= ~IGC_TCTL_EN;\n+\t\trctl &= ~IGC_RCTL_EN;\n+\t}\n+\tIGC_WRITE_REG(hw, IGC_TCTL, tctl);\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\tIGC_WRITE_FLUSH(hw);\n+}\n+\n+/*\n  *  This routine disables all traffic on the adapter by issuing a\n  *  global reset on the MAC.\n  */\n@@ -381,6 +439,9 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \n \tadapter->stopped = 1;\n \n+\t/* disable receive and transmit */\n+\teth_igc_rxtx_control(dev, false);\n+\n \t/* disable all MSI-X interrupts */\n \tIGC_WRITE_REG(hw, IGC_EIMC, 0x1f);\n \tIGC_WRITE_FLUSH(hw);\n@@ -405,6 +466,8 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \t/* Power down the phy. 
Needed to make the link go Down */\n \teth_igc_set_link_down(dev);\n \n+\tigc_dev_clear_queues(dev);\n+\n \t/* clear the recorded link status */\n \tmemset(&link, 0, sizeof(link));\n \trte_eth_linkstatus_set(dev, &link);\n@@ -570,8 +633,7 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);\n \tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n \tuint32_t *speeds;\n-\tint num_speeds;\n-\tbool autoneg;\n+\tint ret;\n \n \tPMD_INIT_FUNC_TRACE();\n \n@@ -602,6 +664,16 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \t/* confiugre msix for rx interrupt */\n \tigc_configure_msix_intr(dev);\n \n+\tigc_tx_init(dev);\n+\n+\t/* This can fail when allocating mbufs for descriptor rings */\n+\tret = igc_rx_init(dev);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(ERR, \"Unable to initialize RX hardware\");\n+\t\tigc_dev_clear_queues(dev);\n+\t\treturn ret;\n+\t}\n+\n \tigc_clear_hw_cntrs_base_generic(hw);\n \n \t/* Setup link speed and duplex */\n@@ -610,8 +682,8 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \t\thw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;\n \t\thw->mac.autoneg = 1;\n \t} else {\n-\t\tnum_speeds = 0;\n-\t\tautoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;\n+\t\tint num_speeds = 0;\n+\t\tbool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;\n \n \t\t/* Reset */\n \t\thw->phy.autoneg_advertised = 0;\n@@ -685,6 +757,7 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \t/* resume enabled intr since hw reset */\n \tigc_intr_other_enable(dev);\n \n+\teth_igc_rxtx_control(dev, true);\n \teth_igc_link_update(dev, 0);\n \n \treturn 0;\n@@ -692,6 +765,7 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n error_invalid_config:\n \tPMD_DRV_LOG(ERR, \"Invalid advertised speeds (%u) for port %u\",\n \t\t     dev->data->dev_conf.link_speeds, dev->data->port_id);\n+\tigc_dev_clear_queues(dev);\n \treturn -EINVAL;\n }\n \n@@ -749,6 +823,27 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \treturn IGC_SUCCESS;\n }\n \n+/*\n+ * free all rx/tx queues.\n+ */\n+static void\n+igc_dev_free_queues(struct rte_eth_dev *dev)\n+{\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\teth_igc_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tdev->data->rx_queues[i] = NULL;\n+\t}\n+\tdev->data->nb_rx_queues = 0;\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\teth_igc_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tdev->data->tx_queues[i] = NULL;\n+\t}\n+\tdev->data->nb_tx_queues = 0;\n+}\n+\n static void\n eth_igc_close(struct rte_eth_dev *dev)\n {\n@@ -776,6 +871,7 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \n \tigc_phy_hw_reset(hw);\n \tigc_hw_control_release(hw);\n+\tigc_dev_free_queues(dev);\n \n \t/* Reset any pending lock */\n \tigc_reset_swfw_lock(hw);\n@@ -960,16 +1056,55 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n static int\n eth_igc_promiscuous_enable(struct rte_eth_dev *dev)\n {\n-\tPMD_INIT_FUNC_TRACE();\n-\tRTE_SET_USED(dev);\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tuint32_t rctl;\n+\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\trctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n \treturn 0;\n }\n \n static int\n eth_igc_promiscuous_disable(struct rte_eth_dev *dev)\n {\n-\tPMD_INIT_FUNC_TRACE();\n-\tRTE_SET_USED(dev);\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tuint32_t rctl;\n+\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\trctl &= (~IGC_RCTL_UPE);\n+\tif 
(dev->data->all_multicast == 1)\n+\t\trctl |= IGC_RCTL_MPE;\n+\telse\n+\t\trctl &= (~IGC_RCTL_MPE);\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_allmulticast_enable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tuint32_t rctl;\n+\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\trctl |= IGC_RCTL_MPE;\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_allmulticast_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tuint32_t rctl;\n+\n+\tif (dev->data->promiscuous == 1)\n+\t\treturn 0;\t/* must remain in all_multicast mode */\n+\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\trctl &= (~IGC_RCTL_MPE);\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n \treturn 0;\n }\n \n@@ -1019,10 +1154,40 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \tdev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */\n \tdev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;\n \tdev_info->max_mac_addrs = hw->mac.rar_entry_count;\n+\tdev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;\n+\tdev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;\n+\n \tdev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;\n \tdev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;\n \tdev_info->max_vmdq_pools = 0;\n \n+\tdev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);\n+\tdev_info->reta_size = ETH_RSS_RETA_SIZE_128;\n+\tdev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;\n+\n+\tdev_info->default_rxconf = (struct rte_eth_rxconf) {\n+\t\t.rx_thresh = {\n+\t\t\t.pthresh = IGC_DEFAULT_RX_PTHRESH,\n+\t\t\t.hthresh = IGC_DEFAULT_RX_HTHRESH,\n+\t\t\t.wthresh = IGC_DEFAULT_RX_WTHRESH,\n+\t\t},\n+\t\t.rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,\n+\t\t.rx_drop_en = 0,\n+\t\t.offloads = 0,\n+\t};\n+\n+\tdev_info->default_txconf = (struct rte_eth_txconf) {\n+\t\t.tx_thresh = {\n+\t\t\t.pthresh = IGC_DEFAULT_TX_PTHRESH,\n+\t\t\t.hthresh = IGC_DEFAULT_TX_HTHRESH,\n+\t\t\t.wthresh = IGC_DEFAULT_TX_WTHRESH,\n+\t\t},\n+\t\t.offloads = 0,\n+\t};\n+\n+\tdev_info->rx_desc_lim = rx_desc_lim;\n+\tdev_info->tx_desc_lim = tx_desc_lim;\n+\n \tdev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |\n \t\t\tETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |\n \t\t\tETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;\n@@ -1048,44 +1213,115 @@ static int eth_igc_infos_get(struct rte_eth_dev *dev,\n \treturn igc_led_off(hw) == IGC_SUCCESS ? 
0 : -ENOTSUP;\n }\n \n+static const uint32_t *\n+eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)\n+{\n+\tstatic const uint32_t ptypes[] = {\n+\t\t/* refers to rx_desc_pkt_info_to_pkt_type() */\n+\t\tRTE_PTYPE_L2_ETHER,\n+\t\tRTE_PTYPE_L3_IPV4,\n+\t\tRTE_PTYPE_L3_IPV4_EXT,\n+\t\tRTE_PTYPE_L3_IPV6,\n+\t\tRTE_PTYPE_L3_IPV6_EXT,\n+\t\tRTE_PTYPE_L4_TCP,\n+\t\tRTE_PTYPE_L4_UDP,\n+\t\tRTE_PTYPE_L4_SCTP,\n+\t\tRTE_PTYPE_TUNNEL_IP,\n+\t\tRTE_PTYPE_INNER_L3_IPV6,\n+\t\tRTE_PTYPE_INNER_L3_IPV6_EXT,\n+\t\tRTE_PTYPE_INNER_L4_TCP,\n+\t\tRTE_PTYPE_INNER_L4_UDP,\n+\t\tRTE_PTYPE_UNKNOWN\n+\t};\n+\n+\treturn ptypes;\n+}\n+\n static int\n-eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n-\t\tuint16_t nb_rx_desc, unsigned int socket_id,\n-\t\tconst struct rte_eth_rxconf *rx_conf,\n-\t\tstruct rte_mempool *mb_pool)\n+eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)\n {\n-\tPMD_INIT_FUNC_TRACE();\n-\tRTE_SET_USED(dev);\n-\tRTE_SET_USED(rx_queue_id);\n-\tRTE_SET_USED(nb_rx_desc);\n-\tRTE_SET_USED(socket_id);\n-\tRTE_SET_USED(rx_conf);\n-\tRTE_SET_USED(mb_pool);\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tuint32_t frame_size = mtu + IGC_ETH_OVERHEAD;\n+\tuint32_t rctl;\n+\n+\t/* if extend vlan has been enabled */\n+\tif (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)\n+\t\tframe_size += VLAN_TAG_SIZE;\n+\n+\t/* check that mtu is within the allowed range */\n+\tif (mtu < RTE_ETHER_MIN_MTU ||\n+\t\tframe_size > MAX_RX_JUMBO_FRAME_SIZE)\n+\t\treturn -EINVAL;\n+\n+\t/*\n+\t * refuse mtu that requires the support of scattered packets when\n+\t * this feature has not been enabled before.\n+\t */\n+\tif (!dev->data->scattered_rx &&\n+\t    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)\n+\t\treturn -EINVAL;\n+\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\n+\t/* switch to jumbo mode if needed */\n+\tif (mtu > RTE_ETHER_MTU) {\n+\t\tdev->data->dev_conf.rxmode.offloads |=\n+\t\t\tDEV_RX_OFFLOAD_JUMBO_FRAME;\n+\t\trctl |= IGC_RCTL_LPE;\n+\t} else {\n+\t\tdev->data->dev_conf.rxmode.offloads &=\n+\t\t\t~DEV_RX_OFFLOAD_JUMBO_FRAME;\n+\t\trctl &= ~IGC_RCTL_LPE;\n+\t}\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\n+\t/* update max frame size */\n+\tdev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;\n+\n+\tIGC_WRITE_REG(hw, IGC_RLPML,\n+\t\t\tdev->data->dev_conf.rxmode.max_rx_pkt_len);\n+\n \treturn 0;\n }\n \n static int\n-eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n-\t\tuint16_t nb_desc, unsigned int socket_id,\n-\t\tconst struct rte_eth_txconf *tx_conf)\n+eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,\n+\t\tuint32_t index, uint32_t pool)\n {\n-\tPMD_INIT_FUNC_TRACE();\n-\tRTE_SET_USED(dev);\n-\tRTE_SET_USED(queue_idx);\n-\tRTE_SET_USED(nb_desc);\n-\tRTE_SET_USED(socket_id);\n-\tRTE_SET_USED(tx_conf);\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\n+\tigc_rar_set(hw, mac_addr->addr_bytes, index);\n+\tRTE_SET_USED(pool);\n \treturn 0;\n }\n \n-static void eth_igc_tx_queue_release(void *txq)\n+static void\n+eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)\n {\n-\tRTE_SET_USED(txq);\n+\tuint8_t addr[RTE_ETHER_ADDR_LEN];\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\n+\tmemset(addr, 0, sizeof(addr));\n+\tigc_rar_set(hw, addr, index);\n }\n \n-static void eth_igc_rx_queue_release(void *rxq)\n+static int\n+eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,\n+\t\t\tstruct rte_ether_addr *addr)\n {\n-\tRTE_SET_USED(rxq);\n+\tstruct igc_hw *hw = 
IGC_DEV_PRIVATE_HW(dev);\n+\tigc_rar_set(hw, addr->addr_bytes, 0);\n+\treturn 0;\n+}\n+\n+static int\n+eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,\n+\t\t\t struct rte_ether_addr *mc_addr_set,\n+\t\t\t uint32_t nb_mc_addr)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tigc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);\n+\treturn 0;\n }\n \n static int\ndiff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h\nindex c1e3217..3910371 100644\n--- a/drivers/net/igc/igc_ethdev.h\n+++ b/drivers/net/igc/igc_ethdev.h\n@@ -18,12 +18,74 @@\n \n #define IGC_QUEUE_PAIRS_NUM\t\t4\n \n+#define IGC_HKEY_MAX_INDEX\t\t10\n+#define IGC_RSS_RDT_SIZD\t\t128\n+\n+/*\n+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be\n+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.\n+ * This will also optimize cache line size effect.\n+ * H/W supports up to cache line size 128.\n+ */\n+#define IGC_ALIGN\t\t\t128\n+\n+#define IGC_TX_DESCRIPTOR_MULTIPLE\t8\n+#define IGC_RX_DESCRIPTOR_MULTIPLE\t8\n+\n+#define IGC_RXD_ALIGN\t((uint16_t)(IGC_ALIGN / \\\n+\t\tsizeof(union igc_adv_rx_desc)))\n+#define IGC_TXD_ALIGN\t((uint16_t)(IGC_ALIGN / \\\n+\t\tsizeof(union igc_adv_tx_desc)))\n+#define IGC_MIN_TXD\tIGC_TX_DESCRIPTOR_MULTIPLE\n+#define IGC_MAX_TXD\t((uint16_t)(0x80000 / sizeof(union igc_adv_tx_desc)))\n+#define IGC_MIN_RXD\tIGC_RX_DESCRIPTOR_MULTIPLE\n+#define IGC_MAX_RXD\t((uint16_t)(0x80000 / sizeof(union igc_adv_rx_desc)))\n+\n+#define IGC_TX_MAX_SEG\t\tUINT8_MAX\n+#define IGC_TX_MAX_MTU_SEG\tUINT8_MAX\n+\n+#define IGC_RX_OFFLOAD_ALL\t(    \\\n+\tDEV_RX_OFFLOAD_IPV4_CKSUM  | \\\n+\tDEV_RX_OFFLOAD_UDP_CKSUM   | \\\n+\tDEV_RX_OFFLOAD_TCP_CKSUM   | \\\n+\tDEV_RX_OFFLOAD_SCTP_CKSUM  | \\\n+\tDEV_RX_OFFLOAD_JUMBO_FRAME | \\\n+\tDEV_RX_OFFLOAD_KEEP_CRC    | \\\n+\tDEV_RX_OFFLOAD_SCATTER)\n+\n+#define IGC_TX_OFFLOAD_ALL\t(    \\\n+\tDEV_TX_OFFLOAD_VLAN_INSERT | \\\n+\tDEV_TX_OFFLOAD_IPV4_CKSUM  | \\\n+\tDEV_TX_OFFLOAD_UDP_CKSUM   | \\\n+\tDEV_TX_OFFLOAD_TCP_CKSUM   | \\\n+\tDEV_TX_OFFLOAD_SCTP_CKSUM  | \\\n+\tDEV_TX_OFFLOAD_TCP_TSO     | \\\n+\tDEV_TX_OFFLOAD_UDP_TSO\t   | \\\n+\tDEV_TX_OFFLOAD_MULTI_SEGS)\n+\n+#define IGC_RSS_OFFLOAD_ALL\t(    \\\n+\tETH_RSS_IPV4               | \\\n+\tETH_RSS_NONFRAG_IPV4_TCP   | \\\n+\tETH_RSS_NONFRAG_IPV4_UDP   | \\\n+\tETH_RSS_IPV6               | \\\n+\tETH_RSS_NONFRAG_IPV6_TCP   | \\\n+\tETH_RSS_NONFRAG_IPV6_UDP   | \\\n+\tETH_RSS_IPV6_EX            | \\\n+\tETH_RSS_IPV6_TCP_EX        | \\\n+\tETH_RSS_IPV6_UDP_EX)\n+\n /* structure for interrupt relative data */\n struct igc_interrupt {\n \tuint32_t flags;\n \tuint32_t mask;\n };\n \n+/* Union of RSS redirect table register */\n+union igc_rss_reta_reg {\n+\tuint32_t dword;\n+\tuint8_t  bytes[4];\n+};\n+\n /*\n  * Structure to store private data for each driver instance (for each port).\n  */\ndiff --git a/drivers/net/igc/igc_logs.h b/drivers/net/igc/igc_logs.h\nindex 67b1699..6457c4d 100644\n--- a/drivers/net/igc/igc_logs.h\n+++ b/drivers/net/igc/igc_logs.h\n@@ -20,6 +20,20 @@\n \n #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, \" >>\")\n \n+#ifdef RTE_LIBRTE_IGC_DEBUG_RX\n+#define PMD_RX_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)\n+#endif\n+\n+#ifdef RTE_LIBRTE_IGC_DEBUG_TX\n+#define PMD_TX_LOG(level, fmt, args...) \\\n+\tRTE_LOG(level, PMD, \"%s(): \" fmt \"\\n\", __func__, ## args)\n+#else\n+#define PMD_TX_LOG(level, fmt, args...) 
do { } while (0)\n+#endif\n+\n #define PMD_DRV_LOG_RAW(level, fmt, args...) \\\n \trte_log(RTE_LOG_ ## level, igc_logtype_driver, \"%s(): \" fmt, \\\n \t\t__func__, ## args)\ndiff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c\nnew file mode 100644\nindex 0000000..906fbcb\n--- /dev/null\n+++ b/drivers/net/igc/igc_txrx.c\n@@ -0,0 +1,2107 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2020 Intel Corporation\n+ */\n+\n+#include <rte_config.h>\n+#include <rte_malloc.h>\n+#include <rte_ethdev_driver.h>\n+#include <rte_net.h>\n+\n+#include \"igc_logs.h\"\n+#include \"igc_txrx.h\"\n+\n+#ifdef RTE_PMD_USE_PREFETCH\n+#define rte_igc_prefetch(p)\t\trte_prefetch0(p)\n+#else\n+#define rte_igc_prefetch(p)\t\tdo {} while (0)\n+#endif\n+\n+#ifdef RTE_PMD_PACKET_PREFETCH\n+#define rte_packet_prefetch(p)\t\trte_prefetch1(p)\n+#else\n+#define rte_packet_prefetch(p)\t\tdo {} while (0)\n+#endif\n+\n+/* Multicast / Unicast table offset mask. */\n+#define IGC_RCTL_MO_MSK\t\t\t(3u << IGC_RCTL_MO_SHIFT)\n+\n+/* Loopback mode. */\n+#define IGC_RCTL_LBM_SHIFT\t\t6\n+#define IGC_RCTL_LBM_MSK\t\t(3u << IGC_RCTL_LBM_SHIFT)\n+\n+/* Hash select for MTA */\n+#define IGC_RCTL_HSEL_SHIFT\t\t8\n+#define IGC_RCTL_HSEL_MSK\t\t(3u << IGC_RCTL_HSEL_SHIFT)\n+#define IGC_RCTL_PSP\t\t\t(1u << 21)\n+\n+/* Receive buffer size for header buffer */\n+#define IGC_SRRCTL_BSIZEHEADER_SHIFT\t8\n+\n+/* RX descriptor status and error flags */\n+#define IGC_RXD_STAT_L4CS\t\t(1u << 5)\n+#define IGC_RXD_STAT_VEXT\t\t(1u << 9)\n+#define IGC_RXD_STAT_LLINT\t\t(1u << 11)\n+#define IGC_RXD_STAT_SCRC\t\t(1u << 12)\n+#define IGC_RXD_STAT_SMDT_MASK\t\t(3u << 13)\n+#define IGC_RXD_STAT_MC\t\t\t(1u << 19)\n+#define IGC_RXD_EXT_ERR_L4E\t\t(1u << 29)\n+#define IGC_RXD_EXT_ERR_IPE\t\t(1u << 30)\n+#define IGC_RXD_EXT_ERR_RXE\t\t(1u << 31)\n+#define IGC_RXD_RSS_TYPE_MASK\t\t0xfu\n+#define IGC_RXD_PCTYPE_MASK\t\t(0x7fu << 4)\n+#define IGC_RXD_ETQF_SHIFT\t\t12\n+#define IGC_RXD_ETQF_MSK\t\t(0xfu << IGC_RXD_ETQF_SHIFT)\n+#define IGC_RXD_VPKT\t\t\t(1u << 16)\n+\n+/* TXD control bits */\n+#define IGC_TXDCTL_PTHRESH_SHIFT\t0\n+#define IGC_TXDCTL_HTHRESH_SHIFT\t8\n+#define IGC_TXDCTL_WTHRESH_SHIFT\t16\n+#define IGC_TXDCTL_PTHRESH_MSK\t\t(0x1fu << IGC_TXDCTL_PTHRESH_SHIFT)\n+#define IGC_TXDCTL_HTHRESH_MSK\t\t(0x1fu << IGC_TXDCTL_HTHRESH_SHIFT)\n+#define IGC_TXDCTL_WTHRESH_MSK\t\t(0x1fu << IGC_TXDCTL_WTHRESH_SHIFT)\n+\n+/* RXD control bits */\n+#define IGC_RXDCTL_PTHRESH_SHIFT\t0\n+#define IGC_RXDCTL_HTHRESH_SHIFT\t8\n+#define IGC_RXDCTL_WTHRESH_SHIFT\t16\n+#define IGC_RXDCTL_PTHRESH_MSK\t\t(0x1fu << IGC_RXDCTL_PTHRESH_SHIFT)\n+#define IGC_RXDCTL_HTHRESH_MSK\t\t(0x1fu << IGC_RXDCTL_HTHRESH_SHIFT)\n+#define IGC_RXDCTL_WTHRESH_MSK\t\t(0x1fu << IGC_RXDCTL_WTHRESH_SHIFT)\n+\n+#define IGC_TSO_MAX_HDRLEN\t\t512\n+#define IGC_TSO_MAX_MSS\t\t\t9216\n+\n+/* Bit Mask to indicate what bits required for building TX context */\n+#define IGC_TX_OFFLOAD_MASK (\t\t\\\n+\t\tPKT_TX_OUTER_IPV4 |\t\\\n+\t\tPKT_TX_IPV6 |\t\t\\\n+\t\tPKT_TX_IPV4 |\t\t\\\n+\t\tPKT_TX_VLAN_PKT |\t\\\n+\t\tPKT_TX_IP_CKSUM |\t\\\n+\t\tPKT_TX_L4_MASK |\t\\\n+\t\tPKT_TX_TCP_SEG |\t\\\n+\t\tPKT_TX_UDP_SEG)\n+\n+#define IGC_TX_OFFLOAD_SEG\t(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)\n+\n+#define IGC_ADVTXD_POPTS_TXSM\t0x00000200 /* L4 Checksum offload request */\n+#define IGC_ADVTXD_POPTS_IXSM\t0x00000100 /* IP Checksum offload request */\n+\n+/* L4 Packet TYPE of Reserved */\n+#define IGC_ADVTXD_TUCMD_L4T_RSV\t0x00001800\n+\n+#define IGC_TX_OFFLOAD_NOTSUP_MASK (PKT_TX_OFFLOAD_MASK ^ 
IGC_TX_OFFLOAD_MASK)\n+\n+/**\n+ * Structure associated with each descriptor of the RX ring of a RX queue.\n+ */\n+struct igc_rx_entry {\n+\tstruct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */\n+};\n+\n+/**\n+ * Structure associated with each RX queue.\n+ */\n+struct igc_rx_queue {\n+\tstruct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */\n+\tvolatile union igc_adv_rx_desc *rx_ring;\n+\t/**< RX ring virtual address. */\n+\tuint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */\n+\tvolatile uint32_t   *rdt_reg_addr; /**< RDT register address. */\n+\tvolatile uint32_t   *rdh_reg_addr; /**< RDH register address. */\n+\tstruct igc_rx_entry *sw_ring;   /**< address of RX software ring. */\n+\tstruct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */\n+\tstruct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */\n+\tuint16_t            nb_rx_desc; /**< number of RX descriptors. */\n+\tuint16_t            rx_tail;    /**< current value of RDT register. */\n+\tuint16_t            nb_rx_hold; /**< number of held free RX desc. */\n+\tuint16_t            rx_free_thresh; /**< max free RX desc to hold. */\n+\tuint16_t            queue_id;   /**< RX queue index. */\n+\tuint16_t            reg_idx;    /**< RX queue register index. */\n+\tuint16_t            port_id;    /**< Device port identifier. */\n+\tuint8_t             pthresh;    /**< Prefetch threshold register. */\n+\tuint8_t             hthresh;    /**< Host threshold register. */\n+\tuint8_t             wthresh;    /**< Write-back threshold register. */\n+\tuint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */\n+\tuint8_t             drop_en;\t/**< If not 0, set SRRCTL.Drop_En. */\n+\tuint32_t            flags;      /**< RX flags. */\n+\tuint64_t\t    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */\n+};\n+\n+/** Offload features */\n+union igc_tx_offload {\n+\tuint64_t data;\n+\tstruct {\n+\t\tuint64_t l3_len:9; /**< L3 (IP) Header Length. */\n+\t\tuint64_t l2_len:7; /**< L2 (MAC) Header Length. */\n+\t\tuint64_t vlan_tci:16;\n+\t\t/**< VLAN Tag Control Identifier(CPU order). */\n+\t\tuint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */\n+\t\tuint64_t tso_segsz:16; /**< TCP TSO segment size. */\n+\t\t/* uint64_t unused:8; */\n+\t};\n+};\n+\n+/*\n+ * Compare mask for igc_tx_offload.data,\n+ * should be in sync with igc_tx_offload layout.\n+ */\n+#define TX_MACIP_LEN_CMP_MASK\t0x000000000000FFFFULL /**< L2L3 header mask. */\n+#define TX_VLAN_CMP_MASK\t0x00000000FFFF0000ULL /**< Vlan mask. */\n+#define TX_TCP_LEN_CMP_MASK\t0x000000FF00000000ULL /**< TCP header mask. */\n+#define TX_TSO_MSS_CMP_MASK\t0x00FFFF0000000000ULL /**< TSO segsz mask. */\n+/** Mac + IP + TCP + Mss mask. */\n+#define TX_TSO_CMP_MASK\t\\\n+\t(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)\n+\n+/**\n+ * Strucutre to check if new context need be built\n+ */\n+struct igc_advctx_info {\n+\tuint64_t flags;           /**< ol_flags related to context build. */\n+\t/** tx offload: vlan, tso, l2-l3-l4 lengths. */\n+\tunion igc_tx_offload tx_offload;\n+\t/** compare mask for tx offload. 
*/\n+\tunion igc_tx_offload tx_offload_mask;\n+};\n+\n+/**\n+ * Hardware context number\n+ */\n+enum {\n+\tIGC_CTX_0    = 0, /**< CTX0    */\n+\tIGC_CTX_1    = 1, /**< CTX1    */\n+\tIGC_CTX_NUM  = 2, /**< CTX_NUM */\n+};\n+\n+/**\n+ * Structure associated with each descriptor of the TX ring of a TX queue.\n+ */\n+struct igc_tx_entry {\n+\tstruct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */\n+\tuint16_t next_id; /**< Index of next descriptor in ring. */\n+\tuint16_t last_id; /**< Index of last scattered descriptor. */\n+};\n+\n+/**\n+ * Structure associated with each TX queue.\n+ */\n+struct igc_tx_queue {\n+\tvolatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */\n+\tuint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */\n+\tstruct igc_tx_entry    *sw_ring; /**< virtual address of SW ring. */\n+\tvolatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */\n+\tuint32_t               txd_type;      /**< Device-specific TXD type */\n+\tuint16_t               nb_tx_desc;    /**< number of TX descriptors. */\n+\tuint16_t               tx_tail;  /**< Current value of TDT register. */\n+\tuint16_t               tx_head;\n+\t/**< Index of first used TX descriptor. */\n+\tuint16_t               queue_id; /**< TX queue index. */\n+\tuint16_t               reg_idx;  /**< TX queue register index. */\n+\tuint16_t               port_id;  /**< Device port identifier. */\n+\tuint8_t                pthresh;  /**< Prefetch threshold register. */\n+\tuint8_t                hthresh;  /**< Host threshold register. */\n+\tuint8_t                wthresh;  /**< Write-back threshold register. */\n+\tuint8_t                ctx_curr;\n+\n+\t/**< Start context position for transmit queue. */\n+\tstruct igc_advctx_info ctx_cache[IGC_CTX_NUM];\n+\t/**< Hardware context history.*/\n+\tuint64_t\t       offloads; /**< offloads of DEV_TX_OFFLOAD_* */\n+};\n+\n+static inline uint64_t\n+rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)\n+{\n+\tstatic uint64_t l4_chksum_flags[] = {0, 0, PKT_RX_L4_CKSUM_GOOD,\n+\t\t\tPKT_RX_L4_CKSUM_BAD};\n+\n+\tstatic uint64_t l3_chksum_flags[] = {0, 0, PKT_RX_IP_CKSUM_GOOD,\n+\t\t\tPKT_RX_IP_CKSUM_BAD};\n+\tuint64_t pkt_flags = 0;\n+\tuint32_t tmp;\n+\n+\tif (statuserr & IGC_RXD_STAT_VP)\n+\t\tpkt_flags |= PKT_RX_VLAN_STRIPPED;\n+\n+\ttmp = !!(statuserr & (IGC_RXD_STAT_L4CS | IGC_RXD_STAT_UDPCS));\n+\ttmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_L4E);\n+\tpkt_flags |= l4_chksum_flags[tmp];\n+\n+\ttmp = !!(statuserr & IGC_RXD_STAT_IPCS);\n+\ttmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_IPE);\n+\tpkt_flags |= l3_chksum_flags[tmp];\n+\n+\treturn pkt_flags;\n+}\n+\n+#define IGC_PACKET_TYPE_IPV4              0X01\n+#define IGC_PACKET_TYPE_IPV4_TCP          0X11\n+#define IGC_PACKET_TYPE_IPV4_UDP          0X21\n+#define IGC_PACKET_TYPE_IPV4_SCTP         0X41\n+#define IGC_PACKET_TYPE_IPV4_EXT          0X03\n+#define IGC_PACKET_TYPE_IPV4_EXT_SCTP     0X43\n+#define IGC_PACKET_TYPE_IPV6              0X04\n+#define IGC_PACKET_TYPE_IPV6_TCP          0X14\n+#define IGC_PACKET_TYPE_IPV6_UDP          0X24\n+#define IGC_PACKET_TYPE_IPV6_EXT          0X0C\n+#define IGC_PACKET_TYPE_IPV6_EXT_TCP      0X1C\n+#define IGC_PACKET_TYPE_IPV6_EXT_UDP      0X2C\n+#define IGC_PACKET_TYPE_IPV4_IPV6         0X05\n+#define IGC_PACKET_TYPE_IPV4_IPV6_TCP     0X15\n+#define IGC_PACKET_TYPE_IPV4_IPV6_UDP     0X25\n+#define IGC_PACKET_TYPE_IPV4_IPV6_EXT     0X0D\n+#define IGC_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D\n+#define 
IGC_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D\n+#define IGC_PACKET_TYPE_MAX               0X80\n+#define IGC_PACKET_TYPE_MASK              0X7F\n+#define IGC_PACKET_TYPE_SHIFT             0X04\n+\n+static inline uint32_t\n+rx_desc_pkt_info_to_pkt_type(uint32_t pkt_info)\n+{\n+\tstatic const uint32_t\n+\t\tptype_table[IGC_PACKET_TYPE_MAX] __rte_cache_aligned = {\n+\t\t[IGC_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4,\n+\t\t[IGC_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4_EXT,\n+\t\t[IGC_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6,\n+\t\t[IGC_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6_EXT,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6_EXT,\n+\t\t[IGC_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,\n+\t\t[IGC_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,\n+\t\t[IGC_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,\n+\t\t[IGC_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,\n+\t\t[IGC_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6_UDP] =  RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,\n+\t\t[IGC_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,\n+\t\t[IGC_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |\n+\t\t\tRTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,\n+\t\t[IGC_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,\n+\t\t[IGC_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |\n+\t\t\tRTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,\n+\t};\n+\tif (unlikely(pkt_info & IGC_RXDADV_PKTTYPE_ETQF))\n+\t\treturn RTE_PTYPE_UNKNOWN;\n+\n+\tpkt_info = (pkt_info >> IGC_PACKET_TYPE_SHIFT) & IGC_PACKET_TYPE_MASK;\n+\n+\treturn ptype_table[pkt_info];\n+}\n+\n+static inline void\n+rx_desc_get_pkt_info(struct igc_rx_queue *rxq, struct rte_mbuf *rxm,\n+\t\tunion igc_adv_rx_desc *rxd, uint32_t staterr)\n+{\n+\tuint64_t pkt_flags;\n+\tuint32_t hlen_type_rss;\n+\tuint16_t pkt_info;\n+\n+\t/* Prefetch data of first segment, if configured to do so. 
*/\n+\trte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);\n+\n+\trxm->port = rxq->port_id;\n+\thlen_type_rss = rte_le_to_cpu_32(rxd->wb.lower.lo_dword.data);\n+\trxm->hash.rss = rte_le_to_cpu_32(rxd->wb.lower.hi_dword.rss);\n+\trxm->vlan_tci = rte_le_to_cpu_16(rxd->wb.upper.vlan);\n+\n+\tpkt_flags = (hlen_type_rss & IGC_RXD_RSS_TYPE_MASK) ?\n+\t\t\tPKT_RX_RSS_HASH : 0;\n+\n+\tif (hlen_type_rss & IGC_RXD_VPKT)\n+\t\tpkt_flags |= PKT_RX_VLAN;\n+\n+\tpkt_flags |= rx_desc_statuserr_to_pkt_flags(staterr);\n+\n+\trxm->ol_flags = pkt_flags;\n+\tpkt_info = rte_le_to_cpu_16(rxd->wb.lower.lo_dword.hs_rss.pkt_info);\n+\trxm->packet_type = rx_desc_pkt_info_to_pkt_type(pkt_info);\n+}\n+\n+static uint16_t\n+igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct igc_rx_queue * const rxq = rx_queue;\n+\tvolatile union igc_adv_rx_desc * const rx_ring = rxq->rx_ring;\n+\tstruct igc_rx_entry * const sw_ring = rxq->sw_ring;\n+\tuint16_t rx_id = rxq->rx_tail;\n+\tuint16_t nb_rx = 0;\n+\tuint16_t nb_hold = 0;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\tvolatile union igc_adv_rx_desc *rxdp;\n+\t\tstruct igc_rx_entry *rxe;\n+\t\tstruct rte_mbuf *rxm;\n+\t\tstruct rte_mbuf *nmb;\n+\t\tunion igc_adv_rx_desc rxd;\n+\t\tuint32_t staterr;\n+\t\tuint16_t data_len;\n+\n+\t\t/*\n+\t\t * The order of operations here is important as the DD status\n+\t\t * bit must not be read after any other descriptor fields.\n+\t\t * rx_ring and rxdp are pointing to volatile data so the order\n+\t\t * of accesses cannot be reordered by the compiler. If they were\n+\t\t * not volatile, they could be reordered which could lead to\n+\t\t * using invalid descriptor fields when read from rxd.\n+\t\t */\n+\t\trxdp = &rx_ring[rx_id];\n+\t\tstaterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error);\n+\t\tif (!(staterr & IGC_RXD_STAT_DD))\n+\t\t\tbreak;\n+\t\trxd = *rxdp;\n+\n+\t\t/*\n+\t\t * End of packet.\n+\t\t *\n+\t\t * If the IGC_RXD_STAT_EOP flag is not set, the RX packet is\n+\t\t * likely to be invalid and to be dropped by the various\n+\t\t * validation checks performed by the network stack.\n+\t\t *\n+\t\t * Allocate a new mbuf to replenish the RX ring descriptor.\n+\t\t * If the allocation fails:\n+\t\t *    - arrange for that RX descriptor to be the first one\n+\t\t *      being parsed the next time the receive function is\n+\t\t *      invoked [on the same queue].\n+\t\t *\n+\t\t *    - Stop parsing the RX ring and return immediately.\n+\t\t *\n+\t\t * This policy does not drop the packet received in the RX\n+\t\t * descriptor for which the allocation of a new mbuf failed.\n+\t\t * Thus, it allows that packet to be later retrieved if\n+\t\t * mbuf have been freed in the mean time.\n+\t\t * As a side effect, holding RX descriptors instead of\n+\t\t * systematically giving them back to the NIC may lead to\n+\t\t * RX ring exhaustion situations.\n+\t\t * However, the NIC can gracefully prevent such situations\n+\t\t * to happen by sending specific \"back-pressure\" flow control\n+\t\t * frames to its peer(s).\n+\t\t */\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_id=%u\"\n+\t\t\t\" staterr=0x%x data_len=%u\", rxq->port_id,\n+\t\t\trxq->queue_id, rx_id, staterr,\n+\t\t\trte_le_to_cpu_16(rxd.wb.upper.length));\n+\n+\t\tnmb = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\tif (nmb == NULL) {\n+\t\t\tunsigned int id;\n+\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u\"\n+\t\t\t\t\" queue_id=%u\", rxq->port_id, rxq->queue_id);\n+\t\t\tid = 
rxq->port_id;\n+\t\t\trte_eth_devices[id].data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tnb_hold++;\n+\t\trxe = &sw_ring[rx_id];\n+\t\trx_id++;\n+\t\tif (rx_id >= rxq->nb_rx_desc)\n+\t\t\trx_id = 0;\n+\n+\t\t/* Prefetch next mbuf while processing current one. */\n+\t\trte_igc_prefetch(sw_ring[rx_id].mbuf);\n+\n+\t\t/*\n+\t\t * When next RX descriptor is on a cache-line boundary,\n+\t\t * prefetch the next 4 RX descriptors and the next 8 pointers\n+\t\t * to mbufs.\n+\t\t */\n+\t\tif ((rx_id & 0x3) == 0) {\n+\t\t\trte_igc_prefetch(&rx_ring[rx_id]);\n+\t\t\trte_igc_prefetch(&sw_ring[rx_id]);\n+\t\t}\n+\n+\t\t/*\n+\t\t * Update RX descriptor with the physical address of the new\n+\t\t * data buffer of the new allocated mbuf.\n+\t\t */\n+\t\trxm = rxe->mbuf;\n+\t\trxe->mbuf = nmb;\n+\t\trxdp->read.hdr_addr = 0;\n+\t\trxdp->read.pkt_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n+\t\trxm->next = NULL;\n+\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tdata_len = rte_le_to_cpu_16(rxd.wb.upper.length) - rxq->crc_len;\n+\t\trxm->data_len = data_len;\n+\t\trxm->pkt_len = data_len;\n+\t\trxm->nb_segs = 1;\n+\n+\t\trx_desc_get_pkt_info(rxq, rxm, &rxd, staterr);\n+\n+\t\t/*\n+\t\t * Store the mbuf address into the next entry of the array\n+\t\t * of returned packets.\n+\t\t */\n+\t\trx_pkts[nb_rx++] = rxm;\n+\t}\n+\trxq->rx_tail = rx_id;\n+\n+\t/*\n+\t * If the number of free RX descriptors is greater than the RX free\n+\t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n+\t * register.\n+\t * Update the RDT with the value of the last processed RX descriptor\n+\t * minus 1, to guarantee that the RDT register is never equal to the\n+\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * hardware point of view...\n+\t */\n+\tnb_hold = nb_hold + rxq->nb_rx_hold;\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_tail=%u\"\n+\t\t\t\" nb_hold=%u nb_rx=%u\", rxq->port_id, rxq->queue_id,\n+\t\t\trx_id, nb_hold, nb_rx);\n+\t\trx_id = (rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1);\n+\t\tIGC_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);\n+\t\tnb_hold = 0;\n+\t}\n+\trxq->nb_rx_hold = nb_hold;\n+\treturn nb_rx;\n+}\n+\n+static uint16_t\n+igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n+\t\t\tuint16_t nb_pkts)\n+{\n+\tstruct igc_rx_queue * const rxq = rx_queue;\n+\tvolatile union igc_adv_rx_desc * const rx_ring = rxq->rx_ring;\n+\tstruct igc_rx_entry * const sw_ring = rxq->sw_ring;\n+\tstruct rte_mbuf *first_seg = rxq->pkt_first_seg;\n+\tstruct rte_mbuf *last_seg = rxq->pkt_last_seg;\n+\n+\tuint16_t rx_id = rxq->rx_tail;\n+\tuint16_t nb_rx = 0;\n+\tuint16_t nb_hold = 0;\n+\n+\twhile (nb_rx < nb_pkts) {\n+\t\tvolatile union igc_adv_rx_desc *rxdp;\n+\t\tstruct igc_rx_entry *rxe;\n+\t\tstruct rte_mbuf *rxm;\n+\t\tstruct rte_mbuf *nmb;\n+\t\tunion igc_adv_rx_desc rxd;\n+\t\tuint32_t staterr;\n+\t\tuint16_t data_len;\n+\n+next_desc:\n+\t\t/*\n+\t\t * The order of operations here is important as the DD status\n+\t\t * bit must not be read after any other descriptor fields.\n+\t\t * rx_ring and rxdp are pointing to volatile data so the order\n+\t\t * of accesses cannot be reordered by the compiler. 
If they were\n+\t\t * not volatile, they could be reordered which could lead to\n+\t\t * using invalid descriptor fields when read from rxd.\n+\t\t */\n+\t\trxdp = &rx_ring[rx_id];\n+\t\tstaterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error);\n+\t\tif (!(staterr & IGC_RXD_STAT_DD))\n+\t\t\tbreak;\n+\t\trxd = *rxdp;\n+\n+\t\t/*\n+\t\t * Descriptor done.\n+\t\t *\n+\t\t * Allocate a new mbuf to replenish the RX ring descriptor.\n+\t\t * If the allocation fails:\n+\t\t *    - arrange for that RX descriptor to be the first one\n+\t\t *      being parsed the next time the receive function is\n+\t\t *      invoked [on the same queue].\n+\t\t *\n+\t\t *    - Stop parsing the RX ring and return immediately.\n+\t\t *\n+\t\t * This policy does not drop the packet received in the RX\n+\t\t * descriptor for which the allocation of a new mbuf failed.\n+\t\t * Thus, it allows that packet to be later retrieved if\n+\t\t * mbuf have been freed in the mean time.\n+\t\t * As a side effect, holding RX descriptors instead of\n+\t\t * systematically giving them back to the NIC may lead to\n+\t\t * RX ring exhaustion situations.\n+\t\t * However, the NIC can gracefully prevent such situations\n+\t\t * to happen by sending specific \"back-pressure\" flow control\n+\t\t * frames to its peer(s).\n+\t\t */\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_id=%u\"\n+\t\t\t\" staterr=0x%x data_len=%u\", rxq->port_id,\n+\t\t\trxq->queue_id, rx_id, staterr,\n+\t\t\trte_le_to_cpu_16(rxd.wb.upper.length));\n+\n+\t\tnmb = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\t\tif (nmb == NULL) {\n+\t\t\tunsigned int id;\n+\t\t\tPMD_RX_LOG(DEBUG, \"RX mbuf alloc failed port_id=%u\"\n+\t\t\t\t\" queue_id=%u\", rxq->port_id, rxq->queue_id);\n+\t\t\tid = rxq->port_id;\n+\t\t\trte_eth_devices[id].data->rx_mbuf_alloc_failed++;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tnb_hold++;\n+\t\trxe = &sw_ring[rx_id];\n+\t\trx_id++;\n+\t\tif (rx_id >= rxq->nb_rx_desc)\n+\t\t\trx_id = 0;\n+\n+\t\t/* Prefetch next mbuf while processing current one. 
*/\n+\t\trte_igc_prefetch(sw_ring[rx_id].mbuf);\n+\n+\t\t/*\n+\t\t * When next RX descriptor is on a cache-line boundary,\n+\t\t * prefetch the next 4 RX descriptors and the next 8 pointers\n+\t\t * to mbufs.\n+\t\t */\n+\t\tif ((rx_id & 0x3) == 0) {\n+\t\t\trte_igc_prefetch(&rx_ring[rx_id]);\n+\t\t\trte_igc_prefetch(&sw_ring[rx_id]);\n+\t\t}\n+\n+\t\t/*\n+\t\t * Update RX descriptor with the physical address of the new\n+\t\t * data buffer of the new allocated mbuf.\n+\t\t */\n+\t\trxm = rxe->mbuf;\n+\t\trxe->mbuf = nmb;\n+\t\trxdp->read.hdr_addr = 0;\n+\t\trxdp->read.pkt_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));\n+\t\trxm->next = NULL;\n+\n+\t\t/*\n+\t\t * Set data length & data buffer address of mbuf.\n+\t\t */\n+\t\trxm->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tdata_len = rte_le_to_cpu_16(rxd.wb.upper.length);\n+\t\trxm->data_len = data_len;\n+\n+\t\t/*\n+\t\t * If this is the first buffer of the received packet,\n+\t\t * set the pointer to the first mbuf of the packet and\n+\t\t * initialize its context.\n+\t\t * Otherwise, update the total length and the number of segments\n+\t\t * of the current scattered packet, and update the pointer to\n+\t\t * the last mbuf of the current packet.\n+\t\t */\n+\t\tif (first_seg == NULL) {\n+\t\t\tfirst_seg = rxm;\n+\t\t\tfirst_seg->pkt_len = data_len;\n+\t\t\tfirst_seg->nb_segs = 1;\n+\t\t} else {\n+\t\t\tfirst_seg->pkt_len += data_len;\n+\t\t\tfirst_seg->nb_segs++;\n+\t\t\tlast_seg->next = rxm;\n+\t\t}\n+\n+\t\t/*\n+\t\t * If this is not the last buffer of the received packet,\n+\t\t * update the pointer to the last mbuf of the current scattered\n+\t\t * packet and continue to parse the RX ring.\n+\t\t */\n+\t\tif (!(staterr & IGC_RXD_STAT_EOP)) {\n+\t\t\tlast_seg = rxm;\n+\t\t\tgoto next_desc;\n+\t\t}\n+\n+\t\t/*\n+\t\t * This is the last buffer of the received packet.\n+\t\t * If the CRC is not stripped by the hardware:\n+\t\t *   - Subtract the CRC\tlength from the total packet length.\n+\t\t *   - If the last buffer only contains the whole CRC or a part\n+\t\t *     of it, free the mbuf associated to the last buffer.\n+\t\t *     If part of the CRC is also contained in the previous\n+\t\t *     mbuf, subtract the length of that CRC part from the\n+\t\t *     data length of the previous mbuf.\n+\t\t */\n+\t\tif (unlikely(rxq->crc_len > 0)) {\n+\t\t\tfirst_seg->pkt_len -= RTE_ETHER_CRC_LEN;\n+\t\t\tif (data_len <= RTE_ETHER_CRC_LEN) {\n+\t\t\t\trte_pktmbuf_free_seg(rxm);\n+\t\t\t\tfirst_seg->nb_segs--;\n+\t\t\t\tlast_seg->data_len = last_seg->data_len -\n+\t\t\t\t\t (RTE_ETHER_CRC_LEN - data_len);\n+\t\t\t\tlast_seg->next = NULL;\n+\t\t\t} else {\n+\t\t\t\trxm->data_len = (uint16_t)\n+\t\t\t\t\t(data_len - RTE_ETHER_CRC_LEN);\n+\t\t\t}\n+\t\t}\n+\n+\t\trx_desc_get_pkt_info(rxq, first_seg, &rxd, staterr);\n+\n+\t\t/*\n+\t\t * Store the mbuf address into the next entry of the array\n+\t\t * of returned packets.\n+\t\t */\n+\t\trx_pkts[nb_rx++] = first_seg;\n+\n+\t\t/* Setup receipt context for a new packet. 
*/\n+\t\tfirst_seg = NULL;\n+\t}\n+\trxq->rx_tail = rx_id;\n+\n+\t/*\n+\t * Save receive context.\n+\t */\n+\trxq->pkt_first_seg = first_seg;\n+\trxq->pkt_last_seg = last_seg;\n+\n+\t/*\n+\t * If the number of free RX descriptors is greater than the RX free\n+\t * threshold of the queue, advance the Receive Descriptor Tail (RDT)\n+\t * register.\n+\t * Update the RDT with the value of the last processed RX descriptor\n+\t * minus 1, to guarantee that the RDT register is never equal to the\n+\t * RDH register, which creates a \"full\" ring situtation from the\n+\t * hardware point of view...\n+\t */\n+\tnb_hold = nb_hold + rxq->nb_rx_hold;\n+\tif (nb_hold > rxq->rx_free_thresh) {\n+\t\tPMD_RX_LOG(DEBUG, \"port_id=%u queue_id=%u rx_tail=%u\"\n+\t\t\t\" nb_hold=%u nb_rx=%u\", rxq->port_id, rxq->queue_id,\n+\t\t\trx_id, nb_hold, nb_rx);\n+\t\trx_id = (rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1);\n+\t\tIGC_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);\n+\t\tnb_hold = 0;\n+\t}\n+\trxq->nb_rx_hold = nb_hold;\n+\treturn nb_rx;\n+}\n+\n+static void\n+igc_rx_queue_release_mbufs(struct igc_rx_queue *rxq)\n+{\n+\tunsigned int i;\n+\n+\tif (rxq->sw_ring != NULL) {\n+\t\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\t\tif (rxq->sw_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);\n+\t\t\t\trxq->sw_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+static void\n+igc_rx_queue_release(struct igc_rx_queue *rxq)\n+{\n+\tigc_rx_queue_release_mbufs(rxq);\n+\trte_free(rxq->sw_ring);\n+\trte_free(rxq);\n+}\n+\n+void eth_igc_rx_queue_release(void *rxq)\n+{\n+\tif (rxq)\n+\t\tigc_rx_queue_release(rxq);\n+}\n+\n+uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,\n+\t\tuint16_t rx_queue_id)\n+{\n+\t/**\n+\t * Check the DD bit of a rx descriptor of each 4 in a group,\n+\t * to avoid checking too frequently and downgrading performance\n+\t * too much.\n+\t */\n+#define IGC_RXQ_SCAN_INTERVAL 4\n+\n+\tvolatile union igc_adv_rx_desc *rxdp;\n+\tstruct igc_rx_queue *rxq;\n+\tuint16_t desc = 0;\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\trxdp = &rxq->rx_ring[rxq->rx_tail];\n+\n+\twhile (desc < rxq->nb_rx_desc - rxq->rx_tail) {\n+\t\tif (unlikely(!(rxdp->wb.upper.status_error &\n+\t\t\t\tIGC_RXD_STAT_DD)))\n+\t\t\treturn desc;\n+\t\tdesc += IGC_RXQ_SCAN_INTERVAL;\n+\t\trxdp += IGC_RXQ_SCAN_INTERVAL;\n+\t}\n+\trxdp = &rxq->rx_ring[rxq->rx_tail + desc - rxq->nb_rx_desc];\n+\n+\twhile (desc < rxq->nb_rx_desc &&\n+\t\t(rxdp->wb.upper.status_error & IGC_RXD_STAT_DD)) {\n+\t\tdesc += IGC_RXQ_SCAN_INTERVAL;\n+\t\trxdp += IGC_RXQ_SCAN_INTERVAL;\n+\t}\n+\n+\treturn desc;\n+}\n+\n+int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset)\n+{\n+\tvolatile union igc_adv_rx_desc *rxdp;\n+\tstruct igc_rx_queue *rxq = rx_queue;\n+\tuint32_t desc;\n+\n+\tif (unlikely(!rxq || offset >= rxq->nb_rx_desc))\n+\t\treturn 0;\n+\n+\tdesc = rxq->rx_tail + offset;\n+\tif (desc >= rxq->nb_rx_desc)\n+\t\tdesc -= rxq->nb_rx_desc;\n+\n+\trxdp = &rxq->rx_ring[desc];\n+\treturn !!(rxdp->wb.upper.status_error &\n+\t\t\trte_cpu_to_le_32(IGC_RXD_STAT_DD));\n+}\n+\n+int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset)\n+{\n+\tstruct igc_rx_queue *rxq = rx_queue;\n+\tvolatile uint32_t *status;\n+\tuint32_t desc;\n+\n+\tif (unlikely(!rxq || offset >= rxq->nb_rx_desc))\n+\t\treturn -EINVAL;\n+\n+\tif (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)\n+\t\treturn RTE_ETH_RX_DESC_UNAVAIL;\n+\n+\tdesc = rxq->rx_tail + offset;\n+\tif (desc >= rxq->nb_rx_desc)\n+\t\tdesc -= rxq->nb_rx_desc;\n+\n+\tstatus = 
&rxq->rx_ring[desc].wb.upper.status_error;\n+\tif (*status & rte_cpu_to_le_32(IGC_RXD_STAT_DD))\n+\t\treturn RTE_ETH_RX_DESC_DONE;\n+\n+\treturn RTE_ETH_RX_DESC_AVAIL;\n+}\n+\n+static int\n+igc_alloc_rx_queue_mbufs(struct igc_rx_queue *rxq)\n+{\n+\tstruct igc_rx_entry *rxe = rxq->sw_ring;\n+\tuint64_t dma_addr;\n+\tunsigned int i;\n+\n+\t/* Initialize software ring entries. */\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tvolatile union igc_adv_rx_desc *rxd;\n+\t\tstruct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\n+\t\tif (mbuf == NULL) {\n+\t\t\tPMD_DRV_LOG(ERR, \"RX mbuf alloc failed \"\n+\t\t\t     \"queue_id=%hu\", rxq->queue_id);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t\tdma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\t\trxd = &rxq->rx_ring[i];\n+\t\trxd->read.hdr_addr = 0;\n+\t\trxd->read.pkt_addr = dma_addr;\n+\t\trxe[i].mbuf = mbuf;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * RSS random key supplied in section 7.1.2.9.3 of the Intel I225 datasheet.\n+ * Used as the default key.\n+ */\n+static uint8_t default_rss_key[40] = {\n+\t0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,\n+\t0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,\n+\t0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,\n+\t0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,\n+\t0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,\n+};\n+\n+static void\n+igc_rss_disable(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tuint32_t mrqc;\n+\n+\tmrqc = IGC_READ_REG(hw, IGC_MRQC);\n+\tmrqc &= ~IGC_MRQC_ENABLE_MASK;\n+\tIGC_WRITE_REG(hw, IGC_MRQC, mrqc);\n+}\n+\n+static void\n+igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)\n+{\n+\tuint32_t *hash_key = (uint32_t *)rss_conf->rss_key;\n+\tuint32_t mrqc;\n+\tuint64_t rss_hf;\n+\n+\tif (hash_key != NULL) {\n+\t\tuint8_t i;\n+\n+\t\t/* Fill in RSS hash key */\n+\t\tfor (i = 0; i < IGC_HKEY_MAX_INDEX; i++)\n+\t\t\tIGC_WRITE_REG_LE_VALUE(hw, IGC_RSSRK(i), hash_key[i]);\n+\t}\n+\n+\t/* Set configured hashing protocols in MRQC register */\n+\trss_hf = rss_conf->rss_hf;\n+\tmrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */\n+\tif (rss_hf & ETH_RSS_IPV4)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV4;\n+\tif (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;\n+\tif (rss_hf & ETH_RSS_IPV6)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6;\n+\tif (rss_hf & ETH_RSS_IPV6_EX)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;\n+\tif (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;\n+\tif (rss_hf & ETH_RSS_IPV6_TCP_EX)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;\n+\tif (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;\n+\tif (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;\n+\tif (rss_hf & ETH_RSS_IPV6_UDP_EX)\n+\t\tmrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;\n+\tIGC_WRITE_REG(hw, IGC_MRQC, mrqc);\n+}\n+\n+static void\n+igc_rss_configure(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_eth_rss_conf rss_conf;\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tuint16_t i;\n+\n+\t/* Fill in redirection table. 
*/\n+\tfor (i = 0; i < IGC_RSS_RDT_SIZD; i++) {\n+\t\tunion igc_rss_reta_reg reta;\n+\t\tuint16_t q_idx, reta_idx;\n+\n+\t\tq_idx = (uint8_t)((dev->data->nb_rx_queues > 1) ?\n+\t\t\t\t   i % dev->data->nb_rx_queues : 0);\n+\t\treta_idx = i % sizeof(reta);\n+\t\treta.bytes[reta_idx] = q_idx;\n+\t\tif (reta_idx == sizeof(reta) - 1)\n+\t\t\tIGC_WRITE_REG_LE_VALUE(hw,\n+\t\t\t\tIGC_RETA(i / sizeof(reta)), reta.dword);\n+\t}\n+\n+\t/*\n+\t * Configure the RSS key and the RSS protocols used to compute\n+\t * the RSS hash of input packets.\n+\t */\n+\trss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;\n+\tif (rss_conf.rss_key == NULL)\n+\t\trss_conf.rss_key = default_rss_key;\n+\tigc_hw_rss_hash_set(hw, &rss_conf);\n+}\n+\n+static int\n+igc_dev_mq_rx_configure(struct rte_eth_dev *dev)\n+{\n+\tif (RTE_ETH_DEV_SRIOV(dev).active) {\n+\t\tPMD_DRV_LOG(ERR, \"SRIOV unsupported!\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tswitch (dev->data->dev_conf.rxmode.mq_mode) {\n+\tcase ETH_MQ_RX_RSS:\n+\t\tigc_rss_configure(dev);\n+\t\tbreak;\n+\tcase ETH_MQ_RX_NONE:\n+\t\t/*\n+\t\t * configure RSS register for following,\n+\t\t * then disable the RSS logic\n+\t\t */\n+\t\tigc_rss_configure(dev);\n+\t\tigc_rss_disable(dev);\n+\t\tbreak;\n+\tdefault:\n+\t\tPMD_DRV_LOG(ERR, \"rx mode(%d) not supported!\",\n+\t\t\tdev->data->dev_conf.rxmode.mq_mode);\n+\t\treturn -EINVAL;\n+\t}\n+\treturn 0;\n+}\n+\n+int\n+igc_rx_init(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_rx_queue *rxq;\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tuint64_t offloads = dev->data->dev_conf.rxmode.offloads;\n+\tuint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;\n+\tuint32_t rctl;\n+\tuint32_t rxcsum;\n+\tuint16_t buf_size;\n+\tuint16_t rctl_bsize;\n+\tuint16_t i;\n+\tint ret;\n+\n+\tdev->rx_pkt_burst = igc_recv_pkts;\n+\n+\t/*\n+\t * Make sure receives are disabled while setting\n+\t * up the descriptor ring.\n+\t */\n+\trctl = IGC_READ_REG(hw, IGC_RCTL);\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);\n+\n+\t/* Configure support of jumbo frames, if any. */\n+\tif (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {\n+\t\trctl |= IGC_RCTL_LPE;\n+\n+\t\t/*\n+\t\t * Set maximum packet length by default, and might be updated\n+\t\t * together with enabling/disabling dual VLAN.\n+\t\t */\n+\t\tIGC_WRITE_REG(hw, IGC_RLPML, max_rx_pkt_len);\n+\t} else {\n+\t\trctl &= ~IGC_RCTL_LPE;\n+\t}\n+\n+\t/* Configure and enable each RX queue. 
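The redirection-table loop above packs four one-byte queue indices into each 32-bit RETA register. A minimal standalone illustration of that packing, assuming a 128-entry table as the IGC_RSS_RDT_SIZD loop bound suggests, is sketched below; the union here is a stand-in for the driver's igc_rss_reta_reg.

#include <stdint.h>
#include <stdio.h>

#define RETA_ENTRIES 128   /* assumed table size */

union reta_reg {
	uint32_t dword;
	uint8_t  bytes[4];
};

/* Fill a shadow RETA round-robin over nb_rx_queues, packing four
 * entries per 32-bit register, as the driver loop above does. */
static void
fill_reta(uint32_t reta[RETA_ENTRIES / 4], uint16_t nb_rx_queues)
{
	union reta_reg reg = { .dword = 0 };

	for (int i = 0; i < RETA_ENTRIES; i++) {
		reg.bytes[i % 4] = (uint8_t)(nb_rx_queues > 1 ?
					     i % nb_rx_queues : 0);
		if (i % 4 == 3)
			reta[i / 4] = reg.dword;
	}
}

int main(void)
{
	uint32_t reta[RETA_ENTRIES / 4];

	fill_reta(reta, 4);
	printf("RETA[0] = 0x%08x\n", reta[0]); /* 0x03020100 on little endian */
	return 0;
}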
*/\n+\trctl_bsize = 0;\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tuint64_t bus_addr;\n+\t\tuint32_t rxdctl;\n+\t\tuint32_t srrctl;\n+\n+\t\trxq = dev->data->rx_queues[i];\n+\t\trxq->flags = 0;\n+\n+\t\t/* Allocate buffers for descriptor rings and set up queue */\n+\t\tret = igc_alloc_rx_queue_mbufs(rxq);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\n+\t\t/*\n+\t\t * Reset crc_len in case it was changed after queue setup by a\n+\t\t * call to configure\n+\t\t */\n+\t\trxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?\n+\t\t\t\tRTE_ETHER_CRC_LEN : 0;\n+\n+\t\tbus_addr = rxq->rx_ring_phys_addr;\n+\t\tIGC_WRITE_REG(hw, IGC_RDLEN(rxq->reg_idx),\n+\t\t\t\trxq->nb_rx_desc *\n+\t\t\t\tsizeof(union igc_adv_rx_desc));\n+\t\tIGC_WRITE_REG(hw, IGC_RDBAH(rxq->reg_idx),\n+\t\t\t\t(uint32_t)(bus_addr >> 32));\n+\t\tIGC_WRITE_REG(hw, IGC_RDBAL(rxq->reg_idx),\n+\t\t\t\t(uint32_t)bus_addr);\n+\n+\t\t/* set descriptor configuration */\n+\t\tsrrctl = IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;\n+\n+\t\tsrrctl |= (uint32_t)(RTE_PKTMBUF_HEADROOM / 64) <<\n+\t\t\t\tIGC_SRRCTL_BSIZEHEADER_SHIFT;\n+\t\t/*\n+\t\t * Configure RX buffer size.\n+\t\t */\n+\t\tbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -\n+\t\t\tRTE_PKTMBUF_HEADROOM);\n+\t\tif (buf_size >= 1024) {\n+\t\t\t/*\n+\t\t\t * Configure the BSIZEPACKET field of the SRRCTL\n+\t\t\t * register of the queue.\n+\t\t\t * Value is in 1 KB resolution, from 1 KB to 16 KB.\n+\t\t\t * If this field is equal to 0b, then RCTL.BSIZE\n+\t\t\t * determines the RX packet buffer size.\n+\t\t\t */\n+\n+\t\t\tsrrctl |= ((buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT) &\n+\t\t\t\t   IGC_SRRCTL_BSIZEPKT_MASK);\n+\t\t\tbuf_size = (uint16_t)((srrctl &\n+\t\t\t\t\tIGC_SRRCTL_BSIZEPKT_MASK) <<\n+\t\t\t\t\tIGC_SRRCTL_BSIZEPKT_SHIFT);\n+\n+\t\t\t/* It adds dual VLAN length for supporting dual VLAN */\n+\t\t\tif (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size)\n+\t\t\t\tdev->data->scattered_rx = 1;\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * Use BSIZE field of the device RCTL register.\n+\t\t\t */\n+\t\t\tif (rctl_bsize == 0 || rctl_bsize > buf_size)\n+\t\t\t\trctl_bsize = buf_size;\n+\t\t\tdev->data->scattered_rx = 1;\n+\t\t}\n+\n+\t\t/* Set if packets are dropped when no descriptors available */\n+\t\tif (rxq->drop_en)\n+\t\t\tsrrctl |= IGC_SRRCTL_DROP_EN;\n+\n+\t\tIGC_WRITE_REG(hw, IGC_SRRCTL(rxq->reg_idx), srrctl);\n+\n+\t\t/* Enable this RX queue. 
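To make the buffer-size arithmetic above concrete, here is a small standalone sketch of how the usable mbuf data room is rounded down to the 1 KB units the SRRCTL BSIZEPKT field can express, and how the scattered-RX decision falls out of it; the shift and mask values are assumptions standing in for IGC_SRRCTL_BSIZEPKT_SHIFT/MASK.

#include <stdint.h>
#include <stdio.h>

#define PKTMBUF_HEADROOM   128   /* RTE_PKTMBUF_HEADROOM default */
#define BSIZEPKT_SHIFT      10   /* 1 KB granularity (assumed value) */
#define BSIZEPKT_MASK     0x7f   /* assumed field mask */
#define VLAN_TAG_SIZE        4

/* Return 1 if scattered RX is needed for the given mempool data room
 * and max packet length, mirroring the driver logic above. */
static int
needs_scatter(uint16_t data_room, uint32_t max_rx_pkt_len)
{
	uint16_t buf_size = (uint16_t)(data_room - PKTMBUF_HEADROOM);

	if (buf_size < 1024)
		return 1; /* falls back to RCTL.BSIZE, always scattered */

	/* Round down to the 1 KB units the SRRCTL field can express. */
	buf_size = (uint16_t)(((buf_size >> BSIZEPKT_SHIFT) & BSIZEPKT_MASK)
			<< BSIZEPKT_SHIFT);

	return max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size;
}

int main(void)
{
	printf("2176B data room, 1500B frames: scatter=%d\n",
	       needs_scatter(2176, 1500)); /* 0: the 2 KB rounded buffer fits */
	printf("2176B data room, 9000B frames: scatter=%d\n",
	       needs_scatter(2176, 9000)); /* 1: jumbo frames need chaining */
	return 0;
}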
*/\n+\t\trxdctl = IGC_RXDCTL_QUEUE_ENABLE;\n+\t\trxdctl |= ((uint32_t)rxq->pthresh << IGC_RXDCTL_PTHRESH_SHIFT) &\n+\t\t\t\tIGC_RXDCTL_PTHRESH_MSK;\n+\t\trxdctl |= ((uint32_t)rxq->hthresh << IGC_RXDCTL_HTHRESH_SHIFT) &\n+\t\t\t\tIGC_RXDCTL_HTHRESH_MSK;\n+\t\trxdctl |= ((uint32_t)rxq->wthresh << IGC_RXDCTL_WTHRESH_SHIFT) &\n+\t\t\t\tIGC_RXDCTL_WTHRESH_MSK;\n+\t\tIGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);\n+\t}\n+\n+\tif (offloads & DEV_RX_OFFLOAD_SCATTER)\n+\t\tdev->data->scattered_rx = 1;\n+\n+\tif (dev->data->scattered_rx) {\n+\t\tPMD_DRV_LOG(DEBUG, \"forcing scatter mode\");\n+\t\tdev->rx_pkt_burst = igc_recv_scattered_pkts;\n+\t}\n+\t/*\n+\t * Setup BSIZE field of RCTL register, if needed.\n+\t * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL\n+\t * register, since the code above configures the SRRCTL register of\n+\t * the RX queue in such a case.\n+\t * All configurable sizes are:\n+\t * 16384: rctl |= (IGC_RCTL_SZ_16384 | IGC_RCTL_BSEX);\n+\t *  8192: rctl |= (IGC_RCTL_SZ_8192  | IGC_RCTL_BSEX);\n+\t *  4096: rctl |= (IGC_RCTL_SZ_4096  | IGC_RCTL_BSEX);\n+\t *  2048: rctl |= IGC_RCTL_SZ_2048;\n+\t *  1024: rctl |= IGC_RCTL_SZ_1024;\n+\t *   512: rctl |= IGC_RCTL_SZ_512;\n+\t *   256: rctl |= IGC_RCTL_SZ_256;\n+\t */\n+\tif (rctl_bsize > 0) {\n+\t\tif (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */\n+\t\t\trctl |= IGC_RCTL_SZ_512;\n+\t\telse /* 256 <= buf_size < 512 - use 256 */\n+\t\t\trctl |= IGC_RCTL_SZ_256;\n+\t}\n+\n+\t/*\n+\t * Configure RSS if device configured with multiple RX queues.\n+\t */\n+\tigc_dev_mq_rx_configure(dev);\n+\n+\t/* Update the rctl since igc_dev_mq_rx_configure may change its value */\n+\trctl |= IGC_READ_REG(hw, IGC_RCTL);\n+\n+\t/*\n+\t * Setup the Checksum Register.\n+\t * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.\n+\t */\n+\trxcsum = IGC_READ_REG(hw, IGC_RXCSUM);\n+\trxcsum |= IGC_RXCSUM_PCSD;\n+\n+\t/* Enable both L3/L4 rx checksum offload */\n+\tif (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)\n+\t\trxcsum |= IGC_RXCSUM_IPOFL;\n+\telse\n+\t\trxcsum &= ~IGC_RXCSUM_IPOFL;\n+\n+\tif (offloads &\n+\t\t(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {\n+\t\trxcsum |= IGC_RXCSUM_TUOFL;\n+\t\toffloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;\n+\t} else {\n+\t\trxcsum &= ~IGC_RXCSUM_TUOFL;\n+\t}\n+\n+\tif (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)\n+\t\trxcsum |= IGC_RXCSUM_CRCOFL;\n+\telse\n+\t\trxcsum &= ~IGC_RXCSUM_CRCOFL;\n+\n+\tIGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);\n+\n+\t/* Setup the Receive Control Register. */\n+\tif (offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n+\t\trctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */\n+\telse\n+\t\trctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */\n+\n+\trctl &= ~IGC_RCTL_MO_MSK;\n+\trctl &= ~IGC_RCTL_LBM_MSK;\n+\trctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO |\n+\t\t\tIGC_RCTL_DPF |\n+\t\t\t(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);\n+\n+\trctl &= ~(IGC_RCTL_HSEL_MSK | IGC_RCTL_CFIEN | IGC_RCTL_CFI |\n+\t\t\tIGC_RCTL_PSP | IGC_RCTL_PMCF);\n+\n+\t/* Make sure VLAN Filters are off. */\n+\trctl &= ~IGC_RCTL_VFE;\n+\t/* Don't store bad packets. */\n+\trctl &= ~IGC_RCTL_SBP;\n+\n+\t/* Enable Receives. 
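Tying the RXCSUM handling above back to the application side, the sketch below shows how the L3/L4 checksum and scatter offloads this code reacts to would typically be requested; flag names are the pre-21.11 ones used by this patch, and a real application would first check them against the port's rx_offload_capa.

#include <rte_ethdev.h>

/* Sketch: request the RX offloads handled by igc_rx_init() above. */
static int
configure_rx_offloads(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
				    DEV_RX_OFFLOAD_UDP_CKSUM |
				    DEV_RX_OFFLOAD_TCP_CKSUM |
				    DEV_RX_OFFLOAD_SCATTER,
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}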
*/\n+\tIGC_WRITE_REG(hw, IGC_RCTL, rctl);\n+\n+\t/*\n+\t * Setup the HW Rx Head and Tail Descriptor Pointers.\n+\t * This needs to be done after enable.\n+\t */\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tIGC_WRITE_REG(hw, IGC_RDH(rxq->reg_idx), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx),\n+\t\t\t\trxq->nb_rx_desc - 1);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void\n+igc_reset_rx_queue(struct igc_rx_queue *rxq)\n+{\n+\tstatic const union igc_adv_rx_desc zeroed_desc = { {0} };\n+\tunsigned int i;\n+\n+\t/* Zero out HW ring memory */\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++)\n+\t\trxq->rx_ring[i] = zeroed_desc;\n+\n+\trxq->rx_tail = 0;\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+}\n+\n+int\n+eth_igc_rx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\t uint16_t queue_idx,\n+\t\t\t uint16_t nb_desc,\n+\t\t\t unsigned int socket_id,\n+\t\t\t const struct rte_eth_rxconf *rx_conf,\n+\t\t\t struct rte_mempool *mp)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tconst struct rte_memzone *rz;\n+\tstruct igc_rx_queue *rxq;\n+\tunsigned int size;\n+\n+\t/*\n+\t * Validate number of receive descriptors.\n+\t * It must not exceed hardware maximum, and must be multiple\n+\t * of IGC_RX_DESCRIPTOR_MULTIPLE.\n+\t */\n+\tif (nb_desc % IGC_RX_DESCRIPTOR_MULTIPLE != 0 ||\n+\t\tnb_desc > IGC_MAX_RXD || nb_desc < IGC_MIN_RXD) {\n+\t\tPMD_DRV_LOG(ERR, \"RX descriptor must be multiple of\"\n+\t\t\t\" %u(cur: %u) and between %u and %u!\",\n+\t\t\tIGC_RX_DESCRIPTOR_MULTIPLE, nb_desc,\n+\t\t\tIGC_MIN_RXD, IGC_MAX_RXD);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Free memory prior to re-allocation if needed */\n+\tif (dev->data->rx_queues[queue_idx] != NULL) {\n+\t\tigc_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* First allocate the RX queue data structure. */\n+\trxq = rte_zmalloc(\"ethdev RX queue\", sizeof(struct igc_rx_queue),\n+\t\t\t  RTE_CACHE_LINE_SIZE);\n+\tif (rxq == NULL)\n+\t\treturn -ENOMEM;\n+\trxq->offloads = rx_conf->offloads;\n+\trxq->mb_pool = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->pthresh = rx_conf->rx_thresh.pthresh;\n+\trxq->hthresh = rx_conf->rx_thresh.hthresh;\n+\trxq->wthresh = rx_conf->rx_thresh.wthresh;\n+\trxq->drop_en = rx_conf->rx_drop_en;\n+\trxq->rx_free_thresh = rx_conf->rx_free_thresh;\n+\trxq->queue_id = queue_idx;\n+\trxq->reg_idx = queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\n+\t/*\n+\t *  Allocate RX ring hardware descriptors. A memzone large enough to\n+\t *  handle the maximum ring size is allocated in order to allow for\n+\t *  resizing in later calls to the queue setup function.\n+\t */\n+\tsize = sizeof(union igc_adv_rx_desc) * IGC_MAX_RXD;\n+\trz = rte_eth_dma_zone_reserve(dev, \"rx_ring\", queue_idx, size,\n+\t\t\t\t      IGC_ALIGN, socket_id);\n+\tif (rz == NULL) {\n+\t\tigc_rx_queue_release(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\trxq->rdt_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_RDT(rxq->reg_idx));\n+\trxq->rdh_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_RDH(rxq->reg_idx));\n+\trxq->rx_ring_phys_addr = rz->iova;\n+\trxq->rx_ring = (union igc_adv_rx_desc *)rz->addr;\n+\n+\t/* Allocate software ring. 
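As a usage counterpart to eth_igc_rx_queue_setup() above, an application would normally create an mbuf pool and set a queue up roughly as follows; the pool sizing and the 512-descriptor count are placeholder values chosen to satisfy the checks above.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: one RX queue backed by a freshly created mbuf pool. */
static int
setup_one_rx_queue(uint16_t port_id, uint16_t queue_id, int socket_id)
{
	struct rte_mempool *mp;

	mp = rte_pktmbuf_pool_create("rx_pool", 4096, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
	if (mp == NULL)
		return -1;

	/* 512 descriptors: a multiple of IGC_RX_DESCRIPTOR_MULTIPLE and
	 * inside the [IGC_MIN_RXD, IGC_MAX_RXD] range checked above. */
	return rte_eth_rx_queue_setup(port_id, queue_id, 512, socket_id,
				      NULL /* default rte_eth_rxconf */, mp);
}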
*/\n+\trxq->sw_ring = rte_zmalloc(\"rxq->sw_ring\",\n+\t\t\t\t   sizeof(struct igc_rx_entry) * nb_desc,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE);\n+\tif (rxq->sw_ring == NULL) {\n+\t\tigc_rx_queue_release(rxq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tPMD_DRV_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\" PRIx64,\n+\t\trxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);\n+\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\tigc_reset_rx_queue(rxq);\n+\n+\treturn 0;\n+}\n+\n+/* prepare packets for transmit */\n+static uint16_t\n+eth_igc_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,\n+\t\tuint16_t nb_pkts)\n+{\n+\tint i, ret;\n+\tstruct rte_mbuf *m;\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tm = tx_pkts[i];\n+\n+\t\t/* Check some limitations for TSO in hardware */\n+\t\tif (m->ol_flags & IGC_TX_OFFLOAD_SEG)\n+\t\t\tif (m->tso_segsz > IGC_TSO_MAX_MSS ||\n+\t\t\t\tm->l2_len + m->l3_len + m->l4_len >\n+\t\t\t\tIGC_TSO_MAX_HDRLEN) {\n+\t\t\t\trte_errno = EINVAL;\n+\t\t\t\treturn i;\n+\t\t\t}\n+\n+\t\tif (m->ol_flags & IGC_TX_OFFLOAD_NOTSUP_MASK) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn i;\n+\t\t}\n+\n+#ifdef RTE_LIBRTE_ETHDEV_DEBUG\n+\t\tret = rte_validate_tx_offload(m);\n+\t\tif (ret != 0) {\n+\t\t\trte_errno = -ret;\n+\t\t\treturn i;\n+\t\t}\n+#endif\n+\t\tret = rte_net_intel_cksum_prepare(m);\n+\t\tif (ret != 0) {\n+\t\t\trte_errno = -ret;\n+\t\t\treturn i;\n+\t\t}\n+\t}\n+\n+\treturn i;\n+}\n+\n+/*\n+ *There're some limitations in hardware for TCP segmentation offload. We\n+ *should check whether the parameters are valid.\n+ */\n+static inline uint64_t\n+check_tso_para(uint64_t ol_req, union igc_tx_offload ol_para)\n+{\n+\tif (!(ol_req & IGC_TX_OFFLOAD_SEG))\n+\t\treturn ol_req;\n+\tif (ol_para.tso_segsz > IGC_TSO_MAX_MSS || ol_para.l2_len +\n+\t\tol_para.l3_len + ol_para.l4_len > IGC_TSO_MAX_HDRLEN) {\n+\t\tol_req &= ~IGC_TX_OFFLOAD_SEG;\n+\t\tol_req |= PKT_TX_TCP_CKSUM;\n+\t}\n+\treturn ol_req;\n+}\n+\n+/*\n+ * Check which hardware context can be used. Use the existing match\n+ * or create a new context descriptor.\n+ */\n+static inline uint32_t\n+what_advctx_update(struct igc_tx_queue *txq, uint64_t flags,\n+\t\tunion igc_tx_offload tx_offload)\n+{\n+\tuint32_t curr = txq->ctx_curr;\n+\n+\t/* If match with the current context */\n+\tif (likely(txq->ctx_cache[curr].flags == flags &&\n+\t\ttxq->ctx_cache[curr].tx_offload.data ==\n+\t\t(txq->ctx_cache[curr].tx_offload_mask.data &\n+\t\ttx_offload.data))) {\n+\t\treturn curr;\n+\t}\n+\n+\t/* Total two context, if match with the second context */\n+\tcurr ^= 1;\n+\tif (likely(txq->ctx_cache[curr].flags == flags &&\n+\t\ttxq->ctx_cache[curr].tx_offload.data ==\n+\t\t(txq->ctx_cache[curr].tx_offload_mask.data &\n+\t\ttx_offload.data))) {\n+\t\ttxq->ctx_curr = curr;\n+\t\treturn curr;\n+\t}\n+\n+\t/* Mismatch, create new one */\n+\treturn IGC_CTX_NUM;\n+}\n+\n+/*\n+ * This is a separate function, looking for optimization opportunity here\n+ * Rework required to go with the pre-defined values.\n+ */\n+static inline void\n+igc_set_xmit_ctx(struct igc_tx_queue *txq,\n+\t\tvolatile struct igc_adv_tx_context_desc *ctx_txd,\n+\t\tuint64_t ol_flags, union igc_tx_offload tx_offload)\n+{\n+\tuint32_t type_tucmd_mlhl;\n+\tuint32_t mss_l4len_idx;\n+\tuint32_t ctx_curr;\n+\tuint32_t vlan_macip_lens;\n+\tunion igc_tx_offload tx_offload_mask;\n+\n+\t/* Use the previous context */\n+\ttxq->ctx_curr ^= 1;\n+\tctx_curr = txq->ctx_curr;\n+\n+\ttx_offload_mask.data = 0;\n+\ttype_tucmd_mlhl = 0;\n+\n+\t/* Specify which HW CTX to upload. 
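Since eth_igc_prep_pkts() above enforces the TSO and offload limits through the tx_pkt_prepare hook, a transmit path that uses offloads would typically run rte_eth_tx_prepare() before rte_eth_tx_burst(), roughly as in this sketch (error handling kept minimal).

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_errno.h>

static uint16_t
send_with_offloads(uint16_t port_id, uint16_t queue_id,
		   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	/* Run the PMD's tx_pkt_prepare hook (eth_igc_prep_pkts above);
	 * it stops at the first packet violating the TSO/offload limits. */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep < nb_pkts) {
		/* EINVAL for the TSO limits, ENOTSUP for unsupported flags */
		printf("tx_prepare stopped at %u: %s\n",
		       nb_prep, rte_strerror(rte_errno));
	}

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}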
*/\n+\tmss_l4len_idx = (ctx_curr << IGC_ADVTXD_IDX_SHIFT);\n+\n+\tif (ol_flags & PKT_TX_VLAN_PKT)\n+\t\ttx_offload_mask.vlan_tci = 0xffff;\n+\n+\t/* check if TCP segmentation required for this packet */\n+\tif (ol_flags & IGC_TX_OFFLOAD_SEG) {\n+\t\t/* implies IP cksum in IPv4 */\n+\t\tif (ol_flags & PKT_TX_IP_CKSUM)\n+\t\t\ttype_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4 |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\t\telse\n+\t\t\ttype_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV6 |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\n+\t\tif (ol_flags & PKT_TX_TCP_SEG)\n+\t\t\ttype_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;\n+\t\telse\n+\t\t\ttype_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;\n+\n+\t\ttx_offload_mask.data |= TX_TSO_CMP_MASK;\n+\t\tmss_l4len_idx |= (uint32_t)tx_offload.tso_segsz <<\n+\t\t\t\tIGC_ADVTXD_MSS_SHIFT;\n+\t\tmss_l4len_idx |= (uint32_t)tx_offload.l4_len <<\n+\t\t\t\tIGC_ADVTXD_L4LEN_SHIFT;\n+\t} else { /* no TSO, check if hardware checksum is needed */\n+\t\tif (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))\n+\t\t\ttx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;\n+\n+\t\tif (ol_flags & PKT_TX_IP_CKSUM)\n+\t\t\ttype_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4;\n+\n+\t\tswitch (ol_flags & PKT_TX_L4_MASK) {\n+\t\tcase PKT_TX_TCP_CKSUM:\n+\t\t\ttype_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\t\t\tmss_l4len_idx |= (uint32_t)sizeof(struct rte_tcp_hdr)\n+\t\t\t\t<< IGC_ADVTXD_L4LEN_SHIFT;\n+\t\t\tbreak;\n+\t\tcase PKT_TX_UDP_CKSUM:\n+\t\t\ttype_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\t\t\tmss_l4len_idx |= (uint32_t)sizeof(struct rte_udp_hdr)\n+\t\t\t\t<< IGC_ADVTXD_L4LEN_SHIFT;\n+\t\t\tbreak;\n+\t\tcase PKT_TX_SCTP_CKSUM:\n+\t\t\ttype_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_SCTP |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\t\t\tmss_l4len_idx |= (uint32_t)sizeof(struct rte_sctp_hdr)\n+\t\t\t\t<< IGC_ADVTXD_L4LEN_SHIFT;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\ttype_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_RSV |\n+\t\t\t\tIGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\ttxq->ctx_cache[ctx_curr].flags = ol_flags;\n+\ttxq->ctx_cache[ctx_curr].tx_offload.data =\n+\t\ttx_offload_mask.data & tx_offload.data;\n+\ttxq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;\n+\n+\tctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);\n+\tvlan_macip_lens = (uint32_t)tx_offload.data;\n+\tctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);\n+\tctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);\n+\tctx_txd->u.launch_time = 0;\n+}\n+\n+static inline uint32_t\n+tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)\n+{\n+\tuint32_t cmdtype;\n+\tstatic uint32_t vlan_cmd[2] = {0, IGC_ADVTXD_DCMD_VLE};\n+\tstatic uint32_t tso_cmd[2] = {0, IGC_ADVTXD_DCMD_TSE};\n+\tcmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];\n+\tcmdtype |= tso_cmd[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];\n+\treturn cmdtype;\n+}\n+\n+static inline uint32_t\n+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)\n+{\n+\tstatic const uint32_t l4_olinfo[2] = {0, IGC_ADVTXD_POPTS_TXSM};\n+\tstatic const uint32_t l3_olinfo[2] = {0, IGC_ADVTXD_POPTS_IXSM};\n+\tuint32_t tmp;\n+\n+\ttmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];\n+\ttmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];\n+\ttmp |= l4_olinfo[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];\n+\treturn tmp;\n+}\n+\n+static uint16_t\n+igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t 
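The two helpers above map offload flags to descriptor bits without branches by indexing two-entry tables with the result of a boolean test. The standalone sketch below shows the same idiom with made-up flag and command values, purely to illustrate the pattern.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up flag and descriptor-command values, for illustration only. */
#define F_VLAN  (1ULL << 0)
#define F_TSO   (1ULL << 1)
#define CMD_VLE 0x40000000u
#define CMD_TSE 0x80000000u

/* Branch-free mapping: (flags & F_X) != 0 evaluates to 0 or 1 and
 * selects the "off" or "on" entry of a two-element table. */
static uint32_t
flags_to_cmdtype(uint64_t flags)
{
	static const uint32_t vlan_cmd[2] = { 0, CMD_VLE };
	static const uint32_t tso_cmd[2]  = { 0, CMD_TSE };

	return vlan_cmd[(flags & F_VLAN) != 0] |
	       tso_cmd[(flags & F_TSO) != 0];
}

int main(void)
{
	printf("0x%08" PRIx32 "\n",
	       flags_to_cmdtype(F_VLAN | F_TSO)); /* 0xc0000000 */
	return 0;
}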
nb_pkts)\n+{\n+\tstruct igc_tx_queue * const txq = tx_queue;\n+\tstruct igc_tx_entry * const sw_ring = txq->sw_ring;\n+\tstruct igc_tx_entry *txe, *txn;\n+\tvolatile union igc_adv_tx_desc * const txr = txq->tx_ring;\n+\tvolatile union igc_adv_tx_desc *txd;\n+\tstruct rte_mbuf *tx_pkt;\n+\tstruct rte_mbuf *m_seg;\n+\tuint64_t buf_dma_addr;\n+\tuint32_t olinfo_status;\n+\tuint32_t cmd_type_len;\n+\tuint32_t pkt_len;\n+\tuint16_t slen;\n+\tuint64_t ol_flags;\n+\tuint16_t tx_end;\n+\tuint16_t tx_id;\n+\tuint16_t tx_last;\n+\tuint16_t nb_tx;\n+\tuint64_t tx_ol_req;\n+\tuint32_t new_ctx = 0;\n+\tunion igc_tx_offload tx_offload = {0};\n+\n+\ttx_id = txq->tx_tail;\n+\ttxe = &sw_ring[tx_id];\n+\n+\tfor (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {\n+\t\ttx_pkt = *tx_pkts++;\n+\t\tpkt_len = tx_pkt->pkt_len;\n+\n+\t\tRTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);\n+\n+\t\t/*\n+\t\t * The number of descriptors that must be allocated for a\n+\t\t * packet is the number of segments of that packet, plus 1\n+\t\t * Context Descriptor for the VLAN Tag Identifier, if any.\n+\t\t * Determine the last TX descriptor to allocate in the TX ring\n+\t\t * for the packet, starting from the current position (tx_id)\n+\t\t * in the ring.\n+\t\t */\n+\t\ttx_last = (uint16_t)(tx_id + tx_pkt->nb_segs - 1);\n+\n+\t\tol_flags = tx_pkt->ol_flags;\n+\t\ttx_ol_req = ol_flags & IGC_TX_OFFLOAD_MASK;\n+\n+\t\t/* If a Context Descriptor need be built . */\n+\t\tif (tx_ol_req) {\n+\t\t\ttx_offload.l2_len = tx_pkt->l2_len;\n+\t\t\ttx_offload.l3_len = tx_pkt->l3_len;\n+\t\t\ttx_offload.l4_len = tx_pkt->l4_len;\n+\t\t\ttx_offload.vlan_tci = tx_pkt->vlan_tci;\n+\t\t\ttx_offload.tso_segsz = tx_pkt->tso_segsz;\n+\t\t\ttx_ol_req = check_tso_para(tx_ol_req, tx_offload);\n+\n+\t\t\tnew_ctx = what_advctx_update(txq, tx_ol_req,\n+\t\t\t\t\ttx_offload);\n+\t\t\t/* Only allocate context descriptor if required*/\n+\t\t\tnew_ctx = (new_ctx >= IGC_CTX_NUM);\n+\t\t\ttx_last = (uint16_t)(tx_last + new_ctx);\n+\t\t}\n+\t\tif (tx_last >= txq->nb_tx_desc)\n+\t\t\ttx_last = (uint16_t)(tx_last - txq->nb_tx_desc);\n+\n+\t\tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u pktlen=%u\"\n+\t\t\t\" tx_first=%u tx_last=%u\", txq->port_id, txq->queue_id,\n+\t\t\tpkt_len, tx_id, tx_last);\n+\n+\t\t/*\n+\t\t * Check if there are enough free descriptors in the TX ring\n+\t\t * to transmit the next packet.\n+\t\t * This operation is based on the two following rules:\n+\t\t *\n+\t\t *   1- Only check that the last needed TX descriptor can be\n+\t\t *      allocated (by construction, if that descriptor is free,\n+\t\t *      all intermediate ones are also free).\n+\t\t *\n+\t\t *      For this purpose, the index of the last TX descriptor\n+\t\t *      used for a packet (the \"last descriptor\" of a packet)\n+\t\t *      is recorded in the TX entries (the last one included)\n+\t\t *      that are associated with all TX descriptors allocated\n+\t\t *      for that packet.\n+\t\t *\n+\t\t *   2- Avoid to allocate the last free TX descriptor of the\n+\t\t *      ring, in order to never set the TDT register with the\n+\t\t *      same value stored in parallel by the NIC in the TDH\n+\t\t *      register, which makes the TX engine of the NIC enter\n+\t\t *      in a deadlock situation.\n+\t\t *\n+\t\t *      By extension, avoid to allocate a free descriptor that\n+\t\t *      belongs to the last set of free descriptors allocated\n+\t\t *      to the same packet previously transmitted.\n+\t\t */\n+\n+\t\t/*\n+\t\t * The \"last descriptor\" of the previously sent packet, if any,\n+\t\t * which 
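A tiny standalone illustration of the index arithmetic above: the last descriptor needed by a packet is the current tail plus its segment count, plus one if a fresh context descriptor is required, wrapped around the ring size.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the tx_last computation above (sketch). */
static uint16_t
last_desc_for_packet(uint16_t tx_id, uint16_t nb_segs, int new_ctx,
		     uint16_t nb_tx_desc)
{
	uint16_t tx_last = (uint16_t)(tx_id + nb_segs - 1 + (new_ctx ? 1 : 0));

	if (tx_last >= nb_tx_desc)
		tx_last = (uint16_t)(tx_last - nb_tx_desc);
	return tx_last;
}

int main(void)
{
	/* 3-segment packet plus a context descriptor near the ring end. */
	printf("%u\n", last_desc_for_packet(1022, 3, 1, 1024)); /* wraps to 1 */
	return 0;
}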
used the last descriptor to allocate.\n+\t\t */\n+\t\ttx_end = sw_ring[tx_last].last_id;\n+\n+\t\t/*\n+\t\t * The next descriptor following that \"last descriptor\" in the\n+\t\t * ring.\n+\t\t */\n+\t\ttx_end = sw_ring[tx_end].next_id;\n+\n+\t\t/*\n+\t\t * The \"last descriptor\" associated with that next descriptor.\n+\t\t */\n+\t\ttx_end = sw_ring[tx_end].last_id;\n+\n+\t\t/*\n+\t\t * Check that this descriptor is free.\n+\t\t */\n+\t\tif (!(txr[tx_end].wb.status & IGC_TXD_STAT_DD)) {\n+\t\t\tif (nb_tx == 0)\n+\t\t\t\treturn 0;\n+\t\t\tgoto end_of_tx;\n+\t\t}\n+\n+\t\t/*\n+\t\t * Set common flags of all TX Data Descriptors.\n+\t\t *\n+\t\t * The following bits must be set in all Data Descriptors:\n+\t\t *   - IGC_ADVTXD_DTYP_DATA\n+\t\t *   - IGC_ADVTXD_DCMD_DEXT\n+\t\t *\n+\t\t * The following bits must be set in the first Data Descriptor\n+\t\t * and are ignored in the other ones:\n+\t\t *   - IGC_ADVTXD_DCMD_IFCS\n+\t\t *   - IGC_ADVTXD_MAC_1588\n+\t\t *   - IGC_ADVTXD_DCMD_VLE\n+\t\t *\n+\t\t * The following bits must only be set in the last Data\n+\t\t * Descriptor:\n+\t\t *   - IGC_TXD_CMD_EOP\n+\t\t *\n+\t\t * The following bits can be set in any Data Descriptor, but\n+\t\t * are only set in the last Data Descriptor:\n+\t\t *   - IGC_TXD_CMD_RS\n+\t\t */\n+\t\tcmd_type_len = txq->txd_type |\n+\t\t\tIGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_DEXT;\n+\t\tif (tx_ol_req & IGC_TX_OFFLOAD_SEG)\n+\t\t\tpkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len +\n+\t\t\t\t\ttx_pkt->l4_len);\n+\t\tolinfo_status = (pkt_len << IGC_ADVTXD_PAYLEN_SHIFT);\n+\n+\t\t/*\n+\t\t * Timer 0 should be used to for packet timestamping,\n+\t\t * sample the packet timestamp to reg 0\n+\t\t */\n+\t\tif (ol_flags & PKT_TX_IEEE1588_TMST)\n+\t\t\tcmd_type_len |= IGC_ADVTXD_MAC_TSTAMP;\n+\n+\t\tif (tx_ol_req) {\n+\t\t\t/* Setup TX Advanced context descriptor if required */\n+\t\t\tif (new_ctx) {\n+\t\t\t\tvolatile struct igc_adv_tx_context_desc *\n+\t\t\t\t\tctx_txd = (volatile struct\n+\t\t\t\t\tigc_adv_tx_context_desc *)&txr[tx_id];\n+\n+\t\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n+\n+\t\t\t\tif (txe->mbuf != NULL) {\n+\t\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\t\t\ttxe->mbuf = NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tigc_set_xmit_ctx(txq, ctx_txd, tx_ol_req,\n+\t\t\t\t\t\ttx_offload);\n+\n+\t\t\t\ttxe->last_id = tx_last;\n+\t\t\t\ttx_id = txe->next_id;\n+\t\t\t\ttxe = txn;\n+\t\t\t}\n+\n+\t\t\t/* Setup the TX Advanced Data Descriptor */\n+\t\t\tcmd_type_len |=\n+\t\t\t\ttx_desc_vlan_flags_to_cmdtype(tx_ol_req);\n+\t\t\tolinfo_status |=\n+\t\t\t\ttx_desc_cksum_flags_to_olinfo(tx_ol_req);\n+\t\t\tolinfo_status |= (uint32_t)txq->ctx_curr <<\n+\t\t\t\t\tIGC_ADVTXD_IDX_SHIFT;\n+\t\t}\n+\n+\t\tm_seg = tx_pkt;\n+\t\tdo {\n+\t\t\ttxn = &sw_ring[txe->next_id];\n+\t\t\tRTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);\n+\n+\t\t\ttxd = &txr[tx_id];\n+\n+\t\t\tif (txe->mbuf != NULL)\n+\t\t\t\trte_pktmbuf_free_seg(txe->mbuf);\n+\t\t\ttxe->mbuf = m_seg;\n+\n+\t\t\t/* Set up transmit descriptor */\n+\t\t\tslen = (uint16_t)m_seg->data_len;\n+\t\t\tbuf_dma_addr = rte_mbuf_data_iova(m_seg);\n+\t\t\ttxd->read.buffer_addr =\n+\t\t\t\trte_cpu_to_le_64(buf_dma_addr);\n+\t\t\ttxd->read.cmd_type_len =\n+\t\t\t\trte_cpu_to_le_32(cmd_type_len | slen);\n+\t\t\ttxd->read.olinfo_status =\n+\t\t\t\trte_cpu_to_le_32(olinfo_status);\n+\t\t\ttxe->last_id = tx_last;\n+\t\t\ttx_id = txe->next_id;\n+\t\t\ttxe = txn;\n+\t\t\tm_seg = m_seg->next;\n+\t\t} while (m_seg != NULL);\n+\n+\t\t/*\n+\t\t * The last packet data descriptor needs 
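Because igc_xmit_pkts() above stops early when it cannot find a free last descriptor, callers usually loop on rte_eth_tx_burst() until every mbuf has been accepted or give up and free the remainder; a common shape of that pattern is sketched below.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: keep retrying the unsent tail of the burst; a real
 * application would normally bound the retries or free the rest. */
static void
send_all(uint16_t port_id, uint16_t queue_id,
	 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 pkts + sent, nb_pkts - sent);
}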
End Of Packet (EOP)\n+\t\t * and Report Status (RS).\n+\t\t */\n+\t\ttxd->read.cmd_type_len |=\n+\t\t\trte_cpu_to_le_32(IGC_TXD_CMD_EOP | IGC_TXD_CMD_RS);\n+\t}\n+end_of_tx:\n+\trte_wmb();\n+\n+\t/*\n+\t * Set the Transmit Descriptor Tail (TDT).\n+\t */\n+\tIGC_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);\n+\tPMD_TX_LOG(DEBUG, \"port_id=%u queue_id=%u tx_tail=%u nb_tx=%u\",\n+\t\ttxq->port_id, txq->queue_id, tx_id, nb_tx);\n+\ttxq->tx_tail = tx_id;\n+\n+\treturn nb_tx;\n+}\n+\n+int eth_igc_tx_descriptor_status(void *tx_queue, uint16_t offset)\n+{\n+\tstruct igc_tx_queue *txq = tx_queue;\n+\tvolatile uint32_t *status;\n+\tuint32_t desc;\n+\n+\tif (unlikely(!txq || offset >= txq->nb_tx_desc))\n+\t\treturn -EINVAL;\n+\n+\tdesc = txq->tx_tail + offset;\n+\tif (desc >= txq->nb_tx_desc)\n+\t\tdesc -= txq->nb_tx_desc;\n+\n+\tstatus = &txq->tx_ring[desc].wb.status;\n+\tif (*status & rte_cpu_to_le_32(IGC_TXD_STAT_DD))\n+\t\treturn RTE_ETH_TX_DESC_DONE;\n+\n+\treturn RTE_ETH_TX_DESC_FULL;\n+}\n+\n+static void\n+igc_tx_queue_release_mbufs(struct igc_tx_queue *txq)\n+{\n+\tunsigned int i;\n+\n+\tif (txq->sw_ring != NULL) {\n+\t\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\t\tif (txq->sw_ring[i].mbuf != NULL) {\n+\t\t\t\trte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);\n+\t\t\t\ttxq->sw_ring[i].mbuf = NULL;\n+\t\t\t}\n+\t\t}\n+\t}\n+}\n+\n+static void\n+igc_tx_queue_release(struct igc_tx_queue *txq)\n+{\n+\tigc_tx_queue_release_mbufs(txq);\n+\trte_free(txq->sw_ring);\n+\trte_free(txq);\n+}\n+\n+void eth_igc_tx_queue_release(void *txq)\n+{\n+\tif (txq)\n+\t\tigc_tx_queue_release(txq);\n+}\n+\n+static void\n+igc_reset_tx_queue_stat(struct igc_tx_queue *txq)\n+{\n+\ttxq->tx_head = 0;\n+\ttxq->tx_tail = 0;\n+\ttxq->ctx_curr = 0;\n+\tmemset((void *)&txq->ctx_cache, 0,\n+\t\tIGC_CTX_NUM * sizeof(struct igc_advctx_info));\n+}\n+\n+static void\n+igc_reset_tx_queue(struct igc_tx_queue *txq)\n+{\n+\tstruct igc_tx_entry *txe = txq->sw_ring;\n+\tuint16_t i, prev;\n+\n+\t/* Initialize ring entries */\n+\tprev = (uint16_t)(txq->nb_tx_desc - 1);\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\tvolatile union igc_adv_tx_desc *txd = &txq->tx_ring[i];\n+\n+\t\ttxd->wb.status = IGC_TXD_STAT_DD;\n+\t\ttxe[i].mbuf = NULL;\n+\t\ttxe[i].last_id = i;\n+\t\ttxe[prev].next_id = i;\n+\t\tprev = i;\n+\t}\n+\n+\ttxq->txd_type = IGC_ADVTXD_DTYP_DATA;\n+\tigc_reset_tx_queue_stat(txq);\n+}\n+\n+/*\n+ * clear all rx/tx queue\n+ */\n+void\n+igc_dev_clear_queues(struct rte_eth_dev *dev)\n+{\n+\tuint16_t i;\n+\tstruct igc_tx_queue *txq;\n+\tstruct igc_rx_queue *rxq;\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\tif (txq != NULL) {\n+\t\t\tigc_tx_queue_release_mbufs(txq);\n+\t\t\tigc_reset_tx_queue(txq);\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tif (rxq != NULL) {\n+\t\t\tigc_rx_queue_release_mbufs(rxq);\n+\t\t\tigc_reset_rx_queue(rxq);\n+\t\t}\n+\t}\n+}\n+\n+int eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_txconf *tx_conf)\n+{\n+\tconst struct rte_memzone *tz;\n+\tstruct igc_tx_queue *txq;\n+\tstruct igc_hw *hw;\n+\tuint32_t size;\n+\n+\tif (nb_desc % IGC_TX_DESCRIPTOR_MULTIPLE != 0 ||\n+\t\tnb_desc > IGC_MAX_TXD || nb_desc < IGC_MIN_TXD) {\n+\t\tPMD_DRV_LOG(ERR, \"TX-descriptor must be a multiple of \"\n+\t\t\t\"%u and between %u and %u!, cur: %u\",\n+\t\t\tIGC_TX_DESCRIPTOR_MULTIPLE,\n+\t\t\tIGC_MAX_TXD, IGC_MIN_TXD, 
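The descriptor-status callback above is reached through the generic ethdev API; for example, an application can probe how far the hardware has progressed through a TX ring as in the following sketch (placeholder port, queue and offset values).

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: probe whether the descriptor 'offset' entries past the
 * current tail has been written back (DD set) by the hardware. */
static void
probe_tx_progress(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int st = rte_eth_tx_descriptor_status(port_id, queue_id, offset);

	if (st == RTE_ETH_TX_DESC_DONE)
		printf("descriptor +%u already completed\n", offset);
	else if (st == RTE_ETH_TX_DESC_FULL)
		printf("descriptor +%u still owned by hardware\n", offset);
	else
		printf("offset %u: unavailable or error (%d)\n", offset, st);
}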
nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\thw = IGC_DEV_PRIVATE_HW(dev);\n+\n+\t/*\n+\t * The tx_free_thresh and tx_rs_thresh values are not used in the 2.5G\n+\t * driver.\n+\t */\n+\tif (tx_conf->tx_free_thresh != 0)\n+\t\tPMD_DRV_LOG(INFO, \"The tx_free_thresh parameter is not \"\n+\t\t\t\"used for the 2.5G driver.\");\n+\tif (tx_conf->tx_rs_thresh != 0)\n+\t\tPMD_DRV_LOG(INFO, \"The tx_rs_thresh parameter is not \"\n+\t\t\t\"used for the 2.5G driver.\");\n+\tif (tx_conf->tx_thresh.wthresh == 0)\n+\t\tPMD_DRV_LOG(INFO, \"To improve 2.5G driver performance, \"\n+\t\t\t\"consider setting the TX WTHRESH value to 4, 8, or 16.\");\n+\n+\t/* Free memory prior to re-allocation if needed */\n+\tif (dev->data->tx_queues[queue_idx] != NULL) {\n+\t\tigc_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* First allocate the tx queue data structure */\n+\ttxq = rte_zmalloc(\"ethdev TX queue\", sizeof(struct igc_tx_queue),\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\tif (txq == NULL)\n+\t\treturn -ENOMEM;\n+\n+\t/*\n+\t * Allocate TX ring hardware descriptors. A memzone large enough to\n+\t * handle the maximum ring size is allocated in order to allow for\n+\t * resizing in later calls to the queue setup function.\n+\t */\n+\tsize = sizeof(union igc_adv_tx_desc) * IGC_MAX_TXD;\n+\ttz = rte_eth_dma_zone_reserve(dev, \"tx_ring\", queue_idx, size,\n+\t\t\t\t      IGC_ALIGN, socket_id);\n+\tif (tz == NULL) {\n+\t\tigc_tx_queue_release(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->pthresh = tx_conf->tx_thresh.pthresh;\n+\ttxq->hthresh = tx_conf->tx_thresh.hthresh;\n+\ttxq->wthresh = tx_conf->tx_thresh.wthresh;\n+\n+\ttxq->queue_id = queue_idx;\n+\ttxq->reg_idx = queue_idx;\n+\ttxq->port_id = dev->data->port_id;\n+\n+\ttxq->tdt_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_TDT(txq->reg_idx));\n+\ttxq->tx_ring_phys_addr = tz->iova;\n+\n+\ttxq->tx_ring = (union igc_adv_tx_desc *)tz->addr;\n+\t/* Allocate software ring */\n+\ttxq->sw_ring = rte_zmalloc(\"txq->sw_ring\",\n+\t\t\t\t   sizeof(struct igc_tx_entry) * nb_desc,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE);\n+\tif (txq->sw_ring == NULL) {\n+\t\tigc_tx_queue_release(txq);\n+\t\treturn -ENOMEM;\n+\t}\n+\tPMD_DRV_LOG(DEBUG, \"sw_ring=%p hw_ring=%p dma_addr=0x%\" PRIx64,\n+\t\ttxq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);\n+\n+\tigc_reset_tx_queue(txq);\n+\tdev->tx_pkt_burst = igc_xmit_pkts;\n+\tdev->tx_pkt_prepare = &eth_igc_prep_pkts;\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\ttxq->offloads = tx_conf->offloads;\n+\n+\treturn 0;\n+}\n+\n+int\n+eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt)\n+{\n+\tstruct igc_tx_queue *txq = txqueue;\n+\tstruct igc_tx_entry *sw_ring;\n+\tvolatile union igc_adv_tx_desc *txr;\n+\tuint16_t tx_first; /* First segment analyzed. */\n+\tuint16_t tx_id;    /* Current segment being processed. */\n+\tuint16_t tx_last;  /* Last segment in the current packet. */\n+\tuint16_t tx_next;  /* First segment of the next packet. */\n+\tuint32_t count;\n+\n+\tif (txq == NULL)\n+\t\treturn -ENODEV;\n+\n+\tcount = 0;\n+\tsw_ring = txq->sw_ring;\n+\ttxr = txq->tx_ring;\n+\n+\t/*\n+\t * tx_tail is the last sent packet on the sw_ring. Goto the end\n+\t * of that packet (the last segment in the packet chain) and\n+\t * then the next segment will be the start of the oldest segment\n+\t * in the sw_ring. This is the first packet that will be\n+\t * attempted to be freed.\n+\t */\n+\n+\t/* Get last segment in most recently added packet. 
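Given the messages above noting that tx_free_thresh/tx_rs_thresh are ignored and that a non-zero WTHRESH helps this device, an application-side TX queue setup might look like the sketch below; the WTHRESH of 8 simply follows the driver's hint, and the other threshold values are placeholders.

#include <rte_ethdev.h>

/* Sketch: set up a TX queue with a non-zero write-back threshold,
 * as suggested by the driver's PMD_DRV_LOG hint above. */
static int
setup_one_tx_queue(uint16_t port_id, uint16_t queue_id, int socket_id)
{
	struct rte_eth_txconf txconf = {
		/* pthresh/hthresh are placeholder values */
		.tx_thresh = { .pthresh = 8, .hthresh = 1, .wthresh = 8 },
	};

	/* 512 descriptors satisfies the multiple/min/max check above. */
	return rte_eth_tx_queue_setup(port_id, queue_id, 512,
				      socket_id, &txconf);
}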
*/\n+\ttx_first = sw_ring[txq->tx_tail].last_id;\n+\n+\t/* Get the next segment, which is the oldest segment in ring. */\n+\ttx_first = sw_ring[tx_first].next_id;\n+\n+\t/* Set the current index to the first. */\n+\ttx_id = tx_first;\n+\n+\t/*\n+\t * Loop through each packet. For each packet, verify that an\n+\t * mbuf exists and that the last segment is free. If so, free\n+\t * it and move on.\n+\t */\n+\twhile (1) {\n+\t\ttx_last = sw_ring[tx_id].last_id;\n+\n+\t\tif (sw_ring[tx_last].mbuf) {\n+\t\t\tif (!(txr[tx_last].wb.status &\n+\t\t\t\t\trte_cpu_to_le_32(IGC_TXD_STAT_DD)))\n+\t\t\t\tbreak;\n+\n+\t\t\t/* Get the start of the next packet. */\n+\t\t\ttx_next = sw_ring[tx_last].next_id;\n+\n+\t\t\t/*\n+\t\t\t * Loop through all segments in a\n+\t\t\t * packet.\n+\t\t\t */\n+\t\t\tdo {\n+\t\t\t\trte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);\n+\t\t\t\tsw_ring[tx_id].mbuf = NULL;\n+\t\t\t\tsw_ring[tx_id].last_id = tx_id;\n+\n+\t\t\t\t/* Move to next segemnt. */\n+\t\t\t\ttx_id = sw_ring[tx_id].next_id;\n+\t\t\t} while (tx_id != tx_next);\n+\n+\t\t\t/*\n+\t\t\t * Increment the number of packets\n+\t\t\t * freed.\n+\t\t\t */\n+\t\t\tcount++;\n+\t\t\tif (unlikely(count == free_cnt))\n+\t\t\t\tbreak;\n+\t\t} else {\n+\t\t\t/*\n+\t\t\t * There are multiple reasons to be here:\n+\t\t\t * 1) All the packets on the ring have been\n+\t\t\t *    freed - tx_id is equal to tx_first\n+\t\t\t *    and some packets have been freed.\n+\t\t\t *    - Done, exit\n+\t\t\t * 2) Interfaces has not sent a rings worth of\n+\t\t\t *    packets yet, so the segment after tail is\n+\t\t\t *    still empty. Or a previous call to this\n+\t\t\t *    function freed some of the segments but\n+\t\t\t *    not all so there is a hole in the list.\n+\t\t\t *    Hopefully this is a rare case.\n+\t\t\t *    - Walk the list and find the next mbuf. If\n+\t\t\t *      there isn't one, then done.\n+\t\t\t */\n+\t\t\tif (likely(tx_id == tx_first && count != 0))\n+\t\t\t\tbreak;\n+\n+\t\t\t/*\n+\t\t\t * Walk the list and find the next mbuf, if any.\n+\t\t\t */\n+\t\t\tdo {\n+\t\t\t\t/* Move to next segemnt. */\n+\t\t\t\ttx_id = sw_ring[tx_id].next_id;\n+\n+\t\t\t\tif (sw_ring[tx_id].mbuf)\n+\t\t\t\t\tbreak;\n+\n+\t\t\t} while (tx_id != tx_first);\n+\n+\t\t\t/*\n+\t\t\t * Determine why previous loop bailed. If there\n+\t\t\t * is not an mbuf, done.\n+\t\t\t */\n+\t\t\tif (sw_ring[tx_id].mbuf == NULL)\n+\t\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn count;\n+}\n+\n+void\n+igc_tx_init(struct rte_eth_dev *dev)\n+{\n+\tstruct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);\n+\tuint32_t tctl;\n+\tuint32_t txdctl;\n+\tuint16_t i;\n+\n+\t/* Setup the Base and Length of the Tx Descriptor Rings. */\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\tstruct igc_tx_queue *txq = dev->data->tx_queues[i];\n+\t\tuint64_t bus_addr = txq->tx_ring_phys_addr;\n+\n+\t\tIGC_WRITE_REG(hw, IGC_TDLEN(txq->reg_idx),\n+\t\t\t\ttxq->nb_tx_desc *\n+\t\t\t\tsizeof(union igc_adv_tx_desc));\n+\t\tIGC_WRITE_REG(hw, IGC_TDBAH(txq->reg_idx),\n+\t\t\t\t(uint32_t)(bus_addr >> 32));\n+\t\tIGC_WRITE_REG(hw, IGC_TDBAL(txq->reg_idx),\n+\t\t\t\t(uint32_t)bus_addr);\n+\n+\t\t/* Setup the HW Tx Head and Tail descriptor pointers. */\n+\t\tIGC_WRITE_REG(hw, IGC_TDT(txq->reg_idx), 0);\n+\t\tIGC_WRITE_REG(hw, IGC_TDH(txq->reg_idx), 0);\n+\n+\t\t/* Setup Transmit threshold registers. 
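The cleanup walk above is what backs rte_eth_tx_done_cleanup() for this PMD; an application wanting to reclaim transmitted mbufs ahead of the next burst could invoke it as in this sketch.

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: ask the PMD to free up to 64 already-transmitted packets
 * from the software ring of the given TX queue. */
static void
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	int freed = rte_eth_tx_done_cleanup(port_id, queue_id, 64);

	if (freed < 0)
		printf("tx_done_cleanup failed: %d\n", freed);
	else
		printf("freed %d packets\n", freed);
}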
*/\n+\t\ttxdctl = ((uint32_t)txq->pthresh << IGC_TXDCTL_PTHRESH_SHIFT) &\n+\t\t\t\tIGC_TXDCTL_PTHRESH_MSK;\n+\t\ttxdctl |= ((uint32_t)txq->hthresh << IGC_TXDCTL_HTHRESH_SHIFT) &\n+\t\t\t\tIGC_TXDCTL_HTHRESH_MSK;\n+\t\ttxdctl |= ((uint32_t)txq->wthresh << IGC_TXDCTL_WTHRESH_SHIFT) &\n+\t\t\t\tIGC_TXDCTL_WTHRESH_MSK;\n+\t\ttxdctl |= IGC_TXDCTL_QUEUE_ENABLE;\n+\t\tIGC_WRITE_REG(hw, IGC_TXDCTL(txq->reg_idx), txdctl);\n+\t}\n+\n+\tigc_config_collision_dist(hw);\n+\n+\t/* Program the Transmit Control Register. */\n+\ttctl = IGC_READ_REG(hw, IGC_TCTL);\n+\ttctl &= ~IGC_TCTL_CT;\n+\ttctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |\n+\t\t ((uint32_t)IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));\n+\n+\t/* This write will effectively turn on the transmit unit. */\n+\tIGC_WRITE_REG(hw, IGC_TCTL, tctl);\n+}\n+\n+void\n+eth_igc_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo)\n+{\n+\tstruct igc_rx_queue *rxq;\n+\n+\trxq = dev->data->rx_queues[queue_id];\n+\n+\tqinfo->mp = rxq->mb_pool;\n+\tqinfo->scattered_rx = dev->data->scattered_rx;\n+\tqinfo->nb_desc = rxq->nb_rx_desc;\n+\n+\tqinfo->conf.rx_free_thresh = rxq->rx_free_thresh;\n+\tqinfo->conf.rx_drop_en = rxq->drop_en;\n+\tqinfo->conf.offloads = rxq->offloads;\n+\tqinfo->conf.rx_thresh.hthresh = rxq->hthresh;\n+\tqinfo->conf.rx_thresh.pthresh = rxq->pthresh;\n+\tqinfo->conf.rx_thresh.wthresh = rxq->wthresh;\n+}\n+\n+void\n+eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo)\n+{\n+\tstruct igc_tx_queue *txq;\n+\n+\ttxq = dev->data->tx_queues[queue_id];\n+\n+\tqinfo->nb_desc = txq->nb_tx_desc;\n+\n+\tqinfo->conf.tx_thresh.pthresh = txq->pthresh;\n+\tqinfo->conf.tx_thresh.hthresh = txq->hthresh;\n+\tqinfo->conf.tx_thresh.wthresh = txq->wthresh;\n+\tqinfo->conf.offloads = txq->offloads;\n+}\ndiff --git a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h\nnew file mode 100644\nindex 0000000..00ef512\n--- /dev/null\n+++ b/drivers/net/igc/igc_txrx.h\n@@ -0,0 +1,50 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2020 Intel Corporation\n+ */\n+\n+#ifndef _IGC_TXRX_H_\n+#define _IGC_TXRX_H_\n+\n+#include \"igc_ethdev.h\"\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+/*\n+ * RX/TX function prototypes\n+ */\n+void eth_igc_tx_queue_release(void *txq);\n+void eth_igc_rx_queue_release(void *rxq);\n+void igc_dev_clear_queues(struct rte_eth_dev *dev);\n+int eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,\n+\t\tuint16_t nb_rx_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\tstruct rte_mempool *mb_pool);\n+\n+uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,\n+\t\tuint16_t rx_queue_id);\n+\n+int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset);\n+\n+int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset);\n+\n+int eth_igc_tx_descriptor_status(void *tx_queue, uint16_t offset);\n+\n+int eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_txconf *tx_conf);\n+int eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt);\n+\n+int igc_rx_init(struct rte_eth_dev *dev);\n+void igc_tx_init(struct rte_eth_dev *dev);\n+void eth_igc_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo);\n+void eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo);\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* 
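Finally, the rxq/txq info callbacks above feed rte_eth_rx_queue_info_get() and rte_eth_tx_queue_info_get(); a short sketch of reading a few of those fields back from an application follows.

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: dump a few of the fields filled in by the
 * eth_igc_rxq_info_get()/eth_igc_txq_info_get() callbacks above. */
static void
dump_queue_info(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rxinfo;
	struct rte_eth_txq_info txinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rxinfo) == 0)
		printf("rxq %u: %u descriptors, scattered=%u\n", queue_id,
		       rxinfo.nb_desc, rxinfo.scattered_rx);

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &txinfo) == 0)
		printf("txq %u: %u descriptors, wthresh=%u\n", queue_id,
		       txinfo.nb_desc, txinfo.conf.tx_thresh.wthresh);
}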
_IGC_TXRX_H_ */\ndiff --git a/drivers/net/igc/meson.build b/drivers/net/igc/meson.build\nindex aa211d6..e402f26 100644\n--- a/drivers/net/igc/meson.build\n+++ b/drivers/net/igc/meson.build\n@@ -6,7 +6,8 @@ objs = [base_objs]\n \n sources = files(\n \t'igc_logs.c',\n-\t'igc_ethdev.c'\n+\t'igc_ethdev.c',\n+\t'igc_txrx.c'\n )\n \n includes += include_directories('base')\n",
    "prefixes": [
        "v3",
        "04/11"
    ]
}