get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Update a patch (full replacement of writable fields).

GET /api/patches/4968/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 4968,
    "url": "http://patches.dpdk.org/api/patches/4968/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1432889125-20255-11-git-send-email-cunming.liang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1432889125-20255-11-git-send-email-cunming.liang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1432889125-20255-11-git-send-email-cunming.liang@intel.com",
    "date": "2015-05-29T08:45:23",
    "name": "[dpdk-dev,v9,10/12] igb: enable rx queue interrupts for PF",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a245a3d235346d428fbb34192c19403fc618f01e",
    "submitter": {
        "id": 46,
        "url": "http://patches.dpdk.org/api/people/46/?format=api",
        "name": "Cunming Liang",
        "email": "cunming.liang@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1432889125-20255-11-git-send-email-cunming.liang@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/4968/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/4968/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 7ED39C350;\n\tFri, 29 May 2015 10:46:18 +0200 (CEST)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby dpdk.org (Postfix) with ESMTP id 3D9421288\n\tfor <dev@dpdk.org>; Fri, 29 May 2015 10:46:09 +0200 (CEST)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby fmsmga103.fm.intel.com with ESMTP; 29 May 2015 01:46:08 -0700",
            "from shvmail01.sh.intel.com ([10.239.29.42])\n\tby orsmga002.jf.intel.com with ESMTP; 29 May 2015 01:46:07 -0700",
            "from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com\n\t[10.239.29.89])\n\tby shvmail01.sh.intel.com with ESMTP id t4T8k59W003438;\n\tFri, 29 May 2015 16:46:05 +0800",
            "from shecgisg004.sh.intel.com (localhost [127.0.0.1])\n\tby shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP\n\tid t4T8k2eb020377; Fri, 29 May 2015 16:46:04 +0800",
            "(from cliang18@localhost)\n\tby shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id t4T8k2KC020373; \n\tFri, 29 May 2015 16:46:02 +0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.13,515,1427785200\"; d=\"scan'208\";a=\"737255756\"",
        "From": "Cunming Liang <cunming.liang@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Fri, 29 May 2015 16:45:23 +0800",
        "Message-Id": "<1432889125-20255-11-git-send-email-cunming.liang@intel.com>",
        "X-Mailer": "git-send-email 1.7.4.1",
        "In-Reply-To": "<1432889125-20255-1-git-send-email-cunming.liang@intel.com>",
        "References": "<1432198563-16334-1-git-send-email-cunming.liang@intel.com>\n\t<1432889125-20255-1-git-send-email-cunming.liang@intel.com>",
        "Cc": "shemming@brocade.com, liang-min.wang@intel.com",
        "Subject": "[dpdk-dev] [PATCH v9 10/12] igb: enable rx queue interrupts for PF",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The patch does below for igb PF:\n- Setup NIC to generate MSI-X interrupts\n- Set the IVAR register to map interrupt causes to vectors\n- Implement interrupt enable/disable functions\n\nSigned-off-by: Danny Zhou <danny.zhou@intel.com>\nSigned-off-by: Cunming Liang <cunming.liang@intel.com>\n---\nv9 changes\n - move queue-vec mapping init from dev_configure to dev_start\n - fix link interrupt not working issue in vfio-msix\n\nv8 changes\n - add vfio-msi/vfio-legacy and uio-legacy support\n\nv7 changes\n - add condition check when intr vector is not enabled\n\nv6 changes\n - fill queue-vector mapping table\n\nv5 changes\n - Rebase the patchset onto the HEAD\n\nv3 changes\n - Remove unnecessary variables in e1000_mac_info\n - Remove spinlok from PMD\n\nv2 changes\n - Consolidate review comments related to coding style\n\n drivers/net/e1000/igb_ethdev.c | 285 ++++++++++++++++++++++++++++++++++++-----\n 1 file changed, 252 insertions(+), 33 deletions(-)",
    "diff": "diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c\nindex e4b370d..bbd7b74 100644\n--- a/drivers/net/e1000/igb_ethdev.c\n+++ b/drivers/net/e1000/igb_ethdev.c\n@@ -96,6 +96,7 @@ static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,\n static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,\n \t\t\t\tstruct rte_eth_fc_conf *fc_conf);\n static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);\n+static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);\n static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);\n static int eth_igb_interrupt_action(struct rte_eth_dev *dev);\n static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,\n@@ -194,6 +195,16 @@ static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,\n \t\t     enum rte_filter_op filter_op,\n \t\t     void *arg);\n \n+static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,\n+\t\t\t\t\tuint16_t queue_id);\n+static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,\n+\t\t\t\t\tuint16_t queue_id);\n+static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,\n+\t\t\t\tuint8_t queue, uint8_t msix_vector);\n+static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);\n+static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,\n+\t\t\t\tuint8_t index, uint8_t offset);\n+\n /*\n  * Define VF Stats MACRO for Non \"cleared on read\" register\n  */\n@@ -253,6 +264,8 @@ static const struct eth_dev_ops eth_igb_ops = {\n \t.vlan_tpid_set        = eth_igb_vlan_tpid_set,\n \t.vlan_offload_set     = eth_igb_vlan_offload_set,\n \t.rx_queue_setup       = eth_igb_rx_queue_setup,\n+\t.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,\n+\t.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,\n \t.rx_queue_release     = eth_igb_rx_queue_release,\n \t.rx_queue_count       = eth_igb_rx_queue_count,\n \t.rx_descriptor_done   = eth_igb_rx_descriptor_done,\n@@ -584,12 +597,6 @@ 
eth_igb_dev_init(struct rte_eth_dev *eth_dev)\n \t\t     eth_dev->data->port_id, pci_dev->id.vendor_id,\n \t\t     pci_dev->id.device_id);\n \n-\trte_intr_callback_register(&(pci_dev->intr_handle),\n-\t\teth_igb_interrupt_handler, (void *)eth_dev);\n-\n-\t/* enable uio intr after callback register */\n-\trte_intr_enable(&(pci_dev->intr_handle));\n-\n \t/* enable support intr */\n \tigb_intr_enable(eth_dev);\n \n@@ -752,7 +759,9 @@ eth_igb_start(struct rte_eth_dev *dev)\n {\n \tstruct e1000_hw *hw =\n \t\tE1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n-\tint ret, i, mask;\n+\tstruct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;\n+\tuint32_t intr_vector = 0;\n+\tint ret, mask;\n \tuint32_t ctrl_ext;\n \n \tPMD_INIT_FUNC_TRACE();\n@@ -792,6 +801,27 @@ eth_igb_start(struct rte_eth_dev *dev)\n \t/* configure PF module if SRIOV enabled */\n \tigb_pf_host_configure(dev);\n \n+\t/* check and configure queue intr-vector mapping */\n+\tif (dev->data->dev_conf.intr_conf.rxq != 0)\n+\t\tintr_vector = dev->data->nb_rx_queues;\n+\n+\tif (rte_intr_efd_enable(intr_handle, intr_vector))\n+\t\treturn -1;\n+\n+\tif (rte_intr_dp_is_en(intr_handle)) {\n+\t\tintr_handle->intr_vec =\n+\t\t\trte_zmalloc(\"intr_vec\",\n+\t\t\t\t    dev->data->nb_rx_queues * sizeof(int), 0);\n+\t\tif (intr_handle->intr_vec == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate %d rx_queues\"\n+\t\t\t\t     \" intr_vec\\n\", dev->data->nb_rx_queues);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\t/* confiugre msix for rx interrupt */\n+\teth_igb_configure_msix_intr(dev);\n+\n \t/* Configure for OS presence */\n \tigb_init_manageability(hw);\n \n@@ -819,33 +849,9 @@ eth_igb_start(struct rte_eth_dev *dev)\n \t\tigb_vmdq_vlan_hw_filter_enable(dev);\n \t}\n \n-\t/*\n-\t * Configure the Interrupt Moderation register (EITR) with the maximum\n-\t * possible value (0xFFFF) to minimize \"System Partial Write\" issued by\n-\t * spurious [DMA] memory updates of RX and TX ring descriptors.\n-\t *\n-\t * 
With a EITR granularity of 2 microseconds in the 82576, only 7/8\n-\t * spurious memory updates per second should be expected.\n-\t * ((65535 * 2) / 1000.1000 ~= 0.131 second).\n-\t *\n-\t * Because interrupts are not used at all, the MSI-X is not activated\n-\t * and interrupt moderation is controlled by EITR[0].\n-\t *\n-\t * Note that having [almost] disabled memory updates of RX and TX ring\n-\t * descriptors through the Interrupt Moderation mechanism, memory\n-\t * updates of ring descriptors are now moderated by the configurable\n-\t * value of Write-Back Threshold registers.\n-\t */\n \tif ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||\n \t\t(hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||\n \t\t(hw->mac.type == e1000_i211)) {\n-\t\tuint32_t ivar;\n-\n-\t\t/* Enable all RX & TX queues in the IVAR registers */\n-\t\tivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);\n-\t\tfor (i = 0; i < 8; i++)\n-\t\t\tE1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);\n-\n \t\t/* Configure EITR with the maximum possible value (0xFFFF) */\n \t\tE1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);\n \t}\n@@ -896,8 +902,23 @@ eth_igb_start(struct rte_eth_dev *dev)\n \te1000_setup_link(hw);\n \n \t/* check if lsc interrupt feature is enabled */\n-\tif (dev->data->dev_conf.intr_conf.lsc != 0)\n-\t\tret = eth_igb_lsc_interrupt_setup(dev);\n+\tif (dev->data->dev_conf.intr_conf.lsc != 0) {\n+\t\tif (rte_intr_allow_others(intr_handle)) {\n+\t\t\trte_intr_callback_register(intr_handle,\n+\t\t\t\t\t\t   eth_igb_interrupt_handler,\n+\t\t\t\t\t\t   (void *)dev);\n+\t\t\teth_igb_lsc_interrupt_setup(dev);\n+\t\t} else\n+\t\t\tPMD_INIT_LOG(INFO, \"lsc won't enable because of\"\n+\t\t\t\t     \" no intr multiplex\\n\");\n+\t}\n+\n+\t/* check if rxq interrupt is enabled */\n+\tif (dev->data->dev_conf.intr_conf.rxq != 0)\n+\t\teth_igb_rxq_interrupt_setup(dev);\n+\n+\t/* enable uio/vfio intr/eventfd mapping */\n+\trte_intr_enable(intr_handle);\n \n \t/* 
resume enabled intr since hw reset */\n \tigb_intr_enable(dev);\n@@ -930,8 +951,13 @@ eth_igb_stop(struct rte_eth_dev *dev)\n \tstruct e1000_flex_filter *p_flex;\n \tstruct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;\n \tstruct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;\n+\tstruct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;\n \n \tigb_intr_disable(hw);\n+\n+\t/* disable intr eventfd mapping */\n+\trte_intr_disable(intr_handle);\n+\n \tigb_pf_reset_hw(hw);\n \tE1000_WRITE_REG(hw, E1000_WUC, 0);\n \n@@ -980,6 +1006,13 @@ eth_igb_stop(struct rte_eth_dev *dev)\n \t\trte_free(p_2tuple);\n \t}\n \tfilter_info->twotuple_mask = 0;\n+\n+\t/* Clean datapath event and queue/vec mapping */\n+\trte_intr_efd_disable(intr_handle);\n+\tif (intr_handle->intr_vec != NULL) {\n+\t\trte_free(intr_handle->intr_vec);\n+\t\tintr_handle->intr_vec = NULL;\n+\t}\n }\n \n static void\n@@ -987,6 +1020,7 @@ eth_igb_close(struct rte_eth_dev *dev)\n {\n \tstruct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \tstruct rte_eth_link link;\n+\tstruct rte_pci_device *pci_dev;\n \n \teth_igb_stop(dev);\n \te1000_phy_hw_reset(hw);\n@@ -1004,6 +1038,12 @@ eth_igb_close(struct rte_eth_dev *dev)\n \n \tigb_dev_clear_queues(dev);\n \n+\tpci_dev = dev->pci_dev;\n+\tif (pci_dev->intr_handle.intr_vec) {\n+\t\trte_free(pci_dev->intr_handle.intr_vec);\n+\t\tpci_dev->intr_handle.intr_vec = NULL;\n+\t}\n+\n \tmemset(&link, 0, sizeof(link));\n \trte_igb_dev_atomic_write_link_status(dev, &link);\n }\n@@ -1828,6 +1868,34 @@ eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)\n }\n \n /*\n+ * It clears the interrupt causes and enables the interrupt.\n+ * It will be called once only during nic initialized.\n+ *\n+ * @param dev\n+ *  Pointer to struct rte_eth_dev.\n+ *\n+ * @return\n+ *  - On success, zero.\n+ *  - On failure, a negative value.\n+ */\n+static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)\n+{\n+\tuint32_t mask, regval;\n+\tstruct e1000_hw *hw 
=\n+\t\tE1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct rte_eth_dev_info dev_info;\n+\n+\tmemset(&dev_info, 0, sizeof(dev_info));\n+\teth_igb_infos_get(dev, &dev_info);\n+\n+\tmask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);\n+\tregval = E1000_READ_REG(hw, E1000_EIMS);\n+\tE1000_WRITE_REG(hw, E1000_EIMS, regval | mask);\n+\n+\treturn 0;\n+}\n+\n+/*\n  * It reads ICR and gets interrupt causes, check it and set a bit flag\n  * to update link status.\n  *\n@@ -3652,5 +3720,156 @@ static struct rte_driver pmd_igbvf_drv = {\n \t.init = rte_igbvf_pmd_init,\n };\n \n+static int\n+eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)\n+{\n+\tstruct e1000_hw *hw =\n+\t\tE1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t mask = 1 << queue_id;\n+\n+\tE1000_WRITE_REG(hw, E1000_EIMC, mask);\n+\tE1000_WRITE_FLUSH(hw);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)\n+{\n+\tstruct e1000_hw *hw =\n+\t\tE1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t mask = 1 << queue_id;\n+\tuint32_t regval;\n+\n+\tregval = E1000_READ_REG(hw, E1000_EIMS);\n+\tE1000_WRITE_REG(hw, E1000_EIMS, regval | mask);\n+\tE1000_WRITE_FLUSH(hw);\n+\n+\trte_intr_enable(&(dev->pci_dev->intr_handle));\n+\n+\treturn 0;\n+}\n+\n+static void\n+eth_igb_write_ivar(struct e1000_hw *hw, uint8_t  msix_vector,\n+\t\t\tuint8_t index, uint8_t offset)\n+{\n+\tuint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);\n+\n+\t/* clear bits */\n+\tval &= ~((uint32_t)0xFF << offset);\n+\n+\t/* write vector and valid bit */\n+\tval |= (msix_vector | E1000_IVAR_VALID) << offset;\n+\n+\tE1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);\n+}\n+\n+static void\n+eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,\n+\t\t\t\t uint8_t queue, uint8_t msix_vector)\n+{\n+\tuint32_t tmp = 0;\n+\tif (hw->mac.type == e1000_82575) {\n+\t\tif (direction == 0)\n+\t\t\ttmp = E1000_EICR_RX_QUEUE0 << 
queue;\n+\t\telse if (direction == 1)\n+\t\t\ttmp = E1000_EICR_TX_QUEUE0 << queue;\n+\t\tE1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);\n+\t} else if (hw->mac.type == e1000_82576) {\n+\t\tif ((direction == 0) || (direction == 1))\n+\t\t\teth_igb_write_ivar(hw, msix_vector, queue & 0x7,\n+\t\t\t\t\t((queue & 0x8) << 1) + 8 * direction);\n+\t} else if ((hw->mac.type == e1000_82580) ||\n+\t\t\t(hw->mac.type == e1000_i350) ||\n+\t\t\t(hw->mac.type == e1000_i354) ||\n+\t\t\t(hw->mac.type == e1000_i210) ||\n+\t\t\t(hw->mac.type == e1000_i211)) {\n+\t\tif ((direction == 0) || (direction == 1))\n+\t\t\teth_igb_write_ivar(hw, msix_vector,\n+\t\t\t\t\tqueue >> 1,\n+\t\t\t\t\t((queue & 0x1) << 4) + 8 * direction);\n+\t}\n+}\n+\n+/*\n+ * Sets up the hardware to generate MSI-X interrupts properly\n+ * @hw\n+ *  board private structure\n+ */\n+static void\n+eth_igb_configure_msix_intr(struct rte_eth_dev *dev)\n+{\n+\tint queue_id;\n+\tuint32_t tmpval, regval, intr_mask;\n+\tstruct e1000_hw *hw =\n+\t\tE1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;\n+\tuint32_t vec = 0;\n+\n+\t/* won't configure msix register if no mapping is done\n+\t * between intr vector and event fd */\n+\tif (!rte_intr_dp_is_en(intr_handle))\n+\t\treturn;\n+\n+\t/* set interrupt vector for other causes */\n+\tif (hw->mac.type == e1000_82575) {\n+\t\ttmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);\n+\t\t/* enable MSI-X PBA support */\n+\t\ttmpval |= E1000_CTRL_EXT_PBA_CLR;\n+\n+\t\t/* Auto-Mask interrupts upon ICR read */\n+\t\ttmpval |= E1000_CTRL_EXT_EIAME;\n+\t\ttmpval |= E1000_CTRL_EXT_IRCA;\n+\n+\t\tE1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);\n+\n+\t\t/* enable msix_other interrupt */\n+\t\tE1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);\n+\t\tregval = E1000_READ_REG(hw, E1000_EIAC);\n+\t\tE1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);\n+\t\tregval = E1000_READ_REG(hw, 
E1000_EIAM);\n+\t\tE1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);\n+\t} else if ((hw->mac.type == e1000_82576) ||\n+\t\t\t(hw->mac.type == e1000_82580) ||\n+\t\t\t(hw->mac.type == e1000_i350) ||\n+\t\t\t(hw->mac.type == e1000_i354) ||\n+\t\t\t(hw->mac.type == e1000_i210) ||\n+\t\t\t(hw->mac.type == e1000_i211)) {\n+\t\t/* turn on MSI-X capability first */\n+\t\tE1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |\n+\t\t\t\t\tE1000_GPIE_PBA | E1000_GPIE_EIAME |\n+\t\t\t\t\tE1000_GPIE_NSICR);\n+\n+\t\tintr_mask = (1 << intr_handle->max_intr) - 1;\n+\t\tregval = E1000_READ_REG(hw, E1000_EIAC);\n+\t\tE1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);\n+\n+\t\t/* enable msix_other interrupt */\n+\t\tregval = E1000_READ_REG(hw, E1000_EIMS);\n+\t\tE1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);\n+\t\ttmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;\n+\t\tE1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);\n+\t}\n+\n+\t/*\n+\t* use EIAM to auto-mask when MSI-X interrupt\n+\t* is asserted, this saves a register write for every interrupt\n+\t*/\n+\tintr_mask = (1 << intr_handle->nb_efd) - 1;\n+\tregval = E1000_READ_REG(hw, E1000_EIAM);\n+\tE1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);\n+\n+\tfor (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {\n+\t\teth_igb_assign_msix_vector(hw, 0, queue_id, vec);\n+\t\tintr_handle->intr_vec[queue_id] = vec;\n+\t\tif (vec < intr_handle->nb_efd - 1)\n+\t\t\tvec++;\n+\t}\n+\n+\tE1000_WRITE_FLUSH(hw);\n+}\n+\n+\n PMD_REGISTER_DRIVER(pmd_igb_drv);\n PMD_REGISTER_DRIVER(pmd_igbvf_drv);\n",
    "prefixes": [
        "dpdk-dev",
        "v9",
        "10/12"
    ]
}