get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/48995/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 48995,
    "url": "https://patches.dpdk.org/api/patches/48995/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1545032259-77179-25-git-send-email-wenzhuo.lu@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1545032259-77179-25-git-send-email-wenzhuo.lu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1545032259-77179-25-git-send-email-wenzhuo.lu@intel.com",
    "date": "2018-12-17T07:37:32",
    "name": "[v5,24/31] net/ice: support RX queue interruption",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "60ad707c6eabeeee0467d73ac77bcc0c53707313",
    "submitter": {
        "id": 258,
        "url": "https://patches.dpdk.org/api/people/258/?format=api",
        "name": "Wenzhuo Lu",
        "email": "wenzhuo.lu@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "https://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1545032259-77179-25-git-send-email-wenzhuo.lu@intel.com/mbox/",
    "series": [
        {
            "id": 2824,
            "url": "https://patches.dpdk.org/api/series/2824/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=2824",
            "date": "2018-12-17T07:37:08",
            "name": "A new net PMD - ICE",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/2824/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/48995/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/48995/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id E9CB51BAD7;\n\tMon, 17 Dec 2018 08:33:37 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby dpdk.org (Postfix) with ESMTP id CFD051B8F9\n\tfor <dev@dpdk.org>; Mon, 17 Dec 2018 08:33:24 +0100 (CET)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t16 Dec 2018 23:33:23 -0800",
            "from dpdk26.sh.intel.com ([10.67.110.164])\n\tby orsmga002.jf.intel.com with ESMTP; 16 Dec 2018 23:33:23 -0800"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.56,364,1539673200\"; d=\"scan'208\";a=\"118899323\"",
        "From": "Wenzhuo Lu <wenzhuo.lu@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Wenzhuo Lu <wenzhuo.lu@intel.com>, Qiming Yang <qiming.yang@intel.com>, \n\tXiaoyun Li <xiaoyun.li@intel.com>, Jingjing Wu <jingjing.wu@intel.com>",
        "Date": "Mon, 17 Dec 2018 15:37:32 +0800",
        "Message-Id": "<1545032259-77179-25-git-send-email-wenzhuo.lu@intel.com>",
        "X-Mailer": "git-send-email 1.9.3",
        "In-Reply-To": "<1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>",
        "References": "<1542956179-80951-1-git-send-email-wenzhuo.lu@intel.com>\n\t<1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v5 24/31] net/ice: support RX queue interruption",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add below ops,\nrx_queue_intr_enable\nrx_queue_intr_disable\n\nSigned-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>\nSigned-off-by: Qiming Yang <qiming.yang@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Jingjing Wu <jingjing.wu@intel.com>\n---\n doc/guides/nics/features/ice.ini |   1 +\n drivers/net/ice/ice_ethdev.c     | 230 +++++++++++++++++++++++++++++++++++++++\n 2 files changed, 231 insertions(+)",
    "diff": "diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini\nindex 953a869..2844f4c 100644\n--- a/doc/guides/nics/features/ice.ini\n+++ b/doc/guides/nics/features/ice.ini\n@@ -7,6 +7,7 @@\n Speed capabilities   = Y\n Link status          = Y\n Link status event    = Y\n+Rx interrupt         = Y\n Queue start/stop     = Y\n MTU update           = Y\n Jumbo frame          = Y\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex 28d0282..568d8a4 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -48,6 +48,10 @@ static int ice_macaddr_add(struct rte_eth_dev *dev,\n \t\t\t   __rte_unused uint32_t index,\n \t\t\t   uint32_t pool);\n static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);\n+static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,\n+\t\t\t\t    uint16_t queue_id);\n+static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,\n+\t\t\t\t     uint16_t queue_id);\n static int ice_vlan_pvid_set(struct rte_eth_dev *dev,\n \t\t\t     uint16_t pvid, int on);\n \n@@ -86,6 +90,8 @@ static int ice_vlan_pvid_set(struct rte_eth_dev *dev,\n \t.reta_query                   = ice_rss_reta_query,\n \t.rss_hash_update              = ice_rss_hash_update,\n \t.rss_hash_conf_get            = ice_rss_hash_conf_get,\n+\t.rx_queue_intr_enable         = ice_rx_queue_intr_enable,\n+\t.rx_queue_intr_disable        = ice_rx_queue_intr_disable,\n \t.vlan_pvid_set                = ice_vlan_pvid_set,\n };\n \n@@ -1258,10 +1264,39 @@ static int ice_vlan_pvid_set(struct rte_eth_dev *dev,\n }\n \n static void\n+ice_vsi_disable_queues_intr(struct ice_vsi *vsi)\n+{\n+\tstruct rte_eth_dev *dev = vsi->adapter->eth_dev;\n+\tstruct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tstruct ice_hw *hw = ICE_VSI_TO_HW(vsi);\n+\tuint16_t msix_intr, i;\n+\n+\t/* disable interrupt and also clear all the exist config 
*/\n+\tfor (i = 0; i < vsi->nb_qps; i++) {\n+\t\tICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);\n+\t\tICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);\n+\t\trte_wmb();\n+\t}\n+\n+\tif (rte_intr_allow_others(intr_handle))\n+\t\t/* vfio-pci */\n+\t\tfor (i = 0; i < vsi->nb_msix; i++) {\n+\t\t\tmsix_intr = vsi->msix_intr + i;\n+\t\t\tICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),\n+\t\t\t\t      GLINT_DYN_CTL_WB_ON_ITR_M);\n+\t\t}\n+\telse\n+\t\t/* igb_uio */\n+\t\tICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);\n+}\n+\n+static void\n ice_dev_stop(struct rte_eth_dev *dev)\n {\n \tstruct rte_eth_dev_data *data = dev->data;\n \tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_vsi *main_vsi = pf->main_vsi;\n \tstruct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);\n \tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n \tuint16_t i;\n@@ -1278,6 +1313,9 @@ static int ice_vlan_pvid_set(struct rte_eth_dev *dev,\n \tfor (i = 0; i < data->nb_tx_queues; i++)\n \t\tice_tx_queue_stop(dev, i);\n \n+\t/* disable all queue interrupts */\n+\tice_vsi_disable_queues_intr(main_vsi);\n+\n \t/* Clear all queues and release mbufs */\n \tice_clear_queues(dev);\n \n@@ -1405,6 +1443,158 @@ static int ice_init_rss(struct ice_pf *pf)\n \treturn 0;\n }\n \n+static void\n+__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,\n+\t\t       int base_queue, int nb_queue)\n+{\n+\tstruct ice_hw *hw = ICE_VSI_TO_HW(vsi);\n+\tuint32_t val, val_tx;\n+\tint i;\n+\n+\tfor (i = 0; i < nb_queue; i++) {\n+\t\t/*do actual bind*/\n+\t\tval = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |\n+\t\t      (0 < QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;\n+\t\tval_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |\n+\t\t\t (0 < QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;\n+\n+\t\tPMD_DRV_LOG(INFO, \"queue %d is binding to vect %d\",\n+\t\t\t    base_queue + i, msix_vect);\n+\t\t/* set ITR0 value */\n+\t\tICE_WRITE_REG(hw, GLINT_ITR(0, 
msix_vect), 0x10);\n+\t\tICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);\n+\t\tICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);\n+\t}\n+}\n+\n+static void\n+ice_vsi_queues_bind_intr(struct ice_vsi *vsi)\n+{\n+\tstruct rte_eth_dev *dev = vsi->adapter->eth_dev;\n+\tstruct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tstruct ice_hw *hw = ICE_VSI_TO_HW(vsi);\n+\tuint16_t msix_vect = vsi->msix_intr;\n+\tuint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);\n+\tuint16_t queue_idx = 0;\n+\tint record = 0;\n+\tint i;\n+\n+\t/* clear Rx/Tx queue interrupt */\n+\tfor (i = 0; i < vsi->nb_used_qps; i++) {\n+\t\tICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);\n+\t\tICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);\n+\t}\n+\n+\t/* PF bind interrupt */\n+\tif (rte_intr_dp_is_en(intr_handle)) {\n+\t\tqueue_idx = 0;\n+\t\trecord = 1;\n+\t}\n+\n+\tfor (i = 0; i < vsi->nb_used_qps; i++) {\n+\t\tif (nb_msix <= 1) {\n+\t\t\tif (!rte_intr_allow_others(intr_handle))\n+\t\t\t\tmsix_vect = ICE_MISC_VEC_ID;\n+\n+\t\t\t/* uio mapping all queue to one msix_vect */\n+\t\t\t__vsi_queues_bind_intr(vsi, msix_vect,\n+\t\t\t\t\t       vsi->base_queue + i,\n+\t\t\t\t\t       vsi->nb_used_qps - i);\n+\n+\t\t\tfor (; !!record && i < vsi->nb_used_qps; i++)\n+\t\t\t\tintr_handle->intr_vec[queue_idx + i] =\n+\t\t\t\t\tmsix_vect;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\t/* vfio 1:1 queue/msix_vect mapping */\n+\t\t__vsi_queues_bind_intr(vsi, msix_vect,\n+\t\t\t\t       vsi->base_queue + i, 1);\n+\n+\t\tif (!!record)\n+\t\t\tintr_handle->intr_vec[queue_idx + i] = msix_vect;\n+\n+\t\tmsix_vect++;\n+\t\tnb_msix--;\n+\t}\n+}\n+\n+static void\n+ice_vsi_enable_queues_intr(struct ice_vsi *vsi)\n+{\n+\tstruct rte_eth_dev *dev = vsi->adapter->eth_dev;\n+\tstruct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tstruct ice_hw *hw = 
ICE_VSI_TO_HW(vsi);\n+\tuint16_t msix_intr, i;\n+\n+\tif (rte_intr_allow_others(intr_handle))\n+\t\tfor (i = 0; i < vsi->nb_used_qps; i++) {\n+\t\t\tmsix_intr = vsi->msix_intr + i;\n+\t\t\tICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),\n+\t\t\t\t      GLINT_DYN_CTL_INTENA_M |\n+\t\t\t\t      GLINT_DYN_CTL_CLEARPBA_M |\n+\t\t\t\t      GLINT_DYN_CTL_ITR_INDX_M |\n+\t\t\t\t      GLINT_DYN_CTL_WB_ON_ITR_M);\n+\t\t}\n+\telse\n+\t\tICE_WRITE_REG(hw, GLINT_DYN_CTL(0),\n+\t\t\t      GLINT_DYN_CTL_INTENA_M |\n+\t\t\t      GLINT_DYN_CTL_CLEARPBA_M |\n+\t\t\t      GLINT_DYN_CTL_ITR_INDX_M |\n+\t\t\t      GLINT_DYN_CTL_WB_ON_ITR_M);\n+}\n+\n+static int\n+ice_rxq_intr_setup(struct rte_eth_dev *dev)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tstruct ice_vsi *vsi = pf->main_vsi;\n+\tuint32_t intr_vector = 0;\n+\n+\trte_intr_disable(intr_handle);\n+\n+\t/* check and configure queue intr-vector mapping */\n+\tif ((rte_intr_cap_multiple(intr_handle) ||\n+\t     !RTE_ETH_DEV_SRIOV(dev).active) &&\n+\t    dev->data->dev_conf.intr_conf.rxq != 0) {\n+\t\tintr_vector = dev->data->nb_rx_queues;\n+\t\tif (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {\n+\t\t\tPMD_DRV_LOG(ERR, \"At most %d intr queues supported\",\n+\t\t\t\t    ICE_MAX_INTR_QUEUE_NUM);\n+\t\t\treturn -ENOTSUP;\n+\t\t}\n+\t\tif (rte_intr_efd_enable(intr_handle, intr_vector))\n+\t\t\treturn -1;\n+\t}\n+\n+\tif (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {\n+\t\tintr_handle->intr_vec =\n+\t\trte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),\n+\t\t\t    0);\n+\t\tif (!intr_handle->intr_vec) {\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"Failed to allocate %d rx_queues intr_vec\",\n+\t\t\t\t    dev->data->nb_rx_queues);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\t/* Map queues with MSIX interrupt */\n+\tvsi->nb_used_qps = 
dev->data->nb_rx_queues;\n+\tice_vsi_queues_bind_intr(vsi);\n+\n+\t/* Enable interrupts for all the queues */\n+\tice_vsi_enable_queues_intr(vsi);\n+\n+\trte_intr_enable(intr_handle);\n+\n+\treturn 0;\n+}\n+\n static int\n ice_dev_start(struct rte_eth_dev *dev)\n {\n@@ -1439,6 +1629,10 @@ static int ice_init_rss(struct ice_pf *pf)\n \t\tgoto rx_err;\n \t}\n \n+\t/* enable Rx interrput and mapping Rx queue to interrupt vector */\n+\tif (ice_rxq_intr_setup(dev))\n+\t\treturn -EIO;\n+\n \tret = ice_aq_set_event_mask(hw, hw->port_info->lport,\n \t\t\t\t    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |\n \t\t\t\t     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |\n@@ -2247,6 +2441,42 @@ static int ice_macaddr_set(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n+static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,\n+\t\t\t\t    uint16_t queue_id)\n+{\n+\tstruct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint32_t val;\n+\tuint16_t msix_intr;\n+\n+\tmsix_intr = intr_handle->intr_vec[queue_id];\n+\n+\tval = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |\n+\t      GLINT_DYN_CTL_ITR_INDX_M;\n+\tval &= ~GLINT_DYN_CTL_WB_ON_ITR_M;\n+\n+\tICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);\n+\trte_intr_enable(&pci_dev->intr_handle);\n+\n+\treturn 0;\n+}\n+\n+static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,\n+\t\t\t\t     uint16_t queue_id)\n+{\n+\tstruct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);\n+\tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tuint16_t msix_intr;\n+\n+\tmsix_intr = intr_handle->intr_vec[queue_id];\n+\n+\tICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);\n+\n+\treturn 0;\n+}\n+\n static int\n ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)\n {\n",
    "prefixes": [
        "v5",
        "24/31"
    ]
}