get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/1120/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1120,
    "url": "https://patches.dpdk.org/api/patches/1120/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1415095289-28961-7-git-send-email-jing.d.chen@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1415095289-28961-7-git-send-email-jing.d.chen@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1415095289-28961-7-git-send-email-jing.d.chen@intel.com",
    "date": "2014-11-04T10:01:29",
    "name": "[dpdk-dev,v3,6/6] i40e: Add full VMDQ pools support",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "6b68e68f7dde8e703bb3ad72f06d5f227fe4ad21",
    "submitter": {
        "id": 40,
        "url": "https://patches.dpdk.org/api/people/40/?format=api",
        "name": "Chen, Jing D",
        "email": "jing.d.chen@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1415095289-28961-7-git-send-email-jing.d.chen@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/1120/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/1120/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 420E37F18;\n\tTue,  4 Nov 2014 10:52:41 +0100 (CET)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n\tby dpdk.org (Postfix) with ESMTP id 609D17E04\n\tfor <dev@dpdk.org>; Tue,  4 Nov 2014 10:52:37 +0100 (CET)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n\tby orsmga103.jf.intel.com with ESMTP; 04 Nov 2014 01:59:55 -0800",
            "from shvmail01.sh.intel.com ([10.239.29.42])\n\tby orsmga001.jf.intel.com with ESMTP; 04 Nov 2014 02:01:53 -0800",
            "from shecgisg003.sh.intel.com (shecgisg003.sh.intel.com\n\t[10.239.29.90])\n\tby shvmail01.sh.intel.com with ESMTP id sA4A1pRb022756;\n\tTue, 4 Nov 2014 18:01:51 +0800",
            "from shecgisg003.sh.intel.com (localhost [127.0.0.1])\n\tby shecgisg003.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP\n\tid sA4A1nlW029037; Tue, 4 Nov 2014 18:01:51 +0800",
            "(from jingche2@localhost)\n\tby shecgisg003.sh.intel.com (8.13.6/8.13.6/Submit) id sA4A1mNm029033; \n\tTue, 4 Nov 2014 18:01:48 +0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.07,312,1413270000\"; d=\"scan'208\";a=\"601871517\"",
        "From": "\"Chen Jing D(Mark)\" <jing.d.chen@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Tue,  4 Nov 2014 18:01:29 +0800",
        "Message-Id": "<1415095289-28961-7-git-send-email-jing.d.chen@intel.com>",
        "X-Mailer": "git-send-email 1.7.12.2",
        "In-Reply-To": "<1415095289-28961-1-git-send-email-jing.d.chen@intel.com>",
        "References": "<1411478047-1251-2-git-send-email-jing.d.chen@intel.com>\n\t<1415095289-28961-1-git-send-email-jing.d.chen@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 6/6] i40e: Add full VMDQ pools support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: \"Chen Jing D(Mark)\" <jing.d.chen@intel.com>\n\n1. Function i40e_vsi_* name change to i40e_dev_* since PF can contains\n   more than 1 VSI after VMDQ enabled.\n2. i40e_dev_rx/tx_queue_setup change to have capability of setup\n   queues that belongs to VMDQ pools.\n3. Add queue mapping. This will do a convertion between queue index\n   that application used and real NIC queue index.\n3. i40e_dev_start/stop change to have capability switching VMDQ queues.\n4. i40e_pf_config_rss change to calculate actual main VSI queue numbers\n   after VMDQ pools introduced.\n\nSigned-off-by: Chen Jing D(Mark) <jing.d.chen@intel.com>\n---\n lib/librte_pmd_i40e/i40e_ethdev.c |  174 +++++++++++++++++++++++++-----------\n lib/librte_pmd_i40e/i40e_ethdev.h |    4 +-\n lib/librte_pmd_i40e/i40e_rxtx.c   |  125 ++++++++++++++++++++++-----\n 3 files changed, 226 insertions(+), 77 deletions(-)",
    "diff": "diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c\nindex 21401f8..5c15a9d 100644\n--- a/lib/librte_pmd_i40e/i40e_ethdev.c\n+++ b/lib/librte_pmd_i40e/i40e_ethdev.c\n@@ -162,7 +162,7 @@ static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,\n static int i40e_get_cap(struct i40e_hw *hw);\n static int i40e_pf_parameter_init(struct rte_eth_dev *dev);\n static int i40e_pf_setup(struct i40e_pf *pf);\n-static int i40e_vsi_init(struct i40e_vsi *vsi);\n+static int i40e_dev_rxtx_init(struct i40e_pf *pf);\n static int i40e_vmdq_setup(struct rte_eth_dev *dev);\n static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,\n \t\tbool offset_loaded, uint64_t *offset, uint64_t *stat);\n@@ -783,8 +783,8 @@ i40e_dev_start(struct rte_eth_dev *dev)\n {\n \tstruct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n \tstruct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n-\tstruct i40e_vsi *vsi = pf->main_vsi;\n-\tint ret;\n+\tstruct i40e_vsi *main_vsi = pf->main_vsi;\n+\tint ret, i;\n \n \tif ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&\n \t\t(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {\n@@ -795,26 +795,37 @@ i40e_dev_start(struct rte_eth_dev *dev)\n \t}\n \n \t/* Initialize VSI */\n-\tret = i40e_vsi_init(vsi);\n+\tret = i40e_dev_rxtx_init(pf);\n \tif (ret != I40E_SUCCESS) {\n-\t\tPMD_DRV_LOG(ERR, \"Failed to init VSI\");\n+\t\tPMD_DRV_LOG(ERR, \"Failed to init rx/tx queues\");\n \t\tgoto err_up;\n \t}\n \n \t/* Map queues with MSIX interrupt */\n-\ti40e_vsi_queues_bind_intr(vsi);\n-\ti40e_vsi_enable_queues_intr(vsi);\n+\ti40e_vsi_queues_bind_intr(main_vsi);\n+\ti40e_vsi_enable_queues_intr(main_vsi);\n+\n+\t/* Map VMDQ VSI queues with MSIX interrupt */\n+\tfor (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {\n+\t\ti40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);\n+\t\ti40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);\n+\t}\n \n \t/* Enable all queues which have been configured 
*/\n-\tret = i40e_vsi_switch_queues(vsi, TRUE);\n+\tret = i40e_dev_switch_queues(pf, TRUE);\n \tif (ret != I40E_SUCCESS) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to enable VSI\");\n \t\tgoto err_up;\n \t}\n \n \t/* Enable receiving broadcast packets */\n-\tif ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {\n-\t\tret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);\n+\tret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);\n+\tif (ret != I40E_SUCCESS)\n+\t\tPMD_DRV_LOG(INFO, \"fail to set vsi broadcast\");\n+\n+\tfor (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {\n+\t\tret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,\n+\t\t\t\t\t\ttrue, NULL);\n \t\tif (ret != I40E_SUCCESS)\n \t\t\tPMD_DRV_LOG(INFO, \"fail to set vsi broadcast\");\n \t}\n@@ -829,7 +840,8 @@ i40e_dev_start(struct rte_eth_dev *dev)\n \treturn I40E_SUCCESS;\n \n err_up:\n-\ti40e_vsi_switch_queues(vsi, FALSE);\n+\ti40e_dev_switch_queues(pf, FALSE);\n+\ti40e_dev_clear_queues(dev);\n \n \treturn ret;\n }\n@@ -838,17 +850,26 @@ static void\n i40e_dev_stop(struct rte_eth_dev *dev)\n {\n \tstruct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n-\tstruct i40e_vsi *vsi = pf->main_vsi;\n+\tstruct i40e_vsi *main_vsi = pf->main_vsi;\n+\tint i;\n \n \t/* Disable all queues */\n-\ti40e_vsi_switch_queues(vsi, FALSE);\n+\ti40e_dev_switch_queues(pf, FALSE);\n+\n+\t/* un-map queues with interrupt registers */\n+\ti40e_vsi_disable_queues_intr(main_vsi);\n+\ti40e_vsi_queues_unbind_intr(main_vsi);\n+\n+\tfor (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {\n+\t\ti40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);\n+\t\ti40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);\n+\t}\n+\n+\t/* Clear all queues and release memory */\n+\ti40e_dev_clear_queues(dev);\n \n \t/* Set link down */\n \ti40e_dev_set_link_down(dev);\n-\n-\t/* un-map queues with interrupt registers */\n-\ti40e_vsi_disable_queues_intr(vsi);\n-\ti40e_vsi_queues_unbind_intr(vsi);\n }\n \n static void\n@@ -3251,11 +3272,11 @@ 
i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)\n \n /* Swith on or off the tx queues */\n static int\n-i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)\n+i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)\n {\n-\tstruct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);\n+\tstruct rte_eth_dev_data *dev_data = pf->dev_data;\n \tstruct i40e_tx_queue *txq;\n-\tstruct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);\n+\tstruct rte_eth_dev *dev = pf->adapter->eth_dev;\n \tuint16_t i;\n \tint ret;\n \n@@ -3263,7 +3284,7 @@ i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)\n \t\ttxq = dev_data->tx_queues[i];\n \t\t/* Don't operate the queue if not configured or\n \t\t * if starting only per queue */\n-\t\tif (!txq->q_set || (on && txq->tx_deferred_start))\n+\t\tif (!txq || !txq->q_set || (on && txq->tx_deferred_start))\n \t\t\tcontinue;\n \t\tif (on)\n \t\t\tret = i40e_dev_tx_queue_start(dev, i);\n@@ -3329,11 +3350,11 @@ i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)\n }\n /* Switch on or off the rx queues */\n static int\n-i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)\n+i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)\n {\n-\tstruct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);\n+\tstruct rte_eth_dev_data *dev_data = pf->dev_data;\n \tstruct i40e_rx_queue *rxq;\n-\tstruct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);\n+\tstruct rte_eth_dev *dev = pf->adapter->eth_dev;\n \tuint16_t i;\n \tint ret;\n \n@@ -3341,7 +3362,7 @@ i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)\n \t\trxq = dev_data->rx_queues[i];\n \t\t/* Don't operate the queue if not configured or\n \t\t * if starting only per queue */\n-\t\tif (!rxq->q_set || (on && rxq->rx_deferred_start))\n+\t\tif (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))\n \t\t\tcontinue;\n \t\tif (on)\n \t\t\tret = i40e_dev_rx_queue_start(dev, i);\n@@ -3356,26 +3377,26 @@ i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)\n \n 
/* Switch on or off all the rx/tx queues */\n int\n-i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)\n+i40e_dev_switch_queues(struct i40e_pf *pf, bool on)\n {\n \tint ret;\n \n \tif (on) {\n \t\t/* enable rx queues before enabling tx queues */\n-\t\tret = i40e_vsi_switch_rx_queues(vsi, on);\n+\t\tret = i40e_dev_switch_rx_queues(pf, on);\n \t\tif (ret) {\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to switch rx queues\");\n \t\t\treturn ret;\n \t\t}\n-\t\tret = i40e_vsi_switch_tx_queues(vsi, on);\n+\t\tret = i40e_dev_switch_tx_queues(pf, on);\n \t} else {\n \t\t/* Stop tx queues before stopping rx queues */\n-\t\tret = i40e_vsi_switch_tx_queues(vsi, on);\n+\t\tret = i40e_dev_switch_tx_queues(pf, on);\n \t\tif (ret) {\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to switch tx queues\");\n \t\t\treturn ret;\n \t\t}\n-\t\tret = i40e_vsi_switch_rx_queues(vsi, on);\n+\t\tret = i40e_dev_switch_rx_queues(pf, on);\n \t}\n \n \treturn ret;\n@@ -3383,15 +3404,18 @@ i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)\n \n /* Initialize VSI for TX */\n static int\n-i40e_vsi_tx_init(struct i40e_vsi *vsi)\n+i40e_dev_tx_init(struct i40e_pf *pf)\n {\n-\tstruct i40e_pf *pf = I40E_VSI_TO_PF(vsi);\n \tstruct rte_eth_dev_data *data = pf->dev_data;\n \tuint16_t i;\n \tuint32_t ret = I40E_SUCCESS;\n+\tstruct i40e_tx_queue *txq;\n \n \tfor (i = 0; i < data->nb_tx_queues; i++) {\n-\t\tret = i40e_tx_queue_init(data->tx_queues[i]);\n+\t\ttxq = data->tx_queues[i];\n+\t\tif (!txq || !txq->q_set)\n+\t\t\tcontinue;\n+\t\tret = i40e_tx_queue_init(txq);\n \t\tif (ret != I40E_SUCCESS)\n \t\t\tbreak;\n \t}\n@@ -3401,16 +3425,20 @@ i40e_vsi_tx_init(struct i40e_vsi *vsi)\n \n /* Initialize VSI for RX */\n static int\n-i40e_vsi_rx_init(struct i40e_vsi *vsi)\n+i40e_dev_rx_init(struct i40e_pf *pf)\n {\n-\tstruct i40e_pf *pf = I40E_VSI_TO_PF(vsi);\n \tstruct rte_eth_dev_data *data = pf->dev_data;\n \tint ret = I40E_SUCCESS;\n \tuint16_t i;\n+\tstruct i40e_rx_queue *rxq;\n \n \ti40e_pf_config_mq_rx(pf);\n \tfor (i = 0; i 
< data->nb_rx_queues; i++) {\n-\t\tret = i40e_rx_queue_init(data->rx_queues[i]);\n+\t\trxq = data->rx_queues[i];\n+\t\tif (!rxq || !rxq->q_set)\n+\t\t\tcontinue;\n+\n+\t\tret = i40e_rx_queue_init(rxq);\n \t\tif (ret != I40E_SUCCESS) {\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to do RX queue \"\n \t\t\t\t    \"initialization\");\n@@ -3421,20 +3449,19 @@ i40e_vsi_rx_init(struct i40e_vsi *vsi)\n \treturn ret;\n }\n \n-/* Initialize VSI */\n static int\n-i40e_vsi_init(struct i40e_vsi *vsi)\n+i40e_dev_rxtx_init(struct i40e_pf *pf)\n {\n \tint err;\n \n-\terr = i40e_vsi_tx_init(vsi);\n+\terr = i40e_dev_tx_init(pf);\n \tif (err) {\n-\t\tPMD_DRV_LOG(ERR, \"Failed to do vsi TX initialization\");\n+\t\tPMD_DRV_LOG(ERR, \"Failed to do TX initialization\");\n \t\treturn err;\n \t}\n-\terr = i40e_vsi_rx_init(vsi);\n+\terr = i40e_dev_rx_init(pf);\n \tif (err) {\n-\t\tPMD_DRV_LOG(ERR, \"Failed to do vsi RX initialization\");\n+\t\tPMD_DRV_LOG(ERR, \"Failed to do RX initialization\");\n \t\treturn err;\n \t}\n \n@@ -4806,6 +4833,26 @@ i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,\n \treturn ret;\n }\n \n+/* Calculate the maximum number of contiguous PF queues that are configured */\n+static int\n+i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)\n+{\n+\tstruct rte_eth_dev_data *data = pf->dev_data;\n+\tint i, num;\n+\tstruct i40e_rx_queue *rxq;\n+\n+\tnum = 0;\n+\tfor (i = 0; i < pf->lan_nb_qps; i++) {\n+\t\trxq = data->rx_queues[i];\n+\t\tif (rxq && rxq->q_set)\n+\t\t\tnum++;\n+\t\telse\n+\t\t\tbreak;\n+\t}\n+\n+\treturn num;\n+}\n+\n /* Configure RSS */\n static int\n i40e_pf_config_rss(struct i40e_pf *pf)\n@@ -4813,7 +4860,25 @@ i40e_pf_config_rss(struct i40e_pf *pf)\n \tstruct i40e_hw *hw = I40E_PF_TO_HW(pf);\n \tstruct rte_eth_rss_conf rss_conf;\n \tuint32_t i, lut = 0;\n-\tuint16_t j, num = i40e_align_floor(pf->dev_data->nb_rx_queues);\n+\tuint16_t j, num;\n+\n+\t/*\n+\t * If both VMDQ and RSS enabled, not all of PF queues are configured.\n+\t * It's necessary to calulate 
the actual PF queues that are configured.\n+\t */\n+\tif (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {\n+\t\tnum = i40e_pf_calc_configured_queues_num(pf);\n+\t\tnum = i40e_align_floor(num);\n+\t} else\n+\t\tnum = i40e_align_floor(pf->dev_data->nb_rx_queues);\n+\n+\tPMD_INIT_LOG(INFO, \"Max of contiguous %u PF queues are configured\",\n+\t\t\tnum);\n+\n+\tif (num == 0) {\n+\t\tPMD_INIT_LOG(ERR, \"No PF queues are configured to enable RSS\");\n+\t\treturn -ENOTSUP;\n+\t}\n \n \tfor (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {\n \t\tif (j == num)\n@@ -4911,18 +4976,21 @@ i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,\n static int\n i40e_pf_config_mq_rx(struct i40e_pf *pf)\n {\n-\tif (!pf->dev_data->sriov.active) {\n-\t\tswitch (pf->dev_data->dev_conf.rxmode.mq_mode) {\n-\t\tcase ETH_MQ_RX_RSS:\n-\t\t\ti40e_pf_config_rss(pf);\n-\t\t\tbreak;\n-\t\tdefault:\n-\t\t\ti40e_pf_disable_rss(pf);\n-\t\t\tbreak;\n-\t\t}\n+\tint ret = 0;\n+\tenum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;\n+\n+\tif (mq_mode & ETH_MQ_RX_DCB_FLAG) {\n+\t\tPMD_INIT_LOG(ERR, \"i40e doesn't support DCB yet\");\n+\t\treturn -ENOTSUP;\n \t}\n \n-\treturn 0;\n+\t/* RSS setup */\n+\tif (mq_mode & ETH_MQ_RX_RSS_FLAG)\n+\t\tret = i40e_pf_config_rss(pf);\n+\telse\n+\t\ti40e_pf_disable_rss(pf);\n+\n+\treturn ret;\n }\n \n static int\ndiff --git a/lib/librte_pmd_i40e/i40e_ethdev.h b/lib/librte_pmd_i40e/i40e_ethdev.h\nindex 69512cd..afa14aa 100644\n--- a/lib/librte_pmd_i40e/i40e_ethdev.h\n+++ b/lib/librte_pmd_i40e/i40e_ethdev.h\n@@ -355,7 +355,7 @@ struct i40e_adapter {\n \t};\n };\n \n-int i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on);\n+int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);\n int i40e_vsi_release(struct i40e_vsi *vsi);\n struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,\n \t\t\t\tenum i40e_vsi_type type,\n@@ -409,7 +409,7 @@ i40e_get_vsi_from_adapter(struct i40e_adapter *adapter)\n 
\t\treturn pf->main_vsi;\n \t}\n }\n-#define I40E_DEV_PRIVATE_TO_VSI(adapter) \\\n+#define I40E_DEV_PRIVATE_TO_MAIN_VSI(adapter) \\\n \ti40e_get_vsi_from_adapter((struct i40e_adapter *)adapter)\n \n /* I40E_VSI_TO */\ndiff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c\nindex 315a9c0..487591d 100644\n--- a/lib/librte_pmd_i40e/i40e_rxtx.c\n+++ b/lib/librte_pmd_i40e/i40e_rxtx.c\n@@ -1486,14 +1486,58 @@ i40e_xmit_pkts_simple(void *tx_queue,\n \treturn nb_tx;\n }\n \n+/*\n+ * Find the VSI the queue belongs to. 'queue_idx' is the queue index\n+ * application used, which assume having sequential ones. But from driver's\n+ * perspective, it's different. For example, q0 belongs to FDIR VSI, q1-q64\n+ * to MAIN VSI, , q65-96 to SRIOV VSIs, q97-128 to VMDQ VSIs. For application\n+ * running on host, q1-64 and q97-128 can be used, total 96 queues. They can\n+ * use queue_idx from 0 to 95 to access queues, while real queue would be\n+ * different. This function will do a queue mapping to find VSI the queue\n+ * belongs to.\n+ */\n+static struct i40e_vsi*\n+i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)\n+{\n+\t/* the queue in MAIN VSI range */\n+\tif (queue_idx < pf->main_vsi->nb_qps)\n+\t\treturn pf->main_vsi;\n+\n+\tqueue_idx -= pf->main_vsi->nb_qps;\n+\n+\t/* queue_idx is greater than VMDQ VSIs range */\n+\tif (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {\n+\t\tPMD_INIT_LOG(ERR, \"queue_idx out of range. 
VMDQ configured?\");\n+\t\treturn NULL;\n+\t}\n+\n+\treturn pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;\n+}\n+\n+static uint16_t\n+i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)\n+{\n+\t/* the queue in MAIN VSI range */\n+\tif (queue_idx < pf->main_vsi->nb_qps)\n+\t\treturn queue_idx;\n+\n+\t/* It's VMDQ queues */\n+\tqueue_idx -= pf->main_vsi->nb_qps;\n+\n+\tif (pf->nb_cfg_vmdq_vsi)\n+\t\treturn queue_idx % pf->vmdq_nb_qps;\n+\telse {\n+\t\tPMD_INIT_LOG(ERR, \"Fail to get queue offset\");\n+\t\treturn (uint16_t)(-1);\n+\t}\n+}\n+\n int\n i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n-\tstruct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);\n \tstruct i40e_rx_queue *rxq;\n \tint err = -1;\n-\tstruct i40e_hw *hw = I40E_VSI_TO_HW(vsi);\n-\tuint16_t q_base = vsi->base_queue;\n+\tstruct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n \tPMD_INIT_FUNC_TRACE();\n \n@@ -1511,7 +1555,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n \t\t/* Init the RX tail regieter. 
*/\n \t\tI40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n \n-\t\terr = i40e_switch_rx_queue(hw, rx_queue_id + q_base, TRUE);\n+\t\terr = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);\n \n \t\tif (err) {\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u on\",\n@@ -1528,16 +1572,18 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n int\n i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n {\n-\tstruct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);\n \tstruct i40e_rx_queue *rxq;\n \tint err;\n-\tstruct i40e_hw *hw = I40E_VSI_TO_HW(vsi);\n-\tuint16_t q_base = vsi->base_queue;\n+\tstruct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n \tif (rx_queue_id < dev->data->nb_rx_queues) {\n \t\trxq = dev->data->rx_queues[rx_queue_id];\n \n-\t\terr = i40e_switch_rx_queue(hw, rx_queue_id + q_base, FALSE);\n+\t\t/*\n+\t\t* rx_queue_id is queue id aplication refers to, while\n+\t\t* rxq->reg_idx is the real queue index.\n+\t\t*/\n+\t\terr = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);\n \n \t\tif (err) {\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u off\",\n@@ -1554,15 +1600,20 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n int\n i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n-\tstruct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);\n \tint err = -1;\n-\tstruct i40e_hw *hw = I40E_VSI_TO_HW(vsi);\n-\tuint16_t q_base = vsi->base_queue;\n+\tstruct i40e_tx_queue *txq;\n+\tstruct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n \tPMD_INIT_FUNC_TRACE();\n \n \tif (tx_queue_id < dev->data->nb_tx_queues) {\n-\t\terr = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);\n+\t\ttxq = dev->data->tx_queues[tx_queue_id];\n+\n+\t\t/*\n+\t\t* tx_queue_id is queue id aplication refers to, while\n+\t\t* rxq->reg_idx is the real queue index.\n+\t\t*/\n+\t\terr = i40e_switch_tx_queue(hw, txq->reg_idx, 
TRUE);\n \t\tif (err)\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u on\",\n \t\t\t\t    tx_queue_id);\n@@ -1574,16 +1625,18 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n int\n i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n {\n-\tstruct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);\n \tstruct i40e_tx_queue *txq;\n \tint err;\n-\tstruct i40e_hw *hw = I40E_VSI_TO_HW(vsi);\n-\tuint16_t q_base = vsi->base_queue;\n+\tstruct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n \tif (tx_queue_id < dev->data->nb_tx_queues) {\n \t\ttxq = dev->data->tx_queues[tx_queue_id];\n \n-\t\terr = i40e_switch_tx_queue(hw, tx_queue_id + q_base, FALSE);\n+\t\t/*\n+\t\t* tx_queue_id is queue id aplication refers to, while\n+\t\t* txq->reg_idx is the real queue index.\n+\t\t*/\n+\t\terr = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);\n \n \t\tif (err) {\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u of\",\n@@ -1606,14 +1659,23 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t\tconst struct rte_eth_rxconf *rx_conf,\n \t\t\tstruct rte_mempool *mp)\n {\n-\tstruct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);\n+\tstruct i40e_vsi *vsi;\n+\tstruct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n \tstruct i40e_rx_queue *rxq;\n \tconst struct rte_memzone *rz;\n \tuint32_t ring_size;\n \tuint16_t len;\n \tint use_def_burst_func = 1;\n \n-\tif (!vsi || queue_idx >= vsi->nb_qps) {\n+\tif (hw->mac.type == I40E_MAC_VF) {\n+\t\tstruct i40e_vf *vf =\n+\t\t\tI40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);\n+\t\tvsi = &vf->vsi;\n+\t} else\n+\t\tvsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);\n+\n+\tif (vsi == NULL) {\n \t\tPMD_DRV_LOG(ERR, \"VSI not available or queue \"\n \t\t\t    \"index exceeds the maximum\");\n \t\treturn I40E_ERR_PARAM;\n@@ -1646,7 +1708,12 @@ 
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \trxq->nb_rx_desc = nb_desc;\n \trxq->rx_free_thresh = rx_conf->rx_free_thresh;\n \trxq->queue_id = queue_idx;\n-\trxq->reg_idx = vsi->base_queue + queue_idx;\n+\tif (hw->mac.type == I40E_MAC_VF)\n+\t\trxq->reg_idx = queue_idx;\n+\telse /* PF device */\n+\t\trxq->reg_idx = vsi->base_queue +\n+\t\t\ti40e_get_queue_offset_by_qindex(pf, queue_idx);\n+\n \trxq->port_id = dev->data->port_id;\n \trxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?\n \t\t\t\t\t\t\t0 : ETHER_CRC_LEN);\n@@ -1804,13 +1871,22 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t\t\tunsigned int socket_id,\n \t\t\tconst struct rte_eth_txconf *tx_conf)\n {\n-\tstruct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);\n+\tstruct i40e_vsi *vsi;\n+\tstruct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n \tstruct i40e_tx_queue *txq;\n \tconst struct rte_memzone *tz;\n \tuint32_t ring_size;\n \tuint16_t tx_rs_thresh, tx_free_thresh;\n \n-\tif (!vsi || queue_idx >= vsi->nb_qps) {\n+\tif (hw->mac.type == I40E_MAC_VF) {\n+\t\tstruct i40e_vf *vf =\n+\t\t\tI40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);\n+\t\tvsi = &vf->vsi;\n+\t} else\n+\t\tvsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);\n+\n+\tif (vsi == NULL) {\n \t\tPMD_DRV_LOG(ERR, \"VSI is NULL, or queue index (%u) \"\n \t\t\t    \"exceeds the maximum\", queue_idx);\n \t\treturn I40E_ERR_PARAM;\n@@ -1934,7 +2010,12 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \ttxq->hthresh = tx_conf->tx_thresh.hthresh;\n \ttxq->wthresh = tx_conf->tx_thresh.wthresh;\n \ttxq->queue_id = queue_idx;\n-\ttxq->reg_idx = vsi->base_queue + queue_idx;\n+\tif (hw->mac.type == I40E_MAC_VF)\n+\t\ttxq->reg_idx = queue_idx;\n+\telse /* PF device */\n+\t\ttxq->reg_idx = vsi->base_queue +\n+\t\t\ti40e_get_queue_offset_by_qindex(pf, queue_idx);\n+\n \ttxq->port_id = dev->data->port_id;\n 
\ttxq->txq_flags = tx_conf->txq_flags;\n \ttxq->vsi = vsi;\n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "6/6"
    ]
}