get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update (replace) a patch.

GET /api/patches/71618/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 71618,
    "url": "http://patches.dpdk.org/api/patches/71618/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20200616124112.108014-10-ting.xu@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20200616124112.108014-10-ting.xu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20200616124112.108014-10-ting.xu@intel.com",
    "date": "2020-06-16T12:41:09",
    "name": "[v3,09/12] net/ice: add queue start and stop for DCF",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "80e08287fba5fb51b517258cab5c639c4bf18daf",
    "submitter": {
        "id": 1363,
        "url": "http://patches.dpdk.org/api/people/1363/?format=api",
        "name": "Xu, Ting",
        "email": "ting.xu@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "http://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20200616124112.108014-10-ting.xu@intel.com/mbox/",
    "series": [
        {
            "id": 10462,
            "url": "http://patches.dpdk.org/api/series/10462/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=10462",
            "date": "2020-06-16T12:41:00",
            "name": "enable DCF datapath configuration",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/10462/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/71618/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/71618/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 26E64A04A3;\n\tTue, 16 Jun 2020 06:43:57 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 9DAFA1BED1;\n\tTue, 16 Jun 2020 06:42:50 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by dpdk.org (Postfix) with ESMTP id 09DD11BECB\n for <dev@dpdk.org>; Tue, 16 Jun 2020 06:42:47 +0200 (CEST)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 15 Jun 2020 21:42:47 -0700",
            "from dpdk-xuting-main.sh.intel.com ([10.67.117.84])\n by fmsmga004.fm.intel.com with ESMTP; 15 Jun 2020 21:42:44 -0700"
        ],
        "IronPort-SDR": [
            "\n zAQhGC9eRCw0caFfGKRM4lvrqAAvsrj5Zu6kOt+GNotE9VIh4vghCKslBrUTpLMj8aU9jhEgC8\n /NkSalNY5aSw==",
            "\n LugjxVIwC5+B2pOVLCqubTkdEhPxkZZ9iUdpvQcZDE5KrVfHw38ylCVFRYVrqYZJhDF19A2fNB\n vD/qkY/gSLdQ=="
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.73,517,1583222400\"; d=\"scan'208\";a=\"298764759\"",
        "From": "Ting Xu <ting.xu@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "qi.z.zhang@intel.com, qiming.yang@intel.com, jingjing.wu@intel.com,\n beilei.xing@intel.com, marko.kovacevic@intel.com, john.mcnamara@intel.com,\n xiaolong.ye@intel.com",
        "Date": "Tue, 16 Jun 2020 12:41:09 +0000",
        "Message-Id": "<20200616124112.108014-10-ting.xu@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20200616124112.108014-1-ting.xu@intel.com>",
        "References": "<20200616124112.108014-1-ting.xu@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 09/12] net/ice: add queue start and stop for\n\tDCF",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Qi Zhang <qi.z.zhang@intel.com>\n\nAdd queue start and stop in DCF. Support queue enable and disable\nthrough virtual channel. Add support for Rx queue mbufs allocation\nand queue reset.\n\nSigned-off-by: Qi Zhang <qi.z.zhang@intel.com>\nSigned-off-by: Ting Xu <ting.xu@intel.com>\n---\n drivers/net/ice/ice_dcf.c        |  57 ++++++\n drivers/net/ice/ice_dcf.h        |   3 +-\n drivers/net/ice/ice_dcf_ethdev.c | 320 +++++++++++++++++++++++++++++++\n 3 files changed, 379 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c\nindex 8869e0d1c..f18c0f16a 100644\n--- a/drivers/net/ice/ice_dcf.c\n+++ b/drivers/net/ice/ice_dcf.c\n@@ -936,3 +936,60 @@ ice_dcf_config_irq_map(struct ice_dcf_hw *hw)\n \trte_free(map_info);\n \treturn err;\n }\n+\n+int\n+ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)\n+{\n+\tstruct virtchnl_queue_select queue_select;\n+\tstruct dcf_virtchnl_cmd args;\n+\tint err;\n+\n+\tmemset(&queue_select, 0, sizeof(queue_select));\n+\tqueue_select.vsi_id = hw->vsi_res->vsi_id;\n+\tif (rx)\n+\t\tqueue_select.rx_queues |= 1 << qid;\n+\telse\n+\t\tqueue_select.tx_queues |= 1 << qid;\n+\n+\tmemset(&args, 0, sizeof(args));\n+\tif (on)\n+\t\targs.v_op = VIRTCHNL_OP_ENABLE_QUEUES;\n+\telse\n+\t\targs.v_op = VIRTCHNL_OP_DISABLE_QUEUES;\n+\n+\targs.req_msg = (u8 *)&queue_select;\n+\targs.req_msglen = sizeof(queue_select);\n+\n+\terr = ice_dcf_execute_virtchnl_cmd(hw, &args);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of %s\",\n+\t\t\t    on ? 
\"OP_ENABLE_QUEUES\" : \"OP_DISABLE_QUEUES\");\n+\n+\treturn err;\n+}\n+\n+int\n+ice_dcf_disable_queues(struct ice_dcf_hw *hw)\n+{\n+\tstruct virtchnl_queue_select queue_select;\n+\tstruct dcf_virtchnl_cmd args;\n+\tint err;\n+\n+\tmemset(&queue_select, 0, sizeof(queue_select));\n+\tqueue_select.vsi_id = hw->vsi_res->vsi_id;\n+\n+\tqueue_select.rx_queues = BIT(hw->eth_dev->data->nb_rx_queues) - 1;\n+\tqueue_select.tx_queues = BIT(hw->eth_dev->data->nb_tx_queues) - 1;\n+\n+\tmemset(&args, 0, sizeof(args));\n+\targs.v_op = VIRTCHNL_OP_DISABLE_QUEUES;\n+\targs.req_msg = (u8 *)&queue_select;\n+\targs.req_msglen = sizeof(queue_select);\n+\n+\terr = ice_dcf_execute_virtchnl_cmd(hw, &args);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR,\n+\t\t\t    \"Failed to execute command of OP_DISABLE_QUEUES\");\n+\n+\treturn err;\n+}\ndiff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h\nindex 9470d1df7..68e1661c0 100644\n--- a/drivers/net/ice/ice_dcf.h\n+++ b/drivers/net/ice/ice_dcf.h\n@@ -70,5 +70,6 @@ void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);\n int ice_dcf_init_rss(struct ice_dcf_hw *hw);\n int ice_dcf_configure_queues(struct ice_dcf_hw *hw);\n int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);\n-\n+int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on);\n+int ice_dcf_disable_queues(struct ice_dcf_hw *hw);\n #endif /* _ICE_DCF_H_ */\ndiff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c\nindex 333fee037..239426b09 100644\n--- a/drivers/net/ice/ice_dcf_ethdev.c\n+++ b/drivers/net/ice/ice_dcf_ethdev.c\n@@ -227,6 +227,270 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,\n \treturn 0;\n }\n \n+static int\n+alloc_rxq_mbufs(struct ice_rx_queue *rxq)\n+{\n+\tvolatile union ice_32b_rx_flex_desc *rxd;\n+\tstruct rte_mbuf *mbuf = NULL;\n+\tuint64_t dma_addr;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tmbuf = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(!mbuf)) 
{\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate mbuf for RX\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\trte_mbuf_refcnt_set(mbuf, 1);\n+\t\tmbuf->next = NULL;\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->nb_segs = 1;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\n+\t\trxd = &rxq->rx_ring[i];\n+\t\trxd->read.pkt_addr = dma_addr;\n+\t\trxd->read.hdr_addr = 0;\n+\t\trxd->read.rsvd1 = 0;\n+\t\trxd->read.rsvd2 = 0;\n+\n+\t\trxq->sw_ring[i].mbuf = (void *)mbuf;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct ice_dcf_adapter *ad = dev->data->dev_private;\n+\tstruct iavf_hw *hw = &ad->real_hw.avf;\n+\tstruct ice_rx_queue *rxq;\n+\tint err = 0;\n+\n+\tif (rx_queue_id >= dev->data->nb_rx_queues)\n+\t\treturn -EINVAL;\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\terr = alloc_rxq_mbufs(rxq);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX queue mbuf\");\n+\t\treturn err;\n+\t}\n+\n+\trte_wmb();\n+\n+\t/* Init the RX tail register. 
*/\n+\tIAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n+\tIAVF_WRITE_FLUSH(hw);\n+\n+\t/* Ready to switch the queue on */\n+\terr = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u on\",\n+\t\t\t    rx_queue_id);\n+\telse\n+\t\tdev->data->rx_queue_state[rx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\n+\treturn err;\n+}\n+\n+static inline void\n+reset_rx_queue(struct ice_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (!rxq)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc + ICE_RX_MAX_BURST;\n+\n+\tfor (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\n+\tfor (i = 0; i < ICE_RX_MAX_BURST; i++)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;\n+\n+\t/* for rx bulk */\n+\trxq->rx_nb_avail = 0;\n+\trxq->rx_next_avail = 0;\n+\trxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);\n+\n+\trxq->rx_tail = 0;\n+\trxq->nb_rx_hold = 0;\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+}\n+\n+static inline void\n+reset_tx_queue(struct ice_tx_queue *txq)\n+{\n+\tstruct ice_tx_entry *txe;\n+\tuint32_t i, size;\n+\tuint16_t prev;\n+\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\ttxe = txq->sw_ring;\n+\tsize = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)txq->tx_ring)[i] = 0;\n+\n+\tprev = (uint16_t)(txq->nb_tx_desc - 1);\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\ttxq->tx_ring[i].cmd_type_offset_bsz =\n+\t\t\trte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);\n+\t\ttxe[i].mbuf =  NULL;\n+\t\ttxe[i].last_id = i;\n+\t\ttxe[prev].next_id = i;\n+\t\tprev = i;\n+\t}\n+\n+\ttxq->tx_tail = 0;\n+\ttxq->nb_tx_used = 0;\n+\n+\ttxq->last_desc_cleaned = txq->nb_tx_desc - 1;\n+\ttxq->nb_tx_free = txq->nb_tx_desc - 
1;\n+\n+\ttxq->tx_next_dd = txq->tx_rs_thresh - 1;\n+\ttxq->tx_next_rs = txq->tx_rs_thresh - 1;\n+}\n+\n+static int\n+ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct ice_dcf_adapter *ad = dev->data->dev_private;\n+\tstruct ice_dcf_hw *hw = &ad->real_hw;\n+\tstruct ice_rx_queue *rxq;\n+\tint err;\n+\n+\tif (rx_queue_id >= dev->data->nb_rx_queues)\n+\t\treturn -EINVAL;\n+\n+\terr = ice_dcf_switch_queue(hw, rx_queue_id, true, false);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u off\",\n+\t\t\t    rx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\trxq->rx_rel_mbufs(rxq);\n+\treset_rx_queue(rxq);\n+\tdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct ice_dcf_adapter *ad = dev->data->dev_private;\n+\tstruct iavf_hw *hw = &ad->real_hw.avf;\n+\tstruct ice_tx_queue *txq;\n+\tint err = 0;\n+\n+\tif (tx_queue_id >= dev->data->nb_tx_queues)\n+\t\treturn -EINVAL;\n+\n+\ttxq = dev->data->tx_queues[tx_queue_id];\n+\n+\t/* Init the RX tail register. 
*/\n+\ttxq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);\n+\tIAVF_PCI_REG_WRITE(txq->qtx_tail, 0);\n+\tIAVF_WRITE_FLUSH(hw);\n+\n+\t/* Ready to switch the queue on */\n+\terr = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);\n+\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u on\",\n+\t\t\t    tx_queue_id);\n+\telse\n+\t\tdev->data->tx_queue_state[tx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\n+\treturn err;\n+}\n+\n+static int\n+ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct ice_dcf_adapter *ad = dev->data->dev_private;\n+\tstruct ice_dcf_hw *hw = &ad->real_hw;\n+\tstruct ice_tx_queue *txq;\n+\tint err;\n+\n+\tif (tx_queue_id >= dev->data->nb_tx_queues)\n+\t\treturn -EINVAL;\n+\n+\terr = ice_dcf_switch_queue(hw, tx_queue_id, false, false);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u off\",\n+\t\t\t    tx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\ttxq = dev->data->tx_queues[tx_queue_id];\n+\ttxq->tx_rel_mbufs(txq);\n+\treset_tx_queue(txq);\n+\tdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_dcf_start_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct ice_rx_queue *rxq;\n+\tstruct ice_tx_queue *txq;\n+\tint nb_rxq = 0;\n+\tint nb_txq, i;\n+\n+\tfor (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {\n+\t\ttxq = dev->data->tx_queues[nb_txq];\n+\t\tif (txq->tx_deferred_start)\n+\t\t\tcontinue;\n+\t\tif (ice_dcf_tx_queue_start(dev, nb_txq) != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to start queue %u\", nb_txq);\n+\t\t\tgoto tx_err;\n+\t\t}\n+\t}\n+\n+\tfor (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {\n+\t\trxq = dev->data->rx_queues[nb_rxq];\n+\t\tif (rxq->rx_deferred_start)\n+\t\t\tcontinue;\n+\t\tif (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to start queue %u\", nb_rxq);\n+\t\t\tgoto rx_err;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+\n+\t/* stop the 
started queues if failed to start all queues */\n+rx_err:\n+\tfor (i = 0; i < nb_rxq; i++)\n+\t\tice_dcf_rx_queue_stop(dev, i);\n+tx_err:\n+\tfor (i = 0; i < nb_txq; i++)\n+\t\tice_dcf_tx_queue_stop(dev, i);\n+\n+\treturn -1;\n+}\n+\n static int\n ice_dcf_dev_start(struct rte_eth_dev *dev)\n {\n@@ -267,20 +531,72 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)\n \t\treturn ret;\n \t}\n \n+\tif (dev->data->dev_conf.intr_conf.rxq != 0) {\n+\t\trte_intr_disable(intr_handle);\n+\t\trte_intr_enable(intr_handle);\n+\t}\n+\n+\tret = ice_dcf_start_queues(dev);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to enable queues\");\n+\t\treturn ret;\n+\t}\n+\n \tdev->data->dev_link.link_status = ETH_LINK_UP;\n \n \treturn 0;\n }\n \n+static void\n+ice_dcf_stop_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct ice_dcf_adapter *ad = dev->data->dev_private;\n+\tstruct ice_dcf_hw *hw = &ad->real_hw;\n+\tstruct ice_rx_queue *rxq;\n+\tstruct ice_tx_queue *txq;\n+\tint ret, i;\n+\n+\t/* Stop All queues */\n+\tret = ice_dcf_disable_queues(hw);\n+\tif (ret)\n+\t\tPMD_DRV_LOG(WARNING, \"Fail to stop queues\");\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\tif (!txq)\n+\t\t\tcontinue;\n+\t\ttxq->tx_rel_mbufs(txq);\n+\t\treset_tx_queue(txq);\n+\t\tdev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;\n+\t}\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tif (!rxq)\n+\t\t\tcontinue;\n+\t\trxq->rx_rel_mbufs(rxq);\n+\t\treset_rx_queue(rxq);\n+\t\tdev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;\n+\t}\n+}\n+\n static void\n ice_dcf_dev_stop(struct rte_eth_dev *dev)\n {\n \tstruct ice_dcf_adapter *dcf_ad = dev->data->dev_private;\n+\tstruct rte_intr_handle *intr_handle = dev->intr_handle;\n \tstruct ice_adapter *ad = &dcf_ad->parent;\n \n \tif (ad->pf.adapter_stopped == 1)\n \t\treturn;\n \n+\tice_dcf_stop_queues(dev);\n+\n+\trte_intr_efd_disable(intr_handle);\n+\tif 
(intr_handle->intr_vec) {\n+\t\trte_free(intr_handle->intr_vec);\n+\t\tintr_handle->intr_vec = NULL;\n+\t}\n+\n \tdev->data->dev_link.link_status = ETH_LINK_DOWN;\n \tad->pf.adapter_stopped = 1;\n }\n@@ -477,6 +793,10 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = {\n \t.tx_queue_setup          = ice_tx_queue_setup,\n \t.rx_queue_release        = ice_rx_queue_release,\n \t.tx_queue_release        = ice_tx_queue_release,\n+\t.rx_queue_start          = ice_dcf_rx_queue_start,\n+\t.tx_queue_start          = ice_dcf_tx_queue_start,\n+\t.rx_queue_stop           = ice_dcf_rx_queue_stop,\n+\t.tx_queue_stop           = ice_dcf_tx_queue_stop,\n \t.link_update             = ice_dcf_link_update,\n \t.stats_get               = ice_dcf_stats_get,\n \t.stats_reset             = ice_dcf_stats_reset,\n",
    "prefixes": [
        "v3",
        "09/12"
    ]
}