get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
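
These methods can be exercised with any HTTP client. As a minimal sketch (assuming Python 3 with the third-party requests package, and a Patchwork API token for the write methods; neither is part of this page), the patch shown below could be read and updated roughly like this. The plain GET needs no authentication and returns the JSON document that follows.

    # Minimal sketch, not the canonical client: assumes the `requests` package
    # and a placeholder API token for the write methods.
    import requests

    BASE = "https://patches.dpdk.org/api"
    PATCH_ID = 48984

    # get: show the patch (anonymous access is enough for reads).
    resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"], patch["check"])

    # patch: partial update of selected fields (requires a maintainer token;
    # "<your-api-token>" is a placeholder, not a real credential).
    # requests.patch(
    #     f"{BASE}/patches/{PATCH_ID}/",
    #     headers={"Authorization": "Token <your-api-token>"},
    #     json={"state": "superseded"},
    # )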

GET /api/patches/48984/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 48984,
    "url": "https://patches.dpdk.org/api/patches/48984/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1545032259-77179-17-git-send-email-wenzhuo.lu@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1545032259-77179-17-git-send-email-wenzhuo.lu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1545032259-77179-17-git-send-email-wenzhuo.lu@intel.com",
    "date": "2018-12-17T07:37:24",
    "name": "[v5,16/31] net/ice: support device and queue ops",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "afbfdd6b718e806491f99fe9be29ccee65f9a586",
    "submitter": {
        "id": 258,
        "url": "https://patches.dpdk.org/api/people/258/?format=api",
        "name": "Wenzhuo Lu",
        "email": "wenzhuo.lu@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "https://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1545032259-77179-17-git-send-email-wenzhuo.lu@intel.com/mbox/",
    "series": [
        {
            "id": 2824,
            "url": "https://patches.dpdk.org/api/series/2824/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=2824",
            "date": "2018-12-17T07:37:08",
            "name": "A new net PMD - ICE",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/2824/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/48984/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/48984/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 8DD721B904;\n\tMon, 17 Dec 2018 08:33:25 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby dpdk.org (Postfix) with ESMTP id 82A0A1B7E7\n\tfor <dev@dpdk.org>; Mon, 17 Dec 2018 08:33:14 +0100 (CET)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t16 Dec 2018 23:33:13 -0800",
            "from dpdk26.sh.intel.com ([10.67.110.164])\n\tby orsmga002.jf.intel.com with ESMTP; 16 Dec 2018 23:33:12 -0800"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.56,364,1539673200\"; d=\"scan'208\";a=\"118899252\"",
        "From": "Wenzhuo Lu <wenzhuo.lu@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Wenzhuo Lu <wenzhuo.lu@intel.com>, Qiming Yang <qiming.yang@intel.com>, \n\tXiaoyun Li <xiaoyun.li@intel.com>, Jingjing Wu <jingjing.wu@intel.com>",
        "Date": "Mon, 17 Dec 2018 15:37:24 +0800",
        "Message-Id": "<1545032259-77179-17-git-send-email-wenzhuo.lu@intel.com>",
        "X-Mailer": "git-send-email 1.9.3",
        "In-Reply-To": "<1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>",
        "References": "<1542956179-80951-1-git-send-email-wenzhuo.lu@intel.com>\n\t<1545032259-77179-1-git-send-email-wenzhuo.lu@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v5 16/31] net/ice: support device and queue ops",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Normally when starting/stopping the device the queue\nshould be started and stopped. Support them both in\nthis patch.\n\nBelow ops are added,\ndev_configure\ndev_start\ndev_stop\ndev_close\ndev_reset\nrx_queue_start\nrx_queue_stop\ntx_queue_start\ntx_queue_stop\nrx_queue_setup\nrx_queue_release\ntx_queue_setup\ntx_queue_release\n\nSigned-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>\nSigned-off-by: Qiming Yang <qiming.yang@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Jingjing Wu <jingjing.wu@intel.com>\n---\n config/common_base               |   2 +\n doc/guides/nics/features/ice.ini |   1 +\n doc/guides/nics/ice.rst          |   8 +\n drivers/net/ice/Makefile         |   3 +-\n drivers/net/ice/ice_ethdev.c     | 198 ++++++++-\n drivers/net/ice/ice_lan_rxtx.c   | 927 +++++++++++++++++++++++++++++++++++++++\n drivers/net/ice/ice_rxtx.h       |  20 +\n drivers/net/ice/meson.build      |   3 +-\n 8 files changed, 1159 insertions(+), 3 deletions(-)\n create mode 100644 drivers/net/ice/ice_lan_rxtx.c",
    "diff": "diff --git a/config/common_base b/config/common_base\nindex 872f440..a342760 100644\n--- a/config/common_base\n+++ b/config/common_base\n@@ -303,6 +303,8 @@ CONFIG_RTE_LIBRTE_ICE_PMD=y\n CONFIG_RTE_LIBRTE_ICE_DEBUG_RX=n\n CONFIG_RTE_LIBRTE_ICE_DEBUG_TX=n\n CONFIG_RTE_LIBRTE_ICE_DEBUG_TX_FREE=n\n+CONFIG_RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC=y\n+CONFIG_RTE_LIBRTE_ICE_16BYTE_RX_DESC=n\n \n # Compile burst-oriented AVF PMD driver\n #\ndiff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini\nindex 085e848..a43a9cd 100644\n--- a/doc/guides/nics/features/ice.ini\n+++ b/doc/guides/nics/features/ice.ini\n@@ -4,6 +4,7 @@\n ; Refer to default.ini for the full list of available PMD features.\n ;\n [Features]\n+Queue start/stop     = Y\n BSD nic_uio          = Y\n Linux UIO            = Y\n Linux VFIO           = Y\ndiff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst\nindex 946ed04..96a594f 100644\n--- a/doc/guides/nics/ice.rst\n+++ b/doc/guides/nics/ice.rst\n@@ -38,6 +38,14 @@ Please note that enabling debugging options may affect system performance.\n \n   Toggle display of generic debugging messages.\n \n+- ``CONFIG_RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC`` (default ``y``)\n+\n+  Toggle bulk allocation for RX.\n+\n+- ``CONFIG_RTE_LIBRTE_ICE_16BYTE_RX_DESC`` (default ``n``)\n+\n+  Toggle to use a 16-byte RX descriptor, by default the RX descriptor is 32 byte.\n+\n Runtime Config Options\n ~~~~~~~~~~~~~~~~~~~~~~\n \ndiff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile\nindex 70f23e3..ff93800 100644\n--- a/drivers/net/ice/Makefile\n+++ b/drivers/net/ice/Makefile\n@@ -11,7 +11,7 @@ LIB = librte_pmd_ice.a\n CFLAGS += -O3\n CFLAGS += $(WERROR_FLAGS)\n \n-LDLIBS += -lrte_eal -lrte_ethdev -lrte_kvargs -lrte_bus_pci\n+LDLIBS += -lrte_eal -lrte_ethdev -lrte_kvargs -lrte_bus_pci -lrte_mempool\n \n EXPORT_MAP := rte_pmd_ice_version.map\n \n@@ -50,5 +50,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch.c\n SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_nvm.c\n \n SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_ethdev.c\n+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_lan_rxtx.c\n \n include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c\nindex 4f0c819..2c86b3d 100644\n--- a/drivers/net/ice/ice_ethdev.c\n+++ b/drivers/net/ice/ice_ethdev.c\n@@ -14,6 +14,12 @@\n int ice_logtype_init;\n int ice_logtype_driver;\n \n+static int ice_dev_configure(struct rte_eth_dev *dev);\n+static int ice_dev_start(struct rte_eth_dev *dev);\n+static void ice_dev_stop(struct rte_eth_dev *dev);\n+static void ice_dev_close(struct rte_eth_dev *dev);\n+static int ice_dev_reset(struct rte_eth_dev *dev);\n+\n static const struct rte_pci_id pci_id_ice_map[] = {\n \t{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },\n \t{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },\n@@ -22,7 +28,19 @@\n };\n \n static const struct eth_dev_ops ice_eth_dev_ops = {\n-\t.dev_configure                = NULL,\n+\t.dev_configure                = ice_dev_configure,\n+\t.dev_start                    = ice_dev_start,\n+\t.dev_stop                     = ice_dev_stop,\n+\t.dev_close                    = ice_dev_close,\n+\t.dev_reset                    = ice_dev_reset,\n+\t.rx_queue_start               = ice_rx_queue_start,\n+\t.rx_queue_stop                = ice_rx_queue_stop,\n+\t.tx_queue_start               = ice_tx_queue_start,\n+\t.tx_queue_stop                = ice_tx_queue_stop,\n+\t.rx_queue_setup               = 
ice_rx_queue_setup,\n+\t.rx_queue_release             = ice_rx_queue_release,\n+\t.tx_queue_setup               = ice_tx_queue_setup,\n+\t.tx_queue_release             = ice_tx_queue_release,\n };\n \n static void\n@@ -560,11 +578,41 @@\n }\n \n static void\n+ice_dev_stop(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_eth_dev_data *data = dev->data;\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tuint16_t i;\n+\n+\t/* avoid stopping again */\n+\tif (pf->adapter_stopped)\n+\t\treturn;\n+\n+\t/* stop and clear all Rx queues */\n+\tfor (i = 0; i < data->nb_rx_queues; i++)\n+\t\tice_rx_queue_stop(dev, i);\n+\n+\t/* stop and clear all Tx queues */\n+\tfor (i = 0; i < data->nb_tx_queues; i++)\n+\t\tice_tx_queue_stop(dev, i);\n+\n+\t/* Clear all queues and release mbufs */\n+\tice_clear_queues(dev);\n+\n+\tpf->adapter_stopped = true;\n+}\n+\n+static void\n ice_dev_close(struct rte_eth_dev *dev)\n {\n \tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n \tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n \n+\tice_dev_stop(dev);\n+\n+\t/* release all queue resource */\n+\tice_free_queues(dev);\n+\n \tice_res_pool_destroy(&pf->msix_pool);\n \tice_release_vsi(pf->main_vsi);\n \n@@ -595,6 +643,154 @@\n }\n \n static int\n+ice_dev_configure(__rte_unused struct rte_eth_dev *dev)\n+{\n+\tstruct ice_adapter *ad =\n+\t\tICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);\n+\n+\t/* Initialize to TRUE. If any of Rx queues doesn't meet the\n+\t * bulk allocation or vector Rx preconditions we will reset it.\n+\t */\n+\tad->rx_bulk_alloc_allowed = true;\n+\tad->tx_simple_allowed = true;\n+\n+\treturn 0;\n+}\n+\n+static int ice_init_rss(struct ice_pf *pf)\n+{\n+\tstruct ice_hw *hw = ICE_PF_TO_HW(pf);\n+\tstruct ice_vsi *vsi = pf->main_vsi;\n+\tstruct rte_eth_dev *dev = pf->adapter->eth_dev;\n+\tstruct rte_eth_rss_conf *rss_conf;\n+\tstruct ice_aqc_get_set_rss_keys key;\n+\tuint16_t i, nb_q;\n+\tint ret = 0;\n+\n+\trss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;\n+\tnb_q = dev->data->nb_rx_queues;\n+\tvsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;\n+\tvsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;\n+\n+\tif (!vsi->rss_key)\n+\t\tvsi->rss_key = rte_zmalloc(NULL,\n+\t\t\t\t\t   vsi->rss_key_size, 0);\n+\tif (!vsi->rss_lut)\n+\t\tvsi->rss_lut = rte_zmalloc(NULL,\n+\t\t\t\t\t   vsi->rss_lut_size, 0);\n+\n+\t/* configure RSS key */\n+\tif (!rss_conf->rss_key) {\n+\t\t/* Calculate the default hash key */\n+\t\tfor (i = 0; i <= vsi->rss_key_size; i++)\n+\t\t\tvsi->rss_key[i] = (uint8_t)rte_rand();\n+\t} else {\n+\t\trte_memcpy(vsi->rss_key, rss_conf->rss_key,\n+\t\t\t   RTE_MIN(rss_conf->rss_key_len,\n+\t\t\t\t   vsi->rss_key_size));\n+\t}\n+\trte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);\n+\tret = ice_aq_set_rss_key(hw, vsi->idx, &key);\n+\tif (ret)\n+\t\treturn -EINVAL;\n+\n+\t/* init RSS LUT table */\n+\tfor (i = 0; i < vsi->rss_lut_size; i++)\n+\t\tvsi->rss_lut[i] = i % nb_q;\n+\n+\tret = ice_aq_set_rss_lut(hw, vsi->idx,\n+\t\t\t\t ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,\n+\t\t\t\t vsi->rss_lut, vsi->rss_lut_size);\n+\tif (ret)\n+\t\treturn -EINVAL;\n+\n+\treturn 0;\n+}\n+\n+static int\n+ice_dev_start(struct rte_eth_dev *dev)\n+{\n+\tstruct rte_eth_dev_data *data = dev->data;\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tuint16_t nb_rxq = 0;\n+\tuint16_t nb_txq, i;\n+\tint ret;\n+\n+\t/* program Tx queues' 
context in hardware */\n+\tfor (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {\n+\t\tret = ice_tx_queue_start(dev, nb_txq);\n+\t\tif (ret) {\n+\t\t\tPMD_DRV_LOG(ERR, \"fail to start Tx queue %u\", nb_txq);\n+\t\t\tgoto tx_err;\n+\t\t}\n+\t}\n+\n+\t/* program Rx queues' context in hardware*/\n+\tfor (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {\n+\t\tret = ice_rx_queue_start(dev, nb_rxq);\n+\t\tif (ret) {\n+\t\t\tPMD_DRV_LOG(ERR, \"fail to start Rx queue %u\", nb_rxq);\n+\t\t\tgoto rx_err;\n+\t\t}\n+\t}\n+\n+\tret = ice_init_rss(pf);\n+\tif (ret) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to enable rss for PF\");\n+\t\tgoto rx_err;\n+\t}\n+\n+\tret = ice_aq_set_event_mask(hw, hw->port_info->lport,\n+\t\t\t\t    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |\n+\t\t\t\t     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |\n+\t\t\t\t     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |\n+\t\t\t\t     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |\n+\t\t\t\t     ICE_AQ_LINK_EVENT_AN_COMPLETED |\n+\t\t\t\t     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),\n+\t\t\t\t     NULL);\n+\tif (ret != ICE_SUCCESS)\n+\t\tPMD_DRV_LOG(WARNING, \"Fail to set phy mask\");\n+\n+\tpf->adapter_stopped = false;\n+\n+\treturn 0;\n+\n+\t/* stop the started queues if failed to start all queues */\n+rx_err:\n+\tfor (i = 0; i < nb_rxq; i++)\n+\t\tice_rx_queue_stop(dev, i);\n+tx_err:\n+\tfor (i = 0; i < nb_txq; i++)\n+\t\tice_tx_queue_stop(dev, i);\n+\n+\treturn -EIO;\n+}\n+\n+static int\n+ice_dev_reset(struct rte_eth_dev *dev)\n+{\n+\tint ret;\n+\n+\tif (dev->data->sriov.active)\n+\t\treturn -ENOTSUP;\n+\n+\tret = ice_dev_uninit(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"failed to uninit device, status = %d\", ret);\n+\t\treturn -ENXIO;\n+\t}\n+\n+\tret = ice_dev_init(dev);\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"failed to init device, status = %d\", ret);\n+\t\treturn -ENXIO;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n \t      struct rte_pci_device *pci_dev)\n {\ndiff --git a/drivers/net/ice/ice_lan_rxtx.c b/drivers/net/ice/ice_lan_rxtx.c\nnew file mode 100644\nindex 0000000..5c2301a\n--- /dev/null\n+++ b/drivers/net/ice/ice_lan_rxtx.c\n@@ -0,0 +1,927 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Intel Corporation\n+ */\n+\n+#include <rte_ethdev_driver.h>\n+#include <rte_net.h>\n+\n+#include \"ice_rxtx.h\"\n+\n+#define ICE_TD_CMD ICE_TX_DESC_CMD_EOP\n+\n+#define ICE_TX_CKSUM_OFFLOAD_MASK (\t\t \\\n+\t\tPKT_TX_IP_CKSUM |\t\t \\\n+\t\tPKT_TX_L4_MASK |\t\t \\\n+\t\tPKT_TX_TCP_SEG |\t\t \\\n+\t\tPKT_TX_OUTER_IP_CKSUM)\n+\n+#define ICE_RX_ERR_BITS 0x3f\n+\n+static enum ice_status\n+ice_program_hw_rx_queue(struct ice_rx_queue *rxq)\n+{\n+\tstruct ice_vsi *vsi = rxq->vsi;\n+\tstruct ice_hw *hw = ICE_VSI_TO_HW(vsi);\n+\tstruct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);\n+\tstruct ice_rlan_ctx rx_ctx;\n+\tenum ice_status err;\n+\tuint16_t buf_size, len;\n+\tstruct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;\n+\tuint32_t regval;\n+\n+\t/**\n+\t * The kernel driver uses flex descriptor. It sets the register\n+\t * to flex descriptor mode.\n+\t * DPDK uses legacy descriptor. It should set the register back\n+\t * to the default value, then uses legacy descriptor mode.\n+\t */\n+\tregval = (0x01 << QRXFLXP_CNTXT_RXDID_PRIO_S) &\n+\t\t QRXFLXP_CNTXT_RXDID_PRIO_M;\n+\tICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);\n+\n+\t/* Set buffer size as the head split is disabled. 
*/\n+\tbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -\n+\t\t\t      RTE_PKTMBUF_HEADROOM);\n+\trxq->rx_hdr_len = 0;\n+\trxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));\n+\tlen = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;\n+\trxq->max_pkt_len = RTE_MIN(len,\n+\t\t\t\t   dev->data->dev_conf.rxmode.max_rx_pkt_len);\n+\n+\tif (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {\n+\t\tif (rxq->max_pkt_len <= ETHER_MAX_LEN ||\n+\t\t    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {\n+\t\t\tPMD_DRV_LOG(ERR, \"maximum packet length must \"\n+\t\t\t\t    \"be larger than %u and smaller than %u,\"\n+\t\t\t\t    \"as jumbo frame is enabled\",\n+\t\t\t\t    (uint32_t)ETHER_MAX_LEN,\n+\t\t\t\t    (uint32_t)ICE_FRAME_SIZE_MAX);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t} else {\n+\t\tif (rxq->max_pkt_len < ETHER_MIN_LEN ||\n+\t\t    rxq->max_pkt_len > ETHER_MAX_LEN) {\n+\t\t\tPMD_DRV_LOG(ERR, \"maximum packet length must be \"\n+\t\t\t\t    \"larger than %u and smaller than %u, \"\n+\t\t\t\t    \"as jumbo frame is disabled\",\n+\t\t\t\t    (uint32_t)ETHER_MIN_LEN,\n+\t\t\t\t    (uint32_t)ETHER_MAX_LEN);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t}\n+\n+\tmemset(&rx_ctx, 0, sizeof(rx_ctx));\n+\n+\trx_ctx.base = rxq->rx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;\n+\trx_ctx.qlen = rxq->nb_rx_desc;\n+\trx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;\n+\trx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;\n+\trx_ctx.dtype = 0; /* No Header Split mode */\n+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n+\trx_ctx.dsize = 1; /* 32B descriptors */\n+#endif\n+\trx_ctx.rxmax = rxq->max_pkt_len;\n+\t/* TPH: Transaction Layer Packet (TLP) processing hints */\n+\trx_ctx.tphrdesc_ena = 1;\n+\trx_ctx.tphwdesc_ena = 1;\n+\trx_ctx.tphdata_ena = 1;\n+\trx_ctx.tphhead_ena = 1;\n+\t/* Low Receive Queue Threshold defined in 64 descriptors units.\n+\t * When the number of free descriptors goes below the lrxqthresh,\n+\t * an immediate interrupt is triggered.\n+\t */\n+\trx_ctx.lrxqthresh = 2;\n+\t/*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/\n+\trx_ctx.l2tsel = 1;\n+\trx_ctx.showiv = 0;\n+\n+\terr = ice_clear_rxq_ctx(hw, rxq->reg_idx);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to clear Lan Rx queue (%u) context\",\n+\t\t\t    rxq->queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\terr = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to write Lan Rx queue (%u) context\",\n+\t\t\t    rxq->queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -\n+\t\t\t      RTE_PKTMBUF_HEADROOM);\n+\n+\t/* Check if scattered RX needs to be used. 
*/\n+\tif ((rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size)\n+\t\tdev->data->scattered_rx = 1;\n+\n+\trxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);\n+\n+\t/* Init the Rx tail register*/\n+\tICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n+\n+\treturn 0;\n+}\n+\n+/* Allocate mbufs for all descriptors in rx queue */\n+static int\n+ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)\n+{\n+\tstruct ice_rx_entry *rxe = rxq->sw_ring;\n+\tuint64_t dma_addr;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tvolatile union ice_rx_desc *rxd;\n+\t\tstruct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);\n+\n+\t\tif (unlikely(!mbuf)) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate mbuf for RX\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\trte_mbuf_refcnt_set(mbuf, 1);\n+\t\tmbuf->next = NULL;\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->nb_segs = 1;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\n+\t\trxd = &rxq->rx_ring[i];\n+\t\trxd->read.pkt_addr = dma_addr;\n+\t\trxd->read.hdr_addr = 0;\n+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC\n+\t\trxd->read.rsvd1 = 0;\n+\t\trxd->read.rsvd2 = 0;\n+#endif\n+\t\trxe[i].mbuf = mbuf;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* Free all mbufs for descriptors in rx queue */\n+static void\n+ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)\n+{\n+\tuint16_t i;\n+\n+\tif (!rxq || !rxq->sw_ring) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to sw_ring is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tif (rxq->sw_ring[i].mbuf) {\n+\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);\n+\t\t\trxq->sw_ring[i].mbuf = NULL;\n+\t\t}\n+\t}\n+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n+\t\tif (rxq->rx_nb_avail == 0)\n+\t\t\treturn;\n+\t\tfor (i = 0; i < rxq->rx_nb_avail; i++) {\n+\t\t\tstruct rte_mbuf *mbuf;\n+\n+\t\t\tmbuf = rxq->rx_stage[rxq->rx_next_avail + i];\n+\t\t\trte_pktmbuf_free_seg(mbuf);\n+\t\t}\n+\t\trxq->rx_nb_avail = 0;\n+#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */\n+}\n+\n+/* turn on or off rx queue\n+ * @q_idx: queue index in pf scope\n+ * @on: turn on or off the queue\n+ */\n+static int\n+ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)\n+{\n+\tuint32_t reg;\n+\tuint16_t j;\n+\n+\t/* QRX_CTRL = QRX_ENA */\n+\treg = ICE_READ_REG(hw, QRX_CTRL(q_idx));\n+\n+\tif (on) {\n+\t\tif (reg & QRX_CTRL_QENA_STAT_M)\n+\t\t\treturn 0; /* Already on, skip */\n+\t\treg |= QRX_CTRL_QENA_REQ_M;\n+\t} else {\n+\t\tif (!(reg & QRX_CTRL_QENA_STAT_M))\n+\t\t\treturn 0; /* Already off, skip */\n+\t\treg &= ~QRX_CTRL_QENA_REQ_M;\n+\t}\n+\n+\t/* Write the register */\n+\tICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);\n+\t/* Check the result. It is said that QENA_STAT\n+\t * follows the QENA_REQ not more than 10 use.\n+\t * TODO: need to change the wait counter later\n+\t */\n+\tfor (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {\n+\t\trte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);\n+\t\treg = ICE_READ_REG(hw, QRX_CTRL(q_idx));\n+\t\tif (on) {\n+\t\t\tif ((reg & QRX_CTRL_QENA_REQ_M) &&\n+\t\t\t    (reg & QRX_CTRL_QENA_STAT_M))\n+\t\t\t\tbreak;\n+\t\t} else {\n+\t\t\tif (!(reg & QRX_CTRL_QENA_REQ_M) &&\n+\t\t\t    !(reg & QRX_CTRL_QENA_STAT_M))\n+\t\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\t/* Check if it is timeout */\n+\tif (j >= ICE_CHK_Q_ENA_COUNT) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to %s rx queue[%u]\",\n+\t\t\t    (on ? 
\"enable\" : \"disable\"), q_idx);\n+\t\treturn -ETIMEDOUT;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static inline int\n+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n+ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)\n+#else\n+ice_check_rx_burst_bulk_alloc_preconditions\n+\t(__rte_unused struct ice_rx_queue *rxq)\n+#endif\n+{\n+\tint ret = 0;\n+\n+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n+\tif (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions: \"\n+\t\t\t     \"rxq->rx_free_thresh=%d, \"\n+\t\t\t     \"ICE_RX_MAX_BURST=%d\",\n+\t\t\t     rxq->rx_free_thresh, ICE_RX_MAX_BURST);\n+\t\tret = -EINVAL;\n+\t} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions: \"\n+\t\t\t     \"rxq->rx_free_thresh=%d, \"\n+\t\t\t     \"rxq->nb_rx_desc=%d\",\n+\t\t\t     rxq->rx_free_thresh, rxq->nb_rx_desc);\n+\t\tret = -EINVAL;\n+\t} else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions: \"\n+\t\t\t     \"rxq->nb_rx_desc=%d, \"\n+\t\t\t     \"rxq->rx_free_thresh=%d\",\n+\t\t\t     rxq->nb_rx_desc, rxq->rx_free_thresh);\n+\t\tret = -EINVAL;\n+\t}\n+#else\n+\tret = -EINVAL;\n+#endif\n+\n+\treturn ret;\n+}\n+\n+/* reset fields in ice_rx_queue back to default */\n+static void\n+ice_reset_rx_queue(struct ice_rx_queue *rxq)\n+{\n+\tunsigned i;\n+\tuint16_t len;\n+\n+\tif (!rxq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to rxq is NULL\");\n+\t\treturn;\n+\t}\n+\n+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n+\tif (ice_check_rx_burst_bulk_alloc_preconditions(rxq) == 0)\n+\t\tlen = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);\n+\telse\n+#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */\n+\t\tlen = rxq->nb_rx_desc;\n+\n+\tfor (i = 0; i < len * sizeof(union ice_rx_desc); i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+\n+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n+\tmemset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));\n+\tfor (i = 0; i < ICE_RX_MAX_BURST; ++i)\n+\t\trxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;\n+\n+\trxq->rx_nb_avail = 0;\n+\trxq->rx_next_avail = 0;\n+\trxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);\n+#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */\n+\n+\trxq->rx_tail = 0;\n+\trxq->nb_rx_hold = 0;\n+\trxq->pkt_first_seg = NULL;\n+\trxq->pkt_last_seg = NULL;\n+}\n+\n+int\n+ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct ice_rx_queue *rxq;\n+\tint err;\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (rx_queue_id >= dev->data->nb_rx_queues) {\n+\t\tPMD_DRV_LOG(ERR, \"RX queue %u is out of range %u\",\n+\t\t\t    rx_queue_id, dev->data->nb_rx_queues);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\tif (!rxq || !rxq->q_set) {\n+\t\tPMD_DRV_LOG(ERR, \"RX queue %u not available or setup\",\n+\t\t\t    rx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\terr = ice_program_hw_rx_queue(rxq);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"fail to program RX queue %u\",\n+\t\t\t    rx_queue_id);\n+\t\treturn -EIO;\n+\t}\n+\n+\terr = ice_alloc_rx_queue_mbufs(rxq);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX queue mbuf\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trte_wmb();\n+\n+\t/* Init the RX tail register. 
*/\n+\tICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n+\n+\terr = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u on\",\n+\t\t\t    rx_queue_id);\n+\n+\t\tice_rx_queue_release_mbufs(rxq);\n+\t\tice_reset_rx_queue(rxq);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tdev->data->rx_queue_state[rx_queue_id] =\n+\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\n+\treturn 0;\n+}\n+\n+int\n+ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct ice_rx_queue *rxq;\n+\tint err;\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\n+\tif (rx_queue_id < dev->data->nb_rx_queues) {\n+\t\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\t\terr = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u off\",\n+\t\t\t\t    rx_queue_id);\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\t\tice_rx_queue_release_mbufs(rxq);\n+\t\tice_reset_rx_queue(rxq);\n+\t\tdev->data->rx_queue_state[rx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STOPPED;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int\n+ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct ice_tx_queue *txq;\n+\tint err;\n+\tstruct ice_vsi *vsi;\n+\tstruct ice_hw *hw;\n+\tstruct ice_aqc_add_tx_qgrp txq_elem;\n+\tstruct ice_tlan_ctx tx_ctx;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tif (tx_queue_id >= dev->data->nb_tx_queues) {\n+\t\tPMD_DRV_LOG(ERR, \"TX queue %u is out of range %u\",\n+\t\t\t    tx_queue_id, dev->data->nb_tx_queues);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttxq = dev->data->tx_queues[tx_queue_id];\n+\tif (!txq || !txq->q_set) {\n+\t\tPMD_DRV_LOG(ERR, \"TX queue %u is not available or setup\",\n+\t\t\t    tx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tvsi = txq->vsi;\n+\thw = ICE_VSI_TO_HW(vsi);\n+\n+\tmemset(&txq_elem, 0, sizeof(txq_elem));\n+\tmemset(&tx_ctx, 0, sizeof(tx_ctx));\n+\ttxq_elem.num_txqs = 1;\n+\ttxq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);\n+\n+\ttx_ctx.base = txq->tx_ring_phys_addr / ICE_QUEUE_BASE_ADDR_UNIT;\n+\ttx_ctx.qlen = txq->nb_tx_desc;\n+\ttx_ctx.pf_num = hw->pf_id;\n+\ttx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;\n+\ttx_ctx.src_vsi = vsi->vsi_id;\n+\ttx_ctx.port_num = hw->port_info->lport;\n+\ttx_ctx.tso_ena = 1; /* tso enable */\n+\ttx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */\n+\ttx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */\n+\n+\tice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,\n+\t\t    ice_tlan_ctx_info);\n+\n+\ttxq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);\n+\n+\t/* Init the Tx tail register*/\n+\tICE_PCI_REG_WRITE(txq->qtx_tail, 0);\n+\n+\terr = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1, &txq_elem,\n+\t\t\t      sizeof(txq_elem), NULL);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to add lan txq\");\n+\t\treturn -EIO;\n+\t}\n+\t/* store the schedule node id */\n+\ttxq->q_teid = txq_elem.txqs[0].q_teid;\n+\n+\tdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;\n+\treturn 0;\n+}\n+\n+/* Free all mbufs for descriptors in tx queue */\n+static void\n+ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)\n+{\n+\tuint16_t i;\n+\n+\tif (!txq || !txq->sw_ring) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to txq or sw_ring is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\tif (txq->sw_ring[i].mbuf) {\n+\t\t\trte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);\n+\t\t\ttxq->sw_ring[i].mbuf = NULL;\n+\t\t}\n+\t}\n+}\n+\n+static void\n+ice_reset_tx_queue(struct 
ice_tx_queue *txq)\n+{\n+\tstruct ice_tx_entry *txe;\n+\tuint16_t i, prev, size;\n+\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\ttxe = txq->sw_ring;\n+\tsize = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)txq->tx_ring)[i] = 0;\n+\n+\tprev = (uint16_t)(txq->nb_tx_desc - 1);\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\tvolatile struct ice_tx_desc *txd = &txq->tx_ring[i];\n+\n+\t\ttxd->cmd_type_offset_bsz =\n+\t\t\trte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);\n+\t\ttxe[i].mbuf =  NULL;\n+\t\ttxe[i].last_id = i;\n+\t\ttxe[prev].next_id = i;\n+\t\tprev = i;\n+\t}\n+\n+\ttxq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);\n+\ttxq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);\n+\n+\ttxq->tx_tail = 0;\n+\ttxq->nb_tx_used = 0;\n+\n+\ttxq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);\n+\ttxq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);\n+}\n+\n+int\n+ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct ice_tx_queue *txq;\n+\tstruct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);\n+\tenum ice_status status;\n+\tuint16_t q_ids[1];\n+\tuint32_t q_teids[1];\n+\n+\tif (tx_queue_id >= dev->data->nb_tx_queues) {\n+\t\tPMD_DRV_LOG(ERR, \"TX queue %u is out of range %u\",\n+\t\t\t    tx_queue_id, dev->data->nb_tx_queues);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\ttxq = dev->data->tx_queues[tx_queue_id];\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(ERR, \"TX queue %u is not available\",\n+\t\t\t    tx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tq_ids[0] = txq->reg_idx;\n+\tq_teids[0] = txq->q_teid;\n+\n+\tstatus = ice_dis_vsi_txq(hw->port_info, 1, q_ids, q_teids,\n+\t\t\t\t ICE_NO_RESET, 0, NULL);\n+\tif (status != ICE_SUCCESS) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Failed to disable Lan Tx queue\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tice_tx_queue_release_mbufs(txq);\n+\tice_reset_tx_queue(txq);\n+\tdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n+\n+\treturn 0;\n+}\n+\n+int\n+ice_rx_queue_setup(struct rte_eth_dev *dev,\n+\t\t   uint16_t queue_idx,\n+\t\t   uint16_t nb_desc,\n+\t\t   unsigned int socket_id,\n+\t\t   const struct rte_eth_rxconf *rx_conf,\n+\t\t   struct rte_mempool *mp)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_adapter *ad =\n+\t\tICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);\n+\tstruct ice_vsi *vsi = pf->main_vsi;\n+\tstruct ice_rx_queue *rxq;\n+\tconst struct rte_memzone *rz;\n+\tuint32_t ring_size;\n+\tuint16_t len;\n+\tint use_def_burst_func = 1;\n+\n+\tif (nb_desc % ICE_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > ICE_MAX_RING_DESC ||\n+\t    nb_desc < ICE_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of receive descriptors is \"\n+\t\t\t     \"invalid\", nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Free memory if needed */\n+\tif (dev->data->rx_queues[queue_idx]) {\n+\t\tice_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Allocate the rx queue data structure */\n+\trxq = rte_zmalloc_socket(NULL,\n+\t\t\t\t sizeof(struct ice_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!rxq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for \"\n+\t\t\t     \"rx queue data structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\trxq->mp = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_conf->rx_free_thresh;\n+\trxq->queue_id = queue_idx;\n+\n+\trxq->reg_idx = vsi->base_queue + 
queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\tif (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)\n+\t\trxq->crc_len = ETHER_CRC_LEN;\n+\telse\n+\t\trxq->crc_len = 0;\n+\n+\trxq->drop_en = rx_conf->rx_drop_en;\n+\trxq->vsi = vsi;\n+\trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\n+\t/* Allocate the maximun number of RX ring hardware descriptor. */\n+\tlen = ICE_MAX_RING_DESC;\n+\n+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n+\t/**\n+\t * Allocating a little more memory because vectorized/bulk_alloc Rx\n+\t * functions doesn't check boundaries each time.\n+\t */\n+\tlen += ICE_RX_MAX_BURST;\n+#endif\n+\n+\t/* Allocate the maximum number of RX ring hardware descriptor. */\n+\tring_size = sizeof(union ice_rx_desc) * len;\n+\tring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);\n+\trz = rte_eth_dma_zone_reserve(dev, \"rx_ring\", queue_idx,\n+\t\t\t\t      ring_size, ICE_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!rz) {\n+\t\tice_rx_queue_release(rxq);\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Zero all the descriptors in the ring. */\n+\tmemset(rz->addr, 0, ring_size);\n+\n+\trxq->rx_ring_phys_addr = rz->phys_addr;\n+\trxq->rx_ring = (union ice_rx_desc *)rz->addr;\n+\n+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n+\tlen = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);\n+#else\n+\tlen = nb_desc;\n+#endif\n+\n+\t/* Allocate the software ring. */\n+\trxq->sw_ring = rte_zmalloc_socket(NULL,\n+\t\t\t\t\t  sizeof(struct ice_rx_entry) * len,\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t  socket_id);\n+\tif (!rxq->sw_ring) {\n+\t\tice_rx_queue_release(rxq);\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tice_reset_rx_queue(rxq);\n+\trxq->q_set = TRUE;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\n+\tuse_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);\n+\n+\tif (!use_def_burst_func) {\n+#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions are \"\n+\t\t\t     \"satisfied. 
Rx Burst Bulk Alloc function will be \"\n+\t\t\t     \"used on port=%d, queue=%d.\",\n+\t\t\t     rxq->port_id, rxq->queue_id);\n+#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */\n+\t} else {\n+\t\tPMD_INIT_LOG(DEBUG, \"Rx Burst Bulk Alloc Preconditions are \"\n+\t\t\t     \"not satisfied, Scattered Rx is requested, \"\n+\t\t\t     \"or RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC is \"\n+\t\t\t     \"not enabled on port=%d, queue=%d.\",\n+\t\t\t     rxq->port_id, rxq->queue_id);\n+\t\tad->rx_bulk_alloc_allowed = false;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+void\n+ice_rx_queue_release(void *rxq)\n+{\n+\tstruct ice_rx_queue *q = (struct ice_rx_queue *)rxq;\n+\n+\tif (!q) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to rxq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tice_rx_queue_release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_free(q);\n+}\n+\n+int\n+ice_tx_queue_setup(struct rte_eth_dev *dev,\n+\t\t   uint16_t queue_idx,\n+\t\t   uint16_t nb_desc,\n+\t\t   unsigned int socket_id,\n+\t\t   const struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);\n+\tstruct ice_vsi *vsi = pf->main_vsi;\n+\tstruct ice_tx_queue *txq;\n+\tconst struct rte_memzone *tz;\n+\tuint32_t ring_size;\n+\tuint16_t tx_rs_thresh, tx_free_thresh;\n+\tuint64_t offloads;\n+\n+\toffloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;\n+\n+\tif (nb_desc % ICE_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > ICE_MAX_RING_DESC ||\n+\t    nb_desc < ICE_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of transmit descriptors is \"\n+\t\t\t     \"invalid\", nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/**\n+\t * The following two parameters control the setting of the RS bit on\n+\t * transmit descriptors. TX descriptors will have their RS bit set\n+\t * after txq->tx_rs_thresh descriptors have been used. The TX\n+\t * descriptor ring will be cleaned after txq->tx_free_thresh\n+\t * descriptors are used or if the number of descriptors required to\n+\t * transmit a packet is greater than the number of free TX descriptors.\n+\t *\n+\t * The following constraints must be satisfied:\n+\t *  - tx_rs_thresh must be greater than 0.\n+\t *  - tx_rs_thresh must be less than the size of the ring minus 2.\n+\t *  - tx_rs_thresh must be less than or equal to tx_free_thresh.\n+\t *  - tx_rs_thresh must be a divisor of the ring size.\n+\t *  - tx_free_thresh must be greater than 0.\n+\t *  - tx_free_thresh must be less than the size of the ring minus 3.\n+\t *\n+\t * One descriptor in the TX ring is used as a sentinel to avoid a H/W\n+\t * race condition, hence the maximum threshold constraints. When set\n+\t * to zero use default values.\n+\t */\n+\ttx_rs_thresh = (uint16_t)(tx_conf->tx_rs_thresh ?\n+\t\t\t\t  tx_conf->tx_rs_thresh :\n+\t\t\t\t  ICE_DEFAULT_TX_RSBIT_THRESH);\n+\ttx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?\n+\t\t\t\t    tx_conf->tx_free_thresh :\n+\t\t\t\t    ICE_DEFAULT_TX_FREE_THRESH);\n+\tif (tx_rs_thresh >= (nb_desc - 2)) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh must be less than the \"\n+\t\t\t     \"number of TX descriptors minus 2. \"\n+\t\t\t     \"(tx_rs_thresh=%u port=%d queue=%d)\",\n+\t\t\t     (unsigned int)tx_rs_thresh,\n+\t\t\t     (int)dev->data->port_id,\n+\t\t\t     (int)queue_idx);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (tx_free_thresh >= (nb_desc - 3)) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh must be less than the \"\n+\t\t\t     \"tx_free_thresh must be less than the \"\n+\t\t\t     \"number of TX descriptors minus 3. 
\"\n+\t\t\t     \"(tx_free_thresh=%u port=%d queue=%d)\",\n+\t\t\t     (unsigned int)tx_free_thresh,\n+\t\t\t     (int)dev->data->port_id,\n+\t\t\t     (int)queue_idx);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (tx_rs_thresh > tx_free_thresh) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh must be less than or \"\n+\t\t\t     \"equal to tx_free_thresh. (tx_free_thresh=%u\"\n+\t\t\t     \" tx_rs_thresh=%u port=%d queue=%d)\",\n+\t\t\t     (unsigned int)tx_free_thresh,\n+\t\t\t     (unsigned int)tx_rs_thresh,\n+\t\t\t     (int)dev->data->port_id,\n+\t\t\t     (int)queue_idx);\n+\t\treturn -EINVAL;\n+\t}\n+\tif ((nb_desc % tx_rs_thresh) != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"tx_rs_thresh must be a divisor of the \"\n+\t\t\t     \"number of TX descriptors. (tx_rs_thresh=%u\"\n+\t\t\t     \" port=%d queue=%d)\",\n+\t\t\t     (unsigned int)tx_rs_thresh,\n+\t\t\t     (int)dev->data->port_id,\n+\t\t\t     (int)queue_idx);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"TX WTHRESH must be set to 0 if \"\n+\t\t\t     \"tx_rs_thresh is greater than 1. \"\n+\t\t\t     \"(tx_rs_thresh=%u port=%d queue=%d)\",\n+\t\t\t     (unsigned int)tx_rs_thresh,\n+\t\t\t     (int)dev->data->port_id,\n+\t\t\t     (int)queue_idx);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Free memory if needed. */\n+\tif (dev->data->tx_queues[queue_idx]) {\n+\t\tice_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Allocate the TX queue data structure. */\n+\ttxq = rte_zmalloc_socket(NULL,\n+\t\t\t\t sizeof(struct ice_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!txq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for \"\n+\t\t\t     \"tx queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Allocate TX hardware ring descriptors. 
*/\n+\tring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;\n+\tring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);\n+\ttz = rte_eth_dma_zone_reserve(dev, \"tx_ring\", queue_idx,\n+\t\t\t\t      ring_size, ICE_RING_BASE_ALIGN,\n+\t\t\t\t      socket_id);\n+\tif (!tz) {\n+\t\tice_tx_queue_release(txq);\n+\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->tx_rs_thresh = tx_rs_thresh;\n+\ttxq->tx_free_thresh = tx_free_thresh;\n+\ttxq->pthresh = tx_conf->tx_thresh.pthresh;\n+\ttxq->hthresh = tx_conf->tx_thresh.hthresh;\n+\ttxq->wthresh = tx_conf->tx_thresh.wthresh;\n+\ttxq->queue_id = queue_idx;\n+\n+\ttxq->reg_idx = vsi->base_queue + queue_idx;\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->offloads = offloads;\n+\ttxq->vsi = vsi;\n+\ttxq->tx_deferred_start = tx_conf->tx_deferred_start;\n+\n+\ttxq->tx_ring_phys_addr = tz->phys_addr;\n+\ttxq->tx_ring = (struct ice_tx_desc *)tz->addr;\n+\n+\t/* Allocate software ring */\n+\ttxq->sw_ring =\n+\t\trte_zmalloc_socket(NULL,\n+\t\t\t\t   sizeof(struct ice_tx_entry) * nb_desc,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (!txq->sw_ring) {\n+\t\tice_tx_queue_release(txq);\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tice_reset_tx_queue(txq);\n+\ttxq->q_set = TRUE;\n+\tdev->data->tx_queues[queue_idx] = txq;\n+\n+\treturn 0;\n+}\n+\n+void\n+ice_tx_queue_release(void *txq)\n+{\n+\tstruct ice_tx_queue *q = (struct ice_tx_queue *)txq;\n+\n+\tif (!q) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to TX queue is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tice_tx_queue_release_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_free(q);\n+}\n+\n+void\n+ice_clear_queues(struct rte_eth_dev *dev)\n+{\n+\tuint16_t i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\tice_tx_queue_release_mbufs(dev->data->tx_queues[i]);\n+\t\tice_reset_tx_queue(dev->data->tx_queues[i]);\n+\t}\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tice_rx_queue_release_mbufs(dev->data->rx_queues[i]);\n+\t\tice_reset_rx_queue(dev->data->rx_queues[i]);\n+\t}\n+}\n+\n+void\n+ice_free_queues(struct rte_eth_dev *dev)\n+{\n+\tuint16_t i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\tif (!dev->data->rx_queues[i])\n+\t\t\tcontinue;\n+\t\tice_rx_queue_release(dev->data->rx_queues[i]);\n+\t\tdev->data->rx_queues[i] = NULL;\n+\t}\n+\tdev->data->nb_rx_queues = 0;\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\tif (!dev->data->tx_queues[i])\n+\t\t\tcontinue;\n+\t\tice_tx_queue_release(dev->data->tx_queues[i]);\n+\t\tdev->data->tx_queues[i] = NULL;\n+\t}\n+\tdev->data->nb_tx_queues = 0;\n+}\ndiff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h\nindex c37dc23..088a206 100644\n--- a/drivers/net/ice/ice_rxtx.h\n+++ b/drivers/net/ice/ice_rxtx.h\n@@ -114,4 +114,24 @@ struct ice_tx_queue {\n \t\tuint64_t outer_l3_len:16; /* outer L3 Header Length */\n \t};\n };\n+\n+int ice_rx_queue_setup(struct rte_eth_dev *dev,\n+\t\t       uint16_t queue_idx,\n+\t\t       uint16_t nb_desc,\n+\t\t       unsigned int socket_id,\n+\t\t       const struct rte_eth_rxconf *rx_conf,\n+\t\t       struct rte_mempool *mp);\n+int ice_tx_queue_setup(struct rte_eth_dev *dev,\n+\t\t       uint16_t queue_idx,\n+\t\t       uint16_t nb_desc,\n+\t\t       unsigned int socket_id,\n+\t\t       const struct rte_eth_txconf *tx_conf);\n+int ice_rx_queue_start(struct 
rte_eth_dev *dev, uint16_t rx_queue_id);\n+int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+void ice_rx_queue_release(void *rxq);\n+void ice_tx_queue_release(void *txq);\n+void ice_clear_queues(struct rte_eth_dev *dev);\n+void ice_free_queues(struct rte_eth_dev *dev);\n #endif /* _ICE_RXTX_H_ */\ndiff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build\nindex 9ed7b27..beb0d39 100644\n--- a/drivers/net/ice/meson.build\n+++ b/drivers/net/ice/meson.build\n@@ -5,7 +5,8 @@ subdir('base')\n objs = [base_objs]\n \n sources = files(\n-\t'ice_ethdev.c'\n+\t'ice_ethdev.c',\n+\t'ice_lan_rxtx.c'\n \t)\n \n deps += ['hash']\n",
    "prefixes": [
        "v5",
        "16/31"
    ]
}
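
The "mbox" URL in the response above points at the raw patch email, which is the usual input for git am. A minimal sketch under the same assumptions (the local filename is illustrative only, not taken from this page):

    # Download the mbox referenced by the JSON above so it can be applied
    # locally with `git am`. Assumes the `requests` package.
    import requests

    mbox_url = ("https://patches.dpdk.org/project/dpdk/patch/"
                "1545032259-77179-17-git-send-email-wenzhuo.lu@intel.com/mbox/")
    resp = requests.get(mbox_url)
    resp.raise_for_status()

    # Hypothetical output name; afterwards run: git am v5-16-31-net-ice.mbox
    with open("v5-16-31-net-ice.mbox", "wb") as f:
        f.write(resp.content)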