get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied are changed).

put:
Update a patch (full update).
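
As a usage sketch (not part of the API response): the endpoint shown below can be read anonymously with any HTTP client. A minimal Python example, assuming the third-party requests library is available; the output filename is arbitrary:

import requests

# Fetch the patch record shown below; GET needs no authentication.
resp = requests.get("https://patches.dpdk.org/api/patches/8074/")
resp.raise_for_status()
patch = resp.json()

print(patch["state"])             # "accepted" for this record
print(patch["submitter"]["name"])

# The "mbox" field links to an mbox rendering of the patch, which can
# be saved locally and applied to a tree with `git am`.
with open("8074.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"]).content)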

GET /api/patches/8074/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 8074,
    "url": "https://patches.dpdk.org/api/patches/8074/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1445950311-20497-5-git-send-email-konstantin.ananyev@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1445950311-20497-5-git-send-email-konstantin.ananyev@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1445950311-20497-5-git-send-email-konstantin.ananyev@intel.com",
    "date": "2015-10-27T12:51:46",
    "name": "[dpdk-dev,PATCHv7,4/9] e1000: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "d658e264c203b7ab9ee294612c982f6675d5bd35",
    "submitter": {
        "id": 33,
        "url": "https://patches.dpdk.org/api/people/33/?format=api",
        "name": "Ananyev, Konstantin",
        "email": "konstantin.ananyev@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1445950311-20497-5-git-send-email-konstantin.ananyev@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/8074/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/8074/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 6E2AD8E73;\n\tTue, 27 Oct 2015 13:52:32 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby dpdk.org (Postfix) with ESMTP id 902198D9F\n\tfor <dev@dpdk.org>; Tue, 27 Oct 2015 13:52:23 +0100 (CET)",
            "from fmsmga002.fm.intel.com ([10.253.24.26])\n\tby orsmga102.jf.intel.com with ESMTP; 27 Oct 2015 05:52:21 -0700",
            "from irvmail001.ir.intel.com ([163.33.26.43])\n\tby fmsmga002.fm.intel.com with ESMTP; 27 Oct 2015 05:52:20 -0700",
            "from sivswdev02.ir.intel.com (sivswdev02.ir.intel.com\n\t[10.237.217.46])\n\tby irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\n\tt9RCqIcs028737; Tue, 27 Oct 2015 12:52:18 GMT",
            "from sivswdev02.ir.intel.com (localhost [127.0.0.1])\n\tby sivswdev02.ir.intel.com with ESMTP id t9RCqIuC020580;\n\tTue, 27 Oct 2015 12:52:18 GMT",
            "(from kananye1@localhost)\n\tby sivswdev02.ir.intel.com with  id t9RCqI8C020576;\n\tTue, 27 Oct 2015 12:52:18 GMT"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.20,205,1444719600\"; d=\"scan'208\";a=\"836532051\"",
        "From": "Konstantin Ananyev <konstantin.ananyev@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Tue, 27 Oct 2015 12:51:46 +0000",
        "Message-Id": "<1445950311-20497-5-git-send-email-konstantin.ananyev@intel.com>",
        "X-Mailer": "git-send-email 1.7.4.1",
        "In-Reply-To": [
            "<1445950311-20497-1-git-send-email-konstantin.ananyev@intel.com>",
            "<1445515592-25920-2-git-send-email-konstantin.ananyev@intel.com>"
        ],
        "References": [
            "<1445950311-20497-1-git-send-email-konstantin.ananyev@intel.com>",
            "<1445515592-25920-2-git-send-email-konstantin.ananyev@intel.com>"
        ],
        "Subject": "[dpdk-dev] [PATCHv7 4/9] e1000: add support for\n\teth_(rxq|txq)_info_get and (rx|tx)_desc_lim",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\n---\n drivers/net/e1000/e1000_ethdev.h | 36 ++++++++++++++++++++\n drivers/net/e1000/em_ethdev.c    | 14 ++++++++\n drivers/net/e1000/em_rxtx.c      | 71 +++++++++++++++++++++++-----------------\n drivers/net/e1000/igb_ethdev.c   | 22 +++++++++++++\n drivers/net/e1000/igb_rxtx.c     | 66 ++++++++++++++++++++++++-------------\n 5 files changed, 156 insertions(+), 53 deletions(-)",
    "diff": "diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h\nindex 4e69e44..3c6f613 100644\n--- a/drivers/net/e1000/e1000_ethdev.h\n+++ b/drivers/net/e1000/e1000_ethdev.h\n@@ -108,6 +108,30 @@\n \tETH_RSS_IPV6_TCP_EX | \\\n \tETH_RSS_IPV6_UDP_EX)\n \n+/*\n+ * Maximum number of Ring Descriptors.\n+ *\n+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring\n+ * desscriptors should meet the following condition:\n+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0\n+ */\n+#define\tE1000_MIN_RING_DESC\t32\n+#define\tE1000_MAX_RING_DESC\t4096\n+\n+/*\n+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be\n+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.\n+ * This will also optimize cache line size effect.\n+ * H/W supports up to cache line size 128.\n+ */\n+#define\tE1000_ALIGN\t128\n+\n+#define\tIGB_RXD_ALIGN\t(E1000_ALIGN / sizeof(union e1000_adv_rx_desc))\n+#define\tIGB_TXD_ALIGN\t(E1000_ALIGN / sizeof(union e1000_adv_tx_desc))\n+\n+#define\tEM_RXD_ALIGN\t(E1000_ALIGN / sizeof(struct e1000_rx_desc))\n+#define\tEM_TXD_ALIGN\t(E1000_ALIGN / sizeof(struct e1000_data_desc))\n+\n /* structure for interrupt relative data */\n struct e1000_interrupt {\n \tuint32_t flags;\n@@ -307,6 +331,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);\n \n int igb_pf_host_configure(struct rte_eth_dev *eth_dev);\n \n+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo);\n+\n+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo);\n+\n /*\n  * RX/TX EM function prototypes\n  */\n@@ -343,6 +373,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tuint16_t nb_pkts);\n \n+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo);\n+\n+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo);\n+\n void igb_pf_host_uninit(struct rte_eth_dev *dev);\n \n #endif /* _E1000_ETHDEV_H_ */\ndiff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c\nindex 912f5dd..0cbc228 100644\n--- a/drivers/net/e1000/em_ethdev.c\n+++ b/drivers/net/e1000/em_ethdev.c\n@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {\n \t.mac_addr_add         = eth_em_rar_set,\n \t.mac_addr_remove      = eth_em_rar_clear,\n \t.set_mc_addr_list     = eth_em_set_mc_addr_list,\n+\t.rxq_info_get         = em_rxq_info_get,\n+\t.txq_info_get         = em_txq_info_get,\n };\n \n /**\n@@ -933,6 +935,18 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \n \tdev_info->max_rx_queues = 1;\n \tdev_info->max_tx_queues = 1;\n+\n+\tdev_info->rx_desc_lim = (struct rte_eth_desc_lim) {\n+\t\t.nb_max = E1000_MAX_RING_DESC,\n+\t\t.nb_min = E1000_MIN_RING_DESC,\n+\t\t.nb_align = EM_RXD_ALIGN,\n+\t};\n+\n+\tdev_info->tx_desc_lim = (struct rte_eth_desc_lim) {\n+\t\t.nb_max = E1000_MAX_RING_DESC,\n+\t\t.nb_min = E1000_MIN_RING_DESC,\n+\t\t.nb_align = EM_TXD_ALIGN,\n+\t};\n }\n \n /* return 0 means link status changed, -1 means not changed */\ndiff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c\nindex 3b8776d..03e1bc2 100644\n--- a/drivers/net/e1000/em_rxtx.c\n+++ b/drivers/net/e1000/em_rxtx.c\n@@ -1081,26 +1081,6 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \treturn 
(nb_rx);\n }\n \n-/*\n- * Rings setup and release.\n- *\n- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be\n- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.\n- * This will also optimize cache line size effect.\n- * H/W supports up to cache line size 128.\n- */\n-#define EM_ALIGN 128\n-\n-/*\n- * Maximum number of Ring Descriptors.\n- *\n- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring\n- * desscriptors should meet the following condition:\n- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0\n- */\n-#define EM_MIN_RING_DESC 32\n-#define EM_MAX_RING_DESC 4096\n-\n #define\tEM_MAX_BUF_SIZE     16384\n #define EM_RCTL_FLXBUF_STEP 1024\n \n@@ -1210,11 +1190,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,\n \t/*\n \t * Validate number of transmit descriptors.\n \t * It must not exceed hardware maximum, and must be multiple\n-\t * of EM_ALIGN.\n+\t * of E1000_ALIGN.\n \t */\n-\tif (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||\n-\t\t\t(nb_desc > EM_MAX_RING_DESC) ||\n-\t\t\t(nb_desc < EM_MIN_RING_DESC)) {\n+\tif (nb_desc % EM_TXD_ALIGN != 0 ||\n+\t\t\t(nb_desc > E1000_MAX_RING_DESC) ||\n+\t\t\t(nb_desc < E1000_MIN_RING_DESC)) {\n \t\treturn -(EINVAL);\n \t}\n \n@@ -1272,7 +1252,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,\n \t * handle the maximum ring size is allocated in order to allow for\n \t * resizing in later calls to the queue setup function.\n \t */\n-\ttsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;\n+\ttsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;\n \tif ((tz = ring_dma_zone_reserve(dev, \"tx_ring\", queue_idx, tsize,\n \t\t\tsocket_id)) == NULL)\n \t\treturn (-ENOMEM);\n@@ -1375,11 +1355,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,\n \t/*\n \t * Validate number of receive descriptors.\n \t * It must not exceed hardware maximum, and must be multiple\n-\t * of EM_ALIGN.\n+\t * of E1000_ALIGN.\n \t */\n-\tif (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||\n-\t\t\t(nb_desc > EM_MAX_RING_DESC) ||\n-\t\t\t(nb_desc < EM_MIN_RING_DESC)) {\n+\tif (nb_desc % EM_RXD_ALIGN != 0 ||\n+\t\t\t(nb_desc > E1000_MAX_RING_DESC) ||\n+\t\t\t(nb_desc < E1000_MIN_RING_DESC)) {\n \t\treturn (-EINVAL);\n \t}\n \n@@ -1399,7 +1379,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \n \t/* Allocate RX ring for max possible mumber of hardware descriptors. */\n-\trsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;\n+\trsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;\n \tif ((rz = ring_dma_zone_reserve(dev, \"rx_ring\", queue_idx, rsize,\n \t\t\tsocket_id)) == NULL)\n \t\treturn (-ENOMEM);\n@@ -1881,3 +1861,34 @@ eth_em_tx_init(struct rte_eth_dev *dev)\n \t/* This write will effectively turn on the transmit unit. 
*/\n \tE1000_WRITE_REG(hw, E1000_TCTL, tctl);\n }\n+\n+void\n+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo)\n+{\n+\tstruct em_rx_queue *rxq;\n+\n+\trxq = dev->data->rx_queues[queue_id];\n+\n+\tqinfo->mp = rxq->mb_pool;\n+\tqinfo->scattered_rx = dev->data->scattered_rx;\n+\tqinfo->nb_desc = rxq->nb_rx_desc;\n+\tqinfo->conf.rx_free_thresh = rxq->rx_free_thresh;\n+}\n+\n+void\n+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo)\n+{\n+\tstruct em_tx_queue *txq;\n+\n+\ttxq = dev->data->tx_queues[queue_id];\n+\n+\tqinfo->nb_desc = txq->nb_tx_desc;\n+\n+\tqinfo->conf.tx_thresh.pthresh = txq->pthresh;\n+\tqinfo->conf.tx_thresh.hthresh = txq->hthresh;\n+\tqinfo->conf.tx_thresh.wthresh = txq->wthresh;\n+\tqinfo->conf.tx_free_thresh = txq->tx_free_thresh;\n+\tqinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;\n+}\ndiff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c\nindex 848ef6e..73c067e 100644\n--- a/drivers/net/e1000/igb_ethdev.c\n+++ b/drivers/net/e1000/igb_ethdev.c\n@@ -281,6 +281,18 @@ static const struct rte_pci_id pci_id_igbvf_map[] = {\n {0},\n };\n \n+static const struct rte_eth_desc_lim rx_desc_lim = {\n+\t.nb_max = E1000_MAX_RING_DESC,\n+\t.nb_min = E1000_MIN_RING_DESC,\n+\t.nb_align = IGB_RXD_ALIGN,\n+};\n+\n+static const struct rte_eth_desc_lim tx_desc_lim = {\n+\t.nb_max = E1000_MAX_RING_DESC,\n+\t.nb_min = E1000_MIN_RING_DESC,\n+\t.nb_align = IGB_RXD_ALIGN,\n+};\n+\n static const struct eth_dev_ops eth_igb_ops = {\n \t.dev_configure        = eth_igb_configure,\n \t.dev_start            = eth_igb_start,\n@@ -319,6 +331,8 @@ static const struct eth_dev_ops eth_igb_ops = {\n \t.rss_hash_conf_get    = eth_igb_rss_hash_conf_get,\n \t.filter_ctrl          = eth_igb_filter_ctrl,\n \t.set_mc_addr_list     = eth_igb_set_mc_addr_list,\n+\t.rxq_info_get         = igb_rxq_info_get,\n+\t.txq_info_get         = igb_txq_info_get,\n \t.timesync_enable      = igb_timesync_enable,\n \t.timesync_disable     = igb_timesync_disable,\n \t.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,\n@@ -349,6 +363,8 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {\n \t.tx_queue_setup       = eth_igb_tx_queue_setup,\n \t.tx_queue_release     = eth_igb_tx_queue_release,\n \t.set_mc_addr_list     = eth_igb_set_mc_addr_list,\n+\t.rxq_info_get         = igb_rxq_info_get,\n+\t.txq_info_get         = igb_txq_info_get,\n \t.mac_addr_set         = igbvf_default_mac_addr_set,\n \t.get_reg_length       = igbvf_get_reg_length,\n \t.get_reg              = igbvf_get_regs,\n@@ -1570,6 +1586,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\t},\n \t\t.txq_flags = 0,\n \t};\n+\n+\tdev_info->rx_desc_lim = rx_desc_lim;\n+\tdev_info->tx_desc_lim = tx_desc_lim;\n }\n \n static void\n@@ -1621,6 +1640,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\t},\n \t\t.txq_flags = 0,\n \t};\n+\n+\tdev_info->rx_desc_lim = rx_desc_lim;\n+\tdev_info->tx_desc_lim = tx_desc_lim;\n }\n \n /* return 0 means link status changed, -1 means not changed */\ndiff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c\nindex 19905fd..cca3300 100644\n--- a/drivers/net/e1000/igb_rxtx.c\n+++ b/drivers/net/e1000/igb_rxtx.c\n@@ -1148,25 +1148,12 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n }\n \n /*\n- * Rings setup and release.\n- *\n- * TDBA/RDBA should be aligned on 16 byte boundary. 
But TDLEN/RDLEN should be\n- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.\n- * This will also optimize cache line size effect.\n- * H/W supports up to cache line size 128.\n- */\n-#define IGB_ALIGN 128\n-\n-/*\n  * Maximum number of Ring Descriptors.\n  *\n  * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring\n  * desscriptors should meet the following condition:\n  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0\n  */\n-#define IGB_MIN_RING_DESC 32\n-#define IGB_MAX_RING_DESC 4096\n-\n static const struct rte_memzone *\n ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,\n \t\t      uint16_t queue_id, uint32_t ring_size, int socket_id)\n@@ -1183,10 +1170,10 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,\n \n #ifdef RTE_LIBRTE_XEN_DOM0\n \treturn rte_memzone_reserve_bounded(z_name, ring_size,\n-\t\t\tsocket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);\n+\t\t\tsocket_id, 0, E1000_ALIGN, RTE_PGSIZE_2M);\n #else\n \treturn rte_memzone_reserve_aligned(z_name, ring_size,\n-\t\t\tsocket_id, 0, IGB_ALIGN);\n+\t\t\tsocket_id, 0, E1000_ALIGN);\n #endif\n }\n \n@@ -1282,10 +1269,11 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,\n \t/*\n \t * Validate number of transmit descriptors.\n \t * It must not exceed hardware maximum, and must be multiple\n-\t * of IGB_ALIGN.\n+\t * of E1000_ALIGN.\n \t */\n-\tif (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||\n-\t    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {\n+\tif (nb_desc % IGB_TXD_ALIGN != 0 ||\n+\t\t\t(nb_desc > E1000_MAX_RING_DESC) ||\n+\t\t\t(nb_desc < E1000_MIN_RING_DESC)) {\n \t\treturn -EINVAL;\n \t}\n \n@@ -1321,7 +1309,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,\n \t * handle the maximum ring size is allocated in order to allow for\n \t * resizing in later calls to the queue setup function.\n \t */\n-\tsize = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;\n+\tsize = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;\n \ttz = ring_dma_zone_reserve(dev, \"tx_ring\", queue_idx,\n \t\t\t\t\tsize, socket_id);\n \tif (tz == NULL) {\n@@ -1430,10 +1418,11 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,\n \t/*\n \t * Validate number of receive descriptors.\n \t * It must not exceed hardware maximum, and must be multiple\n-\t * of IGB_ALIGN.\n+\t * of E1000_ALIGN.\n \t */\n-\tif (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||\n-\t    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {\n+\tif (nb_desc % IGB_RXD_ALIGN != 0 ||\n+\t\t\t(nb_desc > E1000_MAX_RING_DESC) ||\n+\t\t\t(nb_desc < E1000_MIN_RING_DESC)) {\n \t\treturn (-EINVAL);\n \t}\n \n@@ -1469,7 +1458,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,\n \t *  handle the maximum ring size is allocated in order to allow for\n \t *  resizing in later calls to the queue setup function.\n \t */\n-\tsize = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;\n+\tsize = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;\n \trz = ring_dma_zone_reserve(dev, \"rx_ring\", queue_idx, size, socket_id);\n \tif (rz == NULL) {\n \t\tigb_rx_queue_release(rxq);\n@@ -2482,3 +2471,34 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)\n \t}\n \n }\n+\n+void\n+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo)\n+{\n+\tstruct igb_rx_queue *rxq;\n+\n+\trxq = dev->data->rx_queues[queue_id];\n+\n+\tqinfo->mp = rxq->mb_pool;\n+\tqinfo->scattered_rx = 
dev->data->scattered_rx;\n+\tqinfo->nb_desc = rxq->nb_rx_desc;\n+\n+\tqinfo->conf.rx_free_thresh = rxq->rx_free_thresh;\n+\tqinfo->conf.rx_drop_en = rxq->drop_en;\n+}\n+\n+void\n+igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo)\n+{\n+\tstruct igb_tx_queue *txq;\n+\n+\ttxq = dev->data->tx_queues[queue_id];\n+\n+\tqinfo->nb_desc = txq->nb_tx_desc;\n+\n+\tqinfo->conf.tx_thresh.pthresh = txq->pthresh;\n+\tqinfo->conf.tx_thresh.hthresh = txq->hthresh;\n+\tqinfo->conf.tx_thresh.wthresh = txq->wthresh;\n+}\n",
    "prefixes": [
        "dpdk-dev",
        "PATCHv7",
        "4/9"
    ]
}
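
Writes go through the PUT and PATCH methods listed in the Allow header above. A hedged sketch of a partial update, again with requests: it assumes you hold a Patchwork API token with maintainer rights on the project (the token value here is a placeholder), and the set of valid state names is configured per instance:

import requests

# PATCH changes only the fields supplied; PUT replaces the whole record.
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/8074/",
    headers={"Authorization": "Token <your-api-token>"},  # placeholder token
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"])  # the updated record is echoed back

Because PATCH leaves unsupplied fields untouched, it is usually the safer choice for scripted state changes.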