get:
Show a patch.

patch:
Update a patch (partial update: only the fields provided are changed).

put:
Update a patch (full update).

GET /api/patches/8075/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 8075,
    "url": "https://patches.dpdk.org/api/patches/8075/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1445950311-20497-4-git-send-email-konstantin.ananyev@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1445950311-20497-4-git-send-email-konstantin.ananyev@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1445950311-20497-4-git-send-email-konstantin.ananyev@intel.com",
    "date": "2015-10-27T12:51:45",
    "name": "[dpdk-dev,PATCHv7,3/9] ixgbe: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "4fc3b1be58a7e80fcc4ef8fe493948cb626e24fe",
    "submitter": {
        "id": 33,
        "url": "https://patches.dpdk.org/api/people/33/?format=api",
        "name": "Ananyev, Konstantin",
        "email": "konstantin.ananyev@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1445950311-20497-4-git-send-email-konstantin.ananyev@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/8075/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/8075/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 69AA48E7B;\n\tTue, 27 Oct 2015 13:52:44 +0100 (CET)",
            "from mga14.intel.com (mga14.intel.com [192.55.52.115])\n\tby dpdk.org (Postfix) with ESMTP id 4F2878DA5\n\tfor <dev@dpdk.org>; Tue, 27 Oct 2015 13:52:27 +0100 (CET)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby fmsmga103.fm.intel.com with ESMTP; 27 Oct 2015 05:52:26 -0700",
            "from irvmail001.ir.intel.com ([163.33.26.43])\n\tby orsmga002.jf.intel.com with ESMTP; 27 Oct 2015 05:52:19 -0700",
            "from sivswdev02.ir.intel.com (sivswdev02.ir.intel.com\n\t[10.237.217.46])\n\tby irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\n\tt9RCqI3g028734; Tue, 27 Oct 2015 12:52:18 GMT",
            "from sivswdev02.ir.intel.com (localhost [127.0.0.1])\n\tby sivswdev02.ir.intel.com with ESMTP id t9RCqI5w020573;\n\tTue, 27 Oct 2015 12:52:18 GMT",
            "(from kananye1@localhost)\n\tby sivswdev02.ir.intel.com with  id t9RCqI77020569;\n\tTue, 27 Oct 2015 12:52:18 GMT"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.20,205,1444719600\"; d=\"scan'208\";a=\"836094346\"",
        "From": "Konstantin Ananyev <konstantin.ananyev@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Tue, 27 Oct 2015 12:51:45 +0000",
        "Message-Id": "<1445950311-20497-4-git-send-email-konstantin.ananyev@intel.com>",
        "X-Mailer": "git-send-email 1.7.4.1",
        "In-Reply-To": [
            "<1445950311-20497-1-git-send-email-konstantin.ananyev@intel.com>",
            "<1445515592-25920-2-git-send-email-konstantin.ananyev@intel.com>"
        ],
        "References": [
            "<1445950311-20497-1-git-send-email-konstantin.ananyev@intel.com>",
            "<1445515592-25920-2-git-send-email-konstantin.ananyev@intel.com>"
        ],
        "Subject": "[dpdk-dev] [PATCHv7 3/9] ixgbe: add support for\n\teth_(rxq|txq)_info_get and (rx|tx)_desc_lim",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\n---\n drivers/net/ixgbe/ixgbe_ethdev.c | 23 ++++++++++++++\n drivers/net/ixgbe/ixgbe_ethdev.h |  6 ++++\n drivers/net/ixgbe/ixgbe_rxtx.c   | 68 +++++++++++++++++++++++++---------------\n drivers/net/ixgbe/ixgbe_rxtx.h   | 21 +++++++++++++\n 4 files changed, 93 insertions(+), 25 deletions(-)",
    "diff": "diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c\nindex ec2918c..4769bb0 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.c\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.c\n@@ -386,6 +386,18 @@ static const struct rte_pci_id pci_id_ixgbevf_map[] = {\n \n };\n \n+static const struct rte_eth_desc_lim rx_desc_lim = {\n+\t.nb_max = IXGBE_MAX_RING_DESC,\n+\t.nb_min = IXGBE_MIN_RING_DESC,\n+\t.nb_align = IXGBE_RXD_ALIGN,\n+};\n+\n+static const struct rte_eth_desc_lim tx_desc_lim = {\n+\t.nb_max = IXGBE_MAX_RING_DESC,\n+\t.nb_min = IXGBE_MIN_RING_DESC,\n+\t.nb_align = IXGBE_TXD_ALIGN,\n+};\n+\n static const struct eth_dev_ops ixgbe_eth_dev_ops = {\n \t.dev_configure        = ixgbe_dev_configure,\n \t.dev_start            = ixgbe_dev_start,\n@@ -456,6 +468,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {\n \t.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,\n \t.filter_ctrl          = ixgbe_dev_filter_ctrl,\n \t.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,\n+\t.rxq_info_get         = ixgbe_rxq_info_get,\n+\t.txq_info_get         = ixgbe_txq_info_get,\n \t.timesync_enable      = ixgbe_timesync_enable,\n \t.timesync_disable     = ixgbe_timesync_disable,\n \t.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,\n@@ -494,6 +508,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {\n \t.mac_addr_add         = ixgbevf_add_mac_addr,\n \t.mac_addr_remove      = ixgbevf_remove_mac_addr,\n \t.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,\n+\t.rxq_info_get         = ixgbe_rxq_info_get,\n+\t.txq_info_get         = ixgbe_txq_info_get,\n \t.mac_addr_set         = ixgbevf_set_default_mac_addr,\n \t.get_reg_length       = ixgbevf_get_reg_length,\n \t.get_reg              = ixgbevf_get_regs,\n@@ -2396,6 +2412,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\t.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |\n \t\t\t\tETH_TXQ_FLAGS_NOOFFLOADS,\n \t};\n+\n+\tdev_info->rx_desc_lim = rx_desc_lim;\n+\tdev_info->tx_desc_lim = tx_desc_lim;\n+\n \tdev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);\n \tdev_info->reta_size = ETH_RSS_RETA_SIZE_128;\n \tdev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;\n@@ -2449,6 +2469,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,\n \t\t.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |\n \t\t\t\tETH_TXQ_FLAGS_NOOFFLOADS,\n \t};\n+\n+\tdev_info->rx_desc_lim = rx_desc_lim;\n+\tdev_info->tx_desc_lim = tx_desc_lim;\n }\n \n /* return 0 means link status changed, -1 means not changed */\ndiff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h\nindex c3d4f4f..d16f476 100644\n--- a/drivers/net/ixgbe/ixgbe_ethdev.h\n+++ b/drivers/net/ixgbe/ixgbe_ethdev.h\n@@ -351,6 +351,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n \n int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n \n+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo);\n+\n+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo);\n+\n int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);\n \n void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c\nindex a598a72..ba08588 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx.c\n+++ b/drivers/net/ixgbe/ixgbe_rxtx.c\n@@ -1821,25 +1821,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,\n  **********************************************************************/\n \n /*\n- * Rings setup and release.\n- *\n- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be\n- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will\n- * also optimize cache line size effect. H/W supports up to cache line size 128.\n- */\n-#define IXGBE_ALIGN 128\n-\n-/*\n- * Maximum number of Ring Descriptors.\n- *\n- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring\n- * descriptors should meet the following condition:\n- *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0\n- */\n-#define IXGBE_MIN_RING_DESC 32\n-#define IXGBE_MAX_RING_DESC 4096\n-\n-/*\n  * Create memzone for HW rings. malloc can't be used as the physical address is\n  * needed. If the memzone is already created, then this function returns a ptr\n  * to the old one.\n@@ -2007,9 +1988,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,\n \t * It must not exceed hardware maximum, and must be multiple\n \t * of IXGBE_ALIGN.\n \t */\n-\tif (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||\n-\t    (nb_desc > IXGBE_MAX_RING_DESC) ||\n-\t    (nb_desc < IXGBE_MIN_RING_DESC)) {\n+\tif (nb_desc % IXGBE_TXD_ALIGN != 0 ||\n+\t\t\t(nb_desc > IXGBE_MAX_RING_DESC) ||\n+\t\t\t(nb_desc < IXGBE_MIN_RING_DESC)) {\n \t\treturn -EINVAL;\n \t}\n \n@@ -2374,9 +2355,9 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,\n \t * It must not exceed hardware maximum, and must be multiple\n \t * of IXGBE_ALIGN.\n \t */\n-\tif (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||\n-\t    (nb_desc > IXGBE_MAX_RING_DESC) ||\n-\t    (nb_desc < IXGBE_MIN_RING_DESC)) {\n+\tif (nb_desc % IXGBE_RXD_ALIGN != 0 ||\n+\t\t\t(nb_desc > IXGBE_MAX_RING_DESC) ||\n+\t\t\t(nb_desc < IXGBE_MIN_RING_DESC)) {\n \t\treturn (-EINVAL);\n \t}\n \n@@ -4649,6 +4630,43 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n \treturn 0;\n }\n \n+void\n+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_rxq_info *qinfo)\n+{\n+\tstruct ixgbe_rx_queue *rxq;\n+\n+\trxq = dev->data->rx_queues[queue_id];\n+\n+\tqinfo->mp = rxq->mb_pool;\n+\tqinfo->scattered_rx = dev->data->scattered_rx;\n+\tqinfo->nb_desc = rxq->nb_rx_desc;\n+\n+\tqinfo->conf.rx_free_thresh = rxq->rx_free_thresh;\n+\tqinfo->conf.rx_drop_en = rxq->drop_en;\n+\tqinfo->conf.rx_deferred_start = rxq->rx_deferred_start;\n+}\n+\n+void\n+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,\n+\tstruct rte_eth_txq_info *qinfo)\n+{\n+\tstruct ixgbe_tx_queue *txq;\n+\n+\ttxq = dev->data->tx_queues[queue_id];\n+\n+\tqinfo->nb_desc = txq->nb_tx_desc;\n+\n+\tqinfo->conf.tx_thresh.pthresh = txq->pthresh;\n+\tqinfo->conf.tx_thresh.hthresh = txq->hthresh;\n+\tqinfo->conf.tx_thresh.wthresh = txq->wthresh;\n+\n+\tqinfo->conf.tx_free_thresh = txq->tx_free_thresh;\n+\tqinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;\n+\tqinfo->conf.txq_flags = txq->txq_flags;\n+\tqinfo->conf.tx_deferred_start = txq->tx_deferred_start;\n+}\n+\n /*\n  * [VF] Initializes Receive Unit.\n  */\ndiff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h\nindex b9eca67..475a800 100644\n--- a/drivers/net/ixgbe/ixgbe_rxtx.h\n+++ b/drivers/net/ixgbe/ixgbe_rxtx.h\n@@ -34,6 +34,27 @@\n #ifndef _IXGBE_RXTX_H_\n #define _IXGBE_RXTX_H_\n \n+/*\n+ * Rings setup and release.\n+ *\n+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be\n+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will\n+ * also optimize cache line size effect. H/W supports up to cache line size 128.\n+ */\n+#define\tIXGBE_ALIGN\t128\n+\n+#define IXGBE_RXD_ALIGN\t(IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc))\n+#define IXGBE_TXD_ALIGN\t(IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc))\n+\n+/*\n+ * Maximum number of Ring Descriptors.\n+ *\n+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring\n+ * descriptors should meet the following condition:\n+ *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0\n+ */\n+#define\tIXGBE_MIN_RING_DESC\t32\n+#define\tIXGBE_MAX_RING_DESC\t4096\n \n #define RTE_PMD_IXGBE_TX_MAX_BURST 32\n #define RTE_PMD_IXGBE_RX_MAX_BURST 32\n",
    "prefixes": [
        "dpdk-dev",
        "PATCHv7",
        "3/9"
    ]
}