From patchwork Thu Oct 1 19:54:49 2015
X-Patchwork-Id: 7350
From: Konstantin Ananyev
To: dev@dpdk.org
Date: Thu, 1 Oct 2015 20:54:49 +0100
Message-Id: <1443729293-20753-5-git-send-email-konstantin.ananyev@intel.com>
In-Reply-To: <1443729293-20753-1-git-send-email-konstantin.ananyev@intel.com>
References: <1443729293-20753-1-git-send-email-konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [PATCHv5 4/8] e1000: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

Signed-off-by: Konstantin Ananyev
Acked-by: Remy Horton
---
 drivers/net/e1000/e1000_ethdev.h | 36 ++++++++++++++++++++
 drivers/net/e1000/em_ethdev.c    | 14 ++++++++
 drivers/net/e1000/em_rxtx.c      | 71 +++++++++++++++++++++++-----------------
 drivers/net/e1000/igb_ethdev.c   | 22 +++++++++++++
 drivers/net/e1000/igb_rxtx.c     | 66 ++++++++++++++++++++++++-------------
 5 files changed, 156 insertions(+), 53 deletions(-)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 4e69e44..3c6f613 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -108,6 +108,30 @@
 	ETH_RSS_IPV6_TCP_EX | \
 	ETH_RSS_IPV6_UDP_EX)
 
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define E1000_MIN_RING_DESC 32
+#define E1000_MAX_RING_DESC 4096
+
+/*
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
+ * This will also optimize cache line size effect.
+ * H/W supports up to cache line size 128.
+ */
+#define E1000_ALIGN 128
+
+#define IGB_RXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
+#define IGB_TXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
+
+#define EM_RXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_rx_desc))
+#define EM_TXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_data_desc))
+
 /* structure for interrupt relative data */
 struct e1000_interrupt {
 	uint32_t flags;
@@ -307,6 +331,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
 
 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);
 
+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -343,6 +373,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 
+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 void igb_pf_host_uninit(struct rte_eth_dev *dev);
 
 #endif /* _E1000_ETHDEV_H_ */
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 912f5dd..0cbc228 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {
 	.mac_addr_add = eth_em_rar_set,
 	.mac_addr_remove = eth_em_rar_clear,
 	.set_mc_addr_list = eth_em_set_mc_addr_list,
+	.rxq_info_get = em_rxq_info_get,
+	.txq_info_get = em_txq_info_get,
 };
 
 /**
@@ -933,6 +935,18 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = 1;
 	dev_info->max_tx_queues = 1;
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = E1000_MAX_RING_DESC,
+		.nb_min = E1000_MIN_RING_DESC,
+		.nb_align = EM_RXD_ALIGN,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = E1000_MAX_RING_DESC,
+		.nb_min = E1000_MIN_RING_DESC,
+		.nb_align = EM_TXD_ALIGN,
+	};
 }
 
 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3b8776d..03e1bc2 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1081,26 +1081,6 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return (nb_rx);
 }
 
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define EM_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * desscriptors should meet the following condition:
- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define EM_MIN_RING_DESC 32
-#define EM_MAX_RING_DESC 4096
-
 #define EM_MAX_BUF_SIZE 16384
 #define EM_RCTL_FLXBUF_STEP 1024
 
@@ -1210,11 +1190,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of EM_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
-			(nb_desc > EM_MAX_RING_DESC) ||
-			(nb_desc < EM_MIN_RING_DESC)) {
+	if (nb_desc % EM_TXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return -(EINVAL);
 	}
@@ -1272,7 +1252,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
+	tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
 	if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
 			socket_id)) == NULL)
 		return (-ENOMEM);
@@ -1375,11 +1355,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of EM_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
-			(nb_desc > EM_MAX_RING_DESC) ||
-			(nb_desc < EM_MIN_RING_DESC)) {
+	if (nb_desc % EM_RXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return (-EINVAL);
 	}
@@ -1399,7 +1379,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	/* Allocate RX ring for max possible mumber of hardware descriptors. */
-	rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
+	rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
 	if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
 			socket_id)) == NULL)
 		return (-ENOMEM);
@@ -1881,3 +1861,34 @@ eth_em_tx_init(struct rte_eth_dev *dev)
 	/* This write will effectively turn on the transmit unit. */
 	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
 }
+
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct em_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct em_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 848ef6e..73c067e 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -281,6 +281,18 @@ static const struct rte_pci_id pci_id_igbvf_map[] = {
 	{0},
 };
 
+static const struct rte_eth_desc_lim rx_desc_lim = {
+	.nb_max = E1000_MAX_RING_DESC,
+	.nb_min = E1000_MIN_RING_DESC,
+	.nb_align = IGB_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = E1000_MAX_RING_DESC,
+	.nb_min = E1000_MIN_RING_DESC,
+	.nb_align = IGB_TXD_ALIGN,
+};
+
 static const struct eth_dev_ops eth_igb_ops = {
 	.dev_configure = eth_igb_configure,
 	.dev_start = eth_igb_start,
@@ -319,6 +331,8 @@ static const struct eth_dev_ops eth_igb_ops = {
 	.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
 	.filter_ctrl = eth_igb_filter_ctrl,
 	.set_mc_addr_list = eth_igb_set_mc_addr_list,
+	.rxq_info_get = igb_rxq_info_get,
+	.txq_info_get = igb_txq_info_get,
 	.timesync_enable = igb_timesync_enable,
 	.timesync_disable = igb_timesync_disable,
 	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
@@ -349,6 +363,8 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
 	.tx_queue_setup = eth_igb_tx_queue_setup,
 	.tx_queue_release = eth_igb_tx_queue_release,
 	.set_mc_addr_list = eth_igb_set_mc_addr_list,
+	.rxq_info_get = igb_rxq_info_get,
+	.txq_info_get = igb_txq_info_get,
 	.mac_addr_set = igbvf_default_mac_addr_set,
 	.get_reg_length = igbvf_get_reg_length,
 	.get_reg = igbvf_get_regs,
@@ -1570,6 +1586,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.txq_flags = 0,
 	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 static void
@@ -1621,6 +1640,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.txq_flags = 0,
 	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 19905fd..cca3300 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1148,25 +1148,12 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 /*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define IGB_ALIGN 128
-
-/*
  * Maximum number of Ring Descriptors.
 *
  * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring
 * desscriptors should meet the following condition:
 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
-#define IGB_MIN_RING_DESC 32
-#define IGB_MAX_RING_DESC 4096
-
 static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 		uint16_t queue_id, uint32_t ring_size, int socket_id)
@@ -1183,10 +1170,10 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 
 #ifdef RTE_LIBRTE_XEN_DOM0
 	return rte_memzone_reserve_bounded(z_name, ring_size,
-			socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
+			socket_id, 0, E1000_ALIGN, RTE_PGSIZE_2M);
 #else
 	return rte_memzone_reserve_aligned(z_name, ring_size,
-			socket_id, 0, IGB_ALIGN);
+			socket_id, 0, E1000_ALIGN);
 #endif
 }
 
@@ -1282,10 +1269,11 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of IGB_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
-			(nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+	if (nb_desc % IGB_TXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return -EINVAL;
 	}
@@ -1321,7 +1309,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
+	size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
 	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
 			size, socket_id);
 	if (tz == NULL) {
@@ -1430,10 +1418,11 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of IGB_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
-			(nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+	if (nb_desc % IGB_RXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return (-EINVAL);
 	}
@@ -1469,7 +1458,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
+	size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
 	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
 	if (rz == NULL) {
 		igb_rx_queue_release(rxq);
@@ -2482,3 +2471,34 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
 	}
 
 }
+
+void
+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct igb_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+}
+
+void
+igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct igb_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+}
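
For context, and not part of the patch: a minimal sketch of how an application could consume what this patch exposes, assuming the generic ethdev API introduced earlier in this series (DPDK 2.2-era uint8_t port ids, rx_desc_lim/tx_desc_lim in struct rte_eth_dev_info, and rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get(), which dispatch to the rxq_info_get/txq_info_get callbacks added above). The helper names, port id and queue id are illustrative only.

/*
 * Illustrative sketch only; assumes the ethdev API from this patch series.
 */
#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Clamp a requested RX ring size to the limits the PMD reports. */
static uint16_t
clamp_nb_rxd(uint8_t port_id, uint16_t requested)
{
	struct rte_eth_dev_info dev_info;
	uint16_t nb_rxd = requested;

	rte_eth_dev_info_get(port_id, &dev_info);

	nb_rxd = RTE_MAX(nb_rxd, dev_info.rx_desc_lim.nb_min);
	nb_rxd = RTE_MIN(nb_rxd, dev_info.rx_desc_lim.nb_max);
	/* nb_align is 8 for em/igb (128 / 16-byte descriptor), a power of two. */
	nb_rxd = RTE_ALIGN_CEIL(nb_rxd, dev_info.rx_desc_lim.nb_align);

	return nb_rxd;
}

/* Print what the PMD's rxq_info_get callback reports for one queue. */
static void
dump_rxq_info(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;

	printf("port %u rxq %u: nb_desc=%u scattered_rx=%u rx_free_thresh=%u\n",
		port_id, queue_id, qinfo.nb_desc, qinfo.scattered_rx,
		qinfo.conf.rx_free_thresh);
}

The point of filling in rx_desc_lim/tx_desc_lim is exactly this kind of up-front validation: an application can round its requested ring size to nb_align and clamp it to nb_min/nb_max before calling the queue setup functions, instead of discovering the limits through the -EINVAL paths reworked in the hunks above.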