From patchwork Mon Aug 25 06:12:50 2014 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Cunming Liang X-Patchwork-Id: 203 Return-Path: Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by dpdk.org (Postfix) with ESMTP id 7DDA158D2 for ; Mon, 25 Aug 2014 08:09:37 +0200 (CEST) Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga103.fm.intel.com with ESMTP; 24 Aug 2014 23:05:34 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.04,395,1406617200"; d="scan'208";a="581204286" Received: from shvmail01.sh.intel.com ([10.239.29.42]) by fmsmga001.fm.intel.com with ESMTP; 24 Aug 2014 23:13:28 -0700 Received: from shecgisg004.sh.intel.com (shecgisg004.sh.intel.com [10.239.29.89]) by shvmail01.sh.intel.com with ESMTP id s7P6DRSD011960; Mon, 25 Aug 2014 14:13:27 +0800 Received: from shecgisg004.sh.intel.com (localhost [127.0.0.1]) by shecgisg004.sh.intel.com (8.13.6/8.13.6/SuSE Linux 0.8) with ESMTP id s7P6DNvx011382; Mon, 25 Aug 2014 14:13:25 +0800 Received: (from cliang18@localhost) by shecgisg004.sh.intel.com (8.13.6/8.13.6/Submit) id s7P6DN1C011378; Mon, 25 Aug 2014 14:13:23 +0800 From: Cunming Liang To: dev@dpdk.org Date: Mon, 25 Aug 2014 14:12:50 +0800 Message-Id: <1408947174-11323-2-git-send-email-cunming.liang@intel.com> X-Mailer: git-send-email 1.7.0.7 In-Reply-To: <1408947174-11323-1-git-send-email-cunming.liang@intel.com> References: <1408947174-11323-1-git-send-email-cunming.liang@intel.com> Subject: [dpdk-dev] [PATCH 1/5] app/test: unit test for rx and tx cycles/packet X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Mon, 25 Aug 2014 06:09:38 -0000 Signed-off-by: Cunming Liang Acked-by: Bruce Richardson --- app/test/Makefile | 1 + app/test/commands.c | 41 +++ app/test/packet_burst_generator.c | 4 +- app/test/test.h | 
6 + app/test/test_pmd_perf.c | 620 ++++++++++++++++++++++++++++++++++++ lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 6 + 6 files changed, 676 insertions(+), 2 deletions(-) create mode 100644 app/test/test_pmd_perf.c diff --git a/app/test/Makefile b/app/test/Makefile index 0024737..8418b2e 100644 --- a/app/test/Makefile +++ b/app/test/Makefile @@ -104,6 +104,7 @@ SRCS-$(CONFIG_RTE_APP_TEST) += test_distributor_perf.c SRCS-$(CONFIG_RTE_APP_TEST) += test_devargs.c SRCS-$(CONFIG_RTE_APP_TEST) += virtual_pmd.c SRCS-$(CONFIG_RTE_APP_TEST) += packet_burst_generator.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_pmd_perf.c ifeq ($(CONFIG_RTE_APP_TEST),y) SRCS-$(CONFIG_RTE_LIBRTE_ACL) += test_acl.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += test_link_bonding.c diff --git a/app/test/commands.c b/app/test/commands.c index 5f23420..d0e583e 100644 --- a/app/test/commands.c +++ b/app/test/commands.c @@ -204,6 +204,8 @@ static void cmd_autotest_parsed(void *parsed_result, if (!strcmp(res->autotest, "kvargs_autotest")) ret |= test_kvargs(); #endif /* RTE_LIBRTE_KVARGS */ + if (!strcmp(res->autotest, "pmd_perf_autotest")) + ret = test_pmd_perf(); if (ret == 0) printf("Test OK\n"); @@ -251,6 +253,7 @@ cmdline_parse_token_string_t cmd_autotest_autotest = #ifdef RTE_LIBRTE_KVARGS "kvargs_autotest#" #endif + "pmd_perf_autotest#" "common_autotest#" "distributor_autotest#distributor_perf_autotest"); @@ -451,12 +454,50 @@ cmdline_parse_inst_t cmd_quit = { /****************/ +/****************/ + +struct cmd_set_rxtx_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t mode; +}; + +static void cmd_set_rxtx_parsed(void *parsed_result, struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_set_rxtx_result *res = parsed_result; + if (test_set_rxtx_conf(res->mode) < 0) + cmdline_printf(cl, "Cannot find such mode\n"); +} + +cmdline_parse_token_string_t cmd_set_rxtx_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_rxtx_result, set, + "set_rxtx_mode"); + +cmdline_parse_token_string_t 
cmd_set_rxtx_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_rxtx_result, mode, NULL); + +cmdline_parse_inst_t cmd_set_rxtx = { + .f = cmd_set_rxtx_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "set rxtx routine: " + "set_rxtx ", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_set_rxtx_set, + (void *)&cmd_set_rxtx_mode, + NULL, + }, +}; + +/****************/ + + cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_autotest, (cmdline_parse_inst_t *)&cmd_dump, (cmdline_parse_inst_t *)&cmd_dump_one, (cmdline_parse_inst_t *)&cmd_set_ring, (cmdline_parse_inst_t *)&cmd_quit, + (cmdline_parse_inst_t *)&cmd_set_rxtx, NULL, }; diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c index 5d539f1..943ea98 100644 --- a/app/test/packet_burst_generator.c +++ b/app/test/packet_burst_generator.c @@ -190,12 +190,12 @@ initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr, */ #define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. 
*/ -#define TXONLY_DEF_PACKET_LEN 64 +#define TXONLY_DEF_PACKET_LEN 60 #define TXONLY_DEF_PACKET_LEN_128 128 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { - TXONLY_DEF_PACKET_LEN_128, + TXONLY_DEF_PACKET_LEN, }; uint8_t tx_pkt_nb_segs = 1; diff --git a/app/test/test.h b/app/test/test.h index 181c38e..ad01eec 100644 --- a/app/test/test.h +++ b/app/test/test.h @@ -123,6 +123,9 @@ int unit_test_suite_runner(struct unit_test_suite *suite); #define RECURSIVE_ENV_VAR "RTE_TEST_RECURSIVE" +#include +#include + extern const char *prgname; int main(int argc, char **argv); @@ -180,6 +183,9 @@ int test_distributor_perf(void); int test_kvargs(void); int test_devargs(void); int test_link_bonding(void); +int test_pmd_perf(void); +int test_set_rxtx_conf(cmdline_fixed_string_t mode); + int test_pci_run; diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c new file mode 100644 index 0000000..ee527d3 --- /dev/null +++ b/app/test/test_pmd_perf.c @@ -0,0 +1,620 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include "packet_burst_generator.h" +#include "test.h" + +#define NB_ETHPORTS_USED (1) +#define NB_SOCKETS (2) +#define MEMPOOL_CACHE_SIZE 250 +#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define MAX_PKT_BURST (32) +#define RTE_TEST_RX_DESC_DEFAULT (128) +#define RTE_TEST_TX_DESC_DEFAULT (512) +#define RTE_PORT_ALL (~(uint8_t)0x0) + +/* how long test would take at full line rate */ +#define RTE_TEST_DURATION (2) + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ +#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ +#define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */ + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. 
Consider using other values for other + * network controllers and/or network drivers. + */ +#define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */ +#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ +#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */ + + +#define NB_MBUF RTE_MAX( \ + (unsigned)(nb_ports*nb_rx_queue*RTE_TEST_RX_DESC_DEFAULT + \ + nb_ports*nb_lcores*MAX_PKT_BURST + \ + nb_ports*nb_tx_queue*RTE_TEST_TX_DESC_DEFAULT + \ + nb_lcores*MEMPOOL_CACHE_SIZE), \ + (unsigned)8192) + + +static struct rte_mempool *mbufpool[NB_SOCKETS]; +/* ethernet addresses of ports */ +static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS]; + +static struct rte_eth_conf port_conf = { + .rxmode = { + .mq_mode = ETH_MQ_RX_NONE, + .max_rx_pkt_len = ETHER_MAX_LEN, + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 0, /**< IP checksum offload disabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .hw_vlan_strip = 0, /**< VLAN strip disabled. */ + .hw_vlan_extend = 0, /**< Extended VLAN disabled. 
*/ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + .hw_strip_crc = 0, /**< CRC stripped by hardware */ + .enable_scatter = 0, /**< scatter rx disabled */ + }, + .txmode = { + .mq_mode = ETH_MQ_TX_NONE, + }, + .lpbk_mode = 1, /* enable loopback */ +}; + +static struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = RX_PTHRESH, + .hthresh = RX_HTHRESH, + .wthresh = RX_WTHRESH, + }, + .rx_free_thresh = 32, +}; + +static struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = TX_PTHRESH, + .hthresh = TX_HTHRESH, + .wthresh = TX_WTHRESH, + }, + .tx_free_thresh = 32, /* Use PMD default values */ + .tx_rs_thresh = 32, /* Use PMD default values */ + .txq_flags = (ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOVLANOFFL | + ETH_TXQ_FLAGS_NOXSUMSCTP | + ETH_TXQ_FLAGS_NOXSUMUDP | + ETH_TXQ_FLAGS_NOXSUMTCP) +}; + +enum { + LCORE_INVALID = 0, + LCORE_AVAIL, + LCORE_USED, +}; + +struct lcore_conf { + uint8_t status; + uint8_t socketid; + uint16_t nb_ports; + uint8_t portlist[RTE_MAX_ETHPORTS]; +} __rte_cache_aligned; + +struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + +static uint64_t link_mbps; + +/* Check the link status of all ports in up to 3s, and print them finally */ +static void +check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) +{ +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */ + uint8_t portid, count, all_ports_up, print_flag = 0; + struct rte_eth_link link; + + printf("Checking link statuses...\n"); + fflush(stdout); + for (count = 0; count <= MAX_CHECK_TIME; count++) { + all_ports_up = 1; + for (portid = 0; portid < port_num; portid++) { + if ((port_mask & (1 << portid)) == 0) + continue; + memset(&link, 0, sizeof(link)); + rte_eth_link_get_nowait(portid, &link); + /* print link status if flag set */ + if (print_flag == 1) { + if (link.link_status) { + printf("Port %d Link Up - speed %u " + "Mbps - %s\n", (uint8_t)portid, + (unsigned)link.link_speed, + (link.link_duplex == 
ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex\n")); + if (link_mbps == 0) + link_mbps = link.link_speed; + } else + printf("Port %d Link Down\n", + (uint8_t)portid); + continue; + } + /* clear all_ports_up flag if any link down */ + if (link.link_status == 0) { + all_ports_up = 0; + break; + } + } + /* after finally printing all link status, get out */ + if (print_flag == 1) + break; + + if (all_ports_up == 0) { + fflush(stdout); + rte_delay_ms(CHECK_INTERVAL); + } + + /* set the print_flag if all ports up or timeout */ + if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) + print_flag = 1; + } +} + +static void +print_ethaddr(const char *name, const struct ether_addr *eth_addr) +{ + printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name, + eth_addr->addr_bytes[0], + eth_addr->addr_bytes[1], + eth_addr->addr_bytes[2], + eth_addr->addr_bytes[3], + eth_addr->addr_bytes[4], + eth_addr->addr_bytes[5]); +} + +static int +init_traffic(struct rte_mempool *mp, + struct rte_mbuf **pkts_burst, uint32_t burst_size) +{ + struct ether_hdr pkt_eth_hdr; + struct ipv4_hdr pkt_ipv4_hdr; + struct udp_hdr pkt_udp_hdr; + uint32_t pktlen; + static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF }; + static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA }; + + + initialize_eth_header(&pkt_eth_hdr, + (struct ether_addr *)src_mac, + (struct ether_addr *)dst_mac, 0, 0); + pkt_eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + + pktlen = initialize_ipv4_header(&pkt_ipv4_hdr, + IPV4_ADDR(10, 0, 0, 1), + IPV4_ADDR(10, 0, 0, 2), 26); + printf("IPv4 pktlen %u\n", pktlen); + + pktlen = initialize_udp_header(&pkt_udp_hdr, 0, 0, 18); + + printf("UDP pktlen %u\n", pktlen); + + return generate_packet_burst(mp, pkts_burst, &pkt_eth_hdr, + 0, &pkt_ipv4_hdr, 1, + &pkt_udp_hdr, burst_size); +} + +static int +init_lcores(void) +{ + unsigned lcore_id; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + lcore_conf[lcore_id].socketid = + 
rte_lcore_to_socket_id(lcore_id); + if (rte_lcore_is_enabled(lcore_id) == 0) { + lcore_conf[lcore_id].status = LCORE_INVALID; + continue; + } else + lcore_conf[lcore_id].status = LCORE_AVAIL; + } + return 0; +} + +static int +init_mbufpool(unsigned nb_mbuf) +{ + int socketid; + unsigned lcore_id; + char s[64]; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + + socketid = rte_lcore_to_socket_id(lcore_id); + if (socketid >= NB_SOCKETS) { + rte_exit(EXIT_FAILURE, + "Socket %d of lcore %u is out of range %d\n", + socketid, lcore_id, NB_SOCKETS); + } + if (mbufpool[socketid] == NULL) { + snprintf(s, sizeof(s), "mbuf_pool_%d", socketid); + mbufpool[socketid] = + rte_mempool_create(s, nb_mbuf, MBUF_SIZE, + MEMPOOL_CACHE_SIZE, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + socketid, 0); + if (mbufpool[socketid] == NULL) + rte_exit(EXIT_FAILURE, + "Cannot init mbuf pool on socket %d\n", + socketid); + else + printf("Allocated mbuf pool on socket %d\n", + socketid); + } + } + return 0; +} + +static uint16_t +alloc_lcore(uint16_t socketid) +{ + unsigned lcore_id; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (LCORE_AVAIL != lcore_conf[lcore_id].status || + lcore_conf[lcore_id].socketid != socketid || + lcore_id == rte_get_master_lcore()) + continue; + lcore_conf[lcore_id].status = LCORE_USED; + lcore_conf[lcore_id].nb_ports = 0; + return lcore_id; + } + + return (uint16_t)-1; +} + +volatile uint64_t stop; +uint64_t count; +uint64_t drop; +uint64_t idle; + +static void +reset_count(void) +{ + count = 0; + drop = 0; + idle = 0; +} + +static void +stats_display(uint8_t port_id) +{ + struct rte_eth_stats stats; + rte_eth_stats_get(port_id, &stats); + + printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " + "%-"PRIu64"\n", + stats.ipackets, stats.imissed, stats.ibytes); + printf(" RX-badcrc: %-10"PRIu64" RX-badlen: 
%-10"PRIu64" RX-errors: " + "%-"PRIu64"\n", + stats.ibadcrc, stats.ibadlen, stats.ierrors); + printf(" RX-nombuf: %-10"PRIu64"\n", + stats.rx_nombuf); + printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " + "%-"PRIu64"\n", + stats.opackets, stats.oerrors, stats.obytes); +} + +static void +signal_handler(int signum) +{ + /* When we receive a USR1 signal, print stats */ + if (signum == SIGUSR1) { + printf("Force Stop!\n"); + stop = 1; + } + if (signum == SIGUSR2) + stats_display(0); +} + +#define MAX_TRAFIC_BURST (4096) +struct rte_mbuf *tx_burst[MAX_TRAFIC_BURST]; + +/* main processing loop */ +static int +main_loop(__rte_unused void *args) +{ +#define PACKET_SIZE 64 +#define FRAME_GAP 12 +#define MAC_PREAMBLE 8 + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + unsigned lcore_id; + unsigned i, portid, nb_rx = 0, nb_tx = 0; + struct lcore_conf *conf; + uint64_t prev_tsc, cur_tsc; + int pkt_per_port; + uint64_t packets_per_second, total_packets; + + lcore_id = rte_lcore_id(); + conf = &lcore_conf[lcore_id]; + if (conf->status != LCORE_USED) + return 0; + + pkt_per_port = MAX_TRAFIC_BURST / conf->nb_ports; + + int idx = 0; + for (i = 0; i < conf->nb_ports; i++) { + int num = pkt_per_port; + portid = conf->portlist[i]; + printf("inject %d packet to port %d\n", num, portid); + while (num) { + nb_tx = RTE_MIN(MAX_PKT_BURST, num); + nb_tx = rte_eth_tx_burst(portid, 0, + &tx_burst[idx], nb_tx); + num -= nb_tx; + idx += nb_tx; + } + } + printf("Total packets inject to prime ports = %u\n", idx); + + packets_per_second = (link_mbps * 1000 * 1000) / + +((PACKET_SIZE + FRAME_GAP + MAC_PREAMBLE) * CHAR_BIT); + printf("Each port will do %"PRIu64" packets per second\n", + +packets_per_second); + + total_packets = RTE_TEST_DURATION * conf->nb_ports * packets_per_second; + printf("Test will stop after at least %"PRIu64" packets received\n", + + total_packets); + + prev_tsc = rte_rdtsc(); + + while (likely(!stop)) { + for (i = 0; i < conf->nb_ports; i++) { + portid = 
conf->portlist[i]; + nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, + pkts_burst, MAX_PKT_BURST); + if (unlikely(nb_rx == 0)) { + idle++; + continue; + } + + count += nb_rx; + nb_tx = rte_eth_tx_burst(portid, 0, pkts_burst, nb_rx); + if (unlikely(nb_tx < nb_rx)) { + drop += (nb_rx - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_rx); + } + } + if (unlikely(count >= total_packets)) + break; + } + + cur_tsc = rte_rdtsc(); + + for (i = 0; i < conf->nb_ports; i++) { + portid = conf->portlist[i]; + int nb_free = pkt_per_port; + do { /* dry out */ + nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, + pkts_burst, MAX_PKT_BURST); + nb_tx = 0; + while (nb_tx < nb_rx) + rte_pktmbuf_free(pkts_burst[nb_tx++]); + nb_free -= nb_rx; + } while (nb_free != 0); + printf("free %d mbuf left in port %u\n", pkt_per_port, portid); + } + + if (count == 0) + return -1; + + printf("%lu packet, %lu drop, %lu idle\n", count, drop, idle); + printf("Result: %ld cycles per packet\n", (cur_tsc - prev_tsc) / count); + + return 0; +} + +int +test_pmd_perf(void) +{ + uint16_t nb_ports, num, nb_lcores, slave_id = (uint16_t)-1; + uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; + uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; + uint16_t portid; + uint16_t nb_rx_queue = 1, nb_tx_queue = 1; + int socketid = -1; + int ret; + + printf("Start PMD RXTX cycles cost test.\n"); + + signal(SIGUSR1, signal_handler); + signal(SIGUSR2, signal_handler); + + nb_ports = rte_eth_dev_count(); + if (nb_ports < NB_ETHPORTS_USED) { + printf("At least %u port(s) used for perf. 
test\n", + NB_ETHPORTS_USED); + return -1; + } + + if (nb_ports > RTE_MAX_ETHPORTS) + nb_ports = RTE_MAX_ETHPORTS; + + nb_lcores = rte_lcore_count(); + + memset(lcore_conf, 0, sizeof(lcore_conf)); + init_lcores(); + + init_mbufpool(NB_MBUF); + + reset_count(); + num = 0; + for (portid = 0; portid < nb_ports; portid++) { + if (socketid == -1) { + socketid = rte_eth_dev_socket_id(portid); + slave_id = alloc_lcore(socketid); + if (slave_id == (uint16_t)-1) { + printf("No avail lcore to run test\n"); + return -1; + } + printf("Performance test runs on lcore %u socket %u\n", + slave_id, socketid); + } + + if (socketid != rte_eth_dev_socket_id(portid)) { + printf("Skip port %d\n", portid); + continue; + } + + /* port configure */ + ret = rte_eth_dev_configure(portid, nb_rx_queue, + nb_tx_queue, &port_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "Cannot configure device: err=%d, port=%d\n", + ret, portid); + + rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); + printf("Port %u ", portid); + print_ethaddr("Address:", &ports_eth_addr[portid]); + printf("\n"); + + /* tx queue setup */ + ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, + socketid, &tx_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "rte_eth_tx_queue_setup: err=%d, " + "port=%d\n", ret, portid); + + /* rx queue setup */ + ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, + socketid, &rx_conf, + mbufpool[socketid]); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," + "port=%d\n", ret, portid); + + /* Start device */ + stop = 0; + ret = rte_eth_dev_start(portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, + "rte_eth_dev_start: err=%d, port=%d\n", + ret, portid); + + /* always enable promiscuous mode */ + rte_eth_promiscuous_enable(portid); + + lcore_conf[slave_id].portlist[num++] = portid; + lcore_conf[slave_id].nb_ports++; + } + check_all_ports_link_status(nb_ports, RTE_PORT_ALL); + + init_traffic(mbufpool[socketid], tx_burst, MAX_TRAFIC_BURST); + + rte_eal_remote_launch(main_loop, NULL, 
slave_id); + if (rte_eal_wait_lcore(slave_id) < 0) + return -1; + + /* port tear down */ + for (portid = 0; portid < nb_ports; portid++) { + if (socketid != rte_eth_dev_socket_id(portid)) + continue; + + rte_eth_dev_stop(portid); + } + + return 0; +} + +int +test_set_rxtx_conf(cmdline_fixed_string_t mode) +{ + printf("mode is %s\n", mode); + + if (!strcmp(mode, "vector")) { + /* vector rx, tx */ + tx_conf.txq_flags = 0xf01; + tx_conf.tx_rs_thresh = 32; + tx_conf.tx_free_thresh = 32; + port_conf.rxmode.hw_ip_checksum = 0; + port_conf.rxmode.enable_scatter = 0; + return 0; + } else if (!strcmp(mode, "scalar")) { + /* bulk alloc rx, simple tx */ + tx_conf.txq_flags = 0xf01; + tx_conf.tx_rs_thresh = 128; + tx_conf.tx_free_thresh = 128; + port_conf.rxmode.hw_ip_checksum = 1; + port_conf.rxmode.enable_scatter = 0; + return 0; + } else if (!strcmp(mode, "hybrid")) { + /* bulk alloc rx, vector tx + * when vec macro not define, + * using the same rx/tx as scalar + */ + tx_conf.txq_flags = 0xf01; + tx_conf.tx_rs_thresh = 32; + tx_conf.tx_free_thresh = 32; + port_conf.rxmode.hw_ip_checksum = 1; + port_conf.rxmode.enable_scatter = 0; + return 0; + } else if (!strcmp(mode, "full")) { + /* full feature rx,tx pair */ + tx_conf.txq_flags = 0x0; /* must condition */ + tx_conf.tx_rs_thresh = 32; + tx_conf.tx_free_thresh = 32; + port_conf.rxmode.hw_ip_checksum = 0; + port_conf.rxmode.enable_scatter = 1; /* must condition */ + return 0; + } + + return -1; +} diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c index 59122a1..c581dbf 100644 --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c @@ -1582,6 +1582,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) ixgbe_dev_clear_queues(dev); + /* Clear stored conf */ + dev->data->scattered_rx = 0; + /* Clear recorded link status */ memset(&link, 0, sizeof(link)); rte_ixgbe_dev_atomic_write_link_status(dev, &link); @@ -2853,6 +2856,9 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) */ 
ixgbevf_set_vfta_all(dev,0); + /* Clear stored conf */ + dev->data->scattered_rx = 0; + ixgbe_dev_clear_queues(dev); }