From patchwork Fri Sep 17 16:41:35 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Kevin Laatz X-Patchwork-Id: 99245 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id F04E6A0C43; Fri, 17 Sep 2021 18:42:20 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id A56884115F; Fri, 17 Sep 2021 18:42:00 +0200 (CEST) Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by mails.dpdk.org (Postfix) with ESMTP id 01D6E4113D for ; Fri, 17 Sep 2021 18:41:56 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10110"; a="222491279" X-IronPort-AV: E=Sophos;i="5.85,301,1624345200"; d="scan'208";a="222491279" Received: from fmsmga003.fm.intel.com ([10.253.24.29]) by fmsmga103.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 17 Sep 2021 09:41:56 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.85,301,1624345200"; d="scan'208";a="546488181" Received: from silpixa00401122.ir.intel.com ([10.55.128.10]) by FMSMGA003.fm.intel.com with ESMTP; 17 Sep 2021 09:41:55 -0700 From: Kevin Laatz To: dev@dpdk.org Cc: bruce.richardson@intel.com, fengchengwen@huawei.com, conor.walsh@intel.com, Kevin Laatz Date: Fri, 17 Sep 2021 16:41:35 +0000 Message-Id: <20210917164136.3499904-6-kevin.laatz@intel.com> X-Mailer: git-send-email 2.30.2 In-Reply-To: <20210917164136.3499904-1-kevin.laatz@intel.com> References: <20210910172737.2561156-1-kevin.laatz@intel.com> <20210917164136.3499904-1-kevin.laatz@intel.com> MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v2 5/6] examples/ioat: update naming to match change to dmadev X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Existing functions, structures, defines etc need to be updated to reflect the change to using the dmadev APIs. Signed-off-by: Kevin Laatz Reviewed-by: Conor Walsh --- examples/ioat/ioatfwd.c | 175 ++++++++++++++++++++-------------------- 1 file changed, 87 insertions(+), 88 deletions(-) diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c index df6a28f9e5..d4bff58633 100644 --- a/examples/ioat/ioatfwd.c +++ b/examples/ioat/ioatfwd.c @@ -52,13 +52,13 @@ struct rxtx_transmission_config { /* >8 End of configuration of ports and number of assigned lcores. 
*/ /* per-port statistics struct */ -struct ioat_port_statistics { +struct dma_port_statistics { uint64_t rx[RTE_MAX_ETHPORTS]; uint64_t tx[RTE_MAX_ETHPORTS]; uint64_t tx_dropped[RTE_MAX_ETHPORTS]; uint64_t copy_dropped[RTE_MAX_ETHPORTS]; }; -struct ioat_port_statistics port_statistics; +struct dma_port_statistics port_statistics; struct total_statistics { uint64_t total_packets_dropped; uint64_t total_packets_tx; @@ -71,14 +71,14 @@ struct total_statistics { typedef enum copy_mode_t { #define COPY_MODE_SW "sw" COPY_MODE_SW_NUM, -#define COPY_MODE_IOAT "hw" - COPY_MODE_IOAT_NUM, +#define COPY_MODE_DMA "hw" + COPY_MODE_DMA_NUM, COPY_MODE_INVALID_NUM, COPY_MODE_SIZE_NUM = COPY_MODE_INVALID_NUM } copy_mode_t; /* mask of enabled ports */ -static uint32_t ioat_enabled_port_mask; +static uint32_t dma_enabled_port_mask; /* number of RX queues per port */ static uint16_t nb_queues = 1; @@ -87,9 +87,9 @@ static uint16_t nb_queues = 1; static int mac_updating = 1; /* hardare copy mode enabled by default. */ -static copy_mode_t copy_mode = COPY_MODE_IOAT_NUM; +static copy_mode_t copy_mode = COPY_MODE_DMA_NUM; -/* size of IOAT rawdev ring for hardware copy mode or +/* size of descriptor ring for hardware copy mode or * rte_ring for software copy mode */ static unsigned short ring_size = 2048; @@ -113,14 +113,14 @@ static uint16_t nb_txd = TX_DEFAULT_RINGSIZE; static volatile bool force_quit; -static uint32_t ioat_batch_sz = MAX_PKT_BURST; +static uint32_t dma_batch_sz = MAX_PKT_BURST; static uint32_t max_frame_size = RTE_ETHER_MAX_LEN; /* ethernet addresses of ports */ -static struct rte_ether_addr ioat_ports_eth_addr[RTE_MAX_ETHPORTS]; +static struct rte_ether_addr dma_ports_eth_addr[RTE_MAX_ETHPORTS]; static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS]; -struct rte_mempool *ioat_pktmbuf_pool; +struct rte_mempool *dma_pktmbuf_pool; /* Print out statistics for one port. */ static void @@ -138,7 +138,7 @@ print_port_stats(uint16_t port_id) port_statistics.copy_dropped[port_id]); } -/* Print out statistics for one IOAT rawdev device. */ +/* Print out statistics for one dmadev device. */ static void print_dmadev_stats(uint32_t dev_id, struct rte_dma_stats stats) { @@ -159,7 +159,7 @@ print_total_stats(struct total_statistics *ts) ts->total_packets_rx, ts->total_packets_dropped); - if (copy_mode == COPY_MODE_IOAT_NUM) { + if (copy_mode == COPY_MODE_DMA_NUM) { printf("\nTotal submitted ops: %19"PRIu64" [ops/s]" "\nTotal completed ops: %19"PRIu64" [ops/s]" "\nTotal failed ops: %22"PRIu64" [ops/s]", @@ -193,7 +193,7 @@ print_stats(char *prgname) status_strlen += snprintf(status_string + status_strlen, sizeof(status_string) - status_strlen, "Copy Mode = %s,\n", copy_mode == COPY_MODE_SW_NUM ? - COPY_MODE_SW : COPY_MODE_IOAT); + COPY_MODE_SW : COPY_MODE_DMA); status_strlen += snprintf(status_string + status_strlen, sizeof(status_string) - status_strlen, "Updating MAC = %s, ", mac_updating ? @@ -232,7 +232,7 @@ print_stats(char *prgname) delta_ts.total_packets_rx += port_statistics.rx[port_id]; - if (copy_mode == COPY_MODE_IOAT_NUM) { + if (copy_mode == COPY_MODE_DMA_NUM) { uint32_t j; for (j = 0; j < cfg.ports[i].nb_queues; j++) { @@ -283,7 +283,7 @@ update_mac_addrs(struct rte_mbuf *m, uint32_t dest_portid) *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40); /* src addr */ - rte_ether_addr_copy(&ioat_ports_eth_addr[dest_portid], ð->s_addr); + rte_ether_addr_copy(&dma_ports_eth_addr[dest_portid], ð->s_addr); } /* Perform packet copy there is a user-defined function. 
8< */ @@ -306,7 +306,7 @@ pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst) /* >8 End of perform packet copy there is a user-defined function. */ static uint32_t -ioat_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[], +dma_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[], uint32_t nb_rx, uint16_t dev_id) { struct dma_bufs *dma = &dma_bufs[dev_id]; @@ -332,7 +332,7 @@ ioat_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[], } static inline uint32_t -ioat_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[], +dma_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[], uint32_t num, uint32_t step, uint16_t dev_id) { uint32_t i, k, m, n; @@ -341,7 +341,7 @@ ioat_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[], for (i = 0; i < num; i += m) { m = RTE_MIN(step, num - i); - n = ioat_enqueue_packets(pkts + i, pkts_copy + i, m, dev_id); + n = dma_enqueue_packets(pkts + i, pkts_copy + i, m, dev_id); k += n; if (n > 0) rte_dma_submit(dev_id, 0); @@ -355,12 +355,12 @@ ioat_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[], } static inline uint32_t -ioat_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num, +dma_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num, uint16_t dev_id) { struct dma_bufs *dma = &dma_bufs[dev_id]; uint16_t nb_dq, filled; - /* Dequeue the mbufs from IOAT device. Since all memory + /* Dequeue the mbufs from DMA device. Since all memory * is DPDK pinned memory and therefore all addresses should * be valid, we don't check for copy errors */ @@ -370,7 +370,7 @@ ioat_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num, if (unlikely(nb_dq == 0)) return nb_dq; - /* Populate pkts_copy with the copies bufs from dma->copies */ + /* Populate pkts_copy with the copies bufs from dma->copies for tx */ for (filled = 0; filled < nb_dq; filled++) { src[filled] = dma->bufs[(dma->sent + filled) & MBUF_RING_MASK]; dst[filled] = dma->copies[(dma->sent + filled) & MBUF_RING_MASK]; @@ -381,9 +381,9 @@ ioat_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num, } -/* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. 8< */ +/* Receive packets on one port and enqueue to dmadev or rte_ring. 8< */ static void -ioat_rx_port(struct rxtx_port_config *rx_config) +dma_rx_port(struct rxtx_port_config *rx_config) { int32_t ret; uint32_t nb_rx, nb_enq, i, j; @@ -400,7 +400,7 @@ ioat_rx_port(struct rxtx_port_config *rx_config) port_statistics.rx[rx_config->rxtx_port] += nb_rx; - ret = rte_mempool_get_bulk(ioat_pktmbuf_pool, + ret = rte_mempool_get_bulk(dma_pktmbuf_pool, (void *)pkts_burst_copy, nb_rx); if (unlikely(ret < 0)) @@ -411,17 +411,16 @@ ioat_rx_port(struct rxtx_port_config *rx_config) pktmbuf_metadata_copy(pkts_burst[j], pkts_burst_copy[j]); - if (copy_mode == COPY_MODE_IOAT_NUM) { - + if (copy_mode == COPY_MODE_DMA_NUM) { /* enqueue packets for hardware copy */ - nb_enq = ioat_enqueue(pkts_burst, pkts_burst_copy, - nb_rx, ioat_batch_sz, rx_config->dmadev_ids[i]); + nb_enq = dma_enqueue(pkts_burst, pkts_burst_copy, + nb_rx, dma_batch_sz, rx_config->dmadev_ids[i]); /* free any not enqueued packets. 
*/ - rte_mempool_put_bulk(ioat_pktmbuf_pool, + rte_mempool_put_bulk(dma_pktmbuf_pool, (void *)&pkts_burst[nb_enq], nb_rx - nb_enq); - rte_mempool_put_bulk(ioat_pktmbuf_pool, + rte_mempool_put_bulk(dma_pktmbuf_pool, (void *)&pkts_burst_copy[nb_enq], nb_rx - nb_enq); @@ -429,7 +428,7 @@ ioat_rx_port(struct rxtx_port_config *rx_config) (nb_rx - nb_enq); /* get completed copies */ - nb_rx = ioat_dequeue(pkts_burst, pkts_burst_copy, + nb_rx = dma_dequeue(pkts_burst, pkts_burst_copy, MAX_PKT_BURST, rx_config->dmadev_ids[i]); } else { /* Perform packet software copy, free source packets */ @@ -438,14 +437,14 @@ ioat_rx_port(struct rxtx_port_config *rx_config) pkts_burst_copy[j]); } - rte_mempool_put_bulk(ioat_pktmbuf_pool, + rte_mempool_put_bulk(dma_pktmbuf_pool, (void *)pkts_burst, nb_rx); nb_enq = rte_ring_enqueue_burst(rx_config->rx_to_tx_ring, (void *)pkts_burst_copy, nb_rx, NULL); /* Free any not enqueued packets. */ - rte_mempool_put_bulk(ioat_pktmbuf_pool, + rte_mempool_put_bulk(dma_pktmbuf_pool, (void *)&pkts_burst_copy[nb_enq], nb_rx - nb_enq); @@ -453,11 +452,11 @@ ioat_rx_port(struct rxtx_port_config *rx_config) (nb_rx - nb_enq); } } -/* >8 End of receive packets on one port and enqueue to IOAT rawdev or rte_ring. */ +/* >8 End of receive packets on one port and enqueue to dmadev or rte_ring. */ -/* Transmit packets from IOAT rawdev/rte_ring for one port. 8< */ +/* Transmit packets from dmadev/rte_ring for one port. 8< */ static void -ioat_tx_port(struct rxtx_port_config *tx_config) +dma_tx_port(struct rxtx_port_config *tx_config) { uint32_t i, j, nb_dq, nb_tx; struct rte_mbuf *mbufs[MAX_PKT_BURST]; @@ -484,13 +483,13 @@ ioat_tx_port(struct rxtx_port_config *tx_config) /* Free any unsent packets. */ if (unlikely(nb_tx < nb_dq)) - rte_mempool_put_bulk(ioat_pktmbuf_pool, + rte_mempool_put_bulk(dma_pktmbuf_pool, (void *)&mbufs[nb_tx], nb_dq - nb_tx); } } -/* >8 End of transmitting packets from IOAT. */ +/* >8 End of transmitting packets from dmadev. */ -/* Main rx processing loop for IOAT rawdev. */ +/* Main rx processing loop for dmadev. */ static void rx_main_loop(void) { @@ -502,7 +501,7 @@ rx_main_loop(void) while (!force_quit) for (i = 0; i < nb_ports; i++) - ioat_rx_port(&cfg.ports[i]); + dma_rx_port(&cfg.ports[i]); } /* Main tx processing loop for hardware copy. 
*/ @@ -517,7 +516,7 @@ tx_main_loop(void) while (!force_quit) for (i = 0; i < nb_ports; i++) - ioat_tx_port(&cfg.ports[i]); + dma_tx_port(&cfg.ports[i]); } /* Main rx and tx loop if only one worker lcore available */ @@ -532,8 +531,8 @@ rxtx_main_loop(void) while (!force_quit) for (i = 0; i < nb_ports; i++) { - ioat_rx_port(&cfg.ports[i]); - ioat_tx_port(&cfg.ports[i]); + dma_rx_port(&cfg.ports[i]); + dma_tx_port(&cfg.ports[i]); } } @@ -563,7 +562,7 @@ static void start_forwarding_cores(void) /* Display usage */ static void -ioat_usage(const char *prgname) +dma_usage(const char *prgname) { printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n" " -b --dma-batch-size: number of requests per DMA batch\n" @@ -575,12 +574,12 @@ ioat_usage(const char *prgname) " - The source MAC address is replaced by the TX port MAC address\n" " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n" " -c --copy-type CT: type of copy: sw|hw\n" - " -s --ring-size RS: size of IOAT rawdev ring for hardware copy mode or rte_ring for software copy mode\n", + " -s --ring-size RS: size of dmadev descriptor ring for hardware copy mode or rte_ring for software copy mode\n", prgname); } static int -ioat_parse_portmask(const char *portmask) +dma_parse_portmask(const char *portmask) { char *end = NULL; unsigned long pm; @@ -594,19 +593,19 @@ ioat_parse_portmask(const char *portmask) } static copy_mode_t -ioat_parse_copy_mode(const char *copy_mode) +dma_parse_copy_mode(const char *copy_mode) { if (strcmp(copy_mode, COPY_MODE_SW) == 0) return COPY_MODE_SW_NUM; - else if (strcmp(copy_mode, COPY_MODE_IOAT) == 0) - return COPY_MODE_IOAT_NUM; + else if (strcmp(copy_mode, COPY_MODE_DMA) == 0) + return COPY_MODE_DMA_NUM; return COPY_MODE_INVALID_NUM; } /* Parse the argument given in the command line of the application */ static int -ioat_parse_args(int argc, char **argv, unsigned int nb_ports) +dma_parse_args(int argc, char **argv, unsigned int nb_ports) { static const char short_options[] = "b:" /* dma batch size */ @@ -635,7 +634,7 @@ ioat_parse_args(int argc, char **argv, unsigned int nb_ports) int option_index; char *prgname = argv[0]; - ioat_enabled_port_mask = default_port_mask; + dma_enabled_port_mask = default_port_mask; argvopt = argv; while ((opt = getopt_long(argc, argvopt, short_options, @@ -643,10 +642,10 @@ ioat_parse_args(int argc, char **argv, unsigned int nb_ports) switch (opt) { case 'b': - ioat_batch_sz = atoi(optarg); - if (ioat_batch_sz > MAX_PKT_BURST) { + dma_batch_sz = atoi(optarg); + if (dma_batch_sz > MAX_PKT_BURST) { printf("Invalid dma batch size, %s.\n", optarg); - ioat_usage(prgname); + dma_usage(prgname); return -1; } break; @@ -654,19 +653,19 @@ ioat_parse_args(int argc, char **argv, unsigned int nb_ports) max_frame_size = atoi(optarg); if (max_frame_size > RTE_ETHER_MAX_JUMBO_FRAME_LEN) { printf("Invalid max frame size, %s.\n", optarg); - ioat_usage(prgname); + dma_usage(prgname); return -1; } break; /* portmask */ case 'p': - ioat_enabled_port_mask = ioat_parse_portmask(optarg); - if (ioat_enabled_port_mask & ~default_port_mask || - ioat_enabled_port_mask <= 0) { + dma_enabled_port_mask = dma_parse_portmask(optarg); + if (dma_enabled_port_mask & ~default_port_mask || + dma_enabled_port_mask <= 0) { printf("Invalid portmask, %s, suggest 0x%x\n", optarg, default_port_mask); - ioat_usage(prgname); + dma_usage(prgname); return -1; } break; @@ -676,16 +675,16 @@ ioat_parse_args(int argc, char **argv, unsigned int nb_ports) if (nb_queues == 0 || nb_queues > MAX_RX_QUEUES_COUNT) { 
printf("Invalid RX queues number %s. Max %u\n", optarg, MAX_RX_QUEUES_COUNT); - ioat_usage(prgname); + dma_usage(prgname); return -1; } break; case 'c': - copy_mode = ioat_parse_copy_mode(optarg); + copy_mode = dma_parse_copy_mode(optarg); if (copy_mode == COPY_MODE_INVALID_NUM) { printf("Invalid copy type. Use: sw, hw\n"); - ioat_usage(prgname); + dma_usage(prgname); return -1; } break; @@ -694,7 +693,7 @@ ioat_parse_args(int argc, char **argv, unsigned int nb_ports) ring_size = atoi(optarg); if (ring_size == 0) { printf("Invalid ring size, %s.\n", optarg); - ioat_usage(prgname); + dma_usage(prgname); return -1; } break; @@ -704,7 +703,7 @@ ioat_parse_args(int argc, char **argv, unsigned int nb_ports) break; default: - ioat_usage(prgname); + dma_usage(prgname); return -1; } } @@ -753,7 +752,7 @@ check_link_status(uint32_t port_mask) /* Configuration of device. 8< */ static void -configure_rawdev_queue(uint32_t dev_id) +configure_dmadev_queue(uint32_t dev_id) { struct rte_dma_info info; struct rte_dma_conf dev_config = { .nb_vchans = 1 }; @@ -780,11 +779,11 @@ configure_rawdev_queue(uint32_t dev_id) } /* >8 End of configuration of device. */ -/* Using IOAT rawdev API functions. 8< */ +/* Using dmadev API functions. 8< */ static void -assign_rawdevs(void) +assign_dmadevs(void) { - uint16_t nb_rawdev = 0, rdev_id = 0; + uint16_t nb_dmadev = 0, rdev_id = 0; uint32_t i, j; for (i = 0; i < cfg.nb_ports; i++) { @@ -798,18 +797,18 @@ assign_rawdevs(void) } while (!rte_dma_is_valid(rdev_id)); cfg.ports[i].dmadev_ids[j] = rdev_id - 1; - configure_rawdev_queue(cfg.ports[i].dmadev_ids[j]); - ++nb_rawdev; + configure_dmadev_queue(cfg.ports[i].dmadev_ids[j]); + ++nb_dmadev; } } end: - if (nb_rawdev < cfg.nb_ports * cfg.ports[0].nb_queues) + if (nb_dmadev < cfg.nb_ports * cfg.ports[0].nb_queues) rte_exit(EXIT_FAILURE, - "Not enough IOAT rawdevs (%u) for all queues (%u).\n", - nb_rawdev, cfg.nb_ports * cfg.ports[0].nb_queues); - RTE_LOG(INFO, DMA, "Number of used rawdevs: %u.\n", nb_rawdev); + "Not enough dmadevs (%u) for all queues (%u).\n", + nb_dmadev, cfg.nb_ports * cfg.ports[0].nb_queues); + RTE_LOG(INFO, DMA, "Number of used dmadevs: %u.\n", nb_dmadev); } -/* >8 End of using IOAT rawdev API functions. */ +/* >8 End of using dmadev API functions. */ /* Assign ring structures for packet exchanging. 
8< */ static void @@ -867,7 +866,7 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues) } /* Skip ports that are not enabled */ - if ((ioat_enabled_port_mask & (1 << portid)) == 0) { + if ((dma_enabled_port_mask & (1 << portid)) == 0) { printf("Skipping disabled port %u\n", portid); return; } @@ -894,7 +893,7 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues) "Cannot adjust number of descriptors: err=%d, port=%u\n", ret, portid); - rte_eth_macaddr_get(portid, &ioat_ports_eth_addr[portid]); + rte_eth_macaddr_get(portid, &dma_ports_eth_addr[portid]); /* Init RX queues */ rxq_conf = dev_info.default_rxconf; @@ -953,7 +952,7 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues) printf("Port %u, MAC address: " RTE_ETHER_ADDR_PRT_FMT "\n\n", portid, - RTE_ETHER_ADDR_BYTES(&ioat_ports_eth_addr[portid])); + RTE_ETHER_ADDR_BYTES(&dma_ports_eth_addr[portid])); cfg.ports[cfg.nb_ports].rxtx_port = portid; cfg.ports[cfg.nb_ports++].nb_queues = nb_queues; @@ -995,9 +994,9 @@ main(int argc, char **argv) rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); /* Parse application arguments (after the EAL ones) */ - ret = ioat_parse_args(argc, argv, nb_ports); + ret = dma_parse_args(argc, argv, nb_ports); if (ret < 0) - rte_exit(EXIT_FAILURE, "Invalid IOAT arguments\n"); + rte_exit(EXIT_FAILURE, "Invalid DMA arguments\n"); /* Allocates mempool to hold the mbufs. 8< */ nb_mbufs = RTE_MAX(nb_ports * (nb_queues * (nb_rxd + nb_txd + @@ -1008,23 +1007,23 @@ main(int argc, char **argv) /* Create the mbuf pool */ sz = max_frame_size + RTE_PKTMBUF_HEADROOM; sz = RTE_MAX(sz, (size_t)RTE_MBUF_DEFAULT_BUF_SIZE); - ioat_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs, + dma_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs, MEMPOOL_CACHE_SIZE, 0, sz, rte_socket_id()); - if (ioat_pktmbuf_pool == NULL) + if (dma_pktmbuf_pool == NULL) rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n"); /* >8 End of allocates mempool to hold the mbufs. */ /* Initialize each port. 8< */ cfg.nb_ports = 0; RTE_ETH_FOREACH_DEV(portid) - port_init(portid, ioat_pktmbuf_pool, nb_queues); + port_init(portid, dma_pktmbuf_pool, nb_queues); /* >8 End of initializing each port. */ /* Initialize port xstats */ memset(&port_statistics, 0, sizeof(port_statistics)); /* Assigning each port resources. 8< */ - while (!check_link_status(ioat_enabled_port_mask) && !force_quit) + while (!check_link_status(dma_enabled_port_mask) && !force_quit) sleep(1); /* Check if there is enough lcores for all ports. */ @@ -1033,8 +1032,8 @@ main(int argc, char **argv) rte_exit(EXIT_FAILURE, "There should be at least one worker lcore.\n"); - if (copy_mode == COPY_MODE_IOAT_NUM) - assign_rawdevs(); + if (copy_mode == COPY_MODE_DMA_NUM) + assign_dmadevs(); assign_rings(); /* >8 End of assigning each port resources. */ @@ -1055,9 +1054,9 @@ main(int argc, char **argv) rte_strerror(-ret), cfg.ports[i].rxtx_port); rte_eth_dev_close(cfg.ports[i].rxtx_port); - if (copy_mode == COPY_MODE_IOAT_NUM) { + if (copy_mode == COPY_MODE_DMA_NUM) { for (j = 0; j < cfg.ports[i].nb_queues; j++) { - printf("Stopping rawdev %d\n", + printf("Stopping dmadev %d\n", cfg.ports[i].dmadev_ids[j]); rte_dma_stop(cfg.ports[i].dmadev_ids[j]); }
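---

For reviewers unfamiliar with the dmadev library this patch moves to, below is a minimal sketch of the copy flow that the renamed helpers (configure_dmadev_queue(), dma_enqueue_packets(), dma_dequeue()) are built around. It assumes the DPDK 21.11 dmadev API; the function names demo_dmadev_setup()/demo_copy_burst(), the use of vchan 0 and the nb_desc/burst values are illustrative only and are not part of this patch.

/*
 * Illustrative sketch, not part of the patch: configure one mem-to-mem
 * virtual channel on a dmadev, then enqueue a burst of packet copies
 * and poll for completions.
 */
#include <stdbool.h>
#include <rte_dmadev.h>
#include <rte_mbuf.h>

static int
demo_dmadev_setup(int16_t dev_id, uint16_t nb_desc)
{
	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
	struct rte_dma_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = nb_desc,
	};

	if (rte_dma_configure(dev_id, &dev_conf) != 0)
		return -1;
	if (rte_dma_vchan_setup(dev_id, 0, &qconf) != 0)
		return -1;
	return rte_dma_start(dev_id);
}

static uint16_t
demo_copy_burst(int16_t dev_id, struct rte_mbuf *src[],
		struct rte_mbuf *dst[], uint16_t n)
{
	bool has_error = false;
	uint16_t i;

	/* enqueue one copy descriptor per packet on vchan 0 */
	for (i = 0; i < n; i++) {
		int ret = rte_dma_copy(dev_id, 0,
				rte_pktmbuf_iova(src[i]),
				rte_pktmbuf_iova(dst[i]),
				rte_pktmbuf_data_len(src[i]), 0);
		if (ret < 0)
			break;	/* descriptor ring full, stop enqueuing */
	}
	if (i == 0)
		return 0;

	/* hand the batch to hardware */
	rte_dma_submit(dev_id, 0);

	/* poll once for whatever has completed so far; completion is
	 * asynchronous, so this may return fewer than i entries */
	return rte_dma_completed(dev_id, 0, i, NULL, &has_error);
}

In the example application itself these same calls are spread across configure_dmadev_queue(), dma_enqueue_packets() and dma_dequeue(), and a shadow ring (dma->bufs/dma->copies indexed with MBUF_RING_MASK) is used to pair each completion back up with its source and copy mbufs before transmission.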