[5/6] examples/ioat: update naming to match change to dmadev

Message ID: 20210910172737.2561156-6-kevin.laatz@intel.com
State: Superseded, archived
Delegated to: Thomas Monjalon
Series: port ioatfwd app to dmadev

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Kevin Laatz Sept. 10, 2021, 5:27 p.m. UTC
Existing functions, structures, defines, etc. need to be updated to reflect
the change to using the dmadev APIs.

Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
---
 examples/ioat/ioatfwd.c | 187 ++++++++++++++++++++--------------------
 1 file changed, 93 insertions(+), 94 deletions(-)
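
The renaming in this patch is mechanical (ioat_* becomes dma_*), but it sits
on top of the actual API switch made earlier in the series. For orientation,
below is a minimal sketch of the copy path the example now drives, assuming
the rte_dmadev_* API proposed in this series: rte_dmadev_submit() appears in
the hunks below, while rte_dmadev_copy() and rte_dmadev_completed() and their
signatures are assumptions based on the series, not shown in this patch.

/* Sketch only: one hardware copy through a dmadev, assuming the
 * rte_dmadev_* API from this series (later renamed to rte_dma_*).
 */
#include <stdbool.h>
#include <rte_dmadev.h>
#include <rte_mbuf.h>

static int
hw_copy_one(uint16_t dev_id, struct rte_mbuf *src, struct rte_mbuf *dst)
{
	/* Enqueue a mem-to-mem copy on vchan 0; returns a job index or < 0. */
	int idx = rte_dmadev_copy(dev_id, 0,
			rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst),
			rte_pktmbuf_data_len(src), 0);
	if (idx < 0)
		return idx;

	/* Ring the doorbell so the device starts on everything enqueued. */
	return rte_dmadev_submit(dev_id, 0);
}

static uint16_t
hw_reap(uint16_t dev_id, uint16_t burst)
{
	uint16_t last_idx;
	bool has_error = false;

	/* Reap up to 'burst' completed copies from vchan 0. */
	return rte_dmadev_completed(dev_id, 0, burst, &last_idx, &has_error);
}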
  

Patch

diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
index 610cb1a5cb..93f9f629a8 100644
--- a/examples/ioat/ioatfwd.c
+++ b/examples/ioat/ioatfwd.c
@@ -52,13 +52,13 @@  struct rxtx_transmission_config {
 /* >8 End of configuration of ports and number of assigned lcores. */
 
 /* per-port statistics struct */
-struct ioat_port_statistics {
+struct dma_port_statistics {
 	uint64_t rx[RTE_MAX_ETHPORTS];
 	uint64_t tx[RTE_MAX_ETHPORTS];
 	uint64_t tx_dropped[RTE_MAX_ETHPORTS];
 	uint64_t copy_dropped[RTE_MAX_ETHPORTS];
 };
-struct ioat_port_statistics port_statistics;
+struct dma_port_statistics port_statistics;
 struct total_statistics {
 	uint64_t total_packets_dropped;
 	uint64_t total_packets_tx;
@@ -71,14 +71,14 @@  struct total_statistics {
 typedef enum copy_mode_t {
 #define COPY_MODE_SW "sw"
 	COPY_MODE_SW_NUM,
-#define COPY_MODE_IOAT "hw"
-	COPY_MODE_IOAT_NUM,
+#define COPY_MODE_DMA "hw"
+	COPY_MODE_DMA_NUM,
 	COPY_MODE_INVALID_NUM,
 	COPY_MODE_SIZE_NUM = COPY_MODE_INVALID_NUM
 } copy_mode_t;
 
 /* mask of enabled ports */
-static uint32_t ioat_enabled_port_mask;
+static uint32_t dma_enabled_port_mask;
 
 /* number of RX queues per port */
 static uint16_t nb_queues = 1;
@@ -87,9 +87,9 @@  static uint16_t nb_queues = 1;
 static int mac_updating = 1;
 
 /* hardare copy mode enabled by default. */
-static copy_mode_t copy_mode = COPY_MODE_IOAT_NUM;
+static copy_mode_t copy_mode = COPY_MODE_DMA_NUM;
 
-/* size of IOAT rawdev ring for hardware copy mode or
+/* size of descriptor ring for hardware copy mode or
  * rte_ring for software copy mode
  */
 static unsigned short ring_size = 2048;
@@ -97,7 +97,7 @@  static unsigned short ring_size = 2048;
 /* global mbuf arrays for tracking DMA bufs */
 #define MBUF_RING_SIZE	1024
 #define MBUF_RING_MASK	(MBUF_RING_SIZE - 1)
-#define MAX_DEVS		64	/* 64 is max number of DSA WQ's */
+#define MAX_DEVS		64
 struct dma_bufs {
 	struct rte_mbuf *bufs[MBUF_RING_SIZE];
 	struct rte_mbuf *copies[MBUF_RING_SIZE];
@@ -114,14 +114,14 @@  static uint16_t nb_txd = TX_DEFAULT_RINGSIZE;
 
 static volatile bool force_quit;
 
-static uint32_t ioat_batch_sz = MAX_PKT_BURST;
+static uint32_t dma_batch_sz = MAX_PKT_BURST;
 static uint32_t max_frame_size = RTE_ETHER_MAX_LEN;
 
 /* ethernet addresses of ports */
-static struct rte_ether_addr ioat_ports_eth_addr[RTE_MAX_ETHPORTS];
+static struct rte_ether_addr dma_ports_eth_addr[RTE_MAX_ETHPORTS];
 
 static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
-struct rte_mempool *ioat_pktmbuf_pool;
+struct rte_mempool *dma_pktmbuf_pool;
 
 /* Print out statistics for one port. */
 static void
@@ -139,7 +139,7 @@  print_port_stats(uint16_t port_id)
 		port_statistics.copy_dropped[port_id]);
 }
 
-/* Print out statistics for one IOAT rawdev device. */
+/* Print out statistics for one dmadev device. */
 static void
 print_dmadev_stats(uint32_t dev_id, struct rte_dmadev_stats stats)
 {
@@ -160,7 +160,7 @@  print_total_stats(struct total_statistics *ts)
 		ts->total_packets_rx,
 		ts->total_packets_dropped);
 
-	if (copy_mode == COPY_MODE_IOAT_NUM) {
+	if (copy_mode == COPY_MODE_DMA_NUM) {
 		printf("\nTotal submitted ops: %19"PRIu64" [ops/s]"
 			"\nTotal completed ops: %19"PRIu64" [ops/s]"
 			"\nTotal failed ops: %22"PRIu64" [ops/s]",
@@ -194,7 +194,7 @@  print_stats(char *prgname)
 	status_strlen += snprintf(status_string + status_strlen,
 		sizeof(status_string) - status_strlen,
 		"Copy Mode = %s,\n", copy_mode == COPY_MODE_SW_NUM ?
-		COPY_MODE_SW : COPY_MODE_IOAT);
+		COPY_MODE_SW : COPY_MODE_DMA);
 	status_strlen += snprintf(status_string + status_strlen,
 		sizeof(status_string) - status_strlen,
 		"Updating MAC = %s, ", mac_updating ?
@@ -233,7 +233,7 @@  print_stats(char *prgname)
 			delta_ts.total_packets_rx +=
 				port_statistics.rx[port_id];
 
-			if (copy_mode == COPY_MODE_IOAT_NUM) {
+			if (copy_mode == COPY_MODE_DMA_NUM) {
 				uint32_t j;
 
 				for (j = 0; j < cfg.ports[i].nb_queues; j++) {
@@ -284,7 +284,7 @@  update_mac_addrs(struct rte_mbuf *m, uint32_t dest_portid)
 	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
 
 	/* src addr */
-	rte_ether_addr_copy(&ioat_ports_eth_addr[dest_portid], &eth->s_addr);
+	rte_ether_addr_copy(&dma_ports_eth_addr[dest_portid], &eth->s_addr);
 }
 
 /* Perform packet copy there is a user-defined function. 8< */
@@ -307,7 +307,7 @@  pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
 /* >8 End of perform packet copy there is a user-defined function. */
 
 static uint32_t
-ioat_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
+dma_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
 	uint32_t nb_rx, uint16_t dev_id)
 {
 	struct dma_bufs *dma = &dma_bufs[dev_id];
@@ -333,7 +333,7 @@  ioat_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
 }
 
 static inline uint32_t
-ioat_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
+dma_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
 		uint32_t num, uint32_t step, uint16_t dev_id)
 {
 	uint32_t i, k, m, n;
@@ -342,7 +342,7 @@  ioat_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
 	for (i = 0; i < num; i += m) {
 
 		m = RTE_MIN(step, num - i);
-		n = ioat_enqueue_packets(pkts + i, pkts_copy + i, m, dev_id);
+		n = dma_enqueue_packets(pkts + i, pkts_copy + i, m, dev_id);
 		k += n;
 		if (n > 0)
 			rte_dmadev_submit(dev_id, 0);
@@ -356,12 +356,12 @@  ioat_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
 }
 
 static inline uint32_t
-ioat_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num,
+dma_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num,
 	uint16_t dev_id)
 {
 	struct dma_bufs *dma = &dma_bufs[dev_id];
 	uint16_t nb_dq, filled;
-	/* Dequeue the mbufs from IOAT device. Since all memory
+	/* Dequeue the mbufs from DMA device. Since all memory
 	 * is DPDK pinned memory and therefore all addresses should
 	 * be valid, we don't check for copy errors
 	 */
@@ -371,7 +371,7 @@  ioat_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num,
 	if (unlikely(nb_dq == 0))
 		return nb_dq;
 
-	/* Populate pkts_copy with the copies bufs from dma->copies */
+	/* Populate pkts_copy with the copies bufs from dma->copies for tx */
 	for (filled = 0; filled < nb_dq; filled++) {
 		src[filled] = dma->bufs[(dma->sent + filled) & MBUF_RING_MASK];
 		dst[filled] = dma->copies[(dma->sent + filled) & MBUF_RING_MASK];
@@ -382,9 +382,9 @@  ioat_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num,
 
 }
 
-/* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. 8< */
+/* Receive packets on one port and enqueue to dmadev or rte_ring. 8< */
 static void
-ioat_rx_port(struct rxtx_port_config *rx_config)
+dma_rx_port(struct rxtx_port_config *rx_config)
 {
 	int32_t ret;
 	uint32_t nb_rx, nb_enq, i, j;
@@ -401,7 +401,7 @@  ioat_rx_port(struct rxtx_port_config *rx_config)
 
 		port_statistics.rx[rx_config->rxtx_port] += nb_rx;
 
-		ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
+		ret = rte_mempool_get_bulk(dma_pktmbuf_pool,
 			(void *)pkts_burst_copy, nb_rx);
 
 		if (unlikely(ret < 0))
@@ -412,17 +412,16 @@  ioat_rx_port(struct rxtx_port_config *rx_config)
 			pktmbuf_metadata_copy(pkts_burst[j],
 				pkts_burst_copy[j]);
 
-		if (copy_mode == COPY_MODE_IOAT_NUM) {
-
+		if (copy_mode == COPY_MODE_DMA_NUM) {
 			/* enqueue packets for  hardware copy */
-			nb_enq = ioat_enqueue(pkts_burst, pkts_burst_copy,
-				nb_rx, ioat_batch_sz, rx_config->dmadev_ids[i]);
+			nb_enq = dma_enqueue(pkts_burst, pkts_burst_copy,
+				nb_rx, dma_batch_sz, rx_config->dmadev_ids[i]);
 
 			/* free any not enqueued packets. */
-			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+			rte_mempool_put_bulk(dma_pktmbuf_pool,
 				(void *)&pkts_burst[nb_enq],
 				nb_rx - nb_enq);
-			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+			rte_mempool_put_bulk(dma_pktmbuf_pool,
 				(void *)&pkts_burst_copy[nb_enq],
 				nb_rx - nb_enq);
 
@@ -430,7 +429,7 @@  ioat_rx_port(struct rxtx_port_config *rx_config)
 				(nb_rx - nb_enq);
 
 			/* get completed copies */
-			nb_rx = ioat_dequeue(pkts_burst, pkts_burst_copy,
+			nb_rx = dma_dequeue(pkts_burst, pkts_burst_copy,
 				MAX_PKT_BURST, rx_config->dmadev_ids[i]);
 		} else {
 			/* Perform packet software copy, free source packets */
@@ -439,14 +438,14 @@  ioat_rx_port(struct rxtx_port_config *rx_config)
 					pkts_burst_copy[j]);
 		}
 
-		rte_mempool_put_bulk(ioat_pktmbuf_pool,
+		rte_mempool_put_bulk(dma_pktmbuf_pool,
 			(void *)pkts_burst, nb_rx);
 
 		nb_enq = rte_ring_enqueue_burst(rx_config->rx_to_tx_ring,
 			(void *)pkts_burst_copy, nb_rx, NULL);
 
 		/* Free any not enqueued packets. */
-		rte_mempool_put_bulk(ioat_pktmbuf_pool,
+		rte_mempool_put_bulk(dma_pktmbuf_pool,
 			(void *)&pkts_burst_copy[nb_enq],
 			nb_rx - nb_enq);
 
@@ -454,11 +453,11 @@  ioat_rx_port(struct rxtx_port_config *rx_config)
 			(nb_rx - nb_enq);
 	}
 }
-/* >8 End of receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
+/* >8 End of receive packets on one port and enqueue to dmadev or rte_ring. */
 
-/* Transmit packets from IOAT rawdev/rte_ring for one port. 8< */
+/* Transmit packets from dmadev/rte_ring for one port. 8< */
 static void
-ioat_tx_port(struct rxtx_port_config *tx_config)
+dma_tx_port(struct rxtx_port_config *tx_config)
 {
 	uint32_t i, j, nb_dq, nb_tx;
 	struct rte_mbuf *mbufs[MAX_PKT_BURST];
@@ -485,13 +484,13 @@  ioat_tx_port(struct rxtx_port_config *tx_config)
 
 		/* Free any unsent packets. */
 		if (unlikely(nb_tx < nb_dq))
-			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+			rte_mempool_put_bulk(dma_pktmbuf_pool,
 			(void *)&mbufs[nb_tx], nb_dq - nb_tx);
 	}
 }
-/* >8 End of transmitting packets from IOAT. */
+/* >8 End of transmitting packets from dmadev. */
 
-/* Main rx processing loop for IOAT rawdev. */
+/* Main rx processing loop for dmadev. */
 static void
 rx_main_loop(void)
 {
@@ -503,7 +502,7 @@  rx_main_loop(void)
 
 	while (!force_quit)
 		for (i = 0; i < nb_ports; i++)
-			ioat_rx_port(&cfg.ports[i]);
+			dma_rx_port(&cfg.ports[i]);
 }
 
 /* Main tx processing loop for hardware copy. */
@@ -518,7 +517,7 @@  tx_main_loop(void)
 
 	while (!force_quit)
 		for (i = 0; i < nb_ports; i++)
-			ioat_tx_port(&cfg.ports[i]);
+			dma_tx_port(&cfg.ports[i]);
 }
 
 /* Main rx and tx loop if only one worker lcore available */
@@ -533,8 +532,8 @@  rxtx_main_loop(void)
 
 	while (!force_quit)
 		for (i = 0; i < nb_ports; i++) {
-			ioat_rx_port(&cfg.ports[i]);
-			ioat_tx_port(&cfg.ports[i]);
+			dma_rx_port(&cfg.ports[i]);
+			dma_tx_port(&cfg.ports[i]);
 		}
 }
 
@@ -564,7 +563,7 @@  static void start_forwarding_cores(void)
 
 /* Display usage */
 static void
-ioat_usage(const char *prgname)
+dma_usage(const char *prgname)
 {
 	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
 		"  -b --dma-batch-size: number of requests per DMA batch\n"
@@ -576,12 +575,12 @@  ioat_usage(const char *prgname)
 		"       - The source MAC address is replaced by the TX port MAC address\n"
 		"       - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
 		"  -c --copy-type CT: type of copy: sw|hw\n"
-		"  -s --ring-size RS: size of IOAT rawdev ring for hardware copy mode or rte_ring for software copy mode\n",
+		"  -s --ring-size RS: size of dmadev descriptor ring for hardware copy mode or rte_ring for software copy mode\n",
 			prgname);
 }
 
 static int
-ioat_parse_portmask(const char *portmask)
+dma_parse_portmask(const char *portmask)
 {
 	char *end = NULL;
 	unsigned long pm;
@@ -595,19 +594,19 @@  ioat_parse_portmask(const char *portmask)
 }
 
 static copy_mode_t
-ioat_parse_copy_mode(const char *copy_mode)
+dma_parse_copy_mode(const char *copy_mode)
 {
 	if (strcmp(copy_mode, COPY_MODE_SW) == 0)
 		return COPY_MODE_SW_NUM;
-	else if (strcmp(copy_mode, COPY_MODE_IOAT) == 0)
-		return COPY_MODE_IOAT_NUM;
+	else if (strcmp(copy_mode, COPY_MODE_DMA) == 0)
+		return COPY_MODE_DMA_NUM;
 
 	return COPY_MODE_INVALID_NUM;
 }
 
 /* Parse the argument given in the command line of the application */
 static int
-ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
+dma_parse_args(int argc, char **argv, unsigned int nb_ports)
 {
 	static const char short_options[] =
 		"b:"  /* dma batch size */
@@ -636,7 +635,7 @@  ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
 	int option_index;
 	char *prgname = argv[0];
 
-	ioat_enabled_port_mask = default_port_mask;
+	dma_enabled_port_mask = default_port_mask;
 	argvopt = argv;
 
 	while ((opt = getopt_long(argc, argvopt, short_options,
@@ -644,10 +643,10 @@  ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
 
 		switch (opt) {
 		case 'b':
-			ioat_batch_sz = atoi(optarg);
-			if (ioat_batch_sz > MAX_PKT_BURST) {
+			dma_batch_sz = atoi(optarg);
+			if (dma_batch_sz > MAX_PKT_BURST) {
 				printf("Invalid dma batch size, %s.\n", optarg);
-				ioat_usage(prgname);
+				dma_usage(prgname);
 				return -1;
 			}
 			break;
@@ -655,19 +654,19 @@  ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
 			max_frame_size = atoi(optarg);
 			if (max_frame_size > RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
 				printf("Invalid max frame size, %s.\n", optarg);
-				ioat_usage(prgname);
+				dma_usage(prgname);
 				return -1;
 			}
 			break;
 
 		/* portmask */
 		case 'p':
-			ioat_enabled_port_mask = ioat_parse_portmask(optarg);
-			if (ioat_enabled_port_mask & ~default_port_mask ||
-					ioat_enabled_port_mask <= 0) {
+			dma_enabled_port_mask = dma_parse_portmask(optarg);
+			if (dma_enabled_port_mask & ~default_port_mask ||
+					dma_enabled_port_mask <= 0) {
 				printf("Invalid portmask, %s, suggest 0x%x\n",
 						optarg, default_port_mask);
-				ioat_usage(prgname);
+				dma_usage(prgname);
 				return -1;
 			}
 			break;
@@ -677,16 +676,16 @@  ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
 			if (nb_queues == 0 || nb_queues > MAX_RX_QUEUES_COUNT) {
 				printf("Invalid RX queues number %s. Max %u\n",
 					optarg, MAX_RX_QUEUES_COUNT);
-				ioat_usage(prgname);
+				dma_usage(prgname);
 				return -1;
 			}
 			break;
 
 		case 'c':
-			copy_mode = ioat_parse_copy_mode(optarg);
+			copy_mode = dma_parse_copy_mode(optarg);
 			if (copy_mode == COPY_MODE_INVALID_NUM) {
 				printf("Invalid copy type. Use: sw, hw\n");
-				ioat_usage(prgname);
+				dma_usage(prgname);
 				return -1;
 			}
 			break;
@@ -695,7 +694,7 @@  ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
 			ring_size = atoi(optarg);
 			if (ring_size == 0) {
 				printf("Invalid ring size, %s.\n", optarg);
-				ioat_usage(prgname);
+				dma_usage(prgname);
 				return -1;
 			}
 			break;
@@ -705,7 +704,7 @@  ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
 			break;
 
 		default:
-			ioat_usage(prgname);
+			dma_usage(prgname);
 			return -1;
 		}
 	}
@@ -754,7 +753,7 @@  check_link_status(uint32_t port_mask)
 
 /* Configuration of device. 8< */
 static void
-configure_rawdev_queue(uint32_t dev_id)
+configure_dmadev_queue(uint32_t dev_id)
 {
 	struct rte_dmadev_info info;
 	struct rte_dmadev_conf dev_config = { .nb_vchans = 1 };
@@ -781,11 +780,11 @@  configure_rawdev_queue(uint32_t dev_id)
 }
 /* >8 End of configuration of device. */
 
-/* Using IOAT rawdev API functions. 8< */
+/* Using dmadev API functions. 8< */
 static void
-assign_rawdevs(void)
+assign_dmadevs(void)
 {
-	uint16_t nb_rawdev = 0, rdev_id = 0;
+	uint16_t nb_dmadev = 0, rdev_id = 0;
 	uint32_t i, j;
 
 	for (i = 0; i < cfg.nb_ports; i++) {
@@ -799,18 +798,18 @@  assign_rawdevs(void)
 			} while (dmadev_info.device->driver->name == NULL);
 
 			cfg.ports[i].dmadev_ids[j] = rdev_id - 1;
-			configure_rawdev_queue(cfg.ports[i].dmadev_ids[j]);
-			++nb_rawdev;
+			configure_dmadev_queue(cfg.ports[i].dmadev_ids[j]);
+			++nb_dmadev;
 		}
 	}
 end:
-	if (nb_rawdev < cfg.nb_ports * cfg.ports[0].nb_queues)
+	if (nb_dmadev < cfg.nb_ports * cfg.ports[0].nb_queues)
 		rte_exit(EXIT_FAILURE,
-			"Not enough IOAT rawdevs (%u) for all queues (%u).\n",
-			nb_rawdev, cfg.nb_ports * cfg.ports[0].nb_queues);
-	RTE_LOG(INFO, DMA, "Number of used rawdevs: %u.\n", nb_rawdev);
+			"Not enough dmadevs (%u) for all queues (%u).\n",
+			nb_dmadev, cfg.nb_ports * cfg.ports[0].nb_queues);
+	RTE_LOG(INFO, DMA, "Number of used dmadevs: %u.\n", nb_dmadev);
 }
-/* >8 End of using IOAT rawdev API functions. */
+/* >8 End of using dmadev API functions. */
 
 /* Assign ring structures for packet exchanging. 8< */
 static void
@@ -868,7 +867,7 @@  port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 	}
 
 	/* Skip ports that are not enabled */
-	if ((ioat_enabled_port_mask & (1 << portid)) == 0) {
+	if ((dma_enabled_port_mask & (1 << portid)) == 0) {
 		printf("Skipping disabled port %u\n", portid);
 		return;
 	}
@@ -895,7 +894,7 @@  port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 			"Cannot adjust number of descriptors: err=%d, port=%u\n",
 			ret, portid);
 
-	rte_eth_macaddr_get(portid, &ioat_ports_eth_addr[portid]);
+	rte_eth_macaddr_get(portid, &dma_ports_eth_addr[portid]);
 
 	/* Init RX queues */
 	rxq_conf = dev_info.default_rxconf;
@@ -954,12 +953,12 @@  port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
 
 	printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
 			portid,
-			ioat_ports_eth_addr[portid].addr_bytes[0],
-			ioat_ports_eth_addr[portid].addr_bytes[1],
-			ioat_ports_eth_addr[portid].addr_bytes[2],
-			ioat_ports_eth_addr[portid].addr_bytes[3],
-			ioat_ports_eth_addr[portid].addr_bytes[4],
-			ioat_ports_eth_addr[portid].addr_bytes[5]);
+			dma_ports_eth_addr[portid].addr_bytes[0],
+			dma_ports_eth_addr[portid].addr_bytes[1],
+			dma_ports_eth_addr[portid].addr_bytes[2],
+			dma_ports_eth_addr[portid].addr_bytes[3],
+			dma_ports_eth_addr[portid].addr_bytes[4],
+			dma_ports_eth_addr[portid].addr_bytes[5]);
 
 	cfg.ports[cfg.nb_ports].rxtx_port = portid;
 	cfg.ports[cfg.nb_ports++].nb_queues = nb_queues;
@@ -1001,9 +1000,9 @@  main(int argc, char **argv)
 		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
 
 	/* Parse application arguments (after the EAL ones) */
-	ret = ioat_parse_args(argc, argv, nb_ports);
+	ret = dma_parse_args(argc, argv, nb_ports);
 	if (ret < 0)
-		rte_exit(EXIT_FAILURE, "Invalid IOAT arguments\n");
+		rte_exit(EXIT_FAILURE, "Invalid DMA arguments\n");
 
 	/* Allocates mempool to hold the mbufs. 8< */
 	nb_mbufs = RTE_MAX(nb_ports * (nb_queues * (nb_rxd + nb_txd +
@@ -1014,23 +1013,23 @@  main(int argc, char **argv)
 	/* Create the mbuf pool */
 	sz = max_frame_size + RTE_PKTMBUF_HEADROOM;
 	sz = RTE_MAX(sz, (size_t)RTE_MBUF_DEFAULT_BUF_SIZE);
-	ioat_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
+	dma_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
 		MEMPOOL_CACHE_SIZE, 0, sz, rte_socket_id());
-	if (ioat_pktmbuf_pool == NULL)
+	if (dma_pktmbuf_pool == NULL)
 		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
 	/* >8 End of allocates mempool to hold the mbufs. */
 
 	/* Initialize each port. 8< */
 	cfg.nb_ports = 0;
 	RTE_ETH_FOREACH_DEV(portid)
-		port_init(portid, ioat_pktmbuf_pool, nb_queues);
+		port_init(portid, dma_pktmbuf_pool, nb_queues);
 	/* >8 End of initializing each port. */
 
 	/* Initialize port xstats */
 	memset(&port_statistics, 0, sizeof(port_statistics));
 
 	/* Assigning each port resources. 8< */
-	while (!check_link_status(ioat_enabled_port_mask) && !force_quit)
+	while (!check_link_status(dma_enabled_port_mask) && !force_quit)
 		sleep(1);
 
 	/* Check if there is enough lcores for all ports. */
@@ -1039,8 +1038,8 @@  main(int argc, char **argv)
 		rte_exit(EXIT_FAILURE,
 			"There should be at least one worker lcore.\n");
 
-	if (copy_mode == COPY_MODE_IOAT_NUM)
-		assign_rawdevs();
+	if (copy_mode == COPY_MODE_DMA_NUM)
+		assign_dmadevs();
 
 	assign_rings();
 	/* >8 End of assigning each port resources. */
@@ -1061,9 +1060,9 @@  main(int argc, char **argv)
 				rte_strerror(-ret), cfg.ports[i].rxtx_port);
 
 		rte_eth_dev_close(cfg.ports[i].rxtx_port);
-		if (copy_mode == COPY_MODE_IOAT_NUM) {
+		if (copy_mode == COPY_MODE_DMA_NUM) {
 			for (j = 0; j < cfg.ports[i].nb_queues; j++) {
-				printf("Stopping rawdev %d\n",
+				printf("Stopping dmadev %d\n",
 					cfg.ports[i].dmadev_ids[j]);
 				rte_dmadev_stop(cfg.ports[i].dmadev_ids[j]);
 			}
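
The body of configure_dmadev_queue() is elided by the hunk above, which shows
only its opening lines. As a rough guide, a minimal dmadev bring-up under the
API proposed in this series looks like the sketch below; rte_dmadev_vchan_conf,
its fields, RTE_DMA_DIR_MEM_TO_MEM, rte_dmadev_vchan_setup() and
rte_dmadev_start() are assumptions based on the series, not confirmed by this
patch.

/* Sketch only: bring-up of one dmadev with a single mem-to-mem virtual
 * channel, assuming the rte_dmadev_* API from this series.
 */
static void
setup_dmadev(uint16_t dev_id, uint16_t nb_desc)
{
	struct rte_dmadev_conf dev_config = { .nb_vchans = 1 };
	struct rte_dmadev_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = nb_desc,
	};

	if (rte_dmadev_configure(dev_id, &dev_config) != 0)
		rte_exit(EXIT_FAILURE, "Error configuring dmadev %d\n", dev_id);

	/* One virtual channel (vchan 0), copying CPU memory to CPU memory,
	 * sized by the -s/--ring-size option handled in dma_parse_args(). */
	if (rte_dmadev_vchan_setup(dev_id, 0, &qconf) != 0)
		rte_exit(EXIT_FAILURE, "Error with vchan setup, dmadev %d\n", dev_id);

	if (rte_dmadev_start(dev_id) != 0)
		rte_exit(EXIT_FAILURE, "Error starting dmadev %d\n", dev_id);
}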