From patchwork Wed Sep 18 09:11:36 2019
X-Patchwork-Submitter: Marcin Baran
X-Patchwork-Id: 59336
From: Marcin Baran
To: dev@dpdk.org, bruce.richardson@intel.com
Cc: Marcin Baran, Pawel Modrak
Date: Wed, 18 Sep 2019 11:11:36 +0200
Message-Id: <20190918091139.1430-4-marcinx.baran@intel.com>
X-Mailer: git-send-email 2.22.0.windows.1
In-Reply-To: <20190918091139.1430-1-marcinx.baran@intel.com>
References: <20190913143914.2332-1-marcinx.baran@intel.com>
 <20190918091139.1430-1-marcinx.baran@intel.com>
Subject: [dpdk-dev] [PATCH v3 3/6] examples/ioat: add rawdev copy mode support

Added support for copying packets using a rawdev device. Each port's
Rx queue is assigned a DMA channel for the copy.

Signed-off-by: Marcin Baran
Signed-off-by: Pawel Modrak
---
 examples/ioat/ioatfwd.c | 236 ++++++++++++++++++++++++++++++++--------
 1 file changed, 189 insertions(+), 47 deletions(-)

diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
index 3a092c6cf..c66ce7e49 100644
--- a/examples/ioat/ioatfwd.c
+++ b/examples/ioat/ioatfwd.c
@@ -121,6 +121,50 @@ pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
 		rte_pktmbuf_mtod(src, char *), src->data_len);
 }
 
+static uint32_t
+ioat_enqueue_packets(struct rte_mbuf **pkts,
+	uint32_t nb_rx, uint16_t dev_id)
+{
+	int ret;
+	uint32_t i;
+	struct rte_mbuf *pkts_copy[MAX_PKT_BURST];
+
+	const uint64_t addr_offset = RTE_PTR_DIFF(pkts[0]->buf_addr,
+		&pkts[0]->rearm_data);
+
+	ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
+		(void *)pkts_copy, nb_rx);
+
+	if (unlikely(ret < 0))
+		rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");
+
+	for (i = 0; i < nb_rx; i++) {
+		/* Perform data copy */
+		ret = rte_ioat_enqueue_copy(dev_id,
+			pkts[i]->buf_iova
+			- addr_offset,
+			pkts_copy[i]->buf_iova
+			- addr_offset,
+			rte_pktmbuf_data_len(pkts[i])
+			+ addr_offset,
+			(uintptr_t)pkts[i],
+			(uintptr_t)pkts_copy[i],
+			0 /* nofence */);
+
+		if (ret != 1)
+			break;
+	}
+
+	ret = i;
+	/* Free any not enqueued packets. */
+	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts[i], nb_rx - i);
+	rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts_copy[i],
+		nb_rx - i);
+
+
+	return ret;
+}
+
 /* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
 static void
 ioat_rx_port(struct rxtx_port_config *rx_config)
@@ -136,32 +180,40 @@ ioat_rx_port(struct rxtx_port_config *rx_config)
 		if (nb_rx == 0)
 			continue;
 
-		/* Perform packet software copy, free source packets */
-		int ret;
-		struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
-
-		ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
-			(void *)pkts_burst_copy, nb_rx);
-
-		if (unlikely(ret < 0))
-			rte_exit(EXIT_FAILURE,
-				"Unable to allocate memory.\n");
-
-		for (j = 0; j < nb_rx; j++)
-			pktmbuf_sw_copy(pkts_burst[j],
-				pkts_burst_copy[j]);
-
-		rte_mempool_put_bulk(ioat_pktmbuf_pool,
-			(void *)pkts_burst, nb_rx);
-
-		nb_enq = rte_ring_enqueue_burst(
-			rx_config->rx_to_tx_ring,
-			(void *)pkts_burst_copy, nb_rx, NULL);
-
-		/* Free any not enqueued packets. */
-		rte_mempool_put_bulk(ioat_pktmbuf_pool,
-			(void *)&pkts_burst_copy[nb_enq],
-			nb_rx - nb_enq);
+		if (copy_mode == COPY_MODE_IOAT_NUM) {
+			/* Perform packet hardware copy */
+			nb_enq = ioat_enqueue_packets(pkts_burst,
+				nb_rx, rx_config->ioat_ids[i]);
+			if (nb_enq > 0)
+				rte_ioat_do_copies(rx_config->ioat_ids[i]);
+		} else {
+			/* Perform packet software copy, free source packets */
+			int ret;
+			struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
+
+			ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
+				(void *)pkts_burst_copy, nb_rx);
+
+			if (unlikely(ret < 0))
+				rte_exit(EXIT_FAILURE,
+					"Unable to allocate memory.\n");
+
+			for (j = 0; j < nb_rx; j++)
+				pktmbuf_sw_copy(pkts_burst[j],
+					pkts_burst_copy[j]);
+
+			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+				(void *)pkts_burst, nb_rx);
+
+			nb_enq = rte_ring_enqueue_burst(
+				rx_config->rx_to_tx_ring,
+				(void *)pkts_burst_copy, nb_rx, NULL);
+
+			/* Free any not enqueued packets. */
+			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+				(void *)&pkts_burst_copy[nb_enq],
+				nb_rx - nb_enq);
+		}
 	}
 }
 
@@ -169,31 +221,64 @@
 static void
 ioat_tx_port(struct rxtx_port_config *tx_config)
 {
-	uint32_t i, nb_dq = 0;
+	uint32_t i, j, nb_dq = 0;
+	struct rte_mbuf *mbufs_src[MAX_PKT_BURST];
 	struct rte_mbuf *mbufs_dst[MAX_PKT_BURST];
 
-	/* Deque the mbufs from rx_to_tx_ring. */
-	nb_dq = rte_ring_dequeue_burst(tx_config->rx_to_tx_ring,
-		(void *)mbufs_dst, MAX_PKT_BURST, NULL);
+	if (copy_mode == COPY_MODE_IOAT_NUM) {
+		/* Deque the mbufs from IOAT device. */
+		for (i = 0; i < tx_config->nb_queues; i++) {
+			nb_dq = rte_ioat_completed_copies(
+				tx_config->ioat_ids[i], MAX_PKT_BURST,
+				(void *)mbufs_src, (void *)mbufs_dst);
 
-	if (nb_dq == 0)
-		return;
+			if (nb_dq == 0)
+				break;
 
-	/* Update macs if enabled */
-	if (mac_updating) {
-		for (i = 0; i < nb_dq; i++)
-			update_mac_addrs(mbufs_dst[i],
-				tx_config->rxtx_port);
-	}
+			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+				(void *)mbufs_src, nb_dq);
 
-	const uint16_t nb_tx = rte_eth_tx_burst(tx_config->rxtx_port,
-		0, (void *)mbufs_dst, nb_dq);
+			/* Update macs if enabled */
+			if (mac_updating) {
+				for (j = 0; j < nb_dq; j++)
+					update_mac_addrs(mbufs_dst[j],
+						tx_config->rxtx_port);
+			}
+
+			const uint16_t nb_tx = rte_eth_tx_burst(
+				tx_config->rxtx_port, 0,
+				(void *)mbufs_dst, nb_dq);
 
-	/* Free any unsent packets. */
-	if (unlikely(nb_tx < nb_dq))
-		rte_mempool_put_bulk(ioat_pktmbuf_pool,
-			(void *)&mbufs_dst[nb_tx],
-			nb_dq - nb_tx);
+			/* Free any unsent packets. */
+			if (unlikely(nb_tx < nb_dq))
+				rte_mempool_put_bulk(ioat_pktmbuf_pool,
+					(void *)&mbufs_dst[nb_tx],
+					nb_dq - nb_tx);
+		}
+	} else {
+		/* Deque the mbufs from rx_to_tx_ring. */
+		nb_dq = rte_ring_dequeue_burst(tx_config->rx_to_tx_ring,
+			(void *)mbufs_dst, MAX_PKT_BURST, NULL);
+
+		if (nb_dq == 0)
+			return;
+
+		/* Update macs if enabled */
+		if (mac_updating) {
+			for (i = 0; i < nb_dq; i++)
+				update_mac_addrs(mbufs_dst[i],
+					tx_config->rxtx_port);
+		}
+
+		const uint16_t nb_tx = rte_eth_tx_burst(tx_config->rxtx_port,
+			0, (void *)mbufs_dst, nb_dq);
+
+		/* Free any unsent packets. */
+		if (unlikely(nb_tx < nb_dq))
+			rte_mempool_put_bulk(ioat_pktmbuf_pool,
+				(void *)&mbufs_dst[nb_tx],
+				nb_dq - nb_tx);
+	}
 }
 
 /* Main rx and tx loop if only one slave lcore available */
@@ -389,6 +474,52 @@ check_link_status(uint32_t port_mask)
 	return retval;
 }
 
+static void
+configure_rawdev_queue(uint32_t dev_id)
+{
+	struct rte_ioat_rawdev_config dev_config = { .ring_size = ring_size };
+	struct rte_rawdev_info info = { .dev_private = &dev_config };
+
+	if (rte_rawdev_configure(dev_id, &info) != 0) {
+		rte_exit(EXIT_FAILURE,
+			"Error with rte_rawdev_configure()\n");
+	}
+	if (rte_rawdev_start(dev_id) != 0) {
+		rte_exit(EXIT_FAILURE,
+			"Error with rte_rawdev_start()\n");
+	}
+}
+
+static void
+assign_rawdevs(void)
+{
+	uint16_t nb_rawdev = 0, rdev_id = 0;
+	uint32_t i, j;
+
+	for (i = 0; i < cfg.nb_ports; i++) {
+		for (j = 0; j < cfg.ports[i].nb_queues; j++) {
+			struct rte_rawdev_info rdev_info = { 0 };
+
+			do {
+				if (rdev_id == rte_rawdev_count())
+					goto end;
+				rte_rawdev_info_get(rdev_id++, &rdev_info);
+			} while (strcmp(rdev_info.driver_name,
+				IOAT_PMD_RAWDEV_NAME_STR) != 0);
+
+			cfg.ports[i].ioat_ids[j] = rdev_id - 1;
+			configure_rawdev_queue(cfg.ports[i].ioat_ids[j]);
+			++nb_rawdev;
+		}
+	}
+end:
+	if (nb_rawdev < cfg.nb_ports * cfg.ports[0].nb_queues)
+		rte_exit(EXIT_FAILURE,
+			"Not enough IOAT rawdevs (%u) for all queues (%u).\n",
+			nb_rawdev, cfg.nb_ports * cfg.ports[0].nb_queues);
+	RTE_LOG(INFO, IOAT, "Number of used rawdevs: %u.\n", nb_rawdev);
+}
+
 static void
 assign_rings(void)
 {
@@ -585,18 +716,29 @@ main(int argc, char **argv)
 		rte_exit(EXIT_FAILURE,
 			"There should be at least one slave lcore.\n");
 
-	assign_rings();
+	if (copy_mode == COPY_MODE_IOAT_NUM)
+		assign_rawdevs();
+	else /* copy_mode == COPY_MODE_SW_NUM */
+		assign_rings();
 
 	start_forwarding_cores();
 
 	/* force_quit is true when we get here */
 	rte_eal_mp_wait_lcore();
 
+	uint32_t j;
 	for (i = 0; i < cfg.nb_ports; i++) {
 		printf("Closing port %d\n", cfg.ports[i].rxtx_port);
 		rte_eth_dev_stop(cfg.ports[i].rxtx_port);
 		rte_eth_dev_close(cfg.ports[i].rxtx_port);
-		rte_ring_free(cfg.ports[i].rx_to_tx_ring);
+		if (copy_mode == COPY_MODE_IOAT_NUM) {
+			for (j = 0; j < cfg.ports[i].nb_queues; j++) {
+				printf("Stopping rawdev %d\n",
+					cfg.ports[i].ioat_ids[j]);
+				rte_rawdev_stop(cfg.ports[i].ioat_ids[j]);
+			}
+		} else /* copy_mode == COPY_MODE_SW_NUM */
+			rte_ring_free(cfg.ports[i].rx_to_tx_ring);
 	}
 
 	printf("Bye...\n");
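
For readers new to the IOAT rawdev copy API used above, the sketch below reduces the
flow this patch relies on to its three steps: enqueue copy descriptors on the RX side,
ring the doorbell, then reap completions on the TX side. It is not part of the patch;
the helper name, BURST size and the data_off-based addressing are illustrative
assumptions (the patch itself starts the copy at an offset before the packet data so
that mbuf metadata is duplicated as well), and only the rte_ioat_* and mbuf calls
already used in the diff are relied on.

/*
 * Sketch only (not part of the patch): one RX-side burst enqueued to an
 * IOAT rawdev channel and one TX-side completion poll.  dev_id, BURST and
 * the data_off-based addressing are illustrative assumptions.
 */
#include <rte_ioat_rawdev.h>
#include <rte_mbuf.h>

#define BURST 32

static inline void
ioat_copy_flow_sketch(uint16_t dev_id, struct rte_mbuf **src,
	struct rte_mbuf **dst, uint16_t n)
{
	struct rte_mbuf *done_src[BURST], *done_dst[BURST];
	uint16_t i;
	int nb_done, j;

	/* RX side: one copy descriptor per packet, mbuf pointers as handles. */
	for (i = 0; i < n; i++)
		if (rte_ioat_enqueue_copy(dev_id,
				src[i]->buf_iova + src[i]->data_off,
				dst[i]->buf_iova + dst[i]->data_off,
				rte_pktmbuf_data_len(src[i]),
				(uintptr_t)src[i], (uintptr_t)dst[i],
				0 /* no fence */) != 1)
			break;

	/* Ring the doorbell so the DMA engine starts on the enqueued copies. */
	if (i > 0)
		rte_ioat_do_copies(dev_id);

	/* TX side (later, possibly on another lcore): reap finished copies. */
	nb_done = rte_ioat_completed_copies(dev_id, BURST,
		(void *)done_src, (void *)done_dst);
	for (j = 0; j < nb_done; j++) {
		/* done_dst[j] holds the copy, ready to transmit; the matching
		 * source mbuf is no longer needed. */
		rte_pktmbuf_free(done_src[j]);
	}
}

In the patch itself, ioat_rx_port() plays the RX role for each queue and
ioat_tx_port() the TX role, with one rawdev channel per (port, queue) pair
assigned by assign_rawdevs().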