[v4,1/3] app/testpmd: fix mempool free on exit

Message ID cacf4fc79730e6c2225636214d0d1d8a39b62316.1554613242.git.shahafs@mellanox.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series DMA map anonymous memory to eth devices

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/intel-Performance-Testing success Performance Testing PASS
ci/mellanox-Performance-Testing success Performance Testing PASS
ci/Intel-compilation success Compilation OK

Commit Message

Shahaf Shuler April 7, 2019, 5:02 a.m. UTC
  Allocated mempools were never freed, which is bad practice.

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
---
 app/test-pmd/testpmd.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)
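
The fix amounts to keeping a handle for each mbuf pool that init_config() creates and releasing those handles on exit. Below is a minimal standalone sketch of that pattern (not the patch itself); the pool name, cache size, and the pool_create()/pools_free() helpers are illustrative assumptions rather than testpmd code.

 /*
  * Sketch: track one mbuf pool per NUMA socket and free the pools at
  * shutdown. Pool name, size and cache values are illustrative.
  */
 #include <stdio.h>

 #include <rte_mbuf.h>
 #include <rte_mempool.h>

 static struct rte_mempool *pools[RTE_MAX_NUMA_NODES];

 static struct rte_mempool *
 pool_create(unsigned int nb_mbuf, uint16_t mbuf_seg_size,
 	    unsigned int socket_id)
 {
 	char name[RTE_MEMPOOL_NAMESIZE];

 	snprintf(name, sizeof(name), "mbuf_pool_socket_%u", socket_id);
 	/* rte_pktmbuf_pool_create() returns NULL on failure. */
 	return rte_pktmbuf_pool_create(name, nb_mbuf, 256 /* cache */,
 				       0 /* priv size */, mbuf_seg_size,
 				       socket_id);
 }

 static void
 pools_free(void)
 {
 	unsigned int i;

 	/* Release every pool that was actually created. */
 	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
 		if (pools[i] != NULL)
 			rte_mempool_free(pools[i]);
 }

In the patch itself the handles are filled in init_config() and released in pmd_test_exit(), as shown in the diff below.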
  

Comments

Iremonger, Bernard April 8, 2019, 2:14 p.m. UTC | #1
> -----Original Message-----
> From: Shahaf Shuler [mailto:shahafs@mellanox.com]
> Sent: Sunday, April 7, 2019 6:02 AM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Iremonger, Bernard <bernard.iremonger@intel.com>
> Cc: dev@dpdk.org; rasland@mellanox.com; thomas@monjalon.net; Yigit,
> Ferruh <ferruh.yigit@intel.com>; stable@dpdk.org
> Subject: [PATCH v4 1/3] app/testpmd: fix mempool free on exit
> 
> Allocated mempools were never freed, which is bad practice.
> 
> Fixes: af75078fece3 ("first public release")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>

Acked-by: Bernard Iremonger <bernard.iremonger@intel.com>
  

Patch

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index aeaa74c989..b7f70b0c47 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -188,6 +188,8 @@  struct fwd_engine * fwd_engines[] = {
 	NULL,
 };
 
+struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
+
 struct fwd_config cur_fwd_config;
 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
 uint32_t retry_enabled;
@@ -835,7 +837,7 @@  setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
 /*
  * Configuration initialisation done once at init time.
  */
-static void
+static struct rte_mempool *
 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
 		 unsigned int socket_id)
 {
@@ -913,6 +915,7 @@  mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
 	} else if (verbose_level > 0) {
 		rte_mempool_dump(stdout, rte_mp);
 	}
+	return rte_mp;
 }
 
 /*
@@ -1130,14 +1133,18 @@  init_config(void)
 		uint8_t i;
 
 		for (i = 0; i < num_sockets; i++)
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
-					 socket_ids[i]);
+			mempools[i] = mbuf_pool_create(mbuf_data_size,
+						       nb_mbuf_per_pool,
+						       socket_ids[i]);
 	} else {
 		if (socket_num == UMA_NO_CONFIG)
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+			mempools[0] = mbuf_pool_create(mbuf_data_size,
+						       nb_mbuf_per_pool, 0);
 		else
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
-						 socket_num);
+			mempools[socket_num] = mbuf_pool_create
+							(mbuf_data_size,
+							 nb_mbuf_per_pool,
+							 socket_num);
 	}
 
 	init_port_config();
@@ -2394,6 +2401,7 @@  pmd_test_exit(void)
 	struct rte_device *device;
 	portid_t pt_id;
 	int ret;
+	int i;
 
 	if (test_done == 0)
 		stop_packet_forwarding();
@@ -2447,6 +2455,10 @@  pmd_test_exit(void)
 			return;
 		}
 	}
+	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+		if (mempools[i])
+			rte_mempool_free(mempools[i]);
+	}
 
 	printf("\nBye...\n");
 }