From patchwork Mon Nov 14 07:14:39 2022
X-Patchwork-Submitter: Fengnan Chang
X-Patchwork-Id: 119815
X-Patchwork-Delegate: thomas@monjalon.net
From: changfengnan
To: olivier.matz@6wind.com, andrew.rybchenko@oktetlabs.ru, dev@dpdk.org
Cc: changfengnan
Subject: [PATCH] mempool: fix rte_mempool_avail_count segment fault when used in multi-process
Date: Mon, 14 Nov 2022 15:14:39 +0800
Message-Id: <20221114071439.38902-1-changfengnan@bytedance.com>
List-Id: DPDK patches and discussions <dev.dpdk.org>

rte_mempool_create() puts the tailq entry into the rte_mempool_tailq
list before the pool is populated, while pool_data is only set during
population. So in a multi-process setup, when process A is creating a
mempool, process B can already find it through rte_mempool_lookup()
before pool_data has been set; if B then calls
rte_mempool_avail_count() on it, the unset pool_data is dereferenced
and the call segfaults. Fix this by putting the tailq entry into
rte_mempool_tailq only after the pool has been populated.

Signed-off-by: changfengnan
Acked-by: Morten Brørup
---
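Note for reviewers (not part of the commit): a minimal sketch of the
secondary-process sequence that can hit the race. The pool name
"test_pool" and the EAL setup are illustrative assumptions; the calls
involved in the crash are rte_mempool_lookup() and
rte_mempool_avail_count().

/*
 * Illustrative reproducer sketch. Assumption: a primary process is
 * concurrently inside rte_mempool_create("test_pool", ...).
 */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_mempool.h>

int
main(int argc, char **argv)
{
	struct rte_mempool *mp;

	/* assumed to be started with --proc-type=secondary */
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* "test_pool" is a hypothetical name created by the primary */
	mp = rte_mempool_lookup("test_pool");
	if (mp == NULL)
		return 0;

	/*
	 * Without this patch the pool can already be in the tailq while
	 * mp->pool_data is still unset, so this call may segfault.
	 */
	printf("available: %u\n", rte_mempool_avail_count(mp));

	return 0;
}
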
 lib/mempool/rte_mempool.c | 40 +++++++++++++++++++--------------------
 1 file changed, 19 insertions(+), 21 deletions(-)

diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 4c78071a34..b23d6138ff 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -798,9 +798,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 	int socket_id, unsigned flags)
 {
 	char mz_name[RTE_MEMZONE_NAMESIZE];
-	struct rte_mempool_list *mempool_list;
 	struct rte_mempool *mp = NULL;
-	struct rte_tailq_entry *te = NULL;
 	const struct rte_memzone *mz = NULL;
 	size_t mempool_size;
 	unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
@@ -820,8 +818,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 		RTE_CACHE_LINE_MASK) != 0);
 #endif
 
-	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
-
 	/* asked for zero items */
 	if (n == 0) {
 		rte_errno = EINVAL;
@@ -866,14 +862,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 	private_data_size = (private_data_size +
 			     RTE_MEMPOOL_ALIGN_MASK) &
 			     (~RTE_MEMPOOL_ALIGN_MASK);
-
-	/* try to allocate tailq entry */
-	te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
-	if (te == NULL) {
-		RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
-		goto exit_unlock;
-	}
-
 	mempool_size = RTE_MEMPOOL_HEADER_SIZE(mp, cache_size);
 	mempool_size += private_data_size;
 	mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
@@ -908,7 +896,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 	mp->private_data_size = private_data_size;
 	STAILQ_INIT(&mp->elt_list);
 	STAILQ_INIT(&mp->mem_list);
-
 	/*
 	 * local_cache pointer is set even if cache_size is zero.
 	 * The local_cache points to just past the elt_pa[] array.
@@ -922,12 +909,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 			mempool_cache_init(&mp->local_cache[lcore_id],
 					   cache_size);
 	}
-
-	te->data = mp;
-
-	rte_mcfg_tailq_write_lock();
-	TAILQ_INSERT_TAIL(mempool_list, te, next);
-	rte_mcfg_tailq_write_unlock();
 	rte_mcfg_mempool_write_unlock();
 
 	rte_mempool_trace_create_empty(name, n, elt_size, cache_size,
@@ -936,7 +917,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 
 exit_unlock:
 	rte_mcfg_mempool_write_unlock();
-	rte_free(te);
 	rte_mempool_free(mp);
 	return NULL;
 }
@@ -951,11 +931,22 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 {
 	int ret;
 	struct rte_mempool *mp;
+	struct rte_mempool_list *mempool_list;
+	struct rte_tailq_entry *te = NULL;
+
+	/* try to allocate tailq entry */
+	te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
+	if (te == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
+		return NULL;
+	}
 
 	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
 		private_data_size, socket_id, flags);
-	if (mp == NULL)
+	if (mp == NULL) {
+		rte_free(te);
 		return NULL;
+	}
 
 	/*
 	 * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to
@@ -984,12 +975,19 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	if (obj_init)
 		rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
 
+	te->data = mp;
+	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+	rte_mcfg_tailq_write_lock();
+	TAILQ_INSERT_TAIL(mempool_list, te, next);
+	rte_mcfg_tailq_write_unlock();
+
 	rte_mempool_trace_create(name, n, elt_size, cache_size,
 		private_data_size, mp_init, mp_init_arg, obj_init,
 		obj_init_arg, flags, mp);
 	return mp;
 
 fail:
+	rte_free(te);
 	rte_mempool_free(mp);
 	return NULL;
 }
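
Note (not part of the patch): the reason deferring the insertion helps
is that a lookup can only return pools whose tailq entry is already
linked into rte_mempool_tailq. Below is a simplified paraphrase of such
a lookup, written as if it lived inside lib/mempool/rte_mempool.c
(where rte_mempool_tailq, <string.h> and the EAL locking helpers are in
scope); it is a sketch, not a copy of the actual rte_mempool_lookup()
code.

/*
 * Simplified sketch of a name lookup over the shared mempool tailq.
 * With this patch, any mp found here has already been populated, so
 * mp->pool_data is valid in every process that can see the entry.
 */
static struct rte_mempool *
mempool_lookup_sketch(const char *name)
{
	struct rte_mempool_list *list;
	struct rte_tailq_entry *te;
	struct rte_mempool *mp = NULL;

	list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, list, next) {
		mp = (struct rte_mempool *) te->data;
		if (strncmp(name, mp->name, RTE_MEMPOOL_NAMESIZE) == 0)
			break;
		mp = NULL;
	}
	rte_mcfg_tailq_read_unlock();

	return mp;
}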