get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied are changed).

put:
Update a patch (full update).
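
The Allow header in the response below confirms which of these methods the endpoint accepts. As a minimal sketch of the read-only side, the following hypothetical libcurl client fetches the same patch as plain JSON; libcurl is an illustrative choice, not something Patchwork mandates, and the ?format=json query parameter mirrors the ?format=api parameter visible in the captured request:

/*
 * Minimal sketch: fetch the patch shown below as JSON using libcurl.
 * Build with: cc fetch_patch.c -lcurl
 */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
    CURLcode rc;
    CURL *curl;

    curl_global_init(CURL_GLOBAL_DEFAULT);
    curl = curl_easy_init();
    if (curl == NULL)
        return 1;

    /* ?format=json requests plain JSON rather than the browsable
     * ?format=api rendering captured in this page. */
    curl_easy_setopt(curl, CURLOPT_URL,
                     "https://patches.dpdk.org/api/patches/7328/?format=json");
    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

    /* With no write callback set, libcurl prints the body to stdout. */
    rc = curl_easy_perform(curl);
    if (rc != CURLE_OK)
        fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));

    curl_easy_cleanup(curl);
    curl_global_cleanup();
    return rc == CURLE_OK ? 0 : 1;
}

Write access via PUT or PATCH is typically restricted: most Patchwork instances require an authenticated request (for example an API token) and maintainer rights on the project before fields such as state or delegate can be changed.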

GET /api/patches/7328/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 7328,
    "url": "https://patches.dpdk.org/api/patches/7328/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1443623388-29104-3-git-send-email-ian.betts@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1443623388-29104-3-git-send-email-ian.betts@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1443623388-29104-3-git-send-email-ian.betts@intel.com",
    "date": "2015-09-30T14:29:45",
    "name": "[dpdk-dev,v1,2/5] examples: add cooperative scheduler subsytem for performance-thread app",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "c519e1544790aaa1e0a4b7ca12708d89540bb5d9",
    "submitter": {
        "id": 340,
        "url": "https://patches.dpdk.org/api/people/340/?format=api",
        "name": "ibetts",
        "email": "ian.betts@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1443623388-29104-3-git-send-email-ian.betts@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/7328/comments/",
    "check": "pending",
    "checks": "https://patches.dpdk.org/api/patches/7328/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id DEF1C8E7B;\n\tWed, 30 Sep 2015 16:30:14 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id 252408DA4\n\tfor <dev@dpdk.org>; Wed, 30 Sep 2015 16:30:07 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby fmsmga101.fm.intel.com with ESMTP; 30 Sep 2015 07:29:55 -0700",
            "from irvmail001.ir.intel.com ([163.33.26.43])\n\tby FMSMGA003.fm.intel.com with ESMTP; 30 Sep 2015 07:29:55 -0700",
            "from sivswdev01.ir.intel.com (sivswdev01.ir.intel.com\n\t[10.237.217.45])\n\tby irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id\n\tt8UETqnS003358; Wed, 30 Sep 2015 15:29:52 +0100",
            "from sivswdev01.ir.intel.com (localhost [127.0.0.1])\n\tby sivswdev01.ir.intel.com with ESMTP id t8UETqen029156;\n\tWed, 30 Sep 2015 15:29:52 +0100",
            "(from ibetts@localhost)\n\tby sivswdev01.ir.intel.com with  id t8UETqBG029152;\n\tWed, 30 Sep 2015 15:29:52 +0100"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.17,612,1437462000\"; d=\"scan'208\";a=\"571363088\"",
        "From": "ibetts <ian.betts@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Wed, 30 Sep 2015 15:29:45 +0100",
        "Message-Id": "<1443623388-29104-3-git-send-email-ian.betts@intel.com>",
        "X-Mailer": "git-send-email 1.7.4.1",
        "In-Reply-To": "<1443623388-29104-1-git-send-email-ian.betts@intel.com>",
        "References": "<1443623388-29104-1-git-send-email-ian.betts@intel.com>",
        "Cc": "Ian Betts <ian.betts@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v1 2/5] examples: add cooperative scheduler\n\tsubsytem for performance-thread app",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Ian Betts <ian.betts@intel.com>\n\nThis commit adds a cooperative scheduler subsystem in the\nperformance-thread sample application.\n\nIt is used in the performance-thread sample application\nby the l3fwd-thread application to enable multiple\nlightweight threads to be run in an EAL thread.\n\nSigned-off-by: Ian Betts <ian.betts@intel.com>\n---\n .../performance-thread/common/arch/x86/atomic.h    |  60 ++\n examples/performance-thread/common/arch/x86/ctx.c  |  66 ++\n examples/performance-thread/common/arch/x86/ctx.h  |  57 ++\n examples/performance-thread/common/common.mk       |  40 +\n examples/performance-thread/common/lthread.c       | 528 +++++++++++++\n examples/performance-thread/common/lthread.h       |  99 +++\n examples/performance-thread/common/lthread_api.h   | 822 +++++++++++++++++++++\n examples/performance-thread/common/lthread_cond.c  | 228 ++++++\n examples/performance-thread/common/lthread_cond.h  |  77 ++\n examples/performance-thread/common/lthread_diag.c  | 315 ++++++++\n examples/performance-thread/common/lthread_diag.h  | 129 ++++\n .../performance-thread/common/lthread_diag_api.h   | 295 ++++++++\n examples/performance-thread/common/lthread_int.h   | 212 ++++++\n examples/performance-thread/common/lthread_mutex.c | 244 ++++++\n examples/performance-thread/common/lthread_mutex.h |  52 ++\n .../performance-thread/common/lthread_objcache.h   | 160 ++++\n examples/performance-thread/common/lthread_pool.h  | 338 +++++++++\n examples/performance-thread/common/lthread_queue.h | 303 ++++++++\n examples/performance-thread/common/lthread_sched.c | 644 ++++++++++++++++\n examples/performance-thread/common/lthread_sched.h | 152 ++++\n examples/performance-thread/common/lthread_timer.h |  47 ++\n examples/performance-thread/common/lthread_tls.c   | 242 ++++++\n examples/performance-thread/common/lthread_tls.h   |  64 ++\n 23 files changed, 5174 insertions(+)\n create mode 100644 examples/performance-thread/common/arch/x86/atomic.h\n create mode 100644 examples/performance-thread/common/arch/x86/ctx.c\n create mode 100644 examples/performance-thread/common/arch/x86/ctx.h\n create mode 100644 examples/performance-thread/common/common.mk\n create mode 100644 examples/performance-thread/common/lthread.c\n create mode 100644 examples/performance-thread/common/lthread.h\n create mode 100644 examples/performance-thread/common/lthread_api.h\n create mode 100644 examples/performance-thread/common/lthread_cond.c\n create mode 100644 examples/performance-thread/common/lthread_cond.h\n create mode 100644 examples/performance-thread/common/lthread_diag.c\n create mode 100644 examples/performance-thread/common/lthread_diag.h\n create mode 100644 examples/performance-thread/common/lthread_diag_api.h\n create mode 100644 examples/performance-thread/common/lthread_int.h\n create mode 100644 examples/performance-thread/common/lthread_mutex.c\n create mode 100644 examples/performance-thread/common/lthread_mutex.h\n create mode 100644 examples/performance-thread/common/lthread_objcache.h\n create mode 100644 examples/performance-thread/common/lthread_pool.h\n create mode 100644 examples/performance-thread/common/lthread_queue.h\n create mode 100644 examples/performance-thread/common/lthread_sched.c\n create mode 100644 examples/performance-thread/common/lthread_sched.h\n create mode 100644 examples/performance-thread/common/lthread_timer.h\n create mode 100644 examples/performance-thread/common/lthread_tls.c\n create mode 100644 
examples/performance-thread/common/lthread_tls.h",
    "diff": "diff --git a/examples/performance-thread/common/arch/x86/atomic.h b/examples/performance-thread/common/arch/x86/atomic.h\nnew file mode 100644\nindex 0000000..b1fa703\n--- /dev/null\n+++ b/examples/performance-thread/common/arch/x86/atomic.h\n@@ -0,0 +1,59 @@\n+/*\n+ *-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef ATOMIC_H_\n+#define ATOMIC_H_\n+\n+#include <stdint.h>\n+\n+/*\n+ * Atomically set a value and return the old value\n+ */\n+static inline uint64_t\n+atomic64_xchg(uint64_t *ptr, uint64_t val) __attribute__ ((always_inline));\n+static inline uint64_t\n+atomic64_xchg(uint64_t *ptr, uint64_t val)\n+{\n+\tasm volatile (\n+\t\t\t\t\"lock;\"\n+\t\t\t\t\"xchgq %0,%1;\"\n+\t\t\t\t : \"=r\" ((uint64_t) val)\n+\t\t\t\t : \"m\" (*(uint64_t *) ptr), \"0\" (val)\n+\t\t\t\t : \"memory\");\n+\n+\treturn val;\n+}\n+\n+\n+#endif /* ATOMIC_H_ */\ndiff --git a/examples/performance-thread/common/arch/x86/ctx.c b/examples/performance-thread/common/arch/x86/ctx.c\nnew file mode 100644\nindex 0000000..299d74f\n--- /dev/null\n+++ b/examples/performance-thread/common/arch/x86/ctx.c\n@@ -0,0 +1,64 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#if defined(__x86_64__)\n+__asm__ (\n+\".text\\n\"\n+\".p2align 4,,15\\n\"\n+\".globl ctx_switch\\n\"\n+\".globl _ctx_switch\\n\"\n+\"ctx_switch:\\n\"\n+\"_ctx_switch:\\n\"\n+\"\tmovq %rsp, 0(%rsi)\t# save stack_pointer\\n\"\n+\"\tmovq %rbp, 8(%rsi)\t# save frame_pointer\\n\"\n+\"\tmovq (%rsp), %rax\t# save insn_pointer\\n\"\n+\"\tmovq %rax, 16(%rsi)\\n\"\n+\"\tmovq %rbx, 24(%rsi)\\n\t# save rbx,r12-r15\\n\"\n+\"\tmovq 24(%rdi), %rbx\\n\"\n+\"\tmovq %r15, 56(%rsi)\\n\"\n+\"\tmovq %r14, 48(%rsi)\\n\"\n+\"\tmovq 48(%rdi), %r14\\n\"\n+\"\tmovq 56(%rdi), %r15\\n\"\n+\"\tmovq %r13, 40(%rsi)\\n\"\n+\"\tmovq %r12, 32(%rsi)\\n\"\n+\"\tmovq 32(%rdi), %r12\\n\"\n+\"\tmovq 40(%rdi), %r13\\n\"\n+\"\tmovq 0(%rdi), %rsp\t# restore stack_pointer\\n\"\n+\"\tmovq 16(%rdi), %rax\t# restore insn_pointer\\n\"\n+\"\tmovq 8(%rdi), %rbp\t# restore frame_pointer\\n\"\n+\"\tmovq %rax, (%rsp)\\n\"\n+\"\tret\\n\"\n+\t);\n+#else\n+#pragma GCC error \"__x86_64__ is not defined\"\n+#endif\ndiff --git a/examples/performance-thread/common/arch/x86/ctx.h b/examples/performance-thread/common/arch/x86/ctx.h\nnew file mode 100644\nindex 0000000..d0a626d\n--- /dev/null\n+++ b/examples/performance-thread/common/arch/x86/ctx.h\n@@ -0,0 +1,57 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+\n+#ifndef CTX_H\n+#define CTX_H\n+\n+/*\n+ * CPU context registers\n+ */\n+struct ctx {\n+\tvoid\t*rsp;\t\t/* 0  */\n+\tvoid\t*rbp;\t\t/* 8  */\n+\tvoid\t*rip;\t\t/* 16 */\n+\tvoid\t*rbx;\t\t/* 24 */\n+\tvoid\t*r12;\t\t/* 32 */\n+\tvoid\t*r13;\t\t/* 40 */\n+\tvoid\t*r14;\t\t/* 48 */\n+\tvoid\t*r15;\t\t/* 56 */\n+};\n+\n+\n+void\n+ctx_switch(struct ctx *new_ctx, struct ctx *curr_ctx);\n+\n+\n+#endif /* RTE_CTX_H_ */\ndiff --git a/examples/performance-thread/common/common.mk b/examples/performance-thread/common/common.mk\nnew file mode 100644\nindex 0000000..a47161f\n--- /dev/null\n+++ b/examples/performance-thread/common/common.mk\n@@ -0,0 +1,40 @@\n+#   BSD LICENSE\n+#\n+#   Copyright(c) 2014 6WIND S.A.\n+#\n+#   Redistribution and use in source and binary forms, with or without\n+#   modification, are permitted provided that the following conditions\n+#   are met:\n+#\n+#     * Redistributions of source code must retain the above copyright\n+#       notice, this list of conditions and the following disclaimer.\n+#     * Redistributions in binary form must reproduce the above copyright\n+#       notice, this list of conditions and the following disclaimer in\n+#       the documentation and/or other materials provided with the\n+#       distribution.\n+#     * Neither the name of 6WIND S.A. nor the names of its\n+#       contributors may be used to endorse or promote products derived\n+#       from this software without specific prior written permission.\n+#\n+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+#   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n+# list the C files belonhing to the lthread subsystem, these are common to all lthread apps\n+SRCS-y +=\t../common/lthread.c \\\n+\t\t\t../common/lthread_sched.c \\\n+\t\t\t../common/lthread_cond.c \\\n+\t\t\t../common/lthread_tls.c \\\n+\t\t\t../common/lthread_mutex.c \\\n+\t\t\t../common/lthread_diag.c \\\n+\t\t\t../common/arch/x86/ctx.c\n+\n+INCLUDES += -I$(RTE_SDK)/examples/performance-thread/common/ -I$(RTE_SDK)/examples/performance-thread/common/arch/x86/\ndiff --git a/examples/performance-thread/common/lthread.c b/examples/performance-thread/common/lthread.c\nnew file mode 100644\nindex 0000000..579a539\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread.c\n@@ -0,0 +1,528 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Some portions of this software is derived from the\n+ * https://github.com/halayli/lthread which carrys the following license.\n+ *\n+ * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ * 1. Redistributions of source code must retain the above copyright\n+ *    notice, this list of conditions and the following disclaimer.\n+ * 2. 
Redistributions in binary form must reproduce the above copyright\n+ *    notice, this list of conditions and the following disclaimer in the\n+ *    documentation and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+ * SUCH DAMAGE.\n+ */\n+\n+#define RTE_MEM 1\n+\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <stdint.h>\n+#include <stddef.h>\n+#include <limits.h>\n+#include <inttypes.h>\n+#include <unistd.h>\n+#include <pthread.h>\n+#include <fcntl.h>\n+#include <sys/time.h>\n+#include <sys/mman.h>\n+\n+#include <rte_config.h>\n+#include <rte_log.h>\n+#include <ctx.h>\n+\n+#include \"lthread_api.h\"\n+#include \"lthread.h\"\n+#include \"lthread_timer.h\"\n+#include \"lthread_tls.h\"\n+#include \"lthread_objcache.h\"\n+#include \"lthread_diag.h\"\n+\n+\n+/*\n+ * This function gets called after an lthread function has returned.\n+ */\n+void _lthread_exit_handler(struct lthread *lt)\n+{\n+\n+\tlt->state |= BIT(ST_LT_EXITED);\n+\n+\tif (!(lt->state & BIT(ST_LT_DETACH))) {\n+\t\t/* thread is this not explicitly detached\n+\t\t * it must be joinable, so we call lthread_exit().\n+\t\t */\n+\t\tlthread_exit(NULL);\n+\t}\n+\n+\t/* if we get here the thread is detached so we can reschedule it,\n+\t * allowing the scheduler to free it\n+\t */\n+\t_reschedule();\n+}\n+\n+\n+/*\n+ * Free resources allocated to an lthread\n+ */\n+void _lthread_free(struct lthread *lt)\n+{\n+\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_FREE, lt, 0);\n+\n+\t/* delete pthread style TLS keys associated with this thread */\n+\tint i;\n+\n+\tfor (i = 0; i < LTHREAD_MAX_KEYS; i++) {\n+\t\tif (lt->tls->keys[i].key == NULL)\n+\t\t\tcontinue;\n+\n+\t\ttls_destructor_func destructor;\n+\n+\t\tdestructor = lt->tls->keys[i].key->destructor;\n+\t\tvoid *data = lt->tls->keys[i].data;\n+\n+\n+\t\tif ((destructor != NULL) && (data != NULL))\n+\t\t\tdestructor(data);\n+\t\t_lthread_tls_delete_ref(&lt->tls->keys[i]);\n+\n+\t}\n+\n+\t/* free memory allocated for TLS defined using RTE_PER_LTHREAD macros */\n+\tif (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE)\n+\t\t_lthread_objcache_free(lt->tls->root_sched->per_lthread_cache,\n+\t\t\t\t\tlt->per_lthread_data);\n+\n+\t/* free pthread style TLS memory */\n+\t_lthread_objcache_free(lt->tls->root_sched->tls_cache, lt->tls);\n+\n+\t/* free the stack */\n+\t_lthread_objcache_free(lt->stack_container->root_sched->stack_cache,\n+\t\t\t\tlt->stack_container);\n+\n+\t/* now free the thread */\n+\t_lthread_objcache_free(lt->root_sched->lthread_cache, lt);\n+\n+}\n+\n+/*\n+ * Allocate a stack and maintain a cache of stacks\n+ */\n+struct lthread_stack *_stack_alloc(void)\n+{\n+\tstruct lthread_stack *s;\n+\n+\ts = _lthread_objcache_alloc((THIS_SCHED)->stack_cache);\n+\tLTHREAD_ASSERT(s != 
NULL);\n+\n+\ts->root_sched = THIS_SCHED;\n+\ts->stack_size = LTHREAD_MAX_STACK_SIZE;\n+\treturn s;\n+}\n+\n+/*\n+ * Execute a ctx by invoking the start function\n+ * On return call an exit handler if the user has provided one\n+ */\n+static void _lthread_exec(void *arg)\n+{\n+\tstruct lthread *lt = (struct lthread *)arg;\n+\n+\t/* invoke the contexts function */\n+\tlt->fun(lt->arg);\n+\t/* do exit handling */\n+\tif (lt->exit_handler != NULL)\n+\t\tlt->exit_handler(lt);\n+}\n+\n+/*\n+ *\tInitialize an lthread\n+ *\tSet its function, args, and exit handler\n+ */\n+void\n+_lthread_init(struct lthread *lt,\n+\tlthread_func_t fun, void *arg, lthread_exit_func exit_handler)\n+{\n+\n+\t/* set ctx func and args */\n+\tlt->fun = fun;\n+\tlt->arg = arg;\n+\tlt->exit_handler = exit_handler;\n+\n+\t/* set initial state */\n+\tlt->birth = _sched_now();\n+\tlt->state = BIT(ST_LT_INIT);\n+\tlt->join = LT_JOIN_INITIAL;\n+}\n+\n+/*\n+ *\tset the lthread stack\n+ */\n+void _lthread_set_stack(struct lthread *lt, void *stack, size_t stack_size)\n+{\n+\tchar *stack_top = (char *)stack + stack_size;\n+\tvoid **s = (void **)stack_top;\n+\n+\t/* set stack */\n+\tlt->stack = stack;\n+\tlt->stack_size = stack_size;\n+\n+\t/* set initial context */\n+\ts[-3] = NULL;\n+\ts[-2] = (void *)lt;\n+\tlt->ctx.rsp = (void *)(stack_top - (4 * sizeof(void *)));\n+\tlt->ctx.rbp = (void *)(stack_top - (3 * sizeof(void *)));\n+\tlt->ctx.rip = (void *)_lthread_exec;\n+}\n+\n+/*\n+ * Create an lthread on the current scheduler\n+ * If there is no current scheduler on this pthread then first create one\n+ */\n+int\n+lthread_create(struct lthread **new_lt, int lcore_id,\n+\t\tlthread_func_t fun, void *arg)\n+{\n+\tif ((new_lt == NULL) || (fun == NULL))\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\tif (lcore_id < 0)\n+\t\tlcore_id = rte_lcore_id();\n+\telse if (lcore_id > LTHREAD_MAX_LCORES)\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\tstruct lthread *lt = NULL;\n+\n+\tif (THIS_SCHED == NULL) {\n+\t\tTHIS_SCHED = _lthread_sched_create(0);\n+\t\tif (THIS_SCHED == NULL) {\n+\t\t\tperror(\"Failed to create scheduler\");\n+\t\t\treturn POSIX_ERRNO(EAGAIN);\n+\t\t}\n+\t}\n+\n+\t/* allocate a thread structure */\n+\tlt = _lthread_objcache_alloc((THIS_SCHED)->lthread_cache);\n+\tif (lt == NULL)\n+\t\treturn POSIX_ERRNO(EAGAIN);\n+\n+\tbzero(lt, sizeof(struct lthread));\n+\tlt->root_sched = THIS_SCHED;\n+\n+\t/* set the function args and exit handlder */\n+\t_lthread_init(lt, fun, arg, _lthread_exit_handler);\n+\n+\t/* put it in the ready queue */\n+\t*new_lt = lt;\n+\n+\tif (lcore_id < 0)\n+\t\tlcore_id = rte_lcore_id();\n+\n+\tDIAG_CREATE_EVENT(lt, LT_DIAG_LTHREAD_CREATE);\n+\n+\trte_wmb();\n+\t_ready_queue_insert(_lthread_sched_get(lcore_id), lt);\n+\treturn 0;\n+}\n+\n+/*\n+ * Schedules lthread to sleep for `nsecs`\n+ * setting the lthread state to LT_ST_SLEEPING.\n+ * lthread state is cleared upon resumption or expiry.\n+ */\n+static inline void _lthread_sched_sleep(struct lthread *lt, uint64_t nsecs)\n+{\n+\tuint64_t state = lt->state;\n+\tuint64_t clks = _ns_to_clks(nsecs);\n+\n+\tif (clks) {\n+\t\t_timer_start(lt, clks);\n+\t\tlt->state = state | BIT(ST_LT_SLEEPING);\n+\t}\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_SLEEP, clks, 0);\n+\t_suspend();\n+}\n+\n+/*\n+ * Insert the thread into timer tree with specified timeout\n+ */\n+void _lthread_sched_busy_sleep(struct lthread *lt, uint64_t nsecs)\n+{\n+\t_lthread_sched_sleep(lt, nsecs);\n+}\n+\n+/*\n+ * Cancels any running timer.\n+ * This can be called multiple times on the same lthread regardless if 
it was\n+ * sleeping or not.\n+ */\n+int _lthread_desched_sleep(struct lthread *lt)\n+{\n+\tuint64_t state = lt->state;\n+\n+\tif (state & BIT(ST_LT_SLEEPING)) {\n+\t\t_timer_stop(lt);\n+\t\tstate &= (CLEARBIT(ST_LT_SLEEPING) & CLEARBIT(ST_LT_EXPIRED));\n+\t\tlt->state = state | BIT(ST_LT_READY);\n+\t}\n+\treturn 1;\n+}\n+\n+/*\n+ * set user data pointer in an lthread\n+ */\n+void lthread_set_data(void *data)\n+{\n+\tif (sizeof(void *) == RTE_PER_LTHREAD_SECTION_SIZE)\n+\t\tTHIS_LTHREAD->per_lthread_data = data;\n+}\n+\n+/*\n+ * Retrieve user data pointer from an lthread\n+ */\n+void *lthread_get_data(void)\n+{\n+\treturn THIS_LTHREAD->per_lthread_data;\n+}\n+\n+/*\n+ * Return the current lthread handle\n+ */\n+struct lthread *lthread_current(void)\n+{\n+\tstruct lthread_sched *sched = THIS_SCHED;\n+\n+\tif (sched)\n+\t\treturn sched->current_lthread;\n+\treturn NULL;\n+}\n+\n+/*\n+ * Mark the specified as canceled\n+ */\n+int lthread_cancel(struct lthread *lt)\n+{\n+\tif (lt == NULL)\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_CANCEL, lt, 0);\n+\n+\tlt->state |= BIT(ST_LT_CANCELLED);\n+\tif (_lthread_desched_sleep(lt))\n+\t\t_ready_queue_insert(THIS_SCHED, lt);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Suspend the current lthread for specified time\n+ */\n+void lthread_sleep(uint64_t nsecs)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\t_lthread_sched_sleep(lt, nsecs);\n+\n+}\n+\n+/*\n+ * Suspend the current lthread for specified time\n+ */\n+void lthread_sleep_clks(uint64_t clks)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\tuint64_t state = lt->state;\n+\n+\tif (clks) {\n+\t\t_timer_start(lt, clks);\n+\t\tlt->state = state | BIT(ST_LT_SLEEPING);\n+\t}\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_SLEEP, clks, 0);\n+\t_suspend();\n+}\n+\n+/*\n+ * Requeue the current thread to the back of the ready queue\n+ */\n+void lthread_yield(void)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_YIELD, 0, 0);\n+\n+\t_ready_queue_insert(THIS_SCHED, lt);\n+\tctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);\n+}\n+\n+/*\n+ * Exit the current lthread\n+ * If a thread is joining pass the user pointer to it\n+ */\n+void lthread_exit(void *ptr)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\t/* if thread is detached (this is not valid) just exit */\n+\tif (lt->state & BIT(ST_LT_DETACH))\n+\t\treturn;\n+\n+\t/* There is a race between lthread_join() and lthread_exit()\n+\t *  - if exit before join then we suspend and resume on join\n+\t *  - if join before exit then we resume the joining thread\n+\t */\n+\tif ((lt->join == LT_JOIN_INITIAL)\n+\t    && rte_atomic64_cmpset(&lt->join, LT_JOIN_INITIAL,\n+\t\t\t\t   LT_JOIN_EXITING)) {\n+\n+\t\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_EXIT, 1, 0);\n+\t\t_suspend();\n+\t\t/* set the exit value */\n+\t\tif ((ptr != NULL) && (lt->lt_join->lt_exit_ptr != NULL))\n+\t\t\t*(lt->lt_join->lt_exit_ptr) = ptr;\n+\n+\t\t/* let the joining thread know we have set the exit value */\n+\t\tlt->join = LT_JOIN_EXIT_VAL_SET;\n+\t} else {\n+\n+\t\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_EXIT, 0, 0);\n+\t\t/* set the exit value */\n+\t\tif ((ptr != NULL) && (lt->lt_join->lt_exit_ptr != NULL))\n+\t\t\t*(lt->lt_join->lt_exit_ptr) = ptr;\n+\t\t/* let the joining thread know we have set the exit value */\n+\t\tlt->join = LT_JOIN_EXIT_VAL_SET;\n+\t\t_ready_queue_insert(lt->lt_join->sched,\n+\t\t\t\t    (struct lthread *)lt->lt_join);\n+\t}\n+\n+\n+\t/* wait until the joinging thread has collected the exit value */\n+\twhile (lt->join != 
LT_JOIN_EXIT_VAL_READ)\n+\t\t_reschedule();\n+\n+\t/* reset join state */\n+\tlt->join = LT_JOIN_INITIAL;\n+\n+\t/* detach it so its resources can be released */\n+\tlt->state |= (BIT(ST_LT_DETACH) | BIT(ST_LT_EXITED));\n+}\n+\n+/*\n+ * Join an lthread\n+ * Suspend until the joined thread returns\n+ */\n+int lthread_join(struct lthread *lt, void **ptr)\n+{\n+\tif (lt == NULL)\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\tstruct lthread *current = THIS_LTHREAD;\n+\tuint64_t lt_state = lt->state;\n+\n+\t/* invalid to join a detached thread, or a thread that is joined */\n+\tif ((lt_state & BIT(ST_LT_DETACH)) || (lt->join == LT_JOIN_THREAD_SET))\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\t/* pointer to the joining thread and a poingter to return a value */\n+\tlt->lt_join = current;\n+\tcurrent->lt_exit_ptr = ptr;\n+\t/* There is a race between lthread_join() and lthread_exit()\n+\t *  - if join before exit we suspend and will resume when exit is called\n+\t *  - if exit before join we resume the exiting thread\n+\t */\n+\tif ((lt->join == LT_JOIN_INITIAL)\n+\t    && rte_atomic64_cmpset(&lt->join, LT_JOIN_INITIAL,\n+\t\t\t\t   LT_JOIN_THREAD_SET)) {\n+\n+\t\tDIAG_EVENT(current, LT_DIAG_LTHREAD_JOIN, lt, 1);\n+\t\t_suspend();\n+\t} else {\n+\t\tDIAG_EVENT(current, LT_DIAG_LTHREAD_JOIN, lt, 0);\n+\t\t_ready_queue_insert(lt->sched, lt);\n+\t}\n+\n+\t/* wait for exiting thread to set return value */\n+\twhile (lt->join != LT_JOIN_EXIT_VAL_SET)\n+\t\t_reschedule();\n+\n+\t/* collect the return value */\n+\tif (ptr != NULL)\n+\t\t*ptr = *current->lt_exit_ptr;\n+\n+\t/* let the exiting thread proceed to exit */\n+\tlt->join = LT_JOIN_EXIT_VAL_READ;\n+\treturn 0;\n+}\n+\n+\n+/*\n+ * Detach current lthread\n+ * A detached thread cannot be joined\n+ */\n+void lthread_detach(void)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_DETACH, 0, 0);\n+\n+\tuint64_t state = lt->state;\n+\n+\tlt->state = state | BIT(ST_LT_DETACH);\n+}\n+\n+/*\n+ * Set function name of an lthread\n+ * this is a debug aid\n+ */\n+void lthread_set_funcname(const char *f)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\tstrncpy(lt->funcname, f, sizeof(lt->funcname));\n+\tlt->funcname[sizeof(lt->funcname)-1] = 0;\n+}\ndiff --git a/examples/performance-thread/common/lthread.h b/examples/performance-thread/common/lthread.h\nnew file mode 100644\nindex 0000000..571c289\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread.h\n@@ -0,0 +1,99 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Some portions of this software is derived from the\n+ * https://github.com/halayli/lthread which carrys the following license.\n+ *\n+ * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ * 1. Redistributions of source code must retain the above copyright\n+ *    notice, this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ *    notice, this list of conditions and the following disclaimer in the\n+ *    documentation and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED.  
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+ * SUCH DAMAGE.\n+ */\n+\n+#ifndef LTHREAD_H_\n+#define LTHREAD_H_\n+\n+#include <rte_per_lcore.h>\n+\n+#include \"lthread_api.h\"\n+#include \"lthread_diag.h\"\n+\n+struct lthread;\n+struct lthread_sched;\n+\n+/* function to be called when a context function returns */\n+typedef void (*lthread_exit_func) (struct lthread *);\n+\n+void _lthread_exit_handler(struct lthread *lt);\n+\n+void lthread_set_funcname(const char *f);\n+\n+void _lthread_sched_busy_sleep(struct lthread *lt, uint64_t nsecs);\n+\n+int _lthread_desched_sleep(struct lthread *lt);\n+\n+void _lthread_free(struct lthread *lt);\n+\n+struct lthread_sched *_lthread_sched_get(int lcore_id);\n+\n+struct lthread_stack *_stack_alloc(void);\n+\n+struct\n+lthread_sched *_lthread_sched_create(size_t stack_size);\n+\n+void\n+_lthread_init(struct lthread *lt,\n+\t      lthread_func_t fun, void *arg, lthread_exit_func exit_handler);\n+\n+void _lthread_set_stack(struct lthread *lt, void *stack, size_t stack_size);\n+\n+#endif\t\t\t\t/* LTHREAD_H_ */\ndiff --git a/examples/performance-thread/common/lthread_api.h b/examples/performance-thread/common/lthread_api.h\nnew file mode 100644\nindex 0000000..1330df0\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_api.h\n@@ -0,0 +1,822 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Some portions of this software may have been derived from the\n+ * https://github.com/halayli/lthread which carrys the following license.\n+ *\n+ * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ * 1. Redistributions of source code must retain the above copyright\n+ *    notice, this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ *    notice, this list of conditions and the following disclaimer in the\n+ *    documentation and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+ * SUCH DAMAGE.\n+ */\n+\n+/**\n+ *  @file\n+ *\n+ *  This file contains the public API for the L-thread subsystem\n+ *\n+ *  The L_thread subsystem provides a simple cooperative scheduler to\n+ *  enable arbitrary functions to run as cooperative threads within a\n+ * single P-thread.\n+ *\n+ * The subsystem provides a P-thread like API that is intended to assist in\n+ * reuse of legacy code written for POSIX p_threads.\n+ *\n+ * The L-thread subsystem relies on cooperative multitasking, as such\n+ * an L-thread must possess frequent rescheduling points. Often these\n+ * rescheduling points are provided transparently when the application\n+ * invokes an L-thread API.\n+ *\n+ * In some applications it is possible that the program may enter a loop the\n+ * exit condition for which depends on the action of another thread or a\n+ * response from hardware. In such a case it is necessary to yield the thread\n+ * periodically in the loop body, to allow other threads an opportunity to\n+ * run. 
This can be done by inserting a call to lthread_yield() or\n+ * lthread_sleep(n) in the body of the loop.\n+ *\n+ * If the application makes expensive / blocking system calls or does other\n+ * work that would take an inordinate amount of time to complete, this will\n+ * stall the cooperative scheduler resulting in very poor performance.\n+ *\n+ * In such cases an L-thread can be migrated temporarily to another scheduler\n+ * running in a different P-thread on another core. When the expensive or\n+ * blocking operation is completed it can be migrated back to the original\n+ * scheduler.  In this way other threads can continue to run on the original\n+ * scheduler and will be completely unaffected by the blocking behaviour.\n+ * To migrate an L-thread to another scheduler the API lthread_set_affinity()\n+ * is provided.\n+ *\n+ * If L-threads that share data are running on the same core it is possible\n+ * to design programs where mutual exclusion mechanisms to protect shared data\n+ * can be avoided. This is due to the fact that the cooperative threads cannot\n+ * preempt each other.\n+ *\n+ * There are two cases where mutual exclusion mechanisms are necessary.\n+ *\n+ *  a) Where the L-threads sharing data are running on different cores.\n+ *  b) Where code must yield while updating data shared with another thread.\n+ *\n+ * The L-thread subsystem provides a set of mutex APIs to help with such\n+ * scenarios, however excessive reliance on on these will impact performance\n+ * and is best avoided if possible.\n+ *\n+ * L-threads can synchronise using a fast condition variable implementation\n+ * that supports signal and broadcast. An L-thread running on any core can\n+ * wait on a condition.\n+ *\n+ * L-threads can have L-thread local storage with an API modelled on either the\n+ * P-thread get/set specific API or using PER_LTHREAD macros modelled on the\n+ * RTE_PER_LCORE macros. 
Alternatively a simple user data pointer may be set\n+ * and retrieved from a thread.\n+ */\n+#ifndef LTHREAD_H\n+#define LTHREAD_H\n+\n+#include <stdint.h>\n+#include <sys/socket.h>\n+#include <fcntl.h>\n+#include <netinet/in.h>\n+\n+#include <rte_cycles.h>\n+\n+\n+struct lthread;\n+struct lthread_cond;\n+struct lthread_mutex;\n+\n+struct lthread_condattr;\n+struct lthread_mutexattr;\n+\n+typedef void (*lthread_func_t) (void *);\n+\n+/*\n+ * Define the size of stack for an lthread\n+ * Then this is the size that will be allocated on lthread creation\n+ * This is a fixed size and will not grow.\n+ */\n+#define LTHREAD_MAX_STACK_SIZE (1024*64)\n+\n+/**\n+ * Define the maximum number of TLS keys that can be created\n+ *\n+ */\n+#define LTHREAD_MAX_KEYS 1024\n+\n+/**\n+ * Define the maximum number of lcores that will support lthreads\n+ */\n+#define LTHREAD_MAX_LCORES RTE_MAX_LCORE\n+\n+/**\n+ * How many lthread objects to pre-allocate as the system grows\n+ * applies to lthreads + stacks, TLS, mutexs, cond vars.\n+ *\n+ * @see _lthread_alloc()\n+ * @see _cond_alloc()\n+ * @see _mutex_alloc()\n+ *\n+ */\n+#define LTHREAD_PREALLOC 100\n+\n+/**\n+ * Set the number of schedulers in the system.\n+ *\n+ * This function may optionally be called before starting schedulers.\n+ *\n+ * If the number of schedulers is not set, or set to 0 then each scheduler\n+ * will begin scheduling lthreads immediately it is started.\n+\n+ * If the number of schedulers is set to greater than 0, then each scheduler\n+ * will wait until all schedulers have started before beginning to schedule\n+ * lthreads.\n+ *\n+ * If an application wishes to have threads migrate between cores using\n+ * lthread_set_affinity(), or join threads running on other cores using\n+ * lthread_join(), then it is prudent to set the number of schedulers to ensure\n+ * that all schedulers are initialised beforehand.\n+ *\n+ * @param num\n+ *  the number of schedulers in the system\n+ * @return\n+ * the number of schedulers in the system\n+ */\n+int lthread_num_schedulers_set(int num);\n+\n+/**\n+ * Return the number of schedulers currently running\n+ * @return\n+ *  the number of schedulers in the system\n+ */\n+int lthread_active_schedulers(void);\n+\n+/**\n+  * Shutdown the specified scheduler\n+  *\n+  *  This function tells the specified scheduler to\n+  *  exit if/when there is no more work to do.\n+  *\n+  *  Note that although the scheduler will stop\n+  *  resources are not freed.\n+  *\n+  * @param lcore\n+  *\tThe lcore of the scheduler to shutdown\n+  *\n+  * @return\n+  *  none\n+  */\n+void lthread_scheduler_shutdown(unsigned lcore);\n+\n+/**\n+  * Shutdown all schedulers\n+  *\n+  *  This function tells all schedulers  including the current scheduler to\n+  *  exit if/when there is no more work to do.\n+  *\n+  *  Note that although the schedulers will stop\n+  *  resources are not freed.\n+  *\n+  * @return\n+  *  none\n+  */\n+void lthread_scheduler_shutdown_all(void);\n+\n+/**\n+  * Run the lthread scheduler\n+  *\n+  *  Runs the lthread scheduler.\n+  *  This function returns only if/when all lthreads have exited.\n+  *  This function must be the main loop of an EAL thread.\n+  *\n+  * @return\n+  *\t none\n+  */\n+\n+void lthread_run(void);\n+\n+/**\n+  * Create an lthread\n+  *\n+  *  Creates an lthread and places it in the ready queue on a particular\n+  *  lcore.\n+  *\n+  *  If no scheduler exists yet on the curret lcore then one is created.\n+  *\n+  * @param new_lt\n+  *  Pointer to an lthread pointer that will be 
initialized\n+  * @param lcore\n+  *  the lcore the thread should be started on or the current clore\n+  *    -1 the current lcore\n+  *    0 - LTHREAD_MAX_LCORES any other lcore\n+  * @param lthread_func\n+  *  Pointer to the function the for the thread to run\n+  * @param arg\n+  *  Pointer to args that will be passed to the thread\n+  *\n+  * @return\n+  *\t 0    success\n+  *\t EAGAIN  no resources available\n+  *\t EINVAL  NULL thread or function pointer, or lcore_id out of range\n+  */\n+int\n+lthread_create(struct lthread **new_lt,\n+\t\tint lcore, lthread_func_t func, void *arg);\n+\n+/**\n+  * Cancel an lthread\n+  *\n+  *  Cancels an lthread and causes it to be terminated\n+  *  If the lthread is detached it will be freed immediately\n+  *  otherwise its resources will not be released until it is joined.\n+  *\n+  * @param new_lt\n+  *  Pointer to an lthread that will be cancelled\n+  *\n+  * @return\n+  *\t 0    success\n+  *\t EINVAL  thread was NULL\n+  */\n+int lthread_cancel(struct lthread *lt);\n+\n+/**\n+  * Join an lthread\n+  *\n+  *  Joins the current thread with the specified lthread, and waits for that\n+  *  thread to exit.\n+  *  Passes an optional pointer to collect returned data.\n+  *\n+  * @param lt\n+  *  Pointer to the lthread to be joined\n+  * @param ptr\n+  *  Pointer to pointer to collect returned data\n+  *\n+0  * @return\n+  *  0    success\n+  *  EINVAL lthread could not be joined.\n+  */\n+int lthread_join(struct lthread *lt, void **ptr);\n+\n+/**\n+  * Detach an lthread\n+  *\n+  * Detaches the current thread\n+  * On exit a detached lthread will be freed immediately and will not wait\n+  * to be joined. The default state for a thread is not detached.\n+  *\n+  * @return\n+  *  none\n+  */\n+void lthread_detach(void);\n+\n+/**\n+  *  Exit an lthread\n+  *\n+  * Terminate the current thread, optionally return data.\n+  * The data may be collected by lthread_join()\n+  *\n+  * After calling this function the lthread will be suspended until it is\n+  * joined. After it is joined then its resources will be freed.\n+  *\n+  * @param ptr\n+  *  Pointer to pointer to data to be returned\n+  *\n+  * @return\n+  *  none\n+  */\n+void lthread_exit(void *val);\n+\n+/**\n+  * Cause the current lthread to sleep for n nanoseconds\n+  *\n+  * The current thread will be suspended until the specified time has elapsed\n+  * or has been exceeded.\n+  *\n+  * Execution will switch to the next lthread that is ready to run\n+  *\n+  * @param nsecs\n+  *  Number of nanoseconds to sleep\n+  *\n+  * @return\n+  *  none\n+  */\n+void lthread_sleep(uint64_t nsecs);\n+\n+/**\n+  * Cause the current lthread to sleep for n cpu clock ticks\n+  *\n+  *  The current thread will be suspended until the specified time has elapsed\n+  *  or has been exceeded.\n+  *\n+  *\t Execution will switch to the next lthread that is ready to run\n+  *\n+  * @param clks\n+  *  Number of clock ticks to sleep\n+  *\n+  * @return\n+  *  none\n+  */\n+void lthread_sleep_clks(uint64_t clks);\n+\n+/**\n+  * Yield the current lthread\n+  *\n+  *  The current thread will yield and execution will switch to the\n+  *  next lthread that is ready to run\n+  *\n+  * @return\n+  *  none\n+  */\n+void lthread_yield(void);\n+\n+/**\n+  * Migrate the current thread to another scheduler\n+  *\n+  *  This function migrates the current thread to another scheduler.\n+  *  Execution will switch to the next lthread that is ready to run on the\n+  *  current scheduler. 
The current thread will be resumed on the new scheduler.\n+  *\n+  * @param lcore\n+  *\tThe lcore to migrate to\n+  *\n+  * @return\n+  *  0   success we are now running on the specified core\n+  *  EINVAL the destination lcore was not valid\n+  */\n+int lthread_set_affinity(unsigned lcore);\n+\n+/**\n+  * Return the current lthread\n+  *\n+  *  Returns the current lthread\n+  *\n+  * @return\n+  *  pointer to the current lthread\n+  */\n+struct lthread\n+*lthread_current(void);\n+\n+/**\n+  * Associate user data with an lthread\n+  *\n+  *  This function sets a user data pointer in the current lthread\n+  *  The pointer can be retrieved with lthread_get_data()\n+  *  It is the users responsibility to allocate and free any data referenced\n+  *  by the user pointer.\n+  *\n+  * @param data\n+  *  pointer to user data\n+  *\n+  * @return\n+  *  none\n+  */\n+void lthread_set_data(void *data);\n+\n+/**\n+  * Get user data for the current lthread\n+  *\n+  *  This function returns a user data pointer for the current lthread\n+  *  The pointer must first be set with lthread_set_data()\n+  *  It is the users responsibility to allocate and free any data referenced\n+  *  by the user pointer.\n+  *\n+  * @return\n+  *  pointer to user data\n+  */\n+void\n+*lthread_get_data(void);\n+\n+struct lthread_key;\n+typedef void (*tls_destructor_func) (void *);\n+\n+/**\n+  * Create a key for lthread TLS\n+  *\n+  *  This function is modelled on pthread_key_create\n+  *  It creates a thread-specific data key visible to all lthreads on the\n+  *  current scheduler.\n+  *\n+  *  Key values may be used to locate thread-specific data.\n+  *  The same key value\tmay be used by different threads, the values bound\n+  *  to the key by\tlthread_setspecific() are maintained on\ta per-thread\n+  *  basis and persist for the life of the calling thread.\n+  *\n+  *  An\toptional destructor function may be associated with each key value.\n+  *  At\tthread exit, if\ta key value has\ta non-NULL destructor pointer, and the\n+  *  thread has\ta non-NULL value associated with the key, the function pointed\n+  *  to\tis called with the current associated value as its sole\targument.\n+  *\n+  * @param key\n+  *   Pointer to the key to be created\n+  * @param destructor\n+  *   Pointer to destructor function\n+  *\n+  * @return\n+  *  0 success\n+  *  EINVAL the key ptr was NULL\n+  *  EAGAIN no resources available\n+  */\n+int lthread_key_create(unsigned int *key, tls_destructor_func destructor);\n+\n+/**\n+  * Delete key for lthread TLS\n+  *\n+  *  This function is modelled on pthread_key_delete().\n+  *  It deletes a thread-specific data key previously returned by\n+  *  lthread_key_create().\n+  *  The thread-specific data values associated with the key need not be NULL\n+  *  at the time that lthread_key_delete is called.\n+  *  It is the responsibility of the application to free any application\n+  *  storage or perform any cleanup actions for data structures related to the\n+  *  deleted key. This cleanup can be done either before or after\n+  * lthread_key_delete is called.\n+  *\n+  * @param key\n+  *  The key to be deleted\n+  *\n+  * @return\n+  *  0 Success\n+  *  EINVAL the key was invalid\n+  */\n+int lthread_key_delete(unsigned int key);\n+\n+/**\n+  * Get lthread TLS\n+  *\n+  *  This function is modelled on pthread_get_specific().\n+  *  It returns the value currently bound to the specified key on behalf of the\n+  *  calling thread. 
Calling lthread_getspecific() with a key value not\n+  *  obtained from lthread_key_create() or after key has been deleted with\n+  *  lthread_key_delete() will result in undefined behaviour.\n+  *  lthread_getspecific() may be called from a thread-specific data destructor\n+  *  function.\n+  *\n+  * @param key\n+  *  The key for which data is requested\n+  *\n+  * @return\n+  *  Pointer to the thread specific data associated with that key\n+  *  or NULL if no data has been set.\n+  */\n+void\n+*lthread_getspecific(unsigned int key);\n+\n+/**\n+  * Set lthread TLS\n+  *\n+  *  This function is modelled on pthread_set_sepcific()\n+  *  It associates a thread-specific value with a key obtained via a previous\n+  *  call to lthread_key_create().\n+  *  Different threads may bind different values to the same key. These values\n+  *  are typically pointers to dynamically allocated memory that have been\n+  *  reserved by the calling thread. Calling lthread_setspecific with a key\n+  *  value not obtained from lthread_key_create or after the key has been\n+  *  deleted with lthread_key_delete will result in undefined behaviour.\n+  *\n+  * @param key\n+  *  The key for which data is to be set\n+  * @param key\n+  *  Pointer to the user data\n+  *\n+  * @return\n+  *  0 success\n+  *  EINVAL the key was invalid\n+  */\n+\n+int lthread_setspecific(unsigned int key, const void *value);\n+\n+/**\n+ * The macros below provide an alternative mechanism to access lthread local\n+ *  storage.\n+ *\n+ * The macros can be used to declare define and access per lthread local\n+ * storage in a similar way to the RTE_PER_LCORE macros which control storage\n+ * local to an lcore.\n+ *\n+ * Memory for per lthread variables declared in this way is allocated when the\n+ * lthread is created and a pointer to this memory is stored in the lthread.\n+ * The per lthread variables are accessed via the pointer + the offset of the\n+ * particular variable.\n+ *\n+ * The total size of per lthread storage, and the variable offsets are found by\n+ * defining the variables in a unique global memory section, the start and end\n+ * of which is known. This global memory section is used only in the\n+ * computation of the addresses of the lthread variables, and is never actually\n+ * used to store any data.\n+ *\n+ * Due to the fact that variables declared this way may be scattered across\n+ * many files, the start and end of the section and variable offsets are only\n+ * known after linking, thus the computation of section size and variable\n+ * addresses is performed at run time.\n+ *\n+ * These macros are primarily provided to aid porting of code that makes use\n+ * of the existing RTE_PER_LCORE macros. 
In principle it would be more efficient\n+ * to gather all lthread local variables into a single structure and\n+ * set/retrieve a pointer to that struct using the alternative\n+ * lthread_set_data()/lthread_get_data() APIs.\n+ *\n+ * These macros are mutually exclusive with the lthread_set_data()/\n+ * lthread_get_data() APIs. If you define storage using these macros then\n+ * those APIs will not perform as expected: lthread_set_data() does nothing,\n+ * and lthread_get_data() returns the start of the global section.\n+ *\n+ */\n+/* start and end of per lthread section */\n+extern char __start_per_lt;\n+extern char __stop_per_lt;\n+\n+/**\n+ * Macro to define a per lthread variable \"var\" of type \"type\"\n+ */\n+#define RTE_DEFINE_PER_LTHREAD(type, name)                      \\\n+__typeof__(type)__attribute((section(\"per_lt\"))) per_lt_##name\n+\n+/**\n+ * Macro to declare an extern per lthread variable \"var\" of type \"type\"\n+ */\n+#define RTE_DECLARE_PER_LTHREAD(type, name)                     \\\n+extern __typeof__(type)__attribute((section(\"per_lt\"))) per_lt_##name\n+\n+/**\n+ * Read/write the per-lthread variable value\n+ */\n+#define RTE_PER_LTHREAD(name) ((typeof(per_lt_##name) *)\\\n+((char *)lthread_get_data() +\\\n+((char *) &per_lt_##name - &__start_per_lt)))\n+\n+/**\n+  * Initialize a mutex\n+  *\n+  *  This function provides a mutual exclusion device, the need for which\n+  *  can normally be avoided in a cooperative multitasking environment.\n+  *  It is provided to aid porting of legacy code originally written for\n+  *  preemptive multitasking environments such as pthreads.\n+  *\n+  *  A mutex may be unlocked (not owned by any thread), or locked (owned by\n+  *  one thread).\n+  *\n+  *  A mutex can never be owned by more than one thread simultaneously.\n+  *  A thread attempting to lock a mutex that is already locked by another\n+  *  thread is suspended until the owning thread unlocks the mutex.\n+  *\n+  *  lthread_mutex_init() initializes the mutex object pointed to by mutex.\n+  *  Optional mutex attributes specified in mutexattr are reserved for future\n+  *  use and are currently ignored.\n+  *\n+  *  If a thread calls lthread_mutex_lock() on the mutex, then if the mutex\n+  *  is currently unlocked, it becomes locked and owned by the calling\n+  *  thread, and lthread_mutex_lock returns immediately. If the mutex is\n+  *  already locked by another thread, lthread_mutex_lock suspends the calling\n+  *  thread until the mutex is unlocked.\n+  *\n+  *  lthread_mutex_trylock() behaves identically to lthread_mutex_lock(),\n+  *  except that it does not block the calling thread if the mutex is already\n+  *  locked by another thread.\n+  *\n+  *  lthread_mutex_unlock() unlocks the specified mutex. 
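A sketch of how the per-lthread macros defined above might be exercised (the counter name is invented for illustration):

    /* storage is allocated automatically when each lthread is created */
    RTE_DEFINE_PER_LTHREAD(uint64_t, nb_rx);

    static void rx_loop(void *arg)
    {
        (void)arg;
        *RTE_PER_LTHREAD(nb_rx) = 0;        /* the macro yields a pointer */

        for (;;) {
            /* ... receive and process one packet ... */
            (*RTE_PER_LTHREAD(nb_rx))++;
        }
    }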
The mutex is assumed\n+  *  to be locked and owned by the calling thread.\n+  *\n+  *  lthread_mutex_destroy() destroys a mutex object, freeing its resources.\n+  *  The mutex must be unlocked with nothing blocked on it before calling\n+  *  lthread_mutex_destroy().\n+  *\n+  * @param name\n+  *  Optional pointer to string describing the mutex\n+  * @param mutex\n+  *  Pointer to pointer to the mutex to be initialized\n+  * @param attribute\n+  *  Pointer to attributes, reserved for future use and currently ignored\n+  *\n+  * @return\n+  *  0 success\n+  *  EINVAL mutex was not a valid pointer\n+  *  EAGAIN insufficient resources\n+  */\n+\n+int\n+lthread_mutex_init(char *name, struct lthread_mutex **mutex,\n+\t\t   const struct lthread_mutexattr *attr);\n+\n+/**\n+  * Destroy a mutex\n+  *\n+  *  This function destroys the specified mutex, freeing its resources.\n+  *  The mutex must be unlocked before calling lthread_mutex_destroy().\n+  *\n+  * @see lthread_mutex_init()\n+  *\n+  * @param mutex\n+  *  Pointer to the mutex to be destroyed\n+  *\n+  * @return\n+  *  0 success\n+  *  EINVAL mutex was not an initialized mutex\n+  *  EBUSY mutex was still in use\n+  */\n+int lthread_mutex_destroy(struct lthread_mutex *mutex);\n+\n+/**\n+  * Lock a mutex\n+  *\n+  *  This function attempts to lock a mutex.\n+  *  If a thread calls lthread_mutex_lock() on the mutex, then if the mutex\n+  *  is currently unlocked, it becomes locked and owned by the calling\n+  *  thread, and lthread_mutex_lock returns immediately. If the mutex is\n+  *  already locked by another thread, lthread_mutex_lock suspends the calling\n+  *  thread until the mutex is unlocked.\n+  *\n+  * @see lthread_mutex_init()\n+  *\n+  * @param mutex\n+  *  Pointer to the mutex to be locked\n+  *\n+  * @return\n+  *  0 success\n+  *  EINVAL mutex was not an initialized mutex\n+  *  EDEADLK the mutex was already owned by the calling thread\n+  */\n+\n+int lthread_mutex_lock(struct lthread_mutex *mutex);\n+\n+/**\n+  * Try to lock a mutex\n+  *\n+  *  This function attempts to lock a mutex.\n+  *  lthread_mutex_trylock() behaves identically to lthread_mutex_lock(),\n+  *  except that it does not block the calling thread if the mutex is already\n+  *  locked by another thread.\n+  *\n+  * @see lthread_mutex_init()\n+  *\n+  * @param mutex\n+  *  Pointer to the mutex to be locked\n+  *\n+  * @return\n+  *  0 success\n+  *  EINVAL mutex was not an initialized mutex\n+  *  EBUSY the mutex was already locked by another thread\n+  */\n+int lthread_mutex_trylock(struct lthread_mutex *mutex);\n+\n+/**\n+  * Unlock a mutex\n+  *\n+  * This function attempts to unlock the specified mutex. 
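A minimal round trip through the mutex calls documented above (the function name is invented and error handling is elided for brevity):

    static struct lthread_mutex *m;

    static void guarded_work(void)
    {
        lthread_mutex_init("demo", &m, NULL);   /* attr is reserved: pass NULL */

        lthread_mutex_lock(m);      /* suspends this lthread if m is owned */
        /* ... critical section ... */
        lthread_mutex_unlock(m);    /* readies the oldest blocked lthread */

        lthread_mutex_destroy(m);   /* only valid once unlocked and idle */
    }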
The mutex is assumed\n+  * to be locked and owned by the calling thread.\n+  *\n+  * The oldest of any threads blocked on the mutex is made ready and may\n+  * compete with any other running thread to gain the mutex; if it fails it\n+  * will be blocked again.\n+  *\n+  * @param mutex\n+  *  Pointer to the mutex to be unlocked\n+  *\n+  * @return\n+  *  0 mutex was unlocked\n+  *  EINVAL mutex was not an initialized mutex\n+  *  EPERM the mutex was not owned by the calling thread\n+  */\n+\n+int lthread_mutex_unlock(struct lthread_mutex *mutex);\n+\n+/**\n+  * Initialize a condition variable\n+  *\n+  *  This function initializes a condition variable.\n+  *\n+  *  Condition variables can be used to communicate changes in the state of\n+  *  data shared between threads.\n+  *\n+  * @see lthread_cond_wait()\n+  *\n+  * @param name\n+  *  Pointer to optional string describing the condition variable\n+  * @param c\n+  *  Pointer to pointer to the condition variable to be initialized\n+  * @param attr\n+  *  Pointer to optional attribute reserved for future use, currently ignored\n+  *\n+  * @return\n+  *  0 success\n+  *  EINVAL cond was not a valid pointer\n+  *  EAGAIN insufficient resources\n+  */\n+int\n+lthread_cond_init(char *name, struct lthread_cond **c,\n+\t\t  const struct lthread_condattr *attr);\n+\n+/**\n+  * Destroy a condition variable\n+  *\n+  *  This function destroys a condition variable that was created with\n+  *  lthread_cond_init() and releases its resources.\n+  *\n+  * @param cond\n+  *  Pointer to the condition variable to be destroyed\n+  *\n+  * @return\n+  *  0 Success\n+  *  EBUSY condition variable was still in use\n+  *  EINVAL cond was not an initialised condition variable\n+  */\n+int lthread_cond_destroy(struct lthread_cond *cond);\n+\n+/**\n+  * Wait on a condition variable\n+  *\n+  *  The function blocks the current thread waiting on the condition variable\n+  *  specified by cond. 
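A producer/consumer sketch with the condition-variable calls described above; note that, unlike pthreads, lthread_cond_wait() takes no mutex argument (the flag and function names are invented for illustration):

    static struct lthread_cond *cv;     /* created with lthread_cond_init() */
    static int ready;

    static void consumer(void *arg)
    {
        (void)arg;
        while (!ready)
            lthread_cond_wait(cv, 0);   /* second argument is reserved */
        /* ... consume the shared data ... */
    }

    static void producer(void *arg)
    {
        (void)arg;
        ready = 1;
        lthread_cond_signal(cv);        /* wakes the oldest waiter, if any */
    }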
The waiting thread unblocks only after another thread\n+  *  calls lthread_cond_signal(), or lthread_cond_broadcast(), specifying the\n+  *  same condition variable.\n+  *\n+  * @param cond\n+  *  Pointer to the condition variable to be waited on\n+  *\n+  * @param reserved\n+  *  reserved for future use\n+  *\n+  * @return\n+  *  0 the condition was signalled (success)\n+  *  EINVAL cond was not an initialised condition variable\n+  */\n+int lthread_cond_wait(struct lthread_cond *c, uint64_t reserved);\n+\n+/**\n+  * Signal a condition variable\n+  *\n+  *  The function unblocks one thread waiting for the condition variable cond.\n+  *  If no threads are waiting on cond, the lthread_cond_signal() function\n+  *  has no effect.\n+  *\n+  * @param cond\n+  *  Pointer to the condition variable to be signalled\n+  *\n+  * @return\n+  *  0 the condition was signalled (success)\n+  *  EINVAL cond was not an initialised condition variable\n+  */\n+int lthread_cond_signal(struct lthread_cond *c);\n+\n+/**\n+  * Broadcast a condition variable\n+  *\n+  *  The function unblocks all threads waiting for the condition variable cond.\n+  *  If no threads are waiting on cond, the lthread_cond_broadcast()\n+  *  function has no effect.\n+  *\n+  * @param cond\n+  *  Pointer to the condition variable to be signalled\n+  *\n+  * @return\n+  *  0 the condition was signalled (success)\n+  *  EINVAL cond was not an initialised condition variable\n+  */\n+int lthread_cond_broadcast(struct lthread_cond *c);\n+\n+#endif\t\t\t\t/* LTHREAD_H */\ndiff --git a/examples/performance-thread/common/lthread_cond.c b/examples/performance-thread/common/lthread_cond.c\nnew file mode 100644\nindex 0000000..9ff918d\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_cond.c\n@@ -0,0 +1,228 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Some portions of this software may have been derived from the\n+ * https://github.com/halayli/lthread which carrys the following license.\n+ *\n+ * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ * 1. Redistributions of source code must retain the above copyright\n+ *    notice, this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ *    notice, this list of conditions and the following disclaimer in the\n+ *    documentation and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+ * SUCH DAMAGE.\n+ */\n+\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <stdint.h>\n+#include <stddef.h>\n+#include <limits.h>\n+#include <inttypes.h>\n+#include <unistd.h>\n+#include <pthread.h>\n+#include <fcntl.h>\n+#include <sys/time.h>\n+#include <sys/mman.h>\n+#include <errno.h>\n+\n+#include <rte_config.h>\n+#include <rte_log.h>\n+#include <rte_common.h>\n+\n+#include \"lthread_api.h\"\n+#include \"lthread_diag_api.h\"\n+#include \"lthread_diag.h\"\n+#include \"lthread_int.h\"\n+#include \"lthread_sched.h\"\n+#include \"lthread_queue.h\"\n+#include \"lthread_objcache.h\"\n+#include \"lthread_timer.h\"\n+#include \"lthread_mutex.h\"\n+#include \"lthread_cond.h\"\n+\n+/*\n+ * Create a condition variable\n+ */\n+int\n+lthread_cond_init(char *name, struct lthread_cond **cond,\n+\t\t  __rte_unused const struct lthread_condattr *attr)\n+{\n+\tstruct lthread_cond *c;\n+\n+\tif (cond == NULL)\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\t/* allocate a condition variable from cache */\n+\tc = _lthread_objcache_alloc((THIS_SCHED)->cond_cache);\n+\n+\tif (c == NULL)\n+\t\treturn POSIX_ERRNO(EAGAIN);\n+\n+\tc->blocked = _lthread_queue_create(\"blocked\");\n+\tif (c->blocked == NULL) {\n+\t\t_lthread_objcache_free((THIS_SCHED)->cond_cache, (void *)c);\n+\t\treturn POSIX_ERRNO(EAGAIN);\n+\t}\n+\n+\tif (name == NULL)\n+\t\tstrncpy(c->name, \"no name\", sizeof(c->name));\n+\telse\n+\t\tstrncpy(c->name, name, 
sizeof(c->name));\n+\t/* ensure NUL termination if the name was truncated */\n+\tc->name[sizeof(c->name)-1] = 0;\n+\n+\tc->root_sched = THIS_SCHED;\n+\n+\t(*cond) = c;\n+\tDIAG_CREATE_EVENT((*cond), LT_DIAG_COND_CREATE);\n+\treturn 0;\n+}\n+\n+/*\n+ * Destroy a condition variable\n+ */\n+int lthread_cond_destroy(struct lthread_cond *c)\n+{\n+\tif (c == NULL) {\n+\t\tDIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, POSIX_ERRNO(EINVAL));\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\t}\n+\n+\t/* try to free it */\n+\tif (_lthread_queue_destroy(c->blocked) < 0) {\n+\t\t/* queue in use */\n+\t\tDIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, POSIX_ERRNO(EBUSY));\n+\t\treturn POSIX_ERRNO(EBUSY);\n+\t}\n+\n+\t/* okay, free it */\n+\t_lthread_objcache_free(c->root_sched->cond_cache, c);\n+\tDIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, 0);\n+\treturn 0;\n+}\n+\n+/*\n+ * Wait on a condition variable\n+ */\n+int lthread_cond_wait(struct lthread_cond *c, __rte_unused uint64_t reserved)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\tif (c == NULL) {\n+\t\tDIAG_EVENT(c, LT_DIAG_COND_WAIT, c, POSIX_ERRNO(EINVAL));\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\t}\n+\n+\tDIAG_EVENT(c, LT_DIAG_COND_WAIT, c, 0);\n+\n+\t/* queue the current thread in the blocked queue\n+\t * we defer this to after we return to the scheduler\n+\t * to ensure that the current thread context is saved\n+\t * before any signal could result in it being dequeued and\n+\t * resumed\n+\t */\n+\tlt->pending_wr_queue = c->blocked;\n+\t_suspend();\n+\n+\t/* the condition happened */\n+\treturn 0;\n+}\n+\n+/*\n+ * Signal a condition variable\n+ * attempt to resume any blocked thread\n+ */\n+int lthread_cond_signal(struct lthread_cond *c)\n+{\n+\tstruct lthread *lt;\n+\n+\tif (c == NULL) {\n+\t\tDIAG_EVENT(c, LT_DIAG_COND_SIGNAL, c, POSIX_ERRNO(EINVAL));\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\t}\n+\n+\tlt = _lthread_queue_remove(c->blocked);\n+\n+\tif (lt != NULL) {\n+\t\t/* okay wake up this thread */\n+\t\tDIAG_EVENT(c, LT_DIAG_COND_SIGNAL, c, lt);\n+\t\t_ready_queue_insert((struct lthread_sched *)lt->sched, lt);\n+\t}\n+\treturn 0;\n+}\n+\n+/*\n+ * Broadcast a condition variable\n+ */\n+int lthread_cond_broadcast(struct lthread_cond *c)\n+{\n+\tstruct lthread *lt;\n+\n+\tif (c == NULL) {\n+\t\tDIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, POSIX_ERRNO(EINVAL));\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\t}\n+\n+\tDIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, 0);\n+\tdo {\n+\t\t/* drain the queue waking everybody */\n+\t\tlt = _lthread_queue_remove(c->blocked);\n+\n+\t\tif (lt != NULL) {\n+\t\t\tDIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, lt);\n+\t\t\t/* wake up */\n+\t\t\t_ready_queue_insert((struct lthread_sched *)lt->sched,\n+\t\t\t\t\t    lt);\n+\t\t}\n+\t} while (!_lthread_queue_empty(c->blocked));\n+\t_reschedule();\n+\tDIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, 0);\n+\treturn 0;\n+}\ndiff --git a/examples/performance-thread/common/lthread_cond.h b/examples/performance-thread/common/lthread_cond.h\nnew file mode 100644\nindex 0000000..9341df3\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_cond.h\n@@ -0,0 +1,77 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Some portions of this software may have been derived from the\n+ * https://github.com/halayli/lthread which carrys the following license.\n+ *\n+ * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ * 1. Redistributions of source code must retain the above copyright\n+ *    notice, this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ *    notice, this list of conditions and the following disclaimer in the\n+ *    documentation and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED.  
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+ * SUCH DAMAGE.\n+ */\n+\n+#ifndef LTHREAD_COND_H_\n+#define LTHREAD_COND_H_\n+\n+#include \"lthread_queue.h\"\n+\n+#define MAX_COND_NAME_SIZE 64\n+\n+struct lthread_cond {\n+\tstruct lthread_queue *blocked;\n+\tstruct lthread_sched *root_sched;\n+\tint count;\n+\tchar name[MAX_COND_NAME_SIZE];\n+\tuint64_t diag_ref;\t/* optional ref to user diag data */\n+} __rte_cache_aligned;\n+\n+#endif\t\t\t\t/* LTHREAD_COND_H_ */\ndiff --git a/examples/performance-thread/common/lthread_diag.c b/examples/performance-thread/common/lthread_diag.c\nnew file mode 100644\nindex 0000000..a62ee5f\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_diag.c\n@@ -0,0 +1,314 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <rte_config.h>\n+#include <rte_log.h>\n+#include <rte_common.h>\n+\n+#include \"lthread_diag.h\"\n+#include \"lthread_queue.h\"\n+#include \"lthread_pool.h\"\n+#include \"lthread_objcache.h\"\n+#include \"lthread_sched.h\"\n+#include \"lthread_diag_api.h\"\n+\n+\n+/* dummy ref value for the default diagnostic callback */\n+static uint64_t dummy_ref;\n+\n+#define DIAG_SCHED_STATS_FORMAT \\\n+\"core %d\\n%33s %12s %12s %12s %12s\\n\"\n+\n+#define DIAG_CACHE_STATS_FORMAT \\\n+\"%20s %12lu %12lu %12lu %12lu %12lu\\n\"\n+\n+#define DIAG_QUEUE_STATS_FORMAT \\\n+\"%20s %12lu %12lu %12lu\\n\"\n+\n+\n+/*\n+ * texts used in diagnostic events,\n+ * corresponding diagnostic mask bit positions are given as comment\n+ */\n+const char *diag_event_text[] = {\n+\t\"LTHREAD_CREATE     \",\t/* 00 */\n+\t\"LTHREAD_EXIT       \",\t/* 01 */\n+\t\"LTHREAD_JOIN       \",\t/* 02 */\n+\t\"LTHREAD_CANCEL     \",\t/* 03 */\n+\t\"LTHREAD_DETACH     \",\t/* 04 */\n+\t\"LTHREAD_FREE       \",\t/* 05 */\n+\t\"LTHREAD_SUSPENDED  \",\t/* 06 */\n+\t\"LTHREAD_YIELD      \",\t/* 07 */\n+\t\"LTHREAD_RESCHEDULED\",\t/* 08 */\n+\t\"LTHREAD_SLEEP      \",\t/* 09 */\n+\t\"LTHREAD_RESUMED    \",\t/* 10 */\n+\t\"LTHREAD_AFFINITY   \",\t/* 11 */\n+\t\"LTHREAD_TMR_START  \",\t/* 12 */\n+\t\"LTHREAD_TMR_DELETE \",\t/* 13 */\n+\t\"LTHREAD_TMR_EXPIRED\",\t/* 14 */\n+\t\"COND_CREATE        \",\t/* 15 */\n+\t\"COND_DESTROY       \",\t/* 16 */\n+\t\"COND_WAIT          \",\t/* 17 */\n+\t\"COND_SIGNAL        \",\t/* 18 */\n+\t\"COND_BROADCAST     \",\t/* 19 */\n+\t\"MUTEX_CREATE       \",\t/* 20 */\n+\t\"MUTEX_DESTROY      \",\t/* 21 */\n+\t\"MUTEX_LOCK         \",\t/* 22 */\n+\t\"MUTEX_TRYLOCK      \",\t/* 23 */\n+\t\"MUTEX_BLOCKED      \",\t/* 24 */\n+\t\"MUTEX_UNLOCKED     \",\t/* 25 */\n+\t\"SCHED_CREATE       \",\t/* 26 */\n+\t\"SCHED_SHUTDOWN     \"\t/* 27 */\n+};\n+\n+/*\n+ * enable diagnostics\n+ */\n+void lthread_diagnostic_enable(DIAG_USED diag_callback cb,\n+\t\t\t\tDIAG_USED uint64_t mask)\n+{\n+#if LTHREAD_DIAG\n+\tdiag_cb = cb;\n+\tdiag_mask = mask;\n+#else\n+\tRTE_LOG(INFO, LTHREAD,\n+\t\t\"LTHREAD_DIAG is not set, see lthread_diag_api.h\\n\");\n+#endif\n+}\n+\n+/*\n+ * set diagnostic mask\n+ */\n+void lthread_diagnostic_set_mask(DIAG_USED uint64_t mask)\n+{\n+#if LTHREAD_DIAG\n+\tdiag_mask = mask;\n+#else\n+\tRTE_LOG(INFO, LTHREAD,\n+\t\t\"LTHREAD_DIAG is not set, see lthread_diag_api.h\\n\");\n+#endif\n+}\n+\n+\n+/*\n+ * Check consistency of the scheduler stats.\n+ * Only sensible to run after the schedulers are stopped.\n+ * Count the number of objects lying in caches and queues\n+ * and available in the qnode pool.\n+ * This should be equal to the total capacity of all\n+ * qnode pools.\n+ */\n+void\n+_sched_stats_consistency_check(void);\n+void\n+_sched_stats_consistency_check(void)\n+{\n+#if LTHREAD_DIAG\n+\tint i;\n+\tstruct lthread_sched *sched;\n+\tuint64_t count = 0;\n+\tuint64_t capacity = 0;\n+\n+\tfor (i = 0; i < LTHREAD_MAX_LCORES; i++) {\n+\t\tsched = 
schedcore[i];\n+\t\tif (sched == NULL)\n+\t\t\tcontinue;\n+\n+\t\t/* each of these queues consumes a stub node */\n+\t\tcount += 8;\n+\t\tcount += DIAG_COUNT(sched->ready, size);\n+\t\tcount += DIAG_COUNT(sched->pready, size);\n+\t\tcount += DIAG_COUNT(sched->lthread_cache, available);\n+\t\tcount += DIAG_COUNT(sched->stack_cache, available);\n+\t\tcount += DIAG_COUNT(sched->tls_cache, available);\n+\t\tcount += DIAG_COUNT(sched->per_lthread_cache, available);\n+\t\tcount += DIAG_COUNT(sched->cond_cache, available);\n+\t\tcount += DIAG_COUNT(sched->mutex_cache, available);\n+\n+\t\t/* the node pool does not consume a stub node */\n+\t\tif (sched->qnode_pool->fast_alloc != NULL)\n+\t\t\tcount++;\n+\t\tcount += DIAG_COUNT(sched->qnode_pool, available);\n+\n+\t\tcapacity += DIAG_COUNT(sched->qnode_pool, capacity);\n+\t}\n+\tif (count != capacity) {\n+\t\tRTE_LOG(CRIT, LTHREAD,\n+\t\t\t\"Scheduler caches are inconsistent\\n\");\n+\t} else {\n+\t\tRTE_LOG(INFO, LTHREAD,\n+\t\t\t\"Scheduler caches are ok\\n\");\n+\t}\n+#endif\n+}\n+\n+/*\n+ * Display node pool stats\n+ */\n+static inline void\n+_qnode_pool_display(DIAG_USED struct qnode_pool *p)\n+{\n+#if LTHREAD_DIAG\n+\n+\tprintf(DIAG_CACHE_STATS_FORMAT,\n+\t\t\tp->name,\n+\t\t\tDIAG_COUNT(p, rd),\n+\t\t\tDIAG_COUNT(p, wr),\n+\t\t\tDIAG_COUNT(p, available),\n+\t\t\tDIAG_COUNT(p, prealloc),\n+\t\t\tDIAG_COUNT(p, capacity));\n+\tfflush(stdout);\n+#endif\n+}\n+\n+\n+/*\n+ * Display queue stats\n+ */\n+static inline void\n+_lthread_queue_display(DIAG_USED struct lthread_queue *q)\n+{\n+#if LTHREAD_DIAG\n+\n+\tprintf(DIAG_QUEUE_STATS_FORMAT,\n+\t\t\tq->name,\n+\t\t\tDIAG_COUNT(q, rd),\n+\t\t\tDIAG_COUNT(q, wr),\n+\t\t\tDIAG_COUNT(q, size));\n+\tfflush(stdout);\n+#endif\n+}\n+\n+/*\n+ * Display objcache stats\n+ */\n+static inline void\n+_objcache_display(DIAG_USED struct lthread_objcache *c)\n+{\n+#if LTHREAD_DIAG\n+\n+\tprintf(DIAG_CACHE_STATS_FORMAT,\n+\t\t\tc->name,\n+\t\t\tDIAG_COUNT(c, rd),\n+\t\t\tDIAG_COUNT(c, wr),\n+\t\t\tDIAG_COUNT(c, available),\n+\t\t\tDIAG_COUNT(c, prealloc),\n+\t\t\tDIAG_COUNT(c, capacity));\n+#if DISPLAY_OBCACHE_QUEUES\n+\t_lthread_queue_display(c->q);\n+#endif\n+\tfflush(stdout);\n+#endif\n+}\n+\n+\n+/*\n+ * Display sched stats\n+ */\n+void\n+lthread_sched_stats_display(void)\n+{\n+#if LTHREAD_DIAG\n+\tint i;\n+\tstruct lthread_sched *sched;\n+\n+\tfor (i = 0; i < LTHREAD_MAX_LCORES; i++) {\n+\t\tsched = schedcore[i];\n+\t\tif (sched != NULL) {\n+\t\t\tprintf(DIAG_SCHED_STATS_FORMAT,\n+\t\t\t\t\tsched->lcore_id,\n+\t\t\t\t\t\"rd\",\n+\t\t\t\t\t\"wr\",\n+\t\t\t\t\t\"present\",\n+\t\t\t\t\t\"nb preallocs\",\n+\t\t\t\t\t\"capacity\");\n+\t\t\t_lthread_queue_display(sched->ready);\n+\t\t\t_lthread_queue_display(sched->pready);\n+\t\t\t_qnode_pool_display(sched->qnode_pool);\n+\t\t\t_objcache_display(sched->lthread_cache);\n+\t\t\t_objcache_display(sched->stack_cache);\n+\t\t\t_objcache_display(sched->tls_cache);\n+\t\t\t_objcache_display(sched->per_lthread_cache);\n+\t\t\t_objcache_display(sched->cond_cache);\n+\t\t\t_objcache_display(sched->mutex_cache);\n+\t\t\tfflush(stdout);\n+\t\t}\n+\t}\n+\t_sched_stats_consistency_check();\n+#else\n+\tRTE_LOG(INFO, LTHREAD,\n+\t\t\"lthread diagnostics disabled\\n\"\n+\t\t\"hint - set LTHREAD_DIAG in lthread_diag_api.h\\n\");\n+#endif\n+}\n+\n+/*\n+ * Default diagnostic callback\n+ */\n+static uint64_t\n+_lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,\n+\t\tuint64_t diag_ref, const char *text, uint64_t p1, uint64_t p2)\n+{\n+\tuint64_t _p2;\n+\n+\tswitch 
(diag_event) {\n+\tcase LT_DIAG_LTHREAD_CREATE:\n+\tcase LT_DIAG_MUTEX_CREATE:\n+\tcase LT_DIAG_COND_CREATE:\n+\t\t_p2 = dummy_ref;\n+\t\tbreak;\n+\tdefault:\n+\t\t_p2 = p2;\n+\t\tbreak;\n+\t}\n+\n+\tprintf(\"%\"PRIu64\" %8.8lx %8.8lx %s %8.8lx %8.8lx\\n\",\n+\t\ttime,\n+\t\t(uint64_t) lt,\n+\t\tdiag_ref,\n+\t\ttext,\n+\t\tp1,\n+\t\t_p2);\n+\n+\treturn dummy_ref++;\n+}\n+\n+/*\n+ * plug in default diag callback with mask off\n+ */\n+void _lthread_diag_ctor(void)__attribute__((constructor));\n+void _lthread_diag_ctor(void)\n+{\n+\tdiag_cb = _lthread_diag_default_cb;\n+\tdiag_mask = 0;\n+}\ndiff --git a/examples/performance-thread/common/lthread_diag.h b/examples/performance-thread/common/lthread_diag.h\nnew file mode 100644\nindex 0000000..7b2f35b\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_diag.h\n@@ -0,0 +1,129 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef LTHREAD_DIAG_H_\n+#define LTHREAD_DIAG_H_\n+\n+#include <stdint.h>\n+#include <inttypes.h>\n+\n+#include <rte_log.h>\n+#include <rte_common.h>\n+\n+#include \"lthread_api.h\"\n+#include \"lthread_diag_api.h\"\n+\n+extern diag_callback diag_cb;\n+\n+extern const char *diag_event_text[];\n+extern uint64_t diag_mask;\n+\n+/* max size of name strings */\n+#define LT_MAX_NAME_SIZE 64\n+\n+#if LTHREAD_DIAG\n+\n+/*\n+ * Generate a diagnostic trace or event in the case where an object is created.\n+ *\n+ * The value returned by the callback is stored in the object.\n+ *\n+ * @param obj\n+ *  pointer to the object that was created\n+ * @param ev\n+ *  the event code\n+ *\n+ */\n+#define DIAG_CREATE_EVENT(obj, ev) do {\t\t\t\t\t\\\n+\tstruct lthread *ct = RTE_PER_LCORE(this_sched)->current_lthread;\\\n+\tif ((BIT(ev) & diag_mask) && (ev < LT_DIAG_EVENT_MAX)) {\t\\\n+\t\t(obj)->diag_ref = (diag_cb)(rte_rdtsc(),\t\t\\\n+\t\t\t\t\tct,\t\t\t\t\\\n+\t\t\t\t\t(ev),\t\t\t\t\\\n+\t\t\t\t\t0,\t\t\t\t\\\n+\t\t\t\t\tdiag_event_text[(ev)],\t\t\\\n+\t\t\t\t\t(uint64_t)obj,\t\t\t\\\n+\t\t\t\t\t0);\t\t\t\t\\\n+\t}\t\t\t\t\t\t\t\t\\\n+} while (0)\n+\n+/*\n+ * Generate a diagnostic trace event.\n+ *\n+ * @param obj\n+ *  pointer to the lthread, cond or mutex object\n+ * @param ev\n+ *  the event code\n+ * @param p1\n+ *  object specific value (see lthread_diag_api.h)\n+ * @param p2\n+ *  object specific value (see lthread_diag_api.h)\n+ */\n+#define DIAG_EVENT(obj, ev, p1, p2) do {\t\t\t\t\\\n+\tstruct lthread *ct = RTE_PER_LCORE(this_sched)->current_lthread;\\\n+\tif ((BIT(ev) & diag_mask) && (ev < LT_DIAG_EVENT_MAX)) {\t\\\n+\t\t(diag_cb)(rte_rdtsc(),\t\t\t\t\t\\\n+\t\t\t\tct,\t\t\t\t\t\\\n+\t\t\t\tev,\t\t\t\t\t\\\n+\t\t\t\t(obj)->diag_ref,\t\t\t\\\n+\t\t\t\tdiag_event_text[(ev)],\t\t\t\\\n+\t\t\t\t(uint64_t)(p1),\t\t\t\t\\\n+\t\t\t\t(uint64_t)(p2));\t\t\t\\\n+\t}\t\t\t\t\t\t\t\t\\\n+} while (0)\n+\n+#define DIAG_COUNT_DEFINE(x) rte_atomic64_t count_##x\n+#define DIAG_COUNT_INIT(o, x) rte_atomic64_init(&((o)->count_##x))\n+#define DIAG_COUNT_INC(o, x) rte_atomic64_inc(&((o)->count_##x))\n+#define DIAG_COUNT_DEC(o, x) rte_atomic64_dec(&((o)->count_##x))\n+#define DIAG_COUNT(o, x) rte_atomic64_read(&((o)->count_##x))\n+\n+#define DIAG_USED\n+\n+#else\n+\n+/* no diagnostics configured */\n+\n+#define DIAG_CREATE_EVENT(obj, ev)\n+#define DIAG_EVENT(obj, ev, p1, p2)\n+\n+#define DIAG_COUNT_DEFINE(x)\n+#define DIAG_COUNT_INIT(o, x) do {} while (0)\n+#define DIAG_COUNT_INC(o, x) do {} while (0)\n+#define DIAG_COUNT_DEC(o, x) do {} while (0)\n+#define DIAG_COUNT(o, x) 0\n+\n+#define DIAG_USED __rte_unused\n+\n+#endif\t\t\t\t/* LTHREAD_DIAG */\n+#endif\t\t\t\t/* LTHREAD_DIAG_H_ */\ndiff --git a/examples/performance-thread/common/lthread_diag_api.h b/examples/performance-thread/common/lthread_diag_api.h\nnew file mode 100644\nindex 0000000..f8e64a5\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_diag_api.h\n@@ -0,0 +1,295 
@@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+#ifndef LTHREAD_DIAG_API_H_\n+#define LTHREAD_DIAG_API_H_\n+\n+#include <stdint.h>\n+#include <inttypes.h>\n+\n+/*\n+ * Enable diagnostics\n+ * 0 = conditionally compiled out\n+ * 1 = compiled in and maskable at run time, see below for details\n+ */\n+#define LTHREAD_DIAG 0\n+\n+/**\n+ * lthread diagnostic interface\n+ *\n+ * If enabled via configuration file option ( tbd ) the lthread subsystem\n+ * can generate selected trace information, either RTE_LOG  (INFO) messages,\n+ * or else invoke a user supplied callback function when any of the events\n+ * listed below occur.\n+ *\n+ * Reporting of events can be selectively masked, the bit position in the\n+ * mask is determined by the corresponding event identifier listed below.\n+ *\n+ * Diagnostics are enabled by registering the callback function and mask\n+ * using the API lthread_diagnostic_enable().\n+ *\n+ * Various interesting parameters are passed to the callback, including the\n+ * time in cpu clks, the lthread id, the diagnostic event id, a user ref value,\n+ * event text string, object being traced, and two context dependent parameters\n+ * (p1 and p2). 
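A possible shape for such a callback (the body and names below are illustrative only: it prints the event and hands back the existing reference):

    #include <inttypes.h>
    #include <stdio.h>

    static uint64_t
    my_diag_cb(uint64_t time, struct lthread *lt, int diag_event,
               uint64_t diag_ref, const char *text, uint64_t p1, uint64_t p2)
    {
        (void)lt; (void)diag_event; (void)p1; (void)p2;
        printf("%" PRIu64 " %s\n", time, text);
        return diag_ref;    /* saved in the object on *_CREATE events */
    }

    static void diag_setup(void)
    {
        /* report everything; LT_DIAG_ALL sets every mask bit */
        lthread_diagnostic_enable(my_diag_cb, LT_DIAG_ALL);
    }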
The meaning of the two parameters p1 and p2 depends on\n+ * the specific event.\n+ *\n+ * The events LT_DIAG_LTHREAD_CREATE, LT_DIAG_MUTEX_CREATE and\n+ * LT_DIAG_COND_CREATE are implicitly enabled if the event mask includes any of\n+ * the LT_DIAG_LTHREAD_XXX, LT_DIAG_MUTEX_XXX or LT_DIAG_COND_XXX events\n+ * respectively.\n+ *\n+ * These create events may also be included in the mask discretely if it is\n+ * desired to monitor only create events.\n+ *\n+ * @param  time\n+ *  The time in cpu clks at which the event occurred\n+ *\n+ * @param  lthread\n+ *  The current lthread\n+ *\n+ * @param diag_event\n+ *  The diagnostic event id (bit position in the mask)\n+ *\n+ * @param  diag_ref\n+ *\n+ * For LT_DIAG_LTHREAD_CREATE, LT_DIAG_MUTEX_CREATE or LT_DIAG_COND_CREATE\n+ * this parameter is not used and set to 0.\n+ * For all other events diag_ref contains the user ref value returned by the\n+ * callback function when the lthread, mutex, or cond var was created.\n+ *\n+ * @param p1\n+ *  see below\n+ *\n+ * @param p2\n+ *  see below\n+ *\n+ * @returns\n+ * For LT_DIAG_LTHREAD_CREATE, LT_DIAG_MUTEX_CREATE or LT_DIAG_COND_CREATE\n+ * the callback is expected to return a user diagnostic ref value that will be\n+ * saved in the lthread, mutex or cond var.\n+ *\n+ * For all other events the return value is ignored.\n+ *\n+ *\tLT_DIAG_SCHED_CREATE - Invoked when a scheduler is created\n+ *\t\tp1 = the scheduler that was created\n+ *\t\tp2 = not used\n+ *\t\treturn value will be ignored\n+ *\n+ *\tLT_DIAG_SCHED_SHUTDOWN - Invoked when a shutdown request is received\n+ *\t\tp1 = the scheduler to be shutdown\n+ *\t\tp2 = not used\n+ *\t\treturn value will be ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_CREATE - Invoked when a thread is created\n+ *\t\tp1 = the lthread that was created\n+ *\t\tp2 = not used\n+ *\t\treturn value will be stored in the lthread\n+ *\n+ *\tLT_DIAG_LTHREAD_EXIT - Invoked when an lthread exits\n+ *\t\tp2 = 0 if the thread was already joined\n+ *\t\tp2 = 1 if the thread was not already joined\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_JOIN - Invoked when an lthread is joined\n+ *\t\tp1 = the lthread that is being joined\n+ *\t\tp2 = 0 if the thread was already exited\n+ *\t\tp2 = 1 if the thread was not already exited\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_CANCEL - Invoked when an lthread is cancelled\n+ *\t\tp1 = not used\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_DETACH - Invoked when an lthread is detached\n+ *\t\tp1 = not used\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_FREE - Invoked when an lthread is freed\n+ *\t\tp1 = not used\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_SUSPENDED - Invoked when an lthread is suspended\n+ *\t\tp1 = not used\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_YIELD - Invoked when an lthread explicitly yields\n+ *\t\tp1 = not used\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_RESCHEDULED - Invoked when an lthread is rescheduled\n+ *\t\tp1 = not used\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_RESUMED - Invoked when an lthread is resumed\n+ *\t\tp1 = not used\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_AFFINITY - Invoked when an lthread is affinitised\n+ *\t\tp1 = the destination lcore_id\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_TMR_START - Invoked when an lthread starts a timer\n+ *\t\tp1 = address of timer node\n+ 
*\t\tp2 = the timeout value\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_TMR_DELETE - Invoked when an lthread deletes a timer\n+ *\t\tp1 = address of the timer node\n+ *\t\tp2 = 0 if the timer was successfully deleted\n+ *\t\tp2 = not used otherwise\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_LTHREAD_TMR_EXPIRED - Invoked when an lthread timer expires\n+ *\t\tp1 = address of scheduler the timer expired on\n+ *\t\tp2 = the thread associated with the timer\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_COND_CREATE - Invoked when a condition variable is created\n+ *\t\tp1 = address of cond var that was created\n+ *\t\tp2 = not used\n+ *\t\treturn diag ref value will be stored in the condition variable\n+ *\n+ *\tLT_DIAG_COND_DESTROY - Invoked when a condition variable is destroyed\n+ *\t\tp1 = not used\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_COND_WAIT - Invoked when an lthread waits on a cond var\n+ *\t\tp1 = the address of the condition variable\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_COND_SIGNAL - Invoked when an lthread signals a cond var\n+ *\t\tp1 = the address of the cond var\n+ *\t\tp2 = the lthread that was signalled, or error code\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_COND_BROADCAST - Invoked when an lthread broadcasts a cond var\n+ *\t\tp1 = the address of the condition variable\n+ *\t\tp2 = the lthread(s) that are signalled, or error code\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_MUTEX_CREATE - Invoked when a mutex is created\n+ *\t\tp1 = address of mutex\n+ *\t\tp2 = not used\n+ *\t\treturn diag ref value will be stored in the mutex variable\n+ *\n+ *\tLT_DIAG_MUTEX_DESTROY - Invoked when a mutex is destroyed\n+ *\t\tp1 = address of mutex\n+ *\t\tp2 = not used\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_MUTEX_LOCK - Invoked when a mutex lock is obtained\n+ *\t\tp1 = address of mutex\n+ *\t\tp2 = function return value\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_MUTEX_BLOCKED - Invoked when an lthread blocks on a mutex\n+ *\t\tp1 = address of mutex\n+ *\t\tp2 = function return value\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_MUTEX_TRYLOCK - Invoked when a mutex try lock is attempted\n+ *\t\tp1 = address of mutex\n+ *\t\tp2 = the function return value\n+ *\t\treturn val ignored\n+ *\n+ *\tLT_DIAG_MUTEX_UNLOCKED - Invoked when a mutex is unlocked\n+ *\t\tp1 = address of mutex\n+ *\t\tp2 = the thread that was unlocked, or error code\n+ *\t\treturn val ignored\n+ */\n+typedef uint64_t (*diag_callback) (uint64_t time, struct lthread *lt,\n+\t\t\t\t  int diag_event, uint64_t diag_ref,\n+\t\t\t\tconst char *text, uint64_t p1, uint64_t p2);\n+\n+void lthread_diagnostic_enable(diag_callback cb, uint64_t diag_mask);\n+\n+void lthread_diagnostic_set_mask(uint64_t mask);\n+\n+/*\n+ * lthread diagnostic callback\n+ */\n+enum lthread_diag_ev {\n+\t/* bits 0 - 14 lthread flag group */\n+\tLT_DIAG_LTHREAD_CREATE,\t\t/* 00 mask 0x00000001 */\n+\tLT_DIAG_LTHREAD_EXIT,\t\t/* 01 mask 0x00000002 */\n+\tLT_DIAG_LTHREAD_JOIN,\t\t/* 02 mask 0x00000004 */\n+\tLT_DIAG_LTHREAD_CANCEL,\t\t/* 03 mask 0x00000008 */\n+\tLT_DIAG_LTHREAD_DETACH,\t\t/* 04 mask 0x00000010 */\n+\tLT_DIAG_LTHREAD_FREE,\t\t/* 05 mask 0x00000020 */\n+\tLT_DIAG_LTHREAD_SUSPENDED,\t/* 06 mask 0x00000040 */\n+\tLT_DIAG_LTHREAD_YIELD,\t\t/* 07 mask 0x00000080 */\n+\tLT_DIAG_LTHREAD_RESCHEDULED,\t/* 08 mask 0x00000100 */\n+\tLT_DIAG_LTHREAD_SLEEP,\t\t/* 09 mask 0x00000200 */\n+\tLT_DIAG_LTHREAD_RESUMED,\t/* 10 mask 0x00000400 */\n+\tLT_DIAG_LTHREAD_AFFINITY,\t/* 11 mask 0x00000800 
*/\n+\tLT_DIAG_LTHREAD_TMR_START,\t/* 12 mask 0x00001000 */\n+\tLT_DIAG_LTHREAD_TMR_DELETE,\t/* 13 mask 0x00002000 */\n+\tLT_DIAG_LTHREAD_TMR_EXPIRED,\t/* 14 mask 0x00004000 */\n+\t/* bits 15 - 19 conditional variable flag group */\n+\tLT_DIAG_COND_CREATE,\t\t/* 15 mask 0x00008000 */\n+\tLT_DIAG_COND_DESTROY,\t\t/* 16 mask 0x00010000 */\n+\tLT_DIAG_COND_WAIT,\t\t/* 17 mask 0x00020000 */\n+\tLT_DIAG_COND_SIGNAL,\t\t/* 18 mask 0x00040000 */\n+\tLT_DIAG_COND_BROADCAST,\t\t/* 19 mask 0x00080000 */\n+\t/* bits 20 - 25 mutex flag group */\n+\tLT_DIAG_MUTEX_CREATE,\t\t/* 20 mask 0x00100000 */\n+\tLT_DIAG_MUTEX_DESTROY,\t\t/* 21 mask 0x00200000 */\n+\tLT_DIAG_MUTEX_LOCK,\t\t/* 22 mask 0x00400000 */\n+\tLT_DIAG_MUTEX_TRYLOCK,\t\t/* 23 mask 0x00800000 */\n+\tLT_DIAG_MUTEX_BLOCKED,\t\t/* 24 mask 0x01000000 */\n+\tLT_DIAG_MUTEX_UNLOCKED,\t\t/* 25 mask 0x02000000 */\n+\t/* bits 26 - 27 scheduler flag group - 8 bits */\n+\tLT_DIAG_SCHED_CREATE,\t\t/* 26 mask 0x04000000 */\n+\tLT_DIAG_SCHED_SHUTDOWN,\t\t/* 27 mask 0x08000000 */\n+\tLT_DIAG_EVENT_MAX\n+};\n+\n+#define LT_DIAG_ALL 0xffffffffffffffff\n+\n+\n+/*\n+ * Display scheduler stats\n+ */\n+void\n+lthread_sched_stats_display(void);\n+\n+#endif\t\t\t\t/* LTHREAD_DIAG_API_H_ */\ndiff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h\nnew file mode 100644\nindex 0000000..60ec289\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_int.h\n@@ -0,0 +1,212 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Some portions of this software may have been derived from the\n+ * https://github.com/halayli/lthread which carries the following license.\n+ *\n+ * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ * 1. Redistributions of source code must retain the above copyright\n+ *    notice, this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ *    notice, this list of conditions and the following disclaimer in the\n+ *    documentation and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+ * SUCH DAMAGE.\n+ */\n+#ifndef LTHREAD_INT_H\n+#define LTHREAD_INT_H\n+\n+#include <stdint.h>\n+#include <sys/time.h>\n+#include <sys/types.h>\n+#include <errno.h>\n+#include <pthread.h>\n+#include <time.h>\n+\n+#include <rte_cycles.h>\n+#include <rte_per_lcore.h>\n+#include <rte_timer.h>\n+#include <rte_ring.h>\n+#include <rte_atomic_64.h>\n+#include <rte_spinlock.h>\n+#include <ctx.h>\n+\n+#include <lthread_api.h>\n+#include \"lthread.h\"\n+#include \"lthread_diag.h\"\n+#include \"lthread_tls.h\"\n+\n+struct lthread;\n+struct lthread_sched;\n+struct lthread_cond;\n+struct lthread_mutex;\n+struct lthread_key;\n+\n+struct key_pool;\n+struct qnode;\n+struct qnode_pool;\n+struct lthread_tls;\n+\n+\n+#define BIT(x) (1 << (x))\n+#define CLEARBIT(x) ~(1 << (x))\n+\n+#define POSIX_ERRNO(x)  (x)\n+\n+#define MAX_LTHREAD_NAME_SIZE 64\n+\n+#define RTE_LOGTYPE_LTHREAD RTE_LOGTYPE_USER1\n+\n+\n+/* define some shorthand for current scheduler and current thread */\n+#define THIS_SCHED RTE_PER_LCORE(this_sched)\n+#define THIS_LTHREAD RTE_PER_LCORE(this_sched)->current_lthread\n+\n+/*\n+ * Definition of a scheduler struct\n+ */\n+struct lthread_sched {\n+\tstruct ctx ctx;\t\t\t\t\t/* cpu context */\n+\tuint64_t birth;\t\t\t\t\t/* time created */\n+\tstruct lthread *current_lthread;\t\t/* running thread */\n+\tunsigned lcore_id;\t\t\t\t/* this sched lcore */\n+\tint run_flag;\t\t\t\t\t/* sched shutdown 
*/\n+\tuint64_t nb_blocked_threads;\t/* blocked threads */\n+\tstruct lthread_queue *ready;\t\t\t/* local ready queue */\n+\tstruct lthread_queue *pready;\t\t\t/* peer ready queue */\n+\tstruct lthread_objcache *lthread_cache;\t\t/* free lthreads */\n+\tstruct lthread_objcache *stack_cache;\t\t/* free stacks */\n+\tstruct lthread_objcache *per_lthread_cache;\t/* free per lthread */\n+\tstruct lthread_objcache *tls_cache;\t\t/* free TLS */\n+\tstruct lthread_objcache *cond_cache;\t\t/* free cond vars */\n+\tstruct lthread_objcache *mutex_cache;\t\t/* free mutexes */\n+\tstruct qnode_pool *qnode_pool;\t\t/* pool of queue nodes */\n+\tstruct key_pool *key_pool;\t\t/* pool of free TLS keys */\n+\tsize_t stack_size;\n+\tuint64_t diag_ref;\t\t\t\t/* diag ref */\n+} __rte_cache_aligned;\n+\n+RTE_DECLARE_PER_LCORE(struct lthread_sched *, this_sched);\n+\n+\n+/*\n+ * State for an lthread\n+ */\n+enum lthread_st {\n+\tST_LT_INIT,\t\t/* initial state */\n+\tST_LT_READY,\t\t/* lthread is ready to run */\n+\tST_LT_SLEEPING,\t\t/* lthread is sleeping */\n+\tST_LT_EXPIRED,\t\t/* lthread timeout has expired */\n+\tST_LT_EXITED,\t\t/* lthread has exited and needs cleanup */\n+\tST_LT_DETACH,\t\t/* lthread frees on exit */\n+\tST_LT_CANCELLED,\t/* lthread has been cancelled */\n+};\n+\n+/*\n+ * lthread sub states for exit/join\n+ */\n+enum join_st {\n+\tLT_JOIN_INITIAL,\t/* initial state */\n+\tLT_JOIN_EXITING,\t/* thread is exiting */\n+\tLT_JOIN_THREAD_SET,\t/* joining thread has been set */\n+\tLT_JOIN_EXIT_VAL_SET,\t/* exiting thread has set ret val */\n+\tLT_JOIN_EXIT_VAL_READ,\t/* joining thread has collected ret val */\n+};\n+\n+/* definition of an lthread stack object */\n+struct lthread_stack {\n+\tuint8_t stack[LTHREAD_MAX_STACK_SIZE];\n+\tsize_t stack_size;\n+\tstruct lthread_sched *root_sched;\n+} __rte_cache_aligned;\n+\n+/*\n+ * Definition of an lthread\n+ */\n+struct lthread {\n+\tstruct ctx ctx;\t\t\t\t/* cpu context */\n+\n+\tuint64_t state;\t\t\t\t/* current lthread state */\n+\n+\tstruct lthread_sched *sched;\t\t/* current scheduler */\n+\tvoid *stack;\t\t\t\t/* ptr to actual stack */\n+\tsize_t stack_size;\t\t\t/* current stack_size */\n+\tsize_t last_stack_size;\t\t\t/* last yield stack_size */\n+\tlthread_func_t fun;\t\t\t/* func ctx is running */\n+\tvoid *arg;\t\t\t\t/* func args passed to func */\n+\tvoid *per_lthread_data;\t\t\t/* per lthread user data */\n+\tlthread_exit_func exit_handler;\t\t/* called when thread exits */\n+\tuint64_t birth;\t\t\t\t/* time lthread was born */\n+\tstruct lthread_queue *pending_wr_queue;\t/* deferred queue to write */\n+\tstruct lthread *lt_join;\t\t/* lthread to join on */\n+\tuint64_t join;\t\t\t\t/* state for joining */\n+\tvoid **lt_exit_ptr;\t\t\t/* exit ptr for lthread_join */\n+\tstruct lthread_sched *root_sched;\t/* thread was created here */\n+\tstruct queue_node *qnode;\t\t/* node when in a queue */\n+\tstruct rte_timer tim;\t\t\t/* sleep timer */\n+\tstruct lthread_tls *tls;\t\t/* keys in use by the thread */\n+\tstruct lthread_stack *stack_container;\t/* stack */\n+\tchar funcname[MAX_LTHREAD_NAME_SIZE];\t/* thread func name */\n+\tuint64_t diag_ref;\t\t\t/* ref to user diag data */\n+} __rte_cache_aligned;\n+\n+/*\n+ * Assert\n+ */\n+#if LTHREAD_DIAG\n+#define LTHREAD_ASSERT(expr) do {\t\t\t\t\t\\\n+\tif (!(expr))\t\t\t\t\t\t\t\\\n+\t\trte_panic(\"line%d\\tassert \\\"\" #expr \"\\\" failed\\n\", __LINE__);\\\n+} while (0)\n+#else\n+#define LTHREAD_ASSERT(expr) do {} while (0)\n+#endif\n+\n+#endif\t\t\t\t/* LTHREAD_INT_H */\ndiff --git 
a/examples/performance-thread/common/lthread_mutex.c b/examples/performance-thread/common/lthread_mutex.c\nnew file mode 100644\nindex 0000000..d4b4602\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_mutex.c\n@@ -0,0 +1,244 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <stdint.h>\n+#include <stddef.h>\n+#include <limits.h>\n+#include <inttypes.h>\n+#include <unistd.h>\n+#include <pthread.h>\n+#include <fcntl.h>\n+#include <sys/time.h>\n+#include <sys/mman.h>\n+\n+#include <rte_config.h>\n+#include <rte_per_lcore.h>\n+#include <rte_log.h>\n+#include <rte_spinlock.h>\n+#include <rte_common.h>\n+\n+#include \"lthread_api.h\"\n+#include \"lthread_int.h\"\n+#include \"lthread_mutex.h\"\n+#include \"lthread_sched.h\"\n+#include \"lthread_queue.h\"\n+#include \"lthread_objcache.h\"\n+#include \"lthread_diag.h\"\n+\n+/*\n+ * Create a mutex\n+ */\n+int\n+lthread_mutex_init(char *name, struct lthread_mutex **mutex,\n+\t\t   __rte_unused const struct lthread_mutexattr *attr)\n+{\n+\tstruct lthread_mutex *m;\n+\n+\tif (mutex == NULL)\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\n+\tm = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);\n+\tif (m == NULL)\n+\t\treturn POSIX_ERRNO(EAGAIN);\n+\n+\tm->blocked = _lthread_queue_create(\"blocked queue\");\n+\tif (m->blocked == NULL) {\n+\t\t_lthread_objcache_free((THIS_SCHED)->mutex_cache, m);\n+\t\treturn POSIX_ERRNO(EAGAIN);\n+\t}\n+\n+\tif (name == NULL)\n+\t\tstrncpy(m->name, \"no name\", sizeof(m->name));\n+\telse\n+\t\tstrncpy(m->name, name, sizeof(m->name));\n+\tm->name[sizeof(m->name)-1] = 0;\n+\n+\tm->root_sched = THIS_SCHED;\n+\tm->owner = NULL;\n+\n+\trte_atomic64_init(&m->count);\n+\n+\tDIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);\n+\t/* success */\n+\t(*mutex) = 
m;\n+\treturn 0;\n+}\n+\n+/*\n+ * Destroy a mutex\n+ */\n+int lthread_mutex_destroy(struct lthread_mutex *m)\n+{\n+\tif ((m == NULL) || (m->blocked == NULL)) {\n+\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\t}\n+\n+\tif (m->owner == NULL) {\n+\t\t/* try to delete the blocked queue */\n+\t\tif (_lthread_queue_destroy(m->blocked) < 0) {\n+\t\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,\n+\t\t\t\t\tm, POSIX_ERRNO(EBUSY));\n+\t\t\treturn POSIX_ERRNO(EBUSY);\n+\t\t}\n+\n+\t\t/* free the mutex to cache */\n+\t\t_lthread_objcache_free(m->root_sched->mutex_cache, m);\n+\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);\n+\t\treturn 0;\n+\t}\n+\t/* can't destroy it, it's still in use */\n+\tDIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));\n+\treturn POSIX_ERRNO(EBUSY);\n+}\n+\n+/*\n+ * Obtain a mutex, blocking if it is already held\n+ */\n+int lthread_mutex_lock(struct lthread_mutex *m)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\tif ((m == NULL) || (m->blocked == NULL)) {\n+\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\t}\n+\n+\t/* allow no recursion */\n+\tif (m->owner == lt) {\n+\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));\n+\t\treturn POSIX_ERRNO(EDEADLK);\n+\t}\n+\n+\tfor (;;) {\n+\t\trte_atomic64_inc(&m->count);\n+\t\tdo {\n+\t\t\tif (rte_atomic64_cmpset\n+\t\t\t    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {\n+\t\t\t\t/* happy days, we got the lock */\n+\t\t\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\t\t\t/* spin due to race with unlock when\n+\t\t\t * nothing was blocked\n+\t\t\t */\n+\t\t} while ((rte_atomic64_read(&m->count) == 1) &&\n+\t\t\t\t(m->owner == NULL));\n+\n+\t\t/* queue the current thread in the blocked queue\n+\t\t * we defer this to after we return to the scheduler\n+\t\t * to ensure that the current thread context is saved\n+\t\t * before unlock could result in it being dequeued and\n+\t\t * resumed\n+\t\t */\n+\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);\n+\t\tlt->pending_wr_queue = m->blocked;\n+\t\t/* now relinquish cpu */\n+\t\t_suspend();\n+\t\t/* resumed, must loop and compete for the lock again */\n+\t}\n+\tLTHREAD_ASSERT(0);\n+\treturn 0;\n+}\n+\n+/* try to lock a mutex but don't block */\n+int lthread_mutex_trylock(struct lthread_mutex *m)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\tif ((m == NULL) || (m->blocked == NULL)) {\n+\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\t}\n+\n+\tif (m->owner == lt) {\n+\t\t/* no recursion */\n+\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));\n+\t\treturn POSIX_ERRNO(EDEADLK);\n+\t}\n+\n+\trte_atomic64_inc(&m->count);\n+\tif (rte_atomic64_cmpset\n+\t    ((uint64_t *) &m->owner, (uint64_t) NULL, (uint64_t) lt)) {\n+\t\t/* got the lock */\n+\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);\n+\t\treturn 0;\n+\t}\n+\n+\t/* failed so return busy */\n+\trte_atomic64_dec(&m->count);\n+\tDIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));\n+\treturn POSIX_ERRNO(EBUSY);\n+}\n+\n+/*\n+ * Unlock a mutex\n+ */\n+int lthread_mutex_unlock(struct lthread_mutex *m)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\tstruct lthread *unblocked;\n+\n+\tif ((m == NULL) || (m->blocked == NULL)) {\n+\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\t}\n+\n+\t/* fail if the caller does not own it */\n+\tif (m->owner != lt || m->owner == NULL) {\n+\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, 
POSIX_ERRNO(EPERM));\n+\t\treturn POSIX_ERRNO(EPERM);\n+\t}\n+\n+\trte_atomic64_dec(&m->count);\n+\t/* if there are blocked threads then make one ready */\n+\twhile (rte_atomic64_read(&m->count) > 0) {\n+\t\tunblocked = _lthread_queue_remove(m->blocked);\n+\n+\t\tif (unblocked != NULL) {\n+\t\t\trte_atomic64_dec(&m->count);\n+\t\t\tDIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);\n+\t\t\tLTHREAD_ASSERT(unblocked->sched != NULL);\n+\t\t\t_ready_queue_insert((struct lthread_sched *)\n+\t\t\t\t\t    unblocked->sched, unblocked);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\t/* release the lock */\n+\tm->owner = NULL;\n+\treturn 0;\n+}\ndiff --git a/examples/performance-thread/common/lthread_mutex.h b/examples/performance-thread/common/lthread_mutex.h\nnew file mode 100644\nindex 0000000..aebe77b\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_mutex.h\n@@ -0,0 +1,52 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+\n+#ifndef LTHREAD_MUTEX_H_\n+#define LTHREAD_MUTEX_H_\n+\n+#include \"lthread_queue.h\"\n+\n+\n+#define MAX_MUTEX_NAME_SIZE 64\n+\n+struct lthread_mutex {\n+\tstruct lthread *owner;\n+\trte_atomic64_t\tcount;\n+\tstruct lthread_queue *blocked __rte_cache_aligned;\n+\tstruct lthread_sched *root_sched;\n+\tchar\t\t\tname[MAX_MUTEX_NAME_SIZE];\n+\tuint64_t\t\tdiag_ref; /* optional ref to user diag data */\n+} __rte_cache_aligned;\n+\n+#endif /* LTHREAD_MUTEX_H_ */\ndiff --git a/examples/performance-thread/common/lthread_objcache.h b/examples/performance-thread/common/lthread_objcache.h\nnew file mode 100644\nindex 0000000..2101ad2\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_objcache.h\n@@ -0,0 +1,160 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+#ifndef LTHREAD_OBJCACHE_H_\n+#define LTHREAD_OBJCACHE_H_\n+\n+#include <string.h>\n+\n+#include <rte_per_lcore.h>\n+#include <rte_malloc.h>\n+#include <rte_memory.h>\n+\n+#include \"lthread_int.h\"\n+#include \"lthread_diag.h\"\n+#include \"lthread_queue.h\"\n+\n+\n+#define DISPLAY_OBCACHE_QUEUES 0\n+\n+RTE_DECLARE_PER_LCORE(struct lthread_sched *, this_sched);\n+\n+struct lthread_objcache {\n+\tstruct lthread_queue *q;\n+\tsize_t obj_size;\n+\tint prealloc_size;\n+\tchar name[LT_MAX_NAME_SIZE];\n+\n+\tDIAG_COUNT_DEFINE(rd);\n+\tDIAG_COUNT_DEFINE(wr);\n+\tDIAG_COUNT_DEFINE(prealloc);\n+\tDIAG_COUNT_DEFINE(capacity);\n+\tDIAG_COUNT_DEFINE(available);\n+};\n+\n+/*\n+ * Create a cache\n+ */\n+static inline struct\n+lthread_objcache *_lthread_objcache_create(const char *name,\n+\t\t\t\t\tsize_t obj_size,\n+\t\t\t\t\tint prealloc_size)\n+{\n+\tstruct lthread_objcache *c =\n+\t    rte_malloc_socket(NULL, sizeof(struct lthread_objcache),\n+\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\trte_socket_id());\n+\tif (c == NULL)\n+\t\treturn NULL;\n+\n+\tc->q = _lthread_queue_create(\"cache queue\");\n+\tif (c->q == NULL) {\n+\t\trte_free(c);\n+\t\treturn NULL;\n+\t}\n+\tc->obj_size = obj_size;\n+\tc->prealloc_size = prealloc_size;\n+\n+\tif (name != NULL)\n+\t\tstrncpy(c->name, name, LT_MAX_NAME_SIZE);\n+\tc->name[sizeof(c->name)-1] = 0;\n+\n+\tDIAG_COUNT_INIT(c, rd);\n+\tDIAG_COUNT_INIT(c, wr);\n+\tDIAG_COUNT_INIT(c, prealloc);\n+\tDIAG_COUNT_INIT(c, capacity);\n+\tDIAG_COUNT_INIT(c, available);\n+\treturn c;\n+}\n+\n+/*\n+ * Destroy an objcache\n+ */\n+static inline int\n+_lthread_objcache_destroy(struct lthread_objcache *c)\n+{\n+\tif (_lthread_queue_destroy(c->q) == 0) {\n+\t\trte_free(c);\n+\t\treturn 0;\n+\t}\n+\treturn -1;\n+}\n+\n+/*\n+ * Allocate an object from an object cache\n+ */\n+static inline void *\n+_lthread_objcache_alloc(struct 
lthread_objcache *c)\n+{\n+\tint i;\n+\tvoid *data;\n+\tstruct lthread_queue *q = c->q;\n+\tsize_t obj_size = c->obj_size;\n+\tint prealloc_size = c->prealloc_size;\n+\n+\tdata = _lthread_queue_remove(q);\n+\n+\tif (data == NULL) {\n+\t\tDIAG_COUNT_INC(c, prealloc);\n+\t\tfor (i = 0; i < prealloc_size; i++) {\n+\t\t\tdata =\n+\t\t\t    rte_zmalloc_socket(NULL, obj_size,\n+\t\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\t\trte_socket_id());\n+\t\t\tif (data == NULL)\n+\t\t\t\treturn NULL;\n+\n+\t\t\tDIAG_COUNT_INC(c, available);\n+\t\t\tDIAG_COUNT_INC(c, capacity);\n+\t\t\t_lthread_queue_insert_mp(q, data);\n+\t\t}\n+\t\tdata = _lthread_queue_remove(q);\n+\t}\n+\tDIAG_COUNT_INC(c, rd);\n+\tDIAG_COUNT_DEC(c, available);\n+\treturn data;\n+}\n+\n+/*\n+ * free an object to a cache\n+ */\n+static inline void\n+_lthread_objcache_free(struct lthread_objcache *c, void *obj)\n+{\n+\tDIAG_COUNT_INC(c, wr);\n+\tDIAG_COUNT_INC(c, available);\n+\t_lthread_queue_insert_mp(c->q, obj);\n+}\n+\n+\n+\n+#endif\t\t\t\t/* LTHREAD_OBJCACHE_H_ */\ndiff --git a/examples/performance-thread/common/lthread_pool.h b/examples/performance-thread/common/lthread_pool.h\nnew file mode 100644\nindex 0000000..509a01c\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_pool.h\n@@ -0,0 +1,338 @@\n+/*\n+ *-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Some portions of this software is derived from the producer\n+ * consumer queues described by Dmitry Vyukov and published  here\n+ * http://www.1024cores.net\n+ *\n+ * Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met\n+ *\n+ * 1. 
Redistributions of source code must retain the above copyright notice,\n+ * this list of conditions and the following disclaimer.\n+ *\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ * this list of conditions and the following disclaimer in the documentation\n+ * and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS\n+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\n+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\n+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of Dmitry Vyukov.\n+ */\n+\n+#ifndef LTHREAD_POOL_H_\n+#define LTHREAD_POOL_H_\n+\n+#include <rte_malloc.h>\n+#include <rte_per_lcore.h>\n+#include <rte_log.h>\n+\n+#include \"lthread_int.h\"\n+#include \"lthread_diag.h\"\n+#include \"atomic.h\"\n+\n+/*\n+ * This file implements a pool of queue nodes used by the queue implemented\n+ * in lthread_queue.h.\n+ *\n+ * The pool is an intrusive lock free MPSC queue.\n+ *\n+ * The pool is created empty and populated lazily, i.e. on the first attempt\n+ * to allocate from the pool.\n+ *\n+ * Whenever the pool is empty more objects are added to the pool.\n+ * The number of objects preallocated in this way is a parameter of\n+ * _qnode_pool_create. Freeing an object returns it to the pool.\n+ *\n+ * Each lthread scheduler maintains its own pool. 
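A minimal, illustrative\n+ * allocation/free sequence (obj is an assumed user pointer, not part of\n+ * this patch; THIS_SCHED is valid on any lcore running a scheduler):\n+ *\n+ *   struct qnode *n = _qnode_alloc();   -- taken from this lcore's pool\n+ *   if (n != NULL) {\n+ *           n->data = obj;              -- stash the user pointer\n+ *           _qnode_free(n);             -- legal from any lcore\n+ *   }\n+ *\n+ * 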
L-threads must always\n+ * allocate from this local pool (because it is a single consumer queue).\n+ * L-threads can free nodes to any pool (because it is a multi producer queue).\n+ * This enables threads that have affinitized to a different scheduler to free\n+ * nodes safely.\n+ *\n+ * Pools are allocated from huge pages and can never be destroyed.\n+ * There is no garbage collection; they will continue to grow whenever demand\n+ * exceeds supply, or until memory is exhausted.\n+ *\n+ */\n+\n+struct qnode;\n+struct qnode_cache;\n+\n+/*\n+ * define intermediate node\n+ */\n+struct qnode {\n+\tstruct qnode *next;\n+\tvoid *data;\n+\tstruct qnode_pool *pool;\n+} __rte_cache_aligned;\n+\n+/*\n+ * a pool structure\n+ */\n+struct qnode_pool {\n+\tstruct qnode *head;\n+\tstruct qnode *stub;\n+\tstruct qnode *fast_alloc;\n+\tstruct qnode *tail __rte_cache_aligned;\n+\tint pre_alloc;\n+\tchar name[LT_MAX_NAME_SIZE];\n+\n+\tDIAG_COUNT_DEFINE(rd);\n+\tDIAG_COUNT_DEFINE(wr);\n+\tDIAG_COUNT_DEFINE(available);\n+\tDIAG_COUNT_DEFINE(prealloc);\n+\tDIAG_COUNT_DEFINE(capacity);\n+} __rte_cache_aligned;\n+\n+/*\n+ * Create a pool of qnodes\n+ */\n+\n+static inline struct qnode_pool *\n+_qnode_pool_create(const char *name, int prealloc_size) {\n+\n+\tstruct qnode_pool *p = rte_malloc_socket(NULL,\n+\t\t\t\t\tsizeof(struct qnode_pool),\n+\t\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\t\trte_socket_id());\n+\n+\tLTHREAD_ASSERT(p);\n+\n+\tp->stub = rte_malloc_socket(NULL,\n+\t\t\t\tsizeof(struct qnode),\n+\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\trte_socket_id());\n+\n+\tLTHREAD_ASSERT(p->stub);\n+\n+\tif (name != NULL)\n+\t\tstrncpy(p->name, name, LT_MAX_NAME_SIZE);\n+\tp->name[sizeof(p->name)-1] = 0;\n+\n+\tp->stub->pool = p;\n+\tp->stub->next = NULL;\n+\tp->tail = p->stub;\n+\tp->head = p->stub;\n+\tp->pre_alloc = prealloc_size;\n+\n+\tDIAG_COUNT_INIT(p, rd);\n+\tDIAG_COUNT_INIT(p, wr);\n+\tDIAG_COUNT_INIT(p, available);\n+\tDIAG_COUNT_INIT(p, prealloc);\n+\tDIAG_COUNT_INIT(p, capacity);\n+\n+\treturn p;\n+}\n+\n+\n+/*\n+ * Insert a node into the pool\n+ */\n+static inline void __attribute__ ((always_inline))\n+_qnode_pool_insert(struct qnode_pool *p, struct qnode *n)\n+{\n+\tn->next = NULL;\n+\tstruct qnode *prev = n;\n+\t/* We insert at the head */\n+\tprev = (struct qnode *) atomic64_xchg((uint64_t *)&p->head,\n+\t\t\t\t\t\t(uint64_t) prev);\n+\t/* there is a window of inconsistency until prev next is set */\n+\t/* which is why remove must retry */\n+\tprev->next = n;\n+}\n+\n+/*\n+ * Remove a node from the pool\n+ *\n+ * There is a race with _qnode_pool_insert() whereby the queue could appear\n+ * empty during a concurrent insert; this is handled by retrying\n+ *\n+ * The queue uses a stub node, which must be swung as the queue becomes\n+ * empty; this requires an insert of the stub, which means that removing the\n+ * last item from the queue incurs the penalty of an atomic exchange. 
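Note that in the common case\n+ * the single consumer touches only p->tail while producers contend only\n+ * on p->head, which is why tail is given its own cache line in\n+ * struct qnode_pool. 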
Since the\n+ * pool is maintained with a bulk pre-allocation the cost of this is amortised.\n+ */\n+static inline struct qnode *__attribute__ ((always_inline))\n+_pool_remove(struct qnode_pool *p)\n+{\n+\tstruct qnode *head;\n+\tstruct qnode *tail = p->tail;\n+\tstruct qnode *next = tail->next;\n+\n+\t/* we remove from the tail */\n+\tif (tail == p->stub) {\n+\t\tif (next == NULL)\n+\t\t\treturn NULL;\n+\t\t/* advance the tail */\n+\t\tp->tail = next;\n+\t\ttail = next;\n+\t\tnext = next->next;\n+\t}\n+\tif (likely(next != NULL)) {\n+\t\tp->tail = next;\n+\t\treturn tail;\n+\t}\n+\n+\thead = p->head;\n+\tif (tail == head)\n+\t\treturn NULL;\n+\n+\t/* swing stub node */\n+\t_qnode_pool_insert(p, p->stub);\n+\n+\tnext = tail->next;\n+\tif (next) {\n+\t\tp->tail = next;\n+\t\treturn tail;\n+\t}\n+\treturn NULL;\n+}\n+\n+\n+/*\n+ * This adds a retry to the _pool_remove function\n+ * defined above\n+ */\n+static inline struct qnode *__attribute__ ((always_inline))\n+_qnode_pool_remove(struct qnode_pool *p)\n+{\n+\tstruct qnode *n;\n+\n+\tdo {\n+\t\tn = _pool_remove(p);\n+\t\tif (likely(n != NULL))\n+\t\t\treturn n;\n+\n+\t\trte_compiler_barrier();\n+\t} while ((p->head != p->tail) &&\n+\t\t\t(p->tail != p->stub));\n+\treturn NULL;\n+}\n+\n+/*\n+ * Allocate a node from the pool\n+ * If the pool is empty, add more nodes\n+ */\n+static inline struct qnode *__attribute__ ((always_inline))\n+_qnode_alloc(void)\n+{\n+\tstruct qnode_pool *p = (THIS_SCHED)->qnode_pool;\n+\tint prealloc_size = p->pre_alloc;\n+\tstruct qnode *n;\n+\tint i;\n+\n+\tif (likely(p->fast_alloc != NULL)) {\n+\t\tn = p->fast_alloc;\n+\t\tp->fast_alloc = NULL;\n+\t\treturn n;\n+\t}\n+\n+\tn = _qnode_pool_remove(p);\n+\n+\tif (unlikely(n == NULL)) {\n+\t\tDIAG_COUNT_INC(p, prealloc);\n+\t\tfor (i = 0; i < prealloc_size; i++) {\n+\t\t\tn = rte_malloc_socket(NULL,\n+\t\t\t\t\tsizeof(struct qnode),\n+\t\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\t\trte_socket_id());\n+\t\t\tif (n == NULL)\n+\t\t\t\treturn NULL;\n+\n+\t\t\tDIAG_COUNT_INC(p, available);\n+\t\t\tDIAG_COUNT_INC(p, capacity);\n+\n+\t\t\tn->pool = p;\n+\t\t\t_qnode_pool_insert(p, n);\n+\t\t}\n+\t\tn = _qnode_pool_remove(p);\n+\t}\n+\tn->pool = p;\n+\tDIAG_COUNT_INC(p, rd);\n+\tDIAG_COUNT_DEC(p, available);\n+\treturn n;\n+}\n+\n+\n+\n+/*\n+ * free a queue node to the per scheduler pool from which it came\n+ */\n+static inline void __attribute__ ((always_inline))\n+_qnode_free(struct qnode *n)\n+{\n+\tstruct qnode_pool *p = n->pool;\n+\n+\n+\tif (unlikely(p->fast_alloc != NULL) ||\n+\t\t\tunlikely(n->pool != (THIS_SCHED)->qnode_pool)) {\n+\t\tDIAG_COUNT_INC(p, wr);\n+\t\tDIAG_COUNT_INC(p, available);\n+\t\t_qnode_pool_insert(p, n);\n+\t\treturn;\n+\t}\n+\tp->fast_alloc = n;\n+}\n+\n+/*\n+ * Destroy a qnode pool.\n+ * The queue must be empty when this is called\n+ */\n+static inline int\n+_qnode_pool_destroy(struct qnode_pool *p)\n+{\n+\trte_free(p->stub);\n+\trte_free(p);\n+\treturn 0;\n+}\n+\n+\n+#endif\t\t\t\t/* LTHREAD_POOL_H_ */\ndiff --git a/examples/performance-thread/common/lthread_queue.h b/examples/performance-thread/common/lthread_queue.h\nnew file mode 100644\nindex 0000000..36b35e9\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_queue.h\n@@ -0,0 +1,303 @@\n+/*\n+ *-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Some portions of this software is derived from the producer\n+ * consumer queues described by Dmitry Vyukov and published  here\n+ * http://www.1024cores.net\n+ *\n+ * Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ *\n+ * 1. Redistributions of source code must retain the above copyright notice,\n+ * this list of conditions and the following disclaimer.\n+ *\n+ * 2. Redistributions in binary form must reproduce the above copyright notice,\n+ * this list of conditions and the following disclaimer in the documentation\n+ * and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV \"AS IS\"\n+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n+ * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS\n+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\n+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\n+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ *\n+ * The views and conclusions contained in the software and documentation are\n+ * those of the authors and should not be interpreted as representing official\n+ * policies, either expressed or implied, of Dmitry Vyukov.\n+ */\n+\n+#ifndef LTHREAD_QUEUE_H_\n+#define LTHREAD_QUEUE_H_\n+\n+#include <string.h>\n+\n+#include <rte_prefetch.h>\n+#include <rte_per_lcore.h>\n+\n+#include \"lthread_int.h\"\n+#include \"lthread.h\"\n+#include \"lthread_diag.h\"\n+#include \"lthread_pool.h\"\n+#include \"atomic.h\"\n+\n+struct lthread_queue;\n+\n+/*\n+ * This file implements an unbounded FIFO queue based on a lock free\n+ * linked list.\n+ *\n+ * The queue is non-intrusive in that it uses intermediate nodes, and does\n+ * not require these nodes to be inserted into the object being placed\n+ * in the queue.\n+ *\n+ * This is slightly more efficient than the very similar queue in lthread_pool.h\n+ * in that it does not have to swing a stub node as the queue becomes empty.\n+ *\n+ * The queue access functions allocate and free intermediate nodes\n+ * transparently from/to a per scheduler pool (see lthread_pool.h).\n+ *\n+ * The queue provides both MPSC and SPSC insert methods.\n+ */\n+\n+/*\n+ * define a queue of lthread nodes\n+ */\n+struct lthread_queue {\n+\tstruct qnode *head;\n+\tstruct qnode *tail __rte_cache_aligned;\n+\tstruct lthread_queue *p;\n+\tchar name[LT_MAX_NAME_SIZE];\n+\n+\tDIAG_COUNT_DEFINE(rd);\n+\tDIAG_COUNT_DEFINE(wr);\n+\tDIAG_COUNT_DEFINE(size);\n+\n+} __rte_cache_aligned;\n+\n+\n+\n+static inline struct lthread_queue *\n+_lthread_queue_create(const char *name)\n+{\n+\tstruct qnode *stub;\n+\tstruct lthread_queue *new_queue;\n+\n+\tnew_queue = rte_malloc_socket(NULL, sizeof(struct lthread_queue),\n+\t\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\t\trte_socket_id());\n+\tif (new_queue == NULL)\n+\t\treturn NULL;\n+\n+\t/* allocate stub node */\n+\tstub = _qnode_alloc();\n+\tLTHREAD_ASSERT(stub);\n+\n+\tif (name != NULL)\n+\t\tstrncpy(new_queue->name, name, sizeof(new_queue->name));\n+\tnew_queue->name[sizeof(new_queue->name)-1] = 0;\n+\n+\t/* initialize queue as empty */\n+\tstub->next = NULL;\n+\tnew_queue->head = stub;\n+\tnew_queue->tail = stub;\n+\n+\tDIAG_COUNT_INIT(new_queue, rd);\n+\tDIAG_COUNT_INIT(new_queue, wr);\n+\tDIAG_COUNT_INIT(new_queue, size);\n+\n+\treturn new_queue;\n+}\n+\n+/**\n+ * Return true if the queue is empty\n+ */\n+static inline int __attribute__ ((always_inline))\n+_lthread_queue_empty(struct lthread_queue *q)\n+{\n+\treturn (q->tail == q->head);\n+}\n+\n+\n+\n+/**\n+ * Destroy a queue\n+ * fails if the queue is not empty\n+ */\n+static inline int _lthread_queue_destroy(struct lthread_queue *q)\n+{\n+\tif (q == NULL)\n+\t\treturn -1;\n+\n+\tif (!_lthread_queue_empty(q))\n+\t\treturn -1;\n+\n+\t_qnode_free(q->head);\n+\trte_free(q);\n+\treturn 0;\n+}\n+\n+RTE_DECLARE_PER_LCORE(struct lthread_sched *, this_sched);\n+\n+/*\n+ * Insert a node into a queue\n+ * this implementation is multi producer safe\n+ */\n+static inline 
struct qnode *__attribute__ ((always_inline))\n+_lthread_queue_insert_mp(struct lthread_queue\n+\t\t\t\t\t\t\t  *q, void *data)\n+{\n+\tstruct qnode *prev;\n+\tstruct qnode *n = _qnode_alloc();\n+\n+\tif (n == NULL)\n+\t\treturn NULL;\n+\n+\t/* set object in node */\n+\tn->data = data;\n+\tn->next = NULL;\n+\n+\t/* this is an MPSC method, perform a locked update */\n+\tprev = n;\n+\tprev =\n+\t    (struct qnode *)atomic64_xchg((uint64_t *) &(q)->head,\n+\t\t\t\t\t       (uint64_t) prev);\n+\t/* there is a window of inconsistency until prev next is set,\n+\t * which is why remove must retry\n+\t */\n+\tprev->next = n;\n+\n+\tDIAG_COUNT_INC(q, wr);\n+\tDIAG_COUNT_INC(q, size);\n+\n+\treturn n;\n+}\n+\n+/*\n+ * Insert a node into a queue in single producer mode;\n+ * this implementation is NOT multi producer safe\n+ */\n+static inline struct qnode *__attribute__ ((always_inline))\n+_lthread_queue_insert_sp(struct lthread_queue\n+\t\t\t\t\t\t\t  *q, void *data)\n+{\n+\t/* allocate a queue node */\n+\tstruct qnode *prev;\n+\tstruct qnode *n = _qnode_alloc();\n+\n+\tif (n == NULL)\n+\t\treturn NULL;\n+\n+\t/* set data in node */\n+\tn->data = data;\n+\tn->next = NULL;\n+\n+\t/* this is an SPSC method, no need for locked exchange operation */\n+\tprev = q->head;\n+\tprev->next = q->head = n;\n+\n+\tDIAG_COUNT_INC(q, wr);\n+\tDIAG_COUNT_INC(q, size);\n+\n+\treturn n;\n+}\n+\n+/*\n+ * Remove a node from a queue (single attempt; may transiently\n+ * return NULL while a producer is mid insert)\n+ */\n+static inline void *__attribute__ ((always_inline))\n+_lthread_queue_poll(struct lthread_queue *q)\n+{\n+\tvoid *data = NULL;\n+\tstruct qnode *tail = q->tail;\n+\tstruct qnode *next = (struct qnode *)tail->next;\n+\t/*\n+\t * There is a small window of inconsistency between producer and\n+\t * consumer whereby the queue may appear empty if consumer and\n+\t * producer access it at the same time.\n+\t * The consumer must handle this by retrying\n+\t */\n+\n+\tif (likely(next != NULL)) {\n+\t\tq->tail = next;\n+\t\ttail->data = next->data;\n+\t\tdata = tail->data;\n+\n+\t\t/* free the node */\n+\t\t_qnode_free(tail);\n+\n+\t\tDIAG_COUNT_INC(q, rd);\n+\t\tDIAG_COUNT_DEC(q, size);\n+\t\treturn data;\n+\t}\n+\treturn NULL;\n+}\n+\n+/*\n+ * Remove a node from a queue, retrying while the queue still\n+ * appears non-empty\n+ */\n+static inline void *__attribute__ ((always_inline))\n+_lthread_queue_remove(struct lthread_queue *q)\n+{\n+\tvoid *data = NULL;\n+\n+\t/*\n+\t * There is a small window of inconsistency between producer and\n+\t * consumer whereby the queue may appear empty if consumer and\n+\t * producer access it at the same time. We handle this by retrying\n+\t */\n+\tdo {\n+\t\tdata = _lthread_queue_poll(q);\n+\n+\t\tif (likely(data != NULL)) {\n+\n+\t\t\tDIAG_COUNT_INC(q, rd);\n+\t\t\tDIAG_COUNT_DEC(q, size);\n+\t\t\treturn data;\n+\t\t}\n+\t\trte_compiler_barrier();\n+\t} while (unlikely(!_lthread_queue_empty(q)));\n+\treturn NULL;\n+}\n+\n+\n+#endif\t\t\t\t/* LTHREAD_QUEUE_H_ */\ndiff --git a/examples/performance-thread/common/lthread_sched.c b/examples/performance-thread/common/lthread_sched.c\nnew file mode 100644\nindex 0000000..1e3306b\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_sched.c\n@@ -0,0 +1,642 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Some portions of this software is derived from the\n+ * https://github.com/halayli/lthread which carrys the following license.\n+ *\n+ * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ * 1. Redistributions of source code must retain the above copyright\n+ *    notice, this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ *    notice, this list of conditions and the following disclaimer in the\n+ *    documentation and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+ * SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Lthread\n+ * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ * 1. 
Redistributions of source code must retain the above copyright\n+ *    notice, this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ *    notice, this list of conditions and the following disclaimer in the\n+ *    documentation and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+ * SUCH DAMAGE.\n+ *\n+ * lthread.c\n+ */\n+\n+#define RTE_MEM 1\n+\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <stdint.h>\n+#include <stddef.h>\n+#include <limits.h>\n+#include <inttypes.h>\n+#include <unistd.h>\n+#include <pthread.h>\n+#include <fcntl.h>\n+#include <sys/time.h>\n+#include <sys/mman.h>\n+#include <sched.h>\n+\n+#include <rte_config.h>\n+#include <rte_prefetch.h>\n+#include <rte_per_lcore.h>\n+#include <rte_atomic.h>\n+#include <rte_atomic_64.h>\n+#include <rte_log.h>\n+#include <rte_common.h>\n+#include <rte_branch_prediction.h>\n+\n+#include \"lthread_api.h\"\n+#include \"lthread_int.h\"\n+#include \"lthread_sched.h\"\n+#include \"lthread_objcache.h\"\n+#include \"lthread_timer.h\"\n+#include \"lthread_mutex.h\"\n+#include \"lthread_cond.h\"\n+#include \"lthread_tls.h\"\n+#include \"lthread_diag.h\"\n+\n+/*\n+ * This file implements the lthread scheduler.\n+ * The scheduler entry point is the function lthread_run().\n+ * This must be run as the main loop of an EAL thread.\n+ *\n+ * Currently once a scheduler is created it cannot be destroyed.\n+ * When a scheduler shuts down it is assumed that the application is\n+ * terminating.\n+ */\n+\n+static rte_atomic16_t num_schedulers;\n+static rte_atomic16_t active_schedulers;\n+\n+/* one scheduler per lcore */\n+RTE_DEFINE_PER_LCORE(struct lthread_sched *, this_sched) = NULL;\n+\n+struct lthread_sched *schedcore[LTHREAD_MAX_LCORES];\n+\n+diag_callback diag_cb;\n+\n+uint64_t diag_mask;\n+\n+\n+/* constructor */\n+void lthread_sched_ctor(void) __attribute__ ((constructor));\n+void lthread_sched_ctor(void)\n+{\n+\tmemset(schedcore, 0, sizeof(schedcore));\n+\trte_atomic16_init(&num_schedulers);\n+\trte_atomic16_set(&num_schedulers, 1);\n+\trte_atomic16_init(&active_schedulers);\n+\trte_atomic16_set(&active_schedulers, 0);\n+\tdiag_cb = NULL;\n+}\n+\n+\n+enum sched_alloc_phase {\n+\tSCHED_ALLOC_OK,\n+\tSCHED_ALLOC_QNODE_POOL,\n+\tSCHED_ALLOC_READY_QUEUE,\n+\tSCHED_ALLOC_PREADY_QUEUE,\n+\tSCHED_ALLOC_LTHREAD_CACHE,\n+\tSCHED_ALLOC_STACK_CACHE,\n+\tSCHED_ALLOC_PERLT_CACHE,\n+\tSCHED_ALLOC_TLS_CACHE,\n+\tSCHED_ALLOC_COND_CACHE,\n+\tSCHED_ALLOC_MUTEX_CACHE,\n+};\n+\n+static int\n+_lthread_sched_alloc_resources(struct lthread_sched *new_sched)\n+{\n+\tint alloc_status;\n+\n+\tdo {\n+\t\t/* Initialize per scheduler queue node pool */\n+\t\talloc_status = SCHED_ALLOC_QNODE_POOL;\n+\t\tnew_sched->qnode_pool 
=\n+\t\t\t_qnode_pool_create(\"qnode pool\", LTHREAD_PREALLOC);\n+\t\tif (new_sched->qnode_pool == NULL)\n+\t\t\tbreak;\n+\n+\t\t/* Initialize per scheduler local ready queue */\n+\t\talloc_status = SCHED_ALLOC_READY_QUEUE;\n+\t\tnew_sched->ready = _lthread_queue_create(\"ready queue\");\n+\t\tif (new_sched->ready == NULL)\n+\t\t\tbreak;\n+\n+\t\t/* Initialize per scheduler local peer ready queue */\n+\t\talloc_status = SCHED_ALLOC_PREADY_QUEUE;\n+\t\tnew_sched->pready = _lthread_queue_create(\"pready queue\");\n+\t\tif (new_sched->pready == NULL)\n+\t\t\tbreak;\n+\n+\t\t/* Initialize per scheduler local free lthread cache */\n+\t\talloc_status = SCHED_ALLOC_LTHREAD_CACHE;\n+\t\tnew_sched->lthread_cache =\n+\t\t\t_lthread_objcache_create(\"lthread cache\",\n+\t\t\t\t\t\tsizeof(struct lthread),\n+\t\t\t\t\t\tLTHREAD_PREALLOC);\n+\t\tif (new_sched->lthread_cache == NULL)\n+\t\t\tbreak;\n+\n+\t\t/* Initialize per scheduler local free stack cache */\n+\t\talloc_status = SCHED_ALLOC_STACK_CACHE;\n+\t\tnew_sched->stack_cache =\n+\t\t\t_lthread_objcache_create(\"stack_cache\",\n+\t\t\t\t\t\tsizeof(struct lthread_stack),\n+\t\t\t\t\t\tLTHREAD_PREALLOC);\n+\t\tif (new_sched->stack_cache == NULL)\n+\t\t\tbreak;\n+\n+\t\t/* Initialize per scheduler local free per lthread data cache */\n+\t\talloc_status = SCHED_ALLOC_PERLT_CACHE;\n+\t\tnew_sched->per_lthread_cache =\n+\t\t\t_lthread_objcache_create(\"per_lt cache\",\n+\t\t\t\t\t\tRTE_PER_LTHREAD_SECTION_SIZE,\n+\t\t\t\t\t\tLTHREAD_PREALLOC);\n+\t\tif (new_sched->per_lthread_cache == NULL)\n+\t\t\tbreak;\n+\n+\t\t/* Initialize per scheduler local free tls cache */\n+\t\talloc_status = SCHED_ALLOC_TLS_CACHE;\n+\t\tnew_sched->tls_cache =\n+\t\t\t_lthread_objcache_create(\"TLS cache\",\n+\t\t\t\t\t\tsizeof(struct lthread_tls),\n+\t\t\t\t\t\tLTHREAD_PREALLOC);\n+\t\tif (new_sched->tls_cache == NULL)\n+\t\t\tbreak;\n+\n+\t\t/* Initialize per scheduler local free cond var cache */\n+\t\talloc_status = SCHED_ALLOC_COND_CACHE;\n+\t\tnew_sched->cond_cache =\n+\t\t\t_lthread_objcache_create(\"cond cache\",\n+\t\t\t\t\t\tsizeof(struct lthread_cond),\n+\t\t\t\t\t\tLTHREAD_PREALLOC);\n+\t\tif (new_sched->cond_cache == NULL)\n+\t\t\tbreak;\n+\n+\t\t/* Initialize per scheduler local free mutex cache */\n+\t\talloc_status = SCHED_ALLOC_MUTEX_CACHE;\n+\t\tnew_sched->mutex_cache =\n+\t\t\t_lthread_objcache_create(\"mutex cache\",\n+\t\t\t\t\t\tsizeof(struct lthread_mutex),\n+\t\t\t\t\t\tLTHREAD_PREALLOC);\n+\t\tif (new_sched->mutex_cache == NULL)\n+\t\t\tbreak;\n+\n+\t\talloc_status = SCHED_ALLOC_OK;\n+\t} while (0);\n+\n+\t/* roll back on any failure */\n+\tswitch (alloc_status) {\n+\tcase SCHED_ALLOC_QNODE_POOL:\n+\t\t_qnode_pool_destroy(new_sched->qnode_pool);\n+\t\t/* fall through */\n+\tcase SCHED_ALLOC_READY_QUEUE:\n+\t\t_lthread_queue_destroy(new_sched->ready);\n+\t\t/* fall through */\n+\tcase SCHED_ALLOC_PREADY_QUEUE:\n+\t\t_lthread_queue_destroy(new_sched->pready);\n+\t\t/* fall through */\n+\tcase SCHED_ALLOC_LTHREAD_CACHE:\n+\t\t_lthread_objcache_destroy(new_sched->lthread_cache);\n+\t\t/* fall through */\n+\tcase SCHED_ALLOC_STACK_CACHE:\n+\t\t_lthread_objcache_destroy(new_sched->stack_cache);\n+\t\t/* fall through */\n+\tcase SCHED_ALLOC_PERLT_CACHE:\n+\t\t_lthread_objcache_destroy(new_sched->per_lthread_cache);\n+\t\t/* fall through */\n+\tcase SCHED_ALLOC_TLS_CACHE:\n+\t\t_lthread_objcache_destroy(new_sched->tls_cache);\n+\t\t/* fall through */\n+\tcase SCHED_ALLOC_COND_CACHE:\n+\t\t_lthread_objcache_destroy(new_sched->cond_cache);\n+\t\t/* fall through 
*/\n+\tcase SCHED_ALLOC_MUTEX_CACHE:\n+\t\t_lthread_objcache_destroy(new_sched->mutex_cache);\n+\t\t/* fall through */\n+\tcase SCHED_ALLOC_OK:\n+\t\tbreak;\n+\t}\n+\treturn alloc_status;\n+}\n+\n+\n+/*\n+ * Create a scheduler on the current lcore\n+ */\n+struct lthread_sched *_lthread_sched_create(size_t stack_size)\n+{\n+\tint status;\n+\tstruct lthread_sched *new_sched;\n+\tunsigned lcoreid = rte_lcore_id();\n+\n+\tLTHREAD_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);\n+\n+\tif (stack_size == 0)\n+\t\tstack_size = LTHREAD_MAX_STACK_SIZE;\n+\n+\tnew_sched =\n+\t     rte_calloc_socket(NULL, 1, sizeof(struct lthread_sched),\n+\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\trte_socket_id());\n+\tif (new_sched == NULL) {\n+\t\tRTE_LOG(CRIT, LTHREAD,\n+\t\t\t\"Failed to allocate memory for scheduler\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\t_lthread_key_pool_init();\n+\n+\tnew_sched->stack_size = stack_size;\n+\tnew_sched->birth = rte_rdtsc();\n+\tTHIS_SCHED = new_sched;\n+\n+\tstatus = _lthread_sched_alloc_resources(new_sched);\n+\tif (status != SCHED_ALLOC_OK) {\n+\t\tRTE_LOG(CRIT, LTHREAD,\n+\t\t\t\"Failed to allocate resources for scheduler code = %d\\n\",\n+\t\t\tstatus);\n+\t\trte_free(new_sched);\n+\t\treturn NULL;\n+\t}\n+\n+\tbzero(&new_sched->ctx, sizeof(struct ctx));\n+\n+\tnew_sched->lcore_id = lcoreid;\n+\n+\tschedcore[lcoreid] = new_sched;\n+\n+\tnew_sched->run_flag = 1;\n+\n+\tDIAG_EVENT(new_sched, LT_DIAG_SCHED_CREATE, rte_lcore_id(), 0);\n+\n+\trte_wmb();\n+\treturn new_sched;\n+}\n+\n+/*\n+ * Set the number of schedulers in the system\n+ */\n+int lthread_num_schedulers_set(int num)\n+{\n+\trte_atomic16_set(&num_schedulers, num);\n+\treturn (int)rte_atomic16_read(&num_schedulers);\n+}\n+\n+/*\n+ * Return the number of schedulers active\n+ */\n+int lthread_active_schedulers(void)\n+{\n+\treturn (int)rte_atomic16_read(&active_schedulers);\n+}\n+\n+/**\n+ * This task-let clears the scheduler's run flag and\n+ * then exits.\n+ */\n+void _sched_shutdown(__rte_unused void *arg)\n+{\n+\tlthread_detach();\n+\t/* clear the run flag */\n+\t(THIS_SCHED)->run_flag = 0;\n+}\n+\n+/**\n+ * shutdown the scheduler running on the specified lcore\n+ */\n+void lthread_scheduler_shutdown(unsigned lcoreid)\n+{\n+\tuint64_t coreid = (uint64_t) lcoreid;\n+\n+\tif (coreid < LTHREAD_MAX_LCORES) {\n+\t\tif (schedcore[coreid] != NULL) {\n+\t\t\tstruct lthread *lt;\n+\n+\t\t\tDIAG_EVENT(schedcore[coreid], LT_DIAG_SCHED_SHUTDOWN,\n+\t\t\t\t   lcoreid, 0);\n+\t\t\tlthread_create(&lt, lcoreid, _sched_shutdown, NULL);\n+\t\t}\n+\t}\n+}\n+\n+/**\n+ * shutdown all schedulers\n+ */\n+void lthread_scheduler_shutdown_all(void)\n+{\n+\tuint64_t i;\n+\tstruct lthread *lt;\n+\n+\t/*\n+\t * give time for all schedulers to have started\n+\t * Note we use sched_yield() rather than pthread_yield() to allow\n+\t * for the possibility of a pthread wrapper on lthread_yield(),\n+\t * something that is not possible unless the scheduler is running.\n+\t */\n+\twhile (rte_atomic16_read(&active_schedulers) <\n+\t       rte_atomic16_read(&num_schedulers))\n+\t\tsched_yield();\n+\n+\tfor (i = 0; i < LTHREAD_MAX_LCORES; i++) {\n+\t\t/* skip current lcore because the current scheduler is needed\n+\t\t * to schedule the threads to shutdown the other schedulers.\n+\t\t */\n+\t\tif (i == rte_lcore_id())\n+\t\t\tcontinue;\n+\t\tif (schedcore[i] != NULL)\n+\t\t\tlthread_create(&lt, i, _sched_shutdown, NULL);\n+\t}\n+\t/* now we can do the current one */\n+\t(THIS_SCHED)->run_flag = 0;\n+}\n+\n+/*\n+ * Resume a suspended lthread\n+ */\n+static 
inline void\n+_lthread_resume(struct lthread *lt) __attribute__ ((always_inline));\n+static inline void _lthread_resume(struct lthread *lt)\n+{\n+\tstruct lthread_sched *sched = THIS_SCHED;\n+\tstruct lthread_stack *s;\n+\tuint64_t state = lt->state;\n+#if LTHREAD_DIAG\n+\tint init = 0;\n+#endif\n+\n+\tif (state & (BIT(ST_LT_CANCELLED) | BIT(ST_LT_EXITED))) {\n+\t\t/* if detached we can free the thread now */\n+\t\tif (state & BIT(ST_LT_DETACH)) {\n+\t\t\t_lthread_free(lt);\n+\t\t\treturn;\n+\t\t}\n+\t}\n+\n+\tif (state & BIT(ST_LT_INIT)) {\n+\t\t/* first time this thread has been run */\n+\t\t/* assign thread to this scheduler */\n+\t\tlt->sched = THIS_SCHED;\n+\n+\t\t/* allocate stack */\n+\t\ts = _stack_alloc();\n+\n+\t\tlt->stack_container = s;\n+\t\t_lthread_set_stack(lt, s->stack, s->stack_size);\n+\n+\t\t/* allocate memory for TLS used by this thread */\n+\t\t_lthread_tls_alloc(lt);\n+\n+\t\tlt->state = BIT(ST_LT_READY);\n+#if LTHREAD_DIAG\n+\t\tinit = 1;\n+#endif\n+\t}\n+\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_RESUMED, init, lt);\n+\n+\t/* switch to the new thread */\n+\tsched->current_lthread = lt;\n+\tctx_switch(&lt->ctx, &sched->ctx);\n+\n+\t/* If posting to a queue that could be read by another lcore\n+\t * we defer the queue write till now to ensure the context has been\n+\t * saved before the other core tries to resume it\n+\t * This applies to blocking on mutex, cond, and to set_affinity\n+\t */\n+\tif (lt->pending_wr_queue != NULL) {\n+\t\tstruct lthread_queue *dest = lt->pending_wr_queue;\n+\n+\t\tlt->pending_wr_queue = NULL;\n+\n+\t\t/* queue the current thread to the specified queue */\n+\t\t_lthread_queue_insert_mp(dest, lt);\n+\t}\n+\n+\tsched->current_lthread = NULL;\n+}\n+\n+/*\n+ * Handle sleep timer expiry\n+*/\n+void\n+_sched_timer_cb(struct rte_timer *tim, void *arg)\n+{\n+\tstruct lthread *lt = (struct lthread *) arg;\n+\tuint64_t state = lt->state;\n+\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_TMR_EXPIRED, &lt->tim, 0);\n+\n+\trte_timer_stop(tim);\n+\n+\tlt->state = state | BIT(ST_LT_EXPIRED);\n+\t_lthread_resume(lt);\n+\tlt->state = state & CLEARBIT(ST_LT_EXPIRED);\n+}\n+\n+\n+\n+/*\n+ * Returns 0 if there is a pending job in scheduler or 1 if done and can exit.\n+ */\n+static inline int _lthread_sched_isdone(struct lthread_sched *sched)\n+{\n+\treturn ((sched->run_flag == 0) &&\n+\t\t\t(_lthread_queue_empty(sched->ready)) &&\n+\t\t\t(_lthread_queue_empty(sched->pready)) &&\n+\t\t\t(sched->nb_blocked_threads == 0));\n+}\n+\n+/*\n+ * Wait for all schedulers to start\n+ */\n+static inline void _lthread_schedulers_sync_start(void)\n+{\n+\trte_atomic16_inc(&active_schedulers);\n+\n+\t/* wait for lthread schedulers\n+\t * Note we use sched_yield() rather than pthread_yield() to allow\n+\t * for the possibility of a pthread wrapper on lthread_yield(),\n+\t * something that is not possible unless the scheduler is running.\n+\t */\n+\twhile (rte_atomic16_read(&active_schedulers) <\n+\t       rte_atomic16_read(&num_schedulers))\n+\t\tsched_yield();\n+\n+}\n+\n+/*\n+ * Wait for all schedulers to stop\n+ */\n+static inline void _lthread_schedulers_sync_stop(void)\n+{\n+\trte_atomic16_dec(&active_schedulers);\n+\trte_atomic16_dec(&num_schedulers);\n+\n+\t/* wait for schedulers\n+\t * Note we use sched_yield() rather than pthread_yield() to allow\n+\t * for the possibility of a pthread wrapper on lthread_yield(),\n+\t * something that is not possible unless the scheduler is running.\n+\t */\n+\twhile (rte_atomic16_read(&active_schedulers) > 0)\n+\t\tsched_yield();\n+\n+}\n+\n+\n+/*\n+ 
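* An illustrative way to drive the scheduler from each lcore (the launcher\n+ * function below is an assumed name, not part of this patch):\n+ *\n+ *   static int sched_main(__rte_unused void *arg)\n+ *   {\n+ *           lthread_run();   -- returns when this scheduler shuts down\n+ *           return 0;\n+ *   }\n+ *\n+ *   -- then, from main() after rte_eal_init():\n+ *   lthread_num_schedulers_set(rte_lcore_count());\n+ *   rte_eal_mp_remote_launch(sched_main, NULL, CALL_MASTER);\n+ *\n+ 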
* Run the lthread scheduler\n+ * This loop is the heart of the system\n+ */\n+void lthread_run(void)\n+{\n+\n+\tstruct lthread_sched *sched = THIS_SCHED;\n+\tstruct lthread *lt = NULL;\n+\n+\tRTE_LOG(INFO, LTHREAD,\n+\t\t\"starting scheduler %p on lcore %u phys core %u\\n\",\n+\t\tsched, rte_lcore_id(),\n+\t\trte_lcore_index(rte_lcore_id()));\n+\n+\t/* if more than one, wait for all schedulers to start */\n+\t_lthread_schedulers_sync_start();\n+\n+\n+\t/*\n+\t * This is the main scheduling loop\n+\t * So long as there are tasks in existence we run this loop.\n+\t * We check for:\n+\t *   expired timers,\n+\t *   the local ready queue,\n+\t *   and the peer ready queue,\n+\t *\n+\t * and resume lthreads ad infinitum.\n+\t */\n+\twhile (!_lthread_sched_isdone(sched)) {\n+\n+\t\trte_timer_manage();\n+\n+\t\tlt = _lthread_queue_poll(sched->ready);\n+\t\tif (lt != NULL)\n+\t\t\t_lthread_resume(lt);\n+\t\tlt = _lthread_queue_poll(sched->pready);\n+\t\tif (lt != NULL)\n+\t\t\t_lthread_resume(lt);\n+\t}\n+\n+\t/* if more than one, wait for all schedulers to stop */\n+\t_lthread_schedulers_sync_stop();\n+\n+\tRTE_LOG(INFO, LTHREAD,\n+\t\t\"stopping scheduler %p on lcore %u phys core %u\\n\",\n+\t\tsched, rte_lcore_id(),\n+\t\trte_lcore_index(rte_lcore_id()));\n+\tfflush(stdout);\n+}\n+\n+/*\n+ * Return the scheduler for the specified lcore\n+ */\n+struct lthread_sched *_lthread_sched_get(int lcore_id)\n+{\n+\tif (lcore_id >= LTHREAD_MAX_LCORES)\n+\t\treturn NULL;\n+\treturn schedcore[lcore_id];\n+}\n+\n+/*\n+ * migrate the current thread to another scheduler running\n+ * on the specified lcore.\n+ */\n+int lthread_set_affinity(unsigned lcoreid)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\tstruct lthread_sched *dest_sched;\n+\n+\tif (unlikely(lcoreid >= LTHREAD_MAX_LCORES))\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_AFFINITY, lcoreid, 0);\n+\n+\tdest_sched = schedcore[lcoreid];\n+\n+\tif (unlikely(dest_sched == NULL))\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\tif (likely(dest_sched != THIS_SCHED)) {\n+\t\tlt->pending_wr_queue = dest_sched->pready;\n+\t\t_affinitize();\n+\t\treturn 0;\n+\t}\n+\treturn 0;\n+}\ndiff --git a/examples/performance-thread/common/lthread_sched.h b/examples/performance-thread/common/lthread_sched.h\nnew file mode 100644\nindex 0000000..f23264c\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_sched.h\n@@ -0,0 +1,152 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. 
All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Some portions of this software is derived from the\n+ * https://github.com/halayli/lthread which carrys the following license.\n+ *\n+ * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>\n+ *\n+ * Redistribution and use in source and binary forms, with or without\n+ * modification, are permitted provided that the following conditions\n+ * are met:\n+ * 1. Redistributions of source code must retain the above copyright\n+ *    notice, this list of conditions and the following disclaimer.\n+ * 2. Redistributions in binary form must reproduce the above copyright\n+ *    notice, this list of conditions and the following disclaimer in the\n+ *    documentation and/or other materials provided with the distribution.\n+ *\n+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n+ * ARE DISCLAIMED.  
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE\n+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n+ * SUCH DAMAGE.\n+ */\n+\n+#ifndef LTHREAD_SCHED_H_\n+#define LTHREAD_SCHED_H_\n+\n+#include \"lthread_int.h\"\n+#include \"lthread_queue.h\"\n+#include \"lthread_objcache.h\"\n+#include \"lthread_diag.h\"\n+#include \"ctx.h\"\n+\n+/*\n+ * insert an lthread into a queue\n+ */\n+static inline void\n+_ready_queue_insert(struct lthread_sched *sched, struct lthread *lt)\n+{\n+\tif (sched == THIS_SCHED)\n+\t\t_lthread_queue_insert_sp((THIS_SCHED)->ready, lt);\n+\telse\n+\t\t_lthread_queue_insert_mp(sched->pready, lt);\n+}\n+\n+/*\n+ * remove an lthread from a queue\n+ */\n+static inline struct lthread *_ready_queue_remove(struct lthread_queue *q)\n+{\n+\treturn _lthread_queue_remove(q);\n+}\n+\n+/**\n+ * Return true if the ready queue is empty\n+ */\n+static inline int _ready_queue_empty(struct lthread_queue *q)\n+{\n+\treturn _lthread_queue_empty(q);\n+}\n+\n+static inline uint64_t _sched_now(void)\n+{\n+\tuint64_t now = rte_rdtsc();\n+\n+\tif (now > (THIS_SCHED)->birth)\n+\t\treturn now - (THIS_SCHED)->birth;\n+\tif (now < (THIS_SCHED)->birth)\n+\t\treturn (THIS_SCHED)->birth - now;\n+\t/* never return 0 because this means sleep forever */\n+\treturn 1;\n+}\n+\n+static inline void\n+_affinitize(void) __attribute__ ((always_inline));\n+static inline void\n+_affinitize(void)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_SUSPENDED, 0, 0);\n+\tctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);\n+}\n+\n+static inline void\n+_suspend(void) __attribute__ ((always_inline));\n+static inline void\n+_suspend(void)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\t(THIS_SCHED)->nb_blocked_threads++;\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_SUSPENDED, 0, 0);\n+\tctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);\n+\t(THIS_SCHED)->nb_blocked_threads--;\n+}\n+\n+static inline void\n+_reschedule(void) __attribute__ ((always_inline));\n+static inline void\n+_reschedule(void)\n+{\n+\tstruct lthread *lt = THIS_LTHREAD;\n+\n+\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_RESCHEDULED, 0, 0);\n+\t_ready_queue_insert(THIS_SCHED, lt);\n+\tctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);\n+}\n+\n+extern struct lthread_sched *schedcore[];\n+void _sched_timer_cb(struct rte_timer *tim, void *arg);\n+void _sched_shutdown(__rte_unused void *arg);\n+\n+\n+#endif\t\t\t\t/* LTHREAD_SCHED_H_ */\ndiff --git a/examples/performance-thread/common/lthread_timer.h b/examples/performance-thread/common/lthread_timer.h\nnew file mode 100644\nindex 0000000..7616694\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_timer.h\n@@ -0,0 +1,47 @@\n+/* <COPYRIGHT_TAG>\n+  */\n+#ifndef LTHREAD_TIMER_H_\n+#define LTHREAD_TIMER_H_\n+\n+#include \"lthread_int.h\"\n+#include \"lthread_sched.h\"\n+\n+\n+static inline uint64_t\n+_ns_to_clks(uint64_t ns)\n+{\n+\tunsigned __int128 clkns = rte_get_tsc_hz();\n+\n+\tclkns *= ns;\n+\tclkns /= 1000000000;\n+\treturn (uint64_t) clkns;\n+}\n+\n+\n+static inline void\n+_timer_start(struct lthread *lt, uint64_t clks)\n+{\n+\tif (clks > 0) {\n+\t\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_TMR_START, 
&lt->tim, clks);\n+\t\trte_timer_init(&lt->tim);\n+\t\trte_timer_reset(&lt->tim,\n+\t\t\t\tclks,\n+\t\t\t\tSINGLE,\n+\t\t\t\trte_lcore_id(),\n+\t\t\t\t_sched_timer_cb,\n+\t\t\t\t(void *)lt);\n+\t}\n+}\n+\n+\n+static inline void\n+_timer_stop(struct lthread *lt)\n+{\n+\tif (lt != NULL) {\n+\t\tDIAG_EVENT(lt, LT_DIAG_LTHREAD_TMR_DELETE, &lt->tim, 0);\n+\t\trte_timer_stop(&lt->tim);\n+\t}\n+}\n+\n+\n+#endif /* LTHREAD_TIMER_H_ */\ndiff --git a/examples/performance-thread/common/lthread_tls.c b/examples/performance-thread/common/lthread_tls.c\nnew file mode 100644\nindex 0000000..1aae708\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_tls.c\n@@ -0,0 +1,242 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+#include <stdint.h>\n+#include <limits.h>\n+#include <inttypes.h>\n+#include <unistd.h>\n+#include <pthread.h>\n+#include <fcntl.h>\n+#include <sys/time.h>\n+#include <sys/mman.h>\n+#include <execinfo.h>\n+#include <sched.h>\n+\n+#include <rte_config.h>\n+#include <rte_malloc.h>\n+#include <rte_log.h>\n+#include <rte_ring.h>\n+#include <rte_atomic_64.h>\n+\n+#include \"lthread_tls.h\"\n+#include \"lthread_queue.h\"\n+#include \"lthread_objcache.h\"\n+#include \"lthread_sched.h\"\n+\n+static struct rte_ring *key_pool;\n+static uint64_t key_pool_init;\n+\n+/* needed to cause section start and end to be defined */\n+RTE_DEFINE_PER_LTHREAD(void *, dummy);\n+\n+static struct lthread_key key_table[LTHREAD_MAX_KEYS];\n+\n+void lthread_tls_ctor(void) __attribute__((constructor));\n+void\n+lthread_tls_ctor(void) {\n+\tkey_pool = NULL;\n+\tkey_pool_init = 0;\n+}\n+\n+/*\n+ * Initialize a pool of keys\n+ * These are unique tokens that can be obtained by threads\n+ * calling lthread_key_create()\n+ */\n+void _lthread_key_pool_init(void)\n+{\n+\tstatic struct rte_ring *pool;\n+\tstruct lthread_key *new_key;\n+\n+\tbzero(key_table, sizeof(key_table));\n+\n+\t/* only one lcore should do this */\n+\tif (rte_atomic64_cmpset(&key_pool_init, 0, 1)) {\n+\t\tpool = rte_ring_create(\"lthread_key_pool\",\n+\t\t\t\t\tLTHREAD_MAX_KEYS, 0, 0);\n+\t\tLTHREAD_ASSERT(pool);\n+\n+\t\tint i;\n+\n+\t\tfor (i = 0; i < LTHREAD_MAX_KEYS; i++) {\n+\t\t\tnew_key = &key_table[i];\n+\n+\t\t\trte_atomic32_init(&new_key->ref_cnt);\n+\t\t\tnew_key->value = i;\n+\t\t\trte_ring_mp_enqueue((struct rte_ring *)pool,\n+\t\t\t\t\t\t(void *)new_key);\n+\t\t}\n+\t\tkey_pool = pool;\n+\t}\n+\t/* other lcores wait here till done */\n+\twhile (key_pool == NULL) {\n+\t\trte_compiler_barrier();\n+\t\tsched_yield();\n+\t};\n+}\n+\n+/*\n+ * Create a key\n+ * This means getting a key from the pool\n+ */\n+int lthread_key_create(unsigned int *key, tls_destructor_func destructor)\n+{\n+\tif (key == NULL)\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\tstruct lthread_key *new_key;\n+\n+\tif (rte_ring_mc_dequeue((struct rte_ring *)key_pool, (void **)&new_key)\n+\t    == 0) {\n+\t\tnew_key->destructor = destructor;\n+\t\t*key = new_key->value;\n+\t\treturn 0;\n+\t}\n+\treturn POSIX_ERRNO(EAGAIN);\n+}\n+\n+/*\n+ * Delete a key\n+ * This means returning a key to the pool\n+ * It must be safe for threads that have migrated to other cores to delete a key\n+ */\n+int lthread_key_delete(unsigned int k)\n+{\n+\tstruct lthread_key *key;\n+\n+\tif (k >= LTHREAD_MAX_KEYS)\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\tkey = &key_table[k];\n+\n+\tif (rte_atomic32_add_return(&key->ref_cnt, -1) == 0) {\n+\t\tkey->destructor = NULL;\n+\t\trte_ring_mp_enqueue((struct rte_ring *)key_pool,\n+\t\t\t\t\t(void *)key);\n+\t}\n+\treturn 0;\n+}\n+\n+/*\n+ * Delete a key by its ref\n+ * This means returning a key to the pool\n+ * It must be safe for threads on 
other cores to delete a key\n+ */\n+void _lthread_tls_delete_ref(struct tls *tls)\n+{\n+\tint val;\n+\tstruct lthread_key *key = (struct lthread_key *)tls->key;\n+\n+\ttls->key = NULL;\n+\ttls->data = NULL;\n+\n+\tif (key == NULL)\n+\t\treturn;\n+\n+\tval = key->value;\n+\n+\tif (val < LTHREAD_MAX_KEYS) {\n+\t\tif (rte_atomic32_add_return(&key->ref_cnt, -1) == 0) {\n+\t\t\tkey->destructor = NULL;\n+\t\t\trte_ring_mp_enqueue((struct rte_ring *)key_pool,\n+\t\t\t\t\t    (void *)key);\n+\t\t}\n+\t}\n+}\n+\n+/*\n+ * Return the pointer associated with a key\n+ * If the key is no longer valid, return NULL\n+ */\n+void\n+*lthread_getspecific(unsigned int k)\n+{\n+\n+\tif (k >= LTHREAD_MAX_KEYS)\n+\t\treturn NULL;\n+\n+\tstruct lthread_key *key = &key_table[k];\n+\n+\tif (rte_atomic32_read(&key->ref_cnt) > 0)\n+\t\treturn THIS_LTHREAD->tls->keys[key->value].data;\n+\n+\t/* this key is no longer in use */\n+\treturn NULL;\n+}\n+\n+/*\n+ * Set a value against a key\n+ * If the key is no longer valid, return an error\n+ * when storing the value\n+ */\n+int lthread_setspecific(unsigned int k, const void *value)\n+{\n+\tstruct lthread_key *key;\n+\n+\tif (k >= LTHREAD_MAX_KEYS)\n+\t\treturn POSIX_ERRNO(EINVAL);\n+\n+\tkey = &key_table[k];\n+\n+\trte_atomic32_inc(&key->ref_cnt);\n+\n+\t/* discard const qualifier */\n+\tchar *p = (char *) (uintptr_t) value;\n+\n+\tTHIS_LTHREAD->tls->keys[key->value].data = (void *) p;\n+\treturn 0;\n+}\n+\n+/*\n+ * Allocate data for TLS cache\n+ */\n+void _lthread_tls_alloc(struct lthread *lt)\n+{\n+\tstruct lthread_tls *tls;\n+\n+\ttls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);\n+\n+\tLTHREAD_ASSERT(tls != NULL);\n+\n+\ttls->root_sched = (THIS_SCHED);\n+\tlt->tls = tls;\n+\n+\t/* allocate data for TLS variables using RTE_PER_LTHREAD macros */\n+\tif (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE) {\n+\t\tlt->per_lthread_data =\n+\t\t    _lthread_objcache_alloc((THIS_SCHED)->per_lthread_cache);\n+\t}\n+}\ndiff --git a/examples/performance-thread/common/lthread_tls.h b/examples/performance-thread/common/lthread_tls.h\nnew file mode 100644\nindex 0000000..a690078\n--- /dev/null\n+++ b/examples/performance-thread/common/lthread_tls.h\n@@ -0,0 +1,64 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n+ *   All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+#ifndef LTHREAD_TLS_H_\n+#define LTHREAD_TLS_H_\n+\n+#include \"lthread_api.h\"\n+\n+\n+#define RTE_PER_LTHREAD_SECTION_SIZE \\\n+(&__stop_per_lt - &__start_per_lt)\n+\n+struct lthread_key {\n+\tunsigned int value;\n+\trte_atomic32_t ref_cnt;\n+\ttls_destructor_func destructor;\n+};\n+\n+struct tls {\n+\tstruct lthread_key *key;\n+\tvoid *data;\n+};\n+\n+struct lthread_tls {\n+\tstruct tls keys[LTHREAD_MAX_KEYS];\n+\tstruct lthread_sched *root_sched;\n+};\n+\n+void _lthread_tls_delete_ref(struct tls *tls);\n+void _lthread_key_pool_init(void);\n+void _lthread_tls_alloc(struct lthread *lt);\n+\n+\n+#endif\t\t\t\t/* LTHREAD_TLS_H_ */\n",
    "prefixes": [
        "dpdk-dev",
        "v1",
        "2/5"
    ]
}