get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request body are changed.

put:
Update a patch, replacing all of its writable fields.
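
These operations can be exercised with any HTTP client. The sketch below is a minimal, non-authoritative example using the Python requests library (an assumption; the token header scheme and the choice of "state" as the updated field are illustrative and not documented by this page). It reproduces the GET exchange shown below and outlines a PATCH partial update, which requires an API token and maintainer permissions.

import requests

URL = "http://patches.dpdk.org/api/patches/54422/"

# GET: show the patch (read access needs no authentication).
resp = requests.get(URL)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])

# PATCH: partial update of a writable field (hedged example; the token
# authentication header and the writable "state" field are assumptions).
# resp = requests.patch(
#     URL,
#     headers={"Authorization": "Token <your-api-token>"},
#     json={"state": "superseded"},
# )
# resp.raise_for_status()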

GET /api/patches/54422/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 54422,
    "url": "http://patches.dpdk.org/api/patches/54422/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1559750328-22377-4-git-send-email-phil.yang@arm.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1559750328-22377-4-git-send-email-phil.yang@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1559750328-22377-4-git-send-email-phil.yang@arm.com",
    "date": "2019-06-05T15:58:48",
    "name": "[v1,3/3] test/mcslock: add mcs queued lock unit test",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "cc14721c33c0af4b6f778fb73c428f3351128ad0",
    "submitter": {
        "id": 833,
        "url": "http://patches.dpdk.org/api/people/833/?format=api",
        "name": "Phil Yang",
        "email": "phil.yang@arm.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1559750328-22377-4-git-send-email-phil.yang@arm.com/mbox/",
    "series": [
        {
            "id": 4910,
            "url": "http://patches.dpdk.org/api/series/4910/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4910",
            "date": "2019-06-05T15:58:45",
            "name": "MCS queued lock implementation",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/4910/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/54422/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/54422/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 4771B1BBE3;\n\tWed,  5 Jun 2019 17:59:48 +0200 (CEST)",
            "from foss.arm.com (foss.arm.com [217.140.101.70])\n\tby dpdk.org (Postfix) with ESMTP id 980131BBDB\n\tfor <dev@dpdk.org>; Wed,  5 Jun 2019 17:59:45 +0200 (CEST)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.72.51.249])\n\tby usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 006AF374;\n\tWed,  5 Jun 2019 08:59:45 -0700 (PDT)",
            "from phil-VirtualBox.shanghai.arm.com (unknown [10.171.20.59])\n\tby usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id\n\t7D5543F246; Wed,  5 Jun 2019 08:59:43 -0700 (PDT)"
        ],
        "From": "Phil Yang <phil.yang@arm.com>",
        "To": "dev@dpdk.org",
        "Cc": "thomas@monjalon.net, jerinj@marvell.com, hemant.agrawal@nxp.com,\n\tHonnappa.Nagarahalli@arm.com, gavin.hu@arm.com, phil.yang@arm.com,\n\tnd@arm.com",
        "Date": "Wed,  5 Jun 2019 23:58:48 +0800",
        "Message-Id": "<1559750328-22377-4-git-send-email-phil.yang@arm.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1559750328-22377-1-git-send-email-phil.yang@arm.com>",
        "References": "<1559750328-22377-1-git-send-email-phil.yang@arm.com>",
        "Subject": "[dpdk-dev] [PATCH v1 3/3] test/mcslock: add mcs queued lock unit\n\ttest",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Unit test and perf test for MCS queued lock.\n\nSigned-off-by: Phil Yang <phil.yang@arm.com>\nReviewed-by: Gavin Hu <gavin.hu@arm.com>\nReviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\n\n---\n MAINTAINERS                     |   1 +\n app/test/Makefile               |   1 +\n app/test/autotest_data.py       |   6 +\n app/test/autotest_test_funcs.py |  32 ++++++\n app/test/meson.build            |   2 +\n app/test/test_mcslock.c         | 248 ++++++++++++++++++++++++++++++++++++++++\n 6 files changed, 290 insertions(+)\n create mode 100644 app/test/test_mcslock.c",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex 1390238..33fdc8f 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -225,6 +225,7 @@ F: app/test/test_ticketlock.c\n MCSlock - EXPERIMENTAL\n M: Phil Yang <phil.yang@arm.com>\n F: lib/librte_eal/common/include/generic/rte_mcslock.h\n+F: app/test/test_mcslock.c\n \n ARM v7\n M: Jan Viktorin <viktorin@rehivetech.com>\ndiff --git a/app/test/Makefile b/app/test/Makefile\nindex 68d6b4f..be405cd 100644\n--- a/app/test/Makefile\n+++ b/app/test/Makefile\n@@ -64,6 +64,7 @@ SRCS-y += test_atomic.c\n SRCS-y += test_barrier.c\n SRCS-y += test_malloc.c\n SRCS-y += test_cycles.c\n+SRCS-y += test_mcslock.c\n SRCS-y += test_spinlock.c\n SRCS-y += test_ticketlock.c\n SRCS-y += test_memory.c\ndiff --git a/app/test/autotest_data.py b/app/test/autotest_data.py\nindex 0f2c9a7..68ca23d 100644\n--- a/app/test/autotest_data.py\n+++ b/app/test/autotest_data.py\n@@ -177,6 +177,12 @@\n         \"Report\":  None,\n     },\n     {\n+        \"Name\":    \"MCSlock autotest\",\n+        \"Command\": \"mcslock_autotest\",\n+        \"Func\":    mcslock_autotest,\n+        \"Report\":  None,\n+    },\n+    {\n         \"Name\":    \"Byte order autotest\",\n         \"Command\": \"byteorder_autotest\",\n         \"Func\":    default_autotest,\ndiff --git a/app/test/autotest_test_funcs.py b/app/test/autotest_test_funcs.py\nindex 31cc0f5..26688b7 100644\n--- a/app/test/autotest_test_funcs.py\n+++ b/app/test/autotest_test_funcs.py\n@@ -164,6 +164,38 @@ def ticketlock_autotest(child, test_name):\n \n     return 0, \"Success\"\n \n+def mcslock_autotest(child, test_name):\n+    i = 0\n+    ir = 0\n+    child.sendline(test_name)\n+    while True:\n+        index = child.expect([\"Test OK\",\n+                              \"Test Failed\",\n+                              \"lcore ([0-9]*) state: ([0-1])\"\n+                              \"MCS lock taken on core ([0-9]*)\",\n+                              \"MCS lock released on core ([0-9]*)\",\n+                              pexpect.TIMEOUT], timeout=5)\n+        # ok\n+        if index == 0:\n+            break\n+\n+        # message, check ordering\n+        elif index == 2:\n+            if int(child.match.groups()[0]) < i:\n+                return -1, \"Fail [Bad order]\"\n+            i = int(child.match.groups()[0])\n+        elif index == 3:\n+            if int(child.match.groups()[0]) < ir:\n+                return -1, \"Fail [Bad order]\"\n+            ir = int(child.match.groups()[0])\n+\n+        # fail\n+        elif index == 4:\n+            return -1, \"Fail [Timeout]\"\n+        elif index == 1:\n+            return -1, \"Fail\"\n+\n+    return 0, \"Success\"\n \n def logs_autotest(child, test_name):\n     child.sendline(test_name)\ndiff --git a/app/test/meson.build b/app/test/meson.build\nindex 83391ce..3f5f17a 100644\n--- a/app/test/meson.build\n+++ b/app/test/meson.build\n@@ -75,6 +75,7 @@ test_sources = files('commands.c',\n \t'test_memzone.c',\n \t'test_meter.c',\n \t'test_metrics.c',\n+\t'test_mcslock.c',\n \t'test_mp_secondary.c',\n \t'test_pdump.c',\n \t'test_per_lcore.c',\n@@ -167,6 +168,7 @@ fast_parallel_test_names = [\n         'lpm6_autotest',\n         'malloc_autotest',\n         'mbuf_autotest',\n+        'mcslock_autotest',\n         'memcpy_autotest',\n         'memory_autotest',\n         'mempool_autotest',\ndiff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c\nnew file mode 100644\nindex 0000000..a2274e5\n--- /dev/null\n+++ b/app/test/test_mcslock.c\n@@ -0,0 +1,248 @@\n+/* 
SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019 Arm Limited\n+ */\n+\n+#include <stdio.h>\n+#include <stdint.h>\n+#include <inttypes.h>\n+#include <string.h>\n+#include <unistd.h>\n+#include <sys/queue.h>\n+\n+#include <rte_common.h>\n+#include <rte_memory.h>\n+#include <rte_per_lcore.h>\n+#include <rte_launch.h>\n+#include <rte_eal.h>\n+#include <rte_lcore.h>\n+#include <rte_cycles.h>\n+#include <rte_mcslock.h>\n+#include <rte_atomic.h>\n+\n+#include \"test.h\"\n+\n+/*\n+ * RTE MCS lock test\n+ * =================\n+ *\n+ * These tests are derived from spin lock test cases.\n+ *\n+ * - The functional test takes all of these locks and launches the\n+ *   ''test_mcslock_per_core()'' function on each core (except the master).\n+ *\n+ *   - The function takes the global lock, display something, then releases\n+ *     the global lock on each core.\n+ *\n+ * - A load test is carried out, with all cores attempting to lock a single\n+ *   lock multiple times.\n+ */\n+#include <rte_per_lcore.h>\n+\n+RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_me);\n+RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_try_me);\n+RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_perf_me);\n+\n+rte_mcslock_t *p_ml;\n+rte_mcslock_t *p_ml_try;\n+rte_mcslock_t *p_ml_perf;\n+\n+static unsigned int count;\n+\n+static rte_atomic32_t synchro;\n+\n+static int\n+test_mcslock_per_core(__attribute__((unused)) void *arg)\n+{\n+\t/* Per core me node. */\n+\trte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);\n+\n+\trte_mcslock_lock(&p_ml, &ml_me);\n+\tprintf(\"MCS lock taken on core %u\\n\", rte_lcore_id());\n+\trte_mcslock_unlock(&p_ml, &ml_me);\n+\tprintf(\"MCS lock released on core %u\\n\", rte_lcore_id());\n+\n+\treturn 0;\n+}\n+\n+static uint64_t time_count[RTE_MAX_LCORE] = {0};\n+\n+#define MAX_LOOP 10000\n+\n+static int\n+load_loop_fn(void *func_param)\n+{\n+\tuint64_t time_diff = 0, begin;\n+\tuint64_t hz = rte_get_timer_hz();\n+\tvolatile uint64_t lcount = 0;\n+\tconst int use_lock = *(int *)func_param;\n+\tconst unsigned int lcore = rte_lcore_id();\n+\n+\t/**< Per core me node. 
*/\n+\trte_mcslock_t ml_perf_me = RTE_PER_LCORE(_ml_perf_me);\n+\n+\t/* wait synchro */\n+\twhile (rte_atomic32_read(&synchro) == 0)\n+\t\t;\n+\n+\tbegin = rte_get_timer_cycles();\n+\twhile (lcount < MAX_LOOP) {\n+\t\tif (use_lock)\n+\t\t\trte_mcslock_lock(&p_ml_perf, &ml_perf_me);\n+\n+\t\tlcount++;\n+\t\tif (use_lock)\n+\t\t\trte_mcslock_unlock(&p_ml_perf, &ml_perf_me);\n+\t}\n+\ttime_diff = rte_get_timer_cycles() - begin;\n+\ttime_count[lcore] = time_diff * 1000000 / hz;\n+\treturn 0;\n+}\n+\n+static int\n+test_mcslock_perf(void)\n+{\n+\tunsigned int i;\n+\tuint64_t total = 0;\n+\tint lock = 0;\n+\tconst unsigned int lcore = rte_lcore_id();\n+\n+\tprintf(\"\\nTest with no lock on single core...\\n\");\n+\trte_atomic32_set(&synchro, 1);\n+\tload_loop_fn(&lock);\n+\tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n+\t\t\tlcore, time_count[lcore]);\n+\tmemset(time_count, 0, sizeof(time_count));\n+\n+\tprintf(\"\\nTest with lock on single core...\\n\");\n+\tlock = 1;\n+\trte_atomic32_set(&synchro, 1);\n+\tload_loop_fn(&lock);\n+\tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n+\t\t\tlcore, time_count[lcore]);\n+\tmemset(time_count, 0, sizeof(time_count));\n+\n+\tprintf(\"\\nTest with lock on %u cores...\\n\", (rte_lcore_count()-1));\n+\n+\trte_atomic32_set(&synchro, 0);\n+\trte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);\n+\trte_atomic32_set(&synchro, 1);\n+\n+\trte_eal_mp_wait_lcore();\n+\n+\tRTE_LCORE_FOREACH_SLAVE(i) {\n+\t\tprintf(\"Core [%u] Cost Time = %\"PRIu64\" us\\n\",\n+\t\t\t\ti, time_count[i]);\n+\t\ttotal += time_count[i];\n+\t}\n+\n+\tprintf(\"Total Cost Time = %\"PRIu64\" us\\n\", total);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * Use rte_mcslock_trylock() to trylock a mcs lock object,\n+ * If it could not lock the object successfully, it would\n+ * return immediately.\n+ */\n+static int\n+test_mcslock_try(__attribute__((unused)) void *arg)\n+{\n+\t/**< Per core me node. */\n+\trte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);\n+\trte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);\n+\n+\t/* Locked ml_try in the master lcore, so it should fail\n+\t * when trying to lock it in the slave lcore.\n+\t */\n+\tif (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0) {\n+\t\trte_mcslock_lock(&p_ml, &ml_me);\n+\t\tcount++;\n+\t\trte_mcslock_unlock(&p_ml, &ml_me);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+\n+/*\n+ * Test rte_eal_get_lcore_state() in addition to mcs locks\n+ * as we have \"waiting\" then \"running\" lcores.\n+ */\n+static int\n+test_mcslock(void)\n+{\n+\tint ret = 0;\n+\tint i;\n+\n+\t/* Define per core me node. 
*/\n+\trte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);\n+\trte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);\n+\n+\t/*\n+\t * Test mcs lock & unlock on each core\n+\t */\n+\n+\t/* slave cores should be waiting: print it */\n+\tRTE_LCORE_FOREACH_SLAVE(i) {\n+\t\tprintf(\"lcore %d state: %d\\n\", i,\n+\t\t\t\t(int) rte_eal_get_lcore_state(i));\n+\t}\n+\n+\trte_mcslock_lock(&p_ml, &ml_me);\n+\n+\tRTE_LCORE_FOREACH_SLAVE(i) {\n+\t\trte_eal_remote_launch(test_mcslock_per_core, NULL, i);\n+\t}\n+\n+\t/* slave cores should be busy: print it */\n+\tRTE_LCORE_FOREACH_SLAVE(i) {\n+\t\tprintf(\"lcore %d state: %d\\n\", i,\n+\t\t\t\t(int) rte_eal_get_lcore_state(i));\n+\t}\n+\n+\trte_mcslock_unlock(&p_ml, &ml_me);\n+\n+\trte_eal_mp_wait_lcore();\n+\n+\t/*\n+\t * Test if it could return immediately from try-locking a locked object.\n+\t * Here it will lock the mcs lock object first, then launch all the\n+\t * slave lcores to trylock the same mcs lock object.\n+\t * All the slave lcores should give up try-locking a locked object and\n+\t * return immediately, and then increase the \"count\" initialized with\n+\t * zero by one per times.\n+\t * We can check if the \"count\" is finally equal to the number of all\n+\t * slave lcores to see if the behavior of try-locking a locked\n+\t * mcslock object is correct.\n+\t */\n+\tif (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0)\n+\t\treturn -1;\n+\n+\tcount = 0;\n+\tRTE_LCORE_FOREACH_SLAVE(i) {\n+\t\trte_eal_remote_launch(test_mcslock_try, NULL, i);\n+\t}\n+\trte_mcslock_unlock(&p_ml_try, &ml_try_me);\n+\trte_eal_mp_wait_lcore();\n+\n+\t/* Test is_locked API */\n+\tif (rte_mcslock_is_locked(p_ml)) {\n+\t\tprintf(\"mcslock is locked but it should not be\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\t/* Counting the locked times in each core */\n+\trte_mcslock_lock(&p_ml, &ml_me);\n+\tif (count != (rte_lcore_count() - 1))\n+\t\tret = -1;\n+\trte_mcslock_unlock(&p_ml, &ml_me);\n+\n+\t/* mcs lock perf test */\n+\tif (test_mcslock_perf() < 0)\n+\t\treturn -1;\n+\n+\treturn ret;\n+}\n+\n+REGISTER_TEST_COMMAND(mcslock_autotest, test_mcslock);\n",
    "prefixes": [
        "v1",
        "3/3"
    ]
}
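
The "mbox" URL in the payload above points at the raw patch email. The following is a minimal sketch, again assuming the Python requests library is available (the local file name 54422.mbox is arbitrary), that downloads the mbox so it can be applied locally, e.g. with git am:

import requests

MBOX_URL = ("http://patches.dpdk.org/project/dpdk/patch/"
            "1559750328-22377-4-git-send-email-phil.yang@arm.com/mbox/")

# Download the raw mbox for this patch.
resp = requests.get(MBOX_URL)
resp.raise_for_status()

# Write it out; it can then be applied with `git am 54422.mbox`.
with open("54422.mbox", "wb") as f:
    f.write(resp.content)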