get:
Show a patch.

patch:
Update a patch (partial update; only the supplied fields are changed).

put:
Update a patch (full update).
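
A minimal sketch of reading this endpoint with Python's standard library only; the URL and patch id are taken from the example transcript below, everything else is illustrative. Note that "?format=api" in the transcript selects the browsable HTML rendering, so the sketch asks for JSON explicitly:

import json
import urllib.request

# Request the JSON representation of a single patch.
# Read access is assumed to be anonymous on this instance.
req = urllib.request.Request(
    "http://patches.dpdk.org/api/patches/69819/",
    headers={"Accept": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    patch = json.load(resp)

# A few of the fields shown in the response body below.
print(patch["name"], patch["state"], patch["check"])
print(patch["mbox"])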

GET /api/patches/69819/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 69819,
    "url": "http://patches.dpdk.org/api/patches/69819/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1588760683-11027-7-git-send-email-phil.yang@arm.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1588760683-11027-7-git-send-email-phil.yang@arm.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1588760683-11027-7-git-send-email-phil.yang@arm.com",
    "date": "2020-05-06T10:24:43",
    "name": "[v5,6/6] service: relax barriers with C11 atomics",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "f2f64bb66b93a880f2879af14fc0f7779b49978a",
    "submitter": {
        "id": 833,
        "url": "http://patches.dpdk.org/api/people/833/?format=api",
        "name": "Phil Yang",
        "email": "phil.yang@arm.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patches.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1588760683-11027-7-git-send-email-phil.yang@arm.com/mbox/",
    "series": [
        {
            "id": 9863,
            "url": "http://patches.dpdk.org/api/series/9863/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=9863",
            "date": "2020-05-06T10:24:37",
            "name": "use c11 atomics for service core lib",
            "version": 5,
            "mbox": "http://patches.dpdk.org/series/9863/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/69819/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/69819/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5BF0FA034F;\n\tWed,  6 May 2020 12:26:34 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 3A77E1D905;\n\tWed,  6 May 2020 12:26:16 +0200 (CEST)",
            "from foss.arm.com (foss.arm.com [217.140.110.172])\n by dpdk.org (Postfix) with ESMTP id 615211D921\n for <dev@dpdk.org>; Wed,  6 May 2020 12:26:15 +0200 (CEST)",
            "from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14])\n by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id D6697101E;\n Wed,  6 May 2020 03:26:14 -0700 (PDT)",
            "from phil-VirtualBox.shanghai.arm.com\n (phil-VirtualBox.shanghai.arm.com [10.169.108.145])\n by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id DA6AF3F71F;\n Wed,  6 May 2020 03:26:11 -0700 (PDT)"
        ],
        "From": "Phil Yang <phil.yang@arm.com>",
        "To": "dev@dpdk.org,\n\tharry.van.haaren@intel.com",
        "Cc": "thomas@monjalon.net, david.marchand@redhat.com,\n konstantin.ananyev@intel.com, jerinj@marvell.com, hemant.agrawal@nxp.com,\n gage.eads@intel.com, bruce.richardson@intel.com,\n Honnappa.Nagarahalli@arm.com, nd@arm.com",
        "Date": "Wed,  6 May 2020 18:24:43 +0800",
        "Message-Id": "<1588760683-11027-7-git-send-email-phil.yang@arm.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1588760683-11027-1-git-send-email-phil.yang@arm.com>",
        "References": "<20200505211732.25291-1-honnappa.nagarahalli@arm.com>\n <1588760683-11027-1-git-send-email-phil.yang@arm.com>",
        "Subject": "[dpdk-dev] [PATCH v5 6/6] service: relax barriers with C11 atomics",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The runstate, comp_runstate and app_runstate are used as guard variables\nin the service core lib. To guarantee the inter-threads visibility of\nthese guard variables, it uses rte_smp_r/wmb. This patch use c11 atomic\nbuilt-ins to relax these barriers.\n\nSigned-off-by: Phil Yang <phil.yang@arm.com>\nReviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>\nAcked-by: Harry van Haaren <harry.van.haaren@intel.com>\n---\n lib/librte_eal/common/rte_service.c | 115 ++++++++++++++++++++++++++----------\n 1 file changed, 84 insertions(+), 31 deletions(-)",
    "diff": "diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c\nindex 5d35f8a..3bae7d6 100644\n--- a/lib/librte_eal/common/rte_service.c\n+++ b/lib/librte_eal/common/rte_service.c\n@@ -265,7 +265,6 @@ rte_service_component_register(const struct rte_service_spec *spec,\n \ts->spec = *spec;\n \ts->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;\n \n-\trte_smp_wmb();\n \trte_service_count++;\n \n \tif (id_ptr)\n@@ -282,7 +281,6 @@ rte_service_component_unregister(uint32_t id)\n \tSERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);\n \n \trte_service_count--;\n-\trte_smp_wmb();\n \n \ts->internal_flags &= ~(SERVICE_F_REGISTERED);\n \n@@ -301,12 +299,17 @@ rte_service_component_runstate_set(uint32_t id, uint32_t runstate)\n \tstruct rte_service_spec_impl *s;\n \tSERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);\n \n+\t/* comp_runstate act as the guard variable. Use store-release\n+\t * memory order. This synchronizes with load-acquire in\n+\t * service_run and service_runstate_get function.\n+\t */\n \tif (runstate)\n-\t\ts->comp_runstate = RUNSTATE_RUNNING;\n+\t\t__atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,\n+\t\t\t\t__ATOMIC_RELEASE);\n \telse\n-\t\ts->comp_runstate = RUNSTATE_STOPPED;\n+\t\t__atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,\n+\t\t\t\t__ATOMIC_RELEASE);\n \n-\trte_smp_wmb();\n \treturn 0;\n }\n \n@@ -316,12 +319,17 @@ rte_service_runstate_set(uint32_t id, uint32_t runstate)\n \tstruct rte_service_spec_impl *s;\n \tSERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);\n \n+\t/* app_runstate act as the guard variable. Use store-release\n+\t * memory order. This synchronizes with load-acquire in\n+\t * service_run runstate_get function.\n+\t */\n \tif (runstate)\n-\t\ts->app_runstate = RUNSTATE_RUNNING;\n+\t\t__atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,\n+\t\t\t\t__ATOMIC_RELEASE);\n \telse\n-\t\ts->app_runstate = RUNSTATE_STOPPED;\n+\t\t__atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,\n+\t\t\t\t__ATOMIC_RELEASE);\n \n-\trte_smp_wmb();\n \treturn 0;\n }\n \n@@ -330,15 +338,24 @@ rte_service_runstate_get(uint32_t id)\n {\n \tstruct rte_service_spec_impl *s;\n \tSERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);\n-\trte_smp_rmb();\n \n-\tint check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);\n-\tint lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,\n+\t/* comp_runstate and app_runstate act as the guard variables.\n+\t * Use load-acquire memory order. This synchronizes with\n+\t * store-release in service state set functions.\n+\t */\n+\tif (__atomic_load_n(&s->comp_runstate,\n+\t\t\t__ATOMIC_ACQUIRE) == RUNSTATE_RUNNING &&\n+\t\t __atomic_load_n(&s->app_runstate,\n+\t\t\t__ATOMIC_ACQUIRE) == RUNSTATE_RUNNING) {\n+\t\tint check_disabled = !(s->internal_flags &\n+\t\t\t\t\tSERVICE_F_START_CHECK);\n+\t\tint lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,\n \t\t\t\t\t    __ATOMIC_RELAXED) > 0);\n \n-\treturn (s->app_runstate == RUNSTATE_RUNNING) &&\n-\t\t(s->comp_runstate == RUNSTATE_RUNNING) &&\n-\t\t(check_disabled | lcore_mapped);\n+\t\treturn (check_disabled | lcore_mapped);\n+\t} else\n+\t\treturn 0;\n+\n }\n \n static inline void\n@@ -367,9 +384,15 @@ service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,\n \tif (!s)\n \t\treturn -EINVAL;\n \n-\tif (s->comp_runstate != RUNSTATE_RUNNING ||\n-\t\t\ts->app_runstate != RUNSTATE_RUNNING ||\n-\t\t\t!(service_mask & (UINT64_C(1) << i))) {\n+\t/* comp_runstate and app_runstate act as the guard variables.\n+\t * Use load-acquire memory order. 
This synchronizes with\n+\t * store-release in service state set functions.\n+\t */\n+\tif (__atomic_load_n(&s->comp_runstate,\n+\t\t\t__ATOMIC_ACQUIRE) != RUNSTATE_RUNNING ||\n+\t\t __atomic_load_n(&s->app_runstate,\n+\t\t\t__ATOMIC_ACQUIRE) != RUNSTATE_RUNNING ||\n+\t\t!(service_mask & (UINT64_C(1) << i))) {\n \t\tcs->service_active_on_lcore[i] = 0;\n \t\treturn -ENOEXEC;\n \t}\n@@ -434,7 +457,12 @@ service_runner_func(void *arg)\n \tconst int lcore = rte_lcore_id();\n \tstruct core_state *cs = &lcore_states[lcore];\n \n-\twhile (cs->runstate == RUNSTATE_RUNNING) {\n+\t/* runstate act as the guard variable. Use load-acquire\n+\t * memory order here to synchronize with store-release\n+\t * in runstate update functions.\n+\t */\n+\twhile (__atomic_load_n(&cs->runstate,\n+\t\t\t__ATOMIC_ACQUIRE) == RUNSTATE_RUNNING) {\n \t\tconst uint64_t service_mask = cs->service_mask;\n \n \t\tfor (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {\n@@ -445,8 +473,6 @@ service_runner_func(void *arg)\n \t\t}\n \n \t\tcs->loops++;\n-\n-\t\trte_smp_rmb();\n \t}\n \n \tlcore_config[lcore].state = WAIT;\n@@ -614,15 +640,18 @@ rte_service_lcore_reset_all(void)\n \t\tif (lcore_states[i].is_service_core) {\n \t\t\tlcore_states[i].service_mask = 0;\n \t\t\tset_lcore_state(i, ROLE_RTE);\n-\t\t\tlcore_states[i].runstate = RUNSTATE_STOPPED;\n+\t\t\t/* runstate act as guard variable Use\n+\t\t\t * store-release memory order here to synchronize\n+\t\t\t * with load-acquire in runstate read functions.\n+\t\t\t */\n+\t\t\t__atomic_store_n(&lcore_states[i].runstate,\n+\t\t\t\tRUNSTATE_STOPPED, __ATOMIC_RELEASE);\n \t\t}\n \t}\n \tfor (i = 0; i < RTE_SERVICE_NUM_MAX; i++)\n \t\t__atomic_store_n(&rte_services[i].num_mapped_cores, 0,\n \t\t\t\t    __ATOMIC_RELAXED);\n \n-\trte_smp_wmb();\n-\n \treturn 0;\n }\n \n@@ -638,9 +667,11 @@ rte_service_lcore_add(uint32_t lcore)\n \n \t/* ensure that after adding a core the mask and state are defaults */\n \tlcore_states[lcore].service_mask = 0;\n-\tlcore_states[lcore].runstate = RUNSTATE_STOPPED;\n-\n-\trte_smp_wmb();\n+\t/* Use store-release memory order here to synchronize with\n+\t * load-acquire in runstate read functions.\n+\t */\n+\t__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,\n+\t\t__ATOMIC_RELEASE);\n \n \treturn rte_eal_wait_lcore(lcore);\n }\n@@ -655,7 +686,12 @@ rte_service_lcore_del(uint32_t lcore)\n \tif (!cs->is_service_core)\n \t\treturn -EINVAL;\n \n-\tif (cs->runstate != RUNSTATE_STOPPED)\n+\t/* runstate act as the guard variable. Use load-acquire\n+\t * memory order here to synchronize with store-release\n+\t * in runstate update functions.\n+\t */\n+\tif (__atomic_load_n(&cs->runstate,\n+\t\t\t__ATOMIC_ACQUIRE) != RUNSTATE_STOPPED)\n \t\treturn -EBUSY;\n \n \tset_lcore_state(lcore, ROLE_RTE);\n@@ -674,13 +710,21 @@ rte_service_lcore_start(uint32_t lcore)\n \tif (!cs->is_service_core)\n \t\treturn -EINVAL;\n \n-\tif (cs->runstate == RUNSTATE_RUNNING)\n+\t/* runstate act as the guard variable. 
Use load-acquire\n+\t * memory order here to synchronize with store-release\n+\t * in runstate update functions.\n+\t */\n+\tif (__atomic_load_n(&cs->runstate,\n+\t\t\t__ATOMIC_ACQUIRE) == RUNSTATE_RUNNING)\n \t\treturn -EALREADY;\n \n \t/* set core to run state first, and then launch otherwise it will\n \t * return immediately as runstate keeps it in the service poll loop\n \t */\n-\tcs->runstate = RUNSTATE_RUNNING;\n+\t/* Use load-acquire memory order here to synchronize with\n+\t * store-release in runstate update functions.\n+\t */\n+\t__atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);\n \n \tint ret = rte_eal_remote_launch(service_runner_func, 0, lcore);\n \t/* returns -EBUSY if the core is already launched, 0 on success */\n@@ -693,7 +737,12 @@ rte_service_lcore_stop(uint32_t lcore)\n \tif (lcore >= RTE_MAX_LCORE)\n \t\treturn -EINVAL;\n \n-\tif (lcore_states[lcore].runstate == RUNSTATE_STOPPED)\n+\t/* runstate act as the guard variable. Use load-acquire\n+\t * memory order here to synchronize with store-release\n+\t * in runstate update functions.\n+\t */\n+\tif (__atomic_load_n(&lcore_states[lcore].runstate,\n+\t\t\t__ATOMIC_ACQUIRE) == RUNSTATE_STOPPED)\n \t\treturn -EALREADY;\n \n \tuint32_t i;\n@@ -713,7 +762,11 @@ rte_service_lcore_stop(uint32_t lcore)\n \t\t\treturn -EBUSY;\n \t}\n \n-\tlcore_states[lcore].runstate = RUNSTATE_STOPPED;\n+\t/* Use store-release memory order here to synchronize with\n+\t * load-acquire in runstate read functions.\n+\t */\n+\t__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,\n+\t\t__ATOMIC_RELEASE);\n \n \treturn 0;\n }\n",
    "prefixes": [
        "v5",
        "6/6"
    ]
}
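
For the PATCH and PUT operations listed at the top (and advertised in the Allow header), write access requires authentication. The sketch below assumes Patchwork's token scheme ("Authorization: Token ...") and a maintainer token for the DPDK project; the token value, the new state, and the archived flag are placeholders, not taken from this response:

import json
import urllib.request

# Partially update the patch: hypothetically mark it accepted and archive it.
# Which fields are writable depends on the authenticated user's role.
body = json.dumps({"state": "accepted", "archived": True}).encode()
req = urllib.request.Request(
    "http://patches.dpdk.org/api/patches/69819/",
    data=body,
    method="PATCH",
    headers={
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": "Token 0123456789abcdef",  # placeholder token
    },
)
with urllib.request.urlopen(req) as resp:
    updated = json.load(resp)
    print(resp.status, updated["state"], updated["archived"])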