get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request body are changed).

put:
Update a patch.
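
Read access needs no authentication. Below is a minimal sketch of fetching this resource with Python's third-party requests library (the library choice and the printed fields are illustrative assumptions; the patch ID and field names match the example response further down):

    # Hedged sketch: read one patch from the Patchwork REST API.
    # Assumes the "requests" package is installed; patch 52280 is the
    # example shown below.
    import requests

    resp = requests.get("http://patches.dpdk.org/api/patches/52280/")
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"], patch["state"])   # patch title and current state
    print(patch["mbox"])                   # URL of the raw mbox for this patch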

GET /api/patches/52280/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 52280,
    "url": "http://patches.dpdk.org/api/patches/52280/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190404100127.29381-9-gage.eads@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190404100127.29381-9-gage.eads@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190404100127.29381-9-gage.eads@intel.com",
    "date": "2019-04-04T10:01:27",
    "name": "[v10,8/8] mempool/stack: add lock-free stack mempool handler",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "f0edb095b1e10087e2611db479346a90239afd84",
    "submitter": {
        "id": 586,
        "url": "http://patches.dpdk.org/api/people/586/?format=api",
        "name": "Eads, Gage",
        "email": "gage.eads@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190404100127.29381-9-gage.eads@intel.com/mbox/",
    "series": [
        {
            "id": 4111,
            "url": "http://patches.dpdk.org/api/series/4111/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4111",
            "date": "2019-04-04T10:01:19",
            "name": "Add stack library and new mempool handler",
            "version": 10,
            "mbox": "http://patches.dpdk.org/series/4111/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/52280/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/52280/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id AC61D1B1FC;\n\tThu,  4 Apr 2019 12:02:34 +0200 (CEST)",
            "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n\tby dpdk.org (Postfix) with ESMTP id 07E6A1B131\n\tfor <dev@dpdk.org>; Thu,  4 Apr 2019 12:02:18 +0200 (CEST)",
            "from orsmga006.jf.intel.com ([10.7.209.51])\n\tby orsmga105.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t04 Apr 2019 03:02:18 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n\tby orsmga006.jf.intel.com with ESMTP; 04 Apr 2019 03:02:17 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.60,308,1549958400\"; d=\"scan'208\";a=\"132898937\"",
        "From": "Gage Eads <gage.eads@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "olivier.matz@6wind.com, arybchenko@solarflare.com,\n\tbruce.richardson@intel.com, konstantin.ananyev@intel.com,\n\tgavin.hu@arm.com, \n\tHonnappa.Nagarahalli@arm.com, nd@arm.com, thomas@monjalon.net",
        "Date": "Thu,  4 Apr 2019 05:01:27 -0500",
        "Message-Id": "<20190404100127.29381-9-gage.eads@intel.com>",
        "X-Mailer": "git-send-email 2.13.6",
        "In-Reply-To": "<20190404100127.29381-1-gage.eads@intel.com>",
        "References": "<20190403232020.12784-1-gage.eads@intel.com>\n\t<20190404100127.29381-1-gage.eads@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v10 8/8] mempool/stack: add lock-free stack\n\tmempool handler",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This commit adds support for lock-free (linked list based) stack mempool\nhandler.\n\nIn mempool_perf_autotest the lock-based stack outperforms the\nlock-free handler for certain lcore/alloc count/free count\ncombinations*, however:\n- For applications with preemptible pthreads, a standard (lock-based)\n  stack's worst-case performance (i.e. one thread being preempted while\n  holding the spinlock) is much worse than the lock-free stack's.\n- Using per-thread mempool caches will largely mitigate the performance\n  difference.\n\n*Test setup: x86_64 build with default config, dual-socket Xeon E5-2699 v4,\nrunning on isolcpus cores with a tickless scheduler. The lock-based stack's\nrate_persec was 0.6x-3.5x the lock-free stack's.\n\nSigned-off-by: Gage Eads <gage.eads@intel.com>\nReviewed-by: Olivier Matz <olivier.matz@6wind.com>\n---\n doc/guides/prog_guide/env_abstraction_layer.rst | 10 ++++++++++\n doc/guides/rel_notes/release_19_05.rst          |  5 +++++\n drivers/mempool/stack/rte_mempool_stack.c       | 26 +++++++++++++++++++++++--\n 3 files changed, 39 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst\nindex 6a04c3c33..fa8afdb3a 100644\n--- a/doc/guides/prog_guide/env_abstraction_layer.rst\n+++ b/doc/guides/prog_guide/env_abstraction_layer.rst\n@@ -581,6 +581,16 @@ Known Issues\n \n   5. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.\n \n+  Alternatively, applications can use the lock-free stack mempool handler. When\n+  considering this handler, note that:\n+\n+  - It is currently limited to the x86_64 platform, because it uses an\n+    instruction (16-byte compare-and-swap) that is not yet available on other\n+    platforms.\n+  - It has worse average-case performance than the non-preemptive rte_ring, but\n+    software caching (e.g. the mempool cache) can mitigate this by reducing the\n+    number of stack accesses.\n+\n + rte_timer\n \n   Running  ``rte_timer_manage()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.\ndiff --git a/doc/guides/rel_notes/release_19_05.rst b/doc/guides/rel_notes/release_19_05.rst\nindex 3b115b5f6..f873984ad 100644\n--- a/doc/guides/rel_notes/release_19_05.rst\n+++ b/doc/guides/rel_notes/release_19_05.rst\n@@ -130,6 +130,11 @@ New Features\n   The library supports two stack implementations: standard (lock-based) and lock-free.\n   The lock-free implementation is currently limited to x86-64 platforms.\n \n+* **Added Lock-Free Stack Mempool Handler.**\n+\n+  Added a new lock-free stack handler, which uses the newly added stack\n+  library.\n+\n Removed Items\n -------------\n \ndiff --git a/drivers/mempool/stack/rte_mempool_stack.c b/drivers/mempool/stack/rte_mempool_stack.c\nindex 25ccdb9af..7e85c8d6b 100644\n--- a/drivers/mempool/stack/rte_mempool_stack.c\n+++ b/drivers/mempool/stack/rte_mempool_stack.c\n@@ -7,7 +7,7 @@\n #include <rte_stack.h>\n \n static int\n-stack_alloc(struct rte_mempool *mp)\n+__stack_alloc(struct rte_mempool *mp, uint32_t flags)\n {\n \tchar name[RTE_STACK_NAMESIZE];\n \tstruct rte_stack *s;\n@@ -20,7 +20,7 @@ stack_alloc(struct rte_mempool *mp)\n \t\treturn -rte_errno;\n \t}\n \n-\ts = rte_stack_create(name, mp->size, mp->socket_id, 0);\n+\ts = rte_stack_create(name, mp->size, mp->socket_id, flags);\n \tif (s == NULL)\n \t\treturn -rte_errno;\n \n@@ -30,6 +30,18 @@ stack_alloc(struct rte_mempool *mp)\n }\n \n static int\n+stack_alloc(struct rte_mempool *mp)\n+{\n+\treturn __stack_alloc(mp, 0);\n+}\n+\n+static int\n+lf_stack_alloc(struct rte_mempool *mp)\n+{\n+\treturn __stack_alloc(mp, RTE_STACK_F_LF);\n+}\n+\n+static int\n stack_enqueue(struct rte_mempool *mp, void * const *obj_table,\n \t      unsigned int n)\n {\n@@ -72,4 +84,14 @@ static struct rte_mempool_ops ops_stack = {\n \t.get_count = stack_get_count\n };\n \n+static struct rte_mempool_ops ops_lf_stack = {\n+\t.name = \"lf_stack\",\n+\t.alloc = lf_stack_alloc,\n+\t.free = stack_free,\n+\t.enqueue = stack_enqueue,\n+\t.dequeue = stack_dequeue,\n+\t.get_count = stack_get_count\n+};\n+\n MEMPOOL_REGISTER_OPS(ops_stack);\n+MEMPOOL_REGISTER_OPS(ops_lf_stack);\n",
    "prefixes": [
        "v10",
        "8/8"
    ]
}
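
Write operations (PUT and PATCH, as listed in the Allow header above) require an authenticated account with maintainer rights on the project. The following is a hedged sketch of a partial update that sets the patch state; the PATCHWORK_TOKEN environment variable, the token-based Authorization header, and the assumption that this account may write the "state" field are illustrative, not taken from the response above:

    # Hedged sketch: partially update the patch via HTTP PATCH.
    # Assumes a Patchwork API token is exported as PATCHWORK_TOKEN and that
    # the authenticated user is allowed to change "state"; adjust as needed.
    import os
    import requests

    token = os.environ["PATCHWORK_TOKEN"]
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/52280/",
        headers={"Authorization": "Token " + token},
        json={"state": "accepted"},
    )
    resp.raise_for_status()
    print(resp.json()["state"])

A PUT request works the same way but is expected to carry a full representation of the writable fields rather than only the ones being changed.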