get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request body are changed).

put:
Update a patch (full update).
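
The methods above map directly onto plain HTTP calls. Below is a minimal client-side sketch in Python using the requests library; the API token, the target state value ("accepted") and the required write permissions are assumptions added for illustration, not part of this page:

import requests

BASE = "http://patches.dpdk.org/api"
TOKEN = "0123456789abcdef"  # hypothetical Patchwork API token; writes need a real one

# GET: show the patch (the same resource as the sample response below).
patch = requests.get(BASE + "/patches/44562/").json()
print(patch["name"], patch["state"])

# PATCH: partial update; only the fields present in the request body change.
# Assumes token authentication and maintainer/delegate rights on the project.
resp = requests.patch(
    BASE + "/patches/44562/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "accepted"},
)
resp.raise_for_status()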

GET /api/patches/44562/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 44562,
    "url": "http://patches.dpdk.org/api/patches/44562/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20180911080216.3017-7-mattias.ronnblom@ericsson.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20180911080216.3017-7-mattias.ronnblom@ericsson.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20180911080216.3017-7-mattias.ronnblom@ericsson.com",
    "date": "2018-09-11T08:02:12",
    "name": "[v3,06/10] event/dsw: add DSW port load measurements",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "6b5a989facedfcd7d1364e1e4b83b1f2c4834fbb",
    "submitter": {
        "id": 1077,
        "url": "http://patches.dpdk.org/api/people/1077/?format=api",
        "name": "Mattias Rönnblom",
        "email": "mattias.ronnblom@ericsson.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20180911080216.3017-7-mattias.ronnblom@ericsson.com/mbox/",
    "series": [
        {
            "id": 1264,
            "url": "http://patches.dpdk.org/api/series/1264/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=1264",
            "date": "2018-09-11T08:02:07",
            "name": "A Distributed Software Event Device",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/1264/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/44562/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/44562/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id EE6CB58EC;\n\tTue, 11 Sep 2018 10:03:25 +0200 (CEST)",
            "from mail.lysator.liu.se (mail.lysator.liu.se [130.236.254.3])\n\tby dpdk.org (Postfix) with ESMTP id A0CA54CBB\n\tfor <dev@dpdk.org>; Tue, 11 Sep 2018 10:03:16 +0200 (CEST)",
            "from mail.lysator.liu.se (localhost [127.0.0.1])\n\tby mail.lysator.liu.se (Postfix) with ESMTP id 51D034008D\n\tfor <dev@dpdk.org>; Tue, 11 Sep 2018 10:03:16 +0200 (CEST)",
            "by mail.lysator.liu.se (Postfix, from userid 1004)\n\tid 3B81040087; Tue, 11 Sep 2018 10:03:16 +0200 (CEST)",
            "from isengard.friendlyfire.se\n\t(host-90-232-156-190.mobileonline.telia.com [90.232.156.190])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256\n\tbits)) (No client certificate requested)\n\tby mail.lysator.liu.se (Postfix) with ESMTPSA id A40F240080;\n\tTue, 11 Sep 2018 10:03:12 +0200 (CEST)"
        ],
        "X-Spam-Checker-Version": "SpamAssassin 3.4.1 (2015-04-28) on\n\tbernadotte.lysator.liu.se",
        "X-Spam-Level": "",
        "X-Spam-Status": "No, score=-0.9 required=5.0 tests=ALL_TRUSTED,AWL\n\tautolearn=disabled version=3.4.1",
        "X-Spam-Score": "-0.9",
        "From": "=?utf-8?q?Mattias_R=C3=B6nnblom?= <mattias.ronnblom@ericsson.com>",
        "To": "jerin.jacob@caviumnetworks.com",
        "Cc": "bruce.richardson@intel.com, dev@dpdk.org, =?utf-8?q?Mattias_R=C3=B6nnb?=\n\t=?utf-8?q?lom?= <mattias.ronnblom@ericsson.com>",
        "Date": "Tue, 11 Sep 2018 10:02:12 +0200",
        "Message-Id": "<20180911080216.3017-7-mattias.ronnblom@ericsson.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20180911080216.3017-1-mattias.ronnblom@ericsson.com>",
        "References": "<20180911080216.3017-1-mattias.ronnblom@ericsson.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH v3 06/10] event/dsw: add DSW port load\n\tmeasurements",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The DSW event device port now attempts to estimate its load (i.e. how\nbusy it is). This is required for load balancing to work (although\nload balancing is not included in this patch), and may also be useful\nfor debugging purposes.\n\nSigned-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>\n---\n drivers/event/dsw/dsw_evdev.c |  14 +++++\n drivers/event/dsw/dsw_evdev.h |  40 +++++++++++++\n drivers/event/dsw/dsw_event.c | 109 ++++++++++++++++++++++++++++++++++\n 3 files changed, 163 insertions(+)",
    "diff": "diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c\nindex 40a7435be..bcfa17bab 100644\n--- a/drivers/event/dsw/dsw_evdev.c\n+++ b/drivers/event/dsw/dsw_evdev.c\n@@ -4,6 +4,7 @@\n \n #include <stdbool.h>\n \n+#include <rte_cycles.h>\n #include <rte_eventdev_pmd.h>\n #include <rte_eventdev_pmd_vdev.h>\n #include <rte_random.h>\n@@ -43,6 +44,11 @@ dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,\n \n \tport->in_ring = in_ring;\n \n+\trte_atomic16_init(&port->load);\n+\n+\tport->load_update_interval =\n+\t\t(DSW_LOAD_UPDATE_INTERVAL * rte_get_timer_hz()) / US_PER_S;\n+\n \tdev->data->ports[port_id] = port;\n \n \treturn 0;\n@@ -240,11 +246,19 @@ static int\n dsw_start(struct rte_eventdev *dev)\n {\n \tstruct dsw_evdev *dsw = dsw_pmd_priv(dev);\n+\tuint16_t i;\n+\tuint64_t now;\n \n \trte_atomic32_init(&dsw->credits_on_loan);\n \n \tinitial_flow_to_port_assignment(dsw);\n \n+\tnow = rte_get_timer_cycles();\n+\tfor (i = 0; i < dsw->num_ports; i++) {\n+\t\tdsw->ports[i].measurement_start = now;\n+\t\tdsw->ports[i].busy_start = now;\n+\t}\n+\n \treturn 0;\n }\n \ndiff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h\nindex f8e94e4a4..a5399dda5 100644\n--- a/drivers/event/dsw/dsw_evdev.h\n+++ b/drivers/event/dsw/dsw_evdev.h\n@@ -36,6 +36,15 @@\n  */\n #define DSW_PARALLEL_FLOWS (1024)\n \n+/* 'Background tasks' are polling the control rings for *\n+ *  migration-related messages, or flush the output buffer (so\n+ *  buffered events doesn't linger too long). Shouldn't be too low,\n+ *  since the system won't benefit from the 'batching' effects from\n+ *  the output buffer, and shouldn't be too high, since it will make\n+ *  buffered events linger too long in case the port goes idle.\n+ */\n+#define DSW_MAX_PORT_OPS_PER_BG_TASK (128)\n+\n /* Avoid making small 'loans' from the central in-flight event credit\n  * pool, to improve efficiency.\n  */\n@@ -50,6 +59,22 @@\n  */\n #define DSW_IN_RING_SIZE (DSW_MAX_EVENTS)\n \n+#define DSW_MAX_LOAD (INT16_MAX)\n+#define DSW_LOAD_FROM_PERCENT(x) ((int16_t)(((x)*DSW_MAX_LOAD)/100))\n+#define DSW_LOAD_TO_PERCENT(x) ((100*x)/DSW_MAX_LOAD)\n+\n+/* The thought behind keeping the load update interval shorter than\n+ * the migration interval is that the load from newly migrated flows\n+ * should 'show up' on the load measurement before new migrations are\n+ * considered. This is to avoid having too many flows, from too many\n+ * source ports, to be migrated too quickly to a lightly loaded port -\n+ * in particular since this might cause the system to oscillate.\n+ */\n+#define DSW_LOAD_UPDATE_INTERVAL (DSW_MIGRATION_INTERVAL/4)\n+#define DSW_OLD_LOAD_WEIGHT (1)\n+\n+#define DSW_MIGRATION_INTERVAL (1000)\n+\n struct dsw_port {\n \tuint16_t id;\n \n@@ -71,10 +96,25 @@ struct dsw_port {\n \n \tuint16_t next_parallel_flow_id;\n \n+\tuint16_t ops_since_bg_task;\n+\n+\tuint64_t last_bg;\n+\n+\t/* For port load measurement. */\n+\tuint64_t next_load_update;\n+\tuint64_t load_update_interval;\n+\tuint64_t measurement_start;\n+\tuint64_t busy_start;\n+\tuint64_t busy_cycles;\n+\tuint64_t total_busy_cycles;\n+\n \tuint16_t out_buffer_len[DSW_MAX_PORTS];\n \tstruct rte_event out_buffer[DSW_MAX_PORTS][DSW_MAX_PORT_OUT_BUFFER];\n \n \tstruct rte_event_ring *in_ring __rte_cache_aligned;\n+\n+\t/* Estimate of current port load. 
*/\n+\trte_atomic16_t load __rte_cache_aligned;\n } __rte_cache_aligned;\n \n struct dsw_queue {\ndiff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c\nindex 4a3af8ecd..f326147c9 100644\n--- a/drivers/event/dsw/dsw_event.c\n+++ b/drivers/event/dsw/dsw_event.c\n@@ -7,6 +7,7 @@\n #include <stdbool.h>\n \n #include <rte_atomic.h>\n+#include <rte_cycles.h>\n #include <rte_random.h>\n \n static bool\n@@ -75,6 +76,70 @@ dsw_port_return_credits(struct dsw_evdev *dsw, struct dsw_port *port,\n \t}\n }\n \n+static void\n+dsw_port_load_record(struct dsw_port *port, unsigned int dequeued)\n+{\n+\tif (dequeued > 0 && port->busy_start == 0)\n+\t\t/* work period begins */\n+\t\tport->busy_start = rte_get_timer_cycles();\n+\telse if (dequeued == 0 && port->busy_start > 0) {\n+\t\t/* work period ends */\n+\t\tuint64_t work_period =\n+\t\t\trte_get_timer_cycles() - port->busy_start;\n+\t\tport->busy_cycles += work_period;\n+\t\tport->busy_start = 0;\n+\t}\n+}\n+\n+static int16_t\n+dsw_port_load_close_period(struct dsw_port *port, uint64_t now)\n+{\n+\tuint64_t passed = now - port->measurement_start;\n+\tuint64_t busy_cycles = port->busy_cycles;\n+\n+\tif (port->busy_start > 0) {\n+\t\tbusy_cycles += (now - port->busy_start);\n+\t\tport->busy_start = now;\n+\t}\n+\n+\tint16_t load = (DSW_MAX_LOAD * busy_cycles) / passed;\n+\n+\tport->measurement_start = now;\n+\tport->busy_cycles = 0;\n+\n+\tport->total_busy_cycles += busy_cycles;\n+\n+\treturn load;\n+}\n+\n+static void\n+dsw_port_load_update(struct dsw_port *port, uint64_t now)\n+{\n+\tint16_t old_load;\n+\tint16_t period_load;\n+\tint16_t new_load;\n+\n+\told_load = rte_atomic16_read(&port->load);\n+\n+\tperiod_load = dsw_port_load_close_period(port, now);\n+\n+\tnew_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /\n+\t\t(DSW_OLD_LOAD_WEIGHT+1);\n+\n+\trte_atomic16_set(&port->load, new_load);\n+}\n+\n+static void\n+dsw_port_consider_load_update(struct dsw_port *port, uint64_t now)\n+{\n+\tif (now < port->next_load_update)\n+\t\treturn;\n+\n+\tport->next_load_update = now + port->load_update_interval;\n+\n+\tdsw_port_load_update(port, now);\n+}\n+\n static uint8_t\n dsw_schedule(struct dsw_evdev *dsw, uint8_t queue_id, uint16_t flow_hash)\n {\n@@ -196,6 +261,39 @@ dsw_port_buffer_event(struct dsw_evdev *dsw, struct dsw_port *source_port,\n \tdsw_port_buffer_non_paused(dsw, source_port, dest_port_id, event);\n }\n \n+static void\n+dsw_port_note_op(struct dsw_port *port, uint16_t num_events)\n+{\n+\t/* To pull the control ring reasonbly often on busy ports,\n+\t * each dequeued/enqueued event is considered an 'op' too.\n+\t */\n+\tport->ops_since_bg_task += (num_events+1);\n+}\n+\n+static void\n+dsw_port_flush_out_buffers(struct dsw_evdev *dsw, struct dsw_port *source_port);\n+\n+static void\n+dsw_port_bg_process(struct dsw_evdev *dsw, struct dsw_port *port)\n+{\n+\tif (unlikely(port->ops_since_bg_task >= DSW_MAX_PORT_OPS_PER_BG_TASK)) {\n+\t\tuint64_t now;\n+\n+\t\tnow = rte_get_timer_cycles();\n+\n+\t\tport->last_bg = now;\n+\n+\t\t/* Logic to avoid having events linger in the output\n+\t\t * buffer too long.\n+\t\t */\n+\t\tdsw_port_flush_out_buffers(dsw, port);\n+\n+\t\tdsw_port_consider_load_update(port, now);\n+\n+\t\tport->ops_since_bg_task = 0;\n+\t}\n+}\n+\n static void\n dsw_port_flush_out_buffers(struct dsw_evdev *dsw, struct dsw_port *source_port)\n {\n@@ -225,6 +323,8 @@ dsw_event_enqueue_burst_generic(void *port, const struct rte_event events[],\n \tDSW_LOG_DP_PORT(DEBUG, source_port->id, \"Attempting to enqueue %d 
\"\n \t\t\t\"events to port %d.\\n\", events_len, source_port->id);\n \n+\tdsw_port_bg_process(dsw, source_port);\n+\n \t/* XXX: For performance (=ring efficiency) reasons, the\n \t * scheduler relies on internal non-ring buffers instead of\n \t * immediately sending the event to the destination ring. For\n@@ -238,6 +338,7 @@ dsw_event_enqueue_burst_generic(void *port, const struct rte_event events[],\n \t * considered.\n \t */\n \tif (unlikely(events_len == 0)) {\n+\t\tdsw_port_note_op(source_port, DSW_MAX_PORT_OPS_PER_BG_TASK);\n \t\tdsw_port_flush_out_buffers(dsw, source_port);\n \t\treturn 0;\n \t}\n@@ -245,6 +346,8 @@ dsw_event_enqueue_burst_generic(void *port, const struct rte_event events[],\n \tif (unlikely(events_len > source_port->enqueue_depth))\n \t\tevents_len = source_port->enqueue_depth;\n \n+\tdsw_port_note_op(source_port, events_len);\n+\n \tif (!op_types_known)\n \t\tfor (i = 0; i < events_len; i++) {\n \t\t\tswitch (events[i].op) {\n@@ -337,6 +440,8 @@ dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num,\n \n \tsource_port->pending_releases = 0;\n \n+\tdsw_port_bg_process(dsw, source_port);\n+\n \tif (unlikely(num > source_port->dequeue_depth))\n \t\tnum = source_port->dequeue_depth;\n \n@@ -344,6 +449,10 @@ dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num,\n \n \tsource_port->pending_releases = dequeued;\n \n+\tdsw_port_load_record(source_port, dequeued);\n+\n+\tdsw_port_note_op(source_port, dequeued);\n+\n \tif (dequeued > 0) {\n \t\tDSW_LOG_DP_PORT(DEBUG, source_port->id, \"Dequeued %d events.\\n\",\n \t\t\t\tdequeued);\n",
    "prefixes": [
        "v3",
        "06/10"
    ]
}
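
As a usage note, the mbox and series URLs in the response above can be fetched directly. A short sketch (Python with requests again; the output file names are arbitrary) that saves the patch mbox, and the mbox of the series it belongs to, for use with git am:

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/44562/").json()

# Single-patch mbox, ready to be applied with `git am patch-44562.mbox`.
with open("patch-44562.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"]).content)

# The series ("A Distributed Software Event Device", v3) this patch is part of.
for series in patch["series"]:
    with open("series-%d.mbox" % series["id"], "wb") as f:
        f.write(requests.get(series["mbox"]).content)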