get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/22844/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 22844,
    "url": "http://patches.dpdk.org/api/patches/22844/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1490829963-106807-9-git-send-email-harry.van.haaren@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1490829963-106807-9-git-send-email-harry.van.haaren@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1490829963-106807-9-git-send-email-harry.van.haaren@intel.com",
    "date": "2017-03-29T23:25:50",
    "name": "[dpdk-dev,v6,08/21] event/sw: add support for event ports",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "7a05793c8ef51918b371c008ef6036dd141e8c74",
    "submitter": {
        "id": 317,
        "url": "http://patches.dpdk.org/api/people/317/?format=api",
        "name": "Van Haaren, Harry",
        "email": "harry.van.haaren@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1490829963-106807-9-git-send-email-harry.van.haaren@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/22844/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/22844/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 6602E10CB1;\n\tThu, 30 Mar 2017 02:37:33 +0200 (CEST)",
            "from mga11.intel.com (mga11.intel.com [192.55.52.93])\n\tby dpdk.org (Postfix) with ESMTP id 288EE9E3\n\tfor <dev@dpdk.org>; Thu, 30 Mar 2017 01:26:26 +0200 (CEST)",
            "from fmsmga006.fm.intel.com ([10.253.24.20])\n\tby fmsmga102.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t29 Mar 2017 16:26:25 -0700",
            "from silpixa00398672.ir.intel.com ([10.237.223.128])\n\tby fmsmga006.fm.intel.com with ESMTP; 29 Mar 2017 16:26:24 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=simple/simple;\n\td=intel.com; i=@intel.com; q=dns/txt; s=intel;\n\tt=1490829986; x=1522365986;\n\th=from:to:cc:subject:date:message-id:in-reply-to: references;\n\tbh=8/yysg3jMTUJlqkkcGIL3ZsS+mH9bcNB9EA5Y6mUzDs=;\n\tb=NoKZ/isBpeXFKN6xRl8Qy3x3cqW+huQrG/b8IE9VO4YfPkrVT6dYej+k\n\tMqpOBxz0iFGd/AZhJxDy09nFyuGccw==;",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.36,243,1486454400\"; d=\"scan'208\";a=\"82491227\"",
        "From": "Harry van Haaren <harry.van.haaren@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "jerin.jacob@caviumnetworks.com,\n\tBruce Richardson <bruce.richardson@intel.com>,\n\tHarry van Haaren <harry.van.haaren@intel.com>",
        "Date": "Thu, 30 Mar 2017 00:25:50 +0100",
        "Message-Id": "<1490829963-106807-9-git-send-email-harry.van.haaren@intel.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1490829963-106807-1-git-send-email-harry.van.haaren@intel.com>",
        "References": "<1490374395-149320-1-git-send-email-harry.van.haaren@intel.com>\n\t<1490829963-106807-1-git-send-email-harry.van.haaren@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v6 08/21] event/sw: add support for event ports",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Bruce Richardson <bruce.richardson@intel.com>\n\nAdd in the data-structures for the ports used by workers to send\npackets to/from the scheduler. Also add in the functions to\ncreate/destroy those ports.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\nSigned-off-by: Harry van Haaren <harry.van.haaren@intel.com>\n\nAcked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>\n\n---\n\nv6:\n- Remove enq/deq checks already performed by eventdev layer (Jerin)\n- Fix error printf() to use SW_LOG_ERR instead (Jerin)\n- Add rte_smp_wmb() to ensure writes completed before access (Jerin)\n---\n drivers/event/sw/event_ring.h | 185 ++++++++++++++++++++++++++++++++++++++++++\n drivers/event/sw/sw_evdev.c   |  81 ++++++++++++++++++\n drivers/event/sw/sw_evdev.h   |  80 ++++++++++++++++++\n 3 files changed, 346 insertions(+)\n create mode 100644 drivers/event/sw/event_ring.h",
    "diff": "diff --git a/drivers/event/sw/event_ring.h b/drivers/event/sw/event_ring.h\nnew file mode 100644\nindex 0000000..cdaee95\n--- /dev/null\n+++ b/drivers/event/sw/event_ring.h\n@@ -0,0 +1,185 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Generic ring structure for passing events from one core to another.\n+ *\n+ * Used by the software scheduler for the producer and consumer rings for\n+ * each port, i.e. for passing events from worker cores to scheduler and\n+ * vice-versa. Designed for single-producer, single-consumer use with two\n+ * cores working on each ring.\n+ */\n+\n+#ifndef _EVENT_RING_\n+#define _EVENT_RING_\n+\n+#include <stdint.h>\n+\n+#include <rte_common.h>\n+#include <rte_memory.h>\n+#include <rte_malloc.h>\n+\n+#define QE_RING_NAMESIZE 32\n+\n+struct qe_ring {\n+\tchar name[QE_RING_NAMESIZE] __rte_cache_aligned;\n+\tuint32_t ring_size; /* size of memory block allocated to the ring */\n+\tuint32_t mask;      /* mask for read/write values == ring_size -1 */\n+\tuint32_t size;      /* actual usable space in the ring */\n+\tvolatile uint32_t write_idx __rte_cache_aligned;\n+\tvolatile uint32_t read_idx __rte_cache_aligned;\n+\n+\tstruct rte_event ring[0] __rte_cache_aligned;\n+};\n+\n+#ifndef force_inline\n+#define force_inline inline __attribute__((always_inline))\n+#endif\n+\n+static inline struct qe_ring *\n+qe_ring_create(const char *name, unsigned int size, unsigned int socket_id)\n+{\n+\tstruct qe_ring *retval;\n+\tconst uint32_t ring_size = rte_align32pow2(size + 1);\n+\tsize_t memsize = sizeof(*retval) +\n+\t\t\t(ring_size * sizeof(retval->ring[0]));\n+\n+\tretval = rte_zmalloc_socket(NULL, memsize, 0, 
socket_id);\n+\tif (retval == NULL)\n+\t\tgoto end;\n+\n+\tsnprintf(retval->name, sizeof(retval->name), \"EVDEV_RG_%s\", name);\n+\tretval->ring_size = ring_size;\n+\tretval->mask = ring_size - 1;\n+\tretval->size = size;\n+end:\n+\treturn retval;\n+}\n+\n+static inline void\n+qe_ring_destroy(struct qe_ring *r)\n+{\n+\trte_free(r);\n+}\n+\n+static force_inline unsigned int\n+qe_ring_count(const struct qe_ring *r)\n+{\n+\treturn r->write_idx - r->read_idx;\n+}\n+\n+static force_inline unsigned int\n+qe_ring_free_count(const struct qe_ring *r)\n+{\n+\treturn r->size - qe_ring_count(r);\n+}\n+\n+static force_inline unsigned int\n+qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,\n+\t\tunsigned int nb_qes, uint16_t *free_count)\n+{\n+\tconst uint32_t size = r->size;\n+\tconst uint32_t mask = r->mask;\n+\tconst uint32_t read = r->read_idx;\n+\tuint32_t write = r->write_idx;\n+\tconst uint32_t space = read + size - write;\n+\tuint32_t i;\n+\n+\tif (space < nb_qes)\n+\t\tnb_qes = space;\n+\n+\tfor (i = 0; i < nb_qes; i++, write++)\n+\t\tr->ring[write & mask] = qes[i];\n+\n+\trte_smp_wmb();\n+\n+\tif (nb_qes != 0)\n+\t\tr->write_idx = write;\n+\n+\t*free_count = space - nb_qes;\n+\n+\treturn nb_qes;\n+}\n+\n+static force_inline unsigned int\n+qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,\n+\t\tunsigned int nb_qes, uint8_t *ops)\n+{\n+\tconst uint32_t size = r->size;\n+\tconst uint32_t mask = r->mask;\n+\tconst uint32_t read = r->read_idx;\n+\tuint32_t write = r->write_idx;\n+\tconst uint32_t space = read + size - write;\n+\tuint32_t i;\n+\n+\tif (space < nb_qes)\n+\t\tnb_qes = space;\n+\n+\tfor (i = 0; i < nb_qes; i++, write++) {\n+\t\tr->ring[write & mask] = qes[i];\n+\t\tr->ring[write & mask].op = ops[i];\n+\t}\n+\n+\trte_smp_wmb();\n+\n+\tif (nb_qes != 0)\n+\t\tr->write_idx = write;\n+\n+\treturn nb_qes;\n+}\n+\n+static force_inline unsigned int\n+qe_ring_dequeue_burst(struct qe_ring *r, struct rte_event 
*qes,\n+\t\tunsigned int nb_qes)\n+{\n+\tconst uint32_t mask = r->mask;\n+\tuint32_t read = r->read_idx;\n+\tconst uint32_t write = r->write_idx;\n+\tconst uint32_t items = write - read;\n+\tuint32_t i;\n+\n+\tif (items < nb_qes)\n+\t\tnb_qes = items;\n+\n+\n+\tfor (i = 0; i < nb_qes; i++, read++)\n+\t\tqes[i] = r->ring[read & mask];\n+\n+\trte_smp_rmb();\n+\n+\tif (nb_qes != 0)\n+\t\tr->read_idx += nb_qes;\n+\n+\treturn nb_qes;\n+}\n+\n+#endif\ndiff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c\nindex 574696b..d33f259 100644\n--- a/drivers/event/sw/sw_evdev.c\n+++ b/drivers/event/sw/sw_evdev.c\n@@ -39,12 +39,91 @@\n \n #include \"sw_evdev.h\"\n #include \"iq_ring.h\"\n+#include \"event_ring.h\"\n \n #define EVENTDEV_NAME_SW_PMD event_sw\n #define NUMA_NODE_ARG \"numa_node\"\n #define SCHED_QUANTA_ARG \"sched_quanta\"\n #define CREDIT_QUANTA_ARG \"credit_quanta\"\n \n+static void\n+sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);\n+\n+static int\n+sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,\n+\t\tconst struct rte_event_port_conf *conf)\n+{\n+\tstruct sw_evdev *sw = sw_pmd_priv(dev);\n+\tstruct sw_port *p = &sw->ports[port_id];\n+\tchar buf[QE_RING_NAMESIZE];\n+\tunsigned int i;\n+\n+\tstruct rte_event_dev_info info;\n+\tsw_info_get(dev, &info);\n+\n+\t/* detect re-configuring and return credits to instance if needed */\n+\tif (p->initialized) {\n+\t\t/* taking credits from pool is done one quanta at a time, and\n+\t\t * credits may be spend (counted in p->inflights) or still\n+\t\t * available in the port (p->inflight_credits). 
We must return\n+\t\t * the sum to no leak credits\n+\t\t */\n+\t\tint possible_inflights = p->inflight_credits + p->inflights;\n+\t\trte_atomic32_sub(&sw->inflights, possible_inflights);\n+\t}\n+\n+\t*p = (struct sw_port){0}; /* zero entire structure */\n+\tp->id = port_id;\n+\tp->sw = sw;\n+\n+\tsnprintf(buf, sizeof(buf), \"sw%d_%s\", dev->data->dev_id,\n+\t\t\t\"rx_worker_ring\");\n+\tp->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,\n+\t\t\tdev->data->socket_id);\n+\tif (p->rx_worker_ring == NULL) {\n+\t\tSW_LOG_ERR(\"%s %d: error creating RX worker ring\\n\",\n+\t\t\t\t__func__, __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tp->inflight_max = conf->new_event_threshold;\n+\n+\tsnprintf(buf, sizeof(buf), \"sw%d_%s\", dev->data->dev_id,\n+\t\t\t\"cq_worker_ring\");\n+\tp->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,\n+\t\t\tdev->data->socket_id);\n+\tif (p->cq_worker_ring == NULL) {\n+\t\tqe_ring_destroy(p->rx_worker_ring);\n+\t\tSW_LOG_ERR(\"%s %d: error creating CQ worker ring\\n\",\n+\t\t\t\t__func__, __LINE__);\n+\t\treturn -1;\n+\t}\n+\tsw->cq_ring_space[port_id] = conf->dequeue_depth;\n+\n+\t/* set hist list contents to empty */\n+\tfor (i = 0; i < SW_PORT_HIST_LIST; i++) {\n+\t\tp->hist_list[i].fid = -1;\n+\t\tp->hist_list[i].qid = -1;\n+\t}\n+\tdev->data->ports[port_id] = p;\n+\n+\trte_smp_wmb();\n+\tp->initialized = 1;\n+\treturn 0;\n+}\n+\n+static void\n+sw_port_release(void *port)\n+{\n+\tstruct sw_port *p = (void *)port;\n+\tif (p == NULL)\n+\t\treturn;\n+\n+\tqe_ring_destroy(p->rx_worker_ring);\n+\tqe_ring_destroy(p->cq_worker_ring);\n+\tmemset(p, 0, sizeof(*p));\n+}\n+\n static int32_t\n qid_init(struct sw_evdev *sw, unsigned int idx, int type,\n \t\tconst struct rte_event_queue_conf *queue_conf)\n@@ -319,6 +398,8 @@ sw_probe(const char *name, const char *params)\n \t\t\t.queue_setup = sw_queue_setup,\n \t\t\t.queue_release = sw_queue_release,\n \t\t\t.port_def_conf = sw_port_def_conf,\n+\t\t\t.port_setup = 
sw_port_setup,\n+\t\t\t.port_release = sw_port_release,\n \t};\n \n \tstatic const char *const args[] = {\ndiff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h\nindex ddf0cd2..f5515e1 100644\n--- a/drivers/event/sw/sw_evdev.h\n+++ b/drivers/event/sw/sw_evdev.h\n@@ -49,6 +49,13 @@\n #define MAX_SW_PROD_Q_DEPTH 4096\n #define SW_FRAGMENTS_MAX 16\n \n+/* report dequeue burst sizes in buckets */\n+#define SW_DEQ_STAT_BUCKET_SHIFT 2\n+/* how many packets pulled from port by sched */\n+#define SCHED_DEQUEUE_BURST_SIZE 32\n+\n+#define SW_PORT_HIST_LIST (MAX_SW_PROD_Q_DEPTH) /* size of our history list */\n+\n #define EVENTDEV_NAME_SW_PMD event_sw\n #define SW_PMD_NAME RTE_STR(event_sw)\n \n@@ -129,12 +136,82 @@ struct sw_qid {\n \tuint8_t priority;\n };\n \n+struct sw_hist_list_entry {\n+\tint32_t qid;\n+\tint32_t fid;\n+\tstruct reorder_buffer_entry *rob_entry;\n+};\n+\n+struct sw_evdev;\n+\n+struct sw_port {\n+\t/* new enqueue / dequeue API doesn't have an instance pointer, only the\n+\t * pointer to the port being enqueue/dequeued from\n+\t */\n+\tstruct sw_evdev *sw;\n+\n+\t/* set when the port is initialized */\n+\tuint8_t initialized;\n+\t/* A numeric ID for the port */\n+\tuint8_t id;\n+\n+\tint16_t is_directed; /** Takes from a single directed QID */\n+\t/**\n+\t * For loadbalanced we can optimise pulling packets from\n+\t * producers if there is no reordering involved\n+\t */\n+\tint16_t num_ordered_qids;\n+\n+\t/** Ring and buffer for pulling events from workers for scheduling */\n+\tstruct qe_ring *rx_worker_ring __rte_cache_aligned;\n+\t/** Ring and buffer for pushing packets to workers after scheduling */\n+\tstruct qe_ring *cq_worker_ring;\n+\n+\t/* hole */\n+\n+\t/* num releases yet to be completed on this port */\n+\tuint16_t outstanding_releases __rte_cache_aligned;\n+\tuint16_t inflight_max; /* app requested max inflights for this port */\n+\tuint16_t inflight_credits; /* num credits this port has right now */\n+\n+\tuint16_t 
last_dequeue_burst_sz; /* how big the burst was */\n+\tuint64_t last_dequeue_ticks; /* used to track burst processing time */\n+\tuint64_t avg_pkt_ticks;      /* tracks average over NUM_SAMPLES burst */\n+\tuint64_t total_polls;        /* how many polls were counted in stats */\n+\tuint64_t zero_polls;         /* tracks polls returning nothing */\n+\tuint32_t poll_buckets[MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT];\n+\t\t/* bucket values in 4s for shorter reporting */\n+\n+\t/* History list structs, containing info on pkts egressed to worker */\n+\tuint16_t hist_head __rte_cache_aligned;\n+\tuint16_t hist_tail;\n+\tuint16_t inflights;\n+\tstruct sw_hist_list_entry hist_list[SW_PORT_HIST_LIST];\n+\n+\t/* track packets in and out of this port */\n+\tstruct sw_point_stats stats;\n+\n+\n+\tuint32_t pp_buf_start;\n+\tuint32_t pp_buf_count;\n+\tuint16_t cq_buf_count;\n+\tstruct rte_event pp_buf[SCHED_DEQUEUE_BURST_SIZE];\n+\tstruct rte_event cq_buf[MAX_SW_CONS_Q_DEPTH];\n+\n+\tuint8_t num_qids_mapped;\n+};\n+\n struct sw_evdev {\n \tstruct rte_eventdev_data *data;\n \n \tuint32_t port_count;\n \tuint32_t qid_count;\n \n+\t/* Contains all ports - load balanced and directed */\n+\tstruct sw_port ports[SW_PORTS_MAX] __rte_cache_aligned;\n+\n+\trte_atomic32_t inflights __rte_cache_aligned;\n+\n \t/*\n \t * max events in this instance. Cached here for performance.\n \t * (also available in data->conf.nb_events_limit)\n@@ -144,6 +221,9 @@ struct sw_evdev {\n \t/* Internal queues - one per logical queue */\n \tstruct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;\n \n+\t/* Cache how many packets are in each cq */\n+\tuint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned;\n+\n \tint32_t sched_quanta;\n \n \tuint32_t credit_update_quanta;\n",
    "prefixes": [
        "dpdk-dev",
        "v6",
        "08/21"
    ]
}