get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
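For illustration, a minimal sketch of the read-only GET call in Python (assuming the third-party requests package; the URL and field names are taken from the sample response below, and no authentication is needed for reads):

# Fetch one patch from the Patchwork REST API and read a few fields.
import requests

resp = requests.get("http://patches.dpdk.org/api/patches/20106/")
resp.raise_for_status()  # abort on 4xx/5xx
patch = resp.json()

print(patch["state"])  # "superseded"
print(patch["mbox"])   # raw mbox URL, suitable for feeding to git-am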

GET /api/patches/20106/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 20106,
    "url": "http://patches.dpdk.org/api/patches/20106/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1485879273-86228-8-git-send-email-harry.van.haaren@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1485879273-86228-8-git-send-email-harry.van.haaren@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1485879273-86228-8-git-send-email-harry.van.haaren@intel.com",
    "date": "2017-01-31T16:14:25",
    "name": "[dpdk-dev,v2,07/15] event/sw: add support for event queues",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "4c87baa85b88b6339b437d39de4057ea3d5eb640",
    "submitter": {
        "id": 317,
        "url": "http://patches.dpdk.org/api/people/317/?format=api",
        "name": "Van Haaren, Harry",
        "email": "harry.van.haaren@intel.com"
    },
    "delegate": {
        "id": 10,
        "url": "http://patches.dpdk.org/api/users/10/?format=api",
        "username": "bruce",
        "first_name": "Bruce",
        "last_name": "Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1485879273-86228-8-git-send-email-harry.van.haaren@intel.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/20106/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/20106/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id 72A53F94B;\n\tTue, 31 Jan 2017 17:15:29 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby dpdk.org (Postfix) with ESMTP id 3353028FD\n\tfor <dev@dpdk.org>; Tue, 31 Jan 2017 17:14:54 +0100 (CET)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n\tby orsmga102.jf.intel.com with ESMTP; 31 Jan 2017 08:14:54 -0800",
            "from silpixa00398672.ir.intel.com ([10.237.223.128])\n\tby FMSMGA003.fm.intel.com with ESMTP; 31 Jan 2017 08:14:52 -0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.33,315,1477983600\"; d=\"scan'208\";a=\"815468161\"",
        "From": "Harry van Haaren <harry.van.haaren@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "jerin.jacob@caviumnetworks.com,\n\tBruce Richardson <bruce.richardson@intel.com>,\n\tHarry van Haaren <harry.van.haaren@intel.com>",
        "Date": "Tue, 31 Jan 2017 16:14:25 +0000",
        "Message-Id": "<1485879273-86228-8-git-send-email-harry.van.haaren@intel.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1485879273-86228-1-git-send-email-harry.van.haaren@intel.com>",
        "References": "<1484580885-148524-1-git-send-email-harry.van.haaren@intel.com>\n\t<1485879273-86228-1-git-send-email-harry.van.haaren@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 07/15] event/sw: add support for event queues",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Bruce Richardson <bruce.richardson@intel.com>\n\nAdd in the data structures for the event queues, and the eventdev\nfunctions to create and destroy those queues.\n\nSigned-off-by: Bruce Richardson <bruce.richardson@intel.com>\nSigned-off-by: Harry van Haaren <harry.van.haaren@intel.com>\n---\n drivers/event/sw/iq_ring.h  | 176 ++++++++++++++++++++++++++++++++++++++++++++\n drivers/event/sw/sw_evdev.c | 158 +++++++++++++++++++++++++++++++++++++++\n drivers/event/sw/sw_evdev.h |  75 +++++++++++++++++++\n 3 files changed, 409 insertions(+)\n create mode 100644 drivers/event/sw/iq_ring.h",
    "diff": "diff --git a/drivers/event/sw/iq_ring.h b/drivers/event/sw/iq_ring.h\nnew file mode 100644\nindex 0000000..d480d15\n--- /dev/null\n+++ b/drivers/event/sw/iq_ring.h\n@@ -0,0 +1,176 @@\n+/*-\n+ *   BSD LICENSE\n+ *\n+ *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.\n+ *\n+ *   Redistribution and use in source and binary forms, with or without\n+ *   modification, are permitted provided that the following conditions\n+ *   are met:\n+ *\n+ *     * Redistributions of source code must retain the above copyright\n+ *       notice, this list of conditions and the following disclaimer.\n+ *     * Redistributions in binary form must reproduce the above copyright\n+ *       notice, this list of conditions and the following disclaimer in\n+ *       the documentation and/or other materials provided with the\n+ *       distribution.\n+ *     * Neither the name of Intel Corporation nor the names of its\n+ *       contributors may be used to endorse or promote products derived\n+ *       from this software without specific prior written permission.\n+ *\n+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+ *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+ */\n+\n+/*\n+ * Ring structure definitions used for the internal ring buffers of the\n+ * SW eventdev implementation. 
These are designed for single-core use only.\n+ */\n+#ifndef _IQ_RING_\n+#define _IQ_RING_\n+\n+#include <stdint.h>\n+\n+#include <rte_common.h>\n+#include <rte_memory.h>\n+#include <rte_malloc.h>\n+#include <rte_eventdev.h>\n+\n+#define IQ_RING_NAMESIZE 12\n+#define QID_IQ_DEPTH 512\n+#define QID_IQ_MASK (uint16_t)(QID_IQ_DEPTH - 1)\n+\n+struct iq_ring {\n+\tchar name[IQ_RING_NAMESIZE] __rte_cache_aligned;\n+\tuint16_t write_idx;\n+\tuint16_t read_idx;\n+\n+\tstruct rte_event ring[QID_IQ_DEPTH];\n+};\n+\n+#ifndef force_inline\n+#define force_inline inline __attribute__((always_inline))\n+#endif\n+\n+static inline struct iq_ring *\n+iq_ring_create(const char *name, unsigned int socket_id)\n+{\n+\tstruct iq_ring *retval;\n+\n+\tretval = rte_malloc_socket(NULL, sizeof(*retval), 0, socket_id);\n+\tif (retval == NULL)\n+\t\tgoto end;\n+\n+\tsnprintf(retval->name, sizeof(retval->name), \"%s\", name);\n+\tretval->write_idx = retval->read_idx = 0;\n+end:\n+\treturn retval;\n+}\n+\n+static inline void\n+iq_ring_destroy(struct iq_ring *r)\n+{\n+\trte_free(r);\n+}\n+\n+static force_inline uint16_t\n+iq_ring_count(const struct iq_ring *r)\n+{\n+\treturn r->write_idx - r->read_idx;\n+}\n+\n+static force_inline uint16_t\n+iq_ring_free_count(const struct iq_ring *r)\n+{\n+\treturn QID_IQ_MASK - iq_ring_count(r);\n+}\n+\n+static force_inline uint16_t\n+iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)\n+{\n+\tconst uint16_t read = r->read_idx;\n+\tuint16_t write = r->write_idx;\n+\tconst uint16_t space = read + QID_IQ_MASK - write;\n+\tuint16_t i;\n+\n+\tif (space < nb_qes)\n+\t\tnb_qes = space;\n+\n+\tfor (i = 0; i < nb_qes; i++, write++)\n+\t\tr->ring[write & QID_IQ_MASK] = qes[i];\n+\n+\tr->write_idx = write;\n+\n+\treturn nb_qes;\n+}\n+\n+static force_inline uint16_t\n+iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)\n+{\n+\tuint16_t read = r->read_idx;\n+\tconst uint16_t write = r->write_idx;\n+\tconst uint16_t items = write - read;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < nb_qes; i++, read++)\n+\t\tqes[i] = r->ring[read & QID_IQ_MASK];\n+\n+\tif (items < nb_qes)\n+\t\tnb_qes = items;\n+\n+\tr->read_idx += nb_qes;\n+\n+\treturn nb_qes;\n+}\n+\n+/* assumes there is space, from a previous dequeue_burst */\n+static force_inline uint16_t\n+iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)\n+{\n+\tuint16_t i, read = r->read_idx;\n+\n+\tfor (i = nb_qes; i-- > 0; )\n+\t\tr->ring[--read & QID_IQ_MASK] = qes[i];\n+\n+\tr->read_idx = read;\n+\treturn nb_qes;\n+}\n+\n+static force_inline const struct rte_event *\n+iq_ring_peek(const struct iq_ring *r)\n+{\n+\treturn &r->ring[r->read_idx & QID_IQ_MASK];\n+}\n+\n+static force_inline void\n+iq_ring_pop(struct iq_ring *r)\n+{\n+\tr->read_idx++;\n+}\n+\n+static force_inline int\n+iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe)\n+{\n+\tconst uint16_t read = r->read_idx;\n+\tconst uint16_t write = r->write_idx;\n+\tconst uint16_t space = read + QID_IQ_MASK - write;\n+\n+\tif (space == 0)\n+\t\treturn -1;\n+\n+\tr->ring[write & QID_IQ_MASK] = *qe;\n+\n+\tr->write_idx = write + 1;\n+\n+\treturn 0;\n+}\n+\n+#endif\ndiff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c\nindex ceca865..ec756b8 100644\n--- a/drivers/event/sw/sw_evdev.c\n+++ b/drivers/event/sw/sw_evdev.c\n@@ -38,12 +38,168 @@\n #include <rte_ring.h>\n \n #include \"sw_evdev.h\"\n+#include \"iq_ring.h\"\n \n #define EVENTDEV_NAME_SW_PMD event_sw\n #define NUMA_NODE_ARG \"numa_node\"\n 
#define SCHED_QUANTA_ARG \"sched_quanta\"\n #define CREDIT_QUANTA_ARG \"credit_quanta\"\n \n+static int32_t\n+qid_init(struct sw_evdev *sw, unsigned int idx, int type,\n+\t\tconst struct rte_event_queue_conf *queue_conf)\n+{\n+\tunsigned int i;\n+\tint dev_id = sw->data->dev_id;\n+\tint socket_id = sw->data->socket_id;\n+\tchar buf[IQ_RING_NAMESIZE];\n+\tstruct sw_qid *qid = &sw->qids[idx];\n+\n+\tfor (i = 0; i < SW_IQS_MAX; i++) {\n+\t\tsnprintf(buf, sizeof(buf), \"q_%u_iq_%d\", idx, i);\n+\t\tqid->iq[i] = iq_ring_create(buf, socket_id);\n+\t\tif (!qid->iq[i]) {\n+\t\t\tSW_LOG_DBG(\"ring create failed\");\n+\t\t\tgoto cleanup;\n+\t\t}\n+\t}\n+\n+\t/* Initialize the FID structures to no pinning (-1), and zero packets */\n+\tconst struct sw_fid_t fid = {.cq = -1, .pcount = 0};\n+\tfor (i = 0; i < RTE_DIM(qid->fids); i++)\n+\t\tqid->fids[i] = fid;\n+\n+\tqid->id = idx;\n+\tqid->type = type;\n+\tqid->priority = queue_conf->priority;\n+\n+\tif (qid->type == RTE_SCHED_TYPE_ORDERED) {\n+\t\tchar ring_name[RTE_RING_NAMESIZE];\n+\t\tuint32_t window_size;\n+\n+\t\t/* rte_ring and window_size_mask require require window_size to\n+\t\t * be a power-of-2.\n+\t\t */\n+\t\twindow_size = rte_align32pow2(\n+\t\t\t\tqueue_conf->nb_atomic_order_sequences);\n+\n+\t\tqid->window_size = window_size - 1;\n+\n+\t\tif (!window_size) {\n+\t\t\tSW_LOG_DBG(\n+\t\t\t\t\"invalid reorder_window_size for ordered queue\\n\"\n+\t\t\t\t);\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tsnprintf(buf, sizeof(buf), \"sw%d_iq_%d_rob\", dev_id, i);\n+\t\tqid->reorder_buffer = rte_zmalloc_socket(buf,\n+\t\t\t\twindow_size * sizeof(qid->reorder_buffer[0]),\n+\t\t\t\t0, socket_id);\n+\t\tif (!qid->reorder_buffer) {\n+\t\t\tSW_LOG_DBG(\"reorder_buffer malloc failed\\n\");\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tmemset(&qid->reorder_buffer[0],\n+\t\t       0,\n+\t\t       window_size * sizeof(qid->reorder_buffer[0]));\n+\n+\t\tsnprintf(ring_name, sizeof(ring_name), \"sw%d_q%d_freelist\",\n+\t\t\t\tdev_id, idx);\n+\t\tqid->reorder_buffer_freelist = rte_ring_create(ring_name,\n+\t\t\t\twindow_size,\n+\t\t\t\tsocket_id,\n+\t\t\t\tRING_F_SP_ENQ | RING_F_SC_DEQ);\n+\t\tif (!qid->reorder_buffer_freelist) {\n+\t\t\tSW_LOG_DBG(\"freelist ring create failed\");\n+\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\t/* Populate the freelist with reorder buffer entries. 
Enqueue\n+\t\t * 'window_size - 1' entries because the rte_ring holds only\n+\t\t * that many.\n+\t\t */\n+\t\tfor (i = 0; i < window_size - 1; i++) {\n+\t\t\tif (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,\n+\t\t\t\t\t\t&qid->reorder_buffer[i]) < 0)\n+\t\t\t\tgoto cleanup;\n+\t\t}\n+\n+\t\tqid->reorder_buffer_index = 0;\n+\t\tqid->cq_next_tx = 0;\n+\t}\n+\n+\treturn 0;\n+\n+cleanup:\n+\tfor (i = 0; i < SW_IQS_MAX; i++) {\n+\t\tif (qid->iq[i])\n+\t\t\tiq_ring_destroy(qid->iq[i]);\n+\t}\n+\n+\tif (qid->reorder_buffer) {\n+\t\trte_free(qid->reorder_buffer);\n+\t\tqid->reorder_buffer = NULL;\n+\t}\n+\n+\tif (qid->reorder_buffer_freelist) {\n+\t\trte_ring_free(qid->reorder_buffer_freelist);\n+\t\tqid->reorder_buffer_freelist = NULL;\n+\t}\n+\n+\treturn -EINVAL;\n+}\n+\n+static int\n+sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,\n+\t\tconst struct rte_event_queue_conf *conf)\n+{\n+\tint type;\n+\n+\t/* TODO: Error check queue types and appropriate values */\n+\tswitch (conf->event_queue_cfg) {\n+\tcase RTE_EVENT_QUEUE_CFG_SINGLE_LINK:\n+\t\ttype = RTE_SCHED_TYPE_DIRECT;\n+\t\tbreak;\n+\tcase RTE_EVENT_QUEUE_CFG_DEFAULT:\n+\t\t/* fallthrough */\n+\tcase RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY:\n+\t\ttype = RTE_SCHED_TYPE_ATOMIC;\n+\t\tbreak;\n+\tcase RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:\n+\t\ttype = RTE_SCHED_TYPE_ORDERED;\n+\t\tbreak;\n+\tcase RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:\n+\t\ttype = RTE_SCHED_TYPE_PARALLEL;\n+\t\tbreak;\n+\tdefault:\n+\t\tprintf(\"%s : unknown queue type %d requested\\n\", __func__,\n+\t\t\t\tconf->event_queue_cfg);\n+\t\treturn -1;\n+\t}\n+\n+\tstruct sw_evdev *sw = sw_pmd_priv(dev);\n+\treturn qid_init(sw, queue_id, type, conf);\n+}\n+\n+static void\n+sw_queue_release(struct rte_eventdev *dev, uint8_t id)\n+{\n+\tstruct sw_evdev *sw = sw_pmd_priv(dev);\n+\tstruct sw_qid *qid = &sw->qids[id];\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < SW_IQS_MAX; i++)\n+\t\tiq_ring_destroy(qid->iq[i]);\n+\n+\tif (qid->type == RTE_SCHED_TYPE_ORDERED) {\n+\t\trte_free(qid->reorder_buffer);\n+\t\trte_ring_free(qid->reorder_buffer_freelist);\n+\t}\n+\tmemset(qid, 0, sizeof(*qid));\n+}\n+\n static void\n sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,\n \t\t\t\t struct rte_event_queue_conf *conf)\n@@ -147,6 +303,8 @@ sw_probe(const char *name, const char *params)\n \t\t\t.dev_infos_get = sw_info_get,\n \n \t\t\t.queue_def_conf = sw_queue_def_conf,\n+\t\t\t.queue_setup = sw_queue_setup,\n+\t\t\t.queue_release = sw_queue_release,\n \t\t\t.port_def_conf = sw_port_def_conf,\n \t};\n \ndiff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h\nindex 65f00e4..aaa8056 100644\n--- a/drivers/event/sw/sw_evdev.h\n+++ b/drivers/event/sw/sw_evdev.h\n@@ -49,6 +49,78 @@\n #define SW_INFLIGHT_EVENTS_TOTAL 4096\n /* allow for lots of over-provisioning */\n #define MAX_SW_PROD_Q_DEPTH 4096\n+#define SW_FRAGMENTS_MAX 16\n+\n+/* have a new scheduling type for 1:1 queue to port links */\n+#define RTE_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)\n+\n+#ifdef RTE_LIBRTE_PMD_EVDEV_SW_DEBUG\n+#define SW_LOG_INFO(fmt, args...) \\\n+\tRTE_LOG(INFO, PMD, \"[%s] %s() line %u: \" fmt \"\\n\", \\\n+\t\t\tPMD_NAME, \\\n+\t\t\t__func__, __LINE__, ## args)\n+\n+#define SW_LOG_DBG(fmt, args...) \\\n+\tRTE_LOG(DEBUG, PMD, \"[%s] %s() line %u: \" fmt \"\\n\", \\\n+\t\t\tPMD_NAME, \\\n+\t\t\t__func__, __LINE__, ## args)\n+#else\n+#define SW_LOG_INFO(fmt, args...)\n+#define SW_LOG_DBG(fmt, args...)\n+#endif\n+\n+/* Records basic event stats at a given point. 
Used in port and qid structs */\n+struct sw_point_stats {\n+\tuint64_t rx_pkts;\n+\tuint64_t rx_dropped;\n+\tuint64_t tx_pkts;\n+};\n+\n+/* structure used to track what port a flow (FID) is pinned to */\n+struct sw_fid_t {\n+\t/* which CQ this FID is currently pinned to */\n+\tint32_t cq;\n+\t/* number of packets gone to the CQ with this FID */\n+\tuint32_t pcount;\n+};\n+\n+struct reorder_buffer_entry {\n+\tuint16_t num_fragments;\t\t/**< Number of packet fragments */\n+\tuint16_t fragment_index;\t/**< Points to the oldest valid frag */\n+\tuint8_t ready;\t\t\t/**< Entry is ready to be reordered */\n+\tstruct rte_event fragments[SW_FRAGMENTS_MAX];\n+};\n+\n+struct sw_qid {\n+\t/* The type of this QID */\n+\tint type;\n+\t/* Integer ID representing the queue. This is used in history lists,\n+\t * to identify the stage of processing.\n+\t */\n+\tuint32_t id;\n+\tstruct sw_point_stats stats;\n+\n+\t/* Internal priority rings for packets */\n+\tstruct iq_ring *iq[SW_IQS_MAX];\n+\tuint32_t iq_pkt_mask; /* A mask to indicate packets in an IQ */\n+\tuint64_t iq_pkt_count[SW_IQS_MAX];\n+\n+\t/* Information on what CQs are polling this IQ */\n+\tuint32_t cq_num_mapped_cqs;\n+\tuint32_t cq_next_tx; /* cq to write next (non-atomic) packet */\n+\tuint32_t cq_map[SW_PORTS_MAX];\n+\n+\t/* Track flow ids for atomic load balancing */\n+\tstruct sw_fid_t fids[SW_QID_NUM_FIDS];\n+\n+\t/* Track packet order for reordering when needed */\n+\tstruct reorder_buffer_entry *reorder_buffer; /*< pkts await reorder */\n+\tstruct rte_ring *reorder_buffer_freelist; /* available reorder slots */\n+\tuint32_t reorder_buffer_index; /* oldest valid reorder buffer entry */\n+\tuint32_t window_size;          /* Used to wrap reorder_buffer_index */\n+\n+\tuint8_t priority;\n+};\n \n struct sw_evdev {\n \tstruct rte_eventdev_data *data;\n@@ -62,6 +134,9 @@ struct sw_evdev {\n \t */\n \tuint32_t nb_events_limit;\n \n+\t/* Internal queues - one per logical queue */\n+\tstruct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;\n+\n \tint32_t sched_quanta;\n \n \tuint32_t credit_update_quanta;\n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "07/15"
    ]
}
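The put and patch methods above require authenticated write access. A hedged sketch of a partial update via HTTP PATCH, assuming an API token with maintainer rights on the project; the token value is a placeholder, and the writable fields used here ("state", "archived") are assumptions based on the read-only fields shown in the response above:

# Hypothetical sketch: change the patch state via HTTP PATCH.
# "REDACTED" is a placeholder token; unauthenticated PATCH/PUT fail.
import requests

resp = requests.patch(
    "http://patches.dpdk.org/api/patches/20106/",
    json={"state": "accepted", "archived": False},
    headers={"Authorization": "Token REDACTED"},
)
resp.raise_for_status()
print(resp.json()["state"])  # "accepted" on success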