get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
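
The exchange below shows the read-only GET operation. As a minimal sketch of driving it from a script (assuming the third-party Python "requests" package and network access; the URL and field names are taken from the sample response below, and "?format=json" is only used here to force a JSON rendering instead of the browsable "?format=api" view):

# Sketch: fetch patch 77486 and inspect a few fields, all of which appear
# in the sample response below.
import requests

resp = requests.get("http://patches.dpdk.org/api/patches/77486/?format=json")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])    # "[v4,03/22] event/dlb: add private data structures and constants"
print(patch["state"])   # "superseded"
print(patch["mbox"])    # mbox URL, suitable for piping into "git am"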

GET /api/patches/77486/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 77486,
    "url": "http://patches.dpdk.org/api/patches/77486/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1599851920-16802-4-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599851920-16802-4-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1599851920-16802-4-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2020-09-11T19:18:21",
    "name": "[v4,03/22] event/dlb: add private data structures and constants",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a5ebae4951259e5b8fdb6a2ab355116f075dcdeb",
    "submitter": {
        "id": 826,
        "url": "http://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1599851920-16802-4-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 12163,
            "url": "http://patches.dpdk.org/api/series/12163/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12163",
            "date": "2020-09-11T19:18:18",
            "name": "Add DLB PMD",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/12163/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/77486/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/77486/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 98159A04C1;\n\tFri, 11 Sep 2020 21:22:27 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id A4FD21C195;\n\tFri, 11 Sep 2020 21:22:10 +0200 (CEST)",
            "from mga12.intel.com (mga12.intel.com [192.55.52.136])\n by dpdk.org (Postfix) with ESMTP id 667F21C12A\n for <dev@dpdk.org>; Fri, 11 Sep 2020 21:22:06 +0200 (CEST)",
            "from fmsmga005.fm.intel.com ([10.253.24.32])\n by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 11 Sep 2020 12:22:05 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by fmsmga005.fm.intel.com with ESMTP; 11 Sep 2020 12:22:04 -0700"
        ],
        "IronPort-SDR": [
            "\n VYy2/Ju6ST8tzPhq0T7ryKa8GqYSb9BcyWwTktpp85oAJoJeY0uIQAhdqv3s3RFcjnZzYra7VN\n rTx2BujAXjyg==",
            "\n jhgCIjSwY7uEgxcPCwa5IqFhcmoUiWEtLPX9MaSUOTzBBS0jI/70ACypkaYbCUO3Rx/H1RI3Og\n fuPFL50OSoSQ=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9741\"; a=\"138352253\"",
            "E=Sophos;i=\"5.76,416,1592895600\"; d=\"scan'208\";a=\"138352253\"",
            "E=Sophos;i=\"5.76,416,1592895600\"; d=\"scan'208\";a=\"506375644\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Timothy McDaniel <timothy.mcdaniel@intel.com>",
        "To": "",
        "Cc": "dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,\n harry.van.haaren@intel.com, jerinj@marvell.com",
        "Date": "Fri, 11 Sep 2020 14:18:21 -0500",
        "Message-Id": "<1599851920-16802-4-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1599851920-16802-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<1599851920-16802-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v4 03/22] event/dlb: add private data structures\n\tand constants",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add headers used internally by the PMD. These headers are used\ninternally by the PMD. They include constants, macros for device\nresources, structure definitions for hardware interfaces and\nsoftware state, and various forward-declarations.\n\nSigned-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>\n---\n drivers/event/dlb/dlb_priv.h    | 566 ++++++++++++++++++++++++++++++++++++++++\n drivers/event/dlb/rte_pmd_dlb.h |  72 +++++\n 2 files changed, 638 insertions(+)\n create mode 100644 drivers/event/dlb/dlb_priv.h\n create mode 100644 drivers/event/dlb/rte_pmd_dlb.h",
    "diff": "diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h\nnew file mode 100644\nindex 0000000..482c5b2\n--- /dev/null\n+++ b/drivers/event/dlb/dlb_priv.h\n@@ -0,0 +1,566 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2016-2020 Intel Corporation\n+ */\n+\n+#ifndef _DLB_PRIV_H_\n+#define _DLB_PRIV_H_\n+\n+#include <emmintrin.h>\n+#include <stdbool.h>\n+\n+#include <rte_bus_pci.h>\n+#include <rte_eventdev.h>\n+#include <rte_eventdev_pmd.h>\n+#include <rte_eventdev_pmd_pci.h>\n+#include <rte_pci.h>\n+\n+#include \"dlb_user.h\"\n+#include \"dlb_log.h\"\n+#include \"rte_pmd_dlb.h\"\n+\n+#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS\n+#define DLB_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)\n+#else\n+#define DLB_INC_STAT(_stat, _incr_val)\n+#endif\n+\n+#define EVDEV_DLB_NAME_PMD_STR \"dlb_event\"\n+\n+/* command line arg strings */\n+#define NUMA_NODE_ARG \"numa_node\"\n+#define DLB_MAX_NUM_EVENTS \"max_num_events\"\n+#define DLB_NUM_DIR_CREDITS \"num_dir_credits\"\n+#define DEV_ID_ARG \"dev_id\"\n+#define DLB_DEFER_SCHED_ARG \"defer_sched\"\n+#define DLB_NUM_ATM_INFLIGHTS_ARG \"atm_inflights\"\n+\n+/* Begin HW related defines and structs */\n+\n+#define DLB_MAX_NUM_DOMAINS 32\n+#define DLB_MAX_NUM_VFS 16\n+#define DLB_MAX_NUM_LDB_QUEUES 128\n+#define DLB_MAX_NUM_LDB_PORTS 64\n+#define DLB_MAX_NUM_DIR_PORTS 128\n+#define DLB_MAX_NUM_DIR_QUEUES 128\n+#define DLB_MAX_NUM_FLOWS (64 * 1024)\n+#define DLB_MAX_NUM_LDB_CREDITS 16384\n+#define DLB_MAX_NUM_DIR_CREDITS 4096\n+#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64\n+#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64\n+#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120\n+#define DLB_MAX_NUM_ATM_INFLIGHTS 2048\n+#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8\n+#define DLB_QID_PRIORITIES 8\n+#define DLB_MAX_DEVICE_PATH 32\n+#define DLB_MIN_DEQUEUE_TIMEOUT_NS 1\n+#define DLB_NUM_SN_GROUPS 4\n+#define DLB_MAX_LDB_SN_ALLOC 1024\n+/* Note: \"- 1\" here to support the timeout range check in eventdev_autotest */\n+#define DLB_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)\n+#define DLB_DEF_UNORDERED_QID_INFLIGHTS 2048\n+\n+/* 5120 total hist list entries and 64 total ldb ports, which\n+ * makes for 5120/64 == 80 hist list entries per port. However, CQ\n+ * depth must be a power of 2 and must also be >= HIST LIST entries.\n+ * As a result we just limit the maximum dequeue depth to 64.\n+ */\n+#define DLB_MIN_LDB_CQ_DEPTH 1\n+#define DLB_MIN_DIR_CQ_DEPTH 8\n+#define DLB_MIN_HARDWARE_CQ_DEPTH 8\n+#define DLB_MAX_CQ_DEPTH 64\n+#define DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \\\n+\tDLB_MAX_CQ_DEPTH\n+\n+/* Static per queue/port provisioning values */\n+#define DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 16\n+\n+#define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE)\n+\n+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))\n+\n+#define DLB_NUM_QES_PER_CACHE_LINE 4\n+\n+#define DLB_MAX_ENQUEUE_DEPTH 64\n+#define DLB_MIN_ENQUEUE_DEPTH 4\n+\n+#define DLB_NAME_SIZE 64\n+\n+/* Use the upper 3 bits of the event priority to select the DLB priority */\n+#define EV_TO_DLB_PRIO(x) ((x) >> 5)\n+#define DLB_TO_EV_PRIO(x) ((x) << 5)\n+\n+enum dlb_hw_port_type {\n+\tDLB_LDB,\n+\tDLB_DIR,\n+\n+\t/* NUM_DLB_PORT_TYPES must be last */\n+\tNUM_DLB_PORT_TYPES\n+};\n+\n+#define PORT_TYPE(p) ((p)->is_directed ? DLB_DIR : DLB_LDB)\n+\n+/* Do not change - must match hardware! 
*/\n+enum dlb_hw_sched_type {\n+\tDLB_SCHED_ATOMIC = 0,\n+\tDLB_SCHED_UNORDERED,\n+\tDLB_SCHED_ORDERED,\n+\tDLB_SCHED_DIRECTED,\n+\n+\t/* DLB_NUM_HW_SCHED_TYPES must be last */\n+\tDLB_NUM_HW_SCHED_TYPES\n+};\n+\n+struct dlb_devargs {\n+\tint socket_id;\n+\tint max_num_events;\n+\tint num_dir_credits_override;\n+\tint dev_id;\n+\tint defer_sched;\n+\tint num_atm_inflights;\n+};\n+\n+struct dlb_hw_rsrcs {\n+\tint32_t nb_events_limit;\n+\tuint32_t num_queues;\t\t/* Total queues (ldb + dir) */\n+\tuint32_t num_ldb_queues;\t/* Number of available ldb queues */\n+\tuint32_t num_ldb_ports;         /* Number of load balanced ports */\n+\tuint32_t num_dir_ports;         /* Number of directed ports */\n+\tuint32_t num_ldb_credits;       /* Number of load balanced credits */\n+\tuint32_t num_dir_credits;       /* Number of directed credits */\n+\tuint32_t reorder_window_size;   /* Size of reorder window */\n+};\n+\n+struct dlb_hw_resource_info {\n+\t/**> Max resources that can be provided */\n+\tstruct dlb_hw_rsrcs hw_rsrc_max;\n+\tint num_sched_domains;\n+\tuint32_t socket_id;\n+\t/**> EAL flags passed to this DLB instance, allowing the application to\n+\t * identify the pmd backend indicating hardware or software.\n+\t */\n+\tconst char *eal_flags;\n+};\n+\n+/* hw-specific format - do not change */\n+\n+struct dlb_event_type {\n+\tuint8_t major:4;\n+\tuint8_t unused:4;\n+\tuint8_t sub;\n+};\n+\n+union dlb_opaque_data {\n+\tuint16_t opaque_data;\n+\tstruct dlb_event_type event_type;\n+};\n+\n+struct dlb_msg_info {\n+\tuint8_t qid;\n+\tuint8_t sched_type:2;\n+\tuint8_t priority:3;\n+\tuint8_t msg_type:3;\n+};\n+\n+#define DLB_NEW_CMD_BYTE 0x08\n+#define DLB_FWD_CMD_BYTE 0x0A\n+#define DLB_COMP_CMD_BYTE 0x02\n+#define DLB_NOOP_CMD_BYTE 0x00\n+#define DLB_POP_CMD_BYTE 0x01\n+\n+/* hw-specific format - do not change */\n+struct dlb_enqueue_qe {\n+\tuint64_t data;\n+\t/* Word 3 */\n+\tunion dlb_opaque_data u;\n+\tuint8_t qid;\n+\tuint8_t sched_type:2;\n+\tuint8_t priority:3;\n+\tuint8_t msg_type:3;\n+\t/* Word 4 */\n+\tuint16_t lock_id;\n+\tuint8_t meas_lat:1;\n+\tuint8_t rsvd1:2;\n+\tuint8_t no_dec:1;\n+\tuint8_t cmp_id:4;\n+\tunion {\n+\t\tuint8_t cmd_byte;\n+\t\tstruct {\n+\t\t\tuint8_t cq_token:1;\n+\t\t\tuint8_t qe_comp:1;\n+\t\t\tuint8_t qe_frag:1;\n+\t\t\tuint8_t qe_valid:1;\n+\t\t\tuint8_t int_arm:1;\n+\t\t\tuint8_t error:1;\n+\t\t\tuint8_t rsvd:2;\n+\t\t};\n+\t};\n+};\n+\n+/* hw-specific format - do not change */\n+struct dlb_cq_pop_qe {\n+\tuint64_t data;\n+\tunion dlb_opaque_data u;\n+\tuint8_t qid;\n+\tuint8_t sched_type:2;\n+\tuint8_t priority:3;\n+\tuint8_t msg_type:3;\n+\tuint16_t tokens:10;\n+\tuint16_t rsvd2:6;\n+\tuint8_t meas_lat:1;\n+\tuint8_t rsvd1:2;\n+\tuint8_t no_dec:1;\n+\tuint8_t cmp_id:4;\n+\tunion {\n+\t\tuint8_t cmd_byte;\n+\t\tstruct {\n+\t\t\tuint8_t cq_token:1;\n+\t\t\tuint8_t qe_comp:1;\n+\t\t\tuint8_t qe_frag:1;\n+\t\t\tuint8_t qe_valid:1;\n+\t\t\tuint8_t int_arm:1;\n+\t\t\tuint8_t error:1;\n+\t\t\tuint8_t rsvd:2;\n+\t\t};\n+\t};\n+};\n+\n+/* hw-specific format - do not change */\n+struct dlb_dequeue_qe {\n+\tuint64_t data;\n+\tunion dlb_opaque_data u;\n+\tuint8_t qid;\n+\tuint8_t sched_type:2;\n+\tuint8_t priority:3;\n+\tuint8_t msg_type:3;\n+\tuint16_t pp_id:10;\n+\tuint16_t rsvd0:6;\n+\tuint8_t debug;\n+\tuint8_t cq_gen:1;\n+\tuint8_t qid_depth:1;\n+\tuint8_t rsvd1:3;\n+\tuint8_t error:1;\n+\tuint8_t rsvd2:2;\n+};\n+\n+union dlb_port_config {\n+\tstruct dlb_create_ldb_port_args ldb;\n+\tstruct dlb_create_dir_port_args dir;\n+};\n+\n+enum DLB_PORT_STATE 
{\n+\tPORT_CLOSED,\n+\tPORT_STARTED,\n+\tPORT_STOPPED\n+};\n+\n+enum dlb_configuration_state {\n+\t/* The resource has not been configured */\n+\tDLB_NOT_CONFIGURED,\n+\t/* The resource was configured, but the device was stopped */\n+\tDLB_PREV_CONFIGURED,\n+\t/* The resource is currently configured */\n+\tDLB_CONFIGURED\n+};\n+\n+struct dlb_port {\n+\tuint32_t id;\n+\tbool is_directed;\n+\tbool gen_bit;\n+\tuint16_t dir_credits;\n+\tuint32_t dequeue_depth;\n+\tenum dlb_token_pop_mode token_pop_mode;\n+\tunion dlb_port_config cfg;\n+\tint pp_mmio_base;\n+\tuint16_t cached_ldb_credits;\n+\tuint16_t ldb_pushcount_at_credit_expiry;\n+\tuint16_t ldb_credits;\n+\tuint16_t cached_dir_credits;\n+\tuint16_t dir_pushcount_at_credit_expiry;\n+\tbool int_armed;\n+\tbool use_rsvd_token_scheme;\n+\tuint8_t cq_rsvd_token_deficit;\n+\tuint16_t owed_tokens;\n+\tint16_t issued_releases;\n+\tint16_t token_pop_thresh;\n+\tint cq_depth;\n+\tuint16_t cq_idx;\n+\tuint16_t cq_idx_unmasked;\n+\tuint16_t cq_depth_mask;\n+\tuint16_t gen_bit_shift;\n+\tenum DLB_PORT_STATE state;\n+\tenum dlb_configuration_state config_state;\n+\tint num_mapped_qids;\n+\tuint8_t *qid_mappings;\n+\tstruct dlb_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */\n+\tstruct dlb_cq_pop_qe *consume_qe;\n+\tstruct dlb_eventdev *dlb; /* back ptr */\n+\tstruct dlb_eventdev_port *ev_port; /* back ptr */\n+};\n+\n+/* Per-process per-port mmio and memory pointers */\n+struct process_local_port_data {\n+\tuint64_t *pp_addr;\n+\tuint16_t *ldb_popcount;\n+\tuint16_t *dir_popcount;\n+\tstruct dlb_dequeue_qe *cq_base;\n+\tbool mmaped;\n+};\n+\n+struct dlb_config {\n+\tint configured;\n+\tint reserved;\n+\tuint32_t ldb_credit_pool_id;\n+\tuint32_t dir_credit_pool_id;\n+\tuint32_t num_ldb_credits;\n+\tuint32_t num_dir_credits;\n+\tstruct dlb_create_sched_domain_args resources;\n+};\n+\n+struct dlb_hw_dev {\n+\tchar device_name[DLB_NAME_SIZE];\n+\tchar device_path[DLB_MAX_DEVICE_PATH];\n+\tint device_path_id;\n+\tchar domain_device_path[DLB_MAX_DEVICE_PATH];\n+\tstruct dlb_config cfg;\n+\tstruct dlb_hw_resource_info info;\n+\tvoid *pf_dev; /* opaque pointer to PF PMD dev (struct dlb_dev) */\n+\tint device_id;\n+\tuint32_t domain_id;\n+\tint domain_id_valid;\n+\trte_spinlock_t resource_lock; /* for MP support */\n+}; __rte_cache_aligned\n+\n+/* End HW related defines and structs */\n+\n+/* Begin DLB PMD Eventdev related defines and structs */\n+\n+#define DLB_MAX_NUM_QUEUES \\\n+\t(DLB_MAX_NUM_DIR_QUEUES + DLB_MAX_NUM_LDB_QUEUES)\n+\n+#define DLB_MAX_NUM_PORTS (DLB_MAX_NUM_DIR_PORTS + DLB_MAX_NUM_LDB_PORTS)\n+#define DLB_MAX_INPUT_QUEUE_DEPTH 256\n+\n+/* Used for parsing dir ports/queues. 
*/\n+\n+/** Structure to hold the queue to port link establishment attributes */\n+\n+struct dlb_event_queue_link {\n+\tuint8_t queue_id;\n+\tuint8_t priority;\n+\tbool mapped;\n+\tbool valid;\n+};\n+\n+struct dlb_traffic_stats {\n+\tuint64_t rx_ok;\n+\tuint64_t rx_drop;\n+\tuint64_t rx_interrupt_wait;\n+\tuint64_t rx_umonitor_umwait;\n+\tuint64_t tx_ok;\n+\tuint64_t total_polls;\n+\tuint64_t zero_polls;\n+\tuint64_t tx_nospc_ldb_hw_credits;\n+\tuint64_t tx_nospc_dir_hw_credits;\n+\tuint64_t tx_nospc_inflight_max;\n+\tuint64_t tx_nospc_new_event_limit;\n+\tuint64_t tx_nospc_inflight_credits;\n+};\n+\n+struct dlb_port_stats {\n+\tstruct dlb_traffic_stats traffic;\n+\tuint64_t tx_op_cnt[4]; /* indexed by rte_event.op */\n+\tuint64_t tx_implicit_rel;\n+\tuint64_t tx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];\n+\tuint64_t tx_invalid;\n+\tuint64_t rx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];\n+\tuint64_t rx_sched_invalid;\n+\tuint64_t enq_ok[DLB_MAX_NUM_QUEUES]; /* per-queue enq_ok */\n+};\n+\n+struct dlb_eventdev_port {\n+\tstruct dlb_port qm_port; /* hw specific data structure */\n+\tstruct rte_event_port_conf conf; /* user-supplied configuration */\n+\tuint16_t inflight_credits; /* num credits this port has right now */\n+\tuint16_t credit_update_quanta;\n+\tstruct dlb_eventdev *dlb; /* backlink optimization */\n+\tstruct dlb_port_stats stats __rte_cache_aligned;\n+\tstruct dlb_event_queue_link link[DLB_MAX_NUM_QIDS_PER_LDB_CQ];\n+\tint num_links;\n+\tuint32_t id;\n+\t/* num releases yet to be completed on this port.\n+\t * Only applies to load-balanced ports.\n+\t */\n+\tuint16_t outstanding_releases;\n+\tuint16_t inflight_max; /* app requested max inflights for this port */\n+\t/* setup_done is set when the event port is setup */\n+\tbool setup_done;\n+\t/* enq_configured is set when the qm port is created */\n+\tbool enq_configured;\n+\tuint8_t implicit_release; /* release events before dequeueing */\n+} __rte_cache_aligned;\n+\n+struct dlb_queue {\n+\tuint32_t num_qid_inflights; /* User config */\n+\tuint32_t num_atm_inflights; /* User config */\n+\tenum dlb_configuration_state config_state;\n+\tint sched_type; /* LB queue only */\n+\tuint32_t id;\n+\tbool is_directed;\n+};\n+\n+struct dlb_eventdev_queue {\n+\tstruct dlb_queue qm_queue;\n+\tstruct rte_event_queue_conf conf; /* User config */\n+\tuint64_t enq_ok;\n+\tuint32_t id;\n+\tbool setup_done;\n+\tuint8_t num_links;\n+};\n+\n+enum dlb_run_state {\n+\tDLB_RUN_STATE_STOPPED = 0,\n+\tDLB_RUN_STATE_STOPPING,\n+\tDLB_RUN_STATE_STARTING,\n+\tDLB_RUN_STATE_STARTED\n+};\n+\n+#define DLB_IS_VDEV true\n+#define DLB_NOT_VDEV false\n+\n+struct dlb_eventdev {\n+\tstruct dlb_eventdev_port ev_ports[DLB_MAX_NUM_PORTS];\n+\tstruct dlb_eventdev_queue ev_queues[DLB_MAX_NUM_QUEUES];\n+\tuint8_t qm_ldb_to_ev_queue_id[DLB_MAX_NUM_QUEUES];\n+\tuint8_t qm_dir_to_ev_queue_id[DLB_MAX_NUM_QUEUES];\n+\n+\t/* store num stats and offset of the stats for each queue */\n+\tuint16_t xstats_count_per_qid[DLB_MAX_NUM_QUEUES];\n+\tuint16_t xstats_offset_for_qid[DLB_MAX_NUM_QUEUES];\n+\n+\t/* store num stats and offset of the stats for each port */\n+\tuint16_t xstats_count_per_port[DLB_MAX_NUM_PORTS];\n+\tuint16_t xstats_offset_for_port[DLB_MAX_NUM_PORTS];\n+\tstruct dlb_get_num_resources_args hw_rsrc_query_results;\n+\tuint32_t xstats_count_mode_queue;\n+\tstruct dlb_hw_dev qm_instance; /* strictly hw related */\n+\tuint64_t global_dequeue_wait_ticks;\n+\tstruct dlb_xstats_entry *xstats;\n+\tstruct rte_eventdev *event_dev; /* backlink to dev */\n+\tuint32_t 
xstats_count_mode_port;\n+\tuint32_t xstats_count_mode_dev;\n+\tuint32_t xstats_count;\n+\tuint32_t inflights; /* use __atomic builtins to access */\n+\tuint32_t new_event_limit;\n+\tint max_num_events_override;\n+\tint num_dir_credits_override;\n+\tvolatile enum dlb_run_state run_state;\n+\tuint16_t num_dir_queues; /* total num of evdev dir queues requested */\n+\tuint16_t num_dir_credits;\n+\tuint16_t num_ldb_credits;\n+\tuint16_t num_queues; /* total queues */\n+\tuint16_t num_ldb_queues; /* total num of evdev ldb queues requested */\n+\tuint16_t num_ports; /* total num of evdev ports requested */\n+\tuint16_t num_ldb_ports; /* total num of ldb ports requested */\n+\tuint16_t num_dir_ports; /* total num of dir ports requested */\n+\tbool is_vdev;\n+\tbool umwait_allowed;\n+\tbool global_dequeue_wait; /* Not using per dequeue wait if true */\n+\tbool defer_sched;\n+\tunsigned int num_atm_inflights_per_queue;\n+\tenum dlb_cq_poll_modes poll_mode;\n+\tuint8_t revision;\n+\tbool configured;\n+};\n+\n+/* End Eventdev related defines and structs */\n+\n+/* externs */\n+\n+extern struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);\n+extern struct process_local_port_data dlb_port[][NUM_DLB_PORT_TYPES];\n+\n+/* Forwards for non-inlined functions */\n+\n+void dlb_free_qe_mem(struct dlb_port *port);\n+\n+int dlb_init_qe_mem(struct dlb_port *port, char *mz_name);\n+\n+int dlb_init_send_qe(struct dlb_port *port, char *mz_name);\n+\n+int dlb_init_partial_qe(struct dlb_port *port, char *mz_name);\n+\n+int dlb_init_fwd_qe(struct dlb_port *port, char *mz_name);\n+\n+int dlb_init_complete_qe(struct dlb_port *port, char *mz_name);\n+\n+int dlb_init_noop_qe(struct dlb_port *port, char *mz_name);\n+\n+int dlb_uninit(const char *name);\n+\n+void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f);\n+\n+int dlb_xstats_init(struct dlb_eventdev *dlb);\n+\n+void dlb_xstats_uninit(struct dlb_eventdev *dlb);\n+\n+int dlb_eventdev_xstats_get(const struct rte_eventdev *dev,\n+\t\t\t    enum rte_event_dev_xstats_mode mode,\n+\t\t\t    uint8_t queue_port_id, const unsigned int ids[],\n+\t\t\t    uint64_t values[], unsigned int n);\n+\n+int dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,\n+\t\t\t\t  enum rte_event_dev_xstats_mode mode,\n+\t\t\t\t  uint8_t queue_port_id,\n+\t\t\t\t  struct rte_event_dev_xstats_name *xstat_names,\n+\t\t\t\t  unsigned int *ids, unsigned int size);\n+\n+uint64_t dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,\n+\t\t\t\t\t const char *name, unsigned int *id);\n+\n+int dlb_eventdev_xstats_reset(struct rte_eventdev *dev,\n+\t\t\t      enum rte_event_dev_xstats_mode mode,\n+\t\t\t      int16_t queue_port_id,\n+\t\t\t      const uint32_t ids[],\n+\t\t\t      uint32_t nb_ids);\n+\n+int test_dlb_eventdev(void);\n+\n+int dlb_primary_eventdev_probe(struct rte_eventdev *dev,\n+\t\t\t       const char *name,\n+\t\t\t       struct dlb_devargs *dlb_args,\n+\t\t\t       bool is_vdev);\n+\n+int dlb_secondary_eventdev_probe(struct rte_eventdev *dev,\n+\t\t\t\t const char *name,\n+\t\t\t\t bool is_vdev);\n+uint32_t dlb_get_queue_depth(struct dlb_eventdev *dlb,\n+\t\t\t     struct dlb_eventdev_queue *queue);\n+\n+int set_numa_node(const char *key __rte_unused, const char *value,\n+\t\t  void *opaque);\n+\n+int set_dir_ports(const char *key __rte_unused,\n+\t\t  const char *value __rte_unused,\n+\t\t  void *opaque __rte_unused);\n+\n+int set_dir_queues(const char *key __rte_unused,\n+\t\t   const char *value __rte_unused,\n+\t\t   void *opaque __rte_unused);\n+\n+int 
set_max_num_events(const char *key __rte_unused, const char *value,\n+\t\t       void *opaque);\n+\n+int set_num_dir_credits(const char *key __rte_unused, const char *value,\n+\t\t\tvoid *opaque);\n+\n+void dlb_drain(struct rte_eventdev *dev);\n+\n+void dlb_entry_points_init(struct rte_eventdev *dev);\n+\n+int dlb_parse_params(const char *params,\n+\t\t     const char *name,\n+\t\t     struct dlb_devargs *dlb_args);\n+\n+int dlb_string_to_int(int *result, const char *str);\n+\n+#endif\t/* _DLB_PRIV_H_ */\ndiff --git a/drivers/event/dlb/rte_pmd_dlb.h b/drivers/event/dlb/rte_pmd_dlb.h\nnew file mode 100644\nindex 0000000..cf28149\n--- /dev/null\n+++ b/drivers/event/dlb/rte_pmd_dlb.h\n@@ -0,0 +1,72 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2020 Intel Corporation\n+ */\n+\n+/*!\n+ *  @file      rte_pmd_dlb.h\n+ *\n+ *  @brief     DLB PMD-specific functions\n+ *\n+ *  @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ */\n+\n+#ifndef _RTE_PMD_DLB_H_\n+#define _RTE_PMD_DLB_H_\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#include <stdint.h>\n+\n+/**\n+ * Selects the token pop mode for an DLB port.\n+ */\n+enum dlb_token_pop_mode {\n+\t/* Pop the CQ tokens immediately after dequeueing. */\n+\tAUTO_POP,\n+\t/* Pop CQ tokens after (dequeue_depth - 1) events are released.\n+\t * Supported on load-balanced ports only.\n+\t */\n+\tDELAYED_POP,\n+\t/* Pop the CQ tokens during next dequeue operation. */\n+\tDEFERRED_POP,\n+\n+\t/* NUM_TOKEN_POP_MODES must be last */\n+\tNUM_TOKEN_POP_MODES\n+};\n+\n+/*!\n+ * Configure the token pop mode for an DLB port. By default, all ports use\n+ * AUTO_POP. This function must be called before calling rte_event_port_setup()\n+ * for the port, but after calling rte_event_dev_configure().\n+ *\n+ * @note\n+ *    The defer_sched vdev arg, which configures all load-balanced ports with\n+ *    dequeue_depth == 1 for DEFERRED_POP mode, takes precedence over this\n+ *    function.\n+ *\n+ * @param dev_id\n+ *    The identifier of the event device.\n+ * @param port_id\n+ *    The identifier of the event port.\n+ * @param mode\n+ *    The token pop mode.\n+ *\n+ * @return\n+ * - 0: Success\n+ * - EINVAL: Invalid dev_id, port_id, or mode\n+ * - EINVAL: The DLB is not configured, is already running, or the port is\n+ *   already setup\n+ */\n+\n+__rte_experimental\n+int\n+rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,\n+\t\t\t       uint8_t port_id,\n+\t\t\t       enum dlb_token_pop_mode mode);\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_PMD_DLB_H_ */\n",
    "prefixes": [
        "v4",
        "03/22"
    ]
}
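
Per the Allow header above, the same resource also accepts PUT and PATCH for updates. A hedged sketch of a partial update follows (assumptions: a Patchwork API token with maintainer rights on the project, and that "state" and "archived" are among the writable fields; neither is confirmed by this read-only response alone):

# Sketch: partially update patch 77486 (maintainer-only; the token and the
# writable field names are assumptions, not taken from this response).
import requests

resp = requests.patch(
    "http://patches.dpdk.org/api/patches/77486/",
    headers={"Authorization": "Token <your-api-token>"},
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"], resp.json()["archived"])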