get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are modified).

put:
Update a patch.

GET /api/patches/83060/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 83060,
    "url": "https://patches.dpdk.org/api/patches/83060/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1604101295-15970-21-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1604101295-15970-21-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1604101295-15970-21-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2020-10-30T23:41:32",
    "name": "[v11,20/23] event/dlb: add PMD's token pop public interface",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d8026b38b8ead32fe08aae2821700ff5cd3b17ba",
    "submitter": {
        "id": 826,
        "url": "https://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": null,
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1604101295-15970-21-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 13512,
            "url": "https://patches.dpdk.org/api/series/13512/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=13512",
            "date": "2020-10-30T23:41:12",
            "name": "Add DLB PMD",
            "version": 11,
            "mbox": "https://patches.dpdk.org/series/13512/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/83060/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/83060/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 58D1FA04E6;\n\tSat, 31 Oct 2020 00:46:34 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id EEC1BC936;\n\tSat, 31 Oct 2020 00:40:46 +0100 (CET)",
            "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n by dpdk.org (Postfix) with ESMTP id 3866CC313\n for <dev@dpdk.org>; Sat, 31 Oct 2020 00:40:06 +0100 (CET)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 30 Oct 2020 16:40:05 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by fmsmga004.fm.intel.com with ESMTP; 30 Oct 2020 16:40:05 -0700"
        ],
        "IronPort-SDR": [
            "\n ylyYqy8u5LWDluzMbHm9yvUNfxF+CzlQOKOZvVbzuEOd3BD+arlJQTe137Gi+UyytCqiQ+c+zS\n AUlu6YxBuemQ==",
            "\n kGvoUNhIWr5T5TpHDVqzQFvmLZE9jkHsfELi1u1lsNXaDZ/sewEdvXSVD/hUjceZ8+p6A7RXqK\n 1vzXocJERw4w=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9790\"; a=\"166094690\"",
            "E=Sophos;i=\"5.77,435,1596524400\"; d=\"scan'208\";a=\"166094690\"",
            "E=Sophos;i=\"5.77,435,1596524400\"; d=\"scan'208\";a=\"352025758\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Timothy McDaniel <timothy.mcdaniel@intel.com>",
        "To": "Ray Kinsella <mdr@ashroe.eu>,\n\tNeil Horman <nhorman@tuxdriver.com>",
        "Cc": "dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,\n harry.van.haaren@intel.com, jerinj@marvell.com, thomas@monjalon.net",
        "Date": "Fri, 30 Oct 2020 18:41:32 -0500",
        "Message-Id": "<1604101295-15970-21-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1604101295-15970-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<20200612212434.6852-2-timothy.mcdaniel@intel.com>\n <1604101295-15970-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v11 20/23] event/dlb: add PMD's token pop public\n\tinterface",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "The PMD uses a public interface to allow applications to\ncontrol the token pop mode. Supported token pop modes are\nas follows, and they impact core scheduling affinity for\nldb ports.\n\nAUTO_POP: Pop the CQ tokens immediately after dequeueing.\nDELAYED_POP: Pop CQ tokens after (dequeue_depth - 1) events\n\t     are released. Supported on load-balanced ports\n\t     only.\nDEFERRED_POP: Pop the CQ tokens during next dequeue operation.\n\nSigned-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>\nReviewed-by: Gage Eads <gage.eads@intel.com>\n---\n doc/api/doxy-api-index.md       |   1 +\n drivers/event/dlb/dlb.c         | 121 ++++++++++++++++++++++++++++++++++++----\n drivers/event/dlb/dlb_priv.h    |   3 +\n drivers/event/dlb/meson.build   |   4 +-\n drivers/event/dlb/rte_pmd_dlb.c |  38 +++++++++++++\n drivers/event/dlb/rte_pmd_dlb.h |  77 +++++++++++++++++++++++++\n drivers/event/dlb/version.map   |   6 ++\n 7 files changed, 237 insertions(+), 13 deletions(-)\n create mode 100644 drivers/event/dlb/rte_pmd_dlb.c\n create mode 100644 drivers/event/dlb/rte_pmd_dlb.h",
    "diff": "diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md\nindex a9c12d1..1c83bf4 100644\n--- a/doc/api/doxy-api-index.md\n+++ b/doc/api/doxy-api-index.md\n@@ -52,6 +52,7 @@ The public API headers are grouped by topics:\n   [dpaa2_cmdif]        (@ref rte_pmd_dpaa2_cmdif.h),\n   [dpaa2_qdma]         (@ref rte_pmd_dpaa2_qdma.h),\n   [crypto_scheduler]   (@ref rte_cryptodev_scheduler.h)\n+  [dlb]\t\t       (@ref rte_pmd_dlb.h),\n \n - **memory**:\n   [memseg]             (@ref rte_memory.h),\ndiff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c\nindex a8efcc1..31e4d50 100644\n--- a/drivers/event/dlb/dlb.c\n+++ b/drivers/event/dlb/dlb.c\n@@ -1022,6 +1022,33 @@ dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,\n \n \tqm_port->dequeue_depth = dequeue_depth;\n \n+\t/* When using the reserved token scheme, token_pop_thresh is\n+\t * initially 2 * dequeue_depth. Once the tokens are reserved,\n+\t * the enqueue code re-assigns it to dequeue_depth.\n+\t */\n+\tqm_port->token_pop_thresh = cq_depth;\n+\n+\t/* When the deferred scheduling vdev arg is selected, use deferred pop\n+\t * for all single-entry CQs.\n+\t */\n+\tif (cfg.cq_depth == 1 || (cfg.cq_depth == 2 && use_rsvd_token_scheme)) {\n+\t\tif (dlb->defer_sched)\n+\t\t\tqm_port->token_pop_mode = DEFERRED_POP;\n+\t}\n+\n+\t/* The default enqueue functions do not include delayed-pop support for\n+\t * performance reasons.\n+\t */\n+\tif (qm_port->token_pop_mode == DELAYED_POP) {\n+\t\tdlb->event_dev->enqueue = dlb_event_enqueue_delayed;\n+\t\tdlb->event_dev->enqueue_burst =\n+\t\t\tdlb_event_enqueue_burst_delayed;\n+\t\tdlb->event_dev->enqueue_new_burst =\n+\t\t\tdlb_event_enqueue_new_burst_delayed;\n+\t\tdlb->event_dev->enqueue_forward_burst =\n+\t\t\tdlb_event_enqueue_forward_burst_delayed;\n+\t}\n+\n \tqm_port->owed_tokens = 0;\n \tqm_port->issued_releases = 0;\n \n@@ -1182,6 +1209,8 @@ dlb_hw_create_dir_port(struct dlb_eventdev *dlb,\n \n \tqm_port->dequeue_depth = dequeue_depth;\n 
\n+\t/* Directed ports are auto-pop, by default. */\n+\tqm_port->token_pop_mode = AUTO_POP;\n \tqm_port->owed_tokens = 0;\n \tqm_port->issued_releases = 0;\n \n@@ -2682,7 +2711,8 @@ dlb_consume_qe_immediate(struct dlb_port *qm_port, int num)\n static inline uint16_t\n __dlb_event_enqueue_burst(void *event_port,\n \t\t\t  const struct rte_event events[],\n-\t\t\t  uint16_t num)\n+\t\t\t  uint16_t num,\n+\t\t\t  bool use_delayed)\n {\n \tstruct dlb_eventdev_port *ev_port = event_port;\n \tstruct dlb_port *qm_port = &ev_port->qm_port;\n@@ -2710,6 +2740,35 @@ __dlb_event_enqueue_burst(void *event_port,\n \n \t\tfor (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {\n \t\t\tconst struct rte_event *ev = &events[i + j];\n+\t\t\tint16_t thresh = qm_port->token_pop_thresh;\n+\n+\t\t\tif (use_delayed &&\n+\t\t\t    qm_port->token_pop_mode == DELAYED_POP &&\n+\t\t\t    (ev->op == RTE_EVENT_OP_FORWARD ||\n+\t\t\t     ev->op == RTE_EVENT_OP_RELEASE) &&\n+\t\t\t    qm_port->issued_releases >= thresh - 1) {\n+\t\t\t\t/* Insert the token pop QE and break out. This\n+\t\t\t\t * may result in a partial HCW, but that is\n+\t\t\t\t * simpler than supporting arbitrary QE\n+\t\t\t\t * insertion.\n+\t\t\t\t */\n+\t\t\t\tdlb_construct_token_pop_qe(qm_port, j);\n+\n+\t\t\t\t/* Reset the releases for the next QE batch */\n+\t\t\t\tqm_port->issued_releases -= thresh;\n+\n+\t\t\t\t/* When using delayed token pop mode, the\n+\t\t\t\t * initial token threshold is the full CQ\n+\t\t\t\t * depth. 
After the first token pop, we need to\n+\t\t\t\t * reset it to the dequeue_depth.\n+\t\t\t\t */\n+\t\t\t\tqm_port->token_pop_thresh =\n+\t\t\t\t\tqm_port->dequeue_depth;\n+\n+\t\t\t\tpop_offs = 1;\n+\t\t\t\tj++;\n+\t\t\t\tbreak;\n+\t\t\t}\n \n \t\t\tif (dlb_event_enqueue_prep(ev_port, qm_port, ev,\n \t\t\t\t\t\t   port_data, &sched_types[j],\n@@ -2745,7 +2804,7 @@ dlb_event_enqueue_burst(void *event_port,\n \t\t\tconst struct rte_event events[],\n \t\t\tuint16_t num)\n {\n-\treturn __dlb_event_enqueue_burst(event_port, events, num);\n+\treturn __dlb_event_enqueue_burst(event_port, events, num, false);\n }\n \n static inline uint16_t\n@@ -2753,21 +2812,21 @@ dlb_event_enqueue_burst_delayed(void *event_port,\n \t\t\t\tconst struct rte_event events[],\n \t\t\t\tuint16_t num)\n {\n-\treturn __dlb_event_enqueue_burst(event_port, events, num);\n+\treturn __dlb_event_enqueue_burst(event_port, events, num, true);\n }\n \n static inline uint16_t\n dlb_event_enqueue(void *event_port,\n \t\t  const struct rte_event events[])\n {\n-\treturn __dlb_event_enqueue_burst(event_port, events, 1);\n+\treturn __dlb_event_enqueue_burst(event_port, events, 1, false);\n }\n \n static inline uint16_t\n dlb_event_enqueue_delayed(void *event_port,\n \t\t\t  const struct rte_event events[])\n {\n-\treturn __dlb_event_enqueue_burst(event_port, events, 1);\n+\treturn __dlb_event_enqueue_burst(event_port, events, 1, true);\n }\n \n static uint16_t\n@@ -2775,7 +2834,7 @@ dlb_event_enqueue_new_burst(void *event_port,\n \t\t\t    const struct rte_event events[],\n \t\t\t    uint16_t num)\n {\n-\treturn __dlb_event_enqueue_burst(event_port, events, num);\n+\treturn __dlb_event_enqueue_burst(event_port, events, num, false);\n }\n \n static uint16_t\n@@ -2783,7 +2842,7 @@ dlb_event_enqueue_new_burst_delayed(void *event_port,\n \t\t\t\t    const struct rte_event events[],\n \t\t\t\t    uint16_t num)\n {\n-\treturn __dlb_event_enqueue_burst(event_port, events, num);\n+\treturn 
__dlb_event_enqueue_burst(event_port, events, num, true);\n }\n \n static uint16_t\n@@ -2791,7 +2850,7 @@ dlb_event_enqueue_forward_burst(void *event_port,\n \t\t\t\tconst struct rte_event events[],\n \t\t\t\tuint16_t num)\n {\n-\treturn __dlb_event_enqueue_burst(event_port, events, num);\n+\treturn __dlb_event_enqueue_burst(event_port, events, num, false);\n }\n \n static uint16_t\n@@ -2799,7 +2858,7 @@ dlb_event_enqueue_forward_burst_delayed(void *event_port,\n \t\t\t\t\tconst struct rte_event events[],\n \t\t\t\t\tuint16_t num)\n {\n-\treturn __dlb_event_enqueue_burst(event_port, events, num);\n+\treturn __dlb_event_enqueue_burst(event_port, events, num, true);\n }\n \n static __rte_always_inline int\n@@ -3199,7 +3258,8 @@ dlb_hw_dequeue(struct dlb_eventdev *dlb,\n \n \tqm_port->owed_tokens += num;\n \n-\tdlb_consume_qe_immediate(qm_port, num);\n+\tif (num && qm_port->token_pop_mode == AUTO_POP)\n+\t\tdlb_consume_qe_immediate(qm_port, num);\n \n \tev_port->outstanding_releases += num;\n \n@@ -3324,7 +3384,8 @@ dlb_hw_dequeue_sparse(struct dlb_eventdev *dlb,\n \n \tqm_port->owed_tokens += num;\n \n-\tdlb_consume_qe_immediate(qm_port, num);\n+\tif (num && qm_port->token_pop_mode == AUTO_POP)\n+\t\tdlb_consume_qe_immediate(qm_port, num);\n \n \tev_port->outstanding_releases += num;\n \n@@ -3368,6 +3429,28 @@ dlb_event_release(struct dlb_eventdev *dlb, uint8_t port_id, int n)\n \t\tqm_port->qe4[3].cmd_byte = 0;\n \n \t\tfor (; j < DLB_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {\n+\t\t\tint16_t thresh = qm_port->token_pop_thresh;\n+\n+\t\t\tif (qm_port->token_pop_mode == DELAYED_POP &&\n+\t\t\t    qm_port->issued_releases >= thresh - 1) {\n+\t\t\t\t/* Insert the token pop QE */\n+\t\t\t\tdlb_construct_token_pop_qe(qm_port, j);\n+\n+\t\t\t\t/* Reset the releases for the next QE batch */\n+\t\t\t\tqm_port->issued_releases -= thresh;\n+\n+\t\t\t\t/* When using delayed token pop mode, the\n+\t\t\t\t * initial token threshold is the full CQ\n+\t\t\t\t * depth. 
After the first token pop, we need to\n+\t\t\t\t * reset it to the dequeue_depth.\n+\t\t\t\t */\n+\t\t\t\tqm_port->token_pop_thresh =\n+\t\t\t\t\tqm_port->dequeue_depth;\n+\n+\t\t\t\tpop_offs = 1;\n+\t\t\t\tj++;\n+\t\t\t\tbreak;\n+\t\t\t}\n \n \t\t\tqm_port->qe4[j].cmd_byte = DLB_COMP_CMD_BYTE;\n \t\t\tqm_port->issued_releases++;\n@@ -3400,6 +3483,7 @@ dlb_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,\n \t\t\tuint64_t wait)\n {\n \tstruct dlb_eventdev_port *ev_port = event_port;\n+\tstruct dlb_port *qm_port = &ev_port->qm_port;\n \tstruct dlb_eventdev *dlb = ev_port->dlb;\n \tuint16_t cnt;\n \tint ret;\n@@ -3419,6 +3503,10 @@ dlb_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,\n \t\tDLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);\n \t}\n \n+\tif (qm_port->token_pop_mode == DEFERRED_POP &&\n+\t\t\tqm_port->owed_tokens)\n+\t\tdlb_consume_qe_immediate(qm_port, qm_port->owed_tokens);\n+\n \tcnt = dlb_hw_dequeue(dlb, ev_port, ev, num, wait);\n \n \tDLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);\n@@ -3437,6 +3525,7 @@ dlb_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,\n \t\t\t       uint16_t num, uint64_t wait)\n {\n \tstruct dlb_eventdev_port *ev_port = event_port;\n+\tstruct dlb_port *qm_port = &ev_port->qm_port;\n \tstruct dlb_eventdev *dlb = ev_port->dlb;\n \tuint16_t cnt;\n \tint ret;\n@@ -3456,6 +3545,10 @@ dlb_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,\n \t\tDLB_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);\n \t}\n \n+\tif (qm_port->token_pop_mode == DEFERRED_POP &&\n+\t    qm_port->owed_tokens)\n+\t\tdlb_consume_qe_immediate(qm_port, qm_port->owed_tokens);\n+\n \tcnt = dlb_hw_dequeue_sparse(dlb, ev_port, ev, num, wait);\n \n \tDLB_INC_STAT(ev_port->stats.traffic.total_polls, 1);\n@@ -3762,7 +3855,7 @@ dlb_primary_eventdev_probe(struct rte_eventdev *dev,\n \t\t\t   struct dlb_devargs *dlb_args)\n {\n \tstruct dlb_eventdev *dlb;\n-\tint err;\n+\tint 
err, i;\n \n \tdlb = dev->data->dev_private;\n \n@@ -3811,6 +3904,10 @@ dlb_primary_eventdev_probe(struct rte_eventdev *dev,\n \t\treturn err;\n \t}\n \n+\t/* Initialize each port's token pop mode */\n+\tfor (i = 0; i < DLB_MAX_NUM_PORTS; i++)\n+\t\tdlb->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;\n+\n \trte_spinlock_init(&dlb->qm_instance.resource_lock);\n \n \tdlb_iface_low_level_io_init(dlb);\ndiff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h\nindex adb1f7a..58ff428 100644\n--- a/drivers/event/dlb/dlb_priv.h\n+++ b/drivers/event/dlb/dlb_priv.h\n@@ -16,6 +16,7 @@\n \n #include \"dlb_user.h\"\n #include \"dlb_log.h\"\n+#include \"rte_pmd_dlb.h\"\n \n #ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS\n #define DLB_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)\n@@ -262,6 +263,7 @@ struct dlb_port {\n \tbool gen_bit;\n \tuint16_t dir_credits;\n \tuint32_t dequeue_depth;\n+\tenum dlb_token_pop_mode token_pop_mode;\n \tint pp_mmio_base;\n \tuint16_t cached_ldb_credits;\n \tuint16_t ldb_pushcount_at_credit_expiry;\n@@ -273,6 +275,7 @@ struct dlb_port {\n \tuint8_t cq_rsvd_token_deficit;\n \tuint16_t owed_tokens;\n \tint16_t issued_releases;\n+\tint16_t token_pop_thresh;\n \tint cq_depth;\n \tuint16_t cq_idx;\n \tuint16_t cq_idx_unmasked;\ndiff --git a/drivers/event/dlb/meson.build b/drivers/event/dlb/meson.build\nindex 552ff9d..7f38c30 100644\n--- a/drivers/event/dlb/meson.build\n+++ b/drivers/event/dlb/meson.build\n@@ -12,7 +12,9 @@ sources = files('dlb.c',\n \t\t'dlb_xstats.c',\n \t\t'pf/dlb_main.c',\n \t\t'pf/dlb_pf.c',\n-\t\t'pf/base/dlb_resource.c'\n+\t\t'pf/base/dlb_resource.c',\n+\t\t'rte_pmd_dlb.c',\n )\n \n deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']\n+install_headers('rte_pmd_dlb.h')\ndiff --git a/drivers/event/dlb/rte_pmd_dlb.c b/drivers/event/dlb/rte_pmd_dlb.c\nnew file mode 100644\nindex 0000000..bc802d3\n--- /dev/null\n+++ b/drivers/event/dlb/rte_pmd_dlb.c\n@@ -0,0 +1,38 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * 
Copyright(c) 2020 Intel Corporation\n+ */\n+\n+#include \"rte_eventdev.h\"\n+#include \"rte_eventdev_pmd.h\"\n+#include \"rte_pmd_dlb.h\"\n+#include \"dlb_priv.h\"\n+#include \"dlb_inline_fns.h\"\n+\n+int\n+rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,\n+\t\t\t       uint8_t port_id,\n+\t\t\t       enum dlb_token_pop_mode mode)\n+{\n+\tstruct dlb_eventdev *dlb;\n+\tstruct rte_eventdev *dev;\n+\n+\tRTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);\n+\tdev = &rte_eventdevs[dev_id];\n+\n+\tdlb = dlb_pmd_priv(dev);\n+\n+\tif (mode >= NUM_TOKEN_POP_MODES)\n+\t\treturn -EINVAL;\n+\n+\t/* The event device must be configured, but not yet started */\n+\tif (!dlb->configured || dlb->run_state != DLB_RUN_STATE_STOPPED)\n+\t\treturn -EINVAL;\n+\n+\t/* The token pop mode must be set before configuring the port */\n+\tif (port_id >= dlb->num_ports || dlb->ev_ports[port_id].setup_done)\n+\t\treturn -EINVAL;\n+\n+\tdlb->ev_ports[port_id].qm_port.token_pop_mode = mode;\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/event/dlb/rte_pmd_dlb.h b/drivers/event/dlb/rte_pmd_dlb.h\nnew file mode 100644\nindex 0000000..9cf6dd3\n--- /dev/null\n+++ b/drivers/event/dlb/rte_pmd_dlb.h\n@@ -0,0 +1,77 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2019-2020 Intel Corporation\n+ */\n+\n+/*!\n+ *  @file      rte_pmd_dlb.h\n+ *\n+ *  @brief     DLB PMD-specific functions\n+ *\n+ */\n+\n+#ifndef _RTE_PMD_DLB_H_\n+#define _RTE_PMD_DLB_H_\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#include <stdint.h>\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ *\n+ * Selects the token pop mode for an DLB port.\n+ */\n+enum dlb_token_pop_mode {\n+\t/* Pop the CQ tokens immediately after dequeueing. */\n+\tAUTO_POP,\n+\t/* Pop CQ tokens after (dequeue_depth - 1) events are released.\n+\t * Supported on load-balanced ports only.\n+\t */\n+\tDELAYED_POP,\n+\t/* Pop the CQ tokens during next dequeue operation. 
*/\n+\tDEFERRED_POP,\n+\n+\t/* NUM_TOKEN_POP_MODES must be last */\n+\tNUM_TOKEN_POP_MODES\n+};\n+\n+/*!\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice\n+ *\n+ * Configure the token pop mode for an DLB port. By default, all ports use\n+ * AUTO_POP. This function must be called before calling rte_event_port_setup()\n+ * for the port, but after calling rte_event_dev_configure().\n+ *\n+ * @note\n+ *    The defer_sched vdev arg, which configures all load-balanced ports with\n+ *    dequeue_depth == 1 for DEFERRED_POP mode, takes precedence over this\n+ *    function.\n+ *\n+ * @param dev_id\n+ *    The identifier of the event device.\n+ * @param port_id\n+ *    The identifier of the event port.\n+ * @param mode\n+ *    The token pop mode.\n+ *\n+ * @return\n+ * - 0: Success\n+ * - EINVAL: Invalid dev_id, port_id, or mode\n+ * - EINVAL: The DLB is not configured, is already running, or the port is\n+ *   already setup\n+ */\n+\n+__rte_experimental\n+int\n+rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,\n+\t\t\t       uint8_t port_id,\n+\t\t\t       enum dlb_token_pop_mode mode);\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_PMD_DLB_H_ */\ndiff --git a/drivers/event/dlb/version.map b/drivers/event/dlb/version.map\nindex 4a76d1d..3338a22 100644\n--- a/drivers/event/dlb/version.map\n+++ b/drivers/event/dlb/version.map\n@@ -1,3 +1,9 @@\n DPDK_21 {\n \tlocal: *;\n };\n+\n+EXPERIMENTAL {\n+\tglobal:\n+\n+\trte_pmd_dlb_set_token_pop_mode;\n+};\n",
    "prefixes": [
        "v11",
        "20/23"
    ]
}