get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Fully update a patch; all writable fields must be supplied in the request.

GET /api/patches/75052/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 75052,
    "url": "http://patches.dpdk.org/api/patches/75052/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1596138614-17409-2-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1596138614-17409-2-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1596138614-17409-2-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2020-07-30T19:49:48",
    "name": "[01/27] eventdev: dlb upstream prerequisites",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "ef58751d3811f07fbe8ac50bea4e1115778f3614",
    "submitter": {
        "id": 826,
        "url": "http://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1596138614-17409-2-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 11425,
            "url": "http://patches.dpdk.org/api/series/11425/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=11425",
            "date": "2020-07-30T19:49:47",
            "name": "Add Intel DLM PMD to 20.11",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/11425/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/75052/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/75052/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 672C0A052B;\n\tThu, 30 Jul 2020 21:53:06 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1B54B4C93;\n\tThu, 30 Jul 2020 21:53:01 +0200 (CEST)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n by dpdk.org (Postfix) with ESMTP id 204762BC7\n for <dev@dpdk.org>; Thu, 30 Jul 2020 21:52:57 +0200 (CEST)",
            "from orsmga005.jf.intel.com ([10.7.209.41])\n by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 30 Jul 2020 12:52:57 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by orsmga005.jf.intel.com with ESMTP; 30 Jul 2020 12:52:56 -0700"
        ],
        "IronPort-SDR": [
            "\n 3t8v/gcFW+GGri41jiD0PnnCnpInQXZZ0fSAAirn/Q9Gm/1MT8LzvQ3hMClK0hMon/02NH2JzK\n H0We/UnPI4jg==",
            "\n PaLvrARpQn1gX6bj0B78a1b8Rik9l2FBNe7e7NHe3Nz1H17/na/Yj4sAmawW2SgGOJ5pIiFPXK\n nwO2cg2wIHHg=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9698\"; a=\"236547443\"",
            "E=Sophos;i=\"5.75,415,1589266800\"; d=\"scan'208\";a=\"236547443\"",
            "E=Sophos;i=\"5.75,415,1589266800\"; d=\"scan'208\";a=\"465378048\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "\"McDaniel, Timothy\" <timothy.mcdaniel@intel.com>",
        "To": "jerinj@marvell.com",
        "Cc": "mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com,\n harry.van.haaren@intel.com,\n \"McDaniel, Timothy\" <timothy.mcdaniel@intel.com>",
        "Date": "Thu, 30 Jul 2020 14:49:48 -0500",
        "Message-Id": "<1596138614-17409-2-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1596138614-17409-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<1593232671-5690-0-git-send-email-timothy.mcdaniel@intel.com>\n <1596138614-17409-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 01/27] eventdev: dlb upstream prerequisites",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: \"McDaniel, Timothy\" <timothy.mcdaniel@intel.com>\n\nThe DLB hardware does not conform exactly to the eventdev interface.\n1) It has a limit on the number of queues that may be linked to a port.\n2) Some ports a further restricted to a maximum of 1 linked queue.\n3) It does not (currently) have the ability to carry the flow_id as part\nof the event (QE) payload.\n\nDue to the above, we would like to propose the following enhancements.\n\n1) Add new fields to the rte_event_dev_info struct. These fields allow\nthe device to advertize its capabilities so that applications can take\nthe appropriate actions based on those capabilities.\n\n    struct rte_event_dev_info {\n\tuint32_t max_event_port_links;\n\t/**< Maximum number of queues that can be linked to a single event\n\t * port by this device.\n\t */\n\n\tuint8_t max_single_link_event_port_queue_pairs;\n\t/**< Maximum number of event ports and queues that are optimized for\n\t * (and only capable of) single-link configurations supported by this\n\t * device. These ports and queues are not accounted for in\n\t * max_event_ports or max_event_queues.\n\t */\n    }\n\n2) Add a new field to the rte_event_dev_config struct. This field allows\nthe application to specify how many of its ports are limited to a single\nlink, or will be used in single link mode.\n\n    /** Event device configuration structure */\n    struct rte_event_dev_config {\n\tuint8_t nb_single_link_event_port_queues;\n\t/**< Number of event ports and queues that will be singly-linked to\n\t * each other. These are a subset of the overall event ports and\n\t * queues; this value cannot exceed *nb_event_ports* or\n\t * *nb_event_queues*. 
If the device has ports and queues that are\n\t * optimized for single-link usage, this field is a hint for how many\n\t * to allocate; otherwise, regular event ports and queues can be used.\n\t */\n    }\n\n3) Replace the dedicated implicit_release_disabled field with a bit field\nof explicit port capabilities. The implicit_release_disable functionality\nis assigned to one bit, and a port-is-single-link-only  attribute is\nassigned to other, with the remaining bits available for future assignment.\n\n\t* Event port configuration bitmap flags */\n\t#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)\n\t/**< Configure the port not to release outstanding events in\n\t * rte_event_dev_dequeue_burst(). If set, all events received through\n\t * the port must be explicitly released with RTE_EVENT_OP_RELEASE or\n\t * RTE_EVENT_OP_FORWARD. Must be unset if the device is not\n\t * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.\n\t */\n\t#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)\n\n\t/**< This event port links only to a single event queue.\n\t *\n\t *  @see rte_event_port_setup(), rte_event_port_link()\n\t */\n\n\t#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3\n\t/**\n\t * The implicit release disable attribute of the port\n\t */\n\n\tstruct rte_event_port_conf {\n\t\tuint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */\n\t}\n\n4) Add UMWAIT/UMONITOR bit to rte_cpuflags\n\n5) Added a new API that is useful for probing PCI devices.\n\n\t/**\n\t * @internal\n\t * Wrapper for use by pci drivers as a .probe function to attach to a event\n\t * interface.  
Same as rte_event_pmd_pci_probe, except caller can specify\n\t * the name.\n\t */\n\tstatic inline int\n\trte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,\n\t\t\t\t    struct rte_pci_device *pci_dev,\n\t\t\t\t    size_t private_data_size,\n\t\t\t\t    eventdev_pmd_pci_callback_t devinit,\n\t\t\t\t    const char *name);\n\nSigned-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>\n---\n app/test-eventdev/evt_common.h                     |   11 ++++\n app/test-eventdev/test_order_atq.c                 |   30 +++++++--\n app/test-eventdev/test_order_common.c              |    5 +-\n app/test-eventdev/test_order_queue.c               |   31 +++++++--\n app/test/test_eventdev.c                           |    4 +-\n drivers/event/dpaa/dpaa_eventdev.c                 |    3 +-\n drivers/event/dpaa2/dpaa2_eventdev.c               |    5 +-\n drivers/event/dsw/dsw_evdev.c                      |    3 +-\n drivers/event/octeontx/ssovf_evdev.c               |    5 +-\n drivers/event/octeontx2/otx2_evdev.c               |    3 +-\n drivers/event/opdl/opdl_evdev.c                    |    3 +-\n drivers/event/skeleton/skeleton_eventdev.c         |    5 +-\n drivers/event/sw/sw_evdev.c                        |    8 ++-\n drivers/event/sw/sw_evdev_selftest.c               |    6 +-\n .../eventdev_pipeline/pipeline_worker_generic.c    |    6 +-\n examples/eventdev_pipeline/pipeline_worker_tx.c    |    1 +\n examples/l2fwd-event/l2fwd_event_generic.c         |    5 +-\n examples/l2fwd-event/l2fwd_event_internal_port.c   |    5 +-\n examples/l3fwd/l3fwd_event_generic.c               |    5 +-\n examples/l3fwd/l3fwd_event_internal_port.c         |    5 +-\n lib/librte_eal/x86/include/rte_cpuflags.h          |    1 +\n lib/librte_eal/x86/rte_cpuflags.c                  |    1 +\n lib/librte_eventdev/meson.build                    |    1 +\n lib/librte_eventdev/rte_event_eth_tx_adapter.c     |    2 +-\n lib/librte_eventdev/rte_eventdev.c                 |   67 
+++++++++++++++++---\n lib/librte_eventdev/rte_eventdev.h                 |   51 ++++++++++++---\n lib/librte_eventdev/rte_eventdev_pmd_pci.h         |   54 ++++++++++++++++\n lib/librte_eventdev/rte_eventdev_version.map       |    4 +-\n 28 files changed, 268 insertions(+), 62 deletions(-)",
    "diff": "diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h\nindex f9d7378..a1da1cf 100644\n--- a/app/test-eventdev/evt_common.h\n+++ b/app/test-eventdev/evt_common.h\n@@ -104,6 +104,16 @@ struct evt_options {\n \t\t\ttrue : false;\n }\n \n+static inline bool\n+evt_has_flow_id(uint8_t dev_id)\n+{\n+\tstruct rte_event_dev_info dev_info;\n+\n+\trte_event_dev_info_get(dev_id, &dev_info);\n+\treturn (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?\n+\t\t\ttrue : false;\n+}\n+\n static inline int\n evt_service_setup(uint32_t service_id)\n {\n@@ -169,6 +179,7 @@ struct evt_options {\n \t\t\t.dequeue_timeout_ns = opt->deq_tmo_nsec,\n \t\t\t.nb_event_queues = nb_queues,\n \t\t\t.nb_event_ports = nb_ports,\n+\t\t\t.nb_single_link_event_port_queues = 0,\n \t\t\t.nb_events_limit  = info.max_num_events,\n \t\t\t.nb_event_queue_flows = opt->nb_flows,\n \t\t\t.nb_event_port_dequeue_depth =\ndiff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c\nindex 3366cfc..8ef3b40 100644\n--- a/app/test-eventdev/test_order_atq.c\n+++ b/app/test-eventdev/test_order_atq.c\n@@ -19,7 +19,7 @@\n }\n \n static int\n-order_atq_worker(void *arg)\n+order_atq_worker(void *arg,  const uint64_t flow_id_cap)\n {\n \tORDER_WORKER_INIT;\n \tstruct rte_event ev;\n@@ -34,6 +34,9 @@\n \t\t\tcontinue;\n \t\t}\n \n+\t\tif (!flow_id_cap)\n+\t\t\tev.flow_id = ev.mbuf->udata64;\n+\n \t\tif (ev.sub_event_type == 0) { /* stage 0 from producer */\n \t\t\torder_atq_process_stage_0(&ev);\n \t\t\twhile (rte_event_enqueue_burst(dev_id, port, &ev, 1)\n@@ -50,7 +53,7 @@\n }\n \n static int\n-order_atq_worker_burst(void *arg)\n+order_atq_worker_burst(void *arg,  const uint64_t flow_id_cap)\n {\n \tORDER_WORKER_INIT;\n \tstruct rte_event ev[BURST_SIZE];\n@@ -68,6 +71,9 @@\n \t\t}\n \n \t\tfor (i = 0; i < nb_rx; i++) {\n+\t\t\tif (!flow_id_cap)\n+\t\t\t\tev[i].flow_id = ev[i].mbuf->udata64;\n+\n \t\t\tif (ev[i].sub_event_type == 0) { /*stage 0 */\n 
\t\t\t\torder_atq_process_stage_0(&ev[i]);\n \t\t\t} else if (ev[i].sub_event_type == 1) { /* stage 1 */\n@@ -95,11 +101,21 @@\n {\n \tstruct worker_data *w  = arg;\n \tconst bool burst = evt_has_burst_mode(w->dev_id);\n-\n-\tif (burst)\n-\t\treturn order_atq_worker_burst(arg);\n-\telse\n-\t\treturn order_atq_worker(arg);\n+\tconst bool flow_id_cap = evt_has_flow_id(w->dev_id);\n+\n+\tif (burst) {\n+\t\tif (flow_id_cap)\n+\t\t\treturn order_atq_worker_burst(arg,\n+\t\t\t\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID);\n+\t\telse\n+\t\t\treturn order_atq_worker_burst(arg, 0);\n+\t} else {\n+\t\tif (flow_id_cap)\n+\t\t\treturn order_atq_worker(arg,\n+\t\t\t\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID);\n+\t\telse\n+\t\t\treturn order_atq_worker(arg, 0);\n+\t}\n }\n \n static int\ndiff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c\nindex 4190f9a..928904e 100644\n--- a/app/test-eventdev/test_order_common.c\n+++ b/app/test-eventdev/test_order_common.c\n@@ -49,6 +49,7 @@\n \t\tconst uint32_t flow = (uintptr_t)m % nb_flows;\n \t\t/* Maintain seq number per flow */\n \t\tm->seqn = producer_flow_seq[flow]++;\n+\t\tm->udata64 = flow;\n \n \t\tev.flow_id = flow;\n \t\tev.mbuf = m;\n@@ -318,7 +319,7 @@\n \t\topt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;\n \n \t/* port configuration */\n-\tconst struct rte_event_port_conf p_conf = {\n+\tstruct rte_event_port_conf p_conf = {\n \t\t\t.dequeue_depth = opt->wkr_deq_dep,\n \t\t\t.enqueue_depth = dev_info.max_event_port_dequeue_depth,\n \t\t\t.new_event_threshold = dev_info.max_num_events,\n@@ -351,6 +352,8 @@\n \tp->queue_id = 0;\n \tp->t = t;\n \n+\tp_conf.new_event_threshold /= 2;\n+\n \tret = rte_event_port_setup(opt->dev_id, port, &p_conf);\n \tif (ret) {\n \t\tevt_err(\"failed to setup producer port %d\", port);\ndiff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c\nindex 495efd9..7a71d2b 100644\n--- a/app/test-eventdev/test_order_queue.c\n+++ 
b/app/test-eventdev/test_order_queue.c\n@@ -19,7 +19,7 @@\n }\n \n static int\n-order_queue_worker(void *arg)\n+order_queue_worker(void *arg, const uint64_t flow_id_cap)\n {\n \tORDER_WORKER_INIT;\n \tstruct rte_event ev;\n@@ -34,6 +34,9 @@\n \t\t\tcontinue;\n \t\t}\n \n+\t\tif (!flow_id_cap)\n+\t\t\tev.flow_id = ev.mbuf->udata64;\n+\n \t\tif (ev.queue_id == 0) { /* from ordered queue */\n \t\t\torder_queue_process_stage_0(&ev);\n \t\t\twhile (rte_event_enqueue_burst(dev_id, port, &ev, 1)\n@@ -50,7 +53,7 @@\n }\n \n static int\n-order_queue_worker_burst(void *arg)\n+order_queue_worker_burst(void *arg, const uint64_t flow_id_cap)\n {\n \tORDER_WORKER_INIT;\n \tstruct rte_event ev[BURST_SIZE];\n@@ -68,6 +71,10 @@\n \t\t}\n \n \t\tfor (i = 0; i < nb_rx; i++) {\n+\n+\t\t\tif (!flow_id_cap)\n+\t\t\t\tev[i].flow_id = ev[i].mbuf->udata64;\n+\n \t\t\tif (ev[i].queue_id == 0) { /* from ordered queue */\n \t\t\t\torder_queue_process_stage_0(&ev[i]);\n \t\t\t} else if (ev[i].queue_id == 1) {/* from atomic queue */\n@@ -95,11 +102,21 @@\n {\n \tstruct worker_data *w  = arg;\n \tconst bool burst = evt_has_burst_mode(w->dev_id);\n-\n-\tif (burst)\n-\t\treturn order_queue_worker_burst(arg);\n-\telse\n-\t\treturn order_queue_worker(arg);\n+\tconst bool flow_id_cap = evt_has_flow_id(w->dev_id);\n+\n+\tif (burst) {\n+\t\tif (flow_id_cap)\n+\t\t\treturn order_queue_worker_burst(arg,\n+\t\t\t\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID);\n+\t\telse\n+\t\t\treturn order_queue_worker_burst(arg, 0);\n+\t} else {\n+\t\tif (flow_id_cap)\n+\t\t\treturn order_queue_worker(arg,\n+\t\t\t\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID);\n+\t\telse\n+\t\t\treturn order_queue_worker(arg, 0);\n+\t}\n }\n \n static int\ndiff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c\nindex 43ccb1c..62019c1 100644\n--- a/app/test/test_eventdev.c\n+++ b/app/test/test_eventdev.c\n@@ -559,10 +559,10 @@\n \tif (!(info.event_dev_cap &\n \t      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {\n \t\tpconf.enqueue_depth = 
info.max_event_port_enqueue_depth;\n-\t\tpconf.disable_implicit_release = 1;\n+\t\tpconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n \t\tret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);\n \t\tTEST_ASSERT(ret == -EINVAL, \"Expected -EINVAL, %d\", ret);\n-\t\tpconf.disable_implicit_release = 0;\n+\t\tpconf.event_port_cfg = 0;\n \t}\n \n \tret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,\ndiff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c\nindex a3c138b..0804c6e 100644\n--- a/drivers/event/dpaa/dpaa_eventdev.c\n+++ b/drivers/event/dpaa/dpaa_eventdev.c\n@@ -357,7 +357,8 @@ static void drain_4_bytes(int fd, fd_set *fdset)\n \t\tRTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |\n \t\tRTE_EVENT_DEV_CAP_BURST_MODE |\n \t\tRTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |\n-\t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE;\n+\t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE |\n+\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID;\n }\n \n static int\ndiff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c\nindex a545baf..d30812f 100644\n--- a/drivers/event/dpaa2/dpaa2_eventdev.c\n+++ b/drivers/event/dpaa2/dpaa2_eventdev.c\n@@ -405,7 +405,8 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,\n \t\tRTE_EVENT_DEV_CAP_BURST_MODE|\n \t\tRTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |\n \t\tRTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |\n-\t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE;\n+\t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE |\n+\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID;\n \n }\n \n@@ -535,7 +536,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,\n \t\tDPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;\n \tport_conf->enqueue_depth =\n \t\tDPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;\n-\tport_conf->disable_implicit_release = 0;\n+\tport_conf->event_port_cfg = 0;\n }\n \n static int\ndiff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c\nindex e796975..933a5a5 100644\n--- a/drivers/event/dsw/dsw_evdev.c\n+++ b/drivers/event/dsw/dsw_evdev.c\n@@ -224,7 +224,8 @@\n 
\t\t.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|\n \t\tRTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|\n \t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE|\n-\t\tRTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT\n+\t\tRTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|\n+\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID\n \t};\n }\n \ndiff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c\nindex 4fc4e8f..1c6bcca 100644\n--- a/drivers/event/octeontx/ssovf_evdev.c\n+++ b/drivers/event/octeontx/ssovf_evdev.c\n@@ -152,7 +152,8 @@ struct ssovf_mbox_convert_ns_getworks_iter {\n \t\t\t\t\tRTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES|\n \t\t\t\t\tRTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |\n \t\t\t\t\tRTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |\n-\t\t\t\t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE;\n+\t\t\t\t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE |\n+\t\t\t\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID;\n \n }\n \n@@ -218,7 +219,7 @@ struct ssovf_mbox_convert_ns_getworks_iter {\n \tport_conf->new_event_threshold = edev->max_num_events;\n \tport_conf->dequeue_depth = 1;\n \tport_conf->enqueue_depth = 1;\n-\tport_conf->disable_implicit_release = 0;\n+\tport_conf->event_port_cfg = 0;\n }\n \n static void\ndiff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c\nindex b8b57c3..ae35bb5 100644\n--- a/drivers/event/octeontx2/otx2_evdev.c\n+++ b/drivers/event/octeontx2/otx2_evdev.c\n@@ -501,7 +501,8 @@\n \t\t\t\t\tRTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |\n \t\t\t\t\tRTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |\n \t\t\t\t\tRTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |\n-\t\t\t\t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE;\n+\t\t\t\t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE |\n+\t\t\t\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID;\n }\n \n static void\ndiff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c\nindex 9b2f75f..3050578 100644\n--- a/drivers/event/opdl/opdl_evdev.c\n+++ b/drivers/event/opdl/opdl_evdev.c\n@@ -374,7 +374,8 @@\n \t\t.max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,\n \t\t.max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,\n 
\t\t.max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,\n-\t\t.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,\n+\t\t.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE |\n+\t\t\t\t RTE_EVENT_DEV_CAP_CARRY_FLOW_ID,\n \t};\n \n \t*info = evdev_opdl_info;\ndiff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c\nindex c889220..6fd1102 100644\n--- a/drivers/event/skeleton/skeleton_eventdev.c\n+++ b/drivers/event/skeleton/skeleton_eventdev.c\n@@ -101,7 +101,8 @@\n \tdev_info->max_num_events = (1ULL << 20);\n \tdev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |\n \t\t\t\t\tRTE_EVENT_DEV_CAP_BURST_MODE |\n-\t\t\t\t\tRTE_EVENT_DEV_CAP_EVENT_QOS;\n+\t\t\t\t\tRTE_EVENT_DEV_CAP_EVENT_QOS |\n+\t\t\t\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID;\n }\n \n static int\n@@ -209,7 +210,7 @@\n \tport_conf->new_event_threshold = 32 * 1024;\n \tport_conf->dequeue_depth = 16;\n \tport_conf->enqueue_depth = 16;\n-\tport_conf->disable_implicit_release = 0;\n+\tport_conf->event_port_cfg = 0;\n }\n \n static void\ndiff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c\nindex 98dae71..058f568 100644\n--- a/drivers/event/sw/sw_evdev.c\n+++ b/drivers/event/sw/sw_evdev.c\n@@ -175,7 +175,8 @@\n \t}\n \n \tp->inflight_max = conf->new_event_threshold;\n-\tp->implicit_release = !conf->disable_implicit_release;\n+\tp->implicit_release = !(conf->event_port_cfg &\n+\t\t\t\tRTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);\n \n \t/* check if ring exists, same as rx_worker above */\n \tsnprintf(buf, sizeof(buf), \"sw%d_p%u, %s\", dev->data->dev_id,\n@@ -508,7 +509,7 @@\n \tport_conf->new_event_threshold = 1024;\n \tport_conf->dequeue_depth = 16;\n \tport_conf->enqueue_depth = 16;\n-\tport_conf->disable_implicit_release = 0;\n+\tport_conf->event_port_cfg = 0;\n }\n \n static int\n@@ -615,7 +616,8 @@\n \t\t\t\tRTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|\n \t\t\t\tRTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |\n \t\t\t\tRTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT 
|\n-\t\t\t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE),\n+\t\t\t\tRTE_EVENT_DEV_CAP_NONSEQ_MODE |\n+\t\t\t\tRTE_EVENT_DEV_CAP_CARRY_FLOW_ID),\n \t};\n \n \t*info = evdev_sw_info;\ndiff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c\nindex 38c21fa..4a7d823 100644\n--- a/drivers/event/sw/sw_evdev_selftest.c\n+++ b/drivers/event/sw/sw_evdev_selftest.c\n@@ -172,7 +172,6 @@ struct test {\n \t\t\t.new_event_threshold = 1024,\n \t\t\t.dequeue_depth = 32,\n \t\t\t.enqueue_depth = 64,\n-\t\t\t.disable_implicit_release = 0,\n \t};\n \tif (num_ports > MAX_PORTS)\n \t\treturn -1;\n@@ -1227,7 +1226,6 @@ struct test_event_dev_stats {\n \t\t\t\t.new_event_threshold = 128,\n \t\t\t\t.dequeue_depth = 32,\n \t\t\t\t.enqueue_depth = 64,\n-\t\t\t\t.disable_implicit_release = 0,\n \t\t};\n \t\tif (rte_event_port_setup(evdev, 0, &port_conf) < 0) {\n \t\t\tprintf(\"%d Error setting up port\\n\", __LINE__);\n@@ -1317,7 +1315,6 @@ struct test_event_dev_stats {\n \t\t.new_event_threshold = 128,\n \t\t.dequeue_depth = 32,\n \t\t.enqueue_depth = 64,\n-\t\t.disable_implicit_release = 0,\n \t};\n \tif (rte_event_port_setup(evdev, 0, &port_conf) < 0) {\n \t\tprintf(\"%d Error setting up port\\n\", __LINE__);\n@@ -3079,7 +3076,8 @@ struct test_event_dev_stats {\n \t * only be initialized once - and this needs to be set for multiple runs\n \t */\n \tconf.new_event_threshold = 512;\n-\tconf.disable_implicit_release = disable_implicit_release;\n+\tconf.event_port_cfg = disable_implicit_release ?\n+\t\tRTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;\n \n \tif (rte_event_port_setup(evdev, 0, &conf) < 0) {\n \t\tprintf(\"Error setting up RX port\\n\");\ndiff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c\nindex 42ff4ee..f70ab0c 100644\n--- a/examples/eventdev_pipeline/pipeline_worker_generic.c\n+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c\n@@ -129,6 +129,7 @@\n \tstruct rte_event_dev_config config = {\n 
\t\t\t.nb_event_queues = nb_queues,\n \t\t\t.nb_event_ports = nb_ports,\n+\t\t\t.nb_single_link_event_port_queues = 1,\n \t\t\t.nb_events_limit  = 4096,\n \t\t\t.nb_event_queue_flows = 1024,\n \t\t\t.nb_event_port_dequeue_depth = 128,\n@@ -143,7 +144,7 @@\n \t\t\t.schedule_type = cdata.queue_type,\n \t\t\t.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,\n \t\t\t.nb_atomic_flows = 1024,\n-\t\t.nb_atomic_order_sequences = 1024,\n+\t\t\t.nb_atomic_order_sequences = 1024,\n \t};\n \tstruct rte_event_queue_conf tx_q_conf = {\n \t\t\t.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,\n@@ -167,7 +168,8 @@\n \tdisable_implicit_release = (dev_info.event_dev_cap &\n \t\t\tRTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);\n \n-\twkr_p_conf.disable_implicit_release = disable_implicit_release;\n+\twkr_p_conf.event_port_cfg = disable_implicit_release ?\n+\t\tRTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;\n \n \tif (dev_info.max_num_events < config.nb_events_limit)\n \t\tconfig.nb_events_limit = dev_info.max_num_events;\ndiff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c\nindex 55bb2f7..ca6cd20 100644\n--- a/examples/eventdev_pipeline/pipeline_worker_tx.c\n+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c\n@@ -436,6 +436,7 @@\n \tstruct rte_event_dev_config config = {\n \t\t\t.nb_event_queues = nb_queues,\n \t\t\t.nb_event_ports = nb_ports,\n+\t\t\t.nb_single_link_event_port_queues = 0,\n \t\t\t.nb_events_limit  = 4096,\n \t\t\t.nb_event_queue_flows = 1024,\n \t\t\t.nb_event_port_dequeue_depth = 128,\ndiff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c\nindex 2dc95e5..e01df04 100644\n--- a/examples/l2fwd-event/l2fwd_event_generic.c\n+++ b/examples/l2fwd-event/l2fwd_event_generic.c\n@@ -126,8 +126,9 @@\n \tif (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)\n \t\tevent_p_conf.enqueue_depth = def_p_conf.enqueue_depth;\n \n-\tevent_p_conf.disable_implicit_release 
=\n-\t\tevt_rsrc->disable_implicit_release;\n+\tevent_p_conf.event_port_cfg = 0;\n+\tif (evt_rsrc->disable_implicit_release)\n+\t\tevent_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n \tevt_rsrc->deq_depth = def_p_conf.dequeue_depth;\n \n \tfor (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;\ndiff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c\nindex 63d57b4..f54327b 100644\n--- a/examples/l2fwd-event/l2fwd_event_internal_port.c\n+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c\n@@ -123,8 +123,9 @@\n \tif (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)\n \t\tevent_p_conf.enqueue_depth = def_p_conf.enqueue_depth;\n \n-\tevent_p_conf.disable_implicit_release =\n-\t\tevt_rsrc->disable_implicit_release;\n+\tevent_p_conf.event_port_cfg = 0;\n+\tif (evt_rsrc->disable_implicit_release)\n+\t\tevent_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n \n \tfor (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;\n \t\t\t\t\t\t\t\tevent_p_id++) {\ndiff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c\nindex f8c9843..409a410 100644\n--- a/examples/l3fwd/l3fwd_event_generic.c\n+++ b/examples/l3fwd/l3fwd_event_generic.c\n@@ -115,8 +115,9 @@\n \tif (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)\n \t\tevent_p_conf.enqueue_depth = def_p_conf.enqueue_depth;\n \n-\tevent_p_conf.disable_implicit_release =\n-\t\tevt_rsrc->disable_implicit_release;\n+\tevent_p_conf.event_port_cfg = 0;\n+\tif (evt_rsrc->disable_implicit_release)\n+\t\tevent_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n \tevt_rsrc->deq_depth = def_p_conf.dequeue_depth;\n \n \tfor (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;\ndiff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c\nindex 03ac581..df410f1 100644\n--- a/examples/l3fwd/l3fwd_event_internal_port.c\n+++ 
b/examples/l3fwd/l3fwd_event_internal_port.c\n@@ -113,8 +113,9 @@\n \tif (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)\n \t\tevent_p_conf.enqueue_depth = def_p_conf.enqueue_depth;\n \n-\tevent_p_conf.disable_implicit_release =\n-\t\tevt_rsrc->disable_implicit_release;\n+\tevent_p_conf.event_port_cfg = 0;\n+\tif (evt_rsrc->disable_implicit_release)\n+\t\tevent_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;\n \n \tfor (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;\n \t\t\t\t\t\t\t\tevent_p_id++) {\ndiff --git a/lib/librte_eal/x86/include/rte_cpuflags.h b/lib/librte_eal/x86/include/rte_cpuflags.h\nindex c1d2036..ab2c3b3 100644\n--- a/lib/librte_eal/x86/include/rte_cpuflags.h\n+++ b/lib/librte_eal/x86/include/rte_cpuflags.h\n@@ -130,6 +130,7 @@ enum rte_cpu_flag_t {\n \tRTE_CPUFLAG_CLDEMOTE,               /**< Cache Line Demote */\n \tRTE_CPUFLAG_MOVDIRI,                /**< Direct Store Instructions */\n \tRTE_CPUFLAG_MOVDIR64B,              /**< Direct Store Instructions 64B */\n+\tRTE_CPUFLAG_UMWAIT,                 /**< UMONITOR/UMWAIT */\n \tRTE_CPUFLAG_AVX512VP2INTERSECT,     /**< AVX512 Two Register Intersection */\n \n \t/* The last item */\ndiff --git a/lib/librte_eal/x86/rte_cpuflags.c b/lib/librte_eal/x86/rte_cpuflags.c\nindex 30439e7..6bed3eb 100644\n--- a/lib/librte_eal/x86/rte_cpuflags.c\n+++ b/lib/librte_eal/x86/rte_cpuflags.c\n@@ -137,6 +137,7 @@ struct feature_entry {\n \tFEAT_DEF(CLDEMOTE, 0x00000007, 0, RTE_REG_ECX, 25)\n \tFEAT_DEF(MOVDIRI, 0x00000007, 0, RTE_REG_ECX, 27)\n \tFEAT_DEF(MOVDIR64B, 0x00000007, 0, RTE_REG_ECX, 28)\n+\tFEAT_DEF(UMWAIT, 0x00000007, 0, RTE_REG_ECX, 5)\n \tFEAT_DEF(AVX512VP2INTERSECT, 0x00000007, 0, RTE_REG_EDX, 8)\n };\n \ndiff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build\nindex d1f25ee..17f7f40 100644\n--- a/lib/librte_eventdev/meson.build\n+++ b/lib/librte_eventdev/meson.build\n@@ -7,6 +7,7 @@ else\n \tcflags += '-DBSD'\n endif\n \n+use_function_versioning = 
true\n sources = files('rte_eventdev.c',\n \t\t'rte_event_ring.c',\n \t\t'eventdev_trace_points.c',\ndiff --git a/lib/librte_eventdev/rte_event_eth_tx_adapter.c b/lib/librte_eventdev/rte_event_eth_tx_adapter.c\nindex bb21dc4..8a72256 100644\n--- a/lib/librte_eventdev/rte_event_eth_tx_adapter.c\n+++ b/lib/librte_eventdev/rte_event_eth_tx_adapter.c\n@@ -286,7 +286,7 @@ static int txa_service_queue_del(uint8_t id,\n \t\treturn ret;\n \t}\n \n-\tpc->disable_implicit_release = 0;\n+\tpc->event_port_cfg = 0;\n \tret = rte_event_port_setup(dev_id, port_id, pc);\n \tif (ret) {\n \t\tRTE_EDEV_LOG_ERR(\"failed to setup event port %u\\n\",\ndiff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c\nindex 82c177c..db2f9b7 100644\n--- a/lib/librte_eventdev/rte_eventdev.c\n+++ b/lib/librte_eventdev/rte_eventdev.c\n@@ -32,6 +32,8 @@\n #include <rte_ethdev.h>\n #include <rte_cryptodev.h>\n #include <rte_cryptodev_pmd.h>\n+#include <rte_compat.h>\n+#include <rte_function_versioning.h>\n \n #include \"rte_eventdev.h\"\n #include \"rte_eventdev_pmd.h\"\n@@ -87,7 +89,8 @@\n }\n \n int\n-rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)\n+rte_event_dev_info_get(uint8_t dev_id,\n+\t\t\t     struct rte_event_dev_info *dev_info)\n {\n \tstruct rte_eventdev *dev;\n \n@@ -437,9 +440,29 @@\n \t\t\t\t\tdev_id);\n \t\treturn -EINVAL;\n \t}\n-\tif (dev_conf->nb_event_queues > info.max_event_queues) {\n-\t\tRTE_EDEV_LOG_ERR(\"%d nb_event_queues=%d > max_event_queues=%d\",\n-\t\tdev_id, dev_conf->nb_event_queues, info.max_event_queues);\n+\tif (dev_conf->nb_event_queues > info.max_event_queues +\n+\t\t\tinfo.max_single_link_event_port_queue_pairs) {\n+\t\tRTE_EDEV_LOG_ERR(\"%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d\",\n+\t\t\t\t dev_id, dev_conf->nb_event_queues,\n+\t\t\t\t info.max_event_queues,\n+\t\t\t\t info.max_single_link_event_port_queue_pairs);\n+\t\treturn -EINVAL;\n+\t}\n+\tif 
(dev_conf->nb_event_queues -\n+\t\t\tdev_conf->nb_single_link_event_port_queues >\n+\t\t\tinfo.max_event_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d\",\n+\t\t\t\t dev_id, dev_conf->nb_event_queues,\n+\t\t\t\t dev_conf->nb_single_link_event_port_queues,\n+\t\t\t\t info.max_event_queues);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_single_link_event_port_queues >\n+\t\t\tdev_conf->nb_event_queues) {\n+\t\tRTE_EDEV_LOG_ERR(\"dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d\",\n+\t\t\t\t dev_id,\n+\t\t\t\t dev_conf->nb_single_link_event_port_queues,\n+\t\t\t\t dev_conf->nb_event_queues);\n \t\treturn -EINVAL;\n \t}\n \n@@ -448,9 +471,31 @@\n \t\tRTE_EDEV_LOG_ERR(\"dev%d nb_event_ports cannot be zero\", dev_id);\n \t\treturn -EINVAL;\n \t}\n-\tif (dev_conf->nb_event_ports > info.max_event_ports) {\n-\t\tRTE_EDEV_LOG_ERR(\"id%d nb_event_ports=%d > max_event_ports= %d\",\n-\t\tdev_id, dev_conf->nb_event_ports, info.max_event_ports);\n+\tif (dev_conf->nb_event_ports > info.max_event_ports +\n+\t\t\tinfo.max_single_link_event_port_queue_pairs) {\n+\t\tRTE_EDEV_LOG_ERR(\"id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d\",\n+\t\t\t\t dev_id, dev_conf->nb_event_ports,\n+\t\t\t\t info.max_event_ports,\n+\t\t\t\t info.max_single_link_event_port_queue_pairs);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->nb_event_ports -\n+\t\t\tdev_conf->nb_single_link_event_port_queues\n+\t\t\t> info.max_event_ports) {\n+\t\tRTE_EDEV_LOG_ERR(\"id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d\",\n+\t\t\t\t dev_id, dev_conf->nb_event_ports,\n+\t\t\t\t dev_conf->nb_single_link_event_port_queues,\n+\t\t\t\t info.max_event_ports);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (dev_conf->nb_single_link_event_port_queues >\n+\t    dev_conf->nb_event_ports) {\n+\t\tRTE_EDEV_LOG_ERR(\n+\t\t\t\t \"dev%d nb_single_link_event_port_queues=%d > 
nb_event_ports=%d\",\n+\t\t\t\t dev_id,\n+\t\t\t\t dev_conf->nb_single_link_event_port_queues,\n+\t\t\t\t dev_conf->nb_event_ports);\n \t\treturn -EINVAL;\n \t}\n \n@@ -737,7 +782,8 @@\n \t\treturn -EINVAL;\n \t}\n \n-\tif (port_conf && port_conf->disable_implicit_release &&\n+\tif (port_conf &&\n+\t    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&\n \t    !(dev->data->event_dev_cap &\n \t      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {\n \t\tRTE_EDEV_LOG_ERR(\n@@ -809,6 +855,7 @@\n \t\t\tuint32_t *attr_value)\n {\n \tstruct rte_eventdev *dev;\n+\tuint32_t config;\n \n \tif (!attr_value)\n \t\treturn -EINVAL;\n@@ -830,6 +877,10 @@\n \tcase RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:\n \t\t*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;\n \t\tbreak;\n+\tcase RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:\n+\t\tconfig = dev->data->ports_cfg[port_id].event_port_cfg;\n+\t\t*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);\n+\t\tbreak;\n \tdefault:\n \t\treturn -EINVAL;\n \t};\ndiff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h\nindex 7dc8323..ce1fc2c 100644\n--- a/lib/librte_eventdev/rte_eventdev.h\n+++ b/lib/librte_eventdev/rte_eventdev.h\n@@ -291,6 +291,12 @@\n  * single queue to each port or map a single queue to many port.\n  */\n \n+#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)\n+/**< Event device preserves the flow ID from the enqueued\n+ * event to the dequeued event if the flag is set. 
Otherwise,\n+ * the content of this field is implementation dependent.\n+ */\n+\n /* Event device priority levels */\n #define RTE_EVENT_DEV_PRIORITY_HIGHEST   0\n /**< Highest priority expressed across eventdev subsystem\n@@ -380,6 +386,10 @@ struct rte_event_dev_info {\n \t * event port by this device.\n \t * A device that does not support bulk enqueue will set this as 1.\n \t */\n+\tuint8_t max_event_port_links;\n+\t/**< Maximum number of queues that can be linked to a single event\n+\t * port by this device.\n+\t */\n \tint32_t max_num_events;\n \t/**< A *closed system* event dev has a limit on the number of events it\n \t * can manage at a time. An *open system* event dev does not have a\n@@ -387,6 +397,12 @@ struct rte_event_dev_info {\n \t */\n \tuint32_t event_dev_cap;\n \t/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/\n+\tuint8_t max_single_link_event_port_queue_pairs;\n+\t/**< Maximum number of event ports and queues that are optimized for\n+\t * (and only capable of) single-link configurations supported by this\n+\t * device. These ports and queues are not accounted for in\n+\t * max_event_ports or max_event_queues.\n+\t */\n };\n \n /**\n@@ -494,6 +510,14 @@ struct rte_event_dev_config {\n \t */\n \tuint32_t event_dev_cfg;\n \t/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/\n+\tuint8_t nb_single_link_event_port_queues;\n+\t/**< Number of event ports and queues that will be singly-linked to\n+\t * each other. These are a subset of the overall event ports and\n+\t * queues; this value cannot exceed *nb_event_ports* or\n+\t * *nb_event_queues*. 
If the device has ports and queues that are\n+\t * optimized for single-link usage, this field is a hint for how many\n+\t * to allocate; otherwise, regular event ports and queues can be used.\n+\t */\n };\n \n /**\n@@ -519,7 +543,6 @@ struct rte_event_dev_config {\n rte_event_dev_configure(uint8_t dev_id,\n \t\t\tconst struct rte_event_dev_config *dev_conf);\n \n-\n /* Event queue specific APIs */\n \n /* Event queue configuration bitmap flags */\n@@ -671,6 +694,20 @@ struct rte_event_queue_conf {\n \n /* Event port specific APIs */\n \n+/* Event port configuration bitmap flags */\n+#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)\n+/**< Configure the port not to release outstanding events in\n+ * rte_event_dev_dequeue_burst(). If set, all events received through\n+ * the port must be explicitly released with RTE_EVENT_OP_RELEASE or\n+ * RTE_EVENT_OP_FORWARD. Must be unset if the device is not\n+ * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.\n+ */\n+#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)\n+/**< This event port links only to a single event queue.\n+ *\n+ *  @see rte_event_port_setup(), rte_event_port_link()\n+ */\n+\n /** Event port configuration structure */\n struct rte_event_port_conf {\n \tint32_t new_event_threshold;\n@@ -698,13 +735,7 @@ struct rte_event_port_conf {\n \t * which previously supplied to rte_event_dev_configure().\n \t * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.\n \t */\n-\tuint8_t disable_implicit_release;\n-\t/**< Configure the port not to release outstanding events in\n-\t * rte_event_dev_dequeue_burst(). If true, all events received through\n-\t * the port must be explicitly released with RTE_EVENT_OP_RELEASE or\n-\t * RTE_EVENT_OP_FORWARD. 
Must be false when the device is not\n-\t * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.\n-\t */\n+\tuint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */\n };\n \n /**\n@@ -769,6 +800,10 @@ struct rte_event_port_conf {\n  * The new event threshold of the port\n  */\n #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2\n+/**\n+ * The implicit release disable attribute of the port\n+ */\n+#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3\n \n /**\n  * Get an attribute from a port.\ndiff --git a/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/lib/librte_eventdev/rte_eventdev_pmd_pci.h\nindex 443cd38..1572999 100644\n--- a/lib/librte_eventdev/rte_eventdev_pmd_pci.h\n+++ b/lib/librte_eventdev/rte_eventdev_pmd_pci.h\n@@ -88,6 +88,60 @@\n \treturn -ENXIO;\n }\n \n+/**\n+ * @internal\n+ * Wrapper for use by pci drivers as a .probe function to attach to a event\n+ * interface.  Same as rte_event_pmd_pci_probe, except caller can specify\n+ * the name.\n+ */\n+static inline int\n+rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,\n+\t\t\t    struct rte_pci_device *pci_dev,\n+\t\t\t    size_t private_data_size,\n+\t\t\t    eventdev_pmd_pci_callback_t devinit,\n+\t\t\t    const char *name)\n+{\n+\tstruct rte_eventdev *eventdev;\n+\n+\tint retval;\n+\n+\tif (devinit == NULL)\n+\t\treturn -EINVAL;\n+\n+\teventdev = rte_event_pmd_allocate(name,\n+\t\t\t pci_dev->device.numa_node);\n+\tif (eventdev == NULL)\n+\t\treturn -ENOMEM;\n+\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n+\t\teventdev->data->dev_private =\n+\t\t\t\trte_zmalloc_socket(\n+\t\t\t\t\t\t\"eventdev private structure\",\n+\t\t\t\t\t\tprivate_data_size,\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t\trte_socket_id());\n+\n+\t\tif (eventdev->data->dev_private == NULL)\n+\t\t\trte_panic(\"Cannot allocate memzone for private \"\n+\t\t\t\t\t\"device data\");\n+\t}\n+\n+\teventdev->dev = &pci_dev->device;\n+\n+\t/* Invoke PMD device initialization function */\n+\tretval = 
devinit(eventdev);\n+\tif (retval == 0)\n+\t\treturn 0;\n+\n+\tRTE_EDEV_LOG_ERR(\"driver %s: (vendor_id=0x%x device_id=0x%x)\"\n+\t\t\t\" failed\", pci_drv->driver.name,\n+\t\t\t(unsigned int) pci_dev->id.vendor_id,\n+\t\t\t(unsigned int) pci_dev->id.device_id);\n+\n+\trte_event_pmd_release(eventdev);\n+\n+\treturn -ENXIO;\n+}\n \n /**\n  * @internal\ndiff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map\nindex 91a62cd..de197dd 100644\n--- a/lib/librte_eventdev/rte_eventdev_version.map\n+++ b/lib/librte_eventdev/rte_eventdev_version.map\n@@ -100,7 +100,6 @@ EXPERIMENTAL {\n \t# added in 20.05\n \t__rte_eventdev_trace_configure;\n \t__rte_eventdev_trace_queue_setup;\n-\t__rte_eventdev_trace_port_setup;\n \t__rte_eventdev_trace_port_link;\n \t__rte_eventdev_trace_port_unlink;\n \t__rte_eventdev_trace_start;\n@@ -134,4 +133,7 @@ EXPERIMENTAL {\n \t__rte_eventdev_trace_crypto_adapter_queue_pair_del;\n \t__rte_eventdev_trace_crypto_adapter_start;\n \t__rte_eventdev_trace_crypto_adapter_stop;\n+\n+\t# changed in 20.08\n+\t__rte_eventdev_trace_port_setup;\n };\n",
    "prefixes": [
        "01/27"
    ]
}