[01/27] eventdev: dlb upstream prerequisites

Message ID 1596138614-17409-2-git-send-email-timothy.mcdaniel@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series Add Intel DLB PMD to 20.11

Checks

Context               Check    Description
ci/checkpatch         warning  coding style issues
ci/Intel-compilation  fail     apply issues

Commit Message

Timothy McDaniel July 30, 2020, 7:49 p.m. UTC
  From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>

The DLB hardware does not conform exactly to the eventdev interface.
1) It has a limit on the number of queues that may be linked to a port.
2) Some ports are further restricted to a maximum of 1 linked queue.
3) It does not (currently) have the ability to carry the flow_id as part
of the event (QE) payload.

Due to the above, we would like to propose the following enhancements.

1) Add new fields to the rte_event_dev_info struct. These fields allow
the device to advertise its capabilities so that applications can take
the appropriate actions based on those capabilities.

    struct rte_event_dev_info {
	uint32_t max_event_port_links;
	/**< Maximum number of queues that can be linked to a single event
	 * port by this device.
	 */

	uint8_t max_single_link_event_port_queue_pairs;
	/**< Maximum number of event ports and queues that are optimized for
	 * (and only capable of) single-link configurations supported by this
	 * device. These ports and queues are not accounted for in
	 * max_event_ports or max_event_queues.
	 */
    };
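
For illustration, a minimal sketch of how an application might use these
fields (the helper name and the clamping policy here are ours, not part of
the proposal):

	#include <rte_eventdev.h>

	/* Hypothetical helper: clamp a requested per-port link count to the
	 * limit advertised by the proposed max_event_port_links field.
	 */
	static uint32_t
	clamp_port_links(uint8_t dev_id, uint32_t nb_links_requested)
	{
		struct rte_event_dev_info info;

		rte_event_dev_info_get(dev_id, &info);
		if (nb_links_requested > info.max_event_port_links)
			return info.max_event_port_links;
		return nb_links_requested;
	}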

2) Add a new field to the rte_event_dev_config struct. This field allows
the application to specify how many of its ports are limited to a single
link, or will be used in single link mode.

    /** Event device configuration structure */
    struct rte_event_dev_config {
	uint8_t nb_single_link_event_port_queues;
	/**< Number of event ports and queues that will be singly-linked to
	 * each other. These are a subset of the overall event ports and
	 * queues; this value cannot exceed *nb_event_ports* or
	 * *nb_event_queues*. If the device has ports and queues that are
	 * optimized for single-link usage, this field is a hint for how many
	 * to allocate; otherwise, regular event ports and queues can be used.
	 */
    };
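
As a sketch of the intended use, an application reserving two singly-linked
port/queue pairs might configure the device as below (all counts are
illustrative; only nb_single_link_event_port_queues is new):

	#include <rte_eventdev.h>

	static int
	configure_with_single_links(uint8_t dev_id)
	{
		struct rte_event_dev_info info;
		struct rte_event_dev_config cfg = {0};

		rte_event_dev_info_get(dev_id, &info);

		cfg.nb_event_queues = 4;
		cfg.nb_event_ports = 4;
		/* Two of the above ports/queues form singly-linked pairs */
		cfg.nb_single_link_event_port_queues = 2;
		cfg.nb_events_limit = info.max_num_events;
		cfg.nb_event_queue_flows = 1024;
		cfg.nb_event_port_dequeue_depth =
			info.max_event_port_dequeue_depth;
		cfg.nb_event_port_enqueue_depth =
			info.max_event_port_enqueue_depth;
		cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

		return rte_event_dev_configure(dev_id, &cfg);
	}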

3) Replace the dedicated disable_implicit_release field with a bit field
of explicit port capabilities. The implicit release disable functionality
is assigned to one bit, and a port-is-single-link-only attribute is
assigned to another, with the remaining bits available for future assignment.

	/* Event port configuration bitmap flags */
	#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
	/**< Configure the port not to release outstanding events in
	 * rte_event_dev_dequeue_burst(). If set, all events received through
	 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
	 * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
	 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
	 */
	#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
	/**< This event port links only to a single event queue.
	 *
	 *  @see rte_event_port_setup(), rte_event_port_link()
	 */

	#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
	/**
	 * The implicit release disable attribute of the port
	 */

	struct rte_event_port_conf {
		uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
	};
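
A sketch of port setup with these flags, guarded by the device capability
(the port id handling and the single-link framing are assumptions, not part
of the proposal):

	#include <rte_eventdev.h>

	static int
	setup_single_link_port(uint8_t dev_id, uint8_t port_id,
			       const struct rte_event_dev_info *info)
	{
		struct rte_event_port_conf pconf;

		rte_event_port_default_conf_get(dev_id, port_id, &pconf);
		pconf.event_port_cfg = RTE_EVENT_PORT_CFG_SINGLE_LINK;
		if (info->event_dev_cap &
		    RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
			pconf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

		return rte_event_port_setup(dev_id, port_id, &pconf);
	}

The new RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE attribute can then be
read back through rte_event_port_attr_get().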

4) Add UMWAIT/UMONITOR bit to rte_cpuflags
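
For example, a runtime check before choosing a UMWAIT-based wait path might
look like this (rte_cpu_get_flag_enabled() is existing EAL API; only the
RTE_CPUFLAG_UMWAIT enumerator is new):

	#include <rte_cpuflags.h>

	static inline int
	have_umwait(void)
	{
		/* Returns 1 only when UMONITOR/UMWAIT is supported */
		return rte_cpu_get_flag_enabled(RTE_CPUFLAG_UMWAIT) > 0;
	}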

5) Add a new API for probing PCI devices.

	/**
	 * @internal
	 * Wrapper for use by PCI drivers as a .probe function to attach to an
	 * event interface. Same as rte_event_pmd_pci_probe, except the caller
	 * can specify the name.
	 */
	static inline int
	rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
				    struct rte_pci_device *pci_dev,
				    size_t private_data_size,
				    eventdev_pmd_pci_callback_t devinit,
				    const char *name);
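
A sketch of how a PMD's .probe callback could use this wrapper (the private
data struct, init callback, and device name are illustrative placeholders):

	#include <rte_common.h>
	#include <rte_eventdev_pmd_pci.h>

	struct my_evdev_private { int dummy; };

	static int
	my_evdev_init(struct rte_eventdev *dev)
	{
		RTE_SET_USED(dev);
		return 0;	/* a real PMD would populate dev->dev_ops here */
	}

	static int
	my_event_pci_probe(struct rte_pci_driver *pci_drv,
			   struct rte_pci_device *pci_dev)
	{
		return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
				sizeof(struct my_evdev_private),
				my_evdev_init, "my_evdev");
	}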

Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
 app/test-eventdev/evt_common.h                     |   11 ++++
 app/test-eventdev/test_order_atq.c                 |   30 +++++++--
 app/test-eventdev/test_order_common.c              |    5 +-
 app/test-eventdev/test_order_queue.c               |   31 +++++++--
 app/test/test_eventdev.c                           |    4 +-
 drivers/event/dpaa/dpaa_eventdev.c                 |    3 +-
 drivers/event/dpaa2/dpaa2_eventdev.c               |    5 +-
 drivers/event/dsw/dsw_evdev.c                      |    3 +-
 drivers/event/octeontx/ssovf_evdev.c               |    5 +-
 drivers/event/octeontx2/otx2_evdev.c               |    3 +-
 drivers/event/opdl/opdl_evdev.c                    |    3 +-
 drivers/event/skeleton/skeleton_eventdev.c         |    5 +-
 drivers/event/sw/sw_evdev.c                        |    8 ++-
 drivers/event/sw/sw_evdev_selftest.c               |    6 +-
 .../eventdev_pipeline/pipeline_worker_generic.c    |    6 +-
 examples/eventdev_pipeline/pipeline_worker_tx.c    |    1 +
 examples/l2fwd-event/l2fwd_event_generic.c         |    5 +-
 examples/l2fwd-event/l2fwd_event_internal_port.c   |    5 +-
 examples/l3fwd/l3fwd_event_generic.c               |    5 +-
 examples/l3fwd/l3fwd_event_internal_port.c         |    5 +-
 lib/librte_eal/x86/include/rte_cpuflags.h          |    1 +
 lib/librte_eal/x86/rte_cpuflags.c                  |    1 +
 lib/librte_eventdev/meson.build                    |    1 +
 lib/librte_eventdev/rte_event_eth_tx_adapter.c     |    2 +-
 lib/librte_eventdev/rte_eventdev.c                 |   67 +++++++++++++++++---
 lib/librte_eventdev/rte_eventdev.h                 |   51 ++++++++++++---
 lib/librte_eventdev/rte_eventdev_pmd_pci.h         |   54 ++++++++++++++++
 lib/librte_eventdev/rte_eventdev_version.map       |    4 +-
 28 files changed, 268 insertions(+), 62 deletions(-)
  

Comments

Jerin Jacob Aug. 11, 2020, 5:44 p.m. UTC | #1
On Fri, Jul 31, 2020 at 1:23 AM McDaniel, Timothy
<timothy.mcdaniel@intel.com> wrote:
>
> From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>


Please change to "McDaniel Timothy <timothy.mcdaniel@intel.com>"

>
> The DLB hardware does not conform exactly to the eventdev interface.
> 1) It has a limit on the number of queues that may be linked to a port.
> 2) Some ports are further restricted to a maximum of 1 linked queue.
> 3) It does not (currently) have the ability to carry the flow_id as part
> of the event (QE) payload.
>
> Due to the above, we would like to propose the following enhancements.
>
> 1) Add new fields to the rte_event_dev_info struct. These fields allow
> the device to advertise its capabilities so that applications can take
> the appropriate actions based on those capabilities.
>
>     struct rte_event_dev_info {
>         uint32_t max_event_port_links;
>         /**< Maximum number of queues that can be linked to a single event
>          * port by this device.
>          */
>
>         uint8_t max_single_link_event_port_queue_pairs;
>         /**< Maximum number of event ports and queues that are optimized for
>          * (and only capable of) single-link configurations supported by this
>          * device. These ports and queues are not accounted for in
>          * max_event_ports or max_event_queues.
>          */
>     }
>
> 2) Add a new field to the rte_event_dev_config struct. This field allows
> the application to specify how many of its ports are limited to a single
> link, or will be used in single link mode.
>
>     /** Event device configuration structure */
>     struct rte_event_dev_config {
>         uint8_t nb_single_link_event_port_queues;
>         /**< Number of event ports and queues that will be singly-linked to
>          * each other. These are a subset of the overall event ports and
>          * queues; this value cannot exceed *nb_event_ports* or
>          * *nb_event_queues*. If the device has ports and queues that are
>          * optimized for single-link usage, this field is a hint for how many
>          * to allocate; otherwise, regular event ports and queues can be used.
>          */
>     }
>
> 3) Replace the dedicated disable_implicit_release field with a bit field
> of explicit port capabilities. The implicit release disable functionality
> is assigned to one bit, and a port-is-single-link-only attribute is
> assigned to another, with the remaining bits available for future assignment.
>
>         /* Event port configuration bitmap flags */
>         #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
>         /**< Configure the port not to release outstanding events in
>          * rte_event_dev_dequeue_burst(). If set, all events received through
>          * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
>          * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
>          * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
>          */
>         #define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
>
>         /**< This event port links only to a single event queue.
>          *
>          *  @see rte_event_port_setup(), rte_event_port_link()
>          */
>
>         #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
>         /**
>          * The implicit release disable attribute of the port
>          */
>
>         struct rte_event_port_conf {
>                 uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
>         }
>
> 4) Add UMWAIT/UMONITOR bit to rte_cpuflags
>
> 5) Add a new API for probing PCI devices.
>
>         /**
>          * @internal
>          * Wrapper for use by PCI drivers as a .probe function to attach to
>          * an event interface. Same as rte_event_pmd_pci_probe, except the
>          * caller can specify the name.
>          */
>         static inline int
>         rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
>                                     struct rte_pci_device *pci_dev,
>                                     size_t private_data_size,
>                                     eventdev_pmd_pci_callback_t devinit,
>                                     const char *name);

Please sanitize the commit log.


>
> Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
> ---
>  app/test-eventdev/evt_common.h                     |   11 ++++
>  app/test-eventdev/test_order_atq.c                 |   30 +++++++--
>  app/test-eventdev/test_order_common.c              |    5 +-
>  app/test-eventdev/test_order_queue.c               |   31 +++++++--
>  app/test/test_eventdev.c                           |    4 +-

Move the changes for the example apps and test-eventdev to a separate commit,
i.e.
1) First patch to introduce the API change and add the capability for existing drivers,
2) Add the example app and test-eventdev app changes as the next commit.


>  drivers/event/dpaa/dpaa_eventdev.c                 |    3 +-
>  drivers/event/dpaa2/dpaa2_eventdev.c               |    5 +-
>  drivers/event/dsw/dsw_evdev.c                      |    3 +-
>  drivers/event/octeontx/ssovf_evdev.c               |    5 +-
>  drivers/event/octeontx2/otx2_evdev.c               |    3 +-
>  drivers/event/opdl/opdl_evdev.c                    |    3 +-
>  drivers/event/skeleton/skeleton_eventdev.c         |    5 +-
>  drivers/event/sw/sw_evdev.c                        |    8 ++-
>  drivers/event/sw/sw_evdev_selftest.c               |    6 +-
>  .../eventdev_pipeline/pipeline_worker_generic.c    |    6 +-
>  examples/eventdev_pipeline/pipeline_worker_tx.c    |    1 +
>  examples/l2fwd-event/l2fwd_event_generic.c         |    5 +-
>  examples/l2fwd-event/l2fwd_event_internal_port.c   |    5 +-
>  examples/l3fwd/l3fwd_event_generic.c               |    5 +-
>  examples/l3fwd/l3fwd_event_internal_port.c         |    5 +-
>  lib/librte_eal/x86/include/rte_cpuflags.h          |    1 +
>  lib/librte_eal/x86/rte_cpuflags.c                  |    1 +

Found an EAL change. Please make a separate patch for the EAL change,
targeting the master repo,
and make that patch a dependency of this series.

>  lib/librte_eventdev/meson.build                    |    1 +
>  lib/librte_eventdev/rte_event_eth_tx_adapter.c     |    2 +-
>  lib/librte_eventdev/rte_eventdev.c                 |   67 +++++++++++++++++---
>  lib/librte_eventdev/rte_eventdev.h                 |   51 ++++++++++++---
>  lib/librte_eventdev/rte_eventdev_pmd_pci.h         |   54 ++++++++++++++++
>  lib/librte_eventdev/rte_eventdev_version.map       |    4 +-
>  28 files changed, 268 insertions(+), 62 deletions(-)

> +/**
> + * @internal
> + * Wrapper for use by PCI drivers as a .probe function to attach to an
> + * event interface. Same as rte_event_pmd_pci_probe, except the caller
> + * can specify the name.
> + */
> +static inline int
> +rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
> +                           struct rte_pci_device *pci_dev,
> +                           size_t private_data_size,
> +                           eventdev_pmd_pci_callback_t devinit,
> +                           const char *name)

Please make this function a separate patch.

> +{
> +       struct rte_eventdev *eventdev;
> +
> +       int retval;
> +
> +       if (devinit == NULL)
> +               return -EINVAL;
> +
> +       eventdev = rte_event_pmd_allocate(name,
> +                        pci_dev->device.numa_node);
> +       if (eventdev == NULL)
> +               return -ENOMEM;
> +
> +       if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +               eventdev->data->dev_private =
> +                               rte_zmalloc_socket(
> +                                               "eventdev private structure",
> +                                               private_data_size,
> +                                               RTE_CACHE_LINE_SIZE,
> +                                               rte_socket_id());
> +
> +               if (eventdev->data->dev_private == NULL)
> +                       rte_panic("Cannot allocate memzone for private "
> +                                       "device data");
> +       }
> +
> +       eventdev->dev = &pci_dev->device;
> +
> +       /* Invoke PMD device initialization function */
> +       retval = devinit(eventdev);
> +       if (retval == 0)
> +               return 0;
> +
> +       RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
> +                       " failed", pci_drv->driver.name,
> +                       (unsigned int) pci_dev->id.vendor_id,
> +                       (unsigned int) pci_dev->id.device_id);
> +
> +       rte_event_pmd_release(eventdev);
> +
> +       return -ENXIO;
> +}
>
>  /**
>   * @internal
> diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
> index 91a62cd..de197dd 100644
> --- a/lib/librte_eventdev/rte_eventdev_version.map
> +++ b/lib/librte_eventdev/rte_eventdev_version.map
> @@ -100,7 +100,6 @@ EXPERIMENTAL {
>         # added in 20.05
>         __rte_eventdev_trace_configure;
>         __rte_eventdev_trace_queue_setup;
> -       __rte_eventdev_trace_port_setup;
>         __rte_eventdev_trace_port_link;
>         __rte_eventdev_trace_port_unlink;
>         __rte_eventdev_trace_start;
> @@ -134,4 +133,7 @@ EXPERIMENTAL {
>         __rte_eventdev_trace_crypto_adapter_queue_pair_del;
>         __rte_eventdev_trace_crypto_adapter_start;
>         __rte_eventdev_trace_crypto_adapter_stop;
> +
> +       # changed in 20.08

Fix up version numbers.


> +       __rte_eventdev_trace_port_setup;
>  };
> --
> 1.7.10
>
  

Patch

diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
index f9d7378..a1da1cf 100644
--- a/app/test-eventdev/evt_common.h
+++ b/app/test-eventdev/evt_common.h
@@ -104,6 +104,16 @@  struct evt_options {
 			true : false;
 }
 
+static inline bool
+evt_has_flow_id(uint8_t dev_id)
+{
+	struct rte_event_dev_info dev_info;
+
+	rte_event_dev_info_get(dev_id, &dev_info);
+	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
+			true : false;
+}
+
 static inline int
 evt_service_setup(uint32_t service_id)
 {
@@ -169,6 +179,7 @@  struct evt_options {
 			.dequeue_timeout_ns = opt->deq_tmo_nsec,
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
+			.nb_single_link_event_port_queues = 0,
 			.nb_events_limit  = info.max_num_events,
 			.nb_event_queue_flows = opt->nb_flows,
 			.nb_event_port_dequeue_depth =
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 3366cfc..8ef3b40 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -19,7 +19,7 @@ 
 }
 
 static int
-order_atq_worker(void *arg)
+order_atq_worker(void *arg, const uint64_t flow_id_cap)
 {
 	ORDER_WORKER_INIT;
 	struct rte_event ev;
@@ -34,6 +34,9 @@ 
 			continue;
 		}
 
+		if (!flow_id_cap)
+			ev.flow_id = ev.mbuf->udata64;
+
 		if (ev.sub_event_type == 0) { /* stage 0 from producer */
 			order_atq_process_stage_0(&ev);
 			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@ 
 }
 
 static int
-order_atq_worker_burst(void *arg)
+order_atq_worker_burst(void *arg, const uint64_t flow_id_cap)
 {
 	ORDER_WORKER_INIT;
 	struct rte_event ev[BURST_SIZE];
@@ -68,6 +71,9 @@ 
 		}
 
 		for (i = 0; i < nb_rx; i++) {
+			if (!flow_id_cap)
+				ev[i].flow_id = ev[i].mbuf->udata64;
+
 			if (ev[i].sub_event_type == 0) { /*stage 0 */
 				order_atq_process_stage_0(&ev[i]);
 			} else if (ev[i].sub_event_type == 1) { /* stage 1 */
@@ -95,11 +101,21 @@ 
 {
 	struct worker_data *w  = arg;
 	const bool burst = evt_has_burst_mode(w->dev_id);
-
-	if (burst)
-		return order_atq_worker_burst(arg);
-	else
-		return order_atq_worker(arg);
+	const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+	if (burst) {
+		if (flow_id_cap)
+			return order_atq_worker_burst(arg,
+					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID);
+		else
+			return order_atq_worker_burst(arg, 0);
+	} else {
+		if (flow_id_cap)
+			return order_atq_worker(arg,
+					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID);
+		else
+			return order_atq_worker(arg, 0);
+	}
 }
 
 static int
diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index 4190f9a..928904e 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -49,6 +49,7 @@ 
 		const uint32_t flow = (uintptr_t)m % nb_flows;
 		/* Maintain seq number per flow */
 		m->seqn = producer_flow_seq[flow]++;
+		m->udata64 = flow;
 
 		ev.flow_id = flow;
 		ev.mbuf = m;
@@ -318,7 +319,7 @@ 
 		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
 
 	/* port configuration */
-	const struct rte_event_port_conf p_conf = {
+	struct rte_event_port_conf p_conf = {
 			.dequeue_depth = opt->wkr_deq_dep,
 			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
 			.new_event_threshold = dev_info.max_num_events,
@@ -351,6 +352,8 @@ 
 	p->queue_id = 0;
 	p->t = t;
 
+	p_conf.new_event_threshold /= 2;
+
 	ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
 	if (ret) {
 		evt_err("failed to setup producer port %d", port);
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 495efd9..7a71d2b 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -19,7 +19,7 @@ 
 }
 
 static int
-order_queue_worker(void *arg)
+order_queue_worker(void *arg, const uint64_t flow_id_cap)
 {
 	ORDER_WORKER_INIT;
 	struct rte_event ev;
@@ -34,6 +34,9 @@ 
 			continue;
 		}
 
+		if (!flow_id_cap)
+			ev.flow_id = ev.mbuf->udata64;
+
 		if (ev.queue_id == 0) { /* from ordered queue */
 			order_queue_process_stage_0(&ev);
 			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@ 
 }
 
 static int
-order_queue_worker_burst(void *arg)
+order_queue_worker_burst(void *arg, const uint64_t flow_id_cap)
 {
 	ORDER_WORKER_INIT;
 	struct rte_event ev[BURST_SIZE];
@@ -68,6 +71,10 @@ 
 		}
 
 		for (i = 0; i < nb_rx; i++) {
+
+			if (!flow_id_cap)
+				ev[i].flow_id = ev[i].mbuf->udata64;
+
 			if (ev[i].queue_id == 0) { /* from ordered queue */
 				order_queue_process_stage_0(&ev[i]);
 			} else if (ev[i].queue_id == 1) {/* from atomic queue */
@@ -95,11 +102,21 @@ 
 {
 	struct worker_data *w  = arg;
 	const bool burst = evt_has_burst_mode(w->dev_id);
-
-	if (burst)
-		return order_queue_worker_burst(arg);
-	else
-		return order_queue_worker(arg);
+	const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+	if (burst) {
+		if (flow_id_cap)
+			return order_queue_worker_burst(arg,
+					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID);
+		else
+			return order_queue_worker_burst(arg, 0);
+	} else {
+		if (flow_id_cap)
+			return order_queue_worker(arg,
+					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID);
+		else
+			return order_queue_worker(arg, 0);
+	}
 }
 
 static int
diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index 43ccb1c..62019c1 100644
--- a/app/test/test_eventdev.c
+++ b/app/test/test_eventdev.c
@@ -559,10 +559,10 @@ 
 	if (!(info.event_dev_cap &
 	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
 		pconf.enqueue_depth = info.max_event_port_enqueue_depth;
-		pconf.disable_implicit_release = 1;
+		pconf.event_port_cfg = RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 		ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
 		TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
-		pconf.disable_implicit_release = 0;
+		pconf.event_port_cfg = 0;
 	}
 
 	ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index a3c138b..0804c6e 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -357,7 +357,8 @@  static void drain_4_bytes(int fd, fd_set *fdset)
 		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
 		RTE_EVENT_DEV_CAP_BURST_MODE |
 		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static int
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index a545baf..d30812f 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -405,7 +405,8 @@  static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
 		RTE_EVENT_DEV_CAP_BURST_MODE|
 		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
 		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 
 }
 
@@ -535,7 +536,7 @@  static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
 		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
 	port_conf->enqueue_depth =
 		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
-	port_conf->disable_implicit_release = 0;
+	port_conf->event_port_cfg = 0;
 }
 
 static int
diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index e796975..933a5a5 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -224,7 +224,8 @@ 
 		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
 		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
 		RTE_EVENT_DEV_CAP_NONSEQ_MODE|
-		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT
+		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
+		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
 	};
 }
 
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 4fc4e8f..1c6bcca 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -152,7 +152,8 @@  struct ssovf_mbox_convert_ns_getworks_iter {
 					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES|
 					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
 					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-					RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+					RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 
 }
 
@@ -218,7 +219,7 @@  struct ssovf_mbox_convert_ns_getworks_iter {
 	port_conf->new_event_threshold = edev->max_num_events;
 	port_conf->dequeue_depth = 1;
 	port_conf->enqueue_depth = 1;
-	port_conf->disable_implicit_release = 0;
+	port_conf->event_port_cfg = 0;
 }
 
 static void
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index b8b57c3..ae35bb5 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -501,7 +501,8 @@ 
 					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
 					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
 					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-					RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+					RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static void
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index 9b2f75f..3050578 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -374,7 +374,8 @@ 
 		.max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
 		.max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
 		.max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
-		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
+		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE |
+				 RTE_EVENT_DEV_CAP_CARRY_FLOW_ID,
 	};
 
 	*info = evdev_opdl_info;
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index c889220..6fd1102 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -101,7 +101,8 @@ 
 	dev_info->max_num_events = (1ULL << 20);
 	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
 					RTE_EVENT_DEV_CAP_BURST_MODE |
-					RTE_EVENT_DEV_CAP_EVENT_QOS;
+					RTE_EVENT_DEV_CAP_EVENT_QOS |
+					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
 static int
@@ -209,7 +210,7 @@ 
 	port_conf->new_event_threshold = 32 * 1024;
 	port_conf->dequeue_depth = 16;
 	port_conf->enqueue_depth = 16;
-	port_conf->disable_implicit_release = 0;
+	port_conf->event_port_cfg = 0;
 }
 
 static void
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 98dae71..058f568 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -175,7 +175,8 @@ 
 	}
 
 	p->inflight_max = conf->new_event_threshold;
-	p->implicit_release = !conf->disable_implicit_release;
+	p->implicit_release = !(conf->event_port_cfg &
+				RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
 
 	/* check if ring exists, same as rx_worker above */
 	snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@@ -508,7 +509,7 @@ 
 	port_conf->new_event_threshold = 1024;
 	port_conf->dequeue_depth = 16;
 	port_conf->enqueue_depth = 16;
-	port_conf->disable_implicit_release = 0;
+	port_conf->event_port_cfg = 0;
 }
 
 static int
@@ -615,7 +616,8 @@ 
 				RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
 				RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
 				RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-				RTE_EVENT_DEV_CAP_NONSEQ_MODE),
+				RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+				RTE_EVENT_DEV_CAP_CARRY_FLOW_ID),
 	};
 
 	*info = evdev_sw_info;
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index 38c21fa..4a7d823 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -172,7 +172,6 @@  struct test {
 			.new_event_threshold = 1024,
 			.dequeue_depth = 32,
 			.enqueue_depth = 64,
-			.disable_implicit_release = 0,
 	};
 	if (num_ports > MAX_PORTS)
 		return -1;
@@ -1227,7 +1226,6 @@  struct test_event_dev_stats {
 				.new_event_threshold = 128,
 				.dequeue_depth = 32,
 				.enqueue_depth = 64,
-				.disable_implicit_release = 0,
 		};
 		if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
 			printf("%d Error setting up port\n", __LINE__);
@@ -1317,7 +1315,6 @@  struct test_event_dev_stats {
 		.new_event_threshold = 128,
 		.dequeue_depth = 32,
 		.enqueue_depth = 64,
-		.disable_implicit_release = 0,
 	};
 	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
 		printf("%d Error setting up port\n", __LINE__);
@@ -3079,7 +3076,8 @@  struct test_event_dev_stats {
 	 * only be initialized once - and this needs to be set for multiple runs
 	 */
 	conf.new_event_threshold = 512;
-	conf.disable_implicit_release = disable_implicit_release;
+	conf.event_port_cfg = disable_implicit_release ?
+		RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
 
 	if (rte_event_port_setup(evdev, 0, &conf) < 0) {
 		printf("Error setting up RX port\n");
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index 42ff4ee..f70ab0c 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -129,6 +129,7 @@ 
 	struct rte_event_dev_config config = {
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
+			.nb_single_link_event_port_queues = 1,
 			.nb_events_limit  = 4096,
 			.nb_event_queue_flows = 1024,
 			.nb_event_port_dequeue_depth = 128,
@@ -143,7 +144,7 @@ 
 			.schedule_type = cdata.queue_type,
 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 			.nb_atomic_flows = 1024,
-		.nb_atomic_order_sequences = 1024,
+			.nb_atomic_order_sequences = 1024,
 	};
 	struct rte_event_queue_conf tx_q_conf = {
 			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
@@ -167,7 +168,8 @@ 
 	disable_implicit_release = (dev_info.event_dev_cap &
 			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
 
-	wkr_p_conf.disable_implicit_release = disable_implicit_release;
+	wkr_p_conf.event_port_cfg = disable_implicit_release ?
+		RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
 
 	if (dev_info.max_num_events < config.nb_events_limit)
 		config.nb_events_limit = dev_info.max_num_events;
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 55bb2f7..ca6cd20 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -436,6 +436,7 @@ 
 	struct rte_event_dev_config config = {
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
+			.nb_single_link_event_port_queues = 0,
 			.nb_events_limit  = 4096,
 			.nb_event_queue_flows = 1024,
 			.nb_event_port_dequeue_depth = 128,
diff --git a/examples/l2fwd-event/l2fwd_event_generic.c b/examples/l2fwd-event/l2fwd_event_generic.c
index 2dc95e5..e01df04 100644
--- a/examples/l2fwd-event/l2fwd_event_generic.c
+++ b/examples/l2fwd-event/l2fwd_event_generic.c
@@ -126,8 +126,9 @@ 
 	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
 		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-	event_p_conf.disable_implicit_release =
-		evt_rsrc->disable_implicit_release;
+	event_p_conf.event_port_cfg = 0;
+	if (evt_rsrc->disable_implicit_release)
+		event_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
 
 	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
diff --git a/examples/l2fwd-event/l2fwd_event_internal_port.c b/examples/l2fwd-event/l2fwd_event_internal_port.c
index 63d57b4..f54327b 100644
--- a/examples/l2fwd-event/l2fwd_event_internal_port.c
+++ b/examples/l2fwd-event/l2fwd_event_internal_port.c
@@ -123,8 +123,9 @@ 
 	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
 		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-	event_p_conf.disable_implicit_release =
-		evt_rsrc->disable_implicit_release;
+	event_p_conf.event_port_cfg = 0;
+	if (evt_rsrc->disable_implicit_release)
+		event_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 
 	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
 								event_p_id++) {
diff --git a/examples/l3fwd/l3fwd_event_generic.c b/examples/l3fwd/l3fwd_event_generic.c
index f8c9843..409a410 100644
--- a/examples/l3fwd/l3fwd_event_generic.c
+++ b/examples/l3fwd/l3fwd_event_generic.c
@@ -115,8 +115,9 @@ 
 	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
 		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-	event_p_conf.disable_implicit_release =
-		evt_rsrc->disable_implicit_release;
+	event_p_conf.event_port_cfg = 0;
+	if (evt_rsrc->disable_implicit_release)
+		event_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
 
 	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
diff --git a/examples/l3fwd/l3fwd_event_internal_port.c b/examples/l3fwd/l3fwd_event_internal_port.c
index 03ac581..df410f1 100644
--- a/examples/l3fwd/l3fwd_event_internal_port.c
+++ b/examples/l3fwd/l3fwd_event_internal_port.c
@@ -113,8 +113,9 @@ 
 	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
 		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
 
-	event_p_conf.disable_implicit_release =
-		evt_rsrc->disable_implicit_release;
+	event_p_conf.event_port_cfg = 0;
+	if (evt_rsrc->disable_implicit_release)
+		event_p_conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;
 
 	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
 								event_p_id++) {
diff --git a/lib/librte_eal/x86/include/rte_cpuflags.h b/lib/librte_eal/x86/include/rte_cpuflags.h
index c1d2036..ab2c3b3 100644
--- a/lib/librte_eal/x86/include/rte_cpuflags.h
+++ b/lib/librte_eal/x86/include/rte_cpuflags.h
@@ -130,6 +130,7 @@  enum rte_cpu_flag_t {
 	RTE_CPUFLAG_CLDEMOTE,               /**< Cache Line Demote */
 	RTE_CPUFLAG_MOVDIRI,                /**< Direct Store Instructions */
 	RTE_CPUFLAG_MOVDIR64B,              /**< Direct Store Instructions 64B */
+	RTE_CPUFLAG_UMWAIT,                 /**< UMONITOR/UMWAIT */
 	RTE_CPUFLAG_AVX512VP2INTERSECT,     /**< AVX512 Two Register Intersection */
 
 	/* The last item */
diff --git a/lib/librte_eal/x86/rte_cpuflags.c b/lib/librte_eal/x86/rte_cpuflags.c
index 30439e7..6bed3eb 100644
--- a/lib/librte_eal/x86/rte_cpuflags.c
+++ b/lib/librte_eal/x86/rte_cpuflags.c
@@ -137,6 +137,7 @@  struct feature_entry {
 	FEAT_DEF(CLDEMOTE, 0x00000007, 0, RTE_REG_ECX, 25)
 	FEAT_DEF(MOVDIRI, 0x00000007, 0, RTE_REG_ECX, 27)
 	FEAT_DEF(MOVDIR64B, 0x00000007, 0, RTE_REG_ECX, 28)
+	FEAT_DEF(UMWAIT, 0x00000007, 0, RTE_REG_ECX, 5)
 	FEAT_DEF(AVX512VP2INTERSECT, 0x00000007, 0, RTE_REG_EDX, 8)
 };
 
diff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build
index d1f25ee..17f7f40 100644
--- a/lib/librte_eventdev/meson.build
+++ b/lib/librte_eventdev/meson.build
@@ -7,6 +7,7 @@  else
 	cflags += '-DBSD'
 endif
 
+use_function_versioning = true
 sources = files('rte_eventdev.c',
 		'rte_event_ring.c',
 		'eventdev_trace_points.c',
diff --git a/lib/librte_eventdev/rte_event_eth_tx_adapter.c b/lib/librte_eventdev/rte_event_eth_tx_adapter.c
index bb21dc4..8a72256 100644
--- a/lib/librte_eventdev/rte_event_eth_tx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_tx_adapter.c
@@ -286,7 +286,7 @@  static int txa_service_queue_del(uint8_t id,
 		return ret;
 	}
 
-	pc->disable_implicit_release = 0;
+	pc->event_port_cfg = 0;
 	ret = rte_event_port_setup(dev_id, port_id, pc);
 	if (ret) {
 		RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index 82c177c..db2f9b7 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -32,6 +32,8 @@ 
 #include <rte_ethdev.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_compat.h>
+#include <rte_function_versioning.h>
 
 #include "rte_eventdev.h"
 #include "rte_eventdev_pmd.h"
@@ -87,7 +89,8 @@ 
 }
 
 int
-rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
+rte_event_dev_info_get(uint8_t dev_id,
+			     struct rte_event_dev_info *dev_info)
 {
 	struct rte_eventdev *dev;
 
@@ -437,9 +440,29 @@ 
 					dev_id);
 		return -EINVAL;
 	}
-	if (dev_conf->nb_event_queues > info.max_event_queues) {
-		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
-		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
+	if (dev_conf->nb_event_queues > info.max_event_queues +
+			info.max_single_link_event_port_queue_pairs) {
+		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
+				 dev_id, dev_conf->nb_event_queues,
+				 info.max_event_queues,
+				 info.max_single_link_event_port_queue_pairs);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_event_queues -
+			dev_conf->nb_single_link_event_port_queues >
+			info.max_event_queues) {
+		RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
+				 dev_id, dev_conf->nb_event_queues,
+				 dev_conf->nb_single_link_event_port_queues,
+				 info.max_event_queues);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_single_link_event_port_queues >
+			dev_conf->nb_event_queues) {
+		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
+				 dev_id,
+				 dev_conf->nb_single_link_event_port_queues,
+				 dev_conf->nb_event_queues);
 		return -EINVAL;
 	}
 
@@ -448,9 +471,31 @@ 
 		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
 		return -EINVAL;
 	}
-	if (dev_conf->nb_event_ports > info.max_event_ports) {
-		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
-		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
+	if (dev_conf->nb_event_ports > info.max_event_ports +
+			info.max_single_link_event_port_queue_pairs) {
+		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
+				 dev_id, dev_conf->nb_event_ports,
+				 info.max_event_ports,
+				 info.max_single_link_event_port_queue_pairs);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_event_ports -
+			dev_conf->nb_single_link_event_port_queues
+			> info.max_event_ports) {
+		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
+				 dev_id, dev_conf->nb_event_ports,
+				 dev_conf->nb_single_link_event_port_queues,
+				 info.max_event_ports);
+		return -EINVAL;
+	}
+
+	if (dev_conf->nb_single_link_event_port_queues >
+	    dev_conf->nb_event_ports) {
+		RTE_EDEV_LOG_ERR(
+				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
+				 dev_id,
+				 dev_conf->nb_single_link_event_port_queues,
+				 dev_conf->nb_event_ports);
 		return -EINVAL;
 	}
 
@@ -737,7 +782,8 @@ 
 		return -EINVAL;
 	}
 
-	if (port_conf && port_conf->disable_implicit_release &&
+	if (port_conf &&
+	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
 	    !(dev->data->event_dev_cap &
 	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
 		RTE_EDEV_LOG_ERR(
@@ -809,6 +855,7 @@ 
 			uint32_t *attr_value)
 {
 	struct rte_eventdev *dev;
+	uint32_t config;
 
 	if (!attr_value)
 		return -EINVAL;
@@ -830,6 +877,10 @@ 
 	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
 		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
 		break;
+	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
+		config = dev->data->ports_cfg[port_id].event_port_cfg;
+		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
+		break;
 	default:
 		return -EINVAL;
 	};
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 7dc8323..ce1fc2c 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -291,6 +291,12 @@ 
  * single queue to each port or map a single queue to many port.
  */
 
+#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
+/**< Event device preserves the flow ID from the enqueued
+ * event to the dequeued event if the flag is set. Otherwise,
+ * the content of this field is implementation dependent.
+ */
+
 /* Event device priority levels */
 #define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
 /**< Highest priority expressed across eventdev subsystem
@@ -380,6 +386,10 @@  struct rte_event_dev_info {
 	 * event port by this device.
 	 * A device that does not support bulk enqueue will set this as 1.
 	 */
+	uint8_t max_event_port_links;
+	/**< Maximum number of queues that can be linked to a single event
+	 * port by this device.
+	 */
 	int32_t max_num_events;
 	/**< A *closed system* event dev has a limit on the number of events it
 	 * can manage at a time. An *open system* event dev does not have a
@@ -387,6 +397,12 @@  struct rte_event_dev_info {
 	 */
 	uint32_t event_dev_cap;
 	/**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+	uint8_t max_single_link_event_port_queue_pairs;
+	/**< Maximum number of event ports and queues that are optimized for
+	 * (and only capable of) single-link configurations supported by this
+	 * device. These ports and queues are not accounted for in
+	 * max_event_ports or max_event_queues.
+	 */
 };
 
 /**
@@ -494,6 +510,14 @@  struct rte_event_dev_config {
 	 */
 	uint32_t event_dev_cfg;
 	/**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
+	uint8_t nb_single_link_event_port_queues;
+	/**< Number of event ports and queues that will be singly-linked to
+	 * each other. These are a subset of the overall event ports and
+	 * queues; this value cannot exceed *nb_event_ports* or
+	 * *nb_event_queues*. If the device has ports and queues that are
+	 * optimized for single-link usage, this field is a hint for how many
+	 * to allocate; otherwise, regular event ports and queues can be used.
+	 */
 };
 
 /**
@@ -519,7 +543,6 @@  struct rte_event_dev_config {
 rte_event_dev_configure(uint8_t dev_id,
 			const struct rte_event_dev_config *dev_conf);
 
-
 /* Event queue specific APIs */
 
 /* Event queue configuration bitmap flags */
@@ -671,6 +694,20 @@  struct rte_event_queue_conf {
 
 /* Event port specific APIs */
 
+/* Event port configuration bitmap flags */
+#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL    (1ULL << 0)
+/**< Configure the port not to release outstanding events in
+ * rte_event_dev_dequeue_burst(). If set, all events received through
+ * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
+ * RTE_EVENT_OP_FORWARD. Must be unset if the device is not
+ * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
+ */
+#define RTE_EVENT_PORT_CFG_SINGLE_LINK         (1ULL << 1)
+/**< This event port links only to a single event queue.
+ *
+ *  @see rte_event_port_setup(), rte_event_port_link()
+ */
+
 /** Event port configuration structure */
 struct rte_event_port_conf {
 	int32_t new_event_threshold;
@@ -698,13 +735,7 @@  struct rte_event_port_conf {
 	 * which previously supplied to rte_event_dev_configure().
 	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
 	 */
-	uint8_t disable_implicit_release;
-	/**< Configure the port not to release outstanding events in
-	 * rte_event_dev_dequeue_burst(). If true, all events received through
-	 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
-	 * RTE_EVENT_OP_FORWARD. Must be false when the device is not
-	 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
-	 */
+	uint32_t event_port_cfg; /**< Port cfg flags(EVENT_PORT_CFG_) */
 };
 
 /**
@@ -769,6 +800,10 @@  struct rte_event_port_conf {
  * The new event threshold of the port
  */
 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
+/**
+ * The implicit release disable attribute of the port
+ */
+#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
 
 /**
  * Get an attribute from a port.
diff --git a/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
index 443cd38..1572999 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd_pci.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd_pci.h
@@ -88,6 +88,60 @@ 
 	return -ENXIO;
 }
 
+/**
+ * @internal
+ * Wrapper for use by PCI drivers as a .probe function to attach to an
+ * event interface. Same as rte_event_pmd_pci_probe, except the caller
+ * can specify the name.
+ */
+static inline int
+rte_event_pmd_pci_probe_named(struct rte_pci_driver *pci_drv,
+			    struct rte_pci_device *pci_dev,
+			    size_t private_data_size,
+			    eventdev_pmd_pci_callback_t devinit,
+			    const char *name)
+{
+	struct rte_eventdev *eventdev;
+
+	int retval;
+
+	if (devinit == NULL)
+		return -EINVAL;
+
+	eventdev = rte_event_pmd_allocate(name,
+			 pci_dev->device.numa_node);
+	if (eventdev == NULL)
+		return -ENOMEM;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		eventdev->data->dev_private =
+				rte_zmalloc_socket(
+						"eventdev private structure",
+						private_data_size,
+						RTE_CACHE_LINE_SIZE,
+						rte_socket_id());
+
+		if (eventdev->data->dev_private == NULL)
+			rte_panic("Cannot allocate memzone for private "
+					"device data");
+	}
+
+	eventdev->dev = &pci_dev->device;
+
+	/* Invoke PMD device initialization function */
+	retval = devinit(eventdev);
+	if (retval == 0)
+		return 0;
+
+	RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
+			" failed", pci_drv->driver.name,
+			(unsigned int) pci_dev->id.vendor_id,
+			(unsigned int) pci_dev->id.device_id);
+
+	rte_event_pmd_release(eventdev);
+
+	return -ENXIO;
+}
 
 /**
  * @internal
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 91a62cd..de197dd 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -100,7 +100,6 @@  EXPERIMENTAL {
 	# added in 20.05
 	__rte_eventdev_trace_configure;
 	__rte_eventdev_trace_queue_setup;
-	__rte_eventdev_trace_port_setup;
 	__rte_eventdev_trace_port_link;
 	__rte_eventdev_trace_port_unlink;
 	__rte_eventdev_trace_start;
@@ -134,4 +133,7 @@  EXPERIMENTAL {
 	__rte_eventdev_trace_crypto_adapter_queue_pair_del;
 	__rte_eventdev_trace_crypto_adapter_start;
 	__rte_eventdev_trace_crypto_adapter_stop;
+
+	# changed in 20.08
+	__rte_eventdev_trace_port_setup;
 };