@@ -73,6 +73,7 @@
#define RTE_EVENT_MAX_DEVS 16
#define RTE_EVENT_MAX_PORTS_PER_DEV 255
#define RTE_EVENT_MAX_QUEUES_PER_DEV 255
+#define RTE_EVENT_MAX_PROFILES_PER_PORT 8
#define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32
#define RTE_EVENT_ETH_INTR_RING_SIZE 1024
#define RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE 32
@@ -317,6 +317,64 @@ can be achieved like this:
}
int links_made = rte_event_port_link(dev_id, tx_port_id, &single_link_q, &priority, 1);
+An application can also use link profiles, if supported by the underlying event device, to set up
+multiple link profiles per port and switch between them at runtime depending on heuristic data.
+
+An example use case could be as follows.
+
+Config path:
+
+.. code-block:: c
+
+   struct rte_event_dev_info info;
+   uint8_t lowQ[4] = {4, 5, 6, 7};
+   uint8_t highQ[4] = {0, 1, 2, 3};
+
+   rte_event_dev_info_get(0, &info);
+   if (info.max_profiles_per_port < 2)
+       return -ENOTSUP;
+
+   /* Profile 0 serves the high priority queues, profile 1 the low priority ones. */
+   rte_event_port_link_with_profile(0, 0, highQ, NULL, 4, 0);
+   rte_event_port_link_with_profile(0, 0, lowQ, NULL, 4, 1);
+
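+The links established for each profile can be read back with
+``rte_event_port_links_get_with_profile()``. A minimal sketch of such a sanity check
+(arrays sized per the API requirements, error handling omitted):
+
+.. code-block:: c
+
+   uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+   uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
+   int nb_links;
+
+   /* Retrieve the queues linked to port 0 under profile 1. */
+   nb_links = rte_event_port_links_get_with_profile(0, 0, queues, priorities, 1);
+   if (nb_links != 4)
+       return -EINVAL;
+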
+Worker path:
+
+.. code-block:: c
+
+   struct rte_event ev;
+   uint16_t deq;
+   uint8_t empty_high_deq = 0;
+   uint8_t empty_low_deq = 0;
+   uint8_t nb_low_events = 0;
+   uint8_t is_low_deq = 0;
+
+   /* MAX_HIGH_RETRY, MAX_LOW_RETRY and MAX_LOW_EVENTS are application
+    * defined thresholds.
+    */
+   while (1) {
+       deq = rte_event_dequeue_burst(0, 0, &ev, 1, 0);
+       if (deq == 0) {
+           /* Change the link profile based on work activity on the
+            * currently active profile.
+            */
+           if (is_low_deq) {
+               empty_low_deq++;
+               if (empty_low_deq == MAX_LOW_RETRY) {
+                   /* Low priority queues are idle, switch back to
+                    * the high priority profile.
+                    */
+                   rte_event_port_change_profile(0, 0, 0);
+                   is_low_deq = 0;
+                   empty_low_deq = 0;
+               }
+               continue;
+           }
+
+           if (empty_high_deq++ == MAX_HIGH_RETRY) {
+               /* High priority queues are idle, service the low
+                * priority profile for a while.
+                */
+               rte_event_port_change_profile(0, 0, 1);
+               is_low_deq = 1;
+               empty_high_deq = 0;
+           }
+           continue;
+       }
+
+       /* Process the event received. */
+
+       if (is_low_deq && ++nb_low_events == MAX_LOW_EVENTS) {
+           /* Enough low priority events have been processed, switch
+            * back to the high priority profile.
+            */
+           rte_event_port_change_profile(0, 0, 0);
+           is_low_deq = 0;
+           nb_low_events = 0;
+       }
+   }
+
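+At teardown, the links created on a non-default profile can be removed with
+``rte_event_port_unlink_with_profile()``. A minimal sketch, relying on the NULL
+queue list semantics described in the API reference:
+
+.. code-block:: c
+
+   /* A NULL queue list unlinks every queue of the given profile;
+    * nb_unlinks is ignored in that case.
+    */
+   rte_event_port_unlink_with_profile(0, 0, NULL, 0, 1);
+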
Starting the EventDev
~~~~~~~~~~~~~~~~~~~~~
@@ -31,6 +31,7 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE |
RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR;
+ dev_info->max_profiles_per_port = 1;
}
int
@@ -133,7 +134,7 @@ cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
for (i = 0; i < dev->nb_event_ports; i++) {
uint16_t nb_hwgrp = 0;
- links_map = event_dev->data->links_map;
+ links_map = event_dev->data->links_map[0];
/* Point links_map to this port specific area */
links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);
@@ -79,6 +79,7 @@ static struct rte_event_dev_info evdev_dlb2_default_info = {
RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE),
+ .max_profiles_per_port = 1,
};
struct process_local_port_data
@@ -359,6 +359,7 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
RTE_EVENT_DEV_CAP_NONSEQ_MODE |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+ dev_info->max_profiles_per_port = 1;
}
static int
@@ -411,7 +411,7 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
-
+ dev_info->max_profiles_per_port = 1;
}
static int
@@ -218,6 +218,7 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
.max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
.max_num_events = DSW_MAX_EVENTS,
+ .max_profiles_per_port = 1,
.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
RTE_EVENT_DEV_CAP_NONSEQ_MODE|
@@ -158,7 +158,7 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
RTE_EVENT_DEV_CAP_NONSEQ_MODE |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
-
+ dev_info->max_profiles_per_port = 1;
}
static int
@@ -378,6 +378,7 @@ opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE,
+ .max_profiles_per_port = 1,
};
*info = evdev_opdl_info;
@@ -104,6 +104,7 @@ skeleton_eventdev_info_get(struct rte_eventdev *dev,
RTE_EVENT_DEV_CAP_EVENT_QOS |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+ dev_info->max_profiles_per_port = 1;
}
static int
@@ -609,6 +609,7 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
RTE_EVENT_DEV_CAP_NONSEQ_MODE |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE),
+ .max_profiles_per_port = 1,
};
*info = evdev_sw_info;
@@ -119,8 +119,8 @@ struct rte_eventdev_data {
/**< Array of port configuration structures. */
struct rte_event_queue_conf queues_cfg[RTE_EVENT_MAX_QUEUES_PER_DEV];
/**< Array of queue configuration structures. */
- uint16_t links_map[RTE_EVENT_MAX_PORTS_PER_DEV *
- RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint16_t links_map[RTE_EVENT_MAX_PROFILES_PER_PORT]
+ [RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV];
/**< Memory to store queues to port connections. */
void *dev_private;
/**< PMD-specific private data */
@@ -180,6 +180,9 @@ struct rte_eventdev {
event_tx_adapter_enqueue_t txa_enqueue;
/**< Pointer to PMD eth Tx adapter enqueue function. */
event_crypto_adapter_enqueue_t ca_enqueue;
+ /**< PMD Crypto adapter enqueue function. */
+ event_change_profile_t change_profile;
+ /**< PMD port profile change function. */
uint64_t reserved_64s[4]; /**< Reserved for future fields */
void *reserved_ptrs[3]; /**< Reserved for future fields */
@@ -439,6 +442,32 @@ typedef int (*eventdev_port_link_t)(struct rte_eventdev *dev, void *port,
const uint8_t queues[], const uint8_t priorities[],
uint16_t nb_links);
+/**
+ * Link multiple source event queues associated with a profile to a destination
+ * event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port
+ * Event port pointer
+ * @param queues
+ * Points to an array of *nb_links* event queues to be linked
+ * to the event port.
+ * @param priorities
+ * Points to an array of *nb_links* service priorities associated with each
+ * event queue link to event port.
+ * @param nb_links
+ * The number of links to establish.
+ * @param profile
+ * The profile ID with which to associate the links.
+ *
+ * @return
+ * Returns 0 on success.
+ */
+typedef int (*eventdev_port_link_profile_t)(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links, uint8_t profile);
+
/**
* Unlink multiple source event queues from destination event port.
*
@@ -457,6 +486,28 @@ typedef int (*eventdev_port_link_t)(struct rte_eventdev *dev, void *port,
typedef int (*eventdev_port_unlink_t)(struct rte_eventdev *dev, void *port,
uint8_t queues[], uint16_t nb_unlinks);
+/**
+ * Unlink multiple source event queues associated with a profile from a
+ * destination event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port
+ * Event port pointer
+ * @param queues
+ * An array of *nb_unlinks* event queues to be unlinked from the event port.
+ * @param nb_unlinks
+ * The number of unlinks to establish
+ * @param profile
+ * The profile ID of the associated links.
+ *
+ * @return
+ * Returns 0 on success.
+ */
+typedef int (*eventdev_port_unlink_profile_t)(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks,
+ uint8_t profile);
+
/**
* Unlinks in progress. Returns number of unlinks that the PMD is currently
* performing, but have not yet been completed.
@@ -1350,8 +1401,12 @@ struct eventdev_ops {
eventdev_port_link_t port_link;
/**< Link event queues to an event port. */
+ eventdev_port_link_profile_t port_link_profile;
+ /**< Link event queues associated with a profile to an event port. */
eventdev_port_unlink_t port_unlink;
/**< Unlink event queues from an event port. */
+ eventdev_port_unlink_profile_t port_unlink_profile;
+ /**< Unlink event queues associated with a profile from an event port. */
eventdev_port_unlinks_in_progress_t port_unlinks_in_progress;
/**< Unlinks in progress on an event port. */
eventdev_dequeue_timeout_ticks_t timeout_ticks;
@@ -81,6 +81,13 @@ dummy_event_crypto_adapter_enqueue(__rte_unused void *port,
return 0;
}
+static int
+dummy_event_port_change_profile(__rte_unused void *port, __rte_unused uint8_t profile)
+{
+ RTE_EDEV_LOG_ERR("change profile requested for unconfigured event device");
+ return -EINVAL;
+}
+
void
event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
{
@@ -97,6 +104,7 @@ event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
.txa_enqueue_same_dest =
dummy_event_tx_adapter_enqueue_same_dest,
.ca_enqueue = dummy_event_crypto_adapter_enqueue,
+ .change_profile = dummy_event_port_change_profile,
.data = dummy_data,
};
@@ -117,5 +125,6 @@ event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
fp_op->txa_enqueue = dev->txa_enqueue;
fp_op->txa_enqueue_same_dest = dev->txa_enqueue_same_dest;
fp_op->ca_enqueue = dev->ca_enqueue;
+ fp_op->change_profile = dev->change_profile;
fp_op->data = dev->data->ports;
}
@@ -76,6 +76,17 @@ RTE_TRACE_POINT(
rte_trace_point_emit_int(rc);
)
+RTE_TRACE_POINT(
+ rte_eventdev_trace_port_link_with_profile,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id,
+ uint16_t nb_links, uint8_t profile, int rc),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_u8(port_id);
+ rte_trace_point_emit_u16(nb_links);
+ rte_trace_point_emit_u8(profile);
+ rte_trace_point_emit_int(rc);
+)
+
RTE_TRACE_POINT(
rte_eventdev_trace_port_unlink,
RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id,
@@ -86,6 +97,17 @@ RTE_TRACE_POINT(
rte_trace_point_emit_int(rc);
)
+RTE_TRACE_POINT(
+ rte_eventdev_trace_port_unlink_with_profile,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id,
+ uint16_t nb_unlinks, uint8_t profile, int rc),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_u8(port_id);
+ rte_trace_point_emit_u16(nb_unlinks);
+ rte_trace_point_emit_u8(profile);
+ rte_trace_point_emit_int(rc);
+)
+
RTE_TRACE_POINT(
rte_eventdev_trace_start,
RTE_TRACE_POINT_ARGS(uint8_t dev_id, int rc),
@@ -19,9 +19,15 @@ RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_setup,
RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_link,
lib.eventdev.port.link)
+RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_link_with_profile,
+ lib.eventdev.port.link_with_profile)
+
RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_unlink,
lib.eventdev.port.unlink)
+RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_port_unlink_with_profile,
+ lib.eventdev.port.unlink_with_profile)
+
RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_start,
lib.eventdev.start)
@@ -270,7 +270,7 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
void **ports;
uint16_t *links_map;
struct rte_event_port_conf *ports_cfg;
- unsigned int i;
+ unsigned int i, j;
RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
dev->data->dev_id);
@@ -281,7 +281,6 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
ports = dev->data->ports;
ports_cfg = dev->data->ports_cfg;
- links_map = dev->data->links_map;
for (i = nb_ports; i < old_nb_ports; i++)
(*dev->dev_ops->port_release)(ports[i]);
@@ -297,9 +296,11 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
sizeof(ports[0]) * new_ps);
memset(ports_cfg + old_nb_ports, 0,
sizeof(ports_cfg[0]) * new_ps);
- for (i = old_links_map_end; i < links_map_end; i++)
- links_map[i] =
- EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+ for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++) {
+ links_map = dev->data->links_map[i];
+ for (j = old_links_map_end; j < links_map_end; j++)
+ links_map[j] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+ }
}
} else {
if (*dev->dev_ops->port_release == NULL)
@@ -953,21 +954,44 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
const uint8_t queues[], const uint8_t priorities[],
uint16_t nb_links)
{
- struct rte_eventdev *dev;
- uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ return rte_event_port_link_with_profile(dev_id, port_id, queues, priorities, nb_links, 0);
+}
+
+int
+rte_event_port_link_with_profile(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
+ const uint8_t priorities[], uint16_t nb_links, uint8_t profile)
+{
uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ struct rte_event_dev_info info;
+ struct rte_eventdev *dev;
uint16_t *links_map;
int i, diag;
RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
dev = &rte_eventdevs[dev_id];
+ if (*dev->dev_ops->dev_infos_get == NULL)
+ return -ENOTSUP;
+
+ (*dev->dev_ops->dev_infos_get)(dev, &info);
+ if (profile >= RTE_EVENT_MAX_PROFILES_PER_PORT || profile >= info.max_profiles_per_port) {
+ RTE_EDEV_LOG_ERR("Invalid profile=%" PRIu8, profile);
+ return -EINVAL;
+ }
+
if (*dev->dev_ops->port_link == NULL) {
RTE_EDEV_LOG_ERR("Function not supported\n");
rte_errno = ENOTSUP;
return 0;
}
+ if (profile && *dev->dev_ops->port_link_profile == NULL) {
+ RTE_EDEV_LOG_ERR("Function not supported\n");
+ rte_errno = ENOTSUP;
+ return 0;
+ }
+
if (!is_valid_port(dev, port_id)) {
RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
rte_errno = EINVAL;
@@ -995,18 +1019,22 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
return 0;
}
- diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
- queues, priorities, nb_links);
+ if (profile)
+ diag = (*dev->dev_ops->port_link_profile)(dev, dev->data->ports[port_id], queues,
+ priorities, nb_links, profile);
+ else
+ diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id], queues,
+ priorities, nb_links);
if (diag < 0)
return diag;
- links_map = dev->data->links_map;
+ links_map = dev->data->links_map[profile];
/* Point links_map to this port specific area */
links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
for (i = 0; i < diag; i++)
links_map[queues[i]] = (uint8_t)priorities[i];
- rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
+ rte_eventdev_trace_port_link_with_profile(dev_id, port_id, nb_links, profile, diag);
return diag;
}
@@ -1014,27 +1042,50 @@ int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
uint8_t queues[], uint16_t nb_unlinks)
{
- struct rte_eventdev *dev;
+ return rte_event_port_unlink_with_profile(dev_id, port_id, queues, nb_unlinks, 0);
+}
+
+int
+rte_event_port_unlink_with_profile(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
+ uint16_t nb_unlinks, uint8_t profile)
+{
uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
- int i, diag, j;
+ struct rte_event_dev_info info;
+ struct rte_eventdev *dev;
uint16_t *links_map;
+ int i, diag, j;
RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
dev = &rte_eventdevs[dev_id];
+ if (*dev->dev_ops->dev_infos_get == NULL)
+ return -ENOTSUP;
+
+ (*dev->dev_ops->dev_infos_get)(dev, &info);
+ if (profile >= RTE_EVENT_MAX_PROFILES_PER_PORT || profile >= info.max_profiles_per_port) {
+ RTE_EDEV_LOG_ERR("Invalid profile=%" PRIu8, profile);
+ return -EINVAL;
+ }
+
if (*dev->dev_ops->port_unlink == NULL) {
RTE_EDEV_LOG_ERR("Function not supported");
rte_errno = ENOTSUP;
return 0;
}
+ if (profile && *dev->dev_ops->port_unlink_profile == NULL) {
+ RTE_EDEV_LOG_ERR("Function not supported");
+ rte_errno = ENOTSUP;
+ return 0;
+ }
+
if (!is_valid_port(dev, port_id)) {
RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
rte_errno = EINVAL;
return 0;
}
- links_map = dev->data->links_map;
+ links_map = dev->data->links_map[profile];
/* Point links_map to this port specific area */
links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
@@ -1063,16 +1114,19 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
return 0;
}
- diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
- queues, nb_unlinks);
-
+ if (profile)
+ diag = (*dev->dev_ops->port_unlink_profile)(dev, dev->data->ports[port_id], queues,
+ nb_unlinks, profile);
+ else
+ diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id], queues,
+ nb_unlinks);
if (diag < 0)
return diag;
for (i = 0; i < diag; i++)
links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
- rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
+ rte_eventdev_trace_port_unlink_with_profile(dev_id, port_id, nb_unlinks, profile, diag);
return diag;
}
@@ -1116,7 +1170,50 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
return -EINVAL;
}
- links_map = dev->data->links_map;
+ /* Use the default profile. */
+ links_map = dev->data->links_map[0];
+ /* Point links_map to this port specific area */
+ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ for (i = 0; i < dev->data->nb_queues; i++) {
+ if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
+ queues[count] = i;
+ priorities[count] = (uint8_t)links_map[i];
+ ++count;
+ }
+ }
+
+ rte_eventdev_trace_port_links_get(dev_id, port_id, count);
+
+ return count;
+}
+
+int
+rte_event_port_links_get_with_profile(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
+ uint8_t priorities[], uint8_t profile)
+{
+ struct rte_event_dev_info info;
+ struct rte_eventdev *dev;
+ uint16_t *links_map;
+ int i, count = 0;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+ dev = &rte_eventdevs[dev_id];
+ if (*dev->dev_ops->dev_infos_get == NULL)
+ return -ENOTSUP;
+
+ (*dev->dev_ops->dev_infos_get)(dev, &info);
+ if (profile >= RTE_EVENT_MAX_PROFILES_PER_PORT || profile >= info.max_profiles_per_port) {
+ RTE_EDEV_LOG_ERR("Invalid profile=%" PRIu8, profile);
+ return -EINVAL;
+ }
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ links_map = dev->data->links_map[profile];
/* Point links_map to this port specific area */
links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
for (i = 0; i < dev->data->nb_queues; i++) {
@@ -1440,7 +1537,7 @@ eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
{
char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
const struct rte_memzone *mz;
- int n;
+ int i, n;
/* Generate memzone name */
n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
@@ -1460,11 +1557,10 @@ eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
*data = mz->addr;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
memset(*data, 0, sizeof(struct rte_eventdev_data));
- for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
- RTE_EVENT_MAX_QUEUES_PER_DEV;
- n++)
- (*data)->links_map[n] =
- EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+ for (i = 0; i < RTE_EVENT_MAX_PROFILES_PER_PORT; i++)
+ for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV * RTE_EVENT_MAX_QUEUES_PER_DEV;
+ n++)
+ (*data)->links_map[i][n] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
}
return 0;
@@ -446,6 +446,10 @@ struct rte_event_dev_info {
* device. These ports and queues are not accounted for in
* max_event_ports or max_event_queues.
*/
+ uint8_t max_profiles_per_port;
+ /**< Maximum number of event queue profiles per event port.
+ * A device that doesn't support multiple profiles will set this to 1.
+ */
};
/**
@@ -1537,6 +1541,10 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
* latency of critical work by establishing the link with more event ports
* at runtime.
*
+ * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
+ * than one, this function links the event queues to the default profile,
+ * i.e. profile 0, of the event port.
+ *
* @param dev_id
* The identifier of the device.
*
@@ -1594,6 +1602,10 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
* Event queue(s) to event port unlink establishment can be changed at runtime
* without re-configuring the device.
*
+ * When the value of ``rte_event_dev_info::max_profiles_per_port`` is greater
+ * than one, this function unlinks the event queues from the default profile,
+ * i.e. profile 0, of the event port.
+ *
* @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
*
* @param dev_id
@@ -1627,6 +1639,137 @@ int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
uint8_t queues[], uint16_t nb_unlinks);
+/**
+ * Link multiple source event queues supplied in *queues* to the destination
+ * event port designated by its *port_id* with associated profile identifier
+ * supplied in *profile* with service priorities supplied in *priorities* on
+ * the event device designated by its *dev_id*.
+ *
+ * If *profile* is set to 0, the links created by the call ``rte_event_port_link()``
+ * will be overwritten.
+ *
+ * Event ports use profile 0 by default, until the active profile is changed
+ * using ``rte_event_port_change_profile()``.
+ *
+ * The link establishment shall enable the event port *port_id* to receive
+ * events from the specified event queue(s) supplied in *queues*.
+ *
+ * An event queue may link to one or more event ports.
+ * The number of links that can be established from an event queue to an event
+ * port is implementation defined.
+ *
+ * Event queue(s) to event port link establishment can be changed at runtime
+ * without re-configuring the device to support scaling and to reduce the
+ * latency of critical work by establishing the link with more event ports
+ * at runtime.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier to select the destination port to link.
+ *
+ * @param queues
+ * Points to an array of *nb_links* event queues to be linked
+ * to the event port.
+ * NULL value is allowed, in which case this function links all the configured
+ * event queues *nb_event_queues*, which were previously supplied to
+ * rte_event_dev_configure(), to the event port *port_id*.
+ *
+ * @param priorities
+ * Points to an array of *nb_links* service priorities associated with each
+ * event queue link to event port.
+ * The priority defines the event port's servicing priority for the event
+ * queue, which may be ignored by an implementation.
+ * The requested priority should be in the range of
+ * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
+ * The implementation shall normalize the requested priority to an
+ * implementation supported priority value.
+ * NULL value is allowed, in which case this function links the event queues
+ * with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority.
+ *
+ * @param nb_links
+ * The number of links to establish. This parameter is ignored if queues is
+ * NULL.
+ *
+ * @param profile
+ * The profile identifier associated with the links between event queues and
+ * the event port. Must be less than the maximum capability reported by
+ * ``rte_event_dev_info::max_profiles_per_port``.
+ *
+ * @return
+ * The number of links actually established. The return value can be less than
+ * the value of the *nb_links* parameter when the implementation has a
+ * limitation on specific queue to port link establishment or if invalid
+ * parameters are specified in *queues*.
+ * If the return value is less than *nb_links*, the remaining links at the end
+ * of link[] are not established, and the caller has to take care of them.
+ * If the return value is less than *nb_links*, the implementation shall update
+ * rte_errno accordingly. Possible rte_errno values are
+ * (EDQUOT) Quota exceeded (the application tried to link a queue configured
+ * with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
+ * (EINVAL) Invalid parameter
+ *
+ */
+__rte_experimental
+int
+rte_event_port_link_with_profile(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
+ const uint8_t priorities[], uint16_t nb_links,
+ uint8_t profile);
+
+/**
+ * Unlink multiple source event queues supplied in *queues*, that belong to the
+ * profile designated by *profile*, from the destination event port designated
+ * by its *port_id* on the event device designated by its *dev_id*.
+ *
+ * If *profile* is set to 0, i.e. the default profile, this function behaves
+ * the same as ``rte_event_port_unlink()``.
+ *
+ * The unlink call issues an async request to disable the event port *port_id*
+ * from receiving events from the specified event queue(s).
+ * Event queue(s) to event port unlink establishment can be changed at runtime
+ * without re-configuring the device.
+ *
+ * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier to select the destination port to unlink.
+ *
+ * @param queues
+ * Points to an array of *nb_unlinks* event queues to be unlinked
+ * from the event port.
+ * NULL value is allowed, in which case this function unlinks all the
+ * event queue(s) from the event port *port_id*.
+ *
+ * @param nb_unlinks
+ * The number of unlinks to establish. This parameter is ignored if queues is
+ * NULL.
+ *
+ * @param profile
+ * The profile identifier associated with the links between event queues and
+ * the event port. Must be less than the maximum capability reported by
+ * ``rte_event_dev_info::max_profiles_per_port``.
+ *
+ * @return
+ * The number of unlinks successfully requested. The return value can be less
+ * than the value of the *nb_unlinks* parameter when the implementation has a
+ * limitation on specific queue to port unlink establishment or
+ * if invalid parameters are specified.
+ * If the return value is less than *nb_unlinks*, the remaining queues at the
+ * end of queues[] are not unlinked, and the caller has to take care of them.
+ * If the return value is less than *nb_unlinks*, the implementation shall
+ * update rte_errno accordingly. Possible rte_errno values are
+ * (EINVAL) Invalid parameter
+ *
+ */
+__rte_experimental
+int
+rte_event_port_unlink_with_profile(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
+ uint16_t nb_unlinks, uint8_t profile);
+
/**
* Returns the number of unlinks in progress.
*
@@ -1681,6 +1824,42 @@ int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
uint8_t queues[], uint8_t priorities[]);
+/**
+ * Retrieve the list of source event queues and their service priorities,
+ * associated with a profile, linked to the destination event port
+ * designated by its *port_id* on the event device designated by its *dev_id*.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier.
+ *
+ * @param[out] queues
+ * Points to an array of *queues* for output.
+ * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
+ * store the event queue(s) linked with event port *port_id*
+ *
+ * @param[out] priorities
+ * Points to an array of *priorities* for output.
+ * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
+ * store the service priority associated with each event queue linked
+ *
+ * @param profile
+ * The profile identifier associated with the links between event queues and
+ * the event port. Must be less than the maximum capability reported by
+ * ``rte_event_dev_info::max_profiles_per_port``.
+ *
+ * @return
+ * The number of links established on the event port designated by its
+ * *port_id*.
+ * - <0 on failure.
+ */
+__rte_experimental
+int
+rte_event_port_links_get_with_profile(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
+ uint8_t priorities[], uint8_t profile);
+
/**
* Retrieve the service ID of the event dev. If the adapter doesn't use
* a rte_service function, this function returns -ESRCH.
@@ -2266,6 +2445,53 @@ rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
return 0;
}
+/**
+ * Change the active profile on an event port.
+ *
+ * This function is used to change the currently active profile on an event
+ * port, when multiple link profiles have been configured on it with
+ * ``rte_event_port_link_with_profile()``.
+ *
+ * On the subsequent ``rte_event_dequeue_burst()`` call, only the event queues
+ * linked to the newly active profile will participate in scheduling.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param profile
+ * The identifier of the profile.
+ * @return
+ * - 0 on success.
+ * - -EINVAL if *dev_id*, *port_id*, or *profile* is invalid.
+ */
+__rte_experimental
+static inline int
+rte_event_port_change_profile(uint8_t dev_id, uint8_t port_id, uint8_t profile)
+{
+ const struct rte_event_fp_ops *fp_ops;
+ void *port;
+
+ fp_ops = &rte_event_fp_ops[dev_id];
+ port = fp_ops->data[port_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS ||
+ port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
+ return -EINVAL;
+
+ if (port == NULL)
+ return -EINVAL;
+
+ if (profile >= RTE_EVENT_MAX_PROFILES_PER_PORT)
+ return -EINVAL;
+#endif
+ rte_eventdev_trace_change_profile(dev_id, port_id, profile);
+
+ return fp_ops->change_profile(port, profile);
+}
+
#ifdef __cplusplus
}
#endif
@@ -42,6 +42,8 @@ typedef uint16_t (*event_crypto_adapter_enqueue_t)(void *port,
uint16_t nb_events);
/**< @internal Enqueue burst of events on crypto adapter */
+typedef int (*event_change_profile_t)(void *port, uint8_t profile);
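+/**< @internal Change the active link profile on an event port */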
+
struct rte_event_fp_ops {
void **data;
/**< points to array of internal port data pointers */
@@ -65,6 +67,8 @@ struct rte_event_fp_ops {
/**< PMD Tx adapter enqueue same destination function. */
event_crypto_adapter_enqueue_t ca_enqueue;
/**< PMD Crypto adapter enqueue function. */
+ event_change_profile_t change_profile;
+ /**< PMD port profile change function. */
uintptr_t reserved[6];
} __rte_cache_aligned;
@@ -46,6 +46,14 @@ RTE_TRACE_POINT_FP(
rte_trace_point_emit_int(op);
)
+RTE_TRACE_POINT_FP(
+ rte_eventdev_trace_change_profile,
+ RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, uint8_t profile),
+ rte_trace_point_emit_u8(dev_id);
+ rte_trace_point_emit_u8(port_id);
+ rte_trace_point_emit_u8(profile);
+)
+
RTE_TRACE_POINT_FP(
rte_eventdev_trace_eth_tx_adapter_enqueue,
RTE_TRACE_POINT_ARGS(uint8_t dev_id, uint8_t port_id, void *ev_table,
@@ -131,6 +131,11 @@ EXPERIMENTAL {
rte_event_eth_tx_adapter_runtime_params_init;
rte_event_eth_tx_adapter_runtime_params_set;
rte_event_timer_remaining_ticks_get;
+
+ # added in 23.11
+ rte_event_port_link_with_profile;
+ rte_event_port_links_get_with_profile;
+ rte_event_port_unlink_with_profile;
};
INTERNAL {