[v10,14/23] event/dlb: add port link
diff mbox series

Message ID 1604082458-15368-15-git-send-email-timothy.mcdaniel@intel.com
State Superseded
Delegated to: Jerin Jacob
Headers show
Series
  • Add DLB PMD
Related show

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Timothy McDaniel Oct. 30, 2020, 6:27 p.m. UTC
Add port link entry point. Directed queues are identified and created
at this stage. Their setup is deferred until link time, at which
point we know the directed port ID. Directed queue setup
will only fail if this queue is already set up or there are
no directed queues left to configure.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
Reviewed-by: Gage Eads <gage.eads@intel.com>
---
 drivers/event/dlb/dlb.c                  | 306 +++++++++++++++
 drivers/event/dlb/dlb_iface.c            |   9 +
 drivers/event/dlb/dlb_iface.h            |   9 +
 drivers/event/dlb/pf/base/dlb_resource.c | 641 +++++++++++++++++++++++++++++++
 drivers/event/dlb/pf/dlb_pf.c            |  69 ++++
 5 files changed, 1034 insertions(+)

Patch
diff mbox series

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 4d91ddd..2ad195d 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -1532,6 +1532,311 @@  set_num_atm_inflights(const char *key __rte_unused,
 	return 0;
 }
 
+/* Validate a prospective queue->port link for dlb_eventdev_port_link().
+ * link_exists is true if ev_port already has a link entry for this queue;
+ * index is the candidate link slot (-1 if none is free).
+ * Returns 0 if the link is permitted, else -1 with rte_errno set.
+ */
+static int
+dlb_validate_port_link(struct dlb_eventdev_port *ev_port,
+		       uint8_t queue_id,
+		       bool link_exists,
+		       int index)
+{
+	struct dlb_eventdev *dlb = ev_port->dlb;
+	struct dlb_eventdev_queue *ev_queue;
+	bool port_is_dir, queue_is_dir;
+
+	/* Valid queue IDs are [0, num_queues); the previous ">" comparison
+	 * let queue_id == num_queues through to the lookup below.
+	 */
+	if (queue_id >= dlb->num_queues) {
+		DLB_LOG_ERR("queue_id %d >= num queues %d\n",
+			    queue_id, dlb->num_queues);
+		rte_errno = -EINVAL;
+		return -1;
+	}
+
+	ev_queue = &dlb->ev_queues[queue_id];
+
+	/* The queue must be configured now, or have been configured before a
+	 * reconfiguration.
+	 */
+	if (!ev_queue->setup_done &&
+	    ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED) {
+		DLB_LOG_ERR("setup not done and not previously configured\n");
+		rte_errno = -EINVAL;
+		return -1;
+	}
+
+	port_is_dir = ev_port->qm_port.is_directed;
+	queue_is_dir = ev_queue->qm_queue.is_directed;
+
+	/* Directed ports can only link to directed queues, and vice versa */
+	if (port_is_dir != queue_is_dir) {
+		DLB_LOG_ERR("%s queue %u can't link to %s port %u\n",
+			    queue_is_dir ? "DIR" : "LDB", ev_queue->id,
+			    port_is_dir ? "DIR" : "LDB", ev_port->id);
+
+		rte_errno = -EINVAL;
+		return -1;
+	}
+
+	/* Check if there is space for the requested link */
+	if (!link_exists && index == -1) {
+		DLB_LOG_ERR("no space for new link\n");
+		rte_errno = -ENOSPC;
+		return -1;
+	}
+
+	/* Check if the directed port is already linked */
+	if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
+	    !link_exists) {
+		DLB_LOG_ERR("Can't link DIR port %d to >1 queues\n",
+			    ev_port->id);
+		rte_errno = -EINVAL;
+		return -1;
+	}
+
+	/* Check if the directed queue is already linked */
+	if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
+	    !link_exists) {
+		DLB_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
+			    ev_queue->id);
+		rte_errno = -EINVAL;
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Ask the hardware interface layer to map a load-balanced queue (qm_qid) to
+ * a load-balanced port (qm_port_id) at the given eventdev priority.
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int16_t
+dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
+			   uint32_t qm_port_id,
+			   uint16_t qm_qid,
+			   uint8_t priority)
+{
+	struct dlb_map_qid_args cfg;
+	/* Zero-init: response.status is read on the error path below and is
+	 * only valid if the iface call actually wrote the response.
+	 */
+	struct dlb_cmd_response response = {0};
+	int32_t ret;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	/* Build message */
+	cfg.response = (uintptr_t)&response;
+	cfg.port_id = qm_port_id;
+	cfg.qid = qm_qid;
+	cfg.priority = EV_TO_DLB_PRIO(priority);
+
+	ret = dlb_iface_map_qid(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: map qid error, ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		DLB_LOG_ERR("dlb: device_id=%d grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
+			    handle->device_id,
+			    handle->domain_id, cfg.port_id,
+			    cfg.qid,
+			    cfg.priority);
+	} else {
+		DLB_LOG_DBG("dlb: mapped queue %d to qm_port %d\n",
+			    qm_qid, qm_port_id);
+	}
+
+	return ret;
+}
+
+/* Link a load-balanced queue to a load-balanced port at the given priority,
+ * reusing an existing (unmapped) link slot when one matches, otherwise
+ * taking the first free slot. Returns 0 on success (or if already mapped),
+ * a negative error code otherwise.
+ */
+static int
+dlb_event_queue_join_ldb(struct dlb_eventdev *dlb,
+			 struct dlb_eventdev_port *ev_port,
+			 struct dlb_eventdev_queue *ev_queue,
+			 uint8_t priority)
+{
+	int slot = -1;
+	int rc, j;
+
+	/* Prefer an existing-but-unmapped entry for this queue/priority pair
+	 * over a free entry; the first free entry is remembered as fallback.
+	 */
+	for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+		if (!ev_port->link[j].valid) {
+			if (slot == -1)
+				slot = j;
+			continue;
+		}
+
+		if (ev_port->link[j].queue_id == ev_queue->id &&
+		    ev_port->link[j].priority == priority) {
+			if (ev_port->link[j].mapped)
+				return 0; /* already mapped */
+			slot = j;
+		}
+	}
+
+	if (slot == -1) {
+		DLB_LOG_ERR("dlb: qm_port %d has no available QID slots.\n",
+			    ev_port->qm_port.id);
+		return -EINVAL;
+	}
+
+	rc = dlb_hw_map_ldb_qid_to_port(&dlb->qm_instance,
+					ev_port->qm_port.id,
+					ev_queue->qm_queue.id,
+					priority);
+	if (rc == 0)
+		ev_port->link[slot].mapped = true;
+
+	return rc;
+}
+
+/* Create a directed queue paired with the given directed port.
+ * Returns the new queue's QM queue ID on success, -EINVAL on failure.
+ */
+static int32_t
+dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_create_dir_queue_args cfg;
+	/* Zero-init: response.status is read on the error path below and is
+	 * only valid if the iface call actually wrote the response.
+	 */
+	struct dlb_cmd_response response = {0};
+	int32_t ret;
+
+	cfg.response = (uintptr_t)&response;
+
+	/* The directed port is always configured before its queue */
+	cfg.port_id = qm_port_id;
+
+	ret = dlb_iface_dir_queue_create(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return -EINVAL;
+	}
+
+	return response.id;
+}
+
+/* Deferred setup of a directed queue: create it against its (now known)
+ * directed port and record the QM<->eventdev queue ID mapping.
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int
+dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
+			     struct dlb_eventdev_queue *ev_queue,
+			     struct dlb_eventdev_port *ev_port)
+{
+	int32_t qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
+
+	if (qm_qid < 0) {
+		DLB_LOG_ERR("Failed to create the DIR queue\n");
+		return qm_qid;
+	}
+
+	/* Record the reverse lookup, then attach the QM ID to the queue */
+	dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
+	ev_queue->qm_queue.id = qm_qid;
+
+	return 0;
+}
+
+/* Perform the hardware side of a queue->port link. A no-op while the device
+ * is stopped (links are deferred to start time). Returns 0 on success, else
+ * -1 with rte_errno set.
+ */
+static int
+dlb_do_port_link(struct rte_eventdev *dev,
+		 struct dlb_eventdev_queue *ev_queue,
+		 struct dlb_eventdev_port *ev_port,
+		 uint8_t prio)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	int err;
+
+	/* Don't link until start time. */
+	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
+		return 0;
+
+	err = ev_queue->qm_queue.is_directed ?
+		dlb_eventdev_dir_queue_setup(dlb, ev_queue, ev_port) :
+		dlb_event_queue_join_ldb(dlb, ev_port, ev_queue, prio);
+
+	if (err == 0)
+		return 0;
+
+	DLB_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
+		    ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
+		    ev_queue->id, ev_port->id);
+
+	rte_errno = err;
+	return -1;
+}
+
+/* Eventdev port_link entry point: link up to nb_links queues to ev_port at
+ * the given priorities. Returns the number of entries processed; on failure
+ * the loop stops early so the return value is the index of the offending
+ * queue, with rte_errno set.
+ */
+static int
+dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
+		       const uint8_t queues[], const uint8_t priorities[],
+		       uint16_t nb_links)
+
+{
+	struct dlb_eventdev_port *ev_port = event_port;
+	struct dlb_eventdev *dlb;
+	int i, j;
+
+	RTE_SET_USED(dev);
+
+	if (ev_port == NULL) {
+		DLB_LOG_ERR("dlb: evport not setup\n");
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	/* The port must be configured now, or have been configured before a
+	 * reconfiguration.
+	 */
+	if (!ev_port->setup_done &&
+	    ev_port->qm_port.config_state != DLB_PREV_CONFIGURED) {
+		DLB_LOG_ERR("dlb: evport not setup\n");
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	/* Note: rte_event_port_link() ensures the PMD won't receive a NULL
+	 * queues pointer.
+	 */
+	if (nb_links == 0) {
+		DLB_LOG_DBG("dlb: nb_links is 0\n");
+		return 0; /* Ignore and return success */
+	}
+
+	dlb = ev_port->dlb;
+
+	DLB_LOG_DBG("Linking %u queues to %s port %d\n",
+		    nb_links,
+		    ev_port->qm_port.is_directed ? "DIR" : "LDB",
+		    ev_port->id);
+
+	for (i = 0; i < nb_links; i++) {
+		struct dlb_eventdev_queue *ev_queue;
+		uint8_t queue_id, prio;
+		bool found = false;
+		int index = -1;
+
+		queue_id = queues[i];
+		prio = priorities[i];
+
+		/* Check if the link already exists. While scanning, also
+		 * remember the first free slot in case it doesn't.
+		 */
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+			if (ev_port->link[j].valid) {
+				if (ev_port->link[j].queue_id == queue_id) {
+					found = true;
+					index = j;
+					break;
+				}
+			} else {
+				if (index == -1)
+					index = j;
+			}
+
+		/* could not link */
+		if (index == -1)
+			break;
+
+		/* Check if already linked at the requested priority. (When
+		 * found is true, j == index from the loop above.)
+		 */
+		if (found && ev_port->link[j].priority == prio)
+			continue;
+
+		if (dlb_validate_port_link(ev_port, queue_id, found, index))
+			break; /* return index of offending queue */
+
+		ev_queue = &dlb->ev_queues[queue_id];
+
+		if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
+			break; /* return index of offending queue */
+
+		/* NOTE(review): incremented even when this is only a priority
+		 * change (found == true), while ev_port->num_links below is
+		 * guarded by !found — confirm the asymmetry is intentional.
+		 */
+		ev_queue->num_links++;
+
+		ev_port->link[index].queue_id = queue_id;
+		ev_port->link[index].priority = prio;
+		ev_port->link[index].valid = true;
+		/* Entry already exists?  If so, then must be prio change */
+		if (!found)
+			ev_port->num_links++;
+	}
+	return i;
+}
+
 void
 dlb_entry_points_init(struct rte_eventdev *dev)
 {
@@ -1542,6 +1847,7 @@  dlb_entry_points_init(struct rte_eventdev *dev)
 		.port_def_conf    = dlb_eventdev_port_default_conf_get,
 		.queue_setup      = dlb_eventdev_queue_setup,
 		.port_setup       = dlb_eventdev_port_setup,
+		.port_link        = dlb_eventdev_port_link,
 		.dump             = dlb_eventdev_dump,
 		.xstats_get       = dlb_eventdev_xstats_get,
 		.xstats_get_names = dlb_eventdev_xstats_get_names,
diff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c
index fbbf9d7..aaf4506 100644
--- a/drivers/event/dlb/dlb_iface.c
+++ b/drivers/event/dlb/dlb_iface.c
@@ -47,6 +47,15 @@  int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle,
 				 struct dlb_create_dir_port_args *cfg,
 				 enum dlb_cq_poll_modes poll_mode);
 
+int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
+			 struct dlb_map_qid_args *cfg);
+
+int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
+			   struct dlb_unmap_qid_args *cfg);
+
+int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
+				     struct dlb_pending_port_unmaps_args *args);
+
 int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
 				  enum dlb_cq_poll_modes *mode);
 
diff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h
index d578185..c0f5f2e 100644
--- a/drivers/event/dlb/dlb_iface.h
+++ b/drivers/event/dlb/dlb_iface.h
@@ -49,6 +49,15 @@  extern int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle,
 extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
 				  struct dlb_create_ldb_queue_args *cfg);
 
+extern int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
+			 struct dlb_map_qid_args *cfg);
+
+extern int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
+				  struct dlb_unmap_qid_args *cfg);
+
+extern int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
+				struct dlb_pending_port_unmaps_args *args);
+
 extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
 					 enum dlb_cq_poll_modes *mode);
 
diff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c
index 799cb2b..2d0b1d0 100644
--- a/drivers/event/dlb/pf/base/dlb_resource.c
+++ b/drivers/event/dlb/pf/base/dlb_resource.c
@@ -6030,3 +6030,644 @@  int dlb_hw_create_dir_port(struct dlb_hw *hw,
 	return 0;
 }
 
+/* Look up a load-balanced port by ID within a domain. Despite the "used" in
+ * the name, both the domain's used and avail port lists are searched.
+ * Returns NULL if the ID is out of range or not found in either list.
+ */
+static struct dlb_ldb_port *
+dlb_get_domain_used_ldb_port(u32 id, struct dlb_domain *domain)
+{
+	struct dlb_list_entry *iter;
+	struct dlb_ldb_port *port;
+	RTE_SET_USED(iter);
+
+	if (id >= DLB_MAX_NUM_LDB_PORTS)
+		return NULL;
+
+	DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
+		if (port->id == id)
+			return port;
+
+	DLB_DOM_LIST_FOR(domain->avail_ldb_ports, port, iter)
+		if (port->id == id)
+			return port;
+
+	return NULL;
+}
+
+/* Log the arguments of a pending-port-unmaps query */
+static void
+dlb_log_pending_port_unmaps_args(struct dlb_hw *hw,
+				 struct dlb_pending_port_unmaps_args *args)
+{
+	DLB_HW_INFO(hw, "DLB pending port unmaps arguments:\n");
+	DLB_HW_INFO(hw, "\tPort ID: %d\n", args->port_id);
+}
+
+/* Report the number of in-progress QID unmap operations on a load-balanced
+ * port. The count is returned in resp->id. Returns 0 on success, -EINVAL
+ * (with resp->status set) if the domain or port ID is invalid.
+ */
+int dlb_hw_pending_port_unmaps(struct dlb_hw *hw,
+			       u32 domain_id,
+			       struct dlb_pending_port_unmaps_args *args,
+			       struct dlb_cmd_response *resp)
+{
+	struct dlb_ldb_port *port;
+	struct dlb_domain *domain;
+
+	dlb_log_pending_port_unmaps_args(hw, args);
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+	if (domain == NULL) {
+		resp->status = DLB_ST_INVALID_DOMAIN_ID;
+		return -EINVAL;
+	}
+
+	port = dlb_get_domain_used_ldb_port(args->port_id, domain);
+	if (port == NULL || !port->configured) {
+		resp->status = DLB_ST_INVALID_PORT_ID;
+		return -EINVAL;
+	}
+
+	resp->id = port->num_pending_removals;
+
+	return 0;
+}
+
+/* Log the arguments of an unmap-QID request */
+static void dlb_log_unmap_qid(struct dlb_hw *hw,
+			      u32 domain_id,
+			      struct dlb_unmap_qid_args *args)
+{
+	DLB_HW_INFO(hw, "DLB unmap QID arguments:\n");
+	DLB_HW_INFO(hw, "\tDomain ID: %d\n",
+		    domain_id);
+	DLB_HW_INFO(hw, "\tPort ID:   %d\n",
+		    args->port_id);
+	DLB_HW_INFO(hw, "\tQueue ID:  %d\n",
+		    args->qid);
+	/* Only dereference the queue's state if the ID is in range */
+	if (args->qid < DLB_MAX_NUM_LDB_QUEUES)
+		DLB_HW_INFO(hw, "\tQueue's num mappings:  %d\n",
+			    hw->rsrcs.ldb_queues[args->qid].num_mappings);
+}
+
+/* Look up a load-balanced queue by ID in a domain's used-queue list.
+ * Returns NULL if the ID is out of range or the queue isn't in the list.
+ */
+static struct dlb_ldb_queue *dlb_get_domain_ldb_queue(u32 id,
+						      struct dlb_domain *domain)
+{
+	struct dlb_list_entry *it;
+	struct dlb_ldb_queue *q;
+	RTE_SET_USED(it);
+
+	if (id >= DLB_MAX_NUM_LDB_QUEUES)
+		return NULL;
+
+	DLB_DOM_LIST_FOR(domain->used_ldb_queues, q, it)
+		if (q->id == id)
+			return q;
+
+	return NULL;
+}
+
+/* Find the CQ slot whose unmap-in-progress has a pending map for 'queue'.
+ * On success *slot holds the index and true is returned; otherwise *slot is
+ * set to DLB_MAX_NUM_QIDS_PER_LDB_CQ and false is returned.
+ */
+static bool
+dlb_port_find_slot_with_pending_map_queue(struct dlb_ldb_port *port,
+					  struct dlb_ldb_queue *queue,
+					  int *slot)
+{
+	int i;
+
+	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		const struct dlb_ldb_port_qid_map *map = &port->qid_map[i];
+
+		if (map->state == DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP &&
+		    map->pending_qid == queue->id) {
+			*slot = i;
+			return true;
+		}
+	}
+
+	/* Not found: *slot is one past the last valid index, as callers that
+	 * range-check the slot expect.
+	 */
+	*slot = DLB_MAX_NUM_QIDS_PER_LDB_CQ;
+	return false;
+}
+
+/* Validate an unmap-QID request. The order of the checks determines which
+ * status code is reported when multiple arguments are invalid.
+ * Returns 0 if the unmap may proceed, else -1 with resp->status set.
+ */
+static int dlb_verify_unmap_qid_args(struct dlb_hw *hw,
+				     u32 domain_id,
+				     struct dlb_unmap_qid_args *args,
+				     struct dlb_cmd_response *resp)
+{
+	enum dlb_qid_map_state state;
+	struct dlb_domain *domain;
+	struct dlb_ldb_port *port;
+	struct dlb_ldb_queue *queue;
+	int slot;
+	int id;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+
+	if (domain == NULL) {
+		resp->status = DLB_ST_INVALID_DOMAIN_ID;
+		return -1;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
+		return -1;
+	}
+
+	id = args->port_id;
+
+	port = dlb_get_domain_used_ldb_port(id, domain);
+
+	if (port == NULL || !port->configured) {
+		resp->status = DLB_ST_INVALID_PORT_ID;
+		return -1;
+	}
+
+	if (port->domain_id != domain->id) {
+		resp->status = DLB_ST_INVALID_PORT_ID;
+		return -1;
+	}
+
+	queue = dlb_get_domain_ldb_queue(args->qid, domain);
+
+	if (queue == NULL || !queue->configured) {
+		DLB_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
+			   __func__, args->qid);
+		resp->status = DLB_ST_INVALID_QID;
+		return -1;
+	}
+
+	/* Verify that the port has the queue mapped. From the application's
+	 * perspective a queue is mapped if it is actually mapped, the map is
+	 * in progress, or the map is blocked pending an unmap.
+	 */
+	state = DLB_QUEUE_MAPPED;
+	if (dlb_port_find_slot_queue(port, state, queue, &slot))
+		return 0;
+
+	state = DLB_QUEUE_MAP_IN_PROGRESS;
+	if (dlb_port_find_slot_queue(port, state, queue, &slot))
+		return 0;
+
+	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &slot))
+		return 0;
+
+	resp->status = DLB_ST_INVALID_QID;
+	return -1;
+}
+
+/* Unmap a load-balanced queue from a load-balanced port (CQ). A map that is
+ * still in progress or pending is simply rolled back via a slot state
+ * transition; a fully-mapped queue requires the asynchronous removal
+ * procedure (disable CQ, drain inflights, unmap), which may be finished
+ * later by the worker thread.
+ * Returns 0 on success, -EINVAL on validation failure (resp->status set),
+ * -EFAULT on internal inconsistency.
+ */
+int dlb_hw_unmap_qid(struct dlb_hw *hw,
+		     u32 domain_id,
+		     struct dlb_unmap_qid_args *args,
+		     struct dlb_cmd_response *resp)
+{
+	enum dlb_qid_map_state state;
+	struct dlb_ldb_queue *queue;
+	struct dlb_ldb_port *port;
+	struct dlb_domain *domain;
+	bool unmap_complete;
+	int i, ret, id;
+
+	dlb_log_unmap_qid(hw, domain_id, args);
+
+	/* Verify that hardware resources are available before attempting to
+	 * satisfy the request. This simplifies the error unwinding code.
+	 */
+	if (dlb_verify_unmap_qid_args(hw, domain_id, args, resp))
+		return -EINVAL;
+
+	/* The lookups below were already validated, so failure here means an
+	 * internal inconsistency rather than bad arguments.
+	 */
+	domain = dlb_get_domain_from_id(hw, domain_id);
+	if (domain == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: domain not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	id = args->port_id;
+
+	port = dlb_get_domain_used_ldb_port(id, domain);
+	if (port == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: port not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	queue = dlb_get_domain_ldb_queue(args->qid, domain);
+	if (queue == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: queue not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	/* If the queue hasn't been mapped yet, we need to update the slot's
+	 * state and re-enable the queue's inflights.
+	 */
+	state = DLB_QUEUE_MAP_IN_PROGRESS;
+	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
+		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+			DLB_HW_ERR(hw,
+				   "[%s():%d] Internal error: port slot tracking failed\n",
+				   __func__, __LINE__);
+			return -EFAULT;
+		}
+
+		/* Since the in-progress map was aborted, re-enable the QID's
+		 * inflights.
+		 */
+		if (queue->num_pending_additions == 0)
+			dlb_ldb_queue_set_inflight_limit(hw, queue);
+
+		state = DLB_QUEUE_UNMAPPED;
+		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
+		if (ret)
+			return ret;
+
+		goto unmap_qid_done;
+	}
+
+	/* If the queue mapping is on hold pending an unmap, we simply need to
+	 * update the slot's state.
+	 */
+	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i)) {
+		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+			DLB_HW_ERR(hw,
+				   "[%s():%d] Internal error: port slot tracking failed\n",
+				   __func__, __LINE__);
+			return -EFAULT;
+		}
+
+		state = DLB_QUEUE_UNMAP_IN_PROGRESS;
+		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
+		if (ret)
+			return ret;
+
+		goto unmap_qid_done;
+	}
+
+	state = DLB_QUEUE_MAPPED;
+	if (!dlb_port_find_slot_queue(port, state, queue, &i)) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: no available CQ slots\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: port slot tracking failed\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	/* QID->CQ mapping removal is an asynchronous procedure. It requires
+	 * stopping the DLB from scheduling this CQ, draining all inflights
+	 * from the CQ, then unmapping the queue from the CQ. This function
+	 * simply marks the port as needing the queue unmapped, and (if
+	 * necessary) starts the unmapping worker thread.
+	 */
+	dlb_ldb_port_cq_disable(hw, port);
+
+	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
+	ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
+	if (ret)
+		return ret;
+
+	/* Attempt to finish the unmapping now, in case the port has no
+	 * outstanding inflights. If that's not the case, this will fail and
+	 * the unmapping will be completed at a later time.
+	 */
+	unmap_complete = dlb_domain_finish_unmap_port(hw, domain, port);
+
+	/* If the unmapping couldn't complete immediately, launch the worker
+	 * thread (if it isn't already launched) to finish it later.
+	 */
+	if (!unmap_complete && !os_worker_active(hw))
+		os_schedule_work(hw);
+
+unmap_qid_done:
+	resp->status = 0;
+
+	return 0;
+}
+
+/* Log the arguments of a map-QID request */
+static void dlb_log_map_qid(struct dlb_hw *hw,
+			    u32 domain_id,
+			    struct dlb_map_qid_args *args)
+{
+	DLB_HW_INFO(hw, "DLB map QID arguments:\n");
+	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
+	DLB_HW_INFO(hw, "\tPort ID:   %d\n", args->port_id);
+	DLB_HW_INFO(hw, "\tQueue ID:  %d\n", args->qid);
+	DLB_HW_INFO(hw, "\tPriority:  %d\n", args->priority);
+}
+
+/* Validate a map-QID request. The order of the checks determines which
+ * status code is reported when multiple arguments are invalid.
+ * Returns 0 if the map may proceed, else -1 with resp->status set.
+ */
+static int dlb_verify_map_qid_args(struct dlb_hw *hw,
+				   u32 domain_id,
+				   struct dlb_map_qid_args *args,
+				   struct dlb_cmd_response *resp)
+{
+	struct dlb_ldb_queue *queue;
+	struct dlb_ldb_port *port;
+	struct dlb_domain *domain;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+	if (domain == NULL) {
+		resp->status = DLB_ST_INVALID_DOMAIN_ID;
+		return -1;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
+		return -1;
+	}
+
+	port = dlb_get_domain_used_ldb_port(args->port_id, domain);
+	if (port == NULL || !port->configured) {
+		resp->status = DLB_ST_INVALID_PORT_ID;
+		return -1;
+	}
+
+	if (args->priority >= DLB_QID_PRIORITIES) {
+		resp->status = DLB_ST_INVALID_PRIORITY;
+		return -1;
+	}
+
+	queue = dlb_get_domain_ldb_queue(args->qid, domain);
+	if (queue == NULL || !queue->configured) {
+		resp->status = DLB_ST_INVALID_QID;
+		return -1;
+	}
+
+	if (queue->domain_id != domain->id) {
+		resp->status = DLB_ST_INVALID_QID;
+		return -1;
+	}
+
+	if (port->domain_id != domain->id) {
+		resp->status = DLB_ST_INVALID_PORT_ID;
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Check whether 'port' can accept a mapping for 'queue': either a free CQ
+ * slot exists, the queue is already present (priority update), or a slot is
+ * being vacated by an in-progress unmap.
+ * Returns 0 if a slot is available, else -EINVAL with resp->status set.
+ */
+static int dlb_verify_map_qid_slot_available(struct dlb_ldb_port *port,
+					     struct dlb_ldb_queue *queue,
+					     struct dlb_cmd_response *resp)
+{
+	enum dlb_qid_map_state state;
+	int i;
+
+	/* Unused slot available? */
+	if (port->num_mappings < DLB_MAX_NUM_QIDS_PER_LDB_CQ)
+		return 0;
+
+	/* If the queue is already mapped (from the application's perspective),
+	 * this is simply a priority update.
+	 */
+	state = DLB_QUEUE_MAPPED;
+	if (dlb_port_find_slot_queue(port, state, queue, &i))
+		return 0;
+
+	state = DLB_QUEUE_MAP_IN_PROGRESS;
+	if (dlb_port_find_slot_queue(port, state, queue, &i))
+		return 0;
+
+	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i))
+		return 0;
+
+	/* If the slot contains an unmap in progress, it's considered
+	 * available.
+	 */
+	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
+	if (dlb_port_find_slot(port, state, &i))
+		return 0;
+
+	state = DLB_QUEUE_UNMAPPED;
+	if (dlb_port_find_slot(port, state, &i))
+		return 0;
+
+	resp->status = DLB_ST_NO_QID_SLOTS_AVAILABLE;
+	return -EINVAL;
+}
+
+/* Update the priority (and valid bit) of an already-programmed CQ slot in
+ * the CQ2PRIOV register, then mirror the new priority in the shadow state.
+ */
+static void dlb_ldb_port_change_qid_priority(struct dlb_hw *hw,
+					     struct dlb_ldb_port *port,
+					     int slot,
+					     struct dlb_map_qid_args *args)
+{
+	union dlb_lsp_cq2priov r0;
+
+	/* Read-modify-write the priority and valid bit register */
+	r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(port->id));
+
+	r0.field.v |= 1 << slot;
+	/* Clear the slot's 3-bit priority field before setting the new
+	 * value; OR-ing alone cannot lower a previously-programmed priority.
+	 */
+	r0.field.prio &= ~(0x7 << slot * 3);
+	r0.field.prio |= (args->priority & 0x7) << slot * 3;
+
+	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r0.val);
+
+	dlb_flush_csr(hw);
+
+	port->qid_map[slot].priority = args->priority;
+}
+
+/* Map a load-balanced queue to a load-balanced port (CQ). Depending on the
+ * target slot's current state this is a priority-only update, a pending map
+ * queued behind an in-progress unmap, or the full (possibly dynamic)
+ * mapping procedure.
+ * Returns 0 on success, -EINVAL on validation failure (resp->status set),
+ * -EFAULT on internal inconsistency, or an error from the map procedure.
+ */
+int dlb_hw_map_qid(struct dlb_hw *hw,
+		   u32 domain_id,
+		   struct dlb_map_qid_args *args,
+		   struct dlb_cmd_response *resp)
+{
+	enum dlb_qid_map_state state;
+	struct dlb_ldb_queue *queue;
+	struct dlb_ldb_port *port;
+	struct dlb_domain *domain;
+	int ret, i, id;
+	u8 prio;
+
+	dlb_log_map_qid(hw, domain_id, args);
+
+	/* Verify that hardware resources are available before attempting to
+	 * satisfy the request. This simplifies the error unwinding code.
+	 */
+	if (dlb_verify_map_qid_args(hw, domain_id, args, resp))
+		return -EINVAL;
+
+	prio = args->priority;
+
+	/* The lookups below were already validated, so failure here means an
+	 * internal inconsistency rather than bad arguments.
+	 */
+	domain = dlb_get_domain_from_id(hw, domain_id);
+	if (domain == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: domain not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	id = args->port_id;
+
+	port = dlb_get_domain_used_ldb_port(id, domain);
+	if (port == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: port not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	queue = dlb_get_domain_ldb_queue(args->qid, domain);
+	if (queue == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: queue not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	/* If there are any outstanding detach operations for this port,
+	 * attempt to complete them. This may be necessary to free up a QID
+	 * slot for this requested mapping.
+	 */
+	if (port->num_pending_removals)
+		dlb_domain_finish_unmap_port(hw, domain, port);
+
+	ret = dlb_verify_map_qid_slot_available(port, queue, resp);
+	if (ret)
+		return ret;
+
+	/* Hardware requires disabling the CQ before mapping QIDs. */
+	if (port->enabled)
+		dlb_ldb_port_cq_disable(hw, port);
+
+	/* If this is only a priority change, don't perform the full QID->CQ
+	 * mapping procedure
+	 */
+	state = DLB_QUEUE_MAPPED;
+	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
+		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+			DLB_HW_ERR(hw,
+				   "[%s():%d] Internal error: port slot tracking failed\n",
+				   __func__, __LINE__);
+			return -EFAULT;
+		}
+
+		if (prio != port->qid_map[i].priority) {
+			dlb_ldb_port_change_qid_priority(hw, port, i, args);
+			DLB_HW_INFO(hw, "DLB map: priority change only\n");
+		}
+
+		state = DLB_QUEUE_MAPPED;
+		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
+		if (ret)
+			return ret;
+
+		goto map_qid_done;
+	}
+
+	/* An unmap was requested but hasn't started: cancel it by moving the
+	 * slot straight back to MAPPED (updating priority if needed).
+	 */
+	state = DLB_QUEUE_UNMAP_IN_PROGRESS;
+	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
+		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+			DLB_HW_ERR(hw,
+				   "[%s():%d] Internal error: port slot tracking failed\n",
+				   __func__, __LINE__);
+			return -EFAULT;
+		}
+
+		if (prio != port->qid_map[i].priority) {
+			dlb_ldb_port_change_qid_priority(hw, port, i, args);
+			DLB_HW_INFO(hw, "DLB map: priority change only\n");
+		}
+
+		state = DLB_QUEUE_MAPPED;
+		ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
+		if (ret)
+			return ret;
+
+		goto map_qid_done;
+	}
+
+	/* If this is a priority change on an in-progress mapping, don't
+	 * perform the full QID->CQ mapping procedure.
+	 */
+	state = DLB_QUEUE_MAP_IN_PROGRESS;
+	if (dlb_port_find_slot_queue(port, state, queue, &i)) {
+		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+			DLB_HW_ERR(hw,
+				   "[%s():%d] Internal error: port slot tracking failed\n",
+				   __func__, __LINE__);
+			return -EFAULT;
+		}
+
+		port->qid_map[i].priority = prio;
+
+		DLB_HW_INFO(hw, "DLB map: priority change only\n");
+
+		goto map_qid_done;
+	}
+
+	/* If this is a priority change on a pending mapping, update the
+	 * pending priority
+	 */
+	if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i)) {
+		if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+			DLB_HW_ERR(hw,
+				   "[%s():%d] Internal error: port slot tracking failed\n",
+				   __func__, __LINE__);
+			return -EFAULT;
+		}
+
+		port->qid_map[i].pending_priority = prio;
+
+		DLB_HW_INFO(hw, "DLB map: priority change only\n");
+
+		goto map_qid_done;
+	}
+
+	/* If all the CQ's slots are in use, then there's an unmap in progress
+	 * (guaranteed by dlb_verify_map_qid_slot_available()), so add this
+	 * mapping to pending_map and return. When the removal is completed for
+	 * the slot's current occupant, this mapping will be performed.
+	 */
+	if (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &i)) {
+		if (dlb_port_find_slot(port, DLB_QUEUE_UNMAP_IN_PROGRESS, &i)) {
+			enum dlb_qid_map_state state;
+
+			if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+				DLB_HW_ERR(hw,
+					   "[%s():%d] Internal error: port slot tracking failed\n",
+					   __func__, __LINE__);
+				return -EFAULT;
+			}
+
+			port->qid_map[i].pending_qid = queue->id;
+			port->qid_map[i].pending_priority = prio;
+
+			state = DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP;
+
+			ret = dlb_port_slot_state_transition(hw, port, queue,
+							     i, state);
+			if (ret)
+				return ret;
+
+			DLB_HW_INFO(hw, "DLB map: map pending removal\n");
+
+			goto map_qid_done;
+		}
+	}
+
+	/* If the domain has started, a special "dynamic" CQ->queue mapping
+	 * procedure is required in order to safely update the CQ<->QID tables.
+	 * The "static" procedure cannot be used when traffic is flowing,
+	 * because the CQ<->QID tables cannot be updated atomically and the
+	 * scheduler won't see the new mapping unless the queue's if_status
+	 * changes, which isn't guaranteed.
+	 */
+	ret = dlb_ldb_port_map_qid(hw, domain, port, queue, prio);
+
+	/* If ret is less than zero, it's due to an internal error */
+	if (ret < 0)
+		return ret;
+
+map_qid_done:
+	if (port->enabled)
+		dlb_ldb_port_cq_enable(hw, port);
+
+	resp->status = 0;
+
+	return 0;
+}
+
diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
index 5e14271..fed6719 100644
--- a/drivers/event/dlb/pf/dlb_pf.c
+++ b/drivers/event/dlb/pf/dlb_pf.c
@@ -482,6 +482,72 @@  dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
 	return ret;
 }
 
+/* PF iface op: query the number of pending QID unmaps on an LDB port; the
+ * count is returned via the response's id field.
+ */
+static int
+dlb_pf_pending_port_unmaps(struct dlb_hw_dev *handle,
+			   struct dlb_pending_port_unmaps_args *args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	/* Was "dev->dlb_device": "dev" is not declared in this scope and only
+	 * compiled because DLB_INFO appears to drop its first argument — use
+	 * the in-scope dlb_dev instead.
+	 */
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_pending_port_unmaps(&dlb_dev->hw,
+					 handle->domain_id,
+					 args,
+					 &response);
+
+	*(struct dlb_cmd_response *)args->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+/* PF iface op: map a load-balanced queue to a port, forwarding the command
+ * response back through cfg->response.
+ */
+static int
+dlb_pf_map_qid(struct dlb_hw_dev *handle,
+	       struct dlb_map_qid_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	/* Was "dev->dlb_device": "dev" is not declared in this scope and only
+	 * compiled because DLB_INFO appears to drop its first argument — use
+	 * the in-scope dlb_dev instead.
+	 */
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_map_qid(&dlb_dev->hw,
+			     handle->domain_id,
+			     cfg,
+			     &response);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+/* PF iface op: unmap a load-balanced queue from a port, forwarding the
+ * command response back through cfg->response.
+ */
+static int
+dlb_pf_unmap_qid(struct dlb_hw_dev *handle,
+		 struct dlb_unmap_qid_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	/* Was "dev->dlb_device": "dev" is not declared in this scope and only
+	 * compiled because DLB_INFO appears to drop its first argument — use
+	 * the in-scope dlb_dev instead.
+	 */
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_unmap_qid(&dlb_dev->hw,
+			       handle->domain_id,
+			       cfg,
+			       &response);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
 static void
 dlb_pf_iface_fn_ptrs_init(void)
 {
@@ -497,6 +563,9 @@  dlb_pf_iface_fn_ptrs_init(void)
 	dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
 	dlb_iface_ldb_port_create = dlb_pf_ldb_port_create;
 	dlb_iface_dir_port_create = dlb_pf_dir_port_create;
+	dlb_iface_map_qid = dlb_pf_map_qid;
+	dlb_iface_unmap_qid = dlb_pf_unmap_qid;
+	dlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;
 	dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
 	dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
 	dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;