When a suitable device is found during the bus scan/probe, create a dmadev
instance for each HW queue. Internal structures required for device
creation are also added.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
drivers/dma/idxd/idxd_bus.c | 19 +++++++++
drivers/dma/idxd/idxd_common.c | 72 ++++++++++++++++++++++++++++++++
drivers/dma/idxd/idxd_internal.h | 40 ++++++++++++++++++
drivers/dma/idxd/meson.build | 1 +
4 files changed, 132 insertions(+)
create mode 100644 drivers/dma/idxd/idxd_common.c
@@ -85,6 +85,18 @@ dsa_get_sysfs_path(void)
return path ? path : DSA_SYSFS_PATH;
}
+static int
+idxd_dev_close(struct rte_dma_dev *dev)
+{
+ struct idxd_dmadev *idxd = dev->data->dev_private;
+ munmap(idxd->portal, 0x1000); /* unmap the 4kB WQ portal mapped at probe time */
+ return 0;
+}
+
+static const struct rte_dma_dev_ops idxd_bus_ops = {
+ .dev_close = idxd_dev_close, /* only close is implemented for bus devices so far */
+};
+
static void *
idxd_bus_mmap_wq(struct rte_dsa_device *dev)
{
@@ -206,6 +218,7 @@ idxd_probe_dsa(struct rte_dsa_device *dev)
return -1;
idxd.max_batch_size = ret;
idxd.qid = dev->addr.wq_id;
+ idxd.u.bus.dsa_id = dev->addr.device_id;
idxd.sva_support = 1;
idxd.portal = idxd_bus_mmap_wq(dev);
@@ -214,6 +227,12 @@ idxd_probe_dsa(struct rte_dsa_device *dev)
return -ENOENT;
}
+ ret = idxd_dmadev_create(dev->wq_name, &dev->device, &idxd, &idxd_bus_ops);
+ if (ret) {
+  /* this is a dmadev driver: the old "rawdev" wording was a copy-paste leftover */
+  IDXD_PMD_ERR("Failed to create dmadev %s", dev->wq_name);
+  return ret;
+ }
+
return 0;
}
new file mode 100644
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 Intel Corporation
+ */
+
+#include <rte_dmadev_pmd.h>
+#include <rte_malloc.h>
+#include <rte_common.h>
+
+#include "idxd_internal.h"
+
+#define IDXD_PMD_NAME_STR "dmadev_idxd"
+
+/* Create and register a dmadev instance for a single idxd work queue.
+ * The caller (bus/pci probe code) fills in a template idxd_dmadev, which is
+ * copied into the newly allocated device's private data; the batch index ring
+ * and completion ring are then allocated here from one zeroed block.
+ * Returns 0 on success, negative errno-style value on failure.
+ */
+int
+idxd_dmadev_create(const char *name, struct rte_device *dev,
+  const struct idxd_dmadev *base_idxd,
+  const struct rte_dma_dev_ops *ops)
+{
+ struct idxd_dmadev *idxd = NULL;
+ struct rte_dma_dev *dmadev = NULL;
+ int ret = 0;
+
+ if (!name) {
+  IDXD_PMD_ERR("Invalid name of the device!");
+  ret = -EINVAL;
+  goto cleanup;
+ }
+
+ /* Allocate device structure */
+ dmadev = rte_dma_pmd_allocate(name, dev->numa_node, sizeof(struct idxd_dmadev));
+ if (dmadev == NULL) {
+  /* message fixed: this driver allocates a dma device, not a raw device */
+  IDXD_PMD_ERR("Unable to allocate dma device");
+  ret = -ENOMEM;
+  goto cleanup;
+ }
+ dmadev->dev_ops = ops;
+ dmadev->device = dev;
+
+ idxd = dmadev->data->dev_private;
+ *idxd = *base_idxd; /* copy over the main fields already passed in */
+ idxd->dmadev = dmadev;
+
+ /* allocate batch index ring and completion ring.
+  * The +1 is because we can never fully use
+  * the ring, otherwise read == write means both full and empty.
+  */
+ idxd->batch_comp_ring = rte_zmalloc(NULL, (sizeof(idxd->batch_idx_ring[0]) +
+   sizeof(idxd->batch_comp_ring[0])) * (idxd->max_batches + 1),
+   sizeof(idxd->batch_comp_ring[0]));
+ if (idxd->batch_comp_ring == NULL) {
+  /* no trailing \n: the other IDXD_PMD_ERR messages carry none */
+  IDXD_PMD_ERR("Unable to reserve memory for batch data");
+  ret = -ENOMEM;
+  goto cleanup;
+ }
+ /* the index ring sits directly after the completion ring in the same block */
+ idxd->batch_idx_ring = (void *)&idxd->batch_comp_ring[idxd->max_batches + 1];
+ idxd->batch_iova = rte_mem_virt2iova(idxd->batch_comp_ring);
+
+ dmadev->fp_obj->dev_private = idxd;
+
+ idxd->dmadev->state = RTE_DMA_DEV_READY;
+
+ return 0;
+
+cleanup:
+ if (dmadev)
+  rte_dma_pmd_release(name);
+
+ return ret;
+}
+
+/* RTE_LOG_REGISTER_DEFAULT both defines idxd_pmd_logtype (declared extern in
+ * idxd_internal.h) and registers it with the log subsystem, so the separate
+ * tentative definition "int idxd_pmd_logtype;" was redundant and is dropped.
+ */
+RTE_LOG_REGISTER_DEFAULT(idxd_pmd_logtype, WARNING);
@@ -24,4 +24,44 @@ extern int idxd_pmd_logtype;
#define IDXD_PMD_ERR(fmt, args...) IDXD_PMD_LOG(ERR, fmt, ## args)
#define IDXD_PMD_WARN(fmt, args...) IDXD_PMD_LOG(WARNING, fmt, ## args)
+struct idxd_dmadev {
+ /* counters to track the batches */
+ unsigned short max_batches; /* ring capacity; rings hold max_batches+1 slots */
+ unsigned short batch_idx_read; /* read index into the batch rings */
+ unsigned short batch_idx_write; /* write index into the batch rings */
+
+ /* track descriptors and handles */
+ unsigned short desc_ring_mask;
+ unsigned short ids_avail; /* handles for ops completed */
+ unsigned short ids_returned; /* the read pointer for hdls/desc rings */
+ unsigned short batch_start; /* start+size == write pointer for hdls/desc */
+ unsigned short batch_size;
+
+ void *portal; /* address to write the batch descriptor */
+
+ struct idxd_completion *batch_comp_ring; /* completion record per batch */
+ unsigned short *batch_idx_ring; /* store where each batch ends */
+
+ struct rte_dma_stats stats;
+
+ rte_iova_t batch_iova; /* base address of the batch comp ring */
+ rte_iova_t desc_iova; /* base address of desc ring, needed for completions */
+
+ unsigned short max_batch_size; /* from sysfs max_batch_size at probe time */
+
+ struct rte_dma_dev *dmadev; /* back-pointer to the owning dmadev instance */
+ struct rte_dma_vchan_conf qcfg; /* config of the device's virtual channel */
+ uint8_t sva_support; /* presumably shared virtual addressing - set to 1 at probe */
+ uint8_t qid; /* work queue id, from dev->addr.wq_id */
+
+ union { /* per-bus private data; only the DSA bus variant exists so far */
+ struct {
+ unsigned int dsa_id; /* DSA device id, from dev->addr.device_id */
+ } bus;
+ } u;
+};
+
+/* Create a dmadev instance for one work queue; called from bus probe. */
+int idxd_dmadev_create(const char *name, struct rte_device *dev,
+ const struct idxd_dmadev *base_idxd, const struct rte_dma_dev_ops *ops);
+
#endif /* _IDXD_INTERNAL_H_ */
@@ -8,5 +8,6 @@ endif
deps += ['bus_pci']
sources = files(
'idxd_bus.c',
+ 'idxd_common.c',
'idxd_pci.c'
)