@@ -95,6 +95,7 @@ idxd_dev_close(struct rte_dma_dev *dev)
static const struct rte_dma_dev_ops idxd_bus_ops = {
.dev_close = idxd_dev_close,
+ /* hook up idxd_dump as this device's .dev_dump op */
+ .dev_dump = idxd_dump,
};
static void *
@@ -10,6 +10,35 @@
#define IDXD_PMD_NAME_STR "dmadev_idxd"
+/*
+ * Dump the driver-private state of an idxd dmadev to @f.
+ *
+ * Prints the portal pointer, the configured descriptor-ring size, the
+ * contents of the batch-index ring (annotating the current read and
+ * write positions), the current batch start/size, and the avail/returned
+ * ID counters. Always returns 0.
+ */
+int
+idxd_dump(const struct rte_dma_dev *dev, FILE *f)
+{
+ struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
+ unsigned int i;
+
+ fprintf(f, "== IDXD Private Data ==\n");
+ fprintf(f, " Portal: %p\n", idxd->portal);
+ fprintf(f, " Config: { ring_size: %u }\n",
+ idxd->qcfg.nb_desc);
+ /* the batch-index ring holds max_batches + 1 slots */
+ fprintf(f, " Batch ring (sz = %u, max_batches = %u):\n\t",
+ idxd->max_batches + 1, idxd->max_batches);
+ for (i = 0; i <= idxd->max_batches; i++) {
+ fprintf(f, " %u ", idxd->batch_idx_ring[i]);
+ /* mark the read/write pointer positions inline with the entries */
+ if (i == idxd->batch_idx_read && i == idxd->batch_idx_write)
+ fprintf(f, "[rd ptr, wr ptr] ");
+ else if (i == idxd->batch_idx_read)
+ fprintf(f, "[rd ptr] ");
+ else if (i == idxd->batch_idx_write)
+ fprintf(f, "[wr ptr] ");
+ /* terminate the ring line after the last entry */
+ if (i == idxd->max_batches)
+ fprintf(f, "\n");
+ }
+
+ fprintf(f, " Curr batch: start = %u, size = %u\n", idxd->batch_start, idxd->batch_size);
+ fprintf(f, " IDS: avail = %u, returned: %u\n", idxd->ids_avail, idxd->ids_returned);
+ return 0;
+}
+
int
idxd_dmadev_create(const char *name, struct rte_device *dev,
const struct idxd_dmadev *base_idxd,
@@ -19,6 +48,10 @@ idxd_dmadev_create(const char *name, struct rte_device *dev,
struct rte_dma_dev *dmadev = NULL;
int ret = 0;
+ RTE_BUILD_BUG_ON(sizeof(struct idxd_hw_desc) != 64);
+ RTE_BUILD_BUG_ON(offsetof(struct idxd_hw_desc, size) != 32);
+ RTE_BUILD_BUG_ON(sizeof(struct idxd_completion) != 32);
+
if (!name) {
IDXD_PMD_ERR("Invalid name of the device!");
ret = -EINVAL;
@@ -5,6 +5,66 @@
#ifndef _IDXD_HW_DEFS_H_
#define _IDXD_HW_DEFS_H_
+/*
+ * Defines used in the data path for interacting with IDXD hardware.
+ */
+/* operation code occupies the top byte of the 32-bit op_flags field */
+#define IDXD_CMD_OP_SHIFT 24
+enum rte_idxd_ops {
+ idxd_op_nop = 0,
+ idxd_op_batch,
+ idxd_op_drain,
+ idxd_op_memmove,
+ idxd_op_fill
+};
+
+/* per-descriptor flag bits for the low bits of op_flags */
+#define IDXD_FLAG_FENCE (1 << 0)
+#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
+#define IDXD_FLAG_REQUEST_COMPLETION (1 << 3)
+#define IDXD_FLAG_CACHE_CONTROL (1 << 8)
+
+/**
+ * Hardware descriptor used by DSA hardware, for both bursts and
+ * for individual operations.
+ *
+ * Layout is fixed by hardware: the driver build-time checks that the
+ * struct is exactly 64 bytes with 'size' at offset 32 (see the
+ * RTE_BUILD_BUG_ON checks in idxd_dmadev_create).
+ */
+struct idxd_hw_desc {
+ uint32_t pasid;
+ uint32_t op_flags;
+ rte_iova_t completion;
+
+ RTE_STD_C11
+ union {
+ rte_iova_t src; /* source address for copy ops etc. */
+ rte_iova_t desc_addr; /* descriptor pointer for batch */
+ };
+ rte_iova_t dst;
+
+ uint32_t size; /* length of data for op, or batch size */
+
+ uint16_t intr_handle; /* completion interrupt handle */
+
+ /* remaining 26 bytes are reserved */
+ uint16_t __reserved[13];
+} __rte_aligned(64);
+
+/* values written to idxd_completion.status by hardware (or the driver,
+ * in the case of SKIPPED) */
+#define IDXD_COMP_STATUS_INCOMPLETE 0
+#define IDXD_COMP_STATUS_SUCCESS 1
+#define IDXD_COMP_STATUS_INVALID_OPCODE 0x10
+#define IDXD_COMP_STATUS_INVALID_SIZE 0x13
+#define IDXD_COMP_STATUS_SKIPPED 0xFF /* not official IDXD error, needed as placeholder */
+
+/**
+ * Completion record structure written back by DSA
+ *
+ * Layout is fixed by hardware: build-time checked to be exactly
+ * 32 bytes (see RTE_BUILD_BUG_ON in idxd_dmadev_create).
+ */
+struct idxd_completion {
+ uint8_t status;
+ uint8_t result;
+ /* 16-bits pad here */
+ uint32_t completed_size; /* data length, or descriptors for batch */
+
+ rte_iova_t fault_address;
+ uint32_t invalid_flags;
+} __rte_aligned(32);
+
/*** Definitions for Intel(R) Data Streaming Accelerator ***/
#define IDXD_CMD_SHIFT 20
@@ -39,6 +39,8 @@ struct idxd_pci_common {
};
struct idxd_dmadev {
+ struct idxd_hw_desc *desc_ring;
+
/* counters to track the batches */
unsigned short max_batches;
unsigned short batch_idx_read;
@@ -79,5 +81,6 @@ struct idxd_dmadev {
int idxd_dmadev_create(const char *name, struct rte_device *dev,
const struct idxd_dmadev *base_idxd, const struct rte_dma_dev_ops *ops);
+/* dump driver-private device state to @f; shared .dev_dump op for both
+ * the bus-scanned and PCI device variants */
+int idxd_dump(const struct rte_dma_dev *dev, FILE *f);
#endif /* _IDXD_INTERNAL_H_ */
@@ -83,6 +83,7 @@ idxd_pci_dev_close(struct rte_dma_dev *dev)
static const struct rte_dma_dev_ops idxd_pci_ops = {
.dev_close = idxd_pci_dev_close,
+ /* PCI devices use the same dump implementation as the bus driver */
+ .dev_dump = idxd_dump,
};
/* each portal uses 4 x 4k pages */