@@ -102,6 +102,7 @@ static const struct rte_dma_dev_ops idxd_bus_ops = {
.stats_get = idxd_stats_get,
.stats_reset = idxd_stats_reset,
.vchan_status = idxd_vchan_status,
+ .burst_capacity = idxd_burst_capacity,
};
static void *
@@ -469,6 +469,26 @@ idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t
return 0;
}
+uint16_t
+idxd_burst_capacity(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused)
+{
+ struct idxd_dmadev *idxd = dev->dev_private;
+ uint16_t write_idx = idxd->batch_start + idxd->batch_size;
+ uint16_t used_space;
+
+ /* Check for space in the batch ring */
+ if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
+ idxd->batch_idx_write + 1 == idxd->batch_idx_read)
+ return 0;
+
+ /* For descriptors, check for wrap-around on write but not read */
+ if (idxd->ids_returned > write_idx)
+ write_idx += idxd->desc_ring_mask + 1;
+ used_space = write_idx - idxd->ids_returned;
+
+ return RTE_MIN((idxd->desc_ring_mask - used_space), idxd->max_batch_size);
+}
+
int
idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
uint32_t conf_sz)
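For context, this new driver callback is what the generic rte_dma_burst_capacity() dmadev call dispatches to, letting an application ask how many more operations the current idxd batch and descriptor ring can accept before it enqueues. The value computed above is capped at the device's maximum batch size (filled in elsewhere in this patch), since a single burst cannot exceed the batch limit. A minimal usage sketch follows; the helper name, dev_id, vchan and the address values are placeholders and not part of this patch:

#include <errno.h>
#include <rte_dmadev.h>

/* Placeholder helper (not from the patch): enqueue and submit one copy only
 * if the device still reports room in the current burst.
 */
static int
enqueue_one_copy(int16_t dev_id, uint16_t vchan,
		rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	/* Dispatches to the driver's burst_capacity callback underneath */
	if (rte_dma_burst_capacity(dev_id, vchan) == 0)
		return -ENOSPC;

	if (rte_dma_copy(dev_id, vchan, src, dst, len, 0) < 0)
		return -EIO;

	return rte_dma_submit(dev_id, vchan);
}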
@@ -103,5 +103,6 @@ int idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
int idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan);
int idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
enum rte_dma_vchan_status *status);
+uint16_t idxd_burst_capacity(const struct rte_dma_dev *dev, uint16_t vchan);
#endif /* _IDXD_INTERNAL_H_ */
@@ -119,6 +119,7 @@ static const struct rte_dma_dev_ops idxd_pci_ops = {
.dev_start = idxd_pci_dev_start,
.dev_stop = idxd_pci_dev_stop,
.vchan_status = idxd_vchan_status,
+ .burst_capacity = idxd_burst_capacity,
};
/* each portal uses 4 x 4k pages */
@@ -232,6 +233,7 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
idxd->u.pci = pci;
idxd->max_batches = wq_size;
+ idxd->max_batch_size = 1 << lg2_max_batch;
/* enable the device itself */
err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);