[v5,08/16] dma/idxd: add start and stop functions for pci devices
Checks
Commit Message
Add device start/stop functions for DSA devices bound to vfio. For devices
bound to the IDXD kernel driver, these are not required since the IDXD
kernel driver takes care of this.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
doc/guides/dmadevs/idxd.rst | 3 +++
drivers/dma/idxd/idxd_pci.c | 52 +++++++++++++++++++++++++++++++++++++
2 files changed, 55 insertions(+)
Comments
On 2021/9/17 23:24, Kevin Laatz wrote:
> Add device start/stop functions for DSA devices bound to vfio. For devices
> bound to the IDXD kernel driver, these are not required since the IDXD
> kernel driver takes care of this.
>
> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> ---
> doc/guides/dmadevs/idxd.rst | 3 +++
> drivers/dma/idxd/idxd_pci.c | 52 +++++++++++++++++++++++++++++++++++++
> 2 files changed, 55 insertions(+)
>
> diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
> index abfa5be9ea..a603c5dd22 100644
> --- a/doc/guides/dmadevs/idxd.rst
> +++ b/doc/guides/dmadevs/idxd.rst
> @@ -150,3 +150,6 @@ The following code shows how the device is configured in
> :start-after: Setup of the dmadev device. 8<
> :end-before: >8 End of setup of the dmadev device.
> :dedent: 1
> +
> +Once configured, the device can then be made ready for use by calling the
> +``rte_dma_start()`` API.
> diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
> index 0216ab80d9..cfb64ce220 100644
> --- a/drivers/dma/idxd/idxd_pci.c
> +++ b/drivers/dma/idxd/idxd_pci.c
> @@ -59,11 +59,63 @@ idxd_is_wq_enabled(struct idxd_dmadev *idxd)
> return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
> }
>
> +static int
> +idxd_pci_dev_stop(struct rte_dma_dev *dev)
> +{
> + struct idxd_dmadev *idxd = dev->dev_private;
> + uint8_t err_code;
> +
> + if (!idxd_is_wq_enabled(idxd)) {
> + IDXD_PMD_ERR("Work queue %d already disabled", idxd->qid);
> + return -EALREADY;
> + }
> +
> + err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
> + if (err_code || idxd_is_wq_enabled(idxd)) {
> + IDXD_PMD_ERR("Failed disabling work queue %d, error code: %#x",
> + idxd->qid, err_code);
> + return -err_code;
The err_code may be zero here (command succeeded but the WQ is still enabled), in which case -err_code incorrectly returns 0 (success). Suggest: return err_code == 0 ? -1 : -err_code;
> + }
> + IDXD_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
> +
> + return 0;
> +}
> +
> +static int
> +idxd_pci_dev_start(struct rte_dma_dev *dev)
> +{
> + struct idxd_dmadev *idxd = dev->dev_private;
> + uint8_t err_code;
> +
> + if (idxd_is_wq_enabled(idxd)) {
> + IDXD_PMD_WARN("WQ %d already enabled", idxd->qid);
> + return 0;
> + }
> +
> + if (idxd->desc_ring == NULL) {
> + IDXD_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
> + return -EINVAL;
> + }
> +
> + err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
> + if (err_code || !idxd_is_wq_enabled(idxd)) {
> + IDXD_PMD_ERR("Failed enabling work queue %d, error code: %#x",
> + idxd->qid, err_code);
> + return err_code == 0 ? -1 : err_code;
The rte_dma_start() API specifies that a negative number is returned on failure, but a non-zero err_code is returned here as a positive value.
Suggestion: return err_code == 0 ? -1 : -err_code;
> + }
> +
> + IDXD_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);
> +
> + return 0;
> +}
> +
> static const struct rte_dma_dev_ops idxd_pci_ops = {
> .dev_dump = idxd_dump,
> .dev_configure = idxd_configure,
> .vchan_setup = idxd_vchan_setup,
> .dev_info_get = idxd_info_get,
> + .dev_start = idxd_pci_dev_start,
> + .dev_stop = idxd_pci_dev_stop,
> };
>
> /* each portal uses 4 x 4k pages */
>
@@ -150,3 +150,6 @@ The following code shows how the device is configured in
:start-after: Setup of the dmadev device. 8<
:end-before: >8 End of setup of the dmadev device.
:dedent: 1
+
+Once configured, the device can then be made ready for use by calling the
+``rte_dma_start()`` API.
@@ -59,11 +59,63 @@ idxd_is_wq_enabled(struct idxd_dmadev *idxd)
return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
}
+/* Stop callback for vfio-bound DSA devices: disable the HW work queue
+ * backing this dmadev.
+ *
+ * @param dev   dmadev whose private data holds the idxd device state.
+ * @return 0 on success, -EALREADY if the WQ is already disabled, or a
+ *         negative value on failure to disable.
+ */
+static int
+idxd_pci_dev_stop(struct rte_dma_dev *dev)
+{
+	struct idxd_dmadev *idxd = dev->dev_private;
+	uint8_t err_code;
+
+	if (!idxd_is_wq_enabled(idxd)) {
+		IDXD_PMD_ERR("Work queue %d already disabled", idxd->qid);
+		return -EALREADY;
+	}
+
+	err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
+	if (err_code || idxd_is_wq_enabled(idxd)) {
+		IDXD_PMD_ERR("Failed disabling work queue %d, error code: %#x",
+				idxd->qid, err_code);
+		/* err_code may be 0 when the command "succeeded" but the WQ
+		 * is still enabled; returning -err_code would then signal
+		 * success. Always return a negative value on this path.
+		 */
+		return err_code == 0 ? -1 : -err_code;
+	}
+	IDXD_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
+
+	return 0;
+}
+
+/* Start callback for vfio-bound DSA devices: enable the HW work queue
+ * backing this dmadev. Devices bound to the IDXD kernel driver do not
+ * need this, as the kernel driver manages WQ state itself.
+ *
+ * @param dev   dmadev whose private data holds the idxd device state.
+ * @return 0 on success (including the already-enabled case), -EINVAL if
+ *         the device was not fully configured, or a negative value on
+ *         failure to enable.
+ */
+static int
+idxd_pci_dev_start(struct rte_dma_dev *dev)
+{
+	struct idxd_dmadev *idxd = dev->dev_private;
+	uint8_t err_code;
+
+	if (idxd_is_wq_enabled(idxd)) {
+		IDXD_PMD_WARN("WQ %d already enabled", idxd->qid);
+		return 0;
+	}
+
+	/* desc_ring is only allocated once vchan_setup has run */
+	if (idxd->desc_ring == NULL) {
+		IDXD_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
+		return -EINVAL;
+	}
+
+	err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
+	if (err_code || !idxd_is_wq_enabled(idxd)) {
+		IDXD_PMD_ERR("Failed enabling work queue %d, error code: %#x",
+				idxd->qid, err_code);
+		/* dev_start must return a negative value on failure; err_code
+		 * is an unsigned HW status, so negate it (or use -1 when the
+		 * command reported success but the WQ stayed disabled).
+		 */
+		return err_code == 0 ? -1 : -err_code;
+	}
+
+	IDXD_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);
+
+	return 0;
+}
+
/* dmadev ops for DSA devices bound to vfio; start/stop are provided here
 * because no kernel driver manages WQ enable/disable for these devices.
 */
static const struct rte_dma_dev_ops idxd_pci_ops = {
.dev_dump = idxd_dump,
.dev_configure = idxd_configure,
.vchan_setup = idxd_vchan_setup,
.dev_info_get = idxd_info_get,
+ .dev_start = idxd_pci_dev_start,
+ .dev_stop = idxd_pci_dev_stop,
};
/* each portal uses 4 x 4k pages */