[v22,2/5] dmadev: add control plane function support

Message ID 20210916034145.51561-3-fengchengwen@huawei.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Headers
Series support dmadev |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Chengwen Feng Sept. 16, 2021, 3:41 a.m. UTC
  This patch adds control plane functions for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  41 +++
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 360 +++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 479 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  60 ++++
 lib/dmadev/version.map                 |   9 +
 6 files changed, 950 insertions(+)
  

Patch

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index c1c7579107..c724fefcb0 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -60,3 +60,44 @@  identifiers:
 
 - A device name used to designate the DMA device in console messages, for
   administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The rte_dma_configure API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dma_configure(int16_t dev_id,
+                         const struct rte_dma_conf *dev_conf);
+
+The ``rte_dma_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the number of virtual DMA channels to set up,
+and an indication of whether to enable silent mode.
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The rte_dma_vchan_setup API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+                           const struct rte_dma_vchan_conf *conf);
+
+The ``rte_dma_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, the source device
+access port parameter and the destination device access port parameter.
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dma_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 5a85198e0d..eb0315aaca 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -90,6 +90,7 @@  New Features
 * **Introduced dmadev library with:**
 
   * Device allocation and it's multi-process support.
+  * Control plane functions.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 12d8302f15..544937acf8 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -317,6 +317,9 @@  rte_dma_pmd_release(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
+	if (dev->state == RTE_DMA_DEV_READY)
+		return rte_dma_close(dev->data->dev_id);
+
 	dma_release(dev);
 	return 0;
 }
@@ -356,3 +359,360 @@  rte_dma_count_avail(void)
 
 	return count;
 }
+
+int
+rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dma_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dma_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->numa_node = dev->device->numa_node;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configure zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > dev_info.max_vchans) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configure too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
+		RTE_DMA_LOG(ERR, "Device %d don't support silent", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+					     sizeof(struct rte_dma_conf));
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf,
+		       sizeof(struct rte_dma_conf));
+
+	return ret;
+}
+
+int
+rte_dma_start(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dma_stop(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dma_close(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_close)(dev);
+	if (ret == 0)
+		dma_release(dev);
+
+	return ret;
+}
+
+int
+rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= dev_info.nb_vchans) {
+		RTE_DMA_LOG(ERR, "Device %d vchan out range!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < dev_info.min_desc ||
+	    conf->nb_desc > dev_info.max_desc) {
+		RTE_DMA_LOG(ERR,
+			"Device %d number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
+		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+					sizeof(struct rte_dma_vchan_conf));
+}
+
+int
+rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dma_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dma_stats));
+}
+
+int
+rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dma_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMA_CAPA_SVA,         "sva"     },
+		{ RTE_DMA_CAPA_SILENT,      "silent"  },
+		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dma_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		fprintf(f, " %s", dma_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	fprintf(f, "\n");
+}
+
+int
+rte_dma_dump(int16_t dev_id, FILE *f)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %d, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	dma_dump_capability(f, dev_info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
+	fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 6074bae25d..a66144488a 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -53,6 +53,28 @@ 
  * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
  * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
  *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dma_configure()
+ *     - rte_dma_vchan_setup()
+ *     - rte_dma_start()
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
+ * rte_dma_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dma_start() again. The dataplane functions should not
+ * be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the rte_dma_close()
+ * function.
+ *
+ * Regarding MT-safety, all the functions of the dmadev API exported by a PMD
+ * are lock-free functions, which assume they are not invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
  */
 
 #include <stdint.h>
@@ -124,6 +146,463 @@  bool rte_dma_is_valid(int16_t dev_id);
 __rte_experimental
 uint16_t rte_dma_count_avail(void);
 
+/** DMA device support memory-to-memory transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
+/** DMA device support memory-to-device transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
+/** DMA device support device-to-memory transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
+/** DMA device support device-to-device transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
+/** DMA device support SVA which could use VA as DMA address.
+ * If the device supports SVA, then the application may pass any VA address,
+ * e.g. memory from rte_malloc(), rte_memzone(), malloc or stack memory.
+ * If the device does not support SVA, then the application must pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_SVA		RTE_BIT64(4)
+/** DMA device support work in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dma_completed*() APIs.
+ *
+ * @see struct rte_dma_conf::silent_mode
+ */
+#define RTE_DMA_CAPA_SILENT		RTE_BIT64(5)
+/** DMA device support copy ops.
+ * This capability starts at bit index 32, so as to leave a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_COPY		RTE_BIT64(32)
+/** DMA device support scatter-gather list copy ops.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
+/** DMA device support fill ops.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ *
+ * @see rte_dma_info_get
+ */
+struct rte_dma_info {
+	/** Device capabilities (RTE_DMA_CAPA_*). */
+	uint64_t dev_capa;
+	/** Maximum number of virtual DMA channels supported. */
+	uint16_t max_vchans;
+	/** Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_desc;
+	/** Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/** Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dma_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t max_sges;
+	/** NUMA node connection, -1 if unknown. */
+	int16_t numa_node;
+	/** Number of virtual DMA channels configured. */
+	uint16_t nb_vchans;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dma_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ *
+ * @see rte_dma_configure
+ */
+struct rte_dma_conf {
+	/** The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dma_info obtained from rte_dma_info_get().
+	 */
+	uint16_t nb_vchans;
+	/** Indicates whether to enable silent mode.
+	 * false-default mode, true-silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMA_CAPA_SILENT
+	 */
+	bool enable_silent;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dma_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_start(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dma_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stop(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_close(int16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+enum rte_dma_direction {
+	/** DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/** DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from memory
+	 * (which is SoCs memory) to device (which is host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/** DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from device
+	 * (which is host memory) to memory (which is SoCs memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/** DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from device
+	 * (which is host memory) to the device (which is another host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+};
+
+/**
+ * DMA access port type defines.
+ *
+ * @see struct rte_dma_port_param::port_type
+ */
+enum rte_dma_port_type {
+	RTE_DMA_PORT_NONE,
+	RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dma_vchan_conf::src_port
+ * @see struct rte_dma_vchan_conf::dst_port
+ */
+struct rte_dma_port_param {
+	/** The device access port type.
+	 *
+	 * @see enum rte_dma_port_type
+	 */
+	enum rte_dma_port_type port_type;
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields can not be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			/** The pasid field in TLP packet. */
+			uint64_t pasid : 20;
+			/** The attributes field in TLP packet. */
+			uint64_t attr : 3;
+			/** The processing hint field in TLP packet. */
+			uint64_t ph : 2;
+			/** The steering tag field in TLP packet. */
+			uint64_t st : 16;
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ *
+ * @see rte_dma_vchan_setup
+ */
+struct rte_dma_vchan_conf {
+	/** Transfer direction
+	 *
+	 * @see enum rte_dma_direction
+	 */
+	enum rte_dma_direction direction;
+	/** Number of descriptor for the virtual DMA channel */
+	uint16_t nb_desc;
+	/** 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param src_port;
+	/** 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param dst_port;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dma_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dma_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+			const struct rte_dma_vchan_conf *conf);
+
+/**
+ * A structure used to retrieve statistics.
+ *
+ * @see rte_dma_stats_get
+ */
+struct rte_dma_stats {
+	/** Count of operations which were submitted to hardware. */
+	uint64_t submitted;
+	/** Count of operations which were completed, including successful and
+	 * failed completions.
+	 */
+	uint64_t completed;
+	/** Count of operations which failed to complete. */
+	uint64_t errors;
+};
+
+/**
+ * Special ID, which is used to represent all virtual DMA channels.
+ *
+ * @see rte_dma_stats_get
+ * @see rte_dma_stats_reset
+ */
+#define RTE_DMA_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal RTE_DMA_ALL_VCHAN means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dma_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
+		      struct rte_dma_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal RTE_DMA_ALL_VCHAN means all channels.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dump(int16_t dev_id, FILE *f);
 #include "rte_dmadev_core.h"
 
 #ifdef __cplusplus
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 1ce2cf0bf1..2436621594 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -20,6 +20,43 @@ 
 
 #include <rte_dev.h>
 
+struct rte_dma_dev;
+
+/** @internal Used to get device information of a device. */
+typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
+				  struct rte_dma_info *dev_info,
+				  uint32_t info_sz);
+
+/** @internal Used to configure a device. */
+typedef int (*rte_dma_configure_t)(struct rte_dma_dev *dev,
+				   const struct rte_dma_conf *dev_conf,
+				   uint32_t conf_sz);
+
+/** @internal Used to start a configured device. */
+typedef int (*rte_dma_start_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to stop a configured device. */
+typedef int (*rte_dma_stop_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to close a configured device. */
+typedef int (*rte_dma_close_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to allocate and set up a virtual DMA channel. */
+typedef int (*rte_dma_vchan_setup_t)(struct rte_dma_dev *dev, uint16_t vchan,
+				const struct rte_dma_vchan_conf *conf,
+				uint32_t conf_sz);
+
+/** @internal Used to retrieve basic statistics. */
+typedef int (*rte_dma_stats_get_t)(const struct rte_dma_dev *dev,
+			uint16_t vchan, struct rte_dma_stats *stats,
+			uint32_t stats_sz);
+
+/** @internal Used to reset basic statistics. */
+typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
+
+/** @internal Used to dump internal information. */
+typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
+
 /**
  * Possible states of a DMA device.
  *
@@ -34,6 +71,26 @@  enum rte_dma_dev_state {
 
 };
 
+/**
+ * DMA device operations function pointer table.
+ *
+ * @see struct rte_dma_dev:dev_ops
+ */
+struct rte_dma_dev_ops {
+	rte_dma_info_get_t       dev_info_get;
+	rte_dma_configure_t      dev_configure;
+	rte_dma_start_t          dev_start;
+	rte_dma_stop_t           dev_stop;
+	rte_dma_close_t          dev_close;
+
+	rte_dma_vchan_setup_t    vchan_setup;
+
+	rte_dma_stats_get_t      stats_get;
+	rte_dma_stats_reset_t    stats_reset;
+
+	rte_dma_dump_t           dev_dump;
+};
+
 /**
  * @internal
  * The data part, with no function pointers, associated with each DMA device.
@@ -53,6 +110,7 @@  struct rte_dma_dev_data {
 	 * dev_private information.
 	 */
 	void *dev_private;
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields */
 } __rte_cache_aligned;
@@ -72,6 +130,8 @@  struct rte_dma_dev_data {
 struct rte_dma_dev {
 	void *dev_private; /**< PMD-specific private data. */
 	struct rte_dma_dev_data *data; /**< Pointer to device data. */
+	/** Functions exported by PMD. */
+	const struct rte_dma_dev_ops *dev_ops;
 	/** Device info which supplied during device initialization. */
 	struct rte_device *device;
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 56ea0332cb..6b7939b10f 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -1,10 +1,19 @@ 
 EXPERIMENTAL {
 	global:
 
+	rte_dma_close;
+	rte_dma_configure;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
+	rte_dma_dump;
 	rte_dma_get_dev_id;
+	rte_dma_info_get;
 	rte_dma_is_valid;
+	rte_dma_start;
+	rte_dma_stats_get;
+	rte_dma_stats_reset;
+	rte_dma_stop;
+	rte_dma_vchan_setup;
 
 	local: *;
 };