diff mbox series

[v23,2/6] dmadev: add control plane function support

Message ID 20210924105357.15386-3-fengchengwen@huawei.com (mailing list archive)
State Superseded, archived
Headers show
Series support dmadev | expand

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Fengchengwen Sept. 24, 2021, 10:53 a.m. UTC
This patch add control plane functions for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  41 +++
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 359 ++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 480 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  62 +++-
 lib/dmadev/version.map                 |   9 +
 6 files changed, 951 insertions(+), 1 deletion(-)

Comments

Matan Azrad Oct. 5, 2021, 10:16 a.m. UTC | #1
Hi Chengwen

API looks good to me, thanks!

I have some questions below.

> This patch add control plane functions for dmadev.
> 
<snip>
> +/**
> + * DMA transfer direction defines.
> + *
> + * @see struct rte_dma_vchan_conf::direction  */ enum rte_dma_direction
> +{
> +       /** DMA transfer direction - from memory to memory.
> +        *
> +        * @see struct rte_dma_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_MEM_TO_MEM,
> +       /** DMA transfer direction - from memory to device.
> +        * In a typical scenario, the SoCs are installed on host servers as
> +        * iNICs through the PCIe interface. In this case, the SoCs works in
> +        * EP(endpoint) mode, it could initiate a DMA move request from
> memory
> +        * (which is SoCs memory) to device (which is host memory).
> +        *
> +        * @see struct rte_dma_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_MEM_TO_DEV,


I don't understand precisely the meaning of mem and dev.

What does it mean SoCs memory?

What does it mean host memory?

What is the memory HW in these two types?

How does the user get the addresses of SoCs memory?

How does the user get the addresses of host memory?


Can dpdk app here access physical memory not mapped\allocated to the app?

Matan



> +       /** DMA transfer direction - from device to memory.
> +        * In a typical scenario, the SoCs are installed on host servers as
> +        * iNICs through the PCIe interface. In this case, the SoCs works in
> +        * EP(endpoint) mode, it could initiate a DMA move request from device
> +        * (which is host memory) to memory (which is SoCs memory).
> +        *
> +        * @see struct rte_dma_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_DEV_TO_MEM,
> +       /** DMA transfer direction - from device to device.
> +        * In a typical scenario, the SoCs are installed on host servers as
> +        * iNICs through the PCIe interface. In this case, the SoCs works in
> +        * EP(endpoint) mode, it could initiate a DMA move request from device
> +        * (which is host memory) to the device (which is another host
> memory).
> +        *
> +        * @see struct rte_dma_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_DEV_TO_DEV,
> +};
> +
> +/**
> + * DMA access port type defines.
> + *
> + * @see struct rte_dma_port_param::port_type  */ enum
> rte_dma_port_type
> +{
> +       RTE_DMA_PORT_NONE,
> +       RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */ };
> +
> +/**
> + * A structure used to descript DMA access port parameters.
> + *
> + * @see struct rte_dma_vchan_conf::src_port
> + * @see struct rte_dma_vchan_conf::dst_port  */ struct
> +rte_dma_port_param {
> +       /** The device access port type.
> +        *
> +        * @see enum rte_dma_port_type
> +        */
> +       enum rte_dma_port_type port_type;
> +       union {
> +               /** PCIe access port parameters.
> +                *
> +                * The following model shows SoC's PCIe module connects to
> +                * multiple PCIe hosts and multiple endpoints. The PCIe module
> +                * has an integrated DMA controller.
> +                *
> +                * If the DMA wants to access the memory of host A, it can be
> +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
> +                *
> +                * \code{.unparsed}
> +                * System Bus
> +                *    |     ----------PCIe module----------
> +                *    |     Bus
> +                *    |     Interface
> +                *    |     -----        ------------------
> +                *    |     |   |        | PCIe Core0     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
> +                *    |     |   |--------|        |- VF-1 |--------| Root    |
> +                *    |     |   |        |   PF-1         |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |   |        | PCIe Core1     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
> +                *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
> +                *    |     |   |        |        |- VF-1 |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |DMA|        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |--------| PCIe Core2     |        ------
> +                *    |     |   |        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |        |                |        ------
> +                *    |     -----        ------------------
> +                *
> +                * \endcode
> +                *
> +                * @note If some fields can not be supported by the
> +                * hardware/driver, then the driver ignores those fields.
> +                * Please check driver-specific documentation for limitations
> +                * and capablites.
> +                */
> +               struct {
> +                       uint64_t coreid : 4; /**< PCIe core id used. */
> +                       uint64_t pfid : 8; /**< PF id used. */
> +                       uint64_t vfen : 1; /**< VF enable bit. */
> +                       uint64_t vfid : 16; /**< VF id used. */
> +                       /** The pasid filed in TLP packet. */
> +                       uint64_t pasid : 20;
> +                       /** The attributes filed in TLP packet. */
> +                       uint64_t attr : 3;
> +                       /** The processing hint filed in TLP packet. */
> +                       uint64_t ph : 2;
> +                       /** The steering tag filed in TLP packet. */
> +                       uint64_t st : 16;
> +               } pcie;
> +       };
> +       uint64_t reserved[2]; /**< Reserved for future fields. */ };
> +
> +/**
> + * A structure used to configure a virtual DMA channel.
> + *
> + * @see rte_dma_vchan_setup
> + */
> +struct rte_dma_vchan_conf {
> +       /** Transfer direction
> +        *
> +        * @see enum rte_dma_direction
> +        */
> +       enum rte_dma_direction direction;
> +       /** Number of descriptor for the virtual DMA channel */
> +       uint16_t nb_desc;
> +       /** 1) Used to describes the device access port parameter in the
> +        * device-to-memory transfer scenario.
> +        * 2) Used to describes the source device access port parameter in the
> +        * device-to-device transfer scenario.
> +        *
> +        * @see struct rte_dma_port_param
> +        */
> +       struct rte_dma_port_param src_port;
> +       /** 1) Used to describes the device access port parameter in the
> +        * memory-to-device transfer scenario.
> +        * 2) Used to describes the destination device access port parameter in
> +        * the device-to-device transfer scenario.
> +        *
> +        * @see struct rte_dma_port_param
> +        */
> +       struct rte_dma_port_param dst_port; };
> +
<snip>
Thomas Monjalon Oct. 6, 2021, 10:46 a.m. UTC | #2
24/09/2021 12:53, Chengwen Feng:
> --- a/doc/guides/prog_guide/dmadev.rst
> +++ b/doc/guides/prog_guide/dmadev.rst
> @@ -62,3 +62,44 @@ identifiers:
>  
>  - A device name used to designate the DMA device in console messages, for
>    administration or debugging purposes.
> +
> +
> +Device Configuration
> +~~~~~~~~~~~~~~~~~~~~
> +
> +The rte_dma_configure API is used to configure a DMA device.
> +
> +.. code-block:: c
> +
> +   int rte_dma_configure(int16_t dev_id,
> +                         const struct rte_dma_conf *dev_conf);
> +
> +The ``rte_dma_conf`` structure is used to pass the configuration parameters
> +for the DMA device for example the number of virtual DMA channels to set up,
> +indication of whether to enable silent mode.
> +
> +
> +Configuration of Virtual DMA Channels
> +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> +
> +The rte_dma_vchan_setup API is used to configure a virtual DMA channel.
> +
> +.. code-block:: c
> +
> +   int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
> +                           const struct rte_dma_vchan_conf *conf);
> +
> +The ``rte_dma_vchan_conf`` structure is used to pass the configuration
> +parameters for the virtual DMA channel for example transfer direction, number of
> +descriptor for the virtual DMA channel, source device access port parameter,
> +destination device access port parameter.

You should avoid being redundant with the Doxygen documentation.
In the guide, it should be only explaining the concepts, not the details.
For the details of each function, we refer to Doxygen.


> --- a/lib/dmadev/rte_dmadev.c
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -218,6 +218,9 @@ rte_dma_pmd_release(const char *name)
>  	if (dev == NULL)
>  		return -EINVAL;
>  
> +	if (dev->state == RTE_DMA_DEV_READY)
> +		return rte_dma_close(dev->dev_id);

What is the logic here?
The only exposed function should be rte_dma_close()
and it should call the freeing function.
The API should use the dev_id. As you said somewhere else,
the name is only for debugging.
Please remove the function rte_dma_pmd_release(const char *name).

[...]
> --- a/lib/dmadev/rte_dmadev.h
> +++ b/lib/dmadev/rte_dmadev.h
> + * The functions exported by the dmadev API to setup a device designated by its
> + * device identifier must be invoked in the following order:
> + *     - rte_dma_configure()
> + *     - rte_dma_vchan_setup()
> + *     - rte_dma_start()
> + *
> + * If the application wants to change the configuration (i.e. invoke
> + * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
> + * rte_dma_stop() first to stop the device and then do the reconfiguration
> + * before invoking rte_dma_start() again. The dataplane functions should not
> + * be invoked when the device is stopped.
> + *
> + * Finally, an application can close a dmadev by invoking the rte_dma_close()
> + * function.

Yes rte_dma_close, not rte_dma_pmd_release.

> + *
> + * About MT-safe, all the functions of the dmadev API exported by a PMD are

API is not exported by a PMD, but implemented.

> + * lock-free functions which assume to not be invoked in parallel on different
> + * logical cores to work on the same target dmadev object.
> + * @note Different virtual DMA channels on the same dmadev *DO NOT* support
> + * parallel invocation because these virtual DMA channels share the same
> + * HW-DMA-channel.
> + *
>   */

No need of final blank line in a comment.

> +/** DMA device support memory-to-memory transfer.
> + *
> + * @see struct rte_dma_info::dev_capa
> + */
> +#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
> +/** DMA device support memory-to-device transfer.
> + *
> + * @see struct rte_dma_info::dev_capa
> + */
> +#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)

Same comment as in earlier version: please group the flags
in a doxygen group. Example of doxygen group:
https://patches.dpdk.org/project/dpdk/patch/20210830104232.598703-1-thomas@monjalon.net/

[...]
You are using uint64_t bitfields and anonymous union in below struct,
it may not compile if not using __extension__ from RTE_STD_C11.

> +struct rte_dma_port_param {
> +	/** The device access port type.
> +	 *
> +	 * @see enum rte_dma_port_type
> +	 */
> +	enum rte_dma_port_type port_type;
> +	union {
[...]
> +		struct {
> +			uint64_t coreid : 4; /**< PCIe core id used. */
> +			uint64_t pfid : 8; /**< PF id used. */
> +			uint64_t vfen : 1; /**< VF enable bit. */
> +			uint64_t vfid : 16; /**< VF id used. */
> +			/** The pasid filed in TLP packet. */
> +			uint64_t pasid : 20;
> +			/** The attributes filed in TLP packet. */
> +			uint64_t attr : 3;
> +			/** The processing hint filed in TLP packet. */
> +			uint64_t ph : 2;
> +			/** The steering tag filed in TLP packet. */
> +			uint64_t st : 16;
> +		} pcie;
> +	};
> +	uint64_t reserved[2]; /**< Reserved for future fields. */
> +};

> --- a/lib/dmadev/rte_dmadev_core.h
> +++ b/lib/dmadev/rte_dmadev_core.h
> +/** @internal Used to get device information of a device. */
> +typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
> +				  struct rte_dma_info *dev_info,
> +				  uint32_t info_sz);

Please move all driver interfaces in a file dedicated to drivers.

[...]
> @@ -40,9 +96,13 @@ struct rte_dma_dev {
>  	int16_t dev_id; /**< Device [external] identifier. */
>  	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
>  	void *dev_private; /**< PMD-specific private data. */
> +	/** Functions exported by PMD. */

s/exported/implemented/

> +	const struct rte_dma_dev_ops *dev_ops;
> +	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
>  	/** Device info which supplied during device initialization. */
>  	struct rte_device *device;
>  	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
> +	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
>  	uint64_t reserved[2]; /**< Reserved for future fields. */
>  } __rte_cache_aligned;
Fengchengwen Oct. 8, 2021, 3:28 a.m. UTC | #3
On 2021/10/5 18:16, Matan Azrad wrote:
> Hi Chengwen
> 
> API looks good to me, thanks!
> 
> I have some questions below.
> 
>> This patch add control plane functions for dmadev.
>>
> <snip>
>> +/**
>> + * DMA transfer direction defines.
>> + *
>> + * @see struct rte_dma_vchan_conf::direction  */ enum rte_dma_direction
>> +{
>> +       /** DMA transfer direction - from memory to memory.
>> +        *
>> +        * @see struct rte_dma_vchan_conf::direction
>> +        */
>> +       RTE_DMA_DIR_MEM_TO_MEM,
>> +       /** DMA transfer direction - from memory to device.
>> +        * In a typical scenario, the SoCs are installed on host servers as
>> +        * iNICs through the PCIe interface. In this case, the SoCs works in
>> +        * EP(endpoint) mode, it could initiate a DMA move request from
>> memory
>> +        * (which is SoCs memory) to device (which is host memory).
>> +        *
>> +        * @see struct rte_dma_vchan_conf::direction
>> +        */
>> +       RTE_DMA_DIR_MEM_TO_DEV,
> 
> 
> I don't understand precisely the meaning of mem and dev.
> 
> What does it mean SoCs memory?
> 
> What does it mean host memory?
> 
> What is the memory HW in these two types?
> 
> How does the user get the addresses of SoCs memory?
> 
> How does the user get the addresses of host memory?
> 

Hi Matan,

	System Bus
	    |     ----------PCIe module----------
	    |     Bus
	    |     Interface
	    |     -----        ------------------
	    |     |   |        | PCIe Core0     |
 DDR3 ------|     |   |        |                |        -----------
	    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
	    |     |   |--------|        |- VF-1 |--------| Root    | ---DDR1
	    |     |   |        |   PF-1         |        | Complex |
	    |     |   |        |   PF-2         |        -----------
	    |     |   |        ------------------
	    |     |DMA|
	    |     |   |        ------------------
	    |     |   |        | PCIe Core1     |
	    |     |   |        |                |        -----------
	    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
	    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    | ---DDR2
	    |     |   |        |        |- VF-1 |        | Complex |
	    |     |   |        |   PF-2         |        -----------
	    |     |   |        ------------------
	    |     -----

---------------------SOC-------------------------        -------HOST--------

As shown in the above figure, the SOC is connected to two hosts.
The DDR3 is the SOC's memory, the DDR1 is HOST-A's memory, and DDR2 is HOST-B's memory.

To access DDR3 memory, APP could mmap and pass its IOVA address to DMA.
To access DDR1/2 memory, some devices use a parameterized descriptor which contains the
function from which the request is sent and the destination address. In this case, it is
up to the user to confirm the function and destination address, which could be passed in as
parameters.

Thanks.

> 
> Can dpdk app here access physical memory not mapped\allocated to the app?
> 
> Matan
> 
> 
> 
>> +       /** DMA transfer direction - from device to memory.
>> +        * In a typical scenario, the SoCs are installed on host servers as
>> +        * iNICs through the PCIe interface. In this case, the SoCs works in
>> +        * EP(endpoint) mode, it could initiate a DMA move request from device
>> +        * (which is host memory) to memory (which is SoCs memory).
>> +        *
>> +        * @see struct rte_dma_vchan_conf::direction
>> +        */
>> +       RTE_DMA_DIR_DEV_TO_MEM,
>> +       /** DMA transfer direction - from device to device.
>> +        * In a typical scenario, the SoCs are installed on host servers as
>> +        * iNICs through the PCIe interface. In this case, the SoCs works in
>> +        * EP(endpoint) mode, it could initiate a DMA move request from device
>> +        * (which is host memory) to the device (which is another host
>> memory).
>> +        *
>> +        * @see struct rte_dma_vchan_conf::direction
>> +        */
>> +       RTE_DMA_DIR_DEV_TO_DEV,
>> +};
>> +
>> +/**
>> + * DMA access port type defines.
>> + *
>> + * @see struct rte_dma_port_param::port_type  */ enum
>> rte_dma_port_type
>> +{
>> +       RTE_DMA_PORT_NONE,
>> +       RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */ };
>> +
>> +/**
>> + * A structure used to descript DMA access port parameters.
>> + *
>> + * @see struct rte_dma_vchan_conf::src_port
>> + * @see struct rte_dma_vchan_conf::dst_port  */ struct
>> +rte_dma_port_param {
>> +       /** The device access port type.
>> +        *
>> +        * @see enum rte_dma_port_type
>> +        */
>> +       enum rte_dma_port_type port_type;
>> +       union {
>> +               /** PCIe access port parameters.
>> +                *
>> +                * The following model shows SoC's PCIe module connects to
>> +                * multiple PCIe hosts and multiple endpoints. The PCIe module
>> +                * has an integrated DMA controller.
>> +                *
>> +                * If the DMA wants to access the memory of host A, it can be
>> +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
>> +                *
>> +                * \code{.unparsed}
>> +                * System Bus
>> +                *    |     ----------PCIe module----------
>> +                *    |     Bus
>> +                *    |     Interface
>> +                *    |     -----        ------------------
>> +                *    |     |   |        | PCIe Core0     |
>> +                *    |     |   |        |                |        -----------
>> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
>> +                *    |     |   |--------|        |- VF-1 |--------| Root    |
>> +                *    |     |   |        |   PF-1         |        | Complex |
>> +                *    |     |   |        |   PF-2         |        -----------
>> +                *    |     |   |        ------------------
>> +                *    |     |   |
>> +                *    |     |   |        ------------------
>> +                *    |     |   |        | PCIe Core1     |
>> +                *    |     |   |        |                |        -----------
>> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
>> +                *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
>> +                *    |     |   |        |        |- VF-1 |        | Complex |
>> +                *    |     |   |        |   PF-2         |        -----------
>> +                *    |     |   |        ------------------
>> +                *    |     |   |
>> +                *    |     |   |        ------------------
>> +                *    |     |DMA|        |                |        ------
>> +                *    |     |   |        |                |--------| EP |
>> +                *    |     |   |--------| PCIe Core2     |        ------
>> +                *    |     |   |        |                |        ------
>> +                *    |     |   |        |                |--------| EP |
>> +                *    |     |   |        |                |        ------
>> +                *    |     -----        ------------------
>> +                *
>> +                * \endcode
>> +                *
>> +                * @note If some fields can not be supported by the
>> +                * hardware/driver, then the driver ignores those fields.
>> +                * Please check driver-specific documentation for limitations
>> +                * and capablites.
>> +                */
>> +               struct {
>> +                       uint64_t coreid : 4; /**< PCIe core id used. */
>> +                       uint64_t pfid : 8; /**< PF id used. */
>> +                       uint64_t vfen : 1; /**< VF enable bit. */
>> +                       uint64_t vfid : 16; /**< VF id used. */
>> +                       /** The pasid filed in TLP packet. */
>> +                       uint64_t pasid : 20;
>> +                       /** The attributes filed in TLP packet. */
>> +                       uint64_t attr : 3;
>> +                       /** The processing hint filed in TLP packet. */
>> +                       uint64_t ph : 2;
>> +                       /** The steering tag filed in TLP packet. */
>> +                       uint64_t st : 16;
>> +               } pcie;
>> +       };
>> +       uint64_t reserved[2]; /**< Reserved for future fields. */ };
>> +
>> +/**
>> + * A structure used to configure a virtual DMA channel.
>> + *
>> + * @see rte_dma_vchan_setup
>> + */
>> +struct rte_dma_vchan_conf {
>> +       /** Transfer direction
>> +        *
>> +        * @see enum rte_dma_direction
>> +        */
>> +       enum rte_dma_direction direction;
>> +       /** Number of descriptor for the virtual DMA channel */
>> +       uint16_t nb_desc;
>> +       /** 1) Used to describes the device access port parameter in the
>> +        * device-to-memory transfer scenario.
>> +        * 2) Used to describes the source device access port parameter in the
>> +        * device-to-device transfer scenario.
>> +        *
>> +        * @see struct rte_dma_port_param
>> +        */
>> +       struct rte_dma_port_param src_port;
>> +       /** 1) Used to describes the device access port parameter in the
>> +        * memory-to-device transfer scenario.
>> +        * 2) Used to describes the destination device access port parameter in
>> +        * the device-to-device transfer scenario.
>> +        *
>> +        * @see struct rte_dma_port_param
>> +        */
>> +       struct rte_dma_port_param dst_port; };
>> +
> <snip>
>
Fengchengwen Oct. 8, 2021, 7:55 a.m. UTC | #4
On 2021/10/6 18:46, Thomas Monjalon wrote:
> 24/09/2021 12:53, Chengwen Feng:
>> --- a/doc/guides/prog_guide/dmadev.rst
>> +++ b/doc/guides/prog_guide/dmadev.rst
>> @@ -62,3 +62,44 @@ identifiers:
>>  
>>  - A device name used to designate the DMA device in console messages, for
>>    administration or debugging purposes.

[snip]

> 
>> --- a/lib/dmadev/rte_dmadev.c
>> +++ b/lib/dmadev/rte_dmadev.c
>> @@ -218,6 +218,9 @@ rte_dma_pmd_release(const char *name)
>>  	if (dev == NULL)
>>  		return -EINVAL;
>>  
>> +	if (dev->state == RTE_DMA_DEV_READY)
>> +		return rte_dma_close(dev->dev_id);
> 
> What is the logic here?
> The only exposed function should be rte_dma_close()
> and it should call the freeing function.
> The API should use the dev_id. As you said somewhere else,
> the name is only for debugging.
> Please remove the function rte_dma_pmd_release(const char *name).

The rte_dma_pmd_release corresponds to pmd_allocate, so both use the 'const char *name' parameter.

The rte_dma_pmd_release is also used for error handling during PMD init. Therefore, a status variable
is used here. If the device is not ready, only resources need to be released. Otherwise, the close
interface of the driver is invoked.

For PMD, the rte_dma_pmd_release is only a wrapper for dev_close when removing a device, so it does not
need to implement two callbacks.

If we replace rte_dma_pmd_release with rte_dma_close, then we should invoke rte_dma_close in error
handling during PMD init; this can lead to conceptual inconsistencies because the initialization has
not been successful.

So I think it's better keep rte_dma_pmd_release.

> 
> [...]
>> --- a/lib/dmadev/rte_dmadev.h
>> +++ b/lib/dmadev/rte_dmadev.h
>> + * The functions exported by the dmadev API to setup a device designated by its
>> + * device identifier must be invoked in the following order:
>> + *     - rte_dma_configure()
>> + *     - rte_dma_vchan_setup()
>> + *     - rte_dma_start()
>> + *
>> + * If the application wants to change the configuration (i.e. invoke
>> + * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
>> + * rte_dma_stop() first to stop the device and then do the reconfiguration
>> + * before invoking rte_dma_start() again. The dataplane functions should not
>> + * be invoked when the device is stopped.
>> + *
>> + * Finally, an application can close a dmadev by invoking the rte_dma_close()
>> + * function.
> 
> Yes rte_dma_close, not rte_dma_pmd_release.
> 
>> + *
>> + * About MT-safe, all the functions of the dmadev API exported by a PMD are
> 
> API is not exported by a PMD, but implemented.
> 
>> + * lock-free functions which assume to not be invoked in parallel on different
>> + * logical cores to work on the same target dmadev object.
>> + * @note Different virtual DMA channels on the same dmadev *DO NOT* support
>> + * parallel invocation because these virtual DMA channels share the same
>> + * HW-DMA-channel.
>> + *
>>   */
> 
> No need of final blank line in a comment.
> 
>> +/** DMA device support memory-to-memory transfer.
>> + *
>> + * @see struct rte_dma_info::dev_capa
>> + */
>> +#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
>> +/** DMA device support memory-to-device transfer.
>> + *
>> + * @see struct rte_dma_info::dev_capa
>> + */
>> +#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
> 
> Same comment as in earlier version: please group the flags
> in a doxygen group. Example of doxygen group:
> https://patches.dpdk.org/project/dpdk/patch/20210830104232.598703-1-thomas@monjalon.net/

Tried, but found it didn't coexist well with multi-line comments.

> 
> [...]
> You are using uint64_t bitfields and anonymous union in below struct,
> it may not compile if not using __extension__ from RTE_STD_C11.
> 
>> +struct rte_dma_port_param {
>> +	/** The device access port type.
>> +	 *
>> +	 * @see enum rte_dma_port_type
>> +	 */
>> +	enum rte_dma_port_type port_type;
>> +	union {
> [...]
>> +		struct {
>> +			uint64_t coreid : 4; /**< PCIe core id used. */
>> +			uint64_t pfid : 8; /**< PF id used. */
>> +			uint64_t vfen : 1; /**< VF enable bit. */
>> +			uint64_t vfid : 16; /**< VF id used. */
>> +			/** The pasid filed in TLP packet. */
>> +			uint64_t pasid : 20;
>> +			/** The attributes filed in TLP packet. */
>> +			uint64_t attr : 3;
>> +			/** The processing hint filed in TLP packet. */
>> +			uint64_t ph : 2;
>> +			/** The steering tag filed in TLP packet. */
>> +			uint64_t st : 16;
>> +		} pcie;
>> +	};
>> +	uint64_t reserved[2]; /**< Reserved for future fields. */
>> +};
> 
>> --- a/lib/dmadev/rte_dmadev_core.h
>> +++ b/lib/dmadev/rte_dmadev_core.h
>> +/** @internal Used to get device information of a device. */
>> +typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
>> +				  struct rte_dma_info *dev_info,
>> +				  uint32_t info_sz);
> 
> Please move all driver interfaces in a file dedicated to drivers.

There are three head file: rte_dmadev.h, rte_dmadev_core.h, rte_dmadev_pmd.h
And we build the following dependency:

                rte_dmadev.h   ---> rte_dmadev_core.h          // mainly because dataplane inline API.
                    ^
                    |
           ---------------------
           |                   |
       Application       rte_dmadev_pmd.h
                               ^
                               |
                             DMA PMD


If move all driver interfaces to rte_dmadev_pmd.h from rte_dmadev_core.h, bidirectional
dependency may exist, e.g.

                rte_dmadev.h   ---> rte_dmadev_core.h  ---> rte_dmadev_pmd.h
                    ^
                    |
           ---------------------
           |                   |
       Application       rte_dmadev_pmd.h
                               ^
                               |
                             DMA PMD

So I think it's better keep it that way.

> 
> [...]
>> @@ -40,9 +96,13 @@ struct rte_dma_dev {
>>  	int16_t dev_id; /**< Device [external] identifier. */
>>  	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
>>  	void *dev_private; /**< PMD-specific private data. */
>> +	/** Functions exported by PMD. */
> 
> s/exported/implemented/
> 
>> +	const struct rte_dma_dev_ops *dev_ops;
>> +	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
>>  	/** Device info which supplied during device initialization. */
>>  	struct rte_device *device;
>>  	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
>> +	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
>>  	uint64_t reserved[2]; /**< Reserved for future fields. */
>>  } __rte_cache_aligned;
> 
> 
> 
> 
> .
> 

Thanks
Thomas Monjalon Oct. 8, 2021, 10:18 a.m. UTC | #5
08/10/2021 09:55, fengchengwen:
> On 2021/10/6 18:46, Thomas Monjalon wrote:
> > 24/09/2021 12:53, Chengwen Feng:
> >> --- a/lib/dmadev/rte_dmadev.c
> >> +++ b/lib/dmadev/rte_dmadev.c
> >> @@ -218,6 +218,9 @@ rte_dma_pmd_release(const char *name)
> >>  	if (dev == NULL)
> >>  		return -EINVAL;
> >>  
> >> +	if (dev->state == RTE_DMA_DEV_READY)
> >> +		return rte_dma_close(dev->dev_id);
> > 
> > What is the logic here?
> > The only exposed function should be rte_dma_close()
> > and it should call the freeing function.
> > The API should use the dev_id. As you said somewhere else,
> > the name is only for debugging.
> > Please remove the function rte_dma_pmd_release(const char *name).
> 
> The rte_dma_pmd_release corresponding to pmd_allocate, so both use the 'const char *name' parameter.
> 
> The rte_dma_pmd_release is also used for error handling when PMD init. Therefore, a status variable
> is used here. If the device is not ready, only resources need to be released. Otherwise, the close
> interface of the driver is invoked.
> 
> For PMD, the rte_dma_pmd_release is only wrap for dev_close when remove device, it does not need to
> implement two callbacks.
> 
> If we replace rte_dma_pmd_release with rte_dma_close, then we should invoke rte_dma_close in error
> handling when PMD init, this can lead to conceptual inconsistencies because the initialization has
> not been successful.
> 
> So I think it's better keep rte_dma_pmd_release.

I will review again this logic in the next version.

> >> +/** DMA device support memory-to-memory transfer.
> >> + *
> >> + * @see struct rte_dma_info::dev_capa
> >> + */
> >> +#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
> >> +/** DMA device support memory-to-device transfer.
> >> + *
> >> + * @see struct rte_dma_info::dev_capa
> >> + */
> >> +#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
> > 
> > Same comment as in earlier version: please group the flags
> > in a doxygen group. Example of doxygen group:
> > https://patches.dpdk.org/project/dpdk/patch/20210830104232.598703-1-thomas@monjalon.net/
> 
> Tried, but found it didn't coexist well with multi-line comments.

What is not working? Example?
I think you didn't get what to do.
You must add a comment to give a title and group all these flags.

> >> --- a/lib/dmadev/rte_dmadev_core.h
> >> +++ b/lib/dmadev/rte_dmadev_core.h
> >> +/** @internal Used to get device information of a device. */
> >> +typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
> >> +				  struct rte_dma_info *dev_info,
> >> +				  uint32_t info_sz);
> > 
> > Please move all driver interfaces in a file dedicated to drivers.
> 
> There are three head file: rte_dmadev.h, rte_dmadev_core.h, rte_dmadev_pmd.h
> And we build the following dependency:
> 
>                 rte_dmadev.h   ---> rte_dmadev_core.h          // mainly because dataplane inline API.
>                     ^
>                     |
>            ---------------------
>            |                   |
>        Application       rte_dmadev_pmd.h
>                                ^
>                                |
>                              DMA PMD
> 
> 
> If move all driver interfaces to rte_dmadev_pmd.h from rte_dmadev_core.h, bidirectional
> dependency may exist, e.g.
> 
>                 rte_dmadev.h   ---> rte_dmadev_core.h  ---> rte_dmadev_pmd.h
>                     ^
>                     |
>            ---------------------
>            |                   |
>        Application       rte_dmadev_pmd.h
>                                ^
>                                |
>                              DMA PMD
> 
> So I think it's better keep it that way.

Please make sure only what is needed for inline is kept in the "core.h"
You should look at the current effort done by Konstantin in ethdev to hide everything.
diff mbox series

Patch

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index 822282213c..c2b0b0420b 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -62,3 +62,44 @@  identifiers:
 
 - A device name used to designate the DMA device in console messages, for
   administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The rte_dma_configure API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dma_configure(int16_t dev_id,
+                         const struct rte_dma_conf *dev_conf);
+
+The ``rte_dma_conf`` structure is used to pass the configuration parameters
+for the DMA device for example the number of virtual DMA channels to set up,
+indication of whether to enable silent mode.
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The rte_dma_vchan_setup API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+                           const struct rte_dma_vchan_conf *conf);
+
+The ``rte_dma_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel for example transfer direction, number of
+descriptor for the virtual DMA channel, source device access port parameter,
+destination device access port parameter.
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dma_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 74639f1e81..0aceaa8837 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -109,6 +109,7 @@  New Features
 * **Introduced dmadev library with:**
 
   * Device allocation APIs.
+  * Control plane APIs.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 96af3f0772..e0134b9eec 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -218,6 +218,9 @@  rte_dma_pmd_release(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
+	if (dev->state == RTE_DMA_DEV_READY)
+		return rte_dma_close(dev->dev_id);
+
 	dma_release(dev);
 	return 0;
 }
@@ -261,3 +264,359 @@  rte_dma_count_avail(void)
 
 	return count;
 }
+
+int
+rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dma_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dma_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->numa_node = dev->device->numa_node;
+	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configure zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > dev_info.max_vchans) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configure too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
+		RTE_DMA_LOG(ERR, "Device %d don't support silent", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+					     sizeof(struct rte_dma_conf));
+	if (ret == 0)
+		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+
+	return ret;
+}
+
+int
+rte_dma_start(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dma_stop(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->dev_started == 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dma_close(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->dev_started == 1) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_close)(dev);
+	if (ret == 0)
+		dma_release(dev);
+
+	return ret;
+}
+
+int
+rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= dev_info.nb_vchans) {
+		RTE_DMA_LOG(ERR, "Device %d vchan out range!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < dev_info.min_desc ||
+	    conf->nb_desc > dev_info.max_desc) {
+		RTE_DMA_LOG(ERR,
+			"Device %d number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
+		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+					sizeof(struct rte_dma_vchan_conf));
+}
+
+int
+rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dma_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dma_stats));
+}
+
+int
+rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dma_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMA_CAPA_SVA,         "sva"     },
+		{ RTE_DMA_CAPA_SILENT,      "silent"  },
+		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dma_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		(void)fprintf(f, " %s", dma_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	(void)fprintf(f, "\n");
+}
+
+int
+rte_dma_dump(int16_t dev_id, FILE *f)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+
+	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
+		dev->dev_id,
+		dev->dev_name,
+		dev->dev_started ? "started" : "stopped");
+	dma_dump_capability(f, dev_info.dev_capa);
+	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
+	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
+	(void)fprintf(f, "  silent_mode: %s\n",
+		dev->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 17dc0d1226..5114c37446 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -53,6 +53,28 @@ 
  * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
  * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
  *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dma_configure()
+ *     - rte_dma_vchan_setup()
+ *     - rte_dma_start()
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
+ * rte_dma_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dma_start() again. The dataplane functions should not
+ * be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the rte_dma_close()
+ * function.
+ *
+ * Regarding MT-safety, all the functions of the dmadev API exported by a PMD
+ * are lock-free functions which assume they are not invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
  */
 
 #include <stdint.h>
@@ -125,6 +147,464 @@  bool rte_dma_is_valid(int16_t dev_id);
 __rte_experimental
 uint16_t rte_dma_count_avail(void);
 
+/** DMA device support memory-to-memory transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
+/** DMA device support memory-to-device transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
+/** DMA device support device-to-memory transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
+/** DMA device support device-to-device transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
+/** DMA device support SVA which could use VA as DMA address.
+ * If the device supports SVA then the application could pass any VA address
+ * like memory from rte_malloc(), rte_memzone(), malloc, stack memory.
+ * If the device doesn't support SVA, then the application should pass IOVA
+ * addresses obtained from rte_malloc(), rte_memzone().
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_SVA		RTE_BIT64(4)
+/** DMA device support work in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dma_completed*() APIs.
+ *
+ * @see struct rte_dma_conf::enable_silent
+ */
+#define RTE_DMA_CAPA_SILENT		RTE_BIT64(5)
+/** DMA device support copy ops.
+ * This capability starts at bit index 32, leaving a gap between the normal
+ * capability bits and the ops capability bits.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_COPY		RTE_BIT64(32)
+/** DMA device support scatter-gather list copy ops.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
+/** DMA device support fill ops.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ *
+ * @see rte_dma_info_get
+ */
+struct rte_dma_info {
+	/** Device capabilities (RTE_DMA_CAPA_*). */
+	uint64_t dev_capa;
+	/** Maximum number of virtual DMA channels supported. */
+	uint16_t max_vchans;
+	/** Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_desc;
+	/** Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/** Maximum number of source or destination scatter-gather entry
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dma_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t max_sges;
+	/** NUMA node connection, -1 if unknown. */
+	int16_t numa_node;
+	/** Number of virtual DMA channel configured. */
+	uint16_t nb_vchans;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dma_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ *
+ * @see rte_dma_configure
+ */
+struct rte_dma_conf {
+	/** The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dma_info which get from rte_dma_info_get().
+	 */
+	uint16_t nb_vchans;
+	/** Indicates whether to enable silent mode.
+	 * false-default mode, true-silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMA_CAPA_SILENT
+	 */
+	bool enable_silent;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dma_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_start(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dma_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stop(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_close(int16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+enum rte_dma_direction {
+	/** DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/** DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from memory
+	 * (which is SoCs memory) to device (which is host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/** DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from device
+	 * (which is host memory) to memory (which is SoCs memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/** DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from device
+	 * (which is host memory) to the device (which is another host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+};
+
+/**
+ * DMA access port type defines.
+ *
+ * @see struct rte_dma_port_param::port_type
+ */
+enum rte_dma_port_type {
+	RTE_DMA_PORT_NONE,
+	RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dma_vchan_conf::src_port
+ * @see struct rte_dma_vchan_conf::dst_port
+ */
+struct rte_dma_port_param {
+	/** The device access port type.
+	 *
+	 * @see enum rte_dma_port_type
+	 */
+	enum rte_dma_port_type port_type;
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			/** The pasid field in TLP packet. */
+			uint64_t pasid : 20;
+			/** The attributes field in TLP packet. */
+			uint64_t attr : 3;
+			/** The processing hint field in TLP packet. */
+			uint64_t ph : 2;
+			/** The steering tag field in TLP packet. */
+			uint64_t st : 16;
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ *
+ * @see rte_dma_vchan_setup
+ */
+struct rte_dma_vchan_conf {
+	/** Transfer direction
+	 *
+	 * @see enum rte_dma_direction
+	 */
+	enum rte_dma_direction direction;
+	/** Number of descriptor for the virtual DMA channel */
+	uint16_t nb_desc;
+	/** 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param src_port;
+	/** 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param dst_port;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dma_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dma_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+			const struct rte_dma_vchan_conf *conf);
+
+/**
+ * A structure used to retrieve statistics.
+ *
+ * @see rte_dma_stats_get
+ */
+struct rte_dma_stats {
+	/** Count of operations which were submitted to hardware. */
+	uint64_t submitted;
+	/** Count of operations which were completed, including successful and
+	 * failed completions.
+	 */
+	uint64_t completed;
+	/** Count of operations which failed to complete. */
+	uint64_t errors;
+};
+
+/**
+ * Special ID, which is used to represent all virtual DMA channels.
+ *
+ * @see rte_dma_stats_get
+ * @see rte_dma_stats_reset
+ */
+#define RTE_DMA_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal RTE_DMA_ALL_VCHAN means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dma_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
+		      struct rte_dma_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal RTE_DMA_ALL_VCHAN means all channels.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dump(int16_t dev_id, FILE *f);
+
 #include "rte_dmadev_core.h"
 
 #ifdef __cplusplus
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 5ed96853b2..d6f885527a 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -18,6 +18,43 @@ 
  *
  */
 
+struct rte_dma_dev;
+
+/** @internal Used to get device information of a device. */
+typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
+				  struct rte_dma_info *dev_info,
+				  uint32_t info_sz);
+
+/** @internal Used to configure a device. */
+typedef int (*rte_dma_configure_t)(struct rte_dma_dev *dev,
+				   const struct rte_dma_conf *dev_conf,
+				   uint32_t conf_sz);
+
+/** @internal Used to start a configured device. */
+typedef int (*rte_dma_start_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to stop a configured device. */
+typedef int (*rte_dma_stop_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to close a configured device. */
+typedef int (*rte_dma_close_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to allocate and set up a virtual DMA channel. */
+typedef int (*rte_dma_vchan_setup_t)(struct rte_dma_dev *dev, uint16_t vchan,
+				const struct rte_dma_vchan_conf *conf,
+				uint32_t conf_sz);
+
+/** @internal Used to retrieve basic statistics. */
+typedef int (*rte_dma_stats_get_t)(const struct rte_dma_dev *dev,
+			uint16_t vchan, struct rte_dma_stats *stats,
+			uint32_t stats_sz);
+
+/** @internal Used to reset basic statistics. */
+typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
+
+/** @internal Used to dump internal information. */
+typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
+
 /**
  * Possible states of a DMA device.
  *
@@ -32,7 +69,26 @@  enum rte_dma_dev_state {
 };
 
 /**
- * @internal
+ * DMA device operations function pointer table.
+ *
+ * @see struct rte_dma_dev:dev_ops
+ */
+struct rte_dma_dev_ops {
+	rte_dma_info_get_t       dev_info_get;
+	rte_dma_configure_t      dev_configure;
+	rte_dma_start_t          dev_start;
+	rte_dma_stop_t           dev_stop;
+	rte_dma_close_t          dev_close;
+
+	rte_dma_vchan_setup_t    vchan_setup;
+
+	rte_dma_stats_get_t      stats_get;
+	rte_dma_stats_reset_t    stats_reset;
+
+	rte_dma_dump_t           dev_dump;
+};
+
+/** @internal
  * The generic data structure associated with each DMA device.
  */
 struct rte_dma_dev {
@@ -40,9 +96,13 @@  struct rte_dma_dev {
 	int16_t dev_id; /**< Device [external] identifier. */
 	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
 	void *dev_private; /**< PMD-specific private data. */
+	/** Functions exported by PMD. */
+	const struct rte_dma_dev_ops *dev_ops;
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	/** Device info which supplied during device initialization. */
 	struct rte_device *device;
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 56ea0332cb..6b7939b10f 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -1,10 +1,19 @@ 
 EXPERIMENTAL {
 	global:
 
+	rte_dma_close;
+	rte_dma_configure;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
+	rte_dma_dump;
 	rte_dma_get_dev_id;
+	rte_dma_info_get;
 	rte_dma_is_valid;
+	rte_dma_start;
+	rte_dma_stats_get;
+	rte_dma_stats_reset;
+	rte_dma_stop;
+	rte_dma_vchan_setup;
 
 	local: *;
 };