get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
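
As the Allow header in the response below indicates, this endpoint supports GET for reads and PUT/PATCH for updates. A minimal client sketch (assuming Python with the requests library; the token and the example state value are placeholders, and authenticated updates are only permitted for users with the appropriate project rights):

    import requests

    BASE = "https://patches.dpdk.org/api"

    # Read: fetch this patch's metadata as JSON. The ?format=api suffix
    # seen below only selects the browsable view; clients can omit it.
    patch = requests.get(f"{BASE}/patches/96057/").json()
    print(patch["name"], patch["state"])

    # Write: PATCH changes only the supplied writable fields (e.g. state),
    # PUT replaces them all. TOKEN is a placeholder for a Patchwork API
    # token, sent as 'Authorization: Token <token>'.
    TOKEN = "..."
    r = requests.patch(f"{BASE}/patches/96057/",
                       headers={"Authorization": f"Token {TOKEN}"},
                       json={"state": "accepted"})
    r.raise_for_status()
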

GET /api/patches/96057/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 96057,
    "url": "https://patches.dpdk.org/api/patches/96057/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1626699900-6825-1-git-send-email-fengchengwen@huawei.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1626699900-6825-1-git-send-email-fengchengwen@huawei.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1626699900-6825-1-git-send-email-fengchengwen@huawei.com",
    "date": "2021-07-19T13:05:00",
    "name": "[v7] dmadev: introduce DMA device library",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "ce95665c516e7b0ce2b532f0c0f17c5e38994a24",
    "submitter": {
        "id": 2146,
        "url": "https://patches.dpdk.org/api/people/2146/?format=api",
        "name": "fengchengwen",
        "email": "fengchengwen@huawei.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1626699900-6825-1-git-send-email-fengchengwen@huawei.com/mbox/",
    "series": [
        {
            "id": 17894,
            "url": "https://patches.dpdk.org/api/series/17894/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17894",
            "date": "2021-07-19T13:05:00",
            "name": "[v7] dmadev: introduce DMA device library",
            "version": 7,
            "mbox": "https://patches.dpdk.org/series/17894/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/96057/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/96057/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 79C73A034F;\n\tMon, 19 Jul 2021 15:08:42 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0D82D4014E;\n\tMon, 19 Jul 2021 15:08:42 +0200 (CEST)",
            "from szxga03-in.huawei.com (szxga03-in.huawei.com [45.249.212.189])\n by mails.dpdk.org (Postfix) with ESMTP id 84DE64003E\n for <dev@dpdk.org>; Mon, 19 Jul 2021 15:08:39 +0200 (CEST)",
            "from dggemv704-chm.china.huawei.com (unknown [172.30.72.56])\n by szxga03-in.huawei.com (SkyGuard) with ESMTP id 4GT29G4L7Xz7vk6;\n Mon, 19 Jul 2021 21:04:02 +0800 (CST)",
            "from dggpeml500024.china.huawei.com (7.185.36.10) by\n dggemv704-chm.china.huawei.com (10.3.19.47) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id\n 15.1.2176.2; Mon, 19 Jul 2021 21:08:36 +0800",
            "from localhost.localdomain (10.67.165.24) by\n dggpeml500024.china.huawei.com (7.185.36.10) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id\n 15.1.2176.2; Mon, 19 Jul 2021 21:08:36 +0800"
        ],
        "From": "Chengwen Feng <fengchengwen@huawei.com>",
        "To": "<thomas@monjalon.net>, <ferruh.yigit@intel.com>,\n <bruce.richardson@intel.com>, <jerinj@marvell.com>, <jerinjacobk@gmail.com>,\n <andrew.rybchenko@oktetlabs.ru>",
        "CC": "<dev@dpdk.org>, <mb@smartsharesystems.com>, <nipun.gupta@nxp.com>,\n <hemant.agrawal@nxp.com>, <maxime.coquelin@redhat.com>,\n <honnappa.nagarahalli@arm.com>, <david.marchand@redhat.com>,\n <sburla@marvell.com>, <pkapoor@marvell.com>, <konstantin.ananyev@intel.com>",
        "Date": "Mon, 19 Jul 2021 21:05:00 +0800",
        "Message-ID": "<1626699900-6825-1-git-send-email-fengchengwen@huawei.com>",
        "X-Mailer": "git-send-email 2.8.1",
        "In-Reply-To": "<1625231891-2963-1-git-send-email-fengchengwen@huawei.com>",
        "References": "<1625231891-2963-1-git-send-email-fengchengwen@huawei.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.67.165.24]",
        "X-ClientProxiedBy": "dggems704-chm.china.huawei.com (10.3.19.181) To\n dggpeml500024.china.huawei.com (7.185.36.10)",
        "X-CFilter-Loop": "Reflected",
        "Subject": "[dpdk-dev] [PATCH v7] dmadev: introduce DMA device library",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch introduce 'dmadevice' which is a generic type of DMA\ndevice.\n\nThe APIs of dmadev library exposes some generic operations which can\nenable configuration and I/O with the DMA devices.\n\nSigned-off-by: Chengwen Feng <fengchengwen@huawei.com>\n---\nv7:\n* add rte_dmadev_get_dev_id API.\n* fix typo.\n* use the default macro assignment scheme.\n* rename RTE_DMA_DEV_CAPA_* to RTE_DMADEV_CAPA_*.\n* rename rte_dmadev_conf.silent_mode to enable_silent.\n* add memset when get stats.\nv6:\n* delete fence capability.\n* delete vchan_release ops.\n* copy_sg direct use src/dst/nb_src/nb_dst as paramter.\n* define rte_dma_direction, don't support multiple direction in the\n  same vchan.\n* fix segment fault when allocate.\n* fix typo.\n* fix comments format.\nv5:\n* add doxy-api-* file modify.\n* use RTE_LOG_REGISTER_DEFAULT.\n* fix typo.\n* resolve some incorrect comments.\n* fix some doxgen problem.\n* fix version.map still hold rte_dmadev_completed_fails.\nv4:\n* replace xxx_complete_fails with xxx_completed_status.\n* add SILENT capability, also a silent_mode in rte_dmadev_conf.\n* add op_flag_llc for performance.\n* rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.\n* delete filed 'enqueued_count' from rte_dmadev_stats.\n* make rte_dmadev hold 'dev_private' filed.\n* add RTE_DMA_STATUS_NOT_ATTEMPED status code.\n* rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.\n* rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all struct\n  prefix with rte_dmadev.\n* put the comment afterwards.\n* fix some doxgen problem.\n* delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and\n  RTE_DMADEV_PTR_OR_ERR_RET.\n* replace strlcpy with rte_strscpy.\n* other minor modifications from review comment.\nv3:\n* rm reset and fill_sg ops.\n* rm MT-safe capabilities.\n* add submit flag.\n* redefine rte_dma_sg to implement asymmetric copy.\n* delete some reserved field for future use.\n* rearrangement rte_dmadev/rte_dmadev_data struct.\n* refresh rte_dmadev.h copyright.\n* update vchan setup parameter.\n* modified some inappropriate descriptions.\n* arrange version.map alphabetically.\n* other minor modifications from review comment.\n---\n MAINTAINERS                  |    4 +\n config/rte_config.h          |    3 +\n doc/api/doxy-api-index.md    |    1 +\n doc/api/doxy-api.conf.in     |    1 +\n lib/dmadev/meson.build       |    7 +\n lib/dmadev/rte_dmadev.c      |  545 ++++++++++++++++++++++\n lib/dmadev/rte_dmadev.h      | 1041 ++++++++++++++++++++++++++++++++++++++++++\n lib/dmadev/rte_dmadev_core.h |  182 ++++++++\n lib/dmadev/rte_dmadev_pmd.h  |   72 +++\n lib/dmadev/version.map       |   37 ++\n lib/meson.build              |    1 +\n 11 files changed, 1894 insertions(+)\n create mode 100644 lib/dmadev/meson.build\n create mode 100644 lib/dmadev/rte_dmadev.c\n create mode 100644 lib/dmadev/rte_dmadev.h\n create mode 100644 lib/dmadev/rte_dmadev_core.h\n create mode 100644 lib/dmadev/rte_dmadev_pmd.h\n create mode 100644 lib/dmadev/version.map",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex af2a91d..e01a07f 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -495,6 +495,10 @@ F: drivers/raw/skeleton/\n F: app/test/test_rawdev.c\n F: doc/guides/prog_guide/rawdev.rst\n \n+DMA device API - EXPERIMENTAL\n+M: Chengwen Feng <fengchengwen@huawei.com>\n+F: lib/dmadev/\n+\n \n Memory Pool Drivers\n -------------------\ndiff --git a/config/rte_config.h b/config/rte_config.h\nindex 590903c..331a431 100644\n--- a/config/rte_config.h\n+++ b/config/rte_config.h\n@@ -81,6 +81,9 @@\n /* rawdev defines */\n #define RTE_RAWDEV_MAX_DEVS 64\n \n+/* dmadev defines */\n+#define RTE_DMADEV_MAX_DEVS 64\n+\n /* ip_fragmentation defines */\n #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4\n #undef RTE_LIBRTE_IP_FRAG_TBL_STAT\ndiff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md\nindex 1992107..ce08250 100644\n--- a/doc/api/doxy-api-index.md\n+++ b/doc/api/doxy-api-index.md\n@@ -27,6 +27,7 @@ The public API headers are grouped by topics:\n   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),\n   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),\n   [rawdev]             (@ref rte_rawdev.h),\n+  [dmadev]             (@ref rte_dmadev.h),\n   [metrics]            (@ref rte_metrics.h),\n   [bitrate]            (@ref rte_bitrate.h),\n   [latency]            (@ref rte_latencystats.h),\ndiff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in\nindex 325a019..109ec1f 100644\n--- a/doc/api/doxy-api.conf.in\n+++ b/doc/api/doxy-api.conf.in\n@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \\\n                           @TOPDIR@/lib/compressdev \\\n                           @TOPDIR@/lib/cryptodev \\\n                           @TOPDIR@/lib/distributor \\\n+                          @TOPDIR@/lib/dmadev \\\n                           @TOPDIR@/lib/efd \\\n                           @TOPDIR@/lib/ethdev \\\n                           @TOPDIR@/lib/eventdev \\\ndiff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build\nnew file mode 100644\nindex 0000000..d2fc85e\n--- /dev/null\n+++ b/lib/dmadev/meson.build\n@@ -0,0 +1,7 @@\n+# SPDX-License-Identifier: BSD-3-Clause\n+# Copyright(c) 2021 HiSilicon Limited.\n+\n+sources = files('rte_dmadev.c')\n+headers = files('rte_dmadev.h')\n+indirect_headers += files('rte_dmadev_core.h')\n+driver_sdk_headers += files('rte_dmadev_pmd.h')\ndiff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c\nnew file mode 100644\nindex 0000000..a6d8e3be\n--- /dev/null\n+++ b/lib/dmadev/rte_dmadev.c\n@@ -0,0 +1,545 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2021 HiSilicon Limited.\n+ * Copyright(c) 2021 Intel Corporation.\n+ */\n+\n+#include <ctype.h>\n+#include <inttypes.h>\n+#include <stdint.h>\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <string.h>\n+\n+#include <rte_debug.h>\n+#include <rte_dev.h>\n+#include <rte_eal.h>\n+#include <rte_errno.h>\n+#include <rte_lcore.h>\n+#include <rte_log.h>\n+#include <rte_memory.h>\n+#include <rte_memzone.h>\n+#include <rte_malloc.h>\n+#include <rte_string_fns.h>\n+\n+#include \"rte_dmadev.h\"\n+#include \"rte_dmadev_pmd.h\"\n+\n+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];\n+\n+static const char *MZ_RTE_DMADEV_DATA = \"rte_dmadev_data\";\n+/* Shared memory between primary and secondary processes. */\n+static struct {\n+\tstruct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];\n+} *dmadev_shared_data;\n+\n+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);\n+#define RTE_DMADEV_LOG(level, ...) 
\\\n+\trte_log(RTE_LOG_ ## level, rte_dmadev_logtype, \"\" __VA_ARGS__)\n+\n+/* Macros to check for valid device id */\n+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \\\n+\tif (!rte_dmadev_is_valid_dev(dev_id)) { \\\n+\t\tRTE_DMADEV_LOG(ERR, \"Invalid dev_id=%u\\n\", dev_id); \\\n+\t\treturn retval; \\\n+\t} \\\n+} while (0)\n+\n+static int\n+dmadev_check_name(const char *name)\n+{\n+\tsize_t name_len;\n+\n+\tif (name == NULL) {\n+\t\tRTE_DMADEV_LOG(ERR, \"Name can't be NULL\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tname_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);\n+\tif (name_len == 0) {\n+\t\tRTE_DMADEV_LOG(ERR, \"Zero length DMA device name\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\tif (name_len >= RTE_DMADEV_NAME_MAX_LEN) {\n+\t\tRTE_DMADEV_LOG(ERR, \"DMA device name is too long\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static uint16_t\n+dmadev_find_free_dev(void)\n+{\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {\n+\t\tif (dmadev_shared_data->data[i].dev_name[0] == '\\0') {\n+\t\t\tRTE_ASSERT(rte_dmadevices[i].state ==\n+\t\t\t\t   RTE_DMADEV_UNUSED);\n+\t\t\treturn i;\n+\t\t}\n+\t}\n+\n+\treturn RTE_DMADEV_MAX_DEVS;\n+}\n+\n+static struct rte_dmadev*\n+dmadev_find(const char *name)\n+{\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {\n+\t\tif ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&\n+\t\t    (!strcmp(name, rte_dmadevices[i].data->dev_name)))\n+\t\t\treturn &rte_dmadevices[i];\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static int\n+dmadev_shared_data_prepare(void)\n+{\n+\tconst struct rte_memzone *mz;\n+\n+\tif (dmadev_shared_data == NULL) {\n+\t\tif (rte_eal_process_type() == RTE_PROC_PRIMARY) {\n+\t\t\t/* Allocate port data and ownership shared memory. */\n+\t\t\tmz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,\n+\t\t\t\t\t sizeof(*dmadev_shared_data),\n+\t\t\t\t\t rte_socket_id(), 0);\n+\t\t} else\n+\t\t\tmz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);\n+\t\tif (mz == NULL)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tdmadev_shared_data = mz->addr;\n+\t\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n+\t\t\tmemset(dmadev_shared_data->data, 0,\n+\t\t\t       sizeof(dmadev_shared_data->data));\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static struct rte_dmadev *\n+dmadev_allocate(const char *name)\n+{\n+\tstruct rte_dmadev *dev;\n+\tuint16_t dev_id;\n+\n+\tdev = dmadev_find(name);\n+\tif (dev != NULL) {\n+\t\tRTE_DMADEV_LOG(ERR, \"DMA device already allocated\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tif (dmadev_shared_data_prepare() != 0) {\n+\t\tRTE_DMADEV_LOG(ERR, \"Cannot allocate DMA shared data\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tdev_id = dmadev_find_free_dev();\n+\tif (dev_id == RTE_DMADEV_MAX_DEVS) {\n+\t\tRTE_DMADEV_LOG(ERR, \"Reached maximum number of DMA devices\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tdev = &rte_dmadevices[dev_id];\n+\tdev->data = &dmadev_shared_data->data[dev_id];\n+\tdev->data->dev_id = dev_id;\n+\trte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));\n+\n+\treturn dev;\n+}\n+\n+static struct rte_dmadev *\n+dmadev_attach_secondary(const char *name)\n+{\n+\tstruct rte_dmadev *dev;\n+\tuint16_t i;\n+\n+\tif (dmadev_shared_data_prepare() != 0) {\n+\t\tRTE_DMADEV_LOG(ERR, \"Cannot allocate DMA shared data\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tfor (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {\n+\t\tif (!strcmp(dmadev_shared_data->data[i].dev_name, name))\n+\t\t\tbreak;\n+\t}\n+\tif (i == RTE_DMADEV_MAX_DEVS) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %s is not driven by the primary 
process\\n\",\n+\t\t\tname);\n+\t\treturn NULL;\n+\t}\n+\n+\tdev = &rte_dmadevices[i];\n+\tdev->data = &dmadev_shared_data->data[i];\n+\tRTE_ASSERT(dev->data->dev_id == i);\n+\tdev->dev_private = dev->data->dev_private;\n+\n+\treturn dev;\n+}\n+\n+struct rte_dmadev *\n+rte_dmadev_pmd_allocate(const char *name)\n+{\n+\tstruct rte_dmadev *dev;\n+\n+\tif (dmadev_check_name(name) != 0)\n+\t\treturn NULL;\n+\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n+\t\tdev = dmadev_allocate(name);\n+\telse\n+\t\tdev = dmadev_attach_secondary(name);\n+\n+\tif (dev == NULL)\n+\t\treturn NULL;\n+\tdev->state = RTE_DMADEV_ATTACHED;\n+\n+\treturn dev;\n+}\n+\n+int\n+rte_dmadev_pmd_release(struct rte_dmadev *dev)\n+{\n+\tvoid *dev_private_bak;\n+\n+\tif (dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif (dev->state == RTE_DMADEV_UNUSED)\n+\t\treturn 0;\n+\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n+\t\tmemset(dev->data, 0, sizeof(struct rte_dmadev_data));\n+\n+\tdev_private_bak = dev->dev_private;\n+\tmemset(dev, 0, sizeof(struct rte_dmadev));\n+\tif (rte_eal_process_type() == RTE_PROC_PRIMARY)\n+\t\tdev->dev_private = dev_private_bak;\n+\tdev->state = RTE_DMADEV_UNUSED;\n+\n+\treturn 0;\n+}\n+\n+struct rte_dmadev *\n+rte_dmadev_get_device_by_name(const char *name)\n+{\n+\tif (dmadev_check_name(name) != 0)\n+\t\treturn NULL;\n+\treturn dmadev_find(name);\n+}\n+\n+int\n+rte_dmadev_get_dev_id(const char *name)\n+{\n+\tstruct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);\n+\tif (dev != NULL)\n+\t\treturn dev->data->dev_id;\n+\treturn -EINVAL;\n+}\n+\n+bool\n+rte_dmadev_is_valid_dev(uint16_t dev_id)\n+{\n+\treturn (dev_id < RTE_DMADEV_MAX_DEVS) &&\n+\t\trte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;\n+}\n+\n+uint16_t\n+rte_dmadev_count(void)\n+{\n+\tuint16_t count = 0;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {\n+\t\tif (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)\n+\t\t\tcount++;\n+\t}\n+\n+\treturn count;\n+}\n+\n+int\n+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)\n+{\n+\tconst struct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\tint ret;\n+\n+\tRTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);\n+\tif (dev_info == NULL)\n+\t\treturn -EINVAL;\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);\n+\tmemset(dev_info, 0, sizeof(struct rte_dmadev_info));\n+\tret = (*dev->dev_ops->dev_info_get)(dev, dev_info,\n+\t\t\t\t\t    sizeof(struct rte_dmadev_info));\n+\tif (ret != 0)\n+\t\treturn ret;\n+\n+\tdev_info->device = dev->device;\n+\tdev_info->nb_vchans = dev->data->dev_conf.max_vchans;\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\tstruct rte_dmadev_info info;\n+\tint ret;\n+\n+\tRTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);\n+\tif (dev_conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\tret = rte_dmadev_info_get(dev_id, &info);\n+\tif (ret != 0) {\n+\t\tRTE_DMADEV_LOG(ERR, \"Device %u get device info fail\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->max_vchans == 0) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u configure zero vchans\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->max_vchans > info.max_vchans) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u configure too many vchans\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (dev_conf->enable_silent &&\n+\t    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {\n+\t\tRTE_DMADEV_LOG(ERR, \"Device %u don't support 
silent\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (dev->data->dev_started != 0) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u must be stopped to allow configuration\\n\",\n+\t\t\tdev_id);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);\n+\tret = (*dev->dev_ops->dev_configure)(dev, dev_conf);\n+\tif (ret == 0)\n+\t\tmemcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));\n+\n+\treturn ret;\n+}\n+\n+int\n+rte_dmadev_start(uint16_t dev_id)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\tint ret;\n+\n+\tRTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);\n+\n+\tif (dev->data->dev_started != 0) {\n+\t\tRTE_DMADEV_LOG(WARNING, \"Device %u already started\\n\", dev_id);\n+\t\treturn 0;\n+\t}\n+\n+\tif (dev->dev_ops->dev_start == NULL)\n+\t\tgoto mark_started;\n+\n+\tret = (*dev->dev_ops->dev_start)(dev);\n+\tif (ret != 0)\n+\t\treturn ret;\n+\n+mark_started:\n+\tdev->data->dev_started = 1;\n+\treturn 0;\n+}\n+\n+int\n+rte_dmadev_stop(uint16_t dev_id)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\tint ret;\n+\n+\tRTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);\n+\n+\tif (dev->data->dev_started == 0) {\n+\t\tRTE_DMADEV_LOG(WARNING, \"Device %u already stopped\\n\", dev_id);\n+\t\treturn 0;\n+\t}\n+\n+\tif (dev->dev_ops->dev_stop == NULL)\n+\t\tgoto mark_stopped;\n+\n+\tret = (*dev->dev_ops->dev_stop)(dev);\n+\tif (ret != 0)\n+\t\treturn ret;\n+\n+mark_stopped:\n+\tdev->data->dev_started = 0;\n+\treturn 0;\n+}\n+\n+int\n+rte_dmadev_close(uint16_t dev_id)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\n+\tRTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);\n+\n+\t/* Device must be stopped before it can be closed */\n+\tif (dev->data->dev_started == 1) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u must be stopped before closing\\n\", dev_id);\n+\t\treturn -EBUSY;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);\n+\treturn (*dev->dev_ops->dev_close)(dev);\n+}\n+\n+int\n+rte_dmadev_vchan_setup(uint16_t dev_id,\n+\t\t       const struct rte_dmadev_vchan_conf *conf)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\tstruct rte_dmadev_info info;\n+\tint ret;\n+\n+\tRTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);\n+\tif (conf == NULL)\n+\t\treturn -EINVAL;\n+\n+\tdev = &rte_dmadevices[dev_id];\n+\n+\tret = rte_dmadev_info_get(dev_id, &info);\n+\tif (ret != 0) {\n+\t\tRTE_DMADEV_LOG(ERR, \"Device %u get device info fail\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&\n+\t    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&\n+\t    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&\n+\t    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {\n+\t\tRTE_DMADEV_LOG(ERR, \"Device %u direction invalid!\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&\n+\t    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u don't support mem2mem transfer\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&\n+\t    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u don't support mem2dev transfer\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&\n+\t    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u don't support dev2mem transfer\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif 
(conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&\n+\t    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u don't support dev2dev transfer\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\tif (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u number of descriptors invalid\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);\n+\treturn (*dev->dev_ops->vchan_setup)(dev, conf);\n+}\n+\n+int\n+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,\n+\t\t     struct rte_dmadev_stats *stats)\n+{\n+\tconst struct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\n+\tRTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);\n+\tif (stats == NULL)\n+\t\treturn -EINVAL;\n+\tif (vchan >= dev->data->dev_conf.max_vchans &&\n+\t    vchan != RTE_DMADEV_ALL_VCHAN) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u vchan %u out of range\\n\", dev_id, vchan);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);\n+\tmemset(stats, 0, sizeof(struct rte_dmadev_stats));\n+\treturn (*dev->dev_ops->stats_get)(dev, vchan, stats,\n+\t\t\t\t\t  sizeof(struct rte_dmadev_stats));\n+}\n+\n+int\n+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\n+\tRTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);\n+\tif (vchan >= dev->data->dev_conf.max_vchans &&\n+\t    vchan != RTE_DMADEV_ALL_VCHAN) {\n+\t\tRTE_DMADEV_LOG(ERR,\n+\t\t\t\"Device %u vchan %u out of range\\n\", dev_id, vchan);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);\n+\treturn (*dev->dev_ops->stats_reset)(dev, vchan);\n+}\n+\n+int\n+rte_dmadev_dump(uint16_t dev_id, FILE *f)\n+{\n+\tconst struct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\tstruct rte_dmadev_info info;\n+\tint ret;\n+\n+\tRTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);\n+\tif (f == NULL)\n+\t\treturn -EINVAL;\n+\n+\tret = rte_dmadev_info_get(dev_id, &info);\n+\tif (ret != 0) {\n+\t\tRTE_DMADEV_LOG(ERR, \"Device %u get device info fail\\n\", dev_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tfprintf(f, \"DMA Dev %u, '%s' [%s]\\n\",\n+\t\tdev->data->dev_id,\n+\t\tdev->data->dev_name,\n+\t\tdev->data->dev_started ? \"started\" : \"stopped\");\n+\tfprintf(f, \"  dev_capa: 0x%\" PRIx64 \"\\n\", info.dev_capa);\n+\tfprintf(f, \"  max_vchans_supported: %u\\n\", info.max_vchans);\n+\tfprintf(f, \"  max_vchans_configured: %u\\n\", info.nb_vchans);\n+\tfprintf(f, \"  silent_mode: %s\\n\",\n+\t\tdev->data->dev_conf.enable_silent ? 
\"on\" : \"off\");\n+\n+\tif (dev->dev_ops->dev_dump != NULL)\n+\t\treturn (*dev->dev_ops->dev_dump)(dev, f);\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_dmadev_selftest(uint16_t dev_id)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\n+\tRTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);\n+\treturn (*dev->dev_ops->dev_selftest)(dev_id);\n+}\ndiff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h\nnew file mode 100644\nindex 0000000..1518187\n--- /dev/null\n+++ b/lib/dmadev/rte_dmadev.h\n@@ -0,0 +1,1041 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2021 HiSilicon Limited.\n+ * Copyright(c) 2021 Intel Corporation.\n+ * Copyright(c) 2021 Marvell International Ltd.\n+ * Copyright(c) 2021 SmartShare Systems.\n+ */\n+\n+#ifndef _RTE_DMADEV_H_\n+#define _RTE_DMADEV_H_\n+\n+/**\n+ * @file rte_dmadev.h\n+ *\n+ * RTE DMA (Direct Memory Access) device APIs.\n+ *\n+ * The DMA framework is built on the following model:\n+ *\n+ *     ---------------   ---------------       ---------------\n+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |\n+ *     | channel     |   | channel     |       | channel     |\n+ *     ---------------   ---------------       ---------------\n+ *            |                |                      |\n+ *            ------------------                      |\n+ *                     |                              |\n+ *               ------------                    ------------\n+ *               |  dmadev  |                    |  dmadev  |\n+ *               ------------                    ------------\n+ *                     |                              |\n+ *            ------------------               ------------------\n+ *            | HW-DMA-channel |               | HW-DMA-channel |\n+ *            ------------------               ------------------\n+ *                     |                              |\n+ *                     --------------------------------\n+ *                                     |\n+ *                           ---------------------\n+ *                           | HW-DMA-Controller |\n+ *                           ---------------------\n+ *\n+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),\n+ * each HW-DMA-channel should be represented by a dmadev.\n+ *\n+ * The dmadev could create multiple virtual DMA channels, each virtual DMA\n+ * channel represents a different transfer context. The DMA operation request\n+ * must be submitted to the virtual DMA channel. e.g. Application could create\n+ * virtual DMA channel 0 for memory-to-memory transfer scenario, and create\n+ * virtual DMA channel 1 for memory-to-device transfer scenario.\n+ *\n+ * The dmadev are dynamically allocated by rte_dmadev_pmd_allocate() during the\n+ * PCI/SoC device probing phase performed at EAL initialization time. 
And could\n+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing\n+ * phase.\n+ *\n+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,\n+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.\n+ *\n+ * The functions exported by the dmadev API to setup a device designated by its\n+ * device identifier must be invoked in the following order:\n+ *     - rte_dmadev_configure()\n+ *     - rte_dmadev_vchan_setup()\n+ *     - rte_dmadev_start()\n+ *\n+ * Then, the application can invoke dataplane APIs to process jobs.\n+ *\n+ * If the application wants to change the configuration (i.e. invoke\n+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke\n+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration\n+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be\n+ * invoked when the device is stopped.\n+ *\n+ * Finally, an application can close a dmadev by invoking the\n+ * rte_dmadev_close() function.\n+ *\n+ * The dataplane APIs include two parts:\n+ * The first part is the submission of operation requests:\n+ *     - rte_dmadev_copy()\n+ *     - rte_dmadev_copy_sg()\n+ *     - rte_dmadev_fill()\n+ *     - rte_dmadev_submit()\n+ *\n+ * These APIs could work with different virtual DMA channels which have\n+ * different contexts.\n+ *\n+ * The first three APIs are used to submit the operation request to the virtual\n+ * DMA channel, if the submission is successful, a uint16_t ring_idx is\n+ * returned, otherwise a negative number is returned.\n+ *\n+ * The last API was used to issue doorbell to hardware, and also there are flags\n+ * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs could do the\n+ * same work.\n+ *\n+ * The second part is to obtain the result of requests:\n+ *     - rte_dmadev_completed()\n+ *         - return the number of operation requests completed successfully.\n+ *     - rte_dmadev_completed_status()\n+ *         - return the number of operation requests completed.\n+ *\n+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),\n+ * application does not invoke the above two completed APIs.\n+ *\n+ * About the ring_idx which enqueue APIs (e.g. rte_dmadev_copy()\n+ * rte_dmadev_fill()) returned, the rules are as follows:\n+ *     - ring_idx for each virtual DMA channel are independent.\n+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented,\n+ *       when it reach UINT16_MAX, it wraps back to zero.\n+ *     - This ring_idx can be used by applications to track per-operation\n+ *       metadata in an application-defined circular ring.\n+ *     - The initial ring_idx of a virtual DMA channel is zero, after the\n+ *       device is stopped, the ring_idx needs to be reset to zero.\n+ *\n+ * One example:\n+ *     - step-1: start one dmadev\n+ *     - step-2: enqueue a copy operation, the ring_idx return is 0\n+ *     - step-3: enqueue a copy operation again, the ring_idx return is 1\n+ *     - ...\n+ *     - step-101: stop the dmadev\n+ *     - step-102: start the dmadev\n+ *     - step-103: enqueue a copy operation, the cookie return is 0\n+ *     - ...\n+ *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535\n+ *     - step-x+1: enqueue a copy operation, the ring_idx return is 0\n+ *     - ...\n+ *\n+ * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),\n+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) defined as rte_iova_t type. 
The\n+ * dmadev supports two types of address: memory address and device address.\n+ *\n+ * - memory address: the source and destination address of the memory-to-memory\n+ * transfer type, or the source address of the memory-to-device transfer type,\n+ * or the destination address of the device-to-memory transfer type.\n+ * @note If the device support SVA (@see RTE_DMADEV_CAPA_SVA), the memory\n+ * address can be any VA address, otherwise it must be an IOVA address.\n+ *\n+ * - device address: the source and destination address of the device-to-device\n+ * transfer type, or the source address of the device-to-memory transfer type,\n+ * or the destination address of the memory-to-device transfer type.\n+ *\n+ * By default, all the functions of the dmadev API exported by a PMD are\n+ * lock-free functions which assume to not be invoked in parallel on different\n+ * logical cores to work on the same target dmadev object.\n+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support\n+ * parallel invocation because these virtual DMA channels share the same\n+ * HW-DMA-channel.\n+ *\n+ */\n+\n+#include <rte_common.h>\n+#include <rte_compat.h>\n+#include <rte_dev.h>\n+#include <rte_errno.h>\n+#include <rte_memory.h>\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+#define RTE_DMADEV_NAME_MAX_LEN\tRTE_DEV_NAME_MAX_LEN\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Get the device identifier for the named DMA device.\n+ *\n+ * @param name\n+ *   DMA device name.\n+ *\n+ * @return\n+ *   Returns DMA device identifier on success.\n+ *   - <0: Failure to find named DMA device.\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_get_dev_id(const char *name);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * @param dev_id\n+ *   DMA device index.\n+ *\n+ * @return\n+ *   - If the device index is valid (true) or not (false).\n+ */\n+__rte_experimental\n+bool\n+rte_dmadev_is_valid_dev(uint16_t dev_id);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Get the total number of DMA devices that have been successfully\n+ * initialised.\n+ *\n+ * @return\n+ *   The total number of usable DMA devices.\n+ */\n+__rte_experimental\n+uint16_t\n+rte_dmadev_count(void);\n+\n+/* Enumerates DMA device capabilities. 
*/\n+#define RTE_DMADEV_CAPA_MEM_TO_MEM\t(1ull << 0)\n+/**< DMA device support memory-to-memory transfer.\n+ *\n+ * @see struct rte_dmadev_info::dev_capa\n+ */\n+\n+#define RTE_DMADEV_CAPA_MEM_TO_DEV\t(1ull << 1)\n+/**< DMA device support memory-to-device transfer.\n+ *\n+ * @see struct rte_dmadev_info::dev_capa\n+ * @see struct rte_dmadev_port_param::port_type\n+ */\n+\n+#define RTE_DMADEV_CAPA_DEV_TO_MEM\t(1ull << 2)\n+/**< DMA device support device-to-memory transfer.\n+ *\n+ * @see struct rte_dmadev_info::dev_capa\n+ * @see struct rte_dmadev_port_param::port_type\n+ */\n+\n+#define RTE_DMADEV_CAPA_DEV_TO_DEV\t(1ull << 3)\n+/**< DMA device support device-to-device transfer.\n+ *\n+ * @see struct rte_dmadev_info::dev_capa\n+ * @see struct rte_dmadev_port_param::port_type\n+ */\n+\n+#define RTE_DMADEV_CAPA_SVA\t\t(1ull << 4)\n+/**< DMA device support SVA which could use VA as DMA address.\n+ * If device support SVA then application could pass any VA address like memory\n+ * from rte_malloc(), rte_memzone(), malloc, stack memory.\n+ * If device don't support SVA, then application should pass IOVA address which\n+ * from rte_malloc(), rte_memzone().\n+ *\n+ * @see struct rte_dmadev_info::dev_capa\n+ */\n+\n+#define RTE_DMADEV_CAPA_SILENT\t\t(1ull << 5)\n+/**< DMA device support work in silent mode.\n+ * In this mode, application don't required to invoke rte_dmadev_completed*()\n+ * API.\n+ *\n+ * @see struct rte_dmadev_conf::silent_mode\n+ */\n+\n+#define RTE_DMADEV_CAPA_OPS_COPY\t(1ull << 32)\n+/**< DMA device support copy ops.\n+ * This capability start with index of 32, so that it could leave gap between\n+ * normal capability and ops capability.\n+ *\n+ * @see struct rte_dmadev_info::dev_capa\n+ */\n+\n+#define RTE_DMADEV_CAPA_OPS_COPY_SG\t(1ull << 33)\n+/**< DMA device support scatter-list copy ops.\n+ *\n+ * @see struct rte_dmadev_info::dev_capa\n+ */\n+\n+#define RTE_DMADEV_CAPA_OPS_FILL\t(1ull << 34)\n+/**< DMA device support fill ops.\n+ *\n+ * @see struct rte_dmadev_info::dev_capa\n+ */\n+\n+/**\n+ * A structure used to retrieve the information of a DMA device.\n+ */\n+struct rte_dmadev_info {\n+\tstruct rte_device *device; /**< Generic Device information. */\n+\tuint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */\n+\tuint16_t max_vchans;\n+\t/**< Maximum number of virtual DMA channels supported. */\n+\tuint16_t max_desc;\n+\t/**< Maximum allowed number of virtual DMA channel descriptors. */\n+\tuint16_t min_desc;\n+\t/**< Minimum allowed number of virtual DMA channel descriptors. */\n+\tuint16_t nb_vchans; /**< Number of virtual DMA channel configured. 
*/\n+};\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Retrieve information of a DMA device.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param[out] dev_info\n+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the\n+ *   information of the device.\n+ *\n+ * @return\n+ *   - =0: Success, driver updates the information of the DMA device.\n+ *   - <0: Error code returned by the driver info get function.\n+ *\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);\n+\n+/**\n+ * A structure used to configure a DMA device.\n+ */\n+struct rte_dmadev_conf {\n+\tuint16_t max_vchans;\n+\t/**< Maximum number of virtual DMA channel to use.\n+\t * This value cannot be greater than the field 'max_vchans' of struct\n+\t * rte_dmadev_info which get from rte_dmadev_info_get().\n+\t */\n+\tbool enable_silent;\n+\t/**< Indicates whether to enable silent mode.\n+\t * false-default mode, true-silent mode.\n+\t * This value can be set to true only when the SILENT capability is\n+\t * supported.\n+\t *\n+\t * @see RTE_DMADEV_CAPA_SILENT\n+\t */\n+};\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Configure a DMA device.\n+ *\n+ * This function must be invoked first before any other function in the\n+ * API. This function can also be re-invoked when a device is in the\n+ * stopped state.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device to configure.\n+ * @param dev_conf\n+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf\n+ *   object.\n+ *\n+ * @return\n+ *   - =0: Success, device configured.\n+ *   - <0: Error code returned by the driver configuration function.\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Start a DMA device.\n+ *\n+ * The device start step is the last one and consists of setting the DMA\n+ * to start accepting jobs.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ *\n+ * @return\n+ *   - =0: Success, device started.\n+ *   - <0: Error code returned by the driver start function.\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_start(uint16_t dev_id);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Stop a DMA device.\n+ *\n+ * The device can be restarted with a call to rte_dmadev_start().\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ *\n+ * @return\n+ *   - =0: Success, device stopped.\n+ *   - <0: Error code returned by the driver stop function.\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_stop(uint16_t dev_id);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Close a DMA device.\n+ *\n+ * The device cannot be restarted after this call.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ *\n+ * @return\n+ *  - =0: Successfully close device\n+ *  - <0: Failure to close device\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_close(uint16_t dev_id);\n+\n+/**\n+ * rte_dma_direction - DMA transfer direction defines.\n+ */\n+enum rte_dma_direction {\n+\tRTE_DMA_DIR_MEM_TO_MEM,\n+\t/**< DMA transfer direction - from memory to memory.\n+\t *\n+\t * @see struct rte_dmadev_vchan_conf::direction\n+\t */\n+\tRTE_DMA_DIR_MEM_TO_DEV,\n+\t/**< DMA transfer direction - from memory to 
device.\n+\t * In a typical scenario, the SoCs are installed on host servers as\n+\t * iNICs through the PCIe interface. In this case, the SoCs works in\n+\t * EP(endpoint) mode, it could initiate a DMA move request from memory\n+\t * (which is SoCs memory) to device (which is host memory).\n+\t *\n+\t * @see struct rte_dmadev_vchan_conf::direction\n+\t */\n+\tRTE_DMA_DIR_DEV_TO_MEM,\n+\t/**< DMA transfer direction - from device to memory.\n+\t * In a typical scenario, the SoCs are installed on host servers as\n+\t * iNICs through the PCIe interface. In this case, the SoCs works in\n+\t * EP(endpoint) mode, it could initiate a DMA move request from device\n+\t * (which is host memory) to memory (which is SoCs memory).\n+\t *\n+\t * @see struct rte_dmadev_vchan_conf::direction\n+\t */\n+\tRTE_DMA_DIR_DEV_TO_DEV,\n+\t/**< DMA transfer direction - from device to device.\n+\t * In a typical scenario, the SoCs are installed on host servers as\n+\t * iNICs through the PCIe interface. In this case, the SoCs works in\n+\t * EP(endpoint) mode, it could initiate a DMA move request from device\n+\t * (which is host memory) to the device (which is another host memory).\n+\t *\n+\t * @see struct rte_dmadev_vchan_conf::direction\n+\t */\n+};\n+\n+/**\n+ * enum rte_dmadev_port_type - DMA access port type defines.\n+ *\n+ * @see struct rte_dmadev_port_param::port_type\n+ */\n+enum rte_dmadev_port_type {\n+\tRTE_DMADEV_PORT_NONE,\n+\tRTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */\n+};\n+\n+/**\n+ * A structure used to descript DMA access port parameters.\n+ *\n+ * @see struct rte_dmadev_vchan_conf::src_port\n+ * @see struct rte_dmadev_vchan_conf::dst_port\n+ */\n+struct rte_dmadev_port_param {\n+\tenum rte_dmadev_port_type port_type;\n+\t/**< The device access port type.\n+\t * @see enum rte_dmadev_port_type\n+\t */\n+\tunion {\n+\t\t/* The following model shows SoC's PCIe module connects to\n+\t\t * multiple PCIe hosts and multiple endpoints. 
The PCIe module\n+\t\t * has an integrate DMA controller.\n+\t\t * If the DMA wants to access the memory of host A, it can be\n+\t\t * initiated by PF1 in core0, or by VF0 of PF0 in core0.\n+\t\t *\n+\t\t * System Bus\n+\t\t *    |     ----------PCIe module----------\n+\t\t *    |     Bus\n+\t\t *    |     Interface\n+\t\t *    |     -----        ------------------\n+\t\t *    |     |   |        | PCIe Core0     |\n+\t\t *    |     |   |        |                |        -----------\n+\t\t *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |\n+\t\t *    |     |   |--------|        |- VF-1 |--------| Root    |\n+\t\t *    |     |   |        |   PF-1         |        | Complex |\n+\t\t *    |     |   |        |   PF-2         |        -----------\n+\t\t *    |     |   |        ------------------\n+\t\t *    |     |   |\n+\t\t *    |     |   |        ------------------\n+\t\t *    |     |   |        | PCIe Core1     |\n+\t\t *    |     |   |        |                |        -----------\n+\t\t *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |\n+\t\t *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |\n+\t\t *    |     |   |        |        |- VF-1 |        | Complex |\n+\t\t *    |     |   |        |   PF-2         |        -----------\n+\t\t *    |     |   |        ------------------\n+\t\t *    |     |   |\n+\t\t *    |     |   |        ------------------\n+\t\t *    |     |DMA|        |                |        ------\n+\t\t *    |     |   |        |                |--------| EP |\n+\t\t *    |     |   |--------| PCIe Core2     |        ------\n+\t\t *    |     |   |        |                |        ------\n+\t\t *    |     |   |        |                |--------| EP |\n+\t\t *    |     |   |        |                |        ------\n+\t\t *    |     -----        ------------------\n+\t\t */\n+\t\t/** The following structure is used to describe the PCIe access\n+\t\t * port parameters.\n+\t\t *\n+\t\t * @note If some fields can not be supported by the\n+\t\t * hardware/driver, then the driver ignores those fields.\n+\t\t * Please check driver-specific documentation for limitations\n+\t\t * and capablites.\n+\t\t */\n+\t\tstruct {\n+\t\t\tuint64_t coreid : 4; /**< PCIe core id used. */\n+\t\t\tuint64_t pfid : 8; /**< PF id used. */\n+\t\t\tuint64_t vfen : 1; /**< VF enable bit. */\n+\t\t\tuint64_t vfid : 16; /**< VF id used. */\n+\t\t\tuint64_t pasid : 20;\n+\t\t\t/**< The pasid filed in TLP packet. */\n+\t\t\tuint64_t attr : 3;\n+\t\t\t/**< The attributes filed in TLP packet. */\n+\t\t\tuint64_t ph : 2;\n+\t\t\t/**< The processing hint filed in TLP packet. */\n+\t\t\tuint64_t st : 16;\n+\t\t\t/**< The steering tag filed in TLP packet. */\n+\t\t} pcie;\n+\t};\n+\tuint64_t reserved[2]; /**< Reserved for future fields. 
*/\n+};\n+\n+/**\n+ * A structure used to configure a virtual DMA channel.\n+ */\n+struct rte_dmadev_vchan_conf {\n+\tenum rte_dma_direction direction;\n+\t/**< Transfer direction\n+\t * @see enum rte_dma_direction\n+\t */\n+\tuint16_t nb_desc;\n+\t/**< Number of descriptor for the virtual DMA channel */\n+\tstruct rte_dmadev_port_param src_port;\n+\t/**< 1) Used to describes the device access port parameter in the\n+\t * device-to-memory transfer scenario.\n+\t * 2) Used to describes the source device access port parameter in the\n+\t * device-to-device transfer scenario.\n+\t * @see struct rte_dmadev_port_param\n+\t */\n+\tstruct rte_dmadev_port_param dst_port;\n+\t/**< 1) Used to describes the device access port parameter in the\n+\t * memory-to-device transfer scenario.\n+\t * 2) Used to describes the destination device access port parameter in\n+\t * the device-to-device transfer scenario.\n+\t * @see struct rte_dmadev_port_param\n+\t */\n+};\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Allocate and set up a virtual DMA channel.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param conf\n+ *   The virtual DMA channel configuration structure encapsulated into\n+ *   rte_dmadev_vchan_conf object.\n+ *\n+ * @return\n+ *   - >=0: Allocate success, it is the virtual DMA channel id. This value must\n+ *          be less than the field 'max_vchans' of struct rte_dmadev_conf\n+ *          which configured by rte_dmadev_configure().\n+ *   - <0: Error code returned by the driver virtual channel setup function.\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_vchan_setup(uint16_t dev_id,\n+\t\t       const struct rte_dmadev_vchan_conf *conf);\n+\n+/**\n+ * rte_dmadev_stats - running statistics.\n+ */\n+struct rte_dmadev_stats {\n+\tuint64_t submitted_count;\n+\t/**< Count of operations which were submitted to hardware. */\n+\tuint64_t completed_fail_count;\n+\t/**< Count of operations which failed to complete. */\n+\tuint64_t completed_count;\n+\t/**< Count of operations which successfully complete. 
*/\n+};\n+\n+#define RTE_DMADEV_ALL_VCHAN\t0xFFFFu\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Retrieve basic statistics of a or all virtual DMA channel(s).\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param vchan\n+ *   The identifier of virtual DMA channel.\n+ *   If equal RTE_DMADEV_ALL_VCHAN means all channels.\n+ * @param[out] stats\n+ *   The basic statistics structure encapsulated into rte_dmadev_stats\n+ *   object.\n+ *\n+ * @return\n+ *   - =0: Successfully retrieve stats.\n+ *   - <0: Failure to retrieve stats.\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,\n+\t\t     struct rte_dmadev_stats *stats);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Reset basic statistics of a or all virtual DMA channel(s).\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param vchan\n+ *   The identifier of virtual DMA channel.\n+ *   If equal RTE_DMADEV_ALL_VCHAN means all channels.\n+ *\n+ * @return\n+ *   - =0: Successfully reset stats.\n+ *   - <0: Failure to reset stats.\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Dump DMA device info.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param f\n+ *   The file to write the output to.\n+ *\n+ * @return\n+ *   0 on success. Non-zero otherwise.\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_dump(uint16_t dev_id, FILE *f);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Trigger the dmadev self test.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ *\n+ * @return\n+ *   - 0: Selftest successful.\n+ *   - -ENOTSUP if the device doesn't support selftest\n+ *   - other values < 0 on failure.\n+ */\n+__rte_experimental\n+int\n+rte_dmadev_selftest(uint16_t dev_id);\n+\n+/**\n+ * rte_dma_status_code - DMA transfer result status code defines.\n+ */\n+enum rte_dma_status_code {\n+\tRTE_DMA_STATUS_SUCCESSFUL,\n+\t/**< The operation completed successfully. */\n+\tRTE_DMA_STATUS_USRER_ABORT,\n+\t/**< The operation failed to complete due abort by user.\n+\t * This is mainly used when processing dev_stop, user could modidy the\n+\t * descriptors (e.g. change one bit to tell hardware abort this job),\n+\t * it allows outstanding requests to be complete as much as possible,\n+\t * so reduce the time to stop the device.\n+\t */\n+\tRTE_DMA_STATUS_NOT_ATTEMPTED,\n+\t/**< The operation failed to complete due to following scenarios:\n+\t * The jobs in a particular batch are not attempted because they\n+\t * appeared after a fence where a previous job failed. In some HW\n+\t * implementation it's possible for jobs from later batches would be\n+\t * completed, though, so report the status from the not attempted jobs\n+\t * before reporting those newer completed jobs.\n+\t */\n+\tRTE_DMA_STATUS_INVALID_SRC_ADDR,\n+\t/**< The operation failed to complete due invalid source address. */\n+\tRTE_DMA_STATUS_INVALID_DST_ADDR,\n+\t/**< The operation failed to complete due invalid destination\n+\t * address.\n+\t */\n+\tRTE_DMA_STATUS_INVALID_LENGTH,\n+\t/**< The operation failed to complete due invalid length. 
*/\n+\tRTE_DMA_STATUS_INVALID_OPCODE,\n+\t/**< The operation failed to complete due invalid opcode.\n+\t * The DMA descriptor could have multiple format, which are\n+\t * distinguished by the opcode field.\n+\t */\n+\tRTE_DMA_STATUS_BUS_ERROR,\n+\t/**< The operation failed to complete due bus err. */\n+\tRTE_DMA_STATUS_DATA_POISION,\n+\t/**< The operation failed to complete due data poison. */\n+\tRTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,\n+\t/**< The operation failed to complete due descriptor read error. */\n+\tRTE_DMA_STATUS_DEV_LINK_ERROR,\n+\t/**< The operation failed to complete due device link error.\n+\t * Used to indicates that the link error in the memory-to-device/\n+\t * device-to-memory/device-to-device transfer scenario.\n+\t */\n+\tRTE_DMA_STATUS_UNKNOWN = 0x100,\n+\t/**< The operation failed to complete due unknown reason.\n+\t * The initial value is 256, which reserves space for future errors.\n+\t */\n+};\n+\n+/**\n+ * rte_dmadev_sge - can hold scatter DMA operation request entry.\n+ */\n+struct rte_dmadev_sge {\n+\trte_iova_t addr; /**< The DMA operation address. */\n+\tuint32_t length; /**< The DMA operation length. */\n+};\n+\n+#include \"rte_dmadev_core.h\"\n+\n+/* DMA flags to augment operation preparation. */\n+#define RTE_DMA_OP_FLAG_FENCE\t(1ull << 0)\n+/**< DMA fence flag.\n+ * It means the operation with this flag must be processed only after all\n+ * previous operations are completed.\n+ * If the specify DMA HW works in-order (it means it has default fence between\n+ * operations), this flag could be NOP.\n+ *\n+ * @see rte_dmadev_copy()\n+ * @see rte_dmadev_copy_sg()\n+ * @see rte_dmadev_fill()\n+ */\n+\n+#define RTE_DMA_OP_FLAG_SUBMIT\t(1ull << 1)\n+/**< DMA submit flag.\n+ * It means the operation with this flag must issue doorbell to hardware after\n+ * enqueued jobs.\n+ */\n+\n+#define RTE_DMA_OP_FLAG_LLC\t(1ull << 2)\n+/**< DMA write data to low level cache hint.\n+ * Used for performance optimization, this is just a hint, and there is no\n+ * capability bit for this, driver should not return error if this flag was set.\n+ */\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Enqueue a copy operation onto the virtual DMA channel.\n+ *\n+ * This queues up a copy operation to be performed by hardware, if the 'flags'\n+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin\n+ * this operation, otherwise do not trigger doorbell.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param vchan\n+ *   The identifier of virtual DMA channel.\n+ * @param src\n+ *   The address of the source buffer.\n+ * @param dst\n+ *   The address of the destination buffer.\n+ * @param length\n+ *   The length of the data to be copied.\n+ * @param flags\n+ *   An flags for this operation.\n+ *   @see RTE_DMA_OP_FLAG_*\n+ *\n+ * @return\n+ *   - 0..UINT16_MAX: index of enqueued copy job.\n+ *   - <0: Error code returned by the driver copy function.\n+ */\n+__rte_experimental\n+static inline int\n+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,\n+\t\tuint32_t length, uint64_t flags)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\n+#ifdef RTE_DMADEV_DEBUG\n+\tif (!rte_dmadev_is_valid_dev(dev_id) ||\n+\t    vchan >= dev->data->dev_conf.max_vchans ||\n+\t    src == NULL || dst == NULL || length == 0)\n+\t\treturn -EINVAL;\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);\n+#endif\n+\n+\treturn (*dev->copy)(dev, vchan, src, dst, length, flags);\n+}\n+\n+/**\n+ * 
@warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Enqueue a scatter list copy operation onto the virtual DMA channel.\n+ *\n+ * This queues up a scatter list copy operation to be performed by hardware, if\n+ * the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell\n+ * to begin this operation, otherwise do not trigger doorbell.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param vchan\n+ *   The identifier of virtual DMA channel.\n+ * @param src\n+ *   The pointer of source scatter entry array.\n+ * @param dst\n+ *   The pointer of destination scatter entry array.\n+ * @param nb_src\n+ *   The number of source scatter entry.\n+ * @param nb_dst\n+ *   The number of destination scatter entry.\n+ * @param flags\n+ *   An flags for this operation.\n+ *   @see RTE_DMA_OP_FLAG_*\n+ *\n+ * @return\n+ *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.\n+ *   - <0: Error code returned by the driver copy scatterlist function.\n+ */\n+__rte_experimental\n+static inline int\n+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,\n+\t\t   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,\n+\t\t   uint64_t flags)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\n+#ifdef RTE_DMADEV_DEBUG\n+\tif (!rte_dmadev_is_valid_dev(dev_id) ||\n+\t    vchan >= dev->data->dev_conf.max_vchans ||\n+\t    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)\n+\t\treturn -EINVAL;\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);\n+#endif\n+\n+\treturn (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Enqueue a fill operation onto the virtual DMA channel.\n+ *\n+ * This queues up a fill operation to be performed by hardware, if the 'flags'\n+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin\n+ * this operation, otherwise do not trigger doorbell.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param vchan\n+ *   The identifier of virtual DMA channel.\n+ * @param pattern\n+ *   The pattern to populate the destination buffer with.\n+ * @param dst\n+ *   The address of the destination buffer.\n+ * @param length\n+ *   The length of the destination buffer.\n+ * @param flags\n+ *   An flags for this operation.\n+ *   @see RTE_DMA_OP_FLAG_*\n+ *\n+ * @return\n+ *   - 0..UINT16_MAX: index of enqueued fill job.\n+ *   - <0: Error code returned by the driver fill function.\n+ */\n+__rte_experimental\n+static inline int\n+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,\n+\t\trte_iova_t dst, uint32_t length, uint64_t flags)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\n+#ifdef RTE_DMADEV_DEBUG\n+\tif (!rte_dmadev_is_valid_dev(dev_id) ||\n+\t    vchan >= dev->data->dev_conf.max_vchans ||\n+\t    dst == NULL || length == 0)\n+\t\treturn -EINVAL;\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);\n+#endif\n+\n+\treturn (*dev->fill)(dev, vchan, pattern, dst, length, flags);\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Trigger hardware to begin performing enqueued operations.\n+ *\n+ * This API is used to write the \"doorbell\" to the hardware to trigger it\n+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param vchan\n+ *   The identifier of virtual DMA 
+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Enqueue a fill operation onto the virtual DMA channel.\n+ *\n+ * This queues up a fill operation to be performed by hardware. If the 'flags'\n+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to\n+ * begin this operation; otherwise the doorbell is not triggered.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param vchan\n+ *   The identifier of the virtual DMA channel.\n+ * @param pattern\n+ *   The pattern to populate the destination buffer with.\n+ * @param dst\n+ *   The address of the destination buffer.\n+ * @param length\n+ *   The length of the destination buffer.\n+ * @param flags\n+ *   Flags for this operation.\n+ *   @see RTE_DMA_OP_FLAG_*\n+ *\n+ * @return\n+ *   - 0..UINT16_MAX: index of the enqueued fill job.\n+ *   - <0: Error code returned by the driver fill function.\n+ */\n+__rte_experimental\n+static inline int\n+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,\n+\t\trte_iova_t dst, uint32_t length, uint64_t flags)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\n+#ifdef RTE_DMADEV_DEBUG\n+\tif (!rte_dmadev_is_valid_dev(dev_id) ||\n+\t    vchan >= dev->data->dev_conf.max_vchans ||\n+\t    length == 0)\n+\t\treturn -EINVAL;\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);\n+#endif\n+\n+\treturn (*dev->fill)(dev, vchan, pattern, dst, length, flags);\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Trigger hardware to begin performing enqueued operations.\n+ *\n+ * This API is used to write the \"doorbell\" to the hardware to trigger it\n+ * to begin the operations previously enqueued by\n+ * rte_dmadev_copy/copy_sg/fill().\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param vchan\n+ *   The identifier of the virtual DMA channel.\n+ *\n+ * @return\n+ *   - =0: Successfully triggered hardware.\n+ *   - <0: Failed to trigger hardware.\n+ */\n+__rte_experimental\n+static inline int\n+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\n+#ifdef RTE_DMADEV_DEBUG\n+\tif (!rte_dmadev_is_valid_dev(dev_id) ||\n+\t    vchan >= dev->data->dev_conf.max_vchans)\n+\t\treturn -EINVAL;\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);\n+#endif\n+\n+\treturn (*dev->submit)(dev, vchan);\n+}\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Returns the number of operations that have been successfully completed.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param vchan\n+ *   The identifier of the virtual DMA channel.\n+ * @param nb_cpls\n+ *   The maximum number of completed operations that can be processed.\n+ * @param[out] last_idx\n+ *   The last completed operation's index.\n+ *   If not required, NULL can be passed in.\n+ * @param[out] has_error\n+ *   Indicates if there is a transfer error.\n+ *   If not required, NULL can be passed in.\n+ *\n+ * @return\n+ *   The number of operations that successfully completed. This return value\n+ *   must be less than or equal to the value of nb_cpls.\n+ */\n+__rte_experimental\n+static inline uint16_t\n+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,\n+\t\t     uint16_t *last_idx, bool *has_error)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\tuint16_t idx;\n+\tbool err;\n+\n+#ifdef RTE_DMADEV_DEBUG\n+\tif (!rte_dmadev_is_valid_dev(dev_id) ||\n+\t    vchan >= dev->data->dev_conf.max_vchans ||\n+\t    nb_cpls == 0)\n+\t\treturn 0;\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);\n+#endif\n+\n+\t/* Ensure the pointer values are non-NULL to simplify drivers.\n+\t * In most cases these should be compile-time evaluated, since this is\n+\t * an inline function.\n+\t * - If NULL is explicitly passed as a parameter, then the compiler\n+\t *   knows the value is NULL.\n+\t * - If the address of a local variable is passed as a parameter, then\n+\t *   the compiler knows it is non-NULL.\n+\t */\n+\tif (last_idx == NULL)\n+\t\tlast_idx = &idx;\n+\tif (has_error == NULL)\n+\t\thas_error = &err;\n+\n+\t*has_error = false;\n+\treturn (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);\n+}\n+\n
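+/*\n+ * Illustrative polling sketch only: drain up to a burst of completions and\n+ * check for errors. 'BURST' is a hypothetical application-defined constant.\n+ *\n+ *\tuint16_t last_idx;\n+ *\tbool has_error;\n+ *\tuint16_t n = rte_dmadev_completed(dev_id, vchan, BURST, &last_idx,\n+ *\t\t\t\t\t  &has_error);\n+ *\tif (has_error)\n+ *\t\t... inspect per-operation status, e.g. via\n+ *\t\trte_dmadev_completed_status() ...\n+ */\n+\n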
+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Returns the number of operations that have completed; the completed\n+ * operations may have succeeded or failed.\n+ *\n+ * @param dev_id\n+ *   The identifier of the device.\n+ * @param vchan\n+ *   The identifier of the virtual DMA channel.\n+ * @param nb_cpls\n+ *   Indicates the size of the status array.\n+ * @param[out] last_idx\n+ *   The last completed operation's index.\n+ *   If not required, NULL can be passed in.\n+ * @param[out] status\n+ *   Array to hold the status code of each completed operation.\n+ *   @see enum rte_dma_status_code\n+ *\n+ * @return\n+ *   The number of operations that completed. This return value must be less\n+ *   than or equal to the value of nb_cpls.\n+ */\n+__rte_experimental\n+static inline uint16_t\n+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,\n+\t\t\t    const uint16_t nb_cpls, uint16_t *last_idx,\n+\t\t\t    enum rte_dma_status_code *status)\n+{\n+\tstruct rte_dmadev *dev = &rte_dmadevices[dev_id];\n+\tuint16_t idx;\n+\n+#ifdef RTE_DMADEV_DEBUG\n+\tif (!rte_dmadev_is_valid_dev(dev_id) ||\n+\t    vchan >= dev->data->dev_conf.max_vchans ||\n+\t    nb_cpls == 0 || status == NULL)\n+\t\treturn 0;\n+\tRTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);\n+#endif\n+\n+\tif (last_idx == NULL)\n+\t\tlast_idx = &idx;\n+\n+\treturn (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);\n+}\n+\n
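+/*\n+ * Illustrative sketch only: retrieve per-operation status codes. 'BURST' and\n+ * 'handle_failed_op' are hypothetical placeholders, and the success status is\n+ * assumed here to be the zero-valued first entry of enum rte_dma_status_code.\n+ *\n+ *\tenum rte_dma_status_code status[BURST];\n+ *\tuint16_t last_idx, i;\n+ *\tuint16_t n = rte_dmadev_completed_status(dev_id, vchan, BURST,\n+ *\t\t\t\t\t\t &last_idx, status);\n+ *\tfor (i = 0; i < n; i++)\n+ *\t\tif (status[i] != 0)\n+ *\t\t\thandle_failed_op(i, status[i]);\n+ */\n+\n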
+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_DMADEV_H_ */\ndiff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h\nnew file mode 100644\nindex 0000000..0122f67\n--- /dev/null\n+++ b/lib/dmadev/rte_dmadev_core.h\n@@ -0,0 +1,182 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2021 HiSilicon Limited.\n+ * Copyright(c) 2021 Intel Corporation.\n+ */\n+\n+#ifndef _RTE_DMADEV_CORE_H_\n+#define _RTE_DMADEV_CORE_H_\n+\n+/**\n+ * @file\n+ *\n+ * RTE DMA Device internal header.\n+ *\n+ * This header contains internal data types that are used by DMA devices\n+ * in order to expose their ops to the class.\n+ *\n+ * Applications should not use these APIs directly.\n+ */\n+\n+struct rte_dmadev;\n+\n+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,\n+\t\t\t\t     struct rte_dmadev_info *dev_info,\n+\t\t\t\t     uint32_t info_sz);\n+/**< @internal Used to get device information of a device. */\n+\n+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,\n+\t\t\t\t      const struct rte_dmadev_conf *dev_conf);\n+/**< @internal Used to configure a device. */\n+\n+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);\n+/**< @internal Used to start a configured device. */\n+\n+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);\n+/**< @internal Used to stop a configured device. */\n+\n+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);\n+/**< @internal Used to close a configured device. */\n+\n+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,\n+\t\t\t\tconst struct rte_dmadev_vchan_conf *conf);\n+/**< @internal Used to allocate and set up a virtual DMA channel. */\n+\n+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,\n+\t\t\tuint16_t vchan, struct rte_dmadev_stats *stats,\n+\t\t\tuint32_t stats_sz);\n+/**< @internal Used to retrieve basic statistics. */\n+\n+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);\n+/**< @internal Used to reset basic statistics. */\n+\n+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);\n+/**< @internal Used to dump internal information. */\n+\n+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);\n+/**< @internal Used to start dmadev selftest. */\n+\n+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,\n+\t\t\t\t rte_iova_t src, rte_iova_t dst,\n+\t\t\t\t uint32_t length, uint64_t flags);\n+/**< @internal Used to enqueue a copy operation. */\n+\n+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,\n+\t\t\t\t    const struct rte_dmadev_sge *src,\n+\t\t\t\t    const struct rte_dmadev_sge *dst,\n+\t\t\t\t    uint16_t nb_src, uint16_t nb_dst,\n+\t\t\t\t    uint64_t flags);\n+/**< @internal Used to enqueue a scatter-gather list copy operation. */\n+\n+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,\n+\t\t\t\t uint64_t pattern, rte_iova_t dst,\n+\t\t\t\t uint32_t length, uint64_t flags);\n+/**< @internal Used to enqueue a fill operation. */\n+\n+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);\n+/**< @internal Used to trigger hardware to begin working. */\n+\n+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,\n+\t\t\t\tuint16_t vchan, const uint16_t nb_cpls,\n+\t\t\t\tuint16_t *last_idx, bool *has_error);\n+/**< @internal Used to return the number of successfully completed operations. */\n+\n+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,\n+\t\t\tuint16_t vchan, const uint16_t nb_cpls,\n+\t\t\tuint16_t *last_idx, enum rte_dma_status_code *status);\n+/**< @internal Used to return the number of completed operations. */\n+\n+/**\n+ * Possible states of a DMA device.\n+ */\n+enum rte_dmadev_state {\n+\tRTE_DMADEV_UNUSED = 0,\n+\t/**< Device is unused before being probed. */\n+\tRTE_DMADEV_ATTACHED,\n+\t/**< Device is attached when allocated in probing. */\n+};\n+\n+/**\n+ * DMA device operations function pointer table.\n+ */\n+struct rte_dmadev_ops {\n+\trte_dmadev_info_get_t dev_info_get;\n+\trte_dmadev_configure_t dev_configure;\n+\trte_dmadev_start_t dev_start;\n+\trte_dmadev_stop_t dev_stop;\n+\trte_dmadev_close_t dev_close;\n+\trte_dmadev_vchan_setup_t vchan_setup;\n+\trte_dmadev_stats_get_t stats_get;\n+\trte_dmadev_stats_reset_t stats_reset;\n+\trte_dmadev_dump_t dev_dump;\n+\trte_dmadev_selftest_t dev_selftest;\n+};\n+\n+/**\n+ * @internal\n+ * The data part, with no function pointers, associated with each DMA device.\n+ *\n+ * This structure is safe to place in shared memory to be common among\n+ * different processes in a multi-process configuration.\n+ */\n+struct rte_dmadev_data {\n+\tvoid *dev_private;\n+\t/**< PMD-specific private data.\n+\t * This is a copy of the 'dev_private' field of 'struct rte_dmadev'\n+\t * in the primary process; it is used by secondary processes to get\n+\t * the dev_private information.\n+\t */\n+\tuint16_t dev_id; /**< Device [external] identifier. */\n+\tchar dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name. */\n+\tstruct rte_dmadev_conf dev_conf; /**< DMA device configuration. */\n+\tuint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */\n+\tuint64_t reserved[2]; /**< Reserved for future fields. */\n+} __rte_cache_aligned;\n+\n
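+/*\n+ * Illustrative sketch only: a driver typically provides a static ops table\n+ * and assigns it to the 'dev_ops' field of its 'struct rte_dmadev' at probe\n+ * time. All 'hypothetical_*' names below are placeholders, not part of this\n+ * library.\n+ *\n+ *\tstatic const struct rte_dmadev_ops hypothetical_ops = {\n+ *\t\t.dev_info_get = hypothetical_info_get,\n+ *\t\t.dev_configure = hypothetical_configure,\n+ *\t\t.dev_start = hypothetical_start,\n+ *\t\t.dev_stop = hypothetical_stop,\n+ *\t\t.dev_close = hypothetical_close,\n+ *\t\t.vchan_setup = hypothetical_vchan_setup,\n+ *\t};\n+ */\n+\n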
+/**\n+ * @internal\n+ * The generic data structure associated with each DMA device.\n+ *\n+ * The dataplane APIs are located at the beginning of the structure, along\n+ * with the pointer to where all the data elements for the particular device\n+ * are stored in shared memory. This split scheme allows the function pointers\n+ * and driver data to be per-process, while the actual configuration data for\n+ * the device is shared.\n+ * The 'dev_private' field is placed in the first cache line to optimize\n+ * performance because the PMD mainly depends on this field.\n+ */\n+struct rte_dmadev {\n+\trte_dmadev_copy_t copy;\n+\trte_dmadev_copy_sg_t copy_sg;\n+\trte_dmadev_fill_t fill;\n+\trte_dmadev_submit_t submit;\n+\trte_dmadev_completed_t completed;\n+\trte_dmadev_completed_status_t completed_status;\n+\tvoid *reserved_ptr; /**< Reserved for future IO function. */\n+\tvoid *dev_private;\n+\t/**< PMD-specific private data.\n+\t *\n+\t * - In the primary process, after the dmadev is allocated by\n+\t * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should\n+\t * initialize this field and copy its value to the 'dev_private'\n+\t * field of 'struct rte_dmadev_data', which is pointed to by the\n+\t * 'data' field.\n+\t *\n+\t * - In a secondary process, the dmadev framework initializes this\n+\t * field by copying from the 'dev_private' field of\n+\t * 'struct rte_dmadev_data', which was initialized by the primary\n+\t * process.\n+\t *\n+\t * @note It is the primary process's responsibility to deinitialize\n+\t * this field after invoking rte_dmadev_pmd_release() at the PCI/SoC\n+\t * device removal stage.\n+\t */\n+\tstruct rte_dmadev_data *data; /**< Pointer to device data. */\n+\tconst struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */\n+\tstruct rte_device *device;\n+\t/**< Device info supplied during device initialization. */\n+\tenum rte_dmadev_state state; /**< Flag indicating the device state. */\n+\tuint64_t reserved[2]; /**< Reserved for future fields. */\n+} __rte_cache_aligned;\n+\n+extern struct rte_dmadev rte_dmadevices[];\n+\n+#endif /* _RTE_DMADEV_CORE_H_ */\ndiff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h\nnew file mode 100644\nindex 0000000..45141f9\n--- /dev/null\n+++ b/lib/dmadev/rte_dmadev_pmd.h\n@@ -0,0 +1,72 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2021 HiSilicon Limited.\n+ */\n+\n+#ifndef _RTE_DMADEV_PMD_H_\n+#define _RTE_DMADEV_PMD_H_\n+\n+/**\n+ * @file\n+ *\n+ * RTE DMA Device PMD APIs\n+ *\n+ * Driver-facing APIs for a DMA device. These are not to be called directly\n+ * by any application.\n+ */\n+\n+#include \"rte_dmadev.h\"\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n
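+/*\n+ * Illustrative probe-time flow (sketch only; 'hypothetical_ops' and 'priv'\n+ * are placeholders, not part of this library):\n+ *\n+ *\tstruct rte_dmadev *dev = rte_dmadev_pmd_allocate(\"hypothetical_dma\");\n+ *\tif (dev == NULL)\n+ *\t\treturn -ENOMEM;\n+ *\tdev->dev_ops = &hypothetical_ops;\n+ *\tdev->dev_private = priv;\n+ */\n+\n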
These are not to be called directly by\n+ * any application.\n+ */\n+\n+#include \"rte_dmadev.h\"\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+/**\n+ * @internal\n+ * Allocates a new dmadev slot for an DMA device and returns the pointer\n+ * to that slot for the driver to use.\n+ *\n+ * @param name\n+ *   DMA device name.\n+ *\n+ * @return\n+ *   A pointer to the DMA device slot case of success,\n+ *   NULL otherwise.\n+ */\n+__rte_internal\n+struct rte_dmadev *\n+rte_dmadev_pmd_allocate(const char *name);\n+\n+/**\n+ * @internal\n+ * Release the specified dmadev.\n+ *\n+ * @param dev\n+ *   Device to be released.\n+ *\n+ * @return\n+ *   - 0 on success, negative on error\n+ */\n+__rte_internal\n+int\n+rte_dmadev_pmd_release(struct rte_dmadev *dev);\n+\n+/**\n+ * @internal\n+ * Return the DMA device based on the device name.\n+ *\n+ * @param name\n+ *   DMA device name.\n+ *\n+ * @return\n+ *   A pointer to the DMA device slot case of success,\n+ *   NULL otherwise.\n+ */\n+__rte_internal\n+struct rte_dmadev *\n+rte_dmadev_get_device_by_name(const char *name);\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_DMADEV_PMD_H_ */\ndiff --git a/lib/dmadev/version.map b/lib/dmadev/version.map\nnew file mode 100644\nindex 0000000..0f2ed4b\n--- /dev/null\n+++ b/lib/dmadev/version.map\n@@ -0,0 +1,37 @@\n+EXPERIMENTAL {\n+\tglobal:\n+\n+\trte_dmadev_close;\n+\trte_dmadev_completed;\n+\trte_dmadev_completed_status;\n+\trte_dmadev_configure;\n+\trte_dmadev_copy;\n+\trte_dmadev_copy_sg;\n+\trte_dmadev_count;\n+\trte_dmadev_dump;\n+\trte_dmadev_fill;\n+\trte_dmadev_get_dev_id;\n+\trte_dmadev_info_get;\n+\trte_dmadev_is_valid_dev;\n+\trte_dmadev_selftest;\n+\trte_dmadev_start;\n+\trte_dmadev_stats_get;\n+\trte_dmadev_stats_reset;\n+\trte_dmadev_stop;\n+\trte_dmadev_submit;\n+\trte_dmadev_vchan_setup;\n+\n+\tlocal: *;\n+};\n+\n+INTERNAL {\n+        global:\n+\n+\trte_dmadevices;\n+\trte_dmadev_get_device_by_name;\n+\trte_dmadev_pmd_allocate;\n+\trte_dmadev_pmd_release;\n+\n+\tlocal: *;\n+};\n+\ndiff --git a/lib/meson.build b/lib/meson.build\nindex 1673ca4..68d239f 100644\n--- a/lib/meson.build\n+++ b/lib/meson.build\n@@ -60,6 +60,7 @@ libraries = [\n         'bpf',\n         'graph',\n         'node',\n+        'dmadev',\n ]\n \n if is_windows\n",
    "prefixes": [
        "v7"
    ]
}