@@ -81,6 +81,9 @@
/* rawdev defines */
#define RTE_RAWDEV_MAX_DEVS 64
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
/* ip_fragmentation defines */
#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
#undef RTE_LIBRTE_IP_FRAG_TBL_STAT
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2021 HiSilicon Limited.
+sources = files('rte_dmadev.c')
headers = files('rte_dmadev.h')
indirect_headers += files('rte_dmadev_core.h')
driver_sdk_headers += files('rte_dmadev_pmd.h')
new file mode 100644
@@ -0,0 +1,607 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <inttypes.h>
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
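+/* Per-process table of DMA devices; the fast-path inline functions in
+ * rte_dmadev.h index it directly by dev_id.
+ */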
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+ struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+ if (!rte_dmadev_is_valid_dev(dev_id)) { \
+ RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u", dev_id); \
+ return retval; \
+ } \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+ size_t name_len;
+
+ if (name == NULL) {
+ RTE_DMADEV_LOG(ERR, "Name can't be NULL");
+ return -EINVAL;
+ }
+
+ name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+ if (name_len == 0) {
+ RTE_DMADEV_LOG(ERR, "Zero length DMA device name");
+ return -EINVAL;
+ }
+ if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+ RTE_DMADEV_LOG(ERR, "DMA device name is too long");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+ uint16_t i;
+
+ for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+ if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+ return i;
+ }
+
+ return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev *
+dmadev_find(const char *name)
+{
+ uint16_t i;
+
+ for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+ if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+ (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+ return &rte_dmadevices[i];
+ }
+
+ return NULL;
+}
+
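+/* Reserve (primary process) or look up (secondary process) the memzone
+ * that carries the device data shared between processes.
+ */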
+static int
+dmadev_shared_data_prepare(void)
+{
+ const struct rte_memzone *mz;
+
+ if (dmadev_shared_data == NULL) {
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* Allocate shared memory for device data. */
+ mz = rte_memzone_reserve(mz_rte_dmadev_data,
+ sizeof(*dmadev_shared_data),
+ rte_socket_id(), 0);
+ } else
+ mz = rte_memzone_lookup(mz_rte_dmadev_data);
+ if (mz == NULL)
+ return -ENOMEM;
+
+ dmadev_shared_data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(dmadev_shared_data->data, 0,
+ sizeof(dmadev_shared_data->data));
+ }
+
+ return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+ struct rte_dmadev *dev;
+ uint16_t dev_id;
+
+ dev = dmadev_find(name);
+ if (dev != NULL) {
+ RTE_DMADEV_LOG(ERR, "DMA device already allocated");
+ return NULL;
+ }
+
+ if (dmadev_shared_data_prepare() != 0) {
+ RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data");
+ return NULL;
+ }
+
+ dev_id = dmadev_find_free_dev();
+ if (dev_id == RTE_DMADEV_MAX_DEVS) {
+ RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices");
+ return NULL;
+ }
+
+ dev = &rte_dmadevices[dev_id];
+ dev->data = &dmadev_shared_data->data[dev_id];
+ dev->data->dev_id = dev_id;
+ rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+ return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+ struct rte_dmadev *dev;
+ uint16_t i;
+
+ if (dmadev_shared_data_prepare() != 0) {
+ RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data");
+ return NULL;
+ }
+
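+ /* A secondary process can only attach to a device that the primary
+ * process has already created; match it by name in the shared data.
+ */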
+ for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+ if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+ break;
+ }
+ if (i == RTE_DMADEV_MAX_DEVS) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %s is not driven by the primary process",
+ name);
+ return NULL;
+ }
+
+ dev = &rte_dmadevices[i];
+ dev->data = &dmadev_shared_data->data[i];
+ dev->dev_private = dev->data->dev_private;
+
+ return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+ struct rte_dmadev *dev;
+
+ if (dmadev_check_name(name) != 0)
+ return NULL;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ dev = dmadev_allocate(name);
+ else
+ dev = dmadev_attach_secondary(name);
+
+ if (dev == NULL)
+ return NULL;
+ dev->state = RTE_DMADEV_ATTACHED;
+
+ return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+ void *dev_private_tmp;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ if (dev->state == RTE_DMADEV_UNUSED)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
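+ /* Zero the whole device structure; dev_private is restored afterwards
+ * in the primary process only, so the PMD can still free it on release.
+ */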
+ dev_private_tmp = dev->dev_private;
+ memset(dev, 0, sizeof(struct rte_dmadev));
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ dev->dev_private = dev_private_tmp;
+ dev->state = RTE_DMADEV_UNUSED;
+
+ return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+ if (dmadev_check_name(name) != 0)
+ return NULL;
+ return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+ struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+ if (dev != NULL)
+ return dev->data->dev_id;
+ return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+ return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+ rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+ uint16_t count = 0;
+ uint16_t i;
+
+ for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+ if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+ count++;
+ }
+
+ return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+ const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+ int ret;
+
+ RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+ if (dev_info == NULL)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+ memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+ ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+ sizeof(struct rte_dmadev_info));
+ if (ret != 0)
+ return ret;
+
+ dev_info->device = dev->device;
+ dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+ return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+ struct rte_dmadev_info info;
+ int ret;
+
+ RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+ if (dev_conf == NULL)
+ return -EINVAL;
+
+ if (dev->data->dev_started != 0) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u must be stopped to allow configuration",
+ dev_id);
+ return -EBUSY;
+ }
+
+ ret = rte_dmadev_info_get(dev_id, &info);
+ if (ret != 0) {
+ RTE_DMADEV_LOG(ERR, "Device %u get device info fail", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_vchans == 0) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u configured with zero vchans", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_vchans > info.max_vchans) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u configured with too many vchans", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->enable_silent &&
+ !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+ RTE_DMADEV_LOG(ERR, "Device %u doesn't support silent mode", dev_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+ ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+ sizeof(struct rte_dmadev_conf));
+ if (ret == 0)
+ memcpy(&dev->data->dev_conf, dev_conf,
+ sizeof(struct rte_dmadev_conf));
+
+ return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+ int ret;
+
+ RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+ if (dev->data->dev_conf.nb_vchans == 0) {
+ RTE_DMADEV_LOG(ERR, "Device %u must be configured first",
+ dev_id);
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started != 0) {
+ RTE_DMADEV_LOG(WARNING, "Device %u already started", dev_id);
+ return 0;
+ }
+
+ if (dev->dev_ops->dev_start == NULL)
+ goto mark_started;
+
+ ret = (*dev->dev_ops->dev_start)(dev);
+ if (ret != 0)
+ return ret;
+
+mark_started:
+ dev->data->dev_started = 1;
+ return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+ int ret;
+
+ RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+ if (dev->data->dev_started == 0) {
+ RTE_DMADEV_LOG(WARNING, "Device %u already stopped", dev_id);
+ return 0;
+ }
+
+ if (dev->dev_ops->dev_stop == NULL)
+ goto mark_stopped;
+
+ ret = (*dev->dev_ops->dev_stop)(dev);
+ if (ret != 0)
+ return ret;
+
+mark_stopped:
+ dev->data->dev_started = 0;
+ return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+ RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+ /* Device must be stopped before it can be closed */
+ if (dev->data->dev_started == 1) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u must be stopped before closing", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+ return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+ const struct rte_dmadev_vchan_conf *conf)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+ struct rte_dmadev_info info;
+ bool src_is_dev, dst_is_dev;
+ int ret;
+
+ RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+ if (conf == NULL)
+ return -EINVAL;
+
+ if (dev->data->dev_started != 0) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u must be stopped to allow configuration",
+ dev_id);
+ return -EBUSY;
+ }
+
+ ret = rte_dmadev_info_get(dev_id, &info);
+ if (ret != 0) {
+ RTE_DMADEV_LOG(ERR, "Device %u get device info fail", dev_id);
+ return -EINVAL;
+ }
+ if (dev->data->dev_conf.nb_vchans == 0) {
+ RTE_DMADEV_LOG(ERR, "Device %u must be configured first",
+ dev_id);
+ return -EINVAL;
+ }
+ if (vchan >= info.nb_vchans) {
+ RTE_DMADEV_LOG(ERR, "Device %u vchan out range!", dev_id);
+ return -EINVAL;
+ }
+ if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+ conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+ conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+ conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+ RTE_DMADEV_LOG(ERR, "Device %u direction invalid!", dev_id);
+ return -EINVAL;
+ }
+ if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+ !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u doesn't support mem2mem transfers", dev_id);
+ return -EINVAL;
+ }
+ if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+ !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u doesn't support mem2dev transfers", dev_id);
+ return -EINVAL;
+ }
+ if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+ !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u doesn't support dev2mem transfers", dev_id);
+ return -EINVAL;
+ }
+ if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+ !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u doesn't support dev2dev transfers", dev_id);
+ return -EINVAL;
+ }
+ if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u number of descriptors invalid", dev_id);
+ return -EINVAL;
+ }
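+ /* A device-type port description is required exactly when the
+ * corresponding end of the transfer is a device.
+ */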
+ src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+ conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+ if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+ (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u source port type invalid", dev_id);
+ return -EINVAL;
+ }
+ dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+ conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+ if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+ (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u destination port type invalid", dev_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+ return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+ sizeof(struct rte_dmadev_vchan_conf));
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+ struct rte_dmadev_stats *stats)
+{
+ const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+ RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+ if (stats == NULL)
+ return -EINVAL;
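+ /* RTE_DMADEV_ALL_VCHAN is accepted as a wildcard covering all
+ * configured virtual channels.
+ */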
+ if (vchan >= dev->data->dev_conf.nb_vchans &&
+ vchan != RTE_DMADEV_ALL_VCHAN) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u vchan %u out of range", dev_id, vchan);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+ memset(stats, 0, sizeof(struct rte_dmadev_stats));
+ return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+ sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+ RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+ if (vchan >= dev->data->dev_conf.nb_vchans &&
+ vchan != RTE_DMADEV_ALL_VCHAN) {
+ RTE_DMADEV_LOG(ERR,
+ "Device %u vchan %u out of range", dev_id, vchan);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+ return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dmadev_capability_name(uint64_t capability)
+{
+ static const struct {
+ uint64_t capability;
+ const char *name;
+ } capa_names[] = {
+ { RTE_DMADEV_CAPA_MEM_TO_MEM, "mem2mem" },
+ { RTE_DMADEV_CAPA_MEM_TO_DEV, "mem2dev" },
+ { RTE_DMADEV_CAPA_DEV_TO_MEM, "dev2mem" },
+ { RTE_DMADEV_CAPA_DEV_TO_DEV, "dev2dev" },
+ { RTE_DMADEV_CAPA_SVA, "sva" },
+ { RTE_DMADEV_CAPA_SILENT, "silent" },
+ { RTE_DMADEV_CAPA_OPS_COPY, "copy" },
+ { RTE_DMADEV_CAPA_OPS_COPY_SG, "copy_sg" },
+ { RTE_DMADEV_CAPA_OPS_FILL, "fill" },
+ };
+
+ const char *name = "unknown";
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(capa_names); i++) {
+ if (capability == capa_names[i].capability) {
+ name = capa_names[i].name;
+ break;
+ }
+ }
+
+ return name;
+}
+
+static void
+dmadev_dump_capability(FILE *f, uint64_t dev_capa)
+{
+ uint64_t capa;
+
+ fprintf(f, " dev_capa: 0x%" PRIx64 " -", dev_capa);
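+ /* Isolate and print the lowest set capability bit on each pass. */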
+ while (dev_capa > 0) {
+ capa = 1ull << __builtin_ctzll(dev_capa);
+ fprintf(f, " %s", dmadev_capability_name(capa));
+ dev_capa &= ~capa;
+ }
+ fprintf(f, "\n");
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+ const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+ struct rte_dmadev_info info;
+ int ret;
+
+ RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+ if (f == NULL)
+ return -EINVAL;
+
+ ret = rte_dmadev_info_get(dev_id, &info);
+ if (ret != 0) {
+ RTE_DMADEV_LOG(ERR, "Device %u get device info fail", dev_id);
+ return -EINVAL;
+ }
+
+ fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+ dev->data->dev_id,
+ dev->data->dev_name,
+ dev->data->dev_started ? "started" : "stopped");
+ dmadev_dump_capability(f, info.dev_capa);
+ fprintf(f, " max_vchans_supported: %u\n", info.max_vchans);
+ fprintf(f, " nb_vchans_configured: %u\n", info.nb_vchans);
+ fprintf(f, " silent_mode: %s\n",
+ dev->data->dev_conf.enable_silent ? "on" : "off");
+
+ if (dev->dev_ops->dev_dump != NULL)
+ return (*dev->dev_ops->dev_dump)(dev, f);
+
+ return 0;
+}
@@ -787,9 +787,21 @@ struct rte_dmadev_sge {
* - other values < 0 on failure.
*/
__rte_experimental
-int
+static inline int
rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
- uint32_t length, uint64_t flags);
+ uint32_t length, uint64_t flags)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
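+ /* Parameter checks are compiled in only when RTE_DMADEV_DEBUG is
+ * defined, keeping the fast path free of extra branches otherwise.
+ */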
+#ifdef RTE_DMADEV_DEBUG
+ if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+ vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+ return -EINVAL;
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+ return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
/**
* @warning
@@ -825,10 +837,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
* - other values < 0 on failure.
*/
__rte_experimental
-int
+static inline int
rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
- uint64_t flags);
+ uint64_t flags)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+ if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+ vchan >= dev->data->dev_conf.nb_vchans ||
+ src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+ return -EINVAL;
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+ return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
/**
* @warning
@@ -860,9 +885,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
* - other values < 0 on failure.
*/
__rte_experimental
-int
+static inline int
rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
- rte_iova_t dst, uint32_t length, uint64_t flags);
+ rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+ if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+ vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+ return -EINVAL;
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+ return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
/**
* @warning
@@ -882,8 +919,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
* 0 on success. Otherwise negative value is returned.
*/
__rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+ if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+ vchan >= dev->data->dev_conf.nb_vchans)
+ return -EINVAL;
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+ return (*dev->submit)(dev, vchan);
+}
/**
* @warning
@@ -909,9 +958,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
* must be less than or equal to the value of nb_cpls.
*/
__rte_experimental
-uint16_t
+static inline uint16_t
rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
- uint16_t *last_idx, bool *has_error);
+ uint16_t *last_idx, bool *has_error)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+ uint16_t idx;
+ bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+ if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+ vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
+ return 0;
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+ /* Ensure the pointer values are non-NULL to simplify drivers.
+ * In most cases these should be compile-time evaluated, since this is
+ * an inline function.
+ * - If NULL is explicitly passed as a parameter, the compiler knows
+ * the value is NULL.
+ * - If the address of a local variable is passed, the compiler can
+ * see that it is non-NULL.
+ */
+ if (last_idx == NULL)
+ last_idx = &idx;
+ if (has_error == NULL)
+ has_error = &err;
+
+ *has_error = false;
+ return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
/**
* @warning
@@ -941,10 +1018,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
* status array are also set.
*/
__rte_experimental
-uint16_t
+static inline uint16_t
rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
const uint16_t nb_cpls, uint16_t *last_idx,
- enum rte_dma_status_code *status);
+ enum rte_dma_status_code *status)
+{
+ struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+ uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+ if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+ vchan >= dev->data->dev_conf.nb_vchans ||
+ nb_cpls == 0 || status == NULL)
+ return 0;
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+ if (last_idx == NULL)
+ last_idx = &idx;
+
+ return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
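+
+/* Typical data-path usage, as a sketch (the device is assumed to be
+ * configured and started; src_iova, dst_iova, len, burst, last_idx and
+ * has_error are placeholder variables):
+ *
+ * rte_dmadev_copy(dev_id, vchan, src_iova, dst_iova, len, 0);
+ * rte_dmadev_submit(dev_id, vchan);
+ * ...
+ * nb = rte_dmadev_completed(dev_id, vchan, burst, &last_idx, &has_error);
+ */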
#ifdef __cplusplus
}
@@ -182,4 +182,6 @@ struct rte_dmadev {
uint64_t reserved[2]; /**< Reserved for future fields. */
} __rte_cache_aligned;
+extern struct rte_dmadev rte_dmadevices[];
+
#endif /* _RTE_DMADEV_CORE_H_ */
@@ -26,6 +26,7 @@ EXPERIMENTAL {
INTERNAL {
global:
+ rte_dmadevices;
rte_dmadev_get_device_by_name;
rte_dmadev_pmd_allocate;
rte_dmadev_pmd_release;