From patchwork Fri Jan 26 08:57:25 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: fengchengwen
X-Patchwork-Id: 136180
X-Patchwork-Delegate: thomas@monjalon.net
From: Chengwen Feng <fengchengwen@huawei.com>
Subject: [PATCH 1/2] dma/skeleton: support SG copy ops
Date: Fri, 26 Jan 2024 08:57:25 +0000
Message-ID: <20240126085726.54581-2-fengchengwen@huawei.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20240126085726.54581-1-fengchengwen@huawei.com>
References: <20240126085726.54581-1-fengchengwen@huawei.com>

Add support for scatter-gather (SG) copy operations.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 drivers/dma/skeleton/skeleton_dmadev.c | 96 ++++++++++++++++++++++++--
 drivers/dma/skeleton/skeleton_dmadev.h | 28 ++++++--
 2 files changed, 113 insertions(+), 11 deletions(-)
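Note (not part of the patch): the new path is reached through the generic
dmadev API. Below is a minimal caller-side sketch, assuming `dev_id` is an
already-configured skeleton dmadev with one vchan; `issue_sg_copy` and its
buffer parameters are hypothetical. Since the driver reports
RTE_DMA_CAPA_SVA, virtual addresses can be used directly as IOVAs:

  #include <rte_dmadev.h>

  /* Gather two equal-length source segments into one destination buffer. */
  static int
  issue_sg_copy(int16_t dev_id, void *src0, void *src1, void *dst, uint32_t seg_len)
  {
          struct rte_dma_sge src[2] = {
                  { .addr = (rte_iova_t)(uintptr_t)src0, .length = seg_len },
                  { .addr = (rte_iova_t)(uintptr_t)src1, .length = seg_len },
          };
          struct rte_dma_sge dst_sge[1] = {
                  { .addr = (rte_iova_t)(uintptr_t)dst, .length = 2 * seg_len },
          };

          /* nb_src/nb_dst must not exceed dev_info.max_sges (4 for this driver).
           * Returns the ring index of the enqueued op, or a negative errno. */
          return rte_dma_copy_sg(dev_id, 0, src, dst_sge, 2, 1,
                                 RTE_DMA_OP_FLAG_SUBMIT);
  }

Internally, do_copy_sg() treats the dst segment list as one flat buffer:
each src segment is copied at the running byte offset of all preceding src
segments, and do_copy_sg_one() first locates the dst segment that contains
that offset.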
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
index eab03852dd..d1d257a064 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.c
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021-2024 HiSilicon Limited
  */
 
 #include 
@@ -37,10 +37,12 @@ skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
 			     RTE_DMA_CAPA_SVA |
-			     RTE_DMA_CAPA_OPS_COPY;
+			     RTE_DMA_CAPA_OPS_COPY |
+			     RTE_DMA_CAPA_OPS_COPY_SG;
 	dev_info->max_vchans = 1;
 	dev_info->max_desc = SKELDMA_MAX_DESC;
 	dev_info->min_desc = SKELDMA_MIN_DESC;
+	dev_info->max_sges = SKELDMA_MAX_SGES;
 
 	return 0;
 }
@@ -55,6 +57,49 @@ skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
 	return 0;
 }
 
+static inline void
+do_copy_sg_one(struct rte_dma_sge *src, struct rte_dma_sge *dst, uint16_t nb_dst, uint64_t offset)
+{
+	uint32_t src_off = 0, dst_off = 0;
+	uint32_t copy_len = 0;
+	uint64_t tmp = 0;
+	uint16_t i;
+
+	/* Locate the segment from which the copy is started. */
+	for (i = 0; i < nb_dst; i++) {
+		tmp += dst[i].length;
+		if (offset < tmp) {
+			copy_len = tmp - offset;
+			dst_off = dst[i].length - copy_len;
+			break;
+		}
+	}
+
+	for (/* Use the above index */; i < nb_dst; i++, copy_len = dst[i].length) {
+		copy_len = RTE_MIN(copy_len, src->length - src_off);
+		rte_memcpy((uint8_t *)(uintptr_t)dst[i].addr + dst_off,
+			   (uint8_t *)(uintptr_t)src->addr + src_off,
+			   copy_len);
+		src_off += copy_len;
+		if (src_off >= src->length)
+			break;
+		dst_off = 0;
+	}
+}
+
+static inline void
+do_copy_sg(struct skeldma_desc *desc)
+{
+	uint64_t offset = 0;
+	uint16_t i;
+
+	for (i = 0; i < desc->copy_sg.nb_src; i++) {
+		do_copy_sg_one(&desc->copy_sg.src[i], desc->copy_sg.dst,
+			       desc->copy_sg.nb_dst, offset);
+		offset += desc->copy_sg.src[i].length;
+	}
+}
+
 static uint32_t
 cpucopy_thread(void *param)
 {
@@ -76,9 +121,13 @@ cpucopy_thread(void *param)
 			rte_delay_us_sleep(SLEEP_US_VAL);
 			continue;
 		}
-
 		hw->zero_req_count = 0;
-		rte_memcpy(desc->dst, desc->src, desc->len);
+
+		if (desc->op == SKELDMA_OP_COPY)
+			rte_memcpy(desc->copy.dst, desc->copy.src, desc->copy.len);
+		else if (desc->op == SKELDMA_OP_COPY_SG)
+			do_copy_sg(desc);
+
 		__atomic_fetch_add(&hw->completed_count, 1, __ATOMIC_RELEASE);
 		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
 	}
@@ -368,10 +417,42 @@ skeldma_copy(void *dev_private, uint16_t vchan,
 	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
 	if (ret)
 		return -ENOSPC;
-	desc->src = (void *)(uintptr_t)src;
-	desc->dst = (void *)(uintptr_t)dst;
-	desc->len = length;
+	desc->op = SKELDMA_OP_COPY;
+	desc->ridx = hw->ridx;
+	desc->copy.src = (void *)(uintptr_t)src;
+	desc->copy.dst = (void *)(uintptr_t)dst;
+	desc->copy.len = length;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return hw->ridx++;
+}
+
+static int
+skeldma_copy_sg(void *dev_private, uint16_t vchan,
+		const struct rte_dma_sge *src,
+		const struct rte_dma_sge *dst,
+		uint16_t nb_src, uint16_t nb_dst,
+		uint64_t flags)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->op = SKELDMA_OP_COPY_SG;
 	desc->ridx = hw->ridx;
+	memcpy(desc->copy_sg.src, src, sizeof(*src) * nb_src);
+	memcpy(desc->copy_sg.dst, dst, sizeof(*dst) * nb_dst);
+	desc->copy_sg.nb_src = nb_src;
+	desc->copy_sg.nb_dst = nb_dst;
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
 		submit(hw, desc);
 	else
@@ -491,6 +572,7 @@ skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
 	dev->dev_ops = &skeldma_ops;
 	dev->fp_obj->dev_private = dev->data->dev_private;
 	dev->fp_obj->copy = skeldma_copy;
+	dev->fp_obj->copy_sg = skeldma_copy_sg;
 	dev->fp_obj->submit = skeldma_submit;
 	dev->fp_obj->completed = skeldma_completed;
 	dev->fp_obj->completed_status = skeldma_completed_status;
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
index 3582db852a..7d32dd5095 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.h
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -1,20 +1,40 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021-2024 HiSilicon Limited
  */
 
 #ifndef SKELETON_DMADEV_H
 #define SKELETON_DMADEV_H
 
+#include 
 #include 
 #include 
 
 #define SKELDMA_ARG_LCORE	"lcore"
 
+#define SKELDMA_MAX_SGES	4
+
+enum skeldma_op {
+	SKELDMA_OP_COPY,
+	SKELDMA_OP_COPY_SG,
+};
+
 struct skeldma_desc {
-	void *src;
-	void *dst;
-	uint32_t len;
+	enum skeldma_op op;
 	uint16_t ridx; /* ring idx */
+
+	union {
+		struct {
+			void *src;
+			void *dst;
+			uint32_t len;
+		} copy;
+		struct {
+			struct rte_dma_sge src[SKELDMA_MAX_SGES];
+			struct rte_dma_sge dst[SKELDMA_MAX_SGES];
+			uint16_t nb_src;
+			uint16_t nb_dst;
+		} copy_sg;
+	};
 };
 
 struct skeldma_hw {