From patchwork Wed Jun 8 08:50:05 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: fengchengwen X-Patchwork-Id: 112535 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 966D3A0548; Wed, 8 Jun 2022 10:56:39 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 8DCEF427F6; Wed, 8 Jun 2022 10:56:32 +0200 (CEST) Received: from szxga01-in.huawei.com (szxga01-in.huawei.com [45.249.212.187]) by mails.dpdk.org (Postfix) with ESMTP id C286940689 for ; Wed, 8 Jun 2022 10:56:29 +0200 (CEST) Received: from dggpeml500024.china.huawei.com (unknown [172.30.72.54]) by szxga01-in.huawei.com (SkyGuard) with ESMTP id 4LJ1Jx5zr3zjXPb; Wed, 8 Jun 2022 16:55:29 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by dggpeml500024.china.huawei.com (7.185.36.10) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Wed, 8 Jun 2022 16:56:27 +0800 From: Chengwen Feng To: , , CC: , , Subject: [PATCH 1/3] dma/skeleton: fix return last-idx when no memcopy completed Date: Wed, 8 Jun 2022 16:50:05 +0800 Message-ID: <20220608085007.10679-2-fengchengwen@huawei.com> X-Mailer: git-send-email 2.33.0 In-Reply-To: <20220608085007.10679-1-fengchengwen@huawei.com> References: <20220608085007.10679-1-fengchengwen@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500024.china.huawei.com (7.185.36.10) X-CFilter-Loop: Reflected X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org If no 
memcopy request is completed, the ring_idx of the last completed operation needs to be returned by the last_idx parameter. This patch fixes it. Fixes: 05d5fc66a269 ("dma/skeleton: introduce skeleton driver") Cc: stable@dpdk.org Signed-off-by: Chengwen Feng --- drivers/dma/skeleton/skeleton_dmadev.c | 17 ++++++++++++++--- drivers/dma/skeleton/skeleton_dmadev.h | 1 + 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c index 81cbdd286e..6b0bb14e2c 100644 --- a/drivers/dma/skeleton/skeleton_dmadev.c +++ b/drivers/dma/skeleton/skeleton_dmadev.c @@ -118,6 +118,7 @@ skeldma_start(struct rte_dma_dev *dev) fflush_ring(hw, hw->desc_running); fflush_ring(hw, hw->desc_completed); hw->ridx = 0; + hw->last_ridx = hw->ridx - 1; hw->submitted_count = 0; hw->zero_req_count = 0; hw->completed_count = 0; @@ -322,9 +323,11 @@ skeldma_dump(const struct rte_dma_dev *dev, FILE *f) GET_RING_COUNT(hw->desc_completed)); (void)fprintf(f, " next_ring_idx: %u\n" + " last_ring_idx: %u\n" " submitted_count: %" PRIu64 "\n" " completed_count: %" PRIu64 "\n", - hw->ridx, hw->submitted_count, hw->completed_count); + hw->ridx, hw->last_ridx, + hw->submitted_count, hw->completed_count); return 0; } @@ -398,11 +401,15 @@ skeldma_completed(void *dev_private, count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed)); while (index < count) { (void)rte_ring_dequeue(hw->desc_completed, (void **)&desc); - if (index == count - 1) + if (index == count - 1) { + hw->last_ridx = desc->ridx; *last_idx = desc->ridx; + } index++; (void)rte_ring_enqueue(hw->desc_empty, (void *)desc); } + if (unlikely(count == 0)) + *last_idx = hw->last_ridx; return count; } @@ -422,11 +429,15 @@ skeldma_completed_status(void *dev_private, count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed)); while (index < count) { (void)rte_ring_dequeue(hw->desc_completed, (void **)&desc); - if (index == count - 1) + if (index == count - 1) { + hw->last_ridx = 
desc->ridx; *last_idx = desc->ridx; + } status[index++] = RTE_DMA_STATUS_SUCCESSFUL; (void)rte_ring_enqueue(hw->desc_empty, (void *)desc); } + if (unlikely(count == 0)) + *last_idx = hw->last_ridx; return count; } diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h index 91eb5460fc..6f89400480 100644 --- a/drivers/dma/skeleton/skeleton_dmadev.h +++ b/drivers/dma/skeleton/skeleton_dmadev.h @@ -50,6 +50,7 @@ struct skeldma_hw { /* Cache delimiter for dataplane API's operation data */ char cache1 __rte_cache_aligned; uint16_t ridx; /* ring idx */ + uint16_t last_ridx; uint64_t submitted_count; /* Cache delimiter for cpucopy thread's operation data */ From patchwork Wed Jun 8 08:50:06 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: fengchengwen X-Patchwork-Id: 112534 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0F960A0548; Wed, 8 Jun 2022 10:56:35 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id ACD1941614; Wed, 8 Jun 2022 10:56:31 +0200 (CEST) Received: from szxga02-in.huawei.com (szxga02-in.huawei.com [45.249.212.188]) by mails.dpdk.org (Postfix) with ESMTP id D756241132 for ; Wed, 8 Jun 2022 10:56:29 +0200 (CEST) Received: from dggpeml500024.china.huawei.com (unknown [172.30.72.56]) by szxga02-in.huawei.com (SkyGuard) with ESMTP id 4LJ1GM35ZczRhjb; Wed, 8 Jun 2022 16:53:15 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by dggpeml500024.china.huawei.com (7.185.36.10) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Wed, 8 Jun 2022 16:56:27 +0800 From: Chengwen Feng To: , , CC: , , Subject: [PATCH 2/3] test/dma: reset last-idx 
before invoking DMA completed ops Date: Wed, 8 Jun 2022 16:50:06 +0800 Message-ID: <20220608085007.10679-3-fengchengwen@huawei.com> X-Mailer: git-send-email 2.33.0 In-Reply-To: <20220608085007.10679-1-fengchengwen@huawei.com> References: <20220608085007.10679-1-fengchengwen@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500024.china.huawei.com (7.185.36.10) X-CFilter-Loop: Reflected X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Some DMA PMD may not update last-idx when no DMA completed, the previous patch [1] cannot actually detect this problem. This patch resets last-idx before invoking DMA completed ops to fix it. [1] test/dma: check index when no DMA completed Signed-off-by: Chengwen Feng --- app/test/test_dmadev.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c index a7651a486f..9e8e101f40 100644 --- a/app/test/test_dmadev.c +++ b/app/test/test_dmadev.c @@ -209,6 +209,7 @@ test_enqueue_copies(int16_t dev_id, uint16_t vchan) dst_data[i], src_data[i]); /* now check completion works */ + id = ~id; if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1) ERR_RETURN("Error with rte_dma_completed\n"); @@ -217,6 +218,7 @@ test_enqueue_copies(int16_t dev_id, uint16_t vchan) id, id_count); /* check for completed and id when no job done */ + id = ~id; if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0) ERR_RETURN("Error with rte_dma_completed when no job done\n"); if (id != id_count) @@ -224,6 +226,7 @@ test_enqueue_copies(int16_t dev_id, uint16_t vchan) id, id_count); /* check for completed_status and id when no job done */ + id = ~id; if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0) ERR_RETURN("Error with rte_dma_completed_status when no job 
done\n"); if (id != id_count) From patchwork Wed Jun 8 08:50:07 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: fengchengwen X-Patchwork-Id: 112536 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 99F20A0548; Wed, 8 Jun 2022 10:56:44 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 7F2984280E; Wed, 8 Jun 2022 10:56:33 +0200 (CEST) Received: from szxga03-in.huawei.com (szxga03-in.huawei.com [45.249.212.189]) by mails.dpdk.org (Postfix) with ESMTP id CE2764069C for ; Wed, 8 Jun 2022 10:56:29 +0200 (CEST) Received: from dggpeml500024.china.huawei.com (unknown [172.30.72.57]) by szxga03-in.huawei.com (SkyGuard) with ESMTP id 4LJ1Kj2b1rz17HJL; Wed, 8 Jun 2022 16:56:09 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by dggpeml500024.china.huawei.com (7.185.36.10) with Microsoft SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2375.24; Wed, 8 Jun 2022 16:56:27 +0800 From: Chengwen Feng To: , , CC: , , Subject: [PATCH 3/3] dma/skeleton: support multiple instances Date: Wed, 8 Jun 2022 16:50:07 +0800 Message-ID: <20220608085007.10679-4-fengchengwen@huawei.com> X-Mailer: git-send-email 2.33.0 In-Reply-To: <20220608085007.10679-1-fengchengwen@huawei.com> References: <20220608085007.10679-1-fengchengwen@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-ClientProxiedBy: dggems701-chm.china.huawei.com (10.3.19.178) To dggpeml500024.china.huawei.com (7.185.36.10) X-CFilter-Loop: Reflected X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org 
From: Sivaprasad Tummala dpdk app can support multiple hardware dma instances. with dma skeleton, only a single instance can be configured. This patch supports multiple driver instances per device. Signed-off-by: Sivaprasad Tummala Tested-by: Vipin Varghese Reviewed-by: Chengwen Feng Tested-by: Chengwen Feng --- drivers/dma/skeleton/skeleton_dmadev.c | 50 +++++++++++--------------- 1 file changed, 21 insertions(+), 29 deletions(-) diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c index 6b0bb14e2c..82b4661323 100644 --- a/drivers/dma/skeleton/skeleton_dmadev.c +++ b/drivers/dma/skeleton/skeleton_dmadev.c @@ -22,9 +22,6 @@ RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO); rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \ __func__, ##args) -/* Count of instances, currently only 1 is supported. */ -static uint16_t skeldma_count; - static int skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info, uint32_t info_sz) @@ -100,6 +97,7 @@ static int skeldma_start(struct rte_dma_dev *dev) { struct skeldma_hw *hw = dev->data->dev_private; + char name[RTE_MAX_THREAD_NAME_LEN]; rte_cpuset_t cpuset; int ret; @@ -126,7 +124,8 @@ skeldma_start(struct rte_dma_dev *dev) rte_mb(); - ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL, + snprintf(name, sizeof(name), "dma_skel_%d", dev->data->dev_id); + ret = rte_ctrl_thread_create(&hw->thread, name, NULL, cpucopy_thread, dev); if (ret) { SKELDMA_LOG(ERR, "Start cpucopy thread fail!"); @@ -161,8 +160,9 @@ skeldma_stop(struct rte_dma_dev *dev) } static int -vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc) +vchan_setup(struct skeldma_hw *hw, int16_t dev_id, uint16_t nb_desc) { + char name[RTE_RING_NAMESIZE]; struct skeldma_desc *desc; struct rte_ring *empty; struct rte_ring *pending; @@ -170,22 +170,25 @@ vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc) struct rte_ring *completed; uint16_t i; - desc = rte_zmalloc_socket("dma_skeleton_desc", 
- nb_desc * sizeof(struct skeldma_desc), + desc = rte_zmalloc_socket(NULL, nb_desc * sizeof(struct skeldma_desc), RTE_CACHE_LINE_SIZE, hw->socket_id); if (desc == NULL) { SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!"); return -ENOMEM; } - empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc, - hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); - pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc, - hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); - running = rte_ring_create("dma_skeleton_desc_running", nb_desc, - hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); - completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc, - hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ); + snprintf(name, RTE_RING_NAMESIZE, "dma_skel_desc_empty_%d", dev_id); + empty = rte_ring_create(name, nb_desc, hw->socket_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); + snprintf(name, RTE_RING_NAMESIZE, "dma_skel_desc_pend_%d", dev_id); + pending = rte_ring_create(name, nb_desc, hw->socket_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); + snprintf(name, RTE_RING_NAMESIZE, "dma_skel_desc_run_%d", dev_id); + running = rte_ring_create(name, nb_desc, hw->socket_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); + snprintf(name, RTE_RING_NAMESIZE, "dma_skel_desc_comp_%d", dev_id); + completed = rte_ring_create(name, nb_desc, hw->socket_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); if (empty == NULL || pending == NULL || running == NULL || completed == NULL) { SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!"); @@ -255,7 +258,7 @@ skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan, } vchan_release(hw); - return vchan_setup(hw, conf->nb_desc); + return vchan_setup(hw, dev->data->dev_id, conf->nb_desc); } static int @@ -559,21 +562,12 @@ skeldma_probe(struct rte_vdev_device *vdev) return -EINVAL; } - /* More than one instance is not supported */ - if (skeldma_count > 0) { - SKELDMA_LOG(ERR, "Multiple instance not supported for %s", - name); - return -EINVAL; - } - skeldma_parse_vdev_args(vdev, &lcore_id); 
ret = skeldma_create(name, vdev, lcore_id); - if (ret >= 0) { + if (ret >= 0) SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d", name, lcore_id); - skeldma_count = 1; - } return ret < 0 ? ret : 0; } @@ -589,10 +583,8 @@ skeldma_remove(struct rte_vdev_device *vdev) return -1; ret = skeldma_destroy(name); - if (!ret) { - skeldma_count = 0; + if (!ret) SKELDMA_LOG(INFO, "Remove %s dmadev", name); - } return ret; }