From patchwork Mon Apr 26 09:52:58 2021
X-Patchwork-Submitter: Bruce Richardson
X-Patchwork-Id: 92162
X-Patchwork-Delegate: thomas@monjalon.net
From: Bruce Richardson
To: dev@dpdk.org
Cc: kevin.laatz@intel.com, jiayu.hu@intel.com, Bruce Richardson
Date: Mon, 26 Apr 2021 10:52:58 +0100
Message-Id: <20210426095259.225354-12-bruce.richardson@intel.com>
In-Reply-To: <20210426095259.225354-1-bruce.richardson@intel.com>
References: <20210318182042.43658-2-bruce.richardson@intel.com>
 <20210426095259.225354-1-bruce.richardson@intel.com>
Subject: [dpdk-dev] [PATCH v2 11/12] raw/ioat: add API to query remaining
 ring space

From: Kevin Laatz

Add a new API to query remaining descriptor ring capacity. This API is
useful, for example, when an application needs to enqueue a fragmented
packet and wants to ensure that all segments of the packet will be
enqueued together.
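As an illustration of the intended usage, a possible application-side pattern
is sketched below. It is only a sketch: copy_all_segments(), dst_base and the
handle values passed to the enqueue call are hypothetical and not part of this
patch; only rte_ioat_burst_capacity() (added here) and the existing
rte_ioat_rawdev enqueue/doorbell calls are real API.

#include <errno.h>
#include <rte_mbuf.h>
#include <rte_ioat_rawdev.h>

/* Sketch: enqueue a chained mbuf only if all of its segments fit in the
 * descriptor ring, so the fragments are never split across a full ring.
 */
static int
copy_all_segments(int dev_id, struct rte_mbuf *m, phys_addr_t dst_base)
{
	phys_addr_t dst = dst_base;

	/* not enough room for every segment; caller can retry after
	 * gathering completions with rte_ioat_completed_ops()
	 */
	if (rte_ioat_burst_capacity(dev_id) < m->nb_segs)
		return -ENOSPC;

	for (; m != NULL; m = m->next) {
		if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(m), dst,
				rte_pktmbuf_data_len(m),
				(uintptr_t)m, 0) != 1)
			return -EIO; /* should not happen after the check */
		dst += rte_pktmbuf_data_len(m);
	}
	rte_ioat_perform_ops(dev_id); /* ring the doorbell for the batch */
	return 0;
}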
Signed-off-by: Kevin Laatz
Signed-off-by: Bruce Richardson
---
 drivers/raw/ioat/ioat_rawdev_test.c    | 138 ++++++++++++++++++++++++-
 drivers/raw/ioat/rte_idxd_rawdev_fns.h |  22 ++++
 drivers/raw/ioat/rte_ioat_rawdev_fns.h |  24 +++++
 3 files changed, 183 insertions(+), 1 deletion(-)

diff --git a/drivers/raw/ioat/ioat_rawdev_test.c b/drivers/raw/ioat/ioat_rawdev_test.c
index 51eebe152f..5f75c6ff69 100644
--- a/drivers/raw/ioat/ioat_rawdev_test.c
+++ b/drivers/raw/ioat/ioat_rawdev_test.c
@@ -277,6 +277,138 @@ test_enqueue_fill(int dev_id)
 	return 0;
 }
 
+static inline void
+reset_ring_ptrs(int dev_id)
+{
+	enum rte_ioat_dev_type *type =
+			(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+
+	if (*type == RTE_IDXD_DEV) {
+		struct rte_idxd_rawdev *idxd =
+				(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
+
+		idxd->batch_start = 0;
+		idxd->hdls_read = 0;
+	} else {
+		struct rte_ioat_rawdev *ioat =
+				(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
+
+		ioat->next_read = 0;
+		ioat->next_write = 0;
+	}
+}
+
+static int
+test_burst_capacity(int dev_id)
+{
+#define BURST_SIZE 64
+	struct rte_mbuf *src, *dst;
+	unsigned int bursts_enqueued = 0;
+	unsigned int i;
+	unsigned int length = 1024;
+	uintptr_t completions[BURST_SIZE];
+
+	/* Ring pointer reset needed for checking test results */
+	reset_ring_ptrs(dev_id);
+
+	const unsigned int ring_space = rte_ioat_burst_capacity(dev_id);
+	const unsigned int expected_bursts = ring_space / BURST_SIZE;
+	src = rte_pktmbuf_alloc(pool);
+	dst = rte_pktmbuf_alloc(pool);
+
+	/* Enqueue bursts until they won't fit */
+	while (rte_ioat_burst_capacity(dev_id) >= BURST_SIZE) {
+		for (i = 0; i < BURST_SIZE; i++) {
+			if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
+					rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
+				PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
+				return -1;
+			}
+		}
+		bursts_enqueued++;
+		if ((bursts_enqueued & 1) == 1) /* hit doorbell every second burst */
+			rte_ioat_perform_ops(dev_id);
+	}
+	rte_ioat_perform_ops(dev_id);
+
+	/* check the number of bursts enqueued was as expected */
+	if (bursts_enqueued != expected_bursts) {
+		PRINT_ERR("Capacity test failed, enqueued %u not %u bursts\n",
+				bursts_enqueued, expected_bursts);
+		return -1;
+	}
+
+	/* check the space is now as expected */
+	if (rte_ioat_burst_capacity(dev_id) != ring_space - bursts_enqueued * BURST_SIZE) {
+		printf("Capacity error. Expected %u free slots, got %u\n",
+				ring_space - bursts_enqueued * BURST_SIZE,
+				rte_ioat_burst_capacity(dev_id));
+		return -1;
+	}
+
+	/* do cleanup before next tests */
+	usleep(100);
+	for (i = 0; i < bursts_enqueued; i++) {
+		if (rte_ioat_completed_ops(dev_id, BURST_SIZE, completions,
+				completions) != BURST_SIZE) {
+			PRINT_ERR("error with completions\n");
+			return -1;
+		}
+	}
+
+	/* Since we reset the ring pointers before the previous test, and we enqueued
+	 * the maximum number of bursts, enqueuing one more burst will enable us to test
+	 * the wrap-around handling in rte_ioat_burst_capacity().
+	 */
+
+	/* Verify the descriptor ring is empty before we test */
+	if (rte_ioat_burst_capacity(dev_id) != ring_space) {
+		PRINT_ERR("Error, ring should be empty\n");
+		return -1;
+	}
+
+	/* Enqueue one burst of mbufs & verify the expected space is taken */
+	for (i = 0; i < BURST_SIZE; i++) {
+		if (rte_ioat_enqueue_copy(dev_id, rte_pktmbuf_iova(src),
+				rte_pktmbuf_iova(dst), length, 0, 0) != 1) {
+			PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
+			return -1;
+		}
+	}
+
+	/* Perform the copy before checking the capacity again so that the write
+	 * pointer in the descriptor ring is wrapped/masked
+	 */
+	rte_ioat_perform_ops(dev_id);
+	usleep(100);
+
+	/* This check will confirm both that the correct amount of space is taken
+	 * in the ring, and that the ring wrap-around handling is correct.
+	 */
+	if (rte_ioat_burst_capacity(dev_id) != ring_space - BURST_SIZE) {
+		PRINT_ERR("Error, space available not as expected\n");
+		return -1;
+	}
+
+	/* Now we gather completions to update the read pointer */
+	if (rte_ioat_completed_ops(dev_id, BURST_SIZE, completions, completions) != BURST_SIZE) {
+		PRINT_ERR("Error with completions\n");
+		return -1;
+	}
+
+	/* After gathering the completions, the descriptor ring should be empty */
+	if (rte_ioat_burst_capacity(dev_id) != ring_space) {
+		PRINT_ERR("Error, space available not as expected\n");
+		return -1;
+	}
+
+	rte_pktmbuf_free(src);
+	rte_pktmbuf_free(dst);
+
+	return 0;
+}
+
 int
 ioat_rawdev_test(uint16_t dev_id)
 {
@@ -321,7 +453,7 @@ ioat_rawdev_test(uint16_t dev_id)
 	}
 
 	pool = rte_pktmbuf_pool_create("TEST_IOAT_POOL",
-			256, /* n == num elements */
+			p.ring_size * 2, /* n == num elements */
 			32,  /* cache size */
 			0,   /* priv size */
 			2048, /* data room size */
@@ -385,6 +517,10 @@ ioat_rawdev_test(uint16_t dev_id)
 	}
 	printf("\n");
 
+	printf("Running Burst Capacity Test\n");
+	if (test_burst_capacity(dev_id) != 0)
+		goto err;
+
 	rte_rawdev_stop(dev_id);
 	if (rte_rawdev_xstats_reset(dev_id, NULL, 0) != 0) {
 		PRINT_ERR("Error resetting xstat values\n");
diff --git a/drivers/raw/ioat/rte_idxd_rawdev_fns.h b/drivers/raw/ioat/rte_idxd_rawdev_fns.h
index 4c49d2b84a..41f0ad6e99 100644
--- a/drivers/raw/ioat/rte_idxd_rawdev_fns.h
+++ b/drivers/raw/ioat/rte_idxd_rawdev_fns.h
@@ -106,6 +106,28 @@ struct rte_idxd_rawdev {
 	struct rte_idxd_user_hdl *hdl_ring;
 };
 
+static __rte_always_inline uint16_t
+__idxd_burst_capacity(int dev_id)
+{
+	struct rte_idxd_rawdev *idxd =
+			(struct rte_idxd_rawdev *)rte_rawdevs[dev_id].dev_private;
+	uint16_t write_idx = idxd->batch_start + idxd->batch_size;
+	uint16_t used_space;
+
+	/* Check for space in the batch ring */
+	if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
+			idxd->batch_idx_write + 1 == idxd->batch_idx_read)
+		return 0;
+
+	/* for descriptors, check for wrap-around on write but not read */
+	if (idxd->hdls_read > write_idx)
+		write_idx += idxd->desc_ring_mask + 1;
+	used_space = write_idx - idxd->hdls_read;
+
+	/* Return amount of free space in the descriptor ring */
+	return idxd->desc_ring_mask - used_space;
+}
+
 static __rte_always_inline rte_iova_t
 __desc_idx_to_iova(struct rte_idxd_rawdev *idxd, uint16_t n)
 {
diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
index 598852b1fa..92ccdd03b9 100644
--- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h
+++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h
@@ -100,6 +100,19 @@ struct rte_ioat_rawdev {
 #define RTE_IOAT_CHANSTS_HALTED		0x3
 #define RTE_IOAT_CHANSTS_ARMED		0x4
 
+static __rte_always_inline uint16_t
+__ioat_burst_capacity(int dev_id)
+{
+	struct rte_ioat_rawdev *ioat =
+			(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
+	unsigned short size = ioat->ring_size - 1;
+	unsigned short read = ioat->next_read;
+	unsigned short write = ioat->next_write;
+	unsigned short space = size - (write - read);
+
+	return space;
+}
+
 static __rte_always_inline int
 __ioat_write_desc(int dev_id, uint32_t op, uint64_t src, phys_addr_t dst,
 		unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl)
@@ -260,6 +273,17 @@ __ioat_completed_ops(int dev_id, uint8_t max_copies,
 	return count;
 }
 
+static inline uint16_t
+rte_ioat_burst_capacity(int dev_id)
+{
+	enum rte_ioat_dev_type *type =
+		(enum rte_ioat_dev_type *)rte_rawdevs[dev_id].dev_private;
+	if (*type == RTE_IDXD_DEV)
+		return __idxd_burst_capacity(dev_id);
+	else
+		return __ioat_burst_capacity(dev_id);
+}
+
 static inline int
 rte_ioat_enqueue_fill(int dev_id, uint64_t pattern, phys_addr_t dst,
 		unsigned int len, uintptr_t dst_hdl)
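For reference, the free-space calculation in __ioat_burst_capacity() relies on
the unsigned wrap-around of the free-running 16-bit next_read/next_write
indexes, with one ring slot always kept unreported. The standalone sketch
below reproduces the same arithmetic outside the driver; the index values in
main() are made up purely for illustration.

#include <stdio.h>

/* Same arithmetic as __ioat_burst_capacity(): one slot is reserved
 * (size = ring_size - 1), and the unsigned subtraction (write - read)
 * stays correct even after the 16-bit indexes wrap past 65535.
 */
static unsigned short
ring_free_space(unsigned short ring_size, unsigned short read,
		unsigned short write)
{
	unsigned short size = ring_size - 1;
	unsigned short used = write - read;

	return size - used;
}

int main(void)
{
	/* no wrap: 20 - 10 = 10 descriptors in use */
	printf("%u\n", ring_free_space(4096, 10, 20));    /* prints 4085 */
	/* write index has wrapped past 65535: still only 16 in use */
	printf("%u\n", ring_free_space(4096, 65530, 10)); /* prints 4079 */
	return 0;
}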